code stringlengths 3.13k 58.3k | apis list | extract_api stringlengths 499 39.4k |
|---|---|---|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import abc
import math
from collections import defaultdict, deque
from dataclasses import dataclass
from enum import Enum
from typing import (
Any,
Callable,
cast,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import torch
import torch.distributed as dist
import torch.nn as nn
from torchmetrics import Metric
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import (
compose_metric_key,
MetricNameBase,
MetricNamespaceBase,
MetricPrefix,
)
RecModelOutput = Union[torch.Tensor, Dict[str, torch.Tensor]]
@dataclass(frozen=True)
class MetricComputationReport:
name: MetricNameBase
metric_prefix: MetricPrefix
value: torch.Tensor
DefaultValueT = TypeVar("DefaultValueT")
ComputeIterType = Iterator[
Tuple[RecTaskInfo, MetricNameBase, torch.Tensor, MetricPrefix]
]
MAX_BUFFER_COUNT = 1000
class RecMetricException(Exception):
pass
class WindowBuffer:
def __init__(self, max_size: int, max_buffer_count: int) -> None:
self._max_size: int = max_size
self._max_buffer_count: int = max_buffer_count
self._buffers: Deque[torch.Tensor] = deque(maxlen=max_buffer_count)
self._used_sizes: Deque[int] = deque(maxlen=max_buffer_count)
self._window_used_size = 0
def aggregate_state(
self, window_state: torch.Tensor, curr_state: torch.Tensor, size: int
) -> None:
def remove(window_state: torch.Tensor) -> None:
window_state -= self._buffers.popleft()
self._window_used_size -= self._used_sizes.popleft()
if len(self._buffers) == self._buffers.maxlen:
remove(window_state)
self._buffers.append(curr_state)
self._used_sizes.append(size)
window_state += curr_state
self._window_used_size += size
while self._window_used_size > self._max_size:
remove(window_state)
@property
def buffers(self) -> Deque[torch.Tensor]:
return self._buffers
class RecMetricComputation(Metric, abc.ABC):
r"""The internal computation class template.
A metric implementation should overwrite update() and compute(). These two
APIs focuses the actual mathematical meaning of the metric, without the
detail knowledge of model output and task information.
Args:
my_rank (int): the rank of this trainer.
batch_size (int): batch size used by this trainer.
n_tasks (int): the number tasks this communication obj
will have to compute.
window_size (int): the window size for the window metric.
compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
is necessary if non-leader rank want to consum metrics result.
process_group (Optional[ProcessGroup]): the process group used for the
communication. Will use the default process group if not specified.
"""
_batch_window_buffers: Optional[Dict[str, WindowBuffer]]
def __init__(
self,
my_rank: int,
batch_size: int,
n_tasks: int,
window_size: int,
compute_on_all_ranks: bool = False,
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
process_group: Optional[dist.ProcessGroup] = None,
*args: Any,
**kwargs: Any,
) -> None:
super().__init__(process_group=process_group, *args, **kwargs)
self._my_rank = my_rank
self._n_tasks = n_tasks
self._batch_size = batch_size
self._window_size = window_size
self._compute_on_all_ranks = compute_on_all_ranks
if self._window_size > 0:
self._batch_window_buffers = {}
else:
self._batch_window_buffers = None
self._add_state(
"has_valid_update",
torch.zeros(self._n_tasks, dtype=torch.uint8),
add_window_state=False,
dist_reduce_fx=lambda x: torch.any(x, dim=0).byte(),
persistent=True,
)
@staticmethod
def get_window_state_name(state_name: str) -> str:
return f"window_{state_name}"
def get_window_state(self, state_name: str) -> torch.Tensor:
return getattr(self, self.get_window_state_name(state_name))
def _add_state(
self, name: str, default: DefaultValueT, add_window_state: bool, **kwargs: Any
) -> None:
# pyre-fixme[6]: Expected `Union[List[typing.Any], torch.Tensor]` for 2nd
# param but got `DefaultValueT`.
super().add_state(name, default, **kwargs)
if add_window_state:
if self._batch_window_buffers is None:
raise RuntimeError(
"Users is adding a window state while window metric is disabled."
)
kwargs["persistent"] = False
window_state_name = self.get_window_state_name(name)
# Avoid pyre error
assert isinstance(default, torch.Tensor)
super().add_state(window_state_name, default.detach().clone(), **kwargs)
self._batch_window_buffers[window_state_name] = WindowBuffer(
max_size=self._window_size,
max_buffer_count=MAX_BUFFER_COUNT,
)
def _aggregate_window_state(
self, state_name: str, state: torch.Tensor, num_samples: int
) -> None:
if self._batch_window_buffers is None:
raise RuntimeError(
"Users is adding a window state while window metric is disabled."
)
window_state_name = self.get_window_state_name(state_name)
assert self._batch_window_buffers is not None
self._batch_window_buffers[window_state_name].aggregate_state(
getattr(self, window_state_name), curr_state=state, size=num_samples
)
@abc.abstractmethod
# pyre-fixme[14]: `update` overrides method defined in `Metric` inconsistently.
def update(
self,
*,
predictions: Optional[torch.Tensor],
labels: torch.Tensor,
weights: Optional[torch.Tensor],
) -> None: # pragma: no cover
pass
@abc.abstractmethod
def _compute(self) -> List[MetricComputationReport]: # pragma: no cover
pass
def pre_compute(self) -> None:
r"""If a metric need to do some work before `compute()`, the metric
has to override this `pre_compute()`. One possible usage is to do
some pre-processing of the local state before `compute()` as TorchMetric
wraps `RecMetricComputation.compute()` and will do the global aggregation
before `RecMetricComputation.compute()` is called.
"""
return
def compute(self) -> List[MetricComputationReport]:
if self._my_rank == 0 or self._compute_on_all_ranks:
return self._compute()
else:
return []
def local_compute(self) -> List[MetricComputationReport]:
return self._compute()
class RecMetric(nn.Module, abc.ABC):
r"""The main class template to implement a recommendation metric.
This class contains the recommendation tasks information (RecTaskInfo) and
the actual computation object (RecMetricComputation). RecMetric processes
all the information related to RecTaskInfo and models and pass the required
signals to the computation object, allowing the implementation of
RecMetricComputation to focus on the mathemetical meaning.
A new metric that inherit RecMetric must override the following attributes
in its own __init__(): `_namespace` and `_metrics_computations`. No other
methods should be overridden.
Args:
world_size (int): the number of trainers.
my_rank (int): the rank of this trainer.
batch_size (int): batch size used by this trainer.
tasks (List[RecTaskInfo]): the information of the model tasks.
compute_mode (RecComputeMode): the computation mode. See RecComputeMode.
window_size (int): the window size for the window metric.
fused_update_limit (int): the maximum number of updates to be fused.
compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
is necessary if non-leader rank want to consume global metrics result.
process_group (Optional[ProcessGroup]): the process group used for the
communication. Will use the default process group if not specified.
Call Args:
Not supported.
Returns:
Not supported.
Example::
ne = NEMetric(
world_size=4,
my_rank=0,
batch_size=128,
tasks=DefaultTaskInfo,
)
"""
_computation_class: Type[RecMetricComputation]
_namespace: MetricNamespaceBase
_metrics_computations: nn.ModuleList
_tasks: List[RecTaskInfo]
_window_size: int
_tasks_iter: Callable[[str], ComputeIterType]
_update_buffers: Dict[str, List[RecModelOutput]]
_default_weights: Dict[Tuple[int, ...], torch.Tensor]
PREDICTIONS: str = "predictions"
LABELS: str = "labels"
WEIGHTS: str = "weights"
def __init__(
self,
world_size: int,
my_rank: int,
batch_size: int,
tasks: List[RecTaskInfo],
compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION,
window_size: int = 100,
fused_update_limit: int = 0,
compute_on_all_ranks: bool = False,
process_group: Optional[dist.ProcessGroup] = None,
**kwargs: Any,
) -> None:
# TODO(stellaya): consider to inherit from TorchMetrics.Metric or
# TorchMetrics.MetricCollection.
if (
compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION
and fused_update_limit > 0
):
raise ValueError(
"The fused tasks computation and the fused update cannot be set at the same time"
)
super().__init__()
self._world_size = world_size
self._my_rank = my_rank
self._window_size = math.ceil(window_size / world_size)
self._batch_size = batch_size
self._tasks = tasks
self._compute_mode = compute_mode
self._fused_update_limit = fused_update_limit
self._default_weights = {}
self._update_buffers = {
self.PREDICTIONS: [],
self.LABELS: [],
self.WEIGHTS: [],
}
if compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
n_metrics = 1
task_per_metric = len(self._tasks)
self._tasks_iter = self._fused_tasks_iter
else:
n_metrics = len(self._tasks)
task_per_metric = 1
self._tasks_iter = self._unfused_tasks_iter
self._metrics_computations: nn.ModuleList = nn.ModuleList(
[
# This Pyre error seems to be Pyre's bug as it can be inferred by mypy
# according to https://github.com/python/mypy/issues/3048.
# pyre-fixme[45]: Cannot instantiate abstract class `RecMetricCoputation`.
self._computation_class(
my_rank,
batch_size,
task_per_metric,
self._window_size,
compute_on_all_ranks,
process_group,
**kwargs,
)
for _ in range(n_metrics)
]
)
# TODO(stellaya): Refactor the _[fused, unfused]_tasks_iter methods and replace the
# compute_scope str input with an enum
def _fused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
assert len(self._metrics_computations) == 1
self._metrics_computations[0].pre_compute()
for metric_report in getattr(
self._metrics_computations[0], compute_scope + "compute"
)():
for task, metric_value, has_valid_update in zip(
self._tasks,
metric_report.value,
self._metrics_computations[0].has_valid_update,
):
# The attribute has_valid_update is a tensor whose length equals to the
# number of tasks. Each value in it is corresponding to whether a task
# has valid updates or not.
# If for a task there's no valid updates, the calculated metric_value
# will be meaningless, so we mask it with the default value, i.e. 0.
valid_metric_value = (
metric_value
if has_valid_update > 0
else torch.zeros_like(metric_value)
)
yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value
def _unfused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
for task, metric_computation in zip(self._tasks, self._metrics_computations):
metric_computation.pre_compute()
for metric_report in getattr(
metric_computation, compute_scope + "compute"
)():
# The attribute has_valid_update is a tensor with only 1 value
# corresponding to whether the task has valid updates or not.
# If there's no valid update, the calculated metric_report.value
# will be meaningless, so we mask it with the default value, i.e. 0.
valid_metric_value = (
metric_report.value
if metric_computation.has_valid_update[0] > 0
else torch.zeros_like(metric_report.value)
)
yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value
def _fuse_update_buffers(self) -> Dict[str, RecModelOutput]:
def fuse(outputs: List[RecModelOutput]) -> RecModelOutput:
assert len(outputs) > 0
if isinstance(outputs[0], torch.Tensor):
return torch.cat(cast(List[torch.Tensor], outputs))
else:
task_outputs: Dict[str, List[torch.Tensor]] = defaultdict(list)
for output in outputs:
assert isinstance(output, dict)
for task_name, tensor in output.items():
task_outputs[task_name].append(tensor)
return {
name: torch.cat(tensors) for name, tensors in task_outputs.items()
}
ret: Dict[str, RecModelOutput] = {}
for key, output_list in self._update_buffers.items():
if len(output_list) > 0:
ret[key] = fuse(output_list)
else:
assert key == self.WEIGHTS
output_list.clear()
return ret
def _check_fused_update(self, force: bool) -> None:
if self._fused_update_limit <= 0:
return
if len(self._update_buffers[self.PREDICTIONS]) == 0:
return
if (
not force
and len(self._update_buffers[self.PREDICTIONS]) < self._fused_update_limit
):
return
fused_arguments = self._fuse_update_buffers()
self._update(
predictions=fused_arguments[self.PREDICTIONS],
labels=fused_arguments[self.LABELS],
weights=fused_arguments.get(self.WEIGHTS, None),
)
def _create_default_weights(self, predictions: torch.Tensor) -> torch.Tensor:
weights = self._default_weights.get(predictions.size(), None)
if weights is None:
weights = torch.ones_like(predictions)
self._default_weights[predictions.size()] = weights
return weights
def _check_nonempty_weights(self, weights: torch.Tensor) -> torch.Tensor:
return torch.gt(torch.count_nonzero(weights, dim=-1), 0)
def _update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: Optional[RecModelOutput],
) -> None:
with torch.no_grad():
if self._compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
assert isinstance(predictions, torch.Tensor)
# Reshape the predictions to size([len(self._tasks), self._batch_size])
predictions = predictions.view(-1, self._batch_size)
assert isinstance(labels, torch.Tensor)
labels = labels.view(-1, self._batch_size)
if weights is None:
weights = self._create_default_weights(predictions)
else:
assert isinstance(weights, torch.Tensor)
weights = weights.view(-1, self._batch_size)
# has_valid_weights is a tensor of bool whose length equals to the number
# of tasks. Each value in it is corresponding to whether the weights
# are valid, i.e. are set to non-zero values for that task in this update.
# If has_valid_weights are Falses for all the tasks, we just ignore this
# update.
has_valid_weights = self._check_nonempty_weights(weights)
if torch.any(has_valid_weights):
self._metrics_computations[0].update(
predictions=predictions, labels=labels, weights=weights
)
self._metrics_computations[0].has_valid_update.logical_or_(
has_valid_weights
).byte()
else:
for task, metric_ in zip(self._tasks, self._metrics_computations):
if task.name not in predictions:
continue
if torch.numel(predictions[task.name]) == 0:
assert torch.numel(labels[task.name]) == 0
assert weights is None or torch.numel(weights[task.name]) == 0
continue
# Reshape the predictions to size([1, self._batch_size])
task_predictions = predictions[task.name].view(1, -1)
task_labels = labels[task.name].view(1, -1)
if weights is None:
task_weights = self._create_default_weights(task_predictions)
else:
task_weights = weights[task.name].view(1, -1)
# has_valid_weights is a tensor with only 1 value corresponding to
# whether the weights are valid, i.e. are set to non-zero values for
# the task in this update.
# If has_valid_update[0] is False, we just ignore this update.
has_valid_weights = self._check_nonempty_weights(task_weights)
if has_valid_weights[0]:
metric_.update(
predictions=task_predictions,
labels=task_labels,
weights=task_weights,
)
metric_.has_valid_update.logical_or_(has_valid_weights).byte()
def update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: Optional[RecModelOutput],
) -> None:
if self._fused_update_limit > 0:
self._update_buffers[self.PREDICTIONS].append(predictions)
self._update_buffers[self.LABELS].append(labels)
if weights is not None:
self._update_buffers[self.WEIGHTS].append(weights)
self._check_fused_update(force=False)
else:
self._update(predictions=predictions, labels=labels, weights=weights)
# The implementation of compute is very similar to local_compute, but compute overwrites
# the abstract method compute in torchmetrics.Metric, which is wrapped by _wrap_compute
def compute(self) -> Dict[str, torch.Tensor]:
self._check_fused_update(force=True)
ret = {}
for task, metric_name, metric_value, prefix in self._tasks_iter(""):
metric_key = compose_metric_key(
self._namespace, task.name, metric_name, prefix
)
ret[metric_key] = metric_value
return ret
def local_compute(self) -> Dict[str, torch.Tensor]:
self._check_fused_update(force=True)
ret = {}
for task, metric_name, metric_value, prefix in self._tasks_iter("local_"):
metric_key = compose_metric_key(
self._namespace, task.name, metric_name, prefix
)
ret[metric_key] = metric_value
return ret
def sync(self) -> None:
for computation in self._metrics_computations:
computation.sync()
def unsync(self) -> None:
for computation in self._metrics_computations:
if computation._is_synced:
computation.unsync()
def reset(self) -> None:
for computation in self._metrics_computations:
computation.reset()
def get_memory_usage(self) -> Dict[torch.Tensor, int]:
r"""Estimates the memory of the rec metric instance's
underlying tensors; returns the map of tensor to size
"""
tensor_map = {}
attributes_q = deque(self.__dict__.values())
while attributes_q:
attribute = attributes_q.popleft()
if isinstance(attribute, torch.Tensor):
tensor_map[attribute] = (
attribute.size().numel() * attribute.element_size()
)
elif isinstance(attribute, WindowBuffer):
attributes_q.extend(attribute.buffers)
elif isinstance(attribute, Mapping):
attributes_q.extend(attribute.values())
elif isinstance(attribute, Sequence) and not isinstance(attribute, str):
attributes_q.extend(attribute)
elif hasattr(attribute, "__dict__") and not isinstance(attribute, Enum):
attributes_q.extend(attribute.__dict__.values())
return tensor_map
# pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
def state_dict(
self,
destination: Optional[Dict[str, torch.Tensor]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, torch.Tensor]:
# We need to flush the cached output to ensure checkpointing correctness.
self._check_fused_update(force=True)
destination = super().state_dict(
destination=destination, prefix=prefix, keep_vars=keep_vars
)
return self._metrics_computations.state_dict(
destination=destination,
prefix=f"{prefix}_metrics_computations.",
keep_vars=keep_vars,
)
class RecMetricList(nn.Module):
"""
A list module to encapulate multiple RecMetric instances and provide the
same interfaces as RecMetric.
Args:
rec_metrics (List[RecMetric]: the list of the input RecMetrics.
Call Args:
Not supported.
Returns:
Not supported.
Example::
ne = NEMetric(
world_size=4,
my_rank=0,
batch_size=128,
tasks=DefaultTaskInfo
)
metrics = RecMetricList([ne])
"""
rec_metrics: nn.ModuleList
def __init__(self, rec_metrics: List[RecMetric]) -> None:
# TODO(stellaya): consider to inherit from TorchMetrics.MetricCollection.
# The prequsite to use MetricCollection is that RecMetric inherits from
# TorchMetrics.Metric or TorchMetrics.MetricCollection
super().__init__()
self.rec_metrics = nn.ModuleList(rec_metrics)
def __len__(self) -> int:
return len(self.rec_metrics)
def __getitem__(self, idx: int) -> nn.Module:
return self.rec_metrics[idx]
def update(
self,
*,
predictions: RecModelOutput,
labels: RecModelOutput,
weights: RecModelOutput,
) -> None:
for metric in self.rec_metrics:
metric.update(predictions=predictions, labels=labels, weights=weights)
def compute(self) -> Dict[str, torch.Tensor]:
ret = {}
for metric in self.rec_metrics:
ret.update(metric.compute())
return ret
def local_compute(self) -> Dict[str, torch.Tensor]:
ret = {}
for metric in self.rec_metrics:
ret.update(metric.local_compute())
return ret
def sync(self) -> None:
for metric in self.rec_metrics:
metric.sync()
def unsync(self) -> None:
for metric in self.rec_metrics:
metric.unsync()
def reset(self) -> None:
for metric in self.rec_metrics:
metric.reset()
| [
"torchrec.metrics.metrics_namespace.compose_metric_key"
] | [((943, 965), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (952, 965), False, 'from dataclasses import dataclass\n'), ((1096, 1120), 'typing.TypeVar', 'TypeVar', (['"""DefaultValueT"""'], {}), "('DefaultValueT')\n", (1103, 1120), False, 'from typing import Any, Callable, cast, Deque, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n'), ((1523, 1553), 'collections.deque', 'deque', ([], {'maxlen': 'max_buffer_count'}), '(maxlen=max_buffer_count)\n', (1528, 1553), False, 'from collections import defaultdict, deque\n'), ((1593, 1623), 'collections.deque', 'deque', ([], {'maxlen': 'max_buffer_count'}), '(maxlen=max_buffer_count)\n', (1598, 1623), False, 'from collections import defaultdict, deque\n'), ((10435, 10470), 'math.ceil', 'math.ceil', (['(window_size / world_size)'], {}), '(window_size / world_size)\n', (10444, 10470), False, 'import math\n'), ((24195, 24221), 'torch.nn.ModuleList', 'nn.ModuleList', (['rec_metrics'], {}), '(rec_metrics)\n', (24208, 24221), True, 'import torch.nn as nn\n'), ((4195, 4240), 'torch.zeros', 'torch.zeros', (['self._n_tasks'], {'dtype': 'torch.uint8'}), '(self._n_tasks, dtype=torch.uint8)\n', (4206, 4240), False, 'import torch\n'), ((16013, 16041), 'torch.ones_like', 'torch.ones_like', (['predictions'], {}), '(predictions)\n', (16028, 16041), False, 'import torch\n'), ((16232, 16268), 'torch.count_nonzero', 'torch.count_nonzero', (['weights'], {'dim': '(-1)'}), '(weights, dim=-1)\n', (16251, 16268), False, 'import torch\n'), ((16456, 16471), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16469, 16471), False, 'import torch\n'), ((20564, 20631), 'torchrec.metrics.metrics_namespace.compose_metric_key', 'compose_metric_key', (['self._namespace', 'task.name', 'metric_name', 'prefix'], {}), '(self._namespace, task.name, metric_name, prefix)\n', (20582, 20631), False, 'from torchrec.metrics.metrics_namespace import compose_metric_key, MetricNameBase, 
MetricNamespaceBase, MetricPrefix\n'), ((20951, 21018), 'torchrec.metrics.metrics_namespace.compose_metric_key', 'compose_metric_key', (['self._namespace', 'task.name', 'metric_name', 'prefix'], {}), '(self._namespace, task.name, metric_name, prefix)\n', (20969, 21018), False, 'from torchrec.metrics.metrics_namespace import compose_metric_key, MetricNameBase, MetricNamespaceBase, MetricPrefix\n'), ((14541, 14558), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (14552, 14558), False, 'from collections import defaultdict, deque\n'), ((17613, 17641), 'torch.any', 'torch.any', (['has_valid_weights'], {}), '(has_valid_weights)\n', (17622, 17641), False, 'import torch\n'), ((13010, 13040), 'torch.zeros_like', 'torch.zeros_like', (['metric_value'], {}), '(metric_value)\n', (13026, 13040), False, 'import torch\n'), ((13997, 14034), 'torch.zeros_like', 'torch.zeros_like', (['metric_report.value'], {}), '(metric_report.value)\n', (14013, 14034), False, 'import torch\n'), ((14426, 14459), 'typing.cast', 'cast', (['List[torch.Tensor]', 'outputs'], {}), '(List[torch.Tensor], outputs)\n', (14430, 14459), False, 'from typing import Any, Callable, cast, Deque, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n'), ((14825, 14843), 'torch.cat', 'torch.cat', (['tensors'], {}), '(tensors)\n', (14834, 14843), False, 'import torch\n'), ((18164, 18199), 'torch.numel', 'torch.numel', (['predictions[task.name]'], {}), '(predictions[task.name])\n', (18175, 18199), False, 'import torch\n'), ((4315, 4334), 'torch.any', 'torch.any', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (4324, 4334), False, 'import torch\n'), ((18237, 18267), 'torch.numel', 'torch.numel', (['labels[task.name]'], {}), '(labels[task.name])\n', (18248, 18267), False, 'import torch\n'), ((18323, 18354), 'torch.numel', 'torch.numel', (['weights[task.name]'], {}), '(weights[task.name])\n', (18334, 18354), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import logging
from typing import List, Optional, Tuple, Iterator
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
)
from torchrec.distributed.batched_embedding_kernel import BaseBatchedEmbeddingBag
from torchrec.distributed.embedding_kernel import BaseEmbeddingBag
from torchrec.distributed.embedding_types import GroupedEmbeddingConfig
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
DataType,
DATA_TYPE_NUM_BITS,
)
from torchrec.sparse.jagged_tensor import (
KeyedJaggedTensor,
KeyedTensor,
)
logger: logging.Logger = logging.getLogger(__name__)
class QuantBatchedEmbeddingBag(BaseBatchedEmbeddingBag):
def __init__(
self,
config: GroupedEmbeddingConfig,
# pyre-fixme[11]
pg: Optional[dist.ProcessGroup] = None,
device: Optional[torch.device] = None,
) -> None:
super().__init__(config, pg, device)
self._emb_module: IntNBitTableBatchedEmbeddingBagsCodegen = (
IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
local_rows,
table.embedding_dim,
QuantBatchedEmbeddingBag.to_sparse_type(config.data_type),
EmbeddingLocation.DEVICE
if (device is not None and device.type == "cuda")
else EmbeddingLocation.HOST,
)
for local_rows, table in zip(
self._local_rows, config.embedding_tables
)
],
device=device,
pooling_mode=self._pooling,
feature_table_map=self._feature_table_map,
)
)
if device is not None and device.type != "meta":
self._emb_module.initialize_weights()
@staticmethod
def to_sparse_type(data_type: DataType) -> SparseType:
if data_type == DataType.FP16:
return SparseType.FP16
elif data_type == DataType.INT8:
return SparseType.INT8
elif data_type == DataType.INT4:
return SparseType.INT4
elif data_type == DataType.INT2:
return SparseType.INT2
else:
raise ValueError(f"Invalid DataType {data_type}")
def init_parameters(self) -> None:
pass
@property
def emb_module(
self,
) -> IntNBitTableBatchedEmbeddingBagsCodegen:
return self._emb_module
def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
values = self.emb_module(
indices=features.values().int(),
offsets=features.offsets().int(),
per_sample_weights=features.weights_or_none(),
).float()
return KeyedTensor(
keys=self._emb_names,
values=values,
length_per_key=self._lengths_per_emb,
)
def named_buffers(
self, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
for config, weight in zip(
self._config.embedding_tables,
self.emb_module.split_embedding_weights(),
):
yield append_prefix(prefix, f"{config.name}.weight"), weight[0]
def split_embedding_weights(self) -> List[torch.Tensor]:
return [
weight
for weight, _ in self.emb_module.split_embedding_weights(
split_scale_shifts=False
)
]
@classmethod
def from_float(cls, module: BaseEmbeddingBag) -> "QuantBatchedEmbeddingBag":
assert hasattr(
module, "qconfig"
), "EmbeddingBagCollectionInterface input float module must have qconfig defined"
def _to_data_type(dtype: torch.dtype) -> DataType:
if dtype == torch.quint8 or dtype == torch.qint8:
return DataType.INT8
elif dtype == torch.quint4 or dtype == torch.qint4:
return DataType.INT4
elif dtype == torch.quint2 or dtype == torch.qint2:
return DataType.INT2
else:
raise Exception(f"Invalid data type {dtype}")
# pyre-ignore [16]
data_type = _to_data_type(module.qconfig.weight().dtype)
sparse_type = QuantBatchedEmbeddingBag.to_sparse_type(data_type)
state_dict = dict(
itertools.chain(module.named_buffers(), module.named_parameters())
)
device = next(iter(state_dict.values())).device
# Adjust config to quantized version.
# This obviously doesn't work for column-wise sharding.
# pyre-ignore [29]
config = copy.deepcopy(module.config())
config.data_type = data_type
for table in config.embedding_tables:
table.local_cols = rounded_row_size_in_bytes(table.local_cols, sparse_type)
if table.local_metadata is not None:
table.local_metadata.shard_sizes = [
table.local_rows,
table.local_cols,
]
if table.global_metadata is not None:
for shard_meta in table.global_metadata.shards_metadata:
if shard_meta != table.local_metadata:
shard_meta.shard_sizes = [
shard_meta.shard_sizes[0],
rounded_row_size_in_bytes(
shard_meta.shard_sizes[1], sparse_type
),
]
table.global_metadata.size = torch.Size(
[
table.global_metadata.size[0],
sum(
shard_meta.shard_sizes[1]
for shard_meta in table.global_metadata.shards_metadata
),
]
)
ret = QuantBatchedEmbeddingBag(config=config, device=device)
# Quantize weights.
quant_weight_list = []
for _, weight in state_dict.items():
quantized_weights = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
weight, DATA_TYPE_NUM_BITS[data_type]
)
# weight and 4 byte scale shift (2xfp16)
quant_weight = quantized_weights[:, :-4]
scale_shift = quantized_weights[:, -4:]
quant_weight_list.append((quant_weight, scale_shift))
ret.emb_module.assign_embedding_weights(quant_weight_list)
return ret
| [
"torchrec.distributed.utils.append_prefix",
"torchrec.sparse.jagged_tensor.KeyedTensor"
] | [((1068, 1095), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1085, 1095), False, 'import logging\n'), ((3324, 3415), 'torchrec.sparse.jagged_tensor.KeyedTensor', 'KeyedTensor', ([], {'keys': 'self._emb_names', 'values': 'values', 'length_per_key': 'self._lengths_per_emb'}), '(keys=self._emb_names, values=values, length_per_key=self.\n _lengths_per_emb)\n', (3335, 3415), False, 'from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor\n'), ((5356, 5412), 'fbgemm_gpu.split_table_batched_embeddings_ops.rounded_row_size_in_bytes', 'rounded_row_size_in_bytes', (['table.local_cols', 'sparse_type'], {}), '(table.local_cols, sparse_type)\n', (5381, 5412), False, 'from fbgemm_gpu.split_table_batched_embeddings_ops import EmbeddingLocation, IntNBitTableBatchedEmbeddingBagsCodegen, rounded_row_size_in_bytes\n'), ((6656, 6754), 'torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf', 'torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf', (['weight', 'DATA_TYPE_NUM_BITS[data_type]'], {}), '(weight,\n DATA_TYPE_NUM_BITS[data_type])\n', (6711, 6754), False, 'import torch\n'), ((3742, 3788), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (3755, 3788), False, 'from torchrec.distributed.utils import append_prefix\n'), ((5926, 5991), 'fbgemm_gpu.split_table_batched_embeddings_ops.rounded_row_size_in_bytes', 'rounded_row_size_in_bytes', (['shard_meta.shard_sizes[1]', 'sparse_type'], {}), '(shard_meta.shard_sizes[1], sparse_type)\n', (5951, 5991), False, 'from fbgemm_gpu.split_table_batched_embeddings_ops import EmbeddingLocation, IntNBitTableBatchedEmbeddingBagsCodegen, rounded_row_size_in_bytes\n')] |
from typing import (
Iterator,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
)
import io
import torch
import torch.utils.data.datapipes as dp
from torchdata.datapipes.iter import S3FileLister, S3FileLoader
from torchdata.datapipes.utils import StreamWrapper
from torchrec.datasets.utils import (
LoadFiles,
ReadLinesFromCSV)
from torch.utils.data import IterDataPipe
from torchrec.datasets.criteo import _default_row_mapper
s3_prefixes = ['s3://criteo-dataset/day_0']
dp_s3_urls = S3FileLister(s3_prefixes)
dp_s3_files = S3FileLoader(dp_s3_urls) # outputs in (url, BytesIO)
# more datapipes to convert loaded bytes, e.g.
class LoadWithTextIOWrapper(IterDataPipe):
def __init__(self, paths, **open_kw):
self.paths = paths
self.open_kw: Any = open_kw # pyre-ignore[4]
def __iter__(self) -> Iterator[Any]:
for url, buffer in self.paths:
yield url, io.TextIOWrapper(buffer, encoding='utf-8')
class S3CriteoIterDataPipe(IterDataPipe):
    """
    IterDataPipe that streams Criteo Click Logs data from S3 and yields one
    (optionally mapped) row per TSV line.

    Each (url, byte-stream) pair produced by the upstream ``paths`` datapipe
    is wrapped in a UTF-8 text reader, split on tabs, and passed through
    ``row_mapper`` when one is provided. When run inside a DataLoader with
    multiple workers, the input streams are sharded across workers by index.

    Args:
        paths (S3FileLoader): upstream datapipe yielding (url, byte-stream)
            pairs for the TSV files that constitute the Criteo dataset.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply
            to each split TSV line.
        open_kw: extra keyword options; currently stored but unused by
            ``__iter__`` (only the commented-out ``LoadFiles`` path consumed
            them).
    Example:
        >>> datapipe = S3CriteoIterDataPipe(dp_s3_files)
        >>> datapipe = dp.iter.Batcher(datapipe, 100)
        >>> datapipe = dp.iter.Collator(datapipe)
        >>> batch = next(iter(datapipe))
    """
    def __init__(
        self,
        paths: S3FileLoader,
        *,
        # pyre-ignore[2]
        row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
        # pyre-ignore[2]
        **open_kw,
    ) -> None:
        self.paths = paths
        self.row_mapper = row_mapper
        self.open_kw: Any = open_kw  # pyre-ignore[4]
    # pyre-ignore[3]
    def __iter__(self) -> Iterator[Any]:
        # Shard the input streams across DataLoader workers: worker k takes
        # every num_workers-th stream starting at index k.
        worker_info = torch.utils.data.get_worker_info()
        paths = self.paths
        if worker_info is not None:
            paths = (
                path
                for (idx, path) in enumerate(paths)
                if idx % worker_info.num_workers == worker_info.id
            )
        # datapipe = LoadFiles(paths, mode="r", **self.open_kw)
        datapipe = LoadWithTextIOWrapper(paths)
        datapipe = ReadLinesFromCSV(datapipe, delimiter="\t")
        if self.row_mapper:
            datapipe = dp.iter.Mapper(datapipe, self.row_mapper)
        yield from datapipe
#print(dp_s3_files)
#datapipe = StreamWrapper(dp_s3_files).parse_csv_files(delimiter=' ')
#for d in datapipe:  # Start loading data
# Assemble and exercise the full pipeline: parse rows, batch 100 at a time,
# collate into tensors, then pull and inspect one batch.
datapipe = S3CriteoIterDataPipe(dp_s3_files)
datapipe = dp.iter.Batcher(datapipe, 100)
datapipe = dp.iter.Collator(datapipe)
batch = next(iter(datapipe))
print(batch.keys())
| [
"torchrec.datasets.utils.ReadLinesFromCSV"
] | [((520, 545), 'torchdata.datapipes.iter.S3FileLister', 'S3FileLister', (['s3_prefixes'], {}), '(s3_prefixes)\n', (532, 545), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((560, 584), 'torchdata.datapipes.iter.S3FileLoader', 'S3FileLoader', (['dp_s3_urls'], {}), '(dp_s3_urls)\n', (572, 584), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((3173, 3203), 'torch.utils.data.datapipes.iter.Batcher', 'dp.iter.Batcher', (['datapipe', '(100)'], {}), '(datapipe, 100)\n', (3188, 3203), True, 'import torch.utils.data.datapipes as dp\n'), ((3215, 3241), 'torch.utils.data.datapipes.iter.Collator', 'dp.iter.Collator', (['datapipe'], {}), '(datapipe)\n', (3231, 3241), True, 'import torch.utils.data.datapipes as dp\n'), ((2416, 2450), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (2448, 2450), False, 'import torch\n'), ((2821, 2863), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['datapipe'], {'delimiter': '"""\t"""'}), "(datapipe, delimiter='\\t')\n", (2837, 2863), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV\n'), ((2915, 2956), 'torch.utils.data.datapipes.iter.Mapper', 'dp.iter.Mapper', (['datapipe', 'self.row_mapper'], {}), '(datapipe, self.row_mapper)\n', (2929, 2956), True, 'import torch.utils.data.datapipes as dp\n'), ((932, 974), 'io.TextIOWrapper', 'io.TextIOWrapper', (['buffer'], {'encoding': '"""utf-8"""'}), "(buffer, encoding='utf-8')\n", (948, 974), False, 'import io\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from typing import List, Tuple, Optional, Dict, cast
from torchrec.distributed.planner.constants import MAX_SIZE
from torchrec.distributed.planner.types import (
Partitioner,
Topology,
ShardingOption,
Storage,
PartitionByType,
PlannerError,
DeviceHardware,
)
from torchrec.distributed.types import ShardingType
def greedy_partition(
    num_partitions: int,
    sharding_options: List[ShardingOption],
    shard_idxes: Optional[List[Tuple[int, int]]] = None,
    partition_sums: Optional[List[float]] = None,
    mem_cap: Optional[List[Storage]] = None,
) -> List[List[Tuple[int, int]]]:
    """
    Divides indices among `num_partitions` partitions in a greedy fashion based on perf
    weights associated with each [option_idx, shard_idx].

    Shards are processed from highest perf to lowest; each is placed on the
    partition with the smallest running perf sum that still has storage room.

    Args:
        num_partitions (int): number of partitions (ranks or hosts) to fill.
        sharding_options (List[ShardingOption]): options whose shards are placed.
        shard_idxes (Optional[List[Tuple[int, int]]]): explicit
            (option_idx, shard_idx) pairs to place; defaults to every shard of
            every option.
        partition_sums (Optional[List[float]]): starting perf sum per
            partition; defaults to all zeros. Mutated in place.
        mem_cap (Optional[List[Storage]]): storage cap per partition; defaults
            to effectively unbounded.

    Raises:
        PlannerError: if some shard fits on no partition under `mem_cap`.

    Returns:
        List[List[Tuple[int, int]]]: list of indices of (option_idx, shard_idx) that should be allocated to each partition.
    Example::
        sharding_options = [
                [0,1,2,3] with perfs [10,20,30,40]
                [0,1] with perfs [200,300]
            ]
        # with num_partitions=3
        # The final output would be:
        [
            partition_0 = [(1,1)], with a perf of 300
            partition_1 = [(1,0)], with a perf of 200
            partition_2 = [(0,0),(0,1),(0,2),(0,3)], with a perf of 100 (10+20+30+40)
        ]
    """
    if shard_idxes is None:
        # Default: place every shard of every sharding option.
        shard_idxes = []
        for option_idx, sharding_option in enumerate(sharding_options):
            for shard_idx in range(sharding_option.num_shards):
                shard_idxes.append((option_idx, shard_idx))
    def _to_comparable(order_shard_idx: Tuple[int, int]) -> Tuple[float, Storage]:
        # Sort key: (perf, storage) of the referenced shard.
        sharding_option: ShardingOption = sharding_options[order_shard_idx[0]]
        return (
            cast(float, sharding_option.shards[order_shard_idx[1]].perf),
            cast(Storage, sharding_option.shards[order_shard_idx[1]].storage),
        )
    # Ascending sort; shards are later pop()ed from the end, i.e. consumed
    # from highest perf to lowest.
    sorted_shard_idxes = sorted(
        shard_idxes, key=lambda order_shard_idx: _to_comparable(order_shard_idx)
    )
    partitions: List[List[Tuple[int, int]]] = [[] for p in range(num_partitions)]
    if partition_sums is None:
        partition_sums = [0.0] * num_partitions
    partition_size_sums = [Storage(hbm=0, ddr=0) for _ in range(num_partitions)]
    if mem_cap is None:
        mem_cap = [Storage(hbm=MAX_SIZE, ddr=MAX_SIZE) for _ in range(num_partitions)]
    assert len(partition_size_sums) == len(
        mem_cap
    ), "partition_size_sums and mem_cap must have the same dimensions"
    """
    Successively add remaining pairs to the partition with the minimum sum.
    """
    while sorted_shard_idxes:
        option_idx, shard_idx = sorted_shard_idxes.pop()
        storage_size = cast(
            Storage, sharding_options[option_idx].shards[shard_idx].storage
        )
        perf = cast(float, sharding_options[option_idx].shards[shard_idx].perf)
        # Find the partition with the lowest perf sum that can still hold the
        # shard under both hbm and ddr caps.
        min_sum = MAX_SIZE
        min_partition_idx = -1
        for partition_idx in range(num_partitions):
            partition_mem_cap: Storage = mem_cap[partition_idx]
            partition_size_sum: Storage = partition_size_sums[partition_idx]
            if (
                partition_mem_cap.hbm >= partition_size_sum.hbm + storage_size.hbm
            ) and (partition_mem_cap.ddr >= partition_size_sum.ddr + storage_size.ddr):
                if partition_sums[partition_idx] < min_sum:
                    min_sum = partition_sums[partition_idx]
                    min_partition_idx = partition_idx
        if min_partition_idx == -1:
            # NOTE(review): message says "GB" but storage_size is a Storage
            # object — confirm units before relying on this text.
            raise PlannerError(
                f"Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}."
            )
        partitions[min_partition_idx].append((option_idx, shard_idx))
        partition_size_sums[min_partition_idx] += storage_size
        partition_sums[min_partition_idx] += perf
    return partitions
def uniform_partition(
    num_partitions: int,
    sharding_options: List[ShardingOption],
    mem_cap: List[Storage],
    shard_idxes: Optional[List[Tuple[int, int]]] = None,
) -> List[List[Tuple[int, int]]]:
    """
    Places shard ``i`` of every sharding option on partition ``i`` — one shard
    per rank.

    Args:
        num_partitions (int): number of partitions (ranks) to fill.
        sharding_options (List[ShardingOption]): options whose shards are placed.
        mem_cap (List[Storage]): per-partition storage cap.
        shard_idxes (Optional[List[Tuple[int, int]]]): explicit
            (option_idx, shard_idx) pairs to place; defaults to every shard of
            every option.

    Raises:
        PlannerError: if a partition's accumulated storage would exceed its
            ``mem_cap`` entry.

    Example::
        sharding_options = [
                [0,1,2,3],
                [0,1,2,3],
            ]
        # with num_partitions=4
        # The final output would be:
        [
            partition_0 = [(0,0),(1,0)]
            partition_1 = [(0,1),(1,1)]
            partition_2 = [(0,2),(1,2)]
            partition_3 = [(0,3),(1,3)]
        ]
    """
    used_storage = [Storage(hbm=0, ddr=0) for _ in range(num_partitions)]
    if shard_idxes is None:
        shard_idxes = [
            (option_idx, shard_idx)
            for option_idx, option in enumerate(sharding_options)
            for shard_idx in range(option.num_shards)
        ]
    partitions: List[List[Tuple[int, int]]] = [[] for _ in range(num_partitions)]
    for option_idx, shard_idx in shard_idxes:
        shard_storage = cast(
            Storage, sharding_options[option_idx].shards[shard_idx].storage
        )
        # shard_idx doubles as the destination partition index.
        if used_storage[shard_idx] + shard_storage > mem_cap[shard_idx]:
            raise PlannerError(
                f"Table of size {shard_storage} GB cannot be added to any rank. partition_size_sums: {used_storage}. mem_cap: {mem_cap}."
            )
        used_storage[shard_idx] += shard_storage
        partitions[shard_idx].append((option_idx, shard_idx))
    return partitions
def _group_sharding_options(
sharding_options: List[ShardingOption],
) -> Dict[str, List[ShardingOption]]:
partition_by_groups = {}
for sharding_option in sharding_options:
if sharding_option.partition_by not in partition_by_groups:
partition_by_groups[sharding_option.partition_by] = []
partition_by_groups[sharding_option.partition_by].append(sharding_option)
return partition_by_groups
class GreedyPerfPartitioner(Partitioner):
    """
    Greedy Partitioner: places each proposed sharding option on the device
    topology according to the option's `partition_by` attribute (uniform,
    host, or device), balancing perf greedily under per-device storage caps.
    """
    def partition(
        self,
        proposal: List[ShardingOption],
        storage_constraint: Topology,
    ) -> List[ShardingOption]:
        """
        Places sharding options on topology based on each sharding option's
        `partition_by` attribute.
        Topology storage and perfs are updated at the end of the placement.
        Args:
            proposal (List[ShardingOption]): list of populated sharding options.
            storage_constraint (Topology): device topology.
        Returns:
            List[ShardingOption]: list of sharding options for selected plan.
        Example::
            sharding_options = [
                    ShardingOption(partition_by="uniform",
                            shards=[
                                Shards(storage=1, perf=1),
                                Shards(storage=1, perf=1),
                            ]),
                    ShardingOption(partition_by="uniform",
                            shards=[
                                Shards(storage=2, perf=2),
                                Shards(storage=2, perf=2),
                            ]),
                    ShardingOption(partition_by="device",
                            shards=[
                                Shards(storage=3, perf=3),
                                Shards(storage=3, perf=3),
                            ])
                    ShardingOption(partition_by="device",
                            shards=[
                                Shards(storage=4, perf=4),
                                Shards(storage=4, perf=4),
                            ]),
                ]
            topology = Topology(world_size=2)
            # First [sharding_options[0] and sharding_options[1]] will be placed on the
            # topology with the uniform strategy, resulting in
            topology.devices[0].perf = (1,2)
            topology.devices[1].perf = (1,2)
            # Finally sharding_options[2] and sharding_options[3]] will be placed on the
            # topology with the device strategy (see docstring of `partition_by_device` for
            # more details).
            topology.devices[0].perf = (1,2) + (3,4)
            topology.devices[1].perf = (1,2) + (3,4)
            # The topology updates are done after the end of all the placements (the other
            # in the example is just for clarity).
        """
        # Work on copies so the caller's proposal and topology stay untouched
        # until a plan is accepted; _update_shards mutates self._topology.
        # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
        self._topology: Topology = copy.deepcopy(storage_constraint)
        plan = copy.deepcopy(proposal)
        grouped_sharding_options = _group_sharding_options(plan)
        # Placement order: uniform first, then host, then device.
        if PartitionByType.UNIFORM.value in grouped_sharding_options:
            self._partition_by_uniform(
                grouped_sharding_options[PartitionByType.UNIFORM.value]
            )
        if PartitionByType.HOST.value in grouped_sharding_options:
            self._partition_by_host(
                grouped_sharding_options[PartitionByType.HOST.value]
            )
        if PartitionByType.DEVICE.value in grouped_sharding_options:
            self._partition_by_device(
                grouped_sharding_options[PartitionByType.DEVICE.value]
            )
        return plan
    def _partition_by_uniform(self, sharding_options: List[ShardingOption]) -> None:
        """Places shard i of each option on rank i across the full topology."""
        partitions = uniform_partition(
            # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
            num_partitions=self._topology.world_size,
            sharding_options=sharding_options,
            mem_cap=[device.storage for device in self._topology.devices],
        )
        self._update_shards(partitions, sharding_options)
    def _partition_by_device(self, sharding_options: List[ShardingOption]) -> None:
        """Greedily balances shards across all devices by perf under storage caps."""
        # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
        partition_sums = [float(device.perf) for device in self._topology.devices]
        mem_cap: List[Storage] = [device.storage for device in self._topology.devices]
        partitions = greedy_partition(
            num_partitions=self._topology.world_size,
            sharding_options=sharding_options,
            partition_sums=partition_sums,
            mem_cap=mem_cap,
        )
        self._update_shards(partitions, sharding_options)
    def _partition_by_host(self, sharding_options: List[ShardingOption]) -> None:
        """Two-level placement: greedily assign each option to a host, then
        distribute its shards to that host's devices (uniformly or greedily,
        per the option's base partitioning)."""
        # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
        num_hosts: int = self._topology.world_size // self._topology.local_world_size
        mem_cap: List[Storage] = []
        partition_sums = []
        shard_idxes = []
        for option_idx, _ in enumerate(sharding_options):
            # only take the first shard from each sharding option. We can infer the rest
            shard_idxes.append((option_idx, 0))
        host_level_devices: Dict[int, List[DeviceHardware]] = {}
        for i in range(num_hosts):
            devices_in_host = self._topology.devices[
                i
                * self._topology.local_world_size : (i + 1)
                * self._topology.local_world_size
            ]
            host_level_devices[i] = devices_in_host
            # mem_cap of a host is the min of the storage of all devices on that host
            mem_cap.append(min([device.storage for device in devices_in_host]))
            # perf of a host is the max across all of its devices. Typically this should be zero at entry point.
            partition_sums.append(
                max([float(device.perf) for device in devices_in_host])
            )
        host_level_partitions: List[List[Tuple[int, int]]] = greedy_partition(
            num_partitions=num_hosts,
            sharding_options=sharding_options,
            shard_idxes=shard_idxes,
            partition_sums=partition_sums,
            mem_cap=mem_cap,
        )
        partitions: List[List[Tuple[int, int]]] = [[] for _ in self._topology.devices]
        for host_idx, host_partition in enumerate(host_level_partitions):
            # Within the host, options whose base partitioning is UNIFORM get
            # one shard per local device; DEVICE options are balanced greedily.
            self._uniform_device_level_partition(
                partitions=partitions,
                sharding_options=sharding_options,
                option_idxes=[
                    option_idx
                    for option_idx, _ in host_partition
                    if _base_partition_by(sharding_options[option_idx].sharding_type)
                    == PartitionByType.UNIFORM.value
                ],
                host_level_devices=host_level_devices[host_idx],
                host_idx=host_idx,
            )
            self._greedy_device_level_partition(
                partitions=partitions,
                sharding_options=sharding_options,
                option_idxes=[
                    option_idx
                    for option_idx, _ in host_partition
                    if _base_partition_by(sharding_options[option_idx].sharding_type)
                    == PartitionByType.DEVICE.value
                ],
                host_level_devices=host_level_devices[host_idx],
                host_idx=host_idx,
            )
        self._update_shards(partitions, sharding_options)
    def _uniform_device_level_partition(
        self,
        partitions: List[List[Tuple[int, int]]],
        sharding_options: List[ShardingOption],
        option_idxes: List[int],
        host_level_devices: List[DeviceHardware],
        host_idx: int,
    ) -> None:
        """Distributes the given options' shards one-per-device within a host,
        appending the results into the global `partitions` list."""
        shard_idxes = []
        for option_idx in option_idxes:
            for shard_idx in range(sharding_options[option_idx].num_shards):
                shard_idxes.append((option_idx, shard_idx))
        if shard_idxes:
            device_level_partitions: List[List[Tuple[int, int]]] = uniform_partition(
                # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
                num_partitions=self._topology.local_world_size,
                sharding_options=sharding_options,
                mem_cap=[device.storage for device in host_level_devices],
                shard_idxes=shard_idxes,
            )
            for device_idx, device_partition in enumerate(device_level_partitions):
                for option_idx, shard_idx in device_partition:
                    # Translate host-local device index to a global rank.
                    partitions[
                        self._topology.local_world_size * host_idx + device_idx
                    ].append((option_idx, shard_idx))
    def _greedy_device_level_partition(
        self,
        partitions: List[List[Tuple[int, int]]],
        sharding_options: List[ShardingOption],
        option_idxes: List[int],
        host_level_devices: List[DeviceHardware],
        host_idx: int,
    ) -> None:
        """Greedily balances the given options' shards across a host's devices,
        appending the results into the global `partitions` list."""
        shard_idxes = []
        for option_idx in option_idxes:
            for shard_idx in range(sharding_options[option_idx].num_shards):
                shard_idxes.append((option_idx, shard_idx))
        if shard_idxes:
            device_level_partitions: List[List[Tuple[int, int]]] = greedy_partition(
                # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
                num_partitions=self._topology.local_world_size,
                sharding_options=sharding_options,
                shard_idxes=shard_idxes,
                partition_sums=[float(device.perf) for device in host_level_devices],
                mem_cap=[device.storage for device in host_level_devices],
            )
            for device_idx, device_partition in enumerate(device_level_partitions):
                for option_idx, shard_idx in device_partition:
                    # Translate host-local device index to a global rank.
                    partitions[
                        self._topology.local_world_size * host_idx + device_idx
                    ].append((option_idx, shard_idx))
    def _update_shards(
        self,
        partitions: List[List[Tuple[int, int]]],
        sharding_options: List[ShardingOption],
    ) -> None:
        """
        Updates the ranks of the shards as well as device perfs.
        Mutates both the sharding options (shard.rank) and self._topology
        (remaining storage decreases, accumulated perf increases).
        """
        for partition_idx, partition in enumerate(partitions):
            for [option_idx, shard_idx] in partition:
                sharding_options[option_idx].shards[shard_idx].rank = partition_idx
                # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
                self._topology.devices[partition_idx].storage -= (
                    sharding_options[option_idx].shards[shard_idx].storage
                )
                self._topology.devices[partition_idx].perf += (
                    sharding_options[option_idx].shards[shard_idx].perf
                )
def _base_partition_by(sharding_type: str) -> str:
    """Map a host-partitioned sharding type to the scheme used for its shards
    within a host: table-row-wise spreads uniformly, table-column-wise is
    balanced per device.

    Raises:
        ValueError: if the sharding type is not host-partitioned.
    """
    if sharding_type == ShardingType.TABLE_ROW_WISE.value:
        return PartitionByType.UNIFORM.value
    if sharding_type == ShardingType.TABLE_COLUMN_WISE.value:
        return PartitionByType.DEVICE.value
    raise ValueError(
        f"Sharding type provided must have a partition_by value of HOST: {sharding_type}"
    )
| [
"torchrec.distributed.planner.types.PlannerError",
"torchrec.distributed.planner.types.Storage"
] | [((2463, 2484), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(0)', 'ddr': '(0)'}), '(hbm=0, ddr=0)\n', (2470, 2484), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n'), ((2964, 3033), 'typing.cast', 'cast', (['Storage', 'sharding_options[option_idx].shards[shard_idx].storage'], {}), '(Storage, sharding_options[option_idx].shards[shard_idx].storage)\n', (2968, 3033), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((3071, 3135), 'typing.cast', 'cast', (['float', 'sharding_options[option_idx].shards[shard_idx].perf'], {}), '(float, sharding_options[option_idx].shards[shard_idx].perf)\n', (3075, 3135), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((4829, 4850), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(0)', 'ddr': '(0)'}), '(hbm=0, ddr=0)\n', (4836, 4850), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n'), ((5285, 5354), 'typing.cast', 'cast', (['Storage', 'sharding_options[option_idx].shards[shard_idx].storage'], {}), '(Storage, sharding_options[option_idx].shards[shard_idx].storage)\n', (5289, 5354), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((8830, 8863), 'copy.deepcopy', 'copy.deepcopy', (['storage_constraint'], {}), '(storage_constraint)\n', (8843, 8863), False, 'import copy\n'), ((8879, 8902), 'copy.deepcopy', 'copy.deepcopy', (['proposal'], {}), '(proposal)\n', (8892, 8902), False, 'import copy\n'), ((2030, 2090), 'typing.cast', 'cast', (['float', 'sharding_option.shards[order_shard_idx[1]].perf'], {}), '(float, sharding_option.shards[order_shard_idx[1]].perf)\n', (2034, 2090), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((2104, 2169), 'typing.cast', 'cast', (['Storage', 
'sharding_option.shards[order_shard_idx[1]].storage'], {}), '(Storage, sharding_option.shards[order_shard_idx[1]].storage)\n', (2108, 2169), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((2561, 2596), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': 'MAX_SIZE', 'ddr': 'MAX_SIZE'}), '(hbm=MAX_SIZE, ddr=MAX_SIZE)\n', (2568, 2596), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n'), ((3805, 3956), 'torchrec.distributed.planner.types.PlannerError', 'PlannerError', (['f"""Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}."""'], {}), "(\n f'Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}.'\n )\n", (3817, 3956), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n'), ((5474, 5625), 'torchrec.distributed.planner.types.PlannerError', 'PlannerError', (['f"""Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}."""'], {}), "(\n f'Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}.'\n )\n", (5486, 5625), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import List, cast
import torch
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer
from torchrec.distributed.planner.types import Topology, ShardingOption
from torchrec.distributed.tests.test_model import TestSparseNN
from torchrec.modules.embedding_configs import EmbeddingBagConfig
class TestProposers(unittest.TestCase):
    """Golden tests for the sequence of sharding proposals produced by the
    greedy and uniform proposers over small two/three-table models."""
    def setUp(self) -> None:
        # Shared enumerator over a 2-GPU topology plus one proposer of each kind.
        topology = Topology(world_size=2, compute_device="cuda")
        self.enumerator = EmbeddingEnumerator(topology=topology)
        self.greedy_proposer = GreedyProposer()
        self.uniform_proposer = UniformProposer()
    def test_greedy_two_table_perf(self) -> None:
        """Greedy proposer: pins the first five proposals for two identical tables."""
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100,
                embedding_dim=10,
                name="table_0",
                feature_names=["feature_0"],
            ),
            EmbeddingBagConfig(
                num_embeddings=100,
                embedding_dim=10,
                name="table_1",
                feature_names=["feature_1"],
            ),
        ]
        model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
        search_space = self.enumerator.enumerate(
            module=model, sharders=[EmbeddingBagCollectionSharder()]
        )
        self.greedy_proposer.load(search_space)
        # simulate first five iterations:
        output = []
        for _ in range(5):
            proposal = cast(List[ShardingOption], self.greedy_proposer.propose())
            # Sort deterministically (by max shard perf, then name) so the
            # golden comparison below is order-stable.
            proposal.sort(
                key=lambda sharding_option: (
                    max([shard.perf for shard in sharding_option.shards]),
                    sharding_option.name,
                )
            )
            output.append(
                [
                    (
                        candidate.name,
                        candidate.sharding_type,
                        candidate.compute_kernel,
                    )
                    for candidate in proposal
                ]
            )
            self.greedy_proposer.feedback(partitionable=True)
        expected_output = [
            [
                (
                    "table_0",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
            ],
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_0",
                    "data_parallel",
                    "dense",
                ),
            ],
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_0",
                    "row_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_0",
                    "table_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_0",
                    "row_wise",
                    "batched_dense",
                ),
            ],
        ]
        self.assertEqual(expected_output, output)
    def test_uniform_three_table_perf(self) -> None:
        """Uniform proposer: pins the full proposal sequence for three
        differently-sized tables (propose until exhausted)."""
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100 * i,
                embedding_dim=10 * i,
                name="table_" + str(i),
                feature_names=["feature_" + str(i)],
            )
            for i in range(1, 4)
        ]
        model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
        search_space = self.enumerator.enumerate(
            module=model, sharders=[EmbeddingBagCollectionSharder()]
        )
        self.uniform_proposer.load(search_space)
        output = []
        proposal = self.uniform_proposer.propose()
        while proposal:
            # Same deterministic ordering as the greedy test above.
            proposal.sort(
                key=lambda sharding_option: (
                    max([shard.perf for shard in sharding_option.shards]),
                    sharding_option.name,
                )
            )
            output.append(
                [
                    (
                        candidate.name,
                        candidate.sharding_type,
                        candidate.compute_kernel,
                    )
                    for candidate in proposal
                ]
            )
            self.uniform_proposer.feedback(partitionable=True)
            proposal = self.uniform_proposer.propose()
        expected_output = [
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_2",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_3",
                    "data_parallel",
                    "batched_dense",
                ),
            ],
            [
                (
                    "table_1",
                    "table_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "table_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "table_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "row_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "row_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "row_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "table_row_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "table_row_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "table_row_wise",
                    "batched_fused",
                ),
            ],
        ]
        self.assertEqual(expected_output, output)
| [
"torchrec.modules.embedding_configs.EmbeddingBagConfig",
"torchrec.distributed.planner.proposers.GreedyProposer",
"torchrec.distributed.planner.types.Topology",
"torchrec.distributed.planner.proposers.UniformProposer",
"torchrec.distributed.planner.enumerators.EmbeddingEnumerator",
"torchrec.distributed.e... | [((824, 869), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': '(2)', 'compute_device': '"""cuda"""'}), "(world_size=2, compute_device='cuda')\n", (832, 869), False, 'from torchrec.distributed.planner.types import Topology, ShardingOption\n'), ((896, 934), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology'}), '(topology=topology)\n', (915, 934), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((966, 982), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {}), '()\n', (980, 982), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((1015, 1032), 'torchrec.distributed.planner.proposers.UniformProposer', 'UniformProposer', ([], {}), '()\n', (1030, 1032), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((1115, 1220), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_0"""', 'feature_names': "['feature_0']"}), "(num_embeddings=100, embedding_dim=10, name='table_0',\n feature_names=['feature_0'])\n", (1133, 1220), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1309, 1414), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_1"""', 'feature_names': "['feature_1']"}), "(num_embeddings=100, embedding_dim=10, name='table_1',\n feature_names=['feature_1'])\n", (1327, 1414), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1560, 1580), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (1572, 1580), False, 'import torch\n'), ((4546, 4566), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (4558, 
4566), False, 'import torch\n'), ((1668, 1699), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (1697, 1699), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n'), ((4654, 4685), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (4683, 4685), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from functools import reduce
from typing import Tuple, Dict, Optional, List, cast, Union
import torch
import torch.distributed as dist
from torch import nn
from torchrec.distributed.collective_utils import (
invoke_on_rank_and_broadcast_result,
)
from torchrec.distributed.planner.constants import MAX_SIZE
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.partitioners import GreedyPerfPartitioner
from torchrec.distributed.planner.perf_models import NoopPerfModel
from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer
from torchrec.distributed.planner.stats import EmbeddingStats
from torchrec.distributed.planner.storage_reservations import (
HeuristicalStorageReservation,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Partitioner,
Topology,
Stats,
Shard,
Storage,
ShardingOption,
StorageReservation,
Enumerator,
Proposer,
PerfModel,
PlannerError,
)
from torchrec.distributed.types import (
EnumerableShardingSpec,
ShardMetadata,
)
from torchrec.distributed.types import (
ShardingPlan,
ShardingPlanner,
ModuleSharder,
ShardingType,
ParameterSharding,
)
def _merge_shards_by_dim(shards: List[Shard], dim: int) -> List[Shard]:
# merges shards down to one per rank along dimension.
# Will recompute shard offsets
merged_shards = []
shards = sorted(shards, key=lambda x: x.rank)
current_rank = -1
current_shard: Optional[Shard] = None
current_dim_offset = 0
for shard in shards:
if shard.rank != current_rank:
current_shard = copy.deepcopy(shard)
current_shard.offset[dim] = current_dim_offset
merged_shards.append(current_shard)
current_rank = shard.rank
else:
# pyre-ignore [16]
current_shard.size[dim] += shard.size[dim]
# pyre-ignore [16]
current_shard.storage += shard.storage
# pyre-ignore [16]
current_shard.perf += shard.perf
current_dim_offset += shard.size[dim]
return merged_shards
def _to_sharding_plan(
    sharding_options: List[ShardingOption],
    topology: Topology,
) -> ShardingPlan:
    """Convert placed sharding options into a ShardingPlan keyed by module
    path, building per-parameter sharding specs with device placements."""
    def _placement(
        compute_device: str,
        rank: int,
        local_size: int,
    ) -> str:
        # Placement string, e.g. "rank:3/cuda:1" — CUDA devices are indexed
        # by the rank's position within its host.
        param_device = compute_device
        if compute_device == "cuda":
            param_device = torch.device("cuda", rank % local_size)
        return f"rank:{rank}/{param_device}"
    compute_device = topology.compute_device
    local_size = topology.local_world_size
    plan = {}
    for sharding_option in sharding_options:
        shards = sharding_option.shards
        sharding_type = sharding_option.sharding_type
        # Group parameters of the same module under one module plan.
        module_plan = plan.get(sharding_option.path, {})
        module_plan[sharding_option.name] = ParameterSharding(
            # Data-parallel parameters carry no sharding spec.
            sharding_spec=None
            if sharding_type == ShardingType.DATA_PARALLEL.value
            else EnumerableShardingSpec(
                [
                    ShardMetadata(
                        shard_sizes=shard.size,
                        shard_offsets=shard.offset,
                        placement=_placement(
                            compute_device, cast(int, shard.rank), local_size
                        ),
                    )
                    for shard in shards
                ]
            ),
            sharding_type=sharding_type,
            compute_kernel=sharding_option.compute_kernel,
            ranks=[cast(int, shard.rank) for shard in shards],
        )
        plan[sharding_option.path] = module_plan
    return ShardingPlan(plan)
class EmbeddingShardingPlanner(ShardingPlanner):
    """
    Searches for an optimized sharding plan for a module's shardable embedding
    parameters, given a device topology and optional per-parameter constraints.

    Each pluggable component (enumerator, storage reservation, proposers,
    partitioner, perf model, stats) falls back to a sensible default when not
    supplied.
    """

    def __init__(
        self,
        topology: Topology,
        enumerator: Optional[Enumerator] = None,
        storage_reservation: Optional[StorageReservation] = None,
        proposer: Optional[Union[Proposer, List[Proposer]]] = None,
        partitioner: Optional[Partitioner] = None,
        performance_model: Optional[PerfModel] = None,
        stats: Optional[Stats] = None,
        constraints: Optional[Dict[str, ParameterConstraints]] = None,
        debug: bool = False,
    ) -> None:
        self._topology = topology
        self._constraints = constraints
        self._enumerator: Enumerator = (
            enumerator
            if enumerator
            else EmbeddingEnumerator(
                topology=topology,
                constraints=constraints,
            )
        )
        self._storage_reservation: StorageReservation = (
            storage_reservation
            if storage_reservation
            # Reserve 15% of device storage for dense modules by default.
            else HeuristicalStorageReservation(percentage=0.15)
        )
        self._partitioner: Partitioner = (
            partitioner if partitioner else GreedyPerfPartitioner()
        )
        if proposer:
            # Accept either a single proposer or a list of them.
            self._proposers: List[Proposer] = (
                [proposer] if not isinstance(proposer, list) else proposer
            )
        else:
            self._proposers = [
                GreedyProposer(),
                GreedyProposer(use_depth=False),
                UniformProposer(),
            ]
        self._perf_model: PerfModel = (
            performance_model if performance_model else NoopPerfModel(topology=topology)
        )
        self._stats: Stats = stats if stats else EmbeddingStats()
        self._debug = debug
        # Counters reported by stats and in the failure message below.
        self._num_proposals: int = 0
        self._num_plans: int = 0

    def collective_plan(
        self,
        module: nn.Module,
        sharders: List[ModuleSharder[nn.Module]],
        # pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
        pg: dist.ProcessGroup,
    ) -> ShardingPlan:
        """
        Call self.plan(...) on rank 0 and broadcast
        """
        return invoke_on_rank_and_broadcast_result(
            pg,
            0,
            self.plan,
            module,
            sharders,
        )

    def plan(
        self,
        module: nn.Module,
        sharders: List[ModuleSharder[nn.Module]],
    ) -> ShardingPlan:
        """
        Enumerate sharding options, iterate over all proposers' proposals, and
        return the partitionable plan with the best (lowest) perf rating.

        Raises:
            PlannerError: if no proposal could be partitioned within the
                reserved storage budget.
        """
        best_plan = None
        lowest_storage = Storage(MAX_SIZE, MAX_SIZE)
        best_perf_rating = MAX_SIZE
        storage_constraint: Topology = self._storage_reservation.reserve(
            topology=self._topology,
            module=module,
            sharders=sharders,
            constraints=self._constraints,
        )
        search_space = self._enumerator.enumerate(
            module=module,
            sharders=sharders,
        )
        if not search_space:
            # No shardable parameters
            return ShardingPlan({})
        # Memoizes feedback per proposal (keyed by the hashes of its sharding
        # options) so equivalent proposals from different proposers are not
        # re-partitioned and re-rated.
        proposal_cache: Dict[
            Tuple[int, ...],
            Tuple[bool, Optional[List[ShardingOption]], Optional[float]],
        ] = {}
        for proposer in self._proposers:
            proposer.load(search_space=search_space)
        for proposer in self._proposers:
            proposal = proposer.propose()
            while proposal:
                proposal_key = tuple(sorted(map(hash, proposal)))
                if proposal_key in proposal_cache:
                    partitionable, plan, perf_rating = proposal_cache[proposal_key]
                    proposer.feedback(
                        partitionable=partitionable,
                        plan=plan,
                        perf_rating=perf_rating,
                    )
                    proposal = proposer.propose()
                    continue
                self._num_proposals += 1
                try:
                    plan = self._partitioner.partition(
                        proposal=proposal,
                        storage_constraint=storage_constraint,
                    )
                    self._num_plans += 1
                    perf_rating = self._perf_model.rate(plan=plan)
                    if perf_rating < best_perf_rating:
                        best_perf_rating = perf_rating
                        best_plan = plan
                    proposal_cache[proposal_key] = (True, plan, perf_rating)
                    proposer.feedback(
                        partitionable=True, plan=plan, perf_rating=perf_rating
                    )
                except PlannerError:
                    # Track the smallest total storage requirement seen so the
                    # failure message can report what the model needs.
                    current_storage = cast(
                        Storage,
                        reduce(
                            lambda x, y: x + y,
                            [
                                shard.storage
                                for option in proposal
                                for shard in option.shards
                            ],
                        ),
                    )
                    if current_storage < lowest_storage:
                        lowest_storage = current_storage
                    proposal_cache[proposal_key] = (False, None, None)
                    proposer.feedback(partitionable=False)
                proposal = proposer.propose()
        if best_plan:
            sharding_plan = _to_sharding_plan(best_plan, self._topology)
            self._stats.log(
                sharding_plan=sharding_plan,
                topology=self._topology,
                num_proposals=self._num_proposals,
                num_plans=self._num_plans,
                best_plan=best_plan,
                constraints=self._constraints,
                debug=self._debug,
            )
            return sharding_plan
        else:
            global_storage_capacity = reduce(
                lambda x, y: x + y,
                [device.storage for device in self._topology.devices],
            )
            global_storage_constraints = reduce(
                lambda x, y: x + y,
                [device.storage for device in storage_constraint.devices],
            )
            raise PlannerError(
                f"Unable to find a plan for this model. Evaluated {self._num_proposals} proposals."
                "\nPossible solutions:"
                f"\n  1) Increase the number of devices ({self._topology.world_size})"
                f"\n  2) Reduce the model size ("
                f"\n\t  Global storage: {global_storage_capacity.hbm}, "
                f"\n\t  Available for model parallel: {global_storage_constraints},"
                f"\n\t  Requirement for model parallel: {lowest_storage})"
                f"\n  3) Reduce local batch size ({self._topology.batch_size})"
                "\n  4) Remove planner constraints that might be reducing search space or available storage\n"
            )
| [
"torchrec.distributed.collective_utils.invoke_on_rank_and_broadcast_result",
"torchrec.distributed.planner.storage_reservations.HeuristicalStorageReservation",
"torchrec.distributed.planner.proposers.GreedyProposer",
"torchrec.distributed.planner.stats.EmbeddingStats",
"torchrec.distributed.planner.types.Pl... | [((3958, 3976), 'torchrec.distributed.types.ShardingPlan', 'ShardingPlan', (['plan'], {}), '(plan)\n', (3970, 3976), False, 'from torchrec.distributed.types import ShardingPlan, ShardingPlanner, ModuleSharder, ShardingType, ParameterSharding\n'), ((6132, 6203), 'torchrec.distributed.collective_utils.invoke_on_rank_and_broadcast_result', 'invoke_on_rank_and_broadcast_result', (['pg', '(0)', 'self.plan', 'module', 'sharders'], {}), '(pg, 0, self.plan, module, sharders)\n', (6167, 6203), False, 'from torchrec.distributed.collective_utils import invoke_on_rank_and_broadcast_result\n'), ((6455, 6482), 'torchrec.distributed.planner.types.Storage', 'Storage', (['MAX_SIZE', 'MAX_SIZE'], {}), '(MAX_SIZE, MAX_SIZE)\n', (6462, 6482), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Partitioner, Topology, Stats, Shard, Storage, ShardingOption, StorageReservation, Enumerator, Proposer, PerfModel, PlannerError\n'), ((1924, 1944), 'copy.deepcopy', 'copy.deepcopy', (['shard'], {}), '(shard)\n', (1937, 1944), False, 'import copy\n'), ((2740, 2779), 'torch.device', 'torch.device', (['"""cuda"""', '(rank % local_size)'], {}), "('cuda', rank % local_size)\n", (2752, 2779), False, 'import torch\n'), ((4712, 4775), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology', 'constraints': 'constraints'}), '(topology=topology, constraints=constraints)\n', (4731, 4775), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((4975, 5021), 'torchrec.distributed.planner.storage_reservations.HeuristicalStorageReservation', 'HeuristicalStorageReservation', ([], {'percentage': '(0.15)'}), '(percentage=0.15)\n', (5004, 5021), False, 'from torchrec.distributed.planner.storage_reservations import HeuristicalStorageReservation\n'), ((5119, 5142), 'torchrec.distributed.planner.partitioners.GreedyPerfPartitioner', 'GreedyPerfPartitioner', 
([], {}), '()\n', (5140, 5142), False, 'from torchrec.distributed.planner.partitioners import GreedyPerfPartitioner\n'), ((5585, 5617), 'torchrec.distributed.planner.perf_models.NoopPerfModel', 'NoopPerfModel', ([], {'topology': 'topology'}), '(topology=topology)\n', (5598, 5617), False, 'from torchrec.distributed.planner.perf_models import NoopPerfModel\n'), ((5677, 5693), 'torchrec.distributed.planner.stats.EmbeddingStats', 'EmbeddingStats', ([], {}), '()\n', (5691, 5693), False, 'from torchrec.distributed.planner.stats import EmbeddingStats\n'), ((6948, 6964), 'torchrec.distributed.types.ShardingPlan', 'ShardingPlan', (['{}'], {}), '({})\n', (6960, 6964), False, 'from torchrec.distributed.types import ShardingPlan, ShardingPlanner, ModuleSharder, ShardingType, ParameterSharding\n'), ((9802, 9888), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[device.storage for device in self._topology.devices]'], {}), '(lambda x, y: x + y, [device.storage for device in self._topology.\n devices])\n', (9808, 9888), False, 'from functools import reduce\n'), ((9971, 10061), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[device.storage for device in storage_constraint.devices]'], {}), '(lambda x, y: x + y, [device.storage for device in storage_constraint\n .devices])\n', (9977, 10061), False, 'from functools import reduce\n'), ((10122, 10667), 'torchrec.distributed.planner.types.PlannerError', 'PlannerError', (['f"""Unable to find a plan for this model are evaluating {self._num_proposals} proposals.\nPossible solutions:\n 1) Increase the number of devices ({self._topology.world_size})\n 2) Reduce the model size (\n\t Global storage: {global_storage_capacity.hbm}, \n\t Available for model parallel: {global_storge_constraints},\n\t Requirement for model parallel: {lowest_storage})\n 3) Reduce local batch size ({self._topology.batch_size})\n 4) Remove planner constraints that might be reducing search space or available storage\n"""'], {}), '(\n f"""Unable to 
find a plan for this model are evaluating {self._num_proposals} proposals.\nPossible solutions:\n 1) Increase the number of devices ({self._topology.world_size})\n 2) Reduce the model size (\n\t Global storage: {global_storage_capacity.hbm}, \n\t Available for model parallel: {global_storge_constraints},\n\t Requirement for model parallel: {lowest_storage})\n 3) Reduce local batch size ({self._topology.batch_size})\n 4) Remove planner constraints that might be reducing search space or available storage\n"""\n )\n', (10134, 10667), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Partitioner, Topology, Stats, Shard, Storage, ShardingOption, StorageReservation, Enumerator, Proposer, PerfModel, PlannerError\n'), ((5373, 5389), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {}), '()\n', (5387, 5389), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((5407, 5438), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {'use_depth': '(False)'}), '(use_depth=False)\n', (5421, 5438), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((5456, 5473), 'torchrec.distributed.planner.proposers.UniformProposer', 'UniformProposer', ([], {}), '()\n', (5471, 5473), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((3844, 3865), 'typing.cast', 'cast', (['int', 'shard.rank'], {}), '(int, shard.rank)\n', (3848, 3865), False, 'from typing import Tuple, Dict, Optional, List, cast, Union\n'), ((8661, 8758), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[shard.storage for option in proposal for shard in option.shards]'], {}), '(lambda x, y: x + y, [shard.storage for option in proposal for shard in\n option.shards])\n', (8667, 8758), False, 'from functools import reduce\n'), ((3569, 3590), 'typing.cast', 'cast', (['int', 'shard.rank'], {}), '(int, 
shard.rank)\n', (3573, 3590), False, 'from typing import Tuple, Dict, Optional, List, cast, Union\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.data.datapipes as dp
from iopath.common.file_io import PathManager, PathManagerFactory
from pyre_extensions import none_throws
from torch.utils.data import IterableDataset, IterDataPipe
from torchrec.datasets.utils import (
Batch,
LoadFiles,
PATH_MANAGER_KEY,
ReadLinesFromCSV,
safe_cast,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
# Sparse IDs occurring fewer than this many times are remapped to a single
# "infrequent" bucket during preprocessing (see sparse_to_contiguous below).
FREQUENCY_THRESHOLD = 3
# Criteo rows contain 13 integer (dense) and 26 categorical (sparse) features.
INT_FEATURE_COUNT = 13
CAT_FEATURE_COUNT = 26
# The Criteo 1TB Click Logs dataset is split into 24 day files (day_0..day_23).
DAYS = 24
DEFAULT_LABEL_NAME = "label"
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]
# Column order in the source TSVs: label first, then dense, then sparse.
DEFAULT_COLUMN_NAMES: List[str] = [
    DEFAULT_LABEL_NAME,
    *DEFAULT_INT_NAMES,
    *DEFAULT_CAT_NAMES,
]
TOTAL_TRAINING_SAMPLES = 4195197692  # Number of rows across days 0-22 (day 23 is used for validation and testing)
# One caster per column, aligned with DEFAULT_COLUMN_NAMES: the label and
# dense values parse as int (default 0); categorical values stay str (default "").
COLUMN_TYPE_CASTERS: List[Callable[[Union[int, str]], Union[int, str]]] = [
    lambda val: safe_cast(val, int, 0),
    *(lambda val: safe_cast(val, int, 0) for _ in range(INT_FEATURE_COUNT)),
    *(lambda val: safe_cast(val, str, "") for _ in range(CAT_FEATURE_COUNT)),
]
def _default_row_mapper(example: List[str]) -> Dict[str, Union[int, str]]:
    """Map one split TSV row to a dict of column name -> casted value.

    Values are paired with names/casters from the END of the row backwards, so
    a short row aligns with the trailing (categorical) columns.
    """
    names = reversed(DEFAULT_COLUMN_NAMES)
    casters = reversed(COLUMN_TYPE_CASTERS)
    mapped: Dict[str, Union[int, str]] = {}
    for value in reversed(example):
        mapped[next(names)] = next(casters)(value)
    return mapped
class CriteoIterDataPipe(IterDataPipe):
    """
    IterDataPipe that streams either the Criteo 1TB Click Logs Dataset
    (https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
    Kaggle/Criteo Display Advertising Dataset
    (https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
    files.

    Args:
        paths (Iterable[str]): local paths to TSV files that constitute the Criteo
            dataset.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
            split TSV line.
        open_kw: options to pass to underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        datapipe = CriteoIterDataPipe(
            ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        )
        datapipe = dp.iter.Batcher(datapipe, 100)
        datapipe = dp.iter.Collator(datapipe)
        batch = next(iter(datapipe))
    """

    def __init__(
        self,
        paths: Iterable[str],
        *,
        # pyre-ignore[2]
        row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
        # pyre-ignore[2]
        **open_kw,
    ) -> None:
        self.paths = paths
        self.row_mapper = row_mapper
        self.open_kw: Any = open_kw  # pyre-ignore[4]

    # pyre-ignore[3]
    def __iter__(self) -> Iterator[Any]:
        # Shard files round-robin across DataLoader workers so each worker
        # reads a disjoint subset of paths.
        worker_info = torch.utils.data.get_worker_info()
        paths = self.paths
        if worker_info is not None:
            num_workers = worker_info.num_workers
            worker_id = worker_info.id
            paths = (
                path
                for (file_idx, path) in enumerate(paths)
                if file_idx % num_workers == worker_id
            )
        datapipe = LoadFiles(paths, mode="r", **self.open_kw)
        datapipe = ReadLinesFromCSV(datapipe, delimiter="\t")
        if self.row_mapper:
            datapipe = dp.iter.Mapper(datapipe, self.row_mapper)
        yield from datapipe
def criteo_terabyte(
    paths: Iterable[str],
    *,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`Criteo 1TB Click Logs <https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/>`_ Dataset

    Thin factory around :class:`CriteoIterDataPipe`.

    Args:
        paths (Iterable[str]): local paths to TSV files that constitute the Criteo 1TB
            dataset.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
            split TSV line.
        open_kw: options to pass to underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        datapipe = criteo_terabyte(
            ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        )
        datapipe = dp.iter.Batcher(datapipe, 100)
        datapipe = dp.iter.Collator(datapipe)
        batch = next(iter(datapipe))
    """
    pipe = CriteoIterDataPipe(paths, row_mapper=row_mapper, **open_kw)
    return pipe
def criteo_kaggle(
    path: str,
    *,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`Kaggle/Criteo Display Advertising <https://www.kaggle.com/c/criteo-display-ad-challenge/>`_ Dataset

    Single-file variant of :func:`criteo_terabyte`.

    Args:
        root (str): local path to train or test dataset file.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each split TSV line.
        open_kw: options to pass to underlying invocation of iopath.common.file_io.PathManager.open.

    Example::

        train_datapipe = criteo_kaggle(
            "/home/datasets/criteo_kaggle/train.txt",
        )
        example = next(iter(train_datapipe))
        test_datapipe = criteo_kaggle(
            "/home/datasets/criteo_kaggle/test.txt",
        )
        example = next(iter(test_datapipe))
    """
    pipe = CriteoIterDataPipe((path,), row_mapper=row_mapper, **open_kw)
    return pipe
class BinaryCriteoUtils:
    """
    Utility functions used to preprocess, save, load, partition, etc. the Criteo
    dataset in a binary (numpy) format.
    """

    @staticmethod
    def tsv_to_npys(
        in_file: str,
        out_dense_file: str,
        out_sparse_file: str,
        out_labels_file: str,
        path_manager_key: str = PATH_MANAGER_KEY,
    ) -> None:
        """
        Convert one Criteo tsv file to three npy files: one for dense (np.float32), one
        for sparse (np.int32), and one for labels (np.int32).

        Args:
            in_file (str): Input tsv file path.
            out_dense_file (str): Output dense npy file path.
            out_sparse_file (str): Output sparse npy file path.
            out_labels_file (str): Output labels npy file path.
            path_manager_key (str): Path manager key used to load from different
                filesystems.

        Returns:
            None.
        """

        def row_mapper(row: List[str]) -> Tuple[List[int], List[int], int]:
            # Column order: label, dense ints, then hex-string categoricals
            # which are parsed as base-16 integers.
            label = safe_cast(row[0], int, 0)
            dense = [safe_cast(row[i], int, 0) for i in range(1, 1 + INT_FEATURE_COUNT)]
            sparse = [
                int(safe_cast(row[i], str, "0") or "0", 16)
                for i in range(
                    1 + INT_FEATURE_COUNT, 1 + INT_FEATURE_COUNT + CAT_FEATURE_COUNT
                )
            ]
            return dense, sparse, label  # pyre-ignore[7]

        dense, sparse, labels = [], [], []
        for (row_dense, row_sparse, row_label) in CriteoIterDataPipe(
            [in_file], row_mapper=row_mapper
        ):
            dense.append(row_dense)
            sparse.append(row_sparse)
            labels.append(row_label)

        # PyTorch tensors can't handle uint32, but we can save space by not
        # using int64. Numpy will automatically handle dense values >= 2 ** 31.
        dense_np = np.array(dense, dtype=np.int32)
        del dense
        sparse_np = np.array(sparse, dtype=np.int32)
        del sparse
        labels_np = np.array(labels, dtype=np.int32)
        del labels

        # Log is expensive to compute at runtime. The +3 shift keeps all
        # values strictly positive before the log transform.
        dense_np += 3
        dense_np = np.log(dense_np, dtype=np.float32)

        # To be consistent with dense and sparse.
        labels_np = labels_np.reshape((-1, 1))

        path_manager = PathManagerFactory().get(path_manager_key)
        for (fname, arr) in [
            (out_dense_file, dense_np),
            (out_sparse_file, sparse_np),
            (out_labels_file, labels_np),
        ]:
            with path_manager.open(fname, "wb") as fout:
                np.save(fout, arr)

    @staticmethod
    def get_shape_from_npy(
        path: str, path_manager_key: str = PATH_MANAGER_KEY
    ) -> Tuple[int, ...]:
        """
        Returns the shape of an npy file using only its header.

        Args:
            path (str): Input npy file path.
            path_manager_key (str): Path manager key used to load from different
                filesystems.

        Returns:
            shape (Tuple[int, ...]): Shape tuple.
        """
        path_manager = PathManagerFactory().get(path_manager_key)
        with path_manager.open(path, "rb") as fin:
            # Read only the magic bytes and header; the array data itself is
            # never touched.
            np.lib.format.read_magic(fin)
            shape, _order, _dtype = np.lib.format.read_array_header_1_0(fin)
            return shape

    @staticmethod
    def get_file_idx_to_row_range(
        lengths: List[int],
        rank: int,
        world_size: int,
    ) -> Dict[int, Tuple[int, int]]:
        """
        Given a rank, world_size, and the lengths (number of rows) for a list of files,
        return which files and which portions of those files (represented as row ranges
        - all range indices are inclusive) should be handled by the rank. Each rank
        will be assigned the same number of rows.

        The ranges are determined in such a way that each rank deals with large
        continuous ranges of files. This enables each rank to reduce the amount of data
        it needs to read while avoiding seeks.

        Args:
            lengths (List[int]): A list of row counts for each file.
            rank (int): rank.
            world_size (int): world size.

        Returns:
            output (Dict[int, Tuple[int, int]]): Mapping of which files to the range in
                those files to be handled by the rank. The keys of this dict are indices
                of lengths.
        """

        # All ..._g variables are globals indices (meaning they range from 0 to
        # total_length - 1). All ..._l variables are local indices (meaning they range
        # from 0 to lengths[i] - 1 for the ith file).

        total_length = sum(lengths)
        # NOTE: integer division means up to world_size - 1 trailing rows are
        # not assigned to any rank.
        rows_per_rank = total_length // world_size

        # Global indices that rank is responsible for. All ranges (left, right) are
        # inclusive.
        rank_left_g = rank * rows_per_rank
        rank_right_g = (rank + 1) * rows_per_rank - 1

        output = {}

        # Find where range (rank_left_g, rank_right_g) intersects each file's range.
        file_left_g, file_right_g = -1, -1
        for idx, length in enumerate(lengths):
            file_left_g = file_right_g + 1
            file_right_g = file_left_g + length - 1

            # If the ranges overlap.
            if rank_left_g <= file_right_g and rank_right_g >= file_left_g:
                overlap_left_g, overlap_right_g = max(rank_left_g, file_left_g), min(
                    rank_right_g, file_right_g
                )

                # Convert overlap in global numbers to (local) numbers specific to the
                # file.
                overlap_left_l = overlap_left_g - file_left_g
                overlap_right_l = overlap_right_g - file_left_g
                output[idx] = (overlap_left_l, overlap_right_l)

        return output

    @staticmethod
    def load_npy_range(
        fname: str,
        start_row: int,
        num_rows: int,
        path_manager_key: str = PATH_MANAGER_KEY,
        mmap_mode: bool = False,
    ) -> np.ndarray:
        """
        Load part of an npy file.

        NOTE: Assumes npy represents a numpy array of ndim 2.

        Args:
            fname (str): path string to npy file.
            start_row (int): starting row from the npy file.
            num_rows (int): number of rows to get from the npy file.
            path_manager_key (str): Path manager key used to load from different
                filesystems.

        Returns:
            output (np.ndarray): numpy array with the desired range of data from the
                supplied npy file.

        Raises:
            ValueError: if the npy file is not 2-dimensional or the requested
                row range is out of bounds.
        """
        path_manager = PathManagerFactory().get(path_manager_key)
        with path_manager.open(fname, "rb") as fin:
            np.lib.format.read_magic(fin)
            shape, _order, dtype = np.lib.format.read_array_header_1_0(fin)
            if len(shape) == 2:
                total_rows, row_size = shape
            else:
                # Message fixed: the error fires when ndim is NOT 2.
                raise ValueError("Cannot load range for npy with ndim != 2.")

            if not (0 <= start_row < total_rows):
                raise ValueError(
                    f"start_row ({start_row}) is out of bounds. It must be between 0 "
                    f"and {total_rows - 1}, inclusive."
                )
            if not (start_row + num_rows <= total_rows):
                raise ValueError(
                    f"num_rows ({num_rows}) exceeds number of available rows "
                    f"({total_rows}) for the given start_row ({start_row})."
                )
            if mmap_mode:
                # NOTE(review): the mmap path re-opens fname via np.load
                # directly, bypassing path_manager — local filesystem only.
                data = np.load(fname, mmap_mode="r")
                data = data[start_row : start_row + num_rows]
            else:
                # Seek past the rows before start_row, then read exactly the
                # requested rows.
                offset = start_row * row_size * dtype.itemsize
                fin.seek(offset, os.SEEK_CUR)
                num_entries = num_rows * row_size
                data = np.fromfile(fin, dtype=dtype, count=num_entries)
            return data.reshape((num_rows, row_size))

    @staticmethod
    def sparse_to_contiguous(
        in_files: List[str],
        output_dir: str,
        frequency_threshold: int = FREQUENCY_THRESHOLD,
        columns: int = CAT_FEATURE_COUNT,
        path_manager_key: str = PATH_MANAGER_KEY,
        output_file_suffix: str = "_contig_freq.npy",
    ) -> None:
        """
        Convert all sparse .npy files to have contiguous integers. Store in a separate
        .npy file. All input files must be processed together because columns
        can have matching IDs between files. Hence, they must be transformed
        together. Also, the transformed IDs are not unique between columns. IDs
        that appear less than frequency_threshold amount of times will be remapped
        to have a value of 1.

        Example transformation, frequenchy_threshold of 2:
        day_0_sparse.npy
        | col_0 | col_1 |
        -----------------
        | abc   | xyz   |
        | iop   | xyz   |

        day_1_sparse.npy
        | col_0 | col_1 |
        -----------------
        | iop   | tuv   |
        | lkj   | xyz   |

        day_0_sparse_contig.npy
        | col_0 | col_1 |
        -----------------
        | 1     | 2     |
        | 2     | 2     |

        day_1_sparse_contig.npy
        | col_0 | col_1 |
        -----------------
        | 2     | 1     |
        | 1     | 2     |

        Args:
            in_files List[str]: Input directory of npy files.
            out_dir (str): Output directory of processed npy files.
            frequency_threshold: IDs occuring less than this frequency will be remapped to a value of 1.
            path_manager_key (str): Path manager key used to load from different filesystems.

        Returns:
            None.
        """

        # Load each .npy file of sparse features. Transformations are made along the columns.
        # Thereby, transpose the input to ease operations.
        # E.g. file_to_features = {"day_0_sparse": [array([[3,6,7],[7,9,3]]}
        file_to_features: Dict[str, np.ndarray] = {}
        for f in in_files:
            name = os.path.basename(f).split(".")[0]
            file_to_features[name] = np.load(f).transpose()
            print(f"Successfully loaded file: {f}")

        # Iterate through each column in each file and map the sparse ids to contiguous ids.
        for col in range(columns):
            print(f"Processing column: {col}")

            # Iterate through each row in each file for the current column and determine the
            # frequency of each sparse id.
            sparse_to_frequency: Dict[int, int] = {}
            if frequency_threshold > 1:
                for f in file_to_features:
                    for sparse in file_to_features[f][col]:
                        if sparse in sparse_to_frequency:
                            sparse_to_frequency[sparse] += 1
                        else:
                            sparse_to_frequency[sparse] = 1

            # Iterate through each row in each file for the current column and remap each
            # sparse id to a contiguous id. The contiguous ints start at a value of 2 so that
            # infrequenct IDs (determined by the frequency_threshold) can be remapped to 1.
            running_sum = 2
            sparse_to_contiguous_int: Dict[int, int] = {}
            for f in file_to_features:
                print(f"Processing file: {f}")

                for i, sparse in enumerate(file_to_features[f][col]):
                    if sparse not in sparse_to_contiguous_int:
                        # If the ID appears less than frequency_threshold amount of times
                        # remap the value to 1.
                        if (
                            frequency_threshold > 1
                            and sparse_to_frequency[sparse] < frequency_threshold
                        ):
                            sparse_to_contiguous_int[sparse] = 1
                        else:
                            sparse_to_contiguous_int[sparse] = running_sum
                            running_sum += 1

                    # Re-map sparse value to contiguous in place.
                    file_to_features[f][col][i] = sparse_to_contiguous_int[sparse]

        path_manager = PathManagerFactory().get(path_manager_key)
        for f, features in file_to_features.items():
            output_file = os.path.join(output_dir, f + output_file_suffix)
            with path_manager.open(output_file, "wb") as fout:
                print(f"Writing file: {output_file}")
                # Transpose back the features when saving, as they were transposed when loading.
                np.save(fout, features.transpose())

    @staticmethod
    def shuffle(
        input_dir_labels_and_dense: str,
        input_dir_sparse: str,
        output_dir_shuffled: str,
        rows_per_day: Dict[int, int],
        output_dir_full_set: Optional[str] = None,
        days: int = DAYS,
        int_columns: int = INT_FEATURE_COUNT,
        sparse_columns: int = CAT_FEATURE_COUNT,
        path_manager_key: str = PATH_MANAGER_KEY,
    ) -> None:
        """
        Shuffle the dataset. Expects the files to be in .npy format and the data
        to be split by day and by dense, sparse and label data.
        Dense data must be in: day_x_dense.npy
        Sparse data must be in: day_x_sparse.npy
        Labels data must be in: day_x_labels.npy

        The dataset will be reconstructed, shuffled and then split back into
        separate dense, sparse and labels files.

        Args:
            input_dir_labels_and_dense (str): Input directory of labels and dense npy files.
            input_dir_sparse (str): Input directory of sparse npy files.
            output_dir_shuffled (str): Output directory for shuffled labels, dense and sparse npy files.
            rows_per_day Dict[int, int]: Number of rows in each file.
            output_dir_full_set (str): Output directory of the full dataset, if desired.
            days (int): Number of day files.
            int_columns (int): Number of columns with dense features.
            columns (int): Total number of columns.
            path_manager_key (str): Path manager key used to load from different filesystems.
        """
        total_rows = sum(rows_per_day.values())
        columns = int_columns + sparse_columns + 1  # add 1 for label column
        # Holds the whole reassembled dataset in memory; row layout is
        # [dense | sparse | label] per sample.
        full_dataset = np.zeros((total_rows, columns), dtype=np.float32)
        curr_first_row = 0
        curr_last_row = 0
        for d in range(0, days):
            curr_last_row += rows_per_day[d]

            # dense
            path_to_file = os.path.join(
                input_dir_labels_and_dense, f"day_{d}_dense.npy"
            )
            data = np.load(path_to_file)
            print(
                f"Day {d} dense- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
            )
            full_dataset[curr_first_row:curr_last_row, 0:int_columns] = data
            del data

            # sparse
            path_to_file = os.path.join(input_dir_sparse, f"day_{d}_sparse.npy")
            data = np.load(path_to_file)
            print(
                f"Day {d} sparse- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
            )
            full_dataset[curr_first_row:curr_last_row, int_columns : columns - 1] = data
            del data

            # labels
            path_to_file = os.path.join(
                input_dir_labels_and_dense, f"day_{d}_labels.npy"
            )
            data = np.load(path_to_file)
            print(
                f"Day {d} labels- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
            )
            full_dataset[curr_first_row:curr_last_row, columns - 1 :] = data
            del data

            curr_first_row = curr_last_row

        path_manager = PathManagerFactory().get(path_manager_key)

        # Save the full dataset
        if output_dir_full_set is not None:
            full_output_file = os.path.join(output_dir_full_set, "full.npy")
            with path_manager.open(full_output_file, "wb") as fout:
                print(f"Writing full set file: {full_output_file}")
                np.save(fout, full_dataset)

        print("Shuffling dataset")
        # In-place row shuffle across all days.
        np.random.shuffle(full_dataset)

        # Slice and save each portion into dense, sparse and labels
        curr_first_row = 0
        curr_last_row = 0
        for d in range(0, days):
            curr_last_row += rows_per_day[d]

            # write dense columns
            shuffled_dense_file = os.path.join(
                output_dir_shuffled, f"day_{d}_dense.npy"
            )
            with path_manager.open(shuffled_dense_file, "wb") as fout:
                print(
                    f"Writing rows {curr_first_row}-{curr_last_row-1} dense file: {shuffled_dense_file}"
                )
                np.save(fout, full_dataset[curr_first_row:curr_last_row, 0:int_columns])

            # write sparse columns
            shuffled_sparse_file = os.path.join(
                output_dir_shuffled, f"day_{d}_sparse.npy"
            )
            with path_manager.open(shuffled_sparse_file, "wb") as fout:
                print(
                    f"Writing rows {curr_first_row}-{curr_last_row-1} sparse file: {shuffled_sparse_file}"
                )
                # Sparse ids and labels were stored as float32 in full_dataset;
                # cast back to int32 on the way out.
                np.save(
                    fout,
                    full_dataset[
                        curr_first_row:curr_last_row, int_columns : columns - 1
                    ].astype(np.int32),
                )

            # write labels columns
            shuffled_labels_file = os.path.join(
                output_dir_shuffled, f"day_{d}_labels.npy"
            )
            with path_manager.open(shuffled_labels_file, "wb") as fout:
                print(
                    f"Writing rows {curr_first_row}-{curr_last_row-1} labels file: {shuffled_labels_file}"
                )
                np.save(
                    fout,
                    full_dataset[curr_first_row:curr_last_row, columns - 1 :].astype(
                        np.int32
                    ),
                )

            curr_first_row = curr_last_row
class InMemoryBinaryCriteoIterDataPipe(IterableDataset):
"""
Datapipe designed to operate over binary (npy) versions of Criteo datasets. Loads
the entire dataset into memory to prevent disk speed from affecting throughout. Each
rank reads only the data for the portion of the dataset it is responsible for.
The torchrec/datasets/scripts/preprocess_criteo.py script can be used to convert
the Criteo tsv files to the npy files expected by this dataset.
Args:
dense_paths (List[str]): List of path strings to dense npy files.
sparse_paths (List[str]): List of path strings to sparse npy files.
labels_paths (List[str]): List of path strings to labels npy files.
batch_size (int): batch size.
rank (int): rank.
world_size (int): world size.
shuffle_batches (bool): Whether to shuffle batches
hashes (Optional[int]): List of max categorical feature value for each feature.
Length of this list should be CAT_FEATURE_COUNT.
path_manager_key (str): Path manager key used to load from different
filesystems.
Example::
template = "/home/datasets/criteo/1tb_binary/day_{}_{}.npy"
datapipe = InMemoryBinaryCriteoIterDataPipe(
dense_paths=[template.format(0, "dense"), template.format(1, "dense")],
sparse_paths=[template.format(0, "sparse"), template.format(1, "sparse")],
labels_paths=[template.format(0, "labels"), template.format(1, "labels")],
batch_size=1024,
rank=torch.distributed.get_rank(),
world_size=torch.distributed.get_world_size(),
)
batch = next(iter(datapipe))
"""
    def __init__(
        self,
        dense_paths: List[str],
        sparse_paths: List[str],
        labels_paths: List[str],
        batch_size: int,
        rank: int,
        world_size: int,
        shuffle_batches: bool = False,
        mmap_mode: bool = False,
        hashes: Optional[List[int]] = None,
        path_manager_key: str = PATH_MANAGER_KEY,
    ) -> None:
        # See the class docstring for argument semantics.
        self.dense_paths = dense_paths
        self.sparse_paths = sparse_paths
        self.labels_paths = labels_paths
        self.batch_size = batch_size
        self.rank = rank
        self.world_size = world_size
        self.shuffle_batches = shuffle_batches
        self.mmap_mode = mmap_mode
        self.hashes = hashes
        self.path_manager_key = path_manager_key
        self.path_manager: PathManager = PathManagerFactory().get(path_manager_key)
        # Populates self.dense_arrs / self.sparse_arrs / self.labels_arrs with
        # this rank's slice of each npy file (mmapped if mmap_mode is set).
        self._load_data_for_rank()
        self.num_rows_per_file: List[int] = [a.shape[0] for a in self.dense_arrs]
        # Integer division: a trailing partial batch is dropped.
        self.num_batches: int = sum(self.num_rows_per_file) // batch_size
        # These values are the same for the KeyedJaggedTensors in all batches, so they
        # are computed once here. This avoids extra work from the KeyedJaggedTensor sync
        # functions.
        self._num_ids_in_batch: int = CAT_FEATURE_COUNT * batch_size
        self.keys: List[str] = DEFAULT_CAT_NAMES
        # Every categorical feature contributes exactly one id per sample, so
        # lengths are all ones and offsets are 0..num_ids inclusive.
        self.lengths: torch.Tensor = torch.ones(
            (self._num_ids_in_batch,), dtype=torch.int32
        )
        self.offsets: torch.Tensor = torch.arange(
            0, self._num_ids_in_batch + 1, dtype=torch.int32
        )
        self.stride = batch_size
        self.length_per_key: List[int] = CAT_FEATURE_COUNT * [batch_size]
        self.offset_per_key: List[int] = [
            batch_size * i for i in range(CAT_FEATURE_COUNT + 1)
        ]
        self.index_per_key: Dict[str, int] = {
            key: i for (i, key) in enumerate(self.keys)
        }
    def _load_data_for_rank(self) -> None:
        """
        Load this rank's row range of every dense/sparse/labels npy file into
        self.dense_arrs, self.sparse_arrs and self.labels_arrs (one array per
        overlapping file).
        """
        # Row counts are read from the dense files only; assumes the sparse
        # and labels files have identical per-day row counts -- TODO confirm.
        file_idx_to_row_range = BinaryCriteoUtils.get_file_idx_to_row_range(
            lengths=[
                BinaryCriteoUtils.get_shape_from_npy(
                    path, path_manager_key=self.path_manager_key
                )[0]
                for path in self.dense_paths
            ],
            rank=self.rank,
            world_size=self.world_size,
        )
        self.dense_arrs, self.sparse_arrs, self.labels_arrs = [], [], []
        for arrs, paths in zip(
            [self.dense_arrs, self.sparse_arrs, self.labels_arrs],
            [self.dense_paths, self.sparse_paths, self.labels_paths],
        ):
            # Ranges are inclusive, hence the +1 when converting to a count.
            for idx, (range_left, range_right) in file_idx_to_row_range.items():
                arrs.append(
                    BinaryCriteoUtils.load_npy_range(
                        paths[idx],
                        range_left,
                        range_right - range_left + 1,
                        path_manager_key=self.path_manager_key,
                        mmap_mode=self.mmap_mode,
                    )
                )
        # When mmap_mode is enabled, the hash is applied in def __iter__, which is
        # where samples are batched during training.
        # Otherwise, the ML dataset is preloaded, and the hash is applied here in
        # the preload stage, as shown:
        if not self.mmap_mode and self.hashes is not None:
            hashes_np = np.array(self.hashes).reshape((1, CAT_FEATURE_COUNT))
            for sparse_arr in self.sparse_arrs:
                # Per-column modulus caps each categorical id at its hash size.
                sparse_arr %= hashes_np
def _np_arrays_to_batch(
self, dense: np.ndarray, sparse: np.ndarray, labels: np.ndarray
) -> Batch:
if self.shuffle_batches:
# Shuffle all 3 in unison
shuffler = np.random.permutation(len(dense))
dense = dense[shuffler]
sparse = sparse[shuffler]
labels = labels[shuffler]
return Batch(
dense_features=torch.from_numpy(dense),
sparse_features=KeyedJaggedTensor(
keys=self.keys,
# transpose + reshape(-1) incurs an additional copy.
values=torch.from_numpy(sparse.transpose(1, 0).reshape(-1)),
lengths=self.lengths,
offsets=self.offsets,
stride=self.stride,
length_per_key=self.length_per_key,
offset_per_key=self.offset_per_key,
index_per_key=self.index_per_key,
),
labels=torch.from_numpy(labels.reshape(-1)),
)
    def __iter__(self) -> Iterator[Batch]:
        """Yield ``Batch``es of exactly ``self.batch_size`` rows each.

        Rows are consumed file-by-file; iteration stops after
        ``self.num_batches`` full batches, so any trailing partial batch is
        dropped.
        """
        # Invariant: buffer never contains more than batch_size rows.
        buffer: Optional[List[np.ndarray]] = None

        def append_to_buffer(
            dense: np.ndarray, sparse: np.ndarray, labels: np.ndarray
        ) -> None:
            # Accumulate [dense, sparse, labels] row-slices into the buffer.
            nonlocal buffer
            if buffer is None:
                buffer = [dense, sparse, labels]
            else:
                for idx, arr in enumerate([dense, sparse, labels]):
                    buffer[idx] = np.concatenate((buffer[idx], arr))

        # Maintain a buffer that can contain up to batch_size rows. Fill buffer as
        # much as possible on each iteration. Only return a new batch when batch_size
        # rows are filled.
        file_idx = 0
        row_idx = 0
        batch_idx = 0
        while batch_idx < self.num_batches:
            buffer_row_count = 0 if buffer is None else none_throws(buffer)[0].shape[0]
            if buffer_row_count == self.batch_size:
                # Buffer is full: emit it as one Batch and start a fresh one.
                yield self._np_arrays_to_batch(*none_throws(buffer))
                batch_idx += 1
                buffer = None
            else:
                # Take as many rows as possible from the current file without
                # overfilling the buffer.
                rows_to_get = min(
                    self.batch_size - buffer_row_count,
                    self.num_rows_per_file[file_idx] - row_idx,
                )
                slice_ = slice(row_idx, row_idx + rows_to_get)
                dense_inputs = self.dense_arrs[file_idx][slice_, :]
                sparse_inputs = self.sparse_arrs[file_idx][slice_, :]
                target_labels = self.labels_arrs[file_idx][slice_, :]
                if self.mmap_mode and self.hashes is not None:
                    # mmap path: hashing was skipped at load time, so apply the
                    # per-feature modulo here, per batch.
                    sparse_inputs = sparse_inputs % np.array(self.hashes).reshape(
                        (1, CAT_FEATURE_COUNT)
                    )
                append_to_buffer(
                    dense_inputs,
                    sparse_inputs,
                    target_labels,
                )
                row_idx += rows_to_get
                if row_idx >= self.num_rows_per_file[file_idx]:
                    # Exhausted this file; advance to the next one.
                    file_idx += 1
                    row_idx = 0
    def __len__(self) -> int:
        """Number of full batches this datapipe will yield."""
        return self.num_batches
| [
"torchrec.datasets.utils.LoadFiles",
"torchrec.datasets.utils.safe_cast",
"torchrec.datasets.utils.ReadLinesFromCSV"
] | [((1358, 1380), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'int', '(0)'], {}), '(val, int, 0)\n', (1367, 1380), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((3250, 3284), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (3282, 3284), False, 'import torch\n'), ((3543, 3585), 'torchrec.datasets.utils.LoadFiles', 'LoadFiles', (['paths'], {'mode': '"""r"""'}), "(paths, mode='r', **self.open_kw)\n", (3552, 3585), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((3605, 3647), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['datapipe'], {'delimiter': '"""\t"""'}), "(datapipe, delimiter='\\t')\n", (3621, 3647), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((7668, 7699), 'numpy.array', 'np.array', (['dense'], {'dtype': 'np.int32'}), '(dense, dtype=np.int32)\n', (7676, 7699), True, 'import numpy as np\n'), ((7738, 7770), 'numpy.array', 'np.array', (['sparse'], {'dtype': 'np.int32'}), '(sparse, dtype=np.int32)\n', (7746, 7770), True, 'import numpy as np\n'), ((7810, 7842), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.int32'}), '(labels, dtype=np.int32)\n', (7818, 7842), True, 'import numpy as np\n'), ((7954, 7988), 'numpy.log', 'np.log', (['dense_np'], {'dtype': 'np.float32'}), '(dense_np, dtype=np.float32)\n', (7960, 7988), True, 'import numpy as np\n'), ((20171, 20220), 'numpy.zeros', 'np.zeros', (['(total_rows, columns)'], {'dtype': 'np.float32'}), '((total_rows, columns), dtype=np.float32)\n', (20179, 20220), True, 'import numpy as np\n'), ((22100, 22131), 'numpy.random.shuffle', 'np.random.shuffle', (['full_dataset'], {}), '(full_dataset)\n', (22117, 22131), True, 'import numpy as np\n'), ((27130, 27186), 'torch.ones', 'torch.ones', (['(self._num_ids_in_batch,)'], {'dtype': 
'torch.int32'}), '((self._num_ids_in_batch,), dtype=torch.int32)\n', (27140, 27186), False, 'import torch\n'), ((27246, 27308), 'torch.arange', 'torch.arange', (['(0)', '(self._num_ids_in_batch + 1)'], {'dtype': 'torch.int32'}), '(0, self._num_ids_in_batch + 1, dtype=torch.int32)\n', (27258, 27308), False, 'import torch\n'), ((3699, 3740), 'torch.utils.data.datapipes.iter.Mapper', 'dp.iter.Mapper', (['datapipe', 'self.row_mapper'], {}), '(datapipe, self.row_mapper)\n', (3713, 3740), True, 'import torch.utils.data.datapipes as dp\n'), ((6806, 6831), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[0]', 'int', '(0)'], {}), '(row[0], int, 0)\n', (6815, 6831), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((8999, 9028), 'numpy.lib.format.read_magic', 'np.lib.format.read_magic', (['fin'], {}), '(fin)\n', (9023, 9028), True, 'import numpy as np\n'), ((9065, 9105), 'numpy.lib.format.read_array_header_1_0', 'np.lib.format.read_array_header_1_0', (['fin'], {}), '(fin)\n', (9100, 9105), True, 'import numpy as np\n'), ((12526, 12555), 'numpy.lib.format.read_magic', 'np.lib.format.read_magic', (['fin'], {}), '(fin)\n', (12550, 12555), True, 'import numpy as np\n'), ((12591, 12631), 'numpy.lib.format.read_array_header_1_0', 'np.lib.format.read_array_header_1_0', (['fin'], {}), '(fin)\n', (12626, 12631), True, 'import numpy as np\n'), ((18143, 18191), 'os.path.join', 'os.path.join', (['output_dir', '(f + output_file_suffix)'], {}), '(output_dir, f + output_file_suffix)\n', (18155, 18191), False, 'import os\n'), ((20400, 20462), 'os.path.join', 'os.path.join', (['input_dir_labels_and_dense', 'f"""day_{d}_dense.npy"""'], {}), "(input_dir_labels_and_dense, f'day_{d}_dense.npy')\n", (20412, 20462), False, 'import os\n'), ((20512, 20533), 'numpy.load', 'np.load', (['path_to_file'], {}), '(path_to_file)\n', (20519, 20533), True, 'import numpy as np\n'), ((20828, 20881), 'os.path.join', 'os.path.join', 
(['input_dir_sparse', 'f"""day_{d}_sparse.npy"""'], {}), "(input_dir_sparse, f'day_{d}_sparse.npy')\n", (20840, 20881), False, 'import os\n'), ((20901, 20922), 'numpy.load', 'np.load', (['path_to_file'], {}), '(path_to_file)\n', (20908, 20922), True, 'import numpy as np\n'), ((21230, 21293), 'os.path.join', 'os.path.join', (['input_dir_labels_and_dense', 'f"""day_{d}_labels.npy"""'], {}), "(input_dir_labels_and_dense, f'day_{d}_labels.npy')\n", (21242, 21293), False, 'import os\n'), ((21343, 21364), 'numpy.load', 'np.load', (['path_to_file'], {}), '(path_to_file)\n', (21350, 21364), True, 'import numpy as np\n'), ((21830, 21875), 'os.path.join', 'os.path.join', (['output_dir_full_set', '"""full.npy"""'], {}), "(output_dir_full_set, 'full.npy')\n", (21842, 21875), False, 'import os\n'), ((22401, 22456), 'os.path.join', 'os.path.join', (['output_dir_shuffled', 'f"""day_{d}_dense.npy"""'], {}), "(output_dir_shuffled, f'day_{d}_dense.npy')\n", (22413, 22456), False, 'import os\n'), ((22864, 22920), 'os.path.join', 'os.path.join', (['output_dir_shuffled', 'f"""day_{d}_sparse.npy"""'], {}), "(output_dir_shuffled, f'day_{d}_sparse.npy')\n", (22876, 22920), False, 'import os\n'), ((23465, 23521), 'os.path.join', 'os.path.join', (['output_dir_shuffled', 'f"""day_{d}_labels.npy"""'], {}), "(output_dir_shuffled, f'day_{d}_labels.npy')\n", (23477, 23521), False, 'import os\n'), ((1400, 1422), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'int', '(0)'], {}), '(val, int, 0)\n', (1409, 1422), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((1477, 1500), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'str', '""""""'], {}), "(val, str, '')\n", (1486, 1500), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((6853, 6878), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[i]', 'int', '(0)'], {}), '(row[i], int, 0)\n', (6862, 
6878), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((8111, 8131), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (8129, 8131), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((8392, 8410), 'numpy.save', 'np.save', (['fout', 'arr'], {}), '(fout, arr)\n', (8399, 8410), True, 'import numpy as np\n'), ((8893, 8913), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (8911, 8913), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((12419, 12439), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (12437, 12439), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((13365, 13394), 'numpy.load', 'np.load', (['fname'], {'mmap_mode': '"""r"""'}), "(fname, mmap_mode='r')\n", (13372, 13394), True, 'import numpy as np\n'), ((13657, 13705), 'numpy.fromfile', 'np.fromfile', (['fin'], {'dtype': 'dtype', 'count': 'num_entries'}), '(fin, dtype=dtype, count=num_entries)\n', (13668, 13705), True, 'import numpy as np\n'), ((18021, 18041), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (18039, 18041), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((21679, 21699), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (21697, 21699), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((22028, 22055), 'numpy.save', 'np.save', (['fout', 'full_dataset'], {}), '(fout, full_dataset)\n', (22035, 22055), True, 'import numpy as np\n'), ((22720, 22792), 'numpy.save', 'np.save', (['fout', 'full_dataset[curr_first_row:curr_last_row, 0:int_columns]'], {}), '(fout, full_dataset[curr_first_row:curr_last_row, 0:int_columns])\n', (22727, 22792), True, 'import numpy as np\n'), ((26542, 26562), 
'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (26560, 26562), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((29679, 29702), 'torch.from_numpy', 'torch.from_numpy', (['dense'], {}), '(dense)\n', (29695, 29702), False, 'import torch\n'), ((15913, 15923), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (15920, 15923), True, 'import numpy as np\n'), ((29129, 29150), 'numpy.array', 'np.array', (['self.hashes'], {}), '(self.hashes)\n', (29137, 29150), True, 'import numpy as np\n'), ((30789, 30823), 'numpy.concatenate', 'np.concatenate', (['(buffer[idx], arr)'], {}), '((buffer[idx], arr))\n', (30803, 30823), True, 'import numpy as np\n'), ((6964, 6991), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[i]', 'str', '"""0"""'], {}), "(row[i], str, '0')\n", (6973, 6991), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((15842, 15861), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (15858, 15861), False, 'import os\n'), ((20635, 20646), 'time.time', 'time.time', ([], {}), '()\n', (20644, 20646), False, 'import time\n'), ((21025, 21036), 'time.time', 'time.time', ([], {}), '()\n', (21034, 21036), False, 'import time\n'), ((21467, 21478), 'time.time', 'time.time', ([], {}), '()\n', (21476, 21478), False, 'import time\n'), ((31184, 31203), 'pyre_extensions.none_throws', 'none_throws', (['buffer'], {}), '(buffer)\n', (31195, 31203), False, 'from pyre_extensions import none_throws\n'), ((31316, 31335), 'pyre_extensions.none_throws', 'none_throws', (['buffer'], {}), '(buffer)\n', (31327, 31335), False, 'from pyre_extensions import none_throws\n'), ((31977, 31998), 'numpy.array', 'np.array', (['self.hashes'], {}), '(self.hashes)\n', (31985, 31998), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import cast, Dict, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
import torch.nn as nn
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torchrec.distributed.embedding_types import EmbeddingTableConfig
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.distributed.planner import (
EmbeddingShardingPlanner,
ParameterConstraints,
Topology,
)
from torchrec.distributed.test_utils.multi_process import MultiProcessContext
from torchrec.distributed.test_utils.test_model import (
ModelInput,
TestEBCSharder,
TestEBSharder,
TestETCSharder,
TestETSharder,
TestSparseNNBase,
)
from torchrec.distributed.types import (
ModuleSharder,
ShardedTensor,
ShardingEnv,
ShardingPlan,
ShardingType,
)
from torchrec.modules.embedding_configs import BaseEmbeddingConfig
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
class SharderType(Enum):
    """String identifiers selecting which test sharder to construct.

    Values match the ``sharder_type`` strings accepted by
    ``create_test_sharder``.
    """

    EMBEDDING_BAG = "embedding_bag"
    EMBEDDING_BAG_COLLECTION = "embedding_bag_collection"
    EMBEDDING_TOWER = "embedding_tower"
    EMBEDDING_TOWER_COLLECTION = "embedding_tower_collection"
def create_test_sharder(
    sharder_type: str, sharding_type: str, kernel_type: str
) -> Union[TestEBSharder, TestEBCSharder, TestETSharder, TestETCSharder]:
    """Construct the test sharder matching ``sharder_type``.

    Args:
        sharder_type: one of the ``SharderType`` enum values.
        sharding_type: sharding strategy forwarded to the sharder.
        kernel_type: compute kernel forwarded to the sharder.

    Raises:
        ValueError: if ``sharder_type`` is not a known ``SharderType`` value.
    """
    sharder_classes = {
        SharderType.EMBEDDING_BAG.value: TestEBSharder,
        SharderType.EMBEDDING_BAG_COLLECTION.value: TestEBCSharder,
        SharderType.EMBEDDING_TOWER.value: TestETSharder,
        SharderType.EMBEDDING_TOWER_COLLECTION.value: TestETCSharder,
    }
    sharder_cls = sharder_classes.get(sharder_type)
    if sharder_cls is None:
        raise ValueError(f"Sharder not supported {sharder_type}")
    return sharder_cls(sharding_type, kernel_type, {"learning_rate": 0.1})
def generate_inputs(
    world_size: int,
    tables: List[EmbeddingTableConfig],
    weighted_tables: Optional[List[EmbeddingTableConfig]] = None,
    batch_size: int = 4,
    num_float_features: int = 16,
) -> Tuple[ModelInput, List[ModelInput]]:
    """Generate a global ``ModelInput`` plus one per-rank ``ModelInput`` list.

    Thin wrapper over ``ModelInput.generate``; a ``None`` (or empty)
    ``weighted_tables`` is normalized to an empty list.
    """
    if not weighted_tables:
        weighted_tables = []
    return ModelInput.generate(
        batch_size=batch_size,
        world_size=world_size,
        num_float_features=num_float_features,
        tables=tables,
        weighted_tables=weighted_tables,
    )
def gen_model_and_input(
    model_class: TestSparseNNBase,
    tables: List[EmbeddingTableConfig],
    embedding_groups: Dict[str, List[str]],
    world_size: int,
    weighted_tables: Optional[List[EmbeddingTableConfig]] = None,
    num_float_features: int = 16,
    dense_device: Optional[torch.device] = None,
    sparse_device: Optional[torch.device] = None,
) -> Tuple[nn.Module, List[Tuple[ModelInput, List[ModelInput]]]]:
    """Build an unsharded model together with one generated input set.

    Seeds torch's RNG with 0 so the model's initial weights are reproducible
    across calls.
    """
    torch.manual_seed(0)
    model = model_class(
        tables=cast(List[BaseEmbeddingConfig], tables),
        num_float_features=num_float_features,
        weighted_tables=cast(List[BaseEmbeddingConfig], weighted_tables),
        embedding_groups=embedding_groups,
        dense_device=dense_device,
        sparse_device=sparse_device,
    )
    input_set = generate_inputs(
        world_size=world_size,
        tables=tables,
        weighted_tables=weighted_tables,
        num_float_features=num_float_features,
    )
    return model, [input_set]
def copy_state_dict(
    loc: Dict[str, Union[torch.Tensor, ShardedTensor]],
    glob: Dict[str, torch.Tensor],
) -> None:
    """Copy parameters from a global state dict into a (possibly sharded) one.

    For a ``ShardedTensor`` destination, each local shard receives the slice
    of the global tensor indicated by the shard's offsets; plain tensors are
    copied whole. Only 1-D and 2-D tensors are supported for sharded copies.
    """
    for name, local_tensor in loc.items():
        assert name in glob
        source = glob[name]
        if isinstance(source, ShardedTensor):
            # The "global" side may itself be sharded; use its single local shard.
            source = source.local_shards()[0].tensor
        if not isinstance(local_tensor, ShardedTensor):
            local_tensor.copy_(source)
            continue
        for local_shard in local_tensor.local_shards():
            assert source.ndim == local_shard.tensor.ndim
            offsets = local_shard.metadata.shard_offsets
            sizes = local_shard.tensor.shape
            src = source.detach()
            if src.ndim == 1:
                src = src[offsets[0] : offsets[0] + sizes[0]]
            elif src.ndim == 2:
                src = src[
                    offsets[0] : offsets[0] + sizes[0],
                    offsets[1] : offsets[1] + sizes[1],
                ]
            else:
                raise ValueError("Tensors with ndim > 2 are not supported")
            local_shard.tensor.copy_(src)
def sharding_single_rank_test(
    rank: int,
    world_size: int,
    model_class: TestSparseNNBase,
    embedding_groups: Dict[str, List[str]],
    tables: List[EmbeddingTableConfig],
    sharders: List[ModuleSharder[nn.Module]],
    backend: str,
    optim: EmbOptimType,
    weighted_tables: Optional[List[EmbeddingTableConfig]] = None,
    constraints: Optional[Dict[str, ParameterConstraints]] = None,
    local_size: Optional[int] = None,
) -> None:
    """Run one rank of a sharded-vs-unsharded equivalence check.

    Builds a global (unsharded) model and a sharded copy initialized from it,
    trains each for a single SGD step on the same generated data, and asserts
    that the all-gathered sharded predictions match the global model's.
    Only ``EmbOptimType.EXACT_SGD`` is supported (asserted below).
    """
    with MultiProcessContext(rank, world_size, backend, local_size) as ctx:
        # Generate model & inputs.
        (global_model, inputs) = gen_model_and_input(
            model_class=model_class,
            tables=tables,
            weighted_tables=weighted_tables,
            embedding_groups=embedding_groups,
            world_size=world_size,
            num_float_features=16,
        )
        global_model = global_model.to(ctx.device)
        global_input = inputs[0][0].to(ctx.device)
        local_input = inputs[0][1][rank].to(ctx.device)
        # Shard model.
        local_model = model_class(
            tables=cast(List[BaseEmbeddingConfig], tables),
            weighted_tables=cast(List[BaseEmbeddingConfig], weighted_tables),
            embedding_groups=embedding_groups,
            dense_device=ctx.device,
            # Sparse params are materialized by the sharder, so build on "meta".
            sparse_device=torch.device("meta"),
            num_float_features=16,
        )
        planner = EmbeddingShardingPlanner(
            topology=Topology(
                world_size, ctx.device.type, local_world_size=ctx.local_size
            ),
            constraints=constraints,
        )
        plan: ShardingPlan = planner.collective_plan(local_model, sharders, ctx.pg)
        """
        Simulating multiple nodes on a single node. However, metadata information and
        tensor placement must still be consistent. Here we overwrite this to do so.
        NOTE:
            inter/intra process groups should still behave as expected.
        TODO: may need to add some checks that only does this if we're running on a
        single GPU (which should be most cases).
        """
        for group in plan.plan:
            for _, parameter_sharding in plan.plan[group].items():
                if (
                    parameter_sharding.sharding_type
                    in {
                        ShardingType.TABLE_ROW_WISE.value,
                        ShardingType.TABLE_COLUMN_WISE.value,
                    }
                    and ctx.device.type != "cpu"
                ):
                    sharding_spec = parameter_sharding.sharding_spec
                    if sharding_spec is not None:
                        # pyre-ignore
                        for shard in sharding_spec.shards:
                            placement = shard.placement
                            # NOTE(review): this rebinding shadows the function's
                            # ``rank`` parameter for the rest of the loop body.
                            rank: Optional[int] = placement.rank()
                            assert rank is not None
                            shard.placement = torch.distributed._remote_device(
                                f"rank:{rank}/cuda:{rank}"
                            )
        local_model = DistributedModelParallel(
            local_model,
            env=ShardingEnv.from_process_group(ctx.pg),
            plan=plan,
            sharders=sharders,
            device=ctx.device,
        )
        # Dense params use a plain SGD keyed optimizer; fused (sparse) params
        # come with their own optimizer, combined below.
        dense_optim = KeyedOptimizerWrapper(
            dict(local_model.named_parameters()),
            lambda params: torch.optim.SGD(params, lr=0.1),
        )
        local_opt = CombinedOptimizer([local_model.fused_optimizer, dense_optim])
        # Load model state from the global model.
        copy_state_dict(local_model.state_dict(), global_model.state_dict())
        # Run a single training step of the sharded model.
        local_pred = gen_full_pred_after_one_step(local_model, local_opt, local_input)
        all_local_pred = []
        for _ in range(world_size):
            all_local_pred.append(torch.empty_like(local_pred))
        dist.all_gather(all_local_pred, local_pred, group=ctx.pg)
        # Run second training step of the unsharded model.
        assert optim == EmbOptimType.EXACT_SGD
        global_opt = torch.optim.SGD(global_model.parameters(), lr=0.1)
        global_pred = gen_full_pred_after_one_step(
            global_model, global_opt, global_input
        )
        # Compare predictions of sharded vs unsharded models.
        # NOTE(review): torch.testing.assert_allclose is deprecated in favor of
        # torch.testing.assert_close — confirm the pinned torch version before
        # switching, as default tolerances differ.
        torch.testing.assert_allclose(global_pred, torch.cat(all_local_pred))
def gen_full_pred_after_one_step(
    model: nn.Module,
    opt: torch.optim.Optimizer,
    input: ModelInput,
) -> torch.Tensor:
    """Apply exactly one optimizer step to ``model`` and return its predictions.

    Runs one forward/backward/step in train mode, then recomputes the output
    with gradients disabled. The model is left in eval mode afterwards.
    """
    # One training step: forward, backward, parameter update.
    model.train(True)
    opt.zero_grad()
    loss, _ = model(input)
    loss.backward()
    # pyre-fixme[20]: Argument `closure` expected.
    opt.step()
    # Recompute predictions with the updated weights.
    model.train(False)
    with torch.no_grad():
        full_pred = model(input)
    return full_pred
| [
"torchrec.distributed.types.ShardingEnv.from_process_group",
"torchrec.distributed.test_utils.test_model.TestETSharder",
"torchrec.distributed.test_utils.test_model.TestEBCSharder",
"torchrec.distributed.planner.Topology",
"torchrec.distributed.test_utils.test_model.ModelInput.generate",
"torchrec.distrib... | [((2533, 2700), 'torchrec.distributed.test_utils.test_model.ModelInput.generate', 'ModelInput.generate', ([], {'batch_size': 'batch_size', 'world_size': 'world_size', 'num_float_features': 'num_float_features', 'tables': 'tables', 'weighted_tables': '(weighted_tables or [])'}), '(batch_size=batch_size, world_size=world_size,\n num_float_features=num_float_features, tables=tables, weighted_tables=\n weighted_tables or [])\n', (2552, 2700), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((3175, 3195), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3192, 3195), False, 'import torch\n'), ((1684, 1749), 'torchrec.distributed.test_utils.test_model.TestEBSharder', 'TestEBSharder', (['sharding_type', 'kernel_type', "{'learning_rate': 0.1}"], {}), "(sharding_type, kernel_type, {'learning_rate': 0.1})\n", (1697, 1749), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((5611, 5669), 'torchrec.distributed.test_utils.multi_process.MultiProcessContext', 'MultiProcessContext', (['rank', 'world_size', 'backend', 'local_size'], {}), '(rank, world_size, backend, local_size)\n', (5630, 5669), False, 'from torchrec.distributed.test_utils.multi_process import MultiProcessContext\n'), ((8630, 8691), 'torchrec.optim.keyed.CombinedOptimizer', 'CombinedOptimizer', (['[local_model.fused_optimizer, dense_optim]'], {}), '([local_model.fused_optimizer, dense_optim])\n', (8647, 8691), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper\n'), ((9104, 9161), 'torch.distributed.all_gather', 'dist.all_gather', (['all_local_pred', 'local_pred'], {'group': 'ctx.pg'}), '(all_local_pred, local_pred, group=ctx.pg)\n', (9119, 9161), True, 'import torch.distributed as dist\n'), ((9992, 10007), 'torch.no_grad', 
'torch.no_grad', ([], {}), '()\n', (10005, 10007), False, 'import torch\n'), ((1834, 1900), 'torchrec.distributed.test_utils.test_model.TestEBCSharder', 'TestEBCSharder', (['sharding_type', 'kernel_type', "{'learning_rate': 0.1}"], {}), "(sharding_type, kernel_type, {'learning_rate': 0.1})\n", (1848, 1900), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((3237, 3276), 'typing.cast', 'cast', (['List[BaseEmbeddingConfig]', 'tables'], {}), '(List[BaseEmbeddingConfig], tables)\n', (3241, 3276), False, 'from typing import cast, Dict, List, Optional, Tuple, Union\n'), ((3349, 3397), 'typing.cast', 'cast', (['List[BaseEmbeddingConfig]', 'weighted_tables'], {}), '(List[BaseEmbeddingConfig], weighted_tables)\n', (3353, 3397), False, 'from typing import cast, Dict, List, Optional, Tuple, Union\n'), ((9568, 9593), 'torch.cat', 'torch.cat', (['all_local_pred'], {}), '(all_local_pred)\n', (9577, 9593), False, 'import torch\n'), ((1976, 2041), 'torchrec.distributed.test_utils.test_model.TestETSharder', 'TestETSharder', (['sharding_type', 'kernel_type', "{'learning_rate': 0.1}"], {}), "(sharding_type, kernel_type, {'learning_rate': 0.1})\n", (1989, 2041), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((6239, 6278), 'typing.cast', 'cast', (['List[BaseEmbeddingConfig]', 'tables'], {}), '(List[BaseEmbeddingConfig], tables)\n', (6243, 6278), False, 'from typing import cast, Dict, List, Optional, Tuple, Union\n'), ((6308, 6356), 'typing.cast', 'cast', (['List[BaseEmbeddingConfig]', 'weighted_tables'], {}), '(List[BaseEmbeddingConfig], weighted_tables)\n', (6312, 6356), False, 'from typing import cast, Dict, List, Optional, Tuple, Union\n'), ((6468, 6488), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (6480, 6488), False, 'import torch\n'), 
((6601, 6671), 'torchrec.distributed.planner.Topology', 'Topology', (['world_size', 'ctx.device.type'], {'local_world_size': 'ctx.local_size'}), '(world_size, ctx.device.type, local_world_size=ctx.local_size)\n', (6609, 6671), False, 'from torchrec.distributed.planner import EmbeddingShardingPlanner, ParameterConstraints, Topology\n'), ((8309, 8347), 'torchrec.distributed.types.ShardingEnv.from_process_group', 'ShardingEnv.from_process_group', (['ctx.pg'], {}), '(ctx.pg)\n', (8339, 8347), False, 'from torchrec.distributed.types import ModuleSharder, ShardedTensor, ShardingEnv, ShardingPlan, ShardingType\n'), ((8567, 8598), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.1)'}), '(params, lr=0.1)\n', (8582, 8598), False, 'import torch\n'), ((9066, 9094), 'torch.empty_like', 'torch.empty_like', (['local_pred'], {}), '(local_pred)\n', (9082, 9094), False, 'import torch\n'), ((2128, 2194), 'torchrec.distributed.test_utils.test_model.TestETCSharder', 'TestETCSharder', (['sharding_type', 'kernel_type', "{'learning_rate': 0.1}"], {}), "(sharding_type, kernel_type, {'learning_rate': 0.1})\n", (2142, 2194), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((8096, 8156), 'torch.distributed._remote_device', 'torch.distributed._remote_device', (['f"""rank:{rank}/cuda:{rank}"""'], {}), "(f'rank:{rank}/cuda:{rank}')\n", (8128, 8156), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch.testing import FileCheck # @manual
from torchrec.fx import symbolic_trace
from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class SparseArchTest(unittest.TestCase):
    """Tests for ``SparseArch``: output shape/values, FX tracing, scripting."""

    def test_basic(self) -> None:
        # Seed so the embedding tables initialize to the golden values below.
        torch.manual_seed(0)
        D = 3  # embedding dimension
        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1", "f3"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=10,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_arch = SparseArch(ebc)
        # f4/f5 have no configured table, so they do not contribute to output.
        keys = ["f1", "f2", "f3", "f4", "f5"]
        offsets = torch.tensor([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])
        features = KeyedJaggedTensor.from_offsets_sync(
            keys=keys,
            values=torch.tensor(
                [1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]
            ),
            offsets=offsets,
        )
        B = (len(offsets) - 1) // len(keys)  # batch size implied by offsets
        sparse_features = sparse_arch(features)
        F = len(sparse_arch.sparse_feature_names)
        self.assertEqual(sparse_features.shape, (B, F, D))
        # Golden output for manual_seed(0).
        expected_values = torch.tensor(
            [
                [
                    [-0.7499, -1.2665, 1.0143],
                    [-0.7499, -1.2665, 1.0143],
                    [3.2276, 2.9643, -0.3816],
                ],
                [
                    [0.0082, 0.6241, -0.1119],
                    [0.0082, 0.6241, -0.1119],
                    [2.0722, -2.2734, -1.6307],
                ],
            ]
        )
        self.assertTrue(
            torch.allclose(
                sparse_features,
                expected_values,
                rtol=1e-4,
                atol=1e-4,
            ),
        )

    def test_fx_and_shape(self) -> None:
        # Verify SparseArch is FX-traceable and the traced module preserves shape.
        D = 3
        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1", "f3"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=10,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_arch = SparseArch(ebc)
        F = len(sparse_arch.sparse_feature_names)
        gm = symbolic_trace(sparse_arch)
        # The traced graph should still mention the KJT input and the cat op.
        FileCheck().check("KeyedJaggedTensor").check("cat").run(gm.code)
        keys = ["f1", "f2", "f3", "f4", "f5"]
        offsets = torch.tensor([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])
        features = KeyedJaggedTensor.from_offsets_sync(
            keys=keys,
            values=torch.tensor(
                [1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]
            ),
            offsets=offsets,
        )
        B = (len(offsets) - 1) // len(keys)
        sparse_features = gm(features)
        self.assertEqual(sparse_features.shape, (B, F, D))

    # TODO(T89043538): Auto-generate this test.
    def test_fx_script(self) -> None:
        # Verify the FX-traced SparseArch is TorchScript-compatible.
        D = 3
        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2", embedding_dim=D, num_embeddings=10, feature_names=["f2"]
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_arch = SparseArch(ebc)
        gm = symbolic_trace(sparse_arch)
        torch.jit.script(gm)
class DenseArchTest(unittest.TestCase):
    """Tests for ``DenseArch``: golden values, FX tracing, and scripting."""

    def test_basic(self) -> None:
        # Seed so both the module init and the random input are reproducible.
        torch.manual_seed(0)
        batch_size, embed_dim, num_dense = 4, 3, 10
        dense_arch = DenseArch(in_features=num_dense, layer_sizes=[10, embed_dim])
        dense_embedded = dense_arch(torch.rand((batch_size, num_dense)))
        self.assertEqual(dense_embedded.size(), (batch_size, embed_dim))
        # Golden output for manual_seed(0).
        expected = torch.tensor(
            [
                [0.2351, 0.1578, 0.2784],
                [0.1579, 0.1012, 0.2660],
                [0.2459, 0.2379, 0.2749],
                [0.2582, 0.2178, 0.2860],
            ]
        )
        self.assertTrue(
            torch.allclose(
                dense_embedded,
                expected,
                rtol=1e-4,
                atol=1e-4,
            )
        )

    def test_fx_and_shape(self) -> None:
        # FX-traced module should preserve the output shape.
        batch_size, embed_dim, num_dense = 20, 3, 10
        dense_arch = DenseArch(in_features=num_dense, layer_sizes=[10, embed_dim])
        gm = symbolic_trace(dense_arch)
        dense_embedded = gm(torch.rand((batch_size, num_dense)))
        self.assertEqual(dense_embedded.size(), (batch_size, embed_dim))

    # TODO(T89043538): Auto-generate this test.
    def test_fx_script(self) -> None:
        # TorchScript-compiled FX graph should preserve the output shape too.
        batch_size, embed_dim, num_dense = 20, 3, 10
        dense_arch = DenseArch(in_features=num_dense, layer_sizes=[10, embed_dim])
        scripted_gm = torch.jit.script(symbolic_trace(dense_arch))
        dense_embedded = scripted_gm(torch.rand((batch_size, num_dense)))
        self.assertEqual(dense_embedded.size(), (batch_size, embed_dim))
class InteractionArchTest(unittest.TestCase):
    """Tests for ``InteractionArch``.

    The interaction output width is D + F + C(F, 2): the dense features,
    their dot products with each sparse feature, and all pairwise sparse
    dot products.
    """

    def test_basic(self) -> None:
        D = 3   # embedding dimension
        B = 10  # batch size
        keys = ["f1", "f2"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))

    def test_larger(self) -> None:
        # Same shape check with more features and a wider embedding.
        D = 8
        B = 20
        keys = ["f1", "f2", "f3", "f4"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))

    def test_fx_and_shape(self) -> None:
        # FX-traced module should preserve the output shape.
        D = 3
        B = 10
        keys = ["f1", "f2"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        gm = symbolic_trace(inter_arch)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = gm(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))

    # TODO(T89043538): Auto-generate this test.
    def test_fx_script(self) -> None:
        # TorchScript-compiled FX graph should preserve the output shape too.
        D = 3
        B = 10
        keys = ["f1", "f2"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        gm = symbolic_trace(inter_arch)
        scripted_gm = torch.jit.script(gm)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = scripted_gm(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))

    def test_correctness(self) -> None:
        D = 4
        B = 3
        keys = [
            "f1",
            "f2",
            "f3",
            "f4",
        ]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        # Seed so the random inputs match the golden values below.
        torch.manual_seed(0)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
        # Golden output for manual_seed(0): first D entries echo the dense
        # features, the rest are the pairwise interaction terms.
        expected = torch.tensor(
            [
                [
                    0.4963,
                    0.7682,
                    0.0885,
                    0.1320,
                    0.2353,
                    1.0123,
                    1.1919,
                    0.7220,
                    0.3444,
                    0.7397,
                    0.4015,
                    1.5184,
                    0.8986,
                    1.2018,
                ],
                [
                    0.3074,
                    0.6341,
                    0.4901,
                    0.8964,
                    1.2787,
                    0.3275,
                    1.6734,
                    0.6325,
                    0.2089,
                    1.2982,
                    0.3977,
                    0.4200,
                    0.2475,
                    0.7834,
                ],
                [
                    0.4556,
                    0.6323,
                    0.3489,
                    0.4017,
                    0.8195,
                    1.1181,
                    1.0511,
                    0.4919,
                    1.6147,
                    1.0786,
                    0.4264,
                    1.3576,
                    0.5860,
                    0.6559,
                ],
            ]
        )
        self.assertTrue(
            torch.allclose(
                concat_dense,
                expected,
                rtol=1e-4,
                atol=1e-4,
            )
        )

    def test_numerical_stability(self) -> None:
        D = 3
        B = 6
        keys = ["f1", "f2"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        torch.manual_seed(0)
        # Integer inputs make every interaction term exact, so the comparison
        # below can use torch.equal instead of a tolerance.
        dense_features = torch.randint(0, 10, (B, D))
        sparse_features = torch.randint(0, 10, (B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        expected = torch.LongTensor(
            [
                [4, 9, 3, 61, 57, 63],
                [0, 3, 9, 84, 27, 45],
                [7, 3, 7, 34, 50, 25],
                [3, 1, 6, 21, 50, 91],
                [6, 9, 8, 125, 109, 74],
                [6, 6, 8, 18, 80, 21],
            ]
        )
        self.assertTrue(torch.equal(concat_dense, expected))
class DLRMTest(unittest.TestCase):
    """End-to-end tests for the DLRM module (forward, validation, fx, script)."""

    @staticmethod
    def _two_table_model(embed_dim, dense_in):
        """Build the standard two-table DLRM used by several tests.

        Construction order (table configs, collection, model) matches the
        original tests so seeded parameter initialization stays reproducible.
        """
        t1 = EmbeddingBagConfig(
            name="t1",
            embedding_dim=embed_dim,
            num_embeddings=100,
            feature_names=["f1", "f3"],
        )
        t2 = EmbeddingBagConfig(
            name="t2",
            embedding_dim=embed_dim,
            num_embeddings=100,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[t1, t2])
        return DLRM(
            embedding_bag_collection=ebc,
            dense_in_features=dense_in,
            dense_arch_layer_sizes=[20, embed_dim],
            over_arch_layer_sizes=[5, 1],
        )

    @staticmethod
    def _one_table_model(embed_dim, dense_in):
        """Build a single-table ("f2") DLRM."""
        t2 = EmbeddingBagConfig(
            name="t2",
            embedding_dim=embed_dim,
            num_embeddings=100,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[t2])
        return DLRM(
            embedding_bag_collection=ebc,
            dense_in_features=dense_in,
            dense_arch_layer_sizes=[20, embed_dim],
            over_arch_layer_sizes=[5, 1],
        )

    @staticmethod
    def _three_key_kjt():
        """Sparse batch covering all three features of the two-table model."""
        return KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f3", "f2"],
            values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]),
            offsets=torch.tensor([0, 2, 4, 6, 8, 10, 11]),
        )

    @staticmethod
    def _one_key_kjt():
        """Sparse batch with only feature "f2"."""
        return KeyedJaggedTensor.from_offsets_sync(
            keys=["f2"],
            values=torch.tensor(range(3)),
            offsets=torch.tensor([0, 2, 3]),
        )

    def test_basic(self) -> None:
        torch.manual_seed(0)
        B = 2
        D = 8
        dense_in = 100
        model = self._two_table_model(D, dense_in)
        features = torch.rand((B, dense_in))
        logits = model(
            dense_features=features,
            sparse_features=self._three_key_kjt(),
        )
        self.assertEqual(logits.size(), (B, 1))
        expected_logits = torch.tensor([[0.5805], [0.5909]])
        self.assertTrue(
            torch.allclose(
                logits,
                expected_logits,
                rtol=1e-4,
                atol=1e-4,
            )
        )

    def test_one_sparse(self) -> None:
        B = 2
        D = 8
        dense_in = 100
        model = self._one_table_model(D, dense_in)
        features = torch.rand((B, dense_in))
        logits = model(
            dense_features=features,
            sparse_features=self._one_key_kjt(),
        )
        self.assertEqual(logits.size(), (B, 1))

    def test_no_sparse(self) -> None:
        # DLRM requires at least one sparse table; an empty collection must fail.
        empty_ebc = EmbeddingBagCollection(tables=[])
        with self.assertRaises(AssertionError):
            DLRM(
                embedding_bag_collection=empty_ebc,
                dense_in_features=100,
                dense_arch_layer_sizes=[20, 1],
                over_arch_layer_sizes=[5, 1],
            )

    def test_fx(self) -> None:
        B = 2
        D = 8
        dense_in = 100
        gm = symbolic_trace(self._one_table_model(D, dense_in))
        # The traced graph must still reference KJT handling, concat, and "f2".
        FileCheck().check("KeyedJaggedTensor").check("cat").check("f2").run(gm.code)
        features = torch.rand((B, dense_in))
        logits = gm(
            dense_features=features,
            sparse_features=self._one_key_kjt(),
        )
        self.assertEqual(logits.size(), (B, 1))

    # TODO(T89043538): Auto-generate this test.
    def test_fx_script(self) -> None:
        B = 2
        D = 8
        dense_in = 100
        model = self._two_table_model(D, dense_in)
        features = torch.rand((B, dense_in))
        kjt = self._three_key_kjt()
        # Run one forward pass before tracing/scripting.
        model(
            dense_features=features,
            sparse_features=kjt,
        )
        scripted = torch.jit.script(symbolic_trace(model))
        logits = scripted(features, kjt)
        self.assertEqual(logits.size(), (B, 1))
| [
"torchrec.models.dlrm.SparseArch",
"torchrec.modules.embedding_configs.EmbeddingBagConfig",
"torchrec.models.dlrm.DenseArch",
"torchrec.models.dlrm.choose",
"torchrec.models.dlrm.InteractionArch",
"torchrec.models.dlrm.DLRM",
"torchrec.modules.embedding_modules.EmbeddingBagCollection",
"torchrec.fx.sy... | [((715, 735), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (732, 735), False, 'import torch\n'), ((772, 869), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=10,\n feature_names=['f1', 'f3'])\n", (790, 869), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((909, 1000), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=10,\n feature_names=['f2'])\n", (927, 1000), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1071, 1126), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (1093, 1126), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((1149, 1164), 'torchrec.models.dlrm.SparseArch', 'SparseArch', (['ebc'], {}), '(ebc)\n', (1159, 1164), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((1230, 1283), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19]'], {}), '([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])\n', (1242, 1283), False, 'import torch\n'), ((1753, 1945), 'torch.tensor', 'torch.tensor', (['[[[-0.7499, -1.2665, 1.0143], [-0.7499, -1.2665, 1.0143], [3.2276, 2.9643, \n -0.3816]], [[0.0082, 0.6241, -0.1119], [0.0082, 0.6241, -0.1119], [\n 2.0722, -2.2734, -1.6307]]]'], {}), '([[[-0.7499, -1.2665, 1.0143], [-0.7499, -1.2665, 1.0143], [\n 3.2276, 2.9643, -0.3816]], [[0.0082, 0.6241, -0.1119], [0.0082, 0.6241,\n -0.1119], [2.0722, -2.2734, -1.6307]]])\n', (1765, 1945), False, 'import 
torch\n'), ((2440, 2537), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=10,\n feature_names=['f1', 'f3'])\n", (2458, 2537), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((2577, 2668), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=10,\n feature_names=['f2'])\n", (2595, 2668), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((2739, 2794), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (2761, 2794), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((2817, 2832), 'torchrec.models.dlrm.SparseArch', 'SparseArch', (['ebc'], {}), '(ebc)\n', (2827, 2832), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((2896, 2923), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['sparse_arch'], {}), '(sparse_arch)\n', (2910, 2923), False, 'from torchrec.fx import symbolic_trace\n'), ((3063, 3116), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19]'], {}), '([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])\n', (3075, 3116), False, 'import torch\n'), ((3622, 3713), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f1']"}), "(name='t1', embedding_dim=D, num_embeddings=10,\n feature_names=['f1'])\n", (3640, 3713), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((3753, 3844), 
'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=10,\n feature_names=['f2'])\n", (3771, 3844), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((3878, 3933), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (3900, 3933), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((3956, 3971), 'torchrec.models.dlrm.SparseArch', 'SparseArch', (['ebc'], {}), '(ebc)\n', (3966, 3971), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((3986, 4013), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['sparse_arch'], {}), '(sparse_arch)\n', (4000, 4013), False, 'from torchrec.fx import symbolic_trace\n'), ((4022, 4042), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (4038, 4042), False, 'import torch\n'), ((4127, 4147), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (4144, 4147), False, 'import torch\n'), ((4222, 4277), 'torchrec.models.dlrm.DenseArch', 'DenseArch', ([], {'in_features': 'in_features', 'layer_sizes': '[10, D]'}), '(in_features=in_features, layer_sizes=[10, D])\n', (4231, 4277), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((4420, 4541), 'torch.tensor', 'torch.tensor', (['[[0.2351, 0.1578, 0.2784], [0.1579, 0.1012, 0.266], [0.2459, 0.2379, 0.2749\n ], [0.2582, 0.2178, 0.286]]'], {}), '([[0.2351, 0.1578, 0.2784], [0.1579, 0.1012, 0.266], [0.2459, \n 0.2379, 0.2749], [0.2582, 0.2178, 0.286]])\n', (4432, 4541), False, 'import torch\n'), ((4946, 5001), 'torchrec.models.dlrm.DenseArch', 'DenseArch', ([], {'in_features': 'in_features', 'layer_sizes': '[10, D]'}), 
'(in_features=in_features, layer_sizes=[10, D])\n', (4955, 5001), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((5015, 5041), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['dense_arch'], {}), '(dense_arch)\n', (5029, 5041), False, 'from torchrec.fx import symbolic_trace\n'), ((5318, 5373), 'torchrec.models.dlrm.DenseArch', 'DenseArch', ([], {'in_features': 'in_features', 'layer_sizes': '[10, D]'}), '(in_features=in_features, layer_sizes=[10, D])\n', (5327, 5373), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((5387, 5413), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['dense_arch'], {}), '(dense_arch)\n', (5401, 5413), False, 'from torchrec.fx import symbolic_trace\n'), ((5436, 5456), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (5452, 5456), False, 'import torch\n'), ((5762, 5800), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (5777, 5800), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((5827, 5845), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (5837, 5845), False, 'import torch\n'), ((5873, 5894), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (5883, 5894), False, 'import torch\n'), ((6219, 6257), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (6234, 6257), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((6284, 6302), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (6294, 6302), False, 'import torch\n'), ((6329, 6350), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (6339, 6350), False, 'import torch\n'), ((6670, 6708), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), 
'(num_sparse_features=F)\n', (6685, 6708), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((6723, 6749), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['inter_arch'], {}), '(inter_arch)\n', (6737, 6749), False, 'from torchrec.fx import symbolic_trace\n'), ((6776, 6794), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (6786, 6794), False, 'import torch\n'), ((6821, 6842), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (6831, 6842), False, 'import torch\n'), ((7199, 7237), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (7214, 7237), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((7252, 7278), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['inter_arch'], {}), '(inter_arch)\n', (7266, 7278), False, 'from torchrec.fx import symbolic_trace\n'), ((7301, 7321), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (7317, 7321), False, 'import torch\n'), ((7348, 7366), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (7358, 7366), False, 'import torch\n'), ((7393, 7414), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (7403, 7414), False, 'import torch\n'), ((7804, 7842), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (7819, 7842), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((7851, 7871), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (7868, 7871), False, 'import torch\n'), ((7898, 7916), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (7908, 7916), False, 'import torch\n'), ((7943, 7964), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (7953, 7964), False, 'import torch\n'), ((8162, 8530), 'torch.tensor', 'torch.tensor', (['[[0.4963, 0.7682, 
0.0885, 0.132, 0.2353, 1.0123, 1.1919, 0.722, 0.3444, \n 0.7397, 0.4015, 1.5184, 0.8986, 1.2018], [0.3074, 0.6341, 0.4901, \n 0.8964, 1.2787, 0.3275, 1.6734, 0.6325, 0.2089, 1.2982, 0.3977, 0.42, \n 0.2475, 0.7834], [0.4556, 0.6323, 0.3489, 0.4017, 0.8195, 1.1181, \n 1.0511, 0.4919, 1.6147, 1.0786, 0.4264, 1.3576, 0.586, 0.6559]]'], {}), '([[0.4963, 0.7682, 0.0885, 0.132, 0.2353, 1.0123, 1.1919, 0.722,\n 0.3444, 0.7397, 0.4015, 1.5184, 0.8986, 1.2018], [0.3074, 0.6341, \n 0.4901, 0.8964, 1.2787, 0.3275, 1.6734, 0.6325, 0.2089, 1.2982, 0.3977,\n 0.42, 0.2475, 0.7834], [0.4556, 0.6323, 0.3489, 0.4017, 0.8195, 1.1181,\n 1.0511, 0.4919, 1.6147, 1.0786, 0.4264, 1.3576, 0.586, 0.6559]])\n', (8174, 8530), False, 'import torch\n'), ((9838, 9876), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (9853, 9876), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((9885, 9905), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (9902, 9905), False, 'import torch\n'), ((9931, 9959), 'torch.randint', 'torch.randint', (['(0)', '(10)', '(B, D)'], {}), '(0, 10, (B, D))\n', (9944, 9959), False, 'import torch\n'), ((9987, 10018), 'torch.randint', 'torch.randint', (['(0)', '(10)', '(B, F, D)'], {}), '(0, 10, (B, F, D))\n', (10000, 10018), False, 'import torch\n'), ((10107, 10275), 'torch.LongTensor', 'torch.LongTensor', (['[[4, 9, 3, 61, 57, 63], [0, 3, 9, 84, 27, 45], [7, 3, 7, 34, 50, 25], [3, 1,\n 6, 21, 50, 91], [6, 9, 8, 125, 109, 74], [6, 6, 8, 18, 80, 21]]'], {}), '([[4, 9, 3, 61, 57, 63], [0, 3, 9, 84, 27, 45], [7, 3, 7, \n 34, 50, 25], [3, 1, 6, 21, 50, 91], [6, 9, 8, 125, 109, 74], [6, 6, 8, \n 18, 80, 21]])\n', (10123, 10275), False, 'import torch\n'), ((10540, 10560), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (10557, 10560), False, 'import torch\n'), ((10643, 10741), 
'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=100,\n feature_names=['f1', 'f3'])\n", (10661, 10741), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((10781, 10873), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (10799, 10873), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((10944, 10999), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (10966, 10999), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((11020, 11157), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'ebc', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': '[20, D]', 'over_arch_layer_sizes': '[5, 1]'}), '(embedding_bag_collection=ebc, dense_in_features=dense_in_features,\n dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1])\n', (11024, 11157), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((11233, 11267), 'torch.rand', 'torch.rand', (['(B, dense_in_features)'], {}), '((B, dense_in_features))\n', (11243, 11267), False, 'import torch\n'), ((11701, 11735), 'torch.tensor', 'torch.tensor', (['[[0.5805], [0.5909]]'], {}), '([[0.5805], [0.5909]])\n', (11713, 11735), False, 'import torch\n'), ((12046, 12138), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), 
"(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (12064, 12138), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((12209, 12252), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config]'}), '(tables=[eb1_config])\n', (12231, 12252), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((12273, 12410), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'ebc', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': '[20, D]', 'over_arch_layer_sizes': '[5, 1]'}), '(embedding_bag_collection=ebc, dense_in_features=dense_in_features,\n dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1])\n', (12277, 12410), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((12486, 12520), 'torch.rand', 'torch.rand', (['(B, dense_in_features)'], {}), '((B, dense_in_features))\n', (12496, 12520), False, 'import torch\n'), ((12929, 12962), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[]'}), '(tables=[])\n', (12951, 12962), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((13364, 13456), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (13382, 13456), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((13527, 13570), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config]'}), '(tables=[eb1_config])\n', (13549, 13570), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((13591, 13728), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], 
{'embedding_bag_collection': 'ebc', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': '[20, D]', 'over_arch_layer_sizes': '[5, 1]'}), '(embedding_bag_collection=ebc, dense_in_features=dense_in_features,\n dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1])\n', (13595, 13728), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((13797, 13822), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['sparse_nn'], {}), '(sparse_nn)\n', (13811, 13822), False, 'from torchrec.fx import symbolic_trace\n'), ((13928, 13962), 'torch.rand', 'torch.rand', (['(B, dense_in_features)'], {}), '((B, dense_in_features))\n', (13938, 13962), False, 'import torch\n'), ((14480, 14578), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=100,\n feature_names=['f1', 'f3'])\n", (14498, 14578), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((14618, 14710), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (14636, 14710), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((14781, 14836), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (14803, 14836), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((14857, 14994), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'ebc', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': '[20, D]', 'over_arch_layer_sizes': '[5, 1]'}), 
'(embedding_bag_collection=ebc, dense_in_features=dense_in_features,\n dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1])\n', (14861, 14994), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((15070, 15104), 'torch.rand', 'torch.rand', (['(B, dense_in_features)'], {}), '((B, dense_in_features))\n', (15080, 15104), False, 'import torch\n'), ((15468, 15493), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['sparse_nn'], {}), '(sparse_nn)\n', (15482, 15493), False, 'from torchrec.fx import symbolic_trace\n'), ((15517, 15537), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (15533, 15537), False, 'import torch\n'), ((2202, 2276), 'torch.allclose', 'torch.allclose', (['sparse_features', 'expected_values'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(sparse_features, expected_values, rtol=0.0001, atol=0.0001)\n', (2216, 2276), False, 'import torch\n'), ((4314, 4342), 'torch.rand', 'torch.rand', (['(B, in_features)'], {}), '((B, in_features))\n', (4324, 4342), False, 'import torch\n'), ((4677, 4743), 'torch.allclose', 'torch.allclose', (['dense_embedded', 'expected'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(dense_embedded, expected, rtol=0.0001, atol=0.0001)\n', (4691, 4743), False, 'import torch\n'), ((5070, 5098), 'torch.rand', 'torch.rand', (['(B, in_features)'], {}), '((B, in_features))\n', (5080, 5098), False, 'import torch\n'), ((5494, 5522), 'torch.rand', 'torch.rand', (['(B, in_features)'], {}), '((B, in_features))\n', (5504, 5522), False, 'import torch\n'), ((9539, 9603), 'torch.allclose', 'torch.allclose', (['concat_dense', 'expected'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(concat_dense, expected, rtol=0.0001, atol=0.0001)\n', (9553, 9603), False, 'import torch\n'), ((10424, 10459), 'torch.equal', 'torch.equal', (['concat_dense', 'expected'], {}), '(concat_dense, expected)\n', (10435, 10459), False, 'import torch\n'), ((11773, 11838), 'torch.allclose', 'torch.allclose', 
(['logits', 'expected_logits'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(logits, expected_logits, rtol=0.0001, atol=0.0001)\n', (11787, 11838), False, 'import torch\n'), ((13044, 13174), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'ebc', 'dense_in_features': '(100)', 'dense_arch_layer_sizes': '[20, D_unused]', 'over_arch_layer_sizes': '[5, 1]'}), '(embedding_bag_collection=ebc, dense_in_features=100,\n dense_arch_layer_sizes=[20, D_unused], over_arch_layer_sizes=[5, 1])\n', (13048, 13174), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((1382, 1453), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (1394, 1453), False, 'import torch\n'), ((3215, 3286), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (3227, 3286), False, 'import torch\n'), ((11387, 11434), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (11399, 11434), False, 'import torch\n'), ((11456, 11493), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 11]'], {}), '([0, 2, 4, 6, 8, 10, 11])\n', (11468, 11493), False, 'import torch\n'), ((12672, 12695), 'torch.tensor', 'torch.tensor', (['[0, 2, 3]'], {}), '([0, 2, 3])\n', (12684, 12695), False, 'import torch\n'), ((14114, 14137), 'torch.tensor', 'torch.tensor', (['[0, 2, 3]'], {}), '([0, 2, 3])\n', (14126, 14137), False, 'import torch\n'), ((15224, 15271), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (15236, 15271), False, 'import torch\n'), ((15293, 15330), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 11]'], {}), '([0, 2, 4, 6, 8, 10, 11])\n', (15305, 15330), False, 'import torch\n'), 
((6056, 6068), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (6062, 6068), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((6513, 6525), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (6519, 6525), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((6997, 7009), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (7003, 7009), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((7578, 7590), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (7584, 7590), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((8127, 8139), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (8133, 8139), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((2933, 2944), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (2942, 2944), False, 'from torch.testing import FileCheck\n'), ((13831, 13842), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (13840, 13842), False, 'from torch.testing import FileCheck\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import copy
import itertools
import logging
from collections import defaultdict, OrderedDict
from dataclasses import dataclass
from typing import List, Optional, Dict, Any, Union, Tuple, cast, Iterator
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops import (
EmbeddingLocation,
ComputeDevice,
PoolingMode,
DenseTableBatchedEmbeddingBagsCodegen,
SplitTableBatchedEmbeddingBagsCodegen,
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
)
from torch import nn
from torch.nn.modules.module import _IncompatibleKeys
from torchrec.distributed.embedding_types import (
ShardedEmbeddingTable,
GroupedEmbeddingConfig,
BaseEmbeddingLookup,
SparseFeatures,
EmbeddingComputeKernel,
BaseGroupedFeatureProcessor,
)
from torchrec.distributed.grouped_position_weighted import (
GroupedPositionWeightedModule,
)
from torchrec.distributed.types import (
Shard,
ShardedTensorMetadata,
ShardMetadata,
ShardedTensor,
TensorProperties,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
PoolingType,
DataType,
DATA_TYPE_NUM_BITS,
)
from torchrec.optim.fused import FusedOptimizerModule, FusedOptimizer
from torchrec.sparse.jagged_tensor import (
KeyedJaggedTensor,
KeyedTensor,
)
# Module-level logger, named after this module per the standard logging convention.
logger: logging.Logger = logging.getLogger(__name__)
def _load_state_dict(
    # pyre-fixme[24]: Non-generic type `nn.modules.container.ModuleList` cannot take
    # parameters.
    emb_modules: "nn.ModuleList[nn.Module]",
    state_dict: "OrderedDict[str, torch.Tensor]",
) -> Tuple[List[str], List[str]]:
    """Copy matching entries of ``state_dict`` into the embedding modules.

    Mirrors the ``nn.Module.load_state_dict`` contract: returns
    ``(missing_keys, unexpected_keys)`` — keys a module expected but the
    dict lacked, and dict keys no module consumed.
    """

    def _local_tensor(t: Union[torch.Tensor, ShardedTensor]) -> torch.Tensor:
        # A ShardedTensor here is expected to carry exactly one local shard.
        if isinstance(t, ShardedTensor):
            # pyre-fixme[16]
            local_shards = t.local_shards()
            assert len(local_shards) == 1
            return local_shards[0].tensor
        return t

    missing_keys: List[str] = []
    unexpected_keys: List[str] = list(state_dict.keys())
    for emb_module in emb_modules:
        for key, param in emb_module.state_dict().items():
            if key not in state_dict:
                missing_keys.append(cast(str, key))
                continue
            dst_tensor = _local_tensor(param)
            src_tensor = _local_tensor(state_dict[key])
            # Detach so the in-place copy does not record into autograd.
            dst_tensor.detach().copy_(src_tensor)
            unexpected_keys.remove(key)
    return missing_keys, unexpected_keys
class EmbeddingFusedOptimizer(FusedOptimizer):
def __init__(
self,
config: GroupedEmbeddingConfig,
emb_module: SplitTableBatchedEmbeddingBagsCodegen,
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
pg: Optional[dist.ProcessGroup] = None,
) -> None:
self._emb_module: SplitTableBatchedEmbeddingBagsCodegen = emb_module
# pyre-fixme[4]: Attribute must be annotated.
self._pg = pg
        @dataclass
        class ShardParams:
            # Container grouping per-shard data collected for one table.
            # optimizer_states: fused optimizer state tensors, one entry per
            #   local shard (None when the optimizer keeps no state for it) —
            #   presumably momentum-like tensors; confirm against usage below.
            optimizer_states: List[Optional[Tuple[torch.Tensor]]]
            # local_metadata: ShardMetadata describing each local shard.
            local_metadata: List[ShardMetadata]
            # embedding_weights: the local embedding weight tensor per shard.
            embedding_weights: List[torch.Tensor]
def to_rowwise_sharded_metadata(
local_metadata: ShardMetadata,
global_metadata: ShardedTensorMetadata,
sharding_dim: int,
) -> Tuple[ShardMetadata, ShardedTensorMetadata]:
rw_shards: List[ShardMetadata] = []
rw_local_shard: ShardMetadata = local_metadata
shards_metadata = global_metadata.shards_metadata
# column-wise sharding
# sort the metadata based on column offset and
# we construct the momentum tensor in row-wise sharded way
if sharding_dim == 1:
shards_metadata = sorted(
shards_metadata, key=lambda shard: shard.shard_offsets[1]
)
for idx, shard in enumerate(shards_metadata):
offset = shard.shard_offsets[0]
# for column-wise sharding, we still create row-wise sharded metadata for optimizer
# manually create a row-wise offset
if sharding_dim == 1:
offset = idx * shard.shard_sizes[0]
rw_shard = ShardMetadata(
shard_sizes=[shard.shard_sizes[0]],
shard_offsets=[offset],
placement=shard.placement,
)
if local_metadata == shard:
rw_local_shard = rw_shard
rw_shards.append(rw_shard)
tensor_properties = TensorProperties(
dtype=global_metadata.tensor_properties.dtype,
layout=global_metadata.tensor_properties.layout,
requires_grad=global_metadata.tensor_properties.requires_grad,
memory_format=global_metadata.tensor_properties.memory_format,
pin_memory=global_metadata.tensor_properties.pin_memory,
)
len_rw_shards = len(shards_metadata) if sharding_dim == 1 else 1
rw_metadata = ShardedTensorMetadata(
shards_metadata=rw_shards,
size=torch.Size([global_metadata.size[0] * len_rw_shards]),
tensor_properties=tensor_properties,
)
return rw_local_shard, rw_metadata
# pyre-ignore [33]
state: Dict[Any, Any] = {}
param_group: Dict[str, Any] = {
"params": [],
"lr": emb_module.optimizer_args.learning_rate,
}
params: Dict[str, Union[torch.Tensor, ShardedTensor]] = {}
# Fused optimizers use buffers (they don't use autograd) and we want to make sure
# that state_dict look identical to no-fused version.
table_to_shard_params = {}
split_embedding_weights = emb_module.split_embedding_weights()
split_optimizer_states = emb_module.split_optimizer_states()
for table_config, optimizer_states, weight in itertools.zip_longest(
config.embedding_tables,
split_optimizer_states,
split_embedding_weights,
):
if table_config.name not in table_to_shard_params:
table_to_shard_params[table_config.name] = ShardParams(
optimizer_states=[], local_metadata=[], embedding_weights=[]
)
if optimizer_states:
for optimizer_state in optimizer_states:
assert table_config.local_rows == optimizer_state.size(0)
local_metadata = table_config.local_metadata
table_to_shard_params[table_config.name].optimizer_states.append(
optimizer_states
)
table_to_shard_params[table_config.name].local_metadata.append(
local_metadata
)
table_to_shard_params[table_config.name].embedding_weights.append(weight)
seen_tables = set()
for table_config in config.embedding_tables:
if table_config.name in seen_tables:
continue
seen_tables.add(table_config.name)
table_config_global_metadata: Optional[
ShardedTensorMetadata
] = copy.deepcopy(table_config.global_metadata)
shard_params: ShardParams = table_to_shard_params[table_config.name]
assert table_config_global_metadata is not None
local_weight_shards = []
for local_weight, local_metadata in zip(
shard_params.embedding_weights, shard_params.local_metadata
):
local_weight_shards.append(Shard(local_weight, local_metadata))
table_config_global_metadata.tensor_properties.dtype = (
local_weight.dtype
)
table_config_global_metadata.tensor_properties.requires_grad = (
local_weight.requires_grad
)
weight = ShardedTensor._init_from_local_shards_and_global_metadata(
local_shards=local_weight_shards,
sharded_tensor_metadata=table_config_global_metadata,
process_group=self._pg,
)
state[weight] = {}
param_group["params"].append(weight)
param_key = table_config.name + ".weight"
params[param_key] = weight
# Setting optimizer states
sharding_dim: int = (
1 if table_config.local_cols != table_config.embedding_dim else 0
)
if all(
[opt_state is not None for opt_state in shard_params.optimizer_states]
):
# pyre-ignore
def get_momentum(momentum_idx: int) -> ShardedTensor:
assert momentum_idx > 0
momentum_local_shards: List[Shard] = []
sharded_tensor_metadata = table_config.global_metadata
for (optimizer_state, shard_param_local_metadata) in zip(
shard_params.optimizer_states, shard_params.local_metadata
):
local_metadata = table_config.local_metadata
if optimizer_state[momentum_idx - 1].dim() == 1:
(
local_metadata,
sharded_tensor_metadata,
) = to_rowwise_sharded_metadata(
shard_param_local_metadata,
table_config.global_metadata,
sharding_dim,
)
assert local_metadata is not None
assert sharded_tensor_metadata is not None
momentum_local_shards.append(
Shard(optimizer_state[momentum_idx - 1], local_metadata)
)
return ShardedTensor._init_from_local_shards_and_global_metadata(
local_shards=momentum_local_shards,
sharded_tensor_metadata=sharded_tensor_metadata,
process_group=self._pg,
)
if all(
# pyre-ignore
[len(opt_state) >= 1 for opt_state in shard_params.optimizer_states]
):
state[weight][f"{table_config.name}.momentum1"] = get_momentum(1)
if all(
# pyre-ignore
[len(opt_state) >= 2 for opt_state in shard_params.optimizer_states]
):
state[weight][f"{table_config.name}.momentum2"] = get_momentum(2)
super().__init__(params, state, [param_group])
def zero_grad(self, set_to_none: bool = False) -> None:
# pyre-ignore [16]
self._emb_module.set_learning_rate(self.param_groups[0]["lr"])
# pyre-ignore [2]
def step(self, closure: Any = None) -> None:
# pyre-ignore [16]
self._emb_module.set_learning_rate(self.param_groups[0]["lr"])
class BaseEmbedding(abc.ABC, nn.Module):
    """
    Abstract base class for grouped nn.Embedding.
    """

    @abc.abstractmethod
    def forward(
        self,
        features: KeyedJaggedTensor,
    ) -> torch.Tensor:
        """Compute unpooled embeddings for the given sparse features."""
        pass

    def sparse_grad_parameter_names(
        self, destination: Optional[List[str]] = None, prefix: str = ""
    ) -> List[str]:
        """Return sparse gradient parameter names.

        The base implementation contributes nothing; subclasses append their
        own names. A fresh list is created when `destination` is None so no
        mutable default is shared across calls.
        """
        # (This text previously sat as a stray no-op string literal between
        # the two methods; it is now a proper docstring.)
        destination = [] if destination is None else destination
        return destination
def _get_state_dict(
    embedding_tables: List[ShardedEmbeddingTable],
    params: Union[
        nn.ModuleList,
        List[Union[nn.Module, torch.Tensor]],
        List[torch.Tensor],
    ],
    pg: Optional[dist.ProcessGroup] = None,
    destination: Optional[Dict[str, Any]] = None,
    prefix: str = "",
) -> Dict[str, Any]:
    """Build a state dict mapping "<prefix><table name>.weight" to weights.

    Tables that carry global ShardedTensorMetadata are exported as
    ShardedTensors assembled from this rank's local shards; all other tables
    are exported as their plain local tensor.
    """
    if destination is None:
        destination = OrderedDict()
        # pyre-ignore [16]
        destination._metadata = OrderedDict()
    # It is possible for there to be multiple shards from a table on a single
    # rank. We accumulate them in key_to_local_shards. Repeat shards should
    # have identical global ShardedTensorMetadata.
    # (These three notes were previously no-op bare string literals.)
    key_to_local_shards: Dict[str, List[Shard]] = defaultdict(list)
    key_to_global_metadata: Dict[str, ShardedTensorMetadata] = {}

    def get_key_from_embedding_table(embedding_table: ShardedEmbeddingTable) -> str:
        # State-dict key convention for a table's weight.
        return prefix + f"{embedding_table.name}.weight"

    for embedding_table, param in zip(embedding_tables, params):
        key = get_key_from_embedding_table(embedding_table)
        assert embedding_table.local_rows == param.size(0)
        assert embedding_table.local_cols == param.size(1)
        if embedding_table.global_metadata is not None:
            # set additional field of sharded tensor based on local tensor properties
            embedding_table.global_metadata.tensor_properties.dtype = param.dtype
            embedding_table.global_metadata.tensor_properties.requires_grad = (
                param.requires_grad
            )
            key_to_global_metadata[key] = embedding_table.global_metadata
            key_to_local_shards[key].append(
                Shard(param, embedding_table.local_metadata)
            )
        else:
            destination[key] = param
    # Populate the remaining destinations that have a global metadata
    for key in key_to_local_shards:
        global_metadata = key_to_global_metadata[key]
        destination[key] = ShardedTensor._init_from_local_shards_and_global_metadata(
            local_shards=key_to_local_shards[key],
            sharded_tensor_metadata=global_metadata,
            process_group=pg,
        )
    return destination
class GroupedEmbedding(BaseEmbedding):
    """Grouped sequence embedding lookup backed by one nn.Embedding per table."""

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        sparse: bool,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
        self._config = config
        # pyre-fixme[4]: Attribute must be annotated.
        self._pg = pg
        # pyre-fixme[24]: Non-generic type `nn.modules.container.ModuleList` cannot
        # take parameters.
        self._emb_modules: nn.ModuleList[nn.Module] = nn.ModuleList()
        self._sparse = sparse
        for table in self._config.embedding_tables:
            # Initialize the local weight uniformly in the table's
            # configured init range.
            initial_weight = torch.empty(
                table.local_rows,
                table.local_cols,
                device=device,
            ).uniform_(
                table.get_weight_init_min(),
                table.get_weight_init_max(),
            )
            self._emb_modules.append(
                nn.Embedding(
                    num_embeddings=table.local_rows,
                    embedding_dim=table.local_cols,
                    device=device,
                    sparse=self._sparse,
                    _weight=initial_weight,
                )
            )

    def forward(self, features: KeyedJaggedTensor) -> torch.Tensor:
        """Look up unpooled embeddings for every feature and concatenate them."""
        per_key_indices: Dict[str, torch.Tensor] = dict(
            zip(
                features.keys(),
                torch.split(features.values(), features.length_per_key()),
            )
        )
        lookups: List[torch.Tensor] = [
            emb(input=per_key_indices[feature_name])
            for table, emb in zip(self._config.embedding_tables, self._emb_modules)
            for feature_name in table.feature_names
        ]
        return torch.cat(lookups, dim=0)

    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Export per-table weights via `_get_state_dict`."""
        weights = []
        for emb in self._emb_modules:
            weights.append(emb.weight if keep_vars else emb.weight.data)
        return _get_state_dict(
            self._config.embedding_tables, weights, self._pg, destination, prefix
        )

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        """Yield ("<prefix><table>.weight", weight) for every table."""
        for table, emb in zip(self._config.embedding_tables, self._emb_modules):
            weight = emb.weight
            assert table.local_rows == weight.size(0)
            assert table.local_cols == weight.size(1)
            yield append_prefix(prefix, f"{table.name}.weight"), weight

    def sparse_grad_parameter_names(
        self, destination: Optional[List[str]] = None, prefix: str = ""
    ) -> List[str]:
        """When sparse gradients are enabled, every table weight is sparse."""
        destination = [] if destination is None else destination
        if not self._sparse:
            return destination
        for table in self._config.embedding_tables:
            destination.append(append_prefix(prefix, f"{table.name}.weight"))
        return destination

    def config(self) -> GroupedEmbeddingConfig:
        return self._config
class BaseBatchedEmbedding(BaseEmbedding):
    """Base for sequence embeddings backed by a batched FBGEMM kernel."""

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
        self._config = config
        # pyre-fixme[4]: Attribute must be annotated.
        self._pg = pg
        self._local_rows: List[int] = []
        self._weight_init_mins: List[float] = []
        self._weight_init_maxs: List[float] = []
        self._num_embeddings: List[int] = []
        self._local_cols: List[int] = []
        self._feature_table_map: List[int] = []
        # Flatten the per-table configuration into the parallel lists the
        # batched kernels consume; _feature_table_map maps each feature to
        # the index of its owning table.
        for table_idx, table in enumerate(self._config.embedding_tables):
            self._local_rows.append(table.local_rows)
            self._weight_init_mins.append(table.get_weight_init_min())
            self._weight_init_maxs.append(table.get_weight_init_max())
            self._num_embeddings.append(table.num_embeddings)
            self._local_cols.append(table.local_cols)
            self._feature_table_map.extend([table_idx] * table.num_features())

    def init_parameters(self) -> None:
        """Uniformly (re)initialize every table's weight in its configured range."""
        assert len(self._num_embeddings) == len(self.split_embedding_weights())
        for rows, cols, init_min, init_max, weight in zip(
            self._local_rows,
            self._local_cols,
            self._weight_init_mins,
            self._weight_init_maxs,
            self.split_embedding_weights(),
        ):
            assert weight.shape == (rows, cols)
            weight.data.uniform_(init_min, init_max)

    def forward(self, features: KeyedJaggedTensor) -> torch.Tensor:
        """Run the batched kernel over the jagged (indices, offsets) input."""
        indices = features.values().long()
        offsets = features.offsets().long()
        return self.emb_module(indices=indices, offsets=offsets)

    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Flush any cached rows, then export weights via `_get_state_dict`."""
        self.flush()
        return _get_state_dict(
            self._config.embedding_tables,
            self.split_embedding_weights(),
            self._pg,
            destination,
            prefix,
        )

    def split_embedding_weights(self) -> List[torch.Tensor]:
        """Per-table views into the kernel's weights."""
        return self.emb_module.split_embedding_weights()

    @property
    @abc.abstractmethod
    def emb_module(
        self,
    ) -> Union[
        DenseTableBatchedEmbeddingBagsCodegen,
        SplitTableBatchedEmbeddingBagsCodegen,
        IntNBitTableBatchedEmbeddingBagsCodegen,
    ]:
        ...

    def config(self) -> GroupedEmbeddingConfig:
        return self._config

    def flush(self) -> None:
        # Kernels that cache rows override this; the default is a no-op.
        pass
class BatchedFusedEmbedding(BaseBatchedEmbedding, FusedOptimizerModule):
    """Sequence embedding backed by SplitTableBatchedEmbeddingBagsCodegen with
    a fused optimizer (updates applied during backward)."""

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(config, pg, device)

        def to_embedding_location(
            compute_kernel: EmbeddingComputeKernel,
        ) -> EmbeddingLocation:
            # Map the compute kernel onto where FBGEMM should place the rows.
            if compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED:
                return EmbeddingLocation.DEVICE
            elif compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED_UVM:
                return EmbeddingLocation.MANAGED
            elif compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING:
                return EmbeddingLocation.MANAGED_CACHING
            else:
                raise ValueError(f"Invalid EmbeddingComputeKernel {compute_kernel}")

        managed: List[EmbeddingLocation] = []
        compute_devices: List[ComputeDevice] = []
        for table in config.embedding_tables:
            if device is not None and device.type == "cuda":
                compute_devices.append(ComputeDevice.CUDA)
                managed.append(to_embedding_location(table.compute_kernel))
            else:
                # CPU tables always live in host memory.
                compute_devices.append(ComputeDevice.CPU)
                managed.append(EmbeddingLocation.HOST)
        if fused_params is None:
            fused_params = {}
        self._emb_module: SplitTableBatchedEmbeddingBagsCodegen = (
            SplitTableBatchedEmbeddingBagsCodegen(
                embedding_specs=list(
                    zip(self._local_rows, self._local_cols, managed, compute_devices)
                ),
                feature_table_map=self._feature_table_map,
                pooling_mode=PoolingMode.NONE,
                # Fix: use this class's own data-type mapping; previously this
                # referenced BatchedFusedEmbeddingBag.to_sparse_type — an
                # identical but unrelated staticmethod on the pooled variant.
                weights_precision=BatchedFusedEmbedding.to_sparse_type(
                    config.data_type
                ),
                device=device,
                **fused_params,
            )
        )
        self._optim: EmbeddingFusedOptimizer = EmbeddingFusedOptimizer(
            config,
            self._emb_module,
            pg,
        )
        self.init_parameters()

    @staticmethod
    def to_sparse_type(data_type: DataType) -> SparseType:
        """Translate torchrec DataType into the FBGEMM SparseType."""
        if data_type == DataType.FP32:
            return SparseType.FP32
        elif data_type == DataType.FP16:
            return SparseType.FP16
        elif data_type == DataType.INT8:
            return SparseType.INT8
        else:
            raise ValueError(f"Invalid DataType {data_type}")

    @property
    def emb_module(
        self,
    ) -> SplitTableBatchedEmbeddingBagsCodegen:
        return self._emb_module

    @property
    def fused_optimizer(self) -> FusedOptimizer:
        return self._optim

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        # Fused-optimizer weights are buffers, not autograd parameters.
        yield from ()

    def named_buffers(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield each table's weight slice as "<prefix><table>.weight"."""
        for config, param in zip(
            self._config.embedding_tables,
            self.emb_module.split_embedding_weights(),
        ):
            key = append_prefix(prefix, f"{config.name}.weight")
            yield key, param

    def flush(self) -> None:
        self._emb_module.flush()
class BatchedDenseEmbedding(BaseBatchedEmbedding):
    """Sequence embedding backed by the dense (non-fused) FBGEMM kernel."""

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__(config, pg, device)
        use_cpu = (
            device is None or device.type == "cpu" or not torch.cuda.is_available()
        )
        self._emb_module: DenseTableBatchedEmbeddingBagsCodegen = (
            DenseTableBatchedEmbeddingBagsCodegen(
                list(zip(self._local_rows, self._local_cols)),
                feature_table_map=self._feature_table_map,
                pooling_mode=PoolingMode.NONE,
                use_cpu=use_cpu,
            )
        )
        self.init_parameters()

    @property
    def emb_module(
        self,
    ) -> DenseTableBatchedEmbeddingBagsCodegen:
        return self._emb_module

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        """Expose the single flat weight buffer under one slash-joined key."""
        table_names = [table.name for table in self._config.embedding_tables]
        combined_key = "/".join(table_names)
        yield append_prefix(prefix, f"{combined_key}.weight"), cast(
            nn.Parameter, self._emb_module.weights
        )
class GroupedEmbeddingsLookup(BaseEmbeddingLookup):
    """Dispatches a SparseFeatures batch across one embedding module per
    GroupedEmbeddingConfig and concatenates the per-group results."""

    def __init__(
        self,
        grouped_configs: List[GroupedEmbeddingConfig],
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        def _create_lookup(
            config: GroupedEmbeddingConfig,
        ) -> BaseEmbedding:
            # Pick the concrete embedding implementation for this group's
            # compute kernel.
            if config.compute_kernel == EmbeddingComputeKernel.BATCHED_DENSE:
                return BatchedDenseEmbedding(
                    config=config,
                    pg=pg,
                    device=device,
                )
            elif config.compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED:
                return BatchedFusedEmbedding(
                    config=config,
                    pg=pg,
                    device=device,
                    fused_params=fused_params,
                )
            elif config.compute_kernel == EmbeddingComputeKernel.DENSE:
                return GroupedEmbedding(
                    config=config,
                    sparse=False,
                    pg=pg,
                    device=device,
                )
            elif config.compute_kernel == EmbeddingComputeKernel.SPARSE:
                return GroupedEmbedding(
                    config=config,
                    sparse=True,
                    pg=pg,
                    device=device,
                )
            else:
                raise ValueError(
                    f"Compute kernel not supported {config.compute_kernel}"
                )

        super().__init__()
        # pyre-fixme[24]: Non-generic type `nn.modules.container.ModuleList` cannot
        # take parameters.
        self._emb_modules: nn.ModuleList[BaseEmbedding] = nn.ModuleList()
        for config in grouped_configs:
            self._emb_modules.append(_create_lookup(config))
        # Number of id-list features handled by each group, used to split the
        # incoming batch in forward().
        self._id_list_feature_splits: List[int] = []
        for config in grouped_configs:
            self._id_list_feature_splits.append(config.num_features())
        # return a dummy empty tensor when grouped_configs is empty
        self.register_buffer(
            "_dummy_embs_tensor",
            torch.empty(
                [0],
                dtype=torch.float32,
                device=device,
                requires_grad=True,
            ),
        )
        self.grouped_configs = grouped_configs

    def forward(
        self,
        sparse_features: SparseFeatures,
    ) -> torch.Tensor:
        """Split features per group, run each lookup, and concatenate flattened
        results into one 1-D tensor."""
        assert sparse_features.id_list_features is not None
        embeddings: List[torch.Tensor] = []
        id_list_features_by_group = sparse_features.id_list_features.split(
            self._id_list_feature_splits,
        )
        for emb_op, features in zip(self._emb_modules, id_list_features_by_group):
            embeddings.append(emb_op(features).view(-1))
        if len(embeddings) == 0:
            # a hack for empty ranks
            return self._dummy_embs_tensor
        elif len(embeddings) == 1:
            return embeddings[0]
        else:
            return torch.cat(embeddings)

    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Merge all sub-modules' state into a single flat OrderedDict."""
        if destination is None:
            destination = OrderedDict()
            # pyre-ignore [16]
            destination._metadata = OrderedDict()
        for emb_module in self._emb_modules:
            emb_module.state_dict(destination, prefix, keep_vars)
        return destination

    def load_state_dict(
        self,
        state_dict: "OrderedDict[str, torch.Tensor]",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Copy matching weights into the sub-modules; returns missing and
        unexpected keys like `nn.Module.load_state_dict`."""
        m, u = _load_state_dict(self._emb_modules, state_dict)
        return _IncompatibleKeys(missing_keys=m, unexpected_keys=u)

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        for emb_module in self._emb_modules:
            yield from emb_module.named_parameters(prefix, recurse)

    def named_buffers(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        for emb_module in self._emb_modules:
            yield from emb_module.named_buffers(prefix, recurse)

    def sparse_grad_parameter_names(
        self, destination: Optional[List[str]] = None, prefix: str = ""
    ) -> List[str]:
        """Collect sparse-gradient parameter names from every sub-module."""
        destination = [] if destination is None else destination
        for emb_module in self._emb_modules:
            emb_module.sparse_grad_parameter_names(destination, prefix)
        return destination
class BaseEmbeddingBag(nn.Module):
    """
    Abstract base class for grouped nn.EmbeddingBag.
    """

    def sparse_grad_parameter_names(
        self, destination: Optional[List[str]] = None, prefix: str = ""
    ) -> List[str]:
        """Return sparse gradient parameter names.

        The base implementation contributes nothing; subclasses append their
        own names. A fresh list is created when `destination` is None so no
        mutable default is shared across calls.
        """
        # (This text previously sat as a stray no-op string literal before
        # the method; it is now a proper docstring.)
        destination = [] if destination is None else destination
        return destination

    @property
    @abc.abstractmethod
    def config(self) -> GroupedEmbeddingConfig:
        pass
class GroupedEmbeddingBag(BaseEmbeddingBag):
    """Grouped pooled embedding lookup backed by one nn.EmbeddingBag per table."""

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        sparse: bool,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        def _to_mode(pooling: PoolingType) -> str:
            # nn.EmbeddingBag only supports sum/mean here.
            if pooling == PoolingType.SUM:
                return "sum"
            elif pooling == PoolingType.MEAN:
                return "mean"
            else:
                raise ValueError(f"Unsupported pooling {pooling}")

        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
        self._config = config
        # pyre-fixme[4]: Attribute must be annotated.
        self._pg = pg
        # pyre-fixme[24]: Non-generic type `nn.modules.container.ModuleList` cannot
        # take parameters.
        self._emb_modules: nn.ModuleList[nn.Module] = nn.ModuleList()
        self._sparse = sparse
        self._emb_names: List[str] = []
        self._lengths_per_emb: List[int] = []
        # Tracks whether a feature name appears in more than one table.
        shared_feature: Dict[str, bool] = {}
        for embedding_config in self._config.embedding_tables:
            self._emb_modules.append(
                nn.EmbeddingBag(
                    num_embeddings=embedding_config.local_rows,
                    embedding_dim=embedding_config.local_cols,
                    mode=_to_mode(embedding_config.pooling),
                    device=device,
                    include_last_offset=True,
                    sparse=self._sparse,
                    _weight=torch.empty(
                        embedding_config.local_rows,
                        embedding_config.local_cols,
                        device=device,
                    ).uniform_(
                        embedding_config.get_weight_init_min(),
                        embedding_config.get_weight_init_max(),
                    ),
                )
            )
            for feature_name in embedding_config.feature_names:
                # First sighting marks the feature unshared; a second sighting
                # flips it to shared.
                if feature_name not in shared_feature:
                    shared_feature[feature_name] = False
                else:
                    shared_feature[feature_name] = True
                self._lengths_per_emb.append(embedding_config.embedding_dim)
        # Second pass: shared feature names are disambiguated as
        # "<feature>@<table>".
        for embedding_config in self._config.embedding_tables:
            for feature_name in embedding_config.feature_names:
                if shared_feature[feature_name]:
                    self._emb_names.append(feature_name + "@" + embedding_config.name)
                else:
                    self._emb_names.append(feature_name)

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """Pool embeddings per feature and return them as one KeyedTensor."""
        pooled_embeddings: List[torch.Tensor] = []
        for embedding_config, emb_module in zip(
            self._config.embedding_tables, self._emb_modules
        ):
            for feature_name in embedding_config.feature_names:
                values = features[feature_name].values()
                offsets = features[feature_name].offsets()
                weights = features[feature_name].weights_or_none()
                # Only floating-point per-sample weights are usable; drop
                # integer weights.
                if weights is not None and not torch.is_floating_point(weights):
                    weights = None
                pooled_embeddings.append(
                    emb_module(
                        input=values,
                        offsets=offsets,
                        per_sample_weights=weights,
                    )
                )
        return KeyedTensor(
            keys=self._emb_names,
            values=torch.cat(pooled_embeddings, dim=1),
            length_per_key=self._lengths_per_emb,
        )

    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Export per-table weights via `_get_state_dict`."""
        params = [
            emb_module.weight if keep_vars else emb_module.weight.data
            for emb_module in self._emb_modules
        ]
        return _get_state_dict(
            self._config.embedding_tables, params, self._pg, destination, prefix
        )

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        """Yield ("<prefix><table>.weight", weight) for every table."""
        for config, emb_module in zip(
            self._config.embedding_tables,
            self._emb_modules,
        ):
            param = emb_module.weight
            assert config.local_rows == param.size(0)
            assert config.local_cols == param.size(1)
            yield append_prefix(prefix, f"{config.name}.weight"), param

    def sparse_grad_parameter_names(
        self, destination: Optional[List[str]] = None, prefix: str = ""
    ) -> List[str]:
        """When sparse gradients are enabled, every table weight is sparse."""
        destination = [] if destination is None else destination
        if self._sparse:
            for config in self._config.embedding_tables:
                destination.append(append_prefix(prefix, f"{config.name}.weight"))
        return destination

    def config(self) -> GroupedEmbeddingConfig:
        return self._config
class BaseBatchedEmbeddingBag(BaseEmbeddingBag):
    """Base for pooled embeddings backed by a batched FBGEMM kernel."""

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
        self._config = config
        # pyre-fixme[4]: Attribute must be annotated.
        self._pg = pg

        def to_pooling_mode(pooling_type: PoolingType) -> PoolingMode:
            # Only SUM and MEAN pooling are supported by the batched kernels.
            if pooling_type == PoolingType.SUM:
                return PoolingMode.SUM
            else:
                assert pooling_type == PoolingType.MEAN
                return PoolingMode.MEAN

        self._pooling: PoolingMode = to_pooling_mode(config.pooling)
        self._local_rows: List[int] = []
        self._weight_init_mins: List[float] = []
        self._weight_init_maxs: List[float] = []
        self._num_embeddings: List[int] = []
        self._local_cols: List[int] = []
        self._feature_table_map: List[int] = []
        self._emb_names: List[str] = []
        self._lengths_per_emb: List[int] = []
        # Tracks whether a feature name appears in more than one table.
        shared_feature: Dict[str, bool] = {}
        # NOTE: this loop rebinds `config` to each table config; the original
        # argument is not referenced after this point.
        for idx, config in enumerate(self._config.embedding_tables):
            self._local_rows.append(config.local_rows)
            self._weight_init_mins.append(config.get_weight_init_min())
            self._weight_init_maxs.append(config.get_weight_init_max())
            self._num_embeddings.append(config.num_embeddings)
            self._local_cols.append(config.local_cols)
            self._feature_table_map.extend([idx] * config.num_features())
            for feature_name in config.feature_names:
                if feature_name not in shared_feature:
                    shared_feature[feature_name] = False
                else:
                    shared_feature[feature_name] = True
                self._lengths_per_emb.append(config.embedding_dim)
        # Second pass: shared feature names are disambiguated as
        # "<feature>@<table>".
        for embedding_config in self._config.embedding_tables:
            for feature_name in embedding_config.feature_names:
                if shared_feature[feature_name]:
                    self._emb_names.append(feature_name + "@" + embedding_config.name)
                else:
                    self._emb_names.append(feature_name)

    def init_parameters(self) -> None:
        # initialize embedding weights
        assert len(self._num_embeddings) == len(self.split_embedding_weights())
        for (rows, emb_dim, weight_init_min, weight_init_max, param) in zip(
            self._local_rows,
            self._local_cols,
            self._weight_init_mins,
            self._weight_init_maxs,
            self.split_embedding_weights(),
        ):
            assert param.shape == (rows, emb_dim)
            param.data.uniform_(
                weight_init_min,
                weight_init_max,
            )

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """Pool embeddings with the batched kernel; returns a KeyedTensor."""
        weights = features.weights_or_none()
        # Only floating-point per-sample weights are usable; drop integer
        # weights.
        if weights is not None and not torch.is_floating_point(weights):
            weights = None
        values = self.emb_module(
            indices=features.values().long(),
            offsets=features.offsets().long(),
            per_sample_weights=weights,
        )
        return KeyedTensor(
            keys=self._emb_names,
            values=values,
            length_per_key=self._lengths_per_emb,
        )

    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Flush any cached rows, then export weights via `_get_state_dict`."""
        self.flush()
        return _get_state_dict(
            self._config.embedding_tables,
            self.split_embedding_weights(),
            self._pg,
            destination,
            prefix,
        )

    def split_embedding_weights(self) -> List[torch.Tensor]:
        # Per-table views into the kernel's weights.
        return self.emb_module.split_embedding_weights()

    @property
    @abc.abstractmethod
    def emb_module(
        self,
    ) -> Union[
        DenseTableBatchedEmbeddingBagsCodegen,
        SplitTableBatchedEmbeddingBagsCodegen,
        IntNBitTableBatchedEmbeddingBagsCodegen,
    ]:
        ...

    def config(self) -> GroupedEmbeddingConfig:
        return self._config

    def flush(self) -> None:
        # Kernels that cache rows override this; the default is a no-op.
        pass
class BatchedFusedEmbeddingBag(BaseBatchedEmbeddingBag, FusedOptimizerModule):
    """Pooled embedding backed by SplitTableBatchedEmbeddingBagsCodegen with a
    fused optimizer (updates applied during backward)."""

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(config, pg, device)

        def to_embedding_location(
            compute_kernel: EmbeddingComputeKernel,
        ) -> EmbeddingLocation:
            # Where FBGEMM should keep the rows for each fused kernel flavor.
            if compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED:
                return EmbeddingLocation.DEVICE
            if compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED_UVM:
                return EmbeddingLocation.MANAGED
            if compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING:
                return EmbeddingLocation.MANAGED_CACHING
            raise ValueError(f"Invalid EmbeddingComputeKernel {compute_kernel}")

        managed: List[EmbeddingLocation] = []
        compute_devices: List[ComputeDevice] = []
        for table in config.embedding_tables:
            if device is not None and device.type == "cuda":
                compute_devices.append(ComputeDevice.CUDA)
                managed.append(to_embedding_location(table.compute_kernel))
            else:
                # CPU tables always live in host memory.
                compute_devices.append(ComputeDevice.CPU)
                managed.append(EmbeddingLocation.HOST)
        fused_params = {} if fused_params is None else fused_params
        embedding_specs = list(
            zip(self._local_rows, self._local_cols, managed, compute_devices)
        )
        self._emb_module: SplitTableBatchedEmbeddingBagsCodegen = (
            SplitTableBatchedEmbeddingBagsCodegen(
                embedding_specs=embedding_specs,
                feature_table_map=self._feature_table_map,
                pooling_mode=self._pooling,
                weights_precision=BatchedFusedEmbeddingBag.to_sparse_type(
                    config.data_type
                ),
                device=device,
                **fused_params,
            )
        )
        self._optim: EmbeddingFusedOptimizer = EmbeddingFusedOptimizer(
            config,
            self._emb_module,
            pg,
        )
        self.init_parameters()

    @staticmethod
    def to_sparse_type(data_type: DataType) -> SparseType:
        """Translate torchrec DataType into the FBGEMM SparseType."""
        if data_type == DataType.FP32:
            return SparseType.FP32
        if data_type == DataType.FP16:
            return SparseType.FP16
        if data_type == DataType.INT8:
            return SparseType.INT8
        raise ValueError(f"Invalid DataType {data_type}")

    @property
    def emb_module(
        self,
    ) -> SplitTableBatchedEmbeddingBagsCodegen:
        return self._emb_module

    @property
    def fused_optimizer(self) -> FusedOptimizer:
        return self._optim

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        # Fused-optimizer weights are buffers, not autograd parameters.
        yield from ()

    def named_buffers(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield each table's weight slice as "<prefix><table>.weight"."""
        weights = self.emb_module.split_embedding_weights()
        for table, weight in zip(self._config.embedding_tables, weights):
            key = append_prefix(prefix, f"{table.name}.weight")
            yield key, weight

    def flush(self) -> None:
        self._emb_module.flush()
class BatchedDenseEmbeddingBag(BaseBatchedEmbeddingBag):
    """Pooled embedding backed by the dense (non-fused) FBGEMM kernel."""

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__(config, pg, device)
        use_cpu = (
            device is None or device.type == "cpu" or not torch.cuda.is_available()
        )
        self._emb_module: DenseTableBatchedEmbeddingBagsCodegen = (
            DenseTableBatchedEmbeddingBagsCodegen(
                list(zip(self._local_rows, self._local_cols)),
                feature_table_map=self._feature_table_map,
                pooling_mode=self._pooling,
                use_cpu=use_cpu,
            )
        )
        self.init_parameters()

    @property
    def emb_module(
        self,
    ) -> DenseTableBatchedEmbeddingBagsCodegen:
        return self._emb_module

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        """Expose the single flat weight buffer under one slash-joined key."""
        table_names = [table.name for table in self._config.embedding_tables]
        combined_key = "/".join(table_names)
        yield append_prefix(prefix, f"{combined_key}.weight"), cast(
            nn.Parameter, self._emb_module.weights
        )
class QuantBatchedEmbeddingBag(BaseBatchedEmbeddingBag):
    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        """Build the int-N-bit inference kernel for the grouped tables."""
        super().__init__(config, pg, device)
        self._emb_module: IntNBitTableBatchedEmbeddingBagsCodegen = (
            IntNBitTableBatchedEmbeddingBagsCodegen(
                # Spec per table: (name, rows, dim, quantized type, location);
                # names are left empty here.
                embedding_specs=[
                    (
                        "",
                        local_rows,
                        table.embedding_dim,
                        QuantBatchedEmbeddingBag.to_sparse_type(config.data_type),
                        EmbeddingLocation.DEVICE
                        if (device is not None and device.type == "cuda")
                        else EmbeddingLocation.HOST,
                    )
                    for local_rows, table in zip(
                        self._local_rows, config.embedding_tables
                    )
                ],
                pooling_mode=self._pooling,
                feature_table_map=self._feature_table_map,
            )
        )
        # Skip materializing weights for modules built on a "meta" device.
        if device is not None and device.type != "meta":
            self._emb_module.initialize_weights()
@staticmethod
def to_sparse_type(data_type: DataType) -> SparseType:
if data_type == DataType.FP16:
return SparseType.FP16
elif data_type == DataType.INT8:
return SparseType.INT8
elif data_type == DataType.INT4:
return SparseType.INT4
elif data_type == DataType.INT2:
return SparseType.INT2
else:
raise ValueError(f"Invalid DataType {data_type}")
    def init_parameters(self) -> None:
        # Quantized weights are materialized by initialize_weights() in
        # __init__; there is nothing to (re)initialize here.
        pass
    @property
    def emb_module(
        self,
    ) -> IntNBitTableBatchedEmbeddingBagsCodegen:
        # Underlying int-N-bit FBGEMM inference kernel.
        return self._emb_module
def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
values = self.emb_module(
indices=features.values().int(),
offsets=features.offsets().int(),
per_sample_weights=features.weights_or_none(),
).float()
return KeyedTensor(
keys=self._emb_names,
values=values,
length_per_key=self._lengths_per_emb,
)
def named_buffers(
self, prefix: str = "", recurse: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
for config, weight in zip(
self._config.embedding_tables,
self.emb_module.split_embedding_weights(),
):
yield append_prefix(prefix, f"{config.name}.weight"), weight[0]
def split_embedding_weights(self) -> List[torch.Tensor]:
return [
weight
for weight, _ in self.emb_module.split_embedding_weights(
split_scale_shifts=False
)
]
@classmethod
def from_float(cls, module: BaseEmbeddingBag) -> "QuantBatchedEmbeddingBag":
assert hasattr(
module, "qconfig"
), "EmbeddingBagCollectionInterface input float module must have qconfig defined"
def _to_data_type(dtype: torch.dtype) -> DataType:
if dtype == torch.quint8 or dtype == torch.qint8:
return DataType.INT8
elif dtype == torch.quint4 or dtype == torch.qint4:
return DataType.INT4
elif dtype == torch.quint2 or dtype == torch.qint2:
return DataType.INT2
else:
raise Exception(f"Invalid data type {dtype}")
# pyre-ignore [16]
data_type = _to_data_type(module.qconfig.weight().dtype)
sparse_type = QuantBatchedEmbeddingBag.to_sparse_type(data_type)
state_dict = dict(
itertools.chain(module.named_buffers(), module.named_parameters())
)
device = next(iter(state_dict.values())).device
# Adjust config to quantized version.
# This obviously doesn't work for column-wise sharding.
# pyre-ignore [29]
config = copy.deepcopy(module.config())
config.data_type = data_type
for table in config.embedding_tables:
table.local_cols = rounded_row_size_in_bytes(table.local_cols, sparse_type)
if table.local_metadata is not None:
table.local_metadata.shard_sizes = [
table.local_rows,
table.local_cols,
]
if table.global_metadata is not None:
for shard_meta in table.global_metadata.shards_metadata:
if shard_meta != table.local_metadata:
shard_meta.shard_sizes = [
shard_meta.shard_sizes[0],
rounded_row_size_in_bytes(
shard_meta.shard_sizes[1], sparse_type
),
]
table.global_metadata.size = torch.Size(
[
table.global_metadata.size[0],
sum(
shard_meta.shard_sizes[1]
for shard_meta in table.global_metadata.shards_metadata
),
]
)
ret = QuantBatchedEmbeddingBag(config=config, device=device)
# Quantize weights.
quant_weight_list = []
for _, weight in state_dict.items():
quantized_weights = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
weight, DATA_TYPE_NUM_BITS[data_type]
)
# weight and 4 byte scale shift (2xfp16)
quant_weight = quantized_weights[:, :-4]
scale_shift = quantized_weights[:, -4:]
quant_weight_list.append((quant_weight, scale_shift))
ret.emb_module.assign_embedding_weights(quant_weight_list)
return ret
class GroupedPooledEmbeddingsLookup(BaseEmbeddingLookup):
    """Dispatches pooled embedding lookups across grouped configs.

    Unweighted (id-list) groups and weighted (id-score-list) groups each get
    one embedding-bag module per config, chosen by the config's compute
    kernel. forward() splits the incoming sparse features per group, runs
    every module, and concatenates the pooled outputs along dim 1.
    """

    def __init__(
        self,
        grouped_configs: List[GroupedEmbeddingConfig],
        grouped_score_configs: List[GroupedEmbeddingConfig],
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        pg: Optional[dist.ProcessGroup] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> None:
        def _create_lookup(
            config: GroupedEmbeddingConfig,
        ) -> BaseEmbeddingBag:
            # Select the embedding-bag implementation from the compute kernel.
            if config.compute_kernel == EmbeddingComputeKernel.BATCHED_DENSE:
                return BatchedDenseEmbeddingBag(
                    config=config,
                    pg=pg,
                    device=device,
                )
            elif config.compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED:
                return BatchedFusedEmbeddingBag(
                    config=config,
                    pg=pg,
                    device=device,
                    fused_params=fused_params,
                )
            elif config.compute_kernel == EmbeddingComputeKernel.DENSE:
                return GroupedEmbeddingBag(
                    config=config,
                    sparse=False,
                    device=device,
                )
            elif config.compute_kernel == EmbeddingComputeKernel.SPARSE:
                return GroupedEmbeddingBag(
                    config=config,
                    sparse=True,
                    device=device,
                )
            elif config.compute_kernel == EmbeddingComputeKernel.BATCHED_QUANT:
                return QuantBatchedEmbeddingBag(
                    config=config,
                    pg=pg,
                    device=device,
                )
            else:
                raise ValueError(
                    f"Compute kernel not supported {config.compute_kernel}"
                )

        super().__init__()
        # pyre-fixme[24]: Non-generic type `nn.modules.container.ModuleList` cannot
        # take parameters.
        self._emb_modules: nn.ModuleList[BaseEmbeddingBag] = nn.ModuleList()
        for config in grouped_configs:
            self._emb_modules.append(_create_lookup(config))
        # pyre-fixme[24]: Non-generic type `nn.modules.container.ModuleList` cannot
        # take parameters.
        self._score_emb_modules: nn.ModuleList[BaseEmbeddingBag] = nn.ModuleList()
        for config in grouped_score_configs:
            self._score_emb_modules.append(_create_lookup(config))
        # Feature counts per group; used in forward() to split the incoming
        # KeyedJaggedTensors so each module sees only its own features.
        self._id_list_feature_splits: List[int] = []
        for config in grouped_configs:
            self._id_list_feature_splits.append(config.num_features())
        self._id_score_list_feature_splits: List[int] = []
        for config in grouped_score_configs:
            self._id_score_list_feature_splits.append(config.num_features())
        # return a dummy empty tensor
        # when grouped_configs and grouped_score_configs are empty
        self.register_buffer(
            "_dummy_embs_tensor",
            torch.empty(
                [0],
                dtype=torch.float32,
                device=device,
            requires_grad=True,
            ),
        )
        self.grouped_configs = grouped_configs
        self.grouped_score_configs = grouped_score_configs
        self._feature_processor = feature_processor

    def forward(
        self,
        sparse_features: SparseFeatures,
    ) -> torch.Tensor:
        """Look up pooled embeddings for all groups and concat along dim 1.

        At least one of id_list_features / id_score_list_features must be set.
        Empty ranks (no modules) return a (batch_size, 0) view of the dummy
        buffer so collectives elsewhere still see a tensor.
        """
        assert (
            sparse_features.id_list_features is not None
            or sparse_features.id_score_list_features is not None
        )
        embeddings: List[torch.Tensor] = []
        if len(self._emb_modules) > 0:
            assert sparse_features.id_list_features is not None
            id_list_features_by_group = sparse_features.id_list_features.split(
                self._id_list_feature_splits,
            )
            for config, emb_op, features in zip(
                self.grouped_configs, self._emb_modules, id_list_features_by_group
            ):
                # Optionally apply position weighting before the lookup; only
                # GroupedPositionWeightedModule processors are supported here.
                if (
                    config.has_feature_processor
                    and self._feature_processor is not None
                    and isinstance(
                        self._feature_processor, GroupedPositionWeightedModule
                    )
                ):
                    features = self._feature_processor(features)
                embeddings.append(emb_op(features).values())
        if len(self._score_emb_modules) > 0:
            assert sparse_features.id_score_list_features is not None
            id_score_list_features_by_group = (
                sparse_features.id_score_list_features.split(
                    self._id_score_list_feature_splits,
                )
            )
            for emb_op, features in zip(
                self._score_emb_modules, id_score_list_features_by_group
            ):
                embeddings.append(emb_op(features).values())
        if len(embeddings) == 0:
            # a hack for empty ranks
            batch_size: int = (
                sparse_features.id_list_features.stride()
                if sparse_features.id_list_features is not None
                # pyre-fixme[16]: `Optional` has no attribute `stride`.
                else sparse_features.id_score_list_features.stride()
            )
            return self._dummy_embs_tensor.view(batch_size, 0)
        elif len(embeddings) == 1:
            # Skip the concat when there is only one group.
            return embeddings[0]
        else:
            return torch.cat(embeddings, dim=1)

    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Collect state from every child module into one destination dict."""
        if destination is None:
            destination = OrderedDict()
            # pyre-ignore [16]
            destination._metadata = OrderedDict()
        for emb_module in self._emb_modules:
            emb_module.state_dict(destination, prefix, keep_vars)
        for emb_module in self._score_emb_modules:
            emb_module.state_dict(destination, prefix, keep_vars)
        return destination

    def load_state_dict(
        self,
        state_dict: "OrderedDict[str, torch.Tensor]",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Load into both module lists; merge their missing/unexpected keys."""
        m1, u1 = _load_state_dict(self._emb_modules, state_dict)
        m2, u2 = _load_state_dict(self._score_emb_modules, state_dict)
        return _IncompatibleKeys(missing_keys=m1 + m2, unexpected_keys=u1 + u2)

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        for emb_module in self._emb_modules:
            yield from emb_module.named_parameters(prefix, recurse)
        for emb_module in self._score_emb_modules:
            yield from emb_module.named_parameters(prefix, recurse)

    def named_buffers(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        for emb_module in self._emb_modules:
            yield from emb_module.named_buffers(prefix, recurse)
        for emb_module in self._score_emb_modules:
            yield from emb_module.named_buffers(prefix, recurse)

    def sparse_grad_parameter_names(
        self, destination: Optional[List[str]] = None, prefix: str = ""
    ) -> List[str]:
        """Accumulate sparse-gradient parameter names from all child modules."""
        destination = [] if destination is None else destination
        for emb_module in self._emb_modules:
            emb_module.sparse_grad_parameter_names(destination, prefix)
        for emb_module in self._score_emb_modules:
            emb_module.sparse_grad_parameter_names(destination, prefix)
        return destination
| [
"torchrec.distributed.types.TensorProperties",
"torchrec.distributed.types.Shard",
"torchrec.distributed.types.ShardedTensor._init_from_local_shards_and_global_metadata",
"torchrec.sparse.jagged_tensor.KeyedTensor",
"torchrec.distributed.utils.append_prefix",
"torchrec.distributed.types.ShardMetadata"
] | [((1702, 1729), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1719, 1729), False, 'import logging\n'), ((12915, 12932), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12926, 12932), False, 'from collections import defaultdict, OrderedDict\n'), ((6487, 6586), 'itertools.zip_longest', 'itertools.zip_longest', (['config.embedding_tables', 'split_optimizer_states', 'split_embedding_weights'], {}), '(config.embedding_tables, split_optimizer_states,\n split_embedding_weights)\n', (6508, 6586), False, 'import itertools\n'), ((12556, 12569), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12567, 12569), False, 'from collections import defaultdict, OrderedDict\n'), ((12629, 12642), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12640, 12642), False, 'from collections import defaultdict, OrderedDict\n'), ((14174, 14338), 'torchrec.distributed.types.ShardedTensor._init_from_local_shards_and_global_metadata', 'ShardedTensor._init_from_local_shards_and_global_metadata', ([], {'local_shards': 'key_to_local_shards[key]', 'sharded_tensor_metadata': 'global_metadata', 'process_group': 'pg'}), '(local_shards=\n key_to_local_shards[key], sharded_tensor_metadata=global_metadata,\n process_group=pg)\n', (14231, 14338), False, 'from torchrec.distributed.types import Shard, ShardedTensorMetadata, ShardMetadata, ShardedTensor, TensorProperties\n'), ((14681, 14760), 'torch._C._log_api_usage_once', 'torch._C._log_api_usage_once', (['f"""torchrec.distributed.{self.__class__.__name__}"""'], {}), "(f'torchrec.distributed.{self.__class__.__name__}')\n", (14709, 14760), False, 'import torch\n'), ((15033, 15048), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15046, 15048), False, 'from torch import nn\n'), ((16461, 16498), 'torch.cat', 'torch.cat', (['unpooled_embeddings'], {'dim': '(0)'}), '(unpooled_embeddings, dim=0)\n', (16470, 16498), False, 'import torch\n'), ((18136, 18215), 
'torch._C._log_api_usage_once', 'torch._C._log_api_usage_once', (['f"""torchrec.distributed.{self.__class__.__name__}"""'], {}), "(f'torchrec.distributed.{self.__class__.__name__}')\n", (18164, 18215), False, 'import torch\n'), ((27214, 27229), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (27227, 27229), False, 'from torch import nn\n'), ((29250, 29302), 'torch.nn.modules.module._IncompatibleKeys', '_IncompatibleKeys', ([], {'missing_keys': 'm', 'unexpected_keys': 'u'}), '(missing_keys=m, unexpected_keys=u)\n', (29267, 29302), False, 'from torch.nn.modules.module import _IncompatibleKeys\n'), ((31170, 31249), 'torch._C._log_api_usage_once', 'torch._C._log_api_usage_once', (['f"""torchrec.distributed.{self.__class__.__name__}"""'], {}), "(f'torchrec.distributed.{self.__class__.__name__}')\n", (31198, 31249), False, 'import torch\n'), ((31522, 31537), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (31535, 31537), False, 'from torch import nn\n'), ((35888, 35967), 'torch._C._log_api_usage_once', 'torch._C._log_api_usage_once', (['f"""torchrec.distributed.{self.__class__.__name__}"""'], {}), "(f'torchrec.distributed.{self.__class__.__name__}')\n", (35916, 35967), False, 'import torch\n'), ((38928, 39019), 'torchrec.sparse.jagged_tensor.KeyedTensor', 'KeyedTensor', ([], {'keys': 'self._emb_names', 'values': 'values', 'length_per_key': 'self._lengths_per_emb'}), '(keys=self._emb_names, values=values, length_per_key=self.\n _lengths_per_emb)\n', (38939, 39019), False, 'from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor\n'), ((46806, 46897), 'torchrec.sparse.jagged_tensor.KeyedTensor', 'KeyedTensor', ([], {'keys': 'self._emb_names', 'values': 'values', 'length_per_key': 'self._lengths_per_emb'}), '(keys=self._emb_names, values=values, length_per_key=self.\n _lengths_per_emb)\n', (46817, 46897), False, 'from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor\n'), ((52724, 52739), 'torch.nn.ModuleList', 
'nn.ModuleList', ([], {}), '()\n', (52737, 52739), False, 'from torch import nn\n'), ((53020, 53035), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (53033, 53035), False, 'from torch import nn\n'), ((57050, 57114), 'torch.nn.modules.module._IncompatibleKeys', '_IncompatibleKeys', ([], {'missing_keys': '(m1 + m2)', 'unexpected_keys': '(u1 + u2)'}), '(missing_keys=m1 + m2, unexpected_keys=u1 + u2)\n', (57067, 57114), False, 'from torch.nn.modules.module import _IncompatibleKeys\n'), ((5087, 5402), 'torchrec.distributed.types.TensorProperties', 'TensorProperties', ([], {'dtype': 'global_metadata.tensor_properties.dtype', 'layout': 'global_metadata.tensor_properties.layout', 'requires_grad': 'global_metadata.tensor_properties.requires_grad', 'memory_format': 'global_metadata.tensor_properties.memory_format', 'pin_memory': 'global_metadata.tensor_properties.pin_memory'}), '(dtype=global_metadata.tensor_properties.dtype, layout=\n global_metadata.tensor_properties.layout, requires_grad=global_metadata\n .tensor_properties.requires_grad, memory_format=global_metadata.\n tensor_properties.memory_format, pin_memory=global_metadata.\n tensor_properties.pin_memory)\n', (5103, 5402), False, 'from torchrec.distributed.types import Shard, ShardedTensorMetadata, ShardMetadata, ShardedTensor, TensorProperties\n'), ((7734, 7777), 'copy.deepcopy', 'copy.deepcopy', (['table_config.global_metadata'], {}), '(table_config.global_metadata)\n', (7747, 7777), False, 'import copy\n'), ((8480, 8659), 'torchrec.distributed.types.ShardedTensor._init_from_local_shards_and_global_metadata', 'ShardedTensor._init_from_local_shards_and_global_metadata', ([], {'local_shards': 'local_weight_shards', 'sharded_tensor_metadata': 'table_config_global_metadata', 'process_group': 'self._pg'}), '(local_shards=\n local_weight_shards, sharded_tensor_metadata=\n table_config_global_metadata, process_group=self._pg)\n', (8537, 8659), False, 'from torchrec.distributed.types import Shard, 
ShardedTensorMetadata, ShardMetadata, ShardedTensor, TensorProperties\n'), ((24023, 24069), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (24036, 24069), False, 'from torchrec.distributed.utils import append_prefix\n'), ((27639, 27711), 'torch.empty', 'torch.empty', (['[0]'], {'dtype': 'torch.float32', 'device': 'device', 'requires_grad': '(True)'}), '([0], dtype=torch.float32, device=device, requires_grad=True)\n', (27650, 27711), False, 'import torch\n'), ((28786, 28799), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (28797, 28799), False, 'from collections import defaultdict, OrderedDict\n'), ((28867, 28880), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (28878, 28880), False, 'from collections import defaultdict, OrderedDict\n'), ((43241, 43287), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (43254, 43287), False, 'from torchrec.distributed.utils import append_prefix\n'), ((48838, 48894), 'fbgemm_gpu.split_table_batched_embeddings_ops.rounded_row_size_in_bytes', 'rounded_row_size_in_bytes', (['table.local_cols', 'sparse_type'], {}), '(table.local_cols, sparse_type)\n', (48863, 48894), False, 'from fbgemm_gpu.split_table_batched_embeddings_ops import EmbeddingLocation, ComputeDevice, PoolingMode, DenseTableBatchedEmbeddingBagsCodegen, SplitTableBatchedEmbeddingBagsCodegen, IntNBitTableBatchedEmbeddingBagsCodegen, rounded_row_size_in_bytes\n'), ((50138, 50236), 'torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf', 'torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf', (['weight', 'DATA_TYPE_NUM_BITS[data_type]'], {}), '(weight,\n DATA_TYPE_NUM_BITS[data_type])\n', (50193, 50236), False, 'import torch\n'), ((53675, 53747), 'torch.empty', 'torch.empty', (['[0]'], {'dtype': 'torch.float32', 'device': 'device', 'requires_grad': 
'(True)'}), '([0], dtype=torch.float32, device=device, requires_grad=True)\n', (53686, 53747), False, 'import torch\n'), ((56396, 56409), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (56407, 56409), False, 'from collections import defaultdict, OrderedDict\n'), ((56477, 56490), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (56488, 56490), False, 'from collections import defaultdict, OrderedDict\n'), ((4739, 4843), 'torchrec.distributed.types.ShardMetadata', 'ShardMetadata', ([], {'shard_sizes': '[shard.shard_sizes[0]]', 'shard_offsets': '[offset]', 'placement': 'shard.placement'}), '(shard_sizes=[shard.shard_sizes[0]], shard_offsets=[offset],\n placement=shard.placement)\n', (4752, 4843), False, 'from torchrec.distributed.types import Shard, ShardedTensorMetadata, ShardMetadata, ShardedTensor, TensorProperties\n'), ((13876, 13920), 'torchrec.distributed.types.Shard', 'Shard', (['param', 'embedding_table.local_metadata'], {}), '(param, embedding_table.local_metadata)\n', (13881, 13920), False, 'from torchrec.distributed.types import Shard, ShardedTensorMetadata, ShardMetadata, ShardedTensor, TensorProperties\n'), ((25297, 25344), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{combined_key}.weight"""'], {}), "(prefix, f'{combined_key}.weight')\n", (25310, 25344), False, 'from torchrec.distributed.utils import append_prefix\n'), ((25346, 25390), 'typing.cast', 'cast', (['nn.Parameter', 'self._emb_module.weights'], {}), '(nn.Parameter, self._emb_module.weights)\n', (25350, 25390), False, 'from typing import List, Optional, Dict, Any, Union, Tuple, cast, Iterator\n'), ((28533, 28554), 'torch.cat', 'torch.cat', (['embeddings'], {}), '(embeddings)\n', (28542, 28554), False, 'import torch\n'), ((34148, 34183), 'torch.cat', 'torch.cat', (['pooled_embeddings'], {'dim': '(1)'}), '(pooled_embeddings, dim=1)\n', (34157, 34183), False, 'import torch\n'), ((38675, 38707), 'torch.is_floating_point', 
'torch.is_floating_point', (['weights'], {}), '(weights)\n', (38698, 38707), False, 'import torch\n'), ((44518, 44565), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{combined_key}.weight"""'], {}), "(prefix, f'{combined_key}.weight')\n", (44531, 44565), False, 'from torchrec.distributed.utils import append_prefix\n'), ((44567, 44611), 'typing.cast', 'cast', (['nn.Parameter', 'self._emb_module.weights'], {}), '(nn.Parameter, self._emb_module.weights)\n', (44571, 44611), False, 'from typing import List, Optional, Dict, Any, Union, Tuple, cast, Iterator\n'), ((56136, 56164), 'torch.cat', 'torch.cat', (['embeddings'], {'dim': '(1)'}), '(embeddings, dim=1)\n', (56145, 56164), False, 'import torch\n'), ((2884, 2898), 'typing.cast', 'cast', (['str', 'key'], {}), '(str, key)\n', (2888, 2898), False, 'from typing import List, Optional, Dict, Any, Union, Tuple, cast, Iterator\n'), ((5668, 5721), 'torch.Size', 'torch.Size', (['[global_metadata.size[0] * len_rw_shards]'], {}), '([global_metadata.size[0] * len_rw_shards])\n', (5678, 5721), False, 'import torch\n'), ((8145, 8180), 'torchrec.distributed.types.Shard', 'Shard', (['local_weight', 'local_metadata'], {}), '(local_weight, local_metadata)\n', (8150, 8180), False, 'from torchrec.distributed.types import Shard, ShardedTensorMetadata, ShardMetadata, ShardedTensor, TensorProperties\n'), ((10501, 10676), 'torchrec.distributed.types.ShardedTensor._init_from_local_shards_and_global_metadata', 'ShardedTensor._init_from_local_shards_and_global_metadata', ([], {'local_shards': 'momentum_local_shards', 'sharded_tensor_metadata': 'sharded_tensor_metadata', 'process_group': 'self._pg'}), '(local_shards=\n momentum_local_shards, sharded_tensor_metadata=sharded_tensor_metadata,\n process_group=self._pg)\n', (10558, 10676), False, 'from torchrec.distributed.types import Shard, ShardedTensorMetadata, ShardMetadata, ShardedTensor, TensorProperties\n'), ((17356, 17402), 
'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (17369, 17402), False, 'from torchrec.distributed.utils import append_prefix\n'), ((17722, 17768), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (17735, 17768), False, 'from torchrec.distributed.utils import append_prefix\n'), ((35102, 35148), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (35115, 35148), False, 'from torchrec.distributed.utils import append_prefix\n'), ((35468, 35514), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (35481, 35514), False, 'from torchrec.distributed.utils import append_prefix\n'), ((47224, 47270), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (47237, 47270), False, 'from torchrec.distributed.utils import append_prefix\n'), ((24833, 24858), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (24856, 24858), False, 'import torch\n'), ((33753, 33785), 'torch.is_floating_point', 'torch.is_floating_point', (['weights'], {}), '(weights)\n', (33776, 33785), False, 'import torch\n'), ((44054, 44079), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (44077, 44079), False, 'import torch\n'), ((10390, 10446), 'torchrec.distributed.types.Shard', 'Shard', (['optimizer_state[momentum_idx - 1]', 'local_metadata'], {}), '(optimizer_state[momentum_idx - 1], local_metadata)\n', (10395, 10446), False, 'from torchrec.distributed.types import Shard, ShardedTensorMetadata, ShardMetadata, ShardedTensor, TensorProperties\n'), ((49408, 49473), 
'fbgemm_gpu.split_table_batched_embeddings_ops.rounded_row_size_in_bytes', 'rounded_row_size_in_bytes', (['shard_meta.shard_sizes[1]', 'sparse_type'], {}), '(shard_meta.shard_sizes[1], sparse_type)\n', (49433, 49473), False, 'from fbgemm_gpu.split_table_batched_embeddings_ops import EmbeddingLocation, ComputeDevice, PoolingMode, DenseTableBatchedEmbeddingBagsCodegen, SplitTableBatchedEmbeddingBagsCodegen, IntNBitTableBatchedEmbeddingBagsCodegen, rounded_row_size_in_bytes\n'), ((15441, 15529), 'torch.empty', 'torch.empty', (['embedding_config.local_rows', 'embedding_config.local_cols'], {'device': 'device'}), '(embedding_config.local_rows, embedding_config.local_cols,\n device=device)\n', (15452, 15529), False, 'import torch\n'), ((32172, 32260), 'torch.empty', 'torch.empty', (['embedding_config.local_rows', 'embedding_config.local_cols'], {'device': 'device'}), '(embedding_config.local_rows, embedding_config.local_cols,\n device=device)\n', (32183, 32260), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Tuple, cast
import torch
import torch.distributed as dist
from torchrec.distributed.embedding_types import (
ShardedEmbeddingTable,
EmbeddingComputeKernel,
)
from torchrec.distributed.tw_sharding import TwEmbeddingSharding
from torchrec.distributed.types import (
ShardedTensorMetadata,
ShardMetadata,
ParameterSharding,
)
from torchrec.modules.embedding_configs import EmbeddingTableConfig
class CwEmbeddingSharding(TwEmbeddingSharding):
    """
    Shards embedding bags column-wise, i.e., a given embedding table is
    distributed by a specified number of columns and table slices are placed
    on all ranks.
    """

    def __init__(
        self,
        embedding_configs: List[
            Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
        ],
        # pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
        pg: dist.ProcessGroup,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__(embedding_configs, pg, device)

    def _shard(
        self,
        embedding_configs: List[
            Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
        ],
    ) -> List[List[ShardedEmbeddingTable]]:
        """Distribute each table's column shards to their assigned ranks.

        Returns one list of ShardedEmbeddingTable per rank. A table appears
        once per entry in its ParameterSharding.ranks, carrying the full row
        count but only that shard's column slice.
        """
        world_size = self._pg.size()
        tables_per_rank: List[List[ShardedEmbeddingTable]] = [
            [] for _ in range(world_size)
        ]
        for config in embedding_configs:
            # pyre-fixme [16]
            shards: List[ShardMetadata] = config[1].sharding_spec.shards
            # Global metadata describes the full (unsharded) table so every
            # rank can reconstruct the overall ShardedTensor layout.
            global_metadata = ShardedTensorMetadata(
                shards_metadata=shards,
                size=torch.Size([config[0].num_embeddings, config[0].embedding_dim]),
            )
            # pyre-fixme [6]
            for i, rank in enumerate(config[1].ranks):
                tables_per_rank[rank].append(
                    ShardedEmbeddingTable(
                        num_embeddings=config[0].num_embeddings,
                        embedding_dim=config[0].embedding_dim,
                        name=config[0].name,
                        embedding_names=config[0].embedding_names,
                        data_type=config[0].data_type,
                        feature_names=config[0].feature_names,
                        pooling=config[0].pooling,
                        is_weighted=config[0].is_weighted,
                        has_feature_processor=config[0].has_feature_processor,
                        # Column-wise sharding: full rows, shard-local columns.
                        local_rows=config[0].num_embeddings,
                        local_cols=shards[i].shard_sizes[1],
                        compute_kernel=EmbeddingComputeKernel(config[1].compute_kernel),
                        local_metadata=shards[i],
                        global_metadata=global_metadata,
                    )
                )
        return tables_per_rank
| [
"torchrec.distributed.embedding_types.EmbeddingComputeKernel"
] | [((1944, 2007), 'torch.Size', 'torch.Size', (['[config[0].num_embeddings, config[0].embedding_dim]'], {}), '([config[0].num_embeddings, config[0].embedding_dim])\n', (1954, 2007), False, 'import torch\n'), ((2905, 2953), 'torchrec.distributed.embedding_types.EmbeddingComputeKernel', 'EmbeddingComputeKernel', (['config[1].compute_kernel'], {}), '(config[1].compute_kernel)\n', (2927, 2953), False, 'from torchrec.distributed.embedding_types import ShardedEmbeddingTable, EmbeddingComputeKernel\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import List, Optional, Tuple
import torch
import torch.fx
from torch.testing import FileCheck # @manual
from torchrec.distributed.types import LazyAwaitable
from torchrec.fx import symbolic_trace
from torchrec.sparse.jagged_tensor import (
JaggedTensor,
KeyedJaggedTensor,
)
# Register builtin "len" as an fx leaf function so symbolic tracing records it
# as an opaque call instead of attempting to trace through it (see the "len"
# FileCheck expectations in the tests below).
torch.fx.wrap("len")
class TestTracer(unittest.TestCase):
maxDiff: Optional[int] = None
def test_jagged_tensor(self) -> None:
class ModuleCreateAndAccessJaggedTensor(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: int) -> int:
features = JaggedTensor(
values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
weights=torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]),
offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
)
return (
features.values().numel()
+ features.weights().numel()
+ features.lengths().numel()
+ features.offsets().numel()
)
class ModuleUseJaggedTensorAsInputAndOutput(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: JaggedTensor) -> JaggedTensor:
return JaggedTensor(
input.values(),
input.weights(),
lengths=input.lengths(),
offsets=input.offsets(),
)
class ModuleUseJaggedTensorAsInput(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: JaggedTensor) -> int:
return (
input.values().numel()
+ input.weights().numel()
+ input.lengths().numel()
+ input.offsets().numel()
)
class ModuleUseJaggedTensorAsOutput(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(
self,
values: torch.Tensor,
weights: torch.Tensor,
lengths: torch.Tensor,
) -> JaggedTensor:
return JaggedTensor(values, weights, lengths)
# Case 1: JaggedTensor is only used as an output of the root module.
m = ModuleUseJaggedTensorAsOutput()
gm = symbolic_trace(m)
FileCheck().check("JaggedTensor").check("return jagged_tensor").run(gm.code)
values = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7])
weights = torch.tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
lengths = torch.tensor([0, 2, 2, 3, 4, 5, 8])
ref_jt = m(values, weights, lengths)
traced_jt = gm(values, weights, lengths)
self.assertTrue(torch.equal(traced_jt.values(), ref_jt.values()))
self.assertTrue(torch.equal(traced_jt.weights(), ref_jt.weights()))
self.assertTrue(torch.equal(traced_jt.lengths(), ref_jt.lengths()))
# Case 2: JaggedTensor is only used as an input of the root module.
m = ModuleUseJaggedTensorAsInput()
gm = symbolic_trace(m)
FileCheck().check("values()").check("numel()").check("weights").check(
"lengths"
).check("offsets").run(gm.code)
input = JaggedTensor(
values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
weights=torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]),
offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
)
ref_out = m(input)
traced_out = gm(input)
self.assertEqual(ref_out, traced_out)
# Case 3: JaggedTensor is used as both an input and an output of the root module.
m = ModuleUseJaggedTensorAsInputAndOutput()
gm = symbolic_trace(m)
FileCheck().check("values()").check("weights").check("lengths").check(
"offsets"
).check("JaggedTensor").run(gm.code)
ref_out = m(input)
traced_out = gm(input)
self.assertTrue(torch.equal(traced_out.values(), ref_out.values()))
self.assertTrue(torch.equal(traced_out.weights(), ref_out.weights()))
self.assertTrue(torch.equal(traced_out.lengths(), ref_out.lengths()))
# Case 4: JaggedTensor is only used within the root module and not as part of
# the root module's input/output interface.
m = ModuleCreateAndAccessJaggedTensor()
gm = symbolic_trace(m)
FileCheck().check("return 29").check_not("JaggedTensor").run(gm.code)
ref_out = m(8)
traced_out = gm(8)
self.assertEqual(ref_out, traced_out)
def test_keyed_jagged_tensor(self) -> None:
    """Verify that ``KeyedJaggedTensor`` survives ``torch.fx`` symbolic tracing
    when it appears as the root module's output (Case 1), input (Case 2),
    both (Case 3), or only internally (Case 4).

    Each case checks the generated ``gm.code`` with ``FileCheck`` and then
    compares eager vs. traced execution results.
    """

    class ModuleCreateAndAccessKeyedJaggedTensor(torch.nn.Module):
        # Builds a KJT inside forward() from constants and returns only
        # scalar statistics, so tracing can fold the result into a constant.
        def __init__(self):
            super().__init__()

        def forward(self, input: int) -> int:
            features = KeyedJaggedTensor.from_offsets_sync(
                values=torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
                weights=torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]),
                keys=["index_0", "index_1"],
                offsets=torch.IntTensor([0, 0, 2, 2, 3, 4, 5, 5, 8]),
            )
            # 2 keys + 8 values + 8 weights + 8 lengths + 9 offsets == 35,
            # which is the constant FileCheck looks for in Case 4 below.
            return (
                len(features.keys())
                + features.values().numel()
                + features.weights().numel()
                + features.lengths().numel()
                + features.offsets().numel()
            )

    class ModuleUseKeyedJaggedTensorAsInputAndOutput(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(
            self, input: KeyedJaggedTensor
        ) -> Tuple[KeyedJaggedTensor, int]:
            # Reconstructs a new KJT from the input's components and also
            # returns the private _stride so tracing of attribute access
            # is exercised.
            output = KeyedJaggedTensor(
                input.keys(),
                input.values(),
                input.weights(),
                lengths=input.lengths(),
                offsets=input.offsets(),
            )
            return output, output._stride

    class ModuleUseKeyedJaggedTensorAsInput(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, input: KeyedJaggedTensor) -> int:
            return (
                len(input.keys())
                + input.values().numel()
                + input.weights().numel()
                + input.lengths().numel()
                + input.offsets().numel()
            )

    class ModuleUseKeyedJaggedTensorAsOutput(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(
            self,
            keys: List[str],
            values: torch.Tensor,
            weights: torch.Tensor,
            lengths: torch.Tensor,
        ) -> Tuple[KeyedJaggedTensor, int]:
            output = KeyedJaggedTensor(keys, values, weights, lengths)
            return output, output._stride

    # Case 1: KeyedJaggedTensor is only used as an output of the root module.
    m = ModuleUseKeyedJaggedTensorAsOutput()
    gm = symbolic_trace(m)
    FileCheck().check("KeyedJaggedTensor").check(
        "return (keyed_jagged_tensor,"
    ).run(gm.code)
    values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
    weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
    keys = ["index_0", "index_1"]
    lengths = torch.IntTensor([2, 0, 1, 1, 1, 3])
    ref_out = m(keys, values, weights, lengths)
    traced_out = gm(keys, values, weights, lengths)
    self.assertEqual(ref_out[1], traced_out[1])
    self.assertTrue(torch.equal(traced_out[0].offsets(), ref_out[0].offsets()))
    # Case 2: KeyedJaggedTensor is only used as an input of the root module.
    m = ModuleUseKeyedJaggedTensorAsInput()
    gm = symbolic_trace(m)
    FileCheck().check("KeyedJaggedTensor").check("keys()").check("len").check(
        "values()"
    ).check("numel()").run(gm.code)
    input = KeyedJaggedTensor.from_offsets_sync(
        values=torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
        weights=torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]),
        keys=["index_0", "index_1"],
        offsets=torch.IntTensor([0, 0, 2, 2, 3, 4, 5, 5, 8]),
    )
    ref_out = m(input)
    traced_out = gm(input)
    self.assertEqual(ref_out, traced_out)
    # Case 3: KeyedJaggedTensor is used as both an input and an output of the root module.
    m = ModuleUseKeyedJaggedTensorAsInputAndOutput()
    gm = symbolic_trace(m)
    FileCheck().check("KeyedJaggedTensor").check("keys()").check("values()").check(
        "._stride"
    ).run(gm.code)
    input = KeyedJaggedTensor.from_offsets_sync(
        values=torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]),
        weights=torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]),
        keys=["index_0", "index_1"],
        offsets=torch.IntTensor([0, 0, 2, 2, 3, 4, 5, 5, 8]),
    )
    ref_out = m(input)
    traced_out = gm(input)
    self.assertEqual(ref_out[1], traced_out[1])
    # Case 4: KeyedJaggedTensor is only used within the root module and not as part of
    # the root module's input/output interface.
    m = ModuleCreateAndAccessKeyedJaggedTensor()
    gm = symbolic_trace(m)
    FileCheck().check("return 35").check_not("KeyedJaggedTensor").run(gm.code)
    ref_out = m(8)
    traced_out = gm(8)
    self.assertEqual(ref_out, traced_out)
def test_trace_async_module(self) -> None:
    """Verify symbolic tracing through a module that returns a
    ``LazyAwaitable``: the traced graph must still contain the pre-wait
    (+ 2), the awaitable construction, and the post-call (* 10) ops, and
    must produce the same result as eager execution.
    """

    class NeedWait(LazyAwaitable[torch.Tensor]):
        # _wait_impl adds 3, so a result that skipped wait() would differ
        # from the eager reference and fail the final equality check.
        def __init__(self, obj: torch.Tensor) -> None:
            super().__init__()
            self._obj = obj

        def _wait_impl(self) -> torch.Tensor:
            return self._obj + 3

    class MyAsyncModule(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, input) -> LazyAwaitable[torch.Tensor]:
            return NeedWait(input + 2)

    # Test automated LazyAwaitable type `wait()`
    class AutoModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.sparse = MyAsyncModule()

        def forward(self, input: torch.Tensor) -> torch.Tensor:
            # NOTE(review): the awaitable from self.sparse is presumably
            # waited implicitly when consumed by torch.add — confirm
            # against LazyAwaitable's implementation.
            return torch.add(self.sparse(input), input * 10)

    auto_model = AutoModel()
    auto_gm = symbolic_trace(auto_model)
    FileCheck().check("+ 2").check("NeedWait").check("* 10").run(auto_gm.code)
    input = torch.randn(3, 4)
    ref_out = auto_model(input)
    traced_out = auto_gm(input)
    self.assertTrue(torch.equal(ref_out, traced_out))
| [
"torchrec.sparse.jagged_tensor.JaggedTensor",
"torchrec.sparse.jagged_tensor.KeyedJaggedTensor",
"torchrec.fx.symbolic_trace"
] | [((545, 565), 'torch.fx.wrap', 'torch.fx.wrap', (['"""len"""'], {}), "('len')\n", (558, 565), False, 'import torch\n'), ((2737, 2754), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (2751, 2754), False, 'from torchrec.fx import symbolic_trace\n'), ((2858, 2896), 'torch.tensor', 'torch.tensor', (['[0, 1, 2, 3, 4, 5, 6, 7]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7])\n', (2870, 2896), False, 'import torch\n'), ((2915, 2969), 'torch.tensor', 'torch.tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (2927, 2969), False, 'import torch\n'), ((2988, 3023), 'torch.tensor', 'torch.tensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (3000, 3023), False, 'import torch\n'), ((3479, 3496), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (3493, 3496), False, 'from torchrec.fx import symbolic_trace\n'), ((4131, 4148), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (4145, 4148), False, 'from torchrec.fx import symbolic_trace\n'), ((4786, 4803), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (4800, 4803), False, 'from torchrec.fx import symbolic_trace\n'), ((7545, 7562), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (7559, 7562), False, 'from torchrec.fx import symbolic_trace\n'), ((7701, 7755), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (7713, 7755), False, 'import torch\n'), ((7774, 7828), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (7786, 7828), False, 'import torch\n'), ((7885, 7920), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1, 1, 1, 3]'], {}), '([2, 0, 1, 1, 1, 3])\n', (7900, 7920), False, 'import torch\n'), ((8310, 8327), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (8324, 8327), False, 
'from torchrec.fx import symbolic_trace\n'), ((9066, 9083), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (9080, 9083), False, 'from torchrec.fx import symbolic_trace\n'), ((9859, 9876), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (9873, 9876), False, 'from torchrec.fx import symbolic_trace\n'), ((11001, 11027), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['auto_model'], {}), '(auto_model)\n', (11015, 11027), False, 'from torchrec.fx import symbolic_trace\n'), ((11128, 11145), 'torch.randn', 'torch.randn', (['(3)', '(4)'], {}), '(3, 4)\n', (11139, 11145), False, 'import torch\n'), ((11242, 11274), 'torch.equal', 'torch.equal', (['ref_out', 'traced_out'], {}), '(ref_out, traced_out)\n', (11253, 11274), False, 'import torch\n'), ((2563, 2601), 'torchrec.sparse.jagged_tensor.JaggedTensor', 'JaggedTensor', (['values', 'weights', 'lengths'], {}), '(values, weights, lengths)\n', (2575, 2601), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor\n'), ((3688, 3726), 'torch.tensor', 'torch.tensor', (['[0, 1, 2, 3, 4, 5, 6, 7]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7])\n', (3700, 3726), False, 'import torch\n'), ((3748, 3802), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (3760, 3802), False, 'import torch\n'), ((3824, 3859), 'torch.tensor', 'torch.tensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (3836, 3859), False, 'import torch\n'), ((7304, 7353), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', (['keys', 'values', 'weights', 'lengths'], {}), '(keys, values, weights, lengths)\n', (7321, 7353), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor\n'), ((8547, 8601), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (8559, 8601), False, 'import torch\n'), 
((8623, 8677), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (8635, 8677), False, 'import torch\n'), ((8740, 8784), 'torch.IntTensor', 'torch.IntTensor', (['[0, 0, 2, 2, 3, 4, 5, 5, 8]'], {}), '([0, 0, 2, 2, 3, 4, 5, 5, 8])\n', (8755, 8784), False, 'import torch\n'), ((9290, 9344), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (9302, 9344), False, 'import torch\n'), ((9366, 9420), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (9378, 9420), False, 'import torch\n'), ((9483, 9527), 'torch.IntTensor', 'torch.IntTensor', (['[0, 0, 2, 2, 3, 4, 5, 5, 8]'], {}), '([0, 0, 2, 2, 3, 4, 5, 5, 8])\n', (9498, 9527), False, 'import torch\n'), ((934, 972), 'torch.tensor', 'torch.tensor', (['[0, 1, 2, 3, 4, 5, 6, 7]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7])\n', (946, 972), False, 'import torch\n'), ((1002, 1056), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (1014, 1056), False, 'import torch\n'), ((1086, 1121), 'torch.tensor', 'torch.tensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (1098, 1121), False, 'import torch\n'), ((5307, 5361), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (5319, 5361), False, 'import torch\n'), ((5391, 5445), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (5403, 5445), False, 'import torch\n'), ((5524, 5568), 'torch.IntTensor', 'torch.IntTensor', (['[0, 0, 2, 2, 3, 4, 5, 5, 8]'], {}), '([0, 0, 2, 2, 3, 4, 5, 5, 8])\n', (5539, 5568), False, 'import torch\n'), ((2763, 2774), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', 
(2772, 2774), False, 'from torch.testing import FileCheck\n'), ((4812, 4823), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (4821, 4823), False, 'from torch.testing import FileCheck\n'), ((7571, 7582), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (7580, 7582), False, 'from torch.testing import FileCheck\n'), ((9885, 9896), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (9894, 9896), False, 'from torch.testing import FileCheck\n'), ((11036, 11047), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (11045, 11047), False, 'from torch.testing import FileCheck\n'), ((9092, 9103), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (9101, 9103), False, 'from torch.testing import FileCheck\n'), ((3505, 3516), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (3514, 3516), False, 'from torch.testing import FileCheck\n'), ((4157, 4168), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (4166, 4168), False, 'from torch.testing import FileCheck\n'), ((8336, 8347), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (8345, 8347), False, 'from torch.testing import FileCheck\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import multiprocessing
import os
import unittest
from typing import Callable
import numpy
import torch
import torch.distributed as dist
import torchrec.distributed.comm_ops as comm_ops
from torchrec.test_utils import seed_and_log, get_free_port
class TestAllToAll(unittest.TestCase):
    """Multi-process tests for torchrec's AlltoAll communication ops.

    Each ``test_*`` spawns ``WORLD_SIZE`` worker processes (one per rank)
    that join an NCCL process group and run the corresponding ``_test_*``
    classmethod; a test passes when every worker exits with code 0.
    """

    @seed_and_log
    def setUp(self) -> None:
        # Rendezvous and transport configuration for the spawned workers.
        os.environ["MASTER_ADDR"] = str("localhost")
        os.environ["MASTER_PORT"] = str(get_free_port())
        os.environ["GLOO_DEVICE_TRANSPORT"] = "TCP"
        os.environ["NCCL_SOCKET_IFNAME"] = "lo"
        self.WORLD_SIZE = 2

    def tearDown(self) -> None:
        # Remove the env vars set in setUp so later tests start clean.
        del os.environ["GLOO_DEVICE_TRANSPORT"]
        del os.environ["NCCL_SOCKET_IFNAME"]
        super().tearDown()

    def _run_multi_process_test(
        self,
        world_size: int,
        backend: str,
        callable: Callable[[], None],
    ) -> None:
        # Spawn one worker per rank; each worker receives
        # (rank, world_size, backend) and must exit cleanly.
        processes = []
        ctx = multiprocessing.get_context("spawn")
        for rank in range(world_size):
            p = ctx.Process(
                target=callable,
                args=(
                    rank,
                    world_size,
                    backend,
                ),
            )
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
            self.assertEqual(0, p.exitcode)

    @classmethod
    def _test_alltoallv(
        cls,
        rank: int,
        world_size: int,
        backend: str,
    ) -> None:
        # Worker body: exchange two pooled-embedding tensors via alltoallv.
        dist.init_process_group(rank=rank, world_size=world_size, backend=backend)
        device = torch.device(f"cuda:{rank}")
        torch.cuda.set_device(device)
        B_global = 10  # global batch size
        D0 = 8  # embedding dim of table 0
        D1 = 9  # embedding dim of table 1
        input_embedding0 = torch.rand(
            (B_global, D0),
            device=device,
            requires_grad=True,
        )
        input_embedding1 = torch.rand(
            (B_global, D1),
            device=device,
            requires_grad=True,
        )
        input_embeddings = [input_embedding0, input_embedding1]
        out_split = [17, 17]  # D0 + D1 = 17 columns received from each rank
        a2a_req = comm_ops.alltoallv(input_embeddings, out_split)
        v_embs_out = a2a_req.wait()
        res = torch.cat(v_embs_out, dim=1).cpu()
        # 10 global rows / 2 ranks = 5 local rows; 17 cols * 2 ranks = 34.
        assert tuple(res.size()) == (5, 34)
        dist.destroy_process_group()

    # pyre-fixme[56]: Pyre was not able to infer the type of argument
    # `torch.cuda.device_count() = 0` to decorator factory `unittest.skipIf`.
    @unittest.skipIf(
        torch.cuda.device_count() < 2, "Need at least two ranks to run this test"
    )
    def test_alltoallv(self) -> None:
        self._run_multi_process_test(
            world_size=self.WORLD_SIZE,
            backend="nccl",
            # pyre-ignore [6]
            callable=self._test_alltoallv,
        )

    @classmethod
    def _test_alltoall_sequence(
        cls,
        rank: int,
        world_size: int,
        backend: str,
    ) -> None:
        # Worker body: exchange sequence embeddings between a data-parallel
        # and a model-parallel layout, then check that backward with the
        # output as the incoming gradient reproduces the input values.
        dist.init_process_group(rank=rank, world_size=world_size, backend=backend)
        device = torch.device(f"cuda:{rank}")
        torch.cuda.set_device(device)
        ranks = 2
        tables_mp = [[0], [1, 2]]  # model-parallel table placement per rank
        lengths_dp = [
            numpy.array([[1, 2], [1, 1], [2, 1]]),
            numpy.array([[1, 2], [2, 1], [3, 1]]),
        ]  # W, T_g, B_l
        lengths_a2a = [
            numpy.array([[[1, 2]], [[1, 2]]]),  # Rank 0
            numpy.array(
                [
                    [[1, 1], [2, 1]],  # from Rank 0
                    [[2, 1], [3, 1]],  # from rank 1
                ]
            ),  # Rank 1
        ]  # W, W, T_l, B_l
        lengths_mp = [
            numpy.array(
                [
                    [1, 2, 1, 2],
                ]
            ),
            numpy.array([[1, 1, 2, 1], [2, 1, 3, 1]]),
        ]  # w, t_l, b_g
        # Per-rank feature segment boundaries derived from table placement.
        input_seg = list(itertools.accumulate([0] + [len(i) for i in tables_mp]))
        input_splits = [
            [
                lengths_dp[i][input_seg[j] : input_seg[j + 1], :].sum()
                for j in range(ranks)
            ]
            for i in range(ranks)
        ]
        output_splits = [lengths_a2a[i].sum(axis=(1, 2)).tolist() for i in range(ranks)]
        table_dim = 3
        num_features_per_rank = [len(features) for features in tables_mp]
        # Forward recat interleaves local features across ranks; backward
        # recat is the inverse ordering.
        seq_all2all_forward_recat = []
        for j in range(ranks):
            for i in range(num_features_per_rank[rank]):
                seq_all2all_forward_recat.append(j + i * ranks)
        seq_all2all_forward_recat_tensor = torch.IntTensor(seq_all2all_forward_recat)
        seq_all2all_backward_recat = []
        for i in range(num_features_per_rank[rank]):
            for j in range(ranks):
                seq_all2all_backward_recat.append(i + j * num_features_per_rank[rank])
        seq_all2all_backward_recat_tensor = torch.IntTensor(seq_all2all_backward_recat)
        input_embeddings = torch.rand(
            lengths_mp[rank].sum(),
            table_dim,
            device=device,
            requires_grad=True,
        )
        lengths_after_sparse_data_all2all = torch.IntTensor(lengths_mp[rank])
        a2a_req = comm_ops.alltoall_sequence(
            a2a_sequence_embs_tensor=input_embeddings.cuda(),
            forward_recat_tensor=seq_all2all_forward_recat_tensor.cuda(),
            backward_recat_tensor=seq_all2all_backward_recat_tensor.cuda(),
            lengths_after_sparse_data_all2all=lengths_after_sparse_data_all2all.cuda(),
            input_splits=input_splits[rank],
            output_splits=output_splits[rank],
        )
        seq_embs_out = a2a_req.wait()
        # Using the output itself as the gradient: after the backward
        # AlltoAll, each input element should get its own value back.
        seq_embs_out.backward(seq_embs_out)
        grad = input_embeddings.grad
        assert torch.equal(input_embeddings.cpu().detach(), grad.cpu().detach())
        dist.destroy_process_group()

    # pyre-fixme[56]: Pyre was not able to infer the type of argument
    # `torch.cuda.device_count() = 0` to decorator factory `unittest.skipIf`.
    @unittest.skipIf(
        torch.cuda.device_count() < 2, "Need at least two ranks to run this test"
    )
    def test_alltoall_sequence(self) -> None:
        self._run_multi_process_test(
            world_size=self.WORLD_SIZE,
            backend="nccl",
            # pyre-ignore [6]
            callable=self._test_alltoall_sequence,
        )
| [
"torchrec.distributed.comm_ops.alltoallv",
"torchrec.test_utils.get_free_port"
] | [((1159, 1195), 'multiprocessing.get_context', 'multiprocessing.get_context', (['"""spawn"""'], {}), "('spawn')\n", (1186, 1195), False, 'import multiprocessing\n'), ((1733, 1807), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'rank': 'rank', 'world_size': 'world_size', 'backend': 'backend'}), '(rank=rank, world_size=world_size, backend=backend)\n', (1756, 1807), True, 'import torch.distributed as dist\n'), ((1825, 1853), 'torch.device', 'torch.device', (['f"""cuda:{rank}"""'], {}), "(f'cuda:{rank}')\n", (1837, 1853), False, 'import torch\n'), ((1863, 1892), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (1884, 1892), False, 'import torch\n'), ((1974, 2035), 'torch.rand', 'torch.rand', (['(B_global, D0)'], {'device': 'device', 'requires_grad': '(True)'}), '((B_global, D0), device=device, requires_grad=True)\n', (1984, 2035), False, 'import torch\n'), ((2110, 2171), 'torch.rand', 'torch.rand', (['(B_global, D1)'], {'device': 'device', 'requires_grad': '(True)'}), '((B_global, D1), device=device, requires_grad=True)\n', (2120, 2171), False, 'import torch\n'), ((2332, 2379), 'torchrec.distributed.comm_ops.alltoallv', 'comm_ops.alltoallv', (['input_embeddings', 'out_split'], {}), '(input_embeddings, out_split)\n', (2350, 2379), True, 'import torchrec.distributed.comm_ops as comm_ops\n'), ((2517, 2545), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (2543, 2545), True, 'import torch.distributed as dist\n'), ((3186, 3260), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'rank': 'rank', 'world_size': 'world_size', 'backend': 'backend'}), '(rank=rank, world_size=world_size, backend=backend)\n', (3209, 3260), True, 'import torch.distributed as dist\n'), ((3278, 3306), 'torch.device', 'torch.device', (['f"""cuda:{rank}"""'], {}), "(f'cuda:{rank}')\n", (3290, 3306), False, 'import torch\n'), ((3315, 3344), 'torch.cuda.set_device', 
'torch.cuda.set_device', (['device'], {}), '(device)\n', (3336, 3344), False, 'import torch\n'), ((4770, 4812), 'torch.IntTensor', 'torch.IntTensor', (['seq_all2all_forward_recat'], {}), '(seq_all2all_forward_recat)\n', (4785, 4812), False, 'import torch\n'), ((5073, 5116), 'torch.IntTensor', 'torch.IntTensor', (['seq_all2all_backward_recat'], {}), '(seq_all2all_backward_recat)\n', (5088, 5116), False, 'import torch\n'), ((5328, 5361), 'torch.IntTensor', 'torch.IntTensor', (['lengths_mp[rank]'], {}), '(lengths_mp[rank])\n', (5343, 5361), False, 'import torch\n'), ((6018, 6046), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (6044, 6046), True, 'import torch.distributed as dist\n'), ((676, 691), 'torchrec.test_utils.get_free_port', 'get_free_port', ([], {}), '()\n', (689, 691), False, 'from torchrec.test_utils import seed_and_log, get_free_port\n'), ((2726, 2751), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2749, 2751), False, 'import torch\n'), ((3433, 3470), 'numpy.array', 'numpy.array', (['[[1, 2], [1, 1], [2, 1]]'], {}), '([[1, 2], [1, 1], [2, 1]])\n', (3444, 3470), False, 'import numpy\n'), ((3484, 3521), 'numpy.array', 'numpy.array', (['[[1, 2], [2, 1], [3, 1]]'], {}), '([[1, 2], [2, 1], [3, 1]])\n', (3495, 3521), False, 'import numpy\n'), ((3584, 3617), 'numpy.array', 'numpy.array', (['[[[1, 2]], [[1, 2]]]'], {}), '([[[1, 2]], [[1, 2]]])\n', (3595, 3617), False, 'import numpy\n'), ((3641, 3690), 'numpy.array', 'numpy.array', (['[[[1, 1], [2, 1]], [[2, 1], [3, 1]]]'], {}), '([[[1, 1], [2, 1]], [[2, 1], [3, 1]]])\n', (3652, 3690), False, 'import numpy\n'), ((3884, 3911), 'numpy.array', 'numpy.array', (['[[1, 2, 1, 2]]'], {}), '([[1, 2, 1, 2]])\n', (3895, 3911), False, 'import numpy\n'), ((3994, 4035), 'numpy.array', 'numpy.array', (['[[1, 1, 2, 1], [2, 1, 3, 1]]'], {}), '([[1, 1, 2, 1], [2, 1, 3, 1]])\n', (4005, 4035), False, 'import numpy\n'), ((6227, 6252), 'torch.cuda.device_count', 
'torch.cuda.device_count', ([], {}), '()\n', (6250, 6252), False, 'import torch\n'), ((2430, 2458), 'torch.cat', 'torch.cat', (['v_embs_out'], {'dim': '(1)'}), '(v_embs_out, dim=1)\n', (2439, 2458), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from typing import TypeVar, Generic, List, Tuple, Optional, Dict, Any
import torch
import torch.distributed as dist
from torch import nn
from torchrec.distributed.dist_data import (
KJTAllToAll,
KJTOneToAll,
KJTAllToAllIndicesAwaitable,
)
from torchrec.distributed.embedding_types import (
GroupedEmbeddingConfig,
BaseEmbeddingLookup,
SparseFeatures,
EmbeddingComputeKernel,
ShardedEmbeddingTable,
BaseGroupedFeatureProcessor,
SparseFeaturesList,
ListOfSparseFeaturesList,
)
from torchrec.distributed.types import NoWait, Awaitable, ShardMetadata
from torchrec.modules.embedding_configs import (
PoolingType,
DataType,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
class SparseFeaturesIndicesAwaitable(Awaitable[SparseFeatures]):
    """
    Awaitable of sparse features redistributed with an AlltoAll collective.

    Either underlying awaitable may be None when the corresponding feature
    set is absent; the resolved `SparseFeatures` then carries None there too.

    Args:
        id_list_features_awaitable (Optional[Awaitable[KeyedJaggedTensor]]):
            awaitable of sharded id list features.
        id_score_list_features_awaitable (Optional[Awaitable[KeyedJaggedTensor]]):
            awaitable of sharded id score list features.
    """

    def __init__(
        self,
        id_list_features_awaitable: Optional[Awaitable[KeyedJaggedTensor]],
        id_score_list_features_awaitable: Optional[Awaitable[KeyedJaggedTensor]],
    ) -> None:
        super().__init__()
        self._id_list_features_awaitable = id_list_features_awaitable
        self._id_score_list_features_awaitable = id_score_list_features_awaitable

    def _wait_impl(self) -> SparseFeatures:
        """
        Syncs sparse features after AlltoAll.

        Returns:
            SparseFeatures: synced sparse features.
        """
        id_list_features = None
        if self._id_list_features_awaitable is not None:
            id_list_features = self._id_list_features_awaitable.wait()
        id_score_list_features = None
        if self._id_score_list_features_awaitable is not None:
            id_score_list_features = self._id_score_list_features_awaitable.wait()
        return SparseFeatures(
            id_list_features=id_list_features,
            id_score_list_features=id_score_list_features,
        )
class SparseFeaturesLengthsAwaitable(Awaitable[SparseFeaturesIndicesAwaitable]):
    """
    First stage of the sparse features indices distribution.

    Waiting on this object completes the lengths AlltoAll and launches the
    indices AlltoAll; waiting on the returned
    `SparseFeaturesIndicesAwaitable` yields the final AlltoAll results.

    Args:
        id_list_features_awaitable (Optional[Awaitable[KJTAllToAllIndicesAwaitable]]):
            lengths-stage awaitable for sharded id list features, if any.
        id_score_list_features_awaitable (Optional[Awaitable[KJTAllToAllIndicesAwaitable]]):
            lengths-stage awaitable for sharded id score list features, if any.
    """

    def __init__(
        self,
        id_list_features_awaitable: Optional[Awaitable[KJTAllToAllIndicesAwaitable]],
        id_score_list_features_awaitable: Optional[
            Awaitable[KJTAllToAllIndicesAwaitable]
        ],
    ) -> None:
        super().__init__()
        self._id_list_features_awaitable = id_list_features_awaitable
        self._id_score_list_features_awaitable = id_score_list_features_awaitable

    def _wait_impl(self) -> SparseFeaturesIndicesAwaitable:
        """
        Completes the lengths AlltoAll and hands back the indices stage.

        Returns:
            SparseFeaturesIndicesAwaitable.
        """
        id_list_stage = self._id_list_features_awaitable
        id_score_list_stage = self._id_score_list_features_awaitable
        return SparseFeaturesIndicesAwaitable(
            id_list_features_awaitable=(
                id_list_stage.wait() if id_list_stage is not None else None
            ),
            id_score_list_features_awaitable=(
                id_score_list_stage.wait()
                if id_score_list_stage is not None
                else None
            ),
        )
def bucketize_kjt_before_all2all(
    kjt: KeyedJaggedTensor,
    num_buckets: int,
    block_sizes: torch.Tensor,
    output_permute: bool = False,
    bucketize_pos: bool = False,
) -> Tuple[KeyedJaggedTensor, Optional[torch.Tensor]]:
    """
    Bucketizes the `values` of a KeyedJaggedTensor into `num_buckets` buckets
    and readjusts `lengths` accordingly.

    Note: intended only for row-wise sharding, before calling
    `SparseFeaturesAllToAll`.

    Args:
        kjt (KeyedJaggedTensor): features to bucketize.
        num_buckets (int): number of buckets to bucketize the values into.
        block_sizes (torch.Tensor): bucket sizes for the keyed dimension.
        output_permute (bool): also emit the memory-location mapping from
            unbucketized to bucketized values.
        bucketize_pos (bool): also emit the changed positions of the
            bucketized values.

    Returns:
        Tuple[KeyedJaggedTensor, Optional[torch.Tensor]]: the bucketized
            KeyedJaggedTensor and the optional unbucketized-to-bucketized
            permute mapping.
    """
    num_features = len(kjt.keys())
    assert (
        block_sizes.numel() == num_features
    ), f"Expecting block sizes for {num_features} features, but {block_sizes.numel()} received."
    # kernel expects them to be same type, cast to avoid type mismatch
    typed_block_sizes = block_sizes.type(kjt.values().type())
    (
        bucketized_lengths,
        bucketized_indices,
        bucketized_weights,
        pos,
        unbucketize_permute,
    ) = torch.ops.fbgemm.block_bucketize_sparse_features(
        kjt.lengths().view(-1),
        kjt.values(),
        bucketize_pos=bucketize_pos,
        sequence=output_permute,
        block_sizes=typed_block_sizes,
        my_size=num_buckets,
        weights=kjt.weights_or_none(),
    )
    bucketized_kjt = KeyedJaggedTensor(
        # duplicate keys will be resolved by AllToAll
        keys=kjt.keys() * num_buckets,
        values=bucketized_indices,
        weights=pos if bucketize_pos else bucketized_weights,
        lengths=bucketized_lengths.view(-1),
        offsets=None,
        stride=kjt.stride(),
        length_per_key=None,
        offset_per_key=None,
        index_per_key=None,
    )
    return bucketized_kjt, unbucketize_permute
class SparseFeaturesAllToAll(nn.Module):
    """
    Redistributes sparse features to a `ProcessGroup` with AlltoAll
    collectives — one `KJTAllToAll` for id list features and one for id
    score list features.

    The result is two-staged: the first `wait()` completes the lengths
    AlltoAll and launches the indices AlltoAll; waiting on its result
    yields the redistributed `SparseFeatures`. Each rank ends up with the
    feature keys assigned to it (per the `*_per_rank` splits), concatenated
    across all source ranks' batches.

    Args:
        pg (dist.ProcessGroup): process group for AlltoAll communication.
        id_list_features_per_rank (List[int]): number of id list features to
            send to each rank.
        id_score_list_features_per_rank (List[int]): number of id score list
            features to send to each rank.
        device (Optional[torch.device]): device on which buffers will be
            allocated.
        stagger (int): stagger value to apply to the recat tensor, see
            `_recat` for more detail.
    """

    def __init__(
        self,
        # pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
        pg: dist.ProcessGroup,
        id_list_features_per_rank: List[int],
        id_score_list_features_per_rank: List[int],
        device: Optional[torch.device] = None,
        stagger: int = 1,
    ) -> None:
        super().__init__()
        self._id_list_features_all2all: KJTAllToAll = KJTAllToAll(
            pg, id_list_features_per_rank, device, stagger
        )
        self._id_score_list_features_all2all: KJTAllToAll = KJTAllToAll(
            pg, id_score_list_features_per_rank, device, stagger
        )

    def forward(
        self,
        sparse_features: SparseFeatures,
    ) -> Awaitable[SparseFeaturesIndicesAwaitable]:
        """
        Sends sparse features to the relevant ProcessGroup ranks.

        Instantiates the lengths AlltoAll for each feature set that is
        present; an absent feature set propagates as None.

        Args:
            sparse_features (SparseFeatures): sparse features to redistribute.

        Returns:
            Awaitable[SparseFeaturesIndicesAwaitable]: two-stage awaitable of
                the redistributed SparseFeatures.
        """
        id_list_awaitable = None
        if sparse_features.id_list_features is not None:
            id_list_awaitable = self._id_list_features_all2all.forward(
                sparse_features.id_list_features
            )
        id_score_list_awaitable = None
        if sparse_features.id_score_list_features is not None:
            id_score_list_awaitable = self._id_score_list_features_all2all.forward(
                sparse_features.id_score_list_features
            )
        return SparseFeaturesLengthsAwaitable(
            id_list_features_awaitable=id_list_awaitable,
            id_score_list_features_awaitable=id_score_list_awaitable,
        )
class SparseFeaturesOneToAll(nn.Module):
    """
    Redistributes sparse features from one device to all devices.

    Args:
        id_list_features_per_rank (List[int]): number of id list features to
            send to each rank.
        id_score_list_features_per_rank (List[int]): number of id score list
            features to send to each rank.
        world_size (int): number of devices in the topology.
    """

    def __init__(
        self,
        id_list_features_per_rank: List[int],
        id_score_list_features_per_rank: List[int],
        world_size: int,
    ) -> None:
        super().__init__()
        self._world_size = world_size
        self._id_list_features_one2all: KJTOneToAll = KJTOneToAll(
            id_list_features_per_rank,
            world_size,
        )
        self._id_score_list_features_one2all: KJTOneToAll = KJTOneToAll(
            id_score_list_features_per_rank, world_size
        )

    def forward(
        self,
        sparse_features: SparseFeatures,
    ) -> Awaitable[SparseFeaturesList]:
        """
        Performs the OnetoAll operation on sparse features.

        Args:
            sparse_features (SparseFeatures): sparse features to redistribute.

        Returns:
            Awaitable[SparseFeaturesList]: already-completed awaitable
                (NoWait) of one SparseFeatures per device.
        """
        # An absent feature set becomes one None per device so the zip
        # below still pairs entries rank by rank.
        if sparse_features.id_list_features is None:
            id_list_per_device = [None] * self._world_size
        else:
            id_list_per_device = self._id_list_features_one2all.forward(
                sparse_features.id_list_features
            ).wait()
        if sparse_features.id_score_list_features is None:
            id_score_list_per_device = [None] * self._world_size
        else:
            id_score_list_per_device = self._id_score_list_features_one2all.forward(
                sparse_features.id_score_list_features
            ).wait()
        return NoWait(
            SparseFeaturesList(
                [
                    SparseFeatures(
                        id_list_features=id_list,
                        id_score_list_features=id_score_list,
                    )
                    for id_list, id_score_list in zip(
                        id_list_per_device, id_score_list_per_device
                    )
                ]
            )
        )
# Group tables by DataType, PoolingType, Weighted, and EmbeddingComputeKernel.
def group_tables(
    tables_per_rank: List[List[ShardedEmbeddingTable]],
) -> Tuple[List[List[GroupedEmbeddingConfig]], List[List[GroupedEmbeddingConfig]]]:
    """
    Groups tables by `DataType`, `PoolingType`, `Weighted`, and
    `EmbeddingComputeKernel`.

    Args:
        tables_per_rank (List[List[ShardedEmbeddingTable]]): sharded embedding
            tables, one list per rank.

    Returns:
        Tuple[List[List[GroupedEmbeddingConfig]], List[List[GroupedEmbeddingConfig]]]:
            per-rank lists of GroupedEmbeddingConfig for unscored and scored
            features.
    """

    def _group_one_rank(
        embedding_tables: List[ShardedEmbeddingTable],
    ) -> Tuple[List[GroupedEmbeddingConfig], List[GroupedEmbeddingConfig]]:
        unscored_groups: List[GroupedEmbeddingConfig] = []
        scored_groups: List[GroupedEmbeddingConfig] = []
        # Enumerate every grouping key in a fixed order so the resulting
        # config order is deterministic (and thus identical on every rank).
        for data_type in DataType:
            for pooling in PoolingType:
                for is_weighted in [True, False]:
                    for has_feature_processor in [True, False]:
                        for compute_kernel in [
                            EmbeddingComputeKernel.DENSE,
                            EmbeddingComputeKernel.SPARSE,
                            EmbeddingComputeKernel.BATCHED_DENSE,
                            EmbeddingComputeKernel.BATCHED_FUSED,
                            EmbeddingComputeKernel.BATCHED_QUANT,
                        ]:
                            plain_tables: List[ShardedEmbeddingTable] = []
                            score_tables: List[ShardedEmbeddingTable] = []
                            for table in embedding_tables:
                                # UVM kernel variants are grouped under the
                                # plain BATCHED_FUSED kernel.
                                if table.compute_kernel in [
                                    EmbeddingComputeKernel.BATCHED_FUSED_UVM,
                                    EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING,
                                ]:
                                    effective_kernel = (
                                        EmbeddingComputeKernel.BATCHED_FUSED
                                    )
                                else:
                                    effective_kernel = table.compute_kernel
                                # Skip tables that do not match the current
                                # grouping key.
                                if (
                                    table.data_type != data_type
                                    or table.pooling != pooling
                                    or table.is_weighted != is_weighted
                                    or table.has_feature_processor
                                    != has_feature_processor
                                    or effective_kernel != compute_kernel
                                ):
                                    continue
                                if table.is_weighted:
                                    score_tables.append(table)
                                else:
                                    plain_tables.append(table)
                            if plain_tables:
                                unscored_groups.append(
                                    GroupedEmbeddingConfig(
                                        data_type=data_type,
                                        pooling=pooling,
                                        is_weighted=is_weighted,
                                        has_feature_processor=has_feature_processor,
                                        compute_kernel=compute_kernel,
                                        embedding_tables=plain_tables,
                                    )
                                )
                            if score_tables:
                                scored_groups.append(
                                    GroupedEmbeddingConfig(
                                        data_type=data_type,
                                        pooling=pooling,
                                        is_weighted=is_weighted,
                                        has_feature_processor=has_feature_processor,
                                        compute_kernel=compute_kernel,
                                        embedding_tables=score_tables,
                                    )
                                )
        return unscored_groups, scored_groups

    unscored_per_rank: List[List[GroupedEmbeddingConfig]] = []
    scored_per_rank: List[List[GroupedEmbeddingConfig]] = []
    for rank_tables in tables_per_rank:
        unscored, scored = _group_one_rank(rank_tables)
        unscored_per_rank.append(unscored)
        scored_per_rank.append(scored)
    return (
        unscored_per_rank,
        scored_per_rank,
    )
class SparseFeaturesListAwaitable(Awaitable[SparseFeaturesList]):
    """
    Awaitable that gathers a list of in-flight `SparseFeatures` into one
    `SparseFeaturesList`.

    Args:
        awaitables (List[Awaitable[SparseFeatures]]): pending sparse-feature
            transfers, one per entry of the resulting list.
    """

    def __init__(
        self,
        awaitables: List[Awaitable[SparseFeatures]],
    ) -> None:
        super().__init__()
        self.awaitables = awaitables

    def _wait_impl(self) -> SparseFeaturesList:
        """
        Blocks on every pending transfer and collects the results.

        Returns:
            SparseFeaturesList: synced `SparseFeaturesList`.
        """
        synced = [pending.wait() for pending in self.awaitables]
        return SparseFeaturesList(synced)
class SparseFeaturesListIndicesAwaitable(Awaitable[List[Awaitable[SparseFeatures]]]):
    """
    Handles the first wait for a list of two-layer awaitables of
    `SparseFeatures`.

    Waiting on this module completes the lengths AlltoAll for each
    `SparseFeatures` and instantiates the corresponding indices AlltoAll.

    Args:
        awaitables (List[Awaitable[Awaitable[SparseFeatures]]]): list of
            `Awaitable` of `Awaitable` sparse features.
    """

    def __init__(
        self,
        awaitables: List[Awaitable[Awaitable[SparseFeatures]]],
    ) -> None:
        super().__init__()
        self.awaitables = awaitables

    def _wait_impl(self) -> List[Awaitable[SparseFeatures]]:
        """
        Completes the first (lengths) layer of every awaitable.

        Returns:
            List[Awaitable[SparseFeatures]]: the second-layer awaitables.
        """
        second_layer = []
        for first_layer in self.awaitables:
            second_layer.append(first_layer.wait())
        return second_layer
class ListOfSparseFeaturesListAwaitable(Awaitable[ListOfSparseFeaturesList]):
    """
    Handles table-wise sharding input-feature distribution for inference.

    For inference, lengths are currently not separated from indices, so a
    single wait per entry suffices.

    Args:
        awaitables (List[Awaitable[SparseFeaturesList]]): list of `Awaitable`
            of `SparseFeaturesList`.
    """

    def __init__(
        self,
        awaitables: List[Awaitable[SparseFeaturesList]],
    ) -> None:
        super().__init__()
        self.awaitables = awaitables

    def _wait_impl(self) -> ListOfSparseFeaturesList:
        """
        Blocks on every pending `SparseFeaturesList`.

        Returns:
            ListOfSparseFeaturesList: synced `ListOfSparseFeaturesList`.
        """
        synced = [pending.wait() for pending in self.awaitables]
        return ListOfSparseFeaturesList(synced)
# F: sparse-feature input type flowing through input dists (must be
# Multistreamable so it can be moved across CUDA streams).
F = TypeVar("F", bound=Multistreamable)
# T: embedding-lookup output type flowing through output dists.
T = TypeVar("T")
class BaseSparseFeaturesDist(abc.ABC, nn.Module, Generic[F]):
    """
    Converts input from data-parallel to model-parallel.
    """

    @abc.abstractmethod
    def forward(
        self,
        sparse_features: SparseFeatures,
    ) -> Awaitable[Awaitable[F]]:
        # Two-layer awaitable: presumably the outer wait exchanges lengths and
        # the inner wait exchanges indices (see
        # SparseFeaturesListIndicesAwaitable) — confirm per implementation.
        pass
class BaseEmbeddingDist(abc.ABC, nn.Module, Generic[T]):
    """
    Converts output of EmbeddingLookup from model-parallel to data-parallel.
    """

    @abc.abstractmethod
    def forward(
        self,
        local_embs: T,
    ) -> Awaitable[torch.Tensor]:
        # Returns an awaitable of the redistributed (data-parallel) embeddings.
        pass
class EmbeddingSharding(abc.ABC, Generic[F, T]):
    """
    Used to implement different sharding types for `EmbeddingBagCollection`, e.g.
    table_wise.

    Abstract factory: concrete shardings provide the input dist, lookup, and
    output dist modules plus metadata about the tables they own.
    """

    @abc.abstractmethod
    def create_input_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseSparseFeaturesDist[F]:
        # Module that scatters data-parallel sparse input to the
        # model-parallel layout.
        pass

    @abc.abstractmethod
    def create_output_dist(
        self,
        device: Optional[torch.device] = None,
    ) -> BaseEmbeddingDist[T]:
        # Module that gathers model-parallel lookup output back to
        # data-parallel.
        pass

    @abc.abstractmethod
    def create_lookup(
        self,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
        feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
    ) -> BaseEmbeddingLookup[F, T]:
        # Module performing the actual embedding lookup on this rank's shards.
        pass

    @abc.abstractmethod
    def embedding_dims(self) -> List[int]:
        # Embedding dimension per local embedding table/group.
        pass

    @abc.abstractmethod
    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        # Shard metadata per embedding; None where a table is unsharded.
        pass

    @abc.abstractmethod
    def embedding_names(self) -> List[str]:
        pass

    @abc.abstractmethod
    def id_list_feature_names(self) -> List[str]:
        pass

    @abc.abstractmethod
    def id_score_list_feature_names(self) -> List[str]:
        pass
| [
"torchrec.distributed.embedding_types.GroupedEmbeddingConfig",
"torchrec.distributed.dist_data.KJTAllToAll",
"torchrec.distributed.dist_data.KJTOneToAll",
"torchrec.distributed.embedding_types.SparseFeatures"
] | [((21641, 21676), 'typing.TypeVar', 'TypeVar', (['"""F"""'], {'bound': 'Multistreamable'}), "('F', bound=Multistreamable)\n", (21648, 21676), False, 'from typing import TypeVar, Generic, List, Tuple, Optional, Dict, Any\n'), ((21681, 21693), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (21688, 21693), False, 'from typing import TypeVar, Generic, List, Tuple, Optional, Dict, Any\n'), ((10542, 10601), 'torchrec.distributed.dist_data.KJTAllToAll', 'KJTAllToAll', (['pg', 'id_list_features_per_rank', 'device', 'stagger'], {}), '(pg, id_list_features_per_rank, device, stagger)\n', (10553, 10601), False, 'from torchrec.distributed.dist_data import KJTAllToAll, KJTOneToAll, KJTAllToAllIndicesAwaitable\n'), ((10684, 10749), 'torchrec.distributed.dist_data.KJTAllToAll', 'KJTAllToAll', (['pg', 'id_score_list_features_per_rank', 'device', 'stagger'], {}), '(pg, id_score_list_features_per_rank, device, stagger)\n', (10695, 10749), False, 'from torchrec.distributed.dist_data import KJTAllToAll, KJTOneToAll, KJTAllToAllIndicesAwaitable\n'), ((12571, 12621), 'torchrec.distributed.dist_data.KJTOneToAll', 'KJTOneToAll', (['id_list_features_per_rank', 'world_size'], {}), '(id_list_features_per_rank, world_size)\n', (12582, 12621), False, 'from torchrec.distributed.dist_data import KJTAllToAll, KJTOneToAll, KJTAllToAllIndicesAwaitable\n'), ((12717, 12773), 'torchrec.distributed.dist_data.KJTOneToAll', 'KJTOneToAll', (['id_score_list_features_per_rank', 'world_size'], {}), '(id_score_list_features_per_rank, world_size)\n', (12728, 12773), False, 'from torchrec.distributed.dist_data import KJTAllToAll, KJTOneToAll, KJTAllToAllIndicesAwaitable\n'), ((13263, 13364), 'torchrec.distributed.embedding_types.SparseFeatures', 'SparseFeatures', ([], {'id_list_features': 'id_list_features', 'id_score_list_features': 'id_score_list_features'}), '(id_list_features=id_list_features, id_score_list_features=\n id_score_list_features)\n', (13277, 13364), False, 'from 
torchrec.distributed.embedding_types import GroupedEmbeddingConfig, BaseEmbeddingLookup, SparseFeatures, EmbeddingComputeKernel, ShardedEmbeddingTable, BaseGroupedFeatureProcessor, SparseFeaturesList, ListOfSparseFeaturesList\n'), ((17339, 17542), 'torchrec.distributed.embedding_types.GroupedEmbeddingConfig', 'GroupedEmbeddingConfig', ([], {'data_type': 'data_type', 'pooling': 'pooling', 'is_weighted': 'is_weighted', 'has_feature_processor': 'has_feature_processor', 'compute_kernel': 'compute_kernel', 'embedding_tables': 'grouped_tables'}), '(data_type=data_type, pooling=pooling, is_weighted=\n is_weighted, has_feature_processor=has_feature_processor,\n compute_kernel=compute_kernel, embedding_tables=grouped_tables)\n', (17361, 17542), False, 'from torchrec.distributed.embedding_types import GroupedEmbeddingConfig, BaseEmbeddingLookup, SparseFeatures, EmbeddingComputeKernel, ShardedEmbeddingTable, BaseGroupedFeatureProcessor, SparseFeaturesList, ListOfSparseFeaturesList\n'), ((18008, 18217), 'torchrec.distributed.embedding_types.GroupedEmbeddingConfig', 'GroupedEmbeddingConfig', ([], {'data_type': 'data_type', 'pooling': 'pooling', 'is_weighted': 'is_weighted', 'has_feature_processor': 'has_feature_processor', 'compute_kernel': 'compute_kernel', 'embedding_tables': 'grouped_score_tables'}), '(data_type=data_type, pooling=pooling, is_weighted=\n is_weighted, has_feature_processor=has_feature_processor,\n compute_kernel=compute_kernel, embedding_tables=grouped_score_tables)\n', (18030, 18217), False, 'from torchrec.distributed.embedding_types import GroupedEmbeddingConfig, BaseEmbeddingLookup, SparseFeatures, EmbeddingComputeKernel, ShardedEmbeddingTable, BaseGroupedFeatureProcessor, SparseFeaturesList, ListOfSparseFeaturesList\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterator, List, Optional
import torch
from pyre_extensions import none_throws
from torch.utils.data.dataset import IterableDataset
from torchrec.datasets.utils import Batch
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class _RandomRecBatch:
    """
    Generates random `Batch` objects (dense features, unweighted sparse
    features, binary labels) and serves them cyclically as an infinite
    iterator.

    Exactly one of `hash_size` (shared across all keys) or `hash_sizes`
    (one per key) must be provided.
    """

    # Optional RNG used for every tensor draw; None means the global RNG.
    generator: Optional[torch.Generator]

    def __init__(
        self,
        keys: List[str],
        batch_size: int,
        hash_size: Optional[int],
        hash_sizes: Optional[List[int]],
        ids_per_feature: int,
        num_dense: int,
        manual_seed: Optional[int] = None,
    ) -> None:
        # XOR check: exactly one of hash_size / hash_sizes must be set.
        if (hash_size is None and hash_sizes is None) or (
            hash_size is not None and hash_sizes is not None
        ):
            raise ValueError(
                "One - and only one - of hash_size or hash_sizes must be set."
            )
        self.keys = keys
        self.keys_length: int = len(keys)
        self.batch_size = batch_size
        self.hash_size = hash_size
        self.hash_sizes = hash_sizes
        self.ids_per_feature = ids_per_feature
        self.num_dense = num_dense

        if manual_seed is not None:
            # Dedicated generator so results are deterministic per seed.
            self.generator = torch.Generator()
            # pyre-ignore[16]
            self.generator.manual_seed(manual_seed)
        else:
            self.generator = None

        # NOTE(review): iter_num is never read in this class — confirm whether
        # it is dead state.
        self.iter_num = 0
        # Total sparse ids per batch: every key contributes exactly
        # ids_per_feature ids for each of the batch_size rows.
        self._num_ids_in_batch: int = (
            self.ids_per_feature * self.keys_length * self.batch_size
        )
        self.max_values: Optional[torch.Tensor] = None
        if hash_sizes is not None:
            # Per-position exclusive upper bound for sparse ids, laid out to
            # match the flattened values tensor (key-major, then row, then id).
            # NOTE(review): the comprehension variable `hash_size` shadows the
            # `hash_size` parameter (which is None on this branch).
            self.max_values: torch.Tensor = torch.tensor(
                [
                    hash_size
                    for hash_size in hash_sizes
                    for b in range(batch_size)
                    for i in range(ids_per_feature)
                ]
            )
        # NOTE(review): `* 10` repeats the SAME batch object 10 times; if 10
        # distinct cached batches were intended, a comprehension calling
        # _generate_batch() per entry would be needed — confirm intent.
        self._generated_batches: List[Batch] = [self._generate_batch()] * 10
        self.batch_index = 0

    def __iter__(self) -> "_RandomRecBatch":
        return self

    def __next__(self) -> Batch:
        # Cycle through the pre-generated batches forever.
        batch = self._generated_batches[self.batch_index % len(self._generated_batches)]
        self.batch_index += 1
        return batch

    def _generate_batch(self) -> Batch:
        """Draws one random Batch from this object's generator."""
        if self.hash_sizes is None:
            # Single shared upper bound: draw ids uniformly in [0, hash_size).
            # pyre-ignore[28]
            values = torch.randint(
                high=self.hash_size,
                size=(self._num_ids_in_batch,),
                generator=self.generator,
            )
        else:
            # Per-position bounds: scale uniform [0, 1) by each position's max
            # and truncate to int64.
            values = (
                torch.rand(
                    self._num_ids_in_batch,
                    generator=self.generator,
                )
                * none_throws(self.max_values)
            ).type(torch.LongTensor)

        # Fixed-stride offsets: every (key, row) slot holds exactly
        # ids_per_feature ids.
        sparse_features = KeyedJaggedTensor.from_offsets_sync(
            keys=self.keys,
            values=values,
            offsets=torch.tensor(
                list(
                    range(
                        0,
                        self._num_ids_in_batch + 1,
                        self.ids_per_feature,
                    )
                ),
                dtype=torch.int32,
            ),
        )

        dense_features = torch.randn(
            self.batch_size,
            self.num_dense,
            generator=self.generator,
        )
        # Binary labels in {0, 1}.
        # pyre-ignore[28]
        labels = torch.randint(
            low=0,
            high=2,
            size=(self.batch_size,),
            generator=self.generator,
        )

        batch = Batch(
            dense_features=dense_features,
            sparse_features=sparse_features,
            labels=labels,
        )
        return batch
class RandomRecDataset(IterableDataset[Batch]):
    """
    Infinite iterable dataset yielding randomly generated batches for
    recommender systems (RecSys). Currently produces unweighted sparse
    features only. TODO: Add weighted sparse features.

    Exactly one of ``hash_size`` or ``hash_sizes`` must be supplied; the
    underlying batch generator enforces this.

    Args:
        keys (List[str]): List of feature names for sparse features.
        batch_size (int): batch size.
        hash_size (Optional[int]): Max sparse id value. All sparse IDs will be
            taken modulo this value.
        hash_sizes (Optional[List[int]]): Max sparse id value per feature in
            keys. Each sparse ID will be taken modulo the corresponding value
            from this argument.
        ids_per_feature (int): Number of IDs per sparse feature.
        num_dense (int): Number of dense features.
        manual_seed (Optional[int]): Seed for deterministic behavior.

    Example::

        dataset = RandomRecDataset(
            keys=["feat1", "feat2"],
            batch_size=16,
            hash_size=100_000,
            ids_per_feature=1,
            num_dense=13,
        )
        example = next(iter(dataset))
    """

    def __init__(
        self,
        keys: List[str],
        batch_size: int,
        hash_size: Optional[int] = 100,
        hash_sizes: Optional[List[int]] = None,
        ids_per_feature: int = 2,
        num_dense: int = 50,
        manual_seed: Optional[int] = None,
    ) -> None:
        super().__init__()
        # All generation logic lives in _RandomRecBatch; this class merely
        # adapts it to the IterableDataset protocol.
        self.batch_generator = _RandomRecBatch(
            keys=keys,
            batch_size=batch_size,
            hash_size=hash_size,
            hash_sizes=hash_sizes,
            ids_per_feature=ids_per_feature,
            num_dense=num_dense,
            manual_seed=manual_seed,
        )

    def __iter__(self) -> Iterator[Batch]:
        return iter(self.batch_generator)
| [
"torchrec.datasets.utils.Batch"
] | [((3408, 3478), 'torch.randn', 'torch.randn', (['self.batch_size', 'self.num_dense'], {'generator': 'self.generator'}), '(self.batch_size, self.num_dense, generator=self.generator)\n', (3419, 3478), False, 'import torch\n'), ((3569, 3648), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(2)', 'size': '(self.batch_size,)', 'generator': 'self.generator'}), '(low=0, high=2, size=(self.batch_size,), generator=self.generator)\n', (3582, 3648), False, 'import torch\n'), ((3725, 3813), 'torchrec.datasets.utils.Batch', 'Batch', ([], {'dense_features': 'dense_features', 'sparse_features': 'sparse_features', 'labels': 'labels'}), '(dense_features=dense_features, sparse_features=sparse_features,\n labels=labels)\n', (3730, 3813), False, 'from torchrec.datasets.utils import Batch\n'), ((1398, 1415), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (1413, 1415), False, 'import torch\n'), ((2542, 2638), 'torch.randint', 'torch.randint', ([], {'high': 'self.hash_size', 'size': '(self._num_ids_in_batch,)', 'generator': 'self.generator'}), '(high=self.hash_size, size=(self._num_ids_in_batch,),\n generator=self.generator)\n', (2555, 2638), False, 'import torch\n'), ((2751, 2811), 'torch.rand', 'torch.rand', (['self._num_ids_in_batch'], {'generator': 'self.generator'}), '(self._num_ids_in_batch, generator=self.generator)\n', (2761, 2811), False, 'import torch\n'), ((2889, 2917), 'pyre_extensions.none_throws', 'none_throws', (['self.max_values'], {}), '(self.max_values)\n', (2900, 2917), False, 'from pyre_extensions import none_throws\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import random
import tempfile
from typing import Optional, List, Any, Dict
import numpy as np
from torch.utils.data import DataLoader
from torchrec.datasets.criteo import (
BinaryCriteoUtils,
InMemoryBinaryCriteoIterDataPipe,
INT_FEATURE_COUNT,
CAT_FEATURE_COUNT,
)
from torchrec.datasets.criteo import criteo_kaggle, criteo_terabyte
from torchrec.datasets.tests.criteo_test_utils import CriteoTest
from torchrec.datasets.utils import Batch
class CriteoTerabyteTest(CriteoTest):
    def test_single_file(self) -> None:
        """A single generated TSV yields 10 samples that all validate."""
        with self._create_dataset_tsv() as dataset_pathname:
            dataset = criteo_terabyte((dataset_pathname,))
            for row in dataset:
                self._validate_sample(row)
            self.assertEqual(len(list(iter(dataset))), 10)

    def test_multiple_files(self) -> None:
        """Three generated TSVs are combined into a 30-sample dataset."""
        with contextlib.ExitStack() as stack:
            tsv_paths = [
                stack.enter_context(self._create_dataset_tsv()) for _ in range(3)
            ]
            dataset = criteo_terabyte(tsv_paths)
            for row in dataset:
                self._validate_sample(row)
            self.assertEqual(len(list(iter(dataset))), 30)
class CriteoKaggleTest(CriteoTest):
    def test_train_file(self) -> None:
        """Every sample of a generated train-split TSV validates; count is 10."""
        with self._create_dataset_tsv() as path:
            dataset = criteo_kaggle(path)
            for row in dataset:
                self._validate_sample(row)
            self.assertEqual(len(list(iter(dataset))), 10)

    def test_test_file(self) -> None:
        """Every sample of a generated test-split TSV validates; count is 10."""
        with self._create_dataset_tsv(train=False) as path:
            dataset = criteo_kaggle(path)
            for row in dataset:
                self._validate_sample(row, train=False)
            self.assertEqual(len(list(iter(dataset))), 10)
class CriteoDataLoaderTest(CriteoTest):
    def _validate_dataloader_sample(
        self,
        sample: Dict[str, List[Any]],  # pyre-ignore[2]
        batch_size: int,
        train: bool = True,
    ) -> None:
        # Un-batch the collated sample into per-row dicts, then validate each
        # row with the base-class checker.
        num_rows = self._sample_len(sample)
        rows: List[Dict[str, Any]] = [{} for _ in range(num_rows)]
        for key, column in sample.items():
            for row_idx, value in enumerate(column):
                rows[row_idx][key] = value
        for row in rows:
            self._validate_sample(row, train=train)

    def _sample_len(
        self,
        sample: Dict[str, List[Any]],  # pyre-ignore[2]
    ) -> int:
        # All columns share the same length; inspect an arbitrary one.
        return len(next(iter(sample.values())))

    def _test_dataloader(
        self,
        num_workers: int = 0,
        batch_size: int = 1,
        num_tsvs: int = 1,
        num_rows_per_tsv: int = 10,
        train: bool = True,
    ) -> None:
        with contextlib.ExitStack() as stack:
            tsv_paths = [
                stack.enter_context(
                    self._create_dataset_tsv(num_rows=num_rows_per_tsv, train=train)
                )
                for _ in range(num_tsvs)
            ]
            dataset = criteo_terabyte(tsv_paths)
            dataloader = DataLoader(
                dataset, batch_size=batch_size, num_workers=num_workers
            )
            total_len = 0
            for batched_sample in dataloader:
                total_len += self._sample_len(batched_sample)
                self._validate_dataloader_sample(
                    batched_sample, batch_size=batch_size, train=train
                )
            # The loader must cover the dataset exactly once.
            self.assertEqual(total_len, len(list(iter(dataset))))

    def test_multiple_train_workers(self) -> None:
        self._test_dataloader(
            num_workers=4, batch_size=16, num_tsvs=5, num_rows_per_tsv=32
        )

    def test_fewer_tsvs_than_workers(self) -> None:
        self._test_dataloader(
            num_workers=2, batch_size=16, num_tsvs=1, num_rows_per_tsv=16
        )

    def test_single_worker(self) -> None:
        self._test_dataloader(batch_size=16, num_tsvs=2, num_rows_per_tsv=16)
class TestBinaryCriteoUtils(CriteoTest):
    def test_tsv_to_npys(self) -> None:
        """tsv_to_npys writes dense/sparse/labels arrays with expected shapes and dtypes."""
        num_rows = 10
        with self._create_dataset_tsv(num_rows=num_rows) as in_file:
            # Pre-create three closed temp files for tsv_to_npys to overwrite.
            tmp_files = [tempfile.NamedTemporaryFile(delete=False) for _ in range(3)]
            for tmp in tmp_files:
                tmp.close()
            dense_path, sparse_path, labels_path = [f.name for f in tmp_files]

            BinaryCriteoUtils.tsv_to_npys(
                in_file, dense_path, sparse_path, labels_path
            )

            dense = np.load(dense_path)
            sparse = np.load(sparse_path)
            labels = np.load(labels_path)

            self.assertEqual(dense.shape, (num_rows, INT_FEATURE_COUNT))
            self.assertEqual(dense.dtype, np.float32)
            self.assertEqual(sparse.shape, (num_rows, CAT_FEATURE_COUNT))
            self.assertEqual(sparse.dtype, np.int32)
            self.assertEqual(labels.shape, (num_rows, 1))
            self.assertEqual(labels.dtype, np.int32)

            for path in (dense_path, sparse_path, labels_path):
                os.remove(path)

    def test_get_shape_from_npy(self) -> None:
        """get_shape_from_npy reads shapes without loading the full arrays."""
        num_rows = 10
        with self._create_dataset_npys(num_rows=num_rows) as (
            dense_path,
            sparse_path,
            labels_path,
        ):
            self.assertEqual(
                BinaryCriteoUtils.get_shape_from_npy(dense_path),
                (num_rows, INT_FEATURE_COUNT),
            )
            self.assertEqual(
                BinaryCriteoUtils.get_shape_from_npy(sparse_path),
                (num_rows, CAT_FEATURE_COUNT),
            )
            self.assertEqual(
                BinaryCriteoUtils.get_shape_from_npy(labels_path),
                (num_rows, 1),
            )

    def test_get_file_idx_to_row_range(self) -> None:
        """Row ranges are split contiguously and evenly across ranks."""
        lengths = [14, 17, 20]
        world_size = 3
        expected = [{0: (0, 13), 1: (0, 2)}, {1: (3, 16), 2: (0, 2)}, {2: (3, 19)}]
        for rank, want in enumerate(expected):
            self.assertEqual(
                want,
                BinaryCriteoUtils.get_file_idx_to_row_range(
                    lengths=lengths,
                    rank=rank,
                    world_size=world_size,
                ),
            )

    def test_load_npy_range(self) -> None:
        """load_npy_range returns the same rows as slicing the fully loaded array."""
        num_rows = 10
        start_row = 2
        num_rows_to_select = 4
        with self._create_dataset_npys(
            num_rows=num_rows, generate_sparse=False, generate_labels=False
        ) as (dense_path,):
            whole = np.load(dense_path)
            window = BinaryCriteoUtils.load_npy_range(
                dense_path, start_row=start_row, num_rows=num_rows_to_select
            )
            np.testing.assert_array_equal(
                whole[start_row : start_row + num_rows_to_select], window
            )
class TestInMemoryBinaryCriteoIterDataPipe(CriteoTest):
    def _validate_batch(
        self, batch: Batch, batch_size: int, hashes: Optional[List[int]] = None
    ) -> None:
        # Dense features must be (batch_size, INT_FEATURE_COUNT).
        self.assertEqual(
            tuple(batch.dense_features.size()), (batch_size, INT_FEATURE_COUNT)
        )
        # Sparse values are flattened: one id per (row, categorical feature).
        self.assertEqual(
            tuple(batch.sparse_features.values().size()),
            (batch_size * CAT_FEATURE_COUNT,),
        )
        self.assertEqual(tuple(batch.labels.size()), (batch_size,))
        if hashes is not None:
            # Broadcast-compare: reshape values feature-major so each feature
            # row is checked against its own hash upper bound.
            hashes_np = np.array(hashes).reshape((CAT_FEATURE_COUNT, 1))
            self.assertTrue(
                np.all(
                    batch.sparse_features.values().reshape(
                        (CAT_FEATURE_COUNT, batch_size)
                    )
                    < hashes_np
                )
            )

    def _test_dataset(
        self, rows_per_file: List[int], batch_size: int, world_size: int
    ) -> None:
        """Builds npy shards, runs the datapipe for every rank, and checks lengths."""
        with contextlib.ExitStack() as stack:
            files = [
                stack.enter_context(self._create_dataset_npys(num_rows=num_rows))
                for num_rows in rows_per_file
            ]
            hashes = [i + 1 for i in range(CAT_FEATURE_COUNT)]

            lens = []
            for rank in range(world_size):
                datapipe = InMemoryBinaryCriteoIterDataPipe(
                    dense_paths=[f[0] for f in files],
                    sparse_paths=[f[1] for f in files],
                    labels_paths=[f[2] for f in files],
                    batch_size=batch_size,
                    rank=rank,
                    world_size=world_size,
                    hashes=hashes,
                )
                datapipe_len = len(datapipe)

                len_ = 0
                # NOTE(review): `hashes` is not forwarded to _validate_batch
                # here, so the hash-bound branch is never exercised — confirm
                # whether that is intended.
                for x in datapipe:
                    self._validate_batch(x, batch_size=batch_size)
                    len_ += 1

                # Check that dataset __len__ matches true length.
                self.assertEqual(datapipe_len, len_)

                lens.append(len_)

            # Ensure all ranks' datapipes return the same number of batches.
            self.assertEqual(len(set(lens)), 1)

    def test_dataset_small_files(self) -> None:
        self._test_dataset([1] * 20, 4, 2)

    def test_dataset_random_sized_files(self) -> None:
        # Seeded so the randomly sized shards are reproducible.
        random.seed(0)
        self._test_dataset([random.randint(1, 100) for _ in range(100)], 16, 3)
| [
"torchrec.datasets.criteo.InMemoryBinaryCriteoIterDataPipe",
"torchrec.datasets.criteo.criteo_terabyte",
"torchrec.datasets.criteo.BinaryCriteoUtils.get_shape_from_npy",
"torchrec.datasets.criteo.BinaryCriteoUtils.load_npy_range",
"torchrec.datasets.criteo.BinaryCriteoUtils.tsv_to_npys",
"torchrec.dataset... | [((9349, 9363), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (9360, 9363), False, 'import random\n'), ((879, 915), 'torchrec.datasets.criteo.criteo_terabyte', 'criteo_terabyte', (['(dataset_pathname,)'], {}), '((dataset_pathname,))\n', (894, 915), False, 'from torchrec.datasets.criteo import criteo_kaggle, criteo_terabyte\n'), ((1113, 1135), 'contextlib.ExitStack', 'contextlib.ExitStack', ([], {}), '()\n', (1133, 1135), False, 'import contextlib\n'), ((1290, 1316), 'torchrec.datasets.criteo.criteo_terabyte', 'criteo_terabyte', (['pathnames'], {}), '(pathnames)\n', (1305, 1316), False, 'from torchrec.datasets.criteo import criteo_kaggle, criteo_terabyte\n'), ((1605, 1624), 'torchrec.datasets.criteo.criteo_kaggle', 'criteo_kaggle', (['path'], {}), '(path)\n', (1618, 1624), False, 'from torchrec.datasets.criteo import criteo_kaggle, criteo_terabyte\n'), ((1886, 1905), 'torchrec.datasets.criteo.criteo_kaggle', 'criteo_kaggle', (['path'], {}), '(path)\n', (1899, 1905), False, 'from torchrec.datasets.criteo import criteo_kaggle, criteo_terabyte\n'), ((2977, 2999), 'contextlib.ExitStack', 'contextlib.ExitStack', ([], {}), '()\n', (2997, 2999), False, 'import contextlib\n'), ((3253, 3279), 'torchrec.datasets.criteo.criteo_terabyte', 'criteo_terabyte', (['pathnames'], {}), '(pathnames)\n', (3268, 3279), False, 'from torchrec.datasets.criteo import criteo_kaggle, criteo_terabyte\n'), ((3305, 3372), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, num_workers=num_workers)\n', (3315, 3372), False, 'from torch.utils.data import DataLoader\n'), ((4559, 4658), 'torchrec.datasets.criteo.BinaryCriteoUtils.tsv_to_npys', 'BinaryCriteoUtils.tsv_to_npys', (['in_file', 'out_files[0].name', 'out_files[1].name', 'out_files[2].name'], {}), '(in_file, out_files[0].name, out_files[1].name,\n out_files[2].name)\n', (4588, 4658), False, 'from 
torchrec.datasets.criteo import BinaryCriteoUtils, InMemoryBinaryCriteoIterDataPipe, INT_FEATURE_COUNT, CAT_FEATURE_COUNT\n'), ((4706, 4732), 'numpy.load', 'np.load', (['out_files[0].name'], {}), '(out_files[0].name)\n', (4713, 4732), True, 'import numpy as np\n'), ((4754, 4780), 'numpy.load', 'np.load', (['out_files[1].name'], {}), '(out_files[1].name)\n', (4761, 4780), True, 'import numpy as np\n'), ((4802, 4828), 'numpy.load', 'np.load', (['out_files[2].name'], {}), '(out_files[2].name)\n', (4809, 4828), True, 'import numpy as np\n'), ((5520, 5568), 'torchrec.datasets.criteo.BinaryCriteoUtils.get_shape_from_npy', 'BinaryCriteoUtils.get_shape_from_npy', (['dense_path'], {}), '(dense_path)\n', (5556, 5568), False, 'from torchrec.datasets.criteo import BinaryCriteoUtils, InMemoryBinaryCriteoIterDataPipe, INT_FEATURE_COUNT, CAT_FEATURE_COUNT\n'), ((5596, 5645), 'torchrec.datasets.criteo.BinaryCriteoUtils.get_shape_from_npy', 'BinaryCriteoUtils.get_shape_from_npy', (['sparse_path'], {}), '(sparse_path)\n', (5632, 5645), False, 'from torchrec.datasets.criteo import BinaryCriteoUtils, InMemoryBinaryCriteoIterDataPipe, INT_FEATURE_COUNT, CAT_FEATURE_COUNT\n'), ((5673, 5722), 'torchrec.datasets.criteo.BinaryCriteoUtils.get_shape_from_npy', 'BinaryCriteoUtils.get_shape_from_npy', (['labels_path'], {}), '(labels_path)\n', (5709, 5722), False, 'from torchrec.datasets.criteo import BinaryCriteoUtils, InMemoryBinaryCriteoIterDataPipe, INT_FEATURE_COUNT, CAT_FEATURE_COUNT\n'), ((6701, 6720), 'numpy.load', 'np.load', (['dense_path'], {}), '(dense_path)\n', (6708, 6720), True, 'import numpy as np\n'), ((6743, 6842), 'torchrec.datasets.criteo.BinaryCriteoUtils.load_npy_range', 'BinaryCriteoUtils.load_npy_range', (['dense_path'], {'start_row': 'start_row', 'num_rows': 'num_rows_to_select'}), '(dense_path, start_row=start_row, num_rows=\n num_rows_to_select)\n', (6775, 6842), False, 'from torchrec.datasets.criteo import BinaryCriteoUtils, InMemoryBinaryCriteoIterDataPipe, 
INT_FEATURE_COUNT, CAT_FEATURE_COUNT\n'), ((6880, 6971), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['full[start_row:start_row + num_rows_to_select]', 'partial'], {}), '(full[start_row:start_row + num_rows_to_select\n ], partial)\n', (6909, 6971), True, 'import numpy as np\n'), ((7986, 8008), 'contextlib.ExitStack', 'contextlib.ExitStack', ([], {}), '()\n', (8006, 8008), False, 'import contextlib\n'), ((4413, 4454), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (4440, 4454), False, 'import tempfile\n'), ((5251, 5275), 'os.remove', 'os.remove', (['out_file.name'], {}), '(out_file.name)\n', (5260, 5275), False, 'import os\n'), ((6233, 6328), 'torchrec.datasets.criteo.BinaryCriteoUtils.get_file_idx_to_row_range', 'BinaryCriteoUtils.get_file_idx_to_row_range', ([], {'lengths': 'lengths', 'rank': 'i', 'world_size': 'world_size'}), '(lengths=lengths, rank=i,\n world_size=world_size)\n', (6276, 6328), False, 'from torchrec.datasets.criteo import BinaryCriteoUtils, InMemoryBinaryCriteoIterDataPipe, INT_FEATURE_COUNT, CAT_FEATURE_COUNT\n'), ((8339, 8558), 'torchrec.datasets.criteo.InMemoryBinaryCriteoIterDataPipe', 'InMemoryBinaryCriteoIterDataPipe', ([], {'dense_paths': '[f[0] for f in files]', 'sparse_paths': '[f[1] for f in files]', 'labels_paths': '[f[2] for f in files]', 'batch_size': 'batch_size', 'rank': 'rank', 'world_size': 'world_size', 'hashes': 'hashes'}), '(dense_paths=[f[0] for f in files],\n sparse_paths=[f[1] for f in files], labels_paths=[f[2] for f in files],\n batch_size=batch_size, rank=rank, world_size=world_size, hashes=hashes)\n', (8371, 8558), False, 'from torchrec.datasets.criteo import BinaryCriteoUtils, InMemoryBinaryCriteoIterDataPipe, INT_FEATURE_COUNT, CAT_FEATURE_COUNT\n'), ((9392, 9414), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (9406, 9414), False, 'import random\n'), ((7557, 7573), 'numpy.array', 'np.array', 
(['hashes'], {}), '(hashes)\n', (7565, 7573), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Callable, List, Union
import hypothesis.strategies as st
import torch
from hypothesis import given, settings
from torch import nn
from torchrec.fx import symbolic_trace
from torchrec.modules.mlp import MLP, Perceptron
class TestMLP(unittest.TestCase):
    # pyre-ignore[56]: Pyre was not able to infer the type of argument
    # to decorator factory `hypothesis.given`.
    @given(
        has_bias=st.booleans(),
        activation=st.sampled_from(
            [
                torch.relu,
                torch.tanh,
                torch.sigmoid,
                nn.SiLU(),
            ]
        ),
    )
    @settings(deadline=None)
    def test_perceptron_single_channel(
        self,
        has_bias: bool,
        activation: Union[
            torch.nn.Module,
            Callable[[torch.Tensor], torch.Tensor],
        ],
    ) -> None:
        """Each task's Perceptron output must equal activation(linear(input))."""
        batch_size = 3
        task_in_dims: List[int] = [40, 30, 20, 10]
        # One random input tensor per task (same randn order as before).
        task_inputs: List[torch.Tensor] = [
            torch.randn(batch_size, dim) for dim in task_in_dims
        ]

        out_size = 16
        num_tasks = 4
        task_perceptrons = [
            Perceptron(
                task_in_dims[i],
                out_size,
                bias=has_bias,
                activation=activation,
            )
            for i in range(num_tasks)
        ]

        # Dry-run with input of a different batch size to initialize the lazy
        # modules.
        dry_run_batch_size = 1
        assert dry_run_batch_size != batch_size
        for perceptron, inp in zip(task_perceptrons, task_inputs):
            perceptron(torch.randn(dry_run_batch_size, inp.shape[-1]))

        actual_outputs = [
            perceptron(inp)
            for perceptron, inp in zip(task_perceptrons, task_inputs)
        ]
        expected_outputs = [
            perceptron._activation_fn(perceptron._linear(inp))
            for perceptron, inp in zip(task_perceptrons, task_inputs)
        ]
        for actual, expected in zip(actual_outputs, expected_outputs):
            self.assertEqual(list(actual.shape), [batch_size, out_size])
            self.assertTrue(torch.allclose(actual, expected))

    def test_fx_script_Perceptron(self) -> None:
        """A dry-run-initialized Perceptron must be fx-traceable and scriptable."""
        batch_size = 1
        in_features = 3
        out_features = 5
        module = Perceptron(in_features, out_features)

        # Dry-run to initialize lazy module before tracing.
        module(torch.randn(batch_size, in_features))

        traced = symbolic_trace(module)
        torch.jit.script(traced)

    def test_fx_script_MLP(self) -> None:
        """An MLP must be fx-traceable and scriptable without a dry run."""
        in_features = 3
        layer_sizes = [16, 8, 4]
        traced = symbolic_trace(MLP(in_features, layer_sizes))
        torch.jit.script(traced)
| [
"torchrec.modules.mlp.MLP",
"torchrec.modules.mlp.Perceptron",
"torchrec.fx.symbolic_trace"
] | [((884, 907), 'hypothesis.settings', 'settings', ([], {'deadline': 'None'}), '(deadline=None)\n', (892, 907), False, 'from hypothesis import given, settings\n'), ((2929, 2966), 'torchrec.modules.mlp.Perceptron', 'Perceptron', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (2939, 2966), False, 'from torchrec.modules.mlp import MLP, Perceptron\n'), ((3075, 3092), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (3089, 3092), False, 'from torchrec.fx import symbolic_trace\n'), ((3101, 3121), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (3117, 3121), False, 'import torch\n'), ((3234, 3263), 'torchrec.modules.mlp.MLP', 'MLP', (['in_features', 'layer_sizes'], {}), '(in_features, layer_sizes)\n', (3237, 3263), False, 'from torchrec.modules.mlp import MLP, Perceptron\n'), ((3278, 3295), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['m'], {}), '(m)\n', (3292, 3295), False, 'from torchrec.fx import symbolic_trace\n'), ((3304, 3324), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (3320, 3324), False, 'import torch\n'), ((1251, 1289), 'torch.randn', 'torch.randn', (['batch_size', 'input_dims[0]'], {}), '(batch_size, input_dims[0])\n', (1262, 1289), False, 'import torch\n'), ((1313, 1351), 'torch.randn', 'torch.randn', (['batch_size', 'input_dims[1]'], {}), '(batch_size, input_dims[1])\n', (1324, 1351), False, 'import torch\n'), ((1375, 1413), 'torch.randn', 'torch.randn', (['batch_size', 'input_dims[2]'], {}), '(batch_size, input_dims[2])\n', (1386, 1413), False, 'import torch\n'), ((1437, 1475), 'torch.randn', 'torch.randn', (['batch_size', 'input_dims[3]'], {}), '(batch_size, input_dims[3])\n', (1448, 1475), False, 'import torch\n'), ((1601, 1692), 'torchrec.modules.mlp.Perceptron', 'Perceptron', (['input_dims[i]', 'perceptron_layer_size'], {'bias': 'has_bias', 'activation': 'activation'}), '(input_dims[i], perceptron_layer_size, bias=has_bias, activation=\n activation)\n', (1611, 
1692), False, 'from torchrec.modules.mlp import MLP, Perceptron\n'), ((669, 682), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (680, 682), True, 'import hypothesis.strategies as st\n'), ((3023, 3059), 'torch.randn', 'torch.randn', (['batch_size', 'in_features'], {}), '(batch_size, in_features)\n', (3034, 3059), False, 'import torch\n'), ((2038, 2097), 'torch.randn', 'torch.randn', (['dry_run_batch_size', 'input_tensors[i].shape[-1]'], {}), '(dry_run_batch_size, input_tensors[i].shape[-1])\n', (2049, 2097), False, 'import torch\n'), ((2719, 2780), 'torch.allclose', 'torch.allclose', (['output_tensors[i]', 'expected_output_tensors[i]'], {}), '(output_tensors[i], expected_output_tensors[i])\n', (2733, 2780), False, 'import torch\n'), ((837, 846), 'torch.nn.SiLU', 'nn.SiLU', ([], {}), '()\n', (844, 846), False, 'from torch import nn\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, Optional, Any, Dict, Tuple, TypeVar
import torch
import torch.distributed as dist
from torchrec.distributed.dist_data import (
EmbeddingsAllToOne,
PooledEmbeddingsAllToAll,
)
from torchrec.distributed.embedding_lookup import (
GroupedPooledEmbeddingsLookup,
InferGroupedPooledEmbeddingsLookup,
)
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
SparseFeaturesAllToAll,
SparseFeaturesOneToAll,
group_tables,
BaseEmbeddingDist,
BaseSparseFeaturesDist,
BaseEmbeddingLookup,
)
from torchrec.distributed.embedding_types import (
SparseFeaturesList,
GroupedEmbeddingConfig,
SparseFeatures,
ShardedEmbeddingTable,
EmbeddingComputeKernel,
BaseGroupedFeatureProcessor,
)
from torchrec.distributed.types import (
ShardingEnv,
ShardedTensorMetadata,
Awaitable,
NoWait,
ParameterSharding,
ShardMetadata,
)
from torchrec.modules.embedding_configs import EmbeddingTableConfig
from torchrec.streamable import Multistreamable
F = TypeVar("F", bound=Multistreamable)
T = TypeVar("T")
class BaseTwEmbeddingSharding(EmbeddingSharding[F, T]):
"""
base class for table-wise sharding
"""
def __init__(
self,
embedding_configs: List[
Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
],
env: ShardingEnv,
device: Optional[torch.device] = None,
) -> None:
super().__init__()
self._env = env
self._device = device
# pyre-ignore[11]
self._pg: Optional[dist.ProcessGroup] = self._env.process_group
self._world_size: int = self._env.world_size
self._rank: int = self._env.rank
sharded_tables_per_rank = self._shard(embedding_configs)
self._grouped_embedding_configs_per_rank: List[
List[GroupedEmbeddingConfig]
] = []
self._score_grouped_embedding_configs_per_rank: List[
List[GroupedEmbeddingConfig]
] = []
(
self._grouped_embedding_configs_per_rank,
self._score_grouped_embedding_configs_per_rank,
) = group_tables(sharded_tables_per_rank)
self._grouped_embedding_configs: List[
GroupedEmbeddingConfig
] = self._grouped_embedding_configs_per_rank[self._rank]
self._score_grouped_embedding_configs: List[
GroupedEmbeddingConfig
] = self._score_grouped_embedding_configs_per_rank[self._rank]
def _shard(
self,
embedding_configs: List[
Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
],
) -> List[List[ShardedEmbeddingTable]]:
world_size = self._world_size
tables_per_rank: List[List[ShardedEmbeddingTable]] = [
[] for i in range(world_size)
]
for config in embedding_configs:
# pyre-fixme [16]
shards = config[1].sharding_spec.shards
# construct the global sharded_tensor_metadata
global_metadata = ShardedTensorMetadata(
shards_metadata=shards,
size=torch.Size([config[0].num_embeddings, config[0].embedding_dim]),
)
# pyre-fixme [16]
tables_per_rank[config[1].ranks[0]].append(
ShardedEmbeddingTable(
num_embeddings=config[0].num_embeddings,
embedding_dim=config[0].embedding_dim,
name=config[0].name,
embedding_names=config[0].embedding_names,
data_type=config[0].data_type,
feature_names=config[0].feature_names,
pooling=config[0].pooling,
is_weighted=config[0].is_weighted,
has_feature_processor=config[0].has_feature_processor,
local_rows=config[0].num_embeddings,
local_cols=config[0].embedding_dim,
compute_kernel=EmbeddingComputeKernel(config[1].compute_kernel),
local_metadata=shards[0],
global_metadata=global_metadata,
weight_init_max=config[0].weight_init_max,
weight_init_min=config[0].weight_init_min,
)
)
return tables_per_rank
def _dim_sum_per_rank(self) -> List[int]:
dim_sum_per_rank = []
for grouped_embedding_configs, score_grouped_embedding_configs in zip(
self._grouped_embedding_configs_per_rank,
self._score_grouped_embedding_configs_per_rank,
):
dim_sum = 0
for grouped_config in grouped_embedding_configs:
dim_sum += grouped_config.dim_sum()
for grouped_config in score_grouped_embedding_configs:
dim_sum += grouped_config.dim_sum()
dim_sum_per_rank.append(dim_sum)
return dim_sum_per_rank
def embedding_dims(self) -> List[int]:
embedding_dims = []
for grouped_embedding_configs, score_grouped_embedding_configs in zip(
self._grouped_embedding_configs_per_rank,
self._score_grouped_embedding_configs_per_rank,
):
for grouped_config in grouped_embedding_configs:
embedding_dims.extend(grouped_config.embedding_dims())
for grouped_config in score_grouped_embedding_configs:
embedding_dims.extend(grouped_config.embedding_dims())
return embedding_dims
def embedding_names(self) -> List[str]:
embedding_names = []
for grouped_embedding_configs, score_grouped_embedding_configs in zip(
self._grouped_embedding_configs_per_rank,
self._score_grouped_embedding_configs_per_rank,
):
for grouped_config in grouped_embedding_configs:
embedding_names.extend(grouped_config.embedding_names())
for grouped_config in score_grouped_embedding_configs:
embedding_names.extend(grouped_config.embedding_names())
return embedding_names
def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
embedding_shard_metadata = []
for grouped_embedding_configs, score_grouped_embedding_configs in zip(
self._grouped_embedding_configs_per_rank,
self._score_grouped_embedding_configs_per_rank,
):
for grouped_config in grouped_embedding_configs:
embedding_shard_metadata.extend(
grouped_config.embedding_shard_metadata()
)
for grouped_config in score_grouped_embedding_configs:
embedding_shard_metadata.extend(
grouped_config.embedding_shard_metadata()
)
return embedding_shard_metadata
def id_list_feature_names(self) -> List[str]:
id_list_feature_names = []
for grouped_embedding_configs in self._grouped_embedding_configs_per_rank:
for grouped_config in grouped_embedding_configs:
id_list_feature_names.extend(grouped_config.feature_names())
return id_list_feature_names
def id_score_list_feature_names(self) -> List[str]:
id_score_list_feature_names = []
for (
score_grouped_embedding_configs
) in self._score_grouped_embedding_configs_per_rank:
for grouped_config in score_grouped_embedding_configs:
id_score_list_feature_names.extend(grouped_config.feature_names())
return id_score_list_feature_names
def _id_list_features_per_rank(self) -> List[int]:
id_list_features_per_rank = []
for grouped_embedding_configs in self._grouped_embedding_configs_per_rank:
num_features = 0
for grouped_config in grouped_embedding_configs:
num_features += grouped_config.num_features()
id_list_features_per_rank.append(num_features)
return id_list_features_per_rank
def _id_score_list_features_per_rank(self) -> List[int]:
id_score_list_features_per_rank = []
for (
score_grouped_embedding_configs
) in self._score_grouped_embedding_configs_per_rank:
num_features = 0
for grouped_config in score_grouped_embedding_configs:
num_features += grouped_config.num_features()
id_score_list_features_per_rank.append(num_features)
return id_score_list_features_per_rank
class TwSparseFeaturesDist(BaseSparseFeaturesDist[SparseFeatures]):
"""
Redistributes sparse features in TW fashion with an AlltoAll collective
operation.
Constructor Args:
pg (dist.ProcessGroup): ProcessGroup for AlltoAll communication.
id_list_features_per_rank (List[int]): number of id list features to send to
each rank.
id_score_list_features_per_rank (List[int]): number of id score list features to
send to each rank
device (Optional[torch.device]): device on which buffers will be allocated.
"""
def __init__(
self,
pg: dist.ProcessGroup,
id_list_features_per_rank: List[int],
id_score_list_features_per_rank: List[int],
device: Optional[torch.device] = None,
) -> None:
super().__init__()
self._dist = SparseFeaturesAllToAll(
pg=pg,
id_list_features_per_rank=id_list_features_per_rank,
id_score_list_features_per_rank=id_score_list_features_per_rank,
device=device,
)
def forward(
self,
sparse_features: SparseFeatures,
) -> Awaitable[Awaitable[SparseFeatures]]:
"""
Performs AlltoAll operation on sparse features.
Call Args:
sparse_features (SparseFeatures): sparse features to redistribute.
Returns:
Awaitable[Awaitable[SparseFeatures]]: awaitable of awaitable of
SparseFeatures.
"""
return self._dist(sparse_features)
class TwPooledEmbeddingDist(BaseEmbeddingDist[torch.Tensor]):
"""
Redistributes pooled embedding tensor in TW fashion with an AlltoAll
collective operation.
Constructor Args:
pg (dist.ProcessGroup): ProcessGroup for AlltoAll communication.
dim_sum_per_rank (List[int]): number of features (sum of dimensions) of the
embedding in each rank.
device (Optional[torch.device]): device on which buffers will be allocated.
"""
def __init__(
self,
pg: dist.ProcessGroup,
dim_sum_per_rank: List[int],
device: Optional[torch.device] = None,
callbacks: Optional[List[Callable[[torch.Tensor], torch.Tensor]]] = None,
) -> None:
super().__init__()
self._dist = PooledEmbeddingsAllToAll(pg, dim_sum_per_rank, device, callbacks)
def forward(
self,
local_embs: torch.Tensor,
) -> Awaitable[torch.Tensor]:
"""
Performs AlltoAll operation on pooled embeddings tensor.
Call Args:
local_embs (torch.Tensor): tensor of values to distribute.
Returns:
Awaitable[torch.Tensor]: awaitable of pooled embeddings.
"""
return self._dist(local_embs)
class TwPooledEmbeddingSharding(BaseTwEmbeddingSharding[SparseFeatures, torch.Tensor]):
"""
Shards embedding bags table-wise, i.e.. a given embedding table is entirely placed
on a selected rank.
"""
def create_input_dist(
self,
device: Optional[torch.device] = None,
) -> BaseSparseFeaturesDist[SparseFeatures]:
return TwSparseFeaturesDist(
self._pg,
self._id_list_features_per_rank(),
self._id_score_list_features_per_rank(),
device if device is not None else self._device,
)
def create_lookup(
self,
device: Optional[torch.device] = None,
fused_params: Optional[Dict[str, Any]] = None,
feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
) -> BaseEmbeddingLookup:
return GroupedPooledEmbeddingsLookup(
grouped_configs=self._grouped_embedding_configs,
grouped_score_configs=self._score_grouped_embedding_configs,
fused_params=fused_params,
pg=self._pg,
device=device if device is not None else self._device,
feature_processor=feature_processor,
)
def create_output_dist(
self,
device: Optional[torch.device] = None,
) -> BaseEmbeddingDist[torch.Tensor]:
return TwPooledEmbeddingDist(
self._pg,
self._dim_sum_per_rank(),
device if device is not None else self._device,
)
class InferTwSparseFeaturesDist(BaseSparseFeaturesDist[SparseFeaturesList]):
"""
Redistributes sparse features to all devices for inference.
Constructor Args:
id_list_features_per_rank (List[int]): number of id list features to send
to each rank.
id_score_list_features_per_rank (List[int]): number of id score list features
to send to each rank.
world_size (int): number of devices in the topology.
"""
def __init__(
self,
id_list_features_per_rank: List[int],
id_score_list_features_per_rank: List[int],
world_size: int,
) -> None:
super().__init__()
self._dist: SparseFeaturesOneToAll = SparseFeaturesOneToAll(
id_list_features_per_rank,
id_score_list_features_per_rank,
world_size,
)
def forward(
self,
sparse_features: SparseFeatures,
) -> Awaitable[Awaitable[SparseFeaturesList]]:
"""
Performs OnetoAll operation on sparse features.
Call Args:
sparse_features (SparseFeatures): sparse features to redistribute.
Returns:
Awaitable[Awaitable[SparseFeatures]]: awaitable of awaitable of
SparseFeatures.
"""
return NoWait(self._dist.forward(sparse_features))
class InferTwPooledEmbeddingDist(BaseEmbeddingDist[List[torch.Tensor]]):
"""
Merges pooled embedding tensor from each device for inference.
Constructor Args:
device (Optional[torch.device]): device on which buffer will be allocated.
world_size (int): number of devices in the topology.
"""
def __init__(
self,
device: torch.device,
world_size: int,
) -> None:
super().__init__()
self._dist: EmbeddingsAllToOne = EmbeddingsAllToOne(device, world_size, 1)
def forward(
self,
local_embs: List[torch.Tensor],
) -> Awaitable[torch.Tensor]:
"""
Performs AlltoOne operation on pooled embedding tensors.
Call Args:
local_embs (List[torch.Tensor]): pooled embedding tensors with
len(local_embs) == world_size.
Returns:
Awaitable[torch.Tensor]: awaitable of merged pooled embedding tensor.
"""
return self._dist.forward(local_embs)
class InferTwEmbeddingSharding(
BaseTwEmbeddingSharding[SparseFeaturesList, List[torch.Tensor]]
):
"""
Shards embedding bags table-wise for inference
"""
def create_input_dist(
self, device: Optional[torch.device] = None
) -> BaseSparseFeaturesDist[SparseFeaturesList]:
return InferTwSparseFeaturesDist(
self._id_list_features_per_rank(),
self._id_score_list_features_per_rank(),
self._world_size,
)
def create_lookup(
self,
device: Optional[torch.device] = None,
fused_params: Optional[Dict[str, Any]] = None,
feature_processor: Optional[BaseGroupedFeatureProcessor] = None,
) -> BaseEmbeddingLookup[SparseFeaturesList, List[torch.Tensor]]:
return InferGroupedPooledEmbeddingsLookup(
grouped_configs_per_rank=self._grouped_embedding_configs_per_rank,
grouped_score_configs_per_rank=self._score_grouped_embedding_configs_per_rank,
world_size=self._world_size,
)
def create_output_dist(
self,
device: Optional[torch.device] = None,
) -> BaseEmbeddingDist[List[torch.Tensor]]:
device = device if device is not None else self._device
return InferTwPooledEmbeddingDist(
# pyre-fixme [6]
device,
self._world_size,
)
| [
"torchrec.distributed.embedding_types.EmbeddingComputeKernel",
"torchrec.distributed.embedding_sharding.SparseFeaturesAllToAll",
"torchrec.distributed.embedding_lookup.GroupedPooledEmbeddingsLookup",
"torchrec.distributed.embedding_sharding.SparseFeaturesOneToAll",
"torchrec.distributed.embedding_sharding.g... | [((1300, 1335), 'typing.TypeVar', 'TypeVar', (['"""F"""'], {'bound': 'Multistreamable'}), "('F', bound=Multistreamable)\n", (1307, 1335), False, 'from typing import Callable, List, Optional, Any, Dict, Tuple, TypeVar\n'), ((1340, 1352), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1347, 1352), False, 'from typing import Callable, List, Optional, Any, Dict, Tuple, TypeVar\n'), ((2408, 2445), 'torchrec.distributed.embedding_sharding.group_tables', 'group_tables', (['sharded_tables_per_rank'], {}), '(sharded_tables_per_rank)\n', (2420, 2445), False, 'from torchrec.distributed.embedding_sharding import EmbeddingSharding, SparseFeaturesAllToAll, SparseFeaturesOneToAll, group_tables, BaseEmbeddingDist, BaseSparseFeaturesDist, BaseEmbeddingLookup\n'), ((9638, 9810), 'torchrec.distributed.embedding_sharding.SparseFeaturesAllToAll', 'SparseFeaturesAllToAll', ([], {'pg': 'pg', 'id_list_features_per_rank': 'id_list_features_per_rank', 'id_score_list_features_per_rank': 'id_score_list_features_per_rank', 'device': 'device'}), '(pg=pg, id_list_features_per_rank=\n id_list_features_per_rank, id_score_list_features_per_rank=\n id_score_list_features_per_rank, device=device)\n', (9660, 9810), False, 'from torchrec.distributed.embedding_sharding import EmbeddingSharding, SparseFeaturesAllToAll, SparseFeaturesOneToAll, group_tables, BaseEmbeddingDist, BaseSparseFeaturesDist, BaseEmbeddingLookup\n'), ((11107, 11172), 'torchrec.distributed.dist_data.PooledEmbeddingsAllToAll', 'PooledEmbeddingsAllToAll', (['pg', 'dim_sum_per_rank', 'device', 'callbacks'], {}), '(pg, dim_sum_per_rank, device, callbacks)\n', (11131, 11172), False, 'from torchrec.distributed.dist_data import EmbeddingsAllToOne, PooledEmbeddingsAllToAll\n'), ((12421, 12711), 'torchrec.distributed.embedding_lookup.GroupedPooledEmbeddingsLookup', 'GroupedPooledEmbeddingsLookup', ([], {'grouped_configs': 'self._grouped_embedding_configs', 'grouped_score_configs': 
'self._score_grouped_embedding_configs', 'fused_params': 'fused_params', 'pg': 'self._pg', 'device': '(device if device is not None else self._device)', 'feature_processor': 'feature_processor'}), '(grouped_configs=self.\n _grouped_embedding_configs, grouped_score_configs=self.\n _score_grouped_embedding_configs, fused_params=fused_params, pg=self.\n _pg, device=device if device is not None else self._device,\n feature_processor=feature_processor)\n', (12450, 12711), False, 'from torchrec.distributed.embedding_lookup import GroupedPooledEmbeddingsLookup, InferGroupedPooledEmbeddingsLookup\n'), ((13783, 13881), 'torchrec.distributed.embedding_sharding.SparseFeaturesOneToAll', 'SparseFeaturesOneToAll', (['id_list_features_per_rank', 'id_score_list_features_per_rank', 'world_size'], {}), '(id_list_features_per_rank,\n id_score_list_features_per_rank, world_size)\n', (13805, 13881), False, 'from torchrec.distributed.embedding_sharding import EmbeddingSharding, SparseFeaturesAllToAll, SparseFeaturesOneToAll, group_tables, BaseEmbeddingDist, BaseSparseFeaturesDist, BaseEmbeddingLookup\n'), ((14906, 14947), 'torchrec.distributed.dist_data.EmbeddingsAllToOne', 'EmbeddingsAllToOne', (['device', 'world_size', '(1)'], {}), '(device, world_size, 1)\n', (14924, 14947), False, 'from torchrec.distributed.dist_data import EmbeddingsAllToOne, PooledEmbeddingsAllToAll\n'), ((16213, 16437), 'torchrec.distributed.embedding_lookup.InferGroupedPooledEmbeddingsLookup', 'InferGroupedPooledEmbeddingsLookup', ([], {'grouped_configs_per_rank': 'self._grouped_embedding_configs_per_rank', 'grouped_score_configs_per_rank': 'self._score_grouped_embedding_configs_per_rank', 'world_size': 'self._world_size'}), '(grouped_configs_per_rank=self.\n _grouped_embedding_configs_per_rank, grouped_score_configs_per_rank=\n self._score_grouped_embedding_configs_per_rank, world_size=self._world_size\n )\n', (16247, 16437), False, 'from torchrec.distributed.embedding_lookup import 
GroupedPooledEmbeddingsLookup, InferGroupedPooledEmbeddingsLookup\n'), ((3393, 3456), 'torch.Size', 'torch.Size', (['[config[0].num_embeddings, config[0].embedding_dim]'], {}), '([config[0].num_embeddings, config[0].embedding_dim])\n', (3403, 3456), False, 'import torch\n'), ((4257, 4305), 'torchrec.distributed.embedding_types.EmbeddingComputeKernel', 'EmbeddingComputeKernel', (['config[1].compute_kernel'], {}), '(config[1].compute_kernel)\n', (4279, 4305), False, 'from torchrec.distributed.embedding_types import SparseFeaturesList, GroupedEmbeddingConfig, SparseFeatures, ShardedEmbeddingTable, EmbeddingComputeKernel, BaseGroupedFeatureProcessor\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import cast, List, Optional
from unittest.mock import MagicMock
import torch
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.proposers import (
GreedyProposer,
GridSearchProposer,
proposers_to_proposals_list,
UniformProposer,
)
from torchrec.distributed.planner.types import Proposer, ShardingOption, Topology
from torchrec.distributed.test_utils.test_model import TestSparseNN
from torchrec.distributed.types import ModuleSharder, ShardingType
from torchrec.modules.embedding_configs import EmbeddingBagConfig
class MockProposer(Proposer):
def load(
self,
search_space: List[ShardingOption],
) -> None:
pass
def feedback(
self,
partitionable: bool,
plan: Optional[List[ShardingOption]] = None,
perf_rating: Optional[float] = None,
) -> None:
pass
def propose(self) -> Optional[List[ShardingOption]]:
pass
class TestProposers(unittest.TestCase):
def setUp(self) -> None:
topology = Topology(world_size=2, compute_device="cuda")
self.enumerator = EmbeddingEnumerator(topology=topology)
self.greedy_proposer = GreedyProposer()
self.uniform_proposer = UniformProposer()
self.grid_search_proposer = GridSearchProposer()
def test_greedy_two_table(self) -> None:
tables = [
EmbeddingBagConfig(
num_embeddings=100,
embedding_dim=10,
name="table_0",
feature_names=["feature_0"],
),
EmbeddingBagConfig(
num_embeddings=100,
embedding_dim=10,
name="table_1",
feature_names=["feature_1"],
),
]
model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
search_space = self.enumerator.enumerate(
module=model,
sharders=[
cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())
],
)
self.greedy_proposer.load(search_space)
# simulate first five iterations:
output = []
for _ in range(5):
proposal = cast(List[ShardingOption], self.greedy_proposer.propose())
proposal.sort(
key=lambda sharding_option: (
max([shard.perf for shard in sharding_option.shards]),
sharding_option.name,
)
)
output.append(
[
(
candidate.name,
candidate.sharding_type,
candidate.compute_kernel,
)
for candidate in proposal
]
)
self.greedy_proposer.feedback(partitionable=True)
expected_output = [
[
("table_0", "row_wise", "batched_fused"),
("table_1", "row_wise", "batched_fused"),
],
[
("table_0", "table_row_wise", "batched_fused"),
("table_1", "row_wise", "batched_fused"),
],
[
("table_1", "row_wise", "batched_fused"),
("table_0", "data_parallel", "batched_dense"),
],
[
("table_1", "table_row_wise", "batched_fused"),
("table_0", "data_parallel", "batched_dense"),
],
[
("table_0", "data_parallel", "batched_dense"),
("table_1", "data_parallel", "batched_dense"),
],
]
self.assertEqual(expected_output, output)
def test_uniform_three_table(self) -> None:
tables = [
EmbeddingBagConfig(
num_embeddings=100 * i,
embedding_dim=10 * i,
name="table_" + str(i),
feature_names=["feature_" + str(i)],
)
for i in range(1, 4)
]
model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
mock_ebc_sharder = cast(
ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder()
)
# TODO update this test for CW and TWCW sharding
mock_ebc_sharder.sharding_types = MagicMock(
return_value=[
ShardingType.DATA_PARALLEL.value,
ShardingType.TABLE_WISE.value,
ShardingType.ROW_WISE.value,
ShardingType.TABLE_ROW_WISE.value,
]
)
self.maxDiff = None
search_space = self.enumerator.enumerate(
module=model, sharders=[mock_ebc_sharder]
)
self.uniform_proposer.load(search_space)
output = []
proposal = self.uniform_proposer.propose()
while proposal:
proposal.sort(
key=lambda sharding_option: (
max([shard.perf for shard in sharding_option.shards]),
sharding_option.name,
)
)
output.append(
[
(
candidate.name,
candidate.sharding_type,
candidate.compute_kernel,
)
for candidate in proposal
]
)
self.uniform_proposer.feedback(partitionable=True)
proposal = self.uniform_proposer.propose()
expected_output = [
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_2",
"data_parallel",
"batched_dense",
),
(
"table_3",
"data_parallel",
"batched_dense",
),
],
[
(
"table_1",
"table_wise",
"batched_fused",
),
(
"table_2",
"table_wise",
"batched_fused",
),
(
"table_3",
"table_wise",
"batched_fused",
),
],
[
(
"table_1",
"row_wise",
"batched_fused",
),
(
"table_2",
"row_wise",
"batched_fused",
),
(
"table_3",
"row_wise",
"batched_fused",
),
],
[
(
"table_1",
"table_row_wise",
"batched_fused",
),
(
"table_2",
"table_row_wise",
"batched_fused",
),
(
"table_3",
"table_row_wise",
"batched_fused",
),
],
]
self.assertEqual(expected_output, output)
def test_grid_search_three_table(self) -> None:
tables = [
EmbeddingBagConfig(
num_embeddings=100 * i,
embedding_dim=10 * i,
name="table_" + str(i),
feature_names=["feature_" + str(i)],
)
for i in range(1, 4)
]
model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
search_space = self.enumerator.enumerate(
module=model,
sharders=[
cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())
],
)
"""
All sharding types but DP will have 3 possible compute kernels after pruning:
- batched_fused
- batched_fused_uvm_caching
- batched_fused_uvm
DP will have 1 possible compute kernel: batched_dense
So the total number of pruned options will be:
(num_sharding_types - 1) * 3 + 1 = 16
"""
num_pruned_options = (len(ShardingType) - 1) * 3 + 1
self.grid_search_proposer.load(search_space)
for (
sharding_options
) in self.grid_search_proposer._sharding_options_by_fqn.values():
# number of sharding types after pruning is number of sharding types * 3
# 3 compute kernels batched_fused/batched_dense, batched_fused_uvm_caching, batched_fused_uvm
self.assertEqual(len(sharding_options), num_pruned_options)
num_proposals = 0
proposal = self.grid_search_proposer.propose()
while proposal:
self.grid_search_proposer.feedback(partitionable=True)
proposal = self.grid_search_proposer.propose()
num_proposals += 1
self.assertEqual(num_pruned_options ** len(tables), num_proposals)
def test_proposers_to_proposals_list(self) -> None:
def make_mock_proposal(name: str) -> List[ShardingOption]:
return [
ShardingOption(
name=name,
tensor=torch.zeros(1),
# pyre-ignore
module=("model", None),
upstream_modules=[],
downstream_modules=[],
input_lengths=[],
batch_size=8,
sharding_type="row_wise",
partition_by="DEVICE",
compute_kernel="batched_fused",
shards=[],
)
]
mock_proposer_1 = MockProposer()
mock_proposer_1_sharding_options = [
make_mock_proposal("p1so1"),
make_mock_proposal("p1so2"),
make_mock_proposal("p1so1"),
None,
]
mock_proposer_1.propose = MagicMock(
side_effect=mock_proposer_1_sharding_options
)
mock_proposer_2 = MockProposer()
mock_proposer_2_sharding_options = [
make_mock_proposal("p2so1"),
make_mock_proposal("p2so1"),
make_mock_proposal("p1so2"),
make_mock_proposal("p2so2"),
None,
]
mock_proposer_2.propose = MagicMock(
side_effect=mock_proposer_2_sharding_options
)
mock_proposer_3 = MockProposer()
mock_proposer_3_sharding_options = [
make_mock_proposal("p3so1"),
make_mock_proposal("p2so1"),
make_mock_proposal("p3so2"),
None,
]
mock_proposer_3.propose = MagicMock(
side_effect=mock_proposer_3_sharding_options
)
proposers_list: List[Proposer] = [
mock_proposer_1,
mock_proposer_2,
mock_proposer_3,
]
proposals_list = proposers_to_proposals_list(proposers_list, search_space=[])
proposals_list_names = []
for sharding_option in proposals_list:
proposals_list_names.append(sharding_option[0].name)
expected_list_names = ["p1so1", "p1so2", "p2so1", "p2so2", "p3so1", "p3so2"]
self.assertEqual(proposals_list_names, expected_list_names)
| [
"torchrec.distributed.planner.proposers.GridSearchProposer",
"torchrec.modules.embedding_configs.EmbeddingBagConfig",
"torchrec.distributed.planner.proposers.GreedyProposer",
"torchrec.distributed.planner.proposers.proposers_to_proposals_list",
"torchrec.distributed.planner.types.Topology",
"torchrec.dist... | [((1404, 1449), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': '(2)', 'compute_device': '"""cuda"""'}), "(world_size=2, compute_device='cuda')\n", (1412, 1449), False, 'from torchrec.distributed.planner.types import Proposer, ShardingOption, Topology\n'), ((1476, 1514), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology'}), '(topology=topology)\n', (1495, 1514), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((1546, 1562), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {}), '()\n', (1560, 1562), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, GridSearchProposer, proposers_to_proposals_list, UniformProposer\n'), ((1595, 1612), 'torchrec.distributed.planner.proposers.UniformProposer', 'UniformProposer', ([], {}), '()\n', (1610, 1612), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, GridSearchProposer, proposers_to_proposals_list, UniformProposer\n'), ((1649, 1669), 'torchrec.distributed.planner.proposers.GridSearchProposer', 'GridSearchProposer', ([], {}), '()\n', (1667, 1669), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, GridSearchProposer, proposers_to_proposals_list, UniformProposer\n'), ((4708, 4871), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '[ShardingType.DATA_PARALLEL.value, ShardingType.TABLE_WISE.value,\n ShardingType.ROW_WISE.value, ShardingType.TABLE_ROW_WISE.value]'}), '(return_value=[ShardingType.DATA_PARALLEL.value, ShardingType.\n TABLE_WISE.value, ShardingType.ROW_WISE.value, ShardingType.\n TABLE_ROW_WISE.value])\n', (4717, 4871), False, 'from unittest.mock import MagicMock\n'), ((10581, 10636), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'mock_proposer_1_sharding_options'}), '(side_effect=mock_proposer_1_sharding_options)\n', (10590, 10636), 
False, 'from unittest.mock import MagicMock\n'), ((10972, 11027), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'mock_proposer_2_sharding_options'}), '(side_effect=mock_proposer_2_sharding_options)\n', (10981, 11027), False, 'from unittest.mock import MagicMock\n'), ((11322, 11377), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'mock_proposer_3_sharding_options'}), '(side_effect=mock_proposer_3_sharding_options)\n', (11331, 11377), False, 'from unittest.mock import MagicMock\n'), ((11567, 11627), 'torchrec.distributed.planner.proposers.proposers_to_proposals_list', 'proposers_to_proposals_list', (['proposers_list'], {'search_space': '[]'}), '(proposers_list, search_space=[])\n', (11594, 11627), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, GridSearchProposer, proposers_to_proposals_list, UniformProposer\n'), ((1747, 1852), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_0"""', 'feature_names': "['feature_0']"}), "(num_embeddings=100, embedding_dim=10, name='table_0',\n feature_names=['feature_0'])\n", (1765, 1852), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1941, 2046), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_1"""', 'feature_names': "['feature_1']"}), "(num_embeddings=100, embedding_dim=10, name='table_1',\n feature_names=['feature_1'])\n", (1959, 2046), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((4567, 4598), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (4596, 4598), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n'), ((2192, 2212), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (2204, 
2212), False, 'import torch\n'), ((4467, 4487), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (4479, 4487), False, 'import torch\n'), ((8177, 8197), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (8189, 8197), False, 'import torch\n'), ((2366, 2397), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (2395, 2397), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n'), ((8351, 8382), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (8380, 8382), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n'), ((9855, 9869), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (9866, 9869), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import Any, Dict, Iterator, List, Optional, Tuple
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
rounded_row_size_in_bytes,
)
from torchrec.distributed.batched_embedding_kernel import (
BaseBatchedEmbedding,
BaseBatchedEmbeddingBag,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding
from torchrec.distributed.embedding_types import (
compute_kernel_to_embedding_location,
GroupedEmbeddingConfig,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
logger: logging.Logger = logging.getLogger(__name__)
def _copy_config(
    original: GroupedEmbeddingConfig,
    data_type: DataType,
    sparse_type: SparseType,
    device: torch.device,
) -> GroupedEmbeddingConfig:
    """Deep-copy ``original`` and adjust it for the quantized kernel.

    Each table's ``local_cols`` is padded to the quantized row size in
    bytes, and the local/global shard metadata is rewritten to match the
    new column counts. As noted in the original, this obviously does not
    work for column-wise sharding.
    """
    # fbgemm requires 16-byte row alignment on GPU; no alignment on CPU.
    alignment = 16 if device.type == "cuda" else 1
    config = copy.deepcopy(original)
    config.data_type = data_type
    for tbl in config.embedding_tables:
        tbl.local_cols = rounded_row_size_in_bytes(
            tbl.local_cols, sparse_type, alignment
        )
        if tbl.local_metadata is not None:
            tbl.local_metadata.shard_sizes = [tbl.local_rows, tbl.local_cols]
        gmeta = tbl.global_metadata
        if gmeta is None:
            continue
        # Recompute every remote shard's byte width; the local shard was
        # already updated above.
        for meta in gmeta.shards_metadata:
            if meta == tbl.local_metadata:
                continue
            meta.shard_sizes = [
                meta.shard_sizes[0],
                rounded_row_size_in_bytes(
                    meta.shard_sizes[1], sparse_type, alignment
                ),
            ]
        total_cols = sum(meta.shard_sizes[1] for meta in gmeta.shards_metadata)
        gmeta.size = torch.Size([gmeta.size[0], total_cols])
    return config
def _quantize_weight(
state_dict: Dict[str, torch.Tensor], data_type: DataType
) -> List[Tuple[torch.Tensor, Optional[torch.Tensor]]]:
quant_weight_list = []
for weight in state_dict.values():
if weight.dtype == torch.float or weight.dtype == torch.float16:
quantized_weights = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
weight, DATA_TYPE_NUM_BITS[data_type]
)
)
else:
raise Exception("Unsupported dtype: {weight.dtype}")
# weight and 4 byte scale shift (2xfp16)
quant_weight = quantized_weights[:, :-4]
scale_shift = quantized_weights[:, -4:]
quant_weight_list.append((quant_weight, scale_shift))
return quant_weight_list
class QuantBatchedEmbeddingBag(BaseBatchedEmbeddingBag):
    """Inference-time pooled embedding kernel backed by fbgemm's
    ``IntNBitTableBatchedEmbeddingBagsCodegen``.

    Holds the tables of one ``GroupedEmbeddingConfig`` in quantized form.
    Instances are normally built from a trained float module via
    :meth:`from_float`.
    """

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        # pyre-fixme[11]
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(config, pg, device)
        # Pick a memory location per table: on CUDA, derive it from the
        # table's compute kernel; otherwise fall back to host memory.
        managed: List[EmbeddingLocation] = []
        for table in config.embedding_tables:
            if device is not None and device.type == "cuda":
                managed.append(
                    compute_kernel_to_embedding_location(table.compute_kernel)
                )
            else:
                managed.append(EmbeddingLocation.HOST)
        self._emb_module: IntNBitTableBatchedEmbeddingBagsCodegen = (
            IntNBitTableBatchedEmbeddingBagsCodegen(
                embedding_specs=[
                    (
                        "",
                        local_rows,
                        table.embedding_dim,
                        data_type_to_sparse_type(config.data_type),
                        location,
                    )
                    for local_rows, table, location in zip(
                        self._local_rows, config.embedding_tables, managed
                    )
                ],
                device=device,
                pooling_mode=self._pooling,
                feature_table_map=self._feature_table_map,
                output_dtype=SparseType.FP32,
                row_alignment=16,
                **(fused_params or {}),
            )
        )
        # "meta" tensors carry no storage, so there is nothing to initialize.
        if device is not None and device.type != "meta":
            self._emb_module.initialize_weights()

    def init_parameters(self) -> None:
        # Weights come pre-quantized (see from_float); nothing to initialize.
        pass

    @property
    def emb_module(
        self,
    ) -> IntNBitTableBatchedEmbeddingBagsCodegen:
        """The underlying fbgemm quantized embedding-bag module."""
        return self._emb_module

    def forward(self, features: KeyedJaggedTensor) -> torch.Tensor:
        """Look up and pool embeddings for the given sparse features.

        Indices/offsets are cast to int32 as required by the fbgemm kernel.
        """
        return self.emb_module(
            indices=features.values().int(),
            offsets=features.offsets().int(),
            per_sample_weights=features.weights_or_none(),
        )

    def named_buffers(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield ``(prefixed_name, weight)`` per table; ``weight[0]`` is the
        quantized weight half of fbgemm's (weight, scale_shift) pair."""
        for config, weight in zip(
            self._config.embedding_tables,
            self.emb_module.split_embedding_weights(),
        ):
            yield append_prefix(prefix, f"{config.name}.weight"), weight[0]

    def split_embedding_weights(self) -> List[torch.Tensor]:
        """Return one fused weight tensor per table (scale/shift bytes kept
        inline because split_scale_shifts=False)."""
        return [
            weight
            for weight, _ in self.emb_module.split_embedding_weights(
                split_scale_shifts=False
            )
        ]

    @classmethod
    def from_float(cls, module: BaseEmbedding) -> "QuantBatchedEmbeddingBag":
        """Build a quantized module from a trained float ``BaseEmbedding``.

        The float module must carry a ``qconfig`` whose weight observer dtype
        selects the target quantized data type.
        """
        assert hasattr(
            module, "qconfig"
        ), "BaseEmbedding input float module must have qconfig defined"
        # pyre-ignore [16]
        data_type = dtype_to_data_type(module.qconfig.weight().dtype)
        sparse_type = data_type_to_sparse_type(data_type)
        state_dict = dict(module.named_buffers())
        # All buffers are assumed to live on one device — TODO confirm.
        device = next(iter(state_dict.values())).device
        config = _copy_config(module.config, data_type, sparse_type, device)
        ret = QuantBatchedEmbeddingBag(config=config, device=device)
        quant_weight_list = _quantize_weight(state_dict, data_type)
        ret.emb_module.assign_embedding_weights(quant_weight_list)
        return ret
class QuantBatchedEmbedding(BaseBatchedEmbedding):
    """Inference-time sequence (non-pooled) embedding kernel backed by
    fbgemm's ``IntNBitTableBatchedEmbeddingBagsCodegen``.

    Mirrors :class:`QuantBatchedEmbeddingBag` but runs with
    ``PoolingMode.NONE`` so per-id embeddings are returned unreduced.
    """

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
        fused_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(config, pg, device)
        # Pick a memory location per table: on CUDA, derive it from the
        # table's compute kernel; otherwise fall back to host memory.
        managed: List[EmbeddingLocation] = []
        for table in config.embedding_tables:
            if device is not None and device.type == "cuda":
                managed.append(
                    compute_kernel_to_embedding_location(table.compute_kernel)
                )
            else:
                managed.append(EmbeddingLocation.HOST)
        self._emb_module: IntNBitTableBatchedEmbeddingBagsCodegen = (
            IntNBitTableBatchedEmbeddingBagsCodegen(
                embedding_specs=[
                    (
                        "",
                        local_rows,
                        table.embedding_dim,
                        data_type_to_sparse_type(config.data_type),
                        location,
                    )
                    for local_rows, table, location in zip(
                        self._local_rows, config.embedding_tables, managed
                    )
                ],
                device=device,
                pooling_mode=PoolingMode.NONE,
                feature_table_map=self._feature_table_map,
                output_dtype=SparseType.FP32,
                row_alignment=16,
                **(fused_params or {}),
            )
        )
        # "meta" tensors carry no storage, so there is nothing to initialize.
        if device is not None and device.type != "meta":
            self._emb_module.initialize_weights()

    @property
    def emb_module(
        self,
    ) -> IntNBitTableBatchedEmbeddingBagsCodegen:
        """The underlying fbgemm quantized embedding module."""
        return self._emb_module

    def split_embedding_weights(self) -> List[torch.Tensor]:
        """Return one fused weight tensor per table (scale/shift bytes kept
        inline because split_scale_shifts=False)."""
        return [
            weight
            for weight, _ in self.emb_module.split_embedding_weights(
                split_scale_shifts=False
            )
        ]

    def forward(self, features: KeyedJaggedTensor) -> torch.Tensor:
        """Look up embeddings for the given sparse features (no pooling).

        Indices/offsets are cast to int32 as required by the fbgemm kernel.
        """
        return self.emb_module(
            indices=features.values().int(),
            offsets=features.offsets().int(),
        )

    def named_buffers(
        self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield ``(prefixed_name, weight)`` per table; ``weight[0]`` is the
        quantized weight half of fbgemm's (weight, scale_shift) pair."""
        for config, weight in zip(
            self._config.embedding_tables,
            self.emb_module.split_embedding_weights(),
        ):
            yield append_prefix(prefix, f"{config.name}.weight"), weight[0]

    @classmethod
    def from_float(cls, module: BaseEmbedding) -> "QuantBatchedEmbedding":
        """Build a quantized module from a trained float ``BaseEmbedding``.

        The float module must carry a ``qconfig`` whose weight observer dtype
        selects the target quantized data type.
        """
        assert hasattr(
            module, "qconfig"
        ), "BaseEmbedding input float module must have qconfig defined"
        # pyre-ignore [16]
        data_type = dtype_to_data_type(module.qconfig.weight().dtype)
        sparse_type = data_type_to_sparse_type(data_type)
        state_dict = dict(module.named_buffers())
        # All buffers are assumed to live on one device — TODO confirm.
        device = next(iter(state_dict.values())).device
        config = _copy_config(module.config, data_type, sparse_type, device)
        ret = QuantBatchedEmbedding(config=config, device=device)
        quant_weight_list = _quantize_weight(state_dict, data_type)
        ret.emb_module.assign_embedding_weights(quant_weight_list)
        return ret
| [
"torchrec.distributed.utils.append_prefix",
"torchrec.modules.embedding_configs.data_type_to_sparse_type",
"torchrec.distributed.embedding_types.compute_kernel_to_embedding_location"
] | [((1191, 1218), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1208, 1218), False, 'import logging\n'), ((1501, 1524), 'copy.deepcopy', 'copy.deepcopy', (['original'], {}), '(original)\n', (1514, 1524), False, 'import copy\n'), ((1686, 1757), 'fbgemm_gpu.split_table_batched_embeddings_ops.rounded_row_size_in_bytes', 'rounded_row_size_in_bytes', (['table.local_cols', 'sparse_type', 'row_alignment'], {}), '(table.local_cols, sparse_type, row_alignment)\n', (1711, 1757), False, 'from fbgemm_gpu.split_table_batched_embeddings_ops import EmbeddingLocation, IntNBitTableBatchedEmbeddingBagsCodegen, PoolingMode, rounded_row_size_in_bytes\n'), ((6633, 6668), 'torchrec.modules.embedding_configs.data_type_to_sparse_type', 'data_type_to_sparse_type', (['data_type'], {}), '(data_type)\n', (6657, 6668), False, 'from torchrec.modules.embedding_configs import DATA_TYPE_NUM_BITS, data_type_to_sparse_type, DataType, dtype_to_data_type\n'), ((10024, 10059), 'torchrec.modules.embedding_configs.data_type_to_sparse_type', 'data_type_to_sparse_type', (['data_type'], {}), '(data_type)\n', (10048, 10059), False, 'from torchrec.modules.embedding_configs import DATA_TYPE_NUM_BITS, data_type_to_sparse_type, DataType, dtype_to_data_type\n'), ((3105, 3209), 'torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf', 'torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf', (['weight', 'DATA_TYPE_NUM_BITS[data_type]'], {}), '(weight,\n DATA_TYPE_NUM_BITS[data_type])\n', (3166, 3209), False, 'import torch\n'), ((4148, 4206), 'torchrec.distributed.embedding_types.compute_kernel_to_embedding_location', 'compute_kernel_to_embedding_location', (['table.compute_kernel'], {}), '(table.compute_kernel)\n', (4184, 4206), False, 'from torchrec.distributed.embedding_types import compute_kernel_to_embedding_location, GroupedEmbeddingConfig\n'), ((6000, 6046), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 
'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (6013, 6046), False, 'from torchrec.distributed.utils import append_prefix\n'), ((7620, 7678), 'torchrec.distributed.embedding_types.compute_kernel_to_embedding_location', 'compute_kernel_to_embedding_location', (['table.compute_kernel'], {}), '(table.compute_kernel)\n', (7656, 7678), False, 'from torchrec.distributed.embedding_types import compute_kernel_to_embedding_location, GroupedEmbeddingConfig\n'), ((9627, 9673), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (9640, 9673), False, 'from torchrec.distributed.utils import append_prefix\n'), ((2285, 2370), 'fbgemm_gpu.split_table_batched_embeddings_ops.rounded_row_size_in_bytes', 'rounded_row_size_in_bytes', (['shard_meta.shard_sizes[1]', 'sparse_type', 'row_alignment'], {}), '(shard_meta.shard_sizes[1], sparse_type, row_alignment\n )\n', (2310, 2370), False, 'from fbgemm_gpu.split_table_batched_embeddings_ops import EmbeddingLocation, IntNBitTableBatchedEmbeddingBagsCodegen, PoolingMode, rounded_row_size_in_bytes\n'), ((4610, 4652), 'torchrec.modules.embedding_configs.data_type_to_sparse_type', 'data_type_to_sparse_type', (['config.data_type'], {}), '(config.data_type)\n', (4634, 4652), False, 'from torchrec.modules.embedding_configs import DATA_TYPE_NUM_BITS, data_type_to_sparse_type, DataType, dtype_to_data_type\n'), ((8082, 8124), 'torchrec.modules.embedding_configs.data_type_to_sparse_type', 'data_type_to_sparse_type', (['config.data_type'], {}), '(config.data_type)\n', (8106, 8124), False, 'from torchrec.modules.embedding_configs import DATA_TYPE_NUM_BITS, data_type_to_sparse_type, DataType, dtype_to_data_type\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import click
import dataloader as torcharrow_dataloader
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch.distributed.elastic.multiprocessing.errors import record
from torchrec import EmbeddingBagCollection
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, INT_FEATURE_COUNT
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.models.dlrm import DLRM
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.optim.keyed import KeyedOptimizerWrapper
@record
@click.command()
@click.option("--batch_size", default=256)
@click.option("--num_embeddings", default=2048)
@click.option("--sigrid_hash_salt", default=0)
@click.option("--parquet_directory", default="/data/criteo_preproc")
def main(
    batch_size: int,
    num_embeddings: int,
    sigrid_hash_salt: int,
    parquet_directory: str,
) -> None:
    """Train a DLRM model on preprocessed Criteo parquet data, with
    torcharrow performing last-mile preprocessing in the dataloader.

    Expects to run under torchrun/torchelastic: LOCAL_RANK must be set and
    one process per device is assumed.
    """
    rank = int(os.environ["LOCAL_RANK"])
    if torch.cuda.is_available():
        device = torch.device(f"cuda:{rank}")
        backend = "nccl"
        torch.cuda.set_device(device)
    else:
        device = torch.device("cpu")
        backend = "gloo"
        print(
            "\033[92m"
            + f"WARNING: Running in CPU mode. cuda availablility {torch.cuda.is_available()}."
        )
    dist.init_process_group(backend=backend)
    world_size = dist.get_world_size()
    dataloader = torcharrow_dataloader.get_dataloader(
        parquet_directory,
        world_size,
        rank,
        batch_size=batch_size,
        num_embeddings=num_embeddings,
        salt=sigrid_hash_salt,
    )
    it = iter(dataloader)
    # Embedding tables are created on the "meta" device (no storage); DMP
    # below materializes only the shards owned by this rank.
    model = DLRM(
        embedding_bag_collection=EmbeddingBagCollection(
            tables=[
                EmbeddingBagConfig(
                    name=f"table_{cat_name}",
                    embedding_dim=64,
                    num_embeddings=num_embeddings,
                    feature_names=[cat_name],
                )
                for cat_name in DEFAULT_CAT_NAMES + ["bucketize_int_0"]
            ],
            device=torch.device("meta"),
        ),
        dense_in_features=INT_FEATURE_COUNT,
        dense_arch_layer_sizes=[64],
        over_arch_layer_sizes=[32, 1],
        dense_device=device,
    )
    # Sparse (embedding) parameters are updated by the fused rowwise-Adagrad
    # optimizer inside the sharded kernels, configured here.
    fused_params = {
        "learning_rate": 0.02,
        "optimizer": EmbOptimType.EXACT_ROWWISE_ADAGRAD,
    }
    sharded_model = DistributedModelParallel(
        module=model,
        device=device,
        sharders=[
            EmbeddingBagCollectionSharder(fused_params=fused_params),
        ],
    )
    # Dense parameters are driven by SGD through the keyed wrapper.
    # NOTE(review): parameters are taken from the unsharded `model`, not
    # `sharded_model` — confirm this picks up the intended dense params.
    optimizer = KeyedOptimizerWrapper(
        dict(model.named_parameters()),
        lambda params: torch.optim.SGD(params, lr=0.01),
    )
    loss_fn = torch.nn.BCEWithLogitsLoss()
    # Only rank 0 prints a sample batch, and only once.
    print_example = dist.get_rank() == 0
    for (dense_features, kjt, labels) in it:
        if print_example:
            print("Example dense_features", dense_features)
            print("Example KJT input", kjt)
            print_example = False
        dense_features = dense_features.to(device)
        kjt = kjt.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        preds = sharded_model(dense_features, kjt)
        loss = loss_fn(preds.squeeze(), labels.squeeze())
        loss.sum().backward()
        optimizer.step()
    print("\033[92m" + "DLRM run with torcharrow last-mile preprocessing finished!")
if __name__ == "__main__":
main()
| [
"torchrec.modules.embedding_configs.EmbeddingBagConfig",
"torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder"
] | [((910, 925), 'click.command', 'click.command', ([], {}), '()\n', (923, 925), False, 'import click\n'), ((927, 968), 'click.option', 'click.option', (['"""--batch_size"""'], {'default': '(256)'}), "('--batch_size', default=256)\n", (939, 968), False, 'import click\n'), ((970, 1016), 'click.option', 'click.option', (['"""--num_embeddings"""'], {'default': '(2048)'}), "('--num_embeddings', default=2048)\n", (982, 1016), False, 'import click\n'), ((1018, 1063), 'click.option', 'click.option', (['"""--sigrid_hash_salt"""'], {'default': '(0)'}), "('--sigrid_hash_salt', default=0)\n", (1030, 1063), False, 'import click\n'), ((1065, 1132), 'click.option', 'click.option', (['"""--parquet_directory"""'], {'default': '"""/data/criteo_preproc"""'}), "('--parquet_directory', default='/data/criteo_preproc')\n", (1077, 1132), False, 'import click\n'), ((1283, 1308), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1306, 1308), False, 'import torch\n'), ((1639, 1679), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': 'backend'}), '(backend=backend)\n', (1662, 1679), True, 'import torch.distributed as dist\n'), ((1698, 1719), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (1717, 1719), True, 'import torch.distributed as dist\n'), ((1738, 1897), 'dataloader.get_dataloader', 'torcharrow_dataloader.get_dataloader', (['parquet_directory', 'world_size', 'rank'], {'batch_size': 'batch_size', 'num_embeddings': 'num_embeddings', 'salt': 'sigrid_hash_salt'}), '(parquet_directory, world_size, rank,\n batch_size=batch_size, num_embeddings=num_embeddings, salt=sigrid_hash_salt\n )\n', (1774, 1897), True, 'import dataloader as torcharrow_dataloader\n'), ((3069, 3097), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (3095, 3097), False, 'import torch\n'), ((1327, 1355), 'torch.device', 'torch.device', (['f"""cuda:{rank}"""'], {}), "(f'cuda:{rank}')\n", (1339, 1355), False, 
'import torch\n'), ((1389, 1418), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (1410, 1418), False, 'import torch\n'), ((1446, 1465), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1458, 1465), False, 'import torch\n'), ((3119, 3134), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (3132, 3134), True, 'import torch.distributed as dist\n'), ((3014, 3046), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.01)'}), '(params, lr=0.01)\n', (3029, 3046), False, 'import torch\n'), ((2836, 2892), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {'fused_params': 'fused_params'}), '(fused_params=fused_params)\n', (2865, 2892), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n'), ((2408, 2428), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (2420, 2428), False, 'import torch\n'), ((1595, 1620), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1618, 1620), False, 'import torch\n'), ((2083, 2206), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': 'f"""table_{cat_name}"""', 'embedding_dim': '(64)', 'num_embeddings': 'num_embeddings', 'feature_names': '[cat_name]'}), "(name=f'table_{cat_name}', embedding_dim=64,\n num_embeddings=num_embeddings, feature_names=[cat_name])\n", (2101, 2206), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import os
import random
import tempfile
import uuid
from typing import Callable, Dict, List, Optional, Tuple, Type
from unittest.mock import Mock, patch
import torch
import torch.distributed as dist
import torch.distributed.launcher as pet
from torchrec.metrics.model_utils import parse_task_model_outputs
from torchrec.metrics.rec_metric import RecComputeMode, RecMetric, RecTaskInfo
TestRecMetricOutput = Tuple[
Dict[str, torch.Tensor],
Dict[str, torch.Tensor],
Dict[str, torch.Tensor],
Dict[str, torch.Tensor],
]
def gen_test_batch(
    batch_size: int,
    label_name: str = "label",
    prediction_name: str = "prediction",
    weight_name: str = "weight",
    tensor_name: str = "tensor",
    mask_tensor_name: Optional[str] = None,
    label_value: Optional[torch.Tensor] = None,
    prediction_value: Optional[torch.Tensor] = None,
    weight_value: Optional[torch.Tensor] = None,
    mask: Optional[torch.Tensor] = None,
) -> Dict[str, torch.Tensor]:
    """Build a synthetic model-output batch for metric tests.

    Labels default to random 0/1 doubles, predictions and weights to
    uniform random doubles; any of them can be pinned via the
    ``*_value`` arguments. A mask entry is added only when
    ``mask_tensor_name`` is given (defaulting to all-ones).
    """
    label = (
        label_value
        if label_value is not None
        else torch.randint(0, 2, (batch_size,)).double()
    )
    prediction = (
        prediction_value
        if prediction_value is not None
        else torch.rand(batch_size, dtype=torch.double)
    )
    weight = (
        weight_value
        if weight_value is not None
        else torch.rand(batch_size, dtype=torch.double)
    )
    batch = {
        label_name: label,
        prediction_name: prediction,
        weight_name: weight,
        tensor_name: torch.rand(batch_size, dtype=torch.double),
    }
    if mask_tensor_name is not None:
        batch[mask_tensor_name] = (
            torch.ones(batch_size, dtype=torch.double) if mask is None else mask
        )
    return batch
def gen_test_tasks(
    task_names: List[str],
) -> List[RecTaskInfo]:
    """Create one ``RecTaskInfo`` per name, using the conventional
    ``<name>-label`` / ``<name>-prediction`` / ``<name>-weight`` keys."""
    tasks = []
    for name in task_names:
        tasks.append(
            RecTaskInfo(
                name=name,
                label_name=f"{name}-label",
                prediction_name=f"{name}-prediction",
                weight_name=f"{name}-weight",
            )
        )
    return tasks
def gen_test_timestamps(
    nsteps: int,
) -> List[float]:
    """Return ``nsteps`` monotonically increasing fake timestamps.

    The first timestamp is 0.0 and each subsequent one is the previous
    plus a uniform random gap in [1.0, 5.0] seconds.
    """
    timestamps: List[float] = []
    now = 0.0
    for step in range(nsteps):
        if step > 0:
            now += random.uniform(1.0, 5.0)
        timestamps.append(now)
    return timestamps
class TestMetric(abc.ABC):
    """Reference metric used to validate ``RecMetric`` implementations.

    Subclasses define how per-batch states are extracted
    (:meth:`_get_states`) and how a final value is computed from
    accumulated states (:meth:`_compute`); :meth:`compute` then replays a
    list of model outputs and returns lifetime/window metrics, both
    cross-rank ("global") and local-rank variants.
    """

    def __init__(
        self,
        world_size: int,
        rec_tasks: List[RecTaskInfo],
        compute_lifetime_metric: bool = True,
        compute_window_metric: bool = True,
        local_compute_lifetime_metric: bool = True,
        local_compute_window_metric: bool = True,
    ) -> None:
        self.world_size = world_size
        self._rec_tasks = rec_tasks
        # Flags toggle which of the four metric variants compute() fills in;
        # disabled variants are returned as tensor(0.0).
        self._compute_lifetime_metric = compute_lifetime_metric
        self._compute_window_metric = compute_window_metric
        self._local_compute_lifetime_metric = local_compute_lifetime_metric
        self._local_compute_window_metric = local_compute_window_metric

    @staticmethod
    def _aggregate(
        states: Dict[str, torch.Tensor], new_states: Dict[str, torch.Tensor]
    ) -> None:
        """Element-wise add ``new_states`` into ``states`` in place,
        creating zero-initialized entries for unseen keys."""
        for k, v in new_states.items():
            if k not in states:
                states[k] = torch.zeros_like(v)
            states[k] += v

    @staticmethod
    @abc.abstractmethod
    def _get_states(
        labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
    ) -> Dict[str, torch.Tensor]:
        """Extract the metric's accumulable states from one batch."""
        pass

    @staticmethod
    @abc.abstractmethod
    def _compute(states: Dict[str, torch.Tensor]) -> torch.Tensor:
        """Reduce accumulated states to the final metric value."""
        pass

    def compute(
        self,
        model_outs: List[Dict[str, torch.Tensor]],
        nsteps: int,
        batch_window_size: int,
        timestamps: Optional[List[float]],
    ) -> TestRecMetricOutput:
        """Replay ``nsteps`` of ``model_outs`` and return
        ``(lifetime, window, local_lifetime, local_window)`` metric dicts,
        each keyed by task name.

        Must run inside an initialized process group: global states are
        built from all-gathered batches across ``world_size`` ranks.
        """
        aggregated_model_out = {}
        # Four independent per-task state dicts (note: generator of dicts,
        # unpacked into the four names).
        lifetime_states, window_states, local_lifetime_states, local_window_states = (
            {task_info.name: {} for task_info in self._rec_tasks} for _ in range(4)
        )
        for i in range(nsteps):
            # Gather this step's tensors from every rank and concatenate to
            # form the "global" batch.
            for k, v in model_outs[i].items():
                aggregated_list = [torch.zeros_like(v) for _ in range(self.world_size)]
                dist.all_gather(aggregated_list, v)
                aggregated_model_out[k] = torch.cat(aggregated_list)
            for task_info in self._rec_tasks:
                states = self._get_states(
                    aggregated_model_out[task_info.label_name],
                    aggregated_model_out[task_info.prediction_name],
                    aggregated_model_out[task_info.weight_name],
                )
                if self._compute_lifetime_metric:
                    self._aggregate(lifetime_states[task_info.name], states)
                # Window variants only accumulate the last
                # `batch_window_size` steps.
                if self._compute_window_metric and nsteps - batch_window_size <= i:
                    self._aggregate(window_states[task_info.name], states)
                # Local variants use only this rank's batch (no all_gather).
                local_states = self._get_states(
                    model_outs[i][task_info.label_name],
                    model_outs[i][task_info.prediction_name],
                    model_outs[i][task_info.weight_name],
                )
                if self._local_compute_lifetime_metric:
                    self._aggregate(local_lifetime_states[task_info.name], local_states)
                if (
                    self._local_compute_window_metric
                    and nsteps - batch_window_size <= i
                ):
                    self._aggregate(local_window_states[task_info.name], local_states)
        lifetime_metrics = {}
        window_metrics = {}
        local_lifetime_metrics = {}
        local_window_metrics = {}
        for task_info in self._rec_tasks:
            lifetime_metrics[task_info.name] = (
                self._compute(lifetime_states[task_info.name])
                if self._compute_lifetime_metric
                else torch.tensor(0.0)
            )
            window_metrics[task_info.name] = (
                self._compute(window_states[task_info.name])
                if self._compute_window_metric
                else torch.tensor(0.0)
            )
            local_lifetime_metrics[task_info.name] = (
                self._compute(local_lifetime_states[task_info.name])
                if self._local_compute_lifetime_metric
                else torch.tensor(0.0)
            )
            local_window_metrics[task_info.name] = (
                self._compute(local_window_states[task_info.name])
                if self._local_compute_window_metric
                else torch.tensor(0.0)
            )
        return (
            lifetime_metrics,
            window_metrics,
            local_lifetime_metrics,
            local_window_metrics,
        )
BATCH_SIZE = 32
BATCH_WINDOW_SIZE = 5
NSTEPS = 10
def rec_metric_value_test_helper(
    target_clazz: Type[RecMetric],
    target_compute_mode: RecComputeMode,
    test_clazz: Optional[Type[TestMetric]],
    fused_update_limit: int,
    compute_on_all_ranks: bool,
    world_size: int,
    my_rank: int,
    task_names: List[str],
    batch_size: int = BATCH_SIZE,
    nsteps: int = NSTEPS,
    batch_window_size: int = BATCH_WINDOW_SIZE,
    is_time_dependent: bool = False,
    time_dependent_metric: Optional[Dict[Type[RecMetric], str]] = None,
) -> Tuple[Dict[str, torch.Tensor], Tuple[Dict[str, torch.Tensor], ...]]:
    """Run ``target_clazz`` and the reference ``test_clazz`` over the same
    random batches and return both results for comparison.

    Returns a tuple of (metrics computed by the RecMetric under test,
    metrics computed by the reference TestMetric). When ``test_clazz`` is
    None the reference side is a tuple of empty dicts. For time-dependent
    metrics, ``time_dependent_metric`` maps the metric class to its module
    path so ``time.monotonic`` can be patched with generated timestamps.
    """
    tasks = gen_test_tasks(task_names)
    # Pre-generate nsteps of model output; each step merges the per-task
    # batches into one flat dict keyed by label/prediction/weight names.
    model_outs = []
    for _ in range(nsteps):
        _model_outs = [
            gen_test_batch(
                label_name=task.label_name,
                prediction_name=task.prediction_name,
                weight_name=task.weight_name,
                batch_size=batch_size,
            )
            for task in tasks
        ]
        model_outs.append({k: v for d in _model_outs for k, v in d.items()})

    def get_target_rec_metric_value(
        model_outs: List[Dict[str, torch.Tensor]],
        tasks: List[RecTaskInfo],
        timestamps: Optional[List[float]] = None,
        time_mock: Optional[Mock] = None,
    ) -> Dict[str, torch.Tensor]:
        # Drive the RecMetric under test through update/compute.
        window_size = world_size * batch_size * batch_window_size
        target_metric_obj = target_clazz(
            world_size=world_size,
            my_rank=my_rank,
            batch_size=batch_size,
            tasks=tasks,
            compute_mode=target_compute_mode,
            window_size=window_size,
            fused_update_limit=fused_update_limit,
            compute_on_all_ranks=compute_on_all_ranks,
        )
        for i in range(nsteps):
            labels, predictions, weights = parse_task_model_outputs(
                tasks, model_outs[i]
            )
            # Fused-task mode expects stacked (task x batch) tensors rather
            # than per-task dicts.
            if target_compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
                labels = torch.stack(list(labels.values()))
                predictions = torch.stack(list(predictions.values()))
                weights = torch.stack(list(weights.values()))

            if timestamps is not None:
                time_mock.return_value = timestamps[i]

            target_metric_obj.update(
                predictions=predictions, labels=labels, weights=weights
            )
        result_metrics = target_metric_obj.compute()
        result_metrics.update(target_metric_obj.local_compute())
        return result_metrics

    def get_test_rec_metric_value(
        model_outs: List[Dict[str, torch.Tensor]],
        tasks: List[RecTaskInfo],
        timestamps: Optional[List[float]] = None,
    ) -> TestRecMetricOutput:
        # Compute the reference values; empty dicts when no test_clazz given.
        test_metrics: TestRecMetricOutput = ({}, {}, {}, {})
        if test_clazz is not None:
            # pyre-ignore[45]: Cannot instantiate abstract class `TestMetric`.
            test_metric_obj = test_clazz(world_size, tasks)
            test_metrics = test_metric_obj.compute(
                model_outs, nsteps, batch_window_size, timestamps
            )
        return test_metrics

    if is_time_dependent:
        timestamps: Optional[List[float]] = (
            gen_test_timestamps(nsteps) if is_time_dependent else None
        )
        assert time_dependent_metric is not None  # avoid typing issue
        time_dependent_target_clazz_path = time_dependent_metric[target_clazz]
        # Patch the metric module's monotonic clock so both sides see the
        # same synthetic timestamps.
        with patch(time_dependent_target_clazz_path + ".time.monotonic") as time_mock:
            result_metrics = get_target_rec_metric_value(
                model_outs, tasks, timestamps, time_mock
            )
        test_metrics = get_test_rec_metric_value(model_outs, tasks, timestamps)
    else:
        result_metrics = get_target_rec_metric_value(model_outs, tasks)
        test_metrics = get_test_rec_metric_value(model_outs, tasks)

    return result_metrics, test_metrics
def get_launch_config(world_size: int, rdzv_endpoint: str) -> pet.LaunchConfig:
    """Build a single-node elastic launch config that spawns
    ``world_size`` worker processes, rendezvousing via a c10d file store
    at ``rdzv_endpoint``."""
    run_id = str(uuid.uuid4())
    return pet.LaunchConfig(
        min_nodes=1,
        max_nodes=1,
        nproc_per_node=world_size,
        run_id=run_id,
        rdzv_backend="c10d",
        rdzv_endpoint=rdzv_endpoint,
        rdzv_configs={"store_type": "file"},
        start_method="spawn",
        monitor_interval=1,
        max_restarts=0,
    )
def rec_metric_value_test_launcher(
    target_clazz: Type[RecMetric],
    target_compute_mode: RecComputeMode,
    test_clazz: Type[TestMetric],
    task_names: List[str],
    fused_update_limit: int,
    compute_on_all_ranks: bool,
    world_size: int,
    entry_point: Callable[..., None],
    test_nsteps: int = 1,
) -> None:
    """Spawn ``world_size`` processes running ``entry_point`` via torch
    elastic, after first exercising the helper single-rank in-process so
    coverage tooling sees it."""
    with tempfile.TemporaryDirectory() as tmpdir:
        launch_config = get_launch_config(
            world_size=world_size, rdzv_endpoint=os.path.join(tmpdir, "rdzv")
        )

        # Call the same helper as the actual test to make code coverage
        # visible to the testing system.
        rec_metric_value_test_helper(
            target_clazz,
            target_compute_mode,
            test_clazz=None,
            fused_update_limit=fused_update_limit,
            compute_on_all_ranks=compute_on_all_ranks,
            world_size=1,
            my_rank=0,
            task_names=task_names,
            batch_size=32,
            nsteps=test_nsteps,
            batch_window_size=1,
        )

        launcher = pet.elastic_launch(launch_config, entrypoint=entry_point)
        launcher(
            target_clazz,
            target_compute_mode,
            task_names,
            fused_update_limit,
            compute_on_all_ranks,
        )
def rec_metric_accuracy_test_helper(
    world_size: int, entry_point: Callable[..., None]
) -> None:
    """Launch ``entry_point`` on ``world_size`` local processes via torch
    elastic, using a throwaway file-store rendezvous directory."""
    with tempfile.TemporaryDirectory() as tmpdir:
        rdzv = os.path.join(tmpdir, "rdzv")
        launch_config = get_launch_config(world_size=world_size, rdzv_endpoint=rdzv)
        pet.elastic_launch(launch_config, entrypoint=entry_point)()
| [
"torchrec.metrics.model_utils.parse_task_model_outputs",
"torchrec.metrics.rec_metric.RecTaskInfo"
] | [((1459, 1501), 'torch.rand', 'torch.rand', (['batch_size'], {'dtype': 'torch.double'}), '(batch_size, dtype=torch.double)\n', (1469, 1501), False, 'import torch\n'), ((1592, 1634), 'torch.rand', 'torch.rand', (['batch_size'], {'dtype': 'torch.double'}), '(batch_size, dtype=torch.double)\n', (1602, 1634), False, 'import torch\n'), ((1768, 1810), 'torch.rand', 'torch.rand', (['batch_size'], {'dtype': 'torch.double'}), '(batch_size, dtype=torch.double)\n', (1778, 1810), False, 'import torch\n'), ((2103, 2250), 'torchrec.metrics.rec_metric.RecTaskInfo', 'RecTaskInfo', ([], {'name': 'task_name', 'label_name': 'f"""{task_name}-label"""', 'prediction_name': 'f"""{task_name}-prediction"""', 'weight_name': 'f"""{task_name}-weight"""'}), "(name=task_name, label_name=f'{task_name}-label',\n prediction_name=f'{task_name}-prediction', weight_name=\n f'{task_name}-weight')\n", (2114, 2250), False, 'from torchrec.metrics.rec_metric import RecComputeMode, RecMetric, RecTaskInfo\n'), ((2506, 2530), 'random.uniform', 'random.uniform', (['(1.0)', '(5.0)'], {}), '(1.0, 5.0)\n', (2520, 2530), False, 'import random\n'), ((11712, 11741), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (11739, 11741), False, 'import tempfile\n'), ((12733, 12762), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (12760, 12762), False, 'import tempfile\n'), ((1899, 1941), 'torch.ones', 'torch.ones', (['batch_size'], {'dtype': 'torch.double'}), '(batch_size, dtype=torch.double)\n', (1909, 1941), False, 'import torch\n'), ((8849, 8895), 'torchrec.metrics.model_utils.parse_task_model_outputs', 'parse_task_model_outputs', (['tasks', 'model_outs[i]'], {}), '(tasks, model_outs[i])\n', (8873, 8895), False, 'from torchrec.metrics.model_utils import parse_task_model_outputs\n'), ((10476, 10535), 'unittest.mock.patch', 'patch', (["(time_dependent_target_clazz_path + '.time.monotonic')"], {}), "(time_dependent_target_clazz_path + 
'.time.monotonic')\n", (10481, 10535), False, 'from unittest.mock import Mock, patch\n'), ((12413, 12459), 'torch.distributed.launcher.elastic_launch', 'pet.elastic_launch', (['lc'], {'entrypoint': 'entry_point'}), '(lc, entrypoint=entry_point)\n', (12431, 12459), True, 'import torch.distributed.launcher as pet\n'), ((12902, 12948), 'torch.distributed.launcher.elastic_launch', 'pet.elastic_launch', (['lc'], {'entrypoint': 'entry_point'}), '(lc, entrypoint=entry_point)\n', (12920, 12948), True, 'import torch.distributed.launcher as pet\n'), ((1309, 1343), 'torch.randint', 'torch.randint', (['(0)', '(2)', '(batch_size,)'], {}), '(0, 2, (batch_size,))\n', (1322, 1343), False, 'import torch\n'), ((3521, 3540), 'torch.zeros_like', 'torch.zeros_like', (['v'], {}), '(v)\n', (3537, 3540), False, 'import torch\n'), ((4488, 4523), 'torch.distributed.all_gather', 'dist.all_gather', (['aggregated_list', 'v'], {}), '(aggregated_list, v)\n', (4503, 4523), True, 'import torch.distributed as dist\n'), ((4566, 4592), 'torch.cat', 'torch.cat', (['aggregated_list'], {}), '(aggregated_list)\n', (4575, 4592), False, 'import torch\n'), ((6162, 6179), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6174, 6179), False, 'import torch\n'), ((6370, 6387), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6382, 6387), False, 'import torch\n'), ((6602, 6619), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6614, 6619), False, 'import torch\n'), ((6828, 6845), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6840, 6845), False, 'import torch\n'), ((11157, 11169), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11167, 11169), False, 'import uuid\n'), ((11834, 11862), 'os.path.join', 'os.path.join', (['tmpdir', '"""rdzv"""'], {}), "(tmpdir, 'rdzv')\n", (11846, 11862), False, 'import os\n'), ((12855, 12883), 'os.path.join', 'os.path.join', (['tmpdir', '"""rdzv"""'], {}), "(tmpdir, 'rdzv')\n", (12867, 12883), False, 'import os\n'), ((4419, 4438), 
'torch.zeros_like', 'torch.zeros_like', (['v'], {}), '(v)\n', (4435, 4438), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
# from pathlib import Path
import argparse
import os
import sys
from typing import Any, cast, Dict, List, Union
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data_utils
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torch import distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.model_parallel import DistributedModelParallel as DMP
from torchrec.distributed.types import ModuleSharder
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from tqdm import tqdm
# OSS import
try:
# pyre-ignore[21]
# @manual=//torchrec/github/examples/bert4rec:bert4rec_metrics
from bert4rec_metrics import recalls_and_ndcgs_for_ks
# pyre-ignore[21]
# @manual=//torchrec/github/examples/bert4rec/data:bert4rec_movielens_datasets
from data.bert4rec_movielens_datasets import Bert4RecPreprocsser, get_raw_dataframe
# pyre-ignore[21]
# @manual=//torchrec/github/examples/bert4rec/dataloader:bert4rec_movielens_dataloader
from dataloader.bert4rec_movielens_dataloader import Bert4RecDataloader
# pyre-ignore[21]
# @manual=//torchrec/github/examples/bert4rec/models:bert4rec
from models.bert4rec import BERT4Rec
except ImportError:
pass
# internal import
try:
from .bert4rec_metrics import recalls_and_ndcgs_for_ks # noqa F811
from .data.bert4rec_movielens_datasets import ( # noqa F811
Bert4RecPreprocsser,
get_raw_dataframe,
)
from .dataloader.bert4rec_movielens_dataloader import ( # noqa F811
Bert4RecDataloader,
)
from .models.bert4rec import BERT4Rec # noqa F811
except ImportError:
pass
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(description="torchrec + lightning app")
parser.add_argument(
"--min_user_count", type=int, default=5, help="minimum user ratings count"
)
parser.add_argument(
"--min_item_count",
type=int,
default=0,
help="minimum item count for each valid user",
)
parser.add_argument(
"--max_len",
type=int,
default=100,
help="max length of the Bert embedding dimension",
)
parser.add_argument(
"--mask_prob",
type=float,
default=0.15,
help="probability of the mask",
)
parser.add_argument(
"--dataset_name",
type=str,
default="ml-1m",
help="dataset for experiment, current support ml-1m, ml-20m",
)
parser.add_argument(
"--min_rating",
type=int,
default=0,
help="minimum valid rating",
)
parser.add_argument(
"--num_epochs",
type=int,
default=100,
help="the number of epoch to train",
)
parser.add_argument(
"--lr",
type=float,
default=0.001,
help="learning rate",
)
parser.add_argument(
"--decay_step",
type=int,
default="25",
help="the step of weight decay",
)
parser.add_argument(
"--weight_decay",
type=float,
default=0.0,
help="weight decay",
)
parser.add_argument(
"--gamma",
type=float,
default=1.0,
help="gamma of the lr scheduler",
)
parser.add_argument(
"--train_batch_size",
type=int,
default=128,
help="train batch size",
)
parser.add_argument(
"--val_batch_size",
type=int,
default=128,
help="val batch size",
)
parser.add_argument(
"--test_batch_size",
type=int,
default=128,
help="test batch size",
)
parser.add_argument(
"--emb_dim",
type=int,
default=256,
help="dimension of the hidden layer embedding",
)
parser.add_argument(
"--nhead",
type=int,
default=2,
help="number of header of attention",
)
parser.add_argument(
"--num_layers",
type=int,
default=2,
help="number of layers of attention",
)
parser.add_argument(
"--dataset_path",
type=str,
default=None,
help="Path to a folder containing the dataset.",
)
parser.add_argument(
"--export_root",
type=str,
default="",
help="Path to save the trained model",
)
parser.add_argument(
"--random_user_count",
type=int,
default=10,
help="number of random users",
)
parser.add_argument(
"--random_item_count",
type=int,
default=30,
help="number of random items",
)
parser.add_argument(
"--random_size",
type=int,
default=300,
help="number of random sample size",
)
parser.add_argument(
"--dupe_factor",
type=int,
default=3,
help="number of duplication while generating the random masked seqs",
)
parser.add_argument(
"--mode",
type=str,
default="dmp",
help="dmp (distirbuted model parallel) or ddp (distributed data parallel)",
)
return parser.parse_args(argv)
def _dict_mean(dict_list: List[Dict[str, float]]) -> Dict[str, float]:
mean_dict = {}
for key in dict_list[0].keys():
mean_dict[key] = np.mean([d[key] for d in dict_list], axis=0)
return mean_dict
def _to_kjt(seqs: torch.LongTensor, device: torch.device) -> KeyedJaggedTensor:
seqs_list = list(seqs)
lengths = torch.IntTensor([value.size(0) for value in seqs_list])
values = torch.cat(seqs_list, dim=0)
kjt = KeyedJaggedTensor.from_lengths_sync(
keys=["item"], values=values, lengths=lengths
).to(device)
return kjt
def _calculate_metrics(
model: Union[DDP, DMP],
batch: List[torch.LongTensor],
metric_ks: List[int],
device: torch.device,
) -> Dict[str, float]:
seqs, candidates, labels = batch
kjt = _to_kjt(seqs, device)
scores = model(kjt)
scores = scores[:, -1, :]
scores = scores.gather(1, candidates)
metrics = recalls_and_ndcgs_for_ks(scores, labels, metric_ks)
return metrics
def _train_one_epoch(
model: Union[DDP, DMP],
train_loader: data_utils.DataLoader,
device: torch.device,
optimizer: optim.Adam,
lr_scheduler: optim.lr_scheduler.StepLR,
epoch: int,
) -> None:
"""
Train model for 1 epoch. Helper function for train_val_test.
Args:
model (Union[DDP, DMP]): DMP or DDP model contains the Bert4Rec.
train_loader (data_utils.DataLoader): DataLoader used for training.
device (torch.device): the device to train/val/test
optimizer (optim.Adam): Adam optimizer to train the model
lr_scheduler (optim.lr_scheduler.StepLR): scheduler to control the learning rate
epoch (int): the current epoch number
Returns:
None.
"""
model.train()
if torch.cuda.is_available():
torch.cuda.set_device(dist.get_rank())
loss_logs = []
train_iterator = iter(train_loader)
ce = nn.CrossEntropyLoss(ignore_index=0)
outputs = [None for _ in range(dist.get_world_size())]
for _ in tqdm(iter(int, 1), desc=f"Epoch {epoch+1}"):
try:
batch = next(train_iterator)
batch = [x.to(device) for x in batch]
optimizer.zero_grad()
seqs, labels = batch
kjt = _to_kjt(seqs, device)
logits = model(kjt) # B x T x V
logits = logits.view(-1, logits.size(-1)) # (B*T) x V
labels = labels.view(-1) # B*T
loss = ce(logits, labels)
loss.backward()
optimizer.step()
loss_logs.append(loss.item())
except StopIteration:
break
dist.all_gather_object(outputs, sum(loss_logs) / len(loss_logs))
if dist.get_rank() == 0:
# pyre-fixme[6]: For 1st param expected `Iterable[Variable[_SumT (bound to
# _SupportsSum)]]` but got `List[None]`.
print(f"Epoch {epoch + 1}, average loss { (sum(outputs) or 0) /len(outputs)}")
lr_scheduler.step()
def _validate(
model: Union[DDP, DMP],
val_loader: data_utils.DataLoader,
device: torch.device,
epoch: int,
metric_ks: List[int],
is_testing: bool = False,
) -> None:
"""
Evaluate model. Computes and prints metrics including Recalls and NDCGs. Helper
function for train_val_test.
Args:
model (Union[DDP, DMP]): DMP or DDP model contains the Bert4Rec.
val_loader (data_utils.DataLoader): DataLoader used for validation.
device (torch.device): the device to train/val/test
epoch (int): the current epoch number
metric_ks (List[int]): the metrics we want to validate
is_testing (bool): if validation or testing
Returns:
None.
"""
model.eval()
if torch.cuda.is_available():
torch.cuda.set_device(dist.get_rank())
outputs = [None for _ in range(dist.get_world_size())]
keys = ["Recall@1", "Recall@5", "Recall@10", "NDCG@5", "NDCG@10"]
metrics_log: Dict[str, List[float]] = {key: [] for key in keys}
with torch.no_grad():
for _, batch in enumerate(val_loader):
batch = [x.to(device) for x in batch]
metrics = _calculate_metrics(model, batch, metric_ks, device)
for key in keys:
metrics_log[key].append(metrics[key])
metrics_avg = {
key: sum(values) / len(values) for key, values in metrics_log.items()
}
dist.all_gather_object(outputs, metrics_avg)
if dist.get_rank() == 0:
print(
# pyre-fixme[6] for 1st positional only parameter expected `List[Dict[str, float]]` but got `List[None]`
f"{'Epoch ' + str(epoch + 1) if not is_testing else 'Test'}, metrics {_dict_mean(outputs)}"
)
def train_val_test(
model: Union[DDP, DMP],
train_loader: data_utils.DataLoader,
val_loader: data_utils.DataLoader,
test_loader: data_utils.DataLoader,
device: torch.device,
optimizer: optim.Adam,
lr_scheduler: optim.lr_scheduler.StepLR,
num_epochs: int,
metric_ks: List[int],
export_root: str,
) -> None:
"""
Train/validation/test loop. Ensure the dataloader will do the shuffling on each rank
and will output the performance metrics like recalls and ndcgs
Args:
model (Union[DDP, DMP]): DMP or DDP model contains the Bert4Rec.
train_loader (data_utils.DataLoader): DataLoader used for training.
val_loader (data_utils.DataLoader): DataLoader used for validation.
test_loader (data_utils.DataLoader): DataLoader used for testing.
device (torch.device): the device to train/val/test
optimizer (optim.Adam): Adam optimizer to train the model
lr_scheduler (optim.lr_scheduler.StepLR): scheduler to control the learning rate
num_epochs (int): the number of epochs to train
metric_ks (List[int]): the metrics we want to validate
export_root (str): the export root of the saved models
Returns:
None.
"""
_validate(model, val_loader, device, -1, metric_ks)
for epoch in range(num_epochs):
# pyre-fixme[16] Undefined attribute [16]: has no attribute `set_epoch`
train_loader.sampler.set_epoch(epoch)
_train_one_epoch(
model,
train_loader,
device,
optimizer,
lr_scheduler,
epoch,
)
_validate(model, val_loader, device, epoch, metric_ks)
if (epoch + 1) % 10 == 0:
torch.save(
model.state_dict(),
export_root + f"epoch_{epoch + 1}_model.pth",
)
print(f"epoch {epoch + 1} model has been saved to {export_root}")
_validate(model, test_loader, device, num_epochs, metric_ks, True)
def main(argv: List[str]) -> None:
"""
Trains, validates, and tests a Bert4Rec Model
(https://arxiv.org/abs/1904.06690). The Bert4Rec model contains both data parallel
components (e.g. transformation blocks) and model parallel
components (e.g. item embeddings). The Bert4Rec model is pipelined so that dataloading,
data-parallel to model-parallel comms, and forward/backward are overlapped. Can be
run with either a random dataloader or the movielens dataset
(https://grouplens.org/datasets/movielens/).
Args:
argv (List[str]): command line args.
Returns:
None.
"""
args = parse_args(argv)
use_dmp = args.mode == "dmp"
metric_ks: List[int] = (
[1, 5, 10, 20, 50, 100] if args.dataset_name != "random" else [1, 5, 10]
)
rank = int(os.environ["LOCAL_RANK"])
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
backend = "nccl"
torch.cuda.set_device(device)
else:
device = torch.device("cpu")
backend = "gloo"
if not torch.distributed.is_initialized():
dist.init_process_group(backend=backend)
world_size = dist.get_world_size()
raw_data = get_raw_dataframe(
args.dataset_name,
args.random_user_count,
args.random_item_count,
args.random_size,
args.min_rating,
args.dataset_path,
)
df = Bert4RecPreprocsser(
raw_data,
args.min_rating,
args.min_user_count,
args.min_item_count,
args.dataset_name,
args.max_len,
args.mask_prob,
args.dupe_factor,
).get_processed_dataframes()
# 0 for padding, item_count + 1 for mask
vocab_size = len(df["smap"]) + 2
bert4recDataloader = Bert4RecDataloader(
df,
args.train_batch_size,
args.val_batch_size,
args.test_batch_size,
)
(
train_loader,
val_loader,
test_loader,
) = bert4recDataloader.get_pytorch_dataloaders(rank, world_size)
model_bert4rec = BERT4Rec(
vocab_size,
args.max_len,
emb_dim=args.emb_dim,
nhead=args.nhead,
num_layers=args.num_layers,
).to(device)
if use_dmp:
fused_params: Dict[str, Any] = {}
fused_params["optimizer"] = EmbOptimType.ADAM
fused_params["learning_rate"] = args.lr
fused_params["weight_decay"] = args.weight_decay
model = DMP(
module=model_bert4rec,
device=device,
sharders=[
cast(ModuleSharder[nn.Module], EmbeddingCollectionSharder(fused_params))
],
)
dense_optimizer = KeyedOptimizerWrapper(
dict(model.named_parameters()),
lambda params: optim.Adam(
params, lr=args.lr, weight_decay=args.weight_decay
),
)
optimizer = CombinedOptimizer([model.fused_optimizer, dense_optimizer])
else:
device_ids = [rank] if backend == "nccl" else None
"""
Another way to do DDP is to specify the sharding_type for TorchRec as Data_parallel
Here we provide an example of how to do it:
First we constraint the sharding_types to only use data_parallel in sharder,
then we use DMP to wrap it:
sharding_types = [ShardingType.DATA_PARALLEL.value]
constraints[
"item_embedding"
] = torchrec.distributed.planner.ParameterConstraints(sharding_types=sharding_types)
sharders = [
cast(ModuleSharder[nn.Module], EmbeddingCollectionSharder(fused_params))
]
pg = dist.GroupMember.WORLD
model = DMP(
module=model_bert4rec,
device=device,
plan=torchrec.distributed.planner.EmbeddingShardingPlanner(
topology=torchrec.distributed.planner.Topology(
world_size=world_size,
compute_device=device.type,
),
constraints=constraints
).collective_plan(model_bert4rec, sharders, pg),
sharders=sharders,
)
"""
model = DDP(model_bert4rec, device_ids=device_ids)
optimizer = optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
lr_scheduler = optim.lr_scheduler.StepLR(
optimizer, step_size=args.decay_step, gamma=args.gamma
)
train_val_test(
model,
train_loader,
val_loader,
test_loader,
device,
optimizer,
lr_scheduler,
args.num_epochs,
metric_ks,
args.export_root,
)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"torchrec.distributed.embedding.EmbeddingCollectionSharder",
"torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync",
"torchrec.optim.keyed.CombinedOptimizer"
] | [((2203, 2266), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""torchrec + lightning app"""'}), "(description='torchrec + lightning app')\n", (2226, 2266), False, 'import argparse\n'), ((6087, 6114), 'torch.cat', 'torch.cat', (['seqs_list'], {'dim': '(0)'}), '(seqs_list, dim=0)\n', (6096, 6114), False, 'import torch\n'), ((6593, 6644), 'bert4rec_metrics.recalls_and_ndcgs_for_ks', 'recalls_and_ndcgs_for_ks', (['scores', 'labels', 'metric_ks'], {}), '(scores, labels, metric_ks)\n', (6617, 6644), False, 'from bert4rec_metrics import recalls_and_ndcgs_for_ks\n'), ((7437, 7462), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7460, 7462), False, 'import torch\n'), ((7579, 7614), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': '(0)'}), '(ignore_index=0)\n', (7598, 7614), True, 'import torch.nn as nn\n'), ((9392, 9417), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9415, 9417), False, 'import torch\n'), ((10055, 10099), 'torch.distributed.all_gather_object', 'dist.all_gather_object', (['outputs', 'metrics_avg'], {}), '(outputs, metrics_avg)\n', (10077, 10099), True, 'from torch import distributed as dist\n'), ((13255, 13280), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13278, 13280), False, 'import torch\n'), ((13578, 13599), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (13597, 13599), True, 'from torch import distributed as dist\n'), ((13616, 13759), 'data.bert4rec_movielens_datasets.get_raw_dataframe', 'get_raw_dataframe', (['args.dataset_name', 'args.random_user_count', 'args.random_item_count', 'args.random_size', 'args.min_rating', 'args.dataset_path'], {}), '(args.dataset_name, args.random_user_count, args.\n random_item_count, args.random_size, args.min_rating, args.dataset_path)\n', (13633, 13759), False, 'from data.bert4rec_movielens_datasets import Bert4RecPreprocsser, get_raw_dataframe\n'), 
((14181, 14274), 'dataloader.bert4rec_movielens_dataloader.Bert4RecDataloader', 'Bert4RecDataloader', (['df', 'args.train_batch_size', 'args.val_batch_size', 'args.test_batch_size'], {}), '(df, args.train_batch_size, args.val_batch_size, args.\n test_batch_size)\n', (14199, 14274), False, 'from dataloader.bert4rec_movielens_dataloader import Bert4RecDataloader\n'), ((16807, 16893), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'args.decay_step', 'gamma': 'args.gamma'}), '(optimizer, step_size=args.decay_step, gamma=args.\n gamma)\n', (16832, 16893), True, 'import torch.optim as optim\n'), ((5829, 5873), 'numpy.mean', 'np.mean', (['[d[key] for d in dict_list]'], {'axis': '(0)'}), '([d[key] for d in dict_list], axis=0)\n', (5836, 5873), True, 'import numpy as np\n'), ((8367, 8382), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (8380, 8382), True, 'from torch import distributed as dist\n'), ((9673, 9688), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9686, 9688), False, 'import torch\n'), ((10107, 10122), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (10120, 10122), True, 'from torch import distributed as dist\n'), ((13299, 13327), 'torch.device', 'torch.device', (['f"""cuda:{rank}"""'], {}), "(f'cuda:{rank}')\n", (13311, 13327), False, 'import torch\n'), ((13361, 13390), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (13382, 13390), False, 'import torch\n'), ((13418, 13437), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (13430, 13437), False, 'import torch\n'), ((13475, 13509), 'torch.distributed.is_initialized', 'torch.distributed.is_initialized', ([], {}), '()\n', (13507, 13509), False, 'import torch\n'), ((13519, 13559), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': 'backend'}), '(backend=backend)\n', (13542, 13559), True, 'from torch import distributed as dist\n'), ((15310, 
15369), 'torchrec.optim.keyed.CombinedOptimizer', 'CombinedOptimizer', (['[model.fused_optimizer, dense_optimizer]'], {}), '([model.fused_optimizer, dense_optimizer])\n', (15327, 15369), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper\n'), ((16628, 16670), 'torch.nn.parallel.DistributedDataParallel', 'DDP', (['model_bert4rec'], {'device_ids': 'device_ids'}), '(model_bert4rec, device_ids=device_ids)\n', (16631, 16670), True, 'from torch.nn.parallel import DistributedDataParallel as DDP\n'), ((6126, 6213), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'keys': "['item']", 'values': 'values', 'lengths': 'lengths'}), "(keys=['item'], values=values, lengths=\n lengths)\n", (6161, 6213), False, 'from torchrec.sparse.jagged_tensor import KeyedJaggedTensor\n'), ((7494, 7509), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (7507, 7509), True, 'from torch import distributed as dist\n'), ((9449, 9464), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (9462, 9464), True, 'from torch import distributed as dist\n'), ((13820, 13985), 'data.bert4rec_movielens_datasets.Bert4RecPreprocsser', 'Bert4RecPreprocsser', (['raw_data', 'args.min_rating', 'args.min_user_count', 'args.min_item_count', 'args.dataset_name', 'args.max_len', 'args.mask_prob', 'args.dupe_factor'], {}), '(raw_data, args.min_rating, args.min_user_count, args.\n min_item_count, args.dataset_name, args.max_len, args.mask_prob, args.\n dupe_factor)\n', (13839, 13985), False, 'from data.bert4rec_movielens_datasets import Bert4RecPreprocsser, get_raw_dataframe\n'), ((14468, 14574), 'models.bert4rec.BERT4Rec', 'BERT4Rec', (['vocab_size', 'args.max_len'], {'emb_dim': 'args.emb_dim', 'nhead': 'args.nhead', 'num_layers': 'args.num_layers'}), '(vocab_size, args.max_len, emb_dim=args.emb_dim, nhead=args.nhead,\n num_layers=args.num_layers)\n', (14476, 14574), False, 'from models.bert4rec import 
BERT4Rec\n'), ((7650, 7671), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (7669, 7671), True, 'from torch import distributed as dist\n'), ((9501, 9522), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (9520, 9522), True, 'from torch import distributed as dist\n'), ((15186, 15248), 'torch.optim.Adam', 'optim.Adam', (['params'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay'}), '(params, lr=args.lr, weight_decay=args.weight_decay)\n', (15196, 15248), True, 'import torch.optim as optim\n'), ((14999, 15039), 'torchrec.distributed.embedding.EmbeddingCollectionSharder', 'EmbeddingCollectionSharder', (['fused_params'], {}), '(fused_params)\n', (15025, 15039), False, 'from torchrec.distributed.embedding import EmbeddingCollectionSharder\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import List, Optional, Type
import torch
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from hypothesis import Verbosity, settings, given, strategies as st
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.test_utils.test_model import TestSparseNNBase
from torchrec.distributed.test_utils.test_model_parallel import (
create_test_sharder,
SharderType,
)
from torchrec.distributed.test_utils.test_model_parallel_base import (
ModelParallelTestBase,
)
from torchrec.distributed.tests.test_sequence_model import (
TestSequenceSparseNN,
TestEmbeddingCollectionSharder,
TestSequenceTowerSparseNN,
)
from torchrec.distributed.types import ShardingType
from torchrec.modules.embedding_configs import EmbeddingConfig
from torchrec.test_utils import (
skip_if_asan_class,
seed_and_log,
)
@skip_if_asan_class
class SequenceModelParallelHierarchicalTest(ModelParallelTestBase):
"""
Testing hierarchical sharding types.
NOTE:
Requires at least 4 GPUs to test.
"""
@unittest.skipIf(
torch.cuda.device_count() <= 3,
"Not enough GPUs, this test requires at least four GPUs",
)
# pyre-ignore [56]
@given(
sharding_type=st.sampled_from(
[
ShardingType.TABLE_ROW_WISE.value,
]
),
kernel_type=st.sampled_from(
[
EmbeddingComputeKernel.DENSE.value,
EmbeddingComputeKernel.SPARSE.value,
EmbeddingComputeKernel.BATCHED_DENSE.value,
EmbeddingComputeKernel.BATCHED_FUSED.value,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=4, deadline=None)
def test_seq_emb_tower_nccl(self, sharding_type: str, kernel_type: str) -> None:
self._test_sharding(
# pyre-ignore [6]
sharders=[
create_test_sharder(
SharderType.EMBEDDING_TOWER.value, sharding_type, kernel_type
)
],
backend="nccl",
world_size=4,
local_size=2,
model_class=TestSequenceTowerSparseNN,
)
# TODO: consolidate the following methods with https://fburl.com/code/62zg0kel
@seed_and_log
def setUp(self) -> None:
super().setUp()
num_features = 4
self.tables = [
EmbeddingConfig(
num_embeddings=(i + 1) * 11,
embedding_dim=16,
name="table_" + str(i),
feature_names=["feature_" + str(i)],
)
for i in range(num_features)
]
self.embedding_groups = {
"group_0": ["feature_" + str(i) for i in range(num_features)]
}
def _test_sharding(
self,
sharders: List[TestEmbeddingCollectionSharder],
backend: str = "gloo",
world_size: int = 2,
local_size: Optional[int] = None,
model_class: Type[TestSparseNNBase] = TestSequenceSparseNN,
) -> None:
self._run_multi_process_test(
# pyre-ignore [6]
callable=self._test_sharding_single_rank,
world_size=world_size,
local_size=local_size,
model_class=model_class,
tables=self.tables,
embedding_groups=self.embedding_groups,
# pyre-fixme[6]
sharders=sharders,
optim=EmbOptimType.EXACT_SGD,
backend=backend,
)
| [
"torchrec.distributed.test_utils.test_model_parallel.create_test_sharder"
] | [((1945, 2013), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(4)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=4, deadline=None)\n', (1953, 2013), False, 'from hypothesis import Verbosity, settings, given, strategies as st\n'), ((1365, 1390), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1388, 1390), False, 'import torch\n'), ((1526, 1578), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.TABLE_ROW_WISE.value]'], {}), '([ShardingType.TABLE_ROW_WISE.value])\n', (1541, 1578), True, 'from hypothesis import Verbosity, settings, given, strategies as st\n'), ((1653, 1840), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.SPARSE.value,\n EmbeddingComputeKernel.BATCHED_DENSE.value, EmbeddingComputeKernel.\n BATCHED_FUSED.value]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .SPARSE.value, EmbeddingComputeKernel.BATCHED_DENSE.value,\n EmbeddingComputeKernel.BATCHED_FUSED.value])\n', (1668, 1840), True, 'from hypothesis import Verbosity, settings, given, strategies as st\n'), ((2197, 2283), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['SharderType.EMBEDDING_TOWER.value', 'sharding_type', 'kernel_type'], {}), '(SharderType.EMBEDDING_TOWER.value, sharding_type,\n kernel_type)\n', (2216, 2283), False, 'from torchrec.distributed.test_utils.test_model_parallel import create_test_sharder, SharderType\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import List, cast
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.proposers import GreedyProposer
from torchrec.distributed.planner.types import Topology, ShardingOption
from torchrec.distributed.tests.test_model import TestSparseNN
from torchrec.modules.embedding_configs import EmbeddingBagConfig
class TestGreedyProposer(unittest.TestCase):
def setUp(self) -> None:
topology = Topology(world_size=2, compute_device="cuda")
self.enumerator = EmbeddingEnumerator(topology=topology)
self.proposer = GreedyProposer()
def test_two_table_perf(self) -> None:
tables = [
EmbeddingBagConfig(
num_embeddings=100,
embedding_dim=10,
name="table_0",
feature_names=["feature_0"],
),
EmbeddingBagConfig(
num_embeddings=100,
embedding_dim=10,
name="table_1",
feature_names=["feature_1"],
),
]
model = TestSparseNN(tables=tables, weighted_tables=[])
search_space = self.enumerator.enumerate(
module=model, sharders=[EmbeddingBagCollectionSharder()]
)
self.proposer.load(search_space)
# simulate first five iterations:
output = []
for _ in range(5):
proposal = cast(List[ShardingOption], self.proposer.propose())
proposal.sort(
key=lambda sharding_option: (
max([shard.perf for shard in sharding_option.shards]),
sharding_option.name,
)
)
output.append(
[
(
candidate.name,
candidate.sharding_type,
candidate.compute_kernel,
)
for candidate in proposal
]
)
self.proposer.feedback(partitionable=True)
expected_output = [
[
(
"table_0",
"data_parallel",
"batched_dense",
),
(
"table_1",
"data_parallel",
"batched_dense",
),
],
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_0",
"data_parallel",
"dense",
),
],
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_0",
"row_wise",
"batched_fused",
),
],
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_0",
"table_wise",
"batched_fused",
),
],
[
(
"table_1",
"data_parallel",
"batched_dense",
),
(
"table_0",
"row_wise",
"batched_dense",
),
],
]
self.assertEqual(expected_output, output)
| [
"torchrec.distributed.planner.enumerators.EmbeddingEnumerator",
"torchrec.modules.embedding_configs.EmbeddingBagConfig",
"torchrec.distributed.planner.types.Topology",
"torchrec.distributed.planner.proposers.GreedyProposer",
"torchrec.distributed.tests.test_model.TestSparseNN",
"torchrec.distributed.embed... | [((797, 842), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': '(2)', 'compute_device': '"""cuda"""'}), "(world_size=2, compute_device='cuda')\n", (805, 842), False, 'from torchrec.distributed.planner.types import Topology, ShardingOption\n'), ((869, 907), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology'}), '(topology=topology)\n', (888, 907), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((932, 948), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {}), '()\n', (946, 948), False, 'from torchrec.distributed.planner.proposers import GreedyProposer\n'), ((1427, 1474), 'torchrec.distributed.tests.test_model.TestSparseNN', 'TestSparseNN', ([], {'tables': 'tables', 'weighted_tables': '[]'}), '(tables=tables, weighted_tables=[])\n', (1439, 1474), False, 'from torchrec.distributed.tests.test_model import TestSparseNN\n'), ((1024, 1129), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_0"""', 'feature_names': "['feature_0']"}), "(num_embeddings=100, embedding_dim=10, name='table_0',\n feature_names=['feature_0'])\n", (1042, 1129), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1218, 1323), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_1"""', 'feature_names': "['feature_1']"}), "(num_embeddings=100, embedding_dim=10, name='table_1',\n feature_names=['feature_1'])\n", (1236, 1323), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1561, 1592), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (1590, 1592), False, 'from 
torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from typing import Dict, List, Type
import torch
import torch.distributed as dist
from torchrec.metrics.calibration import CalibrationMetric
from torchrec.metrics.rec_metric import RecComputeMode, RecMetric
from torchrec.metrics.tests.test_utils import (
rec_metric_value_test_helper,
rec_metric_value_test_launcher,
TestMetric,
)
class TestCalibrationMetric(TestMetric):
@staticmethod
def _get_states(
labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> Dict[str, torch.Tensor]:
calibration_num = torch.sum(predictions * weights)
calibration_denom = torch.sum(labels * weights)
num_samples = torch.tensor(labels.size()[0]).double()
return {
"calibration_num": calibration_num,
"calibration_denom": calibration_denom,
"num_samples": num_samples,
}
@staticmethod
def _compute(states: Dict[str, torch.Tensor]) -> torch.Tensor:
return torch.where(
states["calibration_denom"] <= 0.0,
0.0,
states["calibration_num"] / states["calibration_denom"],
).double()
WORLD_SIZE = 4
class CalibrationMetricTest(unittest.TestCase):
    """Distributed correctness tests for CalibrationMetric.

    Compares the values produced by CalibrationMetric against the reference
    TestCalibrationMetric implementation across compute modes.
    """

    clazz: Type[RecMetric] = CalibrationMetric
    task_name: str = "calibration"

    @staticmethod
    def _test_calibration(
        target_clazz: Type[RecMetric],
        target_compute_mode: RecComputeMode,
        task_names: List[str],
        fused_update_limit: int = 0,
        compute_on_all_ranks: bool = False,
    ) -> None:
        """Entry point executed on every rank by the test launcher.

        Initializes a gloo process group, runs the metric alongside the
        reference implementation, and checks the lifetime/window metrics
        (global and local) on rank 0.
        """
        rank = int(os.environ["RANK"])
        world_size = int(os.environ["WORLD_SIZE"])
        dist.init_process_group(
            backend="gloo",
            world_size=world_size,
            rank=rank,
        )
        calibration_metrics, test_metrics = rec_metric_value_test_helper(
            target_clazz=target_clazz,
            target_compute_mode=target_compute_mode,
            test_clazz=TestCalibrationMetric,
            fused_update_limit=fused_update_limit,
            # Bug fix: forward the caller's flag instead of hard-coding False,
            # which silently ignored the `compute_on_all_ranks` parameter.
            compute_on_all_ranks=compute_on_all_ranks,
            world_size=world_size,
            my_rank=rank,
            task_names=task_names,
        )
        if rank == 0:
            for name in task_names:
                # test_metrics holds the reference values in the order:
                # (lifetime, window, local_lifetime, local_window).
                assert torch.allclose(
                    calibration_metrics[f"calibration-{name}|lifetime_calibration"],
                    test_metrics[0][name],
                )
                assert torch.allclose(
                    calibration_metrics[f"calibration-{name}|window_calibration"],
                    test_metrics[1][name],
                )
                assert torch.allclose(
                    calibration_metrics[
                        f"calibration-{name}|local_lifetime_calibration"
                    ],
                    test_metrics[2][name],
                )
                assert torch.allclose(
                    calibration_metrics[f"calibration-{name}|local_window_calibration"],
                    test_metrics[3][name],
                )
        dist.destroy_process_group()

    def test_unfused_calibration(self) -> None:
        rec_metric_value_test_launcher(
            target_clazz=CalibrationMetric,
            target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION,
            test_clazz=TestCalibrationMetric,
            task_names=["t1", "t2", "t3"],
            fused_update_limit=0,
            compute_on_all_ranks=False,
            world_size=WORLD_SIZE,
            entry_point=self._test_calibration,
        )

    def test_fused_calibration(self) -> None:
        rec_metric_value_test_launcher(
            target_clazz=CalibrationMetric,
            target_compute_mode=RecComputeMode.FUSED_TASKS_COMPUTATION,
            test_clazz=TestCalibrationMetric,
            task_names=["t1", "t2", "t3"],
            fused_update_limit=0,
            compute_on_all_ranks=False,
            world_size=WORLD_SIZE,
            entry_point=self._test_calibration,
        )
| [
"torchrec.metrics.tests.test_utils.rec_metric_value_test_launcher",
"torchrec.metrics.tests.test_utils.rec_metric_value_test_helper"
] | [((823, 855), 'torch.sum', 'torch.sum', (['(predictions * weights)'], {}), '(predictions * weights)\n', (832, 855), False, 'import torch\n'), ((884, 911), 'torch.sum', 'torch.sum', (['(labels * weights)'], {}), '(labels * weights)\n', (893, 911), False, 'import torch\n'), ((1912, 1985), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""gloo"""', 'world_size': 'world_size', 'rank': 'rank'}), "(backend='gloo', world_size=world_size, rank=rank)\n", (1935, 1985), True, 'import torch.distributed as dist\n'), ((2078, 2348), 'torchrec.metrics.tests.test_utils.rec_metric_value_test_helper', 'rec_metric_value_test_helper', ([], {'target_clazz': 'target_clazz', 'target_compute_mode': 'target_compute_mode', 'test_clazz': 'TestCalibrationMetric', 'fused_update_limit': 'fused_update_limit', 'compute_on_all_ranks': '(False)', 'world_size': 'world_size', 'my_rank': 'rank', 'task_names': 'task_names'}), '(target_clazz=target_clazz, target_compute_mode\n =target_compute_mode, test_clazz=TestCalibrationMetric,\n fused_update_limit=fused_update_limit, compute_on_all_ranks=False,\n world_size=world_size, my_rank=rank, task_names=task_names)\n', (2106, 2348), False, 'from torchrec.metrics.tests.test_utils import rec_metric_value_test_helper, rec_metric_value_test_launcher, TestMetric\n'), ((3304, 3332), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (3330, 3332), True, 'import torch.distributed as dist\n'), ((3390, 3704), 'torchrec.metrics.tests.test_utils.rec_metric_value_test_launcher', 'rec_metric_value_test_launcher', ([], {'target_clazz': 'CalibrationMetric', 'target_compute_mode': 'RecComputeMode.UNFUSED_TASKS_COMPUTATION', 'test_clazz': 'TestCalibrationMetric', 'task_names': "['t1', 't2', 't3']", 'fused_update_limit': '(0)', 'compute_on_all_ranks': '(False)', 'world_size': 'WORLD_SIZE', 'entry_point': 'self._test_calibration'}), "(target_clazz=CalibrationMetric,\n 
target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION,\n test_clazz=TestCalibrationMetric, task_names=['t1', 't2', 't3'],\n fused_update_limit=0, compute_on_all_ranks=False, world_size=WORLD_SIZE,\n entry_point=self._test_calibration)\n", (3420, 3704), False, 'from torchrec.metrics.tests.test_utils import rec_metric_value_test_helper, rec_metric_value_test_launcher, TestMetric\n'), ((3851, 4164), 'torchrec.metrics.tests.test_utils.rec_metric_value_test_launcher', 'rec_metric_value_test_launcher', ([], {'target_clazz': 'CalibrationMetric', 'target_compute_mode': 'RecComputeMode.FUSED_TASKS_COMPUTATION', 'test_clazz': 'TestCalibrationMetric', 'task_names': "['t1', 't2', 't3']", 'fused_update_limit': '(0)', 'compute_on_all_ranks': '(False)', 'world_size': 'WORLD_SIZE', 'entry_point': 'self._test_calibration'}), "(target_clazz=CalibrationMetric,\n target_compute_mode=RecComputeMode.FUSED_TASKS_COMPUTATION, test_clazz=\n TestCalibrationMetric, task_names=['t1', 't2', 't3'],\n fused_update_limit=0, compute_on_all_ranks=False, world_size=WORLD_SIZE,\n entry_point=self._test_calibration)\n", (3881, 4164), False, 'from torchrec.metrics.tests.test_utils import rec_metric_value_test_helper, rec_metric_value_test_launcher, TestMetric\n'), ((1242, 1356), 'torch.where', 'torch.where', (["(states['calibration_denom'] <= 0.0)", '(0.0)', "(states['calibration_num'] / states['calibration_denom'])"], {}), "(states['calibration_denom'] <= 0.0, 0.0, states[\n 'calibration_num'] / states['calibration_denom'])\n", (1253, 1356), False, 'import torch\n'), ((2525, 2632), 'torch.allclose', 'torch.allclose', (["calibration_metrics[f'calibration-{name}|lifetime_calibration']", 'test_metrics[0][name]'], {}), "(calibration_metrics[\n f'calibration-{name}|lifetime_calibration'], test_metrics[0][name])\n", (2539, 2632), False, 'import torch\n'), ((2710, 2815), 'torch.allclose', 'torch.allclose', (["calibration_metrics[f'calibration-{name}|window_calibration']", 'test_metrics[1][name]'], 
{}), "(calibration_metrics[f'calibration-{name}|window_calibration'\n ], test_metrics[1][name])\n", (2724, 2815), False, 'import torch\n'), ((2893, 3006), 'torch.allclose', 'torch.allclose', (["calibration_metrics[f'calibration-{name}|local_lifetime_calibration']", 'test_metrics[2][name]'], {}), "(calibration_metrics[\n f'calibration-{name}|local_lifetime_calibration'], test_metrics[2][name])\n", (2907, 3006), False, 'import torch\n'), ((3130, 3241), 'torch.allclose', 'torch.allclose', (["calibration_metrics[f'calibration-{name}|local_window_calibration']", 'test_metrics[3][name]'], {}), "(calibration_metrics[\n f'calibration-{name}|local_window_calibration'], test_metrics[3][name])\n", (3144, 3241), False, 'import torch\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
from functools import partial
from typing import (
Any,
Iterable,
List,
Mapping,
Optional,
Union,
cast,
Callable,
)
import pytorch_lightning as pl
import torch
from pyre_extensions import none_throws
from torch.utils.data import DataLoader, IterDataPipe
from torchrec.datasets.criteo import (
DEFAULT_CAT_NAMES,
DEFAULT_INT_NAMES,
DEFAULT_LABEL_NAME,
)
from torchrec.datasets.criteo import criteo_terabyte, criteo_kaggle
from torchrec.datasets.utils import rand_split_train_val, Batch
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrecipes.rec.datamodules.samplers.undersampler import ProportionUnderSampler
def _transform(
    batch: Mapping[str, Union[Iterable[str], torch.Tensor]],
    num_embeddings: Optional[int] = None,
    num_embeddings_per_feature: Optional[List[int]] = None,
) -> Batch:
    """Convert one raw Criteo batch into a torchrec ``Batch``.

    Integer features become log-transformed dense columns; categorical hex
    strings are hashed into a ``KeyedJaggedTensor`` (missing values get
    length 0).
    """
    # Dense part: the minimum value of the int features in the criteo
    # 1t/kaggle datasets is -1/-2, so shift by 3 before taking the log.
    dense_columns = [
        (torch.log(cast(torch.Tensor, batch[name]) + 3)).unsqueeze(0).T
        for name in DEFAULT_INT_NAMES
    ]
    dense_features = torch.cat(
        dense_columns,
        dim=1,
    )
    # Sparse part: one jagged entry per non-empty categorical value.
    kjt_values: List[int] = []
    kjt_lengths: List[int] = []
    for col_idx, col_name in enumerate(DEFAULT_CAT_NAMES):
        raw_values = cast(Iterable[str], batch[col_name])
        for raw in raw_values:
            if not raw:
                kjt_lengths.append(0)
                continue
            if num_embeddings is None:
                bucket = none_throws(num_embeddings_per_feature)[col_idx]
            else:
                bucket = num_embeddings
            kjt_values.append(int(raw, 16) % bucket)
            kjt_lengths.append(1)
    sparse_features = KeyedJaggedTensor.from_lengths_sync(
        DEFAULT_CAT_NAMES,
        torch.tensor(kjt_values),
        torch.tensor(kjt_lengths, dtype=torch.int32),
    )
    labels = batch[DEFAULT_LABEL_NAME]
    assert isinstance(labels, torch.Tensor)
    return Batch(
        dense_features=dense_features,
        sparse_features=sparse_features,
        labels=labels,
    )
class CriteoDataModule(pl.LightningDataModule):
    """`DataModule for Criteo 1TB Click Logs <https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/>`_ Dataset
    Args:
        num_days: number of days (out of 25) of data to use for train/validation
            only valid for criteo 1tb, as kaggle only have 1 train file
        num_days_test: number of days (out of 25) of data to use for testing
            only valid for criteo 1tb, the test data of kaggle does not label, thus not useable
        num_embeddings: the number of embeddings (hash size) of the categorical (sparse) features
        num_embeddings_per_feature: the number of embeddings (hash size) of the categorical (sparse) features
        batch_size: int
        num_workers: int
        train_percent: percent of data to use for training vs validation- 0.0 - 1.0
        read_chunk_size: int
        dataset_name: criteo_1t or criteo_kaggle,
            note that the test dataset of kaggle does not have label
        dataset_path: Path to the criteo dataset. Users MUST pass it
        undersampling_rate: 0.0 - 1.0. Desired proportion of zero-labeled samples to
            retain (i.e. undersampling zero-labeled rows). Ex. 0.3 indicates only 30%
            of the rows with label 0 will be kept. All rows with label 1 will be kept.
            Default: None, indicating no undersampling.
        seed: Random seed for reproducibility. Default: None.
        worker_init_fn: If not ``None``, this will be called on each worker subprocess with the
            worker id (an int in ``[0, num_workers - 1]``) as input, after seeding and before data
            loading. (default: ``None``)
    Examples:
        >>> dm = CriteoDataModule(num_days=1, batch_size=3, num_days_test=1)
        >>> dm.setup()
        >>> train_batch = next(iter(dm.train_dataloader()))
    """
    def __init__(
        self,
        num_days: int = 1,
        num_days_test: int = 0,
        num_embeddings: Optional[int] = 100000,
        num_embeddings_per_feature: Optional[List[int]] = None,
        batch_size: int = 32,
        train_percent: float = 0.8,
        num_workers: int = 0,
        read_chunk_size: int = 100000,
        dataset_name: str = "criteo_1t",
        # Optional here, but the docstring says users MUST pass a real path;
        # the f-strings below produce a "None/..." path otherwise.
        dataset_path: Optional[str] = None,
        undersampling_rate: Optional[float] = None,
        pin_memory: bool = False,
        seed: Optional[int] = None,
        worker_init_fn: Optional[Callable[[int], None]] = None,
    ) -> None:
        super().__init__()
        self._dataset_name: str = dataset_name
        self._dataset_path: Optional[str] = dataset_path
        # Validate the day ranges (only meaningful for the 1TB dataset).
        if dataset_name == "criteo_1t":
            if not (1 <= num_days <= 24):
                raise ValueError(
                    f"Dataset has only 24 days of data. User asked for {num_days} days"
                )
            if not (0 <= num_days_test <= 24):
                raise ValueError(
                    f"Dataset has only 24 days of data. User asked for {num_days_test} days"
                )
            if not (num_days + num_days_test <= 24):
                raise ValueError(
                    f"Dataset has only 24 days of data. User asked for {num_days} train days and {num_days_test} test days"
                )
        elif dataset_name != "criteo_kaggle":
            raise ValueError(
                f"Unknown dataset {self._dataset_name}. "
                + "Please choose {criteo_1t, criteo_kaggle} for dataset_name"
            )
        if not (0.0 <= train_percent <= 1.0):
            raise ValueError(f"train_percent {train_percent} must be between 0 and 1")
        # Exactly one of the two embedding-size configurations must be given.
        if (num_embeddings is None and num_embeddings_per_feature is None) or (
            num_embeddings is not None and num_embeddings_per_feature is not None
        ):
            raise ValueError(
                "One - and only one - of num_embeddings or num_embeddings_per_feature must be set."
            )
        if num_embeddings_per_feature is not None and len(
            num_embeddings_per_feature
        ) != len(DEFAULT_CAT_NAMES):
            raise ValueError(
                f"Length of num_embeddings_per_feature ({len(num_embeddings_per_feature)}) does not match the number"
                " of sparse features ({DEFAULT_CAT_NAMES})."
            )
        # TODO handle more workers for IterableDataset
        self.batch_size = batch_size
        self._num_workers = num_workers
        self._read_chunk_size = read_chunk_size
        self._num_days = num_days
        self._num_days_test = num_days_test
        self.num_embeddings = num_embeddings
        self.num_embeddings_per_feature = num_embeddings_per_feature
        self._train_percent = train_percent
        self._undersampling_rate = undersampling_rate
        self._pin_memory = pin_memory
        self._seed = seed
        self._worker_init_fn = worker_init_fn
        # Populated lazily by setup() depending on the requested stage.
        self._train_datapipe: Optional[IterDataPipe] = None
        self._val_datapipe: Optional[IterDataPipe] = None
        self._test_datapipe: Optional[IterDataPipe] = None
        self.keys: List[str] = DEFAULT_CAT_NAMES
    def _create_datapipe_1t(self, day_range: Iterable[int]) -> IterDataPipe:
        """Build a raw datapipe over the given 1TB-day files, optionally undersampled."""
        # TODO (T105042401): replace the file path by using a file in memory, reference by a file handler
        paths = [f"{self._dataset_path}/day_{day}.tsv" for day in day_range]
        datapipe = criteo_terabyte(
            paths,
            # this is important because without it, the reader will attempt to synchronously download the whole file...
            read_chunk_size=self._read_chunk_size,
        )
        undersampling_rate = self._undersampling_rate
        if undersampling_rate is not None:
            # Keep only the requested proportion of label-0 rows; all label-1 rows are kept.
            datapipe = ProportionUnderSampler(
                datapipe,
                self._get_label,
                {0: undersampling_rate, 1: 1.0},
                seed=self._seed,
            )
        return datapipe
    def _create_datapipe_kaggle(self, partition: str) -> IterDataPipe:
        """Build a raw datapipe over one Kaggle partition ("train" or "test")."""
        # note that there is no need to downsampling in in Kaggle dataset
        path = f"{self._dataset_path}/{partition}.txt"
        return criteo_kaggle(
            path,
            # this is important because without it, the reader will attempt to synchronously download the whole file...
            read_chunk_size=self._read_chunk_size,
        )
    @staticmethod
    # pyre-ignore[2, 3]
    def _get_label(row: Any) -> Any:
        """Extract the label column from a raw dataset row."""
        return row["label"]
    def _batch_collate_transform(self, datapipe: IterDataPipe) -> IterDataPipe:
        """Batch, collate, and convert raw rows into torchrec ``Batch`` objects."""
        _transform_partial = partial(
            _transform,
            num_embeddings=self.num_embeddings,
            num_embeddings_per_feature=self.num_embeddings_per_feature,
        )
        return datapipe.batch(self.batch_size).collate().map(_transform_partial)
    def setup(self, stage: Optional[str] = None) -> None:
        """Create the train/val and/or test datapipes for the requested stage."""
        if self._worker_init_fn is not None:
            # NOTE(review): also invoked once in the main process with id 0 —
            # presumably to mirror worker seeding; confirm against callers.
            self._worker_init_fn(0)
        if stage == "fit" or stage is None:
            if self._dataset_name == "criteo_1t":
                datapipe = self._create_datapipe_1t(range(self._num_days))
            elif self._dataset_name == "criteo_kaggle":
                datapipe = self._create_datapipe_kaggle("train")
            else:
                raise ValueError(
                    f"Unknown dataset {self._dataset_name}. "
                    + "Please choose {criteo_1t, criteo_kaggle} for dataset_name"
                )
            train_datapipe, val_datapipe = rand_split_train_val(
                datapipe, self._train_percent
            )
            self._train_datapipe = self._batch_collate_transform(train_datapipe)
            self._val_datapipe = self._batch_collate_transform(val_datapipe)
        if stage == "test" or stage is None:
            if self._dataset_name == "criteo_1t":
                # Test days follow immediately after the train/val days.
                datapipe = self._create_datapipe_1t(
                    range(self._num_days, self._num_days + self._num_days_test)
                )
            elif self._dataset_name == "criteo_kaggle":
                datapipe = self._create_datapipe_kaggle("test")
            else:
                raise ValueError(
                    f"Unknown dataset {self._dataset_name}. "
                    + "Please choose {criteo_1t, criteo_kaggle} for dataset_name"
                )
            self._test_datapipe = self._batch_collate_transform(datapipe)
    def _create_dataloader(self, datapipe: IterDataPipe) -> DataLoader:
        """Wrap a datapipe in a DataLoader (batching already done upstream)."""
        return DataLoader(
            datapipe,
            num_workers=self._num_workers,
            pin_memory=self._pin_memory,
            batch_size=None,
            batch_sampler=None,
            worker_init_fn=self._worker_init_fn,
        )
    def train_dataloader(self) -> DataLoader:
        """Return the training DataLoader; requires ``setup('fit')`` first."""
        datapipe = self._train_datapipe
        assert isinstance(datapipe, IterDataPipe)
        return self._create_dataloader(datapipe)
    def val_dataloader(self) -> DataLoader:
        """Return the validation DataLoader; requires ``setup('fit')`` first."""
        datapipe = self._val_datapipe
        assert isinstance(datapipe, IterDataPipe)
        return self._create_dataloader(datapipe)
    def test_dataloader(self) -> DataLoader:
        """Return the test DataLoader; requires ``setup('test')`` first."""
        if self._dataset_name == "criteo_1t":
            datapipe = self._test_datapipe
        elif self._dataset_name == "criteo_kaggle":
            # because kaggle test dataset does not have label
            # we use validation pipeline here
            datapipe = self._val_datapipe
        else:
            raise ValueError(
                f"Unknown dataset {self._dataset_name}. "
                + "Please choose {criteo_1t, criteo_kaggle} for dataset_name"
            )
        assert isinstance(datapipe, IterDataPipe)
        return self._create_dataloader(datapipe)
| [
"torchrec.datasets.utils.rand_split_train_val",
"torchrec.datasets.utils.Batch",
"torchrec.datasets.criteo.criteo_kaggle",
"torchrec.datasets.criteo.criteo_terabyte"
] | [((1406, 1432), 'torch.cat', 'torch.cat', (['cat_list'], {'dim': '(1)'}), '(cat_list, dim=1)\n', (1415, 1432), False, 'import torch\n'), ((2357, 2445), 'torchrec.datasets.utils.Batch', 'Batch', ([], {'dense_features': 'dense_features', 'sparse_features': 'sparse_features', 'labels': 'labels'}), '(dense_features=dense_features, sparse_features=sparse_features,\n labels=labels)\n', (2362, 2445), False, 'from torchrec.datasets.utils import rand_split_train_val, Batch\n'), ((1171, 1206), 'typing.cast', 'cast', (['torch.Tensor', 'batch[col_name]'], {}), '(torch.Tensor, batch[col_name])\n', (1175, 1206), False, 'from typing import Any, Iterable, List, Mapping, Optional, Union, cast, Callable\n'), ((1598, 1634), 'typing.cast', 'cast', (['Iterable[str]', 'batch[col_name]'], {}), '(Iterable[str], batch[col_name])\n', (1602, 1634), False, 'from typing import Any, Iterable, List, Mapping, Optional, Union, cast, Callable\n'), ((2176, 2200), 'torch.tensor', 'torch.tensor', (['kjt_values'], {}), '(kjt_values)\n', (2188, 2200), False, 'import torch\n'), ((2210, 2254), 'torch.tensor', 'torch.tensor', (['kjt_lengths'], {'dtype': 'torch.int32'}), '(kjt_lengths, dtype=torch.int32)\n', (2222, 2254), False, 'import torch\n'), ((7934, 7995), 'torchrec.datasets.criteo.criteo_terabyte', 'criteo_terabyte', (['paths'], {'read_chunk_size': 'self._read_chunk_size'}), '(paths, read_chunk_size=self._read_chunk_size)\n', (7949, 7995), False, 'from torchrec.datasets.criteo import criteo_terabyte, criteo_kaggle\n'), ((8692, 8750), 'torchrec.datasets.criteo.criteo_kaggle', 'criteo_kaggle', (['path'], {'read_chunk_size': 'self._read_chunk_size'}), '(path, read_chunk_size=self._read_chunk_size)\n', (8705, 8750), False, 'from torchrec.datasets.criteo import criteo_terabyte, criteo_kaggle\n'), ((9124, 9243), 'functools.partial', 'partial', (['_transform'], {'num_embeddings': 'self.num_embeddings', 'num_embeddings_per_feature': 'self.num_embeddings_per_feature'}), '(_transform, 
num_embeddings=self.num_embeddings,\n num_embeddings_per_feature=self.num_embeddings_per_feature)\n', (9131, 9243), False, 'from functools import partial\n'), ((11038, 11202), 'torch.utils.data.DataLoader', 'DataLoader', (['datapipe'], {'num_workers': 'self._num_workers', 'pin_memory': 'self._pin_memory', 'batch_size': 'None', 'batch_sampler': 'None', 'worker_init_fn': 'self._worker_init_fn'}), '(datapipe, num_workers=self._num_workers, pin_memory=self.\n _pin_memory, batch_size=None, batch_sampler=None, worker_init_fn=self.\n _worker_init_fn)\n', (11048, 11202), False, 'from torch.utils.data import DataLoader, IterDataPipe\n'), ((8272, 8379), 'torchrecipes.rec.datamodules.samplers.undersampler.ProportionUnderSampler', 'ProportionUnderSampler', (['datapipe', 'self._get_label', '{(0): undersampling_rate, (1): 1.0}'], {'seed': 'self._seed'}), '(datapipe, self._get_label, {(0): undersampling_rate,\n (1): 1.0}, seed=self._seed)\n', (8294, 8379), False, 'from torchrecipes.rec.datamodules.samplers.undersampler import ProportionUnderSampler\n'), ((10055, 10106), 'torchrec.datasets.utils.rand_split_train_val', 'rand_split_train_val', (['datapipe', 'self._train_percent'], {}), '(datapipe, self._train_percent)\n', (10075, 10106), False, 'from torchrec.datasets.utils import rand_split_train_val, Batch\n'), ((1349, 1367), 'torch.log', 'torch.log', (['(val + 3)'], {}), '(val + 3)\n', (1358, 1367), False, 'import torch\n'), ((1804, 1843), 'pyre_extensions.none_throws', 'none_throws', (['num_embeddings_per_feature'], {}), '(num_embeddings_per_feature)\n', (1815, 1843), False, 'from pyre_extensions import none_throws\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torchrec.fx import Tracer
from torchrec.modules.deepfm import (
DeepFM,
FactorizationMachine,
)
class TestDeepFM(unittest.TestCase):
    """Unit tests for the DeepFM module."""

    @staticmethod
    def _embeddings(batch_size: int) -> list:
        # Four embedding groups shaped [batch_size, num_embeddings, embedding_dim].
        return [
            torch.randn(batch_size, 2, 64),
            torch.randn(batch_size, 2, 32),
            torch.randn(batch_size, 3, 100),
            torch.randn(batch_size, 5, 120),
        ]

    @staticmethod
    def _dense_arch(in_features: int, output_dim: int) -> torch.nn.Module:
        # Three-layer MLP sized to the flattened embedding width.
        return torch.nn.Sequential(
            torch.nn.Linear(in_features, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 32),
            torch.nn.ReLU(),
            torch.nn.Linear(32, output_dim),
            torch.nn.ReLU(),
        )

    def test_deepfm_shape(self) -> None:
        batch_size, output_dim = 3, 30
        embeddings = self._embeddings(batch_size)
        width = 2 * 64 + 2 * 32 + 3 * 100 + 5 * 120
        model = DeepFM(dense_module=self._dense_arch(width, output_dim))
        out = model(embeddings)
        self.assertEqual(list(out.shape), [batch_size, output_dim])

    def test_deepfm_with_lazy_shape(self) -> None:
        batch_size, output_dim = 3, 30
        embeddings = self._embeddings(batch_size)
        lazy_dense = torch.nn.Sequential(
            torch.nn.LazyLinear(output_dim),
            torch.nn.ReLU(),
        )
        model = DeepFM(dense_module=lazy_dense)
        out = model(embeddings)
        self.assertEqual(list(out.shape), [batch_size, output_dim])

    def test_deepfm_numerical_forward(self) -> None:
        # Fixed seed: the embeddings and layer inits below consume the RNG in
        # a fixed order, so the forward output is reproducible.
        torch.manual_seed(0)
        batch_size, output_dim = 3, 2
        embeddings = self._embeddings(batch_size)
        width = 2 * 64 + 2 * 32 + 3 * 100 + 5 * 120
        model = DeepFM(dense_module=self._dense_arch(width, output_dim))
        output = model(embeddings)
        # Golden values captured with the fixed seed above.
        expected = torch.Tensor(
            [
                [0.0896, 0.1182],
                [0.0675, 0.0972],
                [0.0764, 0.0199],
            ],
        )
        self.assertTrue(
            torch.allclose(
                output,
                expected,
                rtol=1e-4,
                atol=1e-4,
            )
        )

    def test_fx_script_deepfm(self) -> None:
        model = DeepFM(dense_module=torch.nn.Linear(4, 1))
        # Dry run so lazy state is initialized before tracing/scripting.
        model([torch.randn(2, 2, 2)])
        graph_module = torch.fx.GraphModule(model, Tracer().trace(model))
        torch.jit.script(graph_module)
class TestFM(unittest.TestCase):
    """Unit tests for the FactorizationMachine module."""

    @staticmethod
    def _embeddings(batch_size: int) -> list:
        # Four embedding groups shaped [batch_size, num_embeddings, embedding_dim].
        return [
            torch.randn(batch_size, 2, 64),
            torch.randn(batch_size, 2, 32),
            torch.randn(batch_size, 3, 100),
            torch.randn(batch_size, 5, 120),
        ]

    def test_fm_shape(self) -> None:
        batch_size = 3
        embeddings = self._embeddings(batch_size)
        fm = FactorizationMachine()
        out = fm(embeddings)
        self.assertEqual(list(out.shape), [batch_size, 1])

    def test_fm_numerical_forward(self) -> None:
        # Fixed seed makes the randn-generated embeddings reproducible.
        torch.manual_seed(0)
        batch_size = 3
        embeddings = self._embeddings(batch_size)
        fm = FactorizationMachine()
        output = fm(embeddings)
        # Golden values captured with the fixed seed above.
        expected = torch.Tensor(
            [
                [-577.5231],
                [752.7272],
                [-509.1023],
            ]
        )
        self.assertTrue(
            torch.allclose(
                output,
                expected,
                rtol=1e-4,
                atol=1e-4,
            )
        )

    def test_fx_script_fm(self) -> None:
        module = FactorizationMachine()
        graph_module = torch.fx.GraphModule(module, Tracer().trace(module))
        torch.jit.script(graph_module)
# Allow running this test file directly via `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"torchrec.fx.Tracer",
"torchrec.modules.deepfm.DeepFM",
"torchrec.modules.deepfm.FactorizationMachine"
] | [((5139, 5154), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5152, 5154), False, 'import unittest\n'), ((1157, 1190), 'torchrec.modules.deepfm.DeepFM', 'DeepFM', ([], {'dense_module': 'dense_module'}), '(dense_module=dense_module)\n', (1163, 1190), False, 'from torchrec.modules.deepfm import DeepFM, FactorizationMachine\n'), ((1880, 1913), 'torchrec.modules.deepfm.DeepFM', 'DeepFM', ([], {'dense_module': 'dense_module'}), '(dense_module=dense_module)\n', (1886, 1913), False, 'from torchrec.modules.deepfm import DeepFM, FactorizationMachine\n'), ((2107, 2127), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2124, 2127), False, 'import torch\n'), ((2835, 2868), 'torchrec.modules.deepfm.DeepFM', 'DeepFM', ([], {'dense_module': 'dense_module'}), '(dense_module=dense_module)\n', (2841, 2868), False, 'from torchrec.modules.deepfm import DeepFM, FactorizationMachine\n'), ((2939, 3007), 'torch.Tensor', 'torch.Tensor', (['[[0.0896, 0.1182], [0.0675, 0.0972], [0.0764, 0.0199]]'], {}), '([[0.0896, 0.1182], [0.0675, 0.0972], [0.0764, 0.0199]])\n', (2951, 3007), False, 'import torch\n'), ((3523, 3543), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (3539, 3543), False, 'import torch\n'), ((3968, 3990), 'torchrec.modules.deepfm.FactorizationMachine', 'FactorizationMachine', ([], {}), '()\n', (3988, 3990), False, 'from torchrec.modules.deepfm import DeepFM, FactorizationMachine\n'), ((4157, 4177), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (4174, 4177), False, 'import torch\n'), ((4529, 4551), 'torchrec.modules.deepfm.FactorizationMachine', 'FactorizationMachine', ([], {}), '()\n', (4549, 4551), False, 'from torchrec.modules.deepfm import DeepFM, FactorizationMachine\n'), ((4618, 4670), 'torch.Tensor', 'torch.Tensor', (['[[-577.5231], [752.7272], [-509.1023]]'], {}), '([[-577.5231], [752.7272], [-509.1023]])\n', (4630, 4670), False, 'import torch\n'), ((4998, 5020), 
'torchrec.modules.deepfm.FactorizationMachine', 'FactorizationMachine', ([], {}), '()\n', (5018, 5020), False, 'from torchrec.modules.deepfm import DeepFM, FactorizationMachine\n'), ((5085, 5105), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (5101, 5105), False, 'import torch\n'), ((635, 665), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(64)'], {}), '(batch_size, 2, 64)\n', (646, 665), False, 'import torch\n'), ((679, 709), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(32)'], {}), '(batch_size, 2, 32)\n', (690, 709), False, 'import torch\n'), ((723, 754), 'torch.randn', 'torch.randn', (['batch_size', '(3)', '(100)'], {}), '(batch_size, 3, 100)\n', (734, 754), False, 'import torch\n'), ((768, 799), 'torch.randn', 'torch.randn', (['batch_size', '(5)', '(120)'], {}), '(batch_size, 5, 120)\n', (779, 799), False, 'import torch\n'), ((925, 958), 'torch.nn.Linear', 'torch.nn.Linear', (['in_features', '(128)'], {}), '(in_features, 128)\n', (940, 958), False, 'import torch\n'), ((972, 987), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (985, 987), False, 'import torch\n'), ((1001, 1025), 'torch.nn.Linear', 'torch.nn.Linear', (['(128)', '(32)'], {}), '(128, 32)\n', (1016, 1025), False, 'import torch\n'), ((1039, 1054), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (1052, 1054), False, 'import torch\n'), ((1068, 1099), 'torch.nn.Linear', 'torch.nn.Linear', (['(32)', 'output_dim'], {}), '(32, output_dim)\n', (1083, 1099), False, 'import torch\n'), ((1113, 1128), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (1126, 1128), False, 'import torch\n'), ((1559, 1589), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(64)'], {}), '(batch_size, 2, 64)\n', (1570, 1589), False, 'import torch\n'), ((1603, 1633), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(32)'], {}), '(batch_size, 2, 32)\n', (1614, 1633), False, 'import torch\n'), ((1647, 1678), 'torch.randn', 'torch.randn', (['batch_size', '(3)', '(100)'], {}), 
'(batch_size, 3, 100)\n', (1658, 1678), False, 'import torch\n'), ((1692, 1723), 'torch.randn', 'torch.randn', (['batch_size', '(5)', '(120)'], {}), '(batch_size, 5, 120)\n', (1703, 1723), False, 'import torch\n'), ((1791, 1822), 'torch.nn.LazyLinear', 'torch.nn.LazyLinear', (['output_dim'], {}), '(output_dim)\n', (1810, 1822), False, 'import torch\n'), ((1836, 1851), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (1849, 1851), False, 'import torch\n'), ((2313, 2343), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(64)'], {}), '(batch_size, 2, 64)\n', (2324, 2343), False, 'import torch\n'), ((2357, 2387), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(32)'], {}), '(batch_size, 2, 32)\n', (2368, 2387), False, 'import torch\n'), ((2401, 2432), 'torch.randn', 'torch.randn', (['batch_size', '(3)', '(100)'], {}), '(batch_size, 3, 100)\n', (2412, 2432), False, 'import torch\n'), ((2446, 2477), 'torch.randn', 'torch.randn', (['batch_size', '(5)', '(120)'], {}), '(batch_size, 5, 120)\n', (2457, 2477), False, 'import torch\n'), ((2603, 2636), 'torch.nn.Linear', 'torch.nn.Linear', (['in_features', '(128)'], {}), '(in_features, 128)\n', (2618, 2636), False, 'import torch\n'), ((2650, 2665), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (2663, 2665), False, 'import torch\n'), ((2679, 2703), 'torch.nn.Linear', 'torch.nn.Linear', (['(128)', '(32)'], {}), '(128, 32)\n', (2694, 2703), False, 'import torch\n'), ((2717, 2732), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (2730, 2732), False, 'import torch\n'), ((2746, 2777), 'torch.nn.Linear', 'torch.nn.Linear', (['(32)', 'output_dim'], {}), '(32, output_dim)\n', (2761, 2777), False, 'import torch\n'), ((2791, 2806), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (2804, 2806), False, 'import torch\n'), ((3131, 3196), 'torch.allclose', 'torch.allclose', (['output', 'expected_output'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(output, expected_output, rtol=0.0001, atol=0.0001)\n', (3145, 3196), 
False, 'import torch\n'), ((3778, 3808), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(64)'], {}), '(batch_size, 2, 64)\n', (3789, 3808), False, 'import torch\n'), ((3822, 3852), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(32)'], {}), '(batch_size, 2, 32)\n', (3833, 3852), False, 'import torch\n'), ((3866, 3897), 'torch.randn', 'torch.randn', (['batch_size', '(3)', '(100)'], {}), '(batch_size, 3, 100)\n', (3877, 3897), False, 'import torch\n'), ((3911, 3942), 'torch.randn', 'torch.randn', (['batch_size', '(5)', '(120)'], {}), '(batch_size, 5, 120)\n', (3922, 3942), False, 'import torch\n'), ((4340, 4370), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(64)'], {}), '(batch_size, 2, 64)\n', (4351, 4370), False, 'import torch\n'), ((4384, 4414), 'torch.randn', 'torch.randn', (['batch_size', '(2)', '(32)'], {}), '(batch_size, 2, 32)\n', (4395, 4414), False, 'import torch\n'), ((4428, 4459), 'torch.randn', 'torch.randn', (['batch_size', '(3)', '(100)'], {}), '(batch_size, 3, 100)\n', (4439, 4459), False, 'import torch\n'), ((4473, 4504), 'torch.randn', 'torch.randn', (['batch_size', '(5)', '(120)'], {}), '(batch_size, 5, 120)\n', (4484, 4504), False, 'import torch\n'), ((4793, 4858), 'torch.allclose', 'torch.allclose', (['output', 'expected_output'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(output, expected_output, rtol=0.0001, atol=0.0001)\n', (4807, 4858), False, 'import torch\n'), ((3360, 3381), 'torch.nn.Linear', 'torch.nn.Linear', (['(4)', '(1)'], {}), '(4, 1)\n', (3375, 3381), False, 'import torch\n'), ((3436, 3456), 'torch.randn', 'torch.randn', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (3447, 3456), False, 'import torch\n'), ((3496, 3504), 'torchrec.fx.Tracer', 'Tracer', ([], {}), '()\n', (3502, 3504), False, 'from torchrec.fx import Tracer\n'), ((5058, 5066), 'torchrec.fx.Tracer', 'Tracer', ([], {}), '()\n', (5064, 5066), False, 'from torchrec.fx import Tracer\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from collections import OrderedDict
from dataclasses import dataclass
from typing import (
List,
Dict,
Optional,
Type,
Any,
Mapping,
Union,
Iterator,
Tuple,
Set,
)
import torch
from torch import nn
from torch.nn.modules.module import _IncompatibleKeys
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
SparseFeaturesListAwaitable,
SparseFeaturesIndicesAwaitable,
)
from torchrec.distributed.embedding_types import (
SparseFeatures,
BaseEmbeddingSharder,
EmbeddingComputeKernel,
ShardingType,
SparseFeaturesList,
)
from torchrec.distributed.sharding.dp_sequence_sharding import (
DpSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sequence_sharding import (
RwSequenceEmbeddingSharding,
)
from torchrec.distributed.sharding.rw_sharding import RwSparseFeaturesDist
from torchrec.distributed.sharding.sequence_sharding import SequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
TwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import (
Awaitable,
LazyAwaitable,
ParameterSharding,
ShardedModule,
ShardedModuleContext,
ShardedTensor,
ShardingEnv,
)
from torchrec.distributed.utils import append_prefix
from torchrec.distributed.utils import filter_state_dict
from torchrec.modules.embedding_configs import EmbeddingTableConfig, PoolingType
from torchrec.modules.embedding_modules import EmbeddingCollection
from torchrec.optim.fused import FusedOptimizerModule
from torchrec.optim.keyed import KeyedOptimizer, CombinedOptimizer
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, JaggedTensor
try:
    # The fbgemm sparse ops are optional native extensions; loading is
    # best-effort and an OSError (library missing in this build) is ignored
    # so the module still imports on hosts without fbgemm.
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
    pass
def create_embedding_sharding(
    sharding_type: str,
    embedding_configs: List[
        Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
    ],
    env: ShardingEnv,
    device: Optional[torch.device] = None,
) -> EmbeddingSharding[SparseFeatures, torch.Tensor]:
    """Instantiate the sequence-embedding sharding for ``sharding_type``.

    Raises:
        ValueError: if ``sharding_type`` is not table-wise, row-wise, or
            data-parallel.
    """
    # Dispatch table instead of an if/elif chain: sharding-type value ->
    # sequence-sharding implementation class.
    sharding_cls_by_type = {
        ShardingType.TABLE_WISE.value: TwSequenceEmbeddingSharding,
        ShardingType.ROW_WISE.value: RwSequenceEmbeddingSharding,
        ShardingType.DATA_PARALLEL.value: DpSequenceEmbeddingSharding,
    }
    if sharding_type not in sharding_cls_by_type:
        raise ValueError(f"Sharding not supported {sharding_type}")
    return sharding_cls_by_type[sharding_type](embedding_configs, env, device)
def _create_embedding_configs_by_sharding(
    module: EmbeddingCollection,
    table_name_to_parameter_sharding: Dict[str, ParameterSharding],
) -> Dict[str, List[Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]]]:
    """Group the module's embedding tables by their sharding type.

    For each table, validates the requested compute kernel, looks up the
    table's weight in the module state dict, and emits a non-pooled
    ``EmbeddingTableConfig`` alongside its sharding spec and weight tensor.

    Raises:
        ValueError: if a table requests an unknown compute kernel.
    """
    supported_kernels = {kernel.value for kernel in EmbeddingComputeKernel}
    state_dict = module.state_dict()
    grouped: Dict[
        str, List[Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]]
    ] = {}
    for config, embedding_names in zip(
        module.embedding_configs, module.embedding_names_by_table
    ):
        assert config.name in table_name_to_parameter_sharding
        parameter_sharding = table_name_to_parameter_sharding[config.name]
        if parameter_sharding.compute_kernel not in supported_kernels:
            raise ValueError(
                f"Compute kernel not supported {parameter_sharding.compute_kernel}"
            )
        param_name = "embeddings." + config.name + ".weight"
        assert param_name in state_dict
        # Sequence embeddings are never pooled or weighted.
        table_config = EmbeddingTableConfig(
            num_embeddings=config.num_embeddings,
            embedding_dim=config.embedding_dim,
            name=config.name,
            data_type=config.data_type,
            feature_names=copy.deepcopy(config.feature_names),
            pooling=PoolingType.NONE,
            is_weighted=False,
            has_feature_processor=False,
            embedding_names=embedding_names,
            weight_init_max=config.weight_init_max,
            weight_init_min=config.weight_init_min,
        )
        grouped.setdefault(parameter_sharding.sharding_type, []).append(
            (table_config, parameter_sharding, state_dict[param_name])
        )
    return grouped
def _construct_jagged_tensors(
    embeddings: torch.Tensor,
    features: KeyedJaggedTensor,
    embedding_names: List[str],
) -> Dict[str, JaggedTensor]:
    """Split a flat embedding tensor back into one JaggedTensor per feature key.

    The flat ``embeddings`` tensor is partitioned along dim 0 using the
    per-key value counts of ``features``; each slice is paired with that
    key's per-sample lengths.
    """
    per_key_lengths = features.lengths().view(-1, features.stride())
    per_key_values = embeddings.split(features.length_per_key())
    return {
        key: JaggedTensor(lengths=per_key_lengths[i], values=per_key_values[i])
        for i, key in enumerate(features.keys())
    }
@dataclass
class EmbeddingCollectionContext(ShardedModuleContext):
    # One SequenceShardingContext per sharding type; populated in
    # input_dist and consumed by compute/output_dist.
    sharding_contexts: List[SequenceShardingContext]
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Propagate the CUDA stream to every per-sharding context so cached
        # tensors are not reclaimed while the stream may still use them.
        for ctx in self.sharding_contexts:
            ctx.record_stream(stream)
class EmbeddingCollectionAwaitable(LazyAwaitable[Dict[str, JaggedTensor]]):
    """Lazily merges per-sharding embedding awaitables into a single
    feature-name -> JaggedTensor dict."""

    def __init__(
        self,
        awaitables_per_sharding: List[Awaitable[torch.Tensor]],
        features_per_sharding: List[KeyedJaggedTensor],
        embedding_names_per_sharding: List[str],
    ) -> None:
        super().__init__()
        self._awaitables_per_sharding = awaitables_per_sharding
        self._features_per_sharding = features_per_sharding
        self._embedding_names_per_sharding = embedding_names_per_sharding

    def _wait_impl(self) -> Dict[str, JaggedTensor]:
        # Wait on each sharding's output and fold its per-key jagged
        # tensors into one merged dict.
        merged: Dict[str, JaggedTensor] = {}
        for awaitable, features, names in zip(
            self._awaitables_per_sharding,
            self._features_per_sharding,
            self._embedding_names_per_sharding,
        ):
            merged.update(
                _construct_jagged_tensors(awaitable.wait(), features, names)
            )
        return merged
class ShardedEmbeddingCollection(
    ShardedModule[
        SparseFeaturesList,
        List[torch.Tensor],
        Dict[str, torch.Tensor],
    ],
    FusedOptimizerModule,
):
    """
    Sharded implementation of `EmbeddingCollection`.
    This is part of the public API to allow for manual data dist pipelining.
    """
    def __init__(
        self,
        module: EmbeddingCollection,
        table_name_to_parameter_sharding: Dict[str, ParameterSharding],
        env: ShardingEnv,
        fused_params: Optional[Dict[str, Any]] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        # Group tables by sharding type; each type gets one EmbeddingSharding
        # that owns its own input dist / lookup / output dist.
        sharding_type_to_embedding_configs = _create_embedding_configs_by_sharding(
            module, table_name_to_parameter_sharding
        )
        self._sharding_type_to_sharding: Dict[
            str, EmbeddingSharding[SparseFeatures, torch.Tensor]
        ] = {
            sharding_type: create_embedding_sharding(
                sharding_type, embedding_confings, env, device
            )
            for sharding_type, embedding_confings in sharding_type_to_embedding_configs.items()
        }
        self._device = device
        self._input_dists: nn.ModuleList = nn.ModuleList()
        self._lookups: nn.ModuleList = nn.ModuleList()
        self._create_lookups(fused_params)
        self._output_dists: nn.ModuleList = nn.ModuleList()
        self._create_output_dist()
        self._feature_splits: List[int] = []
        self._features_order: List[int] = []
        # Input dist is built lazily on the first forward pass, once the
        # actual input feature ordering is known.
        self._has_uninitialized_input_dist: bool = True
        # Get all fused optimizers and combine them.
        optims = []
        for lookup in self._lookups:
            for _, m in lookup.named_modules():
                if isinstance(m, FusedOptimizerModule):
                    # modify param keys to match EmbeddingCollection
                    params: Mapping[str, Union[torch.Tensor, ShardedTensor]] = {}
                    for param_key, weight in m.fused_optimizer.params.items():
                        # pyre-fixme[16]: `Mapping` has no attribute `__setitem__`.
                        params["embedding_modules." + param_key] = weight
                    m.fused_optimizer.params = params
                    optims.append(("", m.fused_optimizer))
        self._optim: CombinedOptimizer = CombinedOptimizer(optims)
        self._embedding_dim: int = module.embedding_dim
        self._embedding_names_per_sharding: List[List[str]] = []
        for sharding in self._sharding_type_to_sharding.values():
            self._embedding_names_per_sharding.append(sharding.embedding_names())
    def _create_input_dist(
        self,
        input_feature_names: List[str],
    ) -> None:
        # Build one input dist per sharding and record the permutation from
        # the caller's feature order to the concatenated sharding order.
        feature_names: List[str] = []
        self._feature_splits: List[int] = []
        for sharding in self._sharding_type_to_sharding.values():
            self._input_dists.append(sharding.create_input_dist())
            feature_names.extend(sharding.id_list_feature_names())
            self._feature_splits.append(len(sharding.id_list_feature_names()))
        self._features_order: List[int] = []
        for f in feature_names:
            self._features_order.append(input_feature_names.index(f))
        # An identity permutation is stored as [] so forward can skip the
        # permute entirely.
        self._features_order = (
            []
            if self._features_order == list(range(len(self._features_order)))
            else self._features_order
        )
        self.register_buffer(
            "_features_order_tensor",
            torch.tensor(self._features_order, device=self._device, dtype=torch.int32),
        )
    def _create_lookups(self, fused_params: Optional[Dict[str, Any]]) -> None:
        # One lookup module per sharding type.
        for sharding in self._sharding_type_to_sharding.values():
            self._lookups.append(sharding.create_lookup(fused_params=fused_params))
    def _create_output_dist(
        self,
    ) -> None:
        # One output dist module per sharding type.
        for sharding in self._sharding_type_to_sharding.values():
            self._output_dists.append(sharding.create_output_dist())
    # pyre-ignore [14]
    def input_dist(
        self,
        ctx: EmbeddingCollectionContext,
        features: KeyedJaggedTensor,
    ) -> Awaitable[SparseFeaturesList]:
        """Permute/split the input features and kick off per-sharding dists.

        Also records per-sharding SequenceShardingContext entries on ``ctx``
        for reuse by compute/output_dist.
        """
        if self._has_uninitialized_input_dist:
            self._create_input_dist(
                input_feature_names=features.keys() if features is not None else []
            )
            self._has_uninitialized_input_dist = False
        with torch.no_grad():
            features_by_sharding = []
            if self._features_order:
                features = features.permute(
                    self._features_order,
                    # pyre-ignore [6]
                    self._features_order_tensor,
                )
            features_by_sharding = features.split(
                self._feature_splits,
            )
            # save input splits and output splits in sharding context which
            # will be reused in sequence embedding all2all
            awaitables = []
            for module, features in zip(self._input_dists, features_by_sharding):
                tensor_awaitable = module(
                    SparseFeatures(
                        id_list_features=features,
                        id_score_list_features=None,
                    )
                )
                tensor_awaitable = tensor_awaitable.wait()  # finish lengths all2all
                input_splits = []
                output_splits = []
                if isinstance(tensor_awaitable, SparseFeaturesIndicesAwaitable):
                    input_splits = (
                        # pyre-fixme[16]: `Optional` has no attribute
                        # `_in_lengths_per_worker`.
                        tensor_awaitable._id_list_features_awaitable._in_lengths_per_worker
                    )
                    output_splits = (
                        # pyre-fixme[16]: `Optional` has no attribute
                        # `_out_lengths_per_worker`.
                        tensor_awaitable._id_list_features_awaitable._out_lengths_per_worker
                    )
                ctx.sharding_contexts.append(
                    SequenceShardingContext(
                        features_before_input_dist=features,
                        input_splits=input_splits,
                        output_splits=output_splits,
                        unbucketize_permute_tensor=module.unbucketize_permute_tensor
                        if isinstance(module, RwSparseFeaturesDist)
                        else None,
                    )
                )
                awaitables.append(tensor_awaitable)
            return SparseFeaturesListAwaitable(awaitables)
    def compute(
        self, ctx: ShardedModuleContext, dist_input: SparseFeaturesList
    ) -> List[torch.Tensor]:
        """Run each sharding's lookup, returning one (N, dim) tensor per sharding."""
        ret: List[torch.Tensor] = []
        for lookup, features, sharding_ctx in zip(
            self._lookups,
            dist_input,
            # pyre-ignore [16]
            ctx.sharding_contexts,
        ):
            sharding_ctx.lengths_after_input_dist = (
                features.id_list_features.lengths().view(
                    -1, features.id_list_features.stride()
                )
            )
            ret.append(lookup(features).view(-1, self._embedding_dim))
        return ret
    def output_dist(
        self, ctx: ShardedModuleContext, output: List[torch.Tensor]
    ) -> LazyAwaitable[Dict[str, torch.Tensor]]:
        """Launch per-sharding output dists; result is awaited lazily."""
        awaitables_per_sharding: List[Awaitable[Dict[str, JaggedTensor]]] = []
        features_before_all2all_per_sharding: List[KeyedJaggedTensor] = []
        for odist, embeddings, sharding_ctx in zip(
            self._output_dists,
            output,
            # pyre-ignore [16]
            ctx.sharding_contexts,
        ):
            awaitables_per_sharding.append(odist(embeddings, sharding_ctx))
            features_before_all2all_per_sharding.append(
                sharding_ctx.features_before_input_dist
            )
        return EmbeddingCollectionAwaitable(
            awaitables_per_sharding=awaitables_per_sharding,
            features_per_sharding=features_before_all2all_per_sharding,
            embedding_names_per_sharding=self._embedding_names_per_sharding,
        )
    def compute_and_output_dist(
        self, ctx: ShardedModuleContext, input: SparseFeaturesList
    ) -> LazyAwaitable[Dict[str, torch.Tensor]]:
        """Fused compute + output_dist: overlaps lookup with output all2all."""
        awaitables_per_sharding: List[Awaitable[Dict[str, JaggedTensor]]] = []
        features_before_all2all_per_sharding: List[KeyedJaggedTensor] = []
        for lookup, odist, features, sharding_ctx in zip(
            self._lookups,
            self._output_dists,
            input,
            # pyre-ignore [16]
            ctx.sharding_contexts,
        ):
            sharding_ctx.lengths_after_input_dist = (
                features.id_list_features.lengths().view(
                    -1, features.id_list_features.stride()
                )
            )
            awaitables_per_sharding.append(
                odist(lookup(features).view(-1, self._embedding_dim), sharding_ctx)
            )
            features_before_all2all_per_sharding.append(
                sharding_ctx.features_before_input_dist
            )
        return EmbeddingCollectionAwaitable(
            awaitables_per_sharding=awaitables_per_sharding,
            features_per_sharding=features_before_all2all_per_sharding,
            embedding_names_per_sharding=self._embedding_names_per_sharding,
        )
    # pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Collect state from every lookup under the ``embeddings.`` prefix."""
        if destination is None:
            destination = OrderedDict()
            # pyre-ignore [16]
            destination._metadata = OrderedDict()
        for lookup in self._lookups:
            lookup.state_dict(destination, prefix + "embeddings.", keep_vars)
        return destination
    def named_modules(
        self,
        memo: Optional[Set[nn.Module]] = None,
        prefix: str = "",
        remove_duplicate: bool = True,
    ) -> Iterator[Tuple[str, nn.Module]]:
        # Only expose this module itself; children are managed internally.
        yield from [(prefix, self)]
    def named_parameters(
        self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        for lookup in self._lookups:
            yield from lookup.named_parameters(
                append_prefix(prefix, "embeddings"), recurse
            )
    def named_buffers(
        self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        for lookup in self._lookups:
            yield from lookup.named_buffers(
                append_prefix(prefix, "embeddings"), recurse
            )
    def load_state_dict(
        self,
        state_dict: "OrderedDict[str, torch.Tensor]",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Load per-lookup state, aggregating missing/unexpected keys."""
        missing_keys = []
        unexpected_keys = []
        for lookup in self._lookups:
            missing, unexpected = lookup.load_state_dict(
                filter_state_dict(state_dict, "embeddings"),
                strict,
            )
            missing_keys.extend(missing)
            unexpected_keys.extend(unexpected)
        return _IncompatibleKeys(
            missing_keys=missing_keys, unexpected_keys=unexpected_keys
        )
    def sparse_grad_parameter_names(
        self,
        destination: Optional[List[str]] = None,
        prefix: str = "",
    ) -> List[str]:
        destination = [] if destination is None else destination
        for lookup in self._lookups:
            lookup.sparse_grad_parameter_names(
                destination, append_prefix(prefix, "embeddings")
            )
        return destination
    def sharded_parameter_names(self, prefix: str = "") -> Iterator[str]:
        # Data-parallel tables are replicated, not sharded, so skip them.
        for lookup, sharding_type in zip(
            self._lookups, self._sharding_type_to_sharding.keys()
        ):
            if sharding_type == ShardingType.DATA_PARALLEL.value:
                continue
            for name, _ in lookup.named_parameters(append_prefix(prefix, "embeddings")):
                yield name
    @property
    def fused_optimizer(self) -> KeyedOptimizer:
        return self._optim
    def create_context(self) -> ShardedModuleContext:
        return EmbeddingCollectionContext(sharding_contexts=[])
class EmbeddingCollectionSharder(BaseEmbeddingSharder[EmbeddingCollection]):
    """
    This implementation uses non-fused EmbeddingCollection
    """

    def shard(
        self,
        module: EmbeddingCollection,
        params: Dict[str, ParameterSharding],
        env: ShardingEnv,
        device: Optional[torch.device] = None,
    ) -> ShardedEmbeddingCollection:
        """Wrap ``module`` into its sharded counterpart."""
        return ShardedEmbeddingCollection(
            module, params, env, self.fused_params, device
        )

    def shardable_parameters(
        self, module: EmbeddingCollection
    ) -> Dict[str, nn.Parameter]:
        """Map each table name (first path component) to its weight parameter."""
        shardable: Dict[str, nn.Parameter] = {}
        for name, param in module.embeddings.named_parameters():
            table_name, _, _ = name.partition(".")
            shardable[table_name] = param
        return shardable

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """All sharding schemes this sharder supports, regardless of device."""
        return [
            ShardingType.DATA_PARALLEL.value,
            ShardingType.TABLE_WISE.value,
            ShardingType.ROW_WISE.value,
        ]

    @property
    def module_type(self) -> Type[EmbeddingCollection]:
        return EmbeddingCollection
| [
"torchrec.distributed.sharding.dp_sequence_sharding.DpSequenceEmbeddingSharding",
"torchrec.sparse.jagged_tensor.JaggedTensor",
"torchrec.distributed.utils.append_prefix",
"torchrec.distributed.sharding.tw_sequence_sharding.TwSequenceEmbeddingSharding",
"torchrec.distributed.embedding_sharding.SparseFeature... | [((1958, 2027), 'torch.ops.load_library', 'torch.ops.load_library', (['"""//deeplearning/fbgemm/fbgemm_gpu:sparse_ops"""'], {}), "('//deeplearning/fbgemm/fbgemm_gpu:sparse_ops')\n", (1980, 2027), False, 'import torch\n'), ((2032, 2105), 'torch.ops.load_library', 'torch.ops.load_library', (['"""//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu"""'], {}), "('//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu')\n", (2054, 2105), False, 'import torch\n'), ((2482, 2541), 'torchrec.distributed.sharding.tw_sequence_sharding.TwSequenceEmbeddingSharding', 'TwSequenceEmbeddingSharding', (['embedding_configs', 'env', 'device'], {}), '(embedding_configs, env, device)\n', (2509, 2541), False, 'from torchrec.distributed.sharding.tw_sequence_sharding import TwSequenceEmbeddingSharding\n'), ((5439, 5497), 'torchrec.sparse.jagged_tensor.JaggedTensor', 'JaggedTensor', ([], {'lengths': 'lengths[i]', 'values': 'values_per_key[i]'}), '(lengths=lengths[i], values=values_per_key[i])\n', (5451, 5497), False, 'from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, JaggedTensor\n'), ((7929, 7944), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7942, 7944), False, 'from torch import nn\n'), ((7984, 7999), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (7997, 7999), False, 'from torch import nn\n'), ((8087, 8102), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (8100, 8102), False, 'from torch import nn\n'), ((9043, 9068), 'torchrec.optim.keyed.CombinedOptimizer', 'CombinedOptimizer', (['optims'], {}), '(optims)\n', (9060, 9068), False, 'from torchrec.optim.keyed import KeyedOptimizer, CombinedOptimizer\n'), ((18108, 18185), 'torch.nn.modules.module._IncompatibleKeys', '_IncompatibleKeys', ([], {'missing_keys': 'missing_keys', 'unexpected_keys': 'unexpected_keys'}), '(missing_keys=missing_keys, unexpected_keys=unexpected_keys)\n', (18125, 18185), False, 'from torch.nn.modules.module import 
_IncompatibleKeys\n'), ((2612, 2671), 'torchrec.distributed.sharding.rw_sequence_sharding.RwSequenceEmbeddingSharding', 'RwSequenceEmbeddingSharding', (['embedding_configs', 'env', 'device'], {}), '(embedding_configs, env, device)\n', (2639, 2671), False, 'from torchrec.distributed.sharding.rw_sequence_sharding import RwSequenceEmbeddingSharding\n'), ((10199, 10273), 'torch.tensor', 'torch.tensor', (['self._features_order'], {'device': 'self._device', 'dtype': 'torch.int32'}), '(self._features_order, device=self._device, dtype=torch.int32)\n', (10211, 10273), False, 'import torch\n'), ((11135, 11150), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11148, 11150), False, 'import torch\n'), ((13331, 13370), 'torchrec.distributed.embedding_sharding.SparseFeaturesListAwaitable', 'SparseFeaturesListAwaitable', (['awaitables'], {}), '(awaitables)\n', (13358, 13370), False, 'from torchrec.distributed.embedding_sharding import EmbeddingSharding, SparseFeaturesListAwaitable, SparseFeaturesIndicesAwaitable\n'), ((16514, 16527), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16525, 16527), False, 'from collections import OrderedDict\n'), ((16595, 16608), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16606, 16608), False, 'from collections import OrderedDict\n'), ((2747, 2806), 'torchrec.distributed.sharding.dp_sequence_sharding.DpSequenceEmbeddingSharding', 'DpSequenceEmbeddingSharding', (['embedding_configs', 'env', 'device'], {}), '(embedding_configs, env, device)\n', (2774, 2806), False, 'from torchrec.distributed.sharding.dp_sequence_sharding import DpSequenceEmbeddingSharding\n'), ((17922, 17965), 'torchrec.distributed.utils.filter_state_dict', 'filter_state_dict', (['state_dict', '"""embeddings"""'], {}), "(state_dict, 'embeddings')\n", (17939, 17965), False, 'from torchrec.distributed.utils import filter_state_dict\n'), ((18534, 18569), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', '"""embeddings"""'], {}), 
"(prefix, 'embeddings')\n", (18547, 18569), False, 'from torchrec.distributed.utils import append_prefix\n'), ((18947, 18982), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', '"""embeddings"""'], {}), "(prefix, 'embeddings')\n", (18960, 18982), False, 'from torchrec.distributed.utils import append_prefix\n'), ((11830, 11900), 'torchrec.distributed.embedding_types.SparseFeatures', 'SparseFeatures', ([], {'id_list_features': 'features', 'id_score_list_features': 'None'}), '(id_list_features=features, id_score_list_features=None)\n', (11844, 11900), False, 'from torchrec.distributed.embedding_types import SparseFeatures, BaseEmbeddingSharder, EmbeddingComputeKernel, ShardingType, SparseFeaturesList\n'), ((17236, 17271), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', '"""embeddings"""'], {}), "(prefix, 'embeddings')\n", (17249, 17271), False, 'from torchrec.distributed.utils import append_prefix\n'), ((17546, 17581), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', '"""embeddings"""'], {}), "(prefix, 'embeddings')\n", (17559, 17581), False, 'from torchrec.distributed.utils import append_prefix\n'), ((4524, 4559), 'copy.deepcopy', 'copy.deepcopy', (['config.feature_names'], {}), '(config.feature_names)\n', (4537, 4559), False, 'import copy\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import List
import torch
from torchrec.sparse.jagged_tensor import (
JaggedTensor,
KeyedTensor,
KeyedJaggedTensor,
)
class TestJaggedTensor(unittest.TestCase):
    """Unit tests for JaggedTensor and the KeyedJaggedTensor constructors."""
    def test_from_dense_lengths(self) -> None:
        # from_dense_lengths keeps only the first `length` entries of each row.
        values = torch.Tensor(
            [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]
        )
        weights = 12.0 - values
        j0 = JaggedTensor.from_dense_lengths(
            values=values,
            lengths=torch.IntTensor([1, 0, 2, 3]),
        )
        self.assertTrue(torch.equal(j0.lengths(), torch.IntTensor([1, 0, 2, 3])))
        self.assertTrue(
            torch.equal(j0.values(), torch.Tensor([1.0, 7.0, 8.0, 10.0, 11.0, 12.0]))
        )
        self.assertTrue(j0.weights_or_none() is None)
        j1 = JaggedTensor.from_dense_lengths(
            values=values,
            lengths=torch.IntTensor([2, 0, 1, 1]),
            weights=weights,
        )
        self.assertTrue(torch.equal(j1.lengths(), torch.IntTensor([2, 0, 1, 1])))
        self.assertTrue(torch.equal(j1.values(), torch.Tensor([1.0, 2.0, 7.0, 10.0])))
        self.assertTrue(torch.equal(j1.weights(), torch.Tensor([11.0, 10.0, 5.0, 2.0])))
    def test_key_lookup(self) -> None:
        # Indexing a KeyedJaggedTensor by key returns that key's JaggedTensor.
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        keys = ["index_0", "index_1"]
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jag_tensor = KeyedJaggedTensor(
            values=values,
            keys=keys,
            offsets=offsets,
        )
        j0 = jag_tensor["index_0"]
        j1 = jag_tensor["index_1"]
        self.assertTrue(isinstance(j0, JaggedTensor))
        self.assertTrue(torch.equal(j0.lengths(), torch.IntTensor([2, 0, 1])))
        self.assertTrue(torch.equal(j0.values(), torch.Tensor([1.0, 2.0, 3.0])))
        self.assertTrue(torch.equal(j1.lengths(), torch.IntTensor([1, 1, 3])))
        self.assertTrue(
            torch.equal(j1.values(), torch.Tensor([4.0, 5.0, 6.0, 7.0, 8.0]))
        )
    def test_split(self) -> None:
        # split([1, 1]) yields two single-key KeyedJaggedTensors.
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        keys = ["index_0", "index_1"]
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jag_tensor = KeyedJaggedTensor(
            values=values,
            keys=keys,
            offsets=offsets,
        )
        j0, j1 = jag_tensor.split([1, 1])
        self.assertTrue(isinstance(j0, KeyedJaggedTensor))
        self.assertEqual(j0.keys(), ["index_0"])
        self.assertEqual(j1.keys(), ["index_1"])
        self.assertTrue(torch.equal(j0.lengths(), torch.IntTensor([2, 0, 1])))
        self.assertTrue(torch.equal(j0.values(), torch.Tensor([1.0, 2.0, 3.0])))
        self.assertTrue(torch.equal(j1.lengths(), torch.IntTensor([1, 1, 3])))
        self.assertTrue(
            torch.equal(j1.values(), torch.Tensor([4.0, 5.0, 6.0, 7.0, 8.0]))
        )
    def test_length_vs_offset(self) -> None:
        # Constructing from offsets or from lengths must agree.
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        keys = ["index_0", "index_1"]
        offsets = torch.IntTensor([0, 0, 2, 2, 3, 4, 5, 5, 8])
        lengths = torch.IntTensor([0, 2, 0, 1, 1, 1, 0, 3])
        j_offset = KeyedJaggedTensor.from_offsets_sync(
            values=values,
            keys=keys,
            offsets=offsets,
        )
        j_lens = KeyedJaggedTensor.from_lengths_sync(
            values=values,
            keys=keys,
            lengths=lengths,
        )
        self.assertTrue(torch.equal(j_offset.lengths(), j_lens.lengths()))
        # TODO: T88149179
        self.assertTrue(torch.equal(j_offset.offsets(), j_lens.offsets().int()))
    def test_concat(self) -> None:
        # concat of two KJTs equals building the full KJT in one go.
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])
        keys = ["index_0", "index_1", "index_2"]
        lengths = torch.IntTensor([0, 2, 0, 1, 1, 1, 0, 3, 0, 0, 1, 0])
        kjt_expected = KeyedJaggedTensor.from_lengths_sync(
            values=values,
            keys=keys,
            lengths=lengths,
        )
        kjt_actual = KeyedJaggedTensor.concat(
            a=KeyedJaggedTensor.from_lengths_sync(
                values=values[:4],
                keys=keys[:1],
                lengths=lengths[:4],
            ),
            b=KeyedJaggedTensor.from_lengths_sync(
                values=values[4:],
                keys=keys[1:],
                lengths=lengths[4:],
            ),
        )
        self.assertTrue(torch.equal(kjt_expected.lengths(), kjt_actual.lengths()))
        self.assertTrue(torch.equal(kjt_expected.offsets(), kjt_actual.offsets()))
        self.assertTrue(torch.equal(kjt_expected.values(), kjt_actual.values()))
    def test_empty(self) -> None:
        # Construction from fully empty inputs must not raise.
        values = torch.Tensor()
        keys: List[str] = []
        offsets = torch.Tensor()
        KeyedJaggedTensor.from_offsets_sync(values=values, keys=keys, offsets=offsets)
    def test_2d(self) -> None:
        # 2-D values: each jagged "element" is a row vector.
        values = torch.Tensor([[i * 0.5, i * 1.0, i * 1.5] for i in range(1, 9)])
        keys = ["index_0", "index_1"]
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        j = KeyedJaggedTensor.from_offsets_sync(
            values=values,
            keys=keys,
            offsets=offsets,
        )
        j_0 = j["index_0"]
        self.assertTrue(torch.equal(j_0.lengths(), torch.IntTensor([2, 0, 1])))
        self.assertTrue(
            torch.equal(
                j_0.values(),
                torch.Tensor(
                    [
                        [0.5, 1.0, 1.5],
                        [1.0, 2.0, 3.0],
                        [1.5, 3.0, 4.5],
                    ],
                ),
            )
        )
    def test_float_lengths_offsets_throws(self) -> None:
        # lengths/offsets must be integral tensors.
        values = torch.rand((7, 3))
        lengths = torch.tensor([3.0, 4.0])
        offsets = torch.tensor([0.0, 3.0, 7.0])
        with self.assertRaises(AssertionError):
            JaggedTensor(values=values, lengths=lengths)
        with self.assertRaises(AssertionError):
            JaggedTensor(values=values, offsets=offsets)
    def test_to(self) -> None:
        # .to() must preserve offsets, lengths, values and weights.
        j = JaggedTensor(
            offsets=torch.tensor([0, 2, 2, 3]),
            values=torch.tensor([0.5, 1.0, 1.5]),
            weights=torch.tensor([5.0, 10.0, 15.0]),
        )
        j2 = j.to(device=torch.device("cpu"))
        self.assertTrue(torch.equal(j.offsets(), j2.offsets()))
        self.assertTrue(torch.equal(j.lengths(), j2.lengths()))
        self.assertTrue(torch.equal(j.values(), j2.values()))
        self.assertTrue(torch.equal(j.weights(), j2.weights()))
    # pyre-ignore[56]
    @unittest.skipIf(
        torch.cuda.device_count() <= 0,
        "CUDA is not available",
    )
    def test_record_stream(self) -> None:
        # Smoke test: record_stream must be callable on a CUDA JaggedTensor.
        j = JaggedTensor(
            offsets=torch.tensor([0, 2, 2, 3]),
            values=torch.tensor([0.5, 1.0, 1.5]),
            weights=torch.tensor([5.0, 10.0, 15.0]),
        ).to(torch.device("cuda"))
        j.record_stream(torch.cuda.current_stream())
    def test_string_basic(self) -> None:
        values = torch.Tensor([1.0])
        offsets = torch.IntTensor([0, 1])
        jag_tensor = JaggedTensor(
            values=values,
            offsets=offsets,
        )
        self.assertEqual(
            str(jag_tensor),
            """\
JaggedTensor({
    [[1.0]]
})
""",
        )
    def test_string_values(self) -> None:
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jag_tensor = JaggedTensor(
            values=values,
            offsets=offsets,
        )
        self.assertEqual(
            str(jag_tensor),
            """\
JaggedTensor({
    [[1.0, 2.0], [], [3.0], [4.0], [5.0], [6.0, 7.0, 8.0]]
})
""",
        )
    def test_string_weights(self) -> None:
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jag_tensor = JaggedTensor(
            values=values,
            offsets=offsets,
            weights=weights,
        )
        self.assertEqual(
            str(jag_tensor),
            """\
JaggedTensor({
    "values": [[1.0, 2.0], [], [3.0], [4.0], [5.0], [6.0, 7.0, 8.0]],
    "weights": [[1.0, 0.5], [], [1.5], [1.0], [0.5], [1.0, 1.0, 1.5]]
})
""",
        )
class TestKeyedJaggedTensor(unittest.TestCase):
def test_key_lookup(self) -> None:
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
keys = ["index_0", "index_1"]
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jag_tensor = KeyedJaggedTensor(
values=values,
keys=keys,
offsets=offsets,
weights=weights,
)
j0 = jag_tensor["index_0"]
j1 = jag_tensor["index_1"]
self.assertTrue(isinstance(j0, JaggedTensor))
self.assertTrue(torch.equal(j0.lengths(), torch.IntTensor([2, 0, 1])))
self.assertTrue(torch.equal(j0.weights(), torch.Tensor([1.0, 0.5, 1.5])))
self.assertTrue(torch.equal(j0.values(), torch.Tensor([1.0, 2.0, 3.0])))
self.assertTrue(torch.equal(j1.lengths(), torch.IntTensor([1, 1, 3])))
self.assertTrue(
torch.equal(j1.weights(), torch.Tensor([1.0, 0.5, 1.0, 1.0, 1.5]))
)
self.assertTrue(
torch.equal(j1.values(), torch.Tensor([4.0, 5.0, 6.0, 7.0, 8.0]))
)
def test_to_dict(self) -> None:
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
keys = ["index_0", "index_1"]
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jag_tensor = KeyedJaggedTensor(
values=values,
keys=keys,
offsets=offsets,
weights=weights,
)
jag_tensor_dict = jag_tensor.to_dict()
j0 = jag_tensor_dict["index_0"]
j1 = jag_tensor_dict["index_1"]
self.assertTrue(isinstance(j0, JaggedTensor))
self.assertTrue(torch.equal(j0.lengths(), torch.IntTensor([2, 0, 1])))
self.assertTrue(torch.equal(j0.weights(), torch.Tensor([1.0, 0.5, 1.5])))
self.assertTrue(torch.equal(j0.values(), torch.Tensor([1.0, 2.0, 3.0])))
self.assertTrue(torch.equal(j1.lengths(), torch.IntTensor([1, 1, 3])))
self.assertTrue(
torch.equal(j1.weights(), torch.Tensor([1.0, 0.5, 1.0, 1.0, 1.5]))
)
self.assertTrue(
torch.equal(j1.values(), torch.Tensor([4.0, 5.0, 6.0, 7.0, 8.0]))
)
def test_split(self) -> None:
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
keys = ["index_0", "index_1"]
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jag_tensor = KeyedJaggedTensor(
values=values,
keys=keys,
offsets=offsets,
weights=weights,
)
j0, j1 = jag_tensor.split([1, 1])
self.assertTrue(isinstance(j0, KeyedJaggedTensor))
self.assertEqual(j0.keys(), ["index_0"])
self.assertEqual(j1.keys(), ["index_1"])
self.assertTrue(torch.equal(j0.lengths(), torch.IntTensor([2, 0, 1])))
self.assertTrue(torch.equal(j0.weights(), torch.Tensor([1.0, 0.5, 1.5])))
self.assertTrue(torch.equal(j0.values(), torch.Tensor([1.0, 2.0, 3.0])))
self.assertTrue(torch.equal(j1.lengths(), torch.IntTensor([1, 1, 3])))
self.assertTrue(
torch.equal(j1.weights(), torch.Tensor([1.0, 0.5, 1.0, 1.0, 1.5]))
)
self.assertTrue(
torch.equal(j1.values(), torch.Tensor([4.0, 5.0, 6.0, 7.0, 8.0]))
)
    def test_zero_split(self) -> None:
        # A zero-width split yields an empty KJT that still keeps the stride.
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
        keys = ["index_0", "index_1"]
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jag_tensor = KeyedJaggedTensor(
            values=values,
            keys=keys,
            offsets=offsets,
            weights=weights,
        )
        j0, j1 = jag_tensor.split([0, 2])
        self.assertTrue(isinstance(j0, KeyedJaggedTensor))
        self.assertEqual(j0.keys(), [])
        self.assertTrue(torch.equal(j0.lengths(), torch.IntTensor([])))
        self.assertTrue(torch.equal(j0.weights(), torch.Tensor([])))
        self.assertTrue(torch.equal(j0.values(), torch.Tensor([])))
        self.assertEqual(j0.stride(), 3)
        # The second split keeps everything.
        self.assertEqual(j1.keys(), ["index_0", "index_1"])
        self.assertTrue(torch.equal(j1.lengths(), torch.IntTensor([2, 0, 1, 1, 1, 3])))
        self.assertTrue(torch.equal(j1.weights(), weights))
        self.assertTrue(torch.equal(j1.values(), values))
        self.assertEqual(j0.stride(), 3)
    def test_permute_w_weights(self) -> None:
        # permute reorders keys (values, lengths and weights follow).
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
        lengths = torch.IntTensor([0, 2, 0, 1, 1, 1, 0, 3, 0])
        keys = ["index_0", "index_1", "index_2"]
        jag_tensor = KeyedJaggedTensor.from_lengths_sync(
            values=values,
            keys=keys,
            lengths=lengths,
            weights=weights,
        )
        indices = [1, 0, 2]
        permuted_jag_tensor = jag_tensor.permute(indices)
        self.assertEqual(permuted_jag_tensor.keys(), ["index_1", "index_0", "index_2"])
        self.assertEqual(
            permuted_jag_tensor.offset_per_key(),
            [0, 3, 5, 8],
        )
        self.assertTrue(
            torch.equal(
                permuted_jag_tensor.values(),
                torch.Tensor([3.0, 4.0, 5.0, 1.0, 2.0, 6.0, 7.0, 8.0]),
            )
        )
        self.assertTrue(
            torch.equal(
                permuted_jag_tensor.lengths(),
                torch.IntTensor([1, 1, 1, 0, 2, 0, 0, 3, 0]),
            )
        )
        self.assertTrue(
            torch.equal(
                permuted_jag_tensor.weights(),
                torch.Tensor([1.5, 1.0, 0.5, 1.0, 0.5, 1.0, 1.0, 1.5]),
            ),
        )
def test_permute(self) -> None:
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
lengths = torch.IntTensor([0, 2, 0, 1, 1, 1, 0, 3, 0])
keys = ["index_0", "index_1", "index_2"]
jag_tensor = KeyedJaggedTensor.from_lengths_sync(
values=values,
keys=keys,
lengths=lengths,
)
indices = [1, 0, 2]
permuted_jag_tensor = jag_tensor.permute(indices)
self.assertEqual(permuted_jag_tensor.keys(), ["index_1", "index_0", "index_2"])
self.assertEqual(
permuted_jag_tensor.offset_per_key(),
[0, 3, 5, 8],
)
self.assertTrue(
torch.equal(
permuted_jag_tensor.values(),
torch.Tensor([3.0, 4.0, 5.0, 1.0, 2.0, 6.0, 7.0, 8.0]),
)
)
self.assertTrue(
torch.equal(
permuted_jag_tensor.lengths(),
torch.IntTensor([1, 1, 1, 0, 2, 0, 0, 3, 0]),
)
)
self.assertEqual(permuted_jag_tensor.weights_or_none(), None)
def test_permute_duplicates(self) -> None:
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
lengths = torch.IntTensor([0, 2, 0, 1, 1, 1, 0, 3, 0])
keys = ["index_0", "index_1", "index_2"]
jag_tensor = KeyedJaggedTensor.from_lengths_sync(
values=values,
keys=keys,
lengths=lengths,
)
indices = [1, 0, 2, 1, 1]
permuted_jag_tensor = jag_tensor.permute(indices)
self.assertEqual(
permuted_jag_tensor.keys(),
["index_1", "index_0", "index_2", "index_1", "index_1"],
)
self.assertEqual(
permuted_jag_tensor.offset_per_key(),
[0, 3, 5, 8, 11, 14],
)
self.assertTrue(
torch.equal(
permuted_jag_tensor.values(),
torch.Tensor(
[
3.0,
4.0,
5.0,
1.0,
2.0,
6.0,
7.0,
8.0,
3.0,
4.0,
5.0,
3.0,
4.0,
5.0,
]
),
)
)
self.assertTrue(
torch.equal(
permuted_jag_tensor.lengths(),
torch.IntTensor([1, 1, 1, 0, 2, 0, 0, 3, 0, 1, 1, 1, 1, 1, 1]),
)
)
self.assertEqual(permuted_jag_tensor.weights_or_none(), None)
def test_length_vs_offset(self) -> None:
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
keys = ["index_0", "index_1"]
offsets = torch.IntTensor([0, 0, 2, 2, 3, 4, 5, 5, 8])
lengths = torch.IntTensor([0, 2, 0, 1, 1, 1, 0, 3])
j_offset = KeyedJaggedTensor.from_offsets_sync(
values=values,
keys=keys,
offsets=offsets,
weights=weights,
)
j_lens = KeyedJaggedTensor.from_lengths_sync(
values=values,
keys=keys,
lengths=lengths,
weights=weights,
)
self.assertTrue(torch.equal(j_offset.lengths(), j_lens.lengths()))
# TO DO: T88149179
self.assertTrue(torch.equal(j_offset.offsets(), j_lens.offsets().int()))
def test_2d(self) -> None:
values = torch.Tensor([[i * 0.5, i * 1.0, i * 1.5] for i in range(1, 9)])
weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
keys = ["index_0", "index_1"]
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
j = KeyedJaggedTensor.from_offsets_sync(
values=values,
weights=weights,
keys=keys,
offsets=offsets,
)
j_0 = j["index_0"]
self.assertTrue(torch.equal(j_0.lengths(), torch.IntTensor([2, 0, 1])))
self.assertTrue(
torch.equal(
j_0.values(),
torch.Tensor(
[
[0.5, 1.0, 1.5],
[1.0, 2.0, 3.0],
[1.5, 3.0, 4.5],
],
),
)
)
def test_float_lengths_offsets_throws(self) -> None:
values = torch.rand((7, 3))
keys = ["f1", "f2"]
# torch.Tensor([3, 4]) also fails
# pyre-fixme[6]: Expected `Optional[typing.Type[torch._dtype]]` for 2nd
# param but got `Type[float]`.
lengths = torch.tensor([3, 4], dtype=float)
# pyre-fixme[6]: Expected `Optional[typing.Type[torch._dtype]]` for 2nd
# param but got `Type[float]`.
offsets = torch.tensor([0, 3, 7], dtype=float)
with self.assertRaises(AssertionError):
KeyedJaggedTensor.from_lengths_sync(
keys=keys, values=values, lengths=lengths
)
with self.assertRaises(AssertionError):
KeyedJaggedTensor.from_offsets_sync(
keys=keys, values=values, offsets=offsets
)
def test_scriptable(self) -> None:
class MyModule(torch.nn.Module):
def forward(self, input: KeyedJaggedTensor) -> torch.Tensor:
values = input["any"].values()
return values
m = MyModule()
torch.jit.script(m)
def test_to(self) -> None:
j = KeyedJaggedTensor.from_offsets_sync(
offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
values=torch.arange(8),
weights=torch.arange(8 * 10),
keys=["index_0", "index_1"],
)
j2 = j.to(device=torch.device("cpu"))
self.assertTrue(torch.equal(j.offsets(), j2.offsets()))
self.assertTrue(torch.equal(j.lengths(), j2.lengths()))
self.assertTrue(torch.equal(j.values(), j2.values()))
self.assertTrue(torch.equal(j.weights(), j2.weights()))
    def test_string_none(self) -> None:
        # An empty KJT (no values, no keys) renders as the bare class name.
        jag_tensor = KeyedJaggedTensor(
            torch.Tensor(),
            [],
        )
        self.assertEqual(
            str(jag_tensor),
            """\
KeyedJaggedTensor()
""",
        )
    def test_string_basic(self) -> None:
        # One key with a single value: the rendering shows one jagged row.
        values = torch.Tensor([1.0])
        keys = ["key"]
        offsets = torch.IntTensor([0, 1])
        jag_tensor = KeyedJaggedTensor(
            values=values,
            keys=keys,
            offsets=offsets,
        )
        self.assertEqual(
            str(jag_tensor),
            """\
KeyedJaggedTensor({
    "key": [[1.0]]
})
""",
        )
    def test_string_values(self) -> None:
        # Unweighted KJT: each key renders its per-sample jagged value lists.
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        keys = ["index_0", "index_1"]
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jag_tensor = KeyedJaggedTensor(
            values=values,
            keys=keys,
            offsets=offsets,
        )
        self.assertEqual(
            str(jag_tensor),
            """\
KeyedJaggedTensor({
    "index_0": [[1.0, 2.0], [], [3.0]],
    "index_1": [[4.0], [5.0], [6.0, 7.0, 8.0]]
})
""",
        )
    def test_string_weights(self) -> None:
        # Weighted KJT: each key renders nested "values"/"weights" sections.
        values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
        weights = torch.Tensor([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])
        keys = ["index_0", "index_1"]
        offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
        jag_tensor = KeyedJaggedTensor(
            values=values,
            keys=keys,
            offsets=offsets,
            weights=weights,
        )
        self.assertEqual(
            str(jag_tensor),
            """\
KeyedJaggedTensor({
    "index_0": {
        "values": [[1.0, 2.0], [], [3.0]],
        "weights": [[1.0, 0.5], [], [1.5]]
    },
    "index_1": {
        "values": [[4.0], [5.0], [6.0, 7.0, 8.0]],
        "weights": [[1.0], [0.5], [1.0, 1.0, 1.5]]
    }
})
""",
        )
# pyre-ignore[56]
@unittest.skipIf(
torch.cuda.device_count() <= 0,
"CUDA is not available",
)
def test_record_stream(self) -> None:
j = KeyedJaggedTensor.from_offsets_sync(
offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
values=torch.arange(8),
weights=torch.arange(8 * 10),
keys=["index_0", "index_1"],
).to(torch.device("cuda"))
j.record_stream(torch.cuda.current_stream())
class TestKeyedTensor(unittest.TestCase):
    """Unit tests for KeyedTensor: key lookup, dict conversion, regrouping,
    TorchScript/FX compatibility, and string rendering."""
    def test_key_lookup(self) -> None:
        # Two dense tensors concatenated along dim 0, keyed by name.
        tensor_list = [
            torch.Tensor([[1.0, 1.0]]),
            torch.Tensor([[2.0, 2.0], [3.0, 3.0]]),
        ]
        keys = ["dense_0", "dense_1"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, cat_dim=0, key_dim=0)
        self.assertEqual(kt.key_dim(), 0)
        # Indexing by key recovers the original per-key tensors.
        self.assertTrue(torch.equal(kt["dense_0"], tensor_list[0]))
        self.assertTrue(torch.equal(kt["dense_1"], tensor_list[1]))
    def test_key_lookup_dim_1(self) -> None:
        # Same lookup, but keys live along dim 1 (inputs are transposed).
        tensor_list = [
            torch.tensor([[1.0, 1.0]]).T,
            torch.tensor([[2.0, 2.0], [3.0, 3.0]]).T,
        ]
        keys = ["dense_0", "dense_1"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, key_dim=1)
        self.assertEqual(kt.key_dim(), 1)
        self.assertTrue(torch.equal(kt["dense_0"], tensor_list[0]))
        self.assertTrue(torch.equal(kt["dense_1"], tensor_list[1]))
    def test_to_dict(self) -> None:
        tensor_list = [
            torch.Tensor([[1.0, 1.0]]),
            torch.Tensor([[2.0, 2.0], [3.0, 3.0]]),
        ]
        keys = ["dense_0", "dense_1"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, cat_dim=0, key_dim=0)
        self.assertEqual(kt.key_dim(), 0)
        # to_dict() must agree with __getitem__ for every key.
        d = kt.to_dict()
        for key in keys:
            self.assertTrue(torch.equal(kt[key], d[key]))
    def test_to_dict_dim_1(self) -> None:
        # Same as test_to_dict but with keys along dim 1.
        tensor_list = [
            torch.tensor([[1.0, 1.0]]).T,
            torch.tensor([[2.0, 2.0], [3.0, 3.0]]).T,
        ]
        keys = ["dense_0", "dense_1"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, key_dim=1)
        self.assertEqual(kt.key_dim(), 1)
        d = kt.to_dict()
        for key in keys:
            self.assertTrue(torch.equal(kt[key], d[key]))
    def test_regroup_single_kt(self) -> None:
        tensor_list = [torch.randn(2, 3) for i in range(5)]
        key_dim = 1
        keys = ["dense_0", "dense_1", "dense_2", "dense_3", "dense_4"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, key_dim)
        # Regroup into three groups; each output should be the concat of its
        # members' tensors along key_dim.
        grouped_tensors = KeyedTensor.regroup(
            [kt], [["dense_0", "dense_4"], ["dense_1", "dense_3"], ["dense_2"]]
        )
        self.assertTrue(
            torch.equal(
                grouped_tensors[0], torch.cat([tensor_list[0], tensor_list[4]], key_dim)
            )
        )
        self.assertTrue(
            torch.equal(
                grouped_tensors[1], torch.cat([tensor_list[1], tensor_list[3]], key_dim)
            )
        )
        self.assertTrue(torch.equal(grouped_tensors[2], tensor_list[2]))
    def test_regroup_multiple_kt(self) -> None:
        key_dim = 1
        tensor_list_1 = [torch.randn(2, 3) for i in range(3)]
        keys_1 = ["dense_0", "dense_1", "dense_2"]
        kt_1 = KeyedTensor.from_tensor_list(keys_1, tensor_list_1, key_dim)
        tensor_list_2 = [torch.randn(2, 3) for i in range(2)]
        keys_2 = ["sparse_0", "sparse_1"]
        kt_2 = KeyedTensor.from_tensor_list(keys_2, tensor_list_2, key_dim)
        # Groups may mix keys drawn from different source KeyedTensors.
        grouped_tensors = KeyedTensor.regroup(
            [kt_1, kt_2], [["dense_0", "sparse_1", "dense_2"], ["dense_1", "sparse_0"]]
        )
        self.assertTrue(
            torch.equal(
                grouped_tensors[0],
                torch.cat(
                    [tensor_list_1[0], tensor_list_2[1], tensor_list_1[2]], key_dim
                ),
            )
        )
        self.assertTrue(
            torch.equal(
                grouped_tensors[1],
                torch.cat([tensor_list_1[1], tensor_list_2[0]], key_dim),
            )
        )
    def test_regroup_scriptable(self) -> None:
        # regroup() must be usable from inside a TorchScript-compiled module.
        class MyModule(torch.nn.Module):
            def forward(
                self, inputs: List[KeyedTensor], groups: List[List[str]]
            ) -> List[torch.Tensor]:
                return KeyedTensor.regroup(inputs, groups)
        m = MyModule()
        torch.jit.script(m)
    def test_regroup_fxable(self) -> None:
        # regroup() must survive torch.fx symbolic tracing with unchanged
        # results.
        class MyModule(torch.nn.Module):
            def forward(
                self, inputs: List[KeyedTensor], groups: List[List[str]]
            ) -> List[torch.Tensor]:
                return KeyedTensor.regroup(inputs, groups)
        m = MyModule()
        # input
        key_dim = 1
        tensor_list_1 = [torch.randn(2, 3) for i in range(3)]
        keys_1 = ["dense_0", "dense_1", "dense_2"]
        kt_1 = KeyedTensor.from_tensor_list(keys_1, tensor_list_1, key_dim)
        tensor_list_2 = [torch.randn(2, 3) for i in range(2)]
        keys_2 = ["sparse_0", "sparse_1"]
        kt_2 = KeyedTensor.from_tensor_list(keys_2, tensor_list_2, key_dim)
        inputs = [kt_1, kt_2]
        groups = [["dense_0", "sparse_1", "dense_2"], ["dense_1", "sparse_0"]]
        # ensure that symbolic tracing works
        gm = torch.fx.symbolic_trace(m)
        results = m(inputs, groups)
        traced_results = gm(inputs, groups)
        # Traced module must produce the same outputs as eager execution.
        self.assertEqual(len(results), len(traced_results))
        for result, traced_result in zip(results, traced_results):
            self.assertTrue(torch.equal(result, traced_result))
    def test_scriptable(self) -> None:
        # Indexing a KeyedTensor by key must be TorchScript-compilable.
        class MyModule(torch.nn.Module):
            def forward(self, input: KeyedTensor) -> torch.Tensor:
                values = input["any"].values()
                return values
        m = MyModule()
        torch.jit.script(m)
    def test_string_none(self) -> None:
        # An empty KeyedTensor renders as the bare class name.
        jag_tensor = KeyedTensor(
            [],
            [],
            torch.Tensor(),
        )
        self.assertEqual(
            str(jag_tensor),
            """\
KeyedTensor()
""",
        )
    def test_string_basic(self) -> None:
        # Single key, single value: one row in the rendering.
        tensor_list = [
            torch.tensor([[1.0]]),
        ]
        keys = ["key"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list, key_dim=0)
        self.assertEqual(
            str(kt),
            """\
KeyedTensor({
    "key": [[1.0]]
})
""",
        )
    def test_string_values(self) -> None:
        # Multiple keys: each renders its per-row value lists.
        tensor_list = [
            torch.tensor([[1.0, 1.0]]).T,
            torch.tensor([[2.0, 2.0], [3.0, 3.0]]).T,
        ]
        keys = ["dense_0", "dense_1"]
        kt = KeyedTensor.from_tensor_list(keys, tensor_list)
        self.assertEqual(
            str(kt),
            """\
KeyedTensor({
    "dense_0": [[1.0], [1.0]],
    "dense_1": [[2.0, 3.0], [2.0, 3.0]]
})
""",
        )
| [
"torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_offsets_sync",
"torchrec.sparse.jagged_tensor.KeyedJaggedTensor",
"torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync",
"torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list",
"torchrec.sparse.jagged_tensor.JaggedTensor",
"torchrec... | [((498, 588), 'torch.Tensor', 'torch.Tensor', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0], [10.0, \n 11.0, 12.0]])\n', (510, 588), False, 'import torch\n'), ((1507, 1561), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (1519, 1561), False, 'import torch\n'), ((1618, 1656), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (1633, 1656), False, 'import torch\n'), ((1679, 1739), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets'}), '(values=values, keys=keys, offsets=offsets)\n', (1696, 1739), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((2316, 2370), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (2328, 2370), False, 'import torch\n'), ((2427, 2465), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (2442, 2465), False, 'import torch\n'), ((2488, 2548), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets'}), '(values=values, keys=keys, offsets=offsets)\n', (2505, 2548), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((3211, 3265), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (3223, 3265), False, 'import torch\n'), ((3322, 3366), 'torch.IntTensor', 'torch.IntTensor', (['[0, 0, 2, 2, 3, 4, 5, 5, 8]'], {}), '([0, 0, 2, 2, 3, 4, 5, 5, 8])\n', (3337, 3366), False, 'import torch\n'), ((3385, 3426), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 0, 
1, 1, 1, 0, 3]'], {}), '([0, 2, 0, 1, 1, 1, 0, 3])\n', (3400, 3426), False, 'import torch\n'), ((3447, 3525), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_offsets_sync', 'KeyedJaggedTensor.from_offsets_sync', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets'}), '(values=values, keys=keys, offsets=offsets)\n', (3482, 3525), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((3591, 3669), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'values': 'values', 'keys': 'keys', 'lengths': 'lengths'}), '(values=values, keys=keys, lengths=lengths)\n', (3626, 3669), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((3953, 4012), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0])\n', (3965, 4012), False, 'import torch\n'), ((4080, 4133), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 0, 1, 1, 1, 0, 3, 0, 0, 1, 0]'], {}), '([0, 2, 0, 1, 1, 1, 0, 3, 0, 0, 1, 0])\n', (4095, 4133), False, 'import torch\n'), ((4158, 4236), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'values': 'values', 'keys': 'keys', 'lengths': 'lengths'}), '(values=values, keys=keys, lengths=lengths)\n', (4193, 4236), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((4978, 4992), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (4990, 4992), False, 'import torch\n'), ((5029, 5043), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (5041, 5043), False, 'import torch\n'), ((5053, 5131), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_offsets_sync', 'KeyedJaggedTensor.from_offsets_sync', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets'}), '(values=values, keys=keys, offsets=offsets)\n', (5088, 5131), 
False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((5302, 5340), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (5317, 5340), False, 'import torch\n'), ((5354, 5432), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_offsets_sync', 'KeyedJaggedTensor.from_offsets_sync', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets'}), '(values=values, keys=keys, offsets=offsets)\n', (5389, 5432), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((5984, 6002), 'torch.rand', 'torch.rand', (['(7, 3)'], {}), '((7, 3))\n', (5994, 6002), False, 'import torch\n'), ((6021, 6045), 'torch.tensor', 'torch.tensor', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (6033, 6045), False, 'import torch\n'), ((6064, 6093), 'torch.tensor', 'torch.tensor', (['[0.0, 3.0, 7.0]'], {}), '([0.0, 3.0, 7.0])\n', (6076, 6093), False, 'import torch\n'), ((7314, 7333), 'torch.Tensor', 'torch.Tensor', (['[1.0]'], {}), '([1.0])\n', (7326, 7333), False, 'import torch\n'), ((7352, 7375), 'torch.IntTensor', 'torch.IntTensor', (['[0, 1]'], {}), '([0, 1])\n', (7367, 7375), False, 'import torch\n'), ((7398, 7442), 'torchrec.sparse.jagged_tensor.JaggedTensor', 'JaggedTensor', ([], {'values': 'values', 'offsets': 'offsets'}), '(values=values, offsets=offsets)\n', (7410, 7442), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((7656, 7710), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (7668, 7710), False, 'import torch\n'), ((7729, 7767), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (7744, 7767), False, 'import torch\n'), ((7790, 7834), 'torchrec.sparse.jagged_tensor.JaggedTensor', 'JaggedTensor', ([], {'values': 'values', 'offsets': 'offsets'}), '(values=values, 
offsets=offsets)\n', (7802, 7834), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((8096, 8150), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (8108, 8150), False, 'import torch\n'), ((8169, 8223), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (8181, 8223), False, 'import torch\n'), ((8242, 8280), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (8257, 8280), False, 'import torch\n'), ((8303, 8364), 'torchrec.sparse.jagged_tensor.JaggedTensor', 'JaggedTensor', ([], {'values': 'values', 'offsets': 'offsets', 'weights': 'weights'}), '(values=values, offsets=offsets, weights=weights)\n', (8315, 8364), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((8764, 8818), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (8776, 8818), False, 'import torch\n'), ((8837, 8891), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (8849, 8891), False, 'import torch\n'), ((8948, 8986), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (8963, 8986), False, 'import torch\n'), ((9009, 9086), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets', 'weights': 'weights'}), '(values=values, keys=keys, offsets=offsets, weights=weights)\n', (9026, 9086), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((9873, 9927), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 
7.0, 8.0])\n', (9885, 9927), False, 'import torch\n'), ((9946, 10000), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (9958, 10000), False, 'import torch\n'), ((10057, 10095), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (10072, 10095), False, 'import torch\n'), ((10118, 10195), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets', 'weights': 'weights'}), '(values=values, keys=keys, offsets=offsets, weights=weights)\n', (10135, 10195), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((11037, 11091), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (11049, 11091), False, 'import torch\n'), ((11110, 11164), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (11122, 11164), False, 'import torch\n'), ((11221, 11259), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (11236, 11259), False, 'import torch\n'), ((11282, 11359), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets', 'weights': 'weights'}), '(values=values, keys=keys, offsets=offsets, weights=weights)\n', (11299, 11359), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((12224, 12278), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (12236, 12278), False, 'import torch\n'), ((12297, 12351), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 
1.5])\n', (12309, 12351), False, 'import torch\n'), ((12408, 12446), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (12423, 12446), False, 'import torch\n'), ((12469, 12546), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets', 'weights': 'weights'}), '(values=values, keys=keys, offsets=offsets, weights=weights)\n', (12486, 12546), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((13370, 13424), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (13382, 13424), False, 'import torch\n'), ((13443, 13497), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (13455, 13497), False, 'import torch\n'), ((13516, 13560), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 0, 1, 1, 1, 0, 3, 0]'], {}), '([0, 2, 0, 1, 1, 1, 0, 3, 0])\n', (13531, 13560), False, 'import torch\n'), ((13632, 13732), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'values': 'values', 'keys': 'keys', 'lengths': 'lengths', 'weights': 'weights'}), '(values=values, keys=keys, lengths=\n lengths, weights=weights)\n', (13667, 13732), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((14697, 14751), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (14709, 14751), False, 'import torch\n'), ((14770, 14814), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 0, 1, 1, 1, 0, 3, 0]'], {}), '([0, 2, 0, 1, 1, 1, 0, 3, 0])\n', (14785, 14814), False, 'import torch\n'), ((14886, 14964), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 
'KeyedJaggedTensor.from_lengths_sync', ([], {'values': 'values', 'keys': 'keys', 'lengths': 'lengths'}), '(values=values, keys=keys, lengths=lengths)\n', (14921, 14964), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((15810, 15864), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (15822, 15864), False, 'import torch\n'), ((15883, 15927), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 0, 1, 1, 1, 0, 3, 0]'], {}), '([0, 2, 0, 1, 1, 1, 0, 3, 0])\n', (15898, 15927), False, 'import torch\n'), ((15999, 16077), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'values': 'values', 'keys': 'keys', 'lengths': 'lengths'}), '(values=values, keys=keys, lengths=lengths)\n', (16034, 16077), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((17437, 17491), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (17449, 17491), False, 'import torch\n'), ((17510, 17564), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (17522, 17564), False, 'import torch\n'), ((17621, 17665), 'torch.IntTensor', 'torch.IntTensor', (['[0, 0, 2, 2, 3, 4, 5, 5, 8]'], {}), '([0, 0, 2, 2, 3, 4, 5, 5, 8])\n', (17636, 17665), False, 'import torch\n'), ((17684, 17725), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 0, 1, 1, 1, 0, 3]'], {}), '([0, 2, 0, 1, 1, 1, 0, 3])\n', (17699, 17725), False, 'import torch\n'), ((17746, 17846), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_offsets_sync', 'KeyedJaggedTensor.from_offsets_sync', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets', 'weights': 'weights'}), '(values=values, keys=keys, offsets=\n offsets, weights=weights)\n', (17781, 
17846), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((17919, 18019), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'values': 'values', 'keys': 'keys', 'lengths': 'lengths', 'weights': 'weights'}), '(values=values, keys=keys, lengths=\n lengths, weights=weights)\n', (17954, 18019), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((18390, 18444), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (18402, 18444), False, 'import torch\n'), ((18501, 18539), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (18516, 18539), False, 'import torch\n'), ((18553, 18653), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_offsets_sync', 'KeyedJaggedTensor.from_offsets_sync', ([], {'values': 'values', 'weights': 'weights', 'keys': 'keys', 'offsets': 'offsets'}), '(values=values, weights=weights, keys=\n keys, offsets=offsets)\n', (18588, 18653), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((19212, 19230), 'torch.rand', 'torch.rand', (['(7, 3)'], {}), '((7, 3))\n', (19222, 19230), False, 'import torch\n'), ((19439, 19472), 'torch.tensor', 'torch.tensor', (['[3, 4]'], {'dtype': 'float'}), '([3, 4], dtype=float)\n', (19451, 19472), False, 'import torch\n'), ((19611, 19647), 'torch.tensor', 'torch.tensor', (['[0, 3, 7]'], {'dtype': 'float'}), '([0, 3, 7], dtype=float)\n', (19623, 19647), False, 'import torch\n'), ((20250, 20269), 'torch.jit.script', 'torch.jit.script', (['m'], {}), '(m)\n', (20266, 20269), False, 'import torch\n'), ((21139, 21158), 'torch.Tensor', 'torch.Tensor', (['[1.0]'], {}), '([1.0])\n', (21151, 21158), False, 'import torch\n'), ((21200, 21223), 'torch.IntTensor', 'torch.IntTensor', (['[0, 1]'], {}), 
'([0, 1])\n', (21215, 21223), False, 'import torch\n'), ((21246, 21306), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets'}), '(values=values, keys=keys, offsets=offsets)\n', (21263, 21306), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((21544, 21598), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (21556, 21598), False, 'import torch\n'), ((21655, 21693), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (21670, 21693), False, 'import torch\n'), ((21716, 21776), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets'}), '(values=values, keys=keys, offsets=offsets)\n', (21733, 21776), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((22083, 22137), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])\n', (22095, 22137), False, 'import torch\n'), ((22156, 22210), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (22168, 22210), False, 'import torch\n'), ((22267, 22305), 'torch.IntTensor', 'torch.IntTensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (22282, 22305), False, 'import torch\n'), ((22328, 22405), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'values': 'values', 'keys': 'keys', 'offsets': 'offsets', 'weights': 'weights'}), '(values=values, keys=keys, offsets=offsets, weights=weights)\n', (22345, 22405), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((23550, 23619), 
'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys', 'tensor_list'], {'cat_dim': '(0)', 'key_dim': '(0)'}), '(keys, tensor_list, cat_dim=0, key_dim=0)\n', (23578, 23619), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((24026, 24084), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys', 'tensor_list'], {'key_dim': '(1)'}), '(keys, tensor_list, key_dim=1)\n', (24054, 24084), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((24477, 24546), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys', 'tensor_list'], {'cat_dim': '(0)', 'key_dim': '(0)'}), '(keys, tensor_list, cat_dim=0, key_dim=0)\n', (24505, 24546), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((24922, 24980), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys', 'tensor_list'], {'key_dim': '(1)'}), '(keys, tensor_list, key_dim=1)\n', (24950, 24980), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((25343, 25399), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys', 'tensor_list', 'key_dim'], {}), '(keys, tensor_list, key_dim)\n', (25371, 25399), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((25426, 25518), 'torchrec.sparse.jagged_tensor.KeyedTensor.regroup', 'KeyedTensor.regroup', (['[kt]', "[['dense_0', 'dense_4'], ['dense_1', 'dense_3'], ['dense_2']]"], {}), "([kt], [['dense_0', 'dense_4'], ['dense_1', 'dense_3'],\n ['dense_2']])\n", (25445, 25518), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((26133, 26193), 
'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys_1', 'tensor_list_1', 'key_dim'], {}), '(keys_1, tensor_list_1, key_dim)\n', (26161, 26193), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((26313, 26373), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys_2', 'tensor_list_2', 'key_dim'], {}), '(keys_2, tensor_list_2, key_dim)\n', (26341, 26373), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((26400, 26501), 'torchrec.sparse.jagged_tensor.KeyedTensor.regroup', 'KeyedTensor.regroup', (['[kt_1, kt_2]', "[['dense_0', 'sparse_1', 'dense_2'], ['dense_1', 'sparse_0']]"], {}), "([kt_1, kt_2], [['dense_0', 'sparse_1', 'dense_2'], [\n 'dense_1', 'sparse_0']])\n", (26419, 26501), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((27258, 27277), 'torch.jit.script', 'torch.jit.script', (['m'], {}), '(m)\n', (27274, 27277), False, 'import torch\n'), ((27746, 27806), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys_1', 'tensor_list_1', 'key_dim'], {}), '(keys_1, tensor_list_1, key_dim)\n', (27774, 27806), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((27926, 27986), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys_2', 'tensor_list_2', 'key_dim'], {}), '(keys_2, tensor_list_2, key_dim)\n', (27954, 27986), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((28155, 28181), 'torch.fx.symbolic_trace', 'torch.fx.symbolic_trace', (['m'], {}), '(m)\n', (28178, 28181), False, 'import torch\n'), ((28710, 28729), 'torch.jit.script', 'torch.jit.script', (['m'], {}), '(m)\n', (28726, 28729), False, 'import torch\n'), ((29124, 
29182), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys', 'tensor_list'], {'key_dim': '(0)'}), '(keys, tensor_list, key_dim=0)\n', (29152, 29182), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((29523, 29570), 'torchrec.sparse.jagged_tensor.KeyedTensor.from_tensor_list', 'KeyedTensor.from_tensor_list', (['keys', 'tensor_list'], {}), '(keys, tensor_list)\n', (29551, 29570), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((6155, 6199), 'torchrec.sparse.jagged_tensor.JaggedTensor', 'JaggedTensor', ([], {'values': 'values', 'lengths': 'lengths'}), '(values=values, lengths=lengths)\n', (6167, 6199), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((6260, 6304), 'torchrec.sparse.jagged_tensor.JaggedTensor', 'JaggedTensor', ([], {'values': 'values', 'offsets': 'offsets'}), '(values=values, offsets=offsets)\n', (6272, 6304), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((7180, 7200), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7192, 7200), False, 'import torch\n'), ((7226, 7253), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (7251, 7253), False, 'import torch\n'), ((6877, 6902), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (6900, 6902), False, 'import torch\n'), ((19709, 19787), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'keys': 'keys', 'values': 'values', 'lengths': 'lengths'}), '(keys=keys, values=values, lengths=lengths)\n', (19744, 19787), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((19878, 19956), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_offsets_sync', 
'KeyedJaggedTensor.from_offsets_sync', ([], {'keys': 'keys', 'values': 'values', 'offsets': 'offsets'}), '(keys=keys, values=values, offsets=offsets)\n', (19913, 19956), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((20930, 20944), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (20942, 20944), False, 'import torch\n'), ((23215, 23235), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (23227, 23235), False, 'import torch\n'), ((23261, 23288), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (23286, 23288), False, 'import torch\n'), ((22864, 22889), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (22887, 22889), False, 'import torch\n'), ((23409, 23435), 'torch.Tensor', 'torch.Tensor', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (23421, 23435), False, 'import torch\n'), ((23449, 23487), 'torch.Tensor', 'torch.Tensor', (['[[2.0, 2.0], [3.0, 3.0]]'], {}), '([[2.0, 2.0], [3.0, 3.0]])\n', (23461, 23487), False, 'import torch\n'), ((23687, 23729), 'torch.equal', 'torch.equal', (["kt['dense_0']", 'tensor_list[0]'], {}), "(kt['dense_0'], tensor_list[0])\n", (23698, 23729), False, 'import torch\n'), ((23755, 23797), 'torch.equal', 'torch.equal', (["kt['dense_1']", 'tensor_list[1]'], {}), "(kt['dense_1'], tensor_list[1])\n", (23766, 23797), False, 'import torch\n'), ((24151, 24193), 'torch.equal', 'torch.equal', (["kt['dense_0']", 'tensor_list[0]'], {}), "(kt['dense_0'], tensor_list[0])\n", (24162, 24193), False, 'import torch\n'), ((24219, 24261), 'torch.equal', 'torch.equal', (["kt['dense_1']", 'tensor_list[1]'], {}), "(kt['dense_1'], tensor_list[1])\n", (24230, 24261), False, 'import torch\n'), ((24336, 24362), 'torch.Tensor', 'torch.Tensor', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (24348, 24362), False, 'import torch\n'), ((24376, 24414), 'torch.Tensor', 'torch.Tensor', (['[[2.0, 2.0], [3.0, 3.0]]'], {}), '([[2.0, 2.0], [3.0, 3.0]])\n', 
(24388, 24414), False, 'import torch\n'), ((25202, 25219), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (25213, 25219), False, 'import torch\n'), ((25887, 25934), 'torch.equal', 'torch.equal', (['grouped_tensors[2]', 'tensor_list[2]'], {}), '(grouped_tensors[2], tensor_list[2])\n', (25898, 25934), False, 'import torch\n'), ((26030, 26047), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (26041, 26047), False, 'import torch\n'), ((26219, 26236), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (26230, 26236), False, 'import torch\n'), ((27643, 27660), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (27654, 27660), False, 'import torch\n'), ((27832, 27849), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (27843, 27849), False, 'import torch\n'), ((28849, 28863), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (28861, 28863), False, 'import torch\n'), ((29055, 29076), 'torch.tensor', 'torch.tensor', (['[[1.0]]'], {}), '([[1.0]])\n', (29067, 29076), False, 'import torch\n'), ((731, 760), 'torch.IntTensor', 'torch.IntTensor', (['[1, 0, 2, 3]'], {}), '([1, 0, 2, 3])\n', (746, 760), False, 'import torch\n'), ((822, 851), 'torch.IntTensor', 'torch.IntTensor', (['[1, 0, 2, 3]'], {}), '([1, 0, 2, 3])\n', (837, 851), False, 'import torch\n'), ((916, 963), 'torch.Tensor', 'torch.Tensor', (['[1.0, 7.0, 8.0, 10.0, 11.0, 12.0]'], {}), '([1.0, 7.0, 8.0, 10.0, 11.0, 12.0])\n', (928, 963), False, 'import torch\n'), ((1122, 1151), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1, 1]'], {}), '([2, 0, 1, 1])\n', (1137, 1151), False, 'import torch\n'), ((1242, 1271), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1, 1]'], {}), '([2, 0, 1, 1])\n', (1257, 1271), False, 'import torch\n'), ((1323, 1358), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 7.0, 10.0]'], {}), '([1.0, 2.0, 7.0, 10.0])\n', (1335, 1358), False, 'import torch\n'), ((1411, 1447), 'torch.Tensor', 'torch.Tensor', (['[11.0, 10.0, 5.0, 
2.0]'], {}), '([11.0, 10.0, 5.0, 2.0])\n', (1423, 1447), False, 'import torch\n'), ((1962, 1988), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (1977, 1988), False, 'import torch\n'), ((2040, 2069), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (2052, 2069), False, 'import torch\n'), ((2122, 2148), 'torch.IntTensor', 'torch.IntTensor', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (2137, 2148), False, 'import torch\n'), ((2213, 2252), 'torch.Tensor', 'torch.Tensor', (['[4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([4.0, 5.0, 6.0, 7.0, 8.0])\n', (2225, 2252), False, 'import torch\n'), ((2846, 2872), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (2861, 2872), False, 'import torch\n'), ((2924, 2953), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (2936, 2953), False, 'import torch\n'), ((3006, 3032), 'torch.IntTensor', 'torch.IntTensor', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (3021, 3032), False, 'import torch\n'), ((3097, 3136), 'torch.Tensor', 'torch.Tensor', (['[4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([4.0, 5.0, 6.0, 7.0, 8.0])\n', (3109, 3136), False, 'import torch\n'), ((4345, 4439), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'values': 'values[:4]', 'keys': 'keys[:1]', 'lengths': 'lengths[:4]'}), '(values=values[:4], keys=keys[:1],\n lengths=lengths[:4])\n', (4380, 4439), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((4514, 4608), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor.from_lengths_sync', 'KeyedJaggedTensor.from_lengths_sync', ([], {'values': 'values[4:]', 'keys': 'keys[1:]', 'lengths': 'lengths[4:]'}), '(values=values[4:], keys=keys[1:],\n lengths=lengths[4:])\n', (4549, 4608), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((5559, 5585), 'torch.IntTensor', 
'torch.IntTensor', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (5574, 5585), False, 'import torch\n'), ((5684, 5749), 'torch.Tensor', 'torch.Tensor', (['[[0.5, 1.0, 1.5], [1.0, 2.0, 3.0], [1.5, 3.0, 4.5]]'], {}), '([[0.5, 1.0, 1.5], [1.0, 2.0, 3.0], [1.5, 3.0, 4.5]])\n', (5696, 5749), False, 'import torch\n'), ((6383, 6409), 'torch.tensor', 'torch.tensor', (['[0, 2, 2, 3]'], {}), '([0, 2, 2, 3])\n', (6395, 6409), False, 'import torch\n'), ((6430, 6459), 'torch.tensor', 'torch.tensor', (['[0.5, 1.0, 1.5]'], {}), '([0.5, 1.0, 1.5])\n', (6442, 6459), False, 'import torch\n'), ((6481, 6512), 'torch.tensor', 'torch.tensor', (['[5.0, 10.0, 15.0]'], {}), '([5.0, 10.0, 15.0])\n', (6493, 6512), False, 'import torch\n'), ((6549, 6568), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6561, 6568), False, 'import torch\n'), ((9321, 9347), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (9336, 9347), False, 'import torch\n'), ((9400, 9429), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5]'], {}), '([1.0, 0.5, 1.5])\n', (9412, 9429), False, 'import torch\n'), ((9481, 9510), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (9493, 9510), False, 'import torch\n'), ((9563, 9589), 'torch.IntTensor', 'torch.IntTensor', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (9578, 9589), False, 'import torch\n'), ((9655, 9694), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.0, 1.0, 1.5])\n', (9667, 9694), False, 'import torch\n'), ((9768, 9807), 'torch.Tensor', 'torch.Tensor', (['[4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([4.0, 5.0, 6.0, 7.0, 8.0])\n', (9780, 9807), False, 'import torch\n'), ((10487, 10513), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (10502, 10513), False, 'import torch\n'), ((10566, 10595), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5]'], {}), '([1.0, 0.5, 1.5])\n', (10578, 10595), False, 'import torch\n'), ((10647, 10676), 'torch.Tensor', 
'torch.Tensor', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (10659, 10676), False, 'import torch\n'), ((10729, 10755), 'torch.IntTensor', 'torch.IntTensor', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (10744, 10755), False, 'import torch\n'), ((10821, 10860), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.0, 1.0, 1.5])\n', (10833, 10860), False, 'import torch\n'), ((10934, 10973), 'torch.Tensor', 'torch.Tensor', (['[4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([4.0, 5.0, 6.0, 7.0, 8.0])\n', (10946, 10973), False, 'import torch\n'), ((11669, 11695), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (11684, 11695), False, 'import torch\n'), ((11748, 11777), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.5]'], {}), '([1.0, 0.5, 1.5])\n', (11760, 11777), False, 'import torch\n'), ((11829, 11858), 'torch.Tensor', 'torch.Tensor', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (11841, 11858), False, 'import torch\n'), ((11911, 11937), 'torch.IntTensor', 'torch.IntTensor', (['[1, 1, 3]'], {}), '([1, 1, 3])\n', (11926, 11937), False, 'import torch\n'), ((12003, 12042), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.0, 0.5, 1.0, 1.0, 1.5])\n', (12015, 12042), False, 'import torch\n'), ((12116, 12155), 'torch.Tensor', 'torch.Tensor', (['[4.0, 5.0, 6.0, 7.0, 8.0]'], {}), '([4.0, 5.0, 6.0, 7.0, 8.0])\n', (12128, 12155), False, 'import torch\n'), ((12798, 12817), 'torch.IntTensor', 'torch.IntTensor', (['[]'], {}), '([])\n', (12813, 12817), False, 'import torch\n'), ((12870, 12886), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (12882, 12886), False, 'import torch\n'), ((12938, 12954), 'torch.Tensor', 'torch.Tensor', (['[]'], {}), '([])\n', (12950, 12954), False, 'import torch\n'), ((13109, 13144), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1, 1, 1, 3]'], {}), '([2, 0, 1, 1, 1, 3])\n', (13124, 13144), False, 'import torch\n'), ((14186, 14240), 'torch.Tensor', 'torch.Tensor', 
(['[3.0, 4.0, 5.0, 1.0, 2.0, 6.0, 7.0, 8.0]'], {}), '([3.0, 4.0, 5.0, 1.0, 2.0, 6.0, 7.0, 8.0])\n', (14198, 14240), False, 'import torch\n'), ((14379, 14423), 'torch.IntTensor', 'torch.IntTensor', (['[1, 1, 1, 0, 2, 0, 0, 3, 0]'], {}), '([1, 1, 1, 0, 2, 0, 0, 3, 0])\n', (14394, 14423), False, 'import torch\n'), ((14562, 14616), 'torch.Tensor', 'torch.Tensor', (['[1.5, 1.0, 0.5, 1.0, 0.5, 1.0, 1.0, 1.5]'], {}), '([1.5, 1.0, 0.5, 1.0, 0.5, 1.0, 1.0, 1.5])\n', (14574, 14616), False, 'import torch\n'), ((15412, 15466), 'torch.Tensor', 'torch.Tensor', (['[3.0, 4.0, 5.0, 1.0, 2.0, 6.0, 7.0, 8.0]'], {}), '([3.0, 4.0, 5.0, 1.0, 2.0, 6.0, 7.0, 8.0])\n', (15424, 15466), False, 'import torch\n'), ((15605, 15649), 'torch.IntTensor', 'torch.IntTensor', (['[1, 1, 1, 0, 2, 0, 0, 3, 0]'], {}), '([1, 1, 1, 0, 2, 0, 0, 3, 0])\n', (15620, 15649), False, 'import torch\n'), ((16596, 16685), 'torch.Tensor', 'torch.Tensor', (['[3.0, 4.0, 5.0, 1.0, 2.0, 6.0, 7.0, 8.0, 3.0, 4.0, 5.0, 3.0, 4.0, 5.0]'], {}), '([3.0, 4.0, 5.0, 1.0, 2.0, 6.0, 7.0, 8.0, 3.0, 4.0, 5.0, 3.0, \n 4.0, 5.0])\n', (16608, 16685), False, 'import torch\n'), ((17216, 17278), 'torch.IntTensor', 'torch.IntTensor', (['[1, 1, 1, 0, 2, 0, 0, 3, 0, 1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 0, 2, 0, 0, 3, 0, 1, 1, 1, 1, 1, 1])\n', (17231, 17278), False, 'import torch\n'), ((18787, 18813), 'torch.IntTensor', 'torch.IntTensor', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (18802, 18813), False, 'import torch\n'), ((18912, 18977), 'torch.Tensor', 'torch.Tensor', (['[[0.5, 1.0, 1.5], [1.0, 2.0, 3.0], [1.5, 3.0, 4.5]]'], {}), '([[0.5, 1.0, 1.5], [1.0, 2.0, 3.0], [1.5, 3.0, 4.5]])\n', (18924, 18977), False, 'import torch\n'), ((20371, 20406), 'torch.tensor', 'torch.tensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (20383, 20406), False, 'import torch\n'), ((20427, 20442), 'torch.arange', 'torch.arange', (['(8)'], {}), '(8)\n', (20439, 20442), False, 'import torch\n'), ((20464, 20484), 'torch.arange', 'torch.arange', (['(8 * 
10)'], {}), '(8 * 10)\n', (20476, 20484), False, 'import torch\n'), ((20562, 20581), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (20574, 20581), False, 'import torch\n'), ((23881, 23907), 'torch.tensor', 'torch.tensor', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (23893, 23907), False, 'import torch\n'), ((23923, 23961), 'torch.tensor', 'torch.tensor', (['[[2.0, 2.0], [3.0, 3.0]]'], {}), '([[2.0, 2.0], [3.0, 3.0]])\n', (23935, 23961), False, 'import torch\n'), ((24668, 24696), 'torch.equal', 'torch.equal', (['kt[key]', 'd[key]'], {}), '(kt[key], d[key])\n', (24679, 24696), False, 'import torch\n'), ((24777, 24803), 'torch.tensor', 'torch.tensor', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (24789, 24803), False, 'import torch\n'), ((24819, 24857), 'torch.tensor', 'torch.tensor', (['[[2.0, 2.0], [3.0, 3.0]]'], {}), '([[2.0, 2.0], [3.0, 3.0]])\n', (24831, 24857), False, 'import torch\n'), ((25102, 25130), 'torch.equal', 'torch.equal', (['kt[key]', 'd[key]'], {}), '(kt[key], d[key])\n', (25113, 25130), False, 'import torch\n'), ((25623, 25675), 'torch.cat', 'torch.cat', (['[tensor_list[0], tensor_list[4]]', 'key_dim'], {}), '([tensor_list[0], tensor_list[4]], key_dim)\n', (25632, 25675), False, 'import torch\n'), ((25786, 25838), 'torch.cat', 'torch.cat', (['[tensor_list[1], tensor_list[3]]', 'key_dim'], {}), '([tensor_list[1], tensor_list[3]], key_dim)\n', (25795, 25838), False, 'import torch\n'), ((26621, 26695), 'torch.cat', 'torch.cat', (['[tensor_list_1[0], tensor_list_2[1], tensor_list_1[2]]', 'key_dim'], {}), '([tensor_list_1[0], tensor_list_2[1], tensor_list_1[2]], key_dim)\n', (26630, 26695), False, 'import torch\n'), ((26861, 26917), 'torch.cat', 'torch.cat', (['[tensor_list_1[1], tensor_list_2[0]]', 'key_dim'], {}), '([tensor_list_1[1], tensor_list_2[0]], key_dim)\n', (26870, 26917), False, 'import torch\n'), ((27190, 27225), 'torchrec.sparse.jagged_tensor.KeyedTensor.regroup', 'KeyedTensor.regroup', (['inputs', 'groups'], {}), 
'(inputs, groups)\n', (27209, 27225), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((27521, 27556), 'torchrec.sparse.jagged_tensor.KeyedTensor.regroup', 'KeyedTensor.regroup', (['inputs', 'groups'], {}), '(inputs, groups)\n', (27540, 27556), False, 'from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedTensor, KeyedJaggedTensor\n'), ((28417, 28451), 'torch.equal', 'torch.equal', (['result', 'traced_result'], {}), '(result, traced_result)\n', (28428, 28451), False, 'import torch\n'), ((29378, 29404), 'torch.tensor', 'torch.tensor', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (29390, 29404), False, 'import torch\n'), ((29420, 29458), 'torch.tensor', 'torch.tensor', (['[[2.0, 2.0], [3.0, 3.0]]'], {}), '([[2.0, 2.0], [3.0, 3.0]])\n', (29432, 29458), False, 'import torch\n'), ((7036, 7062), 'torch.tensor', 'torch.tensor', (['[0, 2, 2, 3]'], {}), '([0, 2, 2, 3])\n', (7048, 7062), False, 'import torch\n'), ((7083, 7112), 'torch.tensor', 'torch.tensor', (['[0.5, 1.0, 1.5]'], {}), '([0.5, 1.0, 1.5])\n', (7095, 7112), False, 'import torch\n'), ((7134, 7165), 'torch.tensor', 'torch.tensor', (['[5.0, 10.0, 15.0]'], {}), '([5.0, 10.0, 15.0])\n', (7146, 7165), False, 'import torch\n'), ((23046, 23081), 'torch.tensor', 'torch.tensor', (['[0, 2, 2, 3, 4, 5, 8]'], {}), '([0, 2, 2, 3, 4, 5, 8])\n', (23058, 23081), False, 'import torch\n'), ((23102, 23117), 'torch.arange', 'torch.arange', (['(8)'], {}), '(8)\n', (23114, 23117), False, 'import torch\n'), ((23139, 23159), 'torch.arange', 'torch.arange', (['(8 * 10)'], {}), '(8 * 10)\n', (23151, 23159), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List
import torch
from torch import nn
from torchrec import EmbeddingBagCollection, KeyedJaggedTensor
from torchrec.modules.deepfm import DeepFM, FactorizationMachine
from torchrec.sparse.jagged_tensor import KeyedTensor
class SparseArch(nn.Module):
    """
    Runs the sparse-feature half of the DeepFM model: every feature is looked
    up and pooled by the wrapped ``EmbeddingBagCollection``.

    Args:
        embedding_bag_collection (EmbeddingBagCollection): the collection of
            pooled embedding tables to look features up in.

    Example::

        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])

        #     0       1        2  <-- batch
        # 0   [0,1] None    [2]
        # 1   [3]    [4]    [5,6,7]
        # ^
        # feature
        features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )

        sparse_arch(features)
    """

    def __init__(self, embedding_bag_collection: EmbeddingBagCollection) -> None:
        super().__init__()
        self.embedding_bag_collection: EmbeddingBagCollection = embedding_bag_collection

    def forward(
        self,
        features: KeyedJaggedTensor,
    ) -> KeyedTensor:
        """
        Args:
            features (KeyedJaggedTensor): sparse feature ids to embed.

        Returns:
            KeyedTensor: pooled embeddings keyed by feature name.
        """
        pooled_embeddings = self.embedding_bag_collection(features)
        return pooled_embeddings
class DenseArch(nn.Module):
    """
    Projects the dense input features of the DeepFM model into the sparse
    embedding space with a two-layer MLP, so dense and sparse features can be
    interacted in the same dimensionality.

    Args:
        in_features (int): number of dense input features.
        hidden_layer_size (int): width of the hidden layer.
        embedding_dim (int): output size; should equal the sparse side's
            embedding dimension.

    Example::

        B = 20
        D = 3
        dense_arch = DenseArch(in_features=10, hidden_layer_size=10, embedding_dim=D)
        dense_embedded = dense_arch(torch.rand((B, 10)))
    """

    def __init__(
        self,
        in_features: int,
        hidden_layer_size: int,
        embedding_dim: int,
    ) -> None:
        super().__init__()
        # Linear -> ReLU -> Linear -> ReLU, ending at the embedding width.
        layers = [
            nn.Linear(in_features, hidden_layer_size),
            nn.ReLU(),
            nn.Linear(hidden_layer_size, embedding_dim),
            nn.ReLU(),
        ]
        self.model: nn.Module = nn.Sequential(*layers)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features (torch.Tensor): size B X `num_features`.

        Returns:
            torch.Tensor: size B X D.
        """
        projected = self.model(features)
        return projected
class FMInteractionArch(nn.Module):
    """
    Combines the `DenseArch` output (dense_features) with the `SparseArch`
    output (sparse_features) using the general DeepFM interaction from
    the DeepFM paper: https://arxiv.org/pdf/1703.04247.pdf

    The output concatenates the dense features, the deep interaction (DI)
    output, and the FM term, giving size B X (D + DI + 1).

    Args:
        fm_in_features (int): input width of `dense_module` inside DeepFM,
            i.e. the total number of elements per example across all of the
            flattened input embeddings. For inputs [randn(3, 2, 3),
            randn(3, 4, 5)] this is 2 * 3 + 4 * 5.
        sparse_feature_names (List[str]): the F sparse feature names to read
            out of the KeyedTensor.
        deep_fm_dimension (int): output size of the deep interaction branch.

    Example::

        D = 3
        B = 10
        keys = ["f1", "f2"]
        F = len(keys)
        fm_inter_arch = FMInteractionArch(
            fm_in_features=D * (F + 1),
            sparse_feature_names=keys,
            deep_fm_dimension=D,
        )
        dense_features = torch.rand((B, D))
        sparse_features = KeyedTensor(
            keys=keys,
            length_per_key=[D, D],
            values=torch.rand((B, D * F)),
        )
        cat_fm_output = fm_inter_arch(dense_features, sparse_features)
    """

    def __init__(
        self,
        fm_in_features: int,
        sparse_feature_names: List[str],
        deep_fm_dimension: int,
    ) -> None:
        super().__init__()
        self.sparse_feature_names: List[str] = sparse_feature_names
        # Deep interaction branch: a one-hidden-layer MLP wrapped by DeepFM.
        dense_module = nn.Sequential(
            nn.Linear(fm_in_features, deep_fm_dimension),
            nn.ReLU(),
        )
        self.deep_fm = DeepFM(dense_module=dense_module)
        self.fm = FactorizationMachine()

    def forward(
        self, dense_features: torch.Tensor, sparse_features: KeyedTensor
    ) -> torch.Tensor:
        """
        Args:
            dense_features (torch.Tensor): size B X D.
            sparse_features (KeyedTensor): pooled embeddings keyed by
                feature name.

        Returns:
            torch.Tensor: size B X (D + DI + 1); `dense_features` unchanged
            when no sparse feature names were configured.
        """
        if not self.sparse_feature_names:
            return dense_features

        # Dense embedding first, then each sparse embedding in declared order.
        interaction_inputs: List[torch.Tensor] = [dense_features] + [
            sparse_features[name] for name in self.sparse_feature_names
        ]
        deep_out = self.deep_fm(interaction_inputs)
        fm_out = self.fm(interaction_inputs)
        return torch.cat([dense_features, deep_out, fm_out], dim=1)
class OverArch(nn.Module):
    """
    Final arch: a single linear layer followed by a sigmoid, producing one
    target per example.

    Args:
        in_features (int): output dimension of the interaction arch.

    Example::

        B = 20
        over_arch = OverArch(10)
        logits = over_arch(torch.rand((B, 10)))
    """

    def __init__(self, in_features: int) -> None:
        super().__init__()
        head = [nn.Linear(in_features, 1), nn.Sigmoid()]
        self.model: nn.Module = nn.Sequential(*head)

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """
        Args:
            features (torch.Tensor): size B X `in_features`.

        Returns:
            torch.Tensor: size B X 1.
        """
        return self.model(features)
class SimpleDeepFMNN(nn.Module):
    """
    Basic recsys module with a DeepFM arch. Learns pooled embeddings for each
    sparse feature, projects dense features into the same embedding space,
    and models dense/sparse interactions with deep_fm as proposed in
    https://arxiv.org/pdf/1703.04247.pdf

    All sparse features must share one embedding dimension (every
    `EmbeddingBagConfig` uses the same embedding_dim).

    Notation used throughout the model docs:

    * F: number of sparse features
    * D: embedding_dimension of sparse features
    * B: batch size
    * num_features: number of dense features

    Args:
        num_dense_features (int): number of input dense features.
        embedding_bag_collection (EmbeddingBagCollection): embedding bags
            backing the `SparseArch`.
        hidden_layer_size (int): hidden layer size of the dense module.
        deep_fm_dimension (int): output size of `deep_fm`'s deep interaction
            module.

    Example::

        B = 2
        D = 8

        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1", "f3"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=100,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_nn = SimpleDeepFMNN(
            num_dense_features=100,
            embedding_bag_collection=ebc,
            hidden_layer_size=20,
            deep_fm_dimension=5,
        )

        features = torch.rand((B, 100))

        #     0       1
        # 0   [1,2] [4,5]
        # 1   [4,3] [2,9]
        # ^
        # feature
        sparse_features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f3"],
            values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9]),
            offsets=torch.tensor([0, 2, 4, 6, 8]),
        )

        logits = sparse_nn(
            dense_features=features,
            sparse_features=sparse_features,
        )
    """

    def __init__(
        self,
        num_dense_features: int,
        embedding_bag_collection: EmbeddingBagCollection,
        hidden_layer_size: int,
        deep_fm_dimension: int,
    ) -> None:
        super().__init__()
        configs = embedding_bag_collection.embedding_bag_configs
        assert len(configs) > 0, "At least one embedding bag is required"
        # Every table must share one embedding dimension; check neighbors.
        for prev_conf, curr_conf in zip(configs, configs[1:]):
            assert (
                prev_conf.embedding_dim == curr_conf.embedding_dim
            ), "All EmbeddingBagConfigs must have the same dimension"
        embedding_dim: int = configs[0].embedding_dim

        feature_names: List[str] = [
            feat for conf in configs for feat in conf.feature_names
        ]
        # One embedding_dim slot for the projected dense features plus one
        # per sparse feature (each contributes its table's embedding_dim).
        fm_in_features = embedding_dim + sum(
            conf.embedding_dim * len(conf.feature_names) for conf in configs
        )

        self.sparse_arch = SparseArch(embedding_bag_collection)
        self.dense_arch = DenseArch(
            in_features=num_dense_features,
            hidden_layer_size=hidden_layer_size,
            embedding_dim=embedding_dim,
        )
        self.inter_arch = FMInteractionArch(
            fm_in_features=fm_in_features,
            sparse_feature_names=feature_names,
            deep_fm_dimension=deep_fm_dimension,
        )
        # Interaction output is D + DI + 1 wide (dense + deep + FM term).
        over_in_features = embedding_dim + deep_fm_dimension + 1
        self.over_arch = OverArch(over_in_features)

    def forward(
        self,
        dense_features: torch.Tensor,
        sparse_features: KeyedJaggedTensor,
    ) -> torch.Tensor:
        """
        Args:
            dense_features (torch.Tensor): the dense features.
            sparse_features (KeyedJaggedTensor): the sparse features.

        Returns:
            torch.Tensor: logits with size B X 1.
        """
        interacted = self.inter_arch(
            dense_features=self.dense_arch(dense_features),
            sparse_features=self.sparse_arch(sparse_features),
        )
        return self.over_arch(interacted)
| [
"torchrec.modules.deepfm.FactorizationMachine"
] | [((5018, 5040), 'torchrec.modules.deepfm.FactorizationMachine', 'FactorizationMachine', ([], {}), '()\n', (5038, 5040), False, 'from torchrec.modules.deepfm import DeepFM, FactorizationMachine\n'), ((5848, 5916), 'torch.cat', 'torch.cat', (['[dense_features, deep_interaction, fm_interaction]'], {'dim': '(1)'}), '([dense_features, deep_interaction, fm_interaction], dim=1)\n', (5857, 5916), False, 'import torch\n'), ((2965, 3006), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'hidden_layer_size'], {}), '(in_features, hidden_layer_size)\n', (2974, 3006), False, 'from torch import nn\n'), ((3020, 3029), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3027, 3029), False, 'from torch import nn\n'), ((3043, 3086), 'torch.nn.Linear', 'nn.Linear', (['hidden_layer_size', 'embedding_dim'], {}), '(hidden_layer_size, embedding_dim)\n', (3052, 3086), False, 'from torch import nn\n'), ((3100, 3109), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3107, 3109), False, 'from torch import nn\n'), ((6353, 6378), 'torch.nn.Linear', 'nn.Linear', (['in_features', '(1)'], {}), '(in_features, 1)\n', (6362, 6378), False, 'from torch import nn\n'), ((6392, 6404), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6402, 6404), False, 'from torch import nn\n'), ((4903, 4947), 'torch.nn.Linear', 'nn.Linear', (['fm_in_features', 'deep_fm_dimension'], {}), '(fm_in_features, deep_fm_dimension)\n', (4912, 4947), False, 'from torch import nn\n'), ((4965, 4974), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4972, 4974), False, 'from torch import nn\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import cast, List
import torch
from torchrec.distributed.embedding_tower_sharding import (
EmbeddingTowerCollectionSharder,
EmbeddingTowerSharder,
)
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.planner.constants import BIGINT_DTYPE
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.shard_estimators import (
_calculate_dp_shard_io_sizes,
_calculate_tw_shard_io_sizes,
)
from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology
from torchrec.distributed.planner.utils import prod
from torchrec.distributed.test_utils.test_model import (
TestSparseNN,
TestTowerCollectionSparseNN,
TestTowerSparseNN,
)
from torchrec.distributed.types import ModuleSharder, ShardingType
from torchrec.modules.embedding_configs import EmbeddingBagConfig
# Expected [rows, cols] per shard for ROW_WISE sharding of four tables over
# 8 ranks. Row counts per table sum to 100/110/120/130 with the remainder on
# the last rank; column width is the table's full embedding dim (10/20/30/40).
EXPECTED_RW_SHARD_SIZES = [
    [[13, 10], [13, 10], [13, 10], [13, 10], [13, 10], [13, 10], [13, 10], [9, 10]],
    [[14, 20], [14, 20], [14, 20], [14, 20], [14, 20], [14, 20], [14, 20], [12, 20]],
    [[15, 30], [15, 30], [15, 30], [15, 30], [15, 30], [15, 30], [15, 30], [15, 30]],
    [[17, 40], [17, 40], [17, 40], [17, 40], [17, 40], [17, 40], [17, 40], [11, 40]],
]
# Matching [row, col] start offsets: cumulative row offsets, column always 0.
EXPECTED_RW_SHARD_OFFSETS = [
    [[0, 0], [13, 0], [26, 0], [39, 0], [52, 0], [65, 0], [78, 0], [91, 0]],
    [[0, 0], [14, 0], [28, 0], [42, 0], [56, 0], [70, 0], [84, 0], [98, 0]],
    [[0, 0], [15, 0], [30, 0], [45, 0], [60, 0], [75, 0], [90, 0], [105, 0]],
    [[0, 0], [17, 0], [34, 0], [51, 0], [68, 0], [85, 0], [102, 0], [119, 0]],
]
# Expected per-shard Storage (hbm/ddr, in bytes) for ROW_WISE sharding —
# presumably compared against the planner's storage estimator output; the
# smaller last-rank values track the smaller remainder shards above.
EXPECTED_RW_SHARD_STORAGE = [
    [
        Storage(hbm=84488, ddr=0),
        Storage(hbm=84488, ddr=0),
        Storage(hbm=84488, ddr=0),
        Storage(hbm=84488, ddr=0),
        Storage(hbm=84488, ddr=0),
        Storage(hbm=84488, ddr=0),
        Storage(hbm=84488, ddr=0),
        Storage(hbm=84328, ddr=0),
    ],
    [
        Storage(hbm=511072, ddr=0),
        Storage(hbm=511072, ddr=0),
        Storage(hbm=511072, ddr=0),
        Storage(hbm=511072, ddr=0),
        Storage(hbm=511072, ddr=0),
        Storage(hbm=511072, ddr=0),
        Storage(hbm=511072, ddr=0),
        Storage(hbm=510912, ddr=0),
    ],
    [
        Storage(hbm=513800, ddr=0),
        Storage(hbm=513800, ddr=0),
        Storage(hbm=513800, ddr=0),
        Storage(hbm=513800, ddr=0),
        Storage(hbm=513800, ddr=0),
        Storage(hbm=513800, ddr=0),
        Storage(hbm=513800, ddr=0),
        Storage(hbm=513800, ddr=0),
    ],
    [
        Storage(hbm=1340064, ddr=0),
        Storage(hbm=1340064, ddr=0),
        Storage(hbm=1340064, ddr=0),
        Storage(hbm=1340064, ddr=0),
        Storage(hbm=1340064, ddr=0),
        Storage(hbm=1340064, ddr=0),
        Storage(hbm=1340064, ddr=0),
        Storage(hbm=1339104, ddr=0),
    ],
]
# Expected per-shard Storage for ROW_WISE sharding with the UVM-caching
# kernel: part of each table's footprint moves from HBM to DDR relative to
# EXPECTED_RW_SHARD_STORAGE above (hence the nonzero ddr values).
EXPECTED_UVM_CACHING_RW_SHARD_STORAGE = [
    [
        Storage(hbm=84072, ddr=520),
        Storage(hbm=84072, ddr=520),
        Storage(hbm=84072, ddr=520),
        Storage(hbm=84072, ddr=520),
        Storage(hbm=84072, ddr=520),
        Storage(hbm=84072, ddr=520),
        Storage(hbm=84072, ddr=520),
        Storage(hbm=84040, ddr=360),
    ],
    [
        Storage(hbm=510176, ddr=1120),
        Storage(hbm=510176, ddr=1120),
        Storage(hbm=510176, ddr=1120),
        Storage(hbm=510176, ddr=1120),
        Storage(hbm=510176, ddr=1120),
        Storage(hbm=510176, ddr=1120),
        Storage(hbm=510176, ddr=1120),
        Storage(hbm=510144, ddr=960),
    ],
    [
        Storage(hbm=512648, ddr=1800),
        Storage(hbm=512648, ddr=1800),
        Storage(hbm=512648, ddr=1800),
        Storage(hbm=512648, ddr=1800),
        Storage(hbm=512648, ddr=1800),
        Storage(hbm=512648, ddr=1800),
        Storage(hbm=512648, ddr=1800),
        Storage(hbm=512648, ddr=1800),
    ],
    [
        Storage(hbm=1339656, ddr=2720),
        Storage(hbm=1339656, ddr=2720),
        Storage(hbm=1339656, ddr=2720),
        Storage(hbm=1339656, ddr=2720),
        Storage(hbm=1339656, ddr=2720),
        Storage(hbm=1339656, ddr=2720),
        Storage(hbm=1339656, ddr=2720),
        Storage(hbm=1338840, ddr=1760),
    ],
]
# Expected [rows, cols] per shard for TABLE_ROW_WISE sharding: each table is
# row-split over 4 ranks (presumably one host's ranks — confirm against the
# test topology), remainder rows on the last shard.
EXPECTED_TWRW_SHARD_SIZES = [
    [[25, 10], [25, 10], [25, 10], [25, 10]],
    [[28, 20], [28, 20], [28, 20], [26, 20]],
    [[30, 30], [30, 30], [30, 30], [30, 30]],
    [[33, 40], [33, 40], [33, 40], [31, 40]],
]
# Matching cumulative row offsets for the shards above.
EXPECTED_TWRW_SHARD_OFFSETS = [
    [[0, 0], [25, 0], [50, 0], [75, 0]],
    [[0, 0], [28, 0], [56, 0], [84, 0]],
    [[0, 0], [30, 0], [60, 0], [90, 0]],
    [[0, 0], [33, 0], [66, 0], [99, 0]],
]
# Expected per-shard Storage (hbm/ddr bytes) for TABLE_ROW_WISE sharding.
EXPECTED_TWRW_SHARD_STORAGE = [
    [
        Storage(hbm=87016, ddr=0),
        Storage(hbm=87016, ddr=0),
        Storage(hbm=87016, ddr=0),
        Storage(hbm=87016, ddr=0),
    ],
    [
        Storage(hbm=530624, ddr=0),
        Storage(hbm=530624, ddr=0),
        Storage(hbm=530624, ddr=0),
        Storage(hbm=530464, ddr=0),
    ],
    [
        Storage(hbm=536080, ddr=0),
        Storage(hbm=536080, ddr=0),
        Storage(hbm=536080, ddr=0),
        Storage(hbm=536080, ddr=0),
    ],
    [
        Storage(hbm=1369248, ddr=0),
        Storage(hbm=1369248, ddr=0),
        Storage(hbm=1369248, ddr=0),
        Storage(hbm=1368928, ddr=0),
    ],
]
# Expected [rows, cols] per shard for COLUMN_WISE sharding: each shard keeps
# the full row count (100/110/120/130) and the embedding dims (10/20/30/40)
# are split column-wise (e.g. 20 -> 8 + 12, 30 -> 9 + 9 + 12).
EXPECTED_CW_SHARD_SIZES = [
    [[100, 10]],
    [[110, 8], [110, 12]],
    [[120, 9], [120, 9], [120, 12]],
    [[130, 12], [130, 12], [130, 16]],
]
# Matching [row, col] start offsets: column offsets accumulate, row always 0.
EXPECTED_CW_SHARD_OFFSETS = [
    [[0, 0]],
    [[0, 0], [0, 8]],
    [[0, 0], [0, 9], [0, 18]],
    [[0, 0], [0, 12], [0, 24]],
]
# Expected per-shard Storage (hbm/ddr bytes) for COLUMN_WISE sharding.
EXPECTED_CW_SHARD_STORAGE = [
    [Storage(hbm=102304, ddr=0)],
    [Storage(hbm=347584, ddr=0), Storage(hbm=447648, ddr=0)],
    [
        Storage(hbm=315616, ddr=0),
        Storage(hbm=315616, ddr=0),
        Storage(hbm=366208, ddr=0),
    ],
    [
        Storage(hbm=612448, ddr=0),
        Storage(hbm=612448, ddr=0),
        Storage(hbm=745600, ddr=0),
    ],
]
# TABLE_COLUMN_WISE reuses the COLUMN_WISE shapes/offsets; the storage list
# is written out separately even though the values match EXPECTED_CW_SHARD_STORAGE.
EXPECTED_TWCW_SHARD_SIZES: List[List[List[int]]] = EXPECTED_CW_SHARD_SIZES
EXPECTED_TWCW_SHARD_OFFSETS: List[List[List[int]]] = EXPECTED_CW_SHARD_OFFSETS
EXPECTED_TWCW_SHARD_STORAGE = [
    [Storage(hbm=102304, ddr=0)],
    [Storage(hbm=347584, ddr=0), Storage(hbm=447648, ddr=0)],
    [
        Storage(hbm=315616, ddr=0),
        Storage(hbm=315616, ddr=0),
        Storage(hbm=366208, ddr=0),
    ],
    [
        Storage(hbm=612448, ddr=0),
        Storage(hbm=612448, ddr=0),
        Storage(hbm=745600, ddr=0),
    ],
]
class TWSharder(EmbeddingBagCollectionSharder):
    """EBC sharder restricted to table-wise sharding on the batched-fused kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.TABLE_WISE.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_FUSED.value
        return [only_kernel]
class RWSharder(EmbeddingBagCollectionSharder):
    """EBC sharder restricted to row-wise sharding on the batched-fused kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.ROW_WISE.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_FUSED.value
        return [only_kernel]
class UVMCachingRWSharder(EmbeddingBagCollectionSharder):
    """EBC sharder restricted to row-wise sharding on the UVM-caching kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.ROW_WISE.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value
        return [only_kernel]
class TWRWSharder(EmbeddingBagCollectionSharder):
    """EBC sharder restricted to table-row-wise sharding on the batched-fused kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.TABLE_ROW_WISE.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_FUSED.value
        return [only_kernel]
class CWSharder(EmbeddingBagCollectionSharder):
    """EBC sharder restricted to column-wise sharding on the batched-fused kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.COLUMN_WISE.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_FUSED.value
        return [only_kernel]
class TWCWSharder(EmbeddingBagCollectionSharder):
    """EBC sharder restricted to table-column-wise sharding on the batched-fused kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.TABLE_COLUMN_WISE.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_FUSED.value
        return [only_kernel]
class DPSharder(EmbeddingBagCollectionSharder):
    """EBC sharder restricted to data-parallel sharding on the batched-dense kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.DATA_PARALLEL.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_DENSE.value
        return [only_kernel]
class AllTypesSharder(EmbeddingBagCollectionSharder):
    """EBC sharder advertising every sharding type and every compute kernel.

    Used to exercise constraint-based filtering: the planner must narrow this
    full menu down via ParameterConstraints.
    """

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Every supported sharding scheme, unrestricted."""
        return [
            ShardingType.DATA_PARALLEL.value,
            ShardingType.TABLE_WISE.value,
            ShardingType.ROW_WISE.value,
            ShardingType.TABLE_ROW_WISE.value,
            ShardingType.COLUMN_WISE.value,
            ShardingType.TABLE_COLUMN_WISE.value,
        ]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Every supported compute kernel, unrestricted."""
        return [
            EmbeddingComputeKernel.BATCHED_DENSE.value,
            EmbeddingComputeKernel.BATCHED_FUSED.value,
            EmbeddingComputeKernel.BATCHED_FUSED_UVM.value,
            EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value,
            EmbeddingComputeKernel.BATCHED_QUANT.value,
        ]
class TowerTWRWSharder(EmbeddingTowerSharder):
    """Tower sharder restricted to table-row-wise sharding on the batched-dense kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.TABLE_ROW_WISE.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_DENSE.value
        return [only_kernel]
class TowerCollectionTWRWSharder(EmbeddingTowerCollectionSharder):
    """Tower-collection sharder restricted to table-row-wise sharding on the
    batched-dense kernel."""

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one sharding scheme."""
        only_scheme = ShardingType.TABLE_ROW_WISE.value
        return [only_scheme]

    def compute_kernels(self, sharding_type: str, compute_device_type: str) -> List[str]:
        """Offer the planner exactly one compute kernel."""
        only_kernel = EmbeddingComputeKernel.BATCHED_DENSE.value
        return [only_kernel]
class TestEnumerators(unittest.TestCase):
    """Tests for the planner's EmbeddingEnumerator.

    Each test enumerates sharding options for a fixture model with a sharder
    restricted to a single sharding type, then checks the produced shard
    sizes, offsets, and storage costs against precomputed expectations.
    """
    def setUp(self) -> None:
        """Build topology parameters, per-table constraints, and fixture models."""
        self.compute_device = "cuda"
        self.batch_size = 256
        self.world_size = 8
        self.local_world_size = 4
        # Per-table constraints; min_partition drives the expected shard
        # geometries, caching_ratio affects UVM-caching storage estimates.
        self.constraints = {
            "table_0": ParameterConstraints(min_partition=20),
            "table_1": ParameterConstraints(min_partition=8, pooling_factors=[1, 3, 5]),
            "table_2": ParameterConstraints(
                min_partition=9, caching_ratio=0.36, pooling_factors=[8, 2]
            ),
            "table_3": ParameterConstraints(
                min_partition=12, caching_ratio=0.85, pooling_factors=[2, 1, 3, 7]
            ),
        }
        self.num_tables = 4
        # Tables grow in both row count and embedding dim with index i.
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100 + i * 10,
                embedding_dim=10 + i * 10,
                name="table_" + str(i),
                feature_names=["feature_" + str(i)],
            )
            for i in range(self.num_tables)
        ]
        weighted_tables = [
            EmbeddingBagConfig(
                num_embeddings=(i + 1) * 10,
                embedding_dim=(i + 2) * 4,
                name="weighted_table_" + str(i),
                feature_names=["weighted_feature_" + str(i)],
            )
            for i in range(4)
        ]
        # Plain model deliberately gets no weighted tables; the tower models
        # below use both sets.
        self.model = TestSparseNN(tables=tables, weighted_tables=[])
        self.enumerator = EmbeddingEnumerator(
            topology=Topology(
                world_size=self.world_size,
                compute_device=self.compute_device,
                local_world_size=self.local_world_size,
                batch_size=self.batch_size,
            ),
            constraints=self.constraints,
        )
        self.tower_model = TestTowerSparseNN(
            tables=tables, weighted_tables=weighted_tables
        )
        self.tower_collection_model = TestTowerCollectionSparseNN(
            tables=tables, weighted_tables=weighted_tables
        )
    def test_dp_sharding(self) -> None:
        """Data-parallel: every rank holds a full replica, so each shard's size
        equals the whole tensor and all offsets are zero; storage is recomputed
        here from the planner's I/O-size helper and compared shard-by-shard."""
        sharding_options = self.enumerator.enumerate(
            self.model, [cast(ModuleSharder[torch.nn.Module], DPSharder())]
        )
        for sharding_option in sharding_options:
            self.assertEqual(
                sharding_option.sharding_type, ShardingType.DATA_PARALLEL.value
            )
            self.assertEqual(
                [shard.size for shard in sharding_option.shards],
                [list(sharding_option.tensor.shape)] * self.world_size,
            )
            self.assertEqual(
                [shard.offset for shard in sharding_option.shards],
                [[0, 0]] * self.world_size,
            )
            input_data_type_size = BIGINT_DTYPE
            output_data_type_size = sharding_option.tensor.element_size()
            input_sizes, output_sizes = _calculate_dp_shard_io_sizes(
                batch_sizes=[self.batch_size] * sharding_option.num_inputs,
                input_lengths=self.constraints[sharding_option.name].pooling_factors,
                emb_dim=sharding_option.tensor.shape[1],
                num_shards=self.world_size,
                input_data_type_size=input_data_type_size,
                output_data_type_size=output_data_type_size,
                is_pooled=sharding_option.is_pooled,
                num_objects=[1.0] * sharding_option.num_inputs,
            )
            tensor_sizes = [
                prod(sharding_option.tensor.shape)
                * sharding_option.tensor.element_size()
            ] * self.world_size
            # NOTE(review): optimizer state is assumed to be 2x the tensor
            # bytes for DP (e.g. two fp32 moments) — confirm against estimator.
            optimizer_sizes = [tensor_size * 2 for tensor_size in tensor_sizes]
            storage_sizes = [
                input_size + tensor_size + output_size + optimizer_size
                for input_size, tensor_size, output_size, optimizer_size in zip(
                    input_sizes,
                    tensor_sizes,
                    output_sizes,
                    optimizer_sizes,
                )
            ]
            expected_storage = [
                Storage(hbm=storage_size, ddr=0) for storage_size in storage_sizes
            ]
            self.assertEqual(
                [shard.storage for shard in sharding_option.shards], expected_storage
            )
    def test_tw_sharding(self) -> None:
        """Table-wise: a single shard per table holding the full tensor at
        offset [0, 0]; storage is recomputed from the TW I/O-size helper."""
        sharding_options = self.enumerator.enumerate(
            self.model, [cast(ModuleSharder[torch.nn.Module], TWSharder())]
        )
        for sharding_option in sharding_options:
            self.assertEqual(
                sharding_option.sharding_type, ShardingType.TABLE_WISE.value
            )
            self.assertEqual(
                sharding_option.shards[0].size, list(sharding_option.tensor.shape)
            )
            self.assertEqual(sharding_option.shards[0].offset, [0, 0])
            input_data_type_size = BIGINT_DTYPE
            output_data_type_size = sharding_option.tensor.element_size()
            input_sizes, output_sizes = _calculate_tw_shard_io_sizes(
                batch_sizes=[self.batch_size] * sharding_option.num_inputs,
                world_size=self.world_size,
                input_lengths=self.constraints[sharding_option.name].pooling_factors,
                emb_dim=sharding_option.tensor.shape[1],
                input_data_type_size=input_data_type_size,
                output_data_type_size=output_data_type_size,
                is_pooled=sharding_option.is_pooled,
                num_objects=[1.0] * sharding_option.num_inputs,
            )
            tensor_size = (
                prod(sharding_option.tensor.shape)
                * sharding_option.tensor.element_size()
            )
            # NOTE(review): no optimizer state is counted for TW here, unlike
            # the 2x-tensor assumption in test_dp_sharding — confirm intent.
            optimizer_size = 0
            storage_size = (
                input_sizes[0] + output_sizes[0] + tensor_size + optimizer_size
            )
            self.assertEqual(
                sharding_option.shards[0].storage, Storage(hbm=storage_size, ddr=0)
            )
    def test_rw_sharding(self) -> None:
        """Row-wise: shard geometry and storage must match the precomputed
        EXPECTED_RW_* tables (indexed by table order)."""
        sharding_options = self.enumerator.enumerate(
            self.model, [cast(ModuleSharder[torch.nn.Module], RWSharder())]
        )
        for i, sharding_option in enumerate(sharding_options):
            self.assertEqual(sharding_option.sharding_type, ShardingType.ROW_WISE.value)
            self.assertEqual(
                [shard.size for shard in sharding_option.shards],
                EXPECTED_RW_SHARD_SIZES[i],
            )
            self.assertEqual(
                [shard.offset for shard in sharding_option.shards],
                EXPECTED_RW_SHARD_OFFSETS[i],
            )
            self.assertEqual(
                [shard.storage for shard in sharding_option.shards],
                EXPECTED_RW_SHARD_STORAGE[i],
            )
    def test_uvm_caching_rw_sharding(self) -> None:
        """Row-wise with UVM caching: same sizes/offsets as plain RW, but
        storage follows the UVM-caching expectations (HBM+DDR split)."""
        sharding_options = self.enumerator.enumerate(
            self.model,
            [cast(ModuleSharder[torch.nn.Module], UVMCachingRWSharder())],
        )
        for i, sharding_option in enumerate(sharding_options):
            self.assertEqual(sharding_option.sharding_type, ShardingType.ROW_WISE.value)
            self.assertEqual(
                [shard.size for shard in sharding_option.shards],
                EXPECTED_RW_SHARD_SIZES[i],
            )
            self.assertEqual(
                [shard.offset for shard in sharding_option.shards],
                EXPECTED_RW_SHARD_OFFSETS[i],
            )
            self.assertEqual(
                [shard.storage for shard in sharding_option.shards],
                EXPECTED_UVM_CACHING_RW_SHARD_STORAGE[i],
            )
    def test_twrw_sharding(self) -> None:
        """Table-row-wise: shard geometry and storage must match the
        precomputed EXPECTED_TWRW_* tables."""
        sharding_options = self.enumerator.enumerate(
            self.model, [cast(ModuleSharder[torch.nn.Module], TWRWSharder())]
        )
        for i, sharding_option in enumerate(sharding_options):
            self.assertEqual(
                sharding_option.sharding_type, ShardingType.TABLE_ROW_WISE.value
            )
            self.assertEqual(
                [shard.size for shard in sharding_option.shards],
                EXPECTED_TWRW_SHARD_SIZES[i],
            )
            self.assertEqual(
                [shard.offset for shard in sharding_option.shards],
                EXPECTED_TWRW_SHARD_OFFSETS[i],
            )
            self.assertEqual(
                [shard.storage for shard in sharding_option.shards],
                EXPECTED_TWRW_SHARD_STORAGE[i],
            )
    def test_cw_sharding(self) -> None:
        """Column-wise: shard geometry and storage must match the precomputed
        EXPECTED_CW_* tables."""
        sharding_options = self.enumerator.enumerate(
            self.model, [cast(ModuleSharder[torch.nn.Module], CWSharder())]
        )
        for i, sharding_option in enumerate(sharding_options):
            self.assertEqual(
                sharding_option.sharding_type, ShardingType.COLUMN_WISE.value
            )
            self.assertEqual(
                [shard.size for shard in sharding_option.shards],
                EXPECTED_CW_SHARD_SIZES[i],
            )
            self.assertEqual(
                [shard.offset for shard in sharding_option.shards],
                EXPECTED_CW_SHARD_OFFSETS[i],
            )
            self.assertEqual(
                [shard.storage for shard in sharding_option.shards],
                EXPECTED_CW_SHARD_STORAGE[i],
            )
    def test_twcw_sharding(self) -> None:
        """Table-column-wise: shard geometry and storage must match the
        precomputed EXPECTED_TWCW_* tables (geometry aliases the CW tables)."""
        sharding_options = self.enumerator.enumerate(
            self.model, [cast(ModuleSharder[torch.nn.Module], TWCWSharder())]
        )
        for i, sharding_option in enumerate(sharding_options):
            self.assertEqual(
                sharding_option.sharding_type, ShardingType.TABLE_COLUMN_WISE.value
            )
            self.assertEqual(
                [shard.size for shard in sharding_option.shards],
                EXPECTED_TWCW_SHARD_SIZES[i],
            )
            self.assertEqual(
                [shard.offset for shard in sharding_option.shards],
                EXPECTED_TWCW_SHARD_OFFSETS[i],
            )
            self.assertEqual(
                [shard.storage for shard in sharding_option.shards],
                EXPECTED_TWCW_SHARD_STORAGE[i],
            )
    def test_filtering(self) -> None:
        """Constraints must filter AllTypesSharder's full menu down to exactly
        the allowed sharding types x compute kernels per table."""
        constraint = ParameterConstraints(
            sharding_types=[
                ShardingType.TABLE_ROW_WISE.value,
                ShardingType.COLUMN_WISE.value,
            ],
            compute_kernels=[
                EmbeddingComputeKernel.BATCHED_FUSED_UVM.value,
                EmbeddingComputeKernel.BATCHED_DENSE.value,
            ],
        )
        constraints = {
            "table_0": constraint,
            "table_1": constraint,
            "table_2": constraint,
            "table_3": constraint,
        }
        enumerator = EmbeddingEnumerator(
            topology=Topology(
                world_size=self.world_size,
                compute_device=self.compute_device,
                local_world_size=self.local_world_size,
                batch_size=self.batch_size,
            ),
            constraints=constraints,
        )
        sharder = cast(ModuleSharder[torch.nn.Module], AllTypesSharder())
        sharding_options = enumerator.enumerate(self.model, [sharder])
        expected_sharding_types = {
            ShardingType.TABLE_ROW_WISE.value,
            ShardingType.COLUMN_WISE.value,
        }
        expected_compute_kernels = {
            EmbeddingComputeKernel.BATCHED_FUSED_UVM.value,
            EmbeddingComputeKernel.BATCHED_DENSE.value,
        }
        # Everything the sharder offers beyond the constraints must be absent.
        unexpected_sharding_types = (
            set(sharder.sharding_types(self.compute_device)) - expected_sharding_types
        )
        unexpected_compute_kernels = (
            set(sharder.compute_kernels("", "")) - expected_compute_kernels
        )
        # One option per (table, sharding type, kernel) combination.
        self.assertEqual(
            len(sharding_options),
            self.num_tables
            * len(expected_sharding_types)
            * len(expected_compute_kernels),
        )
        for sharding_option in sharding_options:
            self.assertIn(sharding_option.sharding_type, expected_sharding_types)
            self.assertNotIn(sharding_option.sharding_type, unexpected_sharding_types)
            self.assertIn(sharding_option.compute_kernel, expected_compute_kernels)
            self.assertNotIn(sharding_option.compute_kernel, unexpected_compute_kernels)
    def test_tower_sharding(self) -> None:
        """Tower model: options for tower modules carry the tower name as a
        dependency; plain EBC modules have no dependency."""
        # five tables
        # tower_0: tables[2], tables[3]
        # tower_1: tables[0]
        # sparse_arch:
        #   ebc:
        #     tables[1]
        #     weighted_tables[0]
        sharding_options = self.enumerator.enumerate(
            self.tower_model,
            [
                cast(ModuleSharder[torch.nn.Module], TWRWSharder()),
                cast(ModuleSharder[torch.nn.Module], TowerTWRWSharder()),
            ],
        )
        self.assertEqual(len(sharding_options), 5)
        self.assertEqual(sharding_options[0].dependency, None)
        self.assertEqual(sharding_options[0].module[0], "sparse_arch.weighted_ebc")
        self.assertEqual(sharding_options[1].dependency, None)
        self.assertEqual(sharding_options[1].module[0], "sparse_arch.ebc")
        self.assertEqual(sharding_options[2].dependency, "tower_1")
        self.assertEqual(sharding_options[2].module[0], "tower_1")
        self.assertEqual(sharding_options[3].dependency, "tower_0")
        self.assertEqual(sharding_options[3].module[0], "tower_0")
        self.assertEqual(sharding_options[4].dependency, "tower_0")
        self.assertEqual(sharding_options[4].module[0], "tower_0")
    def test_tower_collection_sharding(self) -> None:
        """Tower-collection model: all options come from the collection module
        and depend on their individual tower within the collection."""
        sharding_options = self.enumerator.enumerate(
            self.tower_collection_model,
            [
                cast(ModuleSharder[torch.nn.Module], TowerCollectionTWRWSharder()),
                cast(ModuleSharder[torch.nn.Module], TowerTWRWSharder()),
            ],
        )
        self.assertEqual(len(sharding_options), 4)
        # table_0
        self.assertEqual(sharding_options[0].dependency, "tower_arch.tower_0")
        self.assertEqual(sharding_options[0].module[0], "tower_arch")
        # table_2
        self.assertEqual(sharding_options[1].dependency, "tower_arch.tower_0")
        self.assertEqual(sharding_options[1].module[0], "tower_arch")
        # table_1
        self.assertEqual(sharding_options[2].dependency, "tower_arch.tower_1")
        self.assertEqual(sharding_options[2].module[0], "tower_arch")
        # weighted_table_0
        self.assertEqual(sharding_options[3].dependency, "tower_arch.tower_2")
        self.assertEqual(sharding_options[3].module[0], "tower_arch")
| [
"torchrec.distributed.planner.shard_estimators._calculate_tw_shard_io_sizes",
"torchrec.distributed.planner.types.ParameterConstraints",
"torchrec.distributed.test_utils.test_model.TestTowerCollectionSparseNN",
"torchrec.distributed.planner.types.Storage",
"torchrec.distributed.test_utils.test_model.TestSpa... | [((2002, 2027), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84488)', 'ddr': '(0)'}), '(hbm=84488, ddr=0)\n', (2009, 2027), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2037, 2062), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84488)', 'ddr': '(0)'}), '(hbm=84488, ddr=0)\n', (2044, 2062), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2072, 2097), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84488)', 'ddr': '(0)'}), '(hbm=84488, ddr=0)\n', (2079, 2097), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2107, 2132), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84488)', 'ddr': '(0)'}), '(hbm=84488, ddr=0)\n', (2114, 2132), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2142, 2167), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84488)', 'ddr': '(0)'}), '(hbm=84488, ddr=0)\n', (2149, 2167), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2177, 2202), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84488)', 'ddr': '(0)'}), '(hbm=84488, ddr=0)\n', (2184, 2202), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2212, 2237), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84488)', 'ddr': '(0)'}), '(hbm=84488, ddr=0)\n', (2219, 2237), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2247, 2272), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84328)', 'ddr': '(0)'}), '(hbm=84328, ddr=0)\n', (2254, 2272), False, 'from 
torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2295, 2321), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(511072)', 'ddr': '(0)'}), '(hbm=511072, ddr=0)\n', (2302, 2321), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2331, 2357), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(511072)', 'ddr': '(0)'}), '(hbm=511072, ddr=0)\n', (2338, 2357), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2367, 2393), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(511072)', 'ddr': '(0)'}), '(hbm=511072, ddr=0)\n', (2374, 2393), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2403, 2429), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(511072)', 'ddr': '(0)'}), '(hbm=511072, ddr=0)\n', (2410, 2429), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2439, 2465), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(511072)', 'ddr': '(0)'}), '(hbm=511072, ddr=0)\n', (2446, 2465), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2475, 2501), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(511072)', 'ddr': '(0)'}), '(hbm=511072, ddr=0)\n', (2482, 2501), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2511, 2537), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(511072)', 'ddr': '(0)'}), '(hbm=511072, ddr=0)\n', (2518, 2537), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2547, 2573), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510912)', 'ddr': '(0)'}), '(hbm=510912, ddr=0)\n', (2554, 
2573), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2596, 2622), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(513800)', 'ddr': '(0)'}), '(hbm=513800, ddr=0)\n', (2603, 2622), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2632, 2658), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(513800)', 'ddr': '(0)'}), '(hbm=513800, ddr=0)\n', (2639, 2658), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2668, 2694), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(513800)', 'ddr': '(0)'}), '(hbm=513800, ddr=0)\n', (2675, 2694), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2704, 2730), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(513800)', 'ddr': '(0)'}), '(hbm=513800, ddr=0)\n', (2711, 2730), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2740, 2766), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(513800)', 'ddr': '(0)'}), '(hbm=513800, ddr=0)\n', (2747, 2766), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2776, 2802), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(513800)', 'ddr': '(0)'}), '(hbm=513800, ddr=0)\n', (2783, 2802), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2812, 2838), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(513800)', 'ddr': '(0)'}), '(hbm=513800, ddr=0)\n', (2819, 2838), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2848, 2874), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(513800)', 'ddr': '(0)'}), '(hbm=513800, 
ddr=0)\n', (2855, 2874), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2897, 2924), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1340064)', 'ddr': '(0)'}), '(hbm=1340064, ddr=0)\n', (2904, 2924), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2934, 2961), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1340064)', 'ddr': '(0)'}), '(hbm=1340064, ddr=0)\n', (2941, 2961), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((2971, 2998), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1340064)', 'ddr': '(0)'}), '(hbm=1340064, ddr=0)\n', (2978, 2998), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3008, 3035), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1340064)', 'ddr': '(0)'}), '(hbm=1340064, ddr=0)\n', (3015, 3035), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3045, 3072), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1340064)', 'ddr': '(0)'}), '(hbm=1340064, ddr=0)\n', (3052, 3072), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3082, 3109), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1340064)', 'ddr': '(0)'}), '(hbm=1340064, ddr=0)\n', (3089, 3109), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3119, 3146), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1340064)', 'ddr': '(0)'}), '(hbm=1340064, ddr=0)\n', (3126, 3146), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3156, 3183), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': 
'(1339104)', 'ddr': '(0)'}), '(hbm=1339104, ddr=0)\n', (3163, 3183), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3252, 3279), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84072)', 'ddr': '(520)'}), '(hbm=84072, ddr=520)\n', (3259, 3279), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3289, 3316), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84072)', 'ddr': '(520)'}), '(hbm=84072, ddr=520)\n', (3296, 3316), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3326, 3353), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84072)', 'ddr': '(520)'}), '(hbm=84072, ddr=520)\n', (3333, 3353), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3363, 3390), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84072)', 'ddr': '(520)'}), '(hbm=84072, ddr=520)\n', (3370, 3390), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3400, 3427), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84072)', 'ddr': '(520)'}), '(hbm=84072, ddr=520)\n', (3407, 3427), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3437, 3464), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84072)', 'ddr': '(520)'}), '(hbm=84072, ddr=520)\n', (3444, 3464), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3474, 3501), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84072)', 'ddr': '(520)'}), '(hbm=84072, ddr=520)\n', (3481, 3501), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3511, 3538), 
'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(84040)', 'ddr': '(360)'}), '(hbm=84040, ddr=360)\n', (3518, 3538), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3561, 3590), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510176)', 'ddr': '(1120)'}), '(hbm=510176, ddr=1120)\n', (3568, 3590), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3600, 3629), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510176)', 'ddr': '(1120)'}), '(hbm=510176, ddr=1120)\n', (3607, 3629), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3639, 3668), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510176)', 'ddr': '(1120)'}), '(hbm=510176, ddr=1120)\n', (3646, 3668), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3678, 3707), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510176)', 'ddr': '(1120)'}), '(hbm=510176, ddr=1120)\n', (3685, 3707), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3717, 3746), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510176)', 'ddr': '(1120)'}), '(hbm=510176, ddr=1120)\n', (3724, 3746), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3756, 3785), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510176)', 'ddr': '(1120)'}), '(hbm=510176, ddr=1120)\n', (3763, 3785), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3795, 3824), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510176)', 'ddr': '(1120)'}), '(hbm=510176, ddr=1120)\n', (3802, 3824), False, 'from torchrec.distributed.planner.types import 
ParameterConstraints, Storage, Topology\n'), ((3834, 3862), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(510144)', 'ddr': '(960)'}), '(hbm=510144, ddr=960)\n', (3841, 3862), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3885, 3914), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(512648)', 'ddr': '(1800)'}), '(hbm=512648, ddr=1800)\n', (3892, 3914), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3924, 3953), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(512648)', 'ddr': '(1800)'}), '(hbm=512648, ddr=1800)\n', (3931, 3953), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((3963, 3992), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(512648)', 'ddr': '(1800)'}), '(hbm=512648, ddr=1800)\n', (3970, 3992), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4002, 4031), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(512648)', 'ddr': '(1800)'}), '(hbm=512648, ddr=1800)\n', (4009, 4031), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4041, 4070), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(512648)', 'ddr': '(1800)'}), '(hbm=512648, ddr=1800)\n', (4048, 4070), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4080, 4109), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(512648)', 'ddr': '(1800)'}), '(hbm=512648, ddr=1800)\n', (4087, 4109), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4119, 4148), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(512648)', 'ddr': '(1800)'}), '(hbm=512648, ddr=1800)\n', (4126, 
4148), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4158, 4187), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(512648)', 'ddr': '(1800)'}), '(hbm=512648, ddr=1800)\n', (4165, 4187), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4210, 4240), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1339656)', 'ddr': '(2720)'}), '(hbm=1339656, ddr=2720)\n', (4217, 4240), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4250, 4280), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1339656)', 'ddr': '(2720)'}), '(hbm=1339656, ddr=2720)\n', (4257, 4280), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4290, 4320), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1339656)', 'ddr': '(2720)'}), '(hbm=1339656, ddr=2720)\n', (4297, 4320), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4330, 4360), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1339656)', 'ddr': '(2720)'}), '(hbm=1339656, ddr=2720)\n', (4337, 4360), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4370, 4400), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1339656)', 'ddr': '(2720)'}), '(hbm=1339656, ddr=2720)\n', (4377, 4400), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4410, 4440), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1339656)', 'ddr': '(2720)'}), '(hbm=1339656, ddr=2720)\n', (4417, 4440), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4450, 4480), 'torchrec.distributed.planner.types.Storage', 'Storage', 
([], {'hbm': '(1339656)', 'ddr': '(2720)'}), '(hbm=1339656, ddr=2720)\n', (4457, 4480), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4490, 4520), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1338840)', 'ddr': '(1760)'}), '(hbm=1338840, ddr=1760)\n', (4497, 4520), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((4995, 5020), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(87016)', 'ddr': '(0)'}), '(hbm=87016, ddr=0)\n', (5002, 5020), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5030, 5055), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(87016)', 'ddr': '(0)'}), '(hbm=87016, ddr=0)\n', (5037, 5055), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5065, 5090), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(87016)', 'ddr': '(0)'}), '(hbm=87016, ddr=0)\n', (5072, 5090), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5100, 5125), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(87016)', 'ddr': '(0)'}), '(hbm=87016, ddr=0)\n', (5107, 5125), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5148, 5174), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(530624)', 'ddr': '(0)'}), '(hbm=530624, ddr=0)\n', (5155, 5174), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5184, 5210), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(530624)', 'ddr': '(0)'}), '(hbm=530624, ddr=0)\n', (5191, 5210), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5220, 5246), 
'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(530624)', 'ddr': '(0)'}), '(hbm=530624, ddr=0)\n', (5227, 5246), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5256, 5282), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(530464)', 'ddr': '(0)'}), '(hbm=530464, ddr=0)\n', (5263, 5282), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5305, 5331), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(536080)', 'ddr': '(0)'}), '(hbm=536080, ddr=0)\n', (5312, 5331), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5341, 5367), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(536080)', 'ddr': '(0)'}), '(hbm=536080, ddr=0)\n', (5348, 5367), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5377, 5403), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(536080)', 'ddr': '(0)'}), '(hbm=536080, ddr=0)\n', (5384, 5403), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5413, 5439), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(536080)', 'ddr': '(0)'}), '(hbm=536080, ddr=0)\n', (5420, 5439), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5462, 5489), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1369248)', 'ddr': '(0)'}), '(hbm=1369248, ddr=0)\n', (5469, 5489), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5499, 5526), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1369248)', 'ddr': '(0)'}), '(hbm=1369248, ddr=0)\n', (5506, 5526), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, 
Topology\n'), ((5536, 5563), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1369248)', 'ddr': '(0)'}), '(hbm=1369248, ddr=0)\n', (5543, 5563), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5573, 5600), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(1368928)', 'ddr': '(0)'}), '(hbm=1368928, ddr=0)\n', (5580, 5600), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5930, 5956), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(102304)', 'ddr': '(0)'}), '(hbm=102304, ddr=0)\n', (5937, 5956), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5964, 5990), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(347584)', 'ddr': '(0)'}), '(hbm=347584, ddr=0)\n', (5971, 5990), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((5992, 6018), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(447648)', 'ddr': '(0)'}), '(hbm=447648, ddr=0)\n', (5999, 6018), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6035, 6061), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(315616)', 'ddr': '(0)'}), '(hbm=315616, ddr=0)\n', (6042, 6061), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6071, 6097), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(315616)', 'ddr': '(0)'}), '(hbm=315616, ddr=0)\n', (6078, 6097), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6107, 6133), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(366208)', 'ddr': '(0)'}), '(hbm=366208, ddr=0)\n', (6114, 6133), False, 'from torchrec.distributed.planner.types import 
ParameterConstraints, Storage, Topology\n'), ((6156, 6182), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(612448)', 'ddr': '(0)'}), '(hbm=612448, ddr=0)\n', (6163, 6182), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6192, 6218), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(612448)', 'ddr': '(0)'}), '(hbm=612448, ddr=0)\n', (6199, 6218), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6228, 6254), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(745600)', 'ddr': '(0)'}), '(hbm=745600, ddr=0)\n', (6235, 6254), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6459, 6485), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(102304)', 'ddr': '(0)'}), '(hbm=102304, ddr=0)\n', (6466, 6485), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6493, 6519), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(347584)', 'ddr': '(0)'}), '(hbm=347584, ddr=0)\n', (6500, 6519), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6521, 6547), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(447648)', 'ddr': '(0)'}), '(hbm=447648, ddr=0)\n', (6528, 6547), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6564, 6590), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(315616)', 'ddr': '(0)'}), '(hbm=315616, ddr=0)\n', (6571, 6590), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6600, 6626), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(315616)', 'ddr': '(0)'}), '(hbm=315616, ddr=0)\n', (6607, 6626), False, 'from 
torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6636, 6662), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(366208)', 'ddr': '(0)'}), '(hbm=366208, ddr=0)\n', (6643, 6662), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6685, 6711), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(612448)', 'ddr': '(0)'}), '(hbm=612448, ddr=0)\n', (6692, 6711), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6721, 6747), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(612448)', 'ddr': '(0)'}), '(hbm=612448, ddr=0)\n', (6728, 6747), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((6757, 6783), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(745600)', 'ddr': '(0)'}), '(hbm=745600, ddr=0)\n', (6764, 6783), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((12021, 12068), 'torchrec.distributed.test_utils.test_model.TestSparseNN', 'TestSparseNN', ([], {'tables': 'tables', 'weighted_tables': '[]'}), '(tables=tables, weighted_tables=[])\n', (12033, 12068), False, 'from torchrec.distributed.test_utils.test_model import TestSparseNN, TestTowerCollectionSparseNN, TestTowerSparseNN\n'), ((12437, 12502), 'torchrec.distributed.test_utils.test_model.TestTowerSparseNN', 'TestTowerSparseNN', ([], {'tables': 'tables', 'weighted_tables': 'weighted_tables'}), '(tables=tables, weighted_tables=weighted_tables)\n', (12454, 12502), False, 'from torchrec.distributed.test_utils.test_model import TestSparseNN, TestTowerCollectionSparseNN, TestTowerSparseNN\n'), ((12563, 12638), 'torchrec.distributed.test_utils.test_model.TestTowerCollectionSparseNN', 'TestTowerCollectionSparseNN', ([], {'tables': 'tables', 'weighted_tables': 'weighted_tables'}), '(tables=tables, 
weighted_tables=weighted_tables)\n', (12590, 12638), False, 'from torchrec.distributed.test_utils.test_model import TestSparseNN, TestTowerCollectionSparseNN, TestTowerSparseNN\n'), ((20880, 21108), 'torchrec.distributed.planner.types.ParameterConstraints', 'ParameterConstraints', ([], {'sharding_types': '[ShardingType.TABLE_ROW_WISE.value, ShardingType.COLUMN_WISE.value]', 'compute_kernels': '[EmbeddingComputeKernel.BATCHED_FUSED_UVM.value, EmbeddingComputeKernel.\n BATCHED_DENSE.value]'}), '(sharding_types=[ShardingType.TABLE_ROW_WISE.value,\n ShardingType.COLUMN_WISE.value], compute_kernels=[\n EmbeddingComputeKernel.BATCHED_FUSED_UVM.value, EmbeddingComputeKernel.\n BATCHED_DENSE.value])\n', (20900, 21108), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((10941, 10979), 'torchrec.distributed.planner.types.ParameterConstraints', 'ParameterConstraints', ([], {'min_partition': '(20)'}), '(min_partition=20)\n', (10961, 10979), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((11004, 11068), 'torchrec.distributed.planner.types.ParameterConstraints', 'ParameterConstraints', ([], {'min_partition': '(8)', 'pooling_factors': '[1, 3, 5]'}), '(min_partition=8, pooling_factors=[1, 3, 5])\n', (11024, 11068), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((11093, 11179), 'torchrec.distributed.planner.types.ParameterConstraints', 'ParameterConstraints', ([], {'min_partition': '(9)', 'caching_ratio': '(0.36)', 'pooling_factors': '[8, 2]'}), '(min_partition=9, caching_ratio=0.36, pooling_factors=[\n 8, 2])\n', (11113, 11179), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((11229, 11322), 'torchrec.distributed.planner.types.ParameterConstraints', 'ParameterConstraints', ([], {'min_partition': '(12)', 'caching_ratio': '(0.85)', 'pooling_factors': '[2, 1, 3, 7]'}), 
'(min_partition=12, caching_ratio=0.85, pooling_factors=\n [2, 1, 3, 7])\n', (11249, 11322), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((13518, 13945), 'torchrec.distributed.planner.shard_estimators._calculate_dp_shard_io_sizes', '_calculate_dp_shard_io_sizes', ([], {'batch_sizes': '([self.batch_size] * sharding_option.num_inputs)', 'input_lengths': 'self.constraints[sharding_option.name].pooling_factors', 'emb_dim': 'sharding_option.tensor.shape[1]', 'num_shards': 'self.world_size', 'input_data_type_size': 'input_data_type_size', 'output_data_type_size': 'output_data_type_size', 'is_pooled': 'sharding_option.is_pooled', 'num_objects': '([1.0] * sharding_option.num_inputs)'}), '(batch_sizes=[self.batch_size] *\n sharding_option.num_inputs, input_lengths=self.constraints[\n sharding_option.name].pooling_factors, emb_dim=sharding_option.tensor.\n shape[1], num_shards=self.world_size, input_data_type_size=\n input_data_type_size, output_data_type_size=output_data_type_size,\n is_pooled=sharding_option.is_pooled, num_objects=[1.0] *\n sharding_option.num_inputs)\n', (13546, 13945), False, 'from torchrec.distributed.planner.shard_estimators import _calculate_dp_shard_io_sizes, _calculate_tw_shard_io_sizes\n'), ((15642, 16069), 'torchrec.distributed.planner.shard_estimators._calculate_tw_shard_io_sizes', '_calculate_tw_shard_io_sizes', ([], {'batch_sizes': '([self.batch_size] * sharding_option.num_inputs)', 'world_size': 'self.world_size', 'input_lengths': 'self.constraints[sharding_option.name].pooling_factors', 'emb_dim': 'sharding_option.tensor.shape[1]', 'input_data_type_size': 'input_data_type_size', 'output_data_type_size': 'output_data_type_size', 'is_pooled': 'sharding_option.is_pooled', 'num_objects': '([1.0] * sharding_option.num_inputs)'}), '(batch_sizes=[self.batch_size] *\n sharding_option.num_inputs, world_size=self.world_size, input_lengths=\n self.constraints[sharding_option.name].pooling_factors, 
emb_dim=\n sharding_option.tensor.shape[1], input_data_type_size=\n input_data_type_size, output_data_type_size=output_data_type_size,\n is_pooled=sharding_option.is_pooled, num_objects=[1.0] *\n sharding_option.num_inputs)\n', (15670, 16069), False, 'from torchrec.distributed.planner.shard_estimators import _calculate_dp_shard_io_sizes, _calculate_tw_shard_io_sizes\n'), ((12137, 12281), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': 'self.world_size', 'compute_device': 'self.compute_device', 'local_world_size': 'self.local_world_size', 'batch_size': 'self.batch_size'}), '(world_size=self.world_size, compute_device=self.compute_device,\n local_world_size=self.local_world_size, batch_size=self.batch_size)\n', (12145, 12281), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((14716, 14748), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': 'storage_size', 'ddr': '(0)'}), '(hbm=storage_size, ddr=0)\n', (14723, 14748), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((16231, 16265), 'torchrec.distributed.planner.utils.prod', 'prod', (['sharding_option.tensor.shape'], {}), '(sharding_option.tensor.shape)\n', (16235, 16265), False, 'from torchrec.distributed.planner.utils import prod\n'), ((16573, 16605), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': 'storage_size', 'ddr': '(0)'}), '(hbm=storage_size, ddr=0)\n', (16580, 16605), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((21462, 21606), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': 'self.world_size', 'compute_device': 'self.compute_device', 'local_world_size': 'self.local_world_size', 'batch_size': 'self.batch_size'}), '(world_size=self.world_size, compute_device=self.compute_device,\n local_world_size=self.local_world_size, batch_size=self.batch_size)\n', 
(21470, 21606), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Storage, Topology\n'), ((14108, 14142), 'torchrec.distributed.planner.utils.prod', 'prod', (['sharding_option.tensor.shape'], {}), '(sharding_option.tensor.shape)\n', (14112, 14142), False, 'from torchrec.distributed.planner.utils import prod\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch.testing import FileCheck # @manual
from torchrec.fx import Tracer
from torchrec.fx import symbolic_trace
from torchrec.models.deepfm import (
FMInteractionArch,
SimpleDeepFMNN,
)
from torchrec.modules.embedding_configs import (
EmbeddingBagConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
class FMInteractionArchTest(unittest.TestCase):
def test_basic(self) -> None:
torch.manual_seed(0)
D = 3
B = 3
DI = 2
keys = ["f1", "f2"]
F = len(keys)
dense_features = torch.rand((B, D))
embeddings = KeyedTensor(
keys=keys,
length_per_key=[D] * F,
values=torch.rand((B, D * F)),
)
inter_arch = FMInteractionArch(
fm_in_features=D + D * F,
sparse_feature_names=keys,
deep_fm_dimension=DI,
)
inter_output = inter_arch(dense_features, embeddings)
self.assertEqual(inter_output.size(), (B, D + DI + 1))
# check output forward numerical accuracy
expected_output = torch.Tensor(
[
[0.4963, 0.7682, 0.0885, 0.0000, 0.2646, 4.3660],
[0.1320, 0.3074, 0.6341, 0.0000, 0.0834, 7.6417],
[0.4901, 0.8964, 0.4556, 0.0000, 0.0671, 15.5230],
],
)
self.assertTrue(
torch.allclose(
inter_output,
expected_output,
rtol=1e-4,
atol=1e-4,
)
)
# check tracer compatibility
gm = torch.fx.GraphModule(inter_arch, Tracer().trace(inter_arch))
torch.jit.script(gm)
class SimpleDeepFMNNTest(unittest.TestCase):
def test_basic(self) -> None:
B = 2
D = 8
num_dense_features = 100
eb1_config = EmbeddingBagConfig(
name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1", "f3"]
)
eb2_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=100,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
features = torch.rand((B, num_dense_features))
sparse_features = KeyedJaggedTensor.from_offsets_sync(
keys=["f1", "f3", "f2"],
values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]),
offsets=torch.tensor([0, 2, 4, 6, 8, 10, 11]),
)
deepfm_nn = SimpleDeepFMNN(
num_dense_features=num_dense_features,
embedding_bag_collection=ebc,
hidden_layer_size=20,
deep_fm_dimension=5,
)
logits = deepfm_nn(
dense_features=features,
sparse_features=sparse_features,
)
self.assertEqual(logits.size(), (B, 1))
def test_no_sparse(self) -> None:
ebc = EmbeddingBagCollection(tables=[])
with self.assertRaises(AssertionError):
SimpleDeepFMNN(
num_dense_features=10,
embedding_bag_collection=ebc,
hidden_layer_size=20,
deep_fm_dimension=5,
)
def test_fx(self) -> None:
B = 2
D = 8
num_dense_features = 100
eb1_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=100,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config])
deepfm_nn = SimpleDeepFMNN(
num_dense_features=num_dense_features,
embedding_bag_collection=ebc,
hidden_layer_size=20,
deep_fm_dimension=5,
)
gm = symbolic_trace(deepfm_nn)
FileCheck().check("KeyedJaggedTensor").check("cat").check("f2").run(gm.code)
features = torch.rand((B, num_dense_features))
sparse_features = KeyedJaggedTensor.from_offsets_sync(
keys=["f2"],
values=torch.tensor(range(3)),
offsets=torch.tensor([0, 2, 3]),
)
logits = gm(
dense_features=features,
sparse_features=sparse_features,
)
self.assertEqual(logits.size(), (B, 1))
def test_fx_script(self) -> None:
B = 2
D = 8
num_dense_features = 100
eb1_config = EmbeddingBagConfig(
name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1", "f3"]
)
eb2_config = EmbeddingBagConfig(
name="t2",
embedding_dim=D,
num_embeddings=100,
feature_names=["f2"],
)
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
deepfm_nn = SimpleDeepFMNN(
num_dense_features=num_dense_features,
embedding_bag_collection=ebc,
hidden_layer_size=20,
deep_fm_dimension=5,
)
features = torch.rand((B, num_dense_features))
sparse_features = KeyedJaggedTensor.from_offsets_sync(
keys=["f1", "f3", "f2"],
values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]),
offsets=torch.tensor([0, 2, 4, 6, 8, 10, 11]),
)
deepfm_nn(
dense_features=features,
sparse_features=sparse_features,
)
gm = symbolic_trace(deepfm_nn)
scripted_gm = torch.jit.script(gm)
logits = scripted_gm(features, sparse_features)
self.assertEqual(logits.size(), (B, 1))
if __name__ == "__main__":
unittest.main()
| [
"torchrec.models.deepfm.FMInteractionArch",
"torchrec.fx.symbolic_trace",
"torchrec.models.deepfm.SimpleDeepFMNN",
"torchrec.fx.Tracer",
"torchrec.modules.embedding_modules.EmbeddingBagCollection",
"torchrec.modules.embedding_configs.EmbeddingBagConfig"
] | [((5930, 5945), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5943, 5945), False, 'import unittest\n'), ((780, 800), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (797, 800), False, 'import torch\n'), ((920, 938), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (930, 938), False, 'import torch\n'), ((1107, 1203), 'torchrec.models.deepfm.FMInteractionArch', 'FMInteractionArch', ([], {'fm_in_features': '(D + D * F)', 'sparse_feature_names': 'keys', 'deep_fm_dimension': 'DI'}), '(fm_in_features=D + D * F, sparse_feature_names=keys,\n deep_fm_dimension=DI)\n', (1124, 1203), False, 'from torchrec.models.deepfm import FMInteractionArch, SimpleDeepFMNN\n'), ((1449, 1611), 'torch.Tensor', 'torch.Tensor', (['[[0.4963, 0.7682, 0.0885, 0.0, 0.2646, 4.366], [0.132, 0.3074, 0.6341, 0.0,\n 0.0834, 7.6417], [0.4901, 0.8964, 0.4556, 0.0, 0.0671, 15.523]]'], {}), '([[0.4963, 0.7682, 0.0885, 0.0, 0.2646, 4.366], [0.132, 0.3074,\n 0.6341, 0.0, 0.0834, 7.6417], [0.4901, 0.8964, 0.4556, 0.0, 0.0671, \n 15.523]])\n', (1461, 1611), False, 'import torch\n'), ((2015, 2035), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (2031, 2035), False, 'import torch\n'), ((2199, 2297), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=100,\n feature_names=['f1', 'f3'])\n", (2217, 2297), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((2337, 2429), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (2355, 2429), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((2500, 2555), 
'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (2522, 2555), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((2576, 2611), 'torch.rand', 'torch.rand', (['(B, num_dense_features)'], {}), '((B, num_dense_features))\n', (2586, 2611), False, 'import torch\n'), ((2870, 3000), 'torchrec.models.deepfm.SimpleDeepFMNN', 'SimpleDeepFMNN', ([], {'num_dense_features': 'num_dense_features', 'embedding_bag_collection': 'ebc', 'hidden_layer_size': '(20)', 'deep_fm_dimension': '(5)'}), '(num_dense_features=num_dense_features,\n embedding_bag_collection=ebc, hidden_layer_size=20, deep_fm_dimension=5)\n', (2884, 3000), False, 'from torchrec.models.deepfm import FMInteractionArch, SimpleDeepFMNN\n'), ((3278, 3311), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[]'}), '(tables=[])\n', (3300, 3311), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((3677, 3769), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (3695, 3769), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((3840, 3883), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config]'}), '(tables=[eb1_config])\n', (3862, 3883), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((3904, 4034), 'torchrec.models.deepfm.SimpleDeepFMNN', 'SimpleDeepFMNN', ([], {'num_dense_features': 'num_dense_features', 'embedding_bag_collection': 'ebc', 'hidden_layer_size': '(20)', 'deep_fm_dimension': '(5)'}), '(num_dense_features=num_dense_features,\n 
embedding_bag_collection=ebc, hidden_layer_size=20, deep_fm_dimension=5)\n', (3918, 4034), False, 'from torchrec.models.deepfm import FMInteractionArch, SimpleDeepFMNN\n'), ((4103, 4128), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['deepfm_nn'], {}), '(deepfm_nn)\n', (4117, 4128), False, 'from torchrec.fx import symbolic_trace\n'), ((4234, 4269), 'torch.rand', 'torch.rand', (['(B, num_dense_features)'], {}), '((B, num_dense_features))\n', (4244, 4269), False, 'import torch\n'), ((4740, 4838), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=100,\n feature_names=['f1', 'f3'])\n", (4758, 4838), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((4878, 4970), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (4896, 4970), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((5041, 5096), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (5063, 5096), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((5117, 5247), 'torchrec.models.deepfm.SimpleDeepFMNN', 'SimpleDeepFMNN', ([], {'num_dense_features': 'num_dense_features', 'embedding_bag_collection': 'ebc', 'hidden_layer_size': '(20)', 'deep_fm_dimension': '(5)'}), '(num_dense_features=num_dense_features,\n embedding_bag_collection=ebc, hidden_layer_size=20, deep_fm_dimension=5)\n', (5131, 5247), False, 'from torchrec.models.deepfm import FMInteractionArch, SimpleDeepFMNN\n'), ((5323, 5358), 'torch.rand', 'torch.rand', 
(['(B, num_dense_features)'], {}), '((B, num_dense_features))\n', (5333, 5358), False, 'import torch\n'), ((5722, 5747), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['deepfm_nn'], {}), '(deepfm_nn)\n', (5736, 5747), False, 'from torchrec.fx import symbolic_trace\n'), ((5771, 5791), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (5787, 5791), False, 'import torch\n'), ((1738, 1809), 'torch.allclose', 'torch.allclose', (['inter_output', 'expected_output'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(inter_output, expected_output, rtol=0.0001, atol=0.0001)\n', (1752, 1809), False, 'import torch\n'), ((3372, 3486), 'torchrec.models.deepfm.SimpleDeepFMNN', 'SimpleDeepFMNN', ([], {'num_dense_features': '(10)', 'embedding_bag_collection': 'ebc', 'hidden_layer_size': '(20)', 'deep_fm_dimension': '(5)'}), '(num_dense_features=10, embedding_bag_collection=ebc,\n hidden_layer_size=20, deep_fm_dimension=5)\n', (3386, 3486), False, 'from torchrec.models.deepfm import FMInteractionArch, SimpleDeepFMNN\n'), ((1052, 1074), 'torch.rand', 'torch.rand', (['(B, D * F)'], {}), '((B, D * F))\n', (1062, 1074), False, 'import torch\n'), ((2731, 2778), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (2743, 2778), False, 'import torch\n'), ((2800, 2837), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 11]'], {}), '([0, 2, 4, 6, 8, 10, 11])\n', (2812, 2837), False, 'import torch\n'), ((4421, 4444), 'torch.tensor', 'torch.tensor', (['[0, 2, 3]'], {}), '([0, 2, 3])\n', (4433, 4444), False, 'import torch\n'), ((5478, 5525), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (5490, 5525), False, 'import torch\n'), ((5547, 5584), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 11]'], {}), '([0, 2, 4, 6, 8, 10, 11])\n', (5559, 5584), False, 'import torch\n'), ((1979, 1987), 'torchrec.fx.Tracer', 'Tracer', ([], {}), '()\n', 
(1985, 1987), False, 'from torchrec.fx import Tracer\n'), ((4137, 4148), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (4146, 4148), False, 'from torch.testing import FileCheck\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import List, Dict, Optional
import torch
import torchrec.distributed as trec_dist
from torchrec.datasets.criteo import ( # noqa
DEFAULT_INT_NAMES,
DEFAULT_CAT_NAMES,
CAT_FEATURE_COUNT,
)
from torchrec.inference.model_packager import load_pickle_config
from torchrec.inference.modules import (
PredictFactory,
MultistreamPredictModule,
quantize_embeddings,
)
from torchrec.models.dlrm import DLRM
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
logger: logging.Logger = logging.getLogger(__name__)
# OSS Only
@dataclass
class DLRMModelConfig:
dense_arch_layer_sizes: List[int]
dense_in_features: int
embedding_dim: int
id_list_features_keys: List[str]
num_embeddings_per_feature: List[int]
num_embeddings: int
over_arch_layer_sizes: List[int]
class DLRMPredictModule(MultistreamPredictModule):
"""
nn.Module to wrap DLRM model to use for inference.
Args:
embedding_bag_collection (EmbeddingBagCollection): collection of embedding bags
used to define SparseArch.
dense_in_features (int): the dimensionality of the dense input features.
dense_arch_layer_sizes (List[int]): the layer sizes for the DenseArch.
over_arch_layer_sizes (List[int]): the layer sizes for the OverArch. NOTE: The
output dimension of the InteractionArch should not be manually specified
here.
id_list_features_keys (List[str]): the names of the sparse features. Used to
construct a batch for inference.
dense_device: (Optional[torch.device]).
"""
def __init__(
self,
embedding_bag_collection: EmbeddingBagCollection,
dense_in_features: int,
dense_arch_layer_sizes: List[int],
over_arch_layer_sizes: List[int],
id_list_features_keys: List[str],
dense_device: Optional[torch.device] = None,
) -> None:
module = DLRM(
embedding_bag_collection=embedding_bag_collection,
dense_in_features=dense_in_features,
dense_arch_layer_sizes=dense_arch_layer_sizes,
over_arch_layer_sizes=over_arch_layer_sizes,
dense_device=dense_device,
)
super().__init__(module, dense_device)
self.id_list_features_keys: List[str] = id_list_features_keys
def predict_forward(
self, batch: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
Args:
batch (Dict[str, torch.Tensor]): currently expects input dense features
to be mapped to the key "float_features" and input sparse features
to be mapped to the key "id_list_features".
Returns:
Dict[str, torch.Tensor]: output of inference.
"""
try:
predictions = self.predict_module(
batch["float_features"],
KeyedJaggedTensor(
keys=self.id_list_features_keys,
lengths=batch["id_list_features.lengths"],
values=batch["id_list_features.values"],
),
)
except Exception as e:
logger.info(e)
raise e
return {"default": predictions.to(torch.device("cpu")).float()}
class DLRMPredictFactory(PredictFactory):
def __init__(self) -> None:
self.model_config: DLRMModelConfig = load_pickle_config(
"config.pkl", DLRMModelConfig
)
def create_predict_module(self, rank: int, world_size: int) -> torch.nn.Module:
logging.basicConfig(level=logging.INFO)
device = torch.device("cuda", rank)
torch.cuda.set_device(device)
trec_dist.DistributedModelParallel.SHARE_SHARDED = True
eb_configs = [
EmbeddingBagConfig(
name=f"t_{feature_name}",
embedding_dim=self.model_config.embedding_dim,
num_embeddings=self.model_config.num_embeddings_per_feature[feature_idx]
if self.model_config.num_embeddings is None
else self.model_config.num_embeddings,
feature_names=[feature_name],
)
for feature_idx, feature_name in enumerate(
self.model_config.id_list_features_keys
)
]
ebc = EmbeddingBagCollection(tables=eb_configs, device=torch.device("meta"))
module = DLRMPredictModule(
embedding_bag_collection=ebc,
dense_in_features=self.model_config.dense_in_features,
dense_arch_layer_sizes=self.model_config.dense_arch_layer_sizes,
over_arch_layer_sizes=self.model_config.over_arch_layer_sizes,
dense_device=device,
)
module = quantize_embeddings(module, dtype=torch.qint8, inplace=True)
return trec_dist.DistributedModelParallel(
module=module,
device=device,
env=trec_dist.ShardingEnv.from_local(world_size, rank),
init_data_parallel=False,
)
def batching_metadata(self) -> Dict[str, str]:
return {
"float_features": "dense",
"id_list_features": "sparse",
}
| [
"torchrec.inference.modules.quantize_embeddings",
"torchrec.inference.model_packager.load_pickle_config",
"torchrec.distributed.ShardingEnv.from_local",
"torchrec.models.dlrm.DLRM",
"torchrec.sparse.jagged_tensor.KeyedJaggedTensor",
"torchrec.modules.embedding_configs.EmbeddingBagConfig"
] | [((937, 964), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (954, 964), False, 'import logging\n'), ((2366, 2586), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'embedding_bag_collection', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': 'dense_arch_layer_sizes', 'over_arch_layer_sizes': 'over_arch_layer_sizes', 'dense_device': 'dense_device'}), '(embedding_bag_collection=embedding_bag_collection, dense_in_features=\n dense_in_features, dense_arch_layer_sizes=dense_arch_layer_sizes,\n over_arch_layer_sizes=over_arch_layer_sizes, dense_device=dense_device)\n', (2370, 2586), False, 'from torchrec.models.dlrm import DLRM\n'), ((3830, 3879), 'torchrec.inference.model_packager.load_pickle_config', 'load_pickle_config', (['"""config.pkl"""', 'DLRMModelConfig'], {}), "('config.pkl', DLRMModelConfig)\n", (3848, 3879), False, 'from torchrec.inference.model_packager import load_pickle_config\n'), ((3995, 4034), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (4014, 4034), False, 'import logging\n'), ((4052, 4078), 'torch.device', 'torch.device', (['"""cuda"""', 'rank'], {}), "('cuda', rank)\n", (4064, 4078), False, 'import torch\n'), ((4087, 4116), 'torch.cuda.set_device', 'torch.cuda.set_device', (['device'], {}), '(device)\n', (4108, 4116), False, 'import torch\n'), ((5185, 5245), 'torchrec.inference.modules.quantize_embeddings', 'quantize_embeddings', (['module'], {'dtype': 'torch.qint8', 'inplace': '(True)'}), '(module, dtype=torch.qint8, inplace=True)\n', (5204, 5245), False, 'from torchrec.inference.modules import PredictFactory, MultistreamPredictModule, quantize_embeddings\n'), ((4217, 4513), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': 'f"""t_{feature_name}"""', 'embedding_dim': 'self.model_config.embedding_dim', 'num_embeddings': 
'(self.model_config.num_embeddings_per_feature[feature_idx] if self.\n model_config.num_embeddings is None else self.model_config.num_embeddings)', 'feature_names': '[feature_name]'}), "(name=f't_{feature_name}', embedding_dim=self.\n model_config.embedding_dim, num_embeddings=self.model_config.\n num_embeddings_per_feature[feature_idx] if self.model_config.\n num_embeddings is None else self.model_config.num_embeddings,\n feature_names=[feature_name])\n", (4235, 4513), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((3330, 3469), 'torchrec.sparse.jagged_tensor.KeyedJaggedTensor', 'KeyedJaggedTensor', ([], {'keys': 'self.id_list_features_keys', 'lengths': "batch['id_list_features.lengths']", 'values': "batch['id_list_features.values']"}), "(keys=self.id_list_features_keys, lengths=batch[\n 'id_list_features.lengths'], values=batch['id_list_features.values'])\n", (3347, 3469), False, 'from torchrec.sparse.jagged_tensor import KeyedJaggedTensor\n'), ((4805, 4825), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (4817, 4825), False, 'import torch\n'), ((5367, 5417), 'torchrec.distributed.ShardingEnv.from_local', 'trec_dist.ShardingEnv.from_local', (['world_size', 'rank'], {}), '(world_size, rank)\n', (5399, 5417), True, 'import torchrec.distributed as trec_dist\n'), ((3679, 3698), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3691, 3698), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple, List
import torch
from torch import nn
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.constants import (
BIGINT_DTYPE,
INTRA_NODE_BANDWIDTH,
CROSS_NODE_BANDWIDTH,
kernel_bw_lookup,
POOLING_FACTOR,
CACHING_RATIO,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
ShardEstimator,
Topology,
ShardingOption,
Storage,
PlannerError,
)
from torchrec.distributed.planner.utils import sharder_name
from torchrec.distributed.types import ModuleSharder, ShardingType
class EmbeddingPerfEstimator(ShardEstimator):
    """
    Embedding Wall Time Perf Estimator
    """

    def __init__(
        self,
        topology: Topology,
        constraints: Optional[Dict[str, ParameterConstraints]] = None,
    ) -> None:
        # Topology describes the device fleet (world size, compute device,
        # bandwidths); constraints optionally override per-table settings
        # such as caching_ratio.
        self._topology = topology
        self._constraints = constraints

    def estimate(
        self,
        sharding_options: List[ShardingOption],
        sharder_map: Optional[Dict[str, ModuleSharder[nn.Module]]] = None,
    ) -> None:
        """Annotate each shard of every sharding option with an estimated perf.

        Estimates come from ``perf_func_emb_wall_time`` and are written in
        place onto ``shard.perf``. ``sharder_map`` is unused by this estimator.
        """
        for sharding_option in sharding_options:
            # Per-table caching ratio, only when a constraint exists for this table.
            caching_ratio = (
                self._constraints[sharding_option.name].caching_ratio
                if self._constraints and self._constraints.get(sharding_option.name)
                else None
            )
            shard_perfs = perf_func_emb_wall_time(
                shard_sizes=[shard.size for shard in sharding_option.shards],
                compute_kernel=sharding_option.compute_kernel,
                compute_device=self._topology.compute_device,
                sharding_type=sharding_option.sharding_type,
                batch_size=sharding_option.batch_size,
                world_size=self._topology.world_size,
                local_world_size=self._topology.local_world_size,
                input_lengths=sharding_option.input_lengths,
                input_data_type_size=BIGINT_DTYPE,
                output_data_type_size=sharding_option.tensor.element_size(),
                # Fall back to the default bandwidth constants when the
                # topology does not define these attributes.
                bw_intra_host=getattr(
                    self._topology, "intra_host_bw", INTRA_NODE_BANDWIDTH
                ),
                bw_inter_host=getattr(
                    self._topology, "inter_host_bw", CROSS_NODE_BANDWIDTH
                ),
                # Input dist only matters when upstream modules feed this one;
                # output dist is skipped when downstream modules exist.
                has_input_dist=True if sharding_option.upstream_modules else False,
                has_output_dist=False if sharding_option.downstream_modules else True,
                caching_ratio=caching_ratio,
            )
            for shard, perf in zip(sharding_option.shards, shard_perfs):
                shard.perf = perf
def perf_func_emb_wall_time(
    shard_sizes: List[List[int]],
    compute_kernel: str,
    compute_device: str,
    sharding_type: str,
    batch_size: int,
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    input_data_type_size: float,
    output_data_type_size: float,
    bw_intra_host: int,
    bw_inter_host: int,
    has_input_dist: bool = True,
    has_output_dist: bool = True,
    caching_ratio: Optional[float] = None,
) -> List[float]:
    """
    Attempts to model perfs as a function of relative wall times.
    Only models forward perfs (ignores backward perfs).
    The computation perf estimation is based on EmbeddingBagCollectionSharder
    (pooledEmbedding).
    Args:
        shard_sizes (List[List[int]]): the list of (local_rows, local_cols) of each
            shard.
        compute_kernel (str): compute kernel.
        compute_device (str): compute device.
        sharding_type (str): tw, rw, cw, twrw, dp.
        batch_size (int): the size of each batch.
        world_size (int): the number of devices for all hosts.
        local_world_size (int): the number of the device for each host.
        input_lengths (List[float]): the list of the average number of lookups of each
            input query feature.
        input_data_type_size (float): the data type size of the distributed
            data_parallel input.
        output_data_type_size (float): the data type size of the distributed
            data_parallel output.
        bw_intra_host (int): the bandwidth within the single host like multiple threads.
        bw_inter_host (int): the bandwidth between two hosts like multiple machines.
        has_input_dist (bool = True): if we need input distributed.
        has_output_dist (bool = True): if we need output distributed.
        caching_ratio (Optional[float] = None): cache ratio to determine the bandwidth
            of device.
    Returns:
        List[float]: the list of perf for each shard.
    """
    shard_perfs = []
    B = 1.0 * world_size * batch_size  # global batch size
    # Effective device bandwidth for this kernel/device combo (caching_ratio
    # only affects UVM-caching kernels inside kernel_bw_lookup).
    device_bw = kernel_bw_lookup(compute_device, compute_kernel, caching_ratio)
    if device_bw is None:
        raise PlannerError(
            f"No kernel BW exists for this combo of compute device: {compute_device}, compute kernel: {compute_kernel}"
        )
    # Dispatch per sharding type; each helper returns the three legs
    # (input dist, compute, output dist) of the forward wall time.
    for hash_size, emb_dim in shard_sizes:
        if sharding_type == ShardingType.TABLE_WISE.value:
            input_perf, compute_perf, output_perf = _get_tw_sharding_perf(
                global_batch_size=B,
                world_size=world_size,
                input_lengths=input_lengths,
                emb_dim=emb_dim,
                input_data_type_size=input_data_type_size,
                output_data_type_size=output_data_type_size,
                device_bw=device_bw,
                bw_inter_host=bw_inter_host,
            )
        elif sharding_type == ShardingType.COLUMN_WISE.value:
            input_perf, compute_perf, output_perf = _get_cw_sharding_perf(
                global_batch_size=B,
                world_size=world_size,
                input_lengths=input_lengths,
                emb_dim=emb_dim,
                input_data_type_size=input_data_type_size,
                output_data_type_size=output_data_type_size,
                device_bw=device_bw,
                bw_inter_host=bw_inter_host,
            )
        elif sharding_type == ShardingType.ROW_WISE.value:
            input_perf, compute_perf, output_perf = _get_rw_sharding_perf(
                global_batch_size=B,
                world_size=world_size,
                input_lengths=input_lengths,
                emb_dim=emb_dim,
                input_data_type_size=input_data_type_size,
                output_data_type_size=output_data_type_size,
                device_bw=device_bw,
                bw_inter_host=bw_inter_host,
            )
        elif sharding_type == ShardingType.TABLE_ROW_WISE.value:
            input_perf, compute_perf, output_perf = _get_twrw_sharding_perf(
                global_batch_size=B,
                world_size=world_size,
                local_world_size=local_world_size,
                input_lengths=input_lengths,
                emb_dim=emb_dim,
                input_data_type_size=input_data_type_size,
                output_data_type_size=output_data_type_size,
                device_bw=device_bw,
                bw_inter_host=bw_inter_host,
                bw_intra_host=bw_intra_host,
            )
        elif sharding_type == ShardingType.DATA_PARALLEL.value:
            input_perf, compute_perf, output_perf = _get_dp_sharding_perf(
                batch_size=batch_size,
                input_lengths=input_lengths,
                grad_num_elem=hash_size * emb_dim,
                bw_inter_host=bw_inter_host,
                emb_dim=emb_dim,
                output_data_type_size=output_data_type_size,
                device_bw=device_bw,
            )
        else:
            raise ValueError(f"Unexpected sharding type: {sharding_type}")
        # Only include the comm legs this sharding option actually needs.
        shard_perf = 0
        shard_perf += input_perf if has_input_dist else 0
        shard_perf += compute_perf
        shard_perf += output_perf if has_output_dist else 0
        shard_perfs.append(shard_perf)
    return shard_perfs
def _get_tw_sharding_perf(
global_batch_size: float,
world_size: int,
input_lengths: List[float],
emb_dim: int,
input_data_type_size: float,
output_data_type_size: float,
device_bw: float,
bw_inter_host: int,
) -> Tuple[float, float, float]:
input_perf = (
global_batch_size * sum(input_lengths) * input_data_type_size / bw_inter_host
)
compute_perf = (
global_batch_size
* sum(input_lengths)
* emb_dim
* output_data_type_size
/ device_bw
)
output_perf = (
global_batch_size
* emb_dim
* len(input_lengths)
* output_data_type_size
/ bw_inter_host
)
return (input_perf, compute_perf, output_perf)
def _get_cw_sharding_perf(
global_batch_size: float,
world_size: int,
input_lengths: List[float],
emb_dim: int,
input_data_type_size: float,
output_data_type_size: float,
device_bw: float,
bw_inter_host: int,
) -> Tuple[float, float, float]:
input_perf = (
global_batch_size * sum(input_lengths) * input_data_type_size / bw_inter_host
)
compute_perf = (
global_batch_size
* sum(input_lengths)
* emb_dim
* output_data_type_size
/ device_bw
)
output_perf = (
global_batch_size
* emb_dim
* len(input_lengths)
* output_data_type_size
/ bw_inter_host
)
return (input_perf, compute_perf, output_perf)
def _get_rw_sharding_perf(
global_batch_size: float,
world_size: int,
input_lengths: List[float],
emb_dim: int,
input_data_type_size: float,
output_data_type_size: float,
device_bw: float,
bw_inter_host: int,
) -> Tuple[float, float, float]:
input_perf = (
global_batch_size
* sum(input_lengths)
/ world_size
* input_data_type_size
/ bw_inter_host
)
compute_perf = (
global_batch_size
* sum(input_lengths)
/ world_size
* emb_dim
* output_data_type_size
/ device_bw
)
output_perf = (
global_batch_size
* emb_dim
* len(input_lengths)
* output_data_type_size
/ bw_inter_host
)
return (input_perf, compute_perf, output_perf)
def _get_twrw_sharding_perf(
global_batch_size: float,
world_size: int,
local_world_size: int,
input_lengths: List[float],
emb_dim: int,
input_data_type_size: float,
output_data_type_size: float,
device_bw: float,
bw_inter_host: int,
bw_intra_host: int,
) -> Tuple[float, float, float]:
input_perf = (
global_batch_size
* sum(input_lengths)
/ local_world_size
* input_data_type_size
/ bw_inter_host
)
compute_perf = (
global_batch_size
* sum(input_lengths)
/ local_world_size
* emb_dim
* output_data_type_size
/ device_bw
)
output_perf = (
global_batch_size
* emb_dim
* len(input_lengths)
* output_data_type_size
/ bw_intra_host
+ global_batch_size
* emb_dim
* len(input_lengths)
* output_data_type_size
* (local_world_size / world_size)
/ bw_inter_host
)
return (input_perf, compute_perf, output_perf)
def _get_dp_sharding_perf(
batch_size: float,
input_lengths: List[float],
grad_num_elem: int,
bw_inter_host: int,
emb_dim: int,
output_data_type_size: float,
device_bw: float,
) -> Tuple[float, float, float]:
input_perf = 0
compute_perf = (
batch_size * sum(input_lengths) * emb_dim * output_data_type_size / device_bw
)
# TODO: this is allreduce perf, better separated out as backward perf
output_perf = grad_num_elem * output_data_type_size / bw_inter_host
return (input_perf, compute_perf, output_perf)
class EmbeddingStorageEstimator(ShardEstimator):
    """
    Embedding Storage Usage Estimator
    """

    def __init__(
        self,
        topology: Topology,
        constraints: Optional[Dict[str, ParameterConstraints]] = None,
    ) -> None:
        # Topology supplies batch size / world sizes / compute device;
        # constraints optionally override pooling factors and caching ratio
        # per table.
        self._topology = topology
        self._constraints = constraints

    def estimate(
        self,
        sharding_options: List[ShardingOption],
        sharder_map: Optional[Dict[str, ModuleSharder[nn.Module]]] = None,
    ) -> None:
        """Annotate each shard of every sharding option with estimated storage.

        Raises:
            ValueError: if ``sharder_map`` is not provided (the sharder's
                ``storage_usage`` is needed to size the tensor).
        """
        if not sharder_map:
            raise ValueError("sharder map not provided for storage estimator")
        for sharding_option in sharding_options:
            sharder_key = sharder_name(type(sharding_option.module[1]))
            sharder = sharder_map[sharder_key]
            # Per-table pooling factors; default to the global POOLING_FACTOR
            # when no constraint exists for this table.
            input_lengths = (
                self._constraints[sharding_option.name].pooling_factors
                if self._constraints and self._constraints.get(sharding_option.name)
                else [POOLING_FACTOR]
            )
            caching_ratio = (
                self._constraints[sharding_option.name].caching_ratio
                if self._constraints and self._constraints.get(sharding_option.name)
                else None
            )
            shard_storages = calculate_shard_storages(
                sharder=sharder,
                sharding_type=sharding_option.sharding_type,
                tensor=sharding_option.tensor,
                compute_device=self._topology.compute_device,
                compute_kernel=sharding_option.compute_kernel,
                shard_sizes=[shard.size for shard in sharding_option.shards],
                batch_size=self._topology.batch_size,
                world_size=self._topology.world_size,
                local_world_size=self._topology.local_world_size,
                input_lengths=input_lengths,
                caching_ratio=caching_ratio if caching_ratio else CACHING_RATIO,
            )
            for shard, storage in zip(sharding_option.shards, shard_storages):
                shard.storage = storage
def calculate_shard_storages(
    sharder: ModuleSharder[nn.Module],
    sharding_type: str,
    tensor: torch.Tensor,
    compute_device: str,
    compute_kernel: str,
    shard_sizes: List[List[int]],
    batch_size: int,
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    caching_ratio: float,
) -> List[Storage]:
    """
    Calculates estimated storage sizes for each sharded tensor, comprised of input,
    output, tensor, gradient, and optimizer sizes.
    Args:
        sharder (ModuleSharder[nn.Module]): sharder for module that supports sharding.
        sharding_type (str): provided ShardingType value.
        tensor (torch.Tensor): tensor to be sharded.
        compute_device (str): compute device to be used.
        compute_kernel (str): compute kernel to be used.
        shard_sizes (List[List[int]]): list of dimensions of each sharded tensor.
        batch_size (int): batch size to be used.
        world_size (int): total number of devices in topology.
        local_world_size (int): total number of devices in host group topology.
        input_lengths (List[float]): average input lengths synonymous with pooling
            factors.
        caching_ratio (float): ratio of HBM to DDR memory for UVM caching.
    Returns:
        List[Storage]: storage object for each device in topology
    """
    input_data_type_size = BIGINT_DTYPE
    output_data_type_size = tensor.element_size()
    # Per-shard input/output communication buffer sizes (bytes), by sharding type.
    input_sizes, output_sizes = _calculate_shard_io_sizes(
        sharding_type=sharding_type,
        batch_size=batch_size,
        world_size=world_size,
        local_world_size=local_world_size,
        input_lengths=input_lengths,
        emb_dim=tensor.shape[1],
        shard_sizes=shard_sizes,
        input_data_type_size=input_data_type_size,
        output_data_type_size=output_data_type_size,
    )
    tensor_storage = sharder.storage_usage(tensor, compute_device, compute_kernel)
    hbm_storage: int = tensor_storage.get("hbm", 0)
    ddr_storage: int = tensor_storage.get("ddr", 0)
    # UVM caching keeps a caching_ratio-sized slice of the DDR table in HBM.
    if compute_kernel == EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value:
        hbm_storage = round(ddr_storage * caching_ratio)
        ddr_storage = ddr_storage - hbm_storage
    # Tensor (+ optimizer) bytes that land in HBM vs DDR, per shard.
    hbm_specific_sizes: List[int] = _calculate_storage_specific_sizes(
        storage=hbm_storage,
        shape=tensor.shape,
        shard_sizes=shard_sizes,
        sharding_type=sharding_type,
        compute_kernel=compute_kernel,
        on_device=compute_device == "cuda",
        input_sizes=input_sizes,
        input_data_type_size=input_data_type_size,
        output_data_type_size=output_data_type_size,
    )
    ddr_specific_sizes: List[int] = _calculate_storage_specific_sizes(
        storage=ddr_storage,
        shape=tensor.shape,
        shard_sizes=shard_sizes,
        sharding_type=sharding_type,
        compute_kernel=compute_kernel,
        on_device=compute_device == "cpu",
        input_sizes=input_sizes,
        input_data_type_size=input_data_type_size,
        output_data_type_size=output_data_type_size,
    )
    # I/O buffers are charged to whichever memory the compute device uses.
    hbm_sizes: List[int] = [
        input_size + output_size + hbm_specific_size if compute_device == "cuda" else 0
        for input_size, output_size, hbm_specific_size in zip(
            input_sizes,
            output_sizes,
            hbm_specific_sizes,
        )
    ]
    ddr_sizes: List[int] = [
        input_size + output_size + ddr_specific_size
        if compute_device == "cpu"
        else ddr_specific_size
        for input_size, output_size, ddr_specific_size in zip(
            input_sizes,
            output_sizes,
            ddr_specific_sizes,
        )
    ]
    return [
        Storage(
            hbm=hbm_size,
            ddr=ddr_size,
        )
        for hbm_size, ddr_size in zip(hbm_sizes, ddr_sizes)
    ]
def _calculate_shard_io_sizes(
    sharding_type: str,
    batch_size: int,
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    emb_dim: int,
    shard_sizes: List[List[int]],
    input_data_type_size: int,
    output_data_type_size: int,
) -> Tuple[List[int], List[int]]:
    """Dispatch to the per-sharding-type (input_sizes, output_sizes) calculator.

    Returns per-shard input and output communication buffer sizes in bytes.
    Raises ValueError for an unrecognized sharding type.
    """
    if sharding_type == ShardingType.DATA_PARALLEL.value:
        return _calculate_dp_shard_io_sizes(
            batch_size=batch_size,
            input_lengths=input_lengths,
            emb_dim=emb_dim,
            num_shards=len(shard_sizes),
            input_data_type_size=input_data_type_size,
            output_data_type_size=output_data_type_size,
        )
    elif sharding_type == ShardingType.TABLE_WISE.value:
        return _calculate_tw_shard_io_sizes(
            batch_size=batch_size,
            world_size=world_size,
            input_lengths=input_lengths,
            emb_dim=emb_dim,
            input_data_type_size=input_data_type_size,
            output_data_type_size=output_data_type_size,
        )
    elif sharding_type == ShardingType.COLUMN_WISE.value:
        return _calculate_cw_shard_io_sizes(
            batch_size=batch_size,
            world_size=world_size,
            input_lengths=input_lengths,
            shard_sizes=shard_sizes,
            input_data_type_size=input_data_type_size,
            output_data_type_size=output_data_type_size,
        )
    elif sharding_type == ShardingType.ROW_WISE.value:
        return _calculate_rw_shard_io_sizes(
            batch_size=batch_size,
            world_size=world_size,
            input_lengths=input_lengths,
            shard_sizes=shard_sizes,
            input_data_type_size=input_data_type_size,
            output_data_type_size=output_data_type_size,
        )
    elif sharding_type == ShardingType.TABLE_ROW_WISE.value:
        return _calculate_twrw_shard_io_sizes(
            batch_size=batch_size,
            world_size=world_size,
            local_world_size=local_world_size,
            input_lengths=input_lengths,
            shard_sizes=shard_sizes,
            input_data_type_size=input_data_type_size,
            output_data_type_size=output_data_type_size,
        )
    else:
        raise ValueError(f"Unrecognized sharding type provided: {sharding_type}")
def _calculate_dp_shard_io_sizes(
batch_size: int,
input_lengths: List[float],
emb_dim: int,
num_shards: int,
input_data_type_size: int,
output_data_type_size: int,
) -> Tuple[List[int], List[int]]:
input_sizes: List[int] = [
# pyre-ignore[58]
math.ceil(batch_size * sum(input_lengths) * input_data_type_size)
] * num_shards
output_sizes: List[int] = [
batch_size * emb_dim * len(input_lengths) * output_data_type_size
] * num_shards
return input_sizes, output_sizes
def _calculate_tw_shard_io_sizes(
batch_size: int,
world_size: int,
input_lengths: List[float],
emb_dim: int,
input_data_type_size: int,
output_data_type_size: int,
) -> Tuple[List[int], List[int]]:
input_sizes: List[int] = [
# pyre-ignore[58]
math.ceil(batch_size * world_size * sum(input_lengths) * input_data_type_size)
]
output_sizes: List[int] = [
batch_size * world_size * emb_dim * len(input_lengths) * output_data_type_size
]
return input_sizes, output_sizes
def _calculate_cw_shard_io_sizes(
batch_size: int,
world_size: int,
input_lengths: List[float],
shard_sizes: List[List[int]],
input_data_type_size: int,
output_data_type_size: int,
) -> Tuple[List[int], List[int]]:
input_sizes: List[int] = [
# pyre-ignore[58]
math.ceil(batch_size * world_size * sum(input_lengths) * input_data_type_size)
] * len(shard_sizes)
output_sizes: List[int] = [
(
batch_size
* world_size
* shard_sizes[i][1]
* len(input_lengths)
* output_data_type_size
)
for i in range(len(shard_sizes))
]
return input_sizes, output_sizes
def _calculate_rw_shard_io_sizes(
batch_size: int,
world_size: int,
input_lengths: List[float],
shard_sizes: List[List[int]],
input_data_type_size: int,
output_data_type_size: int,
) -> Tuple[List[int], List[int]]:
input_sizes: List[int] = [
math.ceil(
batch_size
* world_size
# pyre-ignore[58]
* sum(input_lengths)
/ world_size
* input_data_type_size
)
if math.prod(shard) != 0
else 0
for shard in shard_sizes
]
output_sizes: List[int] = [
(
batch_size
* world_size
* shard_sizes[i][1]
* len(input_lengths)
* output_data_type_size
)
if math.prod(shard) != 0
else 0
for i, shard in enumerate(shard_sizes)
]
return input_sizes, output_sizes
def _calculate_twrw_shard_io_sizes(
batch_size: int,
world_size: int,
local_world_size: int,
input_lengths: List[float],
shard_sizes: List[List[int]],
input_data_type_size: int,
output_data_type_size: int,
) -> Tuple[List[int], List[int]]:
input_sizes: List[int] = [
math.ceil(
batch_size
* world_size
# pyre-ignore[58]
* sum(input_lengths)
/ local_world_size
* input_data_type_size
)
if math.prod(shard) != 0
else 0
for shard in shard_sizes
]
output_sizes: List[int] = [
(
batch_size
* world_size
* shard_sizes[i][1]
* len(input_lengths)
* output_data_type_size
)
if math.prod(shard) != 0
else 0
for i, shard in enumerate(shard_sizes)
]
return input_sizes, output_sizes
def _calculate_storage_specific_sizes(
    storage: int,
    shape: torch.Size,
    shard_sizes: List[List[int]],
    sharding_type: str,
    compute_kernel: str,
    on_device: bool,
    input_sizes: List[int],
    input_data_type_size: int,
    output_data_type_size: int,
) -> List[int]:
    """Per-shard tensor-plus-optimizer storage (bytes) within one memory tier."""
    is_data_parallel = sharding_type == ShardingType.DATA_PARALLEL.value
    sizes: List[int] = []
    for size in shard_sizes:
        # Data-parallel replicates the full tensor; otherwise each shard
        # holds a slice proportional to its element count.
        if is_data_parallel:
            tensor_size = storage
        else:
            tensor_size = math.ceil(storage * math.prod(size) / math.prod(shape))
        # Optimizer state (tensor_size * 2) is only counted for data-parallel.
        optimizer_size = tensor_size * 2 if is_data_parallel else 0
        sizes.append(tensor_size + optimizer_size)
    return sizes
| [
"torchrec.distributed.planner.constants.kernel_bw_lookup",
"torchrec.distributed.planner.types.PlannerError",
"torchrec.distributed.planner.types.Storage"
] | [((5013, 5076), 'torchrec.distributed.planner.constants.kernel_bw_lookup', 'kernel_bw_lookup', (['compute_device', 'compute_kernel', 'caching_ratio'], {}), '(compute_device, compute_kernel, caching_ratio)\n', (5029, 5076), False, 'from torchrec.distributed.planner.constants import BIGINT_DTYPE, INTRA_NODE_BANDWIDTH, CROSS_NODE_BANDWIDTH, kernel_bw_lookup, POOLING_FACTOR, CACHING_RATIO\n'), ((5117, 5248), 'torchrec.distributed.planner.types.PlannerError', 'PlannerError', (['f"""No kernel BW exists for this combo of compute device: {compute_device}, compute kernel: {compute_kernel}"""'], {}), "(\n f'No kernel BW exists for this combo of compute device: {compute_device}, compute kernel: {compute_kernel}'\n )\n", (5129, 5248), False, 'from torchrec.distributed.planner.types import ParameterConstraints, ShardEstimator, Topology, ShardingOption, Storage, PlannerError\n'), ((17913, 17948), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': 'hbm_size', 'ddr': 'ddr_size'}), '(hbm=hbm_size, ddr=ddr_size)\n', (17920, 17948), False, 'from torchrec.distributed.planner.types import ParameterConstraints, ShardEstimator, Topology, ShardingOption, Storage, PlannerError\n'), ((22610, 22626), 'math.prod', 'math.prod', (['shard'], {}), '(shard)\n', (22619, 22626), False, 'import math\n'), ((22899, 22915), 'math.prod', 'math.prod', (['shard'], {}), '(shard)\n', (22908, 22915), False, 'import math\n'), ((23545, 23561), 'math.prod', 'math.prod', (['shard'], {}), '(shard)\n', (23554, 23561), False, 'import math\n'), ((23834, 23850), 'math.prod', 'math.prod', (['shard'], {}), '(shard)\n', (23843, 23850), False, 'import math\n'), ((24333, 24349), 'math.prod', 'math.prod', (['shape'], {}), '(shape)\n', (24342, 24349), False, 'import math\n'), ((24315, 24330), 'math.prod', 'math.prod', (['size'], {}), '(size)\n', (24324, 24330), False, 'import math\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-ignore-all-errors[56]
import unittest
from unittest.mock import Mock, patch
from torchrec.metrics.throughput import ThroughputMetric
THROUGHPUT_PATH = "torchrec.metrics.throughput"
class ThroughputMetricTest(unittest.TestCase):
    """Unit tests for ThroughputMetric (total/lifetime/window throughput)."""

    def setUp(self) -> None:
        # Fixed fan-out for every test: examples per step = world_size * batch_size.
        self.world_size = 64
        self.batch_size = 256

    @patch(THROUGHPUT_PATH + ".time.monotonic")
    def test_no_batches(self, time_mock: Mock) -> None:
        # With no update() calls, only the total-examples counter (0) is reported.
        time_mock.return_value = 1
        throughput_metric = ThroughputMetric(
            batch_size=self.batch_size, world_size=self.world_size, window_seconds=100
        )
        self.assertEqual(
            throughput_metric.compute(), {"throughput-throughput|total_examples": 0}
        )

    @patch(THROUGHPUT_PATH + ".time.monotonic")
    def test_one_batch(self, time_mock: Mock) -> None:
        time_mock.return_value = 1
        throughput_metric = ThroughputMetric(
            batch_size=self.batch_size, world_size=self.world_size, window_seconds=100
        )
        throughput_metric.update()
        # A single step counts one full global batch of examples.
        self.assertEqual(
            throughput_metric.compute(),
            {"throughput-throughput|total_examples": self.batch_size * self.world_size},
        )

    @patch(THROUGHPUT_PATH + ".time.monotonic")
    def _test_throughput(self, time_mock: Mock, warmup_steps: int) -> None:
        # Helper (underscore prefix: not collected by unittest) — replays a
        # fixed timeline of update() calls and cross-checks compute() against
        # reference lifetime/window throughput values computed inline.
        update_timestamps = [10, 11, 12, 14, 15, 17, 18, 20, 21, 22, 25, 29, 30]
        update_timestamps.sort()
        throughput_metric = ThroughputMetric(
            batch_size=self.batch_size,
            world_size=self.world_size,
            window_seconds=10,
            warmup_steps=warmup_steps,
        )
        # Mirror of the metric's sliding window: per-step time deltas and
        # their running sum, trimmed to window_seconds (10) below.
        window_time_lapse_buffer = []
        window_time_lapse = 0
        for i in range(len(update_timestamps)):
            time_mock.return_value = update_timestamps[i]
            throughput_metric.update()
            if i >= warmup_steps:
                window_time_lapse_buffer.append(
                    update_timestamps[i] - update_timestamps[i - 1]
                )
                window_time_lapse += update_timestamps[i] - update_timestamps[i - 1]
            ret = throughput_metric.compute()
            total_examples = self.world_size * self.batch_size * (i + 1)
            # During warmup only the total-examples counter is expected.
            if i < warmup_steps:
                self.assertEqual(
                    ret, {"throughput-throughput|total_examples": total_examples}
                )
                continue
            # Lifetime throughput excludes the warmup examples and the
            # warmup time span.
            lifetime_examples = total_examples - (
                self.world_size * self.batch_size * warmup_steps
            )
            lifetime_throughput = lifetime_examples / (
                update_timestamps[i] - update_timestamps[warmup_steps - 1]
            )
            # Trim the reference window to the configured 10 seconds.
            while window_time_lapse > 10:
                window_time_lapse -= window_time_lapse_buffer.pop(0)
            window_throughput = (
                len(window_time_lapse_buffer)
                * self.world_size
                * self.batch_size
                / window_time_lapse
            )
            self.assertEqual(
                ret["throughput-throughput|lifetime_throughput"], lifetime_throughput
            )
            self.assertEqual(
                ret["throughput-throughput|window_throughput"], window_throughput
            )
            self.assertEqual(
                ret["throughput-throughput|total_examples"], total_examples
            )

    def test_throughput_warmup_steps_0(self) -> None:
        # warmup_steps must be positive; the metric constructor rejects 0.
        with self.assertRaises(ValueError):
            self._test_throughput(warmup_steps=0)

    def test_throughput_warmup_steps_1(self) -> None:
        self._test_throughput(warmup_steps=1)

    def test_throughput_warmup_steps_2(self) -> None:
        self._test_throughput(warmup_steps=2)

    def test_throughput_warmup_steps_10(self) -> None:
        self._test_throughput(warmup_steps=10)

    def test_warmup_checkpointing(self) -> None:
        # Verifies that resetting _steps (as checkpoint restore would) makes
        # the metric re-run warmup accounting on each "restart".
        warmup_steps = 5
        extra_steps = 2
        throughput_metric = ThroughputMetric(
            batch_size=self.batch_size,
            world_size=self.world_size,
            window_seconds=10,
            warmup_steps=warmup_steps,
        )
        for i in range(5):
            for _ in range(warmup_steps + extra_steps):
                throughput_metric.update()
            self.assertEqual(
                throughput_metric.warmup_examples.item(),
                warmup_steps * (i + 1) * self.batch_size * self.world_size,
            )
            self.assertEqual(
                throughput_metric.total_examples.item(),
                (warmup_steps + extra_steps)
                * (i + 1)
                * self.batch_size
                * self.world_size,
            )
            # Mimic trainer crashing and loading a checkpoint
            throughput_metric._steps = 0
| [
"torchrec.metrics.throughput.ThroughputMetric"
] | [((567, 609), 'unittest.mock.patch', 'patch', (["(THROUGHPUT_PATH + '.time.monotonic')"], {}), "(THROUGHPUT_PATH + '.time.monotonic')\n", (572, 609), False, 'from unittest.mock import Mock, patch\n'), ((971, 1013), 'unittest.mock.patch', 'patch', (["(THROUGHPUT_PATH + '.time.monotonic')"], {}), "(THROUGHPUT_PATH + '.time.monotonic')\n", (976, 1013), False, 'from unittest.mock import Mock, patch\n'), ((1454, 1496), 'unittest.mock.patch', 'patch', (["(THROUGHPUT_PATH + '.time.monotonic')"], {}), "(THROUGHPUT_PATH + '.time.monotonic')\n", (1459, 1496), False, 'from unittest.mock import Mock, patch\n'), ((729, 825), 'torchrec.metrics.throughput.ThroughputMetric', 'ThroughputMetric', ([], {'batch_size': 'self.batch_size', 'world_size': 'self.world_size', 'window_seconds': '(100)'}), '(batch_size=self.batch_size, world_size=self.world_size,\n window_seconds=100)\n', (745, 825), False, 'from torchrec.metrics.throughput import ThroughputMetric\n'), ((1132, 1228), 'torchrec.metrics.throughput.ThroughputMetric', 'ThroughputMetric', ([], {'batch_size': 'self.batch_size', 'world_size': 'self.world_size', 'window_seconds': '(100)'}), '(batch_size=self.batch_size, world_size=self.world_size,\n window_seconds=100)\n', (1148, 1228), False, 'from torchrec.metrics.throughput import ThroughputMetric\n'), ((1715, 1837), 'torchrec.metrics.throughput.ThroughputMetric', 'ThroughputMetric', ([], {'batch_size': 'self.batch_size', 'world_size': 'self.world_size', 'window_seconds': '(10)', 'warmup_steps': 'warmup_steps'}), '(batch_size=self.batch_size, world_size=self.world_size,\n window_seconds=10, warmup_steps=warmup_steps)\n', (1731, 1837), False, 'from torchrec.metrics.throughput import ThroughputMetric\n'), ((4217, 4339), 'torchrec.metrics.throughput.ThroughputMetric', 'ThroughputMetric', ([], {'batch_size': 'self.batch_size', 'world_size': 'self.world_size', 'window_seconds': '(10)', 'warmup_steps': 'warmup_steps'}), '(batch_size=self.batch_size, world_size=self.world_size,\n 
window_seconds=10, warmup_steps=warmup_steps)\n', (4233, 4339), False, 'from torchrec.metrics.throughput import ThroughputMetric\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Type
import torch
from torch import nn
from torch.nn.modules.module import _IncompatibleKeys
from torchrec.distributed.embedding import (
create_embedding_configs_by_sharding,
EmbeddingCollectionAwaitable,
EmbeddingCollectionContext,
)
from torchrec.distributed.embedding_sharding import (
EmbeddingSharding,
ListOfSparseFeaturesListAwaitable,
)
from torchrec.distributed.embedding_types import (
BaseQuantEmbeddingSharder,
ListOfSparseFeaturesList,
ShardingType,
SparseFeatures,
SparseFeaturesList,
)
from torchrec.distributed.sharding.sequence_sharding import SequenceShardingContext
from torchrec.distributed.sharding.tw_sequence_sharding import (
InferTwSequenceEmbeddingSharding,
)
from torchrec.distributed.types import (
Awaitable,
LazyAwaitable,
ParameterSharding,
ShardedModule,
ShardedModuleContext,
ShardingEnv,
)
from torchrec.distributed.utils import filter_state_dict
from torchrec.modules.embedding_configs import EmbeddingTableConfig
from torchrec.quant.embedding_modules import (
EmbeddingCollection as QuantEmbeddingCollection,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
# Load the fbgemm sparse ops libraries needed by the quantized lookups.
# NOTE(review): OSError is swallowed — presumably the ops are already
# registered in builds where the shared library is unavailable; confirm.
try:
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
    pass
def create_infer_embedding_sharding(
    sharding_type: str,
    embedding_configs: List[
        Tuple[EmbeddingTableConfig, ParameterSharding, torch.Tensor]
    ],
    env: ShardingEnv,
    device: Optional[torch.device] = None,
) -> EmbeddingSharding[SparseFeaturesList, List[torch.Tensor]]:
    """Build the inference-time embedding sharding for ``sharding_type``.

    Only table-wise sharding is supported; any other value raises ValueError.
    """
    if sharding_type != ShardingType.TABLE_WISE.value:
        raise ValueError(f"Sharding type not supported {sharding_type}")
    return InferTwSequenceEmbeddingSharding(embedding_configs, env, device)
class ShardedQuantEmbeddingCollection(
ShardedModule[
ListOfSparseFeaturesList, List[List[torch.Tensor]], Dict[str, JaggedTensor]
],
):
"""
Sharded implementation of `QuantEmbeddingCollection`.
"""
def __init__(
self,
module: QuantEmbeddingCollection,
table_name_to_parameter_sharding: Dict[str, ParameterSharding],
env: ShardingEnv,
fused_params: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__()
sharding_type_to_embedding_configs = create_embedding_configs_by_sharding(
module, table_name_to_parameter_sharding
)
self._sharding_type_to_sharding: Dict[
str, EmbeddingSharding[SparseFeaturesList, List[torch.Tensor]]
] = {
sharding_type: create_infer_embedding_sharding(
sharding_type, embedding_confings, env
)
for sharding_type, embedding_confings in sharding_type_to_embedding_configs.items()
}
self._input_dists: nn.ModuleList = nn.ModuleList()
self._lookups: nn.ModuleList = nn.ModuleList()
self._create_lookups(fused_params)
self._output_dists: nn.ModuleList = nn.ModuleList()
self._feature_splits: List[int] = []
self._features_order: List[int] = []
self._has_uninitialized_input_dist: bool = True
self._has_uninitialized_output_dist: bool = True
self._embedding_dim: int = module.embedding_dim
self._embedding_names_per_sharding: List[List[str]] = []
for sharding in self._sharding_type_to_sharding.values():
self._embedding_names_per_sharding.append(sharding.embedding_names())
self._need_indices: bool = module.need_indices
def _create_input_dist(
self,
input_feature_names: List[str],
device: torch.device,
) -> None:
feature_names: List[str] = []
self._feature_splits: List[int] = []
for sharding in self._sharding_type_to_sharding.values():
self._input_dists.append(sharding.create_input_dist())
feature_names.extend(sharding.id_list_feature_names())
self._feature_splits.append(len(sharding.id_list_feature_names()))
self._features_order: List[int] = []
for f in feature_names:
self._features_order.append(input_feature_names.index(f))
self._features_order = (
[]
if self._features_order == list(range(len(self._features_order)))
else self._features_order
)
self.register_buffer(
"_features_order_tensor",
torch.tensor(self._features_order, device=device, dtype=torch.int32),
)
def _create_lookups(self, fused_params: Optional[Dict[str, Any]]) -> None:
for sharding in self._sharding_type_to_sharding.values():
self._lookups.append(sharding.create_lookup(fused_params=fused_params))
def _create_output_dist(
self,
device: Optional[torch.device] = None,
) -> None:
for sharding in self._sharding_type_to_sharding.values():
self._output_dists.append(sharding.create_output_dist(device))
    # pyre-ignore [3, 14]
    def input_dist(
        self,
        ctx: EmbeddingCollectionContext,
        features: KeyedJaggedTensor,
    ) -> Awaitable[Any]:
        """Distribute input features to the ranks owning their embedding shards.

        On the first call, lazily builds the input/output dist modules (the
        feature order and target device are only known once real features
        arrive). Features are then permuted into sharding order if needed,
        split per sharding, and fed through one input-dist per sharding. One
        ``SequenceShardingContext`` per sharding is appended to ``ctx`` so
        ``output_dist`` can reuse the pre-all2all features later.
        """
        if self._has_uninitialized_input_dist:
            self._create_input_dist(
                input_feature_names=features.keys() if features is not None else [],
                device=features.device(),
            )
            self._has_uninitialized_input_dist = False
        if self._has_uninitialized_output_dist:
            self._create_output_dist(features.device())
            self._has_uninitialized_output_dist = False
        with torch.no_grad():
            features_by_sharding = []
            if self._features_order:
                # Reorder features into the order the shardings expect;
                # an empty _features_order means the input is already ordered.
                features = features.permute(
                    self._features_order,
                    # pyre-ignore [6]
                    self._features_order_tensor,
                )
            features_by_sharding = features.split(
                self._feature_splits,
            )
            # save input splits and output splits in sharding context which
            # will be reused in sequence embedding all2all
            awaitables = []
            for module, features in zip(self._input_dists, features_by_sharding):
                tensor_awaitable = module(
                    SparseFeatures(
                        id_list_features=features,
                        id_score_list_features=None,
                    )
                ).wait()  # a dummy wait since now length indices comm is splited
                ctx.sharding_contexts.append(
                    SequenceShardingContext(
                        features_before_input_dist=features,
                        input_splits=[],
                        output_splits=[],
                        unbucketize_permute_tensor=None,
                    )
                )
                awaitables.append(tensor_awaitable)
        return ListOfSparseFeaturesListAwaitable(awaitables)
def compute(
self, ctx: ShardedModuleContext, dist_input: ListOfSparseFeaturesList
) -> List[List[torch.Tensor]]:
ret: List[List[torch.Tensor]] = []
for lookup, features in zip(
self._lookups,
dist_input,
):
ret.append([o.view(-1, self._embedding_dim) for o in lookup(features)])
return ret
def output_dist(
self, ctx: ShardedModuleContext, output: List[List[torch.Tensor]]
) -> LazyAwaitable[Dict[str, JaggedTensor]]:
awaitables_per_sharding: List[Awaitable[Dict[str, JaggedTensor]]] = []
features_before_all2all_per_sharding: List[KeyedJaggedTensor] = []
for odist, embeddings, sharding_ctx in zip(
self._output_dists,
output,
# pyre-ignore [16]
ctx.sharding_contexts,
):
awaitables_per_sharding.append(odist(embeddings, sharding_ctx))
features_before_all2all_per_sharding.append(
sharding_ctx.features_before_input_dist
)
return EmbeddingCollectionAwaitable(
awaitables_per_sharding=awaitables_per_sharding,
features_per_sharding=features_before_all2all_per_sharding,
embedding_names_per_sharding=self._embedding_names_per_sharding,
need_indices=self._need_indices,
)
def compute_and_output_dist(
self, ctx: ShardedModuleContext, input: ListOfSparseFeaturesList
) -> LazyAwaitable[Dict[str, JaggedTensor]]:
return self.output_dist(ctx, self.compute(ctx, input))
    # pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Collect the state of every lookup under ``<prefix>embeddings.``.

        Mirrors ``nn.Module.state_dict``: when no destination is supplied, an
        ``OrderedDict`` (with its ``_metadata`` attribute) is created, then
        each lookup writes its own entries into it.
        """
        if destination is None:
            destination = OrderedDict()
            # pyre-ignore [16]
            destination._metadata = OrderedDict()
        for lookup in self._lookups:
            lookup.state_dict(destination, prefix + "embeddings.", keep_vars)
        return destination
# pyre-fixme[14]: `load_state_dict` overrides method defined in `Module`
# inconsistently.
def load_state_dict(
self,
state_dict: "OrderedDict[str, torch.Tensor]",
strict: bool = True,
) -> _IncompatibleKeys:
missing_keys = []
unexpected_keys = []
for lookup in self._lookups:
missing, unexpected = lookup.load_state_dict(
filter_state_dict(state_dict, "embeddings"),
strict,
)
missing_keys.extend(missing)
unexpected_keys.extend(unexpected)
return _IncompatibleKeys(
missing_keys=missing_keys, unexpected_keys=unexpected_keys
)
    def copy(self, device: torch.device) -> nn.Module:
        # No per-device copy is made; the module itself is returned unchanged.
        # NOTE(review): `device` is intentionally ignored here — confirm this
        # is the desired behavior for quantized inference modules.
        return self
    def create_context(self) -> ShardedModuleContext:
        # Fresh context with an empty per-sharding context list; input_dist
        # appends one SequenceShardingContext per sharding during forward.
        return EmbeddingCollectionContext(sharding_contexts=[])
class QuantEmbeddingCollectionSharder(
    BaseQuantEmbeddingSharder[QuantEmbeddingCollection]
):
    """
    This implementation uses non-fused EmbeddingCollection
    """

    def shard(
        self,
        module: QuantEmbeddingCollection,
        params: Dict[str, ParameterSharding],
        env: ShardingEnv,
        device: Optional[torch.device] = None,
    ) -> ShardedQuantEmbeddingCollection:
        """Wrap the quantized collection in its sharded counterpart."""
        return ShardedQuantEmbeddingCollection(module, params, env, self.fused_params)

    def shardable_parameters(
        self, module: QuantEmbeddingCollection
    ) -> Dict[str, nn.Parameter]:
        """Map each table name (second-to-last dotted path component of the
        state-dict key) to its ``.weight`` parameter."""
        shardable: Dict[str, nn.Parameter] = {}
        for name, param in module.state_dict().items():
            if name.endswith(".weight"):
                shardable[name.split(".")[-2]] = param
        return shardable

    @property
    def module_type(self) -> Type[QuantEmbeddingCollection]:
        return QuantEmbeddingCollection
| [
"torchrec.distributed.sharding.sequence_sharding.SequenceShardingContext",
"torchrec.distributed.embedding_types.SparseFeatures",
"torchrec.distributed.sharding.tw_sequence_sharding.InferTwSequenceEmbeddingSharding",
"torchrec.distributed.embedding_sharding.ListOfSparseFeaturesListAwaitable",
"torchrec.dist... | [((1527, 1596), 'torch.ops.load_library', 'torch.ops.load_library', (['"""//deeplearning/fbgemm/fbgemm_gpu:sparse_ops"""'], {}), "('//deeplearning/fbgemm/fbgemm_gpu:sparse_ops')\n", (1549, 1596), False, 'import torch\n'), ((1601, 1674), 'torch.ops.load_library', 'torch.ops.load_library', (['"""//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu"""'], {}), "('//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu')\n", (1623, 1674), False, 'import torch\n'), ((2068, 2132), 'torchrec.distributed.sharding.tw_sequence_sharding.InferTwSequenceEmbeddingSharding', 'InferTwSequenceEmbeddingSharding', (['embedding_configs', 'env', 'device'], {}), '(embedding_configs, env, device)\n', (2100, 2132), False, 'from torchrec.distributed.sharding.tw_sequence_sharding import InferTwSequenceEmbeddingSharding\n'), ((2759, 2837), 'torchrec.distributed.embedding.create_embedding_configs_by_sharding', 'create_embedding_configs_by_sharding', (['module', 'table_name_to_parameter_sharding'], {}), '(module, table_name_to_parameter_sharding)\n', (2795, 2837), False, 'from torchrec.distributed.embedding import create_embedding_configs_by_sharding, EmbeddingCollectionAwaitable, EmbeddingCollectionContext\n'), ((3275, 3290), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3288, 3290), False, 'from torch import nn\n'), ((3330, 3345), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3343, 3345), False, 'from torch import nn\n'), ((3433, 3448), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3446, 3448), False, 'from torch import nn\n'), ((8486, 8736), 'torchrec.distributed.embedding.EmbeddingCollectionAwaitable', 'EmbeddingCollectionAwaitable', ([], {'awaitables_per_sharding': 'awaitables_per_sharding', 'features_per_sharding': 'features_before_all2all_per_sharding', 'embedding_names_per_sharding': 'self._embedding_names_per_sharding', 'need_indices': 'self._need_indices'}), '(awaitables_per_sharding=\n awaitables_per_sharding, features_per_sharding=\n 
features_before_all2all_per_sharding, embedding_names_per_sharding=self\n ._embedding_names_per_sharding, need_indices=self._need_indices)\n', (8514, 8736), False, 'from torchrec.distributed.embedding import create_embedding_configs_by_sharding, EmbeddingCollectionAwaitable, EmbeddingCollectionContext\n'), ((10159, 10236), 'torch.nn.modules.module._IncompatibleKeys', '_IncompatibleKeys', ([], {'missing_keys': 'missing_keys', 'unexpected_keys': 'unexpected_keys'}), '(missing_keys=missing_keys, unexpected_keys=unexpected_keys)\n', (10176, 10236), False, 'from torch.nn.modules.module import _IncompatibleKeys\n'), ((10405, 10453), 'torchrec.distributed.embedding.EmbeddingCollectionContext', 'EmbeddingCollectionContext', ([], {'sharding_contexts': '[]'}), '(sharding_contexts=[])\n', (10431, 10453), False, 'from torchrec.distributed.embedding import create_embedding_configs_by_sharding, EmbeddingCollectionAwaitable, EmbeddingCollectionContext\n'), ((4870, 4938), 'torch.tensor', 'torch.tensor', (['self._features_order'], {'device': 'device', 'dtype': 'torch.int32'}), '(self._features_order, device=device, dtype=torch.int32)\n', (4882, 4938), False, 'import torch\n'), ((6044, 6059), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6057, 6059), False, 'import torch\n'), ((7366, 7411), 'torchrec.distributed.embedding_sharding.ListOfSparseFeaturesListAwaitable', 'ListOfSparseFeaturesListAwaitable', (['awaitables'], {}), '(awaitables)\n', (7399, 7411), False, 'from torchrec.distributed.embedding_sharding import EmbeddingSharding, ListOfSparseFeaturesListAwaitable\n'), ((9319, 9332), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9330, 9332), False, 'from collections import OrderedDict\n'), ((9400, 9413), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9411, 9413), False, 'from collections import OrderedDict\n'), ((9973, 10016), 'torchrec.distributed.utils.filter_state_dict', 'filter_state_dict', (['state_dict', '"""embeddings"""'], {}), 
"(state_dict, 'embeddings')\n", (9990, 10016), False, 'from torchrec.distributed.utils import filter_state_dict\n'), ((7029, 7162), 'torchrec.distributed.sharding.sequence_sharding.SequenceShardingContext', 'SequenceShardingContext', ([], {'features_before_input_dist': 'features', 'input_splits': '[]', 'output_splits': '[]', 'unbucketize_permute_tensor': 'None'}), '(features_before_input_dist=features, input_splits=[\n ], output_splits=[], unbucketize_permute_tensor=None)\n', (7052, 7162), False, 'from torchrec.distributed.sharding.sequence_sharding import SequenceShardingContext\n'), ((6739, 6809), 'torchrec.distributed.embedding_types.SparseFeatures', 'SparseFeatures', ([], {'id_list_features': 'features', 'id_score_list_features': 'None'}), '(id_list_features=features, id_score_list_features=None)\n', (6753, 6809), False, 'from torchrec.distributed.embedding_types import BaseQuantEmbeddingSharder, ListOfSparseFeaturesList, ShardingType, SparseFeatures, SparseFeaturesList\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import cast
import torch
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.shard_estimators import EmbeddingPerfEstimator
from torchrec.distributed.planner.types import Topology
from torchrec.distributed.test_utils.test_model import TestSparseNN
from torchrec.distributed.types import ModuleSharder
from torchrec.modules.embedding_configs import EmbeddingBagConfig
class TestEmbeddingPerfEstimator(unittest.TestCase):
    """Checks EmbeddingPerfEstimator's per-shard perf estimates for every
    (compute kernel, sharding type) combination of a single table."""

    def setUp(self) -> None:
        # Two-rank CUDA topology; the estimator is attached to the enumerator
        # so each enumerated sharding option carries perf estimates.
        topology = Topology(world_size=2, compute_device="cuda")
        self.estimator = EmbeddingPerfEstimator(topology=topology)
        self.enumerator = EmbeddingEnumerator(
            topology=topology, estimator=self.estimator
        )
    def test_1_table_perf(self) -> None:
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100,
                embedding_dim=10,
                name="table_0",
                feature_names=["feature_0"],
            )
        ]
        model = TestSparseNN(tables=tables, weighted_tables=[])
        sharding_options = self.enumerator.enumerate(
            module=model,
            sharders=[
                cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())
            ],
        )
        # Golden values keyed by (compute_kernel, sharding_type); options
        # that shard across both ranks have one perf entry per shard.
        expected_perfs = {
            ("dense", "data_parallel"): [
                0.00037119411932357005,
                0.00037119411932357005,
            ],
            ("batched_dense", "data_parallel"): [
                0.00035296814098804687,
                0.00035296814098804687,
            ],
            ("dense", "table_wise"): [0.0033004209102576545],
            ("batched_dense", "table_wise"): [0.0032639689535866085],
            ("batched_fused", "table_wise"): [0.003221441670803721],
            ("sparse", "table_wise"): [0.0033004209102576545],
            ("batched_fused_uvm", "table_wise"): [0.07797689998851104],
            ("batched_fused_uvm_caching", "table_wise"): [0.020502698518924556],
            ("dense", "row_wise"): [0.003239667649139244, 0.003239667649139244],
            ("batched_dense", "row_wise"): [0.003221441670803721, 0.003221441670803721],
            ("batched_fused", "row_wise"): [0.003200178029412277, 0.003200178029412277],
            ("sparse", "row_wise"): [0.003239667649139244, 0.003239667649139244],
            ("batched_fused_uvm", "row_wise"): [
                0.04057790718826594,
                0.04057790718826594,
            ],
            ("batched_fused_uvm_caching", "row_wise"): [
                0.011840806453472696,
                0.011840806453472696,
            ],
            ("dense", "column_wise"): [0.0033004209102576545],
            ("batched_dense", "column_wise"): [0.0032639689535866085],
            ("batched_fused", "column_wise"): [0.003221441670803721],
            ("sparse", "column_wise"): [0.0033004209102576545],
            ("batched_fused_uvm", "column_wise"): [0.07797689998851104],
            ("batched_fused_uvm_caching", "column_wise"): [0.020502698518924556],
            ("dense", "table_column_wise"): [0.0033004209102576545],
            ("batched_dense", "table_column_wise"): [0.0032639689535866085],
            ("batched_fused", "table_column_wise"): [0.003221441670803721],
            ("sparse", "table_column_wise"): [0.0033004209102576545],
            ("batched_fused_uvm", "table_column_wise"): [0.07797689998851104],
            ("batched_fused_uvm_caching", "table_column_wise"): [0.020502698518924556],
            ("dense", "table_row_wise"): [0.0033032459368996605, 0.0033032459368996605],
            ("batched_dense", "table_row_wise"): [
                0.0032850199585641375,
                0.0032850199585641375,
            ],
            ("batched_fused", "table_row_wise"): [
                0.0032637563171726935,
                0.0032637563171726935,
            ],
            ("sparse", "table_row_wise"): [
                0.0033032459368996605,
                0.0033032459368996605,
            ],
            ("batched_fused_uvm", "table_row_wise"): [
                0.040641485476026355,
                0.040641485476026355,
            ],
            ("batched_fused_uvm_caching", "table_row_wise"): [
                0.011904384741233112,
                0.011904384741233112,
            ],
        }
        # Re-key the actual results the same way for a direct comparison.
        perfs = {
            (
                sharding_option.compute_kernel,
                sharding_option.sharding_type,
            ): [shard.perf for shard in sharding_option.shards]
            for sharding_option in sharding_options
        }
        self.assertEqual(expected_perfs, perfs)
| [
"torchrec.distributed.test_utils.test_model.TestSparseNN",
"torchrec.distributed.planner.shard_estimators.EmbeddingPerfEstimator",
"torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder",
"torchrec.distributed.planner.enumerators.EmbeddingEnumerator",
"torchrec.modules.embedding_configs.EmbeddingB... | [((871, 916), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': '(2)', 'compute_device': '"""cuda"""'}), "(world_size=2, compute_device='cuda')\n", (879, 916), False, 'from torchrec.distributed.planner.types import Topology\n'), ((942, 983), 'torchrec.distributed.planner.shard_estimators.EmbeddingPerfEstimator', 'EmbeddingPerfEstimator', ([], {'topology': 'topology'}), '(topology=topology)\n', (964, 983), False, 'from torchrec.distributed.planner.shard_estimators import EmbeddingPerfEstimator\n'), ((1010, 1074), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology', 'estimator': 'self.estimator'}), '(topology=topology, estimator=self.estimator)\n', (1029, 1074), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((1377, 1424), 'torchrec.distributed.test_utils.test_model.TestSparseNN', 'TestSparseNN', ([], {'tables': 'tables', 'weighted_tables': '[]'}), '(tables=tables, weighted_tables=[])\n', (1389, 1424), False, 'from torchrec.distributed.test_utils.test_model import TestSparseNN\n'), ((1170, 1275), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_0"""', 'feature_names': "['feature_0']"}), "(num_embeddings=100, embedding_dim=10, name='table_0',\n feature_names=['feature_0'])\n", (1188, 1275), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1581, 1612), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (1610, 1612), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import unittest
from typing import Dict, Any, List
import torch
import torch.distributed as dist
from torch.autograd import Variable
from torch.distributed._shard import sharded_tensor, sharding_spec
from torchrec.optim.keyed import (
CombinedOptimizer,
KeyedOptimizer,
OptimizerWrapper,
KeyedOptimizerWrapper,
)
from torchrec.test_utils import get_free_port
class TestKeyedOptimizer(unittest.TestCase):
    """Tests for KeyedOptimizer state_dict round-tripping, state-key
    validation, init_state, and pickling."""

    def _assert_state_dict_equals(
        self, dict1: Dict[str, Any], dict2: Dict[str, Any]
    ) -> None:
        # Compare param_groups and plain state directly; tensor-valued state
        # (including the local shards of a ShardedTensor) needs assert_close.
        self.assertEqual(dict1["param_groups"], dict2["param_groups"])
        self.assertEqual(
            dict1["state"]["param_2"],
            dict2["state"]["param_2"],
        )
        torch.testing.assert_close(
            dict1["state"]["param_1"]["tensor"],
            dict2["state"]["param_1"]["tensor"],
        )
        torch.testing.assert_close(
            dict1["state"]["param_1"]["sharded_tensor"].local_shards()[0].tensor,
            dict2["state"]["param_1"]["sharded_tensor"].local_shards()[0].tensor,
        )
    def test_load_state_dict(self) -> None:
        # A single-rank gloo process group is needed because the optimizer
        # state includes a ShardedTensor.
        os.environ["MASTER_ADDR"] = str("localhost")
        os.environ["MASTER_PORT"] = str(get_free_port())
        dist.init_process_group("gloo", rank=0, world_size=1)
        # Set up example KeyedOptimizer.
        param_1_t, param_2_t = torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0])
        param_1, param_2 = Variable(param_1_t), Variable(param_2_t)
        keyed_optimizer = KeyedOptimizer(
            {"param_1": param_1, "param_2": param_2},
            {
                param_1: {
                    "one": 1.0,
                    "tensor": torch.tensor([5.0, 6.0]),
                    "sharded_tensor": sharded_tensor.full(
                        # pyre-ignore [28]
                        sharding_spec.ChunkShardingSpec(
                            dim=0, placements=["rank:0/cpu"]
                        ),
                        (4,),
                        fill_value=1.0,
                    ),
                },
                param_2: {"two": 2.0},
            },
            [
                {
                    "params": [param_1],
                    "param_group_val_0": 3.0,
                    "param_group_val_1": 4.0,
                },
                {
                    "params": [param_2],
                    "param_group_val_0": 5.0,
                    "param_group_val_1": 6.0,
                },
            ],
        )
        keyed_optimizer.save_param_groups(True)
        # Assert state_dict is as expected.
        state: Dict[str, Any] = {
            "param_1": {
                "one": 1.0,
                "tensor": torch.tensor([5.0, 6.0]),
                "sharded_tensor": sharded_tensor.full(
                    # pyre-ignore [28]
                    sharding_spec.ChunkShardingSpec(dim=0, placements=["rank:0/cpu"]),
                    (4,),
                    fill_value=1.0,
                ),
            },
            "param_2": {"two": 2.0},
        }
        param_groups: List[Dict[str, Any]] = [
            {
                "params": ["param_1"],
                "param_group_val_0": 3.0,
                "param_group_val_1": 4.0,
            },
            {
                "params": ["param_2"],
                "param_group_val_0": 5.0,
                "param_group_val_1": 6.0,
            },
        ]
        expected_state_dict = {
            "state": state,
            "param_groups": param_groups,
        }
        self._assert_state_dict_equals(
            expected_state_dict, keyed_optimizer.state_dict()
        )
        # Modify state dict and call load_state_dict.
        # pyre-ignore [6]
        expected_state_dict["state"]["param_1"]["one"] = 10.0
        # pyre-ignore [6]
        expected_state_dict["state"]["param_1"]["tensor"] = torch.tensor([50.0, 60.0])
        # pyre-ignore [6]
        expected_state_dict["state"]["param_1"]["sharded_tensor"] = sharded_tensor.full(
            # pyre-ignore [28]
            sharding_spec.ChunkShardingSpec(dim=0, placements=["rank:0/cpu"]),
            (4,),
            fill_value=10.0,
        )
        # pyre-ignore [6]
        expected_state_dict["param_groups"][0]["param_group_val_0"] = 8.0
        # pyre-ignore [6]
        expected_state_dict["param_groups"][1]["param_group_val_1"] = 9.0
        keyed_optimizer.load_state_dict(expected_state_dict)
        self._assert_state_dict_equals(
            expected_state_dict, keyed_optimizer.state_dict()
        )
        dist.destroy_process_group()
    def test_non_param_state_key(self) -> None:
        # State keys that are not registered params must be rejected.
        with self.assertRaisesRegex(ValueError, "All state keys must be params."):
            param_1_t = torch.tensor([1.0, 2.0])
            param_1 = Variable(param_1_t)
            KeyedOptimizer(
                {"param_1": param_1},
                {param_1: 1.0, "non_param_state_key": 2.0},
                [{"params": [param_1], "param_group_val_0": 3.0}],
            )
    def test_init_state(self) -> None:
        # init_state({"sparse"}) should create grads for all params and a
        # sparse grad only for the params named in the argument.
        dense = torch.nn.Parameter(torch.ones((2, 3), dtype=torch.float))
        sparse = torch.nn.Parameter(torch.ones((1, 4), dtype=torch.float))
        opt = KeyedOptimizerWrapper(
            {"dense": dense, "sparse": sparse},
            lambda params: torch.optim.SGD(params, lr=0.1),
        )
        opt.init_state({"sparse"})
        self.assertTrue(dense.grad is not None)
        self.assertFalse(dense.grad.is_sparse)
        self.assertTrue("momentum_buffer" in opt.state_dict()["state"]["dense"])
        self.assertTrue(sparse.grad is not None)
        self.assertTrue(sparse.grad.is_sparse)
        self.assertTrue("momentum_buffer" in opt.state_dict()["state"]["sparse"])
    def test_pickle(self) -> None:
        # An initialized KeyedOptimizerWrapper must survive a torch.save /
        # torch.load round trip with an identical state_dict.
        dense = torch.nn.Parameter(torch.ones((2, 3), dtype=torch.float))
        sparse = torch.nn.Parameter(torch.ones((1, 4), dtype=torch.float))
        opt = KeyedOptimizerWrapper(
            {"dense": dense, "sparse": sparse},
            lambda params: torch.optim.SGD(params, lr=0.1),
        )
        opt.init_state({"sparse"})
        bytesIO = io.BytesIO()
        torch.save(opt, bytesIO)
        bytesIO.seek(0)
        reload_opt = torch.load(bytesIO)
        for k in reload_opt.state_dict():
            self.assertEqual(
                opt.state_dict()[k],
                reload_opt.state_dict()[k],
            )
class TestCombinedOptimizer(unittest.TestCase):
    """Tests for CombinedOptimizer: pickling and state_dict loading must
    propagate to the wrapped KeyedOptimizers (with key prefixing)."""

    def test_pickle(self) -> None:
        # Set up example KeyedOptimizer 1.
        param_1_t = torch.tensor([1.0, 2.0])
        param_1 = Variable(param_1_t)
        keyed_optimizer_1 = KeyedOptimizer(
            {"param_1": param_1},
            {param_1: {"one": 1.0}},
            [{"params": [param_1], "param_group_val_0": 2.0}],
        )
        # Set up example KeyedOptimizer 2.
        param_2_t = torch.tensor([-1.0, -2.0])
        param_2 = Variable(param_2_t)
        keyed_optimizer_2 = KeyedOptimizer(
            {"param_2": param_2},
            {param_2: {"two": -1.0}},
            [{"params": [param_2], "param_group_val_0": -2.0}],
        )
        combined_optimizer = CombinedOptimizer(
            [("ko1", keyed_optimizer_1), ("", keyed_optimizer_2)]
        )
        # Round-trip through torch.save / torch.load and compare state_dicts.
        bytesIO = io.BytesIO()
        torch.save(combined_optimizer, bytesIO)
        bytesIO.seek(0)
        reload_combined_optimizer = torch.load(bytesIO)
        for k in reload_combined_optimizer.state_dict():
            self.assertEqual(
                combined_optimizer.state_dict()[k],
                reload_combined_optimizer.state_dict()[k],
            )
    def test_load_state_dict(self) -> None:
        # Set up example KeyedOptimizer 1.
        param_1_t = torch.tensor([1.0, 2.0])
        param_1 = Variable(param_1_t)
        keyed_optimizer_1 = KeyedOptimizer(
            {"param_1": param_1},
            {param_1: {"one": 1.0}},
            [{"params": [param_1], "param_group_val_0": 2.0}],
        )
        # Set up example KeyedOptimizer 2.
        param_2_t = torch.tensor([-1.0, -2.0])
        param_2 = Variable(param_2_t)
        keyed_optimizer_2 = KeyedOptimizer(
            {"param_2": param_2},
            {param_2: {"two": -1.0}},
            [{"params": [param_2], "param_group_val_0": -2.0}],
        )
        combined_optimizer = CombinedOptimizer(
            [("ko1", keyed_optimizer_1), ("", keyed_optimizer_2)]
        )
        combined_optimizer.save_param_groups(True)
        # Mutate the combined state dict: "ko1."-prefixed keys target the
        # first optimizer, unprefixed keys target the second.
        combined_optimizer_state_dict = combined_optimizer.state_dict()
        combined_optimizer_state_dict["state"]["ko1.param_1"] = {"one": 999}
        combined_optimizer_state_dict["state"]["param_2"] = {"two": 998}
        combined_optimizer_state_dict["param_groups"][0]["param_group_val_0"] = 997
        combined_optimizer_state_dict["param_groups"][1]["param_group_val_0"] = 996
        combined_optimizer.load_state_dict(combined_optimizer_state_dict)
        # Check that optimizers in the combined optimizer have their state and
        # param_groups updated.
        self.assertEqual(keyed_optimizer_1.state[param_1], {"one": 999})
        self.assertEqual(keyed_optimizer_2.state[param_2], {"two": 998})
        # pyre-ignore[16]
        self.assertEqual(keyed_optimizer_1.param_groups[0]["param_group_val_0"], 997)
        self.assertEqual(keyed_optimizer_2.param_groups[0]["param_group_val_0"], 996)
class TestOptimizerWrapper(unittest.TestCase):
    """Loading a mutated state_dict through OptimizerWrapper must update both
    the wrapper and the wrapped KeyedOptimizer."""

    def test_load_state_dict(self) -> None:
        wrapped_param_t = torch.tensor([1.0, 2.0])
        wrapped_param = Variable(wrapped_param_t)
        keyed_optimizer = KeyedOptimizer(
            {"param_1": wrapped_param},
            {wrapped_param: {"one": 1.0}},
            [{"params": [wrapped_param], "param_group_val_0": 2.0}],
        )
        optimizer_wrapper = OptimizerWrapper(keyed_optimizer)
        optimizer_wrapper.save_param_groups(True)

        # Mutate the wrapper's state_dict and load it back.
        wrapper_state_dict = optimizer_wrapper.state_dict()
        wrapper_state_dict["state"]["param_1"] = {"one": 999}
        wrapper_state_dict["param_groups"][0]["param_group_val_0"] = 998
        optimizer_wrapper.load_state_dict(wrapper_state_dict)

        # Check that both keyed_optimizer and optimizer_wrapper have their
        # state and param_groups updated.
        self.assertEqual(keyed_optimizer.state[wrapped_param], {"one": 999})
        self.assertEqual(optimizer_wrapper.state[wrapped_param], {"one": 999})
        # pyre-ignore[16]
        self.assertEqual(keyed_optimizer.param_groups[0]["param_group_val_0"], 998)
        self.assertEqual(optimizer_wrapper.param_groups[0]["param_group_val_0"], 998)
| [
"torchrec.optim.keyed.KeyedOptimizer",
"torchrec.optim.keyed.OptimizerWrapper",
"torchrec.test_utils.get_free_port",
"torchrec.optim.keyed.CombinedOptimizer"
] | [((977, 1082), 'torch.testing.assert_close', 'torch.testing.assert_close', (["dict1['state']['param_1']['tensor']", "dict2['state']['param_1']['tensor']"], {}), "(dict1['state']['param_1']['tensor'], dict2[\n 'state']['param_1']['tensor'])\n", (1003, 1082), False, 'import torch\n'), ((1487, 1540), 'torch.distributed.init_process_group', 'dist.init_process_group', (['"""gloo"""'], {'rank': '(0)', 'world_size': '(1)'}), "('gloo', rank=0, world_size=1)\n", (1510, 1540), True, 'import torch.distributed as dist\n'), ((4120, 4146), 'torch.tensor', 'torch.tensor', (['[50.0, 60.0]'], {}), '([50.0, 60.0])\n', (4132, 4146), False, 'import torch\n'), ((4811, 4839), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (4837, 4839), True, 'import torch.distributed as dist\n'), ((6399, 6411), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6409, 6411), False, 'import io\n'), ((6420, 6444), 'torch.save', 'torch.save', (['opt', 'bytesIO'], {}), '(opt, bytesIO)\n', (6430, 6444), False, 'import torch\n'), ((6490, 6509), 'torch.load', 'torch.load', (['bytesIO'], {}), '(bytesIO)\n', (6500, 6509), False, 'import torch\n'), ((6826, 6850), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (6838, 6850), False, 'import torch\n'), ((6869, 6888), 'torch.autograd.Variable', 'Variable', (['param_1_t'], {}), '(param_1_t)\n', (6877, 6888), False, 'from torch.autograd import Variable\n'), ((6917, 7034), 'torchrec.optim.keyed.KeyedOptimizer', 'KeyedOptimizer', (["{'param_1': param_1}", "{param_1: {'one': 1.0}}", "[{'params': [param_1], 'param_group_val_0': 2.0}]"], {}), "({'param_1': param_1}, {param_1: {'one': 1.0}}, [{'params': [\n param_1], 'param_group_val_0': 2.0}])\n", (6931, 7034), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer, OptimizerWrapper, KeyedOptimizerWrapper\n'), ((7141, 7167), 'torch.tensor', 'torch.tensor', (['[-1.0, -2.0]'], {}), '([-1.0, -2.0])\n', (7153, 7167), False, 'import torch\n'), 
((7186, 7205), 'torch.autograd.Variable', 'Variable', (['param_2_t'], {}), '(param_2_t)\n', (7194, 7205), False, 'from torch.autograd import Variable\n'), ((7234, 7352), 'torchrec.optim.keyed.KeyedOptimizer', 'KeyedOptimizer', (["{'param_2': param_2}", "{param_2: {'two': -1.0}}", "[{'params': [param_2], 'param_group_val_0': -2.0}]"], {}), "({'param_2': param_2}, {param_2: {'two': -1.0}}, [{'params':\n [param_2], 'param_group_val_0': -2.0}])\n", (7248, 7352), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer, OptimizerWrapper, KeyedOptimizerWrapper\n'), ((7426, 7498), 'torchrec.optim.keyed.CombinedOptimizer', 'CombinedOptimizer', (["[('ko1', keyed_optimizer_1), ('', keyed_optimizer_2)]"], {}), "([('ko1', keyed_optimizer_1), ('', keyed_optimizer_2)])\n", (7443, 7498), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer, OptimizerWrapper, KeyedOptimizerWrapper\n'), ((7540, 7552), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (7550, 7552), False, 'import io\n'), ((7561, 7600), 'torch.save', 'torch.save', (['combined_optimizer', 'bytesIO'], {}), '(combined_optimizer, bytesIO)\n', (7571, 7600), False, 'import torch\n'), ((7661, 7680), 'torch.load', 'torch.load', (['bytesIO'], {}), '(bytesIO)\n', (7671, 7680), False, 'import torch\n'), ((8002, 8026), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (8014, 8026), False, 'import torch\n'), ((8045, 8064), 'torch.autograd.Variable', 'Variable', (['param_1_t'], {}), '(param_1_t)\n', (8053, 8064), False, 'from torch.autograd import Variable\n'), ((8093, 8210), 'torchrec.optim.keyed.KeyedOptimizer', 'KeyedOptimizer', (["{'param_1': param_1}", "{param_1: {'one': 1.0}}", "[{'params': [param_1], 'param_group_val_0': 2.0}]"], {}), "({'param_1': param_1}, {param_1: {'one': 1.0}}, [{'params': [\n param_1], 'param_group_val_0': 2.0}])\n", (8107, 8210), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer, OptimizerWrapper, 
KeyedOptimizerWrapper\n'), ((8317, 8343), 'torch.tensor', 'torch.tensor', (['[-1.0, -2.0]'], {}), '([-1.0, -2.0])\n', (8329, 8343), False, 'import torch\n'), ((8362, 8381), 'torch.autograd.Variable', 'Variable', (['param_2_t'], {}), '(param_2_t)\n', (8370, 8381), False, 'from torch.autograd import Variable\n'), ((8410, 8528), 'torchrec.optim.keyed.KeyedOptimizer', 'KeyedOptimizer', (["{'param_2': param_2}", "{param_2: {'two': -1.0}}", "[{'params': [param_2], 'param_group_val_0': -2.0}]"], {}), "({'param_2': param_2}, {param_2: {'two': -1.0}}, [{'params':\n [param_2], 'param_group_val_0': -2.0}])\n", (8424, 8528), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer, OptimizerWrapper, KeyedOptimizerWrapper\n'), ((8602, 8674), 'torchrec.optim.keyed.CombinedOptimizer', 'CombinedOptimizer', (["[('ko1', keyed_optimizer_1), ('', keyed_optimizer_2)]"], {}), "([('ko1', keyed_optimizer_1), ('', keyed_optimizer_2)])\n", (8619, 8674), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer, OptimizerWrapper, KeyedOptimizerWrapper\n'), ((9783, 9807), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (9795, 9807), False, 'import torch\n'), ((9826, 9845), 'torch.autograd.Variable', 'Variable', (['param_1_t'], {}), '(param_1_t)\n', (9834, 9845), False, 'from torch.autograd import Variable\n'), ((9872, 9989), 'torchrec.optim.keyed.KeyedOptimizer', 'KeyedOptimizer', (["{'param_1': param_1}", "{param_1: {'one': 1.0}}", "[{'params': [param_1], 'param_group_val_0': 2.0}]"], {}), "({'param_1': param_1}, {param_1: {'one': 1.0}}, [{'params': [\n param_1], 'param_group_val_0': 2.0}])\n", (9886, 9989), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer, OptimizerWrapper, KeyedOptimizerWrapper\n'), ((10060, 10093), 'torchrec.optim.keyed.OptimizerWrapper', 'OptimizerWrapper', (['keyed_optimizer'], {}), '(keyed_optimizer)\n', (10076, 10093), False, 'from torchrec.optim.keyed import CombinedOptimizer, 
KeyedOptimizer, OptimizerWrapper, KeyedOptimizerWrapper\n'), ((1462, 1477), 'torchrec.test_utils.get_free_port', 'get_free_port', ([], {}), '()\n', (1475, 1477), False, 'from torchrec.test_utils import get_free_port\n'), ((1614, 1638), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (1626, 1638), False, 'import torch\n'), ((1640, 1664), 'torch.tensor', 'torch.tensor', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (1652, 1664), False, 'import torch\n'), ((1692, 1711), 'torch.autograd.Variable', 'Variable', (['param_1_t'], {}), '(param_1_t)\n', (1700, 1711), False, 'from torch.autograd import Variable\n'), ((1713, 1732), 'torch.autograd.Variable', 'Variable', (['param_2_t'], {}), '(param_2_t)\n', (1721, 1732), False, 'from torch.autograd import Variable\n'), ((4305, 4370), 'torch.distributed._shard.sharding_spec.ChunkShardingSpec', 'sharding_spec.ChunkShardingSpec', ([], {'dim': '(0)', 'placements': "['rank:0/cpu']"}), "(dim=0, placements=['rank:0/cpu'])\n", (4336, 4370), False, 'from torch.distributed._shard import sharded_tensor, sharding_spec\n'), ((4996, 5020), 'torch.tensor', 'torch.tensor', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (5008, 5020), False, 'import torch\n'), ((5043, 5062), 'torch.autograd.Variable', 'Variable', (['param_1_t'], {}), '(param_1_t)\n', (5051, 5062), False, 'from torch.autograd import Variable\n'), ((5075, 5211), 'torchrec.optim.keyed.KeyedOptimizer', 'KeyedOptimizer', (["{'param_1': param_1}", "{param_1: 1.0, 'non_param_state_key': 2.0}", "[{'params': [param_1], 'param_group_val_0': 3.0}]"], {}), "({'param_1': param_1}, {param_1: 1.0, 'non_param_state_key': \n 2.0}, [{'params': [param_1], 'param_group_val_0': 3.0}])\n", (5089, 5211), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer, OptimizerWrapper, KeyedOptimizerWrapper\n'), ((5345, 5382), 'torch.ones', 'torch.ones', (['(2, 3)'], {'dtype': 'torch.float'}), '((2, 3), dtype=torch.float)\n', (5355, 5382), False, 'import torch\n'), ((5420, 5457), 
'torch.ones', 'torch.ones', (['(1, 4)'], {'dtype': 'torch.float'}), '((1, 4), dtype=torch.float)\n', (5430, 5457), False, 'import torch\n'), ((6076, 6113), 'torch.ones', 'torch.ones', (['(2, 3)'], {'dtype': 'torch.float'}), '((2, 3), dtype=torch.float)\n', (6086, 6113), False, 'import torch\n'), ((6151, 6188), 'torch.ones', 'torch.ones', (['(1, 4)'], {'dtype': 'torch.float'}), '((1, 4), dtype=torch.float)\n', (6161, 6188), False, 'import torch\n'), ((2956, 2980), 'torch.tensor', 'torch.tensor', (['[5.0, 6.0]'], {}), '([5.0, 6.0])\n', (2968, 2980), False, 'import torch\n'), ((5571, 5602), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.1)'}), '(params, lr=0.1)\n', (5586, 5602), False, 'import torch\n'), ((6302, 6333), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.1)'}), '(params, lr=0.1)\n', (6317, 6333), False, 'import torch\n'), ((1932, 1956), 'torch.tensor', 'torch.tensor', (['[5.0, 6.0]'], {}), '([5.0, 6.0])\n', (1944, 1956), False, 'import torch\n'), ((3096, 3161), 'torch.distributed._shard.sharding_spec.ChunkShardingSpec', 'sharding_spec.ChunkShardingSpec', ([], {'dim': '(0)', 'placements': "['rank:0/cpu']"}), "(dim=0, placements=['rank:0/cpu'])\n", (3127, 3161), False, 'from torch.distributed._shard import sharded_tensor, sharding_spec\n'), ((2084, 2149), 'torch.distributed._shard.sharding_spec.ChunkShardingSpec', 'sharding_spec.ChunkShardingSpec', ([], {'dim': '(0)', 'placements': "['rank:0/cpu']"}), "(dim=0, placements=['rank:0/cpu'])\n", (2115, 2149), False, 'from torch.distributed._shard import sharded_tensor, sharding_spec\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import copy
import os
import tempfile
import unittest
import uuid
from typing import List
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.distributed._shard.sharded_tensor import ShardedTensor
from torch.distributed.launcher.api import elastic_launch, LaunchConfig
from torchrec import EmbeddingBagCollection
from torchrec.datasets.criteo import DEFAULT_INT_NAMES
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.test_utils import skip_if_asan
from torchrecipes.fb.utils.checkpoint import setup_checkpointing
from torchrecipes.rec.accelerators.torchrec import TorchrecStrategy
from torchrecipes.rec.datamodules.random_rec_datamodule import RandomRecDataModule
from torchrecipes.rec.modules.lightning_dlrm import LightningDLRM
def _remove_prefix(origin_string: str, prefix: str) -> str:
if origin_string.startswith(prefix):
return origin_string[len(prefix) :]
else:
return origin_string[:]
class TestLightningDLRM(unittest.TestCase):
@classmethod
def _run_trainer(cls) -> None:
torch.manual_seed(int(os.environ["RANK"]))
num_embeddings = 100
embedding_dim = 12
num_dense = 50
batch_size = 3
eb1_config = EmbeddingBagConfig(
name="t1",
embedding_dim=embedding_dim,
num_embeddings=num_embeddings,
feature_names=["f1", "f3"],
)
eb2_config = EmbeddingBagConfig(
name="t2",
embedding_dim=embedding_dim,
num_embeddings=num_embeddings,
feature_names=["f2"],
)
eb_configs = [eb1_config, eb2_config]
lit_models = []
datamodules = []
for _ in range(2):
ebc = EmbeddingBagCollection(tables=eb_configs, device=torch.device("meta"))
lit_model = LightningDLRM(
ebc,
batch_size=batch_size,
dense_in_features=num_dense,
dense_arch_layer_sizes=[20, embedding_dim],
over_arch_layer_sizes=[5, 1],
)
datamodule = RandomRecDataModule(manual_seed=564733621, num_dense=num_dense)
lit_models.append(lit_model)
datamodules.append(datamodule)
lit_model1, lit_model2 = lit_models
dm1, dm2 = datamodules
# Load m1 state dicts into m2
lit_model2.model.load_state_dict(lit_model1.model.state_dict())
optim1 = lit_model1.configure_optimizers()
optim2 = lit_model2.configure_optimizers()
optim2.load_state_dict(optim1.state_dict())
# train model 1 using lightning
trainer = pl.Trainer(
max_epochs=1,
limit_train_batches=5,
limit_val_batches=5,
limit_test_batches=5,
strategy=TorchrecStrategy(),
accelerator=("gpu" if torch.cuda.is_available() else "cpu"),
devices=os.environ.get("LOCAL_WORLD_SIZE", 1),
enable_model_summary=False,
logger=False,
enable_checkpointing=False,
)
trainer.fit(lit_model1, datamodule=dm1)
# train model 2 manually
train_dataiterator = iter(dm2.train_dataloader())
for _ in range(5):
batch = next(train_dataiterator).to(lit_model2.device)
optim2.zero_grad()
loss, _ = lit_model2.model(batch)
loss.backward()
optim2.step()
# assert parameters equal
sd1 = lit_model1.model.state_dict()
for name, value in lit_model2.model.state_dict().items():
if isinstance(value, ShardedTensor):
assert torch.equal(
value.local_shards()[0].tensor, sd1[name].local_shards()[0].tensor
)
else:
assert torch.equal(sd1[name], value)
# assert model evaluation equal
test_dataiterator = iter(dm2.test_dataloader())
with torch.no_grad():
for _ in range(10):
batch = next(test_dataiterator).to(lit_model2.device)
_loss_1, (_loss_1_detached, logits_1, _labels_1) = lit_model1.model(
batch
)
_loss_2, (_loss_2_detached, logits_2, _labels_2) = lit_model2.model(
batch
)
assert torch.equal(logits_1, logits_2)
@skip_if_asan
def test_lit_trainer_equivalent_to_non_lit(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
lc = LaunchConfig(
min_nodes=1,
max_nodes=1,
nproc_per_node=2,
run_id=str(uuid.uuid4()),
rdzv_backend="c10d",
rdzv_endpoint=os.path.join(tmpdir, "rdzv"),
rdzv_configs={"store_type": "file"},
start_method="spawn",
monitor_interval=1,
max_restarts=0,
)
elastic_launch(config=lc, entrypoint=self._run_trainer)()
@classmethod
def _assert_model_of_ckpt(
cls,
eb_configs: List[EmbeddingBagConfig],
dense_arch_layer_sizes: str,
over_arch_layer_sizes: str,
checkpoint: ModelCheckpoint,
batch_size: int,
) -> None:
model1 = LightningDLRM(
EmbeddingBagCollection(tables=eb_configs, device=torch.device("meta")),
batch_size=batch_size,
dense_in_features=len(DEFAULT_INT_NAMES),
dense_arch_layer_sizes=list(map(int, dense_arch_layer_sizes.split(","))),
over_arch_layer_sizes=list(map(int, over_arch_layer_sizes.split(","))),
)
model2 = LightningDLRM(
EmbeddingBagCollection(tables=eb_configs, device=torch.device("meta")),
batch_size=batch_size,
dense_in_features=len(DEFAULT_INT_NAMES),
dense_arch_layer_sizes=list(map(int, dense_arch_layer_sizes.split(","))),
over_arch_layer_sizes=list(map(int, over_arch_layer_sizes.split(","))),
)
datamodule_1 = RandomRecDataModule(
batch_size=batch_size, hash_size=64, num_dense=len(DEFAULT_INT_NAMES)
)
datamodule_1.setup()
datamodule_2 = RandomRecDataModule(
batch_size=batch_size, hash_size=64, num_dense=len(DEFAULT_INT_NAMES)
)
datamodule_2.setup()
trainer = pl.Trainer(
logger=False,
max_epochs=3,
callbacks=[checkpoint],
limit_train_batches=5,
limit_val_batches=5,
limit_test_batches=5,
strategy=TorchrecStrategy(),
enable_model_summary=False,
)
trainer.fit(model1, datamodule=datamodule_1)
cb_callback = trainer.checkpoint_callback
assert cb_callback is not None
last_checkpoint_path = cb_callback.best_model_path
# second run
cp_std = torch.load(last_checkpoint_path)["state_dict"]
test_std = {}
for name, value in cp_std.items():
updated_name = _remove_prefix(name, "model.")
test_std[updated_name] = copy.deepcopy(value)
# load state dict from the chopped state_dict
# pyre-fixme[6] Expected `collections.OrderedDict[str, torch.Tensor]` for 1st positional only
model2.model.load_state_dict(test_std)
# assert parameters equal
for w0, w1 in zip(model1.model.parameters(), model2.model.parameters()):
assert w0.eq(w1).all()
# assert state_dict equal
sd1 = model1.model.state_dict()
for name, value in model2.model.state_dict().items():
if isinstance(value, ShardedTensor):
assert torch.equal(
value.local_shards()[0].tensor,
sd1[name].local_shards()[0].tensor,
)
else:
assert torch.equal(sd1[name], value)
@classmethod
def _test_checkpointing(cls) -> None:
batch_size = 32
datamodule = RandomRecDataModule(
batch_size=batch_size, hash_size=64, num_dense=len(DEFAULT_INT_NAMES)
)
keys = datamodule.keys
embedding_dim = 8
eb_configs = [
EmbeddingBagConfig(
name=f"t_{feature_name}",
embedding_dim=embedding_dim,
num_embeddings=64,
feature_names=[feature_name],
)
for feature_idx, feature_name in enumerate(keys)
]
over_arch_layer_sizes = "8,1"
dense_arch_layer_sizes = "8,8"
checkpoint_1 = setup_checkpointing(
model=LightningDLRM(
EmbeddingBagCollection(tables=eb_configs, device=torch.device("meta")),
batch_size=batch_size,
dense_in_features=len(DEFAULT_INT_NAMES),
dense_arch_layer_sizes=list(
map(int, dense_arch_layer_sizes.split(","))
),
over_arch_layer_sizes=list(map(int, over_arch_layer_sizes.split(","))),
),
checkpoint_output_path=tempfile.mkdtemp(),
)
assert checkpoint_1 is not None
with tempfile.TemporaryDirectory() as tmpdir:
checkpoint_2 = ModelCheckpoint(dirpath=tmpdir, save_top_k=1)
cls._assert_model_of_ckpt(
eb_configs,
dense_arch_layer_sizes,
over_arch_layer_sizes,
checkpoint_1,
batch_size,
)
cls._assert_model_of_ckpt(
eb_configs,
dense_arch_layer_sizes,
over_arch_layer_sizes,
checkpoint_2,
batch_size,
)
@skip_if_asan
def test_checkpointing_function(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
lc = LaunchConfig(
min_nodes=1,
max_nodes=1,
nproc_per_node=2,
run_id=str(uuid.uuid4()),
rdzv_backend="c10d",
rdzv_endpoint=os.path.join(tmpdir, "rdzv"),
rdzv_configs={"store_type": "file"},
start_method="spawn",
monitor_interval=1,
max_restarts=0,
)
elastic_launch(config=lc, entrypoint=self._test_checkpointing)()
| [
"torchrec.modules.embedding_configs.EmbeddingBagConfig"
] | [((1494, 1616), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'embedding_dim', 'num_embeddings': 'num_embeddings', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=embedding_dim, num_embeddings=\n num_embeddings, feature_names=['f1', 'f3'])\n", (1512, 1616), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1692, 1808), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'embedding_dim', 'num_embeddings': 'num_embeddings', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=embedding_dim, num_embeddings=\n num_embeddings, feature_names=['f2'])\n", (1710, 1808), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((2099, 2247), 'torchrecipes.rec.modules.lightning_dlrm.LightningDLRM', 'LightningDLRM', (['ebc'], {'batch_size': 'batch_size', 'dense_in_features': 'num_dense', 'dense_arch_layer_sizes': '[20, embedding_dim]', 'over_arch_layer_sizes': '[5, 1]'}), '(ebc, batch_size=batch_size, dense_in_features=num_dense,\n dense_arch_layer_sizes=[20, embedding_dim], over_arch_layer_sizes=[5, 1])\n', (2112, 2247), False, 'from torchrecipes.rec.modules.lightning_dlrm import LightningDLRM\n'), ((2365, 2428), 'torchrecipes.rec.datamodules.random_rec_datamodule.RandomRecDataModule', 'RandomRecDataModule', ([], {'manual_seed': '(564733621)', 'num_dense': 'num_dense'}), '(manual_seed=564733621, num_dense=num_dense)\n', (2384, 2428), False, 'from torchrecipes.rec.datamodules.random_rec_datamodule import RandomRecDataModule\n'), ((4223, 4238), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4236, 4238), False, 'import torch\n'), ((4749, 4778), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (4776, 4778), False, 'import tempfile\n'), ((7209, 7241), 'torch.load', 'torch.load', (['last_checkpoint_path'], {}), 
'(last_checkpoint_path)\n', (7219, 7241), False, 'import torch\n'), ((7416, 7436), 'copy.deepcopy', 'copy.deepcopy', (['value'], {}), '(value)\n', (7429, 7436), False, 'import copy\n'), ((8522, 8648), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': 'f"""t_{feature_name}"""', 'embedding_dim': 'embedding_dim', 'num_embeddings': '(64)', 'feature_names': '[feature_name]'}), "(name=f't_{feature_name}', embedding_dim=embedding_dim,\n num_embeddings=64, feature_names=[feature_name])\n", (8540, 8648), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((9485, 9514), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9512, 9514), False, 'import tempfile\n'), ((9553, 9598), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'dirpath': 'tmpdir', 'save_top_k': '(1)'}), '(dirpath=tmpdir, save_top_k=1)\n', (9568, 9598), False, 'from pytorch_lightning.callbacks import ModelCheckpoint\n'), ((10120, 10149), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (10147, 10149), False, 'import tempfile\n'), ((3074, 3092), 'torchrecipes.rec.accelerators.torchrec.TorchrecStrategy', 'TorchrecStrategy', ([], {}), '()\n', (3090, 3092), False, 'from torchrecipes.rec.accelerators.torchrec import TorchrecStrategy\n'), ((3187, 3224), 'os.environ.get', 'os.environ.get', (['"""LOCAL_WORLD_SIZE"""', '(1)'], {}), "('LOCAL_WORLD_SIZE', 1)\n", (3201, 3224), False, 'import os\n'), ((4083, 4112), 'torch.equal', 'torch.equal', (['sd1[name]', 'value'], {}), '(sd1[name], value)\n', (4094, 4112), False, 'import torch\n'), ((4623, 4654), 'torch.equal', 'torch.equal', (['logits_1', 'logits_2'], {}), '(logits_1, logits_2)\n', (4634, 4654), False, 'import torch\n'), ((5238, 5293), 'torch.distributed.launcher.api.elastic_launch', 'elastic_launch', ([], {'config': 'lc', 'entrypoint': 'self._run_trainer'}), '(config=lc, entrypoint=self._run_trainer)\n', (5252, 
5293), False, 'from torch.distributed.launcher.api import elastic_launch, LaunchConfig\n'), ((6896, 6914), 'torchrecipes.rec.accelerators.torchrec.TorchrecStrategy', 'TorchrecStrategy', ([], {}), '()\n', (6912, 6914), False, 'from torchrecipes.rec.accelerators.torchrec import TorchrecStrategy\n'), ((8180, 8209), 'torch.equal', 'torch.equal', (['sd1[name]', 'value'], {}), '(sd1[name], value)\n', (8191, 8209), False, 'import torch\n'), ((9401, 9419), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (9417, 9419), False, 'import tempfile\n'), ((10609, 10671), 'torch.distributed.launcher.api.elastic_launch', 'elastic_launch', ([], {'config': 'lc', 'entrypoint': 'self._test_checkpointing'}), '(config=lc, entrypoint=self._test_checkpointing)\n', (10623, 10671), False, 'from torch.distributed.launcher.api import elastic_launch, LaunchConfig\n'), ((2053, 2073), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (2065, 2073), False, 'import torch\n'), ((3128, 3153), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3151, 3153), False, 'import torch\n'), ((5022, 5050), 'os.path.join', 'os.path.join', (['tmpdir', '"""rdzv"""'], {}), "(tmpdir, 'rdzv')\n", (5034, 5050), False, 'import os\n'), ((5647, 5667), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (5659, 5667), False, 'import torch\n'), ((6032, 6052), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (6044, 6052), False, 'import torch\n'), ((10393, 10421), 'os.path.join', 'os.path.join', (['tmpdir', '"""rdzv"""'], {}), "(tmpdir, 'rdzv')\n", (10405, 10421), False, 'import os\n'), ((4940, 4952), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4950, 4952), False, 'import uuid\n'), ((10311, 10323), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10321, 10323), False, 'import uuid\n'), ((9015, 9035), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (9027, 9035), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from functools import partial, update_wrapper
from typing import Callable, Dict, List, Type
import torch
import torch.distributed as dist
from torchrec.metrics.ne import compute_cross_entropy, compute_ne, NEMetric
from torchrec.metrics.rec_metric import RecComputeMode, RecMetric
from torchrec.metrics.tests.test_utils import (
rec_metric_value_test_helper,
rec_metric_value_test_launcher,
TestMetric,
)
WORLD_SIZE = 4
class TestNEMetric(TestMetric):
eta: float = 1e-12
@staticmethod
def _get_states(
labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> Dict[str, torch.Tensor]:
cross_entropy = compute_cross_entropy(
labels, predictions, weights, TestNEMetric.eta
)
cross_entropy_sum = torch.sum(cross_entropy)
weighted_num_samples = torch.sum(weights)
pos_labels = torch.sum(weights * labels)
neg_labels = torch.sum(weights * (1.0 - labels))
return {
"cross_entropy_sum": cross_entropy_sum,
"weighted_num_samples": weighted_num_samples,
"pos_labels": pos_labels,
"neg_labels": neg_labels,
"num_samples": torch.tensor(labels.size()).long(),
}
@staticmethod
def _compute(states: Dict[str, torch.Tensor]) -> torch.Tensor:
return compute_ne(
states["cross_entropy_sum"],
states["weighted_num_samples"],
pos_labels=states["pos_labels"],
neg_labels=states["neg_labels"],
eta=TestNEMetric.eta,
)
class NEMetricTest(unittest.TestCase):
target_clazz: Type[RecMetric] = NEMetric
target_compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION
task_name: str = "ne"
@staticmethod
def _test_ne(
target_clazz: Type[RecMetric],
target_compute_mode: RecComputeMode,
task_names: List[str],
fused_update_limit: int = 0,
compute_on_all_ranks: bool = False,
batch_window_size: int = 5,
) -> None:
rank = int(os.environ["RANK"])
world_size = int(os.environ["WORLD_SIZE"])
dist.init_process_group(
backend="gloo",
world_size=world_size,
rank=rank,
)
ne_metrics, test_metrics = rec_metric_value_test_helper(
target_clazz=target_clazz,
target_compute_mode=target_compute_mode,
test_clazz=TestNEMetric,
fused_update_limit=fused_update_limit,
compute_on_all_ranks=False,
world_size=world_size,
my_rank=rank,
task_names=task_names,
batch_window_size=batch_window_size,
)
if rank == 0:
for name in task_names:
assert torch.allclose(
ne_metrics[f"ne-{name}|lifetime_ne"], test_metrics[0][name]
)
assert torch.allclose(
ne_metrics[f"ne-{name}|window_ne"], test_metrics[1][name]
)
assert torch.allclose(
ne_metrics[f"ne-{name}|local_lifetime_ne"], test_metrics[2][name]
)
assert torch.allclose(
ne_metrics[f"ne-{name}|local_window_ne"], test_metrics[3][name]
)
dist.destroy_process_group()
_test_ne_large_window_size: Callable[..., None] = partial(
# pyre-fixme[16]: `Callable` has no attribute `__func__`.
_test_ne.__func__,
batch_window_size=10,
)
update_wrapper(_test_ne_large_window_size, _test_ne.__func__)
def test_ne_unfused(self) -> None:
rec_metric_value_test_launcher(
target_clazz=NEMetric,
target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION,
test_clazz=TestNEMetric,
task_names=["t1", "t2", "t3"],
fused_update_limit=0,
compute_on_all_ranks=False,
world_size=WORLD_SIZE,
entry_point=self._test_ne,
)
def test_ne_fused(self) -> None:
rec_metric_value_test_launcher(
target_clazz=NEMetric,
target_compute_mode=RecComputeMode.FUSED_TASKS_COMPUTATION,
test_clazz=TestNEMetric,
task_names=["t1", "t2", "t3"],
fused_update_limit=0,
compute_on_all_ranks=False,
world_size=WORLD_SIZE,
entry_point=self._test_ne,
)
def test_ne_update_fused(self) -> None:
rec_metric_value_test_launcher(
target_clazz=NEMetric,
target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION,
test_clazz=TestNEMetric,
task_names=["t1", "t2", "t3"],
fused_update_limit=5,
compute_on_all_ranks=False,
world_size=WORLD_SIZE,
entry_point=self._test_ne,
)
rec_metric_value_test_launcher(
target_clazz=NEMetric,
target_compute_mode=RecComputeMode.UNFUSED_TASKS_COMPUTATION,
test_clazz=TestNEMetric,
task_names=["t1", "t2", "t3"],
fused_update_limit=100,
compute_on_all_ranks=False,
world_size=WORLD_SIZE,
entry_point=self._test_ne_large_window_size,
)
# TODO(stellaya): support the usage of fused_tasks_computation and
# fused_update for the same RecMetric
# rec_metric_value_test_launcher(
# target_clazz=NEMetric,
# target_compute_mode=RecComputeMode.FUSED_TASKS_COMPUTATION,
# test_clazz=TestNEMetric,
# task_names=["t1", "t2", "t3"],
# fused_update_limit=5,
# compute_on_all_ranks=False,
# world_size=WORLD_SIZE,
# entry_point=self._test_ne,
# )
# rec_metric_value_test_launcher(
# target_clazz=NEMetric,
# target_compute_mode=RecComputeMode.FUSED_TASKS_COMPUTATION,
# test_clazz=TestNEMetric,
# task_names=["t1", "t2", "t3"],
# fused_update_limit=100,
# compute_on_all_ranks=False,
# world_size=WORLD_SIZE,
# entry_point=self._test_ne_large_window_size,
# )
| [
"torchrec.metrics.ne.compute_cross_entropy",
"torchrec.metrics.tests.test_utils.rec_metric_value_test_helper",
"torchrec.metrics.ne.compute_ne",
"torchrec.metrics.tests.test_utils.rec_metric_value_test_launcher"
] | [((3681, 3729), 'functools.partial', 'partial', (['_test_ne.__func__'], {'batch_window_size': '(10)'}), '(_test_ne.__func__, batch_window_size=10)\n', (3688, 3729), False, 'from functools import partial, update_wrapper\n'), ((3823, 3884), 'functools.update_wrapper', 'update_wrapper', (['_test_ne_large_window_size', '_test_ne.__func__'], {}), '(_test_ne_large_window_size, _test_ne.__func__)\n', (3837, 3884), False, 'from functools import partial, update_wrapper\n'), ((926, 995), 'torchrec.metrics.ne.compute_cross_entropy', 'compute_cross_entropy', (['labels', 'predictions', 'weights', 'TestNEMetric.eta'], {}), '(labels, predictions, weights, TestNEMetric.eta)\n', (947, 995), False, 'from torchrec.metrics.ne import compute_cross_entropy, compute_ne, NEMetric\n'), ((1046, 1070), 'torch.sum', 'torch.sum', (['cross_entropy'], {}), '(cross_entropy)\n', (1055, 1070), False, 'import torch\n'), ((1102, 1120), 'torch.sum', 'torch.sum', (['weights'], {}), '(weights)\n', (1111, 1120), False, 'import torch\n'), ((1142, 1169), 'torch.sum', 'torch.sum', (['(weights * labels)'], {}), '(weights * labels)\n', (1151, 1169), False, 'import torch\n'), ((1191, 1226), 'torch.sum', 'torch.sum', (['(weights * (1.0 - labels))'], {}), '(weights * (1.0 - labels))\n', (1200, 1226), False, 'import torch\n'), ((1604, 1772), 'torchrec.metrics.ne.compute_ne', 'compute_ne', (["states['cross_entropy_sum']", "states['weighted_num_samples']"], {'pos_labels': "states['pos_labels']", 'neg_labels': "states['neg_labels']", 'eta': 'TestNEMetric.eta'}), "(states['cross_entropy_sum'], states['weighted_num_samples'],\n pos_labels=states['pos_labels'], neg_labels=states['neg_labels'], eta=\n TestNEMetric.eta)\n", (1614, 1772), False, 'from torchrec.metrics.ne import compute_cross_entropy, compute_ne, NEMetric\n'), ((2412, 2485), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""gloo"""', 'world_size': 'world_size', 'rank': 'rank'}), "(backend='gloo', 
world_size=world_size, rank=rank)\n", (2435, 2485), True, 'import torch.distributed as dist\n'), ((2569, 2868), 'torchrec.metrics.tests.test_utils.rec_metric_value_test_helper', 'rec_metric_value_test_helper', ([], {'target_clazz': 'target_clazz', 'target_compute_mode': 'target_compute_mode', 'test_clazz': 'TestNEMetric', 'fused_update_limit': 'fused_update_limit', 'compute_on_all_ranks': '(False)', 'world_size': 'world_size', 'my_rank': 'rank', 'task_names': 'task_names', 'batch_window_size': 'batch_window_size'}), '(target_clazz=target_clazz, target_compute_mode\n =target_compute_mode, test_clazz=TestNEMetric, fused_update_limit=\n fused_update_limit, compute_on_all_ranks=False, world_size=world_size,\n my_rank=rank, task_names=task_names, batch_window_size=batch_window_size)\n', (2597, 2868), False, 'from torchrec.metrics.tests.test_utils import rec_metric_value_test_helper, rec_metric_value_test_launcher, TestMetric\n'), ((3597, 3625), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (3623, 3625), True, 'import torch.distributed as dist\n'), ((3933, 4222), 'torchrec.metrics.tests.test_utils.rec_metric_value_test_launcher', 'rec_metric_value_test_launcher', ([], {'target_clazz': 'NEMetric', 'target_compute_mode': 'RecComputeMode.UNFUSED_TASKS_COMPUTATION', 'test_clazz': 'TestNEMetric', 'task_names': "['t1', 't2', 't3']", 'fused_update_limit': '(0)', 'compute_on_all_ranks': '(False)', 'world_size': 'WORLD_SIZE', 'entry_point': 'self._test_ne'}), "(target_clazz=NEMetric, target_compute_mode=\n RecComputeMode.UNFUSED_TASKS_COMPUTATION, test_clazz=TestNEMetric,\n task_names=['t1', 't2', 't3'], fused_update_limit=0,\n compute_on_all_ranks=False, world_size=WORLD_SIZE, entry_point=self.\n _test_ne)\n", (3963, 4222), False, 'from torchrec.metrics.tests.test_utils import rec_metric_value_test_helper, rec_metric_value_test_launcher, TestMetric\n'), ((4358, 4645), 'torchrec.metrics.tests.test_utils.rec_metric_value_test_launcher', 
'rec_metric_value_test_launcher', ([], {'target_clazz': 'NEMetric', 'target_compute_mode': 'RecComputeMode.FUSED_TASKS_COMPUTATION', 'test_clazz': 'TestNEMetric', 'task_names': "['t1', 't2', 't3']", 'fused_update_limit': '(0)', 'compute_on_all_ranks': '(False)', 'world_size': 'WORLD_SIZE', 'entry_point': 'self._test_ne'}), "(target_clazz=NEMetric, target_compute_mode=\n RecComputeMode.FUSED_TASKS_COMPUTATION, test_clazz=TestNEMetric,\n task_names=['t1', 't2', 't3'], fused_update_limit=0,\n compute_on_all_ranks=False, world_size=WORLD_SIZE, entry_point=self.\n _test_ne)\n", (4388, 4645), False, 'from torchrec.metrics.tests.test_utils import rec_metric_value_test_helper, rec_metric_value_test_launcher, TestMetric\n'), ((4788, 5077), 'torchrec.metrics.tests.test_utils.rec_metric_value_test_launcher', 'rec_metric_value_test_launcher', ([], {'target_clazz': 'NEMetric', 'target_compute_mode': 'RecComputeMode.UNFUSED_TASKS_COMPUTATION', 'test_clazz': 'TestNEMetric', 'task_names': "['t1', 't2', 't3']", 'fused_update_limit': '(5)', 'compute_on_all_ranks': '(False)', 'world_size': 'WORLD_SIZE', 'entry_point': 'self._test_ne'}), "(target_clazz=NEMetric, target_compute_mode=\n RecComputeMode.UNFUSED_TASKS_COMPUTATION, test_clazz=TestNEMetric,\n task_names=['t1', 't2', 't3'], fused_update_limit=5,\n compute_on_all_ranks=False, world_size=WORLD_SIZE, entry_point=self.\n _test_ne)\n", (4818, 5077), False, 'from torchrec.metrics.tests.test_utils import rec_metric_value_test_helper, rec_metric_value_test_launcher, TestMetric\n'), ((5176, 5485), 'torchrec.metrics.tests.test_utils.rec_metric_value_test_launcher', 'rec_metric_value_test_launcher', ([], {'target_clazz': 'NEMetric', 'target_compute_mode': 'RecComputeMode.UNFUSED_TASKS_COMPUTATION', 'test_clazz': 'TestNEMetric', 'task_names': "['t1', 't2', 't3']", 'fused_update_limit': '(100)', 'compute_on_all_ranks': '(False)', 'world_size': 'WORLD_SIZE', 'entry_point': 'self._test_ne_large_window_size'}), "(target_clazz=NEMetric, 
target_compute_mode=\n RecComputeMode.UNFUSED_TASKS_COMPUTATION, test_clazz=TestNEMetric,\n task_names=['t1', 't2', 't3'], fused_update_limit=100,\n compute_on_all_ranks=False, world_size=WORLD_SIZE, entry_point=self.\n _test_ne_large_window_size)\n", (5206, 5485), False, 'from torchrec.metrics.tests.test_utils import rec_metric_value_test_helper, rec_metric_value_test_launcher, TestMetric\n'), ((3056, 3131), 'torch.allclose', 'torch.allclose', (["ne_metrics[f'ne-{name}|lifetime_ne']", 'test_metrics[0][name]'], {}), "(ne_metrics[f'ne-{name}|lifetime_ne'], test_metrics[0][name])\n", (3070, 3131), False, 'import torch\n'), ((3193, 3266), 'torch.allclose', 'torch.allclose', (["ne_metrics[f'ne-{name}|window_ne']", 'test_metrics[1][name]'], {}), "(ne_metrics[f'ne-{name}|window_ne'], test_metrics[1][name])\n", (3207, 3266), False, 'import torch\n'), ((3328, 3414), 'torch.allclose', 'torch.allclose', (["ne_metrics[f'ne-{name}|local_lifetime_ne']", 'test_metrics[2][name]'], {}), "(ne_metrics[f'ne-{name}|local_lifetime_ne'], test_metrics[2][\n name])\n", (3342, 3414), False, 'import torch\n'), ((3471, 3550), 'torch.allclose', 'torch.allclose', (["ne_metrics[f'ne-{name}|local_window_ne']", 'test_metrics[3][name]'], {}), "(ne_metrics[f'ne-{name}|local_window_ne'], test_metrics[3][name])\n", (3485, 3550), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import unittest
from typing import Any, Iterator, List, Tuple
from unittest.mock import Mock, patch
from torch.utils.data import IterDataPipe
from torchrec.datasets.utils import (
idx_split_train_val,
rand_split_train_val,
ParallelReadConcat,
)
class _DummyDataReader(IterDataPipe):
def __init__(self, num_rows: int, val: str = "") -> None:
self.num_rows = num_rows
self.val = val
def __iter__(self) -> Iterator[Tuple[int, str]]:
for idx in range(self.num_rows):
yield idx, self.val
class TestLimit(unittest.TestCase):
def test(self) -> None:
datapipe = _DummyDataReader(100).limit(10)
self.assertEqual(len(list(datapipe)), 10)
class TestIdxSplitTrainVal(unittest.TestCase):
def test_even_split(self) -> None:
datapipe = _DummyDataReader(int(1000))
train_datapipe, val_datapipe = idx_split_train_val(datapipe, 0.5)
self.assertEqual(len(list(train_datapipe)), 500)
self.assertEqual(len(list(val_datapipe)), 500)
def test_uneven_split(self) -> None:
datapipe = _DummyDataReader(int(100000))
train_datapipe, val_datapipe = idx_split_train_val(datapipe, 0.6)
self.assertEqual(len(list(train_datapipe)), 100000 * 0.6)
self.assertEqual(len(list(val_datapipe)), 100000 * 0.4)
def test_invalid_train_perc(self) -> None:
datapipe = _DummyDataReader(123)
with self.assertRaisesRegex(ValueError, "train_perc"):
train_datapipe, val_datapipe = idx_split_train_val(datapipe, 0.0)
with self.assertRaisesRegex(ValueError, "train_perc"):
train_datapipe, val_datapipe = idx_split_train_val(datapipe, 1.0)
with self.assertRaisesRegex(ValueError, "train_perc"):
train_datapipe, val_datapipe = idx_split_train_val(datapipe, 10.2)
with self.assertRaisesRegex(ValueError, "train_perc"):
train_datapipe, val_datapipe = idx_split_train_val(datapipe, -50.15)
class _FakeRandom(random.Random):
def __init__(self, num_vals: int) -> None:
super().__init__()
self.num_vals = num_vals
self.vals: List[float] = [val / num_vals for val in range(num_vals)]
self.current_idx = 0
def random(self) -> float:
val = self.vals[self.current_idx]
self.current_idx += 1
return val
# pyre-ignore[3]
def getstate(self) -> Tuple[Any, ...]:
return (self.vals, self.current_idx)
# pyre-ignore[2]
def setstate(self, state: Tuple[Any, ...]) -> None:
self.vals, self.current_idx = state
class TestRandSplitTrainVal(unittest.TestCase):
def test_deterministic_split(self) -> None:
num_vals = 1000
datapipe = _DummyDataReader(num_vals)
with patch("random.Random", new=lambda a: _FakeRandom(num_vals)):
train_datapipe, val_datapipe = rand_split_train_val(datapipe, 0.8)
self.assertEqual(len(list(train_datapipe)), num_vals * 0.8)
self.assertEqual(len(list(val_datapipe)), num_vals * 0.2)
self.assertEqual(
len(set(train_datapipe).intersection(set(val_datapipe))), 0
)
def test_rand_split(self) -> None:
datapipe = _DummyDataReader(100000)
train_datapipe, val_datapipe = rand_split_train_val(datapipe, 0.7)
self.assertEqual(len(set(train_datapipe).intersection(set(val_datapipe))), 0)
def test_invalid_train_perc(self) -> None:
datapipe = _DummyDataReader(123)
with self.assertRaisesRegex(ValueError, "train_perc"):
train_datapipe, val_datapipe = rand_split_train_val(datapipe, 0.0)
with self.assertRaisesRegex(ValueError, "train_perc"):
train_datapipe, val_datapipe = rand_split_train_val(datapipe, 1.0)
with self.assertRaisesRegex(ValueError, "train_perc"):
train_datapipe, val_datapipe = rand_split_train_val(datapipe, 10.2)
with self.assertRaisesRegex(ValueError, "train_perc"):
train_datapipe, val_datapipe = rand_split_train_val(datapipe, -50.15)
class TestParallelReadConcat(unittest.TestCase):
def test_worker_assignment(self) -> None:
datapipes = [_DummyDataReader(1000, str(idx)) for idx in range(10)]
all_res = []
num_workers = 4
for idx in range(num_workers):
with patch("torchrec.datasets.utils.get_worker_info") as get_worker_info:
get_worker_info.return_value = Mock(id=idx, num_workers=num_workers)
all_res += list(ParallelReadConcat(*datapipes))
expected_res = []
for dp in datapipes:
expected_res += list(dp)
self.assertEqual(all_res, expected_res)
def test_no_workers(self) -> None:
datapipes = [_DummyDataReader(1000, str(idx)) for idx in range(10)]
with patch("torchrec.datasets.utils.get_worker_info") as get_worker_info:
get_worker_info.return_value = None
dp = ParallelReadConcat(*datapipes)
self.assertEqual(len(list(dp)), 10000)
def test_more_workers_than_dps(self) -> None:
datapipes = [_DummyDataReader(1000, str(idx)) for idx in range(2)]
with patch("torchrec.datasets.utils.get_worker_info") as get_worker_info:
get_worker_info.return_value = Mock(id=2, num_workers=10)
with self.assertRaises(ValueError):
next(iter(ParallelReadConcat(*datapipes)))
| [
"torchrec.datasets.utils.ParallelReadConcat",
"torchrec.datasets.utils.idx_split_train_val",
"torchrec.datasets.utils.rand_split_train_val"
] | [((1130, 1164), 'torchrec.datasets.utils.idx_split_train_val', 'idx_split_train_val', (['datapipe', '(0.5)'], {}), '(datapipe, 0.5)\n', (1149, 1164), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((1407, 1441), 'torchrec.datasets.utils.idx_split_train_val', 'idx_split_train_val', (['datapipe', '(0.6)'], {}), '(datapipe, 0.6)\n', (1426, 1441), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((3539, 3574), 'torchrec.datasets.utils.rand_split_train_val', 'rand_split_train_val', (['datapipe', '(0.7)'], {}), '(datapipe, 0.7)\n', (3559, 3574), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((1767, 1801), 'torchrec.datasets.utils.idx_split_train_val', 'idx_split_train_val', (['datapipe', '(0.0)'], {}), '(datapipe, 0.0)\n', (1786, 1801), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((1908, 1942), 'torchrec.datasets.utils.idx_split_train_val', 'idx_split_train_val', (['datapipe', '(1.0)'], {}), '(datapipe, 1.0)\n', (1927, 1942), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((2049, 2084), 'torchrec.datasets.utils.idx_split_train_val', 'idx_split_train_val', (['datapipe', '(10.2)'], {}), '(datapipe, 10.2)\n', (2068, 2084), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((2191, 2228), 'torchrec.datasets.utils.idx_split_train_val', 'idx_split_train_val', (['datapipe', '(-50.15)'], {}), '(datapipe, -50.15)\n', (2210, 2228), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((3118, 3153), 'torchrec.datasets.utils.rand_split_train_val', 'rand_split_train_val', (['datapipe', '(0.8)'], {}), '(datapipe, 0.8)\n', (3138, 3153), False, 'from 
torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((3856, 3891), 'torchrec.datasets.utils.rand_split_train_val', 'rand_split_train_val', (['datapipe', '(0.0)'], {}), '(datapipe, 0.0)\n', (3876, 3891), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((3998, 4033), 'torchrec.datasets.utils.rand_split_train_val', 'rand_split_train_val', (['datapipe', '(1.0)'], {}), '(datapipe, 1.0)\n', (4018, 4033), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((4140, 4176), 'torchrec.datasets.utils.rand_split_train_val', 'rand_split_train_val', (['datapipe', '(10.2)'], {}), '(datapipe, 10.2)\n', (4160, 4176), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((4283, 4321), 'torchrec.datasets.utils.rand_split_train_val', 'rand_split_train_val', (['datapipe', '(-50.15)'], {}), '(datapipe, -50.15)\n', (4303, 4321), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((5083, 5131), 'unittest.mock.patch', 'patch', (['"""torchrec.datasets.utils.get_worker_info"""'], {}), "('torchrec.datasets.utils.get_worker_info')\n", (5088, 5131), False, 'from unittest.mock import Mock, patch\n'), ((5217, 5247), 'torchrec.datasets.utils.ParallelReadConcat', 'ParallelReadConcat', (['*datapipes'], {}), '(*datapipes)\n', (5235, 5247), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((5438, 5486), 'unittest.mock.patch', 'patch', (['"""torchrec.datasets.utils.get_worker_info"""'], {}), "('torchrec.datasets.utils.get_worker_info')\n", (5443, 5486), False, 'from unittest.mock import Mock, patch\n'), ((5550, 5576), 'unittest.mock.Mock', 'Mock', ([], {'id': '(2)', 'num_workers': '(10)'}), '(id=2, num_workers=10)\n', (5554, 5576), False, 'from unittest.mock 
import Mock, patch\n'), ((4596, 4644), 'unittest.mock.patch', 'patch', (['"""torchrec.datasets.utils.get_worker_info"""'], {}), "('torchrec.datasets.utils.get_worker_info')\n", (4601, 4644), False, 'from unittest.mock import Mock, patch\n'), ((4712, 4749), 'unittest.mock.Mock', 'Mock', ([], {'id': 'idx', 'num_workers': 'num_workers'}), '(id=idx, num_workers=num_workers)\n', (4716, 4749), False, 'from unittest.mock import Mock, patch\n'), ((4782, 4812), 'torchrec.datasets.utils.ParallelReadConcat', 'ParallelReadConcat', (['*datapipes'], {}), '(*datapipes)\n', (4800, 4812), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n'), ((5651, 5681), 'torchrec.datasets.utils.ParallelReadConcat', 'ParallelReadConcat', (['*datapipes'], {}), '(*datapipes)\n', (5669, 5681), False, 'from torchrec.datasets.utils import idx_split_train_val, rand_split_train_val, ParallelReadConcat\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from collections import OrderedDict
from typing import List, Tuple, Optional, cast
import hypothesis.strategies as st
import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
from hypothesis import Verbosity, given, settings
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionSharder,
EmbeddingBagSharder,
ShardedEmbeddingBagCollection,
)
from torchrec.distributed.model_parallel import (
DistributedModelParallel,
get_default_sharders,
)
from torchrec.distributed.planner import (
EmbeddingShardingPlanner,
ParameterConstraints,
Topology,
)
from torchrec.distributed.test_utils.test_model import (
TestSparseNN,
ModelInput,
)
from torchrec.distributed.test_utils.test_model_parallel import (
ModelParallelTestShared,
SharderType,
create_test_sharder,
)
from torchrec.distributed.types import (
ModuleSharder,
ShardedTensor,
ShardingType,
ShardingEnv,
)
from torchrec.modules.embedding_configs import EmbeddingBagConfig, PoolingType
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.test_utils import get_free_port, skip_if_asan_class
@skip_if_asan_class
class ModelParallelTest(ModelParallelTestShared):
    """End-to-end sharding tests for ``DistributedModelParallel`` (DMP).

    Each ``test_sharding_*`` case delegates to ``self._test_sharding``
    (inherited from ``ModelParallelTestShared``) with one backend
    ("nccl" for GPU, "gloo" for CPU) and one sharding strategy
    (row-wise / column-wise / table-wise / data-parallel), while
    hypothesis sweeps the sharder and compute-kernel variants.
    """

    @unittest.skipIf(
        torch.cuda.device_count() <= 1,
        "Not enough GPUs, this test requires at least two GPUs",
    )
    # pyre-fixme[56]
    @given(
        sharder_type=st.sampled_from(
            [
                SharderType.EMBEDDING_BAG.value,
                SharderType.EMBEDDING_BAG_COLLECTION.value,
            ]
        ),
        sharding_type=st.sampled_from(
            [
                ShardingType.ROW_WISE.value,
            ]
        ),
        kernel_type=st.sampled_from(
            [
                EmbeddingComputeKernel.DENSE.value,
                EmbeddingComputeKernel.SPARSE.value,
                EmbeddingComputeKernel.BATCHED_DENSE.value,
                EmbeddingComputeKernel.BATCHED_FUSED.value,
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=8, deadline=None)
    def test_sharding_nccl_rw(
        self,
        sharder_type: str,
        sharding_type: str,
        kernel_type: str,
    ) -> None:
        """Row-wise sharding over the nccl backend (requires >= 2 GPUs)."""
        self._test_sharding(
            # pyre-ignore[6]
            sharders=[
                create_test_sharder(sharder_type, sharding_type, kernel_type),
            ],
            backend="nccl",
        )
    @unittest.skipIf(
        torch.cuda.device_count() <= 1,
        "Not enough GPUs, this test requires at least two GPUs",
    )
    # pyre-fixme[56]
    @given(
        sharder_type=st.sampled_from(
            [
                SharderType.EMBEDDING_BAG.value,
                SharderType.EMBEDDING_BAG_COLLECTION.value,
            ]
        ),
        sharding_type=st.sampled_from(
            [
                ShardingType.DATA_PARALLEL.value,
            ]
        ),
        kernel_type=st.sampled_from(
            [
                EmbeddingComputeKernel.DENSE.value,
                EmbeddingComputeKernel.BATCHED_DENSE.value,
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=4, deadline=None)
    def test_sharding_nccl_dp(
        self, sharder_type: str, sharding_type: str, kernel_type: str
    ) -> None:
        """Data-parallel sharding over the nccl backend."""
        self._test_sharding(
            # pyre-ignore[6]
            sharders=[
                create_test_sharder(sharder_type, sharding_type, kernel_type),
            ],
            backend="nccl",
        )
    @unittest.skipIf(
        torch.cuda.device_count() <= 1,
        "Not enough GPUs, this test requires at least two GPUs",
    )
    # pyre-fixme[56]
    @given(
        sharder_type=st.sampled_from(
            [
                SharderType.EMBEDDING_BAG.value,
                SharderType.EMBEDDING_BAG_COLLECTION.value,
            ]
        ),
        sharding_type=st.sampled_from(
            [
                ShardingType.COLUMN_WISE.value,
            ]
        ),
        kernel_type=st.sampled_from(
            [
                EmbeddingComputeKernel.DENSE.value,
                EmbeddingComputeKernel.SPARSE.value,
                EmbeddingComputeKernel.BATCHED_DENSE.value,
                EmbeddingComputeKernel.BATCHED_FUSED.value,
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=8, deadline=None)
    def test_sharding_nccl_cw(
        self, sharder_type: str, sharding_type: str, kernel_type: str
    ) -> None:
        """Column-wise sharding over nccl; min_partition constrains each table's split."""
        self._test_sharding(
            # pyre-ignore[6]
            sharders=[
                create_test_sharder(
                    sharder_type,
                    sharding_type,
                    kernel_type,
                ),
            ],
            backend="nccl",
            # Force the planner to cut columns in chunks of at least 4
            # for every table defined on the shared fixture.
            constraints={
                table.name: ParameterConstraints(min_partition=4)
                for table in self.tables
            },
        )
    @unittest.skipIf(
        torch.cuda.device_count() <= 1,
        "Not enough GPUs, this test requires at least two GPUs",
    )
    # pyre-fixme[56]
    @given(
        sharder_type=st.sampled_from(
            [
                # SharderType.EMBEDDING_BAG.value,
                SharderType.EMBEDDING_BAG_COLLECTION.value,
            ]
        ),
        sharding_type=st.sampled_from(
            [
                ShardingType.TABLE_WISE.value,
            ]
        ),
        kernel_type=st.sampled_from(
            [
                EmbeddingComputeKernel.DENSE.value,
                EmbeddingComputeKernel.SPARSE.value,
                EmbeddingComputeKernel.BATCHED_DENSE.value,
                EmbeddingComputeKernel.BATCHED_FUSED.value,
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=8, deadline=None)
    def test_sharding_nccl_tw(
        self, sharder_type: str, sharding_type: str, kernel_type: str
    ) -> None:
        """Table-wise sharding over the nccl backend."""
        self._test_sharding(
            # pyre-ignore[6]
            sharders=[
                create_test_sharder(sharder_type, sharding_type, kernel_type),
            ],
            backend="nccl",
        )
    # pyre-fixme[56]
    @given(
        sharder_type=st.sampled_from(
            [
                # TODO: enable it with correct semantics, see T104397332
                # SharderType.EMBEDDING_BAG.value,
                SharderType.EMBEDDING_BAG_COLLECTION.value,
            ]
        ),
        sharding_type=st.sampled_from(
            [
                ShardingType.TABLE_WISE.value,
            ]
        ),
        kernel_type=st.sampled_from(
            [
                EmbeddingComputeKernel.DENSE.value,
                EmbeddingComputeKernel.SPARSE.value,
                EmbeddingComputeKernel.BATCHED_DENSE.value,
                EmbeddingComputeKernel.BATCHED_FUSED.value,
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=8, deadline=None)
    def test_sharding_gloo_tw(
        self,
        sharder_type: str,
        sharding_type: str,
        kernel_type: str,
    ) -> None:
        """Table-wise sharding over the CPU-friendly gloo backend (no GPU gate)."""
        self._test_sharding(
            # pyre-ignore[6]
            sharders=[
                create_test_sharder(sharder_type, sharding_type, kernel_type),
            ],
            backend="gloo",
        )
    # pyre-fixme[56]
    @given(
        sharder_type=st.sampled_from(
            [
                # TODO: enable it with correct semantics, see T104397332
                # SharderType.EMBEDDING_BAG.value,
                SharderType.EMBEDDING_BAG_COLLECTION.value,
            ]
        ),
        sharding_type=st.sampled_from(
            [
                ShardingType.COLUMN_WISE.value,
            ]
        ),
        kernel_type=st.sampled_from(
            [
                EmbeddingComputeKernel.DENSE.value,
                EmbeddingComputeKernel.SPARSE.value,
                EmbeddingComputeKernel.BATCHED_DENSE.value,
                EmbeddingComputeKernel.BATCHED_FUSED.value,
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=8, deadline=None)
    def test_sharding_gloo_cw(
        self,
        sharder_type: str,
        sharding_type: str,
        kernel_type: str,
    ) -> None:
        """Column-wise sharding over gloo with 4 simulated ranks."""
        world_size = 4
        self._test_sharding(
            # pyre-ignore[6]
            sharders=[
                create_test_sharder(
                    sharder_type,
                    sharding_type,
                    kernel_type,
                ),
            ],
            backend="gloo",
            world_size=world_size,
            # Same column-partition floor as the nccl CW test above.
            constraints={
                table.name: ParameterConstraints(min_partition=4)
                for table in self.tables
            },
        )
    # pyre-fixme[56]
    @given(
        sharder_type=st.sampled_from(
            [
                SharderType.EMBEDDING_BAG.value,
                SharderType.EMBEDDING_BAG_COLLECTION.value,
            ]
        ),
        sharding_type=st.sampled_from(
            [
                ShardingType.DATA_PARALLEL.value,
            ]
        ),
        kernel_type=st.sampled_from(
            [
                EmbeddingComputeKernel.DENSE.value,
                EmbeddingComputeKernel.BATCHED_DENSE.value,
                # TODO dp+batch_fused is numerically buggy in cpu
                # EmbeddingComputeKernel.SPARSE.value,
                # EmbeddingComputeKernel.BATCHED_FUSED.value,
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=8, deadline=None)
    def test_sharding_gloo_dp(
        self, sharder_type: str, sharding_type: str, kernel_type: str
    ) -> None:
        """Data-parallel sharding over the gloo backend."""
        self._test_sharding(
            # pyre-ignore[6]
            sharders=[
                create_test_sharder(sharder_type, sharding_type, kernel_type),
            ],
            backend="gloo",
        )
    def test_sharding_ebc_as_top_level(self) -> None:
        """Wrap a bare EmbeddingBagCollection (no surrounding model) directly in DMP.

        Verifies that DMP replaces the top-level module with its sharded
        counterpart (``ShardedEmbeddingBagCollection``).
        """
        # Single-rank process-group bootstrap via environment variables.
        os.environ["RANK"] = "0"
        os.environ["WORLD_SIZE"] = "1"
        os.environ["LOCAL_WORLD_SIZE"] = "1"
        os.environ["MASTER_ADDR"] = str("localhost")
        os.environ["MASTER_PORT"] = str(get_free_port())
        os.environ["NCCL_SOCKET_IFNAME"] = "lo"
        if torch.cuda.is_available():
            curr_device = torch.device("cuda:0")
            torch.cuda.set_device(curr_device)
            backend = "nccl"
        else:
            curr_device = torch.device("cpu")
            backend = "gloo"
        dist.init_process_group(backend=backend)
        # NOTE(review): this test never calls dist.destroy_process_group();
        # presumably it relies on test-process isolation — confirm ordering
        # if tests in this class start interfering with each other.
        embedding_dim = 128
        num_embeddings = 256
        ebc = EmbeddingBagCollection(
            # "meta" device: parameters are materialized later by DMP.
            device=torch.device("meta"),
            tables=[
                EmbeddingBagConfig(
                    name="large_table",
                    embedding_dim=embedding_dim,
                    num_embeddings=num_embeddings,
                    feature_names=["my_feature"],
                    pooling=PoolingType.SUM,
                ),
            ],
        )
        model = DistributedModelParallel(ebc, device=curr_device)
        self.assertTrue(isinstance(model.module, ShardedEmbeddingBagCollection))
class ModelParallelStateDictTest(unittest.TestCase):
    """State-dict round-trip tests for ``DistributedModelParallel``.

    ``setUp`` establishes a single-rank process group (nccl on GPU, gloo
    on CPU) plus small plain and weighted embedding-table fixtures;
    ``tearDown`` tears the process group back down.
    """

    def setUp(self) -> None:
        # Single-rank process-group bootstrap via environment variables.
        os.environ["RANK"] = "0"
        os.environ["WORLD_SIZE"] = "1"
        os.environ["LOCAL_WORLD_SIZE"] = "1"
        os.environ["MASTER_ADDR"] = str("localhost")
        os.environ["MASTER_PORT"] = str(get_free_port())
        os.environ["NCCL_SOCKET_IFNAME"] = "lo"
        if torch.cuda.is_available():
            self.device = torch.device("cuda:0")
            backend = "nccl"
            torch.cuda.set_device(self.device)
        else:
            self.device = torch.device("cpu")
            backend = "gloo"
        dist.init_process_group(backend=backend)
        num_features = 4
        num_weighted_features = 2
        self.batch_size = 3
        self.num_float_features = 10
        # Tables get progressively larger: table_i has (i+1)*10 rows of
        # dimension (i+1)*4, each serving a single feature "feature_i".
        self.tables = [
            EmbeddingBagConfig(
                num_embeddings=(i + 1) * 10,
                embedding_dim=(i + 1) * 4,
                name="table_" + str(i),
                feature_names=["feature_" + str(i)],
            )
            for i in range(num_features)
        ]
        self.weighted_tables = [
            EmbeddingBagConfig(
                num_embeddings=(i + 1) * 10,
                embedding_dim=(i + 1) * 4,
                name="weighted_table_" + str(i),
                feature_names=["weighted_feature_" + str(i)],
            )
            for i in range(num_weighted_features)
        ]
    def tearDown(self) -> None:
        dist.destroy_process_group()
        del os.environ["NCCL_SOCKET_IFNAME"]
        super().tearDown()
    def _generate_dmps_and_batch(
        self, sharders: Optional[List[ModuleSharder[nn.Module]]] = None
    ) -> Tuple[List[DistributedModelParallel], ModelInput]:
        """Build two identically configured DMP-wrapped TestSparseNN models
        plus one input batch on ``self.device``.

        Returns ``([dmp1, dmp2], batch)``; the pair is used by the
        load_state_dict tests to copy state from one model into the other.
        """
        if sharders is None:
            sharders = get_default_sharders()
        _, local_batch = ModelInput.generate(
            batch_size=self.batch_size,
            world_size=1,
            num_float_features=self.num_float_features,
            tables=self.tables,
            weighted_tables=self.weighted_tables,
        )
        batch = local_batch[0].to(self.device)
        # Create two TestSparseNN modules, wrap both in DMP
        dmps = []
        for _ in range(2):
            m = TestSparseNN(
                tables=self.tables,
                num_float_features=self.num_float_features,
                weighted_tables=self.weighted_tables,
                dense_device=self.device,
                sparse_device=torch.device("meta"),
            )
            dmp = DistributedModelParallel(
                module=m,
                init_data_parallel=False,
                device=self.device,
                sharders=sharders,
            )
            # One no-grad forward pass before enabling data-parallel
            # replication (lazy modules need a forward to materialize).
            with torch.no_grad():
                dmp(batch)
                dmp.init_data_parallel()
            dmps.append(dmp)
        return (dmps, batch)
    def test_parameter_init(self) -> None:
        """DMP must keep pre-allocated parameters as-is and initialize
        meta-device parameters via the module's ``reset_parameters``."""
        class MyModel(nn.Module):
            def __init__(self, device: str, val: float) -> None:
                super().__init__()
                self.p = nn.Parameter(
                    torch.empty(3, dtype=torch.float32, device=device)
                )
                self.val = val
                self.reset_parameters()
            def reset_parameters(self) -> None:
                nn.init.constant_(self.p, self.val)
        # Recreate the group on gloo so the test runs identically on CPU/GPU.
        dist.destroy_process_group()
        dist.init_process_group(backend="gloo")
        # Check that already allocated parameters are left 'as is'.
        cpu_model = MyModel(device="cpu", val=3.2)
        sharded_model = DistributedModelParallel(
            cpu_model,
        )
        sharded_param = next(sharded_model.parameters())
        np.testing.assert_array_equal(
            np.array([3.2, 3.2, 3.2], dtype=np.float32), sharded_param.detach().numpy()
        )
        # Check that parameters over 'meta' device are allocated and initialized.
        meta_model = MyModel(device="meta", val=7.5)
        sharded_model = DistributedModelParallel(
            meta_model,
        )
        sharded_param = next(sharded_model.parameters())
        np.testing.assert_array_equal(
            np.array([7.5, 7.5, 7.5], dtype=np.float32), sharded_param.detach().numpy()
        )
    def test_meta_device_dmp_state_dict(self) -> None:
        """A DMP built on the meta device must expose a state_dict whose
        entry shapes match those of a DMP built on a real device."""
        env = ShardingEnv.from_process_group(dist.GroupMember.WORLD)
        m1 = TestSparseNN(
            tables=self.tables,
            num_float_features=self.num_float_features,
            weighted_tables=self.weighted_tables,
            dense_device=self.device,
        )
        # dmp with real device
        dmp1 = DistributedModelParallel(
            module=m1,
            init_data_parallel=False,
            init_parameters=False,
            sharders=get_default_sharders(),
            device=self.device,
            env=env,
            plan=EmbeddingShardingPlanner(
                topology=Topology(
                    world_size=env.world_size, compute_device=self.device.type
                )
            ).plan(m1, get_default_sharders()),
        )
        m2 = TestSparseNN(
            tables=self.tables,
            num_float_features=self.num_float_features,
            weighted_tables=self.weighted_tables,
            dense_device=self.device,
        )
        # dmp with meta device
        dmp2 = DistributedModelParallel(
            module=m2,
            init_data_parallel=False,
            init_parameters=False,
            sharders=get_default_sharders(),
            device=torch.device("meta"),
            env=env,
            plan=EmbeddingShardingPlanner(
                topology=Topology(
                    world_size=env.world_size, compute_device=self.device.type
                )
            ).plan(m2, get_default_sharders()),
        )
        sd1 = dmp1.state_dict()
        for key, v2 in dmp2.state_dict().items():
            v1 = sd1[key]
            # Uninitialized lazy parameters on both sides carry no shape;
            # nothing to compare.
            if isinstance(v2, nn.parameter.UninitializedParameter) and isinstance(
                v1, nn.parameter.UninitializedParameter
            ):
                continue
            # Unwrap single-shard ShardedTensors down to their local tensor.
            if isinstance(v2, ShardedTensor):
                self.assertTrue(isinstance(v1, ShardedTensor))
                assert len(v2.local_shards()) == 1
                dst = v2.local_shards()[0].tensor
            else:
                dst = v2
            if isinstance(v1, ShardedTensor):
                assert len(v1.local_shards()) == 1
                src = v1.local_shards()[0].tensor
            else:
                src = v1
            self.assertEqual(src.size(), dst.size())
    # pyre-ignore[56]
    @given(
        sharders=st.sampled_from(
            [
                [EmbeddingBagCollectionSharder()],
                [EmbeddingBagSharder()],
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
    def test_load_state_dict(self, sharders: List[ModuleSharder[nn.Module]]) -> None:
        """Loading m1's state_dict into m2 must make the two models produce
        identical losses/predictions and identical per-key tensors."""
        models, batch = self._generate_dmps_and_batch(sharders)
        m1, m2 = models
        # load the second's (m2's) with the first (m1's) state_dict
        m2.load_state_dict(
            cast("OrderedDict[str, torch.Tensor]", m1.state_dict()), strict=False
        )
        # validate the models are equivalent
        with torch.no_grad():
            loss1, pred1 = m1(batch)
            loss2, pred2 = m2(batch)
            self.assertTrue(torch.equal(loss1, loss2))
            self.assertTrue(torch.equal(pred1, pred2))
        sd1 = m1.state_dict()
        # Compare every state entry of m2 against m1, unwrapping
        # single-shard ShardedTensors to their local tensor first.
        for key, value in m2.state_dict().items():
            v2 = sd1[key]
            if isinstance(value, ShardedTensor):
                assert len(value.local_shards()) == 1
                dst = value.local_shards()[0].tensor
            else:
                dst = value
            if isinstance(v2, ShardedTensor):
                assert len(v2.local_shards()) == 1
                src = v2.local_shards()[0].tensor
            else:
                src = v2
            self.assertTrue(torch.equal(src, dst))
    # pyre-ignore[56]
    @given(
        sharders=st.sampled_from(
            [
                [EmbeddingBagCollectionSharder()],
                [EmbeddingBagSharder()],
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
    def test_load_state_dict_prefix(
        self, sharders: List[ModuleSharder[nn.Module]]
    ) -> None:
        """Same as test_load_state_dict but exporting/importing the
        state_dict under a key prefix ("alpha")."""
        (m1, m2), batch = self._generate_dmps_and_batch(sharders)
        # load the second's (m2's) with the first (m1's) state_dict
        m2.load_state_dict(
            cast("OrderedDict[str, torch.Tensor]", m1.state_dict(prefix="alpha")),
            prefix="alpha",
            strict=False,
        )
        # validate the models are equivalent
        sd1 = m1.state_dict()
        for key, value in m2.state_dict().items():
            v2 = sd1[key]
            if isinstance(value, ShardedTensor):
                assert len(value.local_shards()) == 1
                dst = value.local_shards()[0].tensor
            else:
                dst = value
            if isinstance(v2, ShardedTensor):
                assert len(v2.local_shards()) == 1
                src = v2.local_shards()[0].tensor
            else:
                src = v2
            self.assertTrue(torch.equal(src, dst))
    # pyre-fixme[56]
    @given(
        sharder_type=st.sampled_from(
            [
                SharderType.EMBEDDING_BAG_COLLECTION.value,
            ]
        ),
        sharding_type=st.sampled_from(
            [
                ShardingType.TABLE_WISE.value,
            ]
        ),
        kernel_type=st.sampled_from(
            [
                EmbeddingComputeKernel.DENSE.value,
                EmbeddingComputeKernel.SPARSE.value,
                # EmbeddingComputeKernel.BATCHED_DENSE.value,
                EmbeddingComputeKernel.BATCHED_FUSED.value,
            ]
        ),
    )
    @settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
    def test_params_and_buffers(
        self, sharder_type: str, sharding_type: str, kernel_type: str
    ) -> None:
        """state_dict keys must be exactly the union of named_parameters
        and named_buffers keys on a sharded model."""
        sharders = [
            create_test_sharder(sharder_type, sharding_type, kernel_type),
        ]
        # pyre-ignore[6]
        (m, _), batch = self._generate_dmps_and_batch(sharders=sharders)
        print(f"Sharding Plan: {m._plan}")
        state_dict_keys = set(m.state_dict().keys())
        param_keys = {key for (key, _) in m.named_parameters()}
        buffer_keys = {key for (key, _) in m.named_buffers()}
        self.assertEqual(state_dict_keys, {*param_keys, *buffer_keys})
| [
"torchrec.distributed.test_utils.test_model.TestSparseNN",
"torchrec.test_utils.get_free_port",
"torchrec.distributed.test_utils.test_model.ModelInput.generate",
"torchrec.distributed.model_parallel.get_default_sharders",
"torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder",
"torchrec.modules... | [((2376, 2444), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(8)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=8, deadline=None)\n', (2384, 2444), False, 'from hypothesis import Verbosity, given, settings\n'), ((3479, 3547), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(4)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=4, deadline=None)\n', (3487, 3547), False, 'from hypothesis import Verbosity, given, settings\n'), ((4668, 4736), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(8)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=8, deadline=None)\n', (4676, 4736), False, 'from hypothesis import Verbosity, given, settings\n'), ((6085, 6153), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(8)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=8, deadline=None)\n', (6093, 6153), False, 'from hypothesis import Verbosity, given, settings\n'), ((7215, 7283), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(8)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=8, deadline=None)\n', (7223, 7283), False, 'from hypothesis import Verbosity, given, settings\n'), ((8371, 8439), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(8)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=8, deadline=None)\n', (8379, 8439), False, 'from hypothesis import Verbosity, given, settings\n'), ((9809, 9877), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(8)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=8, deadline=None)\n', (9817, 9877), False, 'from hypothesis import Verbosity, given, settings\n'), ((18261, 18329), 
'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(2)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=2, deadline=None)\n', (18269, 18329), False, 'from hypothesis import Verbosity, given, settings\n'), ((19713, 19781), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(2)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=2, deadline=None)\n', (19721, 19781), False, 'from hypothesis import Verbosity, given, settings\n'), ((21405, 21474), 'hypothesis.settings', 'settings', ([], {'verbosity': 'Verbosity.verbose', 'max_examples': '(10)', 'deadline': 'None'}), '(verbosity=Verbosity.verbose, max_examples=10, deadline=None)\n', (21413, 21474), False, 'from hypothesis import Verbosity, given, settings\n'), ((10549, 10574), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10572, 10574), False, 'import torch\n'), ((10798, 10838), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': 'backend'}), '(backend=backend)\n', (10821, 10838), True, 'import torch.distributed as dist\n'), ((11329, 11378), 'torchrec.distributed.model_parallel.DistributedModelParallel', 'DistributedModelParallel', (['ebc'], {'device': 'curr_device'}), '(ebc, device=curr_device)\n', (11353, 11378), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, get_default_sharders\n'), ((11831, 11856), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11854, 11856), False, 'import torch\n'), ((12080, 12120), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': 'backend'}), '(backend=backend)\n', (12103, 12120), True, 'import torch.distributed as dist\n'), ((12928, 12956), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (12954, 12956), True, 'import torch.distributed as dist\n'), ((13297, 13468), 
'torchrec.distributed.test_utils.test_model.ModelInput.generate', 'ModelInput.generate', ([], {'batch_size': 'self.batch_size', 'world_size': '(1)', 'num_float_features': 'self.num_float_features', 'tables': 'self.tables', 'weighted_tables': 'self.weighted_tables'}), '(batch_size=self.batch_size, world_size=1,\n num_float_features=self.num_float_features, tables=self.tables,\n weighted_tables=self.weighted_tables)\n', (13316, 13468), False, 'from torchrec.distributed.test_utils.test_model import TestSparseNN, ModelInput\n'), ((14818, 14846), 'torch.distributed.destroy_process_group', 'dist.destroy_process_group', ([], {}), '()\n', (14844, 14846), True, 'import torch.distributed as dist\n'), ((14855, 14894), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""gloo"""'}), "(backend='gloo')\n", (14878, 14894), True, 'import torch.distributed as dist\n'), ((15039, 15074), 'torchrec.distributed.model_parallel.DistributedModelParallel', 'DistributedModelParallel', (['cpu_model'], {}), '(cpu_model)\n', (15063, 15074), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, get_default_sharders\n'), ((15452, 15488), 'torchrec.distributed.model_parallel.DistributedModelParallel', 'DistributedModelParallel', (['meta_model'], {}), '(meta_model)\n', (15476, 15488), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, get_default_sharders\n'), ((15776, 15830), 'torchrec.distributed.types.ShardingEnv.from_process_group', 'ShardingEnv.from_process_group', (['dist.GroupMember.WORLD'], {}), '(dist.GroupMember.WORLD)\n', (15806, 15830), False, 'from torchrec.distributed.types import ModuleSharder, ShardedTensor, ShardingType, ShardingEnv\n'), ((15845, 15989), 'torchrec.distributed.test_utils.test_model.TestSparseNN', 'TestSparseNN', ([], {'tables': 'self.tables', 'num_float_features': 'self.num_float_features', 'weighted_tables': 'self.weighted_tables', 'dense_device': 'self.device'}), 
'(tables=self.tables, num_float_features=self.num_float_features,\n weighted_tables=self.weighted_tables, dense_device=self.device)\n', (15857, 15989), False, 'from torchrec.distributed.test_utils.test_model import TestSparseNN, ModelInput\n'), ((16558, 16702), 'torchrec.distributed.test_utils.test_model.TestSparseNN', 'TestSparseNN', ([], {'tables': 'self.tables', 'num_float_features': 'self.num_float_features', 'weighted_tables': 'self.weighted_tables', 'dense_device': 'self.device'}), '(tables=self.tables, num_float_features=self.num_float_features,\n weighted_tables=self.weighted_tables, dense_device=self.device)\n', (16570, 16702), False, 'from torchrec.distributed.test_utils.test_model import TestSparseNN, ModelInput\n'), ((1619, 1644), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1642, 1644), False, 'import torch\n'), ((1776, 1875), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[SharderType.EMBEDDING_BAG.value, SharderType.EMBEDDING_BAG_COLLECTION.value]'], {}), '([SharderType.EMBEDDING_BAG.value, SharderType.\n EMBEDDING_BAG_COLLECTION.value])\n', (1791, 1875), True, 'import hypothesis.strategies as st\n'), ((1963, 2009), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.ROW_WISE.value]'], {}), '([ShardingType.ROW_WISE.value])\n', (1978, 2009), True, 'import hypothesis.strategies as st\n'), ((2084, 2271), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.SPARSE.value,\n EmbeddingComputeKernel.BATCHED_DENSE.value, EmbeddingComputeKernel.\n BATCHED_FUSED.value]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .SPARSE.value, EmbeddingComputeKernel.BATCHED_DENSE.value,\n EmbeddingComputeKernel.BATCHED_FUSED.value])\n', (2099, 2271), True, 'import hypothesis.strategies as st\n'), ((2830, 2855), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2853, 2855), False, 'import torch\n'), 
((2987, 3086), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[SharderType.EMBEDDING_BAG.value, SharderType.EMBEDDING_BAG_COLLECTION.value]'], {}), '([SharderType.EMBEDDING_BAG.value, SharderType.\n EMBEDDING_BAG_COLLECTION.value])\n', (3002, 3086), True, 'import hypothesis.strategies as st\n'), ((3174, 3225), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.DATA_PARALLEL.value]'], {}), '([ShardingType.DATA_PARALLEL.value])\n', (3189, 3225), True, 'import hypothesis.strategies as st\n'), ((3300, 3402), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.BATCHED_DENSE.value\n ]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .BATCHED_DENSE.value])\n', (3315, 3402), True, 'import hypothesis.strategies as st\n'), ((3908, 3933), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3931, 3933), False, 'import torch\n'), ((4065, 4164), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[SharderType.EMBEDDING_BAG.value, SharderType.EMBEDDING_BAG_COLLECTION.value]'], {}), '([SharderType.EMBEDDING_BAG.value, SharderType.\n EMBEDDING_BAG_COLLECTION.value])\n', (4080, 4164), True, 'import hypothesis.strategies as st\n'), ((4252, 4301), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.COLUMN_WISE.value]'], {}), '([ShardingType.COLUMN_WISE.value])\n', (4267, 4301), True, 'import hypothesis.strategies as st\n'), ((4376, 4563), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.SPARSE.value,\n EmbeddingComputeKernel.BATCHED_DENSE.value, EmbeddingComputeKernel.\n BATCHED_FUSED.value]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .SPARSE.value, EmbeddingComputeKernel.BATCHED_DENSE.value,\n EmbeddingComputeKernel.BATCHED_FUSED.value])\n', (4391, 4563), True, 'import hypothesis.strategies as st\n'), ((5324, 
5349), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (5347, 5349), False, 'import torch\n'), ((5481, 5542), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[SharderType.EMBEDDING_BAG_COLLECTION.value]'], {}), '([SharderType.EMBEDDING_BAG_COLLECTION.value])\n', (5496, 5542), True, 'import hypothesis.strategies as st\n'), ((5670, 5718), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.TABLE_WISE.value]'], {}), '([ShardingType.TABLE_WISE.value])\n', (5685, 5718), True, 'import hypothesis.strategies as st\n'), ((5793, 5980), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.SPARSE.value,\n EmbeddingComputeKernel.BATCHED_DENSE.value, EmbeddingComputeKernel.\n BATCHED_FUSED.value]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .SPARSE.value, EmbeddingComputeKernel.BATCHED_DENSE.value,\n EmbeddingComputeKernel.BATCHED_FUSED.value])\n', (5808, 5980), True, 'import hypothesis.strategies as st\n'), ((6538, 6599), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[SharderType.EMBEDDING_BAG_COLLECTION.value]'], {}), '([SharderType.EMBEDDING_BAG_COLLECTION.value])\n', (6553, 6599), True, 'import hypothesis.strategies as st\n'), ((6800, 6848), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.TABLE_WISE.value]'], {}), '([ShardingType.TABLE_WISE.value])\n', (6815, 6848), True, 'import hypothesis.strategies as st\n'), ((6923, 7110), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.SPARSE.value,\n EmbeddingComputeKernel.BATCHED_DENSE.value, EmbeddingComputeKernel.\n BATCHED_FUSED.value]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .SPARSE.value, EmbeddingComputeKernel.BATCHED_DENSE.value,\n EmbeddingComputeKernel.BATCHED_FUSED.value])\n', (6938, 7110), True, 'import hypothesis.strategies as 
st\n'), ((7693, 7754), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[SharderType.EMBEDDING_BAG_COLLECTION.value]'], {}), '([SharderType.EMBEDDING_BAG_COLLECTION.value])\n', (7708, 7754), True, 'import hypothesis.strategies as st\n'), ((7955, 8004), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.COLUMN_WISE.value]'], {}), '([ShardingType.COLUMN_WISE.value])\n', (7970, 8004), True, 'import hypothesis.strategies as st\n'), ((8079, 8266), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.SPARSE.value,\n EmbeddingComputeKernel.BATCHED_DENSE.value, EmbeddingComputeKernel.\n BATCHED_FUSED.value]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .SPARSE.value, EmbeddingComputeKernel.BATCHED_DENSE.value,\n EmbeddingComputeKernel.BATCHED_FUSED.value])\n', (8094, 8266), True, 'import hypothesis.strategies as st\n'), ((9134, 9233), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[SharderType.EMBEDDING_BAG.value, SharderType.EMBEDDING_BAG_COLLECTION.value]'], {}), '([SharderType.EMBEDDING_BAG.value, SharderType.\n EMBEDDING_BAG_COLLECTION.value])\n', (9149, 9233), True, 'import hypothesis.strategies as st\n'), ((9321, 9372), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.DATA_PARALLEL.value]'], {}), '([ShardingType.DATA_PARALLEL.value])\n', (9336, 9372), True, 'import hypothesis.strategies as st\n'), ((9447, 9549), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.BATCHED_DENSE.value\n ]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .BATCHED_DENSE.value])\n', (9462, 9549), True, 'import hypothesis.strategies as st\n'), ((10472, 10487), 'torchrec.test_utils.get_free_port', 'get_free_port', ([], {}), '()\n', (10485, 10487), False, 'from torchrec.test_utils import get_free_port, skip_if_asan_class\n'), ((10602, 
10624), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (10614, 10624), False, 'import torch\n'), ((10637, 10671), 'torch.cuda.set_device', 'torch.cuda.set_device', (['curr_device'], {}), '(curr_device)\n', (10658, 10671), False, 'import torch\n'), ((10741, 10760), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (10753, 10760), False, 'import torch\n'), ((11755, 11770), 'torchrec.test_utils.get_free_port', 'get_free_port', ([], {}), '()\n', (11768, 11770), False, 'from torchrec.test_utils import get_free_port, skip_if_asan_class\n'), ((11884, 11906), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (11896, 11906), False, 'import torch\n'), ((11948, 11982), 'torch.cuda.set_device', 'torch.cuda.set_device', (['self.device'], {}), '(self.device)\n', (11969, 11982), False, 'import torch\n'), ((12023, 12042), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (12035, 12042), False, 'import torch\n'), ((13248, 13270), 'torchrec.distributed.model_parallel.get_default_sharders', 'get_default_sharders', ([], {}), '()\n', (13268, 13270), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, get_default_sharders\n'), ((13991, 14095), 'torchrec.distributed.model_parallel.DistributedModelParallel', 'DistributedModelParallel', ([], {'module': 'm', 'init_data_parallel': '(False)', 'device': 'self.device', 'sharders': 'sharders'}), '(module=m, init_data_parallel=False, device=self.\n device, sharders=sharders)\n', (14015, 14095), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, get_default_sharders\n'), ((15206, 15249), 'numpy.array', 'np.array', (['[3.2, 3.2, 3.2]'], {'dtype': 'np.float32'}), '([3.2, 3.2, 3.2], dtype=np.float32)\n', (15214, 15249), True, 'import numpy as np\n'), ((15620, 15663), 'numpy.array', 'np.array', (['[7.5, 7.5, 7.5]'], {'dtype': 'np.float32'}), '([7.5, 7.5, 7.5], dtype=np.float32)\n', (15628, 15663), True, 'import 
numpy as np\n'), ((18751, 18766), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18764, 18766), False, 'import torch\n'), ((21626, 21687), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['sharder_type', 'sharding_type', 'kernel_type'], {}), '(sharder_type, sharding_type, kernel_type)\n', (21645, 21687), False, 'from torchrec.distributed.test_utils.test_model_parallel import ModelParallelTestShared, SharderType, create_test_sharder\n'), ((20850, 20911), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[SharderType.EMBEDDING_BAG_COLLECTION.value]'], {}), '([SharderType.EMBEDDING_BAG_COLLECTION.value])\n', (20865, 20911), True, 'import hypothesis.strategies as st\n'), ((20988, 21036), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[ShardingType.TABLE_WISE.value]'], {}), '([ShardingType.TABLE_WISE.value])\n', (21003, 21036), True, 'import hypothesis.strategies as st\n'), ((21111, 21250), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel.SPARSE.value,\n EmbeddingComputeKernel.BATCHED_FUSED.value]'], {}), '([EmbeddingComputeKernel.DENSE.value, EmbeddingComputeKernel\n .SPARSE.value, EmbeddingComputeKernel.BATCHED_FUSED.value])\n', (21126, 21250), True, 'import hypothesis.strategies as st\n'), ((10954, 10974), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (10966, 10974), False, 'import torch\n'), ((14188, 14203), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14201, 14203), False, 'import torch\n'), ((14773, 14808), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.p', 'self.val'], {}), '(self.p, self.val)\n', (14790, 14808), True, 'import torch.nn as nn\n'), ((16234, 16256), 'torchrec.distributed.model_parallel.get_default_sharders', 'get_default_sharders', ([], {}), '()\n', (16254, 16256), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, 
get_default_sharders\n'), ((16947, 16969), 'torchrec.distributed.model_parallel.get_default_sharders', 'get_default_sharders', ([], {}), '()\n', (16967, 16969), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, get_default_sharders\n'), ((16990, 17010), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (17002, 17010), False, 'import torch\n'), ((18870, 18895), 'torch.equal', 'torch.equal', (['loss1', 'loss2'], {}), '(loss1, loss2)\n', (18881, 18895), False, 'import torch\n'), ((18925, 18950), 'torch.equal', 'torch.equal', (['pred1', 'pred2'], {}), '(pred1, pred2)\n', (18936, 18950), False, 'import torch\n'), ((19479, 19500), 'torch.equal', 'torch.equal', (['src', 'dst'], {}), '(src, dst)\n', (19490, 19500), False, 'import torch\n'), ((20772, 20793), 'torch.equal', 'torch.equal', (['src', 'dst'], {}), '(src, dst)\n', (20783, 20793), False, 'import torch\n'), ((2683, 2744), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['sharder_type', 'sharding_type', 'kernel_type'], {}), '(sharder_type, sharding_type, kernel_type)\n', (2702, 2744), False, 'from torchrec.distributed.test_utils.test_model_parallel import ModelParallelTestShared, SharderType, create_test_sharder\n'), ((3761, 3822), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['sharder_type', 'sharding_type', 'kernel_type'], {}), '(sharder_type, sharding_type, kernel_type)\n', (3780, 3822), False, 'from torchrec.distributed.test_utils.test_model_parallel import ModelParallelTestShared, SharderType, create_test_sharder\n'), ((4950, 5011), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['sharder_type', 'sharding_type', 'kernel_type'], {}), '(sharder_type, sharding_type, kernel_type)\n', (4969, 5011), False, 'from torchrec.distributed.test_utils.test_model_parallel import ModelParallelTestShared, SharderType, 
create_test_sharder\n'), ((5189, 5226), 'torchrec.distributed.planner.ParameterConstraints', 'ParameterConstraints', ([], {'min_partition': '(4)'}), '(min_partition=4)\n', (5209, 5226), False, 'from torchrec.distributed.planner import EmbeddingShardingPlanner, ParameterConstraints, Topology\n'), ((6367, 6428), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['sharder_type', 'sharding_type', 'kernel_type'], {}), '(sharder_type, sharding_type, kernel_type)\n', (6386, 6428), False, 'from torchrec.distributed.test_utils.test_model_parallel import ModelParallelTestShared, SharderType, create_test_sharder\n'), ((7522, 7583), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['sharder_type', 'sharding_type', 'kernel_type'], {}), '(sharder_type, sharding_type, kernel_type)\n', (7541, 7583), False, 'from torchrec.distributed.test_utils.test_model_parallel import ModelParallelTestShared, SharderType, create_test_sharder\n'), ((8701, 8762), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['sharder_type', 'sharding_type', 'kernel_type'], {}), '(sharder_type, sharding_type, kernel_type)\n', (8720, 8762), False, 'from torchrec.distributed.test_utils.test_model_parallel import ModelParallelTestShared, SharderType, create_test_sharder\n'), ((8975, 9012), 'torchrec.distributed.planner.ParameterConstraints', 'ParameterConstraints', ([], {'min_partition': '(4)'}), '(min_partition=4)\n', (8995, 9012), False, 'from torchrec.distributed.planner import EmbeddingShardingPlanner, ParameterConstraints, Topology\n'), ((10091, 10152), 'torchrec.distributed.test_utils.test_model_parallel.create_test_sharder', 'create_test_sharder', (['sharder_type', 'sharding_type', 'kernel_type'], {}), '(sharder_type, sharding_type, kernel_type)\n', (10110, 10152), False, 'from torchrec.distributed.test_utils.test_model_parallel import ModelParallelTestShared, 
SharderType, create_test_sharder\n'), ((11013, 11175), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""large_table"""', 'embedding_dim': 'embedding_dim', 'num_embeddings': 'num_embeddings', 'feature_names': "['my_feature']", 'pooling': 'PoolingType.SUM'}), "(name='large_table', embedding_dim=embedding_dim,\n num_embeddings=num_embeddings, feature_names=['my_feature'], pooling=\n PoolingType.SUM)\n", (11031, 11175), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig, PoolingType\n'), ((13937, 13957), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (13949, 13957), False, 'import torch\n'), ((14568, 14618), 'torch.empty', 'torch.empty', (['(3)'], {'dtype': 'torch.float32', 'device': 'device'}), '(3, dtype=torch.float32, device=device)\n', (14579, 14618), False, 'import torch\n'), ((16509, 16531), 'torchrec.distributed.model_parallel.get_default_sharders', 'get_default_sharders', ([], {}), '()\n', (16529, 16531), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, get_default_sharders\n'), ((17231, 17253), 'torchrec.distributed.model_parallel.get_default_sharders', 'get_default_sharders', ([], {}), '()\n', (17251, 17253), False, 'from torchrec.distributed.model_parallel import DistributedModelParallel, get_default_sharders\n'), ((18150, 18181), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (18179, 18181), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder, EmbeddingBagSharder, ShardedEmbeddingBagCollection\n'), ((18201, 18222), 'torchrec.distributed.embeddingbag.EmbeddingBagSharder', 'EmbeddingBagSharder', ([], {}), '()\n', (18220, 18222), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder, EmbeddingBagSharder, ShardedEmbeddingBagCollection\n'), ((19602, 19633), 
'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (19631, 19633), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder, EmbeddingBagSharder, ShardedEmbeddingBagCollection\n'), ((19653, 19674), 'torchrec.distributed.embeddingbag.EmbeddingBagSharder', 'EmbeddingBagSharder', ([], {}), '()\n', (19672, 19674), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder, EmbeddingBagSharder, ShardedEmbeddingBagCollection\n'), ((16379, 16447), 'torchrec.distributed.planner.Topology', 'Topology', ([], {'world_size': 'env.world_size', 'compute_device': 'self.device.type'}), '(world_size=env.world_size, compute_device=self.device.type)\n', (16387, 16447), False, 'from torchrec.distributed.planner import EmbeddingShardingPlanner, ParameterConstraints, Topology\n'), ((17101, 17169), 'torchrec.distributed.planner.Topology', 'Topology', ([], {'world_size': 'env.world_size', 'compute_device': 'self.device.type'}), '(world_size=env.world_size, compute_device=self.device.type)\n', (17109, 17169), False, 'from torchrec.distributed.planner import EmbeddingShardingPlanner, ParameterConstraints, Topology\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import cast, List, Tuple
from unittest.mock import MagicMock

import torch

from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.proposers import (
    GreedyProposer,
    GridSearchProposer,
    UniformProposer,
)
from torchrec.distributed.planner.types import ShardingOption, Topology
from torchrec.distributed.test_utils.test_model import TestSparseNN
from torchrec.distributed.types import ModuleSharder, ShardingType
from torchrec.modules.embedding_configs import EmbeddingBagConfig
class TestProposers(unittest.TestCase):
    """Unit tests for the planner proposers: greedy, uniform, and grid search."""

    def setUp(self) -> None:
        # All proposers are exercised against the same 2-rank CUDA topology.
        topology = Topology(world_size=2, compute_device="cuda")
        self.enumerator = EmbeddingEnumerator(topology=topology)
        self.greedy_proposer = GreedyProposer()
        self.uniform_proposer = UniformProposer()
        self.grid_search_proposer = GridSearchProposer()

    @staticmethod
    def _summarize(proposal: List[ShardingOption]) -> List[Tuple[str, str, str]]:
        """Sort ``proposal`` in place by (max shard perf, table name) and
        return one (name, sharding_type, compute_kernel) tuple per candidate.

        Extracted because the greedy and uniform tests previously duplicated
        this sort-and-collect logic inline.
        """
        proposal.sort(
            key=lambda sharding_option: (
                max(shard.perf for shard in sharding_option.shards),
                sharding_option.name,
            )
        )
        return [
            (
                candidate.name,
                candidate.sharding_type,
                candidate.compute_kernel,
            )
            for candidate in proposal
        ]

    def test_greedy_two_table(self) -> None:
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100,
                embedding_dim=10,
                name="table_0",
                feature_names=["feature_0"],
            ),
            EmbeddingBagConfig(
                num_embeddings=100,
                embedding_dim=10,
                name="table_1",
                feature_names=["feature_1"],
            ),
        ]
        model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
        search_space = self.enumerator.enumerate(
            module=model,
            sharders=[
                cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())
            ],
        )
        self.greedy_proposer.load(search_space)

        # Simulate the first five iterations, accepting every proposal.
        output = []
        for _ in range(5):
            proposal = cast(List[ShardingOption], self.greedy_proposer.propose())
            output.append(self._summarize(proposal))
            self.greedy_proposer.feedback(partitionable=True)

        expected_output = [
            [
                ("table_0", "row_wise", "batched_fused"),
                ("table_1", "row_wise", "batched_fused"),
            ],
            [
                ("table_0", "table_row_wise", "batched_fused"),
                ("table_1", "row_wise", "batched_fused"),
            ],
            [
                ("table_1", "row_wise", "batched_fused"),
                ("table_0", "data_parallel", "batched_dense"),
            ],
            [
                ("table_1", "table_row_wise", "batched_fused"),
                ("table_0", "data_parallel", "batched_dense"),
            ],
            [
                ("table_0", "data_parallel", "batched_dense"),
                ("table_1", "data_parallel", "batched_dense"),
            ],
        ]

        self.assertEqual(expected_output, output)

    def test_uniform_three_table(self) -> None:
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100 * i,
                embedding_dim=10 * i,
                name="table_" + str(i),
                feature_names=["feature_" + str(i)],
            )
            for i in range(1, 4)
        ]
        model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
        mock_ebc_sharder = cast(
            ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder()
        )
        # TODO update this test for CW and TWCW sharding
        mock_ebc_sharder.sharding_types = MagicMock(
            return_value=[
                ShardingType.DATA_PARALLEL.value,
                ShardingType.TABLE_WISE.value,
                ShardingType.ROW_WISE.value,
                ShardingType.TABLE_ROW_WISE.value,
            ]
        )
        self.maxDiff = None

        search_space = self.enumerator.enumerate(
            module=model, sharders=[mock_ebc_sharder]
        )
        self.uniform_proposer.load(search_space)

        # Drain the proposer, accepting every proposal until it is exhausted.
        output = []
        proposal = self.uniform_proposer.propose()
        while proposal:
            output.append(self._summarize(proposal))
            self.uniform_proposer.feedback(partitionable=True)
            proposal = self.uniform_proposer.propose()

        expected_output = [
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_2",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_3",
                    "data_parallel",
                    "batched_dense",
                ),
            ],
            [
                (
                    "table_1",
                    "table_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "table_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "table_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "row_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "row_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "row_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "table_row_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "table_row_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "table_row_wise",
                    "batched_fused",
                ),
            ],
        ]

        self.assertEqual(expected_output, output)

    def test_grid_search_three_table(self) -> None:
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100 * i,
                embedding_dim=10 * i,
                name="table_" + str(i),
                feature_names=["feature_" + str(i)],
            )
            for i in range(1, 4)
        ]
        model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
        search_space = self.enumerator.enumerate(
            module=model,
            sharders=[
                cast(ModuleSharder[torch.nn.Module], EmbeddingBagCollectionSharder())
            ],
        )
        """
        All sharding types but DP will have 3 possible compute kernels after pruning:
            - batched_fused
            - batched_fused_uvm_caching
            - batched_fused_uvm
        DP will have 1 possible compute kernel: batched_dense
        So the total number of pruned options will be:
            (num_sharding_types - 1) * 3 + 1 = 16
        """
        num_pruned_options = (len(ShardingType) - 1) * 3 + 1
        self.grid_search_proposer.load(search_space)
        for (
            sharding_options
        ) in self.grid_search_proposer._sharding_options_by_fqn.values():
            # number of sharding types after pruning is number of sharding types * 3
            # 3 compute kernels batched_fused/batched_dense, batched_fused_uvm_caching, batched_fused_uvm
            self.assertEqual(len(sharding_options), num_pruned_options)

        # Grid search enumerates the full cross product of per-table options.
        num_proposals = 0
        proposal = self.grid_search_proposer.propose()
        while proposal:
            self.grid_search_proposer.feedback(partitionable=True)
            proposal = self.grid_search_proposer.propose()
            num_proposals += 1

        self.assertEqual(num_pruned_options ** len(tables), num_proposals)
| [
"torchrec.distributed.planner.proposers.GreedyProposer",
"torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder",
"torchrec.distributed.planner.enumerators.EmbeddingEnumerator",
"torchrec.distributed.planner.proposers.GridSearchProposer",
"torchrec.distributed.planner.proposers.UniformProposer",
... | [((960, 1005), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': '(2)', 'compute_device': '"""cuda"""'}), "(world_size=2, compute_device='cuda')\n", (968, 1005), False, 'from torchrec.distributed.planner.types import ShardingOption, Topology\n'), ((1032, 1070), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology'}), '(topology=topology)\n', (1051, 1070), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((1102, 1118), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {}), '()\n', (1116, 1118), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, GridSearchProposer, UniformProposer\n'), ((1151, 1168), 'torchrec.distributed.planner.proposers.UniformProposer', 'UniformProposer', ([], {}), '()\n', (1166, 1168), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, GridSearchProposer, UniformProposer\n'), ((1205, 1225), 'torchrec.distributed.planner.proposers.GridSearchProposer', 'GridSearchProposer', ([], {}), '()\n', (1223, 1225), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, GridSearchProposer, UniformProposer\n'), ((4264, 4427), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '[ShardingType.DATA_PARALLEL.value, ShardingType.TABLE_WISE.value,\n ShardingType.ROW_WISE.value, ShardingType.TABLE_ROW_WISE.value]'}), '(return_value=[ShardingType.DATA_PARALLEL.value, ShardingType.\n TABLE_WISE.value, ShardingType.ROW_WISE.value, ShardingType.\n TABLE_ROW_WISE.value])\n', (4273, 4427), False, 'from unittest.mock import MagicMock\n'), ((1303, 1408), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_0"""', 'feature_names': "['feature_0']"}), "(num_embeddings=100, embedding_dim=10, name='table_0',\n 
feature_names=['feature_0'])\n", (1321, 1408), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1497, 1602), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_1"""', 'feature_names': "['feature_1']"}), "(num_embeddings=100, embedding_dim=10, name='table_1',\n feature_names=['feature_1'])\n", (1515, 1602), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((4123, 4154), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (4152, 4154), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n'), ((1748, 1768), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (1760, 1768), False, 'import torch\n'), ((4023, 4043), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (4035, 4043), False, 'import torch\n'), ((7733, 7753), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (7745, 7753), False, 'import torch\n'), ((1922, 1953), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (1951, 1953), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n'), ((7907, 7938), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (7936, 7938), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.shard_estimators import EmbeddingPerfEstimator
from torchrec.distributed.planner.types import Topology
from torchrec.distributed.tests.test_model import TestSparseNN
from torchrec.modules.embedding_configs import EmbeddingBagConfig
class TestEmbeddingPerfEstimator(unittest.TestCase):
    """Checks the wall-time perf estimates produced for every
    (compute kernel, sharding type) combination of a single table."""

    def setUp(self) -> None:
        # Two-rank CUDA topology; the estimator is wired into the enumerator.
        topology = Topology(world_size=2, compute_device="cuda")
        self.estimator = EmbeddingPerfEstimator(topology=topology)
        self.enumerator = EmbeddingEnumerator(
            topology=topology, estimator=self.estimator
        )

    def test_1_table_perf(self) -> None:
        """Enumerating one 100x10 table must reproduce the golden per-shard perfs."""
        configs = [
            EmbeddingBagConfig(
                num_embeddings=100,
                embedding_dim=10,
                name="table_0",
                feature_names=["feature_0"],
            )
        ]
        model = TestSparseNN(tables=configs, weighted_tables=[])
        sharding_options = self.enumerator.enumerate(
            module=model,
            sharders=[EmbeddingBagCollectionSharder()],
        )

        # Golden values keyed by (compute_kernel, sharding_type); one perf
        # entry per shard.
        expected_perfs = {
            ("dense", "data_parallel"): [398.5666507405638, 398.5666507405638],
            ("batched_dense", "data_parallel"): [378.9966555183946, 378.9966555183946],
            ("dense", "table_wise"): [3543.7999681477945],
            ("batched_dense", "table_wise"): [3504.659977703456],
            ("batched_fused", "table_wise"): [3458.9966555183946],
            ("sparse", "table_wise"): [3543.7999681477945],
            ("batched_fused_uvm", "table_wise"): [83727.05882352941],
            ("batched_fused_uvm_caching", "table_wise"): [22014.604904632153],
            ("dense", "row_wise"): [3478.566650740564, 3478.566650740564],
            ("batched_dense", "row_wise"): [3458.9966555183946, 3458.9966555183946],
            ("batched_fused", "row_wise"): [3436.1649944258643, 3436.1649944258643],
            ("sparse", "row_wise"): [3478.566650740564, 3478.566650740564],
            ("batched_fused_uvm", "row_wise"): [43570.19607843138, 43570.19607843138],
            ("batched_fused_uvm_caching", "row_wise"): [
                12713.969118982744,
                12713.969118982744,
            ],
            ("dense", "table_row_wise"): [3546.833317407231, 3546.833317407231],
            ("batched_dense", "table_row_wise"): [
                3527.2633221850615,
                3527.2633221850615,
            ],
            ("batched_fused", "table_row_wise"): [3504.431661092531, 3504.431661092531],
            ("sparse", "table_row_wise"): [3546.833317407231, 3546.833317407231],
            ("batched_fused_uvm", "table_row_wise"): [
                43638.46274509804,
                43638.46274509804,
            ],
            ("batched_fused_uvm_caching", "table_row_wise"): [
                12782.23578564941,
                12782.23578564941,
            ],
        }

        actual_perfs = {}
        for option in sharding_options:
            key = (option.compute_kernel, option.sharding_type)
            actual_perfs[key] = [shard.perf for shard in option.shards]

        self.assertEqual(expected_perfs, actual_perfs)
| [
"torchrec.distributed.planner.shard_estimators.EmbeddingPerfEstimator",
"torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder",
"torchrec.distributed.planner.enumerators.EmbeddingEnumerator",
"torchrec.distributed.tests.test_model.TestSparseNN",
"torchrec.modules.embedding_configs.EmbeddingBagCon... | [((776, 821), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': '(2)', 'compute_device': '"""cuda"""'}), "(world_size=2, compute_device='cuda')\n", (784, 821), False, 'from torchrec.distributed.planner.types import Topology\n'), ((847, 888), 'torchrec.distributed.planner.shard_estimators.EmbeddingPerfEstimator', 'EmbeddingPerfEstimator', ([], {'topology': 'topology'}), '(topology=topology)\n', (869, 888), False, 'from torchrec.distributed.planner.shard_estimators import EmbeddingPerfEstimator\n'), ((915, 979), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology', 'estimator': 'self.estimator'}), '(topology=topology, estimator=self.estimator)\n', (934, 979), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((1282, 1329), 'torchrec.distributed.tests.test_model.TestSparseNN', 'TestSparseNN', ([], {'tables': 'tables', 'weighted_tables': '[]'}), '(tables=tables, weighted_tables=[])\n', (1294, 1329), False, 'from torchrec.distributed.tests.test_model import TestSparseNN\n'), ((1075, 1180), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_0"""', 'feature_names': "['feature_0']"}), "(num_embeddings=100, embedding_dim=10, name='table_0',\n feature_names=['feature_0'])\n", (1093, 1180), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1432, 1463), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (1461, 1463), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import cast, Dict, List, Optional, Tuple
import torch
from torch import nn
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.constants import (
BATCHED_COPY_PERF_FACTOR,
BIGINT_DTYPE,
BWD_COMPUTE_MULTIPLIER,
CROSS_NODE_BANDWIDTH,
DP_ELEMENTWISE_KERNELS_PERF_FACTOR,
FULL_BLOCK_EMB_DIM,
HALF_BLOCK_PENALTY,
INTRA_NODE_BANDWIDTH,
kernel_bw_lookup,
QUARTER_BLOCK_PENALTY,
UVM_CACHING_RATIO,
WEIGHTED_KERNEL_MULTIPLIER,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
PlannerError,
ShardEstimator,
ShardingOption,
Storage,
Topology,
)
from torchrec.distributed.planner.utils import prod, sharder_name
from torchrec.distributed.types import ModuleSharder, ShardingType
class EmbeddingPerfEstimator(ShardEstimator):
    """
    Embedding Wall Time Perf Estimator.

    Computes a relative wall-time perf value for every shard of every
    sharding option and writes it onto ``shard.perf`` in place.
    """

    def __init__(
        self,
        topology: Topology,
        constraints: Optional[Dict[str, ParameterConstraints]] = None,
    ) -> None:
        self._topology = topology
        self._constraints = constraints

    def _get_constraint(self, name: str) -> Optional[ParameterConstraints]:
        """Return the ParameterConstraints registered for ``name``, if any."""
        if self._constraints:
            return self._constraints.get(name)
        return None

    def estimate(
        self,
        sharding_options: List[ShardingOption],
        sharder_map: Optional[Dict[str, ModuleSharder[nn.Module]]] = None,
    ) -> None:
        """Attach a perf estimate to every shard of every sharding option.

        Args:
            sharding_options: options to annotate in place.
            sharder_map: unused here; part of the ShardEstimator interface.
        """
        for sharding_option in sharding_options:
            # Single lookup instead of repeating the
            # `self._constraints and self._constraints.get(name)` pattern
            # (which also performed two dict lookups) three times.
            constraint = self._get_constraint(sharding_option.name)
            caching_ratio = constraint.caching_ratio if constraint else None
            num_objects = (
                cast(List[float], constraint.num_objects)
                if constraint and constraint.num_objects
                else [1.0] * sharding_option.num_inputs
            )
            batch_sizes = (
                cast(List[int], constraint.batch_sizes)
                if constraint and constraint.batch_sizes
                else [sharding_option.batch_size] * sharding_option.num_inputs
            )
            shard_perfs = perf_func_emb_wall_time(
                shard_sizes=[shard.size for shard in sharding_option.shards],
                compute_kernel=sharding_option.compute_kernel,
                compute_device=self._topology.compute_device,
                sharding_type=sharding_option.sharding_type,
                batch_sizes=batch_sizes,
                world_size=self._topology.world_size,
                local_world_size=self._topology.local_world_size,
                input_lengths=sharding_option.input_lengths,
                input_data_type_size=BIGINT_DTYPE,
                output_data_type_size=sharding_option.tensor.element_size(),
                num_objects=num_objects,
                # Fall back to the planner defaults when the topology does not
                # carry explicit bandwidth attributes.
                bw_intra_host=getattr(
                    self._topology, "intra_host_bw", INTRA_NODE_BANDWIDTH
                ),
                bw_inter_host=getattr(
                    self._topology, "inter_host_bw", CROSS_NODE_BANDWIDTH
                ),
                is_pooled=sharding_option.is_pooled,
                is_weighted=bool(
                    getattr(sharding_option.module[1], "is_weighted", False)
                ),
                has_feature_processor=bool(
                    getattr(sharding_option.module[1], "feature_processor", None)
                ),
                caching_ratio=caching_ratio,
            )
            for shard, perf in zip(sharding_option.shards, shard_perfs):
                shard.perf = perf
def perf_func_emb_wall_time(
    shard_sizes: List[List[int]],
    compute_kernel: str,
    compute_device: str,
    sharding_type: str,
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    input_data_type_size: float,
    output_data_type_size: float,
    num_objects: List[float],
    bw_intra_host: float,
    bw_inter_host: float,
    is_pooled: bool,
    is_weighted: bool = False,
    has_feature_processor: bool = False,
    caching_ratio: Optional[float] = None,
) -> List[float]:
    """
    Models the perf of each shard as a relative wall time.

    Args:
        shard_sizes (List[List[int]]): (local_rows, local_cols) of each shard.
        compute_kernel (str): compute kernel.
        compute_device (str): compute device.
        sharding_type (str): tw, rw, cw, twrw, dp.
        batch_sizes (List[int]): batch size per input feature.
        world_size (int): number of devices across all hosts.
        local_world_size (int): number of devices on each host.
        input_lengths (List[float]): average number of lookups per input
            query feature.
        input_data_type_size (float): data type size of the distributed
            data_parallel input.
        output_data_type_size (float): data type size of the distributed
            data_parallel output.
        num_objects (List[float]): number of objects per sample, typically 1.0.
        bw_intra_host (float): bandwidth within a single host, e.g. between
            multiple threads.
        bw_inter_host (float): bandwidth between two hosts, e.g. between
            multiple machines.
        is_pooled (bool): True if embedding output is pooled (ie. EmbeddingBag),
            False if unpooled/sequential (ie. Embedding).
        is_weighted (bool = False): if the module is an EBC and is weighted,
            typically signifying an id score list feature.
        has_feature_processor (bool = False): if the module has a feature
            processor.
        caching_ratio (Optional[float] = None): cache ratio used to determine
            the bandwidth of the device.

    Returns:
        List[float]: the perf for each shard.

    Raises:
        PlannerError: if no kernel bandwidth exists for the device/kernel combo.
        ValueError: if the sharding type is unrecognized.
    """
    device_bw = kernel_bw_lookup(compute_device, compute_kernel, caching_ratio)
    if device_bw is None:
        raise PlannerError(
            f"No kernel bandwidth exists for this combo of compute device: {compute_device}, compute kernel: {compute_kernel}"
        )

    perfs: List[float] = []
    for hash_size, emb_dim in shard_sizes:
        # Keyword arguments shared by every per-sharding-type perf model.
        common_kwargs = dict(
            batch_sizes=batch_sizes,
            world_size=world_size,
            local_world_size=local_world_size,
            input_lengths=input_lengths,
            emb_dim=emb_dim,
            output_data_type_size=output_data_type_size,
            num_objects=num_objects,
            device_bw=device_bw,
            bw_inter_host=bw_inter_host,
            is_pooled=is_pooled,
            is_weighted=is_weighted,
            has_feature_processor=has_feature_processor,
        )
        if sharding_type in (
            ShardingType.TABLE_WISE.value,
            ShardingType.COLUMN_WISE.value,
            ShardingType.TABLE_COLUMN_WISE.value,
        ):
            shard_perf = _get_tw_sharding_perf(
                input_data_type_size=input_data_type_size,
                bw_intra_host=bw_intra_host,
                **common_kwargs,
            )
        elif sharding_type == ShardingType.ROW_WISE.value:
            shard_perf = _get_rw_sharding_perf(
                input_data_type_size=input_data_type_size,
                bw_intra_host=bw_intra_host,
                **common_kwargs,
            )
        elif sharding_type == ShardingType.TABLE_ROW_WISE.value:
            shard_perf = _get_twrw_sharding_perf(
                input_data_type_size=input_data_type_size,
                bw_intra_host=bw_intra_host,
                **common_kwargs,
            )
        elif sharding_type == ShardingType.DATA_PARALLEL.value:
            # DP reads replicated outputs, hence the output dtype size is used
            # for the input as well; it also all-reduces the full gradient.
            shard_perf = _get_dp_sharding_perf(
                grad_num_elem=hash_size * emb_dim,
                input_data_type_size=output_data_type_size,
                **common_kwargs,
            )
        else:
            raise ValueError(
                f"Unrecognized or unsupported sharding type provided: {sharding_type}"
            )
        perfs.append(shard_perf)

    return perfs
def _get_tw_sharding_perf(
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    emb_dim: int,
    input_data_type_size: float,
    output_data_type_size: float,
    num_objects: List[float],
    device_bw: float,
    bw_inter_host: float,
    bw_intra_host: float,
    is_pooled: bool,
    is_weighted: bool = False,
    has_feature_processor: bool = False,
) -> float:
    """Estimate the time-like cost (bytes divided by bandwidths) of one
    table-wise shard: forward/backward compute on the device plus the
    output all-to-all communication.
    """
    batch_inputs = sum(
        length * objs * bsz
        for length, objs, bsz in zip(input_lengths, num_objects, batch_sizes)
    )
    if is_pooled:
        batch_outputs = sum(objs * bsz for objs, bsz in zip(num_objects, batch_sizes))
    else:
        batch_outputs = batch_inputs
    input_read_size = math.ceil(batch_inputs * world_size * input_data_type_size)
    if is_weighted or has_feature_processor:
        # per-sample weights are read alongside the ids
        input_read_size *= 2
    # minimum embedding dim is set to 32 due to kernel usage
    embedding_lookup_size = (
        batch_inputs * world_size * max(emb_dim, 32) * output_data_type_size
    )
    output_write_size = batch_outputs * world_size * emb_dim * output_data_type_size
    # embedding dim below 128 reduces kernel efficiency
    if emb_dim >= FULL_BLOCK_EMB_DIM:
        block_usage_penalty = 1
    elif emb_dim >= 64:
        block_usage_penalty = HALF_BLOCK_PENALTY
    else:  # emb_dim >= 32
        block_usage_penalty = QUARTER_BLOCK_PENALTY
    # cross-host bandwidth applies once the table's output leaves the host
    comms_bw = bw_inter_host if world_size > local_world_size else bw_intra_host
    fwd_comms = output_write_size / comms_bw
    fwd_compute = (
        (input_read_size + embedding_lookup_size + output_write_size)
        * block_usage_penalty
        / device_bw
    )
    bwd_comms = fwd_comms
    if is_weighted or has_feature_processor:
        bwd_grad_indice_weights_kernel = fwd_compute * WEIGHTED_KERNEL_MULTIPLIER
    else:
        bwd_grad_indice_weights_kernel = 0
    # includes fused optimizers
    bwd_compute = fwd_compute * BWD_COMPUTE_MULTIPLIER
    # in order of model parallel execution, starting with:
    # BWD DP -> BWD MP ... FWD MP -> FWD DP
    return (
        bwd_comms
        + bwd_grad_indice_weights_kernel
        + bwd_compute
        + fwd_compute
        + fwd_comms
    )
def _get_rw_sharding_perf(
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    emb_dim: int,
    input_data_type_size: float,
    output_data_type_size: float,
    num_objects: List[float],
    device_bw: float,
    bw_inter_host: float,
    bw_intra_host: float,
    is_pooled: bool,
    is_weighted: bool = False,
    has_feature_processor: bool = False,
) -> float:
    """Estimate the time-like cost of one row-wise shard. Rows are split
    across all ranks, so the pooling work per rank is the global input
    divided by world_size; a batched copy is added on the backward pass.
    """
    batch_inputs = (
        sum(
            length * objs * bsz
            for length, objs, bsz in zip(input_lengths, num_objects, batch_sizes)
        )
        / world_size
    )
    if is_pooled:
        batch_outputs = sum(objs * bsz for objs, bsz in zip(num_objects, batch_sizes))
    else:
        batch_outputs = batch_inputs
    input_read_size = math.ceil(batch_inputs * world_size * input_data_type_size)
    if is_weighted or has_feature_processor:
        # per-sample weights are read alongside the ids
        input_read_size *= 2
    embedding_lookup_size = batch_inputs * world_size * emb_dim * output_data_type_size
    output_write_size = batch_outputs * world_size * emb_dim * output_data_type_size
    comms_bw = bw_inter_host if world_size > local_world_size else bw_intra_host
    fwd_comms = output_write_size / comms_bw
    fwd_compute = (
        input_read_size + embedding_lookup_size + output_write_size
    ) / device_bw
    bwd_comms = fwd_comms
    bwd_batched_copy = output_write_size * BATCHED_COPY_PERF_FACTOR / device_bw
    if is_weighted or has_feature_processor:
        bwd_grad_indice_weights_kernel = fwd_compute * WEIGHTED_KERNEL_MULTIPLIER
    else:
        bwd_grad_indice_weights_kernel = 0
    bwd_compute = fwd_compute * BWD_COMPUTE_MULTIPLIER
    return (
        bwd_comms
        + bwd_batched_copy
        + bwd_grad_indice_weights_kernel
        + bwd_compute
        + fwd_compute
        + fwd_comms
    )
def _get_twrw_sharding_perf(
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    emb_dim: int,
    input_data_type_size: float,
    output_data_type_size: float,
    num_objects: List[float],
    device_bw: float,
    bw_inter_host: float,
    bw_intra_host: float,
    is_pooled: bool,
    is_weighted: bool = False,
    has_feature_processor: bool = False,
) -> float:
    """Estimate the time-like cost of one table-row-wise shard. Rows are
    split within a host (divide by local_world_size); output first travels
    intra-host, plus an inter-host hop when the job spans multiple hosts.
    """
    batch_inputs = (
        sum(
            length * objs * bsz
            for length, objs, bsz in zip(input_lengths, num_objects, batch_sizes)
        )
        / local_world_size
    )
    if is_pooled:
        batch_outputs = sum(objs * bsz for objs, bsz in zip(num_objects, batch_sizes))
    else:
        batch_outputs = batch_inputs
    input_read_size = math.ceil(batch_inputs * world_size * input_data_type_size)
    if is_weighted or has_feature_processor:
        # per-sample weights are read alongside the ids
        input_read_size *= 2
    embedding_lookup_size = batch_inputs * world_size * emb_dim * output_data_type_size
    output_write_size = batch_outputs * world_size * emb_dim * output_data_type_size
    fwd_comms = output_write_size / bw_intra_host
    if world_size > local_world_size:
        # only the local host's fraction of output crosses hosts
        fwd_comms += output_write_size * (local_world_size / world_size) / bw_inter_host
    fwd_compute = (
        input_read_size + embedding_lookup_size + output_write_size
    ) / device_bw
    bwd_comms = fwd_comms
    if is_weighted or has_feature_processor:
        bwd_grad_indice_weights_kernel = fwd_compute * WEIGHTED_KERNEL_MULTIPLIER
    else:
        bwd_grad_indice_weights_kernel = 0
    bwd_batched_copy = output_write_size * BATCHED_COPY_PERF_FACTOR / device_bw
    bwd_compute = fwd_compute * BWD_COMPUTE_MULTIPLIER
    return (
        bwd_comms
        + bwd_batched_copy
        + bwd_grad_indice_weights_kernel
        + bwd_compute
        + fwd_compute
        + fwd_comms
    )
def _get_dp_sharding_perf(
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    grad_num_elem: int,
    emb_dim: int,
    input_data_type_size: float,
    output_data_type_size: float,
    num_objects: List[float],
    device_bw: float,
    bw_inter_host: float,
    is_pooled: bool,
    is_weighted: bool = False,
    has_feature_processor: bool = False,
) -> float:
    """Estimate the time-like cost of a data-parallel replica: local
    lookup compute plus the gradient all-reduce and elementwise optimizer
    work over the full table (grad_num_elem elements).
    """
    batch_inputs = sum(
        length * objs * bsz
        for length, objs, bsz in zip(input_lengths, num_objects, batch_sizes)
    )
    if is_pooled:
        batch_outputs = sum(objs * bsz for objs, bsz in zip(num_objects, batch_sizes))
    else:
        batch_outputs = batch_inputs
    input_read_size = math.ceil(batch_inputs * input_data_type_size)
    if is_weighted or has_feature_processor:
        # per-sample weights are read alongside the ids
        input_read_size *= 2
    embedding_lookup_size = batch_inputs * emb_dim * output_data_type_size
    output_write_size = batch_outputs * emb_dim * output_data_type_size
    table_size = grad_num_elem * output_data_type_size
    fwd_compute = (
        input_read_size + embedding_lookup_size + output_write_size
    ) / device_bw
    num_nodes = min(world_size / local_world_size, 2)
    # all-reduce data transfer: https://images.nvidia.com/events/sc15/pdfs/NCCL-Woolley.pdf
    all_reduce = (
        table_size
        * (2 * num_nodes - 1)
        / num_nodes
        / (bw_inter_host * local_world_size)  # 1 NIC per GPU
    )
    # inter host communication constraint
    if world_size > 2 * local_world_size:
        all_reduce *= 2
    # SGD + Fill + BUnary
    optimizer_kernels = table_size * DP_ELEMENTWISE_KERNELS_PERF_FACTOR / device_bw
    bwd_compute = fwd_compute * BWD_COMPUTE_MULTIPLIER
    if is_weighted or has_feature_processor:
        bwd_grad_indice_weights_kernel = fwd_compute * WEIGHTED_KERNEL_MULTIPLIER
    else:
        bwd_grad_indice_weights_kernel = 0
    return (
        all_reduce
        + optimizer_kernels
        + bwd_grad_indice_weights_kernel
        + bwd_compute
        + fwd_compute
    )
class EmbeddingStorageEstimator(ShardEstimator):
    """Estimates the storage (HBM / DDR) each sharding option would use
    and writes the result onto every shard.
    """

    def __init__(
        self,
        topology: Topology,
        constraints: Optional[Dict[str, ParameterConstraints]] = None,
    ) -> None:
        # topology supplies world sizes and compute device; constraints can
        # override per-table batch sizes, num_objects and caching ratio.
        self._topology = topology
        self._constraints = constraints

    def estimate(
        self,
        sharding_options: List[ShardingOption],
        sharder_map: Optional[Dict[str, ModuleSharder[nn.Module]]] = None,
    ) -> None:
        """Populate ``shard.storage`` for every shard of every option.

        Raises:
            ValueError: if ``sharder_map`` is not provided.
        """
        if not sharder_map:
            raise ValueError("sharder map not provided for storage estimator")
        for sharding_option in sharding_options:
            sharder = sharder_map[sharder_name(type(sharding_option.module[1]))]
            # per-table constraint, if one was registered for this option
            constraint = (
                self._constraints.get(sharding_option.name)
                if self._constraints
                else None
            )
            caching_ratio = constraint.caching_ratio if constraint else None
            num_inputs = sharding_option.num_inputs
            if constraint and constraint.num_objects:
                num_objects = cast(List[float], constraint.num_objects)
            else:
                num_objects = [1.0] * num_inputs
            assert len(num_objects) == num_inputs
            if constraint and constraint.batch_sizes:
                batch_sizes = cast(List[int], constraint.batch_sizes)
            else:
                batch_sizes = [sharding_option.batch_size] * num_inputs
            shard_storages = calculate_shard_storages(
                sharder=sharder,
                sharding_type=sharding_option.sharding_type,
                tensor=sharding_option.tensor,
                compute_device=self._topology.compute_device,
                compute_kernel=sharding_option.compute_kernel,
                shard_sizes=[shard.size for shard in sharding_option.shards],
                batch_sizes=batch_sizes,
                world_size=self._topology.world_size,
                local_world_size=self._topology.local_world_size,
                input_lengths=sharding_option.input_lengths,
                num_objects=num_objects,
                caching_ratio=caching_ratio if caching_ratio else UVM_CACHING_RATIO,
                is_pooled=sharding_option.is_pooled,
            )
            for shard, storage in zip(sharding_option.shards, shard_storages):
                shard.storage = storage
def calculate_shard_storages(
    sharder: ModuleSharder[nn.Module],
    sharding_type: str,
    tensor: torch.Tensor,
    compute_device: str,
    compute_kernel: str,
    shard_sizes: List[List[int]],
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    num_objects: List[float],
    caching_ratio: float,
    is_pooled: bool,
) -> List[Storage]:
    """
    Calculates estimated storage sizes for each sharded tensor, comprised of input,
    output, tensor, gradient, and optimizer sizes.
    Args:
        sharder (ModuleSharder[nn.Module]): sharder for module that supports sharding.
        sharding_type (str): provided ShardingType value.
        tensor (torch.Tensor): tensor to be sharded.
        compute_device (str): compute device to be used.
        compute_kernel (str): compute kernel to be used.
        shard_sizes (List[List[int]]): list of dimensions of each sharded tensor.
        batch_sizes (List[int]): batch size for each input feature.
        world_size (int): total number of devices in topology.
        local_world_size (int): total number of devices in host group topology.
        input_lengths (List[float]): average input lengths synonymous with pooling
            factors.
        num_objects (List[float]): average number of objects per sample (typically 1.0)
        caching_ratio (float): ratio of HBM to DDR memory for UVM caching.
        is_pooled (bool): True if embedding output is pooled (ie. EmbeddingBag), False
            if unpooled/sequential (ie. Embedding).
    Returns:
        List[Storage]: storage object for each device in topology.
    """
    input_sizes, output_sizes = _calculate_shard_io_sizes(
        sharding_type=sharding_type,
        batch_sizes=batch_sizes,
        world_size=world_size,
        local_world_size=local_world_size,
        input_lengths=input_lengths,
        emb_dim=tensor.shape[1],
        shard_sizes=shard_sizes,
        input_data_type_size=BIGINT_DTYPE,
        output_data_type_size=tensor.element_size(),
        num_objects=num_objects,
        is_pooled=is_pooled,
    )
    tensor_storage = sharder.storage_usage(tensor, compute_device, compute_kernel)
    hbm_storage: int = tensor_storage.get("hbm", 0)
    ddr_storage: int = tensor_storage.get("ddr", 0)
    # UVM-caching kernels hold the table in DDR with only a caching_ratio
    # fraction resident in HBM.
    if compute_kernel in {
        EmbeddingComputeKernel.BATCHED_FUSED_UVM_CACHING.value,
        EmbeddingComputeKernel.BATCHED_QUANT_UVM_CACHING.value,
    }:
        hbm_storage = round(ddr_storage * caching_ratio)
    hbm_specific_sizes: List[int] = _calculate_storage_specific_sizes(
        storage=hbm_storage,
        shape=tensor.shape,
        shard_sizes=shard_sizes,
        sharding_type=sharding_type,
        compute_kernel=compute_kernel,
    )
    ddr_specific_sizes: List[int] = _calculate_storage_specific_sizes(
        storage=ddr_storage,
        shape=tensor.shape,
        shard_sizes=shard_sizes,
        sharding_type=sharding_type,
        compute_kernel=compute_kernel,
    )
    storages: List[Storage] = []
    for input_size, output_size, hbm_specific_size, ddr_specific_size in zip(
        input_sizes, output_sizes, hbm_specific_sizes, ddr_specific_sizes
    ):
        io_size = input_size + output_size
        # I/O buffers are charged to HBM on cuda and to DDR on cpu.
        hbm_size = io_size + hbm_specific_size if compute_device == "cuda" else 0
        ddr_size = (
            io_size + ddr_specific_size
            if compute_device == "cpu"
            else ddr_specific_size
        )
        storages.append(Storage(hbm=hbm_size, ddr=ddr_size))
    return storages
def _calculate_shard_io_sizes(
    sharding_type: str,
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    emb_dim: int,
    shard_sizes: List[List[int]],
    input_data_type_size: int,
    output_data_type_size: int,
    num_objects: List[float],
    is_pooled: bool,
) -> Tuple[List[int], List[int]]:
    """Dispatch to the per-sharding-type (input_sizes, output_sizes) helper."""
    # kwargs shared by every per-type helper
    common = {
        "batch_sizes": batch_sizes,
        "input_lengths": input_lengths,
        "input_data_type_size": input_data_type_size,
        "output_data_type_size": output_data_type_size,
        "num_objects": num_objects,
        "is_pooled": is_pooled,
    }
    if sharding_type == ShardingType.DATA_PARALLEL.value:
        return _calculate_dp_shard_io_sizes(
            emb_dim=emb_dim, num_shards=len(shard_sizes), **common
        )
    if sharding_type == ShardingType.TABLE_WISE.value:
        return _calculate_tw_shard_io_sizes(
            world_size=world_size, emb_dim=emb_dim, **common
        )
    if sharding_type in {
        ShardingType.COLUMN_WISE.value,
        ShardingType.TABLE_COLUMN_WISE.value,
    }:
        return _calculate_cw_shard_io_sizes(
            world_size=world_size, shard_sizes=shard_sizes, **common
        )
    if sharding_type == ShardingType.ROW_WISE.value:
        return _calculate_rw_shard_io_sizes(
            world_size=world_size, shard_sizes=shard_sizes, **common
        )
    if sharding_type == ShardingType.TABLE_ROW_WISE.value:
        return _calculate_twrw_shard_io_sizes(
            world_size=world_size,
            local_world_size=local_world_size,
            shard_sizes=shard_sizes,
            **common,
        )
    raise ValueError(
        f"Unrecognized or unsupported sharding type provided: {sharding_type}"
    )
def _calculate_dp_shard_io_sizes(
batch_sizes: List[int],
input_lengths: List[float],
emb_dim: int,
num_shards: int,
input_data_type_size: int,
output_data_type_size: int,
num_objects: List[float],
is_pooled: bool,
) -> Tuple[List[int], List[int]]:
batch_inputs = sum(
[x * y * z for x, y, z in zip(input_lengths, num_objects, batch_sizes)]
)
batch_outputs = (
sum([x * y for x, y in zip(num_objects, batch_sizes)])
if is_pooled
else batch_inputs
)
input_sizes = [math.ceil(batch_inputs * input_data_type_size)] * num_shards
output_sizes = [
math.ceil(batch_outputs * emb_dim * output_data_type_size)
] * num_shards
return input_sizes, output_sizes
def _calculate_tw_shard_io_sizes(
batch_sizes: List[int],
world_size: int,
input_lengths: List[float],
emb_dim: int,
input_data_type_size: int,
output_data_type_size: int,
num_objects: List[float],
is_pooled: bool,
) -> Tuple[List[int], List[int]]:
batch_inputs = sum(
[x * y * z for x, y, z in zip(input_lengths, num_objects, batch_sizes)]
)
batch_outputs = (
sum([x * y for x, y in zip(num_objects, batch_sizes)])
if is_pooled
else batch_inputs
)
input_sizes = [math.ceil(batch_inputs * world_size * input_data_type_size)]
output_sizes = [
math.ceil(batch_outputs * world_size * emb_dim * output_data_type_size)
]
return input_sizes, output_sizes
def _calculate_cw_shard_io_sizes(
batch_sizes: List[int],
world_size: int,
input_lengths: List[float],
shard_sizes: List[List[int]],
input_data_type_size: int,
output_data_type_size: int,
num_objects: List[float],
is_pooled: bool,
) -> Tuple[List[int], List[int]]:
batch_inputs = sum(
[x * y * z for x, y, z in zip(input_lengths, num_objects, batch_sizes)]
)
batch_outputs = (
sum([x * y for x, y in zip(num_objects, batch_sizes)])
if is_pooled
else batch_inputs
)
input_sizes = [math.ceil(batch_inputs * world_size * input_data_type_size)] * len(
shard_sizes
)
output_sizes = [
math.ceil(
batch_outputs * world_size * shard_sizes[i][1] * output_data_type_size
)
for i in range(len(shard_sizes))
]
return input_sizes, output_sizes
def _calculate_rw_shard_io_sizes(
batch_sizes: List[int],
world_size: int,
input_lengths: List[float],
shard_sizes: List[List[int]],
input_data_type_size: int,
output_data_type_size: int,
num_objects: List[float],
is_pooled: bool,
) -> Tuple[List[int], List[int]]:
batch_inputs = (
sum([x * y * z for x, y, z in zip(input_lengths, num_objects, batch_sizes)])
/ world_size
)
batch_outputs = (
sum([x * y for x, y in zip(num_objects, batch_sizes)])
if is_pooled
else batch_inputs
)
input_sizes = [
math.ceil(batch_inputs * world_size * input_data_type_size)
if prod(shard) != 0
else 0
for shard in shard_sizes
]
output_sizes = [
math.ceil(
batch_outputs * world_size * shard_sizes[i][1] * output_data_type_size
)
if prod(shard) != 0
else 0
for i, shard in enumerate(shard_sizes)
]
return input_sizes, output_sizes
def _calculate_twrw_shard_io_sizes(
batch_sizes: List[int],
world_size: int,
local_world_size: int,
input_lengths: List[float],
shard_sizes: List[List[int]],
input_data_type_size: int,
output_data_type_size: int,
num_objects: List[float],
is_pooled: bool,
) -> Tuple[List[int], List[int]]:
batch_inputs = (
sum([x * y * z for x, y, z in zip(input_lengths, num_objects, batch_sizes)])
/ local_world_size
)
batch_outputs = (
sum([x * y for x, y in zip(num_objects, batch_sizes)])
if is_pooled
else batch_inputs
)
input_sizes = [
math.ceil(batch_inputs * world_size * input_data_type_size)
if prod(shard) != 0
else 0
for shard in shard_sizes
]
output_sizes = [
math.ceil(
batch_outputs * world_size * shard_sizes[i][1] * output_data_type_size
)
if prod(shard) != 0
else 0
for i, shard in enumerate(shard_sizes)
]
return input_sizes, output_sizes
def _calculate_storage_specific_sizes(
    storage: int,
    shape: torch.Size,
    shard_sizes: List[List[int]],
    sharding_type: str,
    compute_kernel: str,
) -> List[int]:
    """Split a table's total storage across its shards and add optimizer
    state. Data-parallel replicas each hold the full table; otherwise a
    shard is charged its share of the tensor's elements.
    """
    data_parallel = sharding_type == ShardingType.DATA_PARALLEL.value
    tensor_sizes: List[int] = []
    for size in shard_sizes:
        if data_parallel:
            tensor_sizes.append(storage)
        else:
            tensor_sizes.append(math.ceil(storage * prod(size) / prod(shape)))
    # batched-dense kernels are charged 2x extra for optimizer state;
    # other kernels presumably account for it elsewhere — hence 0 here.
    optimizer_factor = (
        2 if compute_kernel == EmbeddingComputeKernel.BATCHED_DENSE.value else 0
    )
    return [
        tensor_size + tensor_size * optimizer_factor for tensor_size in tensor_sizes
    ]
| [
"torchrec.distributed.planner.types.PlannerError",
"torchrec.distributed.planner.types.Storage",
"torchrec.distributed.planner.utils.prod",
"torchrec.distributed.planner.constants.kernel_bw_lookup"
] | [((6323, 6386), 'torchrec.distributed.planner.constants.kernel_bw_lookup', 'kernel_bw_lookup', (['compute_device', 'compute_kernel', 'caching_ratio'], {}), '(compute_device, compute_kernel, caching_ratio)\n', (6339, 6386), False, 'from torchrec.distributed.planner.constants import BATCHED_COPY_PERF_FACTOR, BIGINT_DTYPE, BWD_COMPUTE_MULTIPLIER, CROSS_NODE_BANDWIDTH, DP_ELEMENTWISE_KERNELS_PERF_FACTOR, FULL_BLOCK_EMB_DIM, HALF_BLOCK_PENALTY, INTRA_NODE_BANDWIDTH, kernel_bw_lookup, QUARTER_BLOCK_PENALTY, UVM_CACHING_RATIO, WEIGHTED_KERNEL_MULTIPLIER\n'), ((10736, 10795), 'math.ceil', 'math.ceil', (['(batch_inputs * world_size * input_data_type_size)'], {}), '(batch_inputs * world_size * input_data_type_size)\n', (10745, 10795), False, 'import math\n'), ((12940, 12999), 'math.ceil', 'math.ceil', (['(batch_inputs * world_size * input_data_type_size)'], {}), '(batch_inputs * world_size * input_data_type_size)\n', (12949, 12999), False, 'import math\n'), ((14708, 14767), 'math.ceil', 'math.ceil', (['(batch_inputs * world_size * input_data_type_size)'], {}), '(batch_inputs * world_size * input_data_type_size)\n', (14717, 14767), False, 'import math\n'), ((16495, 16541), 'math.ceil', 'math.ceil', (['(batch_inputs * input_data_type_size)'], {}), '(batch_inputs * input_data_type_size)\n', (16504, 16541), False, 'import math\n'), ((6427, 6565), 'torchrec.distributed.planner.types.PlannerError', 'PlannerError', (['f"""No kernel bandwidth exists for this combo of compute device: {compute_device}, compute kernel: {compute_kernel}"""'], {}), "(\n f'No kernel bandwidth exists for this combo of compute device: {compute_device}, compute kernel: {compute_kernel}'\n )\n", (6439, 6565), False, 'from torchrec.distributed.planner.types import ParameterConstraints, PlannerError, ShardEstimator, ShardingOption, Storage, Topology\n'), ((24293, 24328), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': 'hbm_size', 'ddr': 'ddr_size'}), '(hbm=hbm_size, ddr=ddr_size)\n', 
(24300, 24328), False, 'from torchrec.distributed.planner.types import ParameterConstraints, PlannerError, ShardEstimator, ShardingOption, Storage, Topology\n'), ((28558, 28617), 'math.ceil', 'math.ceil', (['(batch_inputs * world_size * input_data_type_size)'], {}), '(batch_inputs * world_size * input_data_type_size)\n', (28567, 28617), False, 'import math\n'), ((28648, 28719), 'math.ceil', 'math.ceil', (['(batch_outputs * world_size * emb_dim * output_data_type_size)'], {}), '(batch_outputs * world_size * emb_dim * output_data_type_size)\n', (28657, 28719), False, 'import math\n'), ((29454, 29539), 'math.ceil', 'math.ceil', (['(batch_outputs * world_size * shard_sizes[i][1] * output_data_type_size)'], {}), '(batch_outputs * world_size * shard_sizes[i][1] *\n output_data_type_size)\n', (29463, 29539), False, 'import math\n'), ((27801, 27847), 'math.ceil', 'math.ceil', (['(batch_inputs * input_data_type_size)'], {}), '(batch_inputs * input_data_type_size)\n', (27810, 27847), False, 'import math\n'), ((27891, 27949), 'math.ceil', 'math.ceil', (['(batch_outputs * emb_dim * output_data_type_size)'], {}), '(batch_outputs * emb_dim * output_data_type_size)\n', (27900, 27949), False, 'import math\n'), ((29331, 29390), 'math.ceil', 'math.ceil', (['(batch_inputs * world_size * input_data_type_size)'], {}), '(batch_inputs * world_size * input_data_type_size)\n', (29340, 29390), False, 'import math\n'), ((30242, 30301), 'math.ceil', 'math.ceil', (['(batch_inputs * world_size * input_data_type_size)'], {}), '(batch_inputs * world_size * input_data_type_size)\n', (30251, 30301), False, 'import math\n'), ((30413, 30498), 'math.ceil', 'math.ceil', (['(batch_outputs * world_size * shard_sizes[i][1] * output_data_type_size)'], {}), '(batch_outputs * world_size * shard_sizes[i][1] *\n output_data_type_size)\n', (30422, 30498), False, 'import math\n'), ((31285, 31344), 'math.ceil', 'math.ceil', (['(batch_inputs * world_size * input_data_type_size)'], {}), '(batch_inputs * world_size 
* input_data_type_size)\n', (31294, 31344), False, 'import math\n'), ((31456, 31541), 'math.ceil', 'math.ceil', (['(batch_outputs * world_size * shard_sizes[i][1] * output_data_type_size)'], {}), '(batch_outputs * world_size * shard_sizes[i][1] *\n output_data_type_size)\n', (31465, 31541), False, 'import math\n'), ((1887, 1957), 'typing.cast', 'cast', (['List[float]', 'self._constraints[sharding_option.name].num_objects'], {}), '(List[float], self._constraints[sharding_option.name].num_objects)\n', (1891, 1957), False, 'from typing import cast, Dict, List, Optional, Tuple\n'), ((2245, 2313), 'typing.cast', 'cast', (['List[int]', 'self._constraints[sharding_option.name].batch_sizes'], {}), '(List[int], self._constraints[sharding_option.name].batch_sizes)\n', (2249, 2313), False, 'from typing import cast, Dict, List, Optional, Tuple\n'), ((18858, 18928), 'typing.cast', 'cast', (['List[float]', 'self._constraints[sharding_option.name].num_objects'], {}), '(List[float], self._constraints[sharding_option.name].num_objects)\n', (18862, 18928), False, 'from typing import cast, Dict, List, Optional, Tuple\n'), ((19282, 19350), 'typing.cast', 'cast', (['List[int]', 'self._constraints[sharding_option.name].batch_sizes'], {}), '(List[int], self._constraints[sharding_option.name].batch_sizes)\n', (19286, 19350), False, 'from typing import cast, Dict, List, Optional, Tuple\n'), ((30313, 30324), 'torchrec.distributed.planner.utils.prod', 'prod', (['shard'], {}), '(shard)\n', (30317, 30324), False, 'from torchrec.distributed.planner.utils import prod, sharder_name\n'), ((30528, 30539), 'torchrec.distributed.planner.utils.prod', 'prod', (['shard'], {}), '(shard)\n', (30532, 30539), False, 'from torchrec.distributed.planner.utils import prod, sharder_name\n'), ((31356, 31367), 'torchrec.distributed.planner.utils.prod', 'prod', (['shard'], {}), '(shard)\n', (31360, 31367), False, 'from torchrec.distributed.planner.utils import prod, sharder_name\n'), ((31571, 31582), 
'torchrec.distributed.planner.utils.prod', 'prod', (['shard'], {}), '(shard)\n', (31575, 31582), False, 'from torchrec.distributed.planner.utils import prod, sharder_name\n'), ((31948, 31959), 'torchrec.distributed.planner.utils.prod', 'prod', (['shape'], {}), '(shape)\n', (31952, 31959), False, 'from torchrec.distributed.planner.utils import prod, sharder_name\n'), ((31935, 31945), 'torchrec.distributed.planner.utils.prod', 'prod', (['size'], {}), '(size)\n', (31939, 31945), False, 'from torchrec.distributed.planner.utils import prod, sharder_name\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Callable, Dict, List, Optional, Union
from torch.utils.data import IterDataPipe
from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast
# File names of the two CSVs inside a MovieLens download directory.
RATINGS_FILENAME = "ratings.csv"
MOVIES_FILENAME = "movies.csv"
# Column layouts of ratings.csv and movies.csv, respectively.
DEFAULT_RATINGS_COLUMN_NAMES: List[str] = ["userId", "movieId", "rating", "timestamp"]
DEFAULT_MOVIES_COLUMN_NAMES: List[str] = ["movieId", "title", "genres"]
# Columns of a ratings row joined with its movie data; "movieId" (the join
# key) is kept only once.
DEFAULT_COLUMN_NAMES: List[str] = (
    DEFAULT_RATINGS_COLUMN_NAMES + DEFAULT_MOVIES_COLUMN_NAMES[1:]
)
# One caster per entry of DEFAULT_COLUMN_NAMES, in the same order
# (userId, movieId, rating, timestamp, title, genres); safe_cast presumably
# falls back to the given default when conversion fails — see
# torchrec.datasets.utils.
COLUMN_TYPE_CASTERS: List[
    Callable[[Union[float, int, str]], Union[float, int, str]]
] = [
    lambda val: safe_cast(val, int, 0),
    lambda val: safe_cast(val, int, 0),
    lambda val: safe_cast(val, float, 0.0),
    lambda val: safe_cast(val, int, 0),
    lambda val: safe_cast(val, str, ""),
    lambda val: safe_cast(val, str, ""),
]
def _default_row_mapper(example: List[str]) -> Dict[str, Union[float, int, str]]:
    """Map one split CSV row to a dict keyed by column name, casting each
    field with the matching COLUMN_TYPE_CASTERS entry.
    """
    row: Dict[str, Union[float, int, str]] = {}
    for idx, val in enumerate(example):
        row[DEFAULT_COLUMN_NAMES[idx]] = COLUMN_TYPE_CASTERS[idx](val)
    return row
def _join_with_movies(datapipe: IterDataPipe, root: str) -> IterDataPipe:
    """Extend every ratings row with the matching movie's extra columns,
    looked up by the movieId in the row's second field.
    """
    movies_datapipe = ReadLinesFromCSV(
        LoadFiles((os.path.join(root, MOVIES_FILENAME),), mode="r"),
        skip_first_line=True,
        delimiter=",",
    )
    # movieId -> remaining movie columns (title, genres)
    movie_id_to_movie: Dict[str, List[str]] = {}
    for row in movies_datapipe:
        movie_id_to_movie[row[0]] = row[1:]

    def join_rating_movie(val: List[str]) -> List[str]:
        # val[1] is the movieId column of a ratings row
        return val + movie_id_to_movie[val[1]]

    return datapipe.map(join_rating_movie)
def _movielens(
    root: str,
    *,
    include_movies_data: bool = False,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """Shared MovieLens pipeline: read ratings.csv, optionally join the
    movies data, then apply ``row_mapper`` to every split row.
    """
    datapipe = ReadLinesFromCSV(
        LoadFiles((os.path.join(root, RATINGS_FILENAME),), mode="r", **open_kw),
        skip_first_line=True,
        delimiter=",",
    )
    if include_movies_data:
        datapipe = _join_with_movies(datapipe, root)
    return datapipe.map(row_mapper) if row_mapper else datapipe
def movielens_20m(
    root: str,
    *,
    include_movies_data: bool = False,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`MovieLens 20M <https://grouplens.org/datasets/movielens/20m/>`_ Dataset

    Args:
        root (str): local path to root directory containing MovieLens 20M
            dataset files.
        include_movies_data (bool): if True, adds movies data to each line.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply
            to each split line.
        open_kw: options to pass to underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        datapipe = movielens_20m("/home/datasets/ml-20")
        datapipe = dp.iter.Batch(datapipe, 100)
        datapipe = dp.iter.Collate(datapipe)
        batch = next(iter(datapipe))
    """
    return _movielens(
        root, include_movies_data=include_movies_data, row_mapper=row_mapper, **open_kw
    )
def movielens_25m(
    root: str,
    *,
    include_movies_data: bool = False,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`MovieLens 25M <https://grouplens.org/datasets/movielens/25m/>`_ Dataset

    Args:
        root (str): local path to root directory containing MovieLens 25M
            dataset files.
        include_movies_data (bool): if True, adds movies data to each line.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply
            to each split line.
        open_kw: options to pass to underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        datapipe = movielens_25m("/home/datasets/ml-25")
        datapipe = dp.iter.Batch(datapipe, 100)
        datapipe = dp.iter.Collate(datapipe)
        batch = next(iter(datapipe))
    """
    return _movielens(
        root, include_movies_data=include_movies_data, row_mapper=row_mapper, **open_kw
    )
| [
"torchrec.datasets.utils.safe_cast",
"torchrec.datasets.utils.ReadLinesFromCSV",
"torchrec.datasets.utils.LoadFiles"
] | [((1402, 1437), 'os.path.join', 'os.path.join', (['root', 'MOVIES_FILENAME'], {}), '(root, MOVIES_FILENAME)\n', (1414, 1437), False, 'import os\n'), ((1460, 1495), 'torchrec.datasets.utils.LoadFiles', 'LoadFiles', (['(movies_path,)'], {'mode': '"""r"""'}), "((movies_path,), mode='r')\n", (1469, 1495), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((1518, 1588), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['movies_datapipe'], {'skip_first_line': '(True)', 'delimiter': '""","""'}), "(movies_datapipe, skip_first_line=True, delimiter=',')\n", (1534, 1588), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((2123, 2159), 'os.path.join', 'os.path.join', (['root', 'RATINGS_FILENAME'], {}), '(root, RATINGS_FILENAME)\n', (2135, 2159), False, 'import os\n'), ((2175, 2222), 'torchrec.datasets.utils.LoadFiles', 'LoadFiles', (['(ratings_path,)'], {'mode': '"""r"""'}), "((ratings_path,), mode='r', **open_kw)\n", (2184, 2222), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((2238, 2301), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['datapipe'], {'skip_first_line': '(True)', 'delimiter': '""","""'}), "(datapipe, skip_first_line=True, delimiter=',')\n", (2254, 2301), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((865, 887), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'int', '(0)'], {}), '(val, int, 0)\n', (874, 887), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((905, 927), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'int', '(0)'], {}), '(val, int, 0)\n', (914, 927), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((945, 971), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'float', '(0.0)'], {}), '(val, float, 0.0)\n', (954, 971), False, 'from 
torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((989, 1011), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'int', '(0)'], {}), '(val, int, 0)\n', (998, 1011), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((1029, 1052), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'str', '""""""'], {}), "(val, str, '')\n", (1038, 1052), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n'), ((1070, 1093), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'str', '""""""'], {}), "(val, str, '')\n", (1079, 1093), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from typing import List
from torch import distributed as dist
from torch.utils.data import DataLoader
from torchrec.datasets.criteo import (
CAT_FEATURE_COUNT,
DEFAULT_CAT_NAMES,
DEFAULT_INT_NAMES,
DAYS,
InMemoryBinaryCriteoIterDataPipe,
)
from torchrec.datasets.random import RandomRecDataset
from torchrec.datasets.synthetic import SyntheticRecDataset
# Valid values for the ``stage`` argument of ``get_dataloader``.
STAGES: List[str] = ["train", "val", "test"]
def _get_random_dataloader(
    args: argparse.Namespace,
) -> DataLoader:
    """Build a DataLoader over a ``RandomRecDataset`` configured from *args*.

    Optional attributes of *args* (``num_embeddings_per_feature``, ``seed``,
    ``num_workers``) fall back to ``None``/``0`` when absent.
    """
    # getattr-with-default replaces the original hasattr ternaries.
    hash_sizes = getattr(args, "num_embeddings_per_feature", None)
    manual_seed = getattr(args, "seed", None)
    num_workers = getattr(args, "num_workers", 0)
    dataset = RandomRecDataset(
        keys=DEFAULT_CAT_NAMES,
        batch_size=args.batch_size,
        hash_size=args.num_embeddings,
        hash_sizes=hash_sizes,
        manual_seed=manual_seed,
        ids_per_feature=1,
        num_dense=len(DEFAULT_INT_NAMES),
    )
    # Dataset already yields full batches, so disable DataLoader batching.
    return DataLoader(
        dataset,
        batch_size=None,
        batch_sampler=None,
        pin_memory=args.pin_memory,
        num_workers=num_workers,
    )
def _get_synthetic_dataloader(
    args: argparse.Namespace,
) -> DataLoader:
    """Build a DataLoader over a ``SyntheticRecDataset`` configured from *args*.

    Optional attributes of *args* (``seed``, ``num_workers``) fall back to
    ``None``/``0`` when absent.
    """
    # getattr-with-default replaces the original hasattr ternaries.
    manual_seed = getattr(args, "seed", None)
    num_workers = getattr(args, "num_workers", 0)
    dataset = SyntheticRecDataset(
        keys=args.sparse_feature_names,
        batch_size=args.batch_size,
        pooling_factor_per_feature=args.pooling_factor_per_feature,
        num_embeddings_per_feature=args.num_embeddings_per_feature,
        manual_seed=manual_seed,
        num_dense=len(DEFAULT_INT_NAMES),
    )
    # Dataset already yields full batches, so disable DataLoader batching.
    return DataLoader(
        dataset,
        batch_size=None,
        batch_sampler=None,
        pin_memory=args.pin_memory,
        num_workers=num_workers,
    )
def _get_in_memory_dataloader(
    args: argparse.Namespace,
    stage: str,
) -> DataLoader:
    """Build a DataLoader over preprocessed binary Criteo files for *stage*.

    The train split reads every day except the final one. The final day is
    shared between "val" (first half) and "test" (second half) by doubling
    the effective world size and offsetting the test ranks past val's.
    """
    final_day_tag = f"day_{DAYS - 1}"
    all_files = os.listdir(args.in_memory_binary_criteo_path)
    if stage == "train":
        # Training uses all days except the last.
        split_files = [f for f in all_files if final_day_tag not in f]
        rank = dist.get_rank()
        world_size = dist.get_world_size()
    else:
        # Val/test split the final day in half via a doubled world size;
        # test ranks are shifted to read the second half.
        split_files = [f for f in all_files if final_day_tag in f]
        rank = dist.get_rank()
        if stage != "val":
            rank += dist.get_world_size()
        world_size = dist.get_world_size() * 2
    # One sorted path list per data kind, in the order the pipe expects.
    stage_files: List[List[str]] = []
    for kind in ("dense", "sparse", "labels"):
        matching = [
            os.path.join(args.in_memory_binary_criteo_path, f)
            for f in split_files
            if kind in f
        ]
        stage_files.append(sorted(matching))
    datapipe = InMemoryBinaryCriteoIterDataPipe(
        *stage_files,  # pyre-ignore[6]
        batch_size=args.batch_size,
        rank=rank,
        world_size=world_size,
        shuffle_batches=args.shuffle_batches,
        hashes=args.num_embeddings_per_feature
        if args.num_embeddings is None
        else ([args.num_embeddings] * CAT_FEATURE_COUNT),
    )
    # Pipe emits ready batches: disable DataLoader batching/collation.
    return DataLoader(
        datapipe,
        batch_size=None,
        pin_memory=args.pin_memory,
        collate_fn=lambda batch: batch,
    )
def get_dataloader(args: argparse.Namespace, backend: str, stage: str) -> DataLoader:
    """
    Gets desired dataloader from dlrm_main command line options. Currently, this
    function is able to return either a DataLoader wrapped around a RandomRecDataset,
    a SyntheticRecDataset, or an InMemoryBinaryCriteoIterDataPipe.
    Args:
        args (argparse.Namespace): Command line options supplied to dlrm_main.py's main
            function.
        backend (str): "nccl" or "gloo".
        stage (str): "train", "val", or "test".
    Returns:
        dataloader (DataLoader): PyTorch dataloader for the specified options.
    Raises:
        ValueError: if ``stage`` is not one of STAGES, or if no in-memory Criteo
            path is supplied and ``args.dataset_type`` is not "random" or
            "synthetic".
    """
    stage = stage.lower()
    if stage not in STAGES:
        raise ValueError(f"Supplied stage was {stage}. Must be one of {STAGES}.")
    # Default pin_memory on for NCCL (GPU) training unless explicitly set.
    args.pin_memory = (
        (backend == "nccl") if not hasattr(args, "pin_memory") else args.pin_memory
    )
    if (
        not hasattr(args, "in_memory_binary_criteo_path")
        or args.in_memory_binary_criteo_path is None
    ):
        if args.dataset_type == 'random':
            return _get_random_dataloader(args)
        elif args.dataset_type == 'synthetic':
            return _get_synthetic_dataloader(args)
        # Bug fix: previously an unrecognized dataset_type fell through and
        # implicitly returned None, producing a confusing downstream failure.
        raise ValueError(
            f"Unknown dataset_type {args.dataset_type!r}. "
            "Must be 'random' or 'synthetic'."
        )
    else:
        return _get_in_memory_dataloader(args, stage)
| [
"torchrec.datasets.criteo.InMemoryBinaryCriteoIterDataPipe"
] | [((2147, 2192), 'os.listdir', 'os.listdir', (['args.in_memory_binary_criteo_path'], {}), '(args.in_memory_binary_criteo_path)\n', (2157, 2192), False, 'import os\n'), ((2439, 2454), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2452, 2454), True, 'from torch import distributed as dist\n'), ((2476, 2497), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2495, 2497), True, 'from torch import distributed as dist\n'), ((3177, 3453), 'torchrec.datasets.criteo.InMemoryBinaryCriteoIterDataPipe', 'InMemoryBinaryCriteoIterDataPipe', (['*stage_files'], {'batch_size': 'args.batch_size', 'rank': 'rank', 'world_size': 'world_size', 'shuffle_batches': 'args.shuffle_batches', 'hashes': '(args.num_embeddings_per_feature if args.num_embeddings is None else [args.\n num_embeddings] * CAT_FEATURE_COUNT)'}), '(*stage_files, batch_size=args.batch_size,\n rank=rank, world_size=world_size, shuffle_batches=args.shuffle_batches,\n hashes=args.num_embeddings_per_feature if args.num_embeddings is None else\n [args.num_embeddings] * CAT_FEATURE_COUNT)\n', (3209, 3453), False, 'from torchrec.datasets.criteo import CAT_FEATURE_COUNT, DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES, DAYS, InMemoryBinaryCriteoIterDataPipe\n'), ((2699, 2714), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2712, 2714), True, 'from torch import distributed as dist\n'), ((2833, 2854), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2852, 2854), True, 'from torch import distributed as dist\n'), ((2762, 2777), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (2775, 2777), True, 'from torch import distributed as dist\n'), ((2780, 2801), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2799, 2801), True, 'from torch import distributed as dist\n'), ((2956, 3006), 'os.path.join', 'os.path.join', (['args.in_memory_binary_criteo_path', 'x'], {}), '(args.in_memory_binary_criteo_path, x)\n', 
(2968, 3006), False, 'import os\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.