code stringlengths 3.13k 58.3k | apis sequence | extract_api stringlengths 499 39.4k |
|---|---|---|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
import abc
import math
from collections import defaultdict, deque
from dataclasses import dataclass
from enum import Enum
from typing import (
Any,
Callable,
cast,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
import torch
import torch.distributed as dist
import torch.nn as nn
from torchmetrics import Metric
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import (
compose_metric_key,
MetricNameBase,
MetricNamespaceBase,
MetricPrefix,
)
RecModelOutput = Union[torch.Tensor, Dict[str, torch.Tensor]]
@dataclass(frozen=True)
class MetricComputationReport:
    """A single metric result emitted by a RecMetricComputation.

    RecMetric consumes these reports to compose the final metric keys.
    """

    # Base name of the metric (e.g. NE, AUC).
    name: MetricNameBase
    # Prefix distinguishing lifetime vs. window (or other) variants.
    metric_prefix: MetricPrefix
    # The computed metric value.
    value: torch.Tensor
# Placeholder type for the `default` argument forwarded to Metric.add_state.
DefaultValueT = TypeVar("DefaultValueT")
# Items yielded by RecMetric._tasks_iter: (task, metric name, value, prefix).
ComputeIterType = Iterator[
    Tuple[RecTaskInfo, MetricNameBase, torch.Tensor, MetricPrefix]
]
# Upper bound on how many per-batch states a WindowBuffer retains.
MAX_BUFFER_COUNT = 1000
class RecMetricException(Exception):
    """Base exception raised for misuse of the rec metrics framework."""

    pass
class WindowBuffer:
    """Sliding-window accumulator over per-batch metric states.

    Retains at most ``max_buffer_count`` per-batch state tensors and evicts
    the oldest entries whenever the buffered sample total exceeds
    ``max_size``, subtracting each evicted state from the running window
    state so that the window always reflects only the retained batches.
    """

    def __init__(self, max_size: int, max_buffer_count: int) -> None:
        self._max_size: int = max_size
        self._max_buffer_count: int = max_buffer_count
        self._buffers: Deque[torch.Tensor] = deque(maxlen=max_buffer_count)
        self._used_sizes: Deque[int] = deque(maxlen=max_buffer_count)
        self._window_used_size = 0

    def aggregate_state(
        self, window_state: torch.Tensor, curr_state: torch.Tensor, size: int
    ) -> None:
        """Fold ``curr_state`` (covering ``size`` samples) into ``window_state``."""

        def evict_oldest(window_state: torch.Tensor) -> None:
            # Drop the oldest buffered state and its sample count.
            window_state -= self._buffers.popleft()
            self._window_used_size -= self._used_sizes.popleft()

        # A full deque would silently discard its left element on append, so
        # evict explicitly first to keep window_state consistent.
        if len(self._buffers) == self._buffers.maxlen:
            evict_oldest(window_state)

        self._buffers.append(curr_state)
        self._used_sizes.append(size)
        window_state += curr_state
        self._window_used_size += size

        # Shrink until the window covers no more than max_size samples.
        while self._window_used_size > self._max_size:
            evict_oldest(window_state)

    @property
    def buffers(self) -> Deque[torch.Tensor]:
        return self._buffers
class RecMetricComputation(Metric, abc.ABC):
    r"""The internal computation class template.
    A metric implementation should overwrite update() and compute(). These two
    APIs focus on the actual mathematical meaning of the metric, without the
    detailed knowledge of model output and task information.
    Args:
        my_rank (int): the rank of this trainer.
        batch_size (int): batch size used by this trainer.
        n_tasks (int): the number tasks this communication obj
            will have to compute.
        window_size (int): the window size for the window metric.
        compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if non-leader rank want to consume metrics result.
        process_group (Optional[ProcessGroup]): the process group used for the
            communication. Will use the default process group if not specified.
    """

    # Maps window-state names to their buffers; None when window_size <= 0
    # (i.e. window metrics are disabled).
    _batch_window_buffers: Optional[Dict[str, WindowBuffer]]

    def __init__(
        self,
        my_rank: int,
        batch_size: int,
        n_tasks: int,
        window_size: int,
        compute_on_all_ranks: bool = False,
        # pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
        process_group: Optional[dist.ProcessGroup] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        super().__init__(process_group=process_group, *args, **kwargs)
        self._my_rank = my_rank
        self._n_tasks = n_tasks
        self._batch_size = batch_size
        self._window_size = window_size
        self._compute_on_all_ranks = compute_on_all_ranks
        if self._window_size > 0:
            self._batch_window_buffers = {}
        else:
            self._batch_window_buffers = None
        # One uint8 flag per task recording whether the task ever received a
        # valid (non-zero-weight) update; OR-reduced across ranks.
        self._add_state(
            "has_valid_update",
            torch.zeros(self._n_tasks, dtype=torch.uint8),
            add_window_state=False,
            dist_reduce_fx=lambda x: torch.any(x, dim=0).byte(),
            persistent=True,
        )

    @staticmethod
    def get_window_state_name(state_name: str) -> str:
        """Return the attribute name used for the window variant of a state."""
        return f"window_{state_name}"

    def get_window_state(self, state_name: str) -> torch.Tensor:
        """Return the window-state tensor registered for ``state_name``."""
        return getattr(self, self.get_window_state_name(state_name))

    def _add_state(
        self, name: str, default: DefaultValueT, add_window_state: bool, **kwargs: Any
    ) -> None:
        """Register a metric state; optionally also register its window twin.

        The window state is always non-persistent and gets its own
        WindowBuffer for sliding-window aggregation.
        """
        # pyre-fixme[6]: Expected `Union[List[typing.Any], torch.Tensor]` for 2nd
        # param but got `DefaultValueT`.
        super().add_state(name, default, **kwargs)
        if add_window_state:
            if self._batch_window_buffers is None:
                raise RuntimeError(
                    "Users is adding a window state while window metric is disabled."
                )
            kwargs["persistent"] = False
            window_state_name = self.get_window_state_name(name)
            # Avoid pyre error
            assert isinstance(default, torch.Tensor)
            super().add_state(window_state_name, default.detach().clone(), **kwargs)
            self._batch_window_buffers[window_state_name] = WindowBuffer(
                max_size=self._window_size,
                max_buffer_count=MAX_BUFFER_COUNT,
            )

    def _aggregate_window_state(
        self, state_name: str, state: torch.Tensor, num_samples: int
    ) -> None:
        """Fold one batch's ``state`` into the window buffer for ``state_name``."""
        if self._batch_window_buffers is None:
            raise RuntimeError(
                "Users is adding a window state while window metric is disabled."
            )
        window_state_name = self.get_window_state_name(state_name)
        assert self._batch_window_buffers is not None
        self._batch_window_buffers[window_state_name].aggregate_state(
            getattr(self, window_state_name), curr_state=state, size=num_samples
        )

    @abc.abstractmethod
    # pyre-fixme[14]: `update` overrides method defined in `Metric` inconsistently.
    def update(
        self,
        *,
        predictions: Optional[torch.Tensor],
        labels: torch.Tensor,
        weights: Optional[torch.Tensor],
    ) -> None:  # pragma: no cover
        pass

    @abc.abstractmethod
    def _compute(self) -> List[MetricComputationReport]:  # pragma: no cover
        pass

    def pre_compute(self) -> None:
        r"""If a metric need to do some work before `compute()`, the metric
        has to override this `pre_compute()`. One possible usage is to do
        some pre-processing of the local state before `compute()` as TorchMetric
        wraps `RecMetricComputation.compute()` and will do the global aggregation
        before `RecMetricComputation.compute()` is called.
        """
        return

    def compute(self) -> List[MetricComputationReport]:
        """Return metric reports on the leader rank (or all ranks if enabled)."""
        if self._my_rank == 0 or self._compute_on_all_ranks:
            return self._compute()
        else:
            return []

    def local_compute(self) -> List[MetricComputationReport]:
        """Return metric reports from this rank's local state only."""
        return self._compute()
class RecMetric(nn.Module, abc.ABC):
    r"""The main class template to implement a recommendation metric.
    This class contains the recommendation tasks information (RecTaskInfo) and
    the actual computation object (RecMetricComputation). RecMetric processes
    all the information related to RecTaskInfo and models and pass the required
    signals to the computation object, allowing the implementation of
    RecMetricComputation to focus on the mathematical meaning.
    A new metric that inherits RecMetric must override the following attributes
    in its own __init__(): `_namespace` and `_metrics_computations`. No other
    methods should be overridden.
    Args:
        world_size (int): the number of trainers.
        my_rank (int): the rank of this trainer.
        batch_size (int): batch size used by this trainer.
        tasks (List[RecTaskInfo]): the information of the model tasks.
        compute_mode (RecComputeMode): the computation mode. See RecComputeMode.
        window_size (int): the window size for the window metric.
        fused_update_limit (int): the maximum number of updates to be fused.
        compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if non-leader rank want to consume global metrics result.
        process_group (Optional[ProcessGroup]): the process group used for the
            communication. Will use the default process group if not specified.
    Call Args:
        Not supported.
    Returns:
        Not supported.
    Example::
        ne = NEMetric(
            world_size=4,
            my_rank=0,
            batch_size=128,
            tasks=DefaultTaskInfo,
        )
    """

    # Concrete computation class instantiated per metric; set by subclasses.
    _computation_class: Type[RecMetricComputation]
    _namespace: MetricNamespaceBase
    _metrics_computations: nn.ModuleList
    _tasks: List[RecTaskInfo]
    _window_size: int
    # Dispatches to _fused_tasks_iter or _unfused_tasks_iter based on mode.
    _tasks_iter: Callable[[str], ComputeIterType]
    # Buffered model outputs awaiting a fused update (see fused_update_limit).
    _update_buffers: Dict[str, List[RecModelOutput]]
    # Cache of all-ones weight tensors keyed by prediction shape.
    _default_weights: Dict[Tuple[int, ...], torch.Tensor]

    PREDICTIONS: str = "predictions"
    LABELS: str = "labels"
    WEIGHTS: str = "weights"

    def __init__(
        self,
        world_size: int,
        my_rank: int,
        batch_size: int,
        tasks: List[RecTaskInfo],
        compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION,
        window_size: int = 100,
        fused_update_limit: int = 0,
        compute_on_all_ranks: bool = False,
        process_group: Optional[dist.ProcessGroup] = None,
        **kwargs: Any,
    ) -> None:
        # TODO(stellaya): consider to inherit from TorchMetrics.Metric or
        # TorchMetrics.MetricCollection.
        if (
            compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION
            and fused_update_limit > 0
        ):
            raise ValueError(
                "The fused tasks computation and the fused update cannot be set at the same time"
            )
        super().__init__()
        self._world_size = world_size
        self._my_rank = my_rank
        # Each rank only needs its share of the global window.
        self._window_size = math.ceil(window_size / world_size)
        self._batch_size = batch_size
        self._tasks = tasks
        self._compute_mode = compute_mode
        self._fused_update_limit = fused_update_limit
        self._default_weights = {}
        self._update_buffers = {
            self.PREDICTIONS: [],
            self.LABELS: [],
            self.WEIGHTS: [],
        }
        # Fused mode: one computation object covering all tasks.
        # Unfused mode: one computation object per task.
        if compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
            n_metrics = 1
            task_per_metric = len(self._tasks)
            self._tasks_iter = self._fused_tasks_iter
        else:
            n_metrics = len(self._tasks)
            task_per_metric = 1
            self._tasks_iter = self._unfused_tasks_iter
        self._metrics_computations: nn.ModuleList = nn.ModuleList(
            [
                # This Pyre error seems to be Pyre's bug as it can be inferred by mypy
                # according to https://github.com/python/mypy/issues/3048.
                # pyre-fixme[45]: Cannot instantiate abstract class `RecMetricCoputation`.
                self._computation_class(
                    my_rank,
                    batch_size,
                    task_per_metric,
                    self._window_size,
                    compute_on_all_ranks,
                    process_group,
                    **kwargs,
                )
                for _ in range(n_metrics)
            ]
        )

    # TODO(stellaya): Refactor the _[fused, unfused]_tasks_iter methods and replace the
    # compute_scope str input with an enum
    def _fused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
        """Yield per-task results from the single fused computation object."""
        assert len(self._metrics_computations) == 1
        self._metrics_computations[0].pre_compute()
        # compute_scope is "" for global compute and "local_" for local compute.
        for metric_report in getattr(
            self._metrics_computations[0], compute_scope + "compute"
        )():
            for task, metric_value, has_valid_update in zip(
                self._tasks,
                metric_report.value,
                self._metrics_computations[0].has_valid_update,
            ):
                # The attribute has_valid_update is a tensor whose length equals to the
                # number of tasks. Each value in it is corresponding to whether a task
                # has valid updates or not.
                # If for a task there's no valid updates, the calculated metric_value
                # will be meaningless, so we mask it with the default value, i.e. 0.
                valid_metric_value = (
                    metric_value
                    if has_valid_update > 0
                    else torch.zeros_like(metric_value)
                )
                yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value

    def _unfused_tasks_iter(self, compute_scope: str) -> ComputeIterType:
        """Yield per-task results, one computation object per task."""
        for task, metric_computation in zip(self._tasks, self._metrics_computations):
            metric_computation.pre_compute()
            for metric_report in getattr(
                metric_computation, compute_scope + "compute"
            )():
                # The attribute has_valid_update is a tensor with only 1 value
                # corresponding to whether the task has valid updates or not.
                # If there's no valid update, the calculated metric_report.value
                # will be meaningless, so we mask it with the default value, i.e. 0.
                valid_metric_value = (
                    metric_report.value
                    if metric_computation.has_valid_update[0] > 0
                    else torch.zeros_like(metric_report.value)
                )
                yield task, metric_report.name, valid_metric_value, compute_scope + metric_report.metric_prefix.value

    def _fuse_update_buffers(self) -> Dict[str, RecModelOutput]:
        """Concatenate all buffered updates into a single fused update payload."""

        def fuse(outputs: List[RecModelOutput]) -> RecModelOutput:
            assert len(outputs) > 0
            if isinstance(outputs[0], torch.Tensor):
                return torch.cat(cast(List[torch.Tensor], outputs))
            else:
                # Dict-shaped outputs: concatenate per task name.
                task_outputs: Dict[str, List[torch.Tensor]] = defaultdict(list)
                for output in outputs:
                    assert isinstance(output, dict)
                    for task_name, tensor in output.items():
                        task_outputs[task_name].append(tensor)
                return {
                    name: torch.cat(tensors) for name, tensors in task_outputs.items()
                }

        ret: Dict[str, RecModelOutput] = {}
        for key, output_list in self._update_buffers.items():
            if len(output_list) > 0:
                ret[key] = fuse(output_list)
            else:
                # Weights are the only optional signal; empty buffers elsewhere
                # would indicate a bug.
                assert key == self.WEIGHTS
            output_list.clear()
        return ret

    def _check_fused_update(self, force: bool) -> None:
        """Flush buffered updates once the fuse limit is reached (or forced)."""
        if self._fused_update_limit <= 0:
            return
        if len(self._update_buffers[self.PREDICTIONS]) == 0:
            return
        if (
            not force
            and len(self._update_buffers[self.PREDICTIONS]) < self._fused_update_limit
        ):
            return
        fused_arguments = self._fuse_update_buffers()
        self._update(
            predictions=fused_arguments[self.PREDICTIONS],
            labels=fused_arguments[self.LABELS],
            weights=fused_arguments.get(self.WEIGHTS, None),
        )

    def _create_default_weights(self, predictions: torch.Tensor) -> torch.Tensor:
        """Return (and cache by shape) an all-ones weight tensor for predictions."""
        weights = self._default_weights.get(predictions.size(), None)
        if weights is None:
            weights = torch.ones_like(predictions)
            self._default_weights[predictions.size()] = weights
        return weights

    def _check_nonempty_weights(self, weights: torch.Tensor) -> torch.Tensor:
        """Return a bool tensor marking which tasks have any non-zero weight."""
        return torch.gt(torch.count_nonzero(weights, dim=-1), 0)

    def _update(
        self,
        *,
        predictions: RecModelOutput,
        labels: RecModelOutput,
        weights: Optional[RecModelOutput],
    ) -> None:
        """Route one (possibly fused) batch to the computation object(s)."""
        with torch.no_grad():
            if self._compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
                assert isinstance(predictions, torch.Tensor)
                # Reshape the predictions to size([len(self._tasks), self._batch_size])
                predictions = predictions.view(-1, self._batch_size)
                assert isinstance(labels, torch.Tensor)
                labels = labels.view(-1, self._batch_size)
                if weights is None:
                    weights = self._create_default_weights(predictions)
                else:
                    assert isinstance(weights, torch.Tensor)
                    weights = weights.view(-1, self._batch_size)
                # has_valid_weights is a tensor of bool whose length equals to the number
                # of tasks. Each value in it is corresponding to whether the weights
                # are valid, i.e. are set to non-zero values for that task in this update.
                # If has_valid_weights are Falses for all the tasks, we just ignore this
                # update.
                has_valid_weights = self._check_nonempty_weights(weights)
                if torch.any(has_valid_weights):
                    self._metrics_computations[0].update(
                        predictions=predictions, labels=labels, weights=weights
                    )
                    self._metrics_computations[0].has_valid_update.logical_or_(
                        has_valid_weights
                    ).byte()
            else:
                for task, metric_ in zip(self._tasks, self._metrics_computations):
                    if task.name not in predictions:
                        continue
                    if torch.numel(predictions[task.name]) == 0:
                        # An empty prediction must come with empty labels/weights.
                        assert torch.numel(labels[task.name]) == 0
                        assert weights is None or torch.numel(weights[task.name]) == 0
                        continue
                    # Reshape the predictions to size([1, self._batch_size])
                    task_predictions = predictions[task.name].view(1, -1)
                    task_labels = labels[task.name].view(1, -1)
                    if weights is None:
                        task_weights = self._create_default_weights(task_predictions)
                    else:
                        task_weights = weights[task.name].view(1, -1)
                    # has_valid_weights is a tensor with only 1 value corresponding to
                    # whether the weights are valid, i.e. are set to non-zero values for
                    # the task in this update.
                    # If has_valid_update[0] is False, we just ignore this update.
                    has_valid_weights = self._check_nonempty_weights(task_weights)
                    if has_valid_weights[0]:
                        metric_.update(
                            predictions=task_predictions,
                            labels=task_labels,
                            weights=task_weights,
                        )
                        metric_.has_valid_update.logical_or_(has_valid_weights).byte()

    def update(
        self,
        *,
        predictions: RecModelOutput,
        labels: RecModelOutput,
        weights: Optional[RecModelOutput],
    ) -> None:
        """Feed one batch of model outputs; buffers it when fused update is on."""
        if self._fused_update_limit > 0:
            self._update_buffers[self.PREDICTIONS].append(predictions)
            self._update_buffers[self.LABELS].append(labels)
            if weights is not None:
                self._update_buffers[self.WEIGHTS].append(weights)
            self._check_fused_update(force=False)
        else:
            self._update(predictions=predictions, labels=labels, weights=weights)

    # The implementation of compute is very similar to local_compute, but compute overwrites
    # the abstract method compute in torchmetrics.Metric, which is wrapped by _wrap_compute
    def compute(self) -> Dict[str, torch.Tensor]:
        """Return the globally-aggregated metric values keyed by metric key."""
        self._check_fused_update(force=True)
        ret = {}
        for task, metric_name, metric_value, prefix in self._tasks_iter(""):
            metric_key = compose_metric_key(
                self._namespace, task.name, metric_name, prefix
            )
            ret[metric_key] = metric_value
        return ret

    def local_compute(self) -> Dict[str, torch.Tensor]:
        """Return this rank's local metric values keyed by metric key."""
        self._check_fused_update(force=True)
        ret = {}
        for task, metric_name, metric_value, prefix in self._tasks_iter("local_"):
            metric_key = compose_metric_key(
                self._namespace, task.name, metric_name, prefix
            )
            ret[metric_key] = metric_value
        return ret

    def sync(self) -> None:
        """Synchronize all underlying computation states across ranks."""
        for computation in self._metrics_computations:
            computation.sync()

    def unsync(self) -> None:
        """Undo a previous sync(), restoring per-rank local states."""
        for computation in self._metrics_computations:
            if computation._is_synced:
                computation.unsync()

    def reset(self) -> None:
        """Reset all underlying computation states."""
        for computation in self._metrics_computations:
            computation.reset()

    def get_memory_usage(self) -> Dict[torch.Tensor, int]:
        r"""Estimates the memory of the rec metric instance's
        underlying tensors; returns the map of tensor to size
        """
        tensor_map = {}
        # Breadth-first walk of the instance's attribute graph, collecting
        # every reachable tensor's byte size.
        attributes_q = deque(self.__dict__.values())
        while attributes_q:
            attribute = attributes_q.popleft()
            if isinstance(attribute, torch.Tensor):
                tensor_map[attribute] = (
                    attribute.size().numel() * attribute.element_size()
                )
            elif isinstance(attribute, WindowBuffer):
                attributes_q.extend(attribute.buffers)
            elif isinstance(attribute, Mapping):
                attributes_q.extend(attribute.values())
            elif isinstance(attribute, Sequence) and not isinstance(attribute, str):
                attributes_q.extend(attribute)
            elif hasattr(attribute, "__dict__") and not isinstance(attribute, Enum):
                attributes_q.extend(attribute.__dict__.values())
        return tensor_map

    # pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.
    def state_dict(
        self,
        destination: Optional[Dict[str, torch.Tensor]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, torch.Tensor]:
        # We need to flush the cached output to ensure checkpointing correctness.
        self._check_fused_update(force=True)
        destination = super().state_dict(
            destination=destination, prefix=prefix, keep_vars=keep_vars
        )
        return self._metrics_computations.state_dict(
            destination=destination,
            prefix=f"{prefix}_metrics_computations.",
            keep_vars=keep_vars,
        )
class RecMetricList(nn.Module):
    """
    A list module that encapsulates multiple RecMetric instances and exposes
    the same interfaces as RecMetric.

    Args:
        rec_metrics (List[RecMetric]): the list of the input RecMetrics.

    Call Args:
        Not supported.

    Returns:
        Not supported.

    Example::
        ne = NEMetric(
            world_size=4,
            my_rank=0,
            batch_size=128,
            tasks=DefaultTaskInfo
        )
        metrics = RecMetricList([ne])
    """

    rec_metrics: nn.ModuleList

    def __init__(self, rec_metrics: List[RecMetric]) -> None:
        # TODO(stellaya): consider inheriting from TorchMetrics.MetricCollection.
        # The prerequisite for using MetricCollection is that RecMetric inherits
        # from TorchMetrics.Metric or TorchMetrics.MetricCollection.
        super().__init__()
        self.rec_metrics = nn.ModuleList(rec_metrics)

    def __len__(self) -> int:
        return len(self.rec_metrics)

    def __getitem__(self, idx: int) -> nn.Module:
        return self.rec_metrics[idx]

    def update(
        self,
        *,
        predictions: RecModelOutput,
        labels: RecModelOutput,
        weights: RecModelOutput,
    ) -> None:
        """Forward one batch of model outputs to every contained metric."""
        for rec_metric in self.rec_metrics:
            rec_metric.update(predictions=predictions, labels=labels, weights=weights)

    def compute(self) -> Dict[str, torch.Tensor]:
        """Merge the global computation results of all contained metrics."""
        merged: Dict[str, torch.Tensor] = {}
        for rec_metric in self.rec_metrics:
            merged.update(rec_metric.compute())
        return merged

    def local_compute(self) -> Dict[str, torch.Tensor]:
        """Merge the local (per-rank) results of all contained metrics."""
        merged: Dict[str, torch.Tensor] = {}
        for rec_metric in self.rec_metrics:
            merged.update(rec_metric.local_compute())
        return merged

    def sync(self) -> None:
        """Synchronize every contained metric across ranks."""
        for rec_metric in self.rec_metrics:
            rec_metric.sync()

    def unsync(self) -> None:
        """Undo synchronization for every contained metric."""
        for rec_metric in self.rec_metrics:
            rec_metric.unsync()

    def reset(self) -> None:
        """Reset the state of every contained metric."""
        for rec_metric in self.rec_metrics:
            rec_metric.reset()
| [
"torchrec.metrics.metrics_namespace.compose_metric_key"
] | [((943, 965), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (952, 965), False, 'from dataclasses import dataclass\n'), ((1096, 1120), 'typing.TypeVar', 'TypeVar', (['"""DefaultValueT"""'], {}), "('DefaultValueT')\n", (1103, 1120), False, 'from typing import Any, Callable, cast, Deque, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n'), ((1523, 1553), 'collections.deque', 'deque', ([], {'maxlen': 'max_buffer_count'}), '(maxlen=max_buffer_count)\n', (1528, 1553), False, 'from collections import defaultdict, deque\n'), ((1593, 1623), 'collections.deque', 'deque', ([], {'maxlen': 'max_buffer_count'}), '(maxlen=max_buffer_count)\n', (1598, 1623), False, 'from collections import defaultdict, deque\n'), ((10435, 10470), 'math.ceil', 'math.ceil', (['(window_size / world_size)'], {}), '(window_size / world_size)\n', (10444, 10470), False, 'import math\n'), ((24195, 24221), 'torch.nn.ModuleList', 'nn.ModuleList', (['rec_metrics'], {}), '(rec_metrics)\n', (24208, 24221), True, 'import torch.nn as nn\n'), ((4195, 4240), 'torch.zeros', 'torch.zeros', (['self._n_tasks'], {'dtype': 'torch.uint8'}), '(self._n_tasks, dtype=torch.uint8)\n', (4206, 4240), False, 'import torch\n'), ((16013, 16041), 'torch.ones_like', 'torch.ones_like', (['predictions'], {}), '(predictions)\n', (16028, 16041), False, 'import torch\n'), ((16232, 16268), 'torch.count_nonzero', 'torch.count_nonzero', (['weights'], {'dim': '(-1)'}), '(weights, dim=-1)\n', (16251, 16268), False, 'import torch\n'), ((16456, 16471), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16469, 16471), False, 'import torch\n'), ((20564, 20631), 'torchrec.metrics.metrics_namespace.compose_metric_key', 'compose_metric_key', (['self._namespace', 'task.name', 'metric_name', 'prefix'], {}), '(self._namespace, task.name, metric_name, prefix)\n', (20582, 20631), False, 'from torchrec.metrics.metrics_namespace import compose_metric_key, MetricNameBase, 
MetricNamespaceBase, MetricPrefix\n'), ((20951, 21018), 'torchrec.metrics.metrics_namespace.compose_metric_key', 'compose_metric_key', (['self._namespace', 'task.name', 'metric_name', 'prefix'], {}), '(self._namespace, task.name, metric_name, prefix)\n', (20969, 21018), False, 'from torchrec.metrics.metrics_namespace import compose_metric_key, MetricNameBase, MetricNamespaceBase, MetricPrefix\n'), ((14541, 14558), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (14552, 14558), False, 'from collections import defaultdict, deque\n'), ((17613, 17641), 'torch.any', 'torch.any', (['has_valid_weights'], {}), '(has_valid_weights)\n', (17622, 17641), False, 'import torch\n'), ((13010, 13040), 'torch.zeros_like', 'torch.zeros_like', (['metric_value'], {}), '(metric_value)\n', (13026, 13040), False, 'import torch\n'), ((13997, 14034), 'torch.zeros_like', 'torch.zeros_like', (['metric_report.value'], {}), '(metric_report.value)\n', (14013, 14034), False, 'import torch\n'), ((14426, 14459), 'typing.cast', 'cast', (['List[torch.Tensor]', 'outputs'], {}), '(List[torch.Tensor], outputs)\n', (14430, 14459), False, 'from typing import Any, Callable, cast, Deque, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n'), ((14825, 14843), 'torch.cat', 'torch.cat', (['tensors'], {}), '(tensors)\n', (14834, 14843), False, 'import torch\n'), ((18164, 18199), 'torch.numel', 'torch.numel', (['predictions[task.name]'], {}), '(predictions[task.name])\n', (18175, 18199), False, 'import torch\n'), ((4315, 4334), 'torch.any', 'torch.any', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (4324, 4334), False, 'import torch\n'), ((18237, 18267), 'torch.numel', 'torch.numel', (['labels[task.name]'], {}), '(labels[task.name])\n', (18248, 18267), False, 'import torch\n'), ((18323, 18354), 'torch.numel', 'torch.numel', (['weights[task.name]'], {}), '(weights[task.name])\n', (18334, 18354), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import logging
from typing import List, Optional, Tuple, Iterator
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
rounded_row_size_in_bytes,
)
from torchrec.distributed.batched_embedding_kernel import BaseBatchedEmbeddingBag
from torchrec.distributed.embedding_kernel import BaseEmbeddingBag
from torchrec.distributed.embedding_types import GroupedEmbeddingConfig
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
DataType,
DATA_TYPE_NUM_BITS,
)
from torchrec.sparse.jagged_tensor import (
KeyedJaggedTensor,
KeyedTensor,
)
logger: logging.Logger = logging.getLogger(__name__)
class QuantBatchedEmbeddingBag(BaseBatchedEmbeddingBag):
    """Batched embedding-bag kernel backed by FBGEMM's int-N-bit fused op.

    Wraps ``IntNBitTableBatchedEmbeddingBagsCodegen`` so a group of quantized
    embedding tables can be looked up with a single fused kernel call.
    """

    def __init__(
        self,
        config: GroupedEmbeddingConfig,
        # pyre-fixme[11]
        pg: Optional[dist.ProcessGroup] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__(config, pg, device)
        self._emb_module: IntNBitTableBatchedEmbeddingBagsCodegen = (
            IntNBitTableBatchedEmbeddingBagsCodegen(
                embedding_specs=[
                    (
                        "",
                        local_rows,
                        table.embedding_dim,
                        QuantBatchedEmbeddingBag.to_sparse_type(config.data_type),
                        # Place tables on the GPU only for CUDA devices;
                        # otherwise keep them in host memory.
                        EmbeddingLocation.DEVICE
                        if (device is not None and device.type == "cuda")
                        else EmbeddingLocation.HOST,
                    )
                    for local_rows, table in zip(
                        self._local_rows, config.embedding_tables
                    )
                ],
                device=device,
                pooling_mode=self._pooling,
                feature_table_map=self._feature_table_map,
            )
        )
        # Meta devices carry no storage, so there is nothing to initialize.
        if device is not None and device.type != "meta":
            self._emb_module.initialize_weights()

    @staticmethod
    def to_sparse_type(data_type: DataType) -> SparseType:
        """Map a torchrec DataType to the equivalent FBGEMM SparseType."""
        if data_type == DataType.FP16:
            return SparseType.FP16
        elif data_type == DataType.INT8:
            return SparseType.INT8
        elif data_type == DataType.INT4:
            return SparseType.INT4
        elif data_type == DataType.INT2:
            return SparseType.INT2
        else:
            raise ValueError(f"Invalid DataType {data_type}")

    def init_parameters(self) -> None:
        # Quantized weights are assigned via from_float()/assign_embedding_weights,
        # so there is nothing to initialize here.
        pass

    @property
    def emb_module(
        self,
    ) -> IntNBitTableBatchedEmbeddingBagsCodegen:
        return self._emb_module

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """Look up and pool the quantized embeddings for the given features."""
        # The int-N kernel requires int32 indices/offsets; outputs are cast
        # back to float for downstream consumers.
        values = self.emb_module(
            indices=features.values().int(),
            offsets=features.offsets().int(),
            per_sample_weights=features.weights_or_none(),
        ).float()
        return KeyedTensor(
            keys=self._emb_names,
            values=values,
            length_per_key=self._lengths_per_emb,
        )

    def named_buffers(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield (name, weight) pairs for each embedding table's quantized weight."""
        for config, weight in zip(
            self._config.embedding_tables,
            self.emb_module.split_embedding_weights(),
        ):
            yield append_prefix(prefix, f"{config.name}.weight"), weight[0]

    def split_embedding_weights(self) -> List[torch.Tensor]:
        """Return per-table weight tensors with scale/shift fused into each row."""
        return [
            weight
            for weight, _ in self.emb_module.split_embedding_weights(
                split_scale_shifts=False
            )
        ]

    @classmethod
    def from_float(cls, module: BaseEmbeddingBag) -> "QuantBatchedEmbeddingBag":
        """Build a quantized embedding bag from a float module carrying a qconfig."""
        assert hasattr(
            module, "qconfig"
        ), "EmbeddingBagCollectionInterface input float module must have qconfig defined"

        def _to_data_type(dtype: torch.dtype) -> DataType:
            # NOTE(review): `torch.quint4`/`torch.qint4` and `torch.quint2`/
            # `torch.qint2` do not appear to be standard torch dtype attributes
            # (torch exposes `quint4x2`/`quint2x4`); confirm these resolve on
            # the torch version in use before relying on the INT4/INT2 paths.
            if dtype == torch.quint8 or dtype == torch.qint8:
                return DataType.INT8
            elif dtype == torch.quint4 or dtype == torch.qint4:
                return DataType.INT4
            elif dtype == torch.quint2 or dtype == torch.qint2:
                return DataType.INT2
            else:
                raise Exception(f"Invalid data type {dtype}")

        # pyre-ignore [16]
        data_type = _to_data_type(module.qconfig.weight().dtype)
        sparse_type = QuantBatchedEmbeddingBag.to_sparse_type(data_type)
        state_dict = dict(
            itertools.chain(module.named_buffers(), module.named_parameters())
        )
        device = next(iter(state_dict.values())).device
        # Adjust config to quantized version.
        # This obviously doesn't work for column-wise sharding.
        # pyre-ignore [29]
        config = copy.deepcopy(module.config())
        config.data_type = data_type
        # Column counts become byte counts once rows are quantized (each row
        # also carries a fused 4-byte scale/shift); patch all shard metadata
        # to match.
        for table in config.embedding_tables:
            table.local_cols = rounded_row_size_in_bytes(table.local_cols, sparse_type)
            if table.local_metadata is not None:
                table.local_metadata.shard_sizes = [
                    table.local_rows,
                    table.local_cols,
                ]
            if table.global_metadata is not None:
                for shard_meta in table.global_metadata.shards_metadata:
                    if shard_meta != table.local_metadata:
                        shard_meta.shard_sizes = [
                            shard_meta.shard_sizes[0],
                            rounded_row_size_in_bytes(
                                shard_meta.shard_sizes[1], sparse_type
                            ),
                        ]
                table.global_metadata.size = torch.Size(
                    [
                        table.global_metadata.size[0],
                        sum(
                            shard_meta.shard_sizes[1]
                            for shard_meta in table.global_metadata.shards_metadata
                        ),
                    ]
                )
        ret = QuantBatchedEmbeddingBag(config=config, device=device)
        # Quantize weights.
        quant_weight_list = []
        for _, weight in state_dict.items():
            quantized_weights = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
                weight, DATA_TYPE_NUM_BITS[data_type]
            )
            # weight and 4 byte scale shift (2xfp16)
            quant_weight = quantized_weights[:, :-4]
            scale_shift = quantized_weights[:, -4:]
            quant_weight_list.append((quant_weight, scale_shift))
        ret.emb_module.assign_embedding_weights(quant_weight_list)
        return ret
| [
"torchrec.distributed.utils.append_prefix",
"torchrec.sparse.jagged_tensor.KeyedTensor"
] | [((1068, 1095), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1085, 1095), False, 'import logging\n'), ((3324, 3415), 'torchrec.sparse.jagged_tensor.KeyedTensor', 'KeyedTensor', ([], {'keys': 'self._emb_names', 'values': 'values', 'length_per_key': 'self._lengths_per_emb'}), '(keys=self._emb_names, values=values, length_per_key=self.\n _lengths_per_emb)\n', (3335, 3415), False, 'from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor\n'), ((5356, 5412), 'fbgemm_gpu.split_table_batched_embeddings_ops.rounded_row_size_in_bytes', 'rounded_row_size_in_bytes', (['table.local_cols', 'sparse_type'], {}), '(table.local_cols, sparse_type)\n', (5381, 5412), False, 'from fbgemm_gpu.split_table_batched_embeddings_ops import EmbeddingLocation, IntNBitTableBatchedEmbeddingBagsCodegen, rounded_row_size_in_bytes\n'), ((6656, 6754), 'torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf', 'torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf', (['weight', 'DATA_TYPE_NUM_BITS[data_type]'], {}), '(weight,\n DATA_TYPE_NUM_BITS[data_type])\n', (6711, 6754), False, 'import torch\n'), ((3742, 3788), 'torchrec.distributed.utils.append_prefix', 'append_prefix', (['prefix', 'f"""{config.name}.weight"""'], {}), "(prefix, f'{config.name}.weight')\n", (3755, 3788), False, 'from torchrec.distributed.utils import append_prefix\n'), ((5926, 5991), 'fbgemm_gpu.split_table_batched_embeddings_ops.rounded_row_size_in_bytes', 'rounded_row_size_in_bytes', (['shard_meta.shard_sizes[1]', 'sparse_type'], {}), '(shard_meta.shard_sizes[1], sparse_type)\n', (5951, 5991), False, 'from fbgemm_gpu.split_table_batched_embeddings_ops import EmbeddingLocation, IntNBitTableBatchedEmbeddingBagsCodegen, rounded_row_size_in_bytes\n')] |
from typing import (
Iterator,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
)
import io
import torch
import torch.utils.data.datapipes as dp
from torchdata.datapipes.iter import S3FileLister, S3FileLoader
from torchdata.datapipes.utils import StreamWrapper
from torchrec.datasets.utils import (
LoadFiles,
ReadLinesFromCSV)
from torch.utils.data import IterDataPipe
from torchrec.datasets.criteo import _default_row_mapper
# S3 prefixes to enumerate; every object under day_0 will be listed.
s3_prefixes = ['s3://criteo-dataset/day_0']
dp_s3_urls = S3FileLister(s3_prefixes)
dp_s3_files = S3FileLoader(dp_s3_urls)  # outputs in (url, BytesIO)
# more datapipes to convert loaded bytes, e.g.
class LoadWithTextIOWrapper(IterDataPipe):
    """Wrap each (url, byte stream) pair with a UTF-8 text reader.

    Yields (url, io.TextIOWrapper) tuples so downstream datapipes can
    consume the loaded S3 payloads as text.
    """

    def __init__(self, paths, **open_kw):
        self.paths = paths
        self.open_kw: Any = open_kw  # pyre-ignore[4]

    def __iter__(self) -> Iterator[Any]:
        for source_url, raw_stream in self.paths:
            yield source_url, io.TextIOWrapper(raw_stream, encoding='utf-8')
class S3CriteoIterDataPipe(IterDataPipe):
    """
    IterDataPipe that streams either the Criteo 1TB Click Logs Dataset
    (https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
    Kaggle/Criteo Display Advertising Dataset
    (https://www.kaggle.com/c/criteo-display-ad-challenge/) out of the byte
    streams produced by an upstream S3 loader.

    Each dataloader worker consumes only its round-robin share of the inputs,
    decodes the bytes as text, splits every line on tabs, and optionally
    applies ``row_mapper`` to each split row.

    Args:
        paths (S3FileLoader): upstream datapipe yielding (url, bytes) pairs.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply
            to each split TSV line.
        open_kw: options forwarded to the text-wrapping stage.
    Example:
        >>> datapipe = S3CriteoIterDataPipe(dp_s3_files)
        >>> datapipe = dp.iter.Batcher(datapipe, 100)
        >>> datapipe = dp.iter.Collator(datapipe)
        >>> batch = next(iter(datapipe))
    """
    def __init__(
        self,
        paths: S3FileLoader,
        *,
        # pyre-ignore[2]
        row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
        # pyre-ignore[2]
        **open_kw,
    ) -> None:
        self.paths = paths
        self.row_mapper = row_mapper
        self.open_kw: Any = open_kw  # pyre-ignore[4]
    # pyre-ignore[3]
    def __iter__(self) -> Iterator[Any]:
        source = self.paths
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None:
            # Round-robin shard the inputs across dataloader workers.
            source = (
                item
                for item_idx, item in enumerate(source)
                if item_idx % worker_info.num_workers == worker_info.id
            )
        pipeline = LoadWithTextIOWrapper(source)
        pipeline = ReadLinesFromCSV(pipeline, delimiter="\t")
        if self.row_mapper:
            pipeline = dp.iter.Mapper(pipeline, self.row_mapper)
        yield from pipeline
#print(dp_s3_files)
#datapipe = StreamWrapper(dp_s3_files).parse_csv_files(delimiter=' ')
#for d in datapipe: # Start loading data
# Build the streaming pipeline: S3 bytes -> parsed rows -> batches of 100
# -> collated tensors, then pull one batch to exercise the whole chain.
datapipe = S3CriteoIterDataPipe(dp_s3_files)
datapipe = dp.iter.Batcher(datapipe, 100)
datapipe = dp.iter.Collator(datapipe)
batch = next(iter(datapipe))
print(batch.keys())
| [
"torchrec.datasets.utils.ReadLinesFromCSV"
] | [((520, 545), 'torchdata.datapipes.iter.S3FileLister', 'S3FileLister', (['s3_prefixes'], {}), '(s3_prefixes)\n', (532, 545), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((560, 584), 'torchdata.datapipes.iter.S3FileLoader', 'S3FileLoader', (['dp_s3_urls'], {}), '(dp_s3_urls)\n', (572, 584), False, 'from torchdata.datapipes.iter import S3FileLister, S3FileLoader\n'), ((3173, 3203), 'torch.utils.data.datapipes.iter.Batcher', 'dp.iter.Batcher', (['datapipe', '(100)'], {}), '(datapipe, 100)\n', (3188, 3203), True, 'import torch.utils.data.datapipes as dp\n'), ((3215, 3241), 'torch.utils.data.datapipes.iter.Collator', 'dp.iter.Collator', (['datapipe'], {}), '(datapipe)\n', (3231, 3241), True, 'import torch.utils.data.datapipes as dp\n'), ((2416, 2450), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (2448, 2450), False, 'import torch\n'), ((2821, 2863), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['datapipe'], {'delimiter': '"""\t"""'}), "(datapipe, delimiter='\\t')\n", (2837, 2863), False, 'from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV\n'), ((2915, 2956), 'torch.utils.data.datapipes.iter.Mapper', 'dp.iter.Mapper', (['datapipe', 'self.row_mapper'], {}), '(datapipe, self.row_mapper)\n', (2929, 2956), True, 'import torch.utils.data.datapipes as dp\n'), ((932, 974), 'io.TextIOWrapper', 'io.TextIOWrapper', (['buffer'], {'encoding': '"""utf-8"""'}), "(buffer, encoding='utf-8')\n", (948, 974), False, 'import io\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from typing import List, Tuple, Optional, Dict, cast
from torchrec.distributed.planner.constants import MAX_SIZE
from torchrec.distributed.planner.types import (
Partitioner,
Topology,
ShardingOption,
Storage,
PartitionByType,
PlannerError,
DeviceHardware,
)
from torchrec.distributed.types import ShardingType
def greedy_partition(
    num_partitions: int,
    sharding_options: List[ShardingOption],
    shard_idxes: Optional[List[Tuple[int, int]]] = None,
    partition_sums: Optional[List[float]] = None,
    mem_cap: Optional[List[Storage]] = None,
) -> List[List[Tuple[int, int]]]:
    """
    Divides indices among `num_partitions` partitions in a greedy fashion based on perf
    weights associated with each [option_idx, shard_idx].

    Shards are considered from most to least expensive (by perf, then
    storage); each is placed on the partition that currently has the lowest
    accumulated perf among those with enough remaining memory.

    Args:
        num_partitions (int): number of partitions (ranks) to fill.
        sharding_options (List[ShardingOption]): options whose shards are placed.
        shard_idxes (Optional[List[Tuple[int, int]]]): subset of
            (option_idx, shard_idx) pairs to place; defaults to all shards.
        partition_sums (Optional[List[float]]): starting perf per partition;
            mutated in place as shards are assigned.
        mem_cap (Optional[List[Storage]]): per-partition storage budget;
            defaults to unbounded.

    Returns:
        List[List[Tuple[int, int]]]: list of indices of (option_idx, shard_idx) that should be allocated to each partition.
    Example::
        sharding_options = [
            [0,1,2,3] with perfs [10,20,30,40]
            [0,1] with perfs [200,300]
        ]
        # with num_partitions=3
        # The final output would be:
        [
            partition_0 = [(1,1)], with a perf of 300
            partition_1 = [(1,0)], with a perf of 200
            partition_2 = [(0,0),(0,1),(0,2),(0,3)], with a perf of 100 (10+20+30+40)
        ]
    """
    if shard_idxes is None:
        # Default to placing every shard of every sharding option.
        shard_idxes = []
        for option_idx, sharding_option in enumerate(sharding_options):
            for shard_idx in range(sharding_option.num_shards):
                shard_idxes.append((option_idx, shard_idx))
    def _to_comparable(order_shard_idx: Tuple[int, int]) -> Tuple[float, Storage]:
        # Sort key: (perf, storage) of the referenced shard.
        sharding_option: ShardingOption = sharding_options[order_shard_idx[0]]
        return (
            cast(float, sharding_option.shards[order_shard_idx[1]].perf),
            cast(Storage, sharding_option.shards[order_shard_idx[1]].storage),
        )
    # Ascending sort; pop() below therefore takes the most expensive shard first.
    sorted_shard_idxes = sorted(
        shard_idxes, key=lambda order_shard_idx: _to_comparable(order_shard_idx)
    )
    partitions = [[] for p in range(num_partitions)]
    if partition_sums is None:
        partition_sums = [0.0] * num_partitions
    partition_size_sums = [Storage(hbm=0, ddr=0) for _ in range(num_partitions)]
    if mem_cap is None:
        # No explicit budget: treat every partition as effectively unbounded.
        mem_cap = [Storage(hbm=MAX_SIZE, ddr=MAX_SIZE) for _ in range(num_partitions)]
    assert len(partition_size_sums) == len(
        mem_cap
    ), "partition_size_sums and mem_cap must have the same dimensions"
    """
    Successively add remaining pairs to the partition with the minimum sum.
    """
    while sorted_shard_idxes:
        option_idx, shard_idx = sorted_shard_idxes.pop()
        storage_size = cast(
            Storage, sharding_options[option_idx].shards[shard_idx].storage
        )
        perf = cast(float, sharding_options[option_idx].shards[shard_idx].perf)
        min_sum = MAX_SIZE
        min_partition_idx = -1
        # Among partitions with enough free HBM and DDR, pick the one with
        # the lowest accumulated perf.
        for partition_idx in range(num_partitions):
            partition_mem_cap: Storage = mem_cap[partition_idx]
            partition_size_sum: Storage = partition_size_sums[partition_idx]
            if (
                partition_mem_cap.hbm >= partition_size_sum.hbm + storage_size.hbm
            ) and (partition_mem_cap.ddr >= partition_size_sum.ddr + storage_size.ddr):
                if partition_sums[partition_idx] < min_sum:
                    min_sum = partition_sums[partition_idx]
                    min_partition_idx = partition_idx
        if min_partition_idx == -1:
            # No partition can fit this shard -> the proposal is infeasible.
            raise PlannerError(
                f"Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}."
            )
        partitions[min_partition_idx].append((option_idx, shard_idx))
        partition_size_sums[min_partition_idx] += storage_size
        partition_sums[min_partition_idx] += perf
    return partitions
def uniform_partition(
    num_partitions: int,
    sharding_options: List[ShardingOption],
    mem_cap: List[Storage],
    shard_idxes: Optional[List[Tuple[int, int]]] = None,
) -> List[List[Tuple[int, int]]]:
    """
    Assigns one shard to each rank: shard ``i`` of every sharding option goes
    to partition ``i``, subject to each rank's memory cap.

    Example::
        sharding_options = [
            [0,1,2,3],
            [0,1,2,3],
        ]
        # with num_partitions=4
        # The final output would be:
        [
            partition_0 = [(0,0),(1,0)]
            partition_1 = [(0,1),(1,1)]
            partition_2 = [(0,2),(1,2)]
            partition_3 = [(0,3),(1,3)]
        ]
    """
    used_storage = [Storage(hbm=0, ddr=0) for _ in range(num_partitions)]
    if shard_idxes is None:
        # Default: every shard of every option, in enumeration order.
        shard_idxes = [
            (option_idx, shard_idx)
            for option_idx, option in enumerate(sharding_options)
            for shard_idx in range(option.num_shards)
        ]
    assignment: List[List[Tuple[int, int]]] = [[] for _ in range(num_partitions)]
    for option_idx, shard_idx in shard_idxes:
        shard_storage = cast(
            Storage, sharding_options[option_idx].shards[shard_idx].storage
        )
        # Under the uniform scheme the target rank equals the shard index.
        if used_storage[shard_idx] + shard_storage > mem_cap[shard_idx]:
            raise PlannerError(
                f"Table of size {shard_storage} GB cannot be added to any rank. partition_size_sums: {used_storage}. mem_cap: {mem_cap}."
            )
        used_storage[shard_idx] += shard_storage
        assignment[shard_idx].append((option_idx, shard_idx))
    return assignment
def _group_sharding_options(
sharding_options: List[ShardingOption],
) -> Dict[str, List[ShardingOption]]:
partition_by_groups = {}
for sharding_option in sharding_options:
if sharding_option.partition_by not in partition_by_groups:
partition_by_groups[sharding_option.partition_by] = []
partition_by_groups[sharding_option.partition_by].append(sharding_option)
return partition_by_groups
class GreedyPerfPartitioner(Partitioner):
    """
    Greedy Partitioner
    """
    def partition(
        self,
        proposal: List[ShardingOption],
        storage_constraint: Topology,
    ) -> List[ShardingOption]:
        """
        Places sharding options on topology based on each sharding option's
        `partition_by` attribute.
        Topology storage and perfs are updated at the end of the placement.
        Args:
            proposal (List[ShardingOption]): list of populated sharding options.
            storage_constraint (Topology): device topology.
        Returns:
            List[ShardingOption]: list of sharding options for selected plan.
        Example::
            sharding_options = [
                    ShardingOption(partition_by="uniform",
                            shards=[
                                Shards(storage=1, perf=1),
                                Shards(storage=1, perf=1),
                            ]),
                    ShardingOption(partition_by="uniform",
                            shards=[
                                Shards(storage=2, perf=2),
                                Shards(storage=2, perf=2),
                            ]),
                    ShardingOption(partition_by="device",
                            shards=[
                                Shards(storage=3, perf=3),
                                Shards(storage=3, perf=3),
                            ])
                    ShardingOption(partition_by="device",
                            shards=[
                                Shards(storage=4, perf=4),
                                Shards(storage=4, perf=4),
                            ]),
                ]
            topology = Topology(world_size=2)
            # First [sharding_options[0] and sharding_options[1]] will be placed on the
            # topology with the uniform strategy, resulting in
            topology.devices[0].perf = (1,2)
            topology.devices[1].perf = (1,2)
            # Finally sharding_options[2] and sharding_options[3]] will be placed on the
            # topology with the device strategy (see docstring of `partition_by_device` for
            # more details).
            topology.devices[0].perf = (1,2) + (3,4)
            topology.devices[1].perf = (1,2) + (3,4)
            # The topology updates are done after the end of all the placements (the other
            # in the example is just for clarity).
        """
        # Work on copies so the caller's proposal/topology are untouched until
        # shard ranks are written back via _update_shards.
        # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
        self._topology: Topology = copy.deepcopy(storage_constraint)
        plan = copy.deepcopy(proposal)
        grouped_sharding_options = _group_sharding_options(plan)
        # Place uniform, then host-level, then device-level options.
        if PartitionByType.UNIFORM.value in grouped_sharding_options:
            self._partition_by_uniform(
                grouped_sharding_options[PartitionByType.UNIFORM.value]
            )
        if PartitionByType.HOST.value in grouped_sharding_options:
            self._partition_by_host(
                grouped_sharding_options[PartitionByType.HOST.value]
            )
        if PartitionByType.DEVICE.value in grouped_sharding_options:
            self._partition_by_device(
                grouped_sharding_options[PartitionByType.DEVICE.value]
            )
        return plan
    def _partition_by_uniform(self, sharding_options: List[ShardingOption]) -> None:
        # Shard i of each option goes to rank i; bounded by each device's storage.
        partitions = uniform_partition(
            # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
            num_partitions=self._topology.world_size,
            sharding_options=sharding_options,
            mem_cap=[device.storage for device in self._topology.devices],
        )
        self._update_shards(partitions, sharding_options)
    def _partition_by_device(self, sharding_options: List[ShardingOption]) -> None:
        # Greedy placement across all ranks, seeded with each device's current
        # perf/storage so successive groups stack correctly.
        # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
        partition_sums = [float(device.perf) for device in self._topology.devices]
        mem_cap: List[Storage] = [device.storage for device in self._topology.devices]
        partitions = greedy_partition(
            num_partitions=self._topology.world_size,
            sharding_options=sharding_options,
            partition_sums=partition_sums,
            mem_cap=mem_cap,
        )
        self._update_shards(partitions, sharding_options)
    def _partition_by_host(self, sharding_options: List[ShardingOption]) -> None:
        # Two-level placement: first greedily assign whole options to hosts,
        # then spread each option's shards over that host's devices.
        # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
        num_hosts: int = self._topology.world_size // self._topology.local_world_size
        mem_cap: List[Storage] = []
        partition_sums = []
        shard_idxes = []
        for option_idx, _ in enumerate(sharding_options):
            # only take the first shard from each sharding option. We can infer the rest
            shard_idxes.append((option_idx, 0))
        host_level_devices: Dict[int, List[DeviceHardware]] = {}
        for i in range(num_hosts):
            devices_in_host = self._topology.devices[
                i
                * self._topology.local_world_size : (i + 1)
                * self._topology.local_world_size
            ]
            host_level_devices[i] = devices_in_host
            # mem_cap of a host is the min of the storage of all devies on that host
            mem_cap.append(min([device.storage for device in devices_in_host]))
            # perf of a host is the max across all of its devices. Typically this should be zero at entry point.
            partition_sums.append(
                max([float(device.perf) for device in devices_in_host])
            )
        host_level_partitions: List[List[Tuple[int, int]]] = greedy_partition(
            num_partitions=num_hosts,
            sharding_options=sharding_options,
            shard_idxes=shard_idxes,
            partition_sums=partition_sums,
            mem_cap=mem_cap,
        )
        partitions: List[List[Tuple[int, int]]] = [[] for _ in self._topology.devices]
        for host_idx, host_partition in enumerate(host_level_partitions):
            # Within a host, split options by how their shards spread over
            # the host's local devices (uniform vs greedy).
            self._uniform_device_level_partition(
                partitions=partitions,
                sharding_options=sharding_options,
                option_idxes=[
                    option_idx
                    for option_idx, _ in host_partition
                    if _base_partition_by(sharding_options[option_idx].sharding_type)
                    == PartitionByType.UNIFORM.value
                ],
                host_level_devices=host_level_devices[host_idx],
                host_idx=host_idx,
            )
            self._greedy_device_level_partition(
                partitions=partitions,
                sharding_options=sharding_options,
                option_idxes=[
                    option_idx
                    for option_idx, _ in host_partition
                    if _base_partition_by(sharding_options[option_idx].sharding_type)
                    == PartitionByType.DEVICE.value
                ],
                host_level_devices=host_level_devices[host_idx],
                host_idx=host_idx,
            )
        self._update_shards(partitions, sharding_options)
    def _uniform_device_level_partition(
        self,
        partitions: List[List[Tuple[int, int]]],
        sharding_options: List[ShardingOption],
        option_idxes: List[int],
        host_level_devices: List[DeviceHardware],
        host_idx: int,
    ) -> None:
        # Place the selected options' shards one-per-device within this host,
        # translating host-local device indices to global rank indices.
        shard_idxes = []
        for option_idx in option_idxes:
            for shard_idx in range(sharding_options[option_idx].num_shards):
                shard_idxes.append((option_idx, shard_idx))
        if shard_idxes:
            device_level_partitions: List[List[Tuple[int, int]]] = uniform_partition(
                # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
                num_partitions=self._topology.local_world_size,
                sharding_options=sharding_options,
                mem_cap=[device.storage for device in host_level_devices],
                shard_idxes=shard_idxes,
            )
            for device_idx, device_partition in enumerate(device_level_partitions):
                for option_idx, shard_idx in device_partition:
                    partitions[
                        self._topology.local_world_size * host_idx + device_idx
                    ].append((option_idx, shard_idx))
    def _greedy_device_level_partition(
        self,
        partitions: List[List[Tuple[int, int]]],
        sharding_options: List[ShardingOption],
        option_idxes: List[int],
        host_level_devices: List[DeviceHardware],
        host_idx: int,
    ) -> None:
        # Greedily place the selected options' shards across this host's
        # devices, again mapping local device indices to global ranks.
        shard_idxes = []
        for option_idx in option_idxes:
            for shard_idx in range(sharding_options[option_idx].num_shards):
                shard_idxes.append((option_idx, shard_idx))
        if shard_idxes:
            device_level_partitions: List[List[Tuple[int, int]]] = greedy_partition(
                # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
                num_partitions=self._topology.local_world_size,
                sharding_options=sharding_options,
                shard_idxes=shard_idxes,
                partition_sums=[float(device.perf) for device in host_level_devices],
                mem_cap=[device.storage for device in host_level_devices],
            )
            for device_idx, device_partition in enumerate(device_level_partitions):
                for option_idx, shard_idx in device_partition:
                    partitions[
                        self._topology.local_world_size * host_idx + device_idx
                    ].append((option_idx, shard_idx))
    def _update_shards(
        self,
        partitions: List[List[Tuple[int, int]]],
        sharding_options: List[ShardingOption],
    ) -> None:
        """
        Updates the ranks of the shards as well as device perfs.
        """
        for partition_idx, partition in enumerate(partitions):
            for [option_idx, shard_idx] in partition:
                sharding_options[option_idx].shards[shard_idx].rank = partition_idx
                # Deduct storage from and add perf to the assigned device.
                # pyre-ignore [16]: `GreedyPerfPartitioner` has no attribute `_topology`.
                self._topology.devices[partition_idx].storage -= (
                    sharding_options[option_idx].shards[shard_idx].storage
                )
                self._topology.devices[partition_idx].perf += (
                    sharding_options[option_idx].shards[shard_idx].perf
                )
def _base_partition_by(sharding_type: str) -> str:
    """Maps a host-partitioned sharding type to the scheme used to spread its
    shards over the devices within each host.

    Raises:
        ValueError: if `sharding_type` is not a host-partitioned type.
    """
    device_level_scheme = {
        ShardingType.TABLE_ROW_WISE.value: PartitionByType.UNIFORM.value,
        ShardingType.TABLE_COLUMN_WISE.value: PartitionByType.DEVICE.value,
    }
    if sharding_type not in device_level_scheme:
        raise ValueError(
            f"Sharding type provided must have a partition_by value of HOST: {sharding_type}"
        )
    return device_level_scheme[sharding_type]
| [
"torchrec.distributed.planner.types.PlannerError",
"torchrec.distributed.planner.types.Storage"
] | [((2463, 2484), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(0)', 'ddr': '(0)'}), '(hbm=0, ddr=0)\n', (2470, 2484), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n'), ((2964, 3033), 'typing.cast', 'cast', (['Storage', 'sharding_options[option_idx].shards[shard_idx].storage'], {}), '(Storage, sharding_options[option_idx].shards[shard_idx].storage)\n', (2968, 3033), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((3071, 3135), 'typing.cast', 'cast', (['float', 'sharding_options[option_idx].shards[shard_idx].perf'], {}), '(float, sharding_options[option_idx].shards[shard_idx].perf)\n', (3075, 3135), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((4829, 4850), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': '(0)', 'ddr': '(0)'}), '(hbm=0, ddr=0)\n', (4836, 4850), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n'), ((5285, 5354), 'typing.cast', 'cast', (['Storage', 'sharding_options[option_idx].shards[shard_idx].storage'], {}), '(Storage, sharding_options[option_idx].shards[shard_idx].storage)\n', (5289, 5354), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((8830, 8863), 'copy.deepcopy', 'copy.deepcopy', (['storage_constraint'], {}), '(storage_constraint)\n', (8843, 8863), False, 'import copy\n'), ((8879, 8902), 'copy.deepcopy', 'copy.deepcopy', (['proposal'], {}), '(proposal)\n', (8892, 8902), False, 'import copy\n'), ((2030, 2090), 'typing.cast', 'cast', (['float', 'sharding_option.shards[order_shard_idx[1]].perf'], {}), '(float, sharding_option.shards[order_shard_idx[1]].perf)\n', (2034, 2090), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((2104, 2169), 'typing.cast', 'cast', (['Storage', 
'sharding_option.shards[order_shard_idx[1]].storage'], {}), '(Storage, sharding_option.shards[order_shard_idx[1]].storage)\n', (2108, 2169), False, 'from typing import List, Tuple, Optional, Dict, cast\n'), ((2561, 2596), 'torchrec.distributed.planner.types.Storage', 'Storage', ([], {'hbm': 'MAX_SIZE', 'ddr': 'MAX_SIZE'}), '(hbm=MAX_SIZE, ddr=MAX_SIZE)\n', (2568, 2596), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n'), ((3805, 3956), 'torchrec.distributed.planner.types.PlannerError', 'PlannerError', (['f"""Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}."""'], {}), "(\n f'Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}.'\n )\n", (3817, 3956), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n'), ((5474, 5625), 'torchrec.distributed.planner.types.PlannerError', 'PlannerError', (['f"""Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}."""'], {}), "(\n f'Table of size {storage_size} GB cannot be added to any rank. partition_size_sums: {partition_size_sums}. mem_cap: {mem_cap}.'\n )\n", (5486, 5625), False, 'from torchrec.distributed.planner.types import Partitioner, Topology, ShardingOption, Storage, PartitionByType, PlannerError, DeviceHardware\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import List, cast
import torch
from torchrec.distributed.embeddingbag import (
EmbeddingBagCollectionSharder,
)
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer
from torchrec.distributed.planner.types import Topology, ShardingOption
from torchrec.distributed.tests.test_model import TestSparseNN
from torchrec.modules.embedding_configs import EmbeddingBagConfig
class TestProposers(unittest.TestCase):
    """Unit tests for the greedy and uniform proposers over small models."""
    def setUp(self) -> None:
        # Two-rank CUDA topology shared by all tests.
        topology = Topology(world_size=2, compute_device="cuda")
        self.enumerator = EmbeddingEnumerator(topology=topology)
        self.greedy_proposer = GreedyProposer()
        self.uniform_proposer = UniformProposer()
    def test_greedy_two_table_perf(self) -> None:
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100,
                embedding_dim=10,
                name="table_0",
                feature_names=["feature_0"],
            ),
            EmbeddingBagConfig(
                num_embeddings=100,
                embedding_dim=10,
                name="table_1",
                feature_names=["feature_1"],
            ),
        ]
        # Meta device avoids allocating real embedding storage.
        model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
        search_space = self.enumerator.enumerate(
            module=model, sharders=[EmbeddingBagCollectionSharder()]
        )
        self.greedy_proposer.load(search_space)
        # simulate first five iterations:
        output = []
        for _ in range(5):
            proposal = cast(List[ShardingOption], self.greedy_proposer.propose())
            # Sort by (max shard perf, name) so the recorded order is deterministic.
            proposal.sort(
                key=lambda sharding_option: (
                    max([shard.perf for shard in sharding_option.shards]),
                    sharding_option.name,
                )
            )
            output.append(
                [
                    (
                        candidate.name,
                        candidate.sharding_type,
                        candidate.compute_kernel,
                    )
                    for candidate in proposal
                ]
            )
            self.greedy_proposer.feedback(partitionable=True)
        # Each inner list is one proposal: (name, sharding_type, compute_kernel).
        expected_output = [
            [
                (
                    "table_0",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
            ],
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_0",
                    "data_parallel",
                    "dense",
                ),
            ],
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_0",
                    "row_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_0",
                    "table_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_0",
                    "row_wise",
                    "batched_dense",
                ),
            ],
        ]
        self.assertEqual(expected_output, output)
    def test_uniform_three_table_perf(self) -> None:
        tables = [
            EmbeddingBagConfig(
                num_embeddings=100 * i,
                embedding_dim=10 * i,
                name="table_" + str(i),
                feature_names=["feature_" + str(i)],
            )
            for i in range(1, 4)
        ]
        model = TestSparseNN(tables=tables, sparse_device=torch.device("meta"))
        search_space = self.enumerator.enumerate(
            module=model, sharders=[EmbeddingBagCollectionSharder()]
        )
        self.uniform_proposer.load(search_space)
        output = []
        # Drain the proposer until it stops producing proposals.
        proposal = self.uniform_proposer.propose()
        while proposal:
            proposal.sort(
                key=lambda sharding_option: (
                    max([shard.perf for shard in sharding_option.shards]),
                    sharding_option.name,
                )
            )
            output.append(
                [
                    (
                        candidate.name,
                        candidate.sharding_type,
                        candidate.compute_kernel,
                    )
                    for candidate in proposal
                ]
            )
            self.uniform_proposer.feedback(partitionable=True)
            proposal = self.uniform_proposer.propose()
        # One inner list per proposal; all tables share one sharding type each round.
        expected_output = [
            [
                (
                    "table_1",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_2",
                    "data_parallel",
                    "batched_dense",
                ),
                (
                    "table_3",
                    "data_parallel",
                    "batched_dense",
                ),
            ],
            [
                (
                    "table_1",
                    "table_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "table_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "table_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "row_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "row_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "row_wise",
                    "batched_fused",
                ),
            ],
            [
                (
                    "table_1",
                    "table_row_wise",
                    "batched_fused",
                ),
                (
                    "table_2",
                    "table_row_wise",
                    "batched_fused",
                ),
                (
                    "table_3",
                    "table_row_wise",
                    "batched_fused",
                ),
            ],
        ]
        self.assertEqual(expected_output, output)
| [
"torchrec.modules.embedding_configs.EmbeddingBagConfig",
"torchrec.distributed.planner.proposers.GreedyProposer",
"torchrec.distributed.planner.types.Topology",
"torchrec.distributed.planner.proposers.UniformProposer",
"torchrec.distributed.planner.enumerators.EmbeddingEnumerator",
"torchrec.distributed.e... | [((824, 869), 'torchrec.distributed.planner.types.Topology', 'Topology', ([], {'world_size': '(2)', 'compute_device': '"""cuda"""'}), "(world_size=2, compute_device='cuda')\n", (832, 869), False, 'from torchrec.distributed.planner.types import Topology, ShardingOption\n'), ((896, 934), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology'}), '(topology=topology)\n', (915, 934), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((966, 982), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {}), '()\n', (980, 982), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((1015, 1032), 'torchrec.distributed.planner.proposers.UniformProposer', 'UniformProposer', ([], {}), '()\n', (1030, 1032), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((1115, 1220), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_0"""', 'feature_names': "['feature_0']"}), "(num_embeddings=100, embedding_dim=10, name='table_0',\n feature_names=['feature_0'])\n", (1133, 1220), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1309, 1414), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'num_embeddings': '(100)', 'embedding_dim': '(10)', 'name': '"""table_1"""', 'feature_names': "['feature_1']"}), "(num_embeddings=100, embedding_dim=10, name='table_1',\n feature_names=['feature_1'])\n", (1327, 1414), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1560, 1580), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (1572, 1580), False, 'import torch\n'), ((4546, 4566), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (4558, 
4566), False, 'import torch\n'), ((1668, 1699), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (1697, 1699), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n'), ((4654, 4685), 'torchrec.distributed.embeddingbag.EmbeddingBagCollectionSharder', 'EmbeddingBagCollectionSharder', ([], {}), '()\n', (4683, 4685), False, 'from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
from functools import reduce
from typing import Tuple, Dict, Optional, List, cast, Union
import torch
import torch.distributed as dist
from torch import nn
from torchrec.distributed.collective_utils import (
invoke_on_rank_and_broadcast_result,
)
from torchrec.distributed.planner.constants import MAX_SIZE
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.partitioners import GreedyPerfPartitioner
from torchrec.distributed.planner.perf_models import NoopPerfModel
from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer
from torchrec.distributed.planner.stats import EmbeddingStats
from torchrec.distributed.planner.storage_reservations import (
HeuristicalStorageReservation,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Partitioner,
Topology,
Stats,
Shard,
Storage,
ShardingOption,
StorageReservation,
Enumerator,
Proposer,
PerfModel,
PlannerError,
)
from torchrec.distributed.types import (
EnumerableShardingSpec,
ShardMetadata,
)
from torchrec.distributed.types import (
ShardingPlan,
ShardingPlanner,
ModuleSharder,
ShardingType,
ParameterSharding,
)
def _merge_shards_by_dim(shards: List[Shard], dim: int) -> List[Shard]:
# merges shards down to one per rank along dimension.
# Will recompute shard offsets
merged_shards = []
shards = sorted(shards, key=lambda x: x.rank)
current_rank = -1
current_shard: Optional[Shard] = None
current_dim_offset = 0
for shard in shards:
if shard.rank != current_rank:
current_shard = copy.deepcopy(shard)
current_shard.offset[dim] = current_dim_offset
merged_shards.append(current_shard)
current_rank = shard.rank
else:
# pyre-ignore [16]
current_shard.size[dim] += shard.size[dim]
# pyre-ignore [16]
current_shard.storage += shard.storage
# pyre-ignore [16]
current_shard.perf += shard.perf
current_dim_offset += shard.size[dim]
return merged_shards
def _to_sharding_plan(
    sharding_options: List[ShardingOption],
    topology: Topology,
) -> ShardingPlan:
    # Converts planner output (sharding options with ranked shards) into the
    # ShardingPlan structure consumed by the distributed runtime.
    def _placement(
        compute_device: str,
        rank: int,
        local_size: int,
    ) -> str:
        # Placement string "rank:<rank>/<device>"; for CUDA the device index
        # is the rank's local index within its host.
        param_device = compute_device
        if compute_device == "cuda":
            param_device = torch.device("cuda", rank % local_size)
        return f"rank:{rank}/{param_device}"
    compute_device = topology.compute_device
    local_size = topology.local_world_size
    plan = {}
    for sharding_option in sharding_options:
        shards = sharding_option.shards
        sharding_type = sharding_option.sharding_type
        # Group parameter shardings by module path.
        module_plan = plan.get(sharding_option.path, {})
        module_plan[sharding_option.name] = ParameterSharding(
            # Data-parallel parameters carry no shard spec.
            sharding_spec=None
            if sharding_type == ShardingType.DATA_PARALLEL.value
            else EnumerableShardingSpec(
                [
                    ShardMetadata(
                        shard_sizes=shard.size,
                        shard_offsets=shard.offset,
                        placement=_placement(
                            compute_device, cast(int, shard.rank), local_size
                        ),
                    )
                    for shard in shards
                ]
            ),
            sharding_type=sharding_type,
            compute_kernel=sharding_option.compute_kernel,
            ranks=[cast(int, shard.rank) for shard in shards],
        )
        plan[sharding_option.path] = module_plan
    return ShardingPlan(plan)
class EmbeddingShardingPlanner(ShardingPlanner):
    def __init__(
        self,
        topology: Topology,
        enumerator: Optional[Enumerator] = None,
        storage_reservation: Optional[StorageReservation] = None,
        proposer: Optional[Union[Proposer, List[Proposer]]] = None,
        partitioner: Optional[Partitioner] = None,
        performance_model: Optional[PerfModel] = None,
        stats: Optional[Stats] = None,
        constraints: Optional[Dict[str, ParameterConstraints]] = None,
        debug: bool = False,
    ) -> None:
        """Wires together the planner pipeline, supplying defaults for any
        stage (enumerator, storage reservation, proposers, partitioner,
        perf model, stats) the caller does not override.

        Args:
            topology: device topology the plan is computed for.
            enumerator: search-space generator; defaults to EmbeddingEnumerator.
            storage_reservation: storage headroom policy; defaults to a 15%
                heuristical reservation.
            proposer: single proposer or list of proposers; defaults to
                greedy (by depth and by breadth) plus uniform.
            partitioner: shard placement strategy; defaults to greedy-perf.
            performance_model: plan scorer; defaults to NoopPerfModel.
            stats: plan statistics reporter; defaults to EmbeddingStats.
            constraints: optional per-parameter sharding constraints.
            debug: enables extra debugging behavior.
        """
        self._topology = topology
        self._constraints = constraints
        self._enumerator: Enumerator = (
            enumerator
            if enumerator
            else EmbeddingEnumerator(
                topology=topology,
                constraints=constraints,
            )
        )
        self._storage_reservation: StorageReservation = (
            storage_reservation
            if storage_reservation
            else HeuristicalStorageReservation(percentage=0.15)
        )
        self._partitioner: Partitioner = (
            partitioner if partitioner else GreedyPerfPartitioner()
        )
        if proposer:
            # Accept either a single proposer or a list of them.
            self._proposers: List[Proposer] = (
                [proposer] if not isinstance(proposer, list) else proposer
            )
        else:
            self._proposers = [
                GreedyProposer(),
                GreedyProposer(use_depth=False),
                UniformProposer(),
            ]
        self._perf_model: PerfModel = (
            performance_model if performance_model else NoopPerfModel(topology=topology)
        )
        self._stats: Stats = stats if stats else EmbeddingStats()
        self._debug = debug
        # Counters reported by stats: proposals examined vs plans produced.
        self._num_proposals: int = 0
        self._num_plans: int = 0
def collective_plan(
self,
module: nn.Module,
sharders: List[ModuleSharder[nn.Module]],
# pyre-fixme[11]: Annotation `ProcessGroup` is not defined as a type.
pg: dist.ProcessGroup,
) -> ShardingPlan:
"""
Call self.plan(...) on rank 0 and broadcast
"""
return invoke_on_rank_and_broadcast_result(
pg,
0,
self.plan,
module,
sharders,
)
def plan(
self,
module: nn.Module,
sharders: List[ModuleSharder[nn.Module]],
) -> ShardingPlan:
best_plan = None
lowest_storage = Storage(MAX_SIZE, MAX_SIZE)
best_perf_rating = MAX_SIZE
storage_constraint: Topology = self._storage_reservation.reserve(
topology=self._topology,
module=module,
sharders=sharders,
constraints=self._constraints,
)
search_space = self._enumerator.enumerate(
module=module,
sharders=sharders,
)
if not search_space:
# No shardable parameters
return ShardingPlan({})
proposal_cache: Dict[
Tuple[int, ...],
Tuple[bool, Optional[List[ShardingOption]], Optional[float]],
] = {}
for proposer in self._proposers:
proposer.load(search_space=search_space)
for proposer in self._proposers:
proposal = proposer.propose()
while proposal:
proposal_key = tuple(sorted(map(hash, proposal)))
if proposal_key in proposal_cache:
partitionable, plan, perf_rating = proposal_cache[proposal_key]
proposer.feedback(
partitionable=partitionable,
plan=plan,
perf_rating=perf_rating,
)
proposal = proposer.propose()
continue
self._num_proposals += 1
try:
plan = self._partitioner.partition(
proposal=proposal,
storage_constraint=storage_constraint,
)
self._num_plans += 1
perf_rating = self._perf_model.rate(plan=plan)
if perf_rating < best_perf_rating:
best_perf_rating = perf_rating
best_plan = plan
proposal_cache[proposal_key] = (True, plan, perf_rating)
proposer.feedback(
partitionable=True, plan=plan, perf_rating=perf_rating
)
except PlannerError:
current_storage = cast(
Storage,
reduce(
lambda x, y: x + y,
[
shard.storage
for option in proposal
for shard in option.shards
],
),
)
if current_storage < lowest_storage:
lowest_storage = current_storage
proposal_cache[proposal_key] = (False, None, None)
proposer.feedback(partitionable=False)
proposal = proposer.propose()
if best_plan:
sharding_plan = _to_sharding_plan(best_plan, self._topology)
self._stats.log(
sharding_plan=sharding_plan,
topology=self._topology,
num_proposals=self._num_proposals,
num_plans=self._num_plans,
best_plan=best_plan,
constraints=self._constraints,
debug=self._debug,
)
return sharding_plan
else:
global_storage_capacity = reduce(
lambda x, y: x + y,
[device.storage for device in self._topology.devices],
)
global_storge_constraints = reduce(
lambda x, y: x + y,
[device.storage for device in storage_constraint.devices],
)
raise PlannerError(
f"Unable to find a plan for this model are evaluating {self._num_proposals} proposals."
"\nPossible solutions:"
f"\n 1) Increase the number of devices ({self._topology.world_size})"
f"\n 2) Reduce the model size ("
f"\n\t Global storage: {global_storage_capacity.hbm}, "
f"\n\t Available for model parallel: {global_storge_constraints},"
f"\n\t Requirement for model parallel: {lowest_storage})"
f"\n 3) Reduce local batch size ({self._topology.batch_size})"
"\n 4) Remove planner constraints that might be reducing search space or available storage\n"
)
| [
"torchrec.distributed.collective_utils.invoke_on_rank_and_broadcast_result",
"torchrec.distributed.planner.storage_reservations.HeuristicalStorageReservation",
"torchrec.distributed.planner.proposers.GreedyProposer",
"torchrec.distributed.planner.stats.EmbeddingStats",
"torchrec.distributed.planner.types.Pl... | [((3958, 3976), 'torchrec.distributed.types.ShardingPlan', 'ShardingPlan', (['plan'], {}), '(plan)\n', (3970, 3976), False, 'from torchrec.distributed.types import ShardingPlan, ShardingPlanner, ModuleSharder, ShardingType, ParameterSharding\n'), ((6132, 6203), 'torchrec.distributed.collective_utils.invoke_on_rank_and_broadcast_result', 'invoke_on_rank_and_broadcast_result', (['pg', '(0)', 'self.plan', 'module', 'sharders'], {}), '(pg, 0, self.plan, module, sharders)\n', (6167, 6203), False, 'from torchrec.distributed.collective_utils import invoke_on_rank_and_broadcast_result\n'), ((6455, 6482), 'torchrec.distributed.planner.types.Storage', 'Storage', (['MAX_SIZE', 'MAX_SIZE'], {}), '(MAX_SIZE, MAX_SIZE)\n', (6462, 6482), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Partitioner, Topology, Stats, Shard, Storage, ShardingOption, StorageReservation, Enumerator, Proposer, PerfModel, PlannerError\n'), ((1924, 1944), 'copy.deepcopy', 'copy.deepcopy', (['shard'], {}), '(shard)\n', (1937, 1944), False, 'import copy\n'), ((2740, 2779), 'torch.device', 'torch.device', (['"""cuda"""', '(rank % local_size)'], {}), "('cuda', rank % local_size)\n", (2752, 2779), False, 'import torch\n'), ((4712, 4775), 'torchrec.distributed.planner.enumerators.EmbeddingEnumerator', 'EmbeddingEnumerator', ([], {'topology': 'topology', 'constraints': 'constraints'}), '(topology=topology, constraints=constraints)\n', (4731, 4775), False, 'from torchrec.distributed.planner.enumerators import EmbeddingEnumerator\n'), ((4975, 5021), 'torchrec.distributed.planner.storage_reservations.HeuristicalStorageReservation', 'HeuristicalStorageReservation', ([], {'percentage': '(0.15)'}), '(percentage=0.15)\n', (5004, 5021), False, 'from torchrec.distributed.planner.storage_reservations import HeuristicalStorageReservation\n'), ((5119, 5142), 'torchrec.distributed.planner.partitioners.GreedyPerfPartitioner', 'GreedyPerfPartitioner', 
([], {}), '()\n', (5140, 5142), False, 'from torchrec.distributed.planner.partitioners import GreedyPerfPartitioner\n'), ((5585, 5617), 'torchrec.distributed.planner.perf_models.NoopPerfModel', 'NoopPerfModel', ([], {'topology': 'topology'}), '(topology=topology)\n', (5598, 5617), False, 'from torchrec.distributed.planner.perf_models import NoopPerfModel\n'), ((5677, 5693), 'torchrec.distributed.planner.stats.EmbeddingStats', 'EmbeddingStats', ([], {}), '()\n', (5691, 5693), False, 'from torchrec.distributed.planner.stats import EmbeddingStats\n'), ((6948, 6964), 'torchrec.distributed.types.ShardingPlan', 'ShardingPlan', (['{}'], {}), '({})\n', (6960, 6964), False, 'from torchrec.distributed.types import ShardingPlan, ShardingPlanner, ModuleSharder, ShardingType, ParameterSharding\n'), ((9802, 9888), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[device.storage for device in self._topology.devices]'], {}), '(lambda x, y: x + y, [device.storage for device in self._topology.\n devices])\n', (9808, 9888), False, 'from functools import reduce\n'), ((9971, 10061), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[device.storage for device in storage_constraint.devices]'], {}), '(lambda x, y: x + y, [device.storage for device in storage_constraint\n .devices])\n', (9977, 10061), False, 'from functools import reduce\n'), ((10122, 10667), 'torchrec.distributed.planner.types.PlannerError', 'PlannerError', (['f"""Unable to find a plan for this model are evaluating {self._num_proposals} proposals.\nPossible solutions:\n 1) Increase the number of devices ({self._topology.world_size})\n 2) Reduce the model size (\n\t Global storage: {global_storage_capacity.hbm}, \n\t Available for model parallel: {global_storge_constraints},\n\t Requirement for model parallel: {lowest_storage})\n 3) Reduce local batch size ({self._topology.batch_size})\n 4) Remove planner constraints that might be reducing search space or available storage\n"""'], {}), '(\n f"""Unable to 
find a plan for this model are evaluating {self._num_proposals} proposals.\nPossible solutions:\n 1) Increase the number of devices ({self._topology.world_size})\n 2) Reduce the model size (\n\t Global storage: {global_storage_capacity.hbm}, \n\t Available for model parallel: {global_storge_constraints},\n\t Requirement for model parallel: {lowest_storage})\n 3) Reduce local batch size ({self._topology.batch_size})\n 4) Remove planner constraints that might be reducing search space or available storage\n"""\n )\n', (10134, 10667), False, 'from torchrec.distributed.planner.types import ParameterConstraints, Partitioner, Topology, Stats, Shard, Storage, ShardingOption, StorageReservation, Enumerator, Proposer, PerfModel, PlannerError\n'), ((5373, 5389), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {}), '()\n', (5387, 5389), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((5407, 5438), 'torchrec.distributed.planner.proposers.GreedyProposer', 'GreedyProposer', ([], {'use_depth': '(False)'}), '(use_depth=False)\n', (5421, 5438), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((5456, 5473), 'torchrec.distributed.planner.proposers.UniformProposer', 'UniformProposer', ([], {}), '()\n', (5471, 5473), False, 'from torchrec.distributed.planner.proposers import GreedyProposer, UniformProposer\n'), ((3844, 3865), 'typing.cast', 'cast', (['int', 'shard.rank'], {}), '(int, shard.rank)\n', (3848, 3865), False, 'from typing import Tuple, Dict, Optional, List, cast, Union\n'), ((8661, 8758), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[shard.storage for option in proposal for shard in option.shards]'], {}), '(lambda x, y: x + y, [shard.storage for option in proposal for shard in\n option.shards])\n', (8667, 8758), False, 'from functools import reduce\n'), ((3569, 3590), 'typing.cast', 'cast', (['int', 'shard.rank'], {}), '(int, 
shard.rank)\n', (3573, 3590), False, 'from typing import Tuple, Dict, Optional, List, cast, Union\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.data.datapipes as dp
from iopath.common.file_io import PathManager, PathManagerFactory
from pyre_extensions import none_throws
from torch.utils.data import IterableDataset, IterDataPipe
from torchrec.datasets.utils import (
Batch,
LoadFiles,
PATH_MANAGER_KEY,
ReadLinesFromCSV,
safe_cast,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
# Categorical IDs seen fewer than this many times are remapped to a single
# shared value during sparse preprocessing (see BinaryCriteoUtils.sparse_to_contiguous).
FREQUENCY_THRESHOLD = 3
# Criteo row schema: 13 integer (dense) features and 26 categorical (sparse)
# features, preceded by one binary label column.
INT_FEATURE_COUNT = 13
CAT_FEATURE_COUNT = 26
# The 1TB Click Logs dataset ships as one file per day: day_0 .. day_23.
DAYS = 24
DEFAULT_LABEL_NAME = "label"
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]
# Column order in the raw TSV files: label first, then dense, then sparse.
DEFAULT_COLUMN_NAMES: List[str] = [
    DEFAULT_LABEL_NAME,
    *DEFAULT_INT_NAMES,
    *DEFAULT_CAT_NAMES,
]
TOTAL_TRAINING_SAMPLES = 4195197692  # Number of rows across days 0-22 (day 23 is used for validation and testing)
# One caster per column, aligned with DEFAULT_COLUMN_NAMES: the label and the
# dense columns parse as int (default 0); categorical columns stay str (default "").
COLUMN_TYPE_CASTERS: List[Callable[[Union[int, str]], Union[int, str]]] = [
    lambda val: safe_cast(val, int, 0),
    *(lambda val: safe_cast(val, int, 0) for _ in range(INT_FEATURE_COUNT)),
    *(lambda val: safe_cast(val, str, "") for _ in range(CAT_FEATURE_COUNT)),
]
def _default_row_mapper(example: List[str]) -> Dict[str, Union[int, str]]:
    """Map one split TSV row to a {column_name: casted_value} dict."""
    # Walk the row from the right, pairing each value with its column name and
    # caster from the right as well — presumably so rows with missing leading
    # fields still align with the trailing columns (TODO confirm with callers).
    names = reversed(DEFAULT_COLUMN_NAMES)
    casters = reversed(COLUMN_TYPE_CASTERS)
    mapped: Dict[str, Union[int, str]] = {}
    for raw_value in reversed(example):
        mapped[next(names)] = next(casters)(raw_value)
    return mapped
class CriteoIterDataPipe(IterDataPipe):
    """
    IterDataPipe that streams either the Criteo 1TB Click Logs Dataset
    (https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
    Kaggle/Criteo Display Advertising Dataset
    (https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
    files.

    Args:
        paths (Iterable[str]): local paths to TSV files that constitute the Criteo
            dataset.
        row_mapper (Optional[Callable[[List[str]], Any]]): function applied to each
            split TSV line.
        open_kw: options forwarded to the underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        datapipe = CriteoIterDataPipe(
            ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        )
        datapipe = dp.iter.Batcher(datapipe, 100)
        datapipe = dp.iter.Collator(datapipe)
        batch = next(iter(datapipe))
    """

    def __init__(
        self,
        paths: Iterable[str],
        *,
        # pyre-ignore[2]
        row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
        # pyre-ignore[2]
        **open_kw,
    ) -> None:
        self.paths = paths
        self.row_mapper = row_mapper
        self.open_kw: Any = open_kw  # pyre-ignore[4]

    # pyre-ignore[3]
    def __iter__(self) -> Iterator[Any]:
        # Shard files round-robin across dataloader workers so each file is
        # read by exactly one worker.
        assigned_paths = self.paths
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None:
            assigned_paths = (
                p
                for file_idx, p in enumerate(self.paths)
                if file_idx % worker_info.num_workers == worker_info.id
            )
        pipeline = LoadFiles(assigned_paths, mode="r", **self.open_kw)
        pipeline = ReadLinesFromCSV(pipeline, delimiter="\t")
        if self.row_mapper:
            pipeline = dp.iter.Mapper(pipeline, self.row_mapper)
        yield from pipeline
def criteo_terabyte(
    paths: Iterable[str],
    *,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`Criteo 1TB Click Logs <https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/>`_ Dataset

    Thin factory around :class:`CriteoIterDataPipe` for the multi-file 1TB dataset.

    Args:
        paths (Iterable[str]): local paths to TSV files that constitute the Criteo 1TB
            dataset.
        row_mapper (Optional[Callable[[List[str]], Any]]): function applied to each
            split TSV line.
        open_kw: options forwarded to the underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        datapipe = criteo_terabyte(
            ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        )
        datapipe = dp.iter.Batcher(datapipe, 100)
        datapipe = dp.iter.Collator(datapipe)
        batch = next(iter(datapipe))
    """
    datapipe = CriteoIterDataPipe(paths, row_mapper=row_mapper, **open_kw)
    return datapipe
def criteo_kaggle(
    path: str,
    *,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`Kaggle/Criteo Display Advertising <https://www.kaggle.com/c/criteo-display-ad-challenge/>`_ Dataset

    Thin factory around :class:`CriteoIterDataPipe` for the single-file Kaggle dataset.

    Args:
        path (str): local path to the train or test dataset file.
        row_mapper (Optional[Callable[[List[str]], Any]]): function applied to each
            split TSV line.
        open_kw: options forwarded to the underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        train_datapipe = criteo_kaggle(
            "/home/datasets/criteo_kaggle/train.txt",
        )
        example = next(iter(train_datapipe))
        test_datapipe = criteo_kaggle(
            "/home/datasets/criteo_kaggle/test.txt",
        )
        example = next(iter(test_datapipe))
    """
    # The Kaggle dataset is a single file; wrap it in a one-element tuple.
    return CriteoIterDataPipe((path,), row_mapper=row_mapper, **open_kw)
class BinaryCriteoUtils:
    """
    Utility functions used to preprocess, save, load, partition, etc. the Criteo
    dataset in a binary (numpy) format.
    """

    @staticmethod
    def tsv_to_npys(
        in_file: str,
        out_dense_file: str,
        out_sparse_file: str,
        out_labels_file: str,
        path_manager_key: str = PATH_MANAGER_KEY,
    ) -> None:
        """
        Convert one Criteo tsv file to three npy files: one for dense (np.float32), one
        for sparse (np.int32), and one for labels (np.int32).

        Args:
            in_file (str): Input tsv file path.
            out_dense_file (str): Output dense npy file path.
            out_sparse_file (str): Output sparse npy file path.
            out_labels_file (str): Output labels npy file path.
            path_manager_key (str): Path manager key used to load from different
                filesystems.

        Returns:
            None.
        """

        def row_mapper(row: List[str]) -> Tuple[List[int], List[int], int]:
            label = safe_cast(row[0], int, 0)
            dense = [safe_cast(row[i], int, 0) for i in range(1, 1 + INT_FEATURE_COUNT)]
            sparse = [
                # Categorical IDs are hex strings; empty fields fall back to "0".
                int(safe_cast(row[i], str, "0") or "0", 16)
                for i in range(
                    1 + INT_FEATURE_COUNT, 1 + INT_FEATURE_COUNT + CAT_FEATURE_COUNT
                )
            ]
            return dense, sparse, label  # pyre-ignore[7]

        dense, sparse, labels = [], [], []
        for (row_dense, row_sparse, row_label) in CriteoIterDataPipe(
            [in_file], row_mapper=row_mapper
        ):
            dense.append(row_dense)
            sparse.append(row_sparse)
            labels.append(row_label)

        # PyTorch tensors can't handle uint32, but we can save space by not
        # using int64. Numpy will automatically handle dense values >= 2 ** 31.
        dense_np = np.array(dense, dtype=np.int32)
        del dense
        sparse_np = np.array(sparse, dtype=np.int32)
        del sparse
        labels_np = np.array(labels, dtype=np.int32)
        del labels

        # Log is expensive to compute at runtime.
        dense_np += 3
        dense_np = np.log(dense_np, dtype=np.float32)

        # To be consistent with dense and sparse.
        labels_np = labels_np.reshape((-1, 1))

        path_manager = PathManagerFactory().get(path_manager_key)
        for (fname, arr) in [
            (out_dense_file, dense_np),
            (out_sparse_file, sparse_np),
            (out_labels_file, labels_np),
        ]:
            with path_manager.open(fname, "wb") as fout:
                np.save(fout, arr)

    @staticmethod
    def get_shape_from_npy(
        path: str, path_manager_key: str = PATH_MANAGER_KEY
    ) -> Tuple[int, ...]:
        """
        Returns the shape of an npy file using only its header.

        Args:
            path (str): Input npy file path.
            path_manager_key (str): Path manager key used to load from different
                filesystems.

        Returns:
            shape (Tuple[int, ...]): Shape tuple.
        """
        path_manager = PathManagerFactory().get(path_manager_key)
        with path_manager.open(path, "rb") as fin:
            np.lib.format.read_magic(fin)
            shape, _order, _dtype = np.lib.format.read_array_header_1_0(fin)
            return shape

    @staticmethod
    def get_file_idx_to_row_range(
        lengths: List[int],
        rank: int,
        world_size: int,
    ) -> Dict[int, Tuple[int, int]]:
        """
        Given a rank, world_size, and the lengths (number of rows) for a list of files,
        return which files and which portions of those files (represented as row ranges
        - all range indices are inclusive) should be handled by the rank. Each rank
        will be assigned the same number of rows.

        The ranges are determined in such a way that each rank deals with large
        continuous ranges of files. This enables each rank to reduce the amount of data
        it needs to read while avoiding seeks.

        Args:
            lengths (List[int]): A list of row counts for each file.
            rank (int): rank.
            world_size (int): world size.

        Returns:
            output (Dict[int, Tuple[int, int]]): Mapping of which files to the range in
                those files to be handled by the rank. The keys of this dict are indices
                of lengths.
        """
        # All ..._g variables are globals indices (meaning they range from 0 to
        # total_length - 1). All ..._l variables are local indices (meaning they range
        # from 0 to lengths[i] - 1 for the ith file).
        total_length = sum(lengths)
        rows_per_rank = total_length // world_size

        # Global indices that rank is responsible for. All ranges (left, right) are
        # inclusive.
        rank_left_g = rank * rows_per_rank
        rank_right_g = (rank + 1) * rows_per_rank - 1

        output = {}

        # Find where range (rank_left_g, rank_right_g) intersects each file's range.
        file_left_g, file_right_g = -1, -1
        for idx, length in enumerate(lengths):
            file_left_g = file_right_g + 1
            file_right_g = file_left_g + length - 1

            # If the ranges overlap.
            if rank_left_g <= file_right_g and rank_right_g >= file_left_g:
                overlap_left_g, overlap_right_g = max(rank_left_g, file_left_g), min(
                    rank_right_g, file_right_g
                )

                # Convert overlap in global numbers to (local) numbers specific to the
                # file.
                overlap_left_l = overlap_left_g - file_left_g
                overlap_right_l = overlap_right_g - file_left_g
                output[idx] = (overlap_left_l, overlap_right_l)

        return output

    @staticmethod
    def load_npy_range(
        fname: str,
        start_row: int,
        num_rows: int,
        path_manager_key: str = PATH_MANAGER_KEY,
        mmap_mode: bool = False,
    ) -> np.ndarray:
        """
        Load part of an npy file.

        NOTE: Assumes npy represents a numpy array of ndim 2.

        Args:
            fname (str): path string to npy file.
            start_row (int): starting row from the npy file.
            num_rows (int): number of rows to get from the npy file.
            path_manager_key (str): Path manager key used to load from different
                filesystems.
            mmap_mode (bool): if True, memory-map the file instead of reading the
                rows into memory.

        Returns:
            output (np.ndarray): numpy array with the desired range of data from the
                supplied npy file.

        Raises:
            ValueError: if the npy array is not 2-dimensional, or the requested
                row range is out of bounds.
        """
        path_manager = PathManagerFactory().get(path_manager_key)
        with path_manager.open(fname, "rb") as fin:
            np.lib.format.read_magic(fin)
            shape, _order, dtype = np.lib.format.read_array_header_1_0(fin)
            if len(shape) == 2:
                total_rows, row_size = shape
            else:
                # BUG FIX: message previously said "ndim == 2", but this branch
                # fires precisely when ndim is NOT 2.
                raise ValueError("Cannot load range for npy with ndim != 2.")

            if not (0 <= start_row < total_rows):
                raise ValueError(
                    f"start_row ({start_row}) is out of bounds. It must be between 0 "
                    f"and {total_rows - 1}, inclusive."
                )
            if not (start_row + num_rows <= total_rows):
                raise ValueError(
                    f"num_rows ({num_rows}) exceeds number of available rows "
                    f"({total_rows}) for the given start_row ({start_row})."
                )
            if mmap_mode:
                data = np.load(fname, mmap_mode="r")
                data = data[start_row : start_row + num_rows]
            else:
                # The file pointer sits right after the header, so seek is
                # relative to the start of the array data.
                offset = start_row * row_size * dtype.itemsize
                fin.seek(offset, os.SEEK_CUR)
                num_entries = num_rows * row_size
                data = np.fromfile(fin, dtype=dtype, count=num_entries)
            return data.reshape((num_rows, row_size))

    @staticmethod
    def sparse_to_contiguous(
        in_files: List[str],
        output_dir: str,
        frequency_threshold: int = FREQUENCY_THRESHOLD,
        columns: int = CAT_FEATURE_COUNT,
        path_manager_key: str = PATH_MANAGER_KEY,
        output_file_suffix: str = "_contig_freq.npy",
    ) -> None:
        """
        Convert all sparse .npy files to have contiguous integers. Store in a separate
        .npy file. All input files must be processed together because columns
        can have matching IDs between files. Hence, they must be transformed
        together. Also, the transformed IDs are not unique between columns. IDs
        that appear less than frequency_threshold amount of times will be remapped
        to have a value of 1.

        Example transformation, frequency_threshold of 2:
        day_0_sparse.npy
        | col_0 | col_1 |
        -----------------
        | abc   | xyz   |
        | iop   | xyz   |

        day_1_sparse.npy
        | col_0 | col_1 |
        -----------------
        | iop   | tuv   |
        | lkj   | xyz   |

        day_0_sparse_contig.npy
        | col_0 | col_1 |
        -----------------
        | 1     | 2     |
        | 2     | 2     |

        day_1_sparse_contig.npy
        | col_0 | col_1 |
        -----------------
        | 2     | 1     |
        | 1     | 2     |

        Args:
            in_files List[str]: Input directory of npy files.
            output_dir (str): Output directory of processed npy files.
            frequency_threshold: IDs occurring less than this frequency will be remapped to a value of 1.
            path_manager_key (str): Path manager key used to load from different filesystems.

        Returns:
            None.
        """
        # Load each .npy file of sparse features. Transformations are made along the columns.
        # Thereby, transpose the input to ease operations.
        # E.g. file_to_features = {"day_0_sparse": [array([[3,6,7],[7,9,3]]}
        file_to_features: Dict[str, np.ndarray] = {}
        for f in in_files:
            name = os.path.basename(f).split(".")[0]
            file_to_features[name] = np.load(f).transpose()
            print(f"Successfully loaded file: {f}")

        # Iterate through each column in each file and map the sparse ids to contiguous ids.
        for col in range(columns):
            print(f"Processing column: {col}")

            # Iterate through each row in each file for the current column and determine the
            # frequency of each sparse id.
            sparse_to_frequency: Dict[int, int] = {}
            if frequency_threshold > 1:
                for f in file_to_features:
                    for _, sparse in enumerate(file_to_features[f][col]):
                        if sparse in sparse_to_frequency:
                            sparse_to_frequency[sparse] += 1
                        else:
                            sparse_to_frequency[sparse] = 1

            # Iterate through each row in each file for the current column and remap each
            # sparse id to a contiguous id. The contiguous ints start at a value of 2 so that
            # infrequent IDs (determined by the frequency_threshold) can be remapped to 1.
            running_sum = 2
            sparse_to_contiguous_int: Dict[int, int] = {}
            for f in file_to_features:
                print(f"Processing file: {f}")
                for i, sparse in enumerate(file_to_features[f][col]):
                    if sparse not in sparse_to_contiguous_int:
                        # If the ID appears less than frequency_threshold amount of times
                        # remap the value to 1.
                        if (
                            frequency_threshold > 1
                            and sparse_to_frequency[sparse] < frequency_threshold
                        ):
                            sparse_to_contiguous_int[sparse] = 1
                        else:
                            sparse_to_contiguous_int[sparse] = running_sum
                            running_sum += 1

                    # Re-map sparse value to contiguous in place.
                    file_to_features[f][col][i] = sparse_to_contiguous_int[sparse]

        path_manager = PathManagerFactory().get(path_manager_key)
        for f, features in file_to_features.items():
            output_file = os.path.join(output_dir, f + output_file_suffix)
            with path_manager.open(output_file, "wb") as fout:
                print(f"Writing file: {output_file}")
                # Transpose back the features when saving, as they were transposed when loading.
                np.save(fout, features.transpose())

    @staticmethod
    def shuffle(
        input_dir_labels_and_dense: str,
        input_dir_sparse: str,
        output_dir_shuffled: str,
        rows_per_day: Dict[int, int],
        output_dir_full_set: Optional[str] = None,
        days: int = DAYS,
        int_columns: int = INT_FEATURE_COUNT,
        sparse_columns: int = CAT_FEATURE_COUNT,
        path_manager_key: str = PATH_MANAGER_KEY,
    ) -> None:
        """
        Shuffle the dataset. Expects the files to be in .npy format and the data
        to be split by day and by dense, sparse and label data.
        Dense data must be in: day_x_dense.npy
        Sparse data must be in: day_x_sparse.npy
        Labels data must be in: day_x_labels.npy

        The dataset will be reconstructed, shuffled and then split back into
        separate dense, sparse and labels files.

        Args:
            input_dir_labels_and_dense (str): Input directory of labels and dense npy files.
            input_dir_sparse (str): Input directory of sparse npy files.
            output_dir_shuffled (str): Output directory for shuffled labels, dense and sparse npy files.
            rows_per_day Dict[int, int]: Number of rows in each file.
            output_dir_full_set (str): Output directory of the full dataset, if desired.
            days (int): Number of day files.
            int_columns (int): Number of columns with dense features.
            sparse_columns (int): Number of columns with sparse features.
            path_manager_key (str): Path manager key used to load from different filesystems.
        """
        total_rows = sum(rows_per_day.values())
        columns = int_columns + sparse_columns + 1  # add 1 for label column
        full_dataset = np.zeros((total_rows, columns), dtype=np.float32)
        curr_first_row = 0
        curr_last_row = 0
        for d in range(0, days):
            curr_last_row += rows_per_day[d]

            # dense
            path_to_file = os.path.join(
                input_dir_labels_and_dense, f"day_{d}_dense.npy"
            )
            data = np.load(path_to_file)
            print(
                f"Day {d} dense- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
            )
            full_dataset[curr_first_row:curr_last_row, 0:int_columns] = data
            del data

            # sparse
            path_to_file = os.path.join(input_dir_sparse, f"day_{d}_sparse.npy")
            data = np.load(path_to_file)
            print(
                f"Day {d} sparse- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
            )
            full_dataset[curr_first_row:curr_last_row, int_columns : columns - 1] = data
            del data

            # labels
            path_to_file = os.path.join(
                input_dir_labels_and_dense, f"day_{d}_labels.npy"
            )
            data = np.load(path_to_file)
            print(
                f"Day {d} labels- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
            )
            full_dataset[curr_first_row:curr_last_row, columns - 1 :] = data
            del data

            curr_first_row = curr_last_row

        path_manager = PathManagerFactory().get(path_manager_key)

        # Save the full dataset
        if output_dir_full_set is not None:
            full_output_file = os.path.join(output_dir_full_set, "full.npy")
            with path_manager.open(full_output_file, "wb") as fout:
                print(f"Writing full set file: {full_output_file}")
                np.save(fout, full_dataset)

        print("Shuffling dataset")
        np.random.shuffle(full_dataset)

        # Slice and save each portion into dense, sparse and labels
        curr_first_row = 0
        curr_last_row = 0
        for d in range(0, days):
            curr_last_row += rows_per_day[d]

            # write dense columns
            shuffled_dense_file = os.path.join(
                output_dir_shuffled, f"day_{d}_dense.npy"
            )
            with path_manager.open(shuffled_dense_file, "wb") as fout:
                print(
                    f"Writing rows {curr_first_row}-{curr_last_row-1} dense file: {shuffled_dense_file}"
                )
                np.save(fout, full_dataset[curr_first_row:curr_last_row, 0:int_columns])

            # write sparse columns
            shuffled_sparse_file = os.path.join(
                output_dir_shuffled, f"day_{d}_sparse.npy"
            )
            with path_manager.open(shuffled_sparse_file, "wb") as fout:
                print(
                    f"Writing rows {curr_first_row}-{curr_last_row-1} sparse file: {shuffled_sparse_file}"
                )
                np.save(
                    fout,
                    full_dataset[
                        curr_first_row:curr_last_row, int_columns : columns - 1
                    ].astype(np.int32),
                )

            # write labels columns
            shuffled_labels_file = os.path.join(
                output_dir_shuffled, f"day_{d}_labels.npy"
            )
            with path_manager.open(shuffled_labels_file, "wb") as fout:
                print(
                    f"Writing rows {curr_first_row}-{curr_last_row-1} labels file: {shuffled_labels_file}"
                )
                np.save(
                    fout,
                    full_dataset[curr_first_row:curr_last_row, columns - 1 :].astype(
                        np.int32
                    ),
                )

            curr_first_row = curr_last_row
class InMemoryBinaryCriteoIterDataPipe(IterableDataset):
"""
Datapipe designed to operate over binary (npy) versions of Criteo datasets. Loads
the entire dataset into memory to prevent disk speed from affecting throughout. Each
rank reads only the data for the portion of the dataset it is responsible for.
The torchrec/datasets/scripts/preprocess_criteo.py script can be used to convert
the Criteo tsv files to the npy files expected by this dataset.
Args:
dense_paths (List[str]): List of path strings to dense npy files.
sparse_paths (List[str]): List of path strings to sparse npy files.
labels_paths (List[str]): List of path strings to labels npy files.
batch_size (int): batch size.
rank (int): rank.
world_size (int): world size.
shuffle_batches (bool): Whether to shuffle batches
hashes (Optional[int]): List of max categorical feature value for each feature.
Length of this list should be CAT_FEATURE_COUNT.
path_manager_key (str): Path manager key used to load from different
filesystems.
Example::
template = "/home/datasets/criteo/1tb_binary/day_{}_{}.npy"
datapipe = InMemoryBinaryCriteoIterDataPipe(
dense_paths=[template.format(0, "dense"), template.format(1, "dense")],
sparse_paths=[template.format(0, "sparse"), template.format(1, "sparse")],
labels_paths=[template.format(0, "labels"), template.format(1, "labels")],
batch_size=1024,
rank=torch.distributed.get_rank(),
world_size=torch.distributed.get_world_size(),
)
batch = next(iter(datapipe))
"""
    def __init__(
        self,
        dense_paths: List[str],
        sparse_paths: List[str],
        labels_paths: List[str],
        batch_size: int,
        rank: int,
        world_size: int,
        shuffle_batches: bool = False,
        mmap_mode: bool = False,
        hashes: Optional[List[int]] = None,
        path_manager_key: str = PATH_MANAGER_KEY,
    ) -> None:
        """
        Store configuration, load this rank's slice of the dataset into memory
        (or mmap it), and precompute the KeyedJaggedTensor metadata shared by
        every batch.
        """
        self.dense_paths = dense_paths
        self.sparse_paths = sparse_paths
        self.labels_paths = labels_paths
        self.batch_size = batch_size
        self.rank = rank
        self.world_size = world_size
        self.shuffle_batches = shuffle_batches
        self.mmap_mode = mmap_mode
        self.hashes = hashes
        self.path_manager_key = path_manager_key
        self.path_manager: PathManager = PathManagerFactory().get(path_manager_key)

        # Populates self.dense_arrs / self.sparse_arrs / self.labels_arrs with
        # this rank's rows.
        self._load_data_for_rank()
        self.num_rows_per_file: List[int] = [a.shape[0] for a in self.dense_arrs]
        # Trailing rows that don't fill a whole batch are dropped.
        self.num_batches: int = sum(self.num_rows_per_file) // batch_size

        # These values are the same for the KeyedJaggedTensors in all batches, so they
        # are computed once here. This avoids extra work from the KeyedJaggedTensor sync
        # functions.
        self._num_ids_in_batch: int = CAT_FEATURE_COUNT * batch_size
        self.keys: List[str] = DEFAULT_CAT_NAMES
        # Each categorical feature contributes exactly one id per sample.
        self.lengths: torch.Tensor = torch.ones(
            (self._num_ids_in_batch,), dtype=torch.int32
        )
        self.offsets: torch.Tensor = torch.arange(
            0, self._num_ids_in_batch + 1, dtype=torch.int32
        )
        self.stride = batch_size
        self.length_per_key: List[int] = CAT_FEATURE_COUNT * [batch_size]
        self.offset_per_key: List[int] = [
            batch_size * i for i in range(CAT_FEATURE_COUNT + 1)
        ]
        self.index_per_key: Dict[str, int] = {
            key: i for (i, key) in enumerate(self.keys)
        }
    def _load_data_for_rank(self) -> None:
        """
        Load only the dense/sparse/labels rows this rank is responsible for,
        then (unless mmap_mode) apply the optional hash modulus to sparse ids.
        """
        # Row counts are read from the dense files' npy headers; the sparse and
        # labels files are assumed to have the same per-file row counts.
        file_idx_to_row_range = BinaryCriteoUtils.get_file_idx_to_row_range(
            lengths=[
                BinaryCriteoUtils.get_shape_from_npy(
                    path, path_manager_key=self.path_manager_key
                )[0]
                for path in self.dense_paths
            ],
            rank=self.rank,
            world_size=self.world_size,
        )
        self.dense_arrs, self.sparse_arrs, self.labels_arrs = [], [], []
        for arrs, paths in zip(
            [self.dense_arrs, self.sparse_arrs, self.labels_arrs],
            [self.dense_paths, self.sparse_paths, self.labels_paths],
        ):
            # Ranges are inclusive, hence the +1 when converting to a row count.
            for idx, (range_left, range_right) in file_idx_to_row_range.items():
                arrs.append(
                    BinaryCriteoUtils.load_npy_range(
                        paths[idx],
                        range_left,
                        range_right - range_left + 1,
                        path_manager_key=self.path_manager_key,
                        mmap_mode=self.mmap_mode,
                    )
                )
        # When mmap_mode is enabled, the hash is applied in def __iter__, which is
        # where samples are batched during training.
        # Otherwise, the ML dataset is preloaded, and the hash is applied here in
        # the preload stage, as shown:
        if not self.mmap_mode and self.hashes is not None:
            hashes_np = np.array(self.hashes).reshape((1, CAT_FEATURE_COUNT))
            for sparse_arr in self.sparse_arrs:
                sparse_arr %= hashes_np
def _np_arrays_to_batch(
self, dense: np.ndarray, sparse: np.ndarray, labels: np.ndarray
) -> Batch:
if self.shuffle_batches:
# Shuffle all 3 in unison
shuffler = np.random.permutation(len(dense))
dense = dense[shuffler]
sparse = sparse[shuffler]
labels = labels[shuffler]
return Batch(
dense_features=torch.from_numpy(dense),
sparse_features=KeyedJaggedTensor(
keys=self.keys,
# transpose + reshape(-1) incurs an additional copy.
values=torch.from_numpy(sparse.transpose(1, 0).reshape(-1)),
lengths=self.lengths,
offsets=self.offsets,
stride=self.stride,
length_per_key=self.length_per_key,
offset_per_key=self.offset_per_key,
index_per_key=self.index_per_key,
),
labels=torch.from_numpy(labels.reshape(-1)),
)
    def __iter__(self) -> Iterator[Batch]:
        """Yield full batches of ``self.batch_size`` rows, stitching rows
        across file boundaries as needed.

        Rows are accumulated into ``buffer``; a batch is emitted only when
        exactly ``batch_size`` rows are present. Iteration stops after
        ``self.num_batches`` batches, so any trailing partial buffer is
        never yielded.
        """
        # Invariant: buffer never contains more than batch_size rows.
        buffer: Optional[List[np.ndarray]] = None
        def append_to_buffer(
            dense: np.ndarray, sparse: np.ndarray, labels: np.ndarray
        ) -> None:
            # Start a new buffer on first use; afterwards grow each of the
            # three arrays by concatenation.
            nonlocal buffer
            if buffer is None:
                buffer = [dense, sparse, labels]
            else:
                for idx, arr in enumerate([dense, sparse, labels]):
                    buffer[idx] = np.concatenate((buffer[idx], arr))
        # Maintain a buffer that can contain up to batch_size rows. Fill buffer as
        # much as possible on each iteration. Only return a new batch when batch_size
        # rows are filled.
        file_idx = 0
        row_idx = 0
        batch_idx = 0
        while batch_idx < self.num_batches:
            buffer_row_count = 0 if buffer is None else none_throws(buffer)[0].shape[0]
            if buffer_row_count == self.batch_size:
                yield self._np_arrays_to_batch(*none_throws(buffer))
                batch_idx += 1
                buffer = None
            else:
                # Take as many rows as still fit in the batch, bounded by
                # what remains in the current file.
                rows_to_get = min(
                    self.batch_size - buffer_row_count,
                    self.num_rows_per_file[file_idx] - row_idx,
                )
                slice_ = slice(row_idx, row_idx + rows_to_get)
                dense_inputs = self.dense_arrs[file_idx][slice_, :]
                sparse_inputs = self.sparse_arrs[file_idx][slice_, :]
                target_labels = self.labels_arrs[file_idx][slice_, :]
                if self.mmap_mode and self.hashes is not None:
                    # Under mmap the hash was not applied at load time (see
                    # _load_data_for_rank), so apply it to this slice now.
                    sparse_inputs = sparse_inputs % np.array(self.hashes).reshape(
                        (1, CAT_FEATURE_COUNT)
                    )
                append_to_buffer(
                    dense_inputs,
                    sparse_inputs,
                    target_labels,
                )
                row_idx += rows_to_get
                if row_idx >= self.num_rows_per_file[file_idx]:
                    file_idx += 1
                    row_idx = 0
    def __len__(self) -> int:
        # Number of full batches this pipe will yield (computed in __init__
        # as total rows // batch_size; see __iter__).
        return self.num_batches
| [
"torchrec.datasets.utils.LoadFiles",
"torchrec.datasets.utils.safe_cast",
"torchrec.datasets.utils.ReadLinesFromCSV"
] | [((1358, 1380), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'int', '(0)'], {}), '(val, int, 0)\n', (1367, 1380), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((3250, 3284), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (3282, 3284), False, 'import torch\n'), ((3543, 3585), 'torchrec.datasets.utils.LoadFiles', 'LoadFiles', (['paths'], {'mode': '"""r"""'}), "(paths, mode='r', **self.open_kw)\n", (3552, 3585), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((3605, 3647), 'torchrec.datasets.utils.ReadLinesFromCSV', 'ReadLinesFromCSV', (['datapipe'], {'delimiter': '"""\t"""'}), "(datapipe, delimiter='\\t')\n", (3621, 3647), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((7668, 7699), 'numpy.array', 'np.array', (['dense'], {'dtype': 'np.int32'}), '(dense, dtype=np.int32)\n', (7676, 7699), True, 'import numpy as np\n'), ((7738, 7770), 'numpy.array', 'np.array', (['sparse'], {'dtype': 'np.int32'}), '(sparse, dtype=np.int32)\n', (7746, 7770), True, 'import numpy as np\n'), ((7810, 7842), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.int32'}), '(labels, dtype=np.int32)\n', (7818, 7842), True, 'import numpy as np\n'), ((7954, 7988), 'numpy.log', 'np.log', (['dense_np'], {'dtype': 'np.float32'}), '(dense_np, dtype=np.float32)\n', (7960, 7988), True, 'import numpy as np\n'), ((20171, 20220), 'numpy.zeros', 'np.zeros', (['(total_rows, columns)'], {'dtype': 'np.float32'}), '((total_rows, columns), dtype=np.float32)\n', (20179, 20220), True, 'import numpy as np\n'), ((22100, 22131), 'numpy.random.shuffle', 'np.random.shuffle', (['full_dataset'], {}), '(full_dataset)\n', (22117, 22131), True, 'import numpy as np\n'), ((27130, 27186), 'torch.ones', 'torch.ones', (['(self._num_ids_in_batch,)'], {'dtype': 
'torch.int32'}), '((self._num_ids_in_batch,), dtype=torch.int32)\n', (27140, 27186), False, 'import torch\n'), ((27246, 27308), 'torch.arange', 'torch.arange', (['(0)', '(self._num_ids_in_batch + 1)'], {'dtype': 'torch.int32'}), '(0, self._num_ids_in_batch + 1, dtype=torch.int32)\n', (27258, 27308), False, 'import torch\n'), ((3699, 3740), 'torch.utils.data.datapipes.iter.Mapper', 'dp.iter.Mapper', (['datapipe', 'self.row_mapper'], {}), '(datapipe, self.row_mapper)\n', (3713, 3740), True, 'import torch.utils.data.datapipes as dp\n'), ((6806, 6831), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[0]', 'int', '(0)'], {}), '(row[0], int, 0)\n', (6815, 6831), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((8999, 9028), 'numpy.lib.format.read_magic', 'np.lib.format.read_magic', (['fin'], {}), '(fin)\n', (9023, 9028), True, 'import numpy as np\n'), ((9065, 9105), 'numpy.lib.format.read_array_header_1_0', 'np.lib.format.read_array_header_1_0', (['fin'], {}), '(fin)\n', (9100, 9105), True, 'import numpy as np\n'), ((12526, 12555), 'numpy.lib.format.read_magic', 'np.lib.format.read_magic', (['fin'], {}), '(fin)\n', (12550, 12555), True, 'import numpy as np\n'), ((12591, 12631), 'numpy.lib.format.read_array_header_1_0', 'np.lib.format.read_array_header_1_0', (['fin'], {}), '(fin)\n', (12626, 12631), True, 'import numpy as np\n'), ((18143, 18191), 'os.path.join', 'os.path.join', (['output_dir', '(f + output_file_suffix)'], {}), '(output_dir, f + output_file_suffix)\n', (18155, 18191), False, 'import os\n'), ((20400, 20462), 'os.path.join', 'os.path.join', (['input_dir_labels_and_dense', 'f"""day_{d}_dense.npy"""'], {}), "(input_dir_labels_and_dense, f'day_{d}_dense.npy')\n", (20412, 20462), False, 'import os\n'), ((20512, 20533), 'numpy.load', 'np.load', (['path_to_file'], {}), '(path_to_file)\n', (20519, 20533), True, 'import numpy as np\n'), ((20828, 20881), 'os.path.join', 'os.path.join', 
(['input_dir_sparse', 'f"""day_{d}_sparse.npy"""'], {}), "(input_dir_sparse, f'day_{d}_sparse.npy')\n", (20840, 20881), False, 'import os\n'), ((20901, 20922), 'numpy.load', 'np.load', (['path_to_file'], {}), '(path_to_file)\n', (20908, 20922), True, 'import numpy as np\n'), ((21230, 21293), 'os.path.join', 'os.path.join', (['input_dir_labels_and_dense', 'f"""day_{d}_labels.npy"""'], {}), "(input_dir_labels_and_dense, f'day_{d}_labels.npy')\n", (21242, 21293), False, 'import os\n'), ((21343, 21364), 'numpy.load', 'np.load', (['path_to_file'], {}), '(path_to_file)\n', (21350, 21364), True, 'import numpy as np\n'), ((21830, 21875), 'os.path.join', 'os.path.join', (['output_dir_full_set', '"""full.npy"""'], {}), "(output_dir_full_set, 'full.npy')\n", (21842, 21875), False, 'import os\n'), ((22401, 22456), 'os.path.join', 'os.path.join', (['output_dir_shuffled', 'f"""day_{d}_dense.npy"""'], {}), "(output_dir_shuffled, f'day_{d}_dense.npy')\n", (22413, 22456), False, 'import os\n'), ((22864, 22920), 'os.path.join', 'os.path.join', (['output_dir_shuffled', 'f"""day_{d}_sparse.npy"""'], {}), "(output_dir_shuffled, f'day_{d}_sparse.npy')\n", (22876, 22920), False, 'import os\n'), ((23465, 23521), 'os.path.join', 'os.path.join', (['output_dir_shuffled', 'f"""day_{d}_labels.npy"""'], {}), "(output_dir_shuffled, f'day_{d}_labels.npy')\n", (23477, 23521), False, 'import os\n'), ((1400, 1422), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'int', '(0)'], {}), '(val, int, 0)\n', (1409, 1422), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((1477, 1500), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['val', 'str', '""""""'], {}), "(val, str, '')\n", (1486, 1500), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((6853, 6878), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[i]', 'int', '(0)'], {}), '(row[i], int, 0)\n', (6862, 
6878), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((8111, 8131), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (8129, 8131), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((8392, 8410), 'numpy.save', 'np.save', (['fout', 'arr'], {}), '(fout, arr)\n', (8399, 8410), True, 'import numpy as np\n'), ((8893, 8913), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (8911, 8913), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((12419, 12439), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (12437, 12439), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((13365, 13394), 'numpy.load', 'np.load', (['fname'], {'mmap_mode': '"""r"""'}), "(fname, mmap_mode='r')\n", (13372, 13394), True, 'import numpy as np\n'), ((13657, 13705), 'numpy.fromfile', 'np.fromfile', (['fin'], {'dtype': 'dtype', 'count': 'num_entries'}), '(fin, dtype=dtype, count=num_entries)\n', (13668, 13705), True, 'import numpy as np\n'), ((18021, 18041), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (18039, 18041), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((21679, 21699), 'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (21697, 21699), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((22028, 22055), 'numpy.save', 'np.save', (['fout', 'full_dataset'], {}), '(fout, full_dataset)\n', (22035, 22055), True, 'import numpy as np\n'), ((22720, 22792), 'numpy.save', 'np.save', (['fout', 'full_dataset[curr_first_row:curr_last_row, 0:int_columns]'], {}), '(fout, full_dataset[curr_first_row:curr_last_row, 0:int_columns])\n', (22727, 22792), True, 'import numpy as np\n'), ((26542, 26562), 
'iopath.common.file_io.PathManagerFactory', 'PathManagerFactory', ([], {}), '()\n', (26560, 26562), False, 'from iopath.common.file_io import PathManager, PathManagerFactory\n'), ((29679, 29702), 'torch.from_numpy', 'torch.from_numpy', (['dense'], {}), '(dense)\n', (29695, 29702), False, 'import torch\n'), ((15913, 15923), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (15920, 15923), True, 'import numpy as np\n'), ((29129, 29150), 'numpy.array', 'np.array', (['self.hashes'], {}), '(self.hashes)\n', (29137, 29150), True, 'import numpy as np\n'), ((30789, 30823), 'numpy.concatenate', 'np.concatenate', (['(buffer[idx], arr)'], {}), '((buffer[idx], arr))\n', (30803, 30823), True, 'import numpy as np\n'), ((6964, 6991), 'torchrec.datasets.utils.safe_cast', 'safe_cast', (['row[i]', 'str', '"""0"""'], {}), "(row[i], str, '0')\n", (6973, 6991), False, 'from torchrec.datasets.utils import Batch, LoadFiles, PATH_MANAGER_KEY, ReadLinesFromCSV, safe_cast\n'), ((15842, 15861), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (15858, 15861), False, 'import os\n'), ((20635, 20646), 'time.time', 'time.time', ([], {}), '()\n', (20644, 20646), False, 'import time\n'), ((21025, 21036), 'time.time', 'time.time', ([], {}), '()\n', (21034, 21036), False, 'import time\n'), ((21467, 21478), 'time.time', 'time.time', ([], {}), '()\n', (21476, 21478), False, 'import time\n'), ((31184, 31203), 'pyre_extensions.none_throws', 'none_throws', (['buffer'], {}), '(buffer)\n', (31195, 31203), False, 'from pyre_extensions import none_throws\n'), ((31316, 31335), 'pyre_extensions.none_throws', 'none_throws', (['buffer'], {}), '(buffer)\n', (31327, 31335), False, 'from pyre_extensions import none_throws\n'), ((31977, 31998), 'numpy.array', 'np.array', (['self.hashes'], {}), '(self.hashes)\n', (31985, 31998), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from typing import cast, Dict, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
import torch.nn as nn
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from torchrec.distributed.embedding_types import EmbeddingTableConfig
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.distributed.planner import (
EmbeddingShardingPlanner,
ParameterConstraints,
Topology,
)
from torchrec.distributed.test_utils.multi_process import MultiProcessContext
from torchrec.distributed.test_utils.test_model import (
ModelInput,
TestEBCSharder,
TestEBSharder,
TestETCSharder,
TestETSharder,
TestSparseNNBase,
)
from torchrec.distributed.types import (
ModuleSharder,
ShardedTensor,
ShardingEnv,
ShardingPlan,
ShardingType,
)
from torchrec.modules.embedding_configs import BaseEmbeddingConfig
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
class SharderType(Enum):
    """String identifiers for the test-sharder variants understood by
    ``create_test_sharder`` below."""
    EMBEDDING_BAG = "embedding_bag"
    EMBEDDING_BAG_COLLECTION = "embedding_bag_collection"
    EMBEDDING_TOWER = "embedding_tower"
    EMBEDDING_TOWER_COLLECTION = "embedding_tower_collection"
def create_test_sharder(
    sharder_type: str, sharding_type: str, kernel_type: str
) -> Union[TestEBSharder, TestEBCSharder, TestETSharder, TestETCSharder]:
    """Build the test sharder matching ``sharder_type``.

    Every variant is constructed with the same fused optimizer parameters
    (``{"learning_rate": 0.1}``).

    Raises:
        ValueError: if ``sharder_type`` is not a known ``SharderType`` value.
    """
    sharder_by_type = {
        SharderType.EMBEDDING_BAG.value: TestEBSharder,
        SharderType.EMBEDDING_BAG_COLLECTION.value: TestEBCSharder,
        SharderType.EMBEDDING_TOWER.value: TestETSharder,
        SharderType.EMBEDDING_TOWER_COLLECTION.value: TestETCSharder,
    }
    if sharder_type not in sharder_by_type:
        raise ValueError(f"Sharder not supported {sharder_type}")
    return sharder_by_type[sharder_type](
        sharding_type, kernel_type, {"learning_rate": 0.1}
    )
def generate_inputs(
    world_size: int,
    tables: List[EmbeddingTableConfig],
    weighted_tables: Optional[List[EmbeddingTableConfig]] = None,
    batch_size: int = 4,
    num_float_features: int = 16,
) -> Tuple[ModelInput, List[ModelInput]]:
    """Generate a global ModelInput together with per-rank ModelInputs.

    ``weighted_tables`` defaults to an empty list when not provided.
    """
    return ModelInput.generate(
        tables=tables,
        weighted_tables=weighted_tables or [],
        batch_size=batch_size,
        world_size=world_size,
        num_float_features=num_float_features,
    )
def gen_model_and_input(
    model_class: TestSparseNNBase,
    tables: List[EmbeddingTableConfig],
    embedding_groups: Dict[str, List[str]],
    world_size: int,
    weighted_tables: Optional[List[EmbeddingTableConfig]] = None,
    num_float_features: int = 16,
    dense_device: Optional[torch.device] = None,
    sparse_device: Optional[torch.device] = None,
) -> Tuple[nn.Module, List[Tuple[ModelInput, List[ModelInput]]]]:
    """Construct a deterministic test model and one set of generated inputs.

    Returns the model plus a single-element list of
    ``(global_input, per_rank_inputs)`` pairs.
    """
    # Seed before construction so the model's parameter init is reproducible.
    torch.manual_seed(0)
    model = model_class(
        tables=cast(List[BaseEmbeddingConfig], tables),
        num_float_features=num_float_features,
        weighted_tables=cast(List[BaseEmbeddingConfig], weighted_tables),
        embedding_groups=embedding_groups,
        dense_device=dense_device,
        sparse_device=sparse_device,
    )
    input_batches = [
        generate_inputs(
            world_size=world_size,
            tables=tables,
            weighted_tables=weighted_tables,
            num_float_features=num_float_features,
        )
    ]
    return model, input_batches
def copy_state_dict(
    loc: Dict[str, Union[torch.Tensor, ShardedTensor]],
    glob: Dict[str, torch.Tensor],
) -> None:
    """Copy global (unsharded) parameters into local, possibly sharded, ones.

    For a plain local tensor the global value is copied directly. For a
    ShardedTensor, the matching slice of the global tensor (per each local
    shard's offsets and sizes) is copied into that shard.

    Raises:
        ValueError: for sharded tensors with more than 2 dimensions.
    """
    for name, local_tensor in loc.items():
        assert name in glob
        source = glob[name]
        if isinstance(source, ShardedTensor):
            source = source.local_shards()[0].tensor
        if not isinstance(local_tensor, ShardedTensor):
            local_tensor.copy_(source)
            continue
        for local_shard in local_tensor.local_shards():
            assert source.ndim == local_shard.tensor.ndim
            shard_meta = local_shard.metadata
            src = source.detach()
            if src.ndim == 1:
                row0 = shard_meta.shard_offsets[0]
                src = src[row0 : row0 + local_shard.tensor.shape[0]]
            elif src.ndim == 2:
                row0 = shard_meta.shard_offsets[0]
                col0 = shard_meta.shard_offsets[1]
                src = src[
                    row0 : row0 + local_shard.tensor.shape[0],
                    col0 : col0 + local_shard.tensor.shape[1],
                ]
            else:
                raise ValueError("Tensors with ndim > 2 are not supported")
            local_shard.tensor.copy_(src)
def sharding_single_rank_test(
    rank: int,
    world_size: int,
    model_class: TestSparseNNBase,
    embedding_groups: Dict[str, List[str]],
    tables: List[EmbeddingTableConfig],
    sharders: List[ModuleSharder[nn.Module]],
    backend: str,
    optim: EmbOptimType,
    weighted_tables: Optional[List[EmbeddingTableConfig]] = None,
    constraints: Optional[Dict[str, ParameterConstraints]] = None,
    local_size: Optional[int] = None,
) -> None:
    """Per-rank body of a sharding correctness test.

    Builds an unsharded "global" model and a sharded "local" copy of the same
    model, syncs the local state from the global state, runs one training step
    on each, and asserts that the all-gathered sharded predictions match the
    unsharded predictions.
    """
    with MultiProcessContext(rank, world_size, backend, local_size) as ctx:
        # Generate model & inputs.
        (global_model, inputs) = gen_model_and_input(
            model_class=model_class,
            tables=tables,
            weighted_tables=weighted_tables,
            embedding_groups=embedding_groups,
            world_size=world_size,
            num_float_features=16,
        )
        global_model = global_model.to(ctx.device)
        global_input = inputs[0][0].to(ctx.device)
        local_input = inputs[0][1][rank].to(ctx.device)
        # Shard model.
        # Sparse params are created on the meta device: they get materialized
        # later by DistributedModelParallel according to the sharding plan.
        local_model = model_class(
            tables=cast(List[BaseEmbeddingConfig], tables),
            weighted_tables=cast(List[BaseEmbeddingConfig], weighted_tables),
            embedding_groups=embedding_groups,
            dense_device=ctx.device,
            sparse_device=torch.device("meta"),
            num_float_features=16,
        )
        planner = EmbeddingShardingPlanner(
            topology=Topology(
                world_size, ctx.device.type, local_world_size=ctx.local_size
            ),
            constraints=constraints,
        )
        plan: ShardingPlan = planner.collective_plan(local_model, sharders, ctx.pg)
        """
        Simulating multiple nodes on a single node. However, metadata information and
        tensor placement must still be consistent. Here we overwrite this to do so.
        NOTE:
            inter/intra process groups should still behave as expected.
        TODO: may need to add some checks that only does this if we're running on a
        single GPU (which should be most cases).
        """
        for group in plan.plan:
            for _, parameter_sharding in plan.plan[group].items():
                if (
                    parameter_sharding.sharding_type
                    in {
                        ShardingType.TABLE_ROW_WISE.value,
                        ShardingType.TABLE_COLUMN_WISE.value,
                    }
                    and ctx.device.type != "cpu"
                ):
                    sharding_spec = parameter_sharding.sharding_spec
                    if sharding_spec is not None:
                        # pyre-ignore
                        for shard in sharding_spec.shards:
                            placement = shard.placement
                            # NOTE(review): this rebinds the function parameter
                            # `rank`; it is not read again after this loop, but a
                            # distinct loop-variable name would be clearer.
                            rank: Optional[int] = placement.rank()
                            assert rank is not None
                            shard.placement = torch.distributed._remote_device(
                                f"rank:{rank}/cuda:{rank}"
                            )
        local_model = DistributedModelParallel(
            local_model,
            env=ShardingEnv.from_process_group(ctx.pg),
            plan=plan,
            sharders=sharders,
            device=ctx.device,
        )
        # Dense params use a plain SGD optimizer; sparse params use the fused
        # optimizer owned by the sharded model. Both are stepped together.
        dense_optim = KeyedOptimizerWrapper(
            dict(local_model.named_parameters()),
            lambda params: torch.optim.SGD(params, lr=0.1),
        )
        local_opt = CombinedOptimizer([local_model.fused_optimizer, dense_optim])
        # Load model state from the global model.
        copy_state_dict(local_model.state_dict(), global_model.state_dict())
        # Run a single training step of the sharded model.
        local_pred = gen_full_pred_after_one_step(local_model, local_opt, local_input)
        all_local_pred = []
        for _ in range(world_size):
            all_local_pred.append(torch.empty_like(local_pred))
        dist.all_gather(all_local_pred, local_pred, group=ctx.pg)
        # Run second training step of the unsharded model.
        # Only EXACT_SGD is supported here: the dense optimizer below is plain
        # SGD with the same lr, so any other embedding optimizer would diverge.
        assert optim == EmbOptimType.EXACT_SGD
        global_opt = torch.optim.SGD(global_model.parameters(), lr=0.1)
        global_pred = gen_full_pred_after_one_step(
            global_model, global_opt, global_input
        )
        # Compare predictions of sharded vs unsharded models.
        # NOTE(review): torch.testing.assert_allclose is deprecated in newer
        # torch releases in favor of torch.testing.assert_close — confirm the
        # pinned torch version before migrating.
        torch.testing.assert_allclose(global_pred, torch.cat(all_local_pred))
def gen_full_pred_after_one_step(
    model: nn.Module,
    opt: torch.optim.Optimizer,
    input: ModelInput,
) -> torch.Tensor:
    """Take one optimizer step on ``model``, then return its eval-mode output.

    In train mode ``model(input)`` is expected to return ``(loss, ...)``; in
    eval mode it returns the prediction tensor directly.
    """
    model.train(True)
    opt.zero_grad()
    loss, _ = model(input)
    loss.backward()
    # pyre-fixme[20]: Argument `closure` expected.
    opt.step()
    # Forward pass without gradient tracking to get the prediction.
    model.train(False)
    with torch.no_grad():
        full_pred = model(input)
    return full_pred
| [
"torchrec.distributed.types.ShardingEnv.from_process_group",
"torchrec.distributed.test_utils.test_model.TestETSharder",
"torchrec.distributed.test_utils.test_model.TestEBCSharder",
"torchrec.distributed.planner.Topology",
"torchrec.distributed.test_utils.test_model.ModelInput.generate",
"torchrec.distrib... | [((2533, 2700), 'torchrec.distributed.test_utils.test_model.ModelInput.generate', 'ModelInput.generate', ([], {'batch_size': 'batch_size', 'world_size': 'world_size', 'num_float_features': 'num_float_features', 'tables': 'tables', 'weighted_tables': '(weighted_tables or [])'}), '(batch_size=batch_size, world_size=world_size,\n num_float_features=num_float_features, tables=tables, weighted_tables=\n weighted_tables or [])\n', (2552, 2700), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((3175, 3195), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (3192, 3195), False, 'import torch\n'), ((1684, 1749), 'torchrec.distributed.test_utils.test_model.TestEBSharder', 'TestEBSharder', (['sharding_type', 'kernel_type', "{'learning_rate': 0.1}"], {}), "(sharding_type, kernel_type, {'learning_rate': 0.1})\n", (1697, 1749), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((5611, 5669), 'torchrec.distributed.test_utils.multi_process.MultiProcessContext', 'MultiProcessContext', (['rank', 'world_size', 'backend', 'local_size'], {}), '(rank, world_size, backend, local_size)\n', (5630, 5669), False, 'from torchrec.distributed.test_utils.multi_process import MultiProcessContext\n'), ((8630, 8691), 'torchrec.optim.keyed.CombinedOptimizer', 'CombinedOptimizer', (['[local_model.fused_optimizer, dense_optim]'], {}), '([local_model.fused_optimizer, dense_optim])\n', (8647, 8691), False, 'from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper\n'), ((9104, 9161), 'torch.distributed.all_gather', 'dist.all_gather', (['all_local_pred', 'local_pred'], {'group': 'ctx.pg'}), '(all_local_pred, local_pred, group=ctx.pg)\n', (9119, 9161), True, 'import torch.distributed as dist\n'), ((9992, 10007), 'torch.no_grad', 
'torch.no_grad', ([], {}), '()\n', (10005, 10007), False, 'import torch\n'), ((1834, 1900), 'torchrec.distributed.test_utils.test_model.TestEBCSharder', 'TestEBCSharder', (['sharding_type', 'kernel_type', "{'learning_rate': 0.1}"], {}), "(sharding_type, kernel_type, {'learning_rate': 0.1})\n", (1848, 1900), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((3237, 3276), 'typing.cast', 'cast', (['List[BaseEmbeddingConfig]', 'tables'], {}), '(List[BaseEmbeddingConfig], tables)\n', (3241, 3276), False, 'from typing import cast, Dict, List, Optional, Tuple, Union\n'), ((3349, 3397), 'typing.cast', 'cast', (['List[BaseEmbeddingConfig]', 'weighted_tables'], {}), '(List[BaseEmbeddingConfig], weighted_tables)\n', (3353, 3397), False, 'from typing import cast, Dict, List, Optional, Tuple, Union\n'), ((9568, 9593), 'torch.cat', 'torch.cat', (['all_local_pred'], {}), '(all_local_pred)\n', (9577, 9593), False, 'import torch\n'), ((1976, 2041), 'torchrec.distributed.test_utils.test_model.TestETSharder', 'TestETSharder', (['sharding_type', 'kernel_type', "{'learning_rate': 0.1}"], {}), "(sharding_type, kernel_type, {'learning_rate': 0.1})\n", (1989, 2041), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((6239, 6278), 'typing.cast', 'cast', (['List[BaseEmbeddingConfig]', 'tables'], {}), '(List[BaseEmbeddingConfig], tables)\n', (6243, 6278), False, 'from typing import cast, Dict, List, Optional, Tuple, Union\n'), ((6308, 6356), 'typing.cast', 'cast', (['List[BaseEmbeddingConfig]', 'weighted_tables'], {}), '(List[BaseEmbeddingConfig], weighted_tables)\n', (6312, 6356), False, 'from typing import cast, Dict, List, Optional, Tuple, Union\n'), ((6468, 6488), 'torch.device', 'torch.device', (['"""meta"""'], {}), "('meta')\n", (6480, 6488), False, 'import torch\n'), 
((6601, 6671), 'torchrec.distributed.planner.Topology', 'Topology', (['world_size', 'ctx.device.type'], {'local_world_size': 'ctx.local_size'}), '(world_size, ctx.device.type, local_world_size=ctx.local_size)\n', (6609, 6671), False, 'from torchrec.distributed.planner import EmbeddingShardingPlanner, ParameterConstraints, Topology\n'), ((8309, 8347), 'torchrec.distributed.types.ShardingEnv.from_process_group', 'ShardingEnv.from_process_group', (['ctx.pg'], {}), '(ctx.pg)\n', (8339, 8347), False, 'from torchrec.distributed.types import ModuleSharder, ShardedTensor, ShardingEnv, ShardingPlan, ShardingType\n'), ((8567, 8598), 'torch.optim.SGD', 'torch.optim.SGD', (['params'], {'lr': '(0.1)'}), '(params, lr=0.1)\n', (8582, 8598), False, 'import torch\n'), ((9066, 9094), 'torch.empty_like', 'torch.empty_like', (['local_pred'], {}), '(local_pred)\n', (9082, 9094), False, 'import torch\n'), ((2128, 2194), 'torchrec.distributed.test_utils.test_model.TestETCSharder', 'TestETCSharder', (['sharding_type', 'kernel_type', "{'learning_rate': 0.1}"], {}), "(sharding_type, kernel_type, {'learning_rate': 0.1})\n", (2142, 2194), False, 'from torchrec.distributed.test_utils.test_model import ModelInput, TestEBCSharder, TestEBSharder, TestETCSharder, TestETSharder, TestSparseNNBase\n'), ((8096, 8156), 'torch.distributed._remote_device', 'torch.distributed._remote_device', (['f"""rank:{rank}/cuda:{rank}"""'], {}), "(f'rank:{rank}/cuda:{rank}')\n", (8128, 8156), False, 'import torch\n')] |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from torch.testing import FileCheck # @manual
from torchrec.fx import symbolic_trace
from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class SparseArchTest(unittest.TestCase):
    """Tests for SparseArch: eager output values under a fixed seed, fx
    traceability and output shape, and TorchScript scripting of the traced
    module."""
    def test_basic(self) -> None:
        # Seed before building the EmbeddingBagCollection so embedding weights
        # (and hence the hard-coded expected values below) are deterministic.
        # NOTE(review): any change to construction order here invalidates
        # `expected_values`.
        torch.manual_seed(0)
        D = 3
        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1", "f3"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=10,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_arch = SparseArch(ebc)
        # f4/f5 are present in the input but backed by no table; two values
        # per bag for f1..f4, one trailing value for f5.
        keys = ["f1", "f2", "f3", "f4", "f5"]
        offsets = torch.tensor([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])
        features = KeyedJaggedTensor.from_offsets_sync(
            keys=keys,
            values=torch.tensor(
                [1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]
            ),
            offsets=offsets,
        )
        B = (len(offsets) - 1) // len(keys)
        sparse_features = sparse_arch(features)
        F = len(sparse_arch.sparse_feature_names)
        self.assertEqual(sparse_features.shape, (B, F, D))
        expected_values = torch.tensor(
            [
                [
                    [-0.7499, -1.2665, 1.0143],
                    [-0.7499, -1.2665, 1.0143],
                    [3.2276, 2.9643, -0.3816],
                ],
                [
                    [0.0082, 0.6241, -0.1119],
                    [0.0082, 0.6241, -0.1119],
                    [2.0722, -2.2734, -1.6307],
                ],
            ]
        )
        self.assertTrue(
            torch.allclose(
                sparse_features,
                expected_values,
                rtol=1e-4,
                atol=1e-4,
            ),
        )
    def test_fx_and_shape(self) -> None:
        # Symbolically trace SparseArch and check both the traced graph's
        # contents and the traced module's output shape.
        D = 3
        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1", "f3"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=10,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_arch = SparseArch(ebc)
        F = len(sparse_arch.sparse_feature_names)
        gm = symbolic_trace(sparse_arch)
        FileCheck().check("KeyedJaggedTensor").check("cat").run(gm.code)
        keys = ["f1", "f2", "f3", "f4", "f5"]
        offsets = torch.tensor([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])
        features = KeyedJaggedTensor.from_offsets_sync(
            keys=keys,
            values=torch.tensor(
                [1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]
            ),
            offsets=offsets,
        )
        B = (len(offsets) - 1) // len(keys)
        sparse_features = gm(features)
        self.assertEqual(sparse_features.shape, (B, F, D))
    # TODO(T89043538): Auto-generate this test.
    def test_fx_script(self) -> None:
        # Verify the fx-traced SparseArch can be scripted with TorchScript.
        D = 3
        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=10, feature_names=["f1"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2", embedding_dim=D, num_embeddings=10, feature_names=["f2"]
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_arch = SparseArch(ebc)
        gm = symbolic_trace(sparse_arch)
        torch.jit.script(gm)
class DenseArchTest(unittest.TestCase):
    """Tests for DenseArch: eager values under a fixed seed, fx tracing,
    and TorchScript scripting."""

    def test_basic(self) -> None:
        # Fixed seed pins DenseArch's weight init, so the literal expected
        # values below stay valid as long as construction order is unchanged.
        torch.manual_seed(0)
        batch_size = 4
        out_dim = 3
        in_features = 10
        arch = DenseArch(in_features=in_features, layer_sizes=[10, out_dim])
        embedded = arch(torch.rand((batch_size, in_features)))
        self.assertEqual(embedded.size(), (batch_size, out_dim))
        expected = torch.tensor(
            [
                [0.2351, 0.1578, 0.2784],
                [0.1579, 0.1012, 0.2660],
                [0.2459, 0.2379, 0.2749],
                [0.2582, 0.2178, 0.2860],
            ]
        )
        self.assertTrue(torch.allclose(embedded, expected, rtol=1e-4, atol=1e-4))

    def test_fx_and_shape(self) -> None:
        # The fx-traced module must preserve the (B, D) output shape.
        batch_size = 20
        out_dim = 3
        in_features = 10
        arch = DenseArch(in_features=in_features, layer_sizes=[10, out_dim])
        traced = symbolic_trace(arch)
        embedded = traced(torch.rand((batch_size, in_features)))
        self.assertEqual(embedded.size(), (batch_size, out_dim))

    # TODO(T89043538): Auto-generate this test.
    def test_fx_script(self) -> None:
        # The fx-traced module must also be scriptable and runnable.
        batch_size = 20
        out_dim = 3
        in_features = 10
        arch = DenseArch(in_features=in_features, layer_sizes=[10, out_dim])
        traced = symbolic_trace(arch)
        scripted = torch.jit.script(traced)
        embedded = scripted(torch.rand((batch_size, in_features)))
        self.assertEqual(embedded.size(), (batch_size, out_dim))
class InteractionArchTest(unittest.TestCase):
    """Tests for InteractionArch: output width ``D + F + C(F, 2)``, fx and
    TorchScript support, exact float values under a fixed seed, and exact
    integer arithmetic."""
    def test_basic(self) -> None:
        # Output width is D (dense passthrough) + F + C(F, 2) pairwise
        # interaction terms.
        D = 3
        B = 10
        keys = ["f1", "f2"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
    def test_larger(self) -> None:
        # Same shape check with larger D, B, and more sparse features.
        D = 8
        B = 20
        keys = ["f1", "f2", "f3", "f4"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
    def test_fx_and_shape(self) -> None:
        # The fx-traced module must preserve the output width.
        D = 3
        B = 10
        keys = ["f1", "f2"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        gm = symbolic_trace(inter_arch)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = gm(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
    # TODO(T89043538): Auto-generate this test.
    def test_fx_script(self) -> None:
        # The fx-traced module must also be scriptable and runnable.
        D = 3
        B = 10
        keys = ["f1", "f2"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        gm = symbolic_trace(inter_arch)
        scripted_gm = torch.jit.script(gm)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = scripted_gm(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
    def test_correctness(self) -> None:
        # Exact expected values under a fixed seed.
        # NOTE(review): the seed is set AFTER constructing InteractionArch and
        # before the torch.rand calls; the literal `expected` tensor depends on
        # that exact RNG consumption order.
        D = 4
        B = 3
        keys = [
            "f1",
            "f2",
            "f3",
            "f4",
        ]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        torch.manual_seed(0)
        dense_features = torch.rand((B, D))
        sparse_features = torch.rand((B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        # B X (D + F + F choose 2)
        self.assertEqual(concat_dense.size(), (B, D + F + choose(F, 2)))
        expected = torch.tensor(
            [
                [
                    0.4963,
                    0.7682,
                    0.0885,
                    0.1320,
                    0.2353,
                    1.0123,
                    1.1919,
                    0.7220,
                    0.3444,
                    0.7397,
                    0.4015,
                    1.5184,
                    0.8986,
                    1.2018,
                ],
                [
                    0.3074,
                    0.6341,
                    0.4901,
                    0.8964,
                    1.2787,
                    0.3275,
                    1.6734,
                    0.6325,
                    0.2089,
                    1.2982,
                    0.3977,
                    0.4200,
                    0.2475,
                    0.7834,
                ],
                [
                    0.4556,
                    0.6323,
                    0.3489,
                    0.4017,
                    0.8195,
                    1.1181,
                    1.0511,
                    0.4919,
                    1.6147,
                    1.0786,
                    0.4264,
                    1.3576,
                    0.5860,
                    0.6559,
                ],
            ]
        )
        self.assertTrue(
            torch.allclose(
                concat_dense,
                expected,
                rtol=1e-4,
                atol=1e-4,
            )
        )
    def test_numerical_stability(self) -> None:
        # Integer inputs give exactly representable results, so the comparison
        # can use torch.equal rather than an allclose tolerance.
        D = 3
        B = 6
        keys = ["f1", "f2"]
        F = len(keys)
        inter_arch = InteractionArch(num_sparse_features=F)
        torch.manual_seed(0)
        dense_features = torch.randint(0, 10, (B, D))
        sparse_features = torch.randint(0, 10, (B, F, D))
        concat_dense = inter_arch(dense_features, sparse_features)
        expected = torch.LongTensor(
            [
                [4, 9, 3, 61, 57, 63],
                [0, 3, 9, 84, 27, 45],
                [7, 3, 7, 34, 50, 25],
                [3, 1, 6, 21, 50, 91],
                [6, 9, 8, 125, 109, 74],
                [6, 6, 8, 18, 80, 21],
            ]
        )
        self.assertTrue(torch.equal(concat_dense, expected))
class DLRMTest(unittest.TestCase):
    """End-to-end tests for the DLRM module: forward shape/value checks,
    input validation, fx tracing, and TorchScript compilation."""

    def test_basic(self) -> None:
        """Seeded forward pass over two embedding tables; checks the logit
        shape and a golden value (depends on the fixed seed — do not reorder
        the RNG-consuming calls)."""
        torch.manual_seed(0)
        B = 2
        D = 8
        dense_in_features = 100
        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1", "f3"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=100,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_nn = DLRM(
            embedding_bag_collection=ebc,
            dense_in_features=dense_in_features,
            dense_arch_layer_sizes=[20, D],
            over_arch_layer_sizes=[5, 1],
        )
        features = torch.rand((B, dense_in_features))
        sparse_features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f3", "f2"],
            values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]),
            offsets=torch.tensor([0, 2, 4, 6, 8, 10, 11]),
        )
        logits = sparse_nn(
            dense_features=features,
            sparse_features=sparse_features,
        )
        self.assertEqual(logits.size(), (B, 1))
        expected_logits = torch.tensor([[0.5805], [0.5909]])
        self.assertTrue(
            torch.allclose(
                logits,
                expected_logits,
                rtol=1e-4,
                atol=1e-4,
            )
        )

    def test_one_sparse(self) -> None:
        """DLRM with a single sparse feature still produces (B, 1) logits."""
        B = 2
        D = 8
        dense_in_features = 100
        eb1_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=100,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config])
        sparse_nn = DLRM(
            embedding_bag_collection=ebc,
            dense_in_features=dense_in_features,
            dense_arch_layer_sizes=[20, D],
            over_arch_layer_sizes=[5, 1],
        )
        features = torch.rand((B, dense_in_features))
        sparse_features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f2"],
            values=torch.tensor(range(3)),
            offsets=torch.tensor([0, 2, 3]),
        )
        logits = sparse_nn(
            dense_features=features,
            sparse_features=sparse_features,
        )
        self.assertEqual(logits.size(), (B, 1))

    def test_no_sparse(self) -> None:
        """Constructing DLRM with no sparse tables must be rejected."""
        ebc = EmbeddingBagCollection(tables=[])
        D_unused = 1
        with self.assertRaises(AssertionError):
            DLRM(
                embedding_bag_collection=ebc,
                dense_in_features=100,
                dense_arch_layer_sizes=[20, D_unused],
                over_arch_layer_sizes=[5, 1],
            )

    def test_fx(self) -> None:
        """fx-trace the full model and check the generated code still routes
        the sparse feature through KeyedJaggedTensor handling."""
        B = 2
        D = 8
        dense_in_features = 100
        eb1_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=100,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config])
        sparse_nn = DLRM(
            embedding_bag_collection=ebc,
            dense_in_features=dense_in_features,
            dense_arch_layer_sizes=[20, D],
            over_arch_layer_sizes=[5, 1],
        )
        gm = symbolic_trace(sparse_nn)
        # The traced graph code must mention the KJT type, the concat, and
        # the feature key.
        FileCheck().check("KeyedJaggedTensor").check("cat").check("f2").run(gm.code)
        features = torch.rand((B, dense_in_features))
        sparse_features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f2"],
            values=torch.tensor(range(3)),
            offsets=torch.tensor([0, 2, 3]),
        )
        logits = gm(
            dense_features=features,
            sparse_features=sparse_features,
        )
        self.assertEqual(logits.size(), (B, 1))

    # TODO(T89043538): Auto-generate this test.
    def test_fx_script(self) -> None:
        """Trace then TorchScript-compile the full model and run it."""
        B = 2
        D = 8
        dense_in_features = 100
        eb1_config = EmbeddingBagConfig(
            name="t1", embedding_dim=D, num_embeddings=100, feature_names=["f1", "f3"]
        )
        eb2_config = EmbeddingBagConfig(
            name="t2",
            embedding_dim=D,
            num_embeddings=100,
            feature_names=["f2"],
        )
        ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
        sparse_nn = DLRM(
            embedding_bag_collection=ebc,
            dense_in_features=dense_in_features,
            dense_arch_layer_sizes=[20, D],
            over_arch_layer_sizes=[5, 1],
        )
        features = torch.rand((B, dense_in_features))
        sparse_features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f3", "f2"],
            values=torch.tensor([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]),
            offsets=torch.tensor([0, 2, 4, 6, 8, 10, 11]),
        )
        # Warm-up eager forward before tracing (exercises any lazy init).
        sparse_nn(
            dense_features=features,
            sparse_features=sparse_features,
        )
        gm = symbolic_trace(sparse_nn)
        scripted_gm = torch.jit.script(gm)
        logits = scripted_gm(features, sparse_features)
        self.assertEqual(logits.size(), (B, 1))
| [
"torchrec.models.dlrm.SparseArch",
"torchrec.modules.embedding_configs.EmbeddingBagConfig",
"torchrec.models.dlrm.DenseArch",
"torchrec.models.dlrm.choose",
"torchrec.models.dlrm.InteractionArch",
"torchrec.models.dlrm.DLRM",
"torchrec.modules.embedding_modules.EmbeddingBagCollection",
"torchrec.fx.sy... | [((715, 735), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (732, 735), False, 'import torch\n'), ((772, 869), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=10,\n feature_names=['f1', 'f3'])\n", (790, 869), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((909, 1000), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=10,\n feature_names=['f2'])\n", (927, 1000), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((1071, 1126), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (1093, 1126), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((1149, 1164), 'torchrec.models.dlrm.SparseArch', 'SparseArch', (['ebc'], {}), '(ebc)\n', (1159, 1164), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((1230, 1283), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19]'], {}), '([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])\n', (1242, 1283), False, 'import torch\n'), ((1753, 1945), 'torch.tensor', 'torch.tensor', (['[[[-0.7499, -1.2665, 1.0143], [-0.7499, -1.2665, 1.0143], [3.2276, 2.9643, \n -0.3816]], [[0.0082, 0.6241, -0.1119], [0.0082, 0.6241, -0.1119], [\n 2.0722, -2.2734, -1.6307]]]'], {}), '([[[-0.7499, -1.2665, 1.0143], [-0.7499, -1.2665, 1.0143], [\n 3.2276, 2.9643, -0.3816]], [[0.0082, 0.6241, -0.1119], [0.0082, 0.6241,\n -0.1119], [2.0722, -2.2734, -1.6307]]])\n', (1765, 1945), False, 'import 
torch\n'), ((2440, 2537), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=10,\n feature_names=['f1', 'f3'])\n", (2458, 2537), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((2577, 2668), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=10,\n feature_names=['f2'])\n", (2595, 2668), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((2739, 2794), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (2761, 2794), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((2817, 2832), 'torchrec.models.dlrm.SparseArch', 'SparseArch', (['ebc'], {}), '(ebc)\n', (2827, 2832), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((2896, 2923), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['sparse_arch'], {}), '(sparse_arch)\n', (2910, 2923), False, 'from torchrec.fx import symbolic_trace\n'), ((3063, 3116), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19]'], {}), '([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 19])\n', (3075, 3116), False, 'import torch\n'), ((3622, 3713), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f1']"}), "(name='t1', embedding_dim=D, num_embeddings=10,\n feature_names=['f1'])\n", (3640, 3713), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((3753, 3844), 
'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(10)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=10,\n feature_names=['f2'])\n", (3771, 3844), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((3878, 3933), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (3900, 3933), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((3956, 3971), 'torchrec.models.dlrm.SparseArch', 'SparseArch', (['ebc'], {}), '(ebc)\n', (3966, 3971), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((3986, 4013), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['sparse_arch'], {}), '(sparse_arch)\n', (4000, 4013), False, 'from torchrec.fx import symbolic_trace\n'), ((4022, 4042), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (4038, 4042), False, 'import torch\n'), ((4127, 4147), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (4144, 4147), False, 'import torch\n'), ((4222, 4277), 'torchrec.models.dlrm.DenseArch', 'DenseArch', ([], {'in_features': 'in_features', 'layer_sizes': '[10, D]'}), '(in_features=in_features, layer_sizes=[10, D])\n', (4231, 4277), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((4420, 4541), 'torch.tensor', 'torch.tensor', (['[[0.2351, 0.1578, 0.2784], [0.1579, 0.1012, 0.266], [0.2459, 0.2379, 0.2749\n ], [0.2582, 0.2178, 0.286]]'], {}), '([[0.2351, 0.1578, 0.2784], [0.1579, 0.1012, 0.266], [0.2459, \n 0.2379, 0.2749], [0.2582, 0.2178, 0.286]])\n', (4432, 4541), False, 'import torch\n'), ((4946, 5001), 'torchrec.models.dlrm.DenseArch', 'DenseArch', ([], {'in_features': 'in_features', 'layer_sizes': '[10, D]'}), 
'(in_features=in_features, layer_sizes=[10, D])\n', (4955, 5001), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((5015, 5041), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['dense_arch'], {}), '(dense_arch)\n', (5029, 5041), False, 'from torchrec.fx import symbolic_trace\n'), ((5318, 5373), 'torchrec.models.dlrm.DenseArch', 'DenseArch', ([], {'in_features': 'in_features', 'layer_sizes': '[10, D]'}), '(in_features=in_features, layer_sizes=[10, D])\n', (5327, 5373), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((5387, 5413), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['dense_arch'], {}), '(dense_arch)\n', (5401, 5413), False, 'from torchrec.fx import symbolic_trace\n'), ((5436, 5456), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (5452, 5456), False, 'import torch\n'), ((5762, 5800), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (5777, 5800), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((5827, 5845), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (5837, 5845), False, 'import torch\n'), ((5873, 5894), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (5883, 5894), False, 'import torch\n'), ((6219, 6257), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (6234, 6257), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((6284, 6302), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (6294, 6302), False, 'import torch\n'), ((6329, 6350), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (6339, 6350), False, 'import torch\n'), ((6670, 6708), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), 
'(num_sparse_features=F)\n', (6685, 6708), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((6723, 6749), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['inter_arch'], {}), '(inter_arch)\n', (6737, 6749), False, 'from torchrec.fx import symbolic_trace\n'), ((6776, 6794), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (6786, 6794), False, 'import torch\n'), ((6821, 6842), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (6831, 6842), False, 'import torch\n'), ((7199, 7237), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (7214, 7237), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((7252, 7278), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['inter_arch'], {}), '(inter_arch)\n', (7266, 7278), False, 'from torchrec.fx import symbolic_trace\n'), ((7301, 7321), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (7317, 7321), False, 'import torch\n'), ((7348, 7366), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (7358, 7366), False, 'import torch\n'), ((7393, 7414), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (7403, 7414), False, 'import torch\n'), ((7804, 7842), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (7819, 7842), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((7851, 7871), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (7868, 7871), False, 'import torch\n'), ((7898, 7916), 'torch.rand', 'torch.rand', (['(B, D)'], {}), '((B, D))\n', (7908, 7916), False, 'import torch\n'), ((7943, 7964), 'torch.rand', 'torch.rand', (['(B, F, D)'], {}), '((B, F, D))\n', (7953, 7964), False, 'import torch\n'), ((8162, 8530), 'torch.tensor', 'torch.tensor', (['[[0.4963, 0.7682, 
0.0885, 0.132, 0.2353, 1.0123, 1.1919, 0.722, 0.3444, \n 0.7397, 0.4015, 1.5184, 0.8986, 1.2018], [0.3074, 0.6341, 0.4901, \n 0.8964, 1.2787, 0.3275, 1.6734, 0.6325, 0.2089, 1.2982, 0.3977, 0.42, \n 0.2475, 0.7834], [0.4556, 0.6323, 0.3489, 0.4017, 0.8195, 1.1181, \n 1.0511, 0.4919, 1.6147, 1.0786, 0.4264, 1.3576, 0.586, 0.6559]]'], {}), '([[0.4963, 0.7682, 0.0885, 0.132, 0.2353, 1.0123, 1.1919, 0.722,\n 0.3444, 0.7397, 0.4015, 1.5184, 0.8986, 1.2018], [0.3074, 0.6341, \n 0.4901, 0.8964, 1.2787, 0.3275, 1.6734, 0.6325, 0.2089, 1.2982, 0.3977,\n 0.42, 0.2475, 0.7834], [0.4556, 0.6323, 0.3489, 0.4017, 0.8195, 1.1181,\n 1.0511, 0.4919, 1.6147, 1.0786, 0.4264, 1.3576, 0.586, 0.6559]])\n', (8174, 8530), False, 'import torch\n'), ((9838, 9876), 'torchrec.models.dlrm.InteractionArch', 'InteractionArch', ([], {'num_sparse_features': 'F'}), '(num_sparse_features=F)\n', (9853, 9876), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((9885, 9905), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (9902, 9905), False, 'import torch\n'), ((9931, 9959), 'torch.randint', 'torch.randint', (['(0)', '(10)', '(B, D)'], {}), '(0, 10, (B, D))\n', (9944, 9959), False, 'import torch\n'), ((9987, 10018), 'torch.randint', 'torch.randint', (['(0)', '(10)', '(B, F, D)'], {}), '(0, 10, (B, F, D))\n', (10000, 10018), False, 'import torch\n'), ((10107, 10275), 'torch.LongTensor', 'torch.LongTensor', (['[[4, 9, 3, 61, 57, 63], [0, 3, 9, 84, 27, 45], [7, 3, 7, 34, 50, 25], [3, 1,\n 6, 21, 50, 91], [6, 9, 8, 125, 109, 74], [6, 6, 8, 18, 80, 21]]'], {}), '([[4, 9, 3, 61, 57, 63], [0, 3, 9, 84, 27, 45], [7, 3, 7, \n 34, 50, 25], [3, 1, 6, 21, 50, 91], [6, 9, 8, 125, 109, 74], [6, 6, 8, \n 18, 80, 21]])\n', (10123, 10275), False, 'import torch\n'), ((10540, 10560), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (10557, 10560), False, 'import torch\n'), ((10643, 10741), 
'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=100,\n feature_names=['f1', 'f3'])\n", (10661, 10741), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((10781, 10873), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (10799, 10873), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((10944, 10999), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (10966, 10999), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((11020, 11157), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'ebc', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': '[20, D]', 'over_arch_layer_sizes': '[5, 1]'}), '(embedding_bag_collection=ebc, dense_in_features=dense_in_features,\n dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1])\n', (11024, 11157), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((11233, 11267), 'torch.rand', 'torch.rand', (['(B, dense_in_features)'], {}), '((B, dense_in_features))\n', (11243, 11267), False, 'import torch\n'), ((11701, 11735), 'torch.tensor', 'torch.tensor', (['[[0.5805], [0.5909]]'], {}), '([[0.5805], [0.5909]])\n', (11713, 11735), False, 'import torch\n'), ((12046, 12138), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), 
"(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (12064, 12138), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((12209, 12252), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config]'}), '(tables=[eb1_config])\n', (12231, 12252), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((12273, 12410), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'ebc', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': '[20, D]', 'over_arch_layer_sizes': '[5, 1]'}), '(embedding_bag_collection=ebc, dense_in_features=dense_in_features,\n dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1])\n', (12277, 12410), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((12486, 12520), 'torch.rand', 'torch.rand', (['(B, dense_in_features)'], {}), '((B, dense_in_features))\n', (12496, 12520), False, 'import torch\n'), ((12929, 12962), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[]'}), '(tables=[])\n', (12951, 12962), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((13364, 13456), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (13382, 13456), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((13527, 13570), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config]'}), '(tables=[eb1_config])\n', (13549, 13570), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((13591, 13728), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], 
{'embedding_bag_collection': 'ebc', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': '[20, D]', 'over_arch_layer_sizes': '[5, 1]'}), '(embedding_bag_collection=ebc, dense_in_features=dense_in_features,\n dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1])\n', (13595, 13728), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((13797, 13822), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['sparse_nn'], {}), '(sparse_nn)\n', (13811, 13822), False, 'from torchrec.fx import symbolic_trace\n'), ((13928, 13962), 'torch.rand', 'torch.rand', (['(B, dense_in_features)'], {}), '((B, dense_in_features))\n', (13938, 13962), False, 'import torch\n'), ((14480, 14578), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t1"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f1', 'f3']"}), "(name='t1', embedding_dim=D, num_embeddings=100,\n feature_names=['f1', 'f3'])\n", (14498, 14578), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((14618, 14710), 'torchrec.modules.embedding_configs.EmbeddingBagConfig', 'EmbeddingBagConfig', ([], {'name': '"""t2"""', 'embedding_dim': 'D', 'num_embeddings': '(100)', 'feature_names': "['f2']"}), "(name='t2', embedding_dim=D, num_embeddings=100,\n feature_names=['f2'])\n", (14636, 14710), False, 'from torchrec.modules.embedding_configs import EmbeddingBagConfig\n'), ((14781, 14836), 'torchrec.modules.embedding_modules.EmbeddingBagCollection', 'EmbeddingBagCollection', ([], {'tables': '[eb1_config, eb2_config]'}), '(tables=[eb1_config, eb2_config])\n', (14803, 14836), False, 'from torchrec.modules.embedding_modules import EmbeddingBagCollection\n'), ((14857, 14994), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'ebc', 'dense_in_features': 'dense_in_features', 'dense_arch_layer_sizes': '[20, D]', 'over_arch_layer_sizes': '[5, 1]'}), 
'(embedding_bag_collection=ebc, dense_in_features=dense_in_features,\n dense_arch_layer_sizes=[20, D], over_arch_layer_sizes=[5, 1])\n', (14861, 14994), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((15070, 15104), 'torch.rand', 'torch.rand', (['(B, dense_in_features)'], {}), '((B, dense_in_features))\n', (15080, 15104), False, 'import torch\n'), ((15468, 15493), 'torchrec.fx.symbolic_trace', 'symbolic_trace', (['sparse_nn'], {}), '(sparse_nn)\n', (15482, 15493), False, 'from torchrec.fx import symbolic_trace\n'), ((15517, 15537), 'torch.jit.script', 'torch.jit.script', (['gm'], {}), '(gm)\n', (15533, 15537), False, 'import torch\n'), ((2202, 2276), 'torch.allclose', 'torch.allclose', (['sparse_features', 'expected_values'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(sparse_features, expected_values, rtol=0.0001, atol=0.0001)\n', (2216, 2276), False, 'import torch\n'), ((4314, 4342), 'torch.rand', 'torch.rand', (['(B, in_features)'], {}), '((B, in_features))\n', (4324, 4342), False, 'import torch\n'), ((4677, 4743), 'torch.allclose', 'torch.allclose', (['dense_embedded', 'expected'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(dense_embedded, expected, rtol=0.0001, atol=0.0001)\n', (4691, 4743), False, 'import torch\n'), ((5070, 5098), 'torch.rand', 'torch.rand', (['(B, in_features)'], {}), '((B, in_features))\n', (5080, 5098), False, 'import torch\n'), ((5494, 5522), 'torch.rand', 'torch.rand', (['(B, in_features)'], {}), '((B, in_features))\n', (5504, 5522), False, 'import torch\n'), ((9539, 9603), 'torch.allclose', 'torch.allclose', (['concat_dense', 'expected'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(concat_dense, expected, rtol=0.0001, atol=0.0001)\n', (9553, 9603), False, 'import torch\n'), ((10424, 10459), 'torch.equal', 'torch.equal', (['concat_dense', 'expected'], {}), '(concat_dense, expected)\n', (10435, 10459), False, 'import torch\n'), ((11773, 11838), 'torch.allclose', 'torch.allclose', 
(['logits', 'expected_logits'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(logits, expected_logits, rtol=0.0001, atol=0.0001)\n', (11787, 11838), False, 'import torch\n'), ((13044, 13174), 'torchrec.models.dlrm.DLRM', 'DLRM', ([], {'embedding_bag_collection': 'ebc', 'dense_in_features': '(100)', 'dense_arch_layer_sizes': '[20, D_unused]', 'over_arch_layer_sizes': '[5, 1]'}), '(embedding_bag_collection=ebc, dense_in_features=100,\n dense_arch_layer_sizes=[20, D_unused], over_arch_layer_sizes=[5, 1])\n', (13048, 13174), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((1382, 1453), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (1394, 1453), False, 'import torch\n'), ((3215, 3286), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (3227, 3286), False, 'import torch\n'), ((11387, 11434), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (11399, 11434), False, 'import torch\n'), ((11456, 11493), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 11]'], {}), '([0, 2, 4, 6, 8, 10, 11])\n', (11468, 11493), False, 'import torch\n'), ((12672, 12695), 'torch.tensor', 'torch.tensor', (['[0, 2, 3]'], {}), '([0, 2, 3])\n', (12684, 12695), False, 'import torch\n'), ((14114, 14137), 'torch.tensor', 'torch.tensor', (['[0, 2, 3]'], {}), '([0, 2, 3])\n', (14126, 14137), False, 'import torch\n'), ((15224, 15271), 'torch.tensor', 'torch.tensor', (['[1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3]'], {}), '([1, 2, 4, 5, 4, 3, 2, 9, 1, 2, 3])\n', (15236, 15271), False, 'import torch\n'), ((15293, 15330), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8, 10, 11]'], {}), '([0, 2, 4, 6, 8, 10, 11])\n', (15305, 15330), False, 'import torch\n'), 
((6056, 6068), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (6062, 6068), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((6513, 6525), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (6519, 6525), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((6997, 7009), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (7003, 7009), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((7578, 7590), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (7584, 7590), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((8127, 8139), 'torchrec.models.dlrm.choose', 'choose', (['F', '(2)'], {}), '(F, 2)\n', (8133, 8139), False, 'from torchrec.models.dlrm import choose, DenseArch, DLRM, InteractionArch, SparseArch\n'), ((2933, 2944), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (2942, 2944), False, 'from torch.testing import FileCheck\n'), ((13831, 13842), 'torch.testing.FileCheck', 'FileCheck', ([], {}), '()\n', (13840, 13842), False, 'from torch.testing import FileCheck\n')] |
"#!/usr/bin/env python3\n# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved(...TRUNCATED) | ["torchrec.distributed.types.TensorProperties","torchrec.distributed.types.Shard","torchrec.distribu(...TRUNCATED) | "[((1702, 1729), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\\n', (171(...TRUNCATED) |
End of preview. Expand in Data Studio.
README.md exists but content is empty.
- Downloads last month
- 6