id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
9,062 | import logging
from collections import defaultdict
from typing import Any, cast, Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, NUM_POOLINGS
from torchrec.distributed.planner.shard_estimators import _calculate_shard_io_sizes
from torchrec.distributed.planner.storage_reservations import (
FixedPercentageStorageReservation,
HeuristicalStorageReservation,
InferenceStorageReservation,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Perf,
ShardingOption,
Stats,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import (
_find_imbalance_tables,
bytes_to_gb,
bytes_to_mb,
sharder_name as get_sharder_name,
)
from torchrec.distributed.types import (
ModuleSharder,
ParameterSharding,
ShardingPlan,
ShardingType,
)
def round_to_one_sigfig(x: float) -> str:
    """Render ``x`` rounded to one significant figure, as a compact string.

    The value is first formatted with the ``g`` presentation type at one
    significant digit, re-parsed as a float, and rendered with ``g`` again so
    trailing zeros and exponent noise are dropped (e.g. 0.0456 -> "0.05").
    """
    one_sigfig = float(f"{x:.1g}")
    return f"{one_sigfig:g}"
# NOTE(review): upstream `Perf` is a @dataclass and `total` is a @property
# (callers such as _format_perf_breakdown read `perf.total` without calling
# it); the decorators appear stripped in this extraction — confirm against
# the original source.
class Perf:
    """
    Representation of the breakdown of the perf estimate a single shard of an
    embedding table.
    """
    # Per-phase cost estimates for this shard; they are summed in `total`,
    # so all components are assumed to share one time unit.
    fwd_compute: float
    fwd_comms: float
    bwd_compute: float
    bwd_comms: float
    # Cache-prefetch compute component; defaults to 0 when prefetch is unused.
    prefetch_compute: float = 0.0
    def total(self) -> float:
        """Sum of all perf components of this shard (see modeling note below)."""
        # When using embedding offload, there is a prefetch compute component. This
        # prefetch can overlap with fwd_compute + fwd_comm and dense fwd (some of it
        # overlaps with fwd_compute) and dense bwd. (fwd_compute and bwd_compute are
        # embedding fwd/bwd, nothing to do with dense). Only when prefetch is longer
        # than fwd_compute + dense_fwd + dense_bwd it will block bwd_compute. However,
        # we don't have an effective way to estimate dense fwd/bwd at this point, so our
        # cost model is too simplistic. Instead prefetch is always considered blocking.
        #
        # Also note, measuring prefetch blocking can only be done after partitioning,
        # here are only have the per shard estimates.
        #
        # However adding a per-shard prefetch component to the cost model does have the
        # benefit that 1) it enables the ScaleupProposer to explore the trade off
        # between increasing cache sizes vs more difficult bin-packing constraints. 2)
        # it helps balance the prefetch compute across the ranks.
        return (
            self.fwd_compute
            + self.bwd_compute
            + self.fwd_comms
            + self.bwd_comms
            + self.prefetch_compute
        )
    def __add__(self, other: "Perf") -> "Perf":
        """Component-wise sum of two perf estimates."""
        return Perf(
            fwd_compute=self.fwd_compute + other.fwd_compute,
            fwd_comms=self.fwd_comms + other.fwd_comms,
            bwd_compute=self.bwd_compute + other.bwd_compute,
            bwd_comms=self.bwd_comms + other.bwd_comms,
            prefetch_compute=self.prefetch_compute + other.prefetch_compute,
        )
    def __hash__(self) -> int:
        # Hash over all components so equal breakdowns hash alike.
        return hash(
            (
                self.fwd_compute,
                self.fwd_comms,
                self.bwd_compute,
                self.bwd_comms,
                self.prefetch_compute,
            )
        )
def _format_perf_breakdown(perf: Perf) -> str:
    """Renders a perf estimate as ``"<total> (<fwd_comp>,<fwd_comm>,<bwd_comp>,<bwd_comm>,<prefetch>)"``.

    Components at or above 1 are rounded to whole numbers; smaller components
    are rendered with one significant figure to stay legible.
    """
    components = (
        perf.fwd_compute,
        perf.fwd_comms,
        perf.bwd_compute,
        perf.bwd_comms,
        perf.prefetch_compute,
    )
    formatted = [
        str(round(value)) if value >= 1 else round_to_one_sigfig(value)
        for value in components
    ]
    breakdown_string = ",".join(formatted)
    return f"{str(round(perf.total, 3))} ({breakdown_string})"
9,063 | import logging
from collections import defaultdict
from typing import Any, cast, Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, NUM_POOLINGS
from torchrec.distributed.planner.shard_estimators import _calculate_shard_io_sizes
from torchrec.distributed.planner.storage_reservations import (
FixedPercentageStorageReservation,
HeuristicalStorageReservation,
InferenceStorageReservation,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Perf,
ShardingOption,
Stats,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import (
_find_imbalance_tables,
bytes_to_gb,
bytes_to_mb,
sharder_name as get_sharder_name,
)
from torchrec.distributed.types import (
ModuleSharder,
ParameterSharding,
ShardingPlan,
ShardingType,
)
# NOTE(review): upstream `Storage` is a @dataclass (callers construct it with
# Storage(hbm=..., ddr=...)); the decorator appears stripped in this
# extraction — confirm against the original source.
class Storage:
    """
    Representation of the storage capacities of a hardware used in training.
    """
    # High-bandwidth (device) memory, in bytes (see bytes_to_gb usage below).
    hbm: int
    # Host DDR memory, in bytes.
    ddr: int
    def __add__(self, other: "Storage") -> "Storage":
        """Component-wise sum of two storage amounts."""
        return Storage(
            hbm=self.hbm + other.hbm,
            ddr=self.ddr + other.ddr,
        )
    def __sub__(self, other: "Storage") -> "Storage":
        """Component-wise difference of two storage amounts."""
        return Storage(
            hbm=self.hbm - other.hbm,
            ddr=self.ddr - other.ddr,
        )
    def __hash__(self) -> int:
        # Hash on the (hbm, ddr) pair so equal storages hash alike.
        return hash((self.hbm, self.ddr))
    def fits_in(self, other: "Storage") -> bool:
        """True iff both hbm and ddr fit within ``other``'s capacities."""
        return self.hbm <= other.hbm and self.ddr <= other.ddr
def bytes_to_gb(num_bytes: int) -> float:
    """Converts a byte count to gibibytes (2**30 bytes per GB)."""
    bytes_per_gb = 1024**3
    return num_bytes / bytes_per_gb
def _format_storage_breakdown(storage: Storage) -> str:
    """Renders a Storage as ``"(<hbm> GB, <ddr> GB)"`` rounded to 3 decimals."""
    hbm_gb = round(bytes_to_gb(storage.hbm), 3)
    ddr_gb = round(bytes_to_gb(storage.ddr), 3)
    return f"({hbm_gb} GB, {ddr_gb} GB)"
9,064 | import logging
from collections import defaultdict
from typing import Any, cast, Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, NUM_POOLINGS
from torchrec.distributed.planner.shard_estimators import _calculate_shard_io_sizes
from torchrec.distributed.planner.storage_reservations import (
FixedPercentageStorageReservation,
HeuristicalStorageReservation,
InferenceStorageReservation,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Perf,
ShardingOption,
Stats,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import (
_find_imbalance_tables,
bytes_to_gb,
bytes_to_mb,
sharder_name as get_sharder_name,
)
from torchrec.distributed.types import (
ModuleSharder,
ParameterSharding,
ShardingPlan,
ShardingType,
)
def _format_table(table: List[List[Union[str, int]]]) -> List[str]:
longest_cols = [
(max([len(str(row[i])) for row in table]) + 3) for i in range(len(table[0]))
]
row_format = "".join(
["{:>" + str(longest_col) + "}" for longest_col in longest_cols]
)
return [row_format.format(*row) for row in table] | null |
9,065 | import logging
from collections import defaultdict
from typing import Any, cast, Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.planner.constants import BIGINT_DTYPE, NUM_POOLINGS
from torchrec.distributed.planner.shard_estimators import _calculate_shard_io_sizes
from torchrec.distributed.planner.storage_reservations import (
FixedPercentageStorageReservation,
HeuristicalStorageReservation,
InferenceStorageReservation,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Perf,
ShardingOption,
Stats,
Storage,
StorageReservation,
Topology,
)
from torchrec.distributed.planner.utils import (
_find_imbalance_tables,
bytes_to_gb,
bytes_to_mb,
sharder_name as get_sharder_name,
)
from torchrec.distributed.types import (
ModuleSharder,
ParameterSharding,
ShardingPlan,
ShardingType,
)
def _reduce_int_list(input_list: List[int]) -> str:
if len(input_list) == 0:
return ""
reduced = []
count = 1
prev_num = input_list[0]
for num in input_list[1:]:
if num == prev_num:
count += 1
else:
if count > 1:
reduced.append(f"{prev_num} * {count}")
else:
reduced.append(str(prev_num))
prev_num = num
count = 1
# Handle the last number
if count > 1:
reduced.append(f"{prev_num}*{count}")
else:
reduced.append(str(prev_num))
return ", ".join(reduced) | null |
9,066 | import logging
import math
from typing import cast, Dict, List, Optional, Tuple, Type
import torch
import torchrec.optim as trec_optim
from torch import nn
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.constants import (
BATCHED_COPY_PERF_FACTOR,
BIGINT_DTYPE,
DP_ELEMENTWISE_KERNELS_PERF_FACTOR,
FULL_BLOCK_EMB_DIM,
HALF_BLOCK_PENALTY,
kernel_bw_lookup,
QUARTER_BLOCK_PENALTY,
UVM_CACHING_RATIO,
WEIGHTED_KERNEL_MULTIPLIER,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Perf,
PlannerError,
ShardEstimator,
ShardingOption,
Storage,
Topology,
)
from torchrec.distributed.planner.utils import prod, sharder_name
from torchrec.distributed.types import (
CacheStatistics,
CommOp,
ModuleSharder,
ShardingType,
)
from torchrec.modules.embedding_modules import EmbeddingBagCollectionInterface
class ShardingOption:
    """
    One way of sharding an embedding table.

    NOTE(review): upstream, `tensor`, `module`, `fqn`, `cache_load_factor`,
    `path`, `num_shards`, `num_inputs`, `total_storage`, `total_perf` and
    `is_pooled` are @property methods and `module_pooled` is a @staticmethod
    (it takes no `self`); the decorators appear stripped in this extraction —
    `self.module[0]` in `fqn` only works if `module` is a property. Confirm
    against the original source. `Shard`, `CacheParams`, `BoundsCheckMode`,
    `deepcopy`, `EmbeddingCollectionInterface` and
    `ManagedCollisionEmbeddingCollection` are not imported in this visible
    chunk.
    """
    def __init__(
        self,
        name: str,
        tensor: torch.Tensor,
        module: Tuple[str, nn.Module],
        input_lengths: List[float],
        batch_size: int,
        sharding_type: str,
        partition_by: str,
        compute_kernel: str,
        shards: List[Shard],
        cache_params: Optional[CacheParams] = None,
        enforce_hbm: Optional[bool] = None,
        stochastic_rounding: Optional[bool] = None,
        bounds_check_mode: Optional[BoundsCheckMode] = None,
        dependency: Optional[str] = None,
        is_pooled: Optional[bool] = None,
        feature_names: Optional[List[str]] = None,
    ) -> None:
        self.name = name
        # Kept private and exposed via accessors; __deepcopy__ below shares
        # these two references instead of deep-copying them.
        self._tensor = tensor
        self._module = module
        self.input_lengths = input_lengths
        self.batch_size = batch_size
        self.sharding_type = sharding_type
        self.partition_by = partition_by
        self.compute_kernel = compute_kernel
        # relevant to planner output, must be populated if sharding option
        # part of final solution
        self.shards = shards
        self.cache_params = cache_params
        self.enforce_hbm = enforce_hbm
        self.stochastic_rounding = stochastic_rounding
        self.bounds_check_mode = bounds_check_mode
        self.dependency = dependency
        # Lazily resolved in `is_pooled` when not supplied by the caller.
        self._is_pooled = is_pooled
        self.is_weighted: Optional[bool] = None
        self.feature_names: Optional[List[str]] = feature_names
    def tensor(self) -> torch.Tensor:
        """The unsharded parameter tensor this option would shard."""
        return self._tensor
    def module(self) -> Tuple[str, nn.Module]:
        """(module path, module) pair that owns the table."""
        return self._module
    def fqn(self) -> str:
        """Fully qualified name: '<module path>.<table name>'."""
        return self.module[0] + "." + self.name
    def cache_load_factor(self) -> Optional[float]:
        """Cache load factor from `cache_params`, or None when unset."""
        if self.cache_params is not None:
            return self.cache_params.load_factor
        return None
    def path(self) -> str:
        """Module-path component of the fqn."""
        return self.module[0]
    def num_shards(self) -> int:
        return len(self.shards)
    def num_inputs(self) -> int:
        return len(self.input_lengths)
    def total_storage(self) -> Storage:
        """Component-wise sum of per-shard storage estimates."""
        storage: Storage = Storage(hbm=0, ddr=0)
        for shard in self.shards:
            storage += cast(Storage, shard.storage)
        return storage
    def total_perf(self) -> float:
        """Sum of per-shard perf totals (Perf.total is a property upstream)."""
        perf: float = 0
        for shard in self.shards:
            # pyre-ignore: Undefined attribute [16]
            perf += shard.perf.total
        return perf
    def is_pooled(self) -> bool:
        # Cached after first computation so module traversal runs at most once.
        if self._is_pooled is None:
            self._is_pooled = ShardingOption.module_pooled(self.module[1], self.name)
        return self._is_pooled
    def module_pooled(module: nn.Module, sharding_option_name: str) -> bool:
        """Determine if module pools output (e.g. EmbeddingBag) or uses unpooled/sequential output."""
        # Sequence (unpooled) modules: the EmbeddingCollection variants —
        # either the module itself, or a submodule owning the named parameter.
        if isinstance(module, EmbeddingCollectionInterface) or isinstance(
            module, ManagedCollisionEmbeddingCollection
        ):
            return False
        for submodule in module.modules():
            if isinstance(submodule, EmbeddingCollectionInterface) or isinstance(
                submodule, ManagedCollisionEmbeddingCollection
            ):
                for name, _ in submodule.named_parameters():
                    if sharding_option_name in name:
                        return False
        return True
    def __hash__(self) -> int:
        # Hash over the fqn plus the layout-defining fields of this option.
        return hash(
            (
                self.fqn,
                self.sharding_type,
                self.compute_kernel,
                tuple(self.shards),
                self.cache_params,
            )
        )
    def __deepcopy__(
        self, memo: Optional[Dict[int, "ShardingOption"]]
    ) -> "ShardingOption":
        """Deep-copies planner state but shares `_tensor`/`_module` —
        presumably to avoid copying model weights; confirm upstream."""
        cls = self.__class__
        result = cls.__new__(cls)
        for k, v in self.__dict__.items():
            if k in ["_tensor", "_module"]:
                setattr(result, k, v)
            else:
                setattr(result, k, deepcopy(v, memo))
        return result
# NOTE(review): `Enum` is not imported in this visible chunk, and CommOp is
# also imported from torchrec.distributed.types above — this local definition
# looks like duplicated extraction context; confirm against the original.
class CommOp(Enum):
    """Names of collective ops that can carry quantized comm codecs."""
    # For detailed descriptions of each of these, see their doc strings in dist_data.
    # These are commonly used inside of a QuantizedCommsRegistry
    POOLED_EMBEDDINGS_ALL_TO_ALL = "pooled_embeddings_all_to_all"
    POOLED_EMBEDDINGS_REDUCE_SCATTER = "pooled_embeddings_reduce_scatter"
    SEQUENCE_EMBEDDINGS_ALL_TO_ALL = "sequence_embeddings_all_to_all"
# NOTE(review): several names used below (abc, Generic, M, QuantizedCommCodecs,
# EmbeddingModuleShardingPlan, ShardingEnv, ShardedModule, ComputeKernel,
# ParameterStorage, get_tensor_size_bytes) are not imported in this visible
# chunk. Upstream, `shard`/`module_type` are abstract and
# `qcomm_codecs_registry` is a @property (_extract_comm_data_type_size reads
# `sharder.qcomm_codecs_registry` without calling it) — decorators appear
# stripped in this extraction; confirm against the original source.
class ModuleSharder(abc.ABC, Generic[M]):
    """
    `ModuleSharder` is per each module, which supports sharding,
    e.g. `EmbeddingBagCollection`.
    Args::
        qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
    """
    def __init__(
        self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
    ) -> None:
        # Records API usage of the concrete sharder subclass for telemetry.
        torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
        self._qcomm_codecs_registry = qcomm_codecs_registry
    # pyre-ignore [3]
    def shard(
        self,
        module: M,
        params: EmbeddingModuleShardingPlan,
        env: ShardingEnv,
        device: Optional[torch.device] = None,
    ) -> ShardedModule[Any, Any, Any, Any]:
        """
        Does the actual sharding. It will allocate parameters on the requested locations
        as specified by corresponding ParameterSharding.
        Default implementation is data-parallel replication.
        Args:
            module (M): module to shard.
            params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
                (module path + parameter name, '.'-separated) to its sharding spec.
            env (ShardingEnv): sharding environment that has the process group.
            device (torch.device): compute device.
        Returns:
            ShardedModule[Any, Any, Any]: sharded module implementation.
        """
        ...
    # Concrete sharders return the (unsharded) module class they handle.
    def module_type(self) -> Type[M]: ...
    def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
        # Exposes the registry passed at construction time (may be None).
        return self._qcomm_codecs_registry
    def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
        """
        List of parameters that can be sharded.
        """
        return dict(module.named_parameters())
    def sharding_types(self, compute_device_type: str) -> List[str]:
        """
        List of supported sharding types. See `ShardingType` for well-known examples.
        """
        return [ShardingType.DATA_PARALLEL.value]
    def compute_kernels(
        self, sharding_type: str, compute_device_type: str
    ) -> List[str]:
        """
        List of supported compute kernels for a given sharding type and compute device.
        """
        return [ComputeKernel.DEFAULT.value]
    def storage_usage(
        self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
    ) -> Dict[str, int]:
        """
        List of system resources and corresponding usage given a compute device and
        compute kernel.
        """
        assert compute_device_type in {"cuda", "cpu", "mtia"}
        # Map the compute device to the storage pool charged for the tensor:
        # cuda -> HBM; cpu/mtia -> DDR.
        storage_map = {
            "cuda": ParameterStorage.HBM,
            "cpu": ParameterStorage.DDR,
            # TODO: Update it later. Setting for MTIA is same as CPU's for now.
            "mtia": ParameterStorage.DDR,
        }
        return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
def _extract_comm_data_type_size(
sharder: ModuleSharder[nn.Module], sharding_option: ShardingOption
) -> Tuple[float, float, float, float]:
table_data_type_size = sharding_option.tensor.element_size()
fwd_a2a_comm_data_type_size = table_data_type_size
bwd_a2a_comm_data_type_size = table_data_type_size
fwd_sr_comm_data_type_size = table_data_type_size
bwd_sr_comm_data_type_size = table_data_type_size
if sharder.qcomm_codecs_registry is not None:
qcomm_codecs_registry = sharder.qcomm_codecs_registry
if (
sharding_option.is_pooled
and CommOp.POOLED_EMBEDDINGS_ALL_TO_ALL.name in qcomm_codecs_registry
):
codecs = sharder.qcomm_codecs_registry[
CommOp.POOLED_EMBEDDINGS_ALL_TO_ALL.name
]
fwd_a2a_comm_data_type_size = torch.tensor(
[], dtype=codecs.forward.quantized_dtype
).element_size()
bwd_a2a_comm_data_type_size = torch.tensor(
[], dtype=codecs.backward.quantized_dtype
).element_size()
if (
not sharding_option.is_pooled
and CommOp.SEQUENCE_EMBEDDINGS_ALL_TO_ALL.name in qcomm_codecs_registry
):
codecs = qcomm_codecs_registry[CommOp.SEQUENCE_EMBEDDINGS_ALL_TO_ALL.name]
fwd_a2a_comm_data_type_size = torch.tensor(
[], dtype=codecs.forward.quantized_dtype
).element_size()
bwd_a2a_comm_data_type_size = torch.tensor(
[], dtype=codecs.backward.quantized_dtype
).element_size()
if (
sharding_option.is_pooled
and CommOp.POOLED_EMBEDDINGS_REDUCE_SCATTER.name in qcomm_codecs_registry
):
codecs = qcomm_codecs_registry[CommOp.POOLED_EMBEDDINGS_REDUCE_SCATTER.name]
fwd_sr_comm_data_type_size = torch.tensor(
[], dtype=codecs.forward.quantized_dtype
).element_size()
bwd_sr_comm_data_type_size = torch.tensor(
[], dtype=codecs.backward.quantized_dtype
).element_size()
return (
fwd_a2a_comm_data_type_size,
bwd_a2a_comm_data_type_size,
fwd_sr_comm_data_type_size,
bwd_sr_comm_data_type_size,
) | null |
9,067 | import logging
import math
from typing import cast, Dict, List, Optional, Tuple, Type
import torch
import torchrec.optim as trec_optim
from torch import nn
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.constants import (
BATCHED_COPY_PERF_FACTOR,
BIGINT_DTYPE,
DP_ELEMENTWISE_KERNELS_PERF_FACTOR,
FULL_BLOCK_EMB_DIM,
HALF_BLOCK_PENALTY,
kernel_bw_lookup,
QUARTER_BLOCK_PENALTY,
UVM_CACHING_RATIO,
WEIGHTED_KERNEL_MULTIPLIER,
)
from torchrec.distributed.planner.types import (
ParameterConstraints,
Perf,
PlannerError,
ShardEstimator,
ShardingOption,
Storage,
Topology,
)
from torchrec.distributed.planner.utils import prod, sharder_name
from torchrec.distributed.types import (
CacheStatistics,
CommOp,
ModuleSharder,
ShardingType,
)
from torchrec.modules.embedding_modules import EmbeddingBagCollectionInterface
def _calculate_shard_io_sizes(
    sharding_type: str,
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    emb_dim: int,
    shard_sizes: List[List[int]],
    input_data_type_size: int,
    output_data_type_size: int,
    num_poolings: List[float],
    is_pooled: bool,
) -> Tuple[List[int], List[int]]:
    """
    Dispatches per-shard input/output size estimation to the helper matching
    the given sharding type.

    Returns:
        Tuple[List[int], List[int]]: per-shard (input, output) sizes.

    Raises:
        ValueError: if ``sharding_type`` is not a recognized ShardingType.
    """
    # Arguments shared by every per-sharding-type helper.
    common_kwargs = dict(
        batch_sizes=batch_sizes,
        input_lengths=input_lengths,
        input_data_type_size=input_data_type_size,
        output_data_type_size=output_data_type_size,
        num_poolings=num_poolings,
        is_pooled=is_pooled,
    )
    if sharding_type == ShardingType.DATA_PARALLEL.value:
        return _calculate_dp_shard_io_sizes(
            emb_dim=emb_dim,
            num_shards=len(shard_sizes),
            **common_kwargs,
        )
    if sharding_type == ShardingType.TABLE_WISE.value:
        return _calculate_tw_shard_io_sizes(
            world_size=world_size,
            emb_dim=emb_dim,
            **common_kwargs,
        )
    if sharding_type in (
        ShardingType.COLUMN_WISE.value,
        ShardingType.TABLE_COLUMN_WISE.value,
    ):
        return _calculate_cw_shard_io_sizes(
            world_size=world_size,
            shard_sizes=shard_sizes,
            **common_kwargs,
        )
    if sharding_type == ShardingType.ROW_WISE.value:
        return _calculate_rw_shard_io_sizes(
            world_size=world_size,
            shard_sizes=shard_sizes,
            **common_kwargs,
        )
    if sharding_type == ShardingType.TABLE_ROW_WISE.value:
        return _calculate_twrw_shard_io_sizes(
            world_size=world_size,
            local_world_size=local_world_size,
            shard_sizes=shard_sizes,
            **common_kwargs,
        )
    raise ValueError(
        f"Unrecognized or unsupported sharding type provided: {sharding_type}"
    )
def _calculate_storage_specific_sizes(
    storage: int,
    shape: torch.Size,
    shard_sizes: List[List[int]],
    sharding_type: str,
    optimizer_class: Optional[Type[torch.optim.Optimizer]] = None,
) -> List[int]:
    """
    Apportions a table's total storage across its shards and adds the
    per-shard optimizer-state overhead.

    For data-parallel sharding every shard is charged the full ``storage``
    (each rank replicates the table); otherwise storage is scaled by the
    shard's share of the full tensor's element count.
    """
    is_data_parallel = sharding_type == ShardingType.DATA_PARALLEL.value
    full_numel = prod(shape)
    tensor_sizes: List[int] = []
    for size in shard_sizes:
        if is_data_parallel:
            tensor_sizes.append(storage)
        else:
            tensor_sizes.append(math.ceil(storage * prod(size) / full_numel))
    # Optimizer state is sized as a multiple of the tensor size; the
    # multiplier is determined by the optimizer class.
    optimizer_multiplier: float = _get_optimizer_multipler(optimizer_class, shape)
    return [
        tensor_size + math.ceil(tensor_size * optimizer_multiplier)
        for tensor_size in tensor_sizes
    ]
# NOTE(review): `Enum` is not imported in this visible chunk; this looks like
# duplicated context from torchrec.distributed.embedding_types — confirm.
class EmbeddingComputeKernel(Enum):
    """Well-known embedding compute kernel names."""
    DENSE = "dense"
    FUSED = "fused"
    FUSED_UVM = "fused_uvm"
    # The *_UVM_CACHING kernels trigger cache-ratio HBM sizing in
    # calculate_shard_storages (hbm = round(ddr * caching_ratio)).
    FUSED_UVM_CACHING = "fused_uvm_caching"
    QUANT = "quant"
    QUANT_UVM = "quant_uvm"
    QUANT_UVM_CACHING = "quant_uvm_caching"
# Size in bytes of a 64-bit integer; used as the per-element size of sparse
# id inputs when estimating input storage (see calculate_shard_storages).
BIGINT_DTYPE: int = 8
# NOTE(review): duplicate of the Storage definition earlier in this file;
# upstream it is a @dataclass (decorator appears stripped here) — confirm.
class Storage:
    """
    Representation of the storage capacities of a hardware used in training.
    """
    # High-bandwidth (device) memory, in bytes.
    hbm: int
    # Host DDR memory, in bytes.
    ddr: int
    def __add__(self, other: "Storage") -> "Storage":
        """Component-wise sum of two storage amounts."""
        return Storage(
            hbm=self.hbm + other.hbm,
            ddr=self.ddr + other.ddr,
        )
    def __sub__(self, other: "Storage") -> "Storage":
        """Component-wise difference of two storage amounts."""
        return Storage(
            hbm=self.hbm - other.hbm,
            ddr=self.ddr - other.ddr,
        )
    def __hash__(self) -> int:
        # Hash on the (hbm, ddr) pair so equal storages hash alike.
        return hash((self.hbm, self.ddr))
    def fits_in(self, other: "Storage") -> bool:
        """True iff both hbm and ddr fit within ``other``'s capacities."""
        return self.hbm <= other.hbm and self.ddr <= other.ddr
# NOTE(review): duplicate of the ModuleSharder definition earlier in this
# file; several referenced names (abc, Generic, M, QuantizedCommCodecs,
# EmbeddingModuleShardingPlan, ShardingEnv, ShardedModule, ComputeKernel,
# ParameterStorage, get_tensor_size_bytes) are not imported in this visible
# chunk, and upstream decorators (abstract methods, @property on
# qcomm_codecs_registry) appear stripped — confirm against the original.
class ModuleSharder(abc.ABC, Generic[M]):
    """
    `ModuleSharder` is per each module, which supports sharding,
    e.g. `EmbeddingBagCollection`.
    Args::
        qcomm_codecs_registry (Optional[Dict[str, QuantizedCommCodecs]]) : Mapping of CommOp name to QuantizedCommCodecs
    """
    def __init__(
        self, qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None
    ) -> None:
        # Records API usage of the concrete sharder subclass for telemetry.
        torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
        self._qcomm_codecs_registry = qcomm_codecs_registry
    # pyre-ignore [3]
    def shard(
        self,
        module: M,
        params: EmbeddingModuleShardingPlan,
        env: ShardingEnv,
        device: Optional[torch.device] = None,
    ) -> ShardedModule[Any, Any, Any, Any]:
        """
        Does the actual sharding. It will allocate parameters on the requested locations
        as specified by corresponding ParameterSharding.
        Default implementation is data-parallel replication.
        Args:
            module (M): module to shard.
            params (EmbeddingModuleShardingPlan): dict of fully qualified parameter names
                (module path + parameter name, '.'-separated) to its sharding spec.
            env (ShardingEnv): sharding environment that has the process group.
            device (torch.device): compute device.
        Returns:
            ShardedModule[Any, Any, Any]: sharded module implementation.
        """
        ...
    # Concrete sharders return the (unsharded) module class they handle.
    def module_type(self) -> Type[M]: ...
    def qcomm_codecs_registry(self) -> Optional[Dict[str, QuantizedCommCodecs]]:
        # Exposes the registry passed at construction time (may be None).
        return self._qcomm_codecs_registry
    def shardable_parameters(self, module: M) -> Dict[str, nn.Parameter]:
        """
        List of parameters that can be sharded.
        """
        return dict(module.named_parameters())
    def sharding_types(self, compute_device_type: str) -> List[str]:
        """
        List of supported sharding types. See `ShardingType` for well-known examples.
        """
        return [ShardingType.DATA_PARALLEL.value]
    def compute_kernels(
        self, sharding_type: str, compute_device_type: str
    ) -> List[str]:
        """
        List of supported compute kernels for a given sharding type and compute device.
        """
        return [ComputeKernel.DEFAULT.value]
    def storage_usage(
        self, tensor: torch.Tensor, compute_device_type: str, compute_kernel: str
    ) -> Dict[str, int]:
        """
        List of system resources and corresponding usage given a compute device and
        compute kernel.
        """
        assert compute_device_type in {"cuda", "cpu", "mtia"}
        # Map the compute device to the storage pool charged for the tensor:
        # cuda -> HBM; cpu/mtia -> DDR.
        storage_map = {
            "cuda": ParameterStorage.HBM,
            "cpu": ParameterStorage.DDR,
            # TODO: Update it later. Setting for MTIA is same as CPU's for now.
            "mtia": ParameterStorage.DDR,
        }
        return {storage_map[compute_device_type].value: get_tensor_size_bytes(tensor)}
The provided code snippet includes necessary dependencies for implementing the `calculate_shard_storages` function. Write a Python function `def calculate_shard_storages( sharder: ModuleSharder[nn.Module], sharding_type: str, tensor: torch.Tensor, compute_device: str, compute_kernel: str, shard_sizes: List[List[int]], batch_sizes: List[int], world_size: int, local_world_size: int, input_lengths: List[float], num_poolings: List[float], caching_ratio: float, is_pooled: bool, ) -> List[Storage]` to solve the following problem:
Calculates estimated storage sizes for each sharded tensor, comprised of input, output, tensor, gradient, and optimizer sizes. Args: sharder (ModuleSharder[nn.Module]): sharder for module that supports sharding. sharding_type (str): provided ShardingType value. tensor (torch.Tensor): tensor to be sharded. compute_device (str): compute device to be used. compute_kernel (str): compute kernel to be used. shard_sizes (List[List[int]]): list of dimensions of each sharded tensor. batch_sizes (List[int]): batch size for each input feature. world_size (int): total number of devices in topology. local_world_size (int): total number of devices in host group topology. input_lengths (List[float]): average input lengths synonymous with pooling factors. num_poolings (List[float]): average number of poolings per sample (typically 1.0). caching_ratio (float): ratio of HBM to DDR memory for UVM caching. is_pooled (bool): True if embedding output is pooled (ie. `EmbeddingBag`), False if unpooled/sequential (ie. `Embedding`). Returns: List[Storage]: storage object for each device in topology.
Here is the function:
def calculate_shard_storages(
    sharder: ModuleSharder[nn.Module],
    sharding_type: str,
    tensor: torch.Tensor,
    compute_device: str,
    compute_kernel: str,
    shard_sizes: List[List[int]],
    batch_sizes: List[int],
    world_size: int,
    local_world_size: int,
    input_lengths: List[float],
    num_poolings: List[float],
    caching_ratio: float,
    is_pooled: bool,
) -> List[Storage]:
    """
    Calculates estimated storage sizes for each sharded tensor, comprised of
    input, output, tensor, gradient, and optimizer sizes.

    Args:
        sharder (ModuleSharder[nn.Module]): sharder for module that supports
            sharding.
        sharding_type (str): provided ShardingType value.
        tensor (torch.Tensor): tensor to be sharded.
        compute_device (str): compute device to be used.
        compute_kernel (str): compute kernel to be used.
        shard_sizes (List[List[int]]): list of dimensions of each sharded tensor.
        batch_sizes (List[int]): batch size for each input feature.
        world_size (int): total number of devices in topology.
        local_world_size (int): total number of devices in host group topology.
        input_lengths (List[float]): average input lengths synonymous with
            pooling factors.
        num_poolings (List[float]): average number of poolings per sample
            (typically 1.0).
        caching_ratio (float): ratio of HBM to DDR memory for UVM caching.
        is_pooled (bool): True if embedding output is pooled (ie. `EmbeddingBag`),
            False if unpooled/sequential (ie. `Embedding`).

    Returns:
        List[Storage]: storage object for each device in topology.
    """
    # Per-shard activation sizes; indices are sized as 64-bit ints, outputs
    # as the table dtype.
    input_sizes, output_sizes = _calculate_shard_io_sizes(
        sharding_type=sharding_type,
        batch_sizes=batch_sizes,
        world_size=world_size,
        local_world_size=local_world_size,
        input_lengths=input_lengths,
        emb_dim=tensor.shape[1],
        shard_sizes=shard_sizes,
        input_data_type_size=BIGINT_DTYPE,
        output_data_type_size=tensor.element_size(),
        num_poolings=num_poolings,
        is_pooled=is_pooled,
    )

    tensor_storage = sharder.storage_usage(tensor, compute_device, compute_kernel)
    hbm_storage: int = tensor_storage.get("hbm", 0)
    ddr_storage: int = tensor_storage.get("ddr", 0)

    # For UVM-caching kernels the HBM footprint is modeled as
    # caching_ratio x the table's DDR footprint.
    if compute_kernel in {
        EmbeddingComputeKernel.FUSED_UVM_CACHING.value,
        EmbeddingComputeKernel.QUANT_UVM_CACHING.value,
    }:
        hbm_storage = round(ddr_storage * caching_ratio)

    optimizer_class = getattr(tensor, "_optimizer_class", None)
    hbm_specific_sizes: List[int] = _calculate_storage_specific_sizes(
        storage=hbm_storage,
        shape=tensor.shape,
        shard_sizes=shard_sizes,
        sharding_type=sharding_type,
        optimizer_class=optimizer_class,
    )
    ddr_specific_sizes: List[int] = _calculate_storage_specific_sizes(
        storage=ddr_storage,
        shape=tensor.shape,
        shard_sizes=shard_sizes,
        sharding_type=sharding_type,
        optimizer_class=optimizer_class,
    )

    shard_storages: List[Storage] = []
    for input_size, output_size, hbm_specific, ddr_specific in zip(
        input_sizes, output_sizes, hbm_specific_sizes, ddr_specific_sizes
    ):
        # Activations are charged to the compute device's pool; the weights'
        # DDR-side footprint is charged regardless of compute device.
        hbm_total = (
            input_size + output_size + hbm_specific
            if compute_device == "cuda"
            else 0
        )
        ddr_total = (
            input_size + output_size + ddr_specific
            if compute_device in {"cpu", "mtia"}
            else ddr_specific
        )
        shard_storages.append(Storage(hbm=hbm_total, ddr=ddr_total))
    return shard_storages
9,068 | import logging
from typing import Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.constants import POOLING_FACTOR
from torchrec.distributed.planner.shard_estimators import (
EmbeddingPerfEstimator,
EmbeddingStorageEstimator,
)
from torchrec.distributed.planner.types import (
Enumerator,
ParameterConstraints,
PartitionByType,
Shard,
ShardEstimator,
ShardingOption,
Topology,
)
from torchrec.distributed.planner.utils import sharder_name
from torchrec.distributed.sharding_plan import calculate_shard_sizes_and_offsets
from torchrec.distributed.types import (
BoundsCheckMode,
CacheParams,
ModuleSharder,
ShardingType,
)
from torchrec.modules.embedding_tower import EmbeddingTower, EmbeddingTowerCollection
# Default average pooling factor (input length) assumed for a feature when no
# per-table constraint supplies pooling_factors (see
# _extract_constraints_for_param).
POOLING_FACTOR: float = 1.0
class ParameterConstraints:
class CacheParams:
def __hash__(self) -> int:
def _extract_constraints_for_param(
    constraints: Optional[Dict[str, ParameterConstraints]], name: str
) -> Tuple[
    List[float],
    Optional[int],
    Optional[CacheParams],
    Optional[bool],
    Optional[bool],
    Optional[BoundsCheckMode],
    Optional[List[str]],
]:
    """
    Looks up the planner constraints registered for parameter ``name`` and
    unpacks the fields relevant to enumeration.

    Falls back to a single default pooling factor and ``None`` for every
    other field when no constraint entry exists for ``name``.
    """
    param_constraints = constraints.get(name) if constraints else None
    if param_constraints:
        return (
            param_constraints.pooling_factors,
            param_constraints.min_partition,
            param_constraints.cache_params,
            param_constraints.enforce_hbm,
            param_constraints.stochastic_rounding,
            param_constraints.bounds_check_mode,
            param_constraints.feature_names,
        )
    return (
        [POOLING_FACTOR],
        None,
        None,
        None,
        None,
        None,
        None,
    )
9,069 | import logging
from typing import Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.constants import POOLING_FACTOR
from torchrec.distributed.planner.shard_estimators import (
EmbeddingPerfEstimator,
EmbeddingStorageEstimator,
)
from torchrec.distributed.planner.types import (
Enumerator,
ParameterConstraints,
PartitionByType,
Shard,
ShardEstimator,
ShardingOption,
Topology,
)
from torchrec.distributed.planner.utils import sharder_name
from torchrec.distributed.sharding_plan import calculate_shard_sizes_and_offsets
from torchrec.distributed.types import (
BoundsCheckMode,
CacheParams,
ModuleSharder,
ShardingType,
)
from torchrec.modules.embedding_tower import EmbeddingTower, EmbeddingTowerCollection
# NOTE(review): `Enum` is not imported in this visible chunk — presumably
# `from enum import Enum` exists in the original module; confirm.
class PartitionByType(Enum):
    """
    Well-known partition types.
    """
    # Partitioning based on device
    DEVICE = "device"
    # Partitioning based on host
    HOST = "host"
    # Uniform, (ie. fixed layout)
    UNIFORM = "uniform"
class ShardingType(Enum):
    """
    Well-known sharding types, used by inter-module optimizations.
    """

    # full replica of the table on every rank
    DATA_PARALLEL = "data_parallel"
    # the whole table lives on exactly one rank
    TABLE_WISE = "table_wise"
    # column slices of the table spread over multiple ranks
    COLUMN_WISE = "column_wise"
    # row ranges of the table spread over all ranks
    ROW_WISE = "row_wise"
    # row-wise inside a node, table-wise between nodes; useful when
    # intra-node comms are much cheaper than inter-node comms
    TABLE_ROW_WISE = "table_row_wise"
    # column-wise inside a node, table-wise between nodes
    TABLE_COLUMN_WISE = "table_column_wise"
The provided code snippet includes necessary dependencies for implementing the `get_partition_by_type` function. Write a Python function `def get_partition_by_type(sharding_type: str) -> str` to solve the following problem:
Gets corresponding partition by type for provided sharding type. Args: sharding_type (str): sharding type string. Returns: str: the corresponding `PartitionByType` value.
Here is the function:
def get_partition_by_type(sharding_type: str) -> str:
    """
    Gets corresponding partition by type for provided sharding type.

    Args:
        sharding_type (str): sharding type string.

    Returns:
        str: the corresponding `PartitionByType` value.
    """
    # direct sharding-type -> partition-by mapping; TW/CW place per device,
    # TWRW/TWCW place per host, RW/DP use a uniform layout
    partition_by_lookup = {
        ShardingType.TABLE_WISE.value: PartitionByType.DEVICE.value,
        ShardingType.COLUMN_WISE.value: PartitionByType.DEVICE.value,
        ShardingType.TABLE_ROW_WISE.value: PartitionByType.HOST.value,
        ShardingType.TABLE_COLUMN_WISE.value: PartitionByType.HOST.value,
        ShardingType.ROW_WISE.value: PartitionByType.UNIFORM.value,
        ShardingType.DATA_PARALLEL.value: PartitionByType.UNIFORM.value,
    }
    if sharding_type in partition_by_lookup:
        return partition_by_lookup[sharding_type]
    raise ValueError(
        f"Unrecognized or unsupported sharding type provided: {sharding_type}"
    )
9,070 | import logging
from typing import Dict, List, Optional, Tuple, Union
from torch import nn
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
from torchrec.distributed.planner.constants import POOLING_FACTOR
from torchrec.distributed.planner.shard_estimators import (
EmbeddingPerfEstimator,
EmbeddingStorageEstimator,
)
from torchrec.distributed.planner.types import (
Enumerator,
ParameterConstraints,
PartitionByType,
Shard,
ShardEstimator,
ShardingOption,
Topology,
)
from torchrec.distributed.planner.utils import sharder_name
from torchrec.distributed.sharding_plan import calculate_shard_sizes_and_offsets
from torchrec.distributed.types import (
BoundsCheckMode,
CacheParams,
ModuleSharder,
ShardingType,
)
from torchrec.modules.embedding_tower import EmbeddingTower, EmbeddingTowerCollection
class EmbeddingTowerCollection(nn.Module):
def __init__(
self,
towers: List[EmbeddingTower],
device: Optional[torch.device] = None,
) -> None:
def forward(
self,
features: Optional[KeyedJaggedTensor] = None,
weighted_features: Optional[KeyedJaggedTensor] = None,
) -> torch.Tensor:
def _get_tower_index(name: str, child_module: EmbeddingTowerCollection) -> int:
    """Return the index of the tower in ``child_module`` that owns table ``name``.

    Walks each tower's submodules looking for an ``nn.Embedding`` or
    ``nn.EmbeddingBag`` whose registered module name (last dotted component)
    equals ``name``.

    Raises:
        RuntimeError: if no tower contains a table with the given name.
    """
    for i, tower in enumerate(child_module.towers):
        for n, m in tower.named_modules():
            # single isinstance with a tuple instead of a chained `or`
            if isinstance(m, (nn.Embedding, nn.EmbeddingBag)):
                table_name = n.split(".")[-1]
                if name == table_name:
                    return i
    raise RuntimeError(
        f"couldn't get the tower index for table {name}, tower collection: {child_module}"
    )
9,071 | from typing import Optional
from torchrec.distributed.embedding_types import EmbeddingComputeKernel
UVM_CACHING_RATIO: float = 0.2
class EmbeddingComputeKernel(Enum):
    """Embedding lookup kernel variants, by precision and memory placement."""

    DENSE = "dense"
    FUSED = "fused"
    FUSED_UVM = "fused_uvm"
    FUSED_UVM_CACHING = "fused_uvm_caching"
    QUANT = "quant"
    QUANT_UVM = "quant_uvm"
    QUANT_UVM_CACHING = "quant_uvm_caching"
The provided code snippet includes necessary dependencies for implementing the `kernel_bw_lookup` function. Write a Python function `def kernel_bw_lookup( compute_device: str, compute_kernel: str, hbm_mem_bw: float, ddr_mem_bw: float, caching_ratio: Optional[float] = None, prefetch_pipeline: bool = False, ) -> Optional[float]` to solve the following problem:
Calculates the device bandwidth based on given compute device, compute kernel, and caching ratio. Args: compute_kernel (str): compute kernel. compute_device (str): compute device. hbm_mem_bw (float): the bandwidth of the device HBM. ddr_mem_bw (float): the bandwidth of the system DDR memory. caching_ratio (Optional[float]): caching ratio used to determine device bandwidth if UVM caching is enabled. prefetch_pipeline (bool): whether prefetch pipeline is enabled. Returns: Optional[float]: the device bandwidth.
Here is the function:
def kernel_bw_lookup(
    compute_device: str,
    compute_kernel: str,
    hbm_mem_bw: float,
    ddr_mem_bw: float,
    caching_ratio: Optional[float] = None,
    prefetch_pipeline: bool = False,
) -> Optional[float]:
    """
    Calculates the device bandwidth based on given compute device, compute kernel, and
    caching ratio.

    Args:
        compute_kernel (str): compute kernel.
        compute_device (str): compute device.
        hbm_mem_bw (float): the bandwidth of the device HBM.
        ddr_mem_bw (float): the bandwidth of the system DDR memory.
        caching_ratio (Optional[float]): caching ratio used to determine device bandwidth
            if UVM caching is enabled.
        prefetch_pipeline (bool): whether prefetch pipeline is enabled.

    Returns:
        Optional[float]: the device bandwidth, or None for an unknown
        (device, kernel) combination.
    """
    # BUG FIX: only fall back to the default when the ratio is unset; an explicit
    # caching_ratio of 0.0 (fully DDR-resident cache) is a legitimate value and was
    # previously clobbered by the truthiness check.
    caching_ratio = caching_ratio if caching_ratio is not None else UVM_CACHING_RATIO
    lookup = {
        # CPU
        ("cpu", EmbeddingComputeKernel.DENSE.value): 0.5 * ddr_mem_bw,
        ("cpu", EmbeddingComputeKernel.FUSED.value): 1 * ddr_mem_bw,
        ("cpu", EmbeddingComputeKernel.QUANT.value): 1 * ddr_mem_bw,
        # TODO: Determine the correct value later. MTIA uses values same as CPU's.
        # MTIA
        ("mtia", EmbeddingComputeKernel.DENSE.value): 0.5 * ddr_mem_bw,
        ("mtia", EmbeddingComputeKernel.FUSED.value): 1 * ddr_mem_bw,
        ("mtia", EmbeddingComputeKernel.QUANT.value): 1 * ddr_mem_bw,
        # CUDA
        ("cuda", EmbeddingComputeKernel.DENSE.value): 0.5 * hbm_mem_bw,
        ("cuda", EmbeddingComputeKernel.FUSED.value): 1 * hbm_mem_bw,
        ("cuda", EmbeddingComputeKernel.FUSED_UVM.value): ddr_mem_bw / 10,
        # UVM caching: blend HBM and DDR bandwidth by the cache hit ratio
        ("cuda", EmbeddingComputeKernel.FUSED_UVM_CACHING.value): (
            caching_ratio * hbm_mem_bw + (1 - caching_ratio) * ddr_mem_bw
        )
        / 10,
        ("cuda", EmbeddingComputeKernel.QUANT.value): 1 * hbm_mem_bw,
        ("cuda", EmbeddingComputeKernel.QUANT_UVM.value): ddr_mem_bw / 10,
        ("cuda", EmbeddingComputeKernel.QUANT_UVM_CACHING.value): (
            caching_ratio * hbm_mem_bw + (1 - caching_ratio) * ddr_mem_bw
        )
        / 10,
    }
    # With a prefetch pipeline the cache is warmed ahead of the lookup, so the
    # UVM-caching kernel effectively runs at fused/HBM speed.
    if (
        prefetch_pipeline
        and compute_device == "cuda"
        and compute_kernel == EmbeddingComputeKernel.FUSED_UVM_CACHING.value
    ):
        return lookup.get(("cuda", EmbeddingComputeKernel.FUSED.value))

    return lookup.get((compute_device, compute_kernel))
9,072 | import copy
import heapq
import logging
from dataclasses import dataclass
from enum import Enum
from typing import cast, Dict, List, Optional
from torchrec.distributed.planner.perf_models import NoopPerfModel
from torchrec.distributed.planner.types import (
DeviceHardware,
PartitionByType,
Partitioner,
Perf,
PerfModel,
PlannerError,
PlannerErrorType,
ShardingOption,
Storage,
Topology,
)
from torchrec.distributed.planner.utils import bytes_to_gb, reset_shard_rank
from torchrec.distributed.types import ShardingType
class DeviceHardware:
    """
    Representation of a device in a process group. 'perf' is an estimation of network,
    CPU, and storage usages.
    """
    # NOTE(review): likely declared as a @dataclass in the full source — the
    # decorator is not visible in this chunk; confirm before relying on
    # generated __init__/__repr__.
    # global rank of this device within the process group
    rank: int
    # remaining storage budget (HBM/DDR) available for shard placement
    storage: Storage
    # accumulated perf estimate for shards assigned to this device
    perf: Perf
def _sort_devices_by_perf(
    devices: List[List[DeviceHardware]],
) -> List[List[DeviceHardware]]:
    """Order device groups by their combined perf total, cheapest group first."""

    def _group_total(group: List[DeviceHardware]) -> float:
        # `perf.total` is accessed as an attribute, matching its use elsewhere
        return sum(device.perf.total for device in group)

    return sorted(devices, key=_group_total)
9,073 | import copy
import heapq
import logging
from dataclasses import dataclass
from enum import Enum
from typing import cast, Dict, List, Optional
from torchrec.distributed.planner.perf_models import NoopPerfModel
from torchrec.distributed.planner.types import (
DeviceHardware,
PartitionByType,
Partitioner,
Perf,
PerfModel,
PlannerError,
PlannerErrorType,
ShardingOption,
Storage,
Topology,
)
from torchrec.distributed.planner.utils import bytes_to_gb, reset_shard_rank
from torchrec.distributed.types import ShardingType
class ShardingOption:
    """
    One way of sharding an embedding table.
    """
    # NOTE(review): accessors below (tensor/module/fqn/path/...) are read as plain
    # attributes elsewhere in this file (e.g. ``self.fqn`` in ``__hash__``), so they
    # are presumably @property-decorated in the full source — decorators appear
    # stripped in this dump. TODO confirm against the original file.
    def __init__(
        self,
        name: str,
        tensor: torch.Tensor,
        module: Tuple[str, nn.Module],
        input_lengths: List[float],
        batch_size: int,
        sharding_type: str,
        partition_by: str,
        compute_kernel: str,
        shards: List[Shard],
        cache_params: Optional[CacheParams] = None,
        enforce_hbm: Optional[bool] = None,
        stochastic_rounding: Optional[bool] = None,
        bounds_check_mode: Optional[BoundsCheckMode] = None,
        dependency: Optional[str] = None,
        is_pooled: Optional[bool] = None,
        feature_names: Optional[List[str]] = None,
    ) -> None:
        self.name = name
        self._tensor = tensor
        self._module = module
        self.input_lengths = input_lengths
        self.batch_size = batch_size
        self.sharding_type = sharding_type
        self.partition_by = partition_by
        self.compute_kernel = compute_kernel
        # relevant to planner output, must be populated if sharding option
        # part of final solution
        self.shards = shards
        self.cache_params = cache_params
        self.enforce_hbm = enforce_hbm
        self.stochastic_rounding = stochastic_rounding
        self.bounds_check_mode = bounds_check_mode
        self.dependency = dependency
        # cached lazily by is_pooled() when not supplied
        self._is_pooled = is_pooled
        self.is_weighted: Optional[bool] = None
        self.feature_names: Optional[List[str]] = feature_names
    def tensor(self) -> torch.Tensor:
        # backing (unsharded) parameter tensor
        return self._tensor
    def module(self) -> Tuple[str, nn.Module]:
        # (module path, module) pair this table belongs to
        return self._module
    def fqn(self) -> str:
        # fully qualified name: "<module path>.<table name>"
        return self.module[0] + "." + self.name
    def cache_load_factor(self) -> Optional[float]:
        if self.cache_params is not None:
            return self.cache_params.load_factor
        return None
    def path(self) -> str:
        return self.module[0]
    def num_shards(self) -> int:
        return len(self.shards)
    def num_inputs(self) -> int:
        return len(self.input_lengths)
    def total_storage(self) -> Storage:
        # sum of per-shard storage estimates (HBM + DDR)
        storage: Storage = Storage(hbm=0, ddr=0)
        for shard in self.shards:
            storage += cast(Storage, shard.storage)
        return storage
    def total_perf(self) -> float:
        # sum of per-shard perf totals
        perf: float = 0
        for shard in self.shards:
            # pyre-ignore: Undefined attribute [16]
            perf += shard.perf.total
        return perf
    def is_pooled(self) -> bool:
        # computed on first access and memoized in _is_pooled
        if self._is_pooled is None:
            self._is_pooled = ShardingOption.module_pooled(self.module[1], self.name)
        return self._is_pooled
    def module_pooled(module: nn.Module, sharding_option_name: str) -> bool:
        """Determine if module pools output (e.g. EmbeddingBag) or uses unpooled/sequential output."""
        # NOTE(review): invoked as ShardingOption.module_pooled(...), so presumably a
        # @staticmethod in the full source.
        if isinstance(module, EmbeddingCollectionInterface) or isinstance(
            module, ManagedCollisionEmbeddingCollection
        ):
            return False
        for submodule in module.modules():
            if isinstance(submodule, EmbeddingCollectionInterface) or isinstance(
                submodule, ManagedCollisionEmbeddingCollection
            ):
                for name, _ in submodule.named_parameters():
                    if sharding_option_name in name:
                        return False
        return True
    def __hash__(self) -> int:
        # identity for dedup: table fqn + sharding + kernel + shard layout + cache config
        return hash(
            (
                self.fqn,
                self.sharding_type,
                self.compute_kernel,
                tuple(self.shards),
                self.cache_params,
            )
        )
    def __deepcopy__(
        self, memo: Optional[Dict[int, "ShardingOption"]]
    ) -> "ShardingOption":
        # tensor and module are shared rather than copied, to avoid duplicating
        # model weights on every planner copy
        cls = self.__class__
        result = cls.__new__(cls)
        for k, v in self.__dict__.items():
            if k in ["_tensor", "_module"]:
                setattr(result, k, v)
            else:
                setattr(result, k, deepcopy(v, memo))
        return result
class PartitionByType(Enum):
    """
    Well-known partition types.
    """
    # NOTE(review): duplicate of the PartitionByType definition appearing earlier
    # in this dump; keep the member values in sync.
    # Partitioning based on device
    DEVICE = "device"
    # Partitioning based on host
    HOST = "host"
    # Uniform, (ie. fixed layout)
    UNIFORM = "uniform"
def _get_uniform_sharding_options(
    sharding_options: List[ShardingOption],
) -> List[ShardingOption]:
    """Return only the options that use uniform (fixed-layout) partitioning."""
    return [
        option
        for option in sharding_options
        if option.partition_by == PartitionByType.UNIFORM.value
    ]
9,074 | import copy
import heapq
import logging
from dataclasses import dataclass
from enum import Enum
from typing import cast, Dict, List, Optional
from torchrec.distributed.planner.perf_models import NoopPerfModel
from torchrec.distributed.planner.types import (
DeviceHardware,
PartitionByType,
Partitioner,
Perf,
PerfModel,
PlannerError,
PlannerErrorType,
ShardingOption,
Storage,
Topology,
)
from torchrec.distributed.planner.utils import bytes_to_gb, reset_shard_rank
from torchrec.distributed.types import ShardingType
logger: logging.Logger = logging.getLogger(__name__)
class ShardingOptionGroup:
class SortBy(Enum):
class ShardingOption:
def __init__(
self,
name: str,
tensor: torch.Tensor,
module: Tuple[str, nn.Module],
input_lengths: List[float],
batch_size: int,
sharding_type: str,
partition_by: str,
compute_kernel: str,
shards: List[Shard],
cache_params: Optional[CacheParams] = None,
enforce_hbm: Optional[bool] = None,
stochastic_rounding: Optional[bool] = None,
bounds_check_mode: Optional[BoundsCheckMode] = None,
dependency: Optional[str] = None,
is_pooled: Optional[bool] = None,
feature_names: Optional[List[str]] = None,
) -> None:
def tensor(self) -> torch.Tensor:
def module(self) -> Tuple[str, nn.Module]:
def fqn(self) -> str:
def cache_load_factor(self) -> Optional[float]:
def path(self) -> str:
def num_shards(self) -> int:
def num_inputs(self) -> int:
def total_storage(self) -> Storage:
def total_perf(self) -> float:
def is_pooled(self) -> bool:
def module_pooled(module: nn.Module, sharding_option_name: str) -> bool:
def __hash__(self) -> int:
def __deepcopy__(
self, memo: Optional[Dict[int, "ShardingOption"]]
) -> "ShardingOption":
class PartitionByType(Enum):
def _group_and_sort_non_uniform_sharding_options(
    sharding_options: List[ShardingOption],
    sort_by: SortBy = SortBy.STORAGE,
    balance_modules: bool = False,
) -> List[ShardingOptionGroup]:
    """Group non-uniform sharding options by dependency (falling back to fqn)
    and return the groups sorted, largest first, by storage or perf.

    When ``balance_modules`` is set, groups from modules with fewer parameters
    sort first (param_count is stored negated for that purpose).
    """
    # count modules by name
    param_count: Dict[str, int] = {}
    for sharding_option in sharding_options:
        path = sharding_option.path
        if path not in param_count:
            param_count[path] = 0
        param_count[path] += 1
    logger.debug(f"param_count is {param_count}")
    sharding_option_groups_by_dependency = {}
    for sharding_option in sharding_options:
        # uniform options are partitioned separately and excluded here
        if sharding_option.partition_by == PartitionByType.UNIFORM.value:
            continue
        group_key = sharding_option.dependency or sharding_option.fqn
        if group_key not in sharding_option_groups_by_dependency:
            sharding_option_groups_by_dependency[group_key] = ShardingOptionGroup(
                [sharding_option],
                sharding_option.total_storage,
                sharding_option.total_perf,
                # negative value to indicate that smaller modules should be sorted first
                param_count=-param_count[sharding_option.path],
            )
        else:
            # accumulate storage/perf sums onto the existing group
            sharding_option_groups_by_dependency[group_key].sharding_options.append(
                sharding_option
            )
            sharding_option_groups_by_dependency[
                group_key
            ].storage_sum += sharding_option.total_storage
            sharding_option_groups_by_dependency[
                group_key
            ].perf_sum += sharding_option.total_perf
    sharding_option_groups = list(sharding_option_groups_by_dependency.values())
    sort_by_attributes: List[str] = []
    if balance_modules:
        sort_by_attributes.append("param_count")
    if sort_by == SortBy.STORAGE:
        sort_by_attributes.append("storage_sum")
    elif sort_by == SortBy.PERF:
        sort_by_attributes.append("perf_sum")
    else:
        raise RuntimeError(f"Unexpected sort_by: {sort_by}")
    # lexicographic sort over the chosen attributes, descending
    sharding_option_groups.sort(
        key=lambda group: [getattr(group, attr) for attr in sort_by_attributes],
        reverse=True,
    )
    return sharding_option_groups
9,075 | import copy
import heapq
import logging
from dataclasses import dataclass
from enum import Enum
from typing import cast, Dict, List, Optional
from torchrec.distributed.planner.perf_models import NoopPerfModel
from torchrec.distributed.planner.types import (
DeviceHardware,
PartitionByType,
Partitioner,
Perf,
PerfModel,
PlannerError,
PlannerErrorType,
ShardingOption,
Storage,
Topology,
)
from torchrec.distributed.planner.utils import bytes_to_gb, reset_shard_rank
from torchrec.distributed.types import ShardingType
class Topology:
    # NOTE(review): the accessors below (compute_device, devices, ...) are read as
    # attributes by callers in this file (e.g. ``storage_constraint.devices`` in
    # set_hbm_per_device), so they are presumably @property-decorated in the full
    # source — decorators appear stripped in this dump.
    def __init__(
        self,
        world_size: int,
        compute_device: str,
        hbm_cap: Optional[int] = None,
        ddr_cap: Optional[int] = None,
        local_world_size: Optional[int] = None,
        hbm_mem_bw: float = HBM_MEM_BW,
        ddr_mem_bw: float = DDR_MEM_BW,
        intra_host_bw: float = INTRA_NODE_BANDWIDTH,
        inter_host_bw: float = CROSS_NODE_BANDWIDTH,
        bwd_compute_multiplier: float = BWD_COMPUTE_MULTIPLIER,
    ) -> None:
        """
        Representation of a network of devices in a cluster.
        """
        # validate input
        assert compute_device in [
            "cpu",
            "cuda",
            "mtia",
        ], f"unsupported compute device {compute_device}"
        self._compute_device = compute_device
        self._world_size = world_size
        # only CUDA devices get an HBM budget; CPU/MTIA devices use DDR only
        hbm_per_device = 0
        if self._compute_device == "cuda":
            hbm_per_device = hbm_cap if hbm_cap else HBM_CAP
        ddr_cap = ddr_cap if ddr_cap else DDR_CAP
        self._devices: List[DeviceHardware] = []
        for rank in range(world_size):
            self._devices.append(
                DeviceHardware(
                    rank=rank,
                    storage=Storage(hbm=hbm_per_device, ddr=ddr_cap),
                    perf=Perf(fwd_compute=0, fwd_comms=0, bwd_compute=0, bwd_comms=0),
                )
            )
        # default to a single host when local_world_size is unset
        self._local_world_size: int = (
            local_world_size if local_world_size else world_size
        )
        self._hbm_mem_bw = hbm_mem_bw
        self._ddr_mem_bw = ddr_mem_bw
        self._intra_host_bw = intra_host_bw
        self._inter_host_bw = inter_host_bw
        self._bwd_compute_multiplier = bwd_compute_multiplier
    def compute_device(self) -> str:
        return self._compute_device
    def devices(self) -> List[DeviceHardware]:
        return self._devices
    def world_size(self) -> int:
        return self._world_size
    def local_world_size(self) -> int:
        return self._local_world_size
    def hbm_mem_bw(self) -> float:
        return self._hbm_mem_bw
    def ddr_mem_bw(self) -> float:
        return self._ddr_mem_bw
    def intra_host_bw(self) -> float:
        return self._intra_host_bw
    def inter_host_bw(self) -> float:
        return self._inter_host_bw
    def bwd_compute_multiplier(self) -> float:
        return self._bwd_compute_multiplier
    def __repr__(self) -> str:
        # multi-line summary, one line per device
        topology_repr: str = f"world_size={self._world_size} \n"
        topology_repr += f"compute_device={self._compute_device}\n"
        topology_repr += "devices=\n"
        for idx, device in enumerate(self._devices):
            topology_repr += f"\tdevice {idx} {device}\n"
        topology_repr += f"local_world_size={self._local_world_size} \n"
        topology_repr += f"intra_host_bw={self._intra_host_bw} \n"
        topology_repr += f"inter_host_bw={self._inter_host_bw} \n"
        return topology_repr
def set_hbm_per_device(storage_constraint: Topology, hbm_per_device: int) -> None:
    """Overwrite the HBM budget of every device in the topology, in place."""
    for dev in storage_constraint.devices:
        dev.storage.hbm = hbm_per_device
9,076 | import abc
from dataclasses import dataclass
from enum import Enum, unique
from typing import Any, Dict, Generic, Iterator, List, Optional, TypeVar
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_training import EmbeddingLocation
from torch import fx, nn
from torch.nn.modules.module import _addindent
from torchrec.distributed.types import (
get_tensor_size_bytes,
ModuleSharder,
ParameterStorage,
QuantizedCommCodecs,
ShardedModule,
ShardedTensorMetadata,
ShardingType,
ShardMetadata,
)
from torchrec.modules.embedding_configs import (
DataType,
EmbeddingTableConfig,
PoolingType,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
class EmbeddingComputeKernel(Enum):
    # NOTE(review): duplicate of the EmbeddingComputeKernel definition appearing
    # earlier in this dump; keep the member values in sync.
    DENSE = "dense"
    FUSED = "fused"
    FUSED_UVM = "fused_uvm"
    FUSED_UVM_CACHING = "fused_uvm_caching"
    QUANT = "quant"
    QUANT_UVM = "quant_uvm"
    QUANT_UVM_CACHING = "quant_uvm_caching"
def compute_kernel_to_embedding_location(
    compute_kernel: EmbeddingComputeKernel,
) -> EmbeddingLocation:
    """Map a compute kernel to the embedding memory location it implies."""
    location_by_kernel = {
        # fully device-resident kernels
        EmbeddingComputeKernel.DENSE: EmbeddingLocation.DEVICE,
        EmbeddingComputeKernel.FUSED: EmbeddingLocation.DEVICE,
        EmbeddingComputeKernel.QUANT: EmbeddingLocation.DEVICE,
        # UVM-managed (host-memory backed) kernels
        EmbeddingComputeKernel.FUSED_UVM: EmbeddingLocation.MANAGED,
        EmbeddingComputeKernel.QUANT_UVM: EmbeddingLocation.MANAGED,
        # UVM-managed with a device-side cache
        EmbeddingComputeKernel.FUSED_UVM_CACHING: EmbeddingLocation.MANAGED_CACHING,
        EmbeddingComputeKernel.QUANT_UVM_CACHING: EmbeddingLocation.MANAGED_CACHING,
    }
    if compute_kernel not in location_by_kernel:
        raise ValueError(f"Invalid EmbeddingComputeKernel {compute_kernel}")
    return location_by_kernel[compute_kernel]
9,077 | import abc
import logging
from collections import defaultdict, OrderedDict
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.distributed as dist
from torch import nn
from torchrec.distributed.embedding_types import (
EmbeddingComputeKernel,
GroupedEmbeddingConfig,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import Shard, ShardedTensor, ShardedTensorMetadata
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class EmbeddingComputeKernel(Enum):
DENSE = "dense"
FUSED = "fused"
FUSED_UVM = "fused_uvm"
FUSED_UVM_CACHING = "fused_uvm_caching"
QUANT = "quant"
QUANT_UVM = "quant_uvm"
QUANT_UVM_CACHING = "quant_uvm_caching"
class ShardedEmbeddingTable(
    ShardedMetaConfig,
    EmbeddingAttributes,
    EmbeddingTableConfig,
):
    # Optional per-table overrides forwarded to the fused TBE kernel.
    # NOTE(review): presumably a dataclass field — the decorator is not visible
    # in this chunk; confirm against the full source.
    fused_params: Optional[Dict[str, Any]] = None
The provided code snippet includes necessary dependencies for implementing the `get_state_dict` function. Write a Python function `def get_state_dict( embedding_tables: List[ShardedEmbeddingTable], params: Union[ nn.ModuleList, List[Union[nn.Module, torch.Tensor]], List[torch.Tensor], List[Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]], ], pg: Optional[dist.ProcessGroup] = None, destination: Optional[Dict[str, Any]] = None, prefix: str = "", ) -> Dict[str, Any]` to solve the following problem:
It is possible for there to be multiple shards from a table on a single rank. We accumulate them in key_to_local_shards. Repeat shards should have identical global ShardedTensorMetadata.
Here is the function:
def get_state_dict(
    embedding_tables: List[ShardedEmbeddingTable],
    params: Union[
        nn.ModuleList,
        List[Union[nn.Module, torch.Tensor]],
        List[torch.Tensor],
        List[Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]],
    ],
    pg: Optional[dist.ProcessGroup] = None,
    destination: Optional[Dict[str, Any]] = None,
    prefix: str = "",
) -> Dict[str, Any]:
    """Build a state dict mapping "<prefix><table>.weight" to its parameter.

    With a process group, sharded entries are collected into ShardedTensors;
    otherwise tensors are stored directly. QUANT* kernels pass params as
    (weight, qscale, qbias) tuples; scale/bias land under "_qscale"/"_qbias"
    suffixed keys.
    """
    if destination is None:
        destination = OrderedDict()
        # pyre-ignore [16]
        destination._metadata = OrderedDict()
    """
    It is possible for there to be multiple shards from a table on a single rank.
    We accumulate them in key_to_local_shards. Repeat shards should have identical
    global ShardedTensorMetadata.
    """
    key_to_local_shards: Dict[str, List[Shard]] = defaultdict(list)
    key_to_global_metadata: Dict[str, ShardedTensorMetadata] = {}
    def get_key_from_embedding_table(embedding_table: ShardedEmbeddingTable) -> str:
        return prefix + f"{embedding_table.name}.weight"
    for embedding_table, param in zip(embedding_tables, params):
        key = get_key_from_embedding_table(embedding_table)
        is_quant = embedding_table.compute_kernel in [
            EmbeddingComputeKernel.QUANT,
            EmbeddingComputeKernel.QUANT_UVM,
            EmbeddingComputeKernel.QUANT_UVM_CACHING,
        ]
        qscale = None
        qbias = None
        if is_quant:
            # For QUANT* param is Tuple[torch.Tensor, Optional[torch.Tensor]] where first argument is the weight table, the second is optional quantization extra information, depending on quantization type. e.g. for fbgemm rowwise quantization this is scale and shift for each row.
            assert isinstance(param, tuple)
            qscale = param[1]
            qbias = param[2]
            param = param[0]
        # sanity-check local shard dimensions against the table metadata
        assert embedding_table.local_rows == param.size(0)  # pyre-ignore[16]
        if qscale is not None:
            assert embedding_table.local_cols == param.size(1)  # pyre-ignore[16]
        if embedding_table.global_metadata is not None and pg is not None:
            # set additional field of sharded tensor based on local tensor properties
            embedding_table.global_metadata.tensor_properties.dtype = (
                param.dtype  # pyre-ignore[16]
            )
            embedding_table.global_metadata.tensor_properties.requires_grad = (
                param.requires_grad  # pyre-ignore[16]
            )
            key_to_global_metadata[key] = embedding_table.global_metadata
            key_to_local_shards[key].append(
                # pyre-fixme[6]: For 1st argument expected `Tensor` but got
                # `Union[Module, Tensor]`.
                # pyre-fixme[6]: For 2nd argument expected `ShardMetadata` but got
                # `Optional[ShardMetadata]`.
                Shard(param, embedding_table.local_metadata)
            )
        else:
            # no global metadata / no pg: store the raw tensor (and quant extras)
            destination[key] = param
            if qscale is not None:
                destination[f"{key}_qscale"] = qscale
            if qbias is not None:
                destination[f"{key}_qbias"] = qbias
    if pg is not None:
        # Populate the remaining destinations that have a global metadata
        for key in key_to_local_shards:
            global_metadata = key_to_global_metadata[key]
            destination[key] = (
                ShardedTensor._init_from_local_shards_and_global_metadata(
                    local_shards=key_to_local_shards[key],
                    sharded_tensor_metadata=global_metadata,
                    process_group=pg,
                )
            )
    return destination
9,078 | import copy
import logging
from typing import Any, Dict, Iterator, List, Optional, Tuple
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
rounded_row_size_in_bytes,
)
from torchrec.distributed.batched_embedding_kernel import (
BaseBatchedEmbedding,
BaseBatchedEmbeddingBag,
BatchedDenseEmbedding,
BatchedDenseEmbeddingBag,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding
from torchrec.distributed.embedding_types import (
compute_kernel_to_embedding_location,
GroupedEmbeddingConfig,
)
from torchrec.distributed.fused_params import (
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
tbe_fused_params,
TBEToRegisterMixIn,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class GroupedEmbeddingConfig:
    """Configuration for a group of embedding tables fused into one kernel."""
    data_type: DataType
    pooling: PoolingType
    is_weighted: bool
    has_feature_processor: bool
    compute_kernel: EmbeddingComputeKernel
    embedding_tables: List[ShardedEmbeddingTable]
    fused_params: Optional[Dict[str, Any]] = None
    def feature_hash_sizes(self) -> List[int]:
        """Per-feature hash size: each table's row count repeated per feature."""
        return [
            table.num_embeddings
            for table in self.embedding_tables
            for _ in range(table.num_features())
        ]
    def num_features(self) -> int:
        """Total feature count over all grouped tables."""
        return sum(table.num_features() for table in self.embedding_tables)
    def dim_sum(self) -> int:
        """Sum of output dims: each table's local cols times its feature count."""
        return sum(
            table.num_features() * table.local_cols
            for table in self.embedding_tables
        )
    def table_names(self) -> List[str]:
        return [table.name for table in self.embedding_tables]
    def feature_names(self) -> List[str]:
        return [
            name for table in self.embedding_tables for name in table.feature_names
        ]
    def embedding_dims(self) -> List[int]:
        """Per-feature output dim (table's local cols, repeated per feature)."""
        return [
            table.local_cols
            for table in self.embedding_tables
            for _ in range(table.num_features())
        ]
    def embedding_names(self) -> List[str]:
        return [
            name for table in self.embedding_tables for name in table.embedding_names
        ]
    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
        """Local shard metadata, repeated once per feature of each table."""
        return [
            table.local_metadata
            for table in self.embedding_tables
            for _ in table.feature_names
        ]
def _copy_config(
    original: GroupedEmbeddingConfig,
    data_type: DataType,
    sparse_type: SparseType,
    device: torch.device,
) -> GroupedEmbeddingConfig:
    """Deep-copy ``original`` and rewrite its shard metadata for quantized storage.

    Column sizes are re-expressed in bytes via ``rounded_row_size_in_bytes`` for
    both local and global shard metadata; the original config is left untouched.
    """
    # Adjust config to quantized version.
    # This obviously doesn't work for column-wise sharding.
    config = copy.deepcopy(original)
    config.data_type = data_type
    for table in config.embedding_tables:
        # CUDA kernels require 16-byte row alignment; CPU has no such constraint
        row_alignment = 16 if device.type == "cuda" else 1
        table.local_cols = rounded_row_size_in_bytes(
            table.local_cols, sparse_type, row_alignment
        )
        if table.local_metadata is not None:
            table.local_metadata.shard_sizes = [
                table.local_rows,
                table.local_cols,
            ]
        global_metadata = table.global_metadata
        if global_metadata is not None:
            # the local shard's size was already updated above; convert the rest
            for shard_meta in global_metadata.shards_metadata:
                if shard_meta != table.local_metadata:
                    shard_meta.shard_sizes = [
                        shard_meta.shard_sizes[0],
                        rounded_row_size_in_bytes(
                            shard_meta.shard_sizes[1], sparse_type, row_alignment
                        ),
                    ]
            # recompute the global width as the sum of all shard widths
            global_metadata.size = torch.Size(
                [
                    global_metadata.size[0],
                    sum(
                        shard_meta.shard_sizes[1]
                        for shard_meta in global_metadata.shards_metadata
                    ),
                ]
            )
    return config
9,079 | import copy
import logging
from typing import Any, Dict, Iterator, List, Optional, Tuple
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
rounded_row_size_in_bytes,
)
from torchrec.distributed.batched_embedding_kernel import (
BaseBatchedEmbedding,
BaseBatchedEmbeddingBag,
BatchedDenseEmbedding,
BatchedDenseEmbeddingBag,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding
from torchrec.distributed.embedding_types import (
compute_kernel_to_embedding_location,
GroupedEmbeddingConfig,
)
from torchrec.distributed.fused_params import (
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
tbe_fused_params,
TBEToRegisterMixIn,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
# Bit width of each supported embedding weight data type; used to size
# row-wise quantized storage.
DATA_TYPE_NUM_BITS: Dict[DataType, int] = {
    DataType.FP32: 32,
    DataType.FP16: 16,
    DataType.BF16: 16,
    DataType.INT8: 8,
    DataType.UINT8: 8,
    DataType.INT4: 4,
    DataType.INT2: 2,
}
def _quantize_weight(
    state_dict: Dict[str, torch.Tensor],
    data_type: DataType,
) -> List[Tuple[torch.Tensor, Optional[torch.Tensor]]]:
    """Row-wise quantize every fp32/fp16 weight in ``state_dict``.

    The fbgemm kernel appends 4 bytes of per-row scale+shift (2 x fp16) to each
    quantized row; those trailing columns are split off so each result is a
    (quantized weight, scale_shift) tuple, in ``state_dict`` iteration order.

    Raises:
        Exception: if a weight has a dtype other than float32/float16.
    """
    quant_weight_list = []
    for weight in state_dict.values():
        if weight.dtype in (torch.float, torch.float16):
            quantized_weights = (
                torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
                    weight, DATA_TYPE_NUM_BITS[data_type]
                )
            )
        else:
            # BUG FIX: the message was a plain literal, so "{weight.dtype}" was
            # never interpolated — make it an f-string so the offending dtype
            # actually appears in the error.
            raise Exception(f"Unsupported dtype: {weight.dtype}")

        # weight and 4 byte scale shift (2xfp16)
        quant_weight = quantized_weights[:, :-4]
        scale_shift = quantized_weights[:, -4:]

        quant_weight_list.append((quant_weight, scale_shift))
    return quant_weight_list
9,080 | import copy
import logging
from typing import Any, Dict, Iterator, List, Optional, Tuple
import torch
import torch.distributed as dist
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
rounded_row_size_in_bytes,
)
from torchrec.distributed.batched_embedding_kernel import (
BaseBatchedEmbedding,
BaseBatchedEmbeddingBag,
BatchedDenseEmbedding,
BatchedDenseEmbeddingBag,
)
from torchrec.distributed.embedding_kernel import BaseEmbedding
from torchrec.distributed.embedding_types import (
compute_kernel_to_embedding_location,
GroupedEmbeddingConfig,
)
from torchrec.distributed.fused_params import (
is_fused_param_quant_state_dict_split_scale_bias,
is_fused_param_register_tbe,
tbe_fused_params,
TBEToRegisterMixIn,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_configs import (
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
# pyre-ignore
# NOTE(review): signature-only stub of torchrec.sparse.jagged_tensor's
# KeyedJaggedTensor — method bodies were elided (extraction artifact), so
# this block is not executable as-is. Code kept byte-identical; only this
# comment added.
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
    def from_offsets_sync(
        keys: List[str],
        values: torch.Tensor,
        offsets: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
    def from_lengths_sync(
        keys: List[str],
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "KeyedJaggedTensor":
    def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
    def sync(self) -> "KeyedJaggedTensor":
    def unsync(self) -> "KeyedJaggedTensor":
    def device(self) -> torch.device:
    def lengths(self) -> torch.Tensor:
    def lengths_or_none(self) -> Optional[torch.Tensor]:
    def offsets(self) -> torch.Tensor:
    def offsets_or_none(self) -> Optional[torch.Tensor]:
    def keys(self) -> List[str]:
    def values(self) -> torch.Tensor:
    def weights(self) -> torch.Tensor:
    def weights_or_none(self) -> Optional[torch.Tensor]:
    def stride(self) -> int:
    def stride_per_key(self) -> List[int]:
    def stride_per_key_per_rank(self) -> List[List[int]]:
    def variable_stride_per_key(self) -> bool:
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
    def _key_indices(self) -> Dict[str, int]:
    def length_per_key(self) -> List[int]:
    def length_per_key_or_none(self) -> Optional[List[int]]:
    def offset_per_key(self) -> List[int]:
    def offset_per_key_or_none(self) -> Optional[List[int]]:
    def lengths_offset_per_key(self) -> List[int]:
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
    def flatten_lengths(self) -> "KeyedJaggedTensor":
    def __getitem__(self, key: str) -> JaggedTensor:
    def to_dict(self) -> Dict[str, JaggedTensor]:
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
    def __str__(self) -> str:
    def pin_memory(self) -> "KeyedJaggedTensor":
    def dist_labels(self) -> List[str]:
    def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
    def dist_tensors(self) -> List[torch.Tensor]:
    def dist_init(
        keys: List[str],
        tensors: List[torch.Tensor],
        variable_stride_per_key: bool,
        num_workers: int,
        recat: Optional[torch.Tensor],
        stride_per_rank: Optional[List[int]],
        stagger: int = 1,
    ) -> "KeyedJaggedTensor":
def _unwrap_kjt(
    features: KeyedJaggedTensor,
) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
    """Unpack a KJT into ``(values, offsets, optional weights)``.

    On CUDA devices the values and offsets are narrowed to int32; on every
    other device they are returned as-is.
    """
    values = features.values()
    offsets = features.offsets()
    weights = features.weights_or_none()
    if features.device().type == "cuda":
        values = values.int()
        offsets = offsets.int()
    return values, offsets, weights
9,081 | import argparse
import copy
import logging
import os
import time
from functools import partial
from typing import List, Optional, Tuple
import torch
from torchrec.distributed.benchmark.benchmark_utils import (
benchmark_module,
BenchmarkResult,
CompileMode,
DLRM_NUM_EMBEDDINGS_PER_FEATURE,
EMBEDDING_DIM,
get_tables,
init_argparse_and_args,
write_report,
)
from torchrec.distributed.embedding_types import EmbeddingComputeKernel, ShardingType
from torchrec.distributed.test_utils.test_model import TestEBCSharder
from torchrec.distributed.types import DataType
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
# Sharding schemes exercised by this benchmark.
BENCH_SHARDING_TYPES: List[ShardingType] = [
    ShardingType.TABLE_WISE,
    ShardingType.ROW_WISE,
    ShardingType.COLUMN_WISE,
]
# Compilation modes exercised; FX_SCRIPT is deliberately disabled here.
BENCH_COMPILE_MODES: List[CompileMode] = [
    CompileMode.EAGER,
    # CompileMode.FX_SCRIPT,
]
def training_func_to_benchmark(
model: torch.nn.Module,
bench_inputs: List[KeyedJaggedTensor],
optimizer: Optional[torch.optim.Optimizer],
) -> None:
for bench_input in bench_inputs:
pooled_embeddings = model(bench_input)
vals = []
for _name, param in pooled_embeddings.to_dict().items():
vals.append(param)
torch.cat(vals, dim=1).sum().backward()
if optimizer:
optimizer.step()
optimizer.zero_grad()
class BenchmarkResult:
    "Class for holding results of benchmark runs"
    # NOTE(review): plain class-level annotations with no visible @dataclass
    # decorator, so instances must set these attributes themselves;
    # presumably declared as a dataclass at the source — confirm.
    short_name: str  # label identifying the (compile mode, sharding type) run
    elapsed_time: torch.Tensor  # per-iteration durations; treated as ms downstream
    max_mem_allocated: List[int]  # peak memory per rank, in MB
    rank: int = -1  # producing rank; -1 when not rank-specific
def get_tables(
    table_sizes: List[Tuple[int, int]],
    is_pooled: bool = True,
    data_type: DataType = DataType.INT8,
) -> Union[List[EmbeddingBagConfig], List[EmbeddingConfig]]:
    """Build one embedding-table config per ``(num_embeddings, embedding_dim)``.

    Table ``i`` is named ``table_i`` with a single feature ``feature_i``.
    Returns ``EmbeddingBagConfig`` entries when ``is_pooled`` is True,
    otherwise ``EmbeddingConfig`` entries.
    """
    # Both branches build identical kwargs; only the config class differs.
    config_cls = EmbeddingBagConfig if is_pooled else EmbeddingConfig
    return [
        config_cls(
            num_embeddings=rows,
            embedding_dim=dim,
            name="table_" + str(idx),
            feature_names=["feature_" + str(idx)],
            data_type=data_type,
        )
        for idx, (rows, dim) in enumerate(table_sizes)
    ]
def benchmark_module(
    module: torch.nn.Module,
    sharder: ModuleSharder[T],
    sharding_types: List[ShardingType],
    compile_modes: List[CompileMode],
    tables: Union[List[EmbeddingBagConfig], List[EmbeddingConfig]],
    warmup_iters: int = 20,
    bench_iters: int = 500,
    prof_iters: int = 20,
    batch_size: int = 2048,
    world_size: int = 2,
    num_benchmarks: int = 5,
    output_dir: str = "",
    func_to_benchmark: Callable[..., None] = default_func_to_benchmark,
    benchmark_func_kwargs: Optional[Dict[str, Any]] = None,
) -> List[BenchmarkResult]:
    """
    Benchmark ``module`` once per (sharding_type, compile_mode) combination.

    Args:
        module: eager mode module to be benchmarked
        sharder: test sharder; its ``_sharding_type`` is overwritten per run
        sharding_types: Sharding types to be benchmarked
        compile_modes: Compilation modes to be benchmarked
        tables: embedding table configs backing ``module``
        warmup_iters: Number of iterations to run before profiling
        bench_iters: Number of iterations to run during profiling
        prof_iters: Number of iterations to run after profiling
        batch_size: Batch size used in the model
        world_size: number of ranks used when benchmarking a trainable module
        num_benchmarks: How many times to run over benchmark inputs for statistics
        output_dir: Directory to output profiler outputs (traces, stacks)
        func_to_benchmark: callable executed and timed against the sharded module
        benchmark_func_kwargs: extra kwargs forwarded to ``func_to_benchmark``
    Returns:
        A list of BenchmarkResults, one per (sharding_type, compile_mode) pair
    """
    # logging.info(f"###### Benchmarking Module: {eager_module} ######\n")
    logging.info(f"Warmup iterations: {warmup_iters}")
    logging.info(f"Benchmark iterations: {bench_iters}")
    logging.info(f"Profile iterations: {prof_iters}")
    logging.info(f"Batch Size: {batch_size}")
    logging.info(f"World Size: {world_size}")
    logging.info(f"Number of Benchmarks: {num_benchmarks}")
    logging.info(f"Output Directory: {output_dir}")
    assert (
        num_benchmarks > 2
    ), "num_benchmarks needs to be greater than 2 for statistical analysis"
    # Quantized modules are benchmarked in-process (inference); everything
    # else is trained across `world_size` processes.
    if isinstance(module, QuantEmbeddingBagCollection) or isinstance(
        module, QuantEmbeddingCollection
    ):
        train = False
    else:
        train = True
    benchmark_results: List[BenchmarkResult] = []
    # EBCWrapper/ECWrapper are defined elsewhere in this file; presumably
    # they adapt the module to a uniform KJT-in forward — confirm there.
    if isinstance(tables[0], EmbeddingBagConfig):
        wrapped_module = EBCWrapper(module)
    else:
        wrapped_module = ECWrapper(module)
    for sharding_type in sharding_types:
        for compile_mode in compile_modes:
            # Test sharders should have a singular sharding_type
            # pyre-ignore [16]
            sharder._sharding_type = sharding_type.value
            benchmark_type = benchmark_type_name(compile_mode, sharding_type)
            logging.info(
                f"\n\n###### Running Benchmark Type: {benchmark_type} ######\n"
            )
            if train:
                res = multi_process_benchmark(
                    # pyre-ignore[6]
                    callable=init_module_and_run_benchmark,
                    module=wrapped_module,
                    sharder=sharder,
                    # TODO: GPU hardcode for now, expand if needed for heter hardware
                    device=torch.device("cuda:0"),
                    sharding_type=sharding_type,
                    compile_mode=compile_mode,
                    world_size=world_size,
                    batch_size=batch_size,
                    warmup_iters=warmup_iters,
                    bench_iters=bench_iters,
                    prof_iters=prof_iters,
                    tables=tables,
                    num_benchmarks=num_benchmarks,
                    output_dir=output_dir,
                    func_to_benchmark=func_to_benchmark,
                    benchmark_func_kwargs=benchmark_func_kwargs,
                )
            else:
                res = init_module_and_run_benchmark(
                    module=wrapped_module,
                    sharder=sharder,
                    # TODO: GPU hardcode for now, expand if needed for heter hardware
                    device=torch.device("cuda:0"),
                    sharding_type=sharding_type,
                    compile_mode=compile_mode,
                    world_size=world_size,
                    batch_size=batch_size,
                    warmup_iters=warmup_iters,
                    bench_iters=bench_iters,
                    prof_iters=prof_iters,
                    tables=tables,
                    num_benchmarks=num_benchmarks,
                    output_dir=output_dir,
                    func_to_benchmark=func_to_benchmark,
                    benchmark_func_kwargs=benchmark_func_kwargs,
                )
            # Reclaim memory between configurations before the next run.
            gc.collect()
            benchmark_results.append(res)
    return benchmark_results
class EmbeddingComputeKernel(Enum):
    """Names of the available embedding compute kernels.

    NOTE(review): UVM variants presumably place weights in unified/host
    memory and the *_CACHING variants add a device-side cache — confirm
    against the fbgemm TBE documentation.
    """

    DENSE = "dense"
    FUSED = "fused"
    FUSED_UVM = "fused_uvm"
    FUSED_UVM_CACHING = "fused_uvm_caching"
    QUANT = "quant"
    QUANT_UVM = "quant_uvm"
    QUANT_UVM_CACHING = "quant_uvm_caching"
class TestEBCSharder(EmbeddingBagCollectionSharder):
    """Test sharder that pins an EmbeddingBagCollection to exactly one
    sharding type and one compute kernel.

    FIX: the method descriptions below were free-floating class-level string
    statements (no-op expressions placed between methods); they are now real
    docstrings on the methods they describe.
    """

    def __init__(
        self,
        sharding_type: str,
        kernel_type: str,
        fused_params: Optional[Dict[str, Any]] = None,
        qcomm_codecs_registry: Optional[Dict[str, QuantizedCommCodecs]] = None,
    ) -> None:
        if fused_params is None:
            fused_params = {}
        self._sharding_type = sharding_type
        self._kernel_type = kernel_type
        super().__init__(fused_params, qcomm_codecs_registry)

    def sharding_types(self, compute_device_type: str) -> List[str]:
        """Restricts sharding to single type only."""
        return [self._sharding_type]

    def compute_kernels(
        self, sharding_type: str, compute_device_type: str
    ) -> List[str]:
        """Restricts to single impl."""
        return [self._kernel_type]
class EmbeddingBagCollection(EmbeddingBagCollectionInterface):
    """
    EmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`).
    It processes sparse data in the form of `KeyedJaggedTensor` with values of the form
    [F X B X L] where:
    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)
    and outputs a `KeyedTensor` with values of the form [B * (F * D)] where:
    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size
    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        device (Optional[torch.device]): default compute device.
    Example::
        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
        )
        ebc = EmbeddingBagCollection(tables=[table_0, table_1])
        #        0       1        2  <-- batch
        # "f1"   [0,1] None    [2]
        # "f2"   [3]    [4]    [5,6,7]
        #  ^
        # feature
        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )
        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        tensor([[-0.8899, -0.1342, -1.9060, -0.0905, -0.2814, -0.9369, -0.7783],
            [ 0.0000,  0.0000,  0.0000,  0.1598,  0.0695,  1.3265, -0.1011],
            [-0.4256, -1.1846, -2.1648, -1.0893,  0.3590, -1.9784, -0.7681]],
            grad_fn=<CatBackward0>)
        print(pooled_embeddings.keys())
        ['f1', 'f2']
        print(pooled_embeddings.offset_per_key())
        tensor([0, 3, 7])
    """

    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self._is_weighted = is_weighted
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        self._embedding_bag_configs = tables
        self._lengths_per_embedding: List[int] = []
        self._dtypes: List[int] = []
        table_names = set()
        for embedding_config in tables:
            if embedding_config.name in table_names:
                raise ValueError(f"Duplicate table name {embedding_config.name}")
            table_names.add(embedding_config.name)
            # nn.EmbeddingBag only supports fp32/fp16 here; anything that is
            # not FP32 is materialized as fp16.
            dtype = (
                torch.float32
                if embedding_config.data_type == DataType.FP32
                else torch.float16
            )
            self.embedding_bags[embedding_config.name] = nn.EmbeddingBag(
                num_embeddings=embedding_config.num_embeddings,
                embedding_dim=embedding_config.embedding_dim,
                mode=pooling_type_to_str(embedding_config.pooling),
                device=device,
                include_last_offset=True,
                dtype=dtype,
            )
            # Adopt the first table's device when none was given.
            if device is None:
                device = self.embedding_bags[embedding_config.name].weight.device
            self._dtypes.append(embedding_config.data_type.value)
            if not embedding_config.feature_names:
                embedding_config.feature_names = [embedding_config.name]
            self._lengths_per_embedding.extend(
                len(embedding_config.feature_names) * [embedding_config.embedding_dim]
            )
        self._device: torch.device = device or torch.device("cpu")
        self._embedding_names: List[str] = [
            embedding
            for embeddings in get_embedding_names_by_table(tables)
            for embedding in embeddings
        ]
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]
        self.reset_parameters()

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].
        Returns:
            KeyedTensor
        """
        flat_feature_names: List[str] = []
        for names in self._feature_names:
            flat_feature_names.extend(names)
        inverse_indices = reorder_inverse_indices(
            inverse_indices=features.inverse_indices_or_none(),
            feature_names=flat_feature_names,
        )
        pooled_embeddings: List[torch.Tensor] = []
        feature_dict = features.to_dict()
        for i, embedding_bag in enumerate(self.embedding_bags.values()):
            for feature_name in self._feature_names[i]:
                f = feature_dict[feature_name]
                per_sample_weights: Optional[torch.Tensor] = None
                if self._is_weighted:
                    # fp16 tables need fp16 per-sample weights.
                    per_sample_weights = (
                        f.weights().half()
                        if self._dtypes[i] == DataType.FP16.value
                        else f.weights()
                    )
                res = embedding_bag(
                    input=f.values(),
                    offsets=f.offsets(),
                    # Already None when not weighted; the old
                    # `per_sample_weights if self._is_weighted else None`
                    # conditional was redundant.
                    per_sample_weights=per_sample_weights,
                ).float()
                pooled_embeddings.append(res)
        return KeyedTensor(
            keys=self._embedding_names,
            values=process_pooled_embeddings(
                pooled_embeddings=pooled_embeddings,
                inverse_indices=inverse_indices,
            ),
            length_per_key=self._lengths_per_embedding,
        )

    def is_weighted(self) -> bool:
        return self._is_weighted

    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        return self._embedding_bag_configs

    def device(self) -> torch.device:
        return self._device

    def reset_parameters(self) -> None:
        """Re-initialize every table's weights with its configured init_fn."""
        # BUG FIX: `self.device` is a plain method here (not a property), so
        # the old `isinstance(self.device, torch.device)` / string checks were
        # always False and the meta-device guard never fired. Check the
        # underlying attribute instead; it is always a torch.device.
        if self._device.type == "meta":
            return
        # Initialize embedding bags weights with init_fn
        for table_config in self._embedding_bag_configs:
            assert table_config.init_fn is not None
            param = self.embedding_bags[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)
def benchmark_ebc(
    tables: List[Tuple[int, int]], args: argparse.Namespace, output_dir: str
) -> List[BenchmarkResult]:
    """Benchmark a dense-kernel FP32 EmbeddingBagCollection with SGD training
    across the configured sharding and compile modes."""
    table_configs = get_tables(tables, data_type=DataType.FP32)
    # The sharding type is filled in per-iteration by benchmark_module.
    sharder = TestEBCSharder(
        sharding_type="",
        kernel_type=EmbeddingComputeKernel.DENSE.value,
    )
    module = EmbeddingBagCollection(
        # pyre-ignore [6]
        tables=table_configs,
        is_weighted=False,
        device=torch.device("cpu"),
    )
    optimizer = torch.optim.SGD(module.parameters(), lr=0.02)
    # Forward every public CLI flag; output_dir is excluded because the
    # caller already passes an adjusted one explicitly.
    args_kwargs = {}
    for arg_name in dir(args):
        if arg_name.startswith("_") or arg_name == "output_dir":
            continue
        args_kwargs[arg_name] = getattr(args, arg_name)
    return benchmark_module(
        module=module,
        sharder=sharder,
        sharding_types=BENCH_SHARDING_TYPES,
        compile_modes=BENCH_COMPILE_MODES,
        tables=table_configs,
        output_dir=output_dir,
        func_to_benchmark=training_func_to_benchmark,
        benchmark_func_kwargs={"optimizer": optimizer},
        **args_kwargs,
    )
9,082 | import argparse
import copy
import logging
import os
import time
from functools import partial
from typing import List, Optional, Tuple
import torch
from torchrec.distributed.benchmark.benchmark_utils import (
benchmark_module,
BenchmarkResult,
CompileMode,
DLRM_NUM_EMBEDDINGS_PER_FEATURE,
EMBEDDING_DIM,
get_tables,
init_argparse_and_args,
write_report,
)
from torchrec.distributed.embedding_types import EmbeddingComputeKernel, ShardingType
from torchrec.distributed.test_utils.test_model import TestEBCSharder
from torchrec.distributed.types import DataType
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
def main() -> None:
    """Run the EBC benchmark end to end: parse args, size the tables, run
    each module's benchmark, then emit one report per module."""
    args: argparse.Namespace = init_argparse_and_args()
    # Total requests across all timed iterations; the QPS denominator.
    num_requests = args.bench_iters * args.batch_size * args.num_benchmarks
    datetime_sfx: str = time.strftime("%Y%m%dT%H%M%S")
    output_dir = args.output_dir
    if not os.path.exists(output_dir):
        # Create output directory if not exist
        os.mkdir(output_dir)
    benchmark_results_per_module = []
    write_report_funcs_per_module = []
    # Cap each table at 1M rows (TABLE_SIZES is defined elsewhere in this
    # file); presumably to bound benchmark memory — confirm.
    shrunk_table_sizes = []
    for i in range(len(TABLE_SIZES)):
        if TABLE_SIZES[i][0] > 1000000:
            shrunk_table_sizes.append((1000000, TABLE_SIZES[i][1]))
        else:
            shrunk_table_sizes.append(TABLE_SIZES[i])
    for module_name in ["EmbeddingBagCollection"]:
        # Each module gets its own timestamped output directory.
        output_dir = args.output_dir + f"/run_{datetime_sfx}"
        output_dir += "_ebc"
        benchmark_func = benchmark_ebc
        if not os.path.exists(output_dir):
            # Place all outputs under the datetime folder
            os.mkdir(output_dir)
        tables_info = "\nTABLE SIZES:"
        for i, (num, dim) in enumerate(shrunk_table_sizes):
            # FP32 is 4 bytes
            # NOTE(review): truncates num*dim/1MiB *before* multiplying by 4,
            # so the printed size is slightly low; display-only.
            mb = int(float(num * dim) / 1024 / 1024) * 4
            tables_info += f"\nTABLE[{i}][{num:9}, {dim:4}] {mb:6}Mb"
        report: str = (
            f"REPORT BENCHMARK {datetime_sfx} world_size:{args.world_size} batch_size:{args.batch_size}\n"
        )
        report += f"Module: {module_name}\n"
        report += tables_info
        report += "\n"
        report += f"num_requests:{num_requests:8}\n"
        report_file: str = f"{output_dir}/run.report"
        # Save results to output them once benchmarking is all done
        benchmark_results_per_module.append(
            benchmark_func(shrunk_table_sizes, args, output_dir)
        )
        write_report_funcs_per_module.append(
            partial(
                write_report,
                report_file=report_file,
                report_str=report,
                num_requests=num_requests,
            )
        )
    # Emit all reports only after every benchmark has finished.
    for i, write_report_func in enumerate(write_report_funcs_per_module):
        write_report_func(benchmark_results_per_module[i])
def invoke_main() -> None:
    """Script entry point: enable DEBUG logging on the root logger, then
    delegate to main()."""
    logging.basicConfig()
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    main()
9,083 | import argparse
import contextlib
import copy
import gc
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import (
Any,
Callable,
ContextManager,
Dict,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import torch
from torch import multiprocessing as mp
from torch.autograd.profiler import record_function
from torchrec.distributed import DistributedModelParallel
from torchrec.distributed.embedding_types import ShardingType
from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.shard_estimators import (
EmbeddingPerfEstimator,
EmbeddingStorageEstimator,
)
from torchrec.distributed.shard import _shard_modules
from torchrec.distributed.test_utils.multi_process import MultiProcessContext
from torchrec.distributed.test_utils.test_model import ModelInput
from torchrec.distributed.types import DataType, ModuleSharder, ShardingEnv
from torchrec.fx import symbolic_trace
from torchrec.modules.embedding_configs import EmbeddingBagConfig, EmbeddingConfig
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
EmbeddingCollection as QuantEmbeddingCollection,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.test_utils import get_free_port
# Root logger used for module-level reporting.
logger: logging.Logger = logging.getLogger()
class BenchmarkResult:
    "Class for holding results of benchmark runs"
    # NOTE(review): plain class-level annotations with no visible @dataclass
    # decorator, so instances must set these attributes themselves;
    # presumably declared as a dataclass at the source — confirm.
    short_name: str  # label identifying the (compile mode, sharding type) run
    elapsed_time: torch.Tensor  # per-iteration durations; write_report treats as ms
    max_mem_allocated: List[int]  # peak memory per rank, in MB
    rank: int = -1  # producing rank; -1 when not rank-specific
def write_report(
    benchmark_results: List[BenchmarkResult],
    report_file: str,
    report_str: str,
    num_requests: int,
) -> None:
    """Append per-benchmark QPS/latency/memory lines to ``report_str`` and
    write the whole report to ``report_file`` (also logged)."""
    for result in benchmark_results:
        avg_dur_s = result.elapsed_time.mean().item() * 1e-3  # time in seconds
        std_dur_s = result.elapsed_time.std().item() * 1e-3  # time in seconds
        qps = int(num_requests / avg_dur_s)
        mem_str = ""
        for rank, mem_mb in enumerate(result.max_mem_allocated):
            mem_str += f"Rank {rank}: {mem_mb:7}mb  "
        report_str += f"{result.short_name:40} Avg QPS:{qps:10} Avg Duration: {int(1000*avg_dur_s):5}"
        report_str += f"ms Standard Dev Duration: {(1000*std_dur_s):.2f}ms\n"
        report_str += f"\tMemory Allocated Per Rank:\n\t{mem_str}\n"
    with open(report_file, "w") as f:
        f.write(report_str)
    logger.info(f"Report written to {report_file}:\n{report_str}")
9,084 | import argparse
import contextlib
import copy
import gc
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import (
Any,
Callable,
ContextManager,
Dict,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import torch
from torch import multiprocessing as mp
from torch.autograd.profiler import record_function
from torchrec.distributed import DistributedModelParallel
from torchrec.distributed.embedding_types import ShardingType
from torchrec.distributed.planner import EmbeddingShardingPlanner, Topology
from torchrec.distributed.planner.enumerators import EmbeddingEnumerator
from torchrec.distributed.planner.shard_estimators import (
EmbeddingPerfEstimator,
EmbeddingStorageEstimator,
)
from torchrec.distributed.shard import _shard_modules
from torchrec.distributed.test_utils.multi_process import MultiProcessContext
from torchrec.distributed.test_utils.test_model import ModelInput
from torchrec.distributed.types import DataType, ModuleSharder, ShardingEnv
from torchrec.fx import symbolic_trace
from torchrec.modules.embedding_configs import EmbeddingBagConfig, EmbeddingConfig
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
EmbeddingCollection as QuantEmbeddingCollection,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.test_utils import get_free_port
def init_argparse_and_args() -> argparse.Namespace:
    """Build the benchmark CLI and parse ``sys.argv`` into a Namespace."""
    parser = argparse.ArgumentParser()
    # (flag, value type, default) for every supported option, in the order
    # they should appear in --help.
    option_specs = [
        ("--warmup_iters", int, 20),
        ("--bench_iters", int, 500),
        ("--prof_iters", int, 20),
        ("--batch_size", int, 2048),
        ("--world_size", int, 2),
        ("--output_dir", str, "/var/tmp/torchrec-bench"),
        ("--num_benchmarks", int, 5),
    ]
    for flag, value_type, default in option_specs:
        parser.add_argument(flag, type=value_type, default=default)
    return parser.parse_args()
9,085 | import argparse
import logging
import os
import time
from functools import partial
from typing import List, Tuple
import torch
from torchrec.distributed.benchmark.benchmark_utils import (
benchmark_module,
BenchmarkResult,
CompileMode,
DLRM_NUM_EMBEDDINGS_PER_FEATURE,
EMBEDDING_DIM,
get_tables,
init_argparse_and_args,
write_report,
)
from torchrec.distributed.embedding_types import EmbeddingComputeKernel, ShardingType
from torchrec.distributed.test_utils.infer_utils import (
TestQuantEBCSharder,
TestQuantECSharder,
)
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
EmbeddingCollection as QuantEmbeddingCollection,
)
# Sharding schemes exercised by this benchmark.
BENCH_SHARDING_TYPES: List[ShardingType] = [
    ShardingType.TABLE_WISE,
    ShardingType.ROW_WISE,
    ShardingType.COLUMN_WISE,
]
# Both eager and torch.fx-scripted execution are benchmarked here.
BENCH_COMPILE_MODES: List[CompileMode] = [
    CompileMode.EAGER,
    CompileMode.FX_SCRIPT,
]
# (rows, dim) per table: one table per DLRM feature, all with EMBEDDING_DIM.
TABLE_SIZES: List[Tuple[int, int]] = [
    (num_embeddings, EMBEDDING_DIM)
    for num_embeddings in DLRM_NUM_EMBEDDINGS_PER_FEATURE
]
# NOTE(review): the following are signature-only stubs (bodies elided by
# extraction) of definitions that appear in full elsewhere in this file;
# this region is not executable as-is. Code kept byte-identical; only this
# comment added.
class BenchmarkResult:
def get_tables(
    table_sizes: List[Tuple[int, int]],
    is_pooled: bool = True,
    data_type: DataType = DataType.INT8,
) -> Union[List[EmbeddingBagConfig], List[EmbeddingConfig]]:
def benchmark_module(
    module: torch.nn.Module,
    sharder: ModuleSharder[T],
    sharding_types: List[ShardingType],
    compile_modes: List[CompileMode],
    tables: Union[List[EmbeddingBagConfig], List[EmbeddingConfig]],
    warmup_iters: int = 20,
    bench_iters: int = 500,
    prof_iters: int = 20,
    batch_size: int = 2048,
    world_size: int = 2,
    num_benchmarks: int = 5,
    output_dir: str = "",
    func_to_benchmark: Callable[..., None] = default_func_to_benchmark,
    benchmark_func_kwargs: Optional[Dict[str, Any]] = None,
) -> List[BenchmarkResult]:
class EmbeddingComputeKernel(Enum):
class TestQuantECSharder(QuantEmbeddingCollectionSharder):
    def __init__(
        self,
        sharding_type: str,
        kernel_type: str,
        fused_params: Optional[Dict[str, Any]] = None,
        shardable_params: Optional[List[str]] = None,
    ) -> None:
    def sharding_types(self, compute_device_type: str) -> List[str]:
    def compute_kernels(
        self, sharding_type: str, compute_device_type: str
    ) -> List[str]:
    def shard(
        self,
        module: QuantEmbeddingCollection,
        params: Dict[str, ParameterSharding],
        env: ShardingEnv,
        device: Optional[torch.device] = None,
    ) -> ShardedQuantEmbeddingCollection:
# pyre-ignore
def benchmark_qec(args: argparse.Namespace, output_dir: str) -> List[BenchmarkResult]:
    """Benchmark a quantized (inference) EmbeddingCollection across the
    configured sharding and compile modes."""
    table_configs = get_tables(TABLE_SIZES, is_pooled=False)
    # The sharding type is filled in per-iteration by benchmark_module.
    sharder = TestQuantECSharder(
        sharding_type="",
        kernel_type=EmbeddingComputeKernel.QUANT.value,
        shardable_params=[config.name for config in table_configs],
    )
    module = QuantEmbeddingCollection(
        # pyre-ignore [6]
        tables=table_configs,
        device=torch.device("cpu"),
        quant_state_dict_split_scale_bias=True,
    )
    # Forward every public CLI flag; output_dir is excluded because the
    # caller already passes an adjusted one explicitly.
    args_kwargs = {
        name: getattr(args, name)
        for name in dir(args)
        if not name.startswith("_") and name != "output_dir"
    }
    return benchmark_module(
        module=module,
        sharder=sharder,
        sharding_types=BENCH_SHARDING_TYPES,
        compile_modes=BENCH_COMPILE_MODES,
        tables=table_configs,
        output_dir=output_dir,
        **args_kwargs,
    )
9,086 | import argparse
import logging
import os
import time
from functools import partial
from typing import List, Tuple
import torch
from torchrec.distributed.benchmark.benchmark_utils import (
benchmark_module,
BenchmarkResult,
CompileMode,
DLRM_NUM_EMBEDDINGS_PER_FEATURE,
EMBEDDING_DIM,
get_tables,
init_argparse_and_args,
write_report,
)
from torchrec.distributed.embedding_types import EmbeddingComputeKernel, ShardingType
from torchrec.distributed.test_utils.infer_utils import (
TestQuantEBCSharder,
TestQuantECSharder,
)
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
EmbeddingCollection as QuantEmbeddingCollection,
)
# Sharding schemes exercised by this benchmark.
BENCH_SHARDING_TYPES: List[ShardingType] = [
    ShardingType.TABLE_WISE,
    ShardingType.ROW_WISE,
    ShardingType.COLUMN_WISE,
]
# Both eager and torch.fx-scripted execution are benchmarked here.
BENCH_COMPILE_MODES: List[CompileMode] = [
    CompileMode.EAGER,
    CompileMode.FX_SCRIPT,
]
# (rows, dim) per table: one table per DLRM feature, all with EMBEDDING_DIM.
TABLE_SIZES: List[Tuple[int, int]] = [
    (num_embeddings, EMBEDDING_DIM)
    for num_embeddings in DLRM_NUM_EMBEDDINGS_PER_FEATURE
]
class BenchmarkResult:
    "Class for holding results of benchmark runs"
    # NOTE(review): plain class-level annotations with no visible @dataclass
    # decorator, so instances must set these attributes themselves;
    # presumably declared as a dataclass at the source — confirm.
    short_name: str  # label identifying the (compile mode, sharding type) run
    elapsed_time: torch.Tensor  # per-iteration durations; treated as ms downstream
    max_mem_allocated: List[int]  # peak memory per rank, in MB
    rank: int = -1  # producing rank; -1 when not rank-specific
def get_tables(
    table_sizes: List[Tuple[int, int]],
    is_pooled: bool = True,
    data_type: DataType = DataType.INT8,
) -> Union[List[EmbeddingBagConfig], List[EmbeddingConfig]]:
    """Build one embedding-table config per ``(num_embeddings, embedding_dim)``.

    Table ``i`` is named ``table_i`` with a single feature ``feature_i``.
    Returns ``EmbeddingBagConfig`` entries when ``is_pooled`` is True,
    otherwise ``EmbeddingConfig`` entries.
    """
    # Both branches build identical kwargs; only the config class differs.
    config_cls = EmbeddingBagConfig if is_pooled else EmbeddingConfig
    return [
        config_cls(
            num_embeddings=rows,
            embedding_dim=dim,
            name="table_" + str(idx),
            feature_names=["feature_" + str(idx)],
            data_type=data_type,
        )
        for idx, (rows, dim) in enumerate(table_sizes)
    ]
def benchmark_module(
    module: torch.nn.Module,
    sharder: ModuleSharder[T],
    sharding_types: List[ShardingType],
    compile_modes: List[CompileMode],
    tables: Union[List[EmbeddingBagConfig], List[EmbeddingConfig]],
    warmup_iters: int = 20,
    bench_iters: int = 500,
    prof_iters: int = 20,
    batch_size: int = 2048,
    world_size: int = 2,
    num_benchmarks: int = 5,
    output_dir: str = "",
    func_to_benchmark: Callable[..., None] = default_func_to_benchmark,
    benchmark_func_kwargs: Optional[Dict[str, Any]] = None,
) -> List[BenchmarkResult]:
    """
    Benchmark ``module`` once per (sharding_type, compile_mode) combination.

    Args:
        module: eager mode module to be benchmarked
        sharder: test sharder; its ``_sharding_type`` is overwritten per run
        sharding_types: Sharding types to be benchmarked
        compile_modes: Compilation modes to be benchmarked
        tables: embedding table configs backing ``module``
        warmup_iters: Number of iterations to run before profiling
        bench_iters: Number of iterations to run during profiling
        prof_iters: Number of iterations to run after profiling
        batch_size: Batch size used in the model
        world_size: number of ranks used when benchmarking a trainable module
        num_benchmarks: How many times to run over benchmark inputs for statistics
        output_dir: Directory to output profiler outputs (traces, stacks)
        func_to_benchmark: callable executed and timed against the sharded module
        benchmark_func_kwargs: extra kwargs forwarded to ``func_to_benchmark``
    Returns:
        A list of BenchmarkResults, one per (sharding_type, compile_mode) pair
    """
    # logging.info(f"###### Benchmarking Module: {eager_module} ######\n")
    logging.info(f"Warmup iterations: {warmup_iters}")
    logging.info(f"Benchmark iterations: {bench_iters}")
    logging.info(f"Profile iterations: {prof_iters}")
    logging.info(f"Batch Size: {batch_size}")
    logging.info(f"World Size: {world_size}")
    logging.info(f"Number of Benchmarks: {num_benchmarks}")
    logging.info(f"Output Directory: {output_dir}")
    assert (
        num_benchmarks > 2
    ), "num_benchmarks needs to be greater than 2 for statistical analysis"
    # Quantized modules are benchmarked in-process (inference); everything
    # else is trained across `world_size` processes.
    if isinstance(module, QuantEmbeddingBagCollection) or isinstance(
        module, QuantEmbeddingCollection
    ):
        train = False
    else:
        train = True
    benchmark_results: List[BenchmarkResult] = []
    # EBCWrapper/ECWrapper are defined elsewhere in this file; presumably
    # they adapt the module to a uniform KJT-in forward — confirm there.
    if isinstance(tables[0], EmbeddingBagConfig):
        wrapped_module = EBCWrapper(module)
    else:
        wrapped_module = ECWrapper(module)
    for sharding_type in sharding_types:
        for compile_mode in compile_modes:
            # Test sharders should have a singular sharding_type
            # pyre-ignore [16]
            sharder._sharding_type = sharding_type.value
            benchmark_type = benchmark_type_name(compile_mode, sharding_type)
            logging.info(
                f"\n\n###### Running Benchmark Type: {benchmark_type} ######\n"
            )
            if train:
                res = multi_process_benchmark(
                    # pyre-ignore[6]
                    callable=init_module_and_run_benchmark,
                    module=wrapped_module,
                    sharder=sharder,
                    # TODO: GPU hardcode for now, expand if needed for heter hardware
                    device=torch.device("cuda:0"),
                    sharding_type=sharding_type,
                    compile_mode=compile_mode,
                    world_size=world_size,
                    batch_size=batch_size,
                    warmup_iters=warmup_iters,
                    bench_iters=bench_iters,
                    prof_iters=prof_iters,
                    tables=tables,
                    num_benchmarks=num_benchmarks,
                    output_dir=output_dir,
                    func_to_benchmark=func_to_benchmark,
                    benchmark_func_kwargs=benchmark_func_kwargs,
                )
            else:
                res = init_module_and_run_benchmark(
                    module=wrapped_module,
                    sharder=sharder,
                    # TODO: GPU hardcode for now, expand if needed for heter hardware
                    device=torch.device("cuda:0"),
                    sharding_type=sharding_type,
                    compile_mode=compile_mode,
                    world_size=world_size,
                    batch_size=batch_size,
                    warmup_iters=warmup_iters,
                    bench_iters=bench_iters,
                    prof_iters=prof_iters,
                    tables=tables,
                    num_benchmarks=num_benchmarks,
                    output_dir=output_dir,
                    func_to_benchmark=func_to_benchmark,
                    benchmark_func_kwargs=benchmark_func_kwargs,
                )
            # Reclaim memory between configurations before the next run.
            gc.collect()
            benchmark_results.append(res)
    return benchmark_results
class EmbeddingComputeKernel(Enum):
    """Compute-kernel identifiers used when sharding embedding tables.

    Values are the string names matched against a sharder's
    ``compute_kernels()`` result (see ``TestQuantEBCSharder.compute_kernels``).
    """

    DENSE = "dense"
    FUSED = "fused"
    # NOTE(review): the *_UVM variants presumably place tables in unified
    # (host-mapped) memory and *_UVM_CACHING adds a device-side cache —
    # confirm against the FBGEMM TBE documentation.
    FUSED_UVM = "fused_uvm"
    FUSED_UVM_CACHING = "fused_uvm_caching"
    QUANT = "quant"
    QUANT_UVM = "quant_uvm"
    QUANT_UVM_CACHING = "quant_uvm_caching"
class TestQuantEBCSharder(QuantEmbeddingBagCollectionSharder):
    """Test sharder pinned to exactly one sharding type and one compute kernel.

    Benchmarks mutate ``_sharding_type`` between runs so that each pass
    exercises a single, known sharding strategy.
    """

    def __init__(
        self,
        sharding_type: str,
        kernel_type: str,
        fused_params: Optional[Dict[str, Any]] = None,
        shardable_params: Optional[List[str]] = None,
    ) -> None:
        super().__init__(fused_params=fused_params, shardable_params=shardable_params)
        self._sharding_type = sharding_type
        self._kernel_type = kernel_type

    def sharding_types(self, compute_device_type: str) -> List[str]:
        # Advertise only the single configured sharding type.
        return [self._sharding_type]

    def compute_kernels(
        self, sharding_type: str, compute_device_type: str
    ) -> List[str]:
        # Advertise only the single configured kernel.
        return [self._kernel_type]

    def shard(
        self,
        module: QuantEmbeddingBagCollection,
        params: Dict[str, ParameterSharding],
        env: ShardingEnv,
        device: Optional[torch.device] = None,
    ) -> ShardedQuantEmbeddingBagCollection:
        """Shard the quantized EBC, forwarding module-level quant settings.

        The module's output dtype and TBE/state-dict flags are copied into the
        fused params so the sharded module is constructed consistently.
        """
        fused_params = self.fused_params or {}
        fused_params["output_dtype"] = data_type_to_sparse_type(
            dtype_to_data_type(module.output_dtype())
        )
        for param_key, module_attr in (
            (FUSED_PARAM_REGISTER_TBE_BOOL, MODULE_ATTR_REGISTER_TBES_BOOL),
            (
                FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
                MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS,
            ),
        ):
            fused_params[param_key] = getattr(module, module_attr, False)
        return ShardedQuantEmbeddingBagCollection(
            module=module,
            table_name_to_parameter_sharding=params,
            env=env,
            fused_params=fused_params,
            device=device,
        )
# pyre-ignore
def benchmark_qebc(args: argparse.Namespace, output_dir: str) -> List[BenchmarkResult]:
    """Benchmark a quantized EmbeddingBagCollection over the configured
    sharding types and compile modes, writing outputs under ``output_dir``."""
    tables = get_tables(TABLE_SIZES)
    sharder = TestQuantEBCSharder(
        sharding_type="",
        kernel_type=EmbeddingComputeKernel.QUANT.value,
        shardable_params=[table.name for table in tables],
    )
    module = QuantEmbeddingBagCollection(
        # pyre-ignore [6]
        tables=tables,
        is_weighted=False,
        device=torch.device("cpu"),
        quant_state_dict_split_scale_bias=True,
    )
    # Forward every public CLI arg except output_dir, which the caller has
    # already rewritten to a per-run directory.
    forwarded_args = {
        name: getattr(args, name)
        for name in dir(args)
        if not name.startswith("_") and name != "output_dir"
    }
    return benchmark_module(
        module=module,
        sharder=sharder,
        sharding_types=BENCH_SHARDING_TYPES,
        compile_modes=BENCH_COMPILE_MODES,
        tables=tables,
        output_dir=output_dir,
        **forwarded_args,
    )
9,087 | import argparse
import logging
import os
import time
from functools import partial
from typing import List, Tuple
import torch
from torchrec.distributed.benchmark.benchmark_utils import (
benchmark_module,
BenchmarkResult,
CompileMode,
DLRM_NUM_EMBEDDINGS_PER_FEATURE,
EMBEDDING_DIM,
get_tables,
init_argparse_and_args,
write_report,
)
from torchrec.distributed.embedding_types import EmbeddingComputeKernel, ShardingType
from torchrec.distributed.test_utils.infer_utils import (
TestQuantEBCSharder,
TestQuantECSharder,
)
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
EmbeddingCollection as QuantEmbeddingCollection,
)
def main() -> None:
    """Run the QEBC and QEC benchmarks and write one report per module.

    Reads CLI options via ``init_argparse_and_args``, benchmarks each
    quantized module type into its own timestamped output directory, and
    writes all reports only after benchmarking has completed.
    """
    args: argparse.Namespace = init_argparse_and_args()

    num_requests = args.bench_iters * args.batch_size * args.num_benchmarks
    datetime_sfx: str = time.strftime("%Y%m%dT%H%M%S")

    # makedirs(exist_ok=True) replaces the original exists()+mkdir pair: it is
    # race-free (no TOCTOU window) and also creates missing parent directories.
    os.makedirs(args.output_dir, exist_ok=True)

    benchmark_results_per_module = []
    write_report_funcs_per_module = []

    for module_name in ["QuantEmbeddingBagCollection", "QuantEmbeddingCollection"]:
        # Each module type gets its own run directory under the datetime folder.
        output_dir = args.output_dir + f"/run_{datetime_sfx}"
        if module_name == "QuantEmbeddingBagCollection":
            output_dir += "_qebc"
            benchmark_func = benchmark_qebc
        else:
            output_dir += "_qec"
            benchmark_func = benchmark_qec
        os.makedirs(output_dir, exist_ok=True)

        tables_info = "\nTABLE SIZES QUANT:"
        for i, (num, dim) in enumerate(TABLE_SIZES):
            # u8 quantized table footprint: rows * dim bytes.
            mb = int(float(num * dim) / 1024 / 1024)
            tables_info += f"\nTABLE[{i}][{num:9}, {dim:4}] u8: {mb:6}Mb"

        report: str = (
            f"REPORT BENCHMARK {datetime_sfx} world_size:{args.world_size} batch_size:{args.batch_size}\n"
        )
        report += f"Module: {module_name}\n"
        report += tables_info
        report += "\n"
        report += f"num_requests:{num_requests:8}\n"
        report_file: str = f"{output_dir}/run.report"

        # Save results to output them once benchmarking is all done.
        benchmark_results_per_module.append(benchmark_func(args, output_dir))
        write_report_funcs_per_module.append(
            partial(
                write_report,
                report_file=report_file,
                report_str=report,
                num_requests=num_requests,
            )
        )

    for write_report_func, module_results in zip(
        write_report_funcs_per_module, benchmark_results_per_module
    ):
        write_report_func(module_results)
def invoke_main() -> None:
    """Entry point: configure root logging at DEBUG verbosity, then run main()."""
    logging.basicConfig()
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    main()
9,088 | from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TypeVar
import torch
import torch.distributed as dist
from fbgemm_gpu.permute_pooled_embedding_modules_split import (
PermutePooledEmbeddingsSplit,
)
from torchrec.distributed.dist_data import EmbeddingsAllToOne
from torchrec.distributed.embedding_lookup import (
GroupedPooledEmbeddingsLookup,
InferGroupedPooledEmbeddingsLookup,
)
from torchrec.distributed.embedding_sharding import (
BaseEmbeddingDist,
BaseEmbeddingLookup,
BaseSparseFeaturesDist,
EmbeddingShardingContext,
EmbeddingShardingInfo,
)
from torchrec.distributed.embedding_types import (
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
KJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.sharding.tw_sharding import (
BaseTwEmbeddingSharding,
InferTwSparseFeaturesDist,
TwPooledEmbeddingDist,
TwSparseFeaturesDist,
)
from torchrec.distributed.types import (
NullShardingContext,
QuantizedCommCodecs,
ShardedTensorMetadata,
ShardingEnv,
ShardMetadata,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
def _fx_wrap_permute(
    permute_module: PermutePooledEmbeddingsSplit, input: torch.Tensor
) -> torch.Tensor:
    """FX-traceable wrapper that calls the permute module's forward directly."""
    permuted = permute_module.forward(input)
    return permuted
9,089 | import math
from typing import Any, cast, Dict, List, Optional, Tuple, TypeVar, Union
import torch
import torch.distributed as dist
from torchrec.distributed.dist_data import (
EmbeddingsAllToOneReduce,
KJTAllToAll,
KJTOneToAll,
PooledEmbeddingsReduceScatter,
VariableBatchPooledEmbeddingsReduceScatter,
)
from torchrec.distributed.embedding_lookup import (
GroupedPooledEmbeddingsLookup,
InferGroupedPooledEmbeddingsLookup,
)
from torchrec.distributed.embedding_sharding import (
BaseEmbeddingDist,
BaseEmbeddingLookup,
BaseSparseFeaturesDist,
bucketize_kjt_before_all2all,
EmbeddingSharding,
EmbeddingShardingContext,
EmbeddingShardingInfo,
group_tables,
)
from torchrec.distributed.embedding_types import (
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
GroupedEmbeddingConfig,
KJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import (
Awaitable,
CommOp,
NullShardingContext,
QuantizedCommCodecs,
ShardedTensorMetadata,
ShardingEnv,
ShardMetadata,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
# NOTE(review): extraction artifact — only the method signatures of this class
# survive in this chunk; every body is missing, so this block is not runnable
# as-is. The accessors presumably aggregate per-table properties of the grouped
# embedding tables — confirm against the full definition in
# torchrec/distributed/embedding_types.py.
class GroupedEmbeddingConfig:
    def feature_hash_sizes(self) -> List[int]:
    def num_features(self) -> int:
    def dim_sum(self) -> int:
    def table_names(self) -> List[str]:
    def feature_names(self) -> List[str]:
    def embedding_dims(self) -> List[int]:
    def embedding_names(self) -> List[str]:
    def embedding_shard_metadata(self) -> List[Optional[ShardMetadata]]:
def get_embedding_shard_metadata(
    grouped_embedding_configs_per_rank: List[List[GroupedEmbeddingConfig]],
) -> Tuple[List[List[int]], bool]:
    """Collect per-feature row-wise shard boundaries and detect even sharding.

    Returns a list with one entry per embedding name (entries of the same table
    intentionally alias the same list) of ``[offset_0, ..., total_rows]``, plus
    a flag that is True only when every table's shard sizes match what an even
    row-wise split across all ranks would produce.
    """
    is_even_sharding: bool = True
    world_size = len(grouped_embedding_configs_per_rank)

    def get_even_shard_sizes(hash_size: int, world_size: int) -> List[int]:
        # Shard sizes an even row-wise split of `hash_size` rows would yield.
        block_size: int = math.ceil(hash_size / world_size)
        full_blocks: int = hash_size // block_size
        sizes = [block_size] * full_blocks
        if hash_size % world_size != 0:
            sizes.append(hash_size - block_size * full_blocks)
        return sizes

    embed_sharding = []
    for table in grouped_embedding_configs_per_rank[0][0].embedding_tables:
        offsets = []
        shard_sizes = []
        total_rows = 0
        # pyre-ignore [16]: `Optional` has no attribute `shards_metadata`
        for metadata in table.global_metadata.shards_metadata:
            offsets.append(metadata.shard_offsets[0])
            total_rows += metadata.shard_sizes[0]
            shard_sizes.append(metadata.shard_sizes[0])
        offsets.append(total_rows)
        # One (shared) offsets list per embedding name of this table.
        embed_sharding.extend([offsets] * len(table.embedding_names))

        if shard_sizes != get_even_shard_sizes(total_rows, world_size):
            is_even_sharding = False

    return (embed_sharding, is_even_sharding)
9,090 | import math
from typing import Any, cast, Dict, List, Optional, Tuple, TypeVar, Union
import torch
import torch.distributed as dist
from torchrec.distributed.dist_data import (
EmbeddingsAllToOneReduce,
KJTAllToAll,
KJTOneToAll,
PooledEmbeddingsReduceScatter,
VariableBatchPooledEmbeddingsReduceScatter,
)
from torchrec.distributed.embedding_lookup import (
GroupedPooledEmbeddingsLookup,
InferGroupedPooledEmbeddingsLookup,
)
from torchrec.distributed.embedding_sharding import (
BaseEmbeddingDist,
BaseEmbeddingLookup,
BaseSparseFeaturesDist,
bucketize_kjt_before_all2all,
EmbeddingSharding,
EmbeddingShardingContext,
EmbeddingShardingInfo,
group_tables,
)
from torchrec.distributed.embedding_types import (
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
GroupedEmbeddingConfig,
KJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import (
Awaitable,
CommOp,
NullShardingContext,
QuantizedCommCodecs,
ShardedTensorMetadata,
ShardingEnv,
ShardMetadata,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
def _fx_wrap_block_bucketize_row_pos(
block_bucketize_row_pos: List[torch.Tensor],
) -> Optional[List[torch.Tensor]]:
return block_bucketize_row_pos if block_bucketize_row_pos else None | null |
9,091 | import math
from typing import Any, cast, Dict, List, Optional, Tuple, TypeVar, Union
import torch
import torch.distributed as dist
from torchrec.distributed.dist_data import (
EmbeddingsAllToOneReduce,
KJTAllToAll,
KJTOneToAll,
PooledEmbeddingsReduceScatter,
VariableBatchPooledEmbeddingsReduceScatter,
)
from torchrec.distributed.embedding_lookup import (
GroupedPooledEmbeddingsLookup,
InferGroupedPooledEmbeddingsLookup,
)
from torchrec.distributed.embedding_sharding import (
BaseEmbeddingDist,
BaseEmbeddingLookup,
BaseSparseFeaturesDist,
bucketize_kjt_before_all2all,
EmbeddingSharding,
EmbeddingShardingContext,
EmbeddingShardingInfo,
group_tables,
)
from torchrec.distributed.embedding_types import (
BaseGroupedFeatureProcessor,
EmbeddingComputeKernel,
GroupedEmbeddingConfig,
KJTList,
ShardedEmbeddingTable,
)
from torchrec.distributed.types import (
Awaitable,
CommOp,
NullShardingContext,
QuantizedCommCodecs,
ShardedTensorMetadata,
ShardingEnv,
ShardMetadata,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Multistreamable
def get_block_sizes_runtime_device(
    block_sizes: List[int],
    runtime_device: torch.device,
    tensor_cache: Dict[str, Tuple[torch.Tensor, List[torch.Tensor]]],
    embedding_shard_metadata: Optional[List[List[int]]] = None,
    dtype: torch.dtype = torch.int32,
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
    """Return ``(block_sizes tensor, per-table shard-offset tensors)``, memoized.

    The pair is cached in ``tensor_cache`` under a fixed key, so every call
    after the first returns the tensors materialized by the first call,
    regardless of the arguments passed later.
    """
    cache_key: str = "__block_sizes"
    if cache_key not in tensor_cache:
        block_sizes_tensor = torch.tensor(
            block_sizes,
            device=runtime_device,
            dtype=dtype,
        )
        if embedding_shard_metadata is None:
            row_pos_tensors: List[torch.Tensor] = []
        else:
            row_pos_tensors = [
                torch.tensor(
                    row_pos,
                    device=runtime_device,
                    dtype=dtype,
                )
                for row_pos in embedding_shard_metadata
            ]
        tensor_cache[cache_key] = (block_sizes_tensor, row_pos_tensors)
    return tensor_cache[cache_key]
9,092 | from collections import OrderedDict
from dataclasses import dataclass, field
from typing import Any, cast, Dict, Iterator, List, Optional, Set, Tuple, Type, TypeVar
import torch
import torch.distributed as dist
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torchrec.distributed.comm import intra_and_cross_node_pg
from torchrec.distributed.dist_data import (
KJTAllToAll,
PooledEmbeddingsAllToAll,
PooledEmbeddingsAwaitable,
)
from torchrec.distributed.embedding import EmbeddingCollectionSharder
from torchrec.distributed.embedding_sharding import KJTListSplitsAwaitable
from torchrec.distributed.embedding_types import (
BaseEmbeddingSharder,
KJTList,
ShardedEmbeddingModule,
)
from torchrec.distributed.embeddingbag import EmbeddingBagCollectionSharder
from torchrec.distributed.types import (
Awaitable,
CommOp,
LazyAwaitable,
Multistreamable,
NullShardedModuleContext,
ParameterSharding,
QuantizedCommCodecs,
ShardingEnv,
ShardingType,
)
from torchrec.distributed.utils import append_prefix
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingCollection,
)
from torchrec.modules.embedding_tower import (
EmbeddingTower,
EmbeddingTowerCollection,
tower_input_params,
)
from torchrec.optim.fused import FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class ShardingType(Enum):
    """
    Well-known sharding types, used by inter-module optimizations.

    Values are the string identifiers stored in
    ``ParameterSharding.sharding_type``.
    """

    # Replicated on all ranks
    DATA_PARALLEL = "data_parallel"
    # Placed on a single rank
    TABLE_WISE = "table_wise"
    # Placed on multiple ranks as different sharded tables
    COLUMN_WISE = "column_wise"
    # Range-split on the first dimension across all ranks
    ROW_WISE = "row_wise"
    # Row-wise on the same node and table-wise across nodes
    # Useful when having multiple ranks per node
    # and comms within a single node are more efficient than across nodes.
    TABLE_ROW_WISE = "table_row_wise"
    # Column-wise on the same node and table-wise across nodes
    TABLE_COLUMN_WISE = "table_column_wise"
# NOTE(review): in the full source this is presumably decorated as a dataclass —
# the decorator is not visible in this chunk (bare class-level annotations with
# defaults would otherwise be shared class attributes).
class ParameterSharding:
    """
    Describes the sharding of the parameter.

    Attributes:
        sharding_type (str): how this parameter is sharded. See ShardingType for
            well-known types.
        compute_kernel (str): compute kernel to be used by this parameter.
        ranks (Optional[List[int]]): rank of each shard.
        sharding_spec (Optional[ShardingSpec]): list of ShardMetadata for each
            shard.
        cache_params (Optional[CacheParams]): cache params for embedding lookup.
        enforce_hbm (Optional[bool]): whether to use HBM.
        stochastic_rounding (Optional[bool]): whether to use stochastic rounding.
        bounds_check_mode (Optional[BoundsCheckMode]): bounds check mode.

    NOTE:
      The meaning of ``ranks`` depends on the sharding type:
        ShardingType.TABLE_WISE - rank where this embedding is placed
        ShardingType.COLUMN_WISE - rank where the embedding shards are placed,
        seen as individual tables
        ShardingType.TABLE_ROW_WISE - first rank when this embedding is placed
        ShardingType.ROW_WISE, ShardingType.DATA_PARALLEL - unused
    """

    sharding_type: str
    compute_kernel: str
    ranks: Optional[List[int]] = None
    sharding_spec: Optional[ShardingSpec] = None
    cache_params: Optional[CacheParams] = None
    enforce_hbm: Optional[bool] = None
    stochastic_rounding: Optional[bool] = None
    bounds_check_mode: Optional[BoundsCheckMode] = None
def _replace_sharding_with_intra_node(
    table_name_to_parameter_sharding: Dict[str, ParameterSharding], local_size: int
) -> None:
    """Rewrite cross-node sharding specs into their intra-node equivalents.

    Mutates each ParameterSharding in place: TABLE_ROW_WISE becomes ROW_WISE,
    TABLE_COLUMN_WISE becomes COLUMN_WISE, ranks are remapped modulo
    ``local_size``, and shard placements are updated to the remapped ranks.

    Raises:
        ValueError: for any other sharding type.
    """
    for sharding in table_name_to_parameter_sharding.values():
        if sharding.sharding_type == ShardingType.TABLE_ROW_WISE.value:
            sharding.sharding_type = ShardingType.ROW_WISE.value
        elif sharding.sharding_type == ShardingType.TABLE_COLUMN_WISE.value:
            sharding.sharding_type = ShardingType.COLUMN_WISE.value
        else:
            raise ValueError(f"Sharding type not supported {sharding.sharding_type}")
        if sharding.ranks:
            sharding.ranks = [rank % local_size for rank in sharding.ranks]
        if sharding.sharding_spec:
            # pyre-ignore [6, 16]
            for shard, new_rank in zip(sharding.sharding_spec.shards, sharding.ranks):
                shard.placement._rank = new_rank
9,093 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _to_lengths(offsets: torch.Tensor) -> torch.Tensor:
return offsets[1:] - offsets[:-1]
def _maybe_compute_lengths(
lengths: Optional[torch.Tensor], offsets: Optional[torch.Tensor]
) -> torch.Tensor:
if lengths is None:
assert offsets is not None
lengths = _to_lengths(offsets)
return lengths | null |
9,094 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _to_offsets(lengths: torch.Tensor) -> torch.Tensor:
    """Convert per-slice lengths to cumulative offsets via the FBGEMM kernel."""
    offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
    return offsets
def _maybe_compute_offsets(
lengths: Optional[torch.Tensor], offsets: Optional[torch.Tensor]
) -> torch.Tensor:
if offsets is None:
assert lengths is not None
offsets = _to_offsets(lengths)
return offsets | null |
9,095 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _get_inverse_indices_or_throw(
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]],
) -> Tuple[List[str], torch.Tensor]:
    # Unwraps the optional (keys, inverse-indices tensor) pair.
    # NOTE: implemented with `assert`, so the check is stripped under
    # `python -O`; callers rely on the AssertionError type/message.
    assert inverse_indices is not None, "This KJT doesn't have inverse indices."
    return inverse_indices
9,096 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _assert_offsets_or_lengths_is_provided(
    offsets: Optional[torch.Tensor], lengths: Optional[torch.Tensor]
) -> None:
    # Validates the jagged-tensor invariant: at least one of offsets/lengths
    # must be supplied (the other can be derived lazily, see
    # _maybe_compute_lengths/_maybe_compute_offsets). Stripped under -O.
    assert offsets is not None or lengths is not None, "Must provide lengths or offsets"
9,097 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _regroup_keyed_tensors(
keyed_tensors: List["KeyedTensor"], groups: List[List[str]]
) -> List[torch.Tensor]:
# Shortcut for no re-grouping
if len(keyed_tensors) == len(groups):
match = True
for kt, group in zip(keyed_tensors, groups):
if kt.keys() != group:
match = False
break
if match:
return [kt.values() for kt in keyed_tensors]
embedding_dicts = [keyed_tensor.to_dict() for keyed_tensor in keyed_tensors]
lengths = [keyed_tensor.length_per_key() for keyed_tensor in keyed_tensors]
indices = [keyed_tensor._key_indices() for keyed_tensor in keyed_tensors]
key_dim = keyed_tensors[0].key_dim()
key_to_idx: dict[str, int] = {}
for i, keyed_tensor in enumerate(keyed_tensors):
for key in keyed_tensor.keys():
key_to_idx[key] = i
# Rearrange values based on groups with a single torch.cat operation.
split_lengths: List[int] = []
cat_input: List[torch.Tensor] = []
for group in groups:
group_length = 0
for name in group:
cat_input.append(embedding_dicts[key_to_idx[name]][name])
group_length += lengths[key_to_idx[name]][indices[key_to_idx[name]][name]]
split_lengths.append(group_length)
rearranged_values = torch.cat(cat_input, key_dim)
return list(rearranged_values.split(split_lengths, dim=key_dim)) | null |
9,098 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _optional_mask(
tensor: Optional[torch.Tensor], mask: torch.Tensor
) -> Optional[torch.Tensor]:
return tensor[mask] if tensor is not None else None | null |
9,099 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _arange(*args, **kwargs) -> torch.Tensor:
return torch.arange(*args, **kwargs) | null |
9,100 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _to_offsets(lengths: torch.Tensor) -> torch.Tensor:
    # "Complete" cumsum of lengths — presumably len(lengths) + 1 entries with a
    # leading zero; confirm against FBGEMM's asynchronous_complete_cumsum docs.
    return torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
The provided code snippet includes necessary dependencies for implementing the `_permute_tensor_by_segments` function. Write a Python function `def _permute_tensor_by_segments( tensor: torch.Tensor, segment_sizes: torch.Tensor, recat: torch.Tensor, weights: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]` to solve the following problem:
Permutes a tensor by segments according to recat tensor. For variable stride tensors we permute across length per key, which reduces the number of permute indices and lengthens each sequence. `keyed_jagged_index_select_dim1` more efficiently parallelizes work for each permute index and sequence across multiple thread blocks. NOTE: `keyed_jagged_index_select_dim1` is only supported for CUDA.
Here is the function:
def _permute_tensor_by_segments(
    tensor: torch.Tensor,
    segment_sizes: torch.Tensor,
    recat: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """
    Permutes a tensor by segments according to recat tensor.

    For variable stride tensors we permute across length per key, which reduces
    the number of permute indices and lengthens each sequence.

    `keyed_jagged_index_select_dim1` more efficiently parallelizes work for each
    permute index and sequence across multiple thread blocks.

    NOTE:
        `keyed_jagged_index_select_dim1` is only supported for CUDA.
    """
    if tensor.device.type != "cuda":
        # CPU fallback: fbgemm's 1-D permute handles values and weights in one op.
        (
            _,
            permuted_tensor,
            permuted_weights,
        ) = torch.ops.fbgemm.permute_1D_sparse_data(
            recat,
            segment_sizes,
            tensor,
            weights,
            None,
        )
        return permuted_tensor, permuted_weights

    output = torch.ops.fbgemm.keyed_jagged_index_select_dim1(
        tensor,
        segment_sizes,
        _to_offsets(segment_sizes),
        recat,
        segment_sizes.numel(),
        weights,
    )
    permuted_weights = output[2] if weights is not None else None
    return output[0], permuted_weights
9,101 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
Represents an (optionally weighted) jagged tensor.
A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. See `KeyedJaggedTensor` for full example.
Implementation is torch.jit.script-able.
NOTE:
We will NOT do input validation as it's expensive, you should always pass in the
valid lengths, offsets, etc.
Args:
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
"""
_fields = ["_values", "_weights", "_lengths", "_offsets"]
    def __init__(
        self,
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
    ) -> None:
        # Inputs are not fully validated (see class docstring) — callers must
        # pass consistent lengths/offsets.
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # At least one jagged descriptor (lengths or offsets) is required; the
        # missing one is derived lazily via _maybe_compute_lengths/_offsets.
        _assert_offsets_or_lengths_is_provided(offsets, lengths)
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "JaggedTensor":
        """Construct an empty JaggedTensor (zero values, lengths and offsets).

        An empty weights tensor is materialized only when ``is_weighted``.
        """
        # NOTE(review): no `self`/`cls` parameter — presumably decorated as a
        # @staticmethod in the full source; the decorator is not in this chunk.
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return JaggedTensor(
            values=torch.empty(0, dtype=values_dtype, device=device),
            offsets=torch.empty(0, dtype=lengths_dtype, device=device),
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            weights=weights,
        )
    def from_dense_lengths(
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
    ) -> "JaggedTensor":
        """
        Constructs `JaggedTensor` from dense values/weights of shape (B, N,).

        Note that `lengths` is still of shape (B,).
        """
        # Boolean (B, N) mask: True for the first lengths[b] positions of row b;
        # indexing with it keeps exactly the valid prefix of each row.
        mask2d = (
            _arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
        ) < lengths.unsqueeze(-1)
        return JaggedTensor(
            values=values[mask2d],
            weights=_optional_mask(weights, mask2d),
            lengths=lengths,
        )
    def from_dense(
        values: List[torch.Tensor],
        weights: Optional[List[torch.Tensor]] = None,
    ) -> "JaggedTensor":
        """
        Constructs `JaggedTensor` from dense values/weights of shape (B, N,).

        Note that `lengths` and `offsets` are still of shape (B,).

        Args:
            values (List[torch.Tensor]): a list of tensors for dense representation
            weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
                the same shape as values.

        Returns:
            JaggedTensor: JaggedTensor created from 2D dense tensor.

        Example::

            values = [
                torch.Tensor([1.0]),
                torch.Tensor(),
                torch.Tensor([7.0, 8.0]),
                torch.Tensor([10.0, 11.0, 12.0]),
            ]
            weights = [
                torch.Tensor([1.0]),
                torch.Tensor(),
                torch.Tensor([7.0, 8.0]),
                torch.Tensor([10.0, 11.0, 12.0]),
            ]
            j1 = JaggedTensor.from_dense(
                values=values,
                weights=weights,
            )

            # j1 = [[1.0], [], [7.0, 8.0], [10.0, 11.0, 12.0]]
        """
        # NOTE(review): no `self`/`cls` parameter — presumably a @staticmethod in
        # the full source; the decorator is not visible in this chunk.
        # Each input tensor becomes one jagged slice; its size(0) is its length.
        values_tensor = torch.cat(values, dim=0)
        lengths = torch.tensor(
            [value.size(0) for value in values],
            dtype=torch.int32,
            device=values_tensor.device,
        )
        weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
        return JaggedTensor(
            values=values_tensor,
            weights=weights_tensor,
            lengths=lengths,
        )
    def to_dense(self) -> List[torch.Tensor]:
        """
        Constructs a dense-representation of the JT's values.

        Returns:
            List[torch.Tensor]: list of tensors.

        Example::

            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, offsets=offsets)

            values_list = jt.to_dense()

            # values_list = [
            #     torch.tensor([1.0, 2.0]),
            #     torch.tensor([]),
            #     torch.tensor([3.0]),
            #     torch.tensor([4.0]),
            #     torch.tensor([5.0]),
            #     torch.tensor([6.0, 7.0, 8.0]),
            # ]
        """
        tensor_list = []
        # Each consecutive pair of offsets delimits one slice of values.
        # NOTE: .item() is called per element — on CUDA tensors this forces a
        # device sync per slice.
        for index in range(self.offsets().size(0) - 1):
            offset = self.offsets()[index].item()
            next_offset = self.offsets()[index + 1].item()
            tensor_list.append(self.values()[offset:next_offset])
        return tensor_list
    def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
        """
        Constructs a dense-representation of the JT's weights.

        Returns:
            Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.

        Example::

            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, weights=weights, offsets=offsets)

            weights_list = jt.to_dense_weights()

            # weights_list = [
            #     torch.tensor([0.1, 0.2]),
            #     torch.tensor([]),
            #     torch.tensor([0.3]),
            #     torch.tensor([0.4]),
            #     torch.tensor([0.5]),
            #     torch.tensor([0.6, 0.7, 0.8]),
            # ]
        """
        # Mirrors to_dense(), but over the (optional) weights tensor.
        if self.weights_or_none() is None:
            return None
        tensor_list = []
        for index in range(self.offsets().size(0) - 1):
            offset = self.offsets()[index].item()
            next_offset = self.offsets()[index + 1].item()
            tensor_list.append(self.weights()[offset:next_offset])
        return tensor_list
def to_padded_dense(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> torch.Tensor:
"""
Constructs a 2D dense tensor from the JT's values of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
torch.Tensor: 2d dense tensor.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
dt = jt.to_padded_dense(
desired_length=2,
padding_value=10.0,
)
# dt = [
# [1.0, 2.0],
# [10.0, 10.0],
# [3.0, 10.0],
# [4.0, 10.0],
# [5.0, 10.0],
# [6.0, 7.0],
# ]
"""
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.values(), [self.offsets()], [N], padding_value
)
def to_padded_dense_weights(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> Optional[torch.Tensor]:
"""
Constructs a 2D dense tensor from the JT's weights of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
d_wt = jt.to_padded_dense_weights(
desired_length=2,
padding_value=1.0,
)
# d_wt = [
# [0.1, 0.2],
# [1.0, 1.0],
# [0.3, 1.0],
# [0.4, 1.0],
# [0.5, 1.0],
# [0.6, 0.7],
# ]
"""
if self.weights_or_none() is None:
return None
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.weights(), [self.offsets()], [N], padding_value
)
def lengths(self) -> torch.Tensor:
_lengths = _maybe_compute_lengths(self._lengths, self._offsets)
self._lengths = _lengths
return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
return self._lengths
def offsets(self) -> torch.Tensor:
_offsets = _maybe_compute_offsets(self._lengths, self._offsets)
self._offsets = _offsets
return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
return self._offsets
def values(self) -> torch.Tensor:
return self._values
def weights(self) -> torch.Tensor:
return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
return self._weights
def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
return JaggedTensor(
values=self._values.to(device, non_blocking=non_blocking),
weights=(
weights.to(device, non_blocking=non_blocking)
if weights is not None
else None
),
lengths=(
lengths.to(device, non_blocking=non_blocking)
if lengths is not None
else None
),
offsets=(
offsets.to(device, non_blocking=non_blocking)
if offsets is not None
else None
),
)
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
def __str__(self) -> str:
offsets = self.offsets()
if self._weights is None:
return (
"JaggedTensor({\n "
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ "\n})\n"
)
return (
"JaggedTensor({\n"
+ ' "values": '
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ ',\n "weights": '
+ _jagged_values_string(
_get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
)
+ "\n})\n"
)
def _jt_flatten(
t: JaggedTensor,
) -> Tuple[List[Optional[torch.Tensor]], None]:
return [getattr(t, a) for a in JaggedTensor._fields], None
def _jt_flatten_with_keys(
t: JaggedTensor,
) -> Tuple[List[Tuple[KeyEntry, Optional[torch.Tensor]]], None]:
values, context = _jt_flatten(t)
# pyre can't tell that GetAttrKey implements the KeyEntry protocol
return [ # pyre-ignore[7]
(GetAttrKey(k), v) for k, v in zip(JaggedTensor._fields, values)
], context | null |
9,102 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
Represents an (optionally weighted) jagged tensor.
A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. See `KeyedJaggedTensor` for full example.
Implementation is torch.jit.script-able.
NOTE:
We will NOT do input validation as it's expensive, you should always pass in the
valid lengths, offsets, etc.
Args:
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
"""
_fields = ["_values", "_weights", "_lengths", "_offsets"]
def __init__(
self,
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
) -> None:
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
_assert_offsets_or_lengths_is_provided(offsets, lengths)
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "JaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return JaggedTensor(
values=torch.empty(0, dtype=values_dtype, device=device),
offsets=torch.empty(0, dtype=lengths_dtype, device=device),
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
weights=weights,
)
def from_dense_lengths(
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` is still of shape (B,).
"""
mask2d = (
_arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
) < lengths.unsqueeze(-1)
return JaggedTensor(
values=values[mask2d],
weights=_optional_mask(weights, mask2d),
lengths=lengths,
)
def from_dense(
values: List[torch.Tensor],
weights: Optional[List[torch.Tensor]] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` and `offsets` are still of shape (B,).
Args:
values (List[torch.Tensor]): a list of tensors for dense representation
weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
the same shape as values.
Returns:
JaggedTensor: JaggedTensor created from 2D dense tensor.
Example::
values = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
weights = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
j1 = JaggedTensor.from_dense(
values=values,
weights=weights,
)
# j1 = [[1.0], [], [7.0], [8.0], [10.0, 11.0, 12.0]]
"""
values_tensor = torch.cat(values, dim=0)
lengths = torch.tensor(
[value.size(0) for value in values],
dtype=torch.int32,
device=values_tensor.device,
)
weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
return JaggedTensor(
values=values_tensor,
weights=weights_tensor,
lengths=lengths,
)
def to_dense(self) -> List[torch.Tensor]:
"""
Constructs a dense-representation of the JT's values.
Returns:
List[torch.Tensor]: list of tensors.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
values_list = jt.to_dense()
# values_list = [
# torch.tensor([1.0, 2.0]),
# torch.tensor([]),
# torch.tensor([3.0]),
# torch.tensor([4.0]),
# torch.tensor([5.0]),
# torch.tensor([6.0, 7.0, 8.0]),
# ]
"""
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.values()[offset:next_offset])
return tensor_list
def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
"""
Constructs a dense-representation of the JT's weights.
Returns:
Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
weights_list = jt.to_dense_weights()
# weights_list = [
# torch.tensor([0.1, 0.2]),
# torch.tensor([]),
# torch.tensor([0.3]),
# torch.tensor([0.4]),
# torch.tensor([0.5]),
# torch.tensor([0.6, 0.7, 0.8]),
# ]
"""
if self.weights_or_none() is None:
return None
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.weights()[offset:next_offset])
return tensor_list
def to_padded_dense(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> torch.Tensor:
"""
Constructs a 2D dense tensor from the JT's values of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
torch.Tensor: 2d dense tensor.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
dt = jt.to_padded_dense(
desired_length=2,
padding_value=10.0,
)
# dt = [
# [1.0, 2.0],
# [10.0, 10.0],
# [3.0, 10.0],
# [4.0, 10.0],
# [5.0, 10.0],
# [6.0, 7.0],
# ]
"""
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.values(), [self.offsets()], [N], padding_value
)
def to_padded_dense_weights(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> Optional[torch.Tensor]:
"""
Constructs a 2D dense tensor from the JT's weights of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
d_wt = jt.to_padded_dense_weights(
desired_length=2,
padding_value=1.0,
)
# d_wt = [
# [0.1, 0.2],
# [1.0, 1.0],
# [0.3, 1.0],
# [0.4, 1.0],
# [0.5, 1.0],
# [0.6, 0.7],
# ]
"""
if self.weights_or_none() is None:
return None
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.weights(), [self.offsets()], [N], padding_value
)
def lengths(self) -> torch.Tensor:
_lengths = _maybe_compute_lengths(self._lengths, self._offsets)
self._lengths = _lengths
return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
return self._lengths
def offsets(self) -> torch.Tensor:
_offsets = _maybe_compute_offsets(self._lengths, self._offsets)
self._offsets = _offsets
return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
return self._offsets
def values(self) -> torch.Tensor:
return self._values
def weights(self) -> torch.Tensor:
return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
return self._weights
def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
return JaggedTensor(
values=self._values.to(device, non_blocking=non_blocking),
weights=(
weights.to(device, non_blocking=non_blocking)
if weights is not None
else None
),
lengths=(
lengths.to(device, non_blocking=non_blocking)
if lengths is not None
else None
),
offsets=(
offsets.to(device, non_blocking=non_blocking)
if offsets is not None
else None
),
)
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
def __str__(self) -> str:
offsets = self.offsets()
if self._weights is None:
return (
"JaggedTensor({\n "
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ "\n})\n"
)
return (
"JaggedTensor({\n"
+ ' "values": '
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ ',\n "weights": '
+ _jagged_values_string(
_get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
)
+ "\n})\n"
)
def _jt_unflatten(values: List[Optional[torch.Tensor]], context: None) -> JaggedTensor:
return JaggedTensor(*values) | null |
9,103 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
Represents an (optionally weighted) jagged tensor.
A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. See `KeyedJaggedTensor` for full example.
Implementation is torch.jit.script-able.
NOTE:
We will NOT do input validation as it's expensive, you should always pass in the
valid lengths, offsets, etc.
Args:
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
"""
_fields = ["_values", "_weights", "_lengths", "_offsets"]
def __init__(
self,
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
) -> None:
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
_assert_offsets_or_lengths_is_provided(offsets, lengths)
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "JaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return JaggedTensor(
values=torch.empty(0, dtype=values_dtype, device=device),
offsets=torch.empty(0, dtype=lengths_dtype, device=device),
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
weights=weights,
)
def from_dense_lengths(
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` is still of shape (B,).
"""
mask2d = (
_arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
) < lengths.unsqueeze(-1)
return JaggedTensor(
values=values[mask2d],
weights=_optional_mask(weights, mask2d),
lengths=lengths,
)
def from_dense(
values: List[torch.Tensor],
weights: Optional[List[torch.Tensor]] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` and `offsets` are still of shape (B,).
Args:
values (List[torch.Tensor]): a list of tensors for dense representation
weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
the same shape as values.
Returns:
JaggedTensor: JaggedTensor created from 2D dense tensor.
Example::
values = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
weights = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
j1 = JaggedTensor.from_dense(
values=values,
weights=weights,
)
# j1 = [[1.0], [], [7.0], [8.0], [10.0, 11.0, 12.0]]
"""
values_tensor = torch.cat(values, dim=0)
lengths = torch.tensor(
[value.size(0) for value in values],
dtype=torch.int32,
device=values_tensor.device,
)
weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
return JaggedTensor(
values=values_tensor,
weights=weights_tensor,
lengths=lengths,
)
def to_dense(self) -> List[torch.Tensor]:
"""
Constructs a dense-representation of the JT's values.
Returns:
List[torch.Tensor]: list of tensors.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
values_list = jt.to_dense()
# values_list = [
# torch.tensor([1.0, 2.0]),
# torch.tensor([]),
# torch.tensor([3.0]),
# torch.tensor([4.0]),
# torch.tensor([5.0]),
# torch.tensor([6.0, 7.0, 8.0]),
# ]
"""
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.values()[offset:next_offset])
return tensor_list
def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
"""
Constructs a dense-representation of the JT's weights.
Returns:
Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
weights_list = jt.to_dense_weights()
# weights_list = [
# torch.tensor([0.1, 0.2]),
# torch.tensor([]),
# torch.tensor([0.3]),
# torch.tensor([0.4]),
# torch.tensor([0.5]),
# torch.tensor([0.6, 0.7, 0.8]),
# ]
"""
if self.weights_or_none() is None:
return None
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.weights()[offset:next_offset])
return tensor_list
def to_padded_dense(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> torch.Tensor:
"""
Constructs a 2D dense tensor from the JT's values of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
torch.Tensor: 2d dense tensor.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
dt = jt.to_padded_dense(
desired_length=2,
padding_value=10.0,
)
# dt = [
# [1.0, 2.0],
# [10.0, 10.0],
# [3.0, 10.0],
# [4.0, 10.0],
# [5.0, 10.0],
# [6.0, 7.0],
# ]
"""
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.values(), [self.offsets()], [N], padding_value
)
def to_padded_dense_weights(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> Optional[torch.Tensor]:
"""
Constructs a 2D dense tensor from the JT's weights of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
d_wt = jt.to_padded_dense_weights(
desired_length=2,
padding_value=1.0,
)
# d_wt = [
# [0.1, 0.2],
# [1.0, 1.0],
# [0.3, 1.0],
# [0.4, 1.0],
# [0.5, 1.0],
# [0.6, 0.7],
# ]
"""
if self.weights_or_none() is None:
return None
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.weights(), [self.offsets()], [N], padding_value
)
def lengths(self) -> torch.Tensor:
_lengths = _maybe_compute_lengths(self._lengths, self._offsets)
self._lengths = _lengths
return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
return self._lengths
def offsets(self) -> torch.Tensor:
_offsets = _maybe_compute_offsets(self._lengths, self._offsets)
self._offsets = _offsets
return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
return self._offsets
def values(self) -> torch.Tensor:
return self._values
def weights(self) -> torch.Tensor:
return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
return self._weights
def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
return JaggedTensor(
values=self._values.to(device, non_blocking=non_blocking),
weights=(
weights.to(device, non_blocking=non_blocking)
if weights is not None
else None
),
lengths=(
lengths.to(device, non_blocking=non_blocking)
if lengths is not None
else None
),
offsets=(
offsets.to(device, non_blocking=non_blocking)
if offsets is not None
else None
),
)
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
def __str__(self) -> str:
offsets = self.offsets()
if self._weights is None:
return (
"JaggedTensor({\n "
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ "\n})\n"
)
return (
"JaggedTensor({\n"
+ ' "values": '
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ ',\n "weights": '
+ _jagged_values_string(
_get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
)
+ "\n})\n"
)
def _jt_flatten_spec(t: JaggedTensor, spec: TreeSpec) -> List[Optional[torch.Tensor]]:
return [getattr(t, a) for a in JaggedTensor._fields] | null |
9,104 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _assert_tensor_has_no_elements_or_has_integers(
tensor: torch.Tensor, tensor_name: str
) -> None:
assert tensor.numel() == 0 or tensor.dtype in [
torch.long,
torch.int,
torch.short,
torch.int8,
torch.uint8,
], "{} must be of integer type, but got {}".format(tensor_name, tensor.dtype) | null |
9,105 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
def _maybe_compute_index_per_key(
keys: List[str],
index_per_key: Optional[Dict[str, int]],
) -> Dict[str, int]:
if index_per_key is None:
index_per_key = {key: i for i, key in enumerate(keys)}
return index_per_key | null |
9,106 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _maybe_compute_stride_kjt(
keys: List[str],
stride: Optional[int],
lengths: Optional[torch.Tensor],
offsets: Optional[torch.Tensor],
) -> int:
if stride is None:
if len(keys) == 0:
stride = 0
elif offsets is not None and offsets.numel() > 0:
stride = (offsets.numel() - 1) // len(keys)
elif lengths is not None:
stride = lengths.numel() // len(keys)
else:
stride = 0
return stride
def _maybe_compute_stride_kjt_scripted(
keys: List[str],
stride: Optional[int],
lengths: Optional[torch.Tensor],
offsets: Optional[torch.Tensor],
) -> torch.Tensor:
return torch.tensor([_maybe_compute_stride_kjt(keys, stride, lengths, offsets)]) | null |
9,107 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _cumsum(o: List[int]) -> List[int]:
ret = [0] * (len(o) + 1)
for i in range(len(o)):
ret[i + 1] = ret[i] + o[i]
return ret
def _maybe_compute_length_per_key(
keys: List[str],
stride: int,
stride_per_key: List[int],
variable_stride_per_key: bool,
length_per_key: Optional[List[int]],
lengths: Optional[torch.Tensor],
offsets: Optional[torch.Tensor],
values: Optional[torch.Tensor],
) -> List[int]:
if length_per_key is None:
if values is not None and values.is_meta:
# create dummy lengths per key when on meta device
total_length = values.numel()
_length = [total_length // len(keys)] * len(keys)
_length[0] += total_length % len(keys)
elif len(keys) and offsets is not None and len(offsets) > 0:
_length: List[int] = (
_length_per_key_from_stride_per_key(torch.diff(offsets), stride_per_key)
if variable_stride_per_key
else torch.sum(torch.diff(offsets).view(-1, stride), dim=1).tolist()
)
elif len(keys) and lengths is not None:
_length: List[int] = (
_length_per_key_from_stride_per_key(lengths, stride_per_key)
if variable_stride_per_key
else (
torch.sum(lengths.view(-1, stride), dim=1).tolist()
if lengths.numel() != 0
else [0] * len(keys)
)
)
else:
_length: List[int] = []
length_per_key = _length
return length_per_key
def _maybe_compute_offset_per_key(
keys: List[str],
stride: int,
stride_per_key: List[int],
variable_stride_per_key: bool,
length_per_key: Optional[List[int]],
offset_per_key: Optional[List[int]],
lengths: Optional[torch.Tensor],
offsets: Optional[torch.Tensor],
values: Optional[torch.Tensor],
) -> Tuple[List[int], List[int]]:
if length_per_key is None:
_length_per_key: List[int] = _maybe_compute_length_per_key(
keys=keys,
stride=stride,
stride_per_key=stride_per_key,
variable_stride_per_key=variable_stride_per_key,
length_per_key=length_per_key,
lengths=lengths,
offsets=offsets,
values=values,
)
return _length_per_key, _cumsum(_length_per_key)
elif offset_per_key is None:
return length_per_key, _cumsum(length_per_key)
else:
return length_per_key, offset_per_key | null |
9,108 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _get_weights_or_throw(weights: Optional[torch.Tensor]) -> torch.Tensor:
def _jagged_values_string(
values: torch.Tensor,
offsets: torch.Tensor,
offset_start: int,
offset_end: int,
) -> str:
def _jagged_tensor_string(
key: str,
values: torch.Tensor,
weights: Optional[torch.Tensor],
offsets: torch.Tensor,
offset_start: int,
offset_end: int,
) -> str:
if weights is None:
return '"{}": '.format(key) + _jagged_values_string(
values, offsets, offset_start, offset_end
)
return (
'"{}"'.format(key)
+ ': {\n "values": '
+ _jagged_values_string(values, offsets, offset_start, offset_end)
+ ',\n "weights": '
+ _jagged_values_string(
_get_weights_or_throw(weights), offsets, offset_start, offset_end
)
+ "\n }"
) | null |
9,109 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def is_torchdynamo_compiling() -> bool: # type: ignore[misc]
return False
def _batched_lengths_to_offsets(lengths: torch.Tensor) -> torch.Tensor:
(f, b) = lengths.shape
offsets_0 = lengths.new_zeros((f, 1))
offsets_1 = torch.cumsum(lengths, dim=-1).to(lengths.dtype)
offsets = torch.cat([offsets_0, offsets_1], dim=-1)
return offsets
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
Represents an (optionally weighted) jagged tensor.
A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. See `KeyedJaggedTensor` for full example.
Implementation is torch.jit.script-able.
NOTE:
We will NOT do input validation as it's expensive, you should always pass in the
valid lengths, offsets, etc.
Args:
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
"""
_fields = ["_values", "_weights", "_lengths", "_offsets"]
def __init__(
self,
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
) -> None:
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
_assert_offsets_or_lengths_is_provided(offsets, lengths)
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "JaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return JaggedTensor(
values=torch.empty(0, dtype=values_dtype, device=device),
offsets=torch.empty(0, dtype=lengths_dtype, device=device),
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
weights=weights,
)
def from_dense_lengths(
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` is still of shape (B,).
"""
mask2d = (
_arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
) < lengths.unsqueeze(-1)
return JaggedTensor(
values=values[mask2d],
weights=_optional_mask(weights, mask2d),
lengths=lengths,
)
def from_dense(
values: List[torch.Tensor],
weights: Optional[List[torch.Tensor]] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` and `offsets` are still of shape (B,).
Args:
values (List[torch.Tensor]): a list of tensors for dense representation
weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
the same shape as values.
Returns:
JaggedTensor: JaggedTensor created from 2D dense tensor.
Example::
values = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
weights = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
j1 = JaggedTensor.from_dense(
values=values,
weights=weights,
)
# j1 = [[1.0], [], [7.0], [8.0], [10.0, 11.0, 12.0]]
"""
values_tensor = torch.cat(values, dim=0)
lengths = torch.tensor(
[value.size(0) for value in values],
dtype=torch.int32,
device=values_tensor.device,
)
weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
return JaggedTensor(
values=values_tensor,
weights=weights_tensor,
lengths=lengths,
)
def to_dense(self) -> List[torch.Tensor]:
"""
Constructs a dense-representation of the JT's values.
Returns:
List[torch.Tensor]: list of tensors.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
values_list = jt.to_dense()
# values_list = [
# torch.tensor([1.0, 2.0]),
# torch.tensor([]),
# torch.tensor([3.0]),
# torch.tensor([4.0]),
# torch.tensor([5.0]),
# torch.tensor([6.0, 7.0, 8.0]),
# ]
"""
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.values()[offset:next_offset])
return tensor_list
def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
"""
Constructs a dense-representation of the JT's weights.
Returns:
Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
weights_list = jt.to_dense_weights()
# weights_list = [
# torch.tensor([0.1, 0.2]),
# torch.tensor([]),
# torch.tensor([0.3]),
# torch.tensor([0.4]),
# torch.tensor([0.5]),
# torch.tensor([0.6, 0.7, 0.8]),
# ]
"""
if self.weights_or_none() is None:
return None
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.weights()[offset:next_offset])
return tensor_list
def to_padded_dense(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> torch.Tensor:
"""
Constructs a 2D dense tensor from the JT's values of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
torch.Tensor: 2d dense tensor.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
dt = jt.to_padded_dense(
desired_length=2,
padding_value=10.0,
)
# dt = [
# [1.0, 2.0],
# [10.0, 10.0],
# [3.0, 10.0],
# [4.0, 10.0],
# [5.0, 10.0],
# [6.0, 7.0],
# ]
"""
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.values(), [self.offsets()], [N], padding_value
)
def to_padded_dense_weights(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> Optional[torch.Tensor]:
"""
Constructs a 2D dense tensor from the JT's weights of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
d_wt = jt.to_padded_dense_weights(
desired_length=2,
padding_value=1.0,
)
# d_wt = [
# [0.1, 0.2],
# [1.0, 1.0],
# [0.3, 1.0],
# [0.4, 1.0],
# [0.5, 1.0],
# [0.6, 0.7],
# ]
"""
if self.weights_or_none() is None:
return None
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.weights(), [self.offsets()], [N], padding_value
)
def lengths(self) -> torch.Tensor:
_lengths = _maybe_compute_lengths(self._lengths, self._offsets)
self._lengths = _lengths
return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
return self._lengths
def offsets(self) -> torch.Tensor:
_offsets = _maybe_compute_offsets(self._lengths, self._offsets)
self._offsets = _offsets
return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
return self._offsets
def values(self) -> torch.Tensor:
return self._values
def weights(self) -> torch.Tensor:
return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
return self._weights
def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
return JaggedTensor(
values=self._values.to(device, non_blocking=non_blocking),
weights=(
weights.to(device, non_blocking=non_blocking)
if weights is not None
else None
),
lengths=(
lengths.to(device, non_blocking=non_blocking)
if lengths is not None
else None
),
offsets=(
offsets.to(device, non_blocking=non_blocking)
if offsets is not None
else None
),
)
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
def __str__(self) -> str:
offsets = self.offsets()
if self._weights is None:
return (
"JaggedTensor({\n "
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ "\n})\n"
)
return (
"JaggedTensor({\n"
+ ' "values": '
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ ',\n "weights": '
+ _jagged_values_string(
_get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
)
+ "\n})\n"
)
def _maybe_compute_kjt_to_jt_dict(
    stride: int,
    stride_per_key: List[int],
    keys: List[str],
    length_per_key: List[int],
    values: torch.Tensor,
    lengths: torch.Tensor,
    variable_stride_per_key: bool,
    weights: Optional[torch.Tensor],
    jt_dict: Optional[Dict[str, JaggedTensor]],
) -> Dict[str, JaggedTensor]:
    """Lazily build the per-key ``JaggedTensor`` view of a KJT's flat tensors.

    Returns the cached ``jt_dict`` when already populated; otherwise splits
    ``values`` (and ``weights``, when present) by ``length_per_key`` and the
    flat ``lengths`` by stride, producing one ``JaggedTensor`` per key. An
    empty ``length_per_key`` yields an empty dict.
    """
    if not length_per_key:
        return {}
    if jt_dict is None:
        _jt_dict: Dict[str, JaggedTensor] = {}
        if not torch.jit.is_scripting() and is_torchdynamo_compiling():
            # Under dynamo, assert that the per-key lengths tile the values
            # tensor exactly, so symbolic shapes validate the split below.
            cat_size = 0
            total_size = values.size(0)
            for i in length_per_key:
                cat_size += i
                torch._check(cat_size <= total_size)
            torch._check(cat_size == total_size)
        values_list = torch.split(values, length_per_key)
        if variable_stride_per_key:
            # Each key owns a contiguous run of stride_per_key[i] lengths;
            # fbgemm's "complete cumsum" turns each run into offsets
            # (presumably including the leading zero -- confirm vs fbgemm docs).
            split_lengths = torch.split(lengths, stride_per_key)
            split_offsets = [
                torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
                for lengths in split_lengths
            ]
        else:
            # Uniform stride: view lengths as (num_keys, stride) and peel off
            # one row of lengths/offsets per key. Empty lengths tensors are
            # passed through untouched to avoid a zero-element view.
            split_lengths = torch.unbind(
                lengths.view(-1, stride) if lengths.numel() != 0 else lengths, dim=0
            )
            split_offsets = torch.unbind(
                (
                    _batched_lengths_to_offsets(lengths.view(-1, stride))
                    if lengths.numel() != 0
                    else lengths
                ),
                dim=0,
            )
        if weights is not None:
            # Weighted path: weights are split exactly like values.
            weights_list = torch.split(weights, length_per_key)
            for idx, key in enumerate(keys):
                length = split_lengths[idx]
                offset = split_offsets[idx]
                _jt_dict[key] = JaggedTensor(
                    lengths=length,
                    offsets=offset,
                    values=values_list[idx],
                    weights=weights_list[idx],
                )
        else:
            for idx, key in enumerate(keys):
                length = split_lengths[idx]
                offset = split_offsets[idx]
                _jt_dict[key] = JaggedTensor(
                    lengths=length,
                    offsets=offset,
                    values=values_list[idx],
                )
        jt_dict = _jt_dict
    return jt_dict
9,110 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _merge_weights_or_none(
a_weights: Optional[torch.Tensor],
b_weights: Optional[torch.Tensor],
) -> Optional[torch.Tensor]:
assert not (
(a_weights is None) ^ (b_weights is None)
), "Can only merge weighted or unweighted KJTs."
if a_weights is None:
return None
# pyre-ignore[6]
return torch.cat([a_weights, b_weights], dim=0) | null |
9,111 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
def _sum_by_splits(input_list: List[int], splits: List[int]) -> List[int]:
return [
sum(input_list[sum(splits[:i]) : sum(splits[:i]) + n])
for i, n in enumerate(splits)
] | null |
9,112 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
Represents an (optionally weighted) jagged tensor.
A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. See `KeyedJaggedTensor` for full example.
Implementation is torch.jit.script-able.
NOTE:
We will NOT do input validation as it's expensive, you should always pass in the
valid lengths, offsets, etc.
Args:
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
"""
_fields = ["_values", "_weights", "_lengths", "_offsets"]
def __init__(
self,
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
) -> None:
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
_assert_offsets_or_lengths_is_provided(offsets, lengths)
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "JaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return JaggedTensor(
values=torch.empty(0, dtype=values_dtype, device=device),
offsets=torch.empty(0, dtype=lengths_dtype, device=device),
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
weights=weights,
)
def from_dense_lengths(
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` is still of shape (B,).
"""
mask2d = (
_arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
) < lengths.unsqueeze(-1)
return JaggedTensor(
values=values[mask2d],
weights=_optional_mask(weights, mask2d),
lengths=lengths,
)
def from_dense(
values: List[torch.Tensor],
weights: Optional[List[torch.Tensor]] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` and `offsets` are still of shape (B,).
Args:
values (List[torch.Tensor]): a list of tensors for dense representation
weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
the same shape as values.
Returns:
JaggedTensor: JaggedTensor created from 2D dense tensor.
Example::
values = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
weights = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
j1 = JaggedTensor.from_dense(
values=values,
weights=weights,
)
# j1 = [[1.0], [], [7.0], [8.0], [10.0, 11.0, 12.0]]
"""
values_tensor = torch.cat(values, dim=0)
lengths = torch.tensor(
[value.size(0) for value in values],
dtype=torch.int32,
device=values_tensor.device,
)
weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
return JaggedTensor(
values=values_tensor,
weights=weights_tensor,
lengths=lengths,
)
def to_dense(self) -> List[torch.Tensor]:
"""
Constructs a dense-representation of the JT's values.
Returns:
List[torch.Tensor]: list of tensors.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
values_list = jt.to_dense()
# values_list = [
# torch.tensor([1.0, 2.0]),
# torch.tensor([]),
# torch.tensor([3.0]),
# torch.tensor([4.0]),
# torch.tensor([5.0]),
# torch.tensor([6.0, 7.0, 8.0]),
# ]
"""
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.values()[offset:next_offset])
return tensor_list
def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
"""
Constructs a dense-representation of the JT's weights.
Returns:
Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
weights_list = jt.to_dense_weights()
# weights_list = [
# torch.tensor([0.1, 0.2]),
# torch.tensor([]),
# torch.tensor([0.3]),
# torch.tensor([0.4]),
# torch.tensor([0.5]),
# torch.tensor([0.6, 0.7, 0.8]),
# ]
"""
if self.weights_or_none() is None:
return None
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.weights()[offset:next_offset])
return tensor_list
def to_padded_dense(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> torch.Tensor:
"""
Constructs a 2D dense tensor from the JT's values of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
torch.Tensor: 2d dense tensor.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
dt = jt.to_padded_dense(
desired_length=2,
padding_value=10.0,
)
# dt = [
# [1.0, 2.0],
# [10.0, 10.0],
# [3.0, 10.0],
# [4.0, 10.0],
# [5.0, 10.0],
# [6.0, 7.0],
# ]
"""
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.values(), [self.offsets()], [N], padding_value
)
def to_padded_dense_weights(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> Optional[torch.Tensor]:
"""
Constructs a 2D dense tensor from the JT's weights of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
d_wt = jt.to_padded_dense_weights(
desired_length=2,
padding_value=1.0,
)
# d_wt = [
# [0.1, 0.2],
# [1.0, 1.0],
# [0.3, 1.0],
# [0.4, 1.0],
# [0.5, 1.0],
# [0.6, 0.7],
# ]
"""
if self.weights_or_none() is None:
return None
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.weights(), [self.offsets()], [N], padding_value
)
def lengths(self) -> torch.Tensor:
_lengths = _maybe_compute_lengths(self._lengths, self._offsets)
self._lengths = _lengths
return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
return self._lengths
def offsets(self) -> torch.Tensor:
_offsets = _maybe_compute_offsets(self._lengths, self._offsets)
self._offsets = _offsets
return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
return self._offsets
def values(self) -> torch.Tensor:
return self._values
def weights(self) -> torch.Tensor:
return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
return self._weights
def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
return JaggedTensor(
values=self._values.to(device, non_blocking=non_blocking),
weights=(
weights.to(device, non_blocking=non_blocking)
if weights is not None
else None
),
lengths=(
lengths.to(device, non_blocking=non_blocking)
if lengths is not None
else None
),
offsets=(
offsets.to(device, non_blocking=non_blocking)
if offsets is not None
else None
),
)
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
def __str__(self) -> str:
offsets = self.offsets()
if self._weights is None:
return (
"JaggedTensor({\n "
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ "\n})\n"
)
return (
"JaggedTensor({\n"
+ ' "values": '
+ _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
+ ',\n "weights": '
+ _jagged_values_string(
_get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
)
+ "\n})\n"
)
def _force_length_offset_computation(
kjt: Union["KeyedJaggedTensor", "JaggedTensor"]
) -> None:
"""Helper function to force length/offset computation for KJT or JT
Mainly used for testing equality, as equal KJT's/JT's can be formed from just using lengths or offsets.
One can be derived from the other so to ensure properly equality checking we force the computation of
the other attribute if it can be done.
"""
offsets = kjt.offsets_or_none()
lengths = kjt.lengths_or_none()
if offsets is not None and lengths is None:
kjt.lengths()
elif lengths is not None and offsets is None:
kjt.offsets()
def _check_attributes(
attr_1: Union[torch.Tensor, List[int], List[str], int, None],
attr_2: Union[torch.Tensor, List[int], List[str], int, None],
comparison_func: Callable[[Any, Any], bool], # pyre-ignore[2]
) -> bool:
"""Helper function to check if two attributes are equal.
Args:
attr_1: The first attribute.
attr_2: The second attribute.
comparison_func (function): Function to compare the attributes.
Returns:
bool: False if the attributes are not equal or one is None while the other isn't, otherwise True.
"""
if attr_1 is not None and attr_2 is not None:
# allclose throws error for different tensor sizes, we check manually for this
if (
comparison_func == torch.allclose
and attr_1.size() != attr_2.size() # pyre-ignore[16]
):
return False
if not comparison_func(attr_1, attr_2):
return False
elif attr_1 is not None or attr_2 is not None:
return False
return True
The code above provides the dependencies needed to implement the `jt_is_equal` function. Write a Python function with the signature `def jt_is_equal(jt_1: "JaggedTensor", jt_2: "JaggedTensor") -> bool` that solves the following problem:
Check whether two JaggedTensors are equal by comparing their internal representations (values, weights, lengths, and offsets). The comparison is performed directly on those internal fields; for the optional fields, two `None` values are treated as equal. Args: jt_1 (JaggedTensor): the first JaggedTensor. jt_2 (JaggedTensor): the second JaggedTensor. Returns: bool: True if both JaggedTensors hold the same values.
Here is the function:
def jt_is_equal(jt_1: "JaggedTensor", jt_2: "JaggedTensor") -> bool:
    """Check whether two JaggedTensors hold the same data.

    Compares the internal representations (values, weights, lengths, offsets)
    directly; for the optional fields, two ``None`` values count as equal.

    Args:
        jt_1 (JaggedTensor): the first JaggedTensor
        jt_2 (JaggedTensor): the second JaggedTensor

    Returns:
        bool: True if both JaggedTensors have the same values
    """
    if not (isinstance(jt_1, JaggedTensor) and isinstance(jt_2, JaggedTensor)):
        return False
    if not _check_attributes(jt_1.values(), jt_2.values(), torch.allclose):
        return False
    # Lengths and offsets are interchangeable representations; materialize
    # both on each side so the field-by-field comparison is apples-to-apples.
    for jt in (jt_1, jt_2):
        _force_length_offset_computation(jt)
    optional_pairs = (
        (jt_1.weights_or_none(), jt_2.weights_or_none()),
        (jt_1.lengths_or_none(), jt_2.lengths_or_none()),
        (jt_1.offsets_or_none(), jt_2.offsets_or_none()),
    )
    return all(
        _check_attributes(
            lhs, rhs, torch.allclose if isinstance(lhs, torch.Tensor) else operator.eq
        )
        for lhs, rhs in optional_pairs
    )
9,113 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _force_length_offset_computation(
kjt: Union["KeyedJaggedTensor", "JaggedTensor"]
) -> None:
"""Helper function to force length/offset computation for KJT or JT
Mainly used for testing equality, as equal KJT's/JT's can be formed from just using lengths or offsets.
One can be derived from the other so to ensure properly equality checking we force the computation of
the other attribute if it can be done.
"""
offsets = kjt.offsets_or_none()
lengths = kjt.lengths_or_none()
if offsets is not None and lengths is None:
kjt.lengths()
elif lengths is not None and offsets is None:
kjt.offsets()
def _check_attributes(
attr_1: Union[torch.Tensor, List[int], List[str], int, None],
attr_2: Union[torch.Tensor, List[int], List[str], int, None],
comparison_func: Callable[[Any, Any], bool], # pyre-ignore[2]
) -> bool:
"""Helper function to check if two attributes are equal.
Args:
attr_1: The first attribute.
attr_2: The second attribute.
comparison_func (function): Function to compare the attributes.
Returns:
bool: False if the attributes are not equal or one is None while the other isn't, otherwise True.
"""
if attr_1 is not None and attr_2 is not None:
# allclose throws error for different tensor sizes, we check manually for this
if (
comparison_func == torch.allclose
and attr_1.size() != attr_2.size() # pyre-ignore[16]
):
return False
if not comparison_func(attr_1, attr_2):
return False
elif attr_1 is not None or attr_2 is not None:
return False
return True
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        """See the class docstring for argument semantics.

        `keys` and `values` (plus `lengths` or `offsets`) are the required
        representation; the remaining arguments pre-populate lazily computed
        caches so a constructed KJT can carry them across copies.
        """
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer tensors (or empty) — validated eagerly.
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            # Variable-stride mode: per-key strides are authoritative; a scalar
            # `stride` would be ambiguous, so passing both is an error.
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # All keys happen to share one total stride; expose it as scalar.
                self._stride = self.stride_per_key()[0]
        else:
            # Fixed-stride mode: derive stride from lengths/offsets when not given.
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def concat(
kjt_list: List["KeyedJaggedTensor"],
) -> "KeyedJaggedTensor":
if len(kjt_list) == 0:
raise ValueError("Can't concat empty KJT list")
is_weighted: bool = kjt_list[0].weights_or_none() is not None
has_length_per_key: bool = True
length_per_key: List[int] = []
keys: List[str] = []
value_list: List[torch.Tensor] = []
weight_list: List[torch.Tensor] = []
length_list: List[torch.Tensor] = []
stride_per_key_per_rank: List[List[int]] = []
stride: Optional[int] = None
variable_stride_per_key_list = [
kjt.variable_stride_per_key() for kjt in kjt_list
]
assert all(variable_stride_per_key_list) or not any(
variable_stride_per_key_list
), "variable stride per key must be consistent for all KJTs"
variable_stride_per_key = all(variable_stride_per_key_list)
for kjt in kjt_list:
curr_is_weighted: bool = kjt.weights_or_none() is not None
if is_weighted != curr_is_weighted:
raise ValueError("Can't merge weighted KJT with unweighted KJT")
_length_per_key: Optional[List[int]] = None
if kjt._length_per_key is None:
has_length_per_key = False
else:
_length_per_key = kjt._length_per_key
if has_length_per_key and _length_per_key is not None:
length_per_key += _length_per_key
keys += kjt.keys()
value_list.append(kjt.values())
if is_weighted:
weight_list.append(kjt.weights())
length_list.append(kjt.lengths())
if variable_stride_per_key:
stride_per_key_per_rank += kjt.stride_per_key_per_rank()
elif stride is None:
stride = kjt.stride()
else:
assert stride == kjt.stride(), "strides must be consistent for all KJTs"
return KeyedJaggedTensor(
keys=keys,
values=torch.cat(value_list, dim=0),
weights=torch.cat(weight_list, dim=0) if is_weighted else None,
lengths=torch.cat(length_list, dim=0),
stride=stride,
stride_per_key_per_rank=(
stride_per_key_per_rank if variable_stride_per_key else None
),
length_per_key=length_per_key if has_length_per_key else None,
)
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return KeyedJaggedTensor(
keys=torch.jit.annotate(List[str], []),
values=torch.empty(0, dtype=values_dtype, device=device),
weights=weights,
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
stride=0,
)
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, kjt.stride_per_key_per_rank())
if kjt.variable_stride_per_key()
else (kjt.stride(), None)
)
return KeyedJaggedTensor(
keys=[],
values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
weights=(
None
if kjt.weights_or_none() is None
else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
),
lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
)
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
"""
Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
but this function will ONLY work if the JaggedTensors all
have the same "implicit" batch_size dimension.
Basically, we can visualize JaggedTensors as 2-D tensors
of the format of [batch_size x variable_feature_dim].
In case, we have some batch without a feature value,
the input JaggedTensor could just not include any values.
But KeyedJaggedTensor (by default) typically pad "None"
so that all the JaggedTensors stored in the KeyedJaggedTensor
have the same batch_size dimension. That is, in the case,
the JaggedTensor input didn't automatically pad
for the empty batches, this function would error / not work.
Consider the visualization of the following KeyedJaggedTensor:
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
Notice that the inputs for this KeyedJaggedTensor would have looked like:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
Now if the input jt_dict = {
# "Feature0" [V0,V1] [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
} and the "None" is left out from each JaggedTensor,
then this function would fail as we would not correctly
be able to pad "None" as it does not technically know
the correct batch / place to pad within the JaggedTensor.
Essentially, the lengths Tensor inferred by this function
would be [2, 1, 1, 1, 3] indicating variable batch_size
dim_1 violates the existing assumption / precondition
that KeyedJaggedTensor's should have fixed batch_size dimension.
"""
kjt_keys = list(jt_dict.keys())
kjt_vals_list: List[torch.Tensor] = []
kjt_lens_list: List[torch.Tensor] = []
kjt_weights_list: List[torch.Tensor] = []
stride_per_key: List[int] = []
for jt in jt_dict.values():
stride_per_key.append(len(jt.lengths()))
kjt_vals_list.append(jt.values())
kjt_lens_list.append(jt.lengths())
weight = jt.weights_or_none()
if weight is not None:
kjt_weights_list.append(weight)
kjt_vals = torch.concat(kjt_vals_list)
kjt_lens = torch.concat(kjt_lens_list)
kjt_weights = (
torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
)
kjt_stride, kjt_stride_per_key_per_rank = (
(stride_per_key[0], None)
if all(s == stride_per_key[0] for s in stride_per_key)
else (None, [[stride] for stride in stride_per_key])
)
kjt = KeyedJaggedTensor(
keys=kjt_keys,
values=kjt_vals,
weights=kjt_weights,
lengths=kjt_lens,
stride=kjt_stride,
stride_per_key_per_rank=kjt_stride_per_key_per_rank,
).sync()
return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly compute and cache length_per_key/offset_per_key; returns self."""
        self.length_per_key()
        self.offset_per_key()
        return self
    def unsync(self) -> "KeyedJaggedTensor":
        """Drop the cached length_per_key/offset_per_key; returns self."""
        self._length_per_key = None
        self._offset_per_key = None
        return self
    def device(self) -> torch.device:
        """Device of the values tensor."""
        return self._values.device
    def lengths(self) -> torch.Tensor:
        """Jagged slice lengths; derived from offsets on first use and cached."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Cached lengths, or None if neither provided nor computed yet."""
        return self._lengths
    def offsets(self) -> torch.Tensor:
        """Cumulative offsets; derived from lengths on first use and cached."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Cached offsets, or None if neither provided nor computed yet."""
        return self._offsets
    def keys(self) -> List[str]:
        """Keys of this KJT, in order."""
        return self._keys
    def values(self) -> torch.Tensor:
        """Flat values tensor (all keys concatenated)."""
        return self._values
    def weights(self) -> torch.Tensor:
        """Weights tensor; raises if this KJT is unweighted."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Weights tensor, or None if unweighted."""
        return self._weights
    def stride(self) -> int:
        """Batch size (examples per batch); see __init__ for how it is derived."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Total batch size per key (summed over ranks)."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Batch size per key per rank (outer list: keys, inner list: ranks)."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """True if this KJT was constructed with per-key strides."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices for expanding deduplicated output; raises if absent."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Inverse indices, or None if absent."""
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        """Mapping key -> position, computed once and cached."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Number of values per key, computed lazily and cached."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Cached length_per_key, or None if not yet computed."""
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """Start offset per key plus the final total, computed lazily and cached."""
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Cached offset_per_key, or None if not yet computed."""
        return self._offset_per_key
    def lengths_offset_per_key(self) -> List[int]:
        """Cumulative sum of stride_per_key — per-key boundaries into lengths()."""
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """Split this KJT along the key dimension.

        Args:
            segments (List[int]): number of keys in each output KJT, in order.

        Returns:
            List[KeyedJaggedTensor]: one KJT per segment; slices share storage
            with self where possible.
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # no torch slicing required — the segment is the whole KJT, so
                # reuse all tensors and caches directly.
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # Empty segment: emit a KJT with empty tensors of matching dtypes.
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                # General case: slice values/weights by value offsets and
                # lengths by per-key length boundaries.
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Return a new KJT with keys (and their data) reordered by `indices`.

        Args:
            indices (List[int]): permutation over current key positions.
            indices_tensor (Optional[torch.Tensor]): optional device-resident
                copy of `indices`; created here when not supplied.
            include_inverse_indices (bool): carry inverse indices to the result.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # Variable stride: permute lengths and values segment-wise, where
            # segment sizes are per-key strides / per-key lengths respectively.
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # Fixed stride: lengths form a (num_keys, stride) matrix, so the
            # fused fbgemm 2-D permute handles lengths/values/weights at once.
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
def __getitem__(self, key: str) -> JaggedTensor:
offset_per_key = self.offset_per_key()
index = self._key_indices()[key]
start_offset = offset_per_key[index]
end_offset = (
offset_per_key[index + 1]
if index + 1 < len(offset_per_key)
else start_offset
)
return JaggedTensor(
values=self._values[start_offset:end_offset],
weights=(
None
if self.weights_or_none() is None
else self.weights()[start_offset:end_offset]
),
lengths=self.lengths()[
self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
index + 1
]
],
offsets=None,
)
def to_dict(self) -> Dict[str, JaggedTensor]:
_jt_dict = _maybe_compute_kjt_to_jt_dict(
stride=self.stride(),
stride_per_key=self.stride_per_key(),
keys=self.keys(),
length_per_key=self.length_per_key(),
lengths=self.lengths(),
values=self.values(),
variable_stride_per_key=self.variable_stride_per_key(),
weights=self.weights_or_none(),
jt_dict=self._jt_dict,
)
self._jt_dict = _jt_dict
return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """Return a copy of this KJT on `device`.

        Args:
            device (torch.device): target device.
            non_blocking (bool): forwarded to torch.Tensor.to.
            dtype (Optional[torch.dtype]): if given, weights (only) are also
                cast to this dtype.

        Cached metadata (length/offset/index per key, jt_dict) is carried over.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            # Only weights honor the optional dtype cast.
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Render one line per key via _jagged_tensor_string."""
        # Precedence note: `a or b and c` parses as `a or (b and c)` — the
        # empty repr is used when there are no keys OR when neither offsets
        # nor lengths are available.
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    " "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
def pin_memory(self) -> "KeyedJaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
stride, stride_per_key_per_rank = (
(None, self._stride_per_key_per_rank)
if self.variable_stride_per_key()
else (self._stride, None)
)
inverse_indices = self._inverse_indices
if inverse_indices is not None:
inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
return KeyedJaggedTensor(
keys=self._keys,
values=self._values.pin_memory(),
weights=weights.pin_memory() if weights is not None else None,
lengths=lengths.pin_memory() if lengths is not None else None,
offsets=offsets.pin_memory() if offsets is not None else None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self._length_per_key,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
jt_dict=None,
inverse_indices=inverse_indices,
)
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
splits = [batch_size_per_split, length_per_split]
if self.variable_stride_per_key():
splits.append(key_splits)
if self.weights_or_none() is not None:
splits.append(length_per_split)
return splits
def dist_tensors(self) -> List[torch.Tensor]:
tensors = [self.lengths(), self.values()]
if self.variable_stride_per_key():
strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
tensors.append(strides)
if self.weights_or_none() is not None:
tensors.append(self.weights())
return tensors
    def dist_init(
        keys: List[str],
        tensors: List[torch.Tensor],
        variable_stride_per_key: bool,
        num_workers: int,
        recat: Optional[torch.Tensor],
        stride_per_rank: Optional[List[int]],
        stagger: int = 1,
    ) -> "KeyedJaggedTensor":
        """Reassemble a KJT from raw tensors received in an all2all exchange.

        Args:
            keys (List[str]): keys of the resulting KJT.
            tensors (List[torch.Tensor]): [lengths, values] plus optional
                strides (variable-stride mode) and optional weights, matching
                the order of dist_labels()/dist_tensors().
            variable_stride_per_key (bool): whether per-key strides were sent.
            num_workers (int): number of ranks that contributed data.
            recat (Optional[torch.Tensor]): optional permutation regrouping
                the received data per key; skipped when None/empty.
            stride_per_rank (Optional[List[int]]): per-rank batch sizes
                (fixed-stride mode only).
            stagger (int): regrouping factor for staggered sharding.
        """
        assert len(tensors) in [2, 3, 4]
        lengths = tensors[0]
        values = tensors[1]
        stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
        # Weights are present when the tensor count exceeds the base layout.
        weights = (
            tensors[-1]
            if (variable_stride_per_key and len(tensors) == 4)
            or (not variable_stride_per_key and len(tensors) == 3)
            else None
        )
        if variable_stride_per_key:
            assert stride_per_rank_per_key is not None
            # Received strides are rank-major; transpose to key-major layout.
            stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
                num_workers, len(keys)
            ).T.tolist()
            strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
                stride_per_rank_per_key
            ).tolist()
            cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            # Per-(rank,key) value counts, derived from the lengths cumsum at
            # per-segment boundaries.
            length_per_key = (
                cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
            )
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    lengths, _ = _permute_tensor_by_segments(
                        lengths,
                        stride_per_rank_per_key,
                        recat,
                        None,
                    )
                    values, weights = _permute_tensor_by_segments(
                        values,
                        length_per_key,
                        recat,
                        weights,
                    )
            if not stride_per_key_per_rank:
                stride_per_key_per_rank = [[0]] * len(keys)
            if stagger > 1:
                # Undo staggered sharding: regroup each key's per-rank strides.
                stride_per_key_per_rank_stagger: List[List[int]] = []
                local_world_size = num_workers // stagger
                for i in range(len(keys)):
                    stride_per_rank_stagger: List[int] = []
                    for j in range(local_world_size):
                        stride_per_rank_stagger.extend(
                            stride_per_key_per_rank[i][j::local_world_size]
                        )
                    stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
                stride_per_key_per_rank = stride_per_key_per_rank_stagger
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride_per_key_per_rank=stride_per_key_per_rank,
            )
            return kjt.sync()
        else:
            assert stride_per_rank is not None
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    stride = stride_per_rank[0]
                    if all(s == stride for s in stride_per_rank):
                        # Uniform batch size per rank: fused 2-D permute.
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_2D_sparse_data(
                            recat,
                            lengths.view(-1, stride),
                            values,
                            weights,
                            values.numel(),
                        )
                        lengths = lengths.view(-1)
                    else:  # variable batch size per rank
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_1D_sparse_data(
                            recat,
                            lengths.view(-1),
                            values,
                            weights,
                            values.numel(),
                        )
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride=sum(stride_per_rank),
            )
            return kjt.sync()
The preceding code provides the dependencies needed to implement the `kjt_is_equal` function. Write a Python function `def kjt_is_equal(kjt_1: "KeyedJaggedTensor", kjt_2: "KeyedJaggedTensor") -> bool` that solves the following problem:
This function checks whether two KeyedJaggedTensors are equal by comparing their internal representations. The comparison is done on the values of the internal representations themselves. For optional fields, None values are treated as equal. Keys are compared by ensuring that they have the same length and that corresponding keys appear in the same order with the same values. Args: kjt_1 (KeyedJaggedTensor): the first KeyedJaggedTensor kjt_2 (KeyedJaggedTensor): the second KeyedJaggedTensor Returns: bool: True if both KeyedJaggedTensors have the same values
Here is the function:
def kjt_is_equal(kjt_1: "KeyedJaggedTensor", kjt_2: "KeyedJaggedTensor") -> bool:
    """Check whether two KeyedJaggedTensors are equal by comparing their internal representations.

    The comparison is done on the values of the internal representations
    themselves; for optional fields, None values are treated as equal. Keys
    must have the same length, order, and values.

    Args:
        kjt_1 (KeyedJaggedTensor): the first KeyedJaggedTensor
        kjt_2 (KeyedJaggedTensor): the second KeyedJaggedTensor

    Returns:
        bool: True if both KeyedJaggedTensors have the same values
    """
    if not (
        isinstance(kjt_1, KeyedJaggedTensor) and isinstance(kjt_2, KeyedJaggedTensor)
    ):
        return False
    # Same key count, order and values (list equality covers all three).
    if kjt_1.keys() != kjt_2.keys():
        return False
    if not _check_attributes(kjt_1.values(), kjt_2.values(), torch.allclose):
        return False
    # Materialize lazily computed fields so the cached attributes are comparable.
    _force_length_offset_computation(kjt_1)
    _force_length_offset_computation(kjt_2)
    kjt_1.sync()
    kjt_2.sync()
    pairs = [
        (kjt_1.lengths_or_none(), kjt_2.lengths_or_none()),
        (kjt_1.weights_or_none(), kjt_2.weights_or_none()),
        (kjt_1.offsets_or_none(), kjt_2.offsets_or_none()),
        (kjt_1.length_per_key_or_none(), kjt_2.length_per_key_or_none()),
        (kjt_1.offset_per_key_or_none(), kjt_2.offset_per_key_or_none()),
        (kjt_1.stride(), kjt_2.stride()),
    ]
    for attr_1, attr_2 in pairs:
        comparator = (
            torch.allclose if isinstance(attr_1, torch.Tensor) else operator.eq
        )
        if not _check_attributes(attr_1, attr_2, comparator):
            return False
    return True
9,114 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
    # Load the fbgemm sparse kernels; the "//deeplearning/..." paths look like
    # fb-internal buck targets — presumably OSS builds register these ops on
    # import instead, which is why a failed load is tolerated (verify).
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
    pass
try:
    from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
    # Bug fix: the except clause had an empty body (a syntax error) — the
    # fallback definition was missing. Provide a stub so the name always
    # exists on torch builds without torch._dynamo.
    def is_torchdynamo_compiling() -> bool:  # type: ignore[misc]
        return False
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        """
        See the class docstring for argument semantics. `lengths` and `offsets`
        are redundant encodings of the jagged boundaries; whichever one is
        missing is computed lazily on first access (see `lengths()`/`offsets()`).
        """
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # Jagged boundary tensors must be integer-typed (or empty).
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            # Variable-stride mode: each key's stride is the sum over ranks.
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # All keys happen to share one stride; keep a scalar too.
                self._stride = self.stride_per_key()[0]
        else:
            # Fixed-stride mode: infer a single batch size when not provided.
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """
        Concatenate multiple KJTs along the key dimension.

        All inputs must agree on weightedness and on stride mode (all variable
        stride or all fixed stride); in fixed-stride mode the strides must be
        equal. Raises `ValueError` on an empty list or mixed weightedness.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")
        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                # A single input lacking a cached length_per_key disables the
                # cached value on the result (it would be incomplete).
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return KeyedJaggedTensor(
keys=torch.jit.annotate(List[str], []),
values=torch.empty(0, dtype=values_dtype, device=device),
weights=weights,
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
stride=0,
)
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, kjt.stride_per_key_per_rank())
if kjt.variable_stride_per_key()
else (kjt.stride(), None)
)
return KeyedJaggedTensor(
keys=[],
values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
weights=(
None
if kjt.weights_or_none() is None
else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
),
lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
)
    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
        """
        Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
        but this function will ONLY work if the JaggedTensors all
        have the same "implicit" batch_size dimension.
        Basically, we can visualize JaggedTensors as 2-D tensors
        of the format of [batch_size x variable_feature_dim].
        In case, we have some batch without a feature value,
        the input JaggedTensor could just not include any values.
        But KeyedJaggedTensor (by default) typically pad "None"
        so that all the JaggedTensors stored in the KeyedJaggedTensor
        have the same batch_size dimension. That is, in the case,
        the JaggedTensor input didn't automatically pad
        for the empty batches, this function would error / not work.
        Consider the visualization of the following KeyedJaggedTensor:
        #              0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0
        Notice that the inputs for this KeyedJaggedTensor would have looked like:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
        Now if the input jt_dict = {
            # "Feature0"   [V0,V1] [V2]
            # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        } and the "None" is left out from each JaggedTensor,
        then this function would fail as we would not correctly
        be able to pad "None" as it does not technically know
        the correct batch / place to pad within the JaggedTensor.
        Essentially, the lengths Tensor inferred by this function
        would be [2, 1, 1, 1, 3] indicating variable batch_size
        dim_1 violates the existing assumption / precondition
        that KeyedJaggedTensor's should have fixed batch_size dimension.
        """
        kjt_keys = list(jt_dict.keys())
        kjt_vals_list: List[torch.Tensor] = []
        kjt_lens_list: List[torch.Tensor] = []
        kjt_weights_list: List[torch.Tensor] = []
        stride_per_key: List[int] = []
        for jt in jt_dict.values():
            # The per-key batch size is the number of length entries of each JT.
            stride_per_key.append(len(jt.lengths()))
            kjt_vals_list.append(jt.values())
            kjt_lens_list.append(jt.lengths())
            weight = jt.weights_or_none()
            if weight is not None:
                kjt_weights_list.append(weight)
        kjt_vals = torch.concat(kjt_vals_list)
        kjt_lens = torch.concat(kjt_lens_list)
        kjt_weights = (
            torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
        )
        # Uniform batch size -> fixed-stride KJT; otherwise fall back to
        # variable-stride with a single-rank stride list per key.
        kjt_stride, kjt_stride_per_key_per_rank = (
            (stride_per_key[0], None)
            if all(s == stride_per_key[0] for s in stride_per_key)
            else (None, [[stride] for stride in stride_per_key])
        )
        kjt = KeyedJaggedTensor(
            keys=kjt_keys,
            values=kjt_vals,
            weights=kjt_weights,
            lengths=kjt_lens,
            stride=kjt_stride,
            stride_per_key_per_rank=kjt_stride_per_key_per_rank,
        ).sync()
        return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly compute and cache `length_per_key` and `offset_per_key`; returns self."""
        self.length_per_key()
        self.offset_per_key()
        return self
    def unsync(self) -> "KeyedJaggedTensor":
        """Drop the cached per-key lengths/offsets so they are recomputed lazily; returns self."""
        self._length_per_key = None
        self._offset_per_key = None
        return self
    def device(self) -> torch.device:
        """Return the device of the underlying values tensor."""
        return self._values.device
    def lengths(self) -> torch.Tensor:
        """Return the lengths tensor, deriving (and caching) it from offsets if unset."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Return the lengths tensor without triggering computation (may be None)."""
        return self._lengths
    def offsets(self) -> torch.Tensor:
        """Return the offsets tensor, deriving (and caching) it from lengths if unset."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Return the offsets tensor without triggering computation (may be None)."""
        return self._offsets
    def keys(self) -> List[str]:
        """Return the list of keys."""
        return self._keys
    def values(self) -> torch.Tensor:
        """Return the flat values tensor (all keys concatenated)."""
        return self._values
    def weights(self) -> torch.Tensor:
        """Return the weights tensor; raises if this KJT is unweighted."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Return the weights tensor, or None for an unweighted KJT."""
        return self._weights
    def stride(self) -> int:
        """Return the batch size (examples per batch); see `__init__` for how it is set."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Return the per-key batch size (summed over ranks in variable-stride mode)."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Return the per-key, per-rank batch sizes (outer list: keys; inner: ranks)."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """Return True if this KJT was built with `stride_per_key_per_rank`."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Return the inverse indices; raises if they were not provided."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Return the inverse indices, or None if not provided."""
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        """Return (and cache) the mapping from key name to its position in `keys()`."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Return (and cache) the number of values belonging to each key."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Return the cached per-key lengths without triggering computation."""
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """
        Return (and cache) each key's start offset into `values()`, plus the
        final end offset (list has len(keys) + 1 entries; see class docstring).
        Also refreshes the cached `length_per_key` as a by-product.
        """
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Return the cached per-key offsets without triggering computation."""
        return self._offset_per_key
    def lengths_offset_per_key(self) -> List[int]:
        """
        Return (and cache) the cumulative sum of `stride_per_key()`: the start
        index of each key's entries within the flattened lengths tensor
        (used by `split()` and `__getitem__` to slice `lengths()`).
        """
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """
        Split this KJT into consecutive key groups of the given sizes.

        Args:
            segments: number of keys per output KJT; assumed to sum to
                len(keys) — TODO confirm with callers, it is not validated here.

        Returns:
            one KJT per segment, slicing values/weights/lengths accordingly.
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # no torch slicing required
                # Whole-KJT segment: reuse the underlying tensors and all caches.
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # Zero-key segment: build an empty KJT on the same device.
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                # Proper sub-segment: slice values/weights by value offsets and
                # lengths by per-key row offsets.
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """
        Return a new KJT with keys reordered according to `indices`.

        Args:
            indices: permutation of key positions; output key i is
                `self.keys()[indices[i]]`.
            indices_tensor: optional pre-built tensor of `indices` (avoids a
                host-to-device copy if the caller already has one).
            include_inverse_indices: whether to carry over this KJT's inverse
                indices to the result.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # Variable stride: permute lengths by per-key row counts and
            # values/weights by per-key value counts, segment by segment.
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # Fixed stride: lengths form a [num_keys, stride] grid, so the
            # fused fbgemm 2D permute handles lengths/values/weights at once.
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
    def __getitem__(self, key: str) -> JaggedTensor:
        """
        Return the `JaggedTensor` for `key` (raises `KeyError` for unknown keys),
        slicing values/weights by value offsets and lengths by row offsets.
        """
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # NOTE(review): offset_per_key normally has len(keys) + 1 entries, so
        # the fallback to start_offset looks defensive — confirm if reachable.
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )
    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Return (and cache) a dict mapping each key to its `JaggedTensor` view."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        """Mark all constituent tensors as in-use on `stream` (see `Tensor.record_stream`)."""
        self._values.record_stream(stream)
        # Rebinding the Optional attributes to locals narrows their type before
        # the None checks (TorchScript-friendly pattern used throughout this file).
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        if weights is not None:
            weights.record_stream(stream)
        if lengths is not None:
            lengths.record_stream(stream)
        if offsets is not None:
            offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """
        Return a copy of this KJT with all tensors moved to `device`.

        Args:
            device: target device.
            non_blocking: forwarded to each `Tensor.to` call.
            dtype: if given, applied to the weights tensor only; all other
                tensors keep their dtypes.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        # Cached derived fields are carried over unchanged (they are
        # device-independent Python lists/dicts), except jt_dict whose tensors
        # are reused as-is.
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Render one line per key via `_jagged_tensor_string`; empty KJTs render bare."""
        # Precedence note: parsed as `len(...) == 0 or (offsets is None and
        # lengths is None)` — empty when there are no keys, or when neither
        # jagged-boundary tensor is set.
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    "    "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
    def pin_memory(self) -> "KeyedJaggedTensor":
        """
        Return a copy of this KJT with all tensors in pinned host memory.
        The cached `jt_dict` is dropped (its tensors would not be pinned).
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.pin_memory(),
            weights=weights.pin_memory() if weights is not None else None,
            lengths=lengths.pin_memory() if lengths is not None else None,
            offsets=offsets.pin_memory() if offsets is not None else None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
            jt_dict=None,
            inverse_indices=inverse_indices,
        )
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
    def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
        """
        For a partition of keys into groups of sizes `key_splits`, return the
        per-group element counts for each tensor in `dist_tensors()`, in the
        same order as `dist_labels()`.
        """
        batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
        length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
        splits = [batch_size_per_split, length_per_split]
        if self.variable_stride_per_key():
            splits.append(key_splits)
        if self.weights_or_none() is not None:
            # weights align 1:1 with values, so they reuse the value splits
            splits.append(length_per_split)
        return splits
    def dist_tensors(self) -> List[torch.Tensor]:
        """Return the tensors to exchange in an all2all; order matches `dist_labels()`."""
        tensors = [self.lengths(), self.values()]
        if self.variable_stride_per_key():
            strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
            tensors.append(strides)
        if self.weights_or_none() is not None:
            tensors.append(self.weights())
        return tensors
    def dist_init(
        keys: List[str],
        tensors: List[torch.Tensor],
        variable_stride_per_key: bool,
        num_workers: int,
        recat: Optional[torch.Tensor],
        stride_per_rank: Optional[List[int]],
        stagger: int = 1,
    ) -> "KeyedJaggedTensor":
        """
        Reassemble a KJT from the tensors received in an all2all exchange.

        Args:
            keys: keys of the resulting KJT.
            tensors: [lengths, values] plus optional per-rank-per-key strides
                (variable-stride mode) and optional weights — see `dist_labels()`.
            variable_stride_per_key: selects between the two reassembly paths.
            num_workers: number of ranks that contributed data.
            recat: optional permutation tensor used to reorder the received
                lengths/values/weights; skipped when None or empty.
            stride_per_rank: per-rank batch sizes (fixed-stride mode only).
            stagger: >1 regroups `stride_per_key_per_rank` by interleaving
                ranks across `num_workers // stagger` local groups.
        """
        assert len(tensors) in [2, 3, 4]
        lengths = tensors[0]
        values = tensors[1]
        stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
        # weights are always last when present; position depends on mode.
        weights = (
            tensors[-1]
            if (variable_stride_per_key and len(tensors) == 4)
            or (not variable_stride_per_key and len(tensors) == 3)
            else None
        )
        if variable_stride_per_key:
            assert stride_per_rank_per_key is not None
            # Received strides are rank-major; transpose to key-major lists.
            stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
                num_workers, len(keys)
            ).T.tolist()
            strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
                stride_per_rank_per_key
            ).tolist()
            # Per-segment value counts, derived by differencing the lengths
            # cumsum at each segment boundary.
            cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            length_per_key = (
                cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
            )
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    lengths, _ = _permute_tensor_by_segments(
                        lengths,
                        stride_per_rank_per_key,
                        recat,
                        None,
                    )
                    values, weights = _permute_tensor_by_segments(
                        values,
                        length_per_key,
                        recat,
                        weights,
                    )
            if not stride_per_key_per_rank:
                stride_per_key_per_rank = [[0]] * len(keys)
            if stagger > 1:
                # Undo the staggered send order: within each key, regroup rank
                # entries by local group.
                stride_per_key_per_rank_stagger: List[List[int]] = []
                local_world_size = num_workers // stagger
                for i in range(len(keys)):
                    stride_per_rank_stagger: List[int] = []
                    for j in range(local_world_size):
                        stride_per_rank_stagger.extend(
                            stride_per_key_per_rank[i][j::local_world_size]
                        )
                    stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
                stride_per_key_per_rank = stride_per_key_per_rank_stagger
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride_per_key_per_rank=stride_per_key_per_rank,
            )
            return kjt.sync()
        else:
            assert stride_per_rank is not None
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    stride = stride_per_rank[0]
                    if all(s == stride for s in stride_per_rank):
                        # Uniform batch size: lengths form a 2D grid, use the
                        # fused 2D permute.
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_2D_sparse_data(
                            recat,
                            lengths.view(-1, stride),
                            values,
                            weights,
                            values.numel(),
                        )
                        lengths = lengths.view(-1)
                    else:  # variable batch size per rank
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_1D_sparse_data(
                            recat,
                            lengths.view(-1),
                            values,
                            weights,
                            values.numel(),
                        )
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride=sum(stride_per_rank),
            )
            return kjt.sync()
def _kjt_flatten(
    t: KeyedJaggedTensor,
) -> Tuple[List[Optional[torch.Tensor]], List[str]]:
    """Pytree flatten: the `_fields` tensors are the leaves; keys are the context."""
    return [getattr(t, a) for a in KeyedJaggedTensor._fields], t._keys
def _kjt_flatten_with_keys(
    t: KeyedJaggedTensor,
) -> Tuple[List[Tuple[KeyEntry, Optional[torch.Tensor]]], List[str]]:
    """
    Keyed variant of `_kjt_flatten` for pytree key-path APIs: pairs each
    required tensor with a `GetAttrKey` naming its source attribute.

    Note: the original line carried trailing table residue ("| null |") from a
    data dump, which broke the return statement; it is removed here.
    """
    values, context = _kjt_flatten(t)
    # pyre can't tell that GetAttrKey implements the KeyEntry protocol
    return [  # pyre-ignore[7]
        (GetAttrKey(k), v) for k, v in zip(KeyedJaggedTensor._fields, values)
    ], context
import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
    from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
    # Fallback body appears to have been stripped during extraction; without a
    # definition the bare `except` is a syntax error. Restore the standard
    # no-op shim used when torch._dynamo is unavailable.
    def is_torchdynamo_compiling() -> bool:
        return False
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
def __init__(
self,
keys: List[str],
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
# Below exposed to ensure torch.script-able
length_per_key: Optional[List[int]] = None,
offset_per_key: Optional[List[int]] = None,
index_per_key: Optional[Dict[str, int]] = None,
jt_dict: Optional[Dict[str, JaggedTensor]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> None:
self._keys: List[str] = keys
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
self._stride_per_key_per_rank: List[List[int]] = []
self._stride_per_key: List[int] = []
self._variable_stride_per_key: bool = False
self._stride: int = -1
if stride_per_key_per_rank is not None:
if stride is not None:
raise ValueError(
"Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
)
self._stride_per_key_per_rank = stride_per_key_per_rank
self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
self._variable_stride_per_key = True
if not stride_per_key_per_rank:
self._stride = 0
elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
self._stride = self.stride_per_key()[0]
else:
if torch.jit.is_tracing():
stride = _maybe_compute_stride_kjt_scripted(
keys, stride, lengths, offsets
)[0]
else:
stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
self._stride = stride
self._stride_per_key_per_rank = [[stride]] * len(self._keys)
self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
# lazy fields
self._length_per_key: Optional[List[int]] = length_per_key
self._offset_per_key: Optional[List[int]] = offset_per_key
self._index_per_key: Optional[Dict[str, int]] = index_per_key
self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
inverse_indices
)
self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """Concatenate several KJTs along the key dimension.

        All inputs must agree on weightedness and on whether they use
        variable stride per key; fixed-stride inputs must all share one
        stride. ``length_per_key`` is propagated only when every input
        already has it cached.

        NOTE(review): no ``self``/``cls`` — presumably a ``@staticmethod``
        whose decorator is outside this view; confirm.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")
        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        # all-or-nothing: mixing variable- and fixed-stride KJTs is invalid
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                # one input without the cache disables propagation overall
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "KeyedJaggedTensor":
        """Build a KJT with no keys and zero-length tensors (stride 0).

        NOTE(review): no ``self``/``cls`` — presumably ``@staticmethod``
        outside this view; confirm.
        """
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return KeyedJaggedTensor(
            # jit.annotate keeps the empty list typed for TorchScript
            keys=torch.jit.annotate(List[str], []),
            values=torch.empty(0, dtype=values_dtype, device=device),
            weights=weights,
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            stride=0,
        )
    def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
        """Build an empty KJT matching ``kjt``'s device, dtypes and stride mode.

        NOTE(review): no ``self``/``cls`` — presumably ``@staticmethod``
        outside this view; confirm.
        """
        stride, stride_per_key_per_rank = (
            (None, kjt.stride_per_key_per_rank())
            if kjt.variable_stride_per_key()
            else (kjt.stride(), None)
        )
        return KeyedJaggedTensor(
            keys=[],
            values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
            weights=(
                None
                if kjt.weights_or_none() is None
                else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
            ),
            lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
"""
Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
but this function will ONLY work if the JaggedTensors all
have the same "implicit" batch_size dimension.
Basically, we can visualize JaggedTensors as 2-D tensors
of the format of [batch_size x variable_feature_dim].
In case, we have some batch without a feature value,
the input JaggedTensor could just not include any values.
But KeyedJaggedTensor (by default) typically pad "None"
so that all the JaggedTensors stored in the KeyedJaggedTensor
have the same batch_size dimension. That is, in the case,
the JaggedTensor input didn't automatically pad
for the empty batches, this function would error / not work.
Consider the visualization of the following KeyedJaggedTensor:
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
Notice that the inputs for this KeyedJaggedTensor would have looked like:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
Now if the input jt_dict = {
# "Feature0" [V0,V1] [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
} and the "None" is left out from each JaggedTensor,
then this function would fail as we would not correctly
be able to pad "None" as it does not technically know
the correct batch / place to pad within the JaggedTensor.
Essentially, the lengths Tensor inferred by this function
would be [2, 1, 1, 1, 3] indicating variable batch_size
dim_1 violates the existing assumption / precondition
that KeyedJaggedTensor's should have fixed batch_size dimension.
"""
kjt_keys = list(jt_dict.keys())
kjt_vals_list: List[torch.Tensor] = []
kjt_lens_list: List[torch.Tensor] = []
kjt_weights_list: List[torch.Tensor] = []
stride_per_key: List[int] = []
for jt in jt_dict.values():
stride_per_key.append(len(jt.lengths()))
kjt_vals_list.append(jt.values())
kjt_lens_list.append(jt.lengths())
weight = jt.weights_or_none()
if weight is not None:
kjt_weights_list.append(weight)
kjt_vals = torch.concat(kjt_vals_list)
kjt_lens = torch.concat(kjt_lens_list)
kjt_weights = (
torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
)
kjt_stride, kjt_stride_per_key_per_rank = (
(stride_per_key[0], None)
if all(s == stride_per_key[0] for s in stride_per_key)
else (None, [[stride] for stride in stride_per_key])
)
kjt = KeyedJaggedTensor(
keys=kjt_keys,
values=kjt_vals,
weights=kjt_weights,
lengths=kjt_lens,
stride=kjt_stride,
stride_per_key_per_rank=kjt_stride_per_key_per_rank,
).sync()
return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly materialize the cached per-key length/offset lists; returns self."""
        self.length_per_key()
        self.offset_per_key()
        return self
    def unsync(self) -> "KeyedJaggedTensor":
        """Drop the cached per-key length/offset lists (inverse of ``sync``); returns self."""
        self._length_per_key = None
        self._offset_per_key = None
        return self
    def device(self) -> torch.device:
        """Device of the values tensor (all member tensors are assumed co-located)."""
        return self._values.device
def lengths(self) -> torch.Tensor:
_lengths = _maybe_compute_lengths(self._lengths, self._offsets)
self._lengths = _lengths
return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Cached lengths tensor, or None if not yet computed from offsets."""
        return self._lengths
def offsets(self) -> torch.Tensor:
_offsets = _maybe_compute_offsets(self._lengths, self._offsets)
self._offsets = _offsets
return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Cached offsets tensor, or None if not yet computed from lengths."""
        return self._offsets
    def keys(self) -> List[str]:
        """Feature keys, one per jagged tensor."""
        return self._keys
    def values(self) -> torch.Tensor:
        """Flat values tensor covering all keys."""
        return self._values
    def weights(self) -> torch.Tensor:
        """Weights tensor; delegates to a helper that (per its name) raises when absent."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Weights tensor, or None for an unweighted KJT."""
        return self._weights
    def stride(self) -> int:
        """Batch size (examples per key); resolved during __init__."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Total batch size per key (summed across ranks)."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Batch size per key (outer) per rank (inner)."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """True when this KJT was built with explicit stride_per_key_per_rank."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices; delegates to a helper that (per its name) raises when absent."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Inverse indices, or None if not provided at construction."""
        return self._inverse_indices
def _key_indices(self) -> Dict[str, int]:
_index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
self._keys,
self._index_per_key,
)
self._index_per_key = _index_per_key
return _index_per_key
    def length_per_key(self) -> List[int]:
        """Number of values per key; computed lazily by a module helper and cached."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key value counts, or None if not yet computed."""
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """Start offset of each key into ``values()`` plus a final end offset.

        The helper also produces length_per_key as a byproduct; both caches
        are refreshed together.
        """
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key offsets, or None if not yet computed."""
        return self._offset_per_key
def lengths_offset_per_key(self) -> List[int]:
if not self._lengths_offset_per_key:
self._lengths_offset_per_key = _cumsum(self.stride_per_key())
return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """Split this KJT into consecutive KJTs of ``segments[i]`` keys each.

        Fast paths: a segment covering all keys reuses the underlying
        tensors unchanged (including caches); an empty segment builds a
        zero-length KJT on the same device. The general path slices
        values/weights/lengths by per-key offsets.
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # no torch slicing required
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # empty segment: zero-length tensors on the same device
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Reorder (and/or subset) keys according to ``indices``.

        ``indices_tensor`` may be passed to avoid re-materializing the index
        tensor on device. Variable-stride KJTs are permuted segment-wise by a
        module helper; fixed-stride KJTs use the fbgemm 2D permute op.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            # lengths permuted by per-key batch segments, values by value counts
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
    def __getitem__(self, key: str) -> JaggedTensor:
        """Return the JaggedTensor for ``key`` as views into this KJT's tensors.

        Raises KeyError (from the index lookup) for an unknown key.
        """
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # defensive: fall back to an empty slice if the end offset is missing
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )
    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Convert to a key -> JaggedTensor dict; computed lazily by a helper and cached."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        """Mark all member tensors as used on ``stream`` (CUDA memory safety)."""
        self._values.record_stream(stream)
        # locals let TorchScript narrow the Optionals before the None checks
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        if weights is not None:
            weights.record_stream(stream)
        if lengths is not None:
            lengths.record_stream(stream)
        if offsets is not None:
            offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """Return a copy of this KJT with tensors moved to ``device``.

        ``dtype``, when given, is applied to the weights only. Cached
        per-key metadata (lists/dicts) is carried over unchanged.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Render each key's jagged slice; empty/uninitialized KJTs print a stub."""
        # note precedence: `or` of (no keys) with (offsets is None AND lengths is None)
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    "    "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
    def pin_memory(self) -> "KeyedJaggedTensor":
        """Return a copy with all member tensors in pinned host memory.

        The jt_dict cache is dropped (its tensors would not be pinned).
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.pin_memory(),
            weights=weights.pin_memory() if weights is not None else None,
            lengths=lengths.pin_memory() if lengths is not None else None,
            offsets=offsets.pin_memory() if offsets is not None else None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
            jt_dict=None,
            inverse_indices=inverse_indices,
        )
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
    def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
        """Element counts per key-group for each tensor in ``dist_tensors()``.

        Order matches ``dist_labels()``: lengths, values, then optionally
        strides and weights.
        """
        batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
        length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
        splits = [batch_size_per_split, length_per_split]
        if self.variable_stride_per_key():
            splits.append(key_splits)
        if self.weights_or_none() is not None:
            # weights are parallel to values, so they share value splits
            splits.append(length_per_split)
        return splits
    def dist_tensors(self) -> List[torch.Tensor]:
        """Tensors to exchange in a distributed all-to-all; order matches ``dist_labels()``."""
        tensors = [self.lengths(), self.values()]
        if self.variable_stride_per_key():
            strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
            tensors.append(strides)
        if self.weights_or_none() is not None:
            tensors.append(self.weights())
        return tensors
    def dist_init(
        keys: List[str],
        tensors: List[torch.Tensor],
        variable_stride_per_key: bool,
        num_workers: int,
        recat: Optional[torch.Tensor],
        stride_per_rank: Optional[List[int]],
        stagger: int = 1,
    ) -> "KeyedJaggedTensor":
        """Reassemble a KJT from tensors received in a distributed exchange.

        ``tensors`` is [lengths, values(, strides)(, weights)] in the order
        produced by ``dist_tensors()``. ``recat`` (when non-empty) permutes
        data from rank-major back to key-major layout; ``stagger`` reorders
        per-rank strides for staggered all-to-all schedules.

        NOTE(review): no ``self``/``cls`` — presumably ``@staticmethod``
        outside this view; confirm.
        """
        assert len(tensors) in [2, 3, 4]
        lengths = tensors[0]
        values = tensors[1]
        stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
        # weights are last, present only when the count says so for this mode
        weights = (
            tensors[-1]
            if (variable_stride_per_key and len(tensors) == 4)
            or (not variable_stride_per_key and len(tensors) == 3)
            else None
        )
        if variable_stride_per_key:
            assert stride_per_rank_per_key is not None
            # received layout is rank-major; transpose to key-major lists
            stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
                num_workers, len(keys)
            ).T.tolist()
            strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
                stride_per_rank_per_key
            ).tolist()
            cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            length_per_key = (
                cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
            )
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    lengths, _ = _permute_tensor_by_segments(
                        lengths,
                        stride_per_rank_per_key,
                        recat,
                        None,
                    )
                    values, weights = _permute_tensor_by_segments(
                        values,
                        length_per_key,
                        recat,
                        weights,
                    )
            if not stride_per_key_per_rank:
                stride_per_key_per_rank = [[0]] * len(keys)
            if stagger > 1:
                # undo the staggered rank ordering within each key
                stride_per_key_per_rank_stagger: List[List[int]] = []
                local_world_size = num_workers // stagger
                for i in range(len(keys)):
                    stride_per_rank_stagger: List[int] = []
                    for j in range(local_world_size):
                        stride_per_rank_stagger.extend(
                            stride_per_key_per_rank[i][j::local_world_size]
                        )
                    stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
                stride_per_key_per_rank = stride_per_key_per_rank_stagger
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride_per_key_per_rank=stride_per_key_per_rank,
            )
            return kjt.sync()
        else:
            assert stride_per_rank is not None
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    stride = stride_per_rank[0]
                    if all(s == stride for s in stride_per_rank):
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_2D_sparse_data(
                            recat,
                            lengths.view(-1, stride),
                            values,
                            weights,
                            values.numel(),
                        )
                        lengths = lengths.view(-1)
                    else:  # variable batch size per rank
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_1D_sparse_data(
                            recat,
                            lengths.view(-1),
                            values,
                            weights,
                            values.numel(),
                        )
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride=sum(stride_per_rank),
            )
            return kjt.sync()
def _kjt_flatten_spec(
    t: KeyedJaggedTensor, spec: TreeSpec
) -> List[Optional[torch.Tensor]]:
    # fx pytree flatten hook: return the KJT's required tensor fields in
    # canonical `_fields` order. `spec` is unused here but presumably required
    # by the register_pytree_flatten_spec callback signature — confirm.
    return [getattr(t, a) for a in KeyedJaggedTensor._fields]
9,116 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        """Initialize a KJT; see the class docstring for argument semantics.

        Exactly one of ``stride`` / ``stride_per_key_per_rank`` may be given;
        passing the latter switches the KJT into variable-stride mode.
        """
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            # collapse to a scalar stride when possible: 0 for empty input,
            # the common value when all keys share one total stride
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                self._stride = self.stride_per_key()[0]
        else:
            # fixed-stride mode: infer stride from lengths/offsets if absent
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
    def from_offsets_sync(
        keys: List[str],
        values: torch.Tensor,
        offsets: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Construct a KJT from cumulative offsets and eagerly ``sync()`` it.

        NOTE(review): no ``self``/``cls`` — presumably ``@staticmethod``
        outside this view; confirm.
        """
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            offsets=offsets,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        return kjt.sync()
    def from_lengths_sync(
        keys: List[str],
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Construct a KJT from per-slice lengths and eagerly ``sync()`` it.

        NOTE(review): no ``self``/``cls`` — presumably ``@staticmethod``
        outside this view; confirm.
        """
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """Concatenate several KJTs along the key dimension.

        Inputs must agree on weightedness and stride mode; fixed-stride
        inputs must share one stride. ``length_per_key`` is propagated only
        when cached on every input.

        NOTE(review): no ``self``/``cls`` — presumably ``@staticmethod``
        outside this view; confirm.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")
        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "KeyedJaggedTensor":
        """Build a KJT with no keys and zero-length tensors (stride 0).

        NOTE(review): no ``self``/``cls`` — presumably ``@staticmethod``
        outside this view; confirm.
        """
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return KeyedJaggedTensor(
            keys=torch.jit.annotate(List[str], []),
            values=torch.empty(0, dtype=values_dtype, device=device),
            weights=weights,
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            stride=0,
        )
    def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
        """Build an empty KJT matching ``kjt``'s device, dtypes and stride mode.

        NOTE(review): no ``self``/``cls`` — presumably ``@staticmethod``
        outside this view; confirm.
        """
        stride, stride_per_key_per_rank = (
            (None, kjt.stride_per_key_per_rank())
            if kjt.variable_stride_per_key()
            else (kjt.stride(), None)
        )
        return KeyedJaggedTensor(
            keys=[],
            values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
            weights=(
                None
                if kjt.weights_or_none() is None
                else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
            ),
            lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
"""
Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
but this function will ONLY work if the JaggedTensors all
have the same "implicit" batch_size dimension.
Basically, we can visualize JaggedTensors as 2-D tensors
of the format of [batch_size x variable_feature_dim].
In case, we have some batch without a feature value,
the input JaggedTensor could just not include any values.
But KeyedJaggedTensor (by default) typically pad "None"
so that all the JaggedTensors stored in the KeyedJaggedTensor
have the same batch_size dimension. That is, in the case,
the JaggedTensor input didn't automatically pad
for the empty batches, this function would error / not work.
Consider the visualization of the following KeyedJaggedTensor:
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
Notice that the inputs for this KeyedJaggedTensor would have looked like:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
Now if the input jt_dict = {
# "Feature0" [V0,V1] [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
} and the "None" is left out from each JaggedTensor,
then this function would fail as we would not correctly
be able to pad "None" as it does not technically know
the correct batch / place to pad within the JaggedTensor.
Essentially, the lengths Tensor inferred by this function
would be [2, 1, 1, 1, 3] indicating variable batch_size
dim_1 violates the existing assumption / precondition
that KeyedJaggedTensor's should have fixed batch_size dimension.
"""
kjt_keys = list(jt_dict.keys())
kjt_vals_list: List[torch.Tensor] = []
kjt_lens_list: List[torch.Tensor] = []
kjt_weights_list: List[torch.Tensor] = []
stride_per_key: List[int] = []
for jt in jt_dict.values():
stride_per_key.append(len(jt.lengths()))
kjt_vals_list.append(jt.values())
kjt_lens_list.append(jt.lengths())
weight = jt.weights_or_none()
if weight is not None:
kjt_weights_list.append(weight)
kjt_vals = torch.concat(kjt_vals_list)
kjt_lens = torch.concat(kjt_lens_list)
kjt_weights = (
torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
)
kjt_stride, kjt_stride_per_key_per_rank = (
(stride_per_key[0], None)
if all(s == stride_per_key[0] for s in stride_per_key)
else (None, [[stride] for stride in stride_per_key])
)
kjt = KeyedJaggedTensor(
keys=kjt_keys,
values=kjt_vals,
weights=kjt_weights,
lengths=kjt_lens,
stride=kjt_stride,
stride_per_key_per_rank=kjt_stride_per_key_per_rank,
).sync()
return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly materialize the lazily-computed per-key length and offset
        caches; returns self for chaining."""
        self.length_per_key()
        self.offset_per_key()
        return self
def unsync(self) -> "KeyedJaggedTensor":
self._length_per_key = None
self._offset_per_key = None
return self
    def device(self) -> torch.device:
        """Device of the values tensor, taken as the KJT's device."""
        return self._values.device
    def lengths(self) -> torch.Tensor:
        """Return the lengths tensor, deriving it from offsets when it was
        not provided; the result is cached on self."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Cached lengths tensor, or None if not provided/computed yet."""
        return self._lengths
    def offsets(self) -> torch.Tensor:
        """Return the offsets tensor, deriving it from lengths when it was
        not provided; the result is cached on self."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Cached offsets tensor, or None if not provided/computed yet."""
        return self._offsets
    def keys(self) -> List[str]:
        """List of feature keys, in storage order."""
        return self._keys
    def values(self) -> torch.Tensor:
        """Flat values tensor covering all keys."""
        return self._values
    def weights(self) -> torch.Tensor:
        """Weights tensor; delegates to _get_weights_or_throw, which is
        expected to raise when no weights are present."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Weights tensor, or None for an unweighted KJT."""
        return self._weights
    def stride(self) -> int:
        """Batch size (number of examples per key)."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Total batch size per key (summed over ranks)."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Per-key list of per-rank batch sizes."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """Whether keys may have different batch sizes (variable stride)."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices (keys, tensor); delegates to
        _get_inverse_indices_or_throw, which is expected to raise when
        none are present."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Inverse indices (keys, tensor), or None if not set."""
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        """Mapping of key -> position within self._keys, computed lazily and
        cached on self."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Total number of values for each key, computed lazily and cached
        on self."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key value counts, or None if not computed yet."""
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """Start offset of each key's values plus a final end offset,
        computed lazily; also refreshes the per-key length cache."""
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        # the helper returns both; cache both so length_per_key() is free
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key offsets, or None if not computed yet."""
        return self._offset_per_key
    def lengths_offset_per_key(self) -> List[int]:
        """Cumulative sum of stride_per_key() (cached): the start index of
        each key's entries within the lengths tensor."""
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """Split this KJT along the key dimension into one KJT per segment.

        Args:
            segments: number of keys assigned to each output KJT, in key
                order; expected to partition this KJT's keys.

        Returns:
            One KJT per segment. Non-empty, non-trivial segments view slices
            of this KJT's values/weights/lengths rather than copying.
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # segment covers every key: reuse all tensors and caches,
                # no torch slicing required
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # empty segment: construct a zero-element KJT on this device
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                # general case: slice values/weights by offsets and lengths
                # by per-key length offsets
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Return a new KJT whose keys (and their values/weights/lengths)
        are rearranged into the order given by ``indices``.

        Args:
            indices: target order as positions into the current key list.
            indices_tensor: optional prebuilt tensor version of ``indices``
                (avoids building one on this KJT's device here).
            include_inverse_indices: carry this KJT's inverse indices over
                to the result.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # variable-batch path: permute lengths and values key-segment by
            # key-segment
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # fixed-batch path: single fbgemm kernel over the 2-D
            # (num_keys x stride) lengths view
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
    def __getitem__(self, key: str) -> JaggedTensor:
        """Return the JaggedTensor for ``key``, built from views into this
        KJT's values/weights/lengths.

        Raises:
            KeyError: if ``key`` is not one of this KJT's keys (from the
                index-per-key dict lookup).
        """
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # defensive: if offset_per_key lacks an end entry, produce an empty
        # slice rather than indexing out of range
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )
    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Return a {key: JaggedTensor} view of this KJT, computed lazily and
        cached on self._jt_dict."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """Return a copy of this KJT with all tensors moved to ``device``.

        Args:
            device: target device.
            non_blocking: forwarded to every tensor ``.to()`` call.
            dtype: if given, applied to the weights tensor ONLY; values,
                lengths, and offsets keep their dtypes.

        Returns:
            A new KeyedJaggedTensor. Cached per-key metadata
            (length/offset/index per key) is carried over unchanged.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        # NOTE(review): the cached jt_dict is carried over without moving its
        # tensors to the new device — confirm this is intended
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            # only the tensor half of the (keys, tensor) pair moves devices
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Render one line per key showing its jagged values.

        Returns the empty rendering when there are no keys or when neither
        offsets nor lengths exist; note operator precedence — the condition
        parses as ``len == 0 or (offsets is None and lengths is None)``.
        """
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    " "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        # [start, end) range of this key's rows in offsets
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
    def pin_memory(self) -> "KeyedJaggedTensor":
        """Return a copy whose tensors are in pinned (page-locked) host
        memory, enabling faster async host-to-device transfers.

        The cached ``jt_dict`` is deliberately dropped (passed as None);
        the other cached per-key metadata is carried over.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            # only the tensor half of the (keys, tensor) pair gets pinned
            inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.pin_memory(),
            weights=weights.pin_memory() if weights is not None else None,
            lengths=lengths.pin_memory() if lengths is not None else None,
            offsets=offsets.pin_memory() if offsets is not None else None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
            jt_dict=None,
            inverse_indices=inverse_indices,
        )
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
    def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
        """Per-rank split sizes for each tensor in dist_tensors().

        Args:
            key_splits: number of keys assigned to each rank.

        Returns:
            Splits for lengths (batch sizes) and values, plus strides when
            variable-stride and weights when weighted — in dist_labels()
            order.
        """
        batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
        length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
        splits = [batch_size_per_split, length_per_split]
        if self.variable_stride_per_key():
            splits.append(key_splits)
        if self.weights_or_none() is not None:
            # weights align 1:1 with values, so they share values' splits
            splits.append(length_per_split)
        return splits
    def dist_tensors(self) -> List[torch.Tensor]:
        """Tensors to exchange during input dist, in dist_labels() order:
        lengths, values, then strides (variable-stride only) and weights
        (weighted only)."""
        tensors = [self.lengths(), self.values()]
        if self.variable_stride_per_key():
            strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
            tensors.append(strides)
        if self.weights_or_none() is not None:
            tensors.append(self.weights())
        return tensors
    def dist_init(
        keys: List[str],
        tensors: List[torch.Tensor],
        variable_stride_per_key: bool,
        num_workers: int,
        recat: Optional[torch.Tensor],
        stride_per_rank: Optional[List[int]],
        stagger: int = 1,
    ) -> "KeyedJaggedTensor":
        """Reassemble a KJT from tensors received via an all2all input dist.

        Args:
            keys: feature keys of the resulting KJT.
            tensors: received component tensors, ordered as [lengths, values]
                plus strides (variable-stride) and/or weights (weighted) —
                hence the [2, 3, 4] length assertion below.
            variable_stride_per_key: whether per-key batch sizes vary.
            num_workers: number of ranks that contributed data.
            recat: optional permutation tensor used to regroup the received
                data; skipped when None or empty.
            stride_per_rank: per-rank batch sizes (fixed-stride path only).
            stagger: regrouping factor applied to the per-rank stride layout.

        Returns:
            A synced KeyedJaggedTensor.
        """
        assert len(tensors) in [2, 3, 4]
        lengths = tensors[0]
        values = tensors[1]
        stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
        # weights are always the last tensor when present
        weights = (
            tensors[-1]
            if (variable_stride_per_key and len(tensors) == 4)
            or (not variable_stride_per_key and len(tensors) == 3)
            else None
        )
        if variable_stride_per_key:
            assert stride_per_rank_per_key is not None
            # received layout is rank-major; transpose to key-major
            stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
                num_workers, len(keys)
            ).T.tolist()
            strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
                stride_per_rank_per_key
            ).tolist()
            cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            # per-(rank, key) segment value counts, derived from the lengths
            # cumsum at each segment boundary
            length_per_key = (
                cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
            )
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    lengths, _ = _permute_tensor_by_segments(
                        lengths,
                        stride_per_rank_per_key,
                        recat,
                        None,
                    )
                    values, weights = _permute_tensor_by_segments(
                        values,
                        length_per_key,
                        recat,
                        weights,
                    )
            if not stride_per_key_per_rank:
                stride_per_key_per_rank = [[0]] * len(keys)
            if stagger > 1:
                # undo the staggered rank ordering of the received strides
                stride_per_key_per_rank_stagger: List[List[int]] = []
                local_world_size = num_workers // stagger
                for i in range(len(keys)):
                    stride_per_rank_stagger: List[int] = []
                    for j in range(local_world_size):
                        stride_per_rank_stagger.extend(
                            stride_per_key_per_rank[i][j::local_world_size]
                        )
                    stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
                stride_per_key_per_rank = stride_per_key_per_rank_stagger
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride_per_key_per_rank=stride_per_key_per_rank,
            )
            return kjt.sync()
        else:
            assert stride_per_rank is not None
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    stride = stride_per_rank[0]
                    if all(s == stride for s in stride_per_rank):
                        # uniform batch size per rank: 2-D permute kernel
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_2D_sparse_data(
                            recat,
                            lengths.view(-1, stride),
                            values,
                            weights,
                            values.numel(),
                        )
                        lengths = lengths.view(-1)
                    else:  # variable batch size per rank
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_1D_sparse_data(
                            recat,
                            lengths.view(-1),
                            values,
                            weights,
                            values.numel(),
                        )
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride=sum(stride_per_rank),
            )
            return kjt.sync()
def _kjt_flatten(
    t: KeyedJaggedTensor,
) -> Tuple[List[Optional[torch.Tensor]], List[str]]:
    """Flatten a KJT into (tensor leaves listed in KeyedJaggedTensor._fields,
    keys as context) for pytree registration."""
    return [getattr(t, a) for a in KeyedJaggedTensor._fields], t._keys
def flatten_kjt_list(
    kjt_arr: List[KeyedJaggedTensor],
) -> Tuple[List[Optional[torch.Tensor]], List[List[str]]]:
    """Flatten a list of KJTs into a single flat list of tensor leaves plus
    the per-KJT key lists needed to reconstruct them."""
    flat_tensors: List[Optional[torch.Tensor]] = []
    contexts: List[List[str]] = []
    for kjt in kjt_arr:
        leaves, keys = _kjt_flatten(kjt)
        flat_tensors.extend(leaves)
        contexts.append(keys)
    return flat_tensors, contexts
9,117 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
    # Load fbgemm sparse ops (internal build paths); silently skipped when
    # the shared libraries are not present.
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
    torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
    pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
def __init__(
self,
keys: List[str],
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
# Below exposed to ensure torch.script-able
length_per_key: Optional[List[int]] = None,
offset_per_key: Optional[List[int]] = None,
index_per_key: Optional[Dict[str, int]] = None,
jt_dict: Optional[Dict[str, JaggedTensor]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> None:
self._keys: List[str] = keys
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
self._stride_per_key_per_rank: List[List[int]] = []
self._stride_per_key: List[int] = []
self._variable_stride_per_key: bool = False
self._stride: int = -1
if stride_per_key_per_rank is not None:
if stride is not None:
raise ValueError(
"Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
)
self._stride_per_key_per_rank = stride_per_key_per_rank
self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
self._variable_stride_per_key = True
if not stride_per_key_per_rank:
self._stride = 0
elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
self._stride = self.stride_per_key()[0]
else:
if torch.jit.is_tracing():
stride = _maybe_compute_stride_kjt_scripted(
keys, stride, lengths, offsets
)[0]
else:
stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
self._stride = stride
self._stride_per_key_per_rank = [[stride]] * len(self._keys)
self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
# lazy fields
self._length_per_key: Optional[List[int]] = length_per_key
self._offset_per_key: Optional[List[int]] = offset_per_key
self._index_per_key: Optional[Dict[str, int]] = index_per_key
self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
inverse_indices
)
self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def concat(
kjt_list: List["KeyedJaggedTensor"],
) -> "KeyedJaggedTensor":
if len(kjt_list) == 0:
raise ValueError("Can't concat empty KJT list")
is_weighted: bool = kjt_list[0].weights_or_none() is not None
has_length_per_key: bool = True
length_per_key: List[int] = []
keys: List[str] = []
value_list: List[torch.Tensor] = []
weight_list: List[torch.Tensor] = []
length_list: List[torch.Tensor] = []
stride_per_key_per_rank: List[List[int]] = []
stride: Optional[int] = None
variable_stride_per_key_list = [
kjt.variable_stride_per_key() for kjt in kjt_list
]
assert all(variable_stride_per_key_list) or not any(
variable_stride_per_key_list
), "variable stride per key must be consistent for all KJTs"
variable_stride_per_key = all(variable_stride_per_key_list)
for kjt in kjt_list:
curr_is_weighted: bool = kjt.weights_or_none() is not None
if is_weighted != curr_is_weighted:
raise ValueError("Can't merge weighted KJT with unweighted KJT")
_length_per_key: Optional[List[int]] = None
if kjt._length_per_key is None:
has_length_per_key = False
else:
_length_per_key = kjt._length_per_key
if has_length_per_key and _length_per_key is not None:
length_per_key += _length_per_key
keys += kjt.keys()
value_list.append(kjt.values())
if is_weighted:
weight_list.append(kjt.weights())
length_list.append(kjt.lengths())
if variable_stride_per_key:
stride_per_key_per_rank += kjt.stride_per_key_per_rank()
elif stride is None:
stride = kjt.stride()
else:
assert stride == kjt.stride(), "strides must be consistent for all KJTs"
return KeyedJaggedTensor(
keys=keys,
values=torch.cat(value_list, dim=0),
weights=torch.cat(weight_list, dim=0) if is_weighted else None,
lengths=torch.cat(length_list, dim=0),
stride=stride,
stride_per_key_per_rank=(
stride_per_key_per_rank if variable_stride_per_key else None
),
length_per_key=length_per_key if has_length_per_key else None,
)
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return KeyedJaggedTensor(
keys=torch.jit.annotate(List[str], []),
values=torch.empty(0, dtype=values_dtype, device=device),
weights=weights,
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
stride=0,
)
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, kjt.stride_per_key_per_rank())
if kjt.variable_stride_per_key()
else (kjt.stride(), None)
)
return KeyedJaggedTensor(
keys=[],
values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
weights=(
None
if kjt.weights_or_none() is None
else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
),
lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
)
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
"""
Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
but this function will ONLY work if the JaggedTensors all
have the same "implicit" batch_size dimension.
Basically, we can visualize JaggedTensors as 2-D tensors
of the format of [batch_size x variable_feature_dim].
In case, we have some batch without a feature value,
the input JaggedTensor could just not include any values.
But KeyedJaggedTensor (by default) typically pad "None"
so that all the JaggedTensors stored in the KeyedJaggedTensor
have the same batch_size dimension. That is, in the case,
the JaggedTensor input didn't automatically pad
for the empty batches, this function would error / not work.
Consider the visualization of the following KeyedJaggedTensor:
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
Notice that the inputs for this KeyedJaggedTensor would have looked like:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
Now if the input jt_dict = {
# "Feature0" [V0,V1] [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
} and the "None" is left out from each JaggedTensor,
then this function would fail as we would not correctly
be able to pad "None" as it does not technically know
the correct batch / place to pad within the JaggedTensor.
Essentially, the lengths Tensor inferred by this function
would be [2, 1, 1, 1, 3] indicating variable batch_size
dim_1 violates the existing assumption / precondition
that KeyedJaggedTensor's should have fixed batch_size dimension.
"""
kjt_keys = list(jt_dict.keys())
kjt_vals_list: List[torch.Tensor] = []
kjt_lens_list: List[torch.Tensor] = []
kjt_weights_list: List[torch.Tensor] = []
stride_per_key: List[int] = []
for jt in jt_dict.values():
stride_per_key.append(len(jt.lengths()))
kjt_vals_list.append(jt.values())
kjt_lens_list.append(jt.lengths())
weight = jt.weights_or_none()
if weight is not None:
kjt_weights_list.append(weight)
kjt_vals = torch.concat(kjt_vals_list)
kjt_lens = torch.concat(kjt_lens_list)
kjt_weights = (
torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
)
kjt_stride, kjt_stride_per_key_per_rank = (
(stride_per_key[0], None)
if all(s == stride_per_key[0] for s in stride_per_key)
else (None, [[stride] for stride in stride_per_key])
)
kjt = KeyedJaggedTensor(
keys=kjt_keys,
values=kjt_vals,
weights=kjt_weights,
lengths=kjt_lens,
stride=kjt_stride,
stride_per_key_per_rank=kjt_stride_per_key_per_rank,
).sync()
return kjt
def sync(self) -> "KeyedJaggedTensor":
self.length_per_key()
self.offset_per_key()
return self
def unsync(self) -> "KeyedJaggedTensor":
self._length_per_key = None
self._offset_per_key = None
return self
def device(self) -> torch.device:
return self._values.device
def lengths(self) -> torch.Tensor:
_lengths = _maybe_compute_lengths(self._lengths, self._offsets)
self._lengths = _lengths
return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
return self._lengths
def offsets(self) -> torch.Tensor:
_offsets = _maybe_compute_offsets(self._lengths, self._offsets)
self._offsets = _offsets
return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
return self._offsets
def keys(self) -> List[str]:
return self._keys
def values(self) -> torch.Tensor:
return self._values
def weights(self) -> torch.Tensor:
return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
return self._weights
def stride(self) -> int:
return self._stride
def stride_per_key(self) -> List[int]:
return self._stride_per_key
def stride_per_key_per_rank(self) -> List[List[int]]:
return self._stride_per_key_per_rank
def variable_stride_per_key(self) -> bool:
return self._variable_stride_per_key
def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
return _get_inverse_indices_or_throw(self._inverse_indices)
def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
return self._inverse_indices
def _key_indices(self) -> Dict[str, int]:
_index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
self._keys,
self._index_per_key,
)
self._index_per_key = _index_per_key
return _index_per_key
def length_per_key(self) -> List[int]:
_length_per_key = _maybe_compute_length_per_key(
keys=self._keys,
stride=self.stride(),
stride_per_key=self.stride_per_key(),
variable_stride_per_key=self.variable_stride_per_key(),
length_per_key=self._length_per_key,
lengths=self._lengths,
offsets=self._offsets,
values=self._values,
)
self._length_per_key = _length_per_key
return _length_per_key
def length_per_key_or_none(self) -> Optional[List[int]]:
return self._length_per_key
def offset_per_key(self) -> List[int]:
_length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
keys=self._keys,
stride=self.stride(),
stride_per_key=self.stride_per_key(),
variable_stride_per_key=self.variable_stride_per_key(),
length_per_key=self._length_per_key,
offset_per_key=self._offset_per_key,
lengths=self._lengths,
offsets=self._offsets,
values=self._values,
)
self._length_per_key = _length_per_key
self._offset_per_key = _offset_per_key
return _offset_per_key
def offset_per_key_or_none(self) -> Optional[List[int]]:
return self._offset_per_key
def lengths_offset_per_key(self) -> List[int]:
if not self._lengths_offset_per_key:
self._lengths_offset_per_key = _cumsum(self.stride_per_key())
return self._lengths_offset_per_key
def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
split_list: List[KeyedJaggedTensor] = []
start = 0
start_offset = 0
_length_per_key = self.length_per_key()
_offset_per_key = self.offset_per_key()
for segment in segments:
end = start + segment
end_offset = _offset_per_key[end]
keys: List[str] = self._keys[start:end]
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank()[start:end])
if self.variable_stride_per_key()
else (self._stride, None)
)
if segment == len(self._keys):
# no torch slicing required
split_list.append(
KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self.weights_or_none(),
lengths=self._lengths,
offsets=self._offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self._length_per_key,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
jt_dict=self._jt_dict,
inverse_indices=None,
)
)
elif segment == 0:
empty_int_list: List[int] = torch.jit.annotate(List[int], [])
split_list.append(
KeyedJaggedTensor(
keys=keys,
values=torch.tensor(
empty_int_list,
device=self.device(),
dtype=self._values.dtype,
),
weights=(
None
if self.weights_or_none() is None
else torch.tensor(
empty_int_list,
device=self.device(),
dtype=self.weights().dtype,
)
),
lengths=torch.tensor(
empty_int_list, device=self.device(), dtype=torch.int
),
offsets=torch.tensor(
empty_int_list, device=self.device(), dtype=torch.int
),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=None,
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
)
else:
split_length_per_key = _length_per_key[start:end]
if not torch.jit.is_scripting() and is_torchdynamo_compiling():
# Checks for dynamo dynamic shapes tracing
torch._check_is_size(start_offset)
torch._check_is_size(end_offset)
torch._check_is_size(end_offset - start_offset)
torch._check(start_offset <= self._values.size(0))
torch._check(end_offset <= self._values.size(0))
torch._check(end_offset >= start_offset)
split_list.append(
KeyedJaggedTensor(
keys=keys,
values=self._values[start_offset:end_offset],
weights=(
None
if self.weights_or_none() is None
else self.weights()[start_offset:end_offset]
),
lengths=self.lengths()[
self.lengths_offset_per_key()[
start
] : self.lengths_offset_per_key()[end]
],
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=split_length_per_key,
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
)
start = end
start_offset = end_offset
return split_list
def permute(
self,
indices: List[int],
indices_tensor: Optional[torch.Tensor] = None,
include_inverse_indices: bool = False,
) -> "KeyedJaggedTensor":
if indices_tensor is None:
indices_tensor = torch.tensor(
indices, dtype=torch.int, device=self.device()
)
length_per_key = self.length_per_key()
permuted_keys: List[str] = []
permuted_stride_per_key_per_rank: List[List[int]] = []
permuted_length_per_key: List[int] = []
permuted_lengths_sum = 0
for index in indices:
key = self.keys()[index]
permuted_keys.append(key)
permuted_stride_per_key_per_rank.append(
self.stride_per_key_per_rank()[index]
)
permuted_length_per_key.append(length_per_key[index])
permuted_lengths_sum += length_per_key[index]
if self.variable_stride_per_key():
length_per_key_tensor = _pin_and_move(
torch.tensor(self.length_per_key()), self.device()
)
stride_per_key_tensor = _pin_and_move(
torch.tensor(self.stride_per_key()), self.device()
)
permuted_lengths, _ = _permute_tensor_by_segments(
self.lengths(),
stride_per_key_tensor,
indices_tensor,
None,
)
permuted_values, permuted_weights = _permute_tensor_by_segments(
self.values(),
length_per_key_tensor,
indices_tensor,
self.weights_or_none(),
)
else:
(
permuted_lengths,
permuted_values,
permuted_weights,
) = torch.ops.fbgemm.permute_2D_sparse_data(
indices_tensor,
self.lengths().view(len(self._keys), -1),
self.values(),
self.weights_or_none(),
permuted_lengths_sum,
)
stride, optional_permuted_stride_per_key_per_rank = (
(None, permuted_stride_per_key_per_rank)
if self.variable_stride_per_key()
else (self._stride, None)
)
kjt = KeyedJaggedTensor(
keys=permuted_keys,
values=permuted_values,
weights=permuted_weights,
lengths=permuted_lengths.view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=(
self.inverse_indices_or_none() if include_inverse_indices else None
),
)
return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
def __getitem__(self, key: str) -> JaggedTensor:
offset_per_key = self.offset_per_key()
index = self._key_indices()[key]
start_offset = offset_per_key[index]
end_offset = (
offset_per_key[index + 1]
if index + 1 < len(offset_per_key)
else start_offset
)
return JaggedTensor(
values=self._values[start_offset:end_offset],
weights=(
None
if self.weights_or_none() is None
else self.weights()[start_offset:end_offset]
),
lengths=self.lengths()[
self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
index + 1
]
],
offsets=None,
)
def to_dict(self) -> Dict[str, JaggedTensor]:
_jt_dict = _maybe_compute_kjt_to_jt_dict(
stride=self.stride(),
stride_per_key=self.stride_per_key(),
keys=self.keys(),
length_per_key=self.length_per_key(),
lengths=self.lengths(),
values=self.values(),
variable_stride_per_key=self.variable_stride_per_key(),
weights=self.weights_or_none(),
jt_dict=self._jt_dict,
)
self._jt_dict = _jt_dict
return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
def to(
self,
device: torch.device,
non_blocking: bool = False,
dtype: Optional[torch.dtype] = None,
) -> "KeyedJaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
stride, stride_per_key_per_rank = (
(None, self._stride_per_key_per_rank)
if self.variable_stride_per_key()
else (self._stride, None)
)
length_per_key = self._length_per_key
offset_per_key = self._offset_per_key
index_per_key = self._index_per_key
jt_dict = self._jt_dict
inverse_indices = self._inverse_indices
if inverse_indices is not None:
inverse_indices = (
inverse_indices[0],
inverse_indices[1].to(device, non_blocking=non_blocking),
)
if weights is not None:
if dtype is not None:
weights = weights.to(
dtype=dtype, device=device, non_blocking=non_blocking
)
else:
weights = weights.to(device=device, non_blocking=non_blocking)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values.to(device, non_blocking=non_blocking),
weights=weights,
lengths=(
lengths.to(device, non_blocking=non_blocking)
if lengths is not None
else None
),
offsets=(
offsets.to(device, non_blocking=non_blocking)
if offsets is not None
else None
),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=length_per_key,
offset_per_key=offset_per_key,
index_per_key=index_per_key,
jt_dict=jt_dict,
inverse_indices=inverse_indices,
)
def __str__(self) -> str:
if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
return "KeyedJaggedTensor()\n"
offsets = self.offsets()
return (
"KeyedJaggedTensor({\n"
+ ",\n".join(
[
" "
+ _jagged_tensor_string(
self._keys[index],
self._values,
self._weights,
offsets,
sum(self.stride_per_key()[:index]),
sum(self.stride_per_key()[: index + 1]),
)
for index in range(len(self._keys))
]
)
+ "\n})\n"
)
def pin_memory(self) -> "KeyedJaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
stride, stride_per_key_per_rank = (
(None, self._stride_per_key_per_rank)
if self.variable_stride_per_key()
else (self._stride, None)
)
inverse_indices = self._inverse_indices
if inverse_indices is not None:
inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
return KeyedJaggedTensor(
keys=self._keys,
values=self._values.pin_memory(),
weights=weights.pin_memory() if weights is not None else None,
lengths=lengths.pin_memory() if lengths is not None else None,
offsets=offsets.pin_memory() if offsets is not None else None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self._length_per_key,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
jt_dict=None,
inverse_indices=inverse_indices,
)
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
splits = [batch_size_per_split, length_per_split]
if self.variable_stride_per_key():
splits.append(key_splits)
if self.weights_or_none() is not None:
splits.append(length_per_split)
return splits
def dist_tensors(self) -> List[torch.Tensor]:
tensors = [self.lengths(), self.values()]
if self.variable_stride_per_key():
strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
tensors.append(strides)
if self.weights_or_none() is not None:
tensors.append(self.weights())
return tensors
def dist_init(
keys: List[str],
tensors: List[torch.Tensor],
variable_stride_per_key: bool,
num_workers: int,
recat: Optional[torch.Tensor],
stride_per_rank: Optional[List[int]],
stagger: int = 1,
) -> "KeyedJaggedTensor":
assert len(tensors) in [2, 3, 4]
lengths = tensors[0]
values = tensors[1]
stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
weights = (
tensors[-1]
if (variable_stride_per_key and len(tensors) == 4)
or (not variable_stride_per_key and len(tensors) == 3)
else None
)
if variable_stride_per_key:
assert stride_per_rank_per_key is not None
stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
num_workers, len(keys)
).T.tolist()
strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
stride_per_rank_per_key
).tolist()
cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
length_per_key = (
cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
)
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
lengths, _ = _permute_tensor_by_segments(
lengths,
stride_per_rank_per_key,
recat,
None,
)
values, weights = _permute_tensor_by_segments(
values,
length_per_key,
recat,
weights,
)
if not stride_per_key_per_rank:
stride_per_key_per_rank = [[0]] * len(keys)
if stagger > 1:
stride_per_key_per_rank_stagger: List[List[int]] = []
local_world_size = num_workers // stagger
for i in range(len(keys)):
stride_per_rank_stagger: List[int] = []
for j in range(local_world_size):
stride_per_rank_stagger.extend(
stride_per_key_per_rank[i][j::local_world_size]
)
stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
stride_per_key_per_rank = stride_per_key_per_rank_stagger
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride_per_key_per_rank=stride_per_key_per_rank,
)
return kjt.sync()
else:
assert stride_per_rank is not None
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
stride = stride_per_rank[0]
if all(s == stride for s in stride_per_rank):
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_2D_sparse_data(
recat,
lengths.view(-1, stride),
values,
weights,
values.numel(),
)
lengths = lengths.view(-1)
else: # variable batch size per rank
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_1D_sparse_data(
recat,
lengths.view(-1),
values,
weights,
values.numel(),
)
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=sum(stride_per_rank),
)
return kjt.sync()
def _kjt_unflatten(
values: List[Optional[torch.Tensor]], context: List[str] # context is the _keys
) -> KeyedJaggedTensor:
return KeyedJaggedTensor(context, *values)
def unflatten_kjt_list(
values: List[Optional[torch.Tensor]], contexts: List[List[str]]
) -> List[KeyedJaggedTensor]:
num_kjt_fields = len(KeyedJaggedTensor._fields)
length = len(values)
return [
_kjt_unflatten(
values[j * num_kjt_fields : (j + 1) * num_kjt_fields],
contexts[j],
)
for j in range(length // num_kjt_fields)
] | null |
9,118 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
def _cumsum(o: List[int]) -> List[int]:
ret = [0] * (len(o) + 1)
for i in range(len(o)):
ret[i + 1] = ret[i] + o[i]
return ret
def _maybe_compute_offset_per_key_kt(
length_per_key: List[int],
offset_per_key: Optional[List[int]],
) -> List[int]:
if offset_per_key is None:
offset_per_key = _cumsum(length_per_key)
return offset_per_key | null |
9,119 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
def _values_string(values: torch.Tensor, start: int, end: int) -> str:
size = values.size()
if len(size) == 1:
return "[" + ", ".join([str(value.item()) for value in values[start:end]]) + "]"
elif len(size) == 2:
values_list: List[str] = []
for value in values[start:end]:
values_list.append("[" + ", ".join([str(s.item()) for s in value]) + "]")
return "[" + ", ".join(values_list) + "]"
else:
raise ValueError(
"the values dimension is larger than 2, we don't support printing"
)
def _keyed_values_string(values: torch.Tensor) -> str:
return (
"["
+ ", ".join([_values_string(value, 0, len(value)) for value in values])
+ "]"
) | null |
9,120 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class KeyedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
KeyedTensor holds a concatenated list of dense tensors, each of which can be
accessed by a key.
The keyed dimension can be of variable length (length_per_key).
Common use cases uses include storage of pooled embeddings of different dimensions.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): list of keys.
length_per_key (List[int]): length of each key along key dimension.
values (torch.Tensor): dense tensor, concatenated typically along key dimension.
key_dim (int): key dimension, zero indexed - defaults to 1
(typically B is 0-dimension).
Example::
# kt is KeyedTensor holding
# 0 1 2
# "Embedding A" [1,1] [1,1] [1,1]
# "Embedding B" [2,1,2] [2,1,2] [2,1,2]
# "Embedding C" [3,1,2,3] [3,1,2,3] [3,1,2,3]
tensor_list = [
torch.tensor([[1,1]] * 3),
torch.tensor([[2,1,2]] * 3),
torch.tensor([[3,1,2,3]] * 3),
]
keys = ["Embedding A", "Embedding B", "Embedding C"]
kt = KeyedTensor.from_tensor_list(keys, tensor_list)
kt.values()
# tensor(
# [
# [1, 1, 2, 1, 2, 3, 1, 2, 3],
# [1, 1, 2, 1, 2, 3, 1, 2, 3],
# [1, 1, 2, 1, 2, 3, 1, 2, 3],
# ]
# )
kt["Embedding B"]
# tensor([[2, 1, 2], [2, 1, 2], [2, 1, 2]])
"""
def __init__(
self,
keys: List[str],
length_per_key: List[int],
values: torch.Tensor,
key_dim: int = 1,
# Below exposed to ensure torch.script-able
offset_per_key: Optional[List[int]] = None,
index_per_key: Optional[Dict[str, int]] = None,
) -> None:
self._keys = keys
self._length_per_key = length_per_key
self._values = values
self._key_dim = key_dim
self._offset_per_key: Optional[List[int]] = offset_per_key
self._index_per_key: Optional[Dict[str, int]] = index_per_key
def from_tensor_list(
keys: List[str], tensors: List[torch.Tensor], key_dim: int = 1, cat_dim: int = 1
) -> "KeyedTensor":
length_per_key = [tensor.shape[key_dim] for tensor in tensors]
return KeyedTensor(
keys=keys,
length_per_key=length_per_key,
values=torch.cat(tensors, dim=cat_dim),
key_dim=key_dim,
)
def keys(self) -> List[str]:
return self._keys
def values(self) -> torch.Tensor:
return self._values
def key_dim(self) -> int:
return self._key_dim
def offset_per_key(self) -> List[int]:
_offset_per_key = _maybe_compute_offset_per_key_kt(
self._length_per_key,
self._offset_per_key,
)
self._offset_per_key = _offset_per_key
return _offset_per_key
def length_per_key(self) -> List[int]:
return self._length_per_key
def _key_indices(self) -> Dict[str, int]:
_index_per_key = _maybe_compute_index_per_key(
self._keys,
self._index_per_key,
)
self._index_per_key = _index_per_key
return _index_per_key
def __getitem__(self, key: str) -> torch.Tensor:
index = self._key_indices()[key]
start = self.offset_per_key()[index]
length = self._length_per_key[index]
return self._values.narrow(dim=self._key_dim, start=start, length=length)
def to_dict(self) -> Dict[str, torch.Tensor]:
indices = self._key_indices()
lengths = self._length_per_key
split_values = self._values.split(lengths, dim=self._key_dim)
return {key: split_values[index] for (key, index) in indices.items()}
def regroup(
keyed_tensors: List["KeyedTensor"], groups: List[List[str]]
) -> List[torch.Tensor]:
return _regroup_keyed_tensors(keyed_tensors, groups)
def regroup_as_dict(
keyed_tensors: List["KeyedTensor"], groups: List[List[str]], keys: List[str]
) -> Dict[str, torch.Tensor]:
assert len(groups) == len(keys), "Groups and keys should have same length"
embeddings_list = _regroup_keyed_tensors(keyed_tensors, groups)
embeddings_dict: Dict[str, torch.Tensor] = {}
for i, key in enumerate(keys):
embeddings_dict[key] = embeddings_list[i]
return embeddings_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
def to(self, device: torch.device, non_blocking: bool = False) -> "KeyedTensor":
return KeyedTensor(
keys=self._keys,
length_per_key=self._length_per_key,
values=self._values.to(device, non_blocking=non_blocking),
key_dim=self._key_dim,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
)
def __str__(self) -> str:
if len(self._keys) == 0:
return "KeyedTensor()\n"
return (
"KeyedTensor({\n"
+ ",\n".join(
[
' "{}": '.format(key) + _keyed_values_string(self[key])
for key in self._keys
]
)
+ "\n})\n"
)
def _kt_unflatten(values: List[torch.Tensor], context: List[str]) -> KeyedTensor:
return KeyedTensor(context, values[0].tolist(), values[1]) | null |
9,121 | import abc
import operator
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch.autograd.profiler import record_function
from torch.fx._pytree import register_pytree_flatten_spec, TreeSpec
from torch.utils._pytree import GetAttrKey, KeyEntry, register_pytree_node
from torchrec.streamable import Pipelineable
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
try:
from torch._dynamo import is_compiling as is_torchdynamo_compiling
except Exception:
class KeyedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
KeyedTensor holds a concatenated list of dense tensors, each of which can be
accessed by a key.
The keyed dimension can be of variable length (length_per_key).
Common use cases uses include storage of pooled embeddings of different dimensions.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): list of keys.
length_per_key (List[int]): length of each key along key dimension.
values (torch.Tensor): dense tensor, concatenated typically along key dimension.
key_dim (int): key dimension, zero indexed - defaults to 1
(typically B is 0-dimension).
Example::
# kt is KeyedTensor holding
# 0 1 2
# "Embedding A" [1,1] [1,1] [1,1]
# "Embedding B" [2,1,2] [2,1,2] [2,1,2]
# "Embedding C" [3,1,2,3] [3,1,2,3] [3,1,2,3]
tensor_list = [
torch.tensor([[1,1]] * 3),
torch.tensor([[2,1,2]] * 3),
torch.tensor([[3,1,2,3]] * 3),
]
keys = ["Embedding A", "Embedding B", "Embedding C"]
kt = KeyedTensor.from_tensor_list(keys, tensor_list)
kt.values()
# tensor(
# [
# [1, 1, 2, 1, 2, 3, 1, 2, 3],
# [1, 1, 2, 1, 2, 3, 1, 2, 3],
# [1, 1, 2, 1, 2, 3, 1, 2, 3],
# ]
# )
kt["Embedding B"]
# tensor([[2, 1, 2], [2, 1, 2], [2, 1, 2]])
"""
def __init__(
self,
keys: List[str],
length_per_key: List[int],
values: torch.Tensor,
key_dim: int = 1,
# Below exposed to ensure torch.script-able
offset_per_key: Optional[List[int]] = None,
index_per_key: Optional[Dict[str, int]] = None,
) -> None:
self._keys = keys
self._length_per_key = length_per_key
self._values = values
self._key_dim = key_dim
self._offset_per_key: Optional[List[int]] = offset_per_key
self._index_per_key: Optional[Dict[str, int]] = index_per_key
def from_tensor_list(
keys: List[str], tensors: List[torch.Tensor], key_dim: int = 1, cat_dim: int = 1
) -> "KeyedTensor":
length_per_key = [tensor.shape[key_dim] for tensor in tensors]
return KeyedTensor(
keys=keys,
length_per_key=length_per_key,
values=torch.cat(tensors, dim=cat_dim),
key_dim=key_dim,
)
def keys(self) -> List[str]:
return self._keys
def values(self) -> torch.Tensor:
return self._values
def key_dim(self) -> int:
return self._key_dim
def offset_per_key(self) -> List[int]:
_offset_per_key = _maybe_compute_offset_per_key_kt(
self._length_per_key,
self._offset_per_key,
)
self._offset_per_key = _offset_per_key
return _offset_per_key
def length_per_key(self) -> List[int]:
return self._length_per_key
def _key_indices(self) -> Dict[str, int]:
_index_per_key = _maybe_compute_index_per_key(
self._keys,
self._index_per_key,
)
self._index_per_key = _index_per_key
return _index_per_key
def __getitem__(self, key: str) -> torch.Tensor:
index = self._key_indices()[key]
start = self.offset_per_key()[index]
length = self._length_per_key[index]
return self._values.narrow(dim=self._key_dim, start=start, length=length)
def to_dict(self) -> Dict[str, torch.Tensor]:
indices = self._key_indices()
lengths = self._length_per_key
split_values = self._values.split(lengths, dim=self._key_dim)
return {key: split_values[index] for (key, index) in indices.items()}
def regroup(
keyed_tensors: List["KeyedTensor"], groups: List[List[str]]
) -> List[torch.Tensor]:
return _regroup_keyed_tensors(keyed_tensors, groups)
def regroup_as_dict(
keyed_tensors: List["KeyedTensor"], groups: List[List[str]], keys: List[str]
) -> Dict[str, torch.Tensor]:
assert len(groups) == len(keys), "Groups and keys should have same length"
embeddings_list = _regroup_keyed_tensors(keyed_tensors, groups)
embeddings_dict: Dict[str, torch.Tensor] = {}
for i, key in enumerate(keys):
embeddings_dict[key] = embeddings_list[i]
return embeddings_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
def to(self, device: torch.device, non_blocking: bool = False) -> "KeyedTensor":
return KeyedTensor(
keys=self._keys,
length_per_key=self._length_per_key,
values=self._values.to(device, non_blocking=non_blocking),
key_dim=self._key_dim,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
)
def __str__(self) -> str:
if len(self._keys) == 0:
return "KeyedTensor()\n"
return (
"KeyedTensor({\n"
+ ",\n".join(
[
' "{}": '.format(key) + _keyed_values_string(self[key])
for key in self._keys
]
)
+ "\n})\n"
)
def _kt_flatten(
kt: KeyedTensor,
) -> Tuple[List[torch.Tensor], List[str]]:
return [torch.tensor(kt._length_per_key, dtype=torch.int64), kt._values], kt._keys
def _kt_flatten_spec(kt: KeyedTensor, spec: TreeSpec) -> List[torch.Tensor]:
return _kt_flatten(kt)[0] | null |
9,122 | import csv
import math
import random
from dataclasses import dataclass
from functools import partial
from io import IOBase
from typing import Any, Callable, Iterable, Iterator, List, Sequence, Tuple, TypeVar
import torch
from iopath.common.file_io import PathManager, PathManagerFactory
from torch.utils.data import functional_datapipe, get_worker_info, IterDataPipe
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Pipelineable
class _IdxFilter(IterDataPipe):
def __init__(
self, datapipe: IterDataPipe, filter_fn: Callable[[int], bool]
) -> None:
super().__init__()
self.datapipe = datapipe
self.filter_fn = filter_fn
# pyre-ignore[3]
def __iter__(self) -> Iterator[Any]:
for idx, data in enumerate(self.datapipe):
if self.filter_fn(idx):
yield data
def _default_key_fn(idx: int) -> int:
return idx
def train_filter(
key_fn: Callable[[int], int],
train_perc: float,
decimal_places: int,
idx: int,
) -> bool:
return (key_fn(idx) % 10**decimal_places) < round(train_perc * 10**decimal_places)
def val_filter(
key_fn: Callable[[int], int],
train_perc: float,
decimal_places: int,
idx: int,
) -> bool:
return not train_filter(key_fn, train_perc, decimal_places, idx)
def idx_split_train_val(
datapipe: IterDataPipe,
train_perc: float,
decimal_places: int = 2,
key_fn: Callable[[int], int] = _default_key_fn,
) -> Tuple[IterDataPipe, IterDataPipe]:
if not 0.0 < train_perc < 1.0:
raise ValueError("train_perc must be in range (0.0, 1.0)")
return (
_IdxFilter(datapipe, partial(train_filter, key_fn, train_perc, decimal_places)),
_IdxFilter(datapipe, partial(val_filter, key_fn, train_perc, decimal_places)),
) | null |
9,123 | import csv
import math
import random
from dataclasses import dataclass
from functools import partial
from io import IOBase
from typing import Any, Callable, Iterable, Iterator, List, Sequence, Tuple, TypeVar
import torch
from iopath.common.file_io import PathManager, PathManagerFactory
from torch.utils.data import functional_datapipe, get_worker_info, IterDataPipe
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Pipelineable
class _RandFilter(IterDataPipe):
def __init__(
self,
datapipe: IterDataPipe,
filter_fn: Callable[[random.Random], bool],
rand_gen: random.Random,
) -> None:
super().__init__()
self.datapipe = datapipe
self.filter_fn = filter_fn
self.rand_gen = rand_gen
# pyre-ignore[4]
self.rand_gen_init_state: Tuple[Any, ...] = rand_gen.getstate()
# pyre-ignore[3]
def __iter__(self) -> Iterator[Any]:
self.rand_gen.setstate(self.rand_gen_init_state)
for data in self.datapipe:
if self.filter_fn(self.rand_gen):
yield data
def _rand_train_filter_fn(
train_perc: float,
rand_gen: random.Random,
) -> bool:
return rand_gen.random() < train_perc
def _rand_val_filter_fn(train_perc: float, rand_gen: random.Random) -> bool:
return not _rand_train_filter_fn(train_perc, rand_gen)
The provided code snippet includes necessary dependencies for implementing the `rand_split_train_val` function. Write a Python function `def rand_split_train_val( datapipe: IterDataPipe, train_perc: float, random_seed: int = 0, ) -> Tuple[IterDataPipe, IterDataPipe]` to solve the following problem:
Via uniform random sampling, generates two IterDataPipe instances representing disjoint train and val splits of the given IterDataPipe. Args: datapipe (IterDataPipe): datapipe to split. train_perc (float): value in range (0.0, 1.0) specifying target proportion of datapipe samples to include in train split. Note that the actual proportion is not guaranteed to match train_perc exactly. random_seed (int): determines split membership for a given sample and train_perc. Use the same value across calls to generate consistent splits. Example:: datapipe = criteo_terabyte( ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv") ) train_datapipe, val_datapipe = rand_split_train_val(datapipe, 0.75) train_batch = next(iter(train_datapipe)) val_batch = next(iter(val_datapipe))
Here is the function:
def rand_split_train_val(
    datapipe: IterDataPipe,
    train_perc: float,
    random_seed: int = 0,
) -> Tuple[IterDataPipe, IterDataPipe]:
    """Split ``datapipe`` into disjoint train/val pipes via uniform random sampling.

    Args:
        datapipe (IterDataPipe): datapipe to split.
        train_perc (float): target train proportion, strictly between 0.0 and
            1.0. The realized proportion only approximates ``train_perc``.
        random_seed (int): seed controlling split membership; use the same
            value across calls to generate consistent splits.

    Returns:
        Tuple[IterDataPipe, IterDataPipe]: (train, val) pipes. Both filters
        replay the same seeded RNG, so every sample lands in exactly one split.

    Raises:
        ValueError: if ``train_perc`` is not in the open interval (0.0, 1.0).

    Example::

        datapipe = criteo_terabyte(
            ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        )
        train_datapipe, val_datapipe = rand_split_train_val(datapipe, 0.75)
    """
    if not 0.0 < train_perc < 1.0:
        raise ValueError("train_perc must be in range (0.0, 1.0)")
    train_pipe = _RandFilter(
        datapipe, partial(_rand_train_filter_fn, train_perc), random.Random(random_seed)
    )
    val_pipe = _RandFilter(
        datapipe, partial(_rand_val_filter_fn, train_perc), random.Random(random_seed)
    )
    return train_pipe, val_pipe
9,124 | import csv
import math
import random
from dataclasses import dataclass
from functools import partial
from io import IOBase
from typing import Any, Callable, Iterable, Iterator, List, Sequence, Tuple, TypeVar
import torch
from iopath.common.file_io import PathManager, PathManagerFactory
from torch.utils.data import functional_datapipe, get_worker_info, IterDataPipe
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Pipelineable
T = TypeVar("T")
def safe_cast(val: T, dest_type: Callable[[T], T], default: T) -> T:
    """Cast ``val`` with ``dest_type``; fall back to ``default`` on ValueError."""
    try:
        result = dest_type(val)
    except ValueError:
        return default
    return result
9,125 | import csv
import math
import random
from dataclasses import dataclass
from functools import partial
from io import IOBase
from typing import Any, Callable, Iterable, Iterator, List, Sequence, Tuple, TypeVar
import torch
from iopath.common.file_io import PathManager, PathManagerFactory
from torch.utils.data import functional_datapipe, get_worker_info, IterDataPipe
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
from torchrec.streamable import Pipelineable
def _default_dp_selector(
datapipes: Sequence[IterDataPipe],
) -> Sequence[IterDataPipe]:
worker_info = get_worker_info()
if worker_info is None:
return datapipes
else:
if worker_info.num_workers > len(datapipes):
raise ValueError(
f"Number of workers {worker_info.num_workers} exceeds"
f"number of datapipes ({len(datapipes)})!"
)
offsets = [0]
for num_workers in reversed(range(1, worker_info.num_workers + 1)):
remaining_dps = len(datapipes) - offsets[-1]
dps_to_assign = math.ceil(remaining_dps / num_workers)
offsets.append(offsets[-1] + dps_to_assign)
return datapipes[offsets[worker_info.id] : offsets[worker_info.id + 1]] | null |
9,126 | import os
import shutil
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.data.datapipes as dp
from iopath.common.file_io import PathManager, PathManagerFactory
from pyre_extensions import none_throws
from torch.utils.data import IterableDataset, IterDataPipe
from torchrec.datasets.utils import (
Batch,
LoadFiles,
PATH_MANAGER_KEY,
ReadLinesFromCSV,
safe_cast,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
def _default_row_mapper(example: List[str]) -> Dict[str, Union[int, str]]:
    """Map a split TSV row to {column name: typed value}, aligning from the right.

    All three sequences are walked in reverse, presumably so rows with fewer
    leading columns (e.g. unlabeled test rows) still pair trailing values with
    the correct names and casters — TODO confirm against callers.
    """
    names = reversed(DEFAULT_COLUMN_NAMES)
    casters = reversed(COLUMN_TYPE_CASTERS)
    mapped: Dict[str, Union[int, str]] = {}
    for val in reversed(example):
        mapped[next(names)] = next(casters)(val)
    return mapped
class CriteoIterDataPipe(IterDataPipe):
    """
    IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
    (https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
    Kaggle/Criteo Display Advertising Dataset
    (https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
    files.

    Args:
        paths (Iterable[str]): local paths to TSV files that constitute the Criteo
            dataset.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
            split TSV line.
        open_kw: options to pass to underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        datapipe = CriteoIterDataPipe(
            ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        )
        datapipe = dp.iter.Batcher(datapipe, 100)
        datapipe = dp.iter.Collator(datapipe)
        batch = next(iter(datapipe))
    """
    def __init__(
        self,
        paths: Iterable[str],
        *,
        # pyre-ignore[2]
        row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
        # pyre-ignore[2]
        **open_kw,
    ) -> None:
        self.paths = paths
        self.row_mapper = row_mapper
        # Extra keyword args are forwarded verbatim to PathManager.open.
        self.open_kw: Any = open_kw # pyre-ignore[4]
    # pyre-ignore[3]
    def __iter__(self) -> Iterator[Any]:
        # Shard files across DataLoader workers: worker i reads every
        # num_workers-th path, so no file is read twice.
        worker_info = torch.utils.data.get_worker_info()
        paths = self.paths
        if worker_info is not None:
            paths = (
                path
                for (idx, path) in enumerate(paths)
                if idx % worker_info.num_workers == worker_info.id
            )
        # Pipeline: open each file -> split tab-delimited lines -> optionally
        # apply row_mapper to each split line.
        datapipe = LoadFiles(paths, mode="r", **self.open_kw)
        datapipe = ReadLinesFromCSV(datapipe, delimiter="\t")
        if self.row_mapper:
            datapipe = dp.iter.Mapper(datapipe, self.row_mapper)
        yield from datapipe
The provided code snippet includes necessary dependencies for implementing the `criteo_terabyte` function. Write a Python function `def criteo_terabyte( paths: Iterable[str], *, # pyre-ignore[2] row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper, # pyre-ignore[2] **open_kw, ) -> IterDataPipe` to solve the following problem:
`Criteo 1TB Click Logs <https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/>`_ Dataset Args: paths (Iterable[str]): local paths to TSV files that constitute the Criteo 1TB dataset. row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each split TSV line. open_kw: options to pass to underlying invocation of iopath.common.file_io.PathManager.open. Example:: datapipe = criteo_terabyte( ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv") ) datapipe = dp.iter.Batcher(datapipe, 100) datapipe = dp.iter.Collator(datapipe) batch = next(iter(datapipe))
Here is the function:
def criteo_terabyte(
    paths: Iterable[str],
    *,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`Criteo 1TB Click Logs <https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/>`_ Dataset.

    Thin convenience wrapper constructing a :class:`CriteoIterDataPipe`.

    Args:
        paths (Iterable[str]): local paths to the day_* TSV files.
        row_mapper (Optional[Callable[[List[str]], Any]]): applied to each
            tab-split line; pass None to receive raw string lists.
        open_kw: forwarded to iopath.common.file_io.PathManager.open.

    Returns:
        IterDataPipe: streams one (mapped) row at a time.

    Example::

        datapipe = criteo_terabyte(
            ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        )
        datapipe = dp.iter.Batcher(datapipe, 100)
        datapipe = dp.iter.Collator(datapipe)
        batch = next(iter(datapipe))
    """
    return CriteoIterDataPipe(paths, row_mapper=row_mapper, **open_kw)
9,127 | import os
import shutil
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.data.datapipes as dp
from iopath.common.file_io import PathManager, PathManagerFactory
from pyre_extensions import none_throws
from torch.utils.data import IterableDataset, IterDataPipe
from torchrec.datasets.utils import (
Batch,
LoadFiles,
PATH_MANAGER_KEY,
ReadLinesFromCSV,
safe_cast,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
def _default_row_mapper(example: List[str]) -> Dict[str, Union[int, str]]:
    """Convert one split TSV row into a {column name: typed value} dict.

    Names, casters and values are consumed in reverse, presumably so shorter
    rows (missing leading columns) still align with the trailing column names
    — TODO confirm against callers.
    """
    name_iter = reversed(DEFAULT_COLUMN_NAMES)
    caster_iter = reversed(COLUMN_TYPE_CASTERS)
    row: Dict[str, Union[int, str]] = {}
    for raw_value in reversed(example):
        row[next(name_iter)] = next(caster_iter)(raw_value)
    return row
class CriteoIterDataPipe(IterDataPipe):
    """
    IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
    (https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
    Kaggle/Criteo Display Advertising Dataset
    (https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
    files.

    Args:
        paths (Iterable[str]): local paths to TSV files that constitute the Criteo
            dataset.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
            split TSV line.
        open_kw: options to pass to underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example::

        datapipe = CriteoIterDataPipe(
            ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        )
        datapipe = dp.iter.Batcher(datapipe, 100)
        datapipe = dp.iter.Collator(datapipe)
        batch = next(iter(datapipe))
    """
    def __init__(
        self,
        paths: Iterable[str],
        *,
        # pyre-ignore[2]
        row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
        # pyre-ignore[2]
        **open_kw,
    ) -> None:
        self.paths = paths
        self.row_mapper = row_mapper
        # Extra keyword args are forwarded verbatim to PathManager.open.
        self.open_kw: Any = open_kw # pyre-ignore[4]
    # pyre-ignore[3]
    def __iter__(self) -> Iterator[Any]:
        # Shard files across DataLoader workers: worker i reads every
        # num_workers-th path, so no file is read twice.
        worker_info = torch.utils.data.get_worker_info()
        paths = self.paths
        if worker_info is not None:
            paths = (
                path
                for (idx, path) in enumerate(paths)
                if idx % worker_info.num_workers == worker_info.id
            )
        # Pipeline: open each file -> split tab-delimited lines -> optionally
        # apply row_mapper to each split line.
        datapipe = LoadFiles(paths, mode="r", **self.open_kw)
        datapipe = ReadLinesFromCSV(datapipe, delimiter="\t")
        if self.row_mapper:
            datapipe = dp.iter.Mapper(datapipe, self.row_mapper)
        yield from datapipe
The provided code snippet includes necessary dependencies for implementing the `criteo_kaggle` function. Write a Python function `def criteo_kaggle( path: str, *, # pyre-ignore[2] row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper, # pyre-ignore[2] **open_kw, ) -> IterDataPipe` to solve the following problem:
`Kaggle/Criteo Display Advertising <https://www.kaggle.com/c/criteo-display-ad-challenge/>`_ Dataset Args: path (str): local path to train or test dataset file. row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each split TSV line. open_kw: options to pass to underlying invocation of iopath.common.file_io.PathManager.open. Example:: train_datapipe = criteo_kaggle( "/home/datasets/criteo_kaggle/train.txt", ) example = next(iter(train_datapipe)) test_datapipe = criteo_kaggle( "/home/datasets/criteo_kaggle/test.txt", ) example = next(iter(test_datapipe))
Here is the function:
def criteo_kaggle(
    path: str,
    *,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`Kaggle/Criteo Display Advertising <https://www.kaggle.com/c/criteo-display-ad-challenge/>`_ Dataset.

    Thin convenience wrapper constructing a :class:`CriteoIterDataPipe` over a
    single file.

    Args:
        path (str): local path to the train or test dataset file.
        row_mapper (Optional[Callable[[List[str]], Any]]): applied to each
            tab-split line; pass None to receive raw string lists.
        open_kw: forwarded to iopath.common.file_io.PathManager.open.

    Returns:
        IterDataPipe: streams one (mapped) row at a time.

    Example::

        train_datapipe = criteo_kaggle("/home/datasets/criteo_kaggle/train.txt")
        example = next(iter(train_datapipe))
    """
    return CriteoIterDataPipe((path,), row_mapper=row_mapper, **open_kw)
9,128 | import argparse
import os
import sys
from multiprocessing import Manager, Process
from typing import List
import numpy as np
from torchrec.datasets.criteo import BinaryCriteoUtils
def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse CLI options for the npy shuffle script.

    Args:
        argv (List[str]): raw argument list (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace: parsed options.
    """
    arg_parser = argparse.ArgumentParser(description="Shuffle preprocessed npy dataset.")
    arg_parser.add_argument(
        "--input_dir_labels_and_dense",
        type=str,
        required=True,
        help="Input directory containing labels and dense features.",
    )
    arg_parser.add_argument(
        "--input_dir_sparse",
        type=str,
        required=True,
        help="Input directory with sparse features. Sometimes these"
        " features can be stored in a separate directory from the"
        " labels and dense features as extra pre-processing was"
        " applied to them.",
    )
    arg_parser.add_argument(
        "--output_dir_full_set",
        type=str,
        default=None,
        help="If specified, store the full dataset (unshuffled).",
    )
    arg_parser.add_argument(
        "--output_dir_shuffled",
        type=str,
        required=True,
        help="Output directory to store split shuffled npy files.",
    )
    arg_parser.add_argument(
        "--random_seed",
        type=int,
        default=0,
        help="random seed for the dataset shuffle",
    )
    return arg_parser.parse_args(argv)
9,129 | import argparse
import os
import sys
from multiprocessing import Manager, Process
from typing import List
import numpy as np
from torchrec.datasets.criteo import BinaryCriteoUtils
def count_rows(rows_per_file, path, day):
    """Record the row count of one day's label file into ``rows_per_file``.

    ``rows_per_file`` is presumably a multiprocessing.Manager dict shared with
    the parent process — TODO confirm against callers.
    """
    labels_path = os.path.join(path, f"day_{day}_labels.npy")
    num_rows = np.load(labels_path).shape[0]
    rows_per_file[day] = num_rows
    print(f"counted {num_rows} for {labels_path}")
9,130 | import argparse
import math
import os
import time
from typing import Sequence
import numpy as np
from tqdm import tqdm
from utils.criteo_constant import (
CAT_FEATURE_COUNT,
DEFAULT_INT_NAMES,
NUM_EMBEDDINGS_PER_FEATURE,
)
def split_binary_file(
    binary_file_path: str,
    output_dir: str,
    categorical_feature_sizes: Sequence[int],
    batch_size: int,
    source_data_type: str = "int32",
):
    """Split one packed Criteo binary into per-feature binary files.

    Each input record is ``label + dense ints + categorical ids`` stored as
    fixed-width values. Writes ``numerical.bin`` (float32), ``label.bin``
    (float32) and one ``cat_<i>.bin`` (int32) per categorical feature into
    ``output_dir``, processing ``batch_size`` records per read.

    NOTE(review): the read below hardcodes ``dtype=np.int32`` even though
    ``source_data_type`` is used to size the records — confirm behavior for
    non-int32 inputs.
    """
    record_width = (
        1 + len(DEFAULT_INT_NAMES) + len(categorical_feature_sizes)
    )  # label + numerical + categorical
    bytes_per_feature = np.__dict__[source_data_type]().nbytes
    bytes_per_entry = record_width * bytes_per_feature
    total_size = os.path.getsize(binary_file_path)
    batches_num = int(math.ceil((total_size // bytes_per_entry) / batch_size))
    # Every opened handle is tracked so the finally block can close them all.
    file_streams = []
    try:
        input_data_f = open(binary_file_path, "rb")  # noqa: P201
        file_streams.append(input_data_f)
        numerical_f = open(  # noqa: P201
            os.path.join(output_dir, "numerical.bin"), "wb+"
        )
        file_streams.append(numerical_f)
        label_f = open(os.path.join(output_dir, "label.bin"), "wb+")  # noqa: P201
        file_streams.append(label_f)
        categorical_fs = []
        for i in range(len(categorical_feature_sizes)):
            fs = open(os.path.join(output_dir, f"cat_{i}.bin"), "wb+")  # noqa: P201
            categorical_fs.append(fs)
            file_streams.append(fs)
        for _ in tqdm(range(batches_num)):
            raw_data = np.frombuffer(
                input_data_f.read(bytes_per_entry * batch_size), dtype=np.int32
            )
            batch_data = raw_data.reshape(-1, record_width)
            # .view reinterprets the int32 bytes as float32 (bitwise view, not
            # a numeric cast).
            numerical_features = batch_data[:, 1 : 1 + len(DEFAULT_INT_NAMES)].view(
                dtype=np.float32
            )
            numerical_f.write(numerical_features.tobytes())
            label = batch_data[:, 0]
            label_f.write(label.astype(np.float32).tobytes())
            # Categorical columns start right after the label + dense block.
            cat_offset = len(DEFAULT_INT_NAMES) + 1
            for cat_idx in range(CAT_FEATURE_COUNT):
                cat_data = batch_data[
                    :, (cat_idx + cat_offset) : (cat_idx + cat_offset + 1)
                ].astype(np.int32)
                categorical_fs[cat_idx].write(cat_data.tobytes())
    finally:
        for stream in file_streams:
            stream.close()
# Cardinality (number of embedding rows) for each of the 26 Criteo categorical
# features, in feature order; the largest features are capped at 40M rows.
NUM_EMBEDDINGS_PER_FEATURE = [
    40000000,
    39060,
    17295,
    7424,
    20265,
    3,
    7122,
    1543,
    63,
    40000000,
    3067956,
    405282,
    10,
    2209,
    11938,
    155,
    4,
    976,
    14,
    40000000,
    40000000,
    40000000,
    590152,
    12973,
    108,
    36,
]
def split_dataset(dataset_dir: str, output_dir: str, batch_size: int):
    """Split the Criteo train/test/validation binaries into per-feature files.

    Creates ``output_dir`` plus train/test/validation subdirectories, then runs
    ``split_binary_file`` on each source binary.
    """
    sources = {
        "train": os.path.join(dataset_dir, "train_data.bin"),
        "test": os.path.join(dataset_dir, "test_data.bin"),
        "validation": os.path.join(dataset_dir, "validation_data.bin"),
    }
    os.makedirs(output_dir, exist_ok=True)
    targets = {}
    for split in ("train", "test", "validation"):
        target = os.path.join(output_dir, split)
        os.makedirs(target, exist_ok=True)
        targets[split] = target
    # Keep the original processing order: test, then train, then validation.
    for split in ("test", "train", "validation"):
        split_binary_file(
            sources[split],
            targets[split],
            NUM_EMBEDDINGS_PER_FEATURE,
            batch_size,
        )
9,131 | import argparse
import os
import shutil
import subprocess
import time
import numpy as np
import nvtabular as nvt
from utils.criteo_constant import DAYS, DEFAULT_COLUMN_NAMES, DEFAULT_LABEL_NAME
from utils.dask import setup_dask
# NVTabular read dtypes: columns 0-13 (and the label) parse as int32, the
# remaining categorical columns parse as hex strings.
dtypes = {c: np.int32 for c in DEFAULT_COLUMN_NAMES[:14] + [DEFAULT_LABEL_NAME]}
dtypes.update({c: "hex" for c in DEFAULT_COLUMN_NAMES[14:]})
# Number of day_* files in the Criteo 1TB dataset.
DAYS = 24
# Column order: label first, then dense int features, then categorical features.
DEFAULT_COLUMN_NAMES: List[str] = [
    DEFAULT_LABEL_NAME,
    *DEFAULT_INT_NAMES,
    *DEFAULT_CAT_NAMES,
]
def convert_tsv_to_parquet(input_path: str, output_base_path: str):
    """Convert the raw Criteo day_* TSV files to parquet via NVTabular.

    Day 23 is first split in half with head/tail (part0/part1); all inputs are
    then loaded as one nvt.Dataset and written to
    ``<output_base_path>/criteo_parquet`` preserving per-file boundaries.
    """
    read_config = {
        "engine": "csv",
        "names": DEFAULT_COLUMN_NAMES,
        "sep": "\t",
        "dtypes": dtypes,
        "part_size": "128MB",
    }
    output_path = os.path.join(output_base_path, "criteo_parquet")
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    os.makedirs(output_path)

    # Split the last day into two halves; the first half keeps the extra line
    # when the count is odd.
    day_23_path = os.path.join(input_path, "day_23")
    total_lines = int(
        subprocess.check_output((f"wc -l {day_23_path}").split()).split()[0]
    )
    valid_set_size = total_lines // 2
    test_set_size = total_lines - valid_set_size
    split_start = time.time()
    with open(os.path.join(input_path, "day_23.part0"), "w") as part0:
        subprocess.run(
            ["head", "-n", str(test_set_size), str(day_23_path)],
            stdout=part0,
        )
    with open(os.path.join(input_path, "day_23.part1"), "w") as part1:
        subprocess.run(
            ["tail", "-n", str(valid_set_size), str(day_23_path)],
            stdout=part1,
        )
    print(f"finished splitting the last day, took {time.time() - split_start}")

    input_paths = [
        os.path.join(input_path, f"day_{day}") for day in range(DAYS - 1)
    ] + [os.path.join(input_path, f"day_23.part{i}") for i in range(2)]
    print(f"handling the input paths: {input_paths}")
    tsv_dataset = nvt.Dataset(input_paths, **read_config)
    print("finished loading the tsv dataset")
    tsv_dataset.to_parquet(
        output_path,
        preserve_files=True,
    )
9,132 | import argparse
import os
import shutil
import subprocess
import time
import numpy as np
import nvtabular as nvt
from utils.criteo_constant import DAYS, DEFAULT_COLUMN_NAMES, DEFAULT_LABEL_NAME
from utils.dask import setup_dask
def parse_args():
    """Parse CLI options for the tsv -> parquet conversion script (reads sys.argv)."""
    arg_parser = argparse.ArgumentParser(description="Convert criteo tsv to parquet")
    arg_parser.add_argument(
        "--input_path", "-i", dest="input_path", help="Input path containing tsv files"
    )
    arg_parser.add_argument(
        "--output_base_path", "-o", dest="output_base_path", help="Output base path"
    )
    return arg_parser.parse_args()
9,133 | import argparse
import os
import shutil
import time
import numpy as np
import nvtabular as nvt
from merlin.io import Shuffle
from utils.criteo_constant import (
DAYS,
DEFAULT_CAT_NAMES,
DEFAULT_COLUMN_NAMES,
DEFAULT_INT_NAMES,
DEFAULT_LABEL_NAME,
NUM_EMBEDDINGS_PER_FEATURE_DICT,
)
from utils.dask import setup_dask
def parse_args():
    """Parse CLI options for the Criteo preprocessing script (reads sys.argv)."""
    arg_parser = argparse.ArgumentParser(description="Preprocess criteo dataset")
    arg_parser.add_argument("--base_path", "-b", dest="base_path", help="Base path")
    # NOTE(review): the help text below looks copy-pasted from --base_path;
    # preserved as-is since it is user-visible output.
    arg_parser.add_argument(
        "--shuffle_train",
        "-s",
        dest="shuffle_train",
        default=False,
        action="store_true",
        help="Base path",
    )
    return arg_parser.parse_args()
9,134 | import argparse
import glob
import os
import time
import numpy as np
import pandas as pd
import tqdm
from joblib import delayed, Parallel
from utils.criteo_constant import (
DEFAULT_CAT_NAMES,
DEFAULT_COLUMN_NAMES,
DEFAULT_INT_NAMES,
DEFAULT_LABEL_NAME,
)
# Column-name constants for the Criteo schema: label column, dense integer
# feature names, categorical feature names, and the full TSV column order.
DEFAULT_LABEL_NAME = "label"
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]
DEFAULT_COLUMN_NAMES: List[str] = [
    DEFAULT_LABEL_NAME,
    *DEFAULT_INT_NAMES,
    *DEFAULT_CAT_NAMES,
]
def process_file(f, dst):
    """Convert one parquet shard into a packed binary record file under ``dst``.

    Columns are reordered to the Criteo schema and cast (label int32, dense
    float32, categorical int32), then written as raw records to
    ``<dst>/<basename>.bin``.
    """
    frame = pd.read_parquet(f)
    frame = frame[DEFAULT_COLUMN_NAMES]
    frame[DEFAULT_LABEL_NAME] = frame[DEFAULT_LABEL_NAME].astype(np.int32)
    frame[DEFAULT_INT_NAMES] = frame[DEFAULT_INT_NAMES].astype(np.float32)
    frame[DEFAULT_CAT_NAMES] = frame[DEFAULT_CAT_NAMES].astype(np.int32)
    raw_bytes = frame.to_records(index=False).tobytes()
    dst_file = dst + "/" + f.split("/")[-1] + ".bin"
    with open(dst_file, "wb") as dst_fd:
        dst_fd.write(raw_bytes)
9,135 | import os
import shutil
import numba
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from nvtabular.utils import device_mem_size
def setup_dask(dask_workdir):
    """Start a LocalCUDACluster (one worker per visible GPU) and return a Client.

    Recreates ``dask_workdir`` from scratch and sizes the per-device memory
    limit and RMM pool as fractions of total GPU memory.
    """
    if os.path.exists(dask_workdir):
        shutil.rmtree(dask_workdir)
    os.makedirs(dask_workdir)

    device_limit_frac = 0.8  # Spill GPU-Worker memory to host at this limit.
    device_pool_frac = 0.7
    # Use total device size to calculate device limit and pool_size
    total_device_mem = device_mem_size(kind="total")
    device_limit = int(device_limit_frac * total_device_mem)
    pool_size = int(device_pool_frac * total_device_mem)

    n_gpus = len(numba.cuda.gpus)
    cluster = LocalCUDACluster(
        protocol="tcp",
        n_workers=n_gpus,
        CUDA_VISIBLE_DEVICES=range(n_gpus),
        device_memory_limit=device_limit,
        local_directory=dask_workdir,
        # RMM pool sizes must be a multiple of 256 bytes.
        rmm_pool_size=(pool_size // 256) * 256,
    )
    return Client(cluster)
9,136 | import argparse
import os
import sys
from typing import List
from torchrec.datasets.criteo import BinaryCriteoUtils
def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse CLI options for the sparse -> contiguous preprocessing script.

    Args:
        argv (List[str]): raw argument list (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace: parsed options.
    """
    parser = argparse.ArgumentParser(
        description="Criteo sparse -> contiguous preprocessing script. "
    )
    parser.add_argument(
        "--input_dir",
        type=str,
        required=True,
        help="Input directory containing the sparse features in numpy format (.npy). Files in the directory "
        "should be named day_{0-23}_sparse.npy.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Output directory to store npy files.",
    )
    parser.add_argument(
        "--frequency_threshold",
        type=int,
        default=0,
        # Typo fix in user-facing help text: "occuring" -> "occurring".
        help="IDs occurring less than this frequency will be remapped to an index of 1. If this value is not set (e.g. 0), no frequency thresholding will be applied.",
    )
    return parser.parse_args(argv)
9,137 | import argparse
import os
import sys
from typing import List
from torchrec.datasets.criteo import BinaryCriteoUtils
def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse CLI options for the tsv -> npy preprocessing script.

    Args:
        argv (List[str]): raw argument list (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace: parsed options.
    """
    arg_parser = argparse.ArgumentParser(
        description="Criteo tsv -> npy preprocessing script."
    )
    arg_parser.add_argument(
        "--input_dir",
        type=str,
        required=True,
        help="Input directory containing Criteo tsv files."
        "For criteo_1tb, files in the directory should be named day_{0-23}."
        "For criteo_kaggle, files in the directory should be train.txt & test.txt.",
    )
    arg_parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Output directory to store npy files.",
    )
    arg_parser.add_argument(
        "--dataset_name",
        type=str,
        choices=["criteo_1tb", "criteo_kaggle"],
        default="criteo_1tb",
        help="dataset for experiment, current support criteo_1tb, criteo_kaggle",
    )
    return arg_parser.parse_args(argv)
9,138 | import os
from typing import Any, Callable, Dict, List, Optional, Union
from torch.utils.data import IterDataPipe
from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast
def _default_row_mapper(example: List[str]) -> Dict[str, Union[float, int, str]]:
    """Map a split ratings row to {column name: typed value} by position."""
    mapped: Dict[str, Union[float, int, str]] = {}
    for idx, val in enumerate(example):
        mapped[DEFAULT_COLUMN_NAMES[idx]] = COLUMN_TYPE_CASTERS[idx](val)
    return mapped
def _movielens(
    root: str,
    *,
    include_movies_data: bool = False,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """Shared builder for the MovieLens dataset pipes.

    Streams the ratings file (``RATINGS_FILENAME``) under ``root`` with the
    header line skipped, optionally joins in movie metadata, then applies
    ``row_mapper`` to each comma-split line.
    """
    ratings_path = os.path.join(root, RATINGS_FILENAME)
    # skip_first_line drops the CSV header row.
    datapipe = LoadFiles((ratings_path,), mode="r", **open_kw)
    datapipe = ReadLinesFromCSV(datapipe, skip_first_line=True, delimiter=",")
    if include_movies_data:
        # Enrich each rating row with the matching movies record.
        datapipe = _join_with_movies(datapipe, root)
    if row_mapper:
        datapipe = datapipe.map(row_mapper)
    return datapipe
The provided code snippet includes necessary dependencies for implementing the `movielens_20m` function. Write a Python function `def movielens_20m( root: str, *, include_movies_data: bool = False, # pyre-ignore[2] row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper, # pyre-ignore[2] **open_kw, ) -> IterDataPipe` to solve the following problem:
`MovieLens 20M <https://grouplens.org/datasets/movielens/20m/>`_ Dataset Args: root (str): local path to root directory containing MovieLens 20M dataset files. include_movies_data (bool): if True, adds movies data to each line. row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each split line. open_kw: options to pass to underlying invocation of iopath.common.file_io.PathManager.open. Example:: datapipe = movielens_20m("/home/datasets/ml-20") datapipe = dp.iter.Batch(datapipe, 100) datapipe = dp.iter.Collate(datapipe) batch = next(iter(datapipe))
Here is the function:
def movielens_20m(
    root: str,
    *,
    include_movies_data: bool = False,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`MovieLens 20M <https://grouplens.org/datasets/movielens/20m/>`_ Dataset.

    Thin wrapper around the shared MovieLens builder.

    Args:
        root (str): local path to root directory containing MovieLens 20M dataset files.
        include_movies_data (bool): if True, joins movie metadata onto each line.
        row_mapper (Optional[Callable[[List[str]], Any]]): applied to each split line.
        open_kw: forwarded to iopath.common.file_io.PathManager.open.

    Returns:
        IterDataPipe: one (mapped) rating row per iteration.

    Example::

        datapipe = movielens_20m("/home/datasets/ml-20")
        batch = next(iter(datapipe))
    """
    return _movielens(
        root,
        include_movies_data=include_movies_data,
        row_mapper=row_mapper,
        **open_kw,
    )
9,139 | import os
from typing import Any, Callable, Dict, List, Optional, Union
from torch.utils.data import IterDataPipe
from torchrec.datasets.utils import LoadFiles, ReadLinesFromCSV, safe_cast
def _default_row_mapper(example: List[str]) -> Dict[str, Union[float, int, str]]:
    """Convert a split ratings row into {column name: typed value} by position."""
    row: Dict[str, Union[float, int, str]] = {}
    for position, raw_value in enumerate(example):
        row[DEFAULT_COLUMN_NAMES[position]] = COLUMN_TYPE_CASTERS[position](raw_value)
    return row
def _movielens(
    root: str,
    *,
    include_movies_data: bool = False,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """Shared builder for the MovieLens dataset pipes.

    Streams the ratings file (``RATINGS_FILENAME``) under ``root`` with the
    header line skipped, optionally joins in movie metadata, then applies
    ``row_mapper`` to each comma-split line.
    """
    ratings_path = os.path.join(root, RATINGS_FILENAME)
    # skip_first_line drops the CSV header row.
    datapipe = LoadFiles((ratings_path,), mode="r", **open_kw)
    datapipe = ReadLinesFromCSV(datapipe, skip_first_line=True, delimiter=",")
    if include_movies_data:
        # Enrich each rating row with the matching movies record.
        datapipe = _join_with_movies(datapipe, root)
    if row_mapper:
        datapipe = datapipe.map(row_mapper)
    return datapipe
The provided code snippet includes necessary dependencies for implementing the `movielens_25m` function. Write a Python function `def movielens_25m( root: str, *, include_movies_data: bool = False, # pyre-ignore[2] row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper, # pyre-ignore[2] **open_kw, ) -> IterDataPipe` to solve the following problem:
`MovieLens 25M <https://grouplens.org/datasets/movielens/25m/>`_ Dataset Args: root (str): local path to root directory containing MovieLens 25M dataset files. include_movies_data (bool): if True, adds movies data to each line. row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each split line. open_kw: options to pass to underlying invocation of iopath.common.file_io.PathManager.open. Example:: datapipe = movielens_25m("/home/datasets/ml-25") datapipe = dp.iter.Batch(datapipe, 100) datapipe = dp.iter.Collate(datapipe) batch = next(iter(datapipe))
Here is the function:
def movielens_25m(
    root: str,
    *,
    include_movies_data: bool = False,
    # pyre-ignore[2]
    row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    # pyre-ignore[2]
    **open_kw,
) -> IterDataPipe:
    """`MovieLens 25M <https://grouplens.org/datasets/movielens/25m/>`_ Dataset.

    Thin wrapper around the shared MovieLens builder.

    Args:
        root (str): local path to root directory containing MovieLens 25M dataset files.
        include_movies_data (bool): if True, joins movie metadata onto each line.
        row_mapper (Optional[Callable[[List[str]], Any]]): applied to each split line.
        open_kw: forwarded to iopath.common.file_io.PathManager.open.

    Returns:
        IterDataPipe: one (mapped) rating row per iteration.

    Example::

        datapipe = movielens_25m("/home/datasets/ml-25")
        batch = next(iter(datapipe))
    """
    return _movielens(
        root,
        include_movies_data=include_movies_data,
        row_mapper=row_mapper,
        **open_kw,
    )
9,140 | from typing import Any, Dict, Iterable, List
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
def _single_tensor_adagrad(
params: List[Tensor],
grads: List[Tensor],
state_sums: List[Tensor],
state_steps: List[Tensor],
*,
lr: float,
weight_decay: float,
lr_decay: float,
eps: float,
maximize: bool,
) -> None:
for param, grad, state_sum, step_t in zip(params, grads, state_sums, state_steps):
if grad.is_sparse:
raise RuntimeError("RowWise adagrad cannot be used with sparse gradients")
# update step
step_t += 1
step = step_t.item()
grad = grad if not maximize else -grad
row_wise_grad = grad.mean(axis=1).view(-1, 1)
if weight_decay != 0:
grad = grad.add(param, alpha=weight_decay)
row_wise_grad = grad.add(param, alpha=weight_decay)
clr = lr / (1 + (step - 1) * lr_decay)
state_sum.addcmul_(row_wise_grad, row_wise_grad, value=1)
std = state_sum.sqrt().add_(eps)
param.addcdiv_(row_wise_grad, std, value=-clr)
The provided code snippet includes necessary dependencies for implementing the `adagrad` function. Write a Python function `def adagrad( params: List[Tensor], grads: List[Tensor], state_sums: List[Tensor], state_steps: List[Tensor], # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627 # setting these as kwargs for now as functional API is compiled by torch/distributed/optim *, lr: float, weight_decay: float, lr_decay: float, eps: float, maximize: bool, ) -> None` to solve the following problem:
r"""Functional API that performs Adagrad algorithm computation. See :class:`~torch.optim.Adagrad` for details.
Here is the function:
def adagrad(
    params: List[Tensor],
    grads: List[Tensor],
    state_sums: List[Tensor],
    state_steps: List[Tensor],
    # kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
    # setting these as kwargs for now as functional API is compiled by torch/distributed/optim
    *,
    lr: float,
    weight_decay: float,
    lr_decay: float,
    eps: float,
    maximize: bool,
) -> None:
    r"""Functional entry point for the row-wise Adagrad update.

    Validates that every step counter is a singleton tensor, then delegates the
    per-tensor math to :func:`_single_tensor_adagrad`. See
    :class:`~torch.optim.Adagrad` for algorithm details.
    """
    for step_t in state_steps:
        if not isinstance(step_t, torch.Tensor):
            raise RuntimeError(
                "API has changed, `state_steps` argument must contain a list of singleton tensors"
            )

    _single_tensor_adagrad(
        params,
        grads,
        state_sums,
        state_steps,
        lr=lr,
        weight_decay=weight_decay,
        lr_decay=lr_decay,
        eps=eps,
        maximize=maximize,
    )
9,141 | import logging
import math
from dataclasses import dataclass
from enum import Enum, unique
from typing import Any, List, Tuple
import torch
from torchrec.optim.keyed import KeyedOptimizer, OptimizerWrapper
class WarmupPolicy(Enum):
    """Shapes of the learning-rate schedule a WarmupStage can apply."""

    NONE = "none"
    LINEAR = "linear"
    CONSTANT = "constant"
    POLY = "poly"
    STEP = "step"
    INVSQRT = "inv_sqrt"  # inverse square root
class WarmupStage:
    """One stage of a warmup/decay schedule, active up to ``max_iters``."""

    # Schedule shape applied while this stage is active.
    policy: WarmupPolicy = WarmupPolicy.LINEAR
    # Last global iteration this stage covers (must exceed the previous stage's).
    max_iters: int = 1
    # Policy-specific parameter (e.g. start multiplier for LINEAR, base for STEP).
    value: float = 1.0
    # Constant factor multiplied onto the computed multiplier.
    lr_scale: float = 1.0
    # used as number denominator for iters in poly decay
    # default to max_iters if not set to value > 0
    # also used as stepsize in step decay
    # default to 1 if not set to value > 0
    decay_iters: int = -1
def _lr_stages(stages: List[WarmupStage]) -> List[WarmupStage]:
    """Validate and normalize warmup stages, appending an open-ended terminal stage.

    Checks that ``max_iters`` is strictly increasing, fills in default
    ``decay_iters`` per policy (mutating the stages in place), and returns the
    stages followed by a NONE-policy stage that never expires.
    """
    terminal = WarmupStage(policy=WarmupPolicy.NONE, max_iters=1 << 63, value=1.0)
    if not stages:
        return [terminal]
    prev_max = 0
    for st in stages:
        assert st.max_iters > prev_max, (
            f"Max iter of the stage {st} must be greater than the previous "
            f"max iter {prev_max}"
        )
        prev_max = st.max_iters
        # Fill the policy-specific default for an unset decay_iters.
        if st.decay_iters <= 0:
            st.decay_iters = 1 if st.policy == WarmupPolicy.STEP else st.max_iters
    return stages + [terminal]
9,142 | import logging
import math
from dataclasses import dataclass
from enum import Enum, unique
from typing import Any, List, Tuple
import torch
from torchrec.optim.keyed import KeyedOptimizer, OptimizerWrapper
class WarmupPolicy(Enum):
class WarmupStage:
def _get_multiplier(stage: WarmupStage, iter: int) -> float:
    """Return the lr multiplier for ``stage`` at global iteration ``iter``.

    The policy-specific base multiplier is scaled by ``stage.lr_scale``.
    """
    scale = stage.lr_scale
    policy = stage.policy
    if policy == WarmupPolicy.LINEAR:
        # Ramp from stage.value up to 1.0 over max_iters.
        return (stage.value + (1.0 - stage.value) * iter / stage.max_iters) * scale
    if policy == WarmupPolicy.CONSTANT:
        return stage.value * scale
    if policy == WarmupPolicy.POLY:
        return math.pow(1 - iter / stage.decay_iters, stage.value) * scale
    if policy == WarmupPolicy.STEP:
        return math.pow(stage.value, iter // stage.decay_iters) * scale
    if policy == WarmupPolicy.INVSQRT:
        # NOTE(review): raises ZeroDivisionError when iter == 0 -- confirm callers
        # only reach INVSQRT with iter >= 1.
        return (1.0 / math.sqrt(iter)) * scale
    # WarmupPolicy.NONE (and any unrecognized policy): identity base multiplier.
    return scale
9,143 | from typing import Any, Dict, Iterable, Type
from warnings import warn
import torch
The provided code snippet includes necessary dependencies for implementing the `apply_optimizer_in_backward` function. Write a Python function `def apply_optimizer_in_backward( optimizer_class: Type[torch.optim.Optimizer], params: Iterable[torch.nn.Parameter], optimizer_kwargs: Dict[str, Any], ) -> None` to solve the following problem:
NOTE: This API is deprecated. Please use Pytorch Distributed's _apply_optimizer_in_backward instead. Upon backwards(), parameters will fire the corresponding optimizer Each parameter will have the optimizer_class and optimizer_kwargs attached to _optimizer and _optimizer_kwargs. Note - gradients for these parameters will be set to None after backwards(). This means that any other (non applied) optimizer over this parameter will be a no-op. Args: optimizer_class: Type[torch.optim.Optimizer]: Optimizer to apply to parameter params: Iterator[nn.Parameter]: parameters to apply optimizer state to optimizer_kwargs: Dict[str, Any]: kwargs to pass to optimizer constructor Example:: params_generator = model.parameters() param_1 = next(params_generator) param_2 = list(params_generator) apply_optimizer_in_backward(torch.optim.SGD, [param_1], {"lr": .02}) apply_optimizer_in_backward(torch.optim.Adam, param_2, {"lr": .04}) print(param_1._optimizer, param_1._optimizer_kwargs) >> torch.optim.SGD, {"lr": .02}
Here is the function:
def apply_optimizer_in_backward(
    optimizer_class: Type[torch.optim.Optimizer],
    params: Iterable[torch.nn.Parameter],
    optimizer_kwargs: Dict[str, Any],
) -> None:
    """Deprecated shim over PyTorch Distributed's fused backward-pass optimizer.

    Registers ``optimizer_class`` (constructed with ``optimizer_kwargs``) to run
    on each of ``params`` during ``backward()``. After the hook fires, the
    parameter's gradient is set to None, so any ordinary optimizer stepping the
    same parameter becomes a no-op. Emits a DeprecationWarning; use
    ``torch.distributed.optim._apply_optimizer_in_backward`` directly instead.

    Args:
        optimizer_class: optimizer type to apply per parameter.
        params: parameters to attach the in-backward optimizer to.
        optimizer_kwargs: keyword arguments forwarded to the optimizer constructor.
    """
    from torch.distributed.optim import _apply_optimizer_in_backward as _impl

    warn(
        "This API is deprecated. Please use Pytorch Distributed's _apply_optimizer_in_backward API instead.",
        DeprecationWarning,
    )
    _impl(
        optimizer_class=optimizer_class,
        params=params,
        optimizer_kwargs=optimizer_kwargs,
    )
9,144 | from typing import Dict, List, Optional, Tuple
import torch
from torch import nn
from torchrec.datasets.utils import Batch
from torchrec.modules.crossnet import LowRankCrossNet
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.modules.mlp import MLP
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
The provided code snippet includes necessary dependencies for implementing the `choose` function. Write a Python function `def choose(n: int, k: int) -> int` to solve the following problem:
Simple implementation of math.comb for Python 3.7 compatibility.
Here is the function:
def choose(n: int, k: int) -> int:
    """
    Simple implementation of math.comb for Python 3.7 compatibility.
    """
    # Out-of-range selections have zero combinations.
    if k < 0 or k > n:
        return 0
    # Symmetry C(n, k) == C(n, n - k): iterate over the smaller count.
    k = min(k, n - k)
    numerator = 1
    denominator = 1
    for i in range(1, k + 1):
        numerator *= n - i + 1
        denominator *= i
    # Exact integer division: denominator always divides numerator.
    return numerator // denominator
9,145 | import argparse
import concurrent.futures
import json
import os
import subprocess
from typing import List
from usort import config as usort_config, usort
from utils import as_posix, LintMessage, LintSeverity
class LintSeverity(str, Enum):
    """Severity of a lint finding (str-valued so it serializes to JSON directly)."""

    ERROR = "error"
    WARNING = "warning"
    ADVICE = "advice"
    DISABLED = "disabled"
class LintMessage(NamedTuple):
    """One lintrunner-protocol message describing a finding and optional fix."""

    # File the finding applies to; None for tool-level failures.
    path: Optional[str]
    # Position of the finding; None for whole-file messages.
    line: Optional[int]
    char: Optional[int]
    # Linter code, e.g. "USORT" or "BLACK".
    code: str
    severity: LintSeverity
    # Short machine-readable name, e.g. "format", "timeout", "command-failed".
    name: str
    # Original and suggested file contents; both set for auto-fixable findings.
    original: Optional[str]
    replacement: Optional[str]
    # Human-readable explanation / remediation hint.
    description: Optional[str]
def as_posix(name: str) -> str:
    """Normalize a path string to forward slashes on Windows; identity elsewhere."""
    if IS_WINDOWS:
        return name.replace("\\", "/")
    return name
def check_file(
    filename: str,
) -> List[LintMessage]:
    """Sort imports in ``filename`` with usort and report a fixable diff.

    Returns an empty list when the file is already sorted; otherwise a single
    WARNING-severity LintMessage whose ``replacement`` holds the sorted text.
    Tool failures are reported as LintMessages rather than raised.
    """
    try:
        # Custom category so Cinder's magic imports sort right after
        # __future__ imports, ahead of the standard library.
        top_of_file_cat = usort_config.Category("top_of_file")
        known = usort_config.known_factory()
        # cinder magic imports must be on top (after future imports)
        known["__strict__"] = top_of_file_cat
        known["__static__"] = top_of_file_cat
        config = usort_config.Config(
            categories=(
                (
                    usort_config.CAT_FUTURE,
                    top_of_file_cat,
                    usort_config.CAT_STANDARD_LIBRARY,
                    usort_config.CAT_THIRD_PARTY,
                    usort_config.CAT_FIRST_PARTY,
                )
            ),
            known=known,
        )
        with open(filename, mode="rb") as f:
            original = f.read()
        result = usort(original, config)
        if result.error:
            raise result.error
    # NOTE(review): usort() runs in-process above, so this subprocess timeout
    # handler looks unreachable here -- confirm before removing.
    except subprocess.TimeoutExpired:
        return [
            LintMessage(
                path=filename,
                line=None,
                char=None,
                code="USORT",
                severity=LintSeverity.ERROR,
                name="timeout",
                original=None,
                replacement=None,
                description=(
                    "usort timed out while trying to process a file. "
                    "Please report an issue in pytorch/torchrec."
                ),
            )
        ]
    except (OSError, subprocess.CalledProcessError) as err:
        return [
            LintMessage(
                path=filename,
                line=None,
                char=None,
                code="USORT",
                severity=LintSeverity.ADVICE,
                name="command-failed",
                original=None,
                replacement=None,
                description=(
                    f"Failed due to {err.__class__.__name__}:\n{err}"
                    if not isinstance(err, subprocess.CalledProcessError)
                    else (
                        "COMMAND (exit code {returncode})\n"
                        "{command}\n\n"
                        "STDERR\n{stderr}\n\n"
                        "STDOUT\n{stdout}"
                    ).format(
                        returncode=err.returncode,
                        command=" ".join(as_posix(x) for x in err.cmd),
                        stderr=err.stderr.decode("utf-8").strip() or "(empty)",
                        stdout=err.stdout.decode("utf-8").strip() or "(empty)",
                    )
                ),
            )
        ]
    # Already sorted: nothing to report.
    replacement = result.output
    if original == replacement:
        return []
    return [
        LintMessage(
            path=filename,
            line=None,
            char=None,
            code="USORT",
            severity=LintSeverity.WARNING,
            name="format",
            original=original.decode("utf-8"),
            replacement=replacement.decode("utf-8"),
            description="Run `lintrunner -a` to apply this patch.",
        )
    ]
9,146 | import argparse
import concurrent.futures
import json
import logging
import os
import subprocess
import sys
import time
from typing import BinaryIO, List
from utils import as_posix, IS_WINDOWS, LintMessage, LintSeverity
def run_command(
args: List[str],
*,
stdin: BinaryIO,
retries: int,
timeout: int,
) -> "subprocess.CompletedProcess[bytes]":
class LintSeverity(str, Enum):
class LintMessage(NamedTuple):
def as_posix(name: str) -> str:
def check_file(
    filename: str,
    retries: int,
    timeout: int,
) -> List[LintMessage]:
    """Format ``filename`` through black (via stdin) and report a fixable diff.

    Returns an empty list when black produces identical output; otherwise a
    single WARNING-severity LintMessage whose ``replacement`` is the formatted
    text. Subprocess failures/timeouts are reported as LintMessages.
    """
    try:
        with open(filename, "rb") as f:
            original = f.read()
        # Reopen so black reads the file from the start of the stream.
        with open(filename, "rb") as f:
            proc = run_command(
                [sys.executable, "-mblack", "--stdin-filename", filename, "-"],
                stdin=f,
                retries=retries,
                timeout=timeout,
            )
    except subprocess.TimeoutExpired:
        return [
            LintMessage(
                path=filename,
                line=None,
                char=None,
                code="BLACK",
                severity=LintSeverity.ERROR,
                name="timeout",
                original=None,
                replacement=None,
                description=(
                    "black timed out while trying to process a file. "
                    "Please report an issue in pytorch/torchrec."
                ),
            )
        ]
    except (OSError, subprocess.CalledProcessError) as err:
        return [
            LintMessage(
                path=filename,
                line=None,
                char=None,
                code="BLACK",
                severity=LintSeverity.ADVICE,
                name="command-failed",
                original=None,
                replacement=None,
                description=(
                    f"Failed due to {err.__class__.__name__}:\n{err}"
                    if not isinstance(err, subprocess.CalledProcessError)
                    else (
                        "COMMAND (exit code {returncode})\n"
                        "{command}\n\n"
                        "STDERR\n{stderr}\n\n"
                        "STDOUT\n{stdout}"
                    ).format(
                        returncode=err.returncode,
                        command=" ".join(as_posix(x) for x in err.cmd),
                        stderr=err.stderr.decode("utf-8").strip() or "(empty)",
                        stdout=err.stdout.decode("utf-8").strip() or "(empty)",
                    )
                ),
            )
        ]
    # black writes the formatted file to stdout.
    replacement = proc.stdout
    if original == replacement:
        return []
    return [
        LintMessage(
            path=filename,
            line=None,
            char=None,
            code="BLACK",
            severity=LintSeverity.WARNING,
            name="format",
            original=original.decode("utf-8"),
            replacement=replacement.decode("utf-8"),
            description="Run `lintrunner -a` to apply this patch.",
        )
    ]
9,147 | import argparse
import logging
import os
import subprocess
import sys
import time
from typing import List
def run_command(args: List[str]) -> "subprocess.CompletedProcess[bytes]":
    """Run ``args`` as a subprocess, logging the command and wall-clock time.

    Raises subprocess.CalledProcessError on a nonzero exit (check=True).
    """
    logging.debug("$ %s", " ".join(args))
    start = time.monotonic()
    try:
        return subprocess.run(args, check=True)
    finally:
        # Log duration even when the command fails.
        logging.debug("took %dms", (time.monotonic() - start) * 1000)
9,148 | import os
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
from calibrator import Calibrator
from torch.autograd import Variable
import torch
import numpy as np
import time
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
class Calibrator(trt.IInt8EntropyCalibrator2):
    """Feeds calibration batches to TensorRT for INT8 entropy calibration.

    Wraps a batch ``stream`` (expected to expose ``calibration_data``,
    ``batch_size``, ``reset()`` and ``next_batch()``) and optionally persists
    the calibration table to ``cache_file`` so later builds skip calibration.
    """

    def __init__(self, input_layers, stream, cache_file=""):
        trt.IInt8EntropyCalibrator2.__init__(self)
        self.input_layers = input_layers
        self.stream = stream
        # Device buffer sized to hold one full calibration batch.
        self.d_input = cuda.mem_alloc(self.stream.calibration_data.nbytes)
        self.cache_file = cache_file
        stream.reset()

    def get_batch_size(self):
        return self.stream.batch_size

    def get_batch(self, bindings, names):
        batch = self.stream.next_batch()
        # An empty batch signals the stream is exhausted; None stops calibration.
        if not batch.size:
            return None
        cuda.memcpy_htod(self.d_input, batch)
        # NOTE(review): iterates over input_layers[0] (the characters of the
        # first layer name, if it is a string) and asserts inequality with
        # names[0] -- this looks inverted/accidental for a sanity check; confirm.
        for i in self.input_layers[0]:
            assert names[0] != i
        bindings[0] = int(self.d_input)
        return bindings

    def read_calibration_cache(self):
        # If there is a cache, use it instead of calibrating again. Otherwise, implicitly return None.
        if os.path.exists(self.cache_file):
            with open(self.cache_file, "rb") as f:
                # NOTE(review): `logger` is not defined in this module's visible
                # imports -- this would raise NameError; confirm where it comes from.
                logger.info(
                    "Using calibration cache to save time: {:}".format(self.cache_file)
                )
                return f.read()

    def write_calibration_cache(self, cache):
        # Persist the calibration table produced by TensorRT.
        with open(self.cache_file, "wb") as f:
            logger.info(
                "Caching calibration data for future use: {:}".format(self.cache_file)
            )
            f.write(cache)
The provided code snippet includes necessary dependencies for implementing the `get_engine` function. Write a Python function `def get_engine( max_batch_size=1, onnx_file_path="", engine_file_path="", fp16_mode=False, int8_mode=False, calibration_stream=None, calibration_table_path="", save_engine=False, dynamic=False, )` to solve the following problem:
Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it.
Here is the function:
def get_engine(
    max_batch_size=1,
    onnx_file_path="",
    engine_file_path="",
    fp16_mode=False,
    int8_mode=False,
    calibration_stream=None,
    calibration_table_path="",
    save_engine=False,
    dynamic=False,
):
    """Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it.

    When ``dynamic`` is False the engine is built with the legacy implicit-batch
    API; when True, a builder config with an optimization profile (min/opt/max
    input shapes hard-coded below) is used. ``int8_mode`` requires a
    ``calibration_stream``.

    NOTE(review): builder.max_workspace_size / fp16_mode / int8_mode /
    build_cuda_engine are pre-TensorRT-8 APIs removed in newer releases --
    confirm the targeted TensorRT version.
    """

    def build_engine(max_batch_size, save_engine):
        """Takes an ONNX file and creates a TensorRT engine to run inference with"""
        with trt.Builder(TRT_LOGGER) as builder, builder.create_network(
            1
        ) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
            # parse onnx model file
            if not os.path.exists(onnx_file_path):
                quit("ONNX file {} not found".format(onnx_file_path))
            print("Loading ONNX file from path {}...".format(onnx_file_path))
            with open(onnx_file_path, "rb") as model:
                print("Beginning ONNX file parsing")
                parser.parse(model.read())
            # A zero-layer network means parsing silently failed.
            assert (
                network.num_layers > 0
            ), "Failed to parse ONNX model. \
                        Please check if the ONNX model is compatible "
            print("Completed parsing of ONNX file")
            print(
                "Building an engine from file {}; this may take a while...".format(
                    onnx_file_path
                )
            )
            # build trt engine
            builder.max_batch_size = max_batch_size
            if not dynamic:
                # Static-shape path: legacy builder attributes + build_cuda_engine.
                builder.max_workspace_size = 1 << 30  # 1GB
                builder.fp16_mode = fp16_mode
                if int8_mode:
                    builder.int8_mode = int8_mode
                    assert (
                        calibration_stream
                    ), "Error: a calibration_stream should be provided for int8 mode"
                    builder.int8_calibrator = Calibrator(
                        ["input"], calibration_stream, calibration_table_path
                    )
                    print("Int8 mode enabled")
                engine = builder.build_cuda_engine(network)
            else:
                # Dynamic-shape path: builder config + optimization profile.
                config = builder.create_builder_config()
                config.max_workspace_size = 1 << 30  # 1GB
                profile = builder.create_optimization_profile()
                # min / opt / max shapes for the first network input.
                profile.set_shape(
                    network.get_input(0).name,
                    (1, 3, 200, 200),
                    (1, 3, 608, 448),
                    (1, 3, 1200, 1200),
                )
                # dynamic_engine fp16 set
                if fp16_mode:
                    config.set_flag(trt.BuilderFlag.FP16)
                # dynamic_engine int8 set
                if int8_mode:
                    config.set_flag(trt.BuilderFlag.INT8)
                    assert (
                        calibration_stream
                    ), "Error: a calibration_stream should be provided for int8 mode"
                    # choose an calibration profile
                    # config.set_calibration_profile(profile)
                    config.int8_calibrator = Calibrator(
                        ["input"], calibration_stream, calibration_table_path
                    )
                    print("Int8 mode enabled")
                # choose an optimization profile
                config.add_optimization_profile(profile)
                engine = builder.build_engine(network, config)
            # If errors happend when executing builder.build_cuda_engine(network),
            # a None-Type object would be returned
            if engine is None:
                print("Failed to create the engine")
                return None
            print("Completed creating the engine")
            if save_engine:
                with open(engine_file_path, "wb") as f:
                    f.write(engine.serialize())
            return engine

    if os.path.exists(engine_file_path):
        # If a serialized engine exists, load it instead of building a new one.
        print("Reading engine from file {}".format(engine_file_path))
        with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    else:
        return build_engine(max_batch_size, save_engine)
9,149 | import os
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
from calibrator import Calibrator
from torch.autograd import Variable
import torch
import numpy as np
import time
class HostDeviceMem(object):
    """Pairs a host (CPU) buffer with its matching device (GPU) allocation."""

    def __init__(self, host_mem, device_mem):
        """Within this context, host_mom means the cpu memory and device means the GPU memory"""
        self.host = host_mem
        self.device = device_mem

    def __str__(self):
        return f"Host:\n{self.host}\nDevice:\n{self.device}"

    def __repr__(self):
        return str(self)
def allocate_buffers(engine):
    """Allocate pagelocked host + device buffers for every binding of ``engine``.

    Returns (inputs, outputs, bindings, stream): inputs/outputs are
    HostDeviceMem pairs and ``bindings`` is the list of device pointers in
    binding order, ready for execute_async.
    """
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    for binding in engine:
        # Element count = per-sample volume times the max batch size.
        size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
    return inputs, outputs, bindings, stream
9,150 | import os
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
from calibrator import Calibrator
from torch.autograd import Variable
import torch
import numpy as np
import time
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
def __str__(self):
def __repr__(self):
def allocate_buffers_v2(engine, h_, w_):
    """Allocate host/device buffers for a dynamic-shape engine, sized for (h_, w_).

    Like allocate_buffers, but multiplies each binding's base volume by
    ``h_ * w_`` to cover the spatial extent chosen at runtime.

    NOTE(review): for dynamic bindings get_binding_shape may contain -1 dims,
    making trt.volume(...) negative before the multiply -- confirm the sizing
    is correct for the engines this is used with.
    """
    inputs = []
    outputs = []
    bindings = []
    stream = cuda.Stream()
    print("engine.get_binding_format_desc", engine.get_binding_format_desc(0))
    for count, binding in enumerate(engine):
        print("binding:", binding)
        size = (
            trt.volume(engine.get_binding_shape(binding))
            * engine.max_batch_size
            * h_
            * w_
        )
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        print("dtype:", dtype)
        # Allocate host and device buffers
        host_mem = cuda.pagelocked_empty(size, dtype)
        device_mem = cuda.mem_alloc(host_mem.nbytes)
        # Append the device buffer to device bindings.
        bindings.append(int(device_mem))
        # Append to the appropriate list.
        if engine.binding_is_input(binding):
            inputs.append(HostDeviceMem(host_mem, device_mem))
        else:
            outputs.append(HostDeviceMem(host_mem, device_mem))
        print("size:", size)
        print("input:", inputs)
        print("output:", outputs)
        print("------------------")
    return inputs, outputs, bindings, stream
9,151 | import os
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
from calibrator import Calibrator
from torch.autograd import Variable
import torch
import numpy as np
import time
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    """Run one async TensorRT execution: H2D copies, execute, D2H copies, sync.

    ``inputs``/``outputs`` are HostDeviceMem pairs; input host buffers must be
    filled by the caller and output host buffers are overwritten. Returns the
    host output buffers.
    """
    # Transfer data from CPU to GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # GPU Run inference.
    context.execute_async(
        batch_size=batch_size, bindings=bindings, stream_handle=stream.handle
    )
    # context.execute(batch_size=batch_size, bindings=bindings)
    # Transfer predictions from GPU to CPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs]
9,152 | import os
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
from calibrator import Calibrator
from torch.autograd import Variable
import torch
import numpy as np
import time
def do_inference_v2(context, bindings, inputs, outputs, stream, h_, w_, binding_id):
    """Run one dynamic-shape TensorRT execution and return (host outputs, time).

    Sets binding ``binding_id`` to shape (1, 3, h_, w_) before enqueueing.

    NOTE(review): `infer_time` brackets execute_async_v2, an asynchronous
    enqueue -- it measures launch latency, not actual GPU compute time, which
    only completes at stream.synchronize(). Confirm this is the intended metric.
    """
    # set the input dimensions
    context.set_binding_shape(binding_id, (1, 3, h_, w_))
    # Transfer input data to the GPU.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    # Run inference.
    infer_start = time.time()
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    # context.execute_v2(bindings=bindings)
    infer_end = time.time()
    infer_time = infer_end - infer_start
    # Transfer predictions back from the GPU.
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    # Synchronize the stream
    stream.synchronize()
    # Return only the host outputs.
    return [out.host for out in outputs], infer_time
9,153 | import os
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
from calibrator import Calibrator
from torch.autograd import Variable
import torch
import numpy as np
import time
def postprocess_the_outputs(h_outputs, shape_of_output):
    """Reshape a flat inference output to ``shape_of_output``, returning a copy."""
    reshaped = h_outputs.reshape(shape_of_output)
    # Copy so the result is independent of the reusable host buffer.
    return reshaped.copy()
9,154 | import os
import tensorrt as trt
import pycuda.autoinit
import pycuda.driver as cuda
from calibrator import Calibrator
from torch.autograd import Variable
import torch
import numpy as np
import time
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array, detaching it from autograd first if needed."""
    if tensor.requires_grad:
        return tensor.detach().cpu().numpy()
    return tensor.cpu().numpy()
9,155 | import numpy as np
import torch
import torch.nn as nn
import util_trt
from calibrator import SegBatchStream
def evaluate_trt(segmentation_module_trt, loader, cfg, gpu, result_queue_trt):
    """Evaluate a TensorRT segmentation module over ``loader``.

    For each sample, averages class scores over the multi-scale image list,
    takes the argmax prediction, and pushes (acc, pix, intersection, union)
    into ``result_queue_trt`` for a consumer to aggregate.

    NOTE(review): relies on helpers not defined in this snippet's imports
    (as_numpy, async_copy_to, accuracy, intersectionAndUnion, visualize_result,
    os) -- presumably imported elsewhere in the module; confirm.
    """
    # pbar = tqdm(total=len(loader))
    for batch_data in loader:
        # process data
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data["seg_label"][0])
        img_resized_list = batch_data["img_data"]
        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu)
            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict["img_data"] = img
                del feed_dict["img_ori"]
                del feed_dict["info"]
                feed_dict = async_copy_to(feed_dict, gpu)
                # forward pass
                scores_tmp, infer_time = segmentation_module_trt(
                    feed_dict, segSize=segSize, shape_of_input=img.shape
                )
                # Average scores across the configured evaluation scales.
                scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)
            _, pred = torch.max(scores, dim=1)
            # print(scores.squeeze(0)[1,:,:].squeeze(0).shape, pred.shape)
            pred = as_numpy(pred.squeeze(0).cpu())
        # calculate accuracy and SEND THEM TO MASTER
        acc, pix = accuracy(pred, seg_label)
        intersection, union = intersectionAndUnion(
            pred, seg_label, cfg.DATASET.num_class
        )
        result_queue_trt.put_nowait((acc, pix, intersection, union))
        # visualization
        if cfg.VAL.visualize:
            visualize_result(
                (batch_data["img_ori"], seg_label, batch_data["info"]),
                pred,
                os.path.join(cfg.DIR, "result"),
                scores.squeeze(0)[1, :, :].squeeze(0).cpu().numpy(),
            )
        # pbar.update(1)
9,156 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import math
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin
def setup_seed(seed):
    """Seed numpy and torch (CPU + all GPUs) RNGs and force deterministic cuDNN."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels trade speed for reproducibility.
    torch.backends.cudnn.deterministic = True
9,157 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import math
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin
def save_state(model, best_acc):
    """Save the model state dict (plus best accuracy) to a path chosen by CLI flags.

    Strips any DataParallel "module." prefix from state-dict keys first. For
    pruned/finetuned variants the pruned channel config ``cfg`` is saved too.

    NOTE(review): relies on module-level globals `args` (model_type, sr,
    prune_refine, gc_prune_refine) and `cfg` -- defined elsewhere in the script.
    """
    print("==> Saving model ...")
    state = {
        "best_acc": best_acc,
        "state_dict": model.state_dict(),
    }
    # Iterate a copy of the keys: entries are popped/re-inserted in the live dict.
    state_copy = state["state_dict"].copy()
    for key in state_copy.keys():
        if "module" in key:
            state["state_dict"][key.replace("module.", "")] = state["state_dict"].pop(
                key
            )
    if args.model_type == 0:
        if args.sr:
            torch.save(state, "models_save/nin_sparse.pth")
        elif args.prune_refine:
            torch.save(
                {"cfg": cfg, "best_acc": best_acc, "state_dict": state["state_dict"]},
                "models_save/nin_finetune.pth",
            )
        else:
            torch.save(state, "models_save/nin.pth")
    else:
        if args.sr:
            torch.save(state, "models_save/nin_gc_sparse.pth")
        elif args.gc_prune_refine:
            torch.save(
                {"cfg": cfg, "best_acc": best_acc, "state_dict": state["state_dict"]},
                "models_save/nin_gc_retrain.pth",
            )
        else:
            torch.save(state, "models_save/nin_gc.pth")
9,158 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import math
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin
def train(epoch):
    """Train the global `model` for one epoch over `trainloader`, logging every 100 batches.

    NOTE(review): relies on module-level globals `model`, `trainloader`,
    `criterion`, `optimizer`, `args` (cpu, sr) and `updateBN` -- defined
    elsewhere in the script.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(trainloader):
        if not args.cpu:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        # Sparsity training (applies a constraint on the BN layers' gamma factors).
        if args.sr:
            updateBN()
        optimizer.step()
        if batch_idx % 100 == 0:
            print(
                "Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}".format(
                    epoch,
                    batch_idx * len(data),
                    len(trainloader.dataset),
                    100.0 * batch_idx / len(trainloader),
                    loss.data.item(),
                    optimizer.param_groups[0]["lr"],
                )
            )
    return
9,159 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import math
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin
def adjust_learning_rate(optimizer, epoch):
    """Decay every param group's learning rate by 10x at the milestone epochs."""
    milestones = (80, 130, 180, 230, 280)
    if epoch in milestones:
        for group in optimizer.param_groups:
            group["lr"] *= 0.1
9,160 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import math
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin, resnet
import quantize
def setup_seed(seed):
    """Make the torch and numpy RNGs reproducible and pin cuDNN to deterministic kernels."""
    # Seed every generator the training loop draws from.
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
9,161 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import math
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin, resnet
import quantize
def save_state(model, best_acc):
    """Save the model state dict (plus best accuracy) under a path chosen by CLI flags.

    Strips any DataParallel "module." prefix from state-dict keys, then routes
    to nin / nin_gc / resnet filenames (with an optional `_bn_fused` suffix);
    pruned/quantized variants also persist the channel config ``cfg``.

    NOTE(review): relies on module-level globals `args` (model_type, bn_fuse,
    prune_quant, prune_qaft) and `cfg` -- defined elsewhere in the script.
    """
    print("==> Saving model ...")
    state = {
        "best_acc": best_acc,
        "state_dict": model.state_dict(),
    }
    # Iterate a copy of the keys: entries are popped/re-inserted in the live dict.
    state_copy = state["state_dict"].copy()
    for key in state_copy.keys():
        if "module" in key:
            state["state_dict"][key.replace("module.", "")] = state["state_dict"].pop(
                key
            )
    if args.model_type == 0:
        if args.bn_fuse:
            if args.prune_quant or args.prune_qaft:
                torch.save(
                    {
                        "cfg": cfg,
                        "best_acc": best_acc,
                        "state_dict": state["state_dict"],
                    },
                    "models_save/nin_bn_fused.pth",
                )
            else:
                torch.save(state, "models_save/nin_bn_fused.pth")
        else:
            if args.prune_quant or args.prune_qaft:
                torch.save(
                    {
                        "cfg": cfg,
                        "best_acc": best_acc,
                        "state_dict": state["state_dict"],
                    },
                    "models_save/nin.pth",
                )
            else:
                torch.save(state, "models_save/nin.pth")
    elif args.model_type == 1:
        if args.bn_fuse:
            if args.prune_quant or args.prune_qaft:
                torch.save(
                    {
                        "cfg": cfg,
                        "best_acc": best_acc,
                        "state_dict": state["state_dict"],
                    },
                    "models_save/nin_gc_bn_fused.pth",
                )
            else:
                torch.save(state, "models_save/nin_gc_bn_fused.pth")
        else:
            if args.prune_quant or args.prune_qaft:
                torch.save(
                    {
                        "cfg": cfg,
                        "best_acc": best_acc,
                        "state_dict": state["state_dict"],
                    },
                    "models_save/nin_gc.pth",
                )
            else:
                torch.save(state, "models_save/nin_gc.pth")
    else:
        if args.bn_fuse:
            torch.save(state, "models_save/resnet_bn_fused.pth")
        else:
            torch.save(state, "models_save/resnet.pth")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.