| id (int64, 0–190k) | prompt (string, lengths 21–13.4M) | docstring (string, lengths 1–12k, nullable) |
|---|---|---|
8,861 | import argparse
import sys
from typing import List, Tuple
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_training import EmbeddingLocation
from torchrec.github.benchmarks import ebc_benchmarks_utils
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.modules.fused_embedding_modules import FusedEmbeddingBagCollection
def parse_args(argv: List[str]) -> argparse.Namespace:
    """Parse benchmark command-line flags.

    Args:
        argv: raw argument strings, typically ``sys.argv[1:]``.

    Returns:
        Namespace with ``cpu_only`` (bool, default False) and
        ``mode`` (str, default ``"ebc_comparison_dlrm"``).
    """
    arg_parser = argparse.ArgumentParser(description="TorchRec ebc benchmarks")
    # Declare flags as (name, options) pairs and register them in one pass.
    flag_specs = [
        (
            "--cpu_only",
            {
                "action": "store_true",
                "default": False,
                "help": "specify whether to use cpu",
            },
        ),
        (
            "--mode",
            {
                "type": str,
                "default": "ebc_comparison_dlrm",
                "help": "specify 'ebc_comparison_dlrm', 'ebc_comparison_scaling' or 'fused_ebc_uvm'",
            },
        ),
    ]
    for flag, options in flag_specs:
        arg_parser.add_argument(flag, **options)
    return arg_parser.parse_args(argv)
8,862 | import argparse
import sys
from typing import List, Tuple
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops_training import EmbeddingLocation
from torchrec.github.benchmarks import ebc_benchmarks_utils
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.modules.fused_embedding_modules import FusedEmbeddingBagCollection
def _build_ebc_configs(
    nums_embeddings: List[int], embedding_dim: int = 128
) -> List[EmbeddingBagConfig]:
    """Build one single-feature EmbeddingBagConfig per entry of *nums_embeddings*.

    Table ``i`` is named ``ebc_{i}`` with feature ``ebc_{i}_feat_1`` — the same
    naming scheme used by every benchmark mode in this script.
    """
    return [
        EmbeddingBagConfig(
            name=f"ebc_{idx}",
            embedding_dim=embedding_dim,
            num_embeddings=num_embeddings,
            feature_names=[f"ebc_{idx}_feat_1"],
        )
        for idx, num_embeddings in enumerate(nums_embeddings)
    ]


def main(argv: List[str]) -> None:
    """Run the benchmark selected by ``--mode``.

    Modes:
        * ``ebc_comparison_dlrm``: EBC vs. FusedEBC on shrunk DLRM tables.
        * ``fused_ebc_uvm``: FusedEBC with UVM caching / UVM management.
        * ``ebc_comparison_scaling``: EBC vs. FusedEBC over a grid of sizes.

    Args:
        argv: command-line arguments (see ``parse_args``).
    """
    args = parse_args(argv)
    # Keep the short-circuit order: CUDA availability is only probed when the
    # user did not force CPU-only execution.
    if not args.cpu_only and torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    if args.mode == "ebc_comparison_dlrm":
        print("Running EBC vs. FusedEBC on DLRM EMB")
        for reduction_degree in [128, 64, 32]:
            embedding_bag_configs = _build_ebc_configs(
                get_shrunk_dlrm_num_embeddings(reduction_degree)
            )
            (
                ebc_time_avg,
                ebc_time_std,
                fused_ebc_time_avg,
                fused_ebc_time_std,
                speedup,
            ) = get_ebc_comparison(embedding_bag_configs, device)
            print(f"when DLRM EMB is reduced by {reduction_degree} times:")
            print(f"ebc_time = {ebc_time_avg} +/- {ebc_time_std} sec")
            print(f"fused_ebc_time = {fused_ebc_time_avg} +/- {fused_ebc_time_std} sec")
            print(f"speedup = {speedup}")
    elif args.mode == "fused_ebc_uvm":
        print("Running DLRM EMB on FusedEBC with UVM/UVM-caching")
        # UVM caching on a 2x-shrunk DLRM table set.
        embedding_bag_configs = _build_ebc_configs(get_shrunk_dlrm_num_embeddings(2))
        fused_ebc_time_avg, fused_ebc_time_std = get_fused_ebc_uvm_time(
            embedding_bag_configs, device, EmbeddingLocation.MANAGED_CACHING
        )
        print(
            f"FusedEBC with UVM caching on DLRM: {fused_ebc_time_avg} +/- {fused_ebc_time_std} sec"
        )
        # Full-size DLRM table set on UVM-managed memory.
        embedding_bag_configs = _build_ebc_configs(DLRM_NUM_EMBEDDINGS_PER_FEATURE)
        fused_ebc_time_avg, fused_ebc_time_std = get_fused_ebc_uvm_time(
            embedding_bag_configs, device, EmbeddingLocation.MANAGED
        )
        # Fix: report with the same "+/-" notation as every other result line
        # (previously printed "plus/minus" here only).
        print(
            f"FusedEBC with UVM management on DLRM: {fused_ebc_time_avg} +/- {fused_ebc_time_std} sec"
        )
    elif args.mode == "ebc_comparison_scaling":
        print("Running EBC vs. FusedEBC scaling experiment")
        num_tables_list = [10, 100, 1000]
        embedding_dim_list = [4, 8, 16, 32, 64, 128]
        num_embeddings_list = [4, 8, 16, 32, 64, 128, 256, 1024, 2048, 4096, 8192]
        for num_tables in num_tables_list:
            for num_embeddings in num_embeddings_list:
                for embedding_dim in embedding_dim_list:
                    # num_tables identical tables of num_embeddings rows each.
                    embedding_bag_configs = _build_ebc_configs(
                        [num_embeddings] * num_tables, embedding_dim
                    )
                    ebc_time, _, fused_ebc_time, _, speedup = get_ebc_comparison(
                        embedding_bag_configs, device, epochs=3
                    )
                    print(
                        f"EBC num_tables = {num_tables}, num_embeddings = {num_embeddings}, embedding_dim = {embedding_dim}:"
                    )
                    print(
                        f"ebc_time = {ebc_time} sec, fused_ebc_time = {fused_ebc_time} sec, speedup = {speedup}"
                    )
def invoke_main() -> None:
    """Script entry point: forward the CLI args (minus program name) to main()."""
    cli_args = sys.argv[1:]
    main(cli_args)
8,863 | from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingCollection,
)
from torchrec.modules.mc_modules import ManagedCollisionCollection
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
class EmbeddingBagCollection(EmbeddingBagCollectionInterface):
    """
    EmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`).

    It processes sparse data in the form of `KeyedJaggedTensor` with values of the form
    [F X B X L] where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)

    and outputs a `KeyedTensor` with values of the form [B * (F * D)] where:

    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size

    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        device (Optional[torch.device]): default compute device.

    Example::

        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
        )
        ebc = EmbeddingBagCollection(tables=[table_0, table_1])

        #        0       1        2  <-- batch
        # "f1"   [0,1] None    [2]
        # "f2"   [3]    [4]    [5,6,7]
        #  ^
        # feature
        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )

        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        print(pooled_embeddings.keys())
        print(pooled_embeddings.offset_per_key())
    """

    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self._is_weighted = is_weighted
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        self._embedding_bag_configs = tables
        # Output embedding width per (table, feature) pair, in key order.
        self._lengths_per_embedding: List[int] = []
        # DataType.value per table, used to pick weight precision in forward().
        self._dtypes: List[int] = []
        table_names = set()
        for embedding_config in tables:
            if embedding_config.name in table_names:
                raise ValueError(f"Duplicate table name {embedding_config.name}")
            table_names.add(embedding_config.name)
            dtype = (
                torch.float32
                if embedding_config.data_type == DataType.FP32
                else torch.float16
            )
            self.embedding_bags[embedding_config.name] = nn.EmbeddingBag(
                num_embeddings=embedding_config.num_embeddings,
                embedding_dim=embedding_config.embedding_dim,
                mode=pooling_type_to_str(embedding_config.pooling),
                device=device,
                include_last_offset=True,
                dtype=dtype,
            )
            if device is None:
                # Inherit the device the first table was materialized on.
                device = self.embedding_bags[embedding_config.name].weight.device
            self._dtypes.append(embedding_config.data_type.value)
            # Default feature name to the table name when not provided.
            if not embedding_config.feature_names:
                embedding_config.feature_names = [embedding_config.name]
            self._lengths_per_embedding.extend(
                len(embedding_config.feature_names) * [embedding_config.embedding_dim]
            )
        self._device: torch.device = device or torch.device("cpu")
        self._embedding_names: List[str] = [
            embedding
            for embeddings in get_embedding_names_by_table(tables)
            for embedding in embeddings
        ]
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]
        self.reset_parameters()

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Run the forward pass of the EmbeddingBagCollection on `features`.

        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            KeyedTensor: pooled embeddings, keyed by embedding name.
        """
        flat_feature_names: List[str] = []
        for names in self._feature_names:
            flat_feature_names.extend(names)
        inverse_indices = reorder_inverse_indices(
            inverse_indices=features.inverse_indices_or_none(),
            feature_names=flat_feature_names,
        )
        pooled_embeddings: List[torch.Tensor] = []
        feature_dict = features.to_dict()
        for i, embedding_bag in enumerate(self.embedding_bags.values()):
            for feature_name in self._feature_names[i]:
                f = feature_dict[feature_name]
                per_sample_weights: Optional[torch.Tensor] = None
                if self._is_weighted:
                    # FP16 tables expect half-precision sample weights.
                    per_sample_weights = (
                        f.weights().half()
                        if self._dtypes[i] == DataType.FP16.value
                        else f.weights()
                    )
                # per_sample_weights is already None for unweighted input, so
                # it is passed unconditionally (removed a redundant
                # `if self._is_weighted` re-check).
                res = embedding_bag(
                    input=f.values(),
                    offsets=f.offsets(),
                    per_sample_weights=per_sample_weights,
                ).float()
                pooled_embeddings.append(res)
        return KeyedTensor(
            keys=self._embedding_names,
            values=process_pooled_embeddings(
                pooled_embeddings=pooled_embeddings,
                inverse_indices=inverse_indices,
            ),
            length_per_key=self._lengths_per_embedding,
        )

    def is_weighted(self) -> bool:
        """Return whether the input KJT is expected to carry per-sample weights."""
        return self._is_weighted

    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        """Return the table configs this collection was built from."""
        return self._embedding_bag_configs

    def device(self) -> torch.device:
        """Return the device the embedding tables live on."""
        return self._device

    def reset_parameters(self) -> None:
        """Re-initialize every table's weights with its configured ``init_fn``.

        No-op on meta devices, where weights are not materialized.
        """
        # Fix: the previous check inspected `self.device` — the *bound method*
        # object, never a torch.device or str — so the meta-device early
        # return could not trigger. Check the stored device instead.
        if self._device.type == "meta":
            return
        for table_config in self._embedding_bag_configs:
            assert table_config.init_fn is not None
            param = self.embedding_bags[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)
class EmbeddingCollection(EmbeddingCollectionInterface):
    """
    EmbeddingCollection represents a collection of non-pooled embeddings.

    It processes sparse data in the form of `KeyedJaggedTensor` of the form [F X B X L]
    where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (variable)

    and outputs `Dict[feature (key), JaggedTensor]`.
    Each `JaggedTensor` contains values of the form (B * L) X D
    where:

    * B: batch size
    * L: length of sparse features (jagged)
    * D: each feature's (key's) embedding dimension and lengths are of the form L

    Args:
        tables (List[EmbeddingConfig]): list of embedding tables.
        device (Optional[torch.device]): default compute device.
        need_indices (bool): if we need to pass indices to the final lookup dict.

    Example::

        e1_config = EmbeddingConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        e2_config = EmbeddingConfig(
            name="t2", embedding_dim=3, num_embeddings=10, feature_names=["f2"]
        )
        ec = EmbeddingCollection(tables=[e1_config, e2_config])

        #     0       1        2  <-- batch
        # 0   [0,1] None    [2]
        # 1   [3]    [4]    [5,6,7]
        # ^
        # feature
        features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )
        feature_embeddings = ec(features)
        print(feature_embeddings['f2'].values())
    """

    def __init__(  # noqa C901
        self,
        tables: List[EmbeddingConfig],
        device: Optional[torch.device] = None,
        need_indices: bool = False,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self.embeddings: nn.ModuleDict = nn.ModuleDict()
        self._embedding_configs = tables
        # All tables must share one embedding dim; -1 means "not yet seen".
        self._embedding_dim: int = -1
        self._need_indices: bool = need_indices
        self._device: torch.device = (
            device if device is not None else torch.device("cpu")
        )
        table_names = set()
        for config in tables:
            if config.name in table_names:
                raise ValueError(f"Duplicate table name {config.name}")
            table_names.add(config.name)
            # First table fixes the collection-wide embedding dimension.
            self._embedding_dim = (
                config.embedding_dim if self._embedding_dim < 0 else self._embedding_dim
            )
            if self._embedding_dim != config.embedding_dim:
                raise ValueError(
                    "All tables in a EmbeddingCollection are required to have same embedding dimension."
                    + f" Violating case: {config.name}'s embedding_dim {config.embedding_dim} !="
                    + f" {self._embedding_dim}"
                )
            dtype = (
                torch.float32 if config.data_type == DataType.FP32 else torch.float16
            )
            self.embeddings[config.name] = nn.Embedding(
                num_embeddings=config.num_embeddings,
                embedding_dim=config.embedding_dim,
                device=device,
                dtype=dtype,
            )
            if config.init_fn is not None:
                config.init_fn(self.embeddings[config.name].weight)
            # Default feature name to the table name when not provided.
            if not config.feature_names:
                config.feature_names = [config.name]
        self._embedding_names_by_table: List[List[str]] = get_embedding_names_by_table(
            tables
        )
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]

    def forward(
        self,
        features: KeyedJaggedTensor,
    ) -> Dict[str, JaggedTensor]:
        """
        Run the forward pass of the EmbeddingCollection on `features`.

        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            Dict[str, JaggedTensor]: per-feature (non-pooled) embeddings.
        """
        feature_embeddings: Dict[str, JaggedTensor] = {}
        jt_dict: Dict[str, JaggedTensor] = features.to_dict()
        for i, emb_module in enumerate(self.embeddings.values()):
            feature_names = self._feature_names[i]
            embedding_names = self._embedding_names_by_table[i]
            for j, embedding_name in enumerate(embedding_names):
                feature_name = feature_names[j]
                f = jt_dict[feature_name]
                lookup = emb_module(
                    input=f.values(),
                ).float()
                feature_embeddings[embedding_name] = JaggedTensor(
                    values=lookup,
                    lengths=f.lengths(),
                    # When need_indices is set, carry the original indices
                    # through in the `weights` slot.
                    weights=f.values() if self._need_indices else None,
                )
        return feature_embeddings

    def need_indices(self) -> bool:
        """Return whether original indices are attached to lookup results."""
        return self._need_indices

    def embedding_dim(self) -> int:
        """Return the (shared) embedding dimension of all tables."""
        return self._embedding_dim

    def embedding_configs(self) -> List[EmbeddingConfig]:
        """Return the table configs this collection was built from."""
        return self._embedding_configs

    def embedding_names_by_table(self) -> List[List[str]]:
        """Return embedding output names, grouped per table."""
        return self._embedding_names_by_table

    def device(self) -> torch.device:
        """Return the device the embedding tables live on."""
        return self._device

    def reset_parameters(self) -> None:
        """Re-initialize every table's weights with its configured ``init_fn``.

        No-op on meta devices, where weights are not materialized.
        """
        # Fix: the previous check inspected `self.device` — the *bound method*
        # object, never a torch.device or str — so the meta-device early
        # return could not trigger. Check the stored device instead.
        if self._device.type == "meta":
            return
        for table_config in self._embedding_configs:
            assert table_config.init_fn is not None
            param = self.embeddings[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)
def evict(
    evictions: Dict[str, Optional[torch.Tensor]],
    ebc: Union[EmbeddingBagCollection, EmbeddingCollection],
) -> None:
    """Evict the given rows from *ebc*'s tables. Not yet implemented.

    Args:
        evictions: per-table tensors of row ids to evict (or None).
        ebc: the embedding collection to evict from.
    """
    # TODO: write function
    return None
8,864 | import abc
from typing import Dict, Optional
import torch
from torch import nn
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
def __init__(
    self,
    keys: List[str],
    values: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    lengths: Optional[torch.Tensor] = None,
    offsets: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    # Below exposed to ensure torch.script-able
    length_per_key: Optional[List[int]] = None,
    offset_per_key: Optional[List[int]] = None,
    index_per_key: Optional[Dict[str, int]] = None,
    jt_dict: Optional[Dict[str, JaggedTensor]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> None:
    """Initialize a KeyedJaggedTensor from its components.

    Either a scalar ``stride`` (fixed batch size for all keys) or
    ``stride_per_key_per_rank`` (variable batch size per key) may be given,
    not both. The trailing cache arguments (``length_per_key`` ...
    ``inverse_indices``) pre-populate lazily-computed fields.
    """
    self._keys: List[str] = keys
    self._values: torch.Tensor = values
    self._weights: Optional[torch.Tensor] = weights
    # lengths/offsets must be integer tensors (or empty); validate eagerly.
    if offsets is not None:
        _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
    if lengths is not None:
        _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
    self._lengths: Optional[torch.Tensor] = lengths
    self._offsets: Optional[torch.Tensor] = offsets
    self._stride_per_key_per_rank: List[List[int]] = []
    self._stride_per_key: List[int] = []
    self._variable_stride_per_key: bool = False
    self._stride: int = -1
    if stride_per_key_per_rank is not None:
        # Variable-batch-size mode: `stride` and `stride_per_key_per_rank`
        # are mutually exclusive.
        if stride is not None:
            raise ValueError(
                "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
            )
        self._stride_per_key_per_rank = stride_per_key_per_rank
        # Per-key stride is the sum over ranks.
        self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        self._variable_stride_per_key = True
        if not stride_per_key_per_rank:
            self._stride = 0
        elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
            # All keys happen to share one batch size; cache it as scalar.
            self._stride = self.stride_per_key()[0]
    else:
        # Fixed-stride mode: derive stride from lengths/offsets when absent.
        if torch.jit.is_tracing():
            stride = _maybe_compute_stride_kjt_scripted(
                keys, stride, lengths, offsets
            )[0]
        else:
            stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
        self._stride = stride
        self._stride_per_key_per_rank = [[stride]] * len(self._keys)
        self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
    # lazy fields (computed on demand, optionally seeded by the caller)
    self._length_per_key: Optional[List[int]] = length_per_key
    self._offset_per_key: Optional[List[int]] = offset_per_key
    self._index_per_key: Optional[Dict[str, int]] = index_per_key
    self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
    self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
        inverse_indices
    )
    self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
    keys: List[str],
    values: torch.Tensor,
    offsets: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
    """Build a KeyedJaggedTensor from offsets and eagerly populate the
    per-key length/offset caches (via ``sync``)."""
    return KeyedJaggedTensor(
        keys=keys,
        values=values,
        weights=weights,
        offsets=offsets,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        inverse_indices=inverse_indices,
    ).sync()
def from_lengths_sync(
    keys: List[str],
    values: torch.Tensor,
    lengths: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    stride: Optional[int] = None,
    stride_per_key_per_rank: Optional[List[List[int]]] = None,
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
    """Build a KeyedJaggedTensor from lengths and eagerly populate the
    per-key length/offset caches (via ``sync``)."""
    return KeyedJaggedTensor(
        keys=keys,
        values=values,
        weights=weights,
        lengths=lengths,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        inverse_indices=inverse_indices,
    ).sync()
def concat(
    kjt_list: List["KeyedJaggedTensor"],
) -> "KeyedJaggedTensor":
    """Concatenate several KJTs along the key dimension.

    All inputs must agree on weightedness and on variable- vs fixed-stride
    mode; fixed-stride inputs must share the same stride. The combined
    ``length_per_key`` cache is kept only if every input already had one.
    """
    if len(kjt_list) == 0:
        raise ValueError("Can't concat empty KJT list")
    is_weighted: bool = kjt_list[0].weights_or_none() is not None
    has_length_per_key: bool = True
    length_per_key: List[int] = []
    keys: List[str] = []
    value_list: List[torch.Tensor] = []
    weight_list: List[torch.Tensor] = []
    length_list: List[torch.Tensor] = []
    stride_per_key_per_rank: List[List[int]] = []
    stride: Optional[int] = None
    variable_stride_per_key_list = [
        kjt.variable_stride_per_key() for kjt in kjt_list
    ]
    # Either all inputs are variable-stride or none are.
    assert all(variable_stride_per_key_list) or not any(
        variable_stride_per_key_list
    ), "variable stride per key must be consistent for all KJTs"
    variable_stride_per_key = all(variable_stride_per_key_list)
    for kjt in kjt_list:
        curr_is_weighted: bool = kjt.weights_or_none() is not None
        if is_weighted != curr_is_weighted:
            raise ValueError("Can't merge weighted KJT with unweighted KJT")
        _length_per_key: Optional[List[int]] = None
        if kjt._length_per_key is None:
            # One missing cache disables the combined length_per_key cache.
            has_length_per_key = False
        else:
            _length_per_key = kjt._length_per_key
        if has_length_per_key and _length_per_key is not None:
            length_per_key += _length_per_key
        keys += kjt.keys()
        value_list.append(kjt.values())
        if is_weighted:
            weight_list.append(kjt.weights())
        length_list.append(kjt.lengths())
        if variable_stride_per_key:
            stride_per_key_per_rank += kjt.stride_per_key_per_rank()
        elif stride is None:
            # First fixed-stride input fixes the common stride.
            stride = kjt.stride()
        else:
            assert stride == kjt.stride(), "strides must be consistent for all KJTs"
    return KeyedJaggedTensor(
        keys=keys,
        values=torch.cat(value_list, dim=0),
        weights=torch.cat(weight_list, dim=0) if is_weighted else None,
        lengths=torch.cat(length_list, dim=0),
        stride=stride,
        stride_per_key_per_rank=(
            stride_per_key_per_rank if variable_stride_per_key else None
        ),
        length_per_key=length_per_key if has_length_per_key else None,
    )
def empty(
    is_weighted: bool = False,
    device: Optional[torch.device] = None,
    values_dtype: Optional[torch.dtype] = None,
    weights_dtype: Optional[torch.dtype] = None,
    lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
    """Return a KJT with no keys and empty values/lengths tensors.

    Dtypes and device of the empty tensors are configurable; weights are
    allocated only when ``is_weighted`` is set.
    """
    weights = (
        torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
    )
    return KeyedJaggedTensor(
        keys=torch.jit.annotate(List[str], []),
        values=torch.empty(0, dtype=values_dtype, device=device),
        weights=weights,
        lengths=torch.empty(0, dtype=lengths_dtype, device=device),
        stride=0,
    )
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
    """Return an empty KJT that mirrors *kjt*'s device, dtypes, stride mode,
    and weightedness."""
    # Preserve whichever stride representation the template KJT uses.
    stride, stride_per_key_per_rank = (
        (None, kjt.stride_per_key_per_rank())
        if kjt.variable_stride_per_key()
        else (kjt.stride(), None)
    )
    return KeyedJaggedTensor(
        keys=[],
        values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
        weights=(
            None
            if kjt.weights_or_none() is None
            else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
        ),
        lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
    )
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
    """
    Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
    but this function will ONLY work if the JaggedTensors all
    have the same "implicit" batch_size dimension.

    Basically, we can visualize JaggedTensors as 2-D tensors
    of the format of [batch_size x variable_feature_dim].
    In case, we have some batch without a feature value,
    the input JaggedTensor could just not include any values.

    But KeyedJaggedTensor (by default) typically pad "None"
    so that all the JaggedTensors stored in the KeyedJaggedTensor
    have the same batch_size dimension. That is, in the case,
    the JaggedTensor input didn't automatically pad
    for the empty batches, this function would error / not work.

    Consider the visualization of the following KeyedJaggedTensor:
    #        0       1        2  <-- dim_1
    # "Feature0"   [V0,V1] None    [V2]
    # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    #  ^
    # dim_0

    Notice that the inputs for this KeyedJaggedTensor would have looked like:
        values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
        weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
        lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
        offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
        keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
        index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
        offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset

    Now if the input jt_dict = {
        # "Feature0"   [V0,V1] [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    } and the "None" is left out from each JaggedTensor,
    then this function would fail as we would not correctly
    be able to pad "None" as it does not technically know
    the correct batch / place to pad within the JaggedTensor.

    Essentially, the lengths Tensor inferred by this function
    would be [2, 1, 1, 1, 3] indicating variable batch_size
    dim_1 violates the existing assumption / precondition
    that KeyedJaggedTensor's should have fixed batch_size dimension.
    """
    kjt_keys = list(jt_dict.keys())
    kjt_vals_list: List[torch.Tensor] = []
    kjt_lens_list: List[torch.Tensor] = []
    kjt_weights_list: List[torch.Tensor] = []
    # Per-key batch size, inferred from each JT's lengths tensor.
    stride_per_key: List[int] = []
    for jt in jt_dict.values():
        stride_per_key.append(len(jt.lengths()))
        kjt_vals_list.append(jt.values())
        kjt_lens_list.append(jt.lengths())
        weight = jt.weights_or_none()
        if weight is not None:
            kjt_weights_list.append(weight)
    kjt_vals = torch.concat(kjt_vals_list)
    kjt_lens = torch.concat(kjt_lens_list)
    kjt_weights = (
        torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
    )
    # Uniform batch size -> scalar stride; otherwise variable stride per key.
    kjt_stride, kjt_stride_per_key_per_rank = (
        (stride_per_key[0], None)
        if all(s == stride_per_key[0] for s in stride_per_key)
        else (None, [[stride] for stride in stride_per_key])
    )
    kjt = KeyedJaggedTensor(
        keys=kjt_keys,
        values=kjt_vals,
        weights=kjt_weights,
        lengths=kjt_lens,
        stride=kjt_stride,
        stride_per_key_per_rank=kjt_stride_per_key_per_rank,
    ).sync()
    return kjt
def sync(self) -> "KeyedJaggedTensor":
    """Eagerly compute the per-key length/offset caches; returns self."""
    self.length_per_key()
    self.offset_per_key()
    return self

def unsync(self) -> "KeyedJaggedTensor":
    """Drop the per-key length/offset caches; returns self."""
    self._length_per_key = None
    self._offset_per_key = None
    return self

def device(self) -> torch.device:
    """Return the device of the values tensor."""
    return self._values.device
def lengths(self) -> torch.Tensor:
    """Return lengths, deriving (and caching) them from offsets if needed."""
    _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
    self._lengths = _lengths
    return _lengths

def lengths_or_none(self) -> Optional[torch.Tensor]:
    """Return the stored lengths without computing them."""
    return self._lengths

def offsets(self) -> torch.Tensor:
    """Return offsets, deriving (and caching) them from lengths if needed."""
    _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
    self._offsets = _offsets
    return _offsets

def offsets_or_none(self) -> Optional[torch.Tensor]:
    """Return the stored offsets without computing them."""
    return self._offsets

def keys(self) -> List[str]:
    """Return the list of keys."""
    return self._keys

def values(self) -> torch.Tensor:
    """Return the flat values tensor."""
    return self._values

def weights(self) -> torch.Tensor:
    """Return weights; raises if this KJT is unweighted."""
    return _get_weights_or_throw(self._weights)

def weights_or_none(self) -> Optional[torch.Tensor]:
    """Return weights, or None for an unweighted KJT."""
    return self._weights

def stride(self) -> int:
    """Return the fixed batch size (stride)."""
    return self._stride

def stride_per_key(self) -> List[int]:
    """Return the batch size per key."""
    return self._stride_per_key

def stride_per_key_per_rank(self) -> List[List[int]]:
    """Return the batch size per key per rank."""
    return self._stride_per_key_per_rank

def variable_stride_per_key(self) -> bool:
    """Return whether keys may have differing batch sizes."""
    return self._variable_stride_per_key

def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
    """Return inverse indices; raises if not set."""
    return _get_inverse_indices_or_throw(self._inverse_indices)

def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
    """Return inverse indices, or None if not set."""
    return self._inverse_indices
def _key_indices(self) -> Dict[str, int]:
    """Return (and cache) the key -> position mapping."""
    _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
        self._keys,
        self._index_per_key,
    )
    self._index_per_key = _index_per_key
    return _index_per_key

def length_per_key(self) -> List[int]:
    """Return (and cache) the number of values belonging to each key."""
    _length_per_key = _maybe_compute_length_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    self._length_per_key = _length_per_key
    return _length_per_key

def length_per_key_or_none(self) -> Optional[List[int]]:
    """Return the cached length_per_key without computing it."""
    return self._length_per_key

def offset_per_key(self) -> List[int]:
    """Return (and cache) each key's start offset into values, plus the
    final end offset."""
    _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    # Computing offsets also yields lengths; cache both.
    self._length_per_key = _length_per_key
    self._offset_per_key = _offset_per_key
    return _offset_per_key

def offset_per_key_or_none(self) -> Optional[List[int]]:
    """Return the cached offset_per_key without computing it."""
    return self._offset_per_key

def lengths_offset_per_key(self) -> List[int]:
    """Return (and cache) each key's start index into the lengths tensor."""
    if not self._lengths_offset_per_key:
        self._lengths_offset_per_key = _cumsum(self.stride_per_key())
    return self._lengths_offset_per_key
def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
    """Split this KJT into consecutive KJTs of ``segments[i]`` keys each.

    Three cases per segment: the whole KJT (tensors and caches shared,
    no slicing), an empty segment (fresh empty tensors), or a proper
    slice of values/weights/lengths.
    """
    split_list: List[KeyedJaggedTensor] = []
    start = 0
    start_offset = 0
    _length_per_key = self.length_per_key()
    _offset_per_key = self.offset_per_key()
    for segment in segments:
        end = start + segment
        end_offset = _offset_per_key[end]
        keys: List[str] = self._keys[start:end]
        # Preserve whichever stride representation this KJT uses.
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank()[start:end])
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        if segment == len(self._keys):
            # no torch slicing required
            split_list.append(
                KeyedJaggedTensor(
                    keys=self._keys,
                    values=self._values,
                    weights=self.weights_or_none(),
                    lengths=self._lengths,
                    offsets=self._offsets,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=self._length_per_key,
                    offset_per_key=self._offset_per_key,
                    index_per_key=self._index_per_key,
                    jt_dict=self._jt_dict,
                    inverse_indices=None,
                )
            )
        elif segment == 0:
            # Empty segment: build zero-length tensors on this device.
            empty_int_list: List[int] = torch.jit.annotate(List[int], [])
            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=torch.tensor(
                        empty_int_list,
                        device=self.device(),
                        dtype=self._values.dtype,
                    ),
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self.weights().dtype,
                        )
                    ),
                    lengths=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    offsets=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=None,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        else:
            split_length_per_key = _length_per_key[start:end]
            if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                # Checks for dynamo dynamic shapes tracing
                torch._check_is_size(start_offset)
                torch._check_is_size(end_offset)
                torch._check_is_size(end_offset - start_offset)
                torch._check(start_offset <= self._values.size(0))
                torch._check(end_offset <= self._values.size(0))
                torch._check(end_offset >= start_offset)
            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=self._values[start_offset:end_offset],
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else self.weights()[start_offset:end_offset]
                    ),
                    # Slice lengths by the cumulative per-key lengths offsets.
                    lengths=self.lengths()[
                        self.lengths_offset_per_key()[
                            start
                        ] : self.lengths_offset_per_key()[end]
                    ],
                    offsets=None,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=split_length_per_key,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        start = end
        start_offset = end_offset
    return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """
        Returns a new KeyedJaggedTensor whose keys (and their values, lengths,
        weights, and stride bookkeeping) are reordered so that output key ``i``
        is input key ``indices[i]``.

        Args:
            indices (List[int]): permutation over key positions.
            indices_tensor (Optional[torch.Tensor]): ``indices`` already
                materialized as an int tensor on this KJT's device; built here
                when not supplied.
            include_inverse_indices (bool): if True, carry this KJT's
                inverse-indices field over to the permuted KJT.

        Returns:
            KeyedJaggedTensor: permuted copy; lazily-derived caches (offsets,
            jt_dict, ...) are dropped and recomputed on demand.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        # Rebuild the per-key metadata in permuted order on the host side.
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # Variable-batch path: permute lengths and values segment by
            # segment, where segment sizes come from stride/length per key.
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # Uniform-batch path: one fused fbgemm op permutes lengths,
            # values, and (optional) weights together.
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        # Only one of (stride, stride_per_key_per_rank) may be passed to the
        # KJT constructor; pick based on the stride mode.
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
def __getitem__(self, key: str) -> JaggedTensor:
offset_per_key = self.offset_per_key()
index = self._key_indices()[key]
start_offset = offset_per_key[index]
end_offset = (
offset_per_key[index + 1]
if index + 1 < len(offset_per_key)
else start_offset
)
return JaggedTensor(
values=self._values[start_offset:end_offset],
weights=(
None
if self.weights_or_none() is None
else self.weights()[start_offset:end_offset]
),
lengths=self.lengths()[
self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
index + 1
]
],
offsets=None,
)
def to_dict(self) -> Dict[str, JaggedTensor]:
_jt_dict = _maybe_compute_kjt_to_jt_dict(
stride=self.stride(),
stride_per_key=self.stride_per_key(),
keys=self.keys(),
length_per_key=self.length_per_key(),
lengths=self.lengths(),
values=self.values(),
variable_stride_per_key=self.variable_stride_per_key(),
weights=self.weights_or_none(),
jt_dict=self._jt_dict,
)
self._jt_dict = _jt_dict
return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
self._values.record_stream(stream)
weights = self._weights
lengths = self._lengths
offsets = self._offsets
if weights is not None:
weights.record_stream(stream)
if lengths is not None:
lengths.record_stream(stream)
if offsets is not None:
offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """
        Return a copy of this KJT with values/weights/lengths/offsets (and the
        inverse-indices tensor, if any) moved to ``device``.

        Args:
            device (torch.device): destination device.
            non_blocking (bool): forwarded to each tensor's ``.to`` call.
            dtype (Optional[torch.dtype]): if given, only the weights tensor is
                additionally cast to this dtype.

        Returns:
            KeyedJaggedTensor: moved copy. Host-side caches (length/offset/
            index per key) are carried over as-is.

        NOTE(review): the cached ``jt_dict`` is carried over without moving its
        member tensors — confirm callers don't rely on it living on ``device``.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        # Only one of (stride, stride_per_key_per_rank) may be passed on.
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            # dtype cast applies to weights only; values keep their dtype.
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """
        Human-readable per-key rendering. A KJT with no keys, or with neither
        offsets nor lengths set, prints as ``KeyedJaggedTensor()``.
        """
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    "    "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        # Each key spans a contiguous run of batch entries;
                        # its bounds are the cumulative stride up to the key.
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
    def pin_memory(self) -> "KeyedJaggedTensor":
        """
        Return a copy whose backing tensors are pinned (page-locked) to speed
        up subsequent host-to-device copies. The ``jt_dict`` cache is dropped
        (its tensors would not be pinned); other cached metadata carries over.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        # Only one of (stride, stride_per_key_per_rank) may be passed on.
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.pin_memory(),
            weights=weights.pin_memory() if weights is not None else None,
            lengths=lengths.pin_memory() if lengths is not None else None,
            offsets=offsets.pin_memory() if offsets is not None else None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            index_per_key=self._index_per_key,
            jt_dict=None,
            inverse_indices=inverse_indices,
        )
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
splits = [batch_size_per_split, length_per_split]
if self.variable_stride_per_key():
splits.append(key_splits)
if self.weights_or_none() is not None:
splits.append(length_per_split)
return splits
def dist_tensors(self) -> List[torch.Tensor]:
tensors = [self.lengths(), self.values()]
if self.variable_stride_per_key():
strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
tensors.append(strides)
if self.weights_or_none() is not None:
tensors.append(self.weights())
return tensors
def dist_init(
keys: List[str],
tensors: List[torch.Tensor],
variable_stride_per_key: bool,
num_workers: int,
recat: Optional[torch.Tensor],
stride_per_rank: Optional[List[int]],
stagger: int = 1,
) -> "KeyedJaggedTensor":
assert len(tensors) in [2, 3, 4]
lengths = tensors[0]
values = tensors[1]
stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
weights = (
tensors[-1]
if (variable_stride_per_key and len(tensors) == 4)
or (not variable_stride_per_key and len(tensors) == 3)
else None
)
if variable_stride_per_key:
assert stride_per_rank_per_key is not None
stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
num_workers, len(keys)
).T.tolist()
strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
stride_per_rank_per_key
).tolist()
cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
length_per_key = (
cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
)
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
lengths, _ = _permute_tensor_by_segments(
lengths,
stride_per_rank_per_key,
recat,
None,
)
values, weights = _permute_tensor_by_segments(
values,
length_per_key,
recat,
weights,
)
if not stride_per_key_per_rank:
stride_per_key_per_rank = [[0]] * len(keys)
if stagger > 1:
stride_per_key_per_rank_stagger: List[List[int]] = []
local_world_size = num_workers // stagger
for i in range(len(keys)):
stride_per_rank_stagger: List[int] = []
for j in range(local_world_size):
stride_per_rank_stagger.extend(
stride_per_key_per_rank[i][j::local_world_size]
)
stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
stride_per_key_per_rank = stride_per_key_per_rank_stagger
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride_per_key_per_rank=stride_per_key_per_rank,
)
return kjt.sync()
else:
assert stride_per_rank is not None
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
stride = stride_per_rank[0]
if all(s == stride for s in stride_per_rank):
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_2D_sparse_data(
recat,
lengths.view(-1, stride),
values,
weights,
values.numel(),
)
lengths = lengths.view(-1)
else: # variable batch size per rank
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_1D_sparse_data(
recat,
lengths.view(-1),
values,
weights,
values.numel(),
)
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=sum(stride_per_rank),
)
return kjt.sync()
def get_weights_list(
    cat_seq: torch.Tensor,
    features: "KeyedJaggedTensor",
    position_weights: Dict[str, nn.Parameter],
) -> Optional[torch.Tensor]:
    """
    Gather per-position weights for each key of ``features``.

    ``cat_seq`` holds per-value position indices concatenated across keys; it
    is split by ``features.length_per_key()``. Keys present in
    ``position_weights`` gather their learned weights by position; other keys
    fall back to a weight of 1.0 per value.

    Args:
        cat_seq (torch.Tensor): concatenated position indices (int64), one per
            value in ``features``.
        features (KeyedJaggedTensor): the sparse features being weighted.
        position_weights (Dict[str, nn.Parameter]): learned position-weight
            tables keyed by feature name.

    Returns:
        Optional[torch.Tensor]: concatenated weights, or the features' own
        weights (possibly ``None``) when there are no keys at all.
    """
    weights_list: List[torch.Tensor] = []
    seqs = torch.split(cat_seq, features.length_per_key())
    for key, seq in zip(features.keys(), seqs):
        # Idiomatic membership test: `in dict`, not `in dict.keys()`.
        if key in position_weights:
            weights_list.append(torch.gather(position_weights[key], dim=0, index=seq))
        else:
            weights_list.append(
                torch.ones(seq.shape[0], device=features.values().device)
            )
    return torch.cat(weights_list) if weights_list else features.weights_or_none()
8,865 | import copy
from collections import defaultdict
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from torch.profiler import record_function
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
def extract_module_or_tensor_callable(
    module_or_callable: Union[
        Callable[[], torch.nn.Module],
        torch.nn.Module,
        Callable[[torch.Tensor], torch.Tensor],
    ]
) -> Union[torch.nn.Module, Callable[[torch.Tensor], torch.Tensor]]:
    """
    Normalize a module-or-factory-or-callable argument.

    The argument is probed with a zero-argument call: a factory must produce a
    ``torch.nn.Module`` (which is returned); a tensor-to-tensor callable that
    rejects the probe with a missing-positional-argument TypeError is handed
    back unchanged. Any other outcome raises.
    """
    try:
        # pyre-ignore[20]: PositionalOnly call expects argument in position 0
        produced = module_or_callable()
    except TypeError as err:
        # Tensor-to-tensor callables reject the zero-argument probe call.
        if "required positional argument" not in str(err):
            raise
        # pyre-ignore[7]: Expected `Union[typing.Callable[[torch.Tensor], torch.Tensor], torch.nn.Module]`
        return module_or_callable
    if not isinstance(produced, torch.nn.Module):
        raise ValueError(
            "Expected callable that takes no input to return "
            "a torch.nn.Module, but got: {}".format(type(produced))
        )
    return produced
8,866 | import copy
from collections import defaultdict
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from torch.profiler import record_function
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
def get_module_output_dimension(
    module: Union[Callable[[torch.Tensor], torch.Tensor], torch.nn.Module],
    in_features: int,
) -> int:
    """
    Probe ``module`` with a single all-zero row of width ``in_features`` and
    report the size of the last dimension of its output.
    """
    probe = torch.zeros(1, in_features)
    return module(probe).size(-1)
The provided code snippet includes necessary dependencies for implementing the `check_module_output_dimension` function. Write a Python function `def check_module_output_dimension( module: Union[Iterable[torch.nn.Module], torch.nn.Module], in_features: int, out_features: int, ) -> bool` to solve the following problem:
Verify that the out_features of a given module or a list of modules matches the specified number. If a list of modules or a ModuleList is given, recursively check all the submodules.
Here is the function:
def check_module_output_dimension(
    module: Union[Iterable[torch.nn.Module], torch.nn.Module],
    in_features: int,
    out_features: int,
) -> bool:
    """
    Verify that the out_features of a given module or a list of modules matches the
    specified number. If a list of modules or a ModuleList is given, recursively check
    all the submodules.
    """
    # Idiom: one isinstance call with a tuple instead of two chained checks.
    if isinstance(module, (list, torch.nn.ModuleList)):
        # Recurse so arbitrarily nested ModuleLists are validated too.
        return all(
            check_module_output_dimension(submodule, in_features, out_features)
            for submodule in module
        )
    # pyre-fixme[6]: Expected `Union[typing.Callable[[torch.Tensor],
    #  torch.Tensor], torch.nn.Module]` for 1st param but got
    #  `Union[Iterable[torch.nn.Module], torch.nn.Module]`.
    return get_module_output_dimension(module, in_features) == out_features
8,867 | import copy
from collections import defaultdict
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from torch.profiler import record_function
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
def init_mlp_weights_xavier_uniform(m: torch.nn.Module) -> None:
    """
    Initialize a Linear layer in place: Xavier-uniform weights, zero bias.

    Intended for use with ``torch.nn.Module.apply``; non-Linear modules are
    left untouched.
    """
    if isinstance(m, torch.nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight)
        # Robustness fix: Linear(bias=False) has no bias tensor to zero.
        if m.bias is not None:
            m.bias.data.fill_(0.0)
The provided code snippet includes necessary dependencies for implementing the `construct_modulelist_from_single_module` function. Write a Python function `def construct_modulelist_from_single_module( module: torch.nn.Module, sizes: Tuple[int, ...] ) -> torch.nn.Module` to solve the following problem:
Given a single module, construct a (nested) ModuleList of size of sizes by making copies of the provided module and reinitializing the Linear layers.
Here is the function:
def construct_modulelist_from_single_module(
    module: torch.nn.Module, sizes: Tuple[int, ...]
) -> torch.nn.Module:
    """
    Given a single module, construct a (nested) ModuleList of size of sizes by making
    copies of the provided module and reinitializing the Linear layers.
    """
    count = sizes[0]
    if len(sizes) == 1:
        # Leaf level: deep-copy the prototype and re-randomize its Linear layers
        # so the replicas do not share (identical) parameters.
        clones = [
            copy.deepcopy(module).apply(init_mlp_weights_xavier_uniform)
            for _ in range(count)
        ]
        return torch.nn.ModuleList(clones)
    # Interior level: recurse to build one nested ModuleList per slot.
    return torch.nn.ModuleList(
        construct_modulelist_from_single_module(module, sizes[1:])
        for _ in range(count)
    )
8,868 | import copy
from collections import defaultdict
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from torch.profiler import record_function
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
def convert_list_of_modules_to_modulelist(
    modules: Iterable[torch.nn.Module], sizes: Tuple[int, ...]
) -> torch.nn.Module:
    """
    Wrap ``modules`` into a (possibly nested) ModuleList shaped like ``sizes``,
    asserting that the provided count matches the leading dimension.
    """
    # pyre-fixme[6]: Expected `Sized` for 1st param but got
    # `Iterable[torch.nn.Module]`.
    count = len(modules)
    assert (
        count == sizes[0]
    ), f"the counts of modules ({count}) do not match with the required counts {sizes}"
    if len(sizes) == 1:
        return torch.nn.ModuleList(modules)
    # Recurse one level per remaining dimension.
    return torch.nn.ModuleList(
        convert_list_of_modules_to_modulelist(inner, sizes[1:]) for inner in modules
    )
8,869 | import copy
from collections import defaultdict
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from torch.profiler import record_function
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
def _permute_indices(indices: List[int], permute: List[int]) -> List[int]:
permuted_indices = [0] * len(indices)
for i, permuted_index in enumerate(permute):
permuted_indices[i] = indices[permuted_index]
return permuted_indices
# pyre-ignore
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    """
    Represents an (optionally weighted) jagged tensor.
    A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
    slices may be of different lengths. See `KeyedJaggedTensor` for full example.
    Implementation is torch.jit.script-able.
    NOTE:
        We will NOT do input validation as it's expensive, you should always pass in the
        valid lengths, offsets, etc.
    Args:
        values (torch.Tensor): values tensor in dense representation.
        weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
            as values.
        lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
        offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
            offsets.
    """
    _fields = ["_values", "_weights", "_lengths", "_offsets"]
    def __init__(
        self,
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
    ) -> None:
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # At least one of offsets/lengths must be given; each must be integer
        # typed (or empty).
        _assert_offsets_or_lengths_is_provided(offsets, lengths)
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
    # Restored @staticmethod: no `self` parameter; this is an alternate
    # constructor meant to be called as `JaggedTensor.empty(...)`.
    @staticmethod
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "JaggedTensor":
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return JaggedTensor(
            values=torch.empty(0, dtype=values_dtype, device=device),
            offsets=torch.empty(0, dtype=lengths_dtype, device=device),
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            weights=weights,
        )
    @staticmethod
    def from_dense_lengths(
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
    ) -> "JaggedTensor":
        """
        Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
        Note that `lengths` is still of shape (B,).
        """
        mask2d = (
            _arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
        ) < lengths.unsqueeze(-1)
        return JaggedTensor(
            values=values[mask2d],
            weights=_optional_mask(weights, mask2d),
            lengths=lengths,
        )
    @staticmethod
    def from_dense(
        values: List[torch.Tensor],
        weights: Optional[List[torch.Tensor]] = None,
    ) -> "JaggedTensor":
        """
        Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
        Note that `lengths` and `offsets` are still of shape (B,).
        Args:
            values (List[torch.Tensor]): a list of tensors for dense representation
            weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
                the same shape as values.
        Returns:
            JaggedTensor: JaggedTensor created from 2D dense tensor.
        Example::
            values = [
                torch.Tensor([1.0]),
                torch.Tensor(),
                torch.Tensor([7.0, 8.0]),
                torch.Tensor([10.0, 11.0, 12.0]),
            ]
            weights = [
                torch.Tensor([1.0]),
                torch.Tensor(),
                torch.Tensor([7.0, 8.0]),
                torch.Tensor([10.0, 11.0, 12.0]),
            ]
            j1 = JaggedTensor.from_dense(
                values=values,
                weights=weights,
            )
            # j1 = [[1.0], [], [7.0], [8.0], [10.0, 11.0, 12.0]]
        """
        values_tensor = torch.cat(values, dim=0)
        lengths = torch.tensor(
            [value.size(0) for value in values],
            dtype=torch.int32,
            device=values_tensor.device,
        )
        weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
        return JaggedTensor(
            values=values_tensor,
            weights=weights_tensor,
            lengths=lengths,
        )
    def to_dense(self) -> List[torch.Tensor]:
        """
        Constructs a dense-representation of the JT's values.
        Returns:
            List[torch.Tensor]: list of tensors.
        Example::
            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, offsets=offsets)
            values_list = jt.to_dense()
            # values_list = [
            #     torch.tensor([1.0, 2.0]),
            #     torch.tensor([]),
            #     torch.tensor([3.0]),
            #     torch.tensor([4.0]),
            #     torch.tensor([5.0]),
            #     torch.tensor([6.0, 7.0, 8.0]),
            # ]
        """
        # Hoist the loop-invariant accessors out of the loop.
        offsets = self.offsets()
        values = self.values()
        tensor_list = []
        for index in range(offsets.size(0) - 1):
            offset = offsets[index].item()
            next_offset = offsets[index + 1].item()
            tensor_list.append(values[offset:next_offset])
        return tensor_list
    def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
        """
        Constructs a dense-representation of the JT's weights.
        Returns:
            Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.
        Example::
            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
            weights_list = jt.to_dense_weights()
            # weights_list = [
            #     torch.tensor([0.1, 0.2]),
            #     torch.tensor([]),
            #     torch.tensor([0.3]),
            #     torch.tensor([0.4]),
            #     torch.tensor([0.5]),
            #     torch.tensor([0.6, 0.7, 0.8]),
            # ]
        """
        if self.weights_or_none() is None:
            return None
        # Hoist the loop-invariant accessors out of the loop.
        offsets = self.offsets()
        weights = self.weights()
        tensor_list = []
        for index in range(offsets.size(0) - 1):
            offset = offsets[index].item()
            next_offset = offsets[index + 1].item()
            tensor_list.append(weights[offset:next_offset])
        return tensor_list
    def to_padded_dense(
        self,
        desired_length: Optional[int] = None,
        padding_value: float = 0.0,
    ) -> torch.Tensor:
        """
        Constructs a 2D dense tensor from the JT's values of shape (B, N,).
        Note that `B` is the length of self.lengths() and `N` is the longest feature
        length or `desired_length`.
        If `desired_length` > `length` we will pad with `padding_value`, otherwise we
        will select the last value at `desired_length`.
        Args:
            desired_length (int): the length of the tensor.
            padding_value (float): padding value if we need to pad.
        Returns:
            torch.Tensor: 2d dense tensor.
        Example::
            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, offsets=offsets)
            dt = jt.to_padded_dense(
                desired_length=2,
                padding_value=10.0,
            )
            # dt = [
            #     [1.0, 2.0],
            #     [10.0, 10.0],
            #     [3.0, 10.0],
            #     [4.0, 10.0],
            #     [5.0, 10.0],
            #     [6.0, 7.0],
            # ]
        """
        if desired_length is None:
            N = int(torch.max(self.lengths()).item())
        else:
            N = desired_length
        return torch.ops.fbgemm.jagged_to_padded_dense(
            self.values(), [self.offsets()], [N], padding_value
        )
    def to_padded_dense_weights(
        self,
        desired_length: Optional[int] = None,
        padding_value: float = 0.0,
    ) -> Optional[torch.Tensor]:
        """
        Constructs a 2D dense tensor from the JT's weights of shape (B, N,).
        Note that `B` is the length of self.lengths() and `N` is the longest feature
        length or `desired_length`.
        If `desired_length` > `length` we will pad with `padding_value`, otherwise we
        will select the last value at `desired_length`.
        Args:
            desired_length (int): the length of the tensor.
            padding_value (float): padding value if we need to pad.
        Returns:
            Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.
        Example::
            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
            d_wt = jt.to_padded_dense_weights(
                desired_length=2,
                padding_value=1.0,
            )
            # d_wt = [
            #     [0.1, 0.2],
            #     [1.0, 1.0],
            #     [0.3, 1.0],
            #     [0.4, 1.0],
            #     [0.5, 1.0],
            #     [0.6, 0.7],
            # ]
        """
        if self.weights_or_none() is None:
            return None
        if desired_length is None:
            N = int(torch.max(self.lengths()).item())
        else:
            N = desired_length
        return torch.ops.fbgemm.jagged_to_padded_dense(
            self.weights(), [self.offsets()], [N], padding_value
        )
    def lengths(self) -> torch.Tensor:
        # Lazily derive lengths from offsets and cache the result.
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        return self._lengths
    def offsets(self) -> torch.Tensor:
        # Lazily derive offsets from lengths and cache the result.
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        return self._offsets
    def values(self) -> torch.Tensor:
        return self._values
    def weights(self) -> torch.Tensor:
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        return self._weights
    def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        return JaggedTensor(
            values=self._values.to(device, non_blocking=non_blocking),
            weights=(
                weights.to(device, non_blocking=non_blocking)
                if weights is not None
                else None
            ),
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
        )
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        self._values.record_stream(stream)
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        if weights is not None:
            weights.record_stream(stream)
        if lengths is not None:
            lengths.record_stream(stream)
        if offsets is not None:
            offsets.record_stream(stream)
    def __str__(self) -> str:
        offsets = self.offsets()
        if self._weights is None:
            return (
                "JaggedTensor({\n    "
                + _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
                + "\n})\n"
            )
        return (
            "JaggedTensor({\n"
            + '    "values": '
            + _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
            + ',\n    "weights": '
            + _jagged_values_string(
                _get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
            )
            + "\n})\n"
        )
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        """See the class docstring for the meaning of each argument."""
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer tensors (or empty placeholders).
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            # Variable-stride mode: `stride` is derived, never user-supplied.
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # All keys share one stride: also expose it as the scalar stride.
                self._stride = self.stride_per_key()[0]
        else:
            # Uniform-stride mode: infer stride from lengths/offsets if needed.
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            # NOTE(review): `[[stride]] * len(keys)` repeats the SAME inner list
            # object for every key — fine while treated as read-only; confirm
            # nothing mutates these inner lists in place.
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
def from_offsets_sync(
keys: List[str],
values: torch.Tensor,
offsets: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
offsets=offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def from_lengths_sync(
keys: List[str],
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
stride: Optional[int] = None,
stride_per_key_per_rank: Optional[List[List[int]]] = None,
inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
) -> "KeyedJaggedTensor":
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
inverse_indices=inverse_indices,
)
return kjt.sync()
def concat(
    kjt_list: List["KeyedJaggedTensor"],
) -> "KeyedJaggedTensor":
    """
    Concatenates KeyedJaggedTensors along the key dimension.

    All inputs must agree on weightedness and on whether stride is variable
    per key; for fixed-stride inputs the strides must also match.

    Args:
        kjt_list (List[KeyedJaggedTensor]): KJTs to concatenate; must be
            non-empty.

    Returns:
        KeyedJaggedTensor: KJT whose keys/values/weights/lengths are the
            inputs' concatenated in order.

    Raises:
        ValueError: if ``kjt_list`` is empty or mixes weighted/unweighted KJTs.
    """
    if len(kjt_list) == 0:
        raise ValueError("Can't concat empty KJT list")
    is_weighted: bool = kjt_list[0].weights_or_none() is not None
    has_length_per_key: bool = True
    length_per_key: List[int] = []
    keys: List[str] = []
    value_list: List[torch.Tensor] = []
    weight_list: List[torch.Tensor] = []
    length_list: List[torch.Tensor] = []
    stride_per_key_per_rank: List[List[int]] = []
    stride: Optional[int] = None
    variable_stride_per_key_list = [
        kjt.variable_stride_per_key() for kjt in kjt_list
    ]
    assert all(variable_stride_per_key_list) or not any(
        variable_stride_per_key_list
    ), "variable stride per key must be consistent for all KJTs"
    variable_stride_per_key = all(variable_stride_per_key_list)
    for kjt in kjt_list:
        curr_is_weighted: bool = kjt.weights_or_none() is not None
        if is_weighted != curr_is_weighted:
            raise ValueError("Can't merge weighted KJT with unweighted KJT")
        # The cached length_per_key can only be propagated to the result if
        # every input KJT already has it cached.
        _length_per_key: Optional[List[int]] = None
        if kjt._length_per_key is None:
            has_length_per_key = False
        else:
            _length_per_key = kjt._length_per_key
        if has_length_per_key and _length_per_key is not None:
            length_per_key += _length_per_key
        keys += kjt.keys()
        value_list.append(kjt.values())
        if is_weighted:
            weight_list.append(kjt.weights())
        length_list.append(kjt.lengths())
        if variable_stride_per_key:
            stride_per_key_per_rank += kjt.stride_per_key_per_rank()
        elif stride is None:
            # first KJT establishes the common stride
            stride = kjt.stride()
        else:
            assert stride == kjt.stride(), "strides must be consistent for all KJTs"
    return KeyedJaggedTensor(
        keys=keys,
        values=torch.cat(value_list, dim=0),
        weights=torch.cat(weight_list, dim=0) if is_weighted else None,
        lengths=torch.cat(length_list, dim=0),
        stride=stride,
        stride_per_key_per_rank=(
            stride_per_key_per_rank if variable_stride_per_key else None
        ),
        length_per_key=length_per_key if has_length_per_key else None,
    )
def empty(
    is_weighted: bool = False,
    device: Optional[torch.device] = None,
    values_dtype: Optional[torch.dtype] = None,
    weights_dtype: Optional[torch.dtype] = None,
    lengths_dtype: torch.dtype = torch.int32,
) -> "KeyedJaggedTensor":
    """
    Constructs an empty KeyedJaggedTensor (no keys, no values) with the
    requested dtypes/device and stride 0.
    """
    return KeyedJaggedTensor(
        keys=torch.jit.annotate(List[str], []),
        values=torch.empty(0, dtype=values_dtype, device=device),
        weights=(
            torch.empty(0, dtype=weights_dtype, device=device)
            if is_weighted
            else None
        ),
        lengths=torch.empty(0, dtype=lengths_dtype, device=device),
        stride=0,
    )
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
    """
    Constructs an empty KeyedJaggedTensor whose tensor dtypes, device and
    stride configuration mirror those of ``kjt``.
    """
    stride: Optional[int] = None
    stride_per_key_per_rank: Optional[List[List[int]]] = None
    if kjt.variable_stride_per_key():
        stride_per_key_per_rank = kjt.stride_per_key_per_rank()
    else:
        stride = kjt.stride()
    weights = kjt.weights_or_none()
    empty_weights: Optional[torch.Tensor] = None
    if weights is not None:
        empty_weights = torch.empty(0, device=kjt.device(), dtype=weights.dtype)
    return KeyedJaggedTensor(
        keys=[],
        values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
        weights=empty_weights,
        lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
    )
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
    """
    Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
    but this function will ONLY work if the JaggedTensors all
    have the same "implicit" batch_size dimension.
    Basically, we can visualize JaggedTensors as 2-D tensors
    of the format of [batch_size x variable_feature_dim].
    In case, we have some batch without a feature value,
    the input JaggedTensor could just not include any values.
    But KeyedJaggedTensor (by default) typically pad "None"
    so that all the JaggedTensors stored in the KeyedJaggedTensor
    have the same batch_size dimension. That is, in the case,
    the JaggedTensor input didn't automatically pad
    for the empty batches, this function would error / not work.
    Consider the visualization of the following KeyedJaggedTensor:
    #         0       1        2  <-- dim_1
    # "Feature0"   [V0,V1] None    [V2]
    # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    #   ^
    #  dim_0
    Notice that the inputs for this KeyedJaggedTensor would have looked like:
    values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
    weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
    lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
    offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
    keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
    index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
    offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset
    Now if the input jt_dict = {
        # "Feature0"   [V0,V1] [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
    } and the "None" is left out from each JaggedTensor,
    then this function would fail as we would not correctly
    be able to pad "None" as it does not technically know
    the correct batch / place to pad within the JaggedTensor.
    Essentially, the lengths Tensor inferred by this function
    would be [2, 1, 1, 1, 3] indicating variable batch_size
    dim_1 violates the existing assumption / precondition
    that KeyedJaggedTensor's should have fixed batch_size dimension.
    """
    kjt_keys = list(jt_dict.keys())
    kjt_vals_list: List[torch.Tensor] = []
    kjt_lens_list: List[torch.Tensor] = []
    kjt_weights_list: List[torch.Tensor] = []
    # per-key batch size, inferred from each JT's lengths tensor
    stride_per_key: List[int] = []
    for jt in jt_dict.values():
        stride_per_key.append(len(jt.lengths()))
        kjt_vals_list.append(jt.values())
        kjt_lens_list.append(jt.lengths())
        weight = jt.weights_or_none()
        if weight is not None:
            kjt_weights_list.append(weight)
    kjt_vals = torch.concat(kjt_vals_list)
    kjt_lens = torch.concat(kjt_lens_list)
    kjt_weights = (
        torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
    )
    # uniform per-key batch size -> fixed stride; otherwise fall back to
    # variable stride with a single "rank" per key
    kjt_stride, kjt_stride_per_key_per_rank = (
        (stride_per_key[0], None)
        if all(s == stride_per_key[0] for s in stride_per_key)
        else (None, [[stride] for stride in stride_per_key])
    )
    kjt = KeyedJaggedTensor(
        keys=kjt_keys,
        values=kjt_vals,
        weights=kjt_weights,
        lengths=kjt_lens,
        stride=kjt_stride,
        stride_per_key_per_rank=kjt_stride_per_key_per_rank,
    ).sync()
    return kjt
def sync(self) -> "KeyedJaggedTensor":
    """Eagerly computes and caches length_per_key/offset_per_key; returns self."""
    self.length_per_key()
    self.offset_per_key()
    return self
def unsync(self) -> "KeyedJaggedTensor":
    """Drops the cached length_per_key/offset_per_key; returns self."""
    self._length_per_key = None
    self._offset_per_key = None
    return self
def device(self) -> torch.device:
    """Returns the device of the values tensor (member tensors are presumed co-located)."""
    return self._values.device
def lengths(self) -> torch.Tensor:
    """Returns lengths, deriving (and caching) them from offsets if not set."""
    _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
    self._lengths = _lengths
    return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
    """Returns the lengths tensor if already materialized, else None (no compute)."""
    return self._lengths
def offsets(self) -> torch.Tensor:
    """Returns offsets, deriving (and caching) them from lengths if not set."""
    _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
    self._offsets = _offsets
    return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
    """Returns the offsets tensor if already materialized, else None (no compute)."""
    return self._offsets
def keys(self) -> List[str]:
    """Returns the feature keys, in order."""
    return self._keys
def values(self) -> torch.Tensor:
    """Returns the flat values tensor."""
    return self._values
def weights(self) -> torch.Tensor:
    """Returns the weights tensor; raises if this KJT is unweighted."""
    return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
    """Returns the weights tensor, or None if this KJT is unweighted."""
    return self._weights
def stride(self) -> int:
    """Returns the (fixed) batch size per key."""
    return self._stride
def stride_per_key(self) -> List[int]:
    """Returns the total batch size of each key (summed over ranks)."""
    return self._stride_per_key
def stride_per_key_per_rank(self) -> List[List[int]]:
    """Returns per-key, per-rank batch sizes."""
    return self._stride_per_key_per_rank
def variable_stride_per_key(self) -> bool:
    """Returns True if this KJT uses per-key (variable) strides."""
    return self._variable_stride_per_key
def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
    """Returns the inverse indices (keys, tensor); raises if not present."""
    return _get_inverse_indices_or_throw(self._inverse_indices)
def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
    """Returns the inverse indices, or None if not present."""
    return self._inverse_indices
def _key_indices(self) -> Dict[str, int]:
    """Returns (and lazily caches) the key -> position mapping."""
    _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
        self._keys,
        self._index_per_key,
    )
    self._index_per_key = _index_per_key
    return _index_per_key
def length_per_key(self) -> List[int]:
    """Returns (and lazily caches) the number of values belonging to each key."""
    _length_per_key = _maybe_compute_length_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    self._length_per_key = _length_per_key
    return _length_per_key
def length_per_key_or_none(self) -> Optional[List[int]]:
    """Returns the cached length_per_key, or None if not computed yet (no compute)."""
    return self._length_per_key
def offset_per_key(self) -> List[int]:
    """
    Returns (and lazily caches) the start offset of each key in values, plus a
    final total offset; also refreshes the length_per_key cache.
    """
    _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
        keys=self._keys,
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        variable_stride_per_key=self.variable_stride_per_key(),
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        lengths=self._lengths,
        offsets=self._offsets,
        values=self._values,
    )
    self._length_per_key = _length_per_key
    self._offset_per_key = _offset_per_key
    return _offset_per_key
def offset_per_key_or_none(self) -> Optional[List[int]]:
    """Returns the cached offset_per_key, or None if not computed yet (no compute)."""
    return self._offset_per_key
def lengths_offset_per_key(self) -> List[int]:
    """Returns (and lazily caches) cumulative strides — each key's start index into lengths."""
    if len(self._lengths_offset_per_key) == 0:
        self._lengths_offset_per_key = _cumsum(self.stride_per_key())
    return self._lengths_offset_per_key
def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
    """
    Splits this KJT along the key dimension.

    Args:
        segments (List[int]): number of keys per output KJT; assumed to sum to
            the total number of keys.

    Returns:
        List[KeyedJaggedTensor]: one KJT per segment, in key order.
    """
    split_list: List[KeyedJaggedTensor] = []
    start = 0  # key index where the current segment begins
    start_offset = 0  # values offset where the current segment begins
    _length_per_key = self.length_per_key()
    _offset_per_key = self.offset_per_key()
    for segment in segments:
        end = start + segment
        end_offset = _offset_per_key[end]
        keys: List[str] = self._keys[start:end]
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank()[start:end])
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        if segment == len(self._keys):
            # no torch slicing required
            split_list.append(
                KeyedJaggedTensor(
                    keys=self._keys,
                    values=self._values,
                    weights=self.weights_or_none(),
                    lengths=self._lengths,
                    offsets=self._offsets,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=self._length_per_key,
                    offset_per_key=self._offset_per_key,
                    index_per_key=self._index_per_key,
                    jt_dict=self._jt_dict,
                    inverse_indices=None,
                )
            )
        elif segment == 0:
            # empty segment: build a KJT with empty tensors of matching dtypes
            empty_int_list: List[int] = torch.jit.annotate(List[int], [])
            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=torch.tensor(
                        empty_int_list,
                        device=self.device(),
                        dtype=self._values.dtype,
                    ),
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self.weights().dtype,
                        )
                    ),
                    lengths=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    offsets=torch.tensor(
                        empty_int_list, device=self.device(), dtype=torch.int
                    ),
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=None,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        else:
            split_length_per_key = _length_per_key[start:end]

            if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                # Checks for dynamo dynamic shapes tracing
                torch._check_is_size(start_offset)
                torch._check_is_size(end_offset)
                torch._check_is_size(end_offset - start_offset)
                torch._check(start_offset <= self._values.size(0))
                torch._check(end_offset <= self._values.size(0))
                torch._check(end_offset >= start_offset)

            split_list.append(
                KeyedJaggedTensor(
                    keys=keys,
                    values=self._values[start_offset:end_offset],
                    weights=(
                        None
                        if self.weights_or_none() is None
                        else self.weights()[start_offset:end_offset]
                    ),
                    lengths=self.lengths()[
                        self.lengths_offset_per_key()[
                            start
                        ] : self.lengths_offset_per_key()[end]
                    ],
                    offsets=None,
                    stride=stride,
                    stride_per_key_per_rank=stride_per_key_per_rank,
                    length_per_key=split_length_per_key,
                    offset_per_key=None,
                    index_per_key=None,
                    jt_dict=None,
                    inverse_indices=None,
                )
            )
        start = end
        start_offset = end_offset
    return split_list
def permute(
    self,
    indices: List[int],
    indices_tensor: Optional[torch.Tensor] = None,
    include_inverse_indices: bool = False,
) -> "KeyedJaggedTensor":
    """
    Returns a new KJT with keys (and their values/weights/lengths) reordered
    according to ``indices``.

    Args:
        indices (List[int]): permutation over current key positions.
        indices_tensor (Optional[torch.Tensor]): optional pre-built tensor of
            ``indices`` (avoids an extra host-to-device copy).
        include_inverse_indices (bool): whether to carry this KJT's inverse
            indices over to the result.
    """
    if indices_tensor is None:
        indices_tensor = torch.tensor(
            indices, dtype=torch.int, device=self.device()
        )

    length_per_key = self.length_per_key()
    permuted_keys: List[str] = []
    permuted_stride_per_key_per_rank: List[List[int]] = []
    permuted_length_per_key: List[int] = []
    permuted_lengths_sum = 0
    for index in indices:
        key = self.keys()[index]
        permuted_keys.append(key)
        permuted_stride_per_key_per_rank.append(
            self.stride_per_key_per_rank()[index]
        )
        permuted_length_per_key.append(length_per_key[index])
        permuted_lengths_sum += length_per_key[index]

    if self.variable_stride_per_key():
        # variable batch: permute by per-key segments
        length_per_key_tensor = _pin_and_move(
            torch.tensor(self.length_per_key()), self.device()
        )
        stride_per_key_tensor = _pin_and_move(
            torch.tensor(self.stride_per_key()), self.device()
        )
        permuted_lengths, _ = _permute_tensor_by_segments(
            self.lengths(),
            stride_per_key_tensor,
            indices_tensor,
            None,
        )
        permuted_values, permuted_weights = _permute_tensor_by_segments(
            self.values(),
            length_per_key_tensor,
            indices_tensor,
            self.weights_or_none(),
        )
    else:
        # fixed batch: single fused fbgemm permute over the 2-D lengths view
        (
            permuted_lengths,
            permuted_values,
            permuted_weights,
        ) = torch.ops.fbgemm.permute_2D_sparse_data(
            indices_tensor,
            self.lengths().view(len(self._keys), -1),
            self.values(),
            self.weights_or_none(),
            permuted_lengths_sum,
        )
    stride, optional_permuted_stride_per_key_per_rank = (
        (None, permuted_stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    kjt = KeyedJaggedTensor(
        keys=permuted_keys,
        values=permuted_values,
        weights=permuted_weights,
        lengths=permuted_lengths.view(-1),
        offsets=None,
        stride=stride,
        stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
        length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
        offset_per_key=None,
        index_per_key=None,
        jt_dict=None,
        inverse_indices=(
            self.inverse_indices_or_none() if include_inverse_indices else None
        ),
    )
    return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
    """
    Returns a KJT equivalent to this one whose lengths tensor is flattened
    to 1-D; the length_per_key cache is carried over, offsets are dropped.
    """
    stride: Optional[int] = None
    stride_per_key_per_rank: Optional[List[List[int]]] = None
    if self.variable_stride_per_key():
        stride_per_key_per_rank = self.stride_per_key_per_rank()
    else:
        stride = self._stride
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values,
        weights=self._weights,
        lengths=self.lengths().view(-1),
        offsets=None,
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self.length_per_key(),
        offset_per_key=None,
        index_per_key=None,
        jt_dict=None,
        inverse_indices=None,
    )
def __getitem__(self, key: str) -> JaggedTensor:
    """Returns the JaggedTensor (values/weights/lengths slice) for ``key``."""
    index = self._key_indices()[key]
    offset_per_key = self.offset_per_key()
    start_offset = offset_per_key[index]
    if index + 1 < len(offset_per_key):
        end_offset = offset_per_key[index + 1]
    else:
        # defensive: last key with no trailing total offset -> empty slice
        end_offset = start_offset
    weights = self.weights_or_none()
    sliced_weights: Optional[torch.Tensor] = None
    if weights is not None:
        sliced_weights = weights[start_offset:end_offset]
    lengths_offset_per_key = self.lengths_offset_per_key()
    return JaggedTensor(
        values=self._values[start_offset:end_offset],
        weights=sliced_weights,
        lengths=self.lengths()[
            lengths_offset_per_key[index] : lengths_offset_per_key[index + 1]
        ],
        offsets=None,
    )
def to_dict(self) -> Dict[str, JaggedTensor]:
    """Returns (and lazily caches) a dict view mapping each key to its JaggedTensor."""
    _jt_dict = _maybe_compute_kjt_to_jt_dict(
        stride=self.stride(),
        stride_per_key=self.stride_per_key(),
        keys=self.keys(),
        length_per_key=self.length_per_key(),
        lengths=self.lengths(),
        values=self.values(),
        variable_stride_per_key=self.variable_stride_per_key(),
        weights=self.weights_or_none(),
        jt_dict=self._jt_dict,
    )
    self._jt_dict = _jt_dict
    return _jt_dict
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    """
    Marks all member tensors as in-use on ``stream`` so their memory is not
    reclaimed until the stream's queued work completes.
    """
    self._values.record_stream(stream)
    optional_tensors: List[Optional[torch.Tensor]] = [
        self._weights,
        self._lengths,
        self._offsets,
    ]
    for maybe_tensor in optional_tensors:
        if maybe_tensor is not None:
            maybe_tensor.record_stream(stream)
def to(
    self,
    device: torch.device,
    non_blocking: bool = False,
    dtype: Optional[torch.dtype] = None,
) -> "KeyedJaggedTensor":
    """
    Returns a copy of this KJT with tensors moved to ``device``.

    ``dtype``, when given, is applied to the weights tensor only. Cached lazy
    fields (length/offset/index per key, jt_dict) are carried over unchanged;
    inverse indices are moved to ``device`` as well.
    """
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    stride, stride_per_key_per_rank = (
        (None, self._stride_per_key_per_rank)
        if self.variable_stride_per_key()
        else (self._stride, None)
    )
    length_per_key = self._length_per_key
    offset_per_key = self._offset_per_key
    index_per_key = self._index_per_key
    jt_dict = self._jt_dict
    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        inverse_indices = (
            inverse_indices[0],
            inverse_indices[1].to(device, non_blocking=non_blocking),
        )
    if weights is not None:
        if dtype is not None:
            weights = weights.to(
                dtype=dtype, device=device, non_blocking=non_blocking
            )
        else:
            weights = weights.to(device=device, non_blocking=non_blocking)

    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.to(device, non_blocking=non_blocking),
        weights=weights,
        lengths=(
            lengths.to(device, non_blocking=non_blocking)
            if lengths is not None
            else None
        ),
        offsets=(
            offsets.to(device, non_blocking=non_blocking)
            if offsets is not None
            else None
        ),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=length_per_key,
        offset_per_key=offset_per_key,
        index_per_key=index_per_key,
        jt_dict=jt_dict,
        inverse_indices=inverse_indices,
    )
def __str__(self) -> str:
    """Renders each key's jagged values; empty/uninitialized KJTs print as empty."""
    if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
        return "KeyedJaggedTensor()\n"
    offsets = self.offsets()

    return (
        "KeyedJaggedTensor({\n"
        + ",\n".join(
            [
                "    "
                + _jagged_tensor_string(
                    self._keys[index],
                    self._values,
                    self._weights,
                    offsets,
                    sum(self.stride_per_key()[:index]),
                    sum(self.stride_per_key()[: index + 1]),
                )
                for index in range(len(self._keys))
            ]
        )
        + "\n})\n"
    )
def pin_memory(self) -> "KeyedJaggedTensor":
    """
    Returns a copy of this KJT with all constituent tensors in pinned
    (page-locked) host memory; per-key caches are carried over except the
    jt_dict cache, which is dropped.
    """
    stride: Optional[int] = None
    stride_per_key_per_rank: Optional[List[List[int]]] = None
    if self.variable_stride_per_key():
        stride_per_key_per_rank = self._stride_per_key_per_rank
    else:
        stride = self._stride
    weights = self._weights
    lengths = self._lengths
    offsets = self._offsets
    pinned_inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None
    inverse_indices = self._inverse_indices
    if inverse_indices is not None:
        pinned_inverse_indices = (
            inverse_indices[0],
            inverse_indices[1].pin_memory(),
        )
    return KeyedJaggedTensor(
        keys=self._keys,
        values=self._values.pin_memory(),
        weights=None if weights is None else weights.pin_memory(),
        lengths=None if lengths is None else lengths.pin_memory(),
        offsets=None if offsets is None else offsets.pin_memory(),
        stride=stride,
        stride_per_key_per_rank=stride_per_key_per_rank,
        length_per_key=self._length_per_key,
        offset_per_key=self._offset_per_key,
        index_per_key=self._index_per_key,
        jt_dict=None,
        inverse_indices=pinned_inverse_indices,
    )
def dist_labels(self) -> List[str]:
    """Names of the tensors exchanged in a KJT all2all, matching dist_tensors() order."""
    labels: List[str] = ["lengths", "values"]
    if self.variable_stride_per_key():
        labels = labels + ["strides"]
    if self.weights_or_none() is not None:
        labels = labels + ["weights"]
    return labels
def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
    """Per-tensor all2all split sizes for a key partition; order matches dist_labels()."""
    batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
    length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
    result: List[List[int]] = [batch_size_per_split, length_per_split]
    if self.variable_stride_per_key():
        result.append(key_splits)
    if self.weights_or_none() is not None:
        # weights are split identically to values
        result.append(length_per_split)
    return result
def dist_tensors(self) -> List[torch.Tensor]:
    """Tensors to exchange in a KJT all2all; order matches dist_labels()."""
    tensors: List[torch.Tensor] = [self.lengths(), self.values()]
    if self.variable_stride_per_key():
        tensors.append(
            _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
        )
    if self.weights_or_none() is not None:
        tensors.append(self.weights())
    return tensors
def dist_init(
    keys: List[str],
    tensors: List[torch.Tensor],
    variable_stride_per_key: bool,
    num_workers: int,
    recat: Optional[torch.Tensor],
    stride_per_rank: Optional[List[int]],
    stagger: int = 1,
) -> "KeyedJaggedTensor":
    """
    Constructs a synced KJT from the output tensors of a KJT all2all exchange.

    Args:
        keys (List[str]): feature keys of the resulting KJT.
        tensors (List[torch.Tensor]): received tensors, ordered
            [lengths, values, (strides,) (weights,)] per ``dist_labels``.
        variable_stride_per_key (bool): whether a strides tensor was exchanged.
        num_workers (int): world size of the exchange.
        recat (Optional[torch.Tensor]): permutation used to regroup received
            data by key; skipped when None or empty.
        stride_per_rank (Optional[List[int]]): per-rank batch size; required
            when stride is not variable per key.
        stagger (int): stagger factor used to restore rank order of strides.
    """
    assert len(tensors) in [2, 3, 4]
    lengths = tensors[0]
    values = tensors[1]
    stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
    # weights is the trailing tensor iff present (position depends on strides)
    weights = (
        tensors[-1]
        if (variable_stride_per_key and len(tensors) == 4)
        or (not variable_stride_per_key and len(tensors) == 3)
        else None
    )

    if variable_stride_per_key:
        assert stride_per_rank_per_key is not None
        stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
            num_workers, len(keys)
        ).T.tolist()
        strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
            stride_per_rank_per_key
        ).tolist()
        cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
        # number of values per (rank, key) segment, via cumsum differencing
        length_per_key = (
            cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
        )
        with record_function("## all2all_data:recat_values ##"):
            if recat is not None and recat.numel() > 0:
                lengths, _ = _permute_tensor_by_segments(
                    lengths,
                    stride_per_rank_per_key,
                    recat,
                    None,
                )
                values, weights = _permute_tensor_by_segments(
                    values,
                    length_per_key,
                    recat,
                    weights,
                )
        if not stride_per_key_per_rank:
            stride_per_key_per_rank = [[0]] * len(keys)
        if stagger > 1:
            # undo the staggered rank ordering used during the exchange
            stride_per_key_per_rank_stagger: List[List[int]] = []
            local_world_size = num_workers // stagger
            for i in range(len(keys)):
                stride_per_rank_stagger: List[int] = []
                for j in range(local_world_size):
                    stride_per_rank_stagger.extend(
                        stride_per_key_per_rank[i][j::local_world_size]
                    )
                stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
            stride_per_key_per_rank = stride_per_key_per_rank_stagger
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )
        return kjt.sync()
    else:
        assert stride_per_rank is not None
        with record_function("## all2all_data:recat_values ##"):
            if recat is not None and recat.numel() > 0:
                stride = stride_per_rank[0]
                if all(s == stride for s in stride_per_rank):
                    # uniform batch size per rank: 2-D fused permute
                    (
                        lengths,
                        values,
                        weights,
                    ) = torch.ops.fbgemm.permute_2D_sparse_data(
                        recat,
                        lengths.view(-1, stride),
                        values,
                        weights,
                        values.numel(),
                    )
                    lengths = lengths.view(-1)
                else:  # variable batch size per rank
                    (
                        lengths,
                        values,
                        weights,
                    ) = torch.ops.fbgemm.permute_1D_sparse_data(
                        recat,
                        lengths.view(-1),
                        values,
                        weights,
                        values.numel(),
                    )
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=sum(stride_per_rank),
        )
        return kjt.sync()
def construct_jagged_tensors(
    embeddings: torch.Tensor,
    features: KeyedJaggedTensor,
    embedding_names: List[str],
    need_indices: bool = False,
    features_to_permute_indices: Optional[Dict[str, List[int]]] = None,
    original_features: Optional[KeyedJaggedTensor] = None,
    reverse_indices: Optional[torch.Tensor] = None,
) -> Dict[str, JaggedTensor]:
    """
    Groups a flat embedding-lookup output into one JaggedTensor per embedding
    name.

    Args:
        embeddings (torch.Tensor): flat lookup output, rows ordered per key of
            ``features`` and then per jagged slice.
        features (KeyedJaggedTensor): the sparse features looked up.
        embedding_names (List[str]): output name for each key; duplicate names
            are concatenated along dim 1 (column-wise sharding).
        need_indices (bool): if True, attach the original feature ids to each
            JaggedTensor as its ``weights``.
        features_to_permute_indices (Optional[Dict[str, List[int]]]): per-name
            permutation to order column-wise shards correctly.
        original_features (Optional[KeyedJaggedTensor]): if given, used instead
            of ``features`` for lengths/values.
        reverse_indices (Optional[torch.Tensor]): if given, expands deduped
            ``embeddings`` rows back via index_select.

    Returns:
        Dict[str, JaggedTensor]: mapping of embedding name to jagged embeddings.
    """
    with record_function("## construct_jagged_tensors ##"):
        if original_features is not None:
            features = original_features
        if reverse_indices is not None:
            embeddings = torch.index_select(
                embeddings, 0, reverse_indices.to(torch.int32)
            )

        ret: Dict[str, JaggedTensor] = {}
        stride = features.stride()
        length_per_key = features.length_per_key()
        values = features.values()

        # NOTE(review): assumes a fixed batch size (stride) per key — lengths
        # is reshaped to [num_keys, stride].
        lengths = features.lengths().view(-1, stride)
        # fix: `lengths` is already [num_keys, stride]; the previous extra
        # `.view(-1, stride)` before unbind was redundant.
        lengths_tuple = torch.unbind(lengths, dim=0)
        embeddings_list = torch.split(embeddings, length_per_key, dim=0)
        values_list = torch.split(values, length_per_key) if need_indices else None

        key_indices = defaultdict(list)
        for i, key in enumerate(embedding_names):
            key_indices[key].append(i)
        for key, indices in key_indices.items():
            # combines outputs in correct order for CW sharding
            indices = (
                _permute_indices(indices, features_to_permute_indices[key])
                if features_to_permute_indices and key in features_to_permute_indices
                else indices
            )
            ret[key] = JaggedTensor(
                lengths=lengths_tuple[indices[0]],
                values=(
                    embeddings_list[indices[0]]
                    if len(indices) == 1
                    else torch.cat([embeddings_list[i] for i in indices], dim=1)
                ),
                # pyre-ignore
                weights=values_list[indices[0]] if need_indices else None,
            )
        return ret
8,870 | import copy
from collections import defaultdict
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union
import torch
from torch.profiler import record_function
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
def _fx_to_list(tensor: torch.Tensor) -> List[int]:
return tensor.long().tolist()
def _permute_indices(indices: List[int], permute: List[int]) -> List[int]:
permuted_indices = [0] * len(indices)
for i, permuted_index in enumerate(permute):
permuted_indices[i] = indices[permuted_index]
return permuted_indices
# pyre-ignore
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
Represents an (optionally weighted) jagged tensor.
A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. See `KeyedJaggedTensor` for full example.
Implementation is torch.jit.script-able.
NOTE:
We will NOT do input validation as it's expensive, you should always pass in the
valid lengths, offsets, etc.
Args:
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
"""
_fields = ["_values", "_weights", "_lengths", "_offsets"]
def __init__(
self,
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
) -> None:
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
_assert_offsets_or_lengths_is_provided(offsets, lengths)
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "JaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return JaggedTensor(
values=torch.empty(0, dtype=values_dtype, device=device),
offsets=torch.empty(0, dtype=lengths_dtype, device=device),
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
weights=weights,
)
def from_dense_lengths(
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` is still of shape (B,).
"""
mask2d = (
_arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
) < lengths.unsqueeze(-1)
return JaggedTensor(
values=values[mask2d],
weights=_optional_mask(weights, mask2d),
lengths=lengths,
)
def from_dense(
values: List[torch.Tensor],
weights: Optional[List[torch.Tensor]] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` and `offsets` are still of shape (B,).
Args:
values (List[torch.Tensor]): a list of tensors for dense representation
weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
the same shape as values.
Returns:
JaggedTensor: JaggedTensor created from 2D dense tensor.
Example::
values = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
weights = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
j1 = JaggedTensor.from_dense(
values=values,
weights=weights,
)
# j1 = [[1.0], [], [7.0], [8.0], [10.0, 11.0, 12.0]]
"""
values_tensor = torch.cat(values, dim=0)
lengths = torch.tensor(
[value.size(0) for value in values],
dtype=torch.int32,
device=values_tensor.device,
)
weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
return JaggedTensor(
values=values_tensor,
weights=weights_tensor,
lengths=lengths,
)
def to_dense(self) -> List[torch.Tensor]:
"""
Constructs a dense-representation of the JT's values.
Returns:
List[torch.Tensor]: list of tensors.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
values_list = jt.to_dense()
# values_list = [
# torch.tensor([1.0, 2.0]),
# torch.tensor([]),
# torch.tensor([3.0]),
# torch.tensor([4.0]),
# torch.tensor([5.0]),
# torch.tensor([6.0, 7.0, 8.0]),
# ]
"""
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.values()[offset:next_offset])
return tensor_list
def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
"""
Constructs a dense-representation of the JT's weights.
Returns:
Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
weights_list = jt.to_dense_weights()
# weights_list = [
# torch.tensor([0.1, 0.2]),
# torch.tensor([]),
# torch.tensor([0.3]),
# torch.tensor([0.4]),
# torch.tensor([0.5]),
# torch.tensor([0.6, 0.7, 0.8]),
# ]
"""
if self.weights_or_none() is None:
return None
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.weights()[offset:next_offset])
return tensor_list
def to_padded_dense(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> torch.Tensor:
"""
Constructs a 2D dense tensor from the JT's values of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
torch.Tensor: 2d dense tensor.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
dt = jt.to_padded_dense(
desired_length=2,
padding_value=10.0,
)
# dt = [
# [1.0, 2.0],
# [10.0, 10.0],
# [3.0, 10.0],
# [4.0, 10.0],
# [5.0, 10.0],
# [6.0, 7.0],
# ]
"""
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.values(), [self.offsets()], [N], padding_value
)
def to_padded_dense_weights(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> Optional[torch.Tensor]:
"""
Constructs a 2D dense tensor from the JT's weights of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
d_wt = jt.to_padded_dense_weights(
desired_length=2,
padding_value=1.0,
)
# d_wt = [
# [0.1, 0.2],
# [1.0, 1.0],
# [0.3, 1.0],
# [0.4, 1.0],
# [0.5, 1.0],
# [0.6, 0.7],
# ]
"""
if self.weights_or_none() is None:
return None
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.weights(), [self.offsets()], [N], padding_value
)
def lengths(self) -> torch.Tensor:
_lengths = _maybe_compute_lengths(self._lengths, self._offsets)
self._lengths = _lengths
return _lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
return self._lengths
def offsets(self) -> torch.Tensor:
_offsets = _maybe_compute_offsets(self._lengths, self._offsets)
self._offsets = _offsets
return _offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
return self._offsets
def values(self) -> torch.Tensor:
return self._values
def weights(self) -> torch.Tensor:
return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
return self._weights
def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
    """Return a new JaggedTensor with every present tensor copied to `device`.

    Optional members (weights/lengths/offsets) are moved only when set.
    Kept as flat if-statements (no nested helper) so the method stays
    torch.jit.script-able, matching the class's stated contract.
    """
    moved_weights: Optional[torch.Tensor] = None
    src_weights = self._weights
    if src_weights is not None:
        moved_weights = src_weights.to(device, non_blocking=non_blocking)
    moved_lengths: Optional[torch.Tensor] = None
    src_lengths = self._lengths
    if src_lengths is not None:
        moved_lengths = src_lengths.to(device, non_blocking=non_blocking)
    moved_offsets: Optional[torch.Tensor] = None
    src_offsets = self._offsets
    if src_offsets is not None:
        moved_offsets = src_offsets.to(device, non_blocking=non_blocking)
    return JaggedTensor(
        values=self._values.to(device, non_blocking=non_blocking),
        weights=moved_weights,
        lengths=moved_lengths,
        offsets=moved_offsets,
    )
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    """Record `stream` on every present member tensor so the CUDA caching
    allocator does not reuse their memory while the stream may still read it."""
    self._values.record_stream(stream)
    maybe_weights = self._weights
    if maybe_weights is not None:
        maybe_weights.record_stream(stream)
    maybe_lengths = self._lengths
    if maybe_lengths is not None:
        maybe_lengths.record_stream(stream)
    maybe_offsets = self._offsets
    if maybe_offsets is not None:
        maybe_offsets.record_stream(stream)
def __str__(self) -> str:
    """Render the JT as a readable literal; weighted JTs show both sections."""
    offsets = self.offsets()
    rendered_values = _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
    if self._weights is None:
        return "JaggedTensor({\n " + rendered_values + "\n})\n"
    rendered_weights = _jagged_values_string(
        _get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
    )
    return (
        "JaggedTensor({\n"
        + ' "values": '
        + rendered_values
        + ',\n "weights": '
        + rendered_weights
        + "\n})\n"
    )
def construct_jagged_tensors_inference(
    embeddings: torch.Tensor,
    lengths: torch.Tensor,
    values: torch.Tensor,
    embedding_names: List[str],
    need_indices: bool = False,
    features_to_permute_indices: Optional[Dict[str, List[int]]] = None,
    reverse_indices: Optional[torch.Tensor] = None,
) -> Dict[str, JaggedTensor]:
    """Regroup a flat inference embedding lookup into one JaggedTensor per feature name.

    Args:
        embeddings: flat lookup output; rows are grouped per feature in
            `embedding_names` order.
        lengths: per-(feature, sample) lengths; reshaped below to
            (len(embedding_names), -1), so it must be evenly divisible.
        values: the original id values; only used when `need_indices` is True.
        embedding_names: one entry per lookup output group; duplicate names are
            merged (concatenated along dim 1, e.g. column-wise sharding).
        need_indices: if True, attach the per-feature id values as JT weights.
        features_to_permute_indices: optional per-feature reordering of the
            duplicate-name groups before concatenation (CW sharding order).
        reverse_indices: optional row permutation applied to `embeddings` first
            (undoes a dedup/reordering done upstream -- presumably by the caller;
            verify against the caller).

    Returns:
        Dict mapping each feature name to its JaggedTensor.
    """
    with record_function("## construct_jagged_tensors_inference ##"):
        if reverse_indices is not None:
            embeddings = torch.index_select(
                embeddings, 0, reverse_indices.to(torch.int32)
            )
        ret: Dict[str, JaggedTensor] = {}
        # Total number of embedding rows contributed by each feature group;
        # used to split `embeddings` (and optionally `values`) back apart.
        length_per_key: List[int] = _fx_to_list(
            torch.sum(lengths.view(len(embedding_names), -1), dim=1)
        )
        lengths = lengths.view(len(embedding_names), -1)
        lengths_tuple = torch.unbind(lengths, dim=0)
        embeddings_list = torch.split(embeddings, length_per_key, dim=0)
        values_list = torch.split(values, length_per_key) if need_indices else None
        # A feature name can appear multiple times (e.g. one shard per column
        # group); collect every group index per name.
        key_indices = defaultdict(list)
        for i, key in enumerate(embedding_names):
            key_indices[key].append(i)
        for key, indices in key_indices.items():
            # combines outputs in correct order for CW sharding
            indices = (
                _permute_indices(indices, features_to_permute_indices[key])
                if features_to_permute_indices and key in features_to_permute_indices
                else indices
            )
            ret[key] = JaggedTensor(
                # All groups of one feature share lengths, so the first suffices.
                lengths=lengths_tuple[indices[0]],
                values=(
                    embeddings_list[indices[0]]
                    if len(indices) == 1
                    else torch.cat([embeddings_list[i] for i in indices], dim=1)
                ),
                # pyre-ignore
                weights=values_list[indices[0]] if need_indices else None,
            )
        return ret
8,871 | from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingCollection,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
class EmbeddingBagCollection(EmbeddingBagCollectionInterface):
    """
    EmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`).

    It processes sparse data in the form of `KeyedJaggedTensor` with values of the form
    [F X B X L] where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)

    and outputs a `KeyedTensor` with values of the form [B * (F * D)] where:

    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size

    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        device (Optional[torch.device]): default compute device.

    Example::

        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
        )
        ebc = EmbeddingBagCollection(tables=[table_0, table_1])

        #        0       1        2  <-- batch
        # "f1"   [0,1] None    [2]
        # "f2"   [3]    [4]    [5,6,7]
        #  ^
        # feature
        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )

        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        tensor([[-0.8899, -0.1342, -1.9060, -0.0905, -0.2814, -0.9369, -0.7783],
            [ 0.0000,  0.0000,  0.0000,  0.1598,  0.0695,  1.3265, -0.1011],
            [-0.4256, -1.1846, -2.1648, -1.0893,  0.3590, -1.9784, -0.7681]],
            grad_fn=<CatBackward0>)
        print(pooled_embeddings.keys())
        ['f1', 'f2']
        print(pooled_embeddings.offset_per_key())
        tensor([0, 3, 7])
    """

    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self._is_weighted = is_weighted
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        self._embedding_bag_configs = tables
        # Embedding dim repeated once per feature of each table; used as
        # length_per_key of the output KeyedTensor.
        self._lengths_per_embedding: List[int] = []
        self._dtypes: List[int] = []
        table_names = set()
        for embedding_config in tables:
            if embedding_config.name in table_names:
                raise ValueError(f"Duplicate table name {embedding_config.name}")
            table_names.add(embedding_config.name)
            dtype = (
                torch.float32
                if embedding_config.data_type == DataType.FP32
                else torch.float16
            )
            self.embedding_bags[embedding_config.name] = nn.EmbeddingBag(
                num_embeddings=embedding_config.num_embeddings,
                embedding_dim=embedding_config.embedding_dim,
                mode=pooling_type_to_str(embedding_config.pooling),
                device=device,
                include_last_offset=True,
                dtype=dtype,
            )
            # Infer the device from the first constructed table when the
            # caller did not pass one.
            if device is None:
                device = self.embedding_bags[embedding_config.name].weight.device
            self._dtypes.append(embedding_config.data_type.value)
            if not embedding_config.feature_names:
                embedding_config.feature_names = [embedding_config.name]
            self._lengths_per_embedding.extend(
                len(embedding_config.feature_names) * [embedding_config.embedding_dim]
            )
        self._device: torch.device = device or torch.device("cpu")
        self._embedding_names: List[str] = [
            embedding
            for embeddings in get_embedding_names_by_table(tables)
            for embedding in embeddings
        ]
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]
        self.reset_parameters()

    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            KeyedTensor
        """
        flat_feature_names: List[str] = []
        for names in self._feature_names:
            flat_feature_names.extend(names)
        inverse_indices = reorder_inverse_indices(
            inverse_indices=features.inverse_indices_or_none(),
            feature_names=flat_feature_names,
        )
        pooled_embeddings: List[torch.Tensor] = []
        feature_dict = features.to_dict()
        for i, embedding_bag in enumerate(self.embedding_bags.values()):
            for feature_name in self._feature_names[i]:
                f = feature_dict[feature_name]
                per_sample_weights: Optional[torch.Tensor] = None
                if self._is_weighted:
                    # FP16 tables need FP16 per-sample weights for EmbeddingBag.
                    per_sample_weights = (
                        f.weights().half()
                        if self._dtypes[i] == DataType.FP16.value
                        else f.weights()
                    )
                res = embedding_bag(
                    input=f.values(),
                    offsets=f.offsets(),
                    per_sample_weights=(
                        per_sample_weights if self._is_weighted else None
                    ),
                ).float()
                pooled_embeddings.append(res)
        return KeyedTensor(
            keys=self._embedding_names,
            values=process_pooled_embeddings(
                pooled_embeddings=pooled_embeddings,
                inverse_indices=inverse_indices,
            ),
            length_per_key=self._lengths_per_embedding,
        )

    def is_weighted(self) -> bool:
        """Whether this collection expects a weighted KJT input."""
        return self._is_weighted

    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        """Return the table configs this collection was constructed with."""
        return self._embedding_bag_configs

    def device(self) -> torch.device:
        """Return the compute device inferred or supplied at construction."""
        return self._device

    def reset_parameters(self) -> None:
        # FIX: inspect the stored device value, not `self.device`. `device` is a
        # plain method on this class, so `self.device` is a bound method and the
        # isinstance checks below would never match, skipping the meta-device
        # guard and attempting init_fn on meta tensors.
        device = self._device
        if (isinstance(device, torch.device) and device.type == "meta") or (
            isinstance(device, str) and device == "meta"
        ):
            return
        # Initialize embedding bags weights with init_fn
        for table_config in self._embedding_bag_configs:
            assert table_config.init_fn is not None
            param = self.embedding_bags[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)
class EmbeddingCollection(EmbeddingCollectionInterface):
    """
    EmbeddingCollection represents a collection of non-pooled embeddings.

    It processes sparse data in the form of `KeyedJaggedTensor` of the form [F X B X L]
    where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (variable)

    and outputs `Dict[feature (key), JaggedTensor]`.
    Each `JaggedTensor` contains values of the form (B * L) X D
    where:

    * B: batch size
    * L: length of sparse features (jagged)
    * D: each feature's (key's) embedding dimension and lengths are of the form L

    Args:
        tables (List[EmbeddingConfig]): list of embedding tables.
        device (Optional[torch.device]): default compute device.
        need_indices (bool): if we need to pass indices to the final lookup dict.

    Example::

        e1_config = EmbeddingConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        e2_config = EmbeddingConfig(
            name="t2", embedding_dim=3, num_embeddings=10, feature_names=["f2"]
        )

        ec = EmbeddingCollection(tables=[e1_config, e2_config])

        #     0       1        2  <-- batch
        # 0   [0,1] None    [2]
        # 1   [3]    [4]    [5,6,7]
        # ^
        # feature
        features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )
        feature_embeddings = ec(features)
        print(feature_embeddings['f2'].values())
        tensor([[-0.2050,  0.5478,  0.6054],
        [ 0.7352,  0.3210, -3.0399],
        [ 0.1279, -0.1756, -0.4130],
        [ 0.7519, -0.4341, -0.0499],
        [ 0.9329, -1.0697, -0.8095]], grad_fn=<EmbeddingBackward>)
    """

    def __init__(  # noqa C901
        self,
        tables: List[EmbeddingConfig],
        device: Optional[torch.device] = None,
        need_indices: bool = False,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self.embeddings: nn.ModuleDict = nn.ModuleDict()
        self._embedding_configs = tables
        # -1 sentinel: set from the first table, then enforced on the rest.
        self._embedding_dim: int = -1
        self._need_indices: bool = need_indices
        self._device: torch.device = (
            device if device is not None else torch.device("cpu")
        )
        table_names = set()
        for config in tables:
            if config.name in table_names:
                raise ValueError(f"Duplicate table name {config.name}")
            table_names.add(config.name)
            self._embedding_dim = (
                config.embedding_dim if self._embedding_dim < 0 else self._embedding_dim
            )
            if self._embedding_dim != config.embedding_dim:
                raise ValueError(
                    "All tables in a EmbeddingCollection are required to have same embedding dimension."
                    + f" Violating case: {config.name}'s embedding_dim {config.embedding_dim} !="
                    + f" {self._embedding_dim}"
                )
            dtype = (
                torch.float32 if config.data_type == DataType.FP32 else torch.float16
            )
            self.embeddings[config.name] = nn.Embedding(
                num_embeddings=config.num_embeddings,
                embedding_dim=config.embedding_dim,
                device=device,
                dtype=dtype,
            )
            if config.init_fn is not None:
                config.init_fn(self.embeddings[config.name].weight)
            if not config.feature_names:
                config.feature_names = [config.name]
        self._embedding_names_by_table: List[List[str]] = get_embedding_names_by_table(
            tables
        )
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]

    def forward(
        self,
        features: KeyedJaggedTensor,
    ) -> Dict[str, JaggedTensor]:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            Dict[str, JaggedTensor]
        """
        feature_embeddings: Dict[str, JaggedTensor] = {}
        jt_dict: Dict[str, JaggedTensor] = features.to_dict()
        for i, emb_module in enumerate(self.embeddings.values()):
            feature_names = self._feature_names[i]
            embedding_names = self._embedding_names_by_table[i]
            for j, embedding_name in enumerate(embedding_names):
                feature_name = feature_names[j]
                f = jt_dict[feature_name]
                lookup = emb_module(
                    input=f.values(),
                ).float()
                feature_embeddings[embedding_name] = JaggedTensor(
                    values=lookup,
                    lengths=f.lengths(),
                    # Stash the original ids as "weights" so downstream
                    # consumers can recover them when need_indices is set.
                    weights=f.values() if self._need_indices else None,
                )
        return feature_embeddings

    def need_indices(self) -> bool:
        """Whether lookups carry the original id values alongside embeddings."""
        return self._need_indices

    def embedding_dim(self) -> int:
        """The (shared) embedding dimension of every table in this collection."""
        return self._embedding_dim

    def embedding_configs(self) -> List[EmbeddingConfig]:
        """Return the table configs this collection was constructed with."""
        return self._embedding_configs

    def embedding_names_by_table(self) -> List[List[str]]:
        """Per-table embedding output names."""
        return self._embedding_names_by_table

    def device(self) -> torch.device:
        """Return the compute device supplied at construction (default CPU)."""
        return self._device

    def reset_parameters(self) -> None:
        # FIX: inspect the stored device value, not `self.device`. `device` is a
        # plain method on this class, so `self.device` is a bound method and the
        # isinstance checks below would never match, skipping the meta-device
        # guard and attempting init_fn on meta tensors.
        device = self._device
        if (isinstance(device, torch.device) and device.type == "meta") or (
            isinstance(device, str) and device == "meta"
        ):
            return
        # Initialize embedding weights with each table's init_fn
        for table_config in self._embedding_configs:
            assert table_config.init_fn is not None
            param = self.embeddings[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)
The provided code snippet includes the necessary dependencies for implementing the `tower_input_params` function. Write a Python function `def tower_input_params(module: nn.Module) -> Tuple[bool, bool]` to solve the following problem:
Utility to compute the mapping of tower KJT args to pass to the embedding modules. Args: module (nn.Module): the embedding module to inspect. Returns: Tuple[bool, bool]: a tuple of 2 booleans representing whether an unweighted KJT and a weighted KJT are required, respectively.
Here is the function:
def tower_input_params(module: nn.Module) -> Tuple[bool, bool]:
    """
    Utility to compute the mapping of tower KJT args to pass to the embedding modules.

    Args:
        module (nn.Module): the embedding module to inspect.

    Returns:
        Tuple[bool, bool]: tuple of 2 booleans representing if KJT and weighted KJT
            are required, respectively.
    """
    if isinstance(module, EmbeddingCollection):
        # Sequence embeddings never consume a weighted KJT.
        return True, False
    if isinstance(module, EmbeddingBagCollection):
        weighted = module.is_weighted()
        return not weighted, weighted
    # default to assuming both kjt and weight_kjt required
    return True, True
8,872 | import abc
from collections import OrderedDict
from typing import Any, Dict, Iterator, List, Optional, Tuple
import torch
import torch.nn as nn
from torchrec.fx.tracer import is_fx_tracing
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
# pyre-ignore
# pyre-ignore
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
    """
    Represents an (optionally weighted) jagged tensor.

    A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
    slices may be of different lengths. See `KeyedJaggedTensor` for full example.
    Implementation is torch.jit.script-able.

    NOTE:
        We will NOT do input validation as it's expensive, you should always pass in the
        valid lengths, offsets, etc.

    Args:
        values (torch.Tensor): values tensor in dense representation.
        weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
            as values.
        lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
        offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
            offsets.
    """

    _fields = ["_values", "_weights", "_lengths", "_offsets"]

    def __init__(
        self,
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
    ) -> None:
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # At least one of offsets/lengths must be supplied; the other is derived
        # lazily on first access (see lengths()/offsets()).
        _assert_offsets_or_lengths_is_provided(offsets, lengths)
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets

    # FIX: `empty`, `from_dense_lengths` and `from_dense` take no self/cls; they
    # are factory functions and are marked @staticmethod so that calling them on
    # an instance cannot mis-bind the instance into the first parameter.
    @staticmethod
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "JaggedTensor":
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return JaggedTensor(
            values=torch.empty(0, dtype=values_dtype, device=device),
            offsets=torch.empty(0, dtype=lengths_dtype, device=device),
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            weights=weights,
        )

    @staticmethod
    def from_dense_lengths(
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
    ) -> "JaggedTensor":
        """
        Constructs `JaggedTensor` from dense values/weights of shape (B, N,).

        Note that `lengths` is still of shape (B,).
        """
        # Boolean mask selecting, per row, the first lengths[b] columns.
        mask2d = (
            _arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
        ) < lengths.unsqueeze(-1)
        return JaggedTensor(
            values=values[mask2d],
            weights=_optional_mask(weights, mask2d),
            lengths=lengths,
        )

    @staticmethod
    def from_dense(
        values: List[torch.Tensor],
        weights: Optional[List[torch.Tensor]] = None,
    ) -> "JaggedTensor":
        """
        Constructs `JaggedTensor` from dense values/weights of shape (B, N,).

        Note that `lengths` and `offsets` are still of shape (B,).

        Args:
            values (List[torch.Tensor]): a list of tensors for dense representation
            weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
                the same shape as values.

        Returns:
            JaggedTensor: JaggedTensor created from 2D dense tensor.

        Example::

            values = [
                torch.Tensor([1.0]),
                torch.Tensor(),
                torch.Tensor([7.0, 8.0]),
                torch.Tensor([10.0, 11.0, 12.0]),
            ]
            weights = [
                torch.Tensor([1.0]),
                torch.Tensor(),
                torch.Tensor([7.0, 8.0]),
                torch.Tensor([10.0, 11.0, 12.0]),
            ]
            j1 = JaggedTensor.from_dense(
                values=values,
                weights=weights,
            )
            # j1 = [[1.0], [], [7.0, 8.0], [10.0, 11.0, 12.0]]
        """
        values_tensor = torch.cat(values, dim=0)
        lengths = torch.tensor(
            [value.size(0) for value in values],
            dtype=torch.int32,
            device=values_tensor.device,
        )
        weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
        return JaggedTensor(
            values=values_tensor,
            weights=weights_tensor,
            lengths=lengths,
        )

    def to_dense(self) -> List[torch.Tensor]:
        """
        Constructs a dense-representation of the JT's values.

        Returns:
            List[torch.Tensor]: list of tensors.

        Example::

            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, offsets=offsets)
            values_list = jt.to_dense()
            # values_list = [
            #     torch.tensor([1.0, 2.0]),
            #     torch.tensor([]),
            #     torch.tensor([3.0]),
            #     torch.tensor([4.0]),
            #     torch.tensor([5.0]),
            #     torch.tensor([6.0, 7.0, 8.0]),
            # ]
        """
        tensor_list = []
        for index in range(self.offsets().size(0) - 1):
            offset = self.offsets()[index].item()
            next_offset = self.offsets()[index + 1].item()
            tensor_list.append(self.values()[offset:next_offset])
        return tensor_list

    def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
        """
        Constructs a dense-representation of the JT's weights.

        Returns:
            Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.

        Example::

            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
            weights_list = jt.to_dense_weights()
            # weights_list = [
            #     torch.tensor([0.1, 0.2]),
            #     torch.tensor([]),
            #     torch.tensor([0.3]),
            #     torch.tensor([0.4]),
            #     torch.tensor([0.5]),
            #     torch.tensor([0.6, 0.7, 0.8]),
            # ]
        """
        if self.weights_or_none() is None:
            return None
        tensor_list = []
        for index in range(self.offsets().size(0) - 1):
            offset = self.offsets()[index].item()
            next_offset = self.offsets()[index + 1].item()
            tensor_list.append(self.weights()[offset:next_offset])
        return tensor_list

    def to_padded_dense(
        self,
        desired_length: Optional[int] = None,
        padding_value: float = 0.0,
    ) -> torch.Tensor:
        """
        Constructs a 2D dense tensor from the JT's values of shape (B, N,).

        Note that `B` is the length of self.lengths() and `N` is the longest feature
        length or `desired_length`.

        If `desired_length` > `length` we will pad with `padding_value`, otherwise we
        will select the last value at `desired_length`.

        Args:
            desired_length (int): the length of the tensor.
            padding_value (float): padding value if we need to pad.

        Returns:
            torch.Tensor: 2d dense tensor.

        Example::

            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, offsets=offsets)
            dt = jt.to_padded_dense(
                desired_length=2,
                padding_value=10.0,
            )
            # dt = [
            #     [1.0, 2.0],
            #     [10.0, 10.0],
            #     [3.0, 10.0],
            #     [4.0, 10.0],
            #     [5.0, 10.0],
            #     [6.0, 7.0],
            # ]
        """
        if desired_length is None:
            N = int(torch.max(self.lengths()).item())
        else:
            N = desired_length
        return torch.ops.fbgemm.jagged_to_padded_dense(
            self.values(), [self.offsets()], [N], padding_value
        )

    def to_padded_dense_weights(
        self,
        desired_length: Optional[int] = None,
        padding_value: float = 0.0,
    ) -> Optional[torch.Tensor]:
        """
        Constructs a 2D dense tensor from the JT's weights of shape (B, N,).

        Note that `B` is the length of self.lengths() and `N` is the longest feature
        length or `desired_length`.

        If `desired_length` > `length` we will pad with `padding_value`, otherwise we
        will select the last value at `desired_length`.

        Args:
            desired_length (int): the length of the tensor.
            padding_value (float): padding value if we need to pad.

        Returns:
            Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.

        Example::

            values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
            weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
            offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
            jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
            d_wt = jt.to_padded_dense_weights(
                desired_length=2,
                padding_value=1.0,
            )
            # d_wt = [
            #     [0.1, 0.2],
            #     [1.0, 1.0],
            #     [0.3, 1.0],
            #     [0.4, 1.0],
            #     [0.5, 1.0],
            #     [0.6, 0.7],
            # ]
        """
        if self.weights_or_none() is None:
            return None
        if desired_length is None:
            N = int(torch.max(self.lengths()).item())
        else:
            N = desired_length
        return torch.ops.fbgemm.jagged_to_padded_dense(
            self.weights(), [self.offsets()], [N], padding_value
        )

    def lengths(self) -> torch.Tensor:
        # Derived (and cached) from offsets when not supplied at construction.
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths

    def lengths_or_none(self) -> Optional[torch.Tensor]:
        # Non-computing accessor: returns only what is already cached.
        return self._lengths

    def offsets(self) -> torch.Tensor:
        # Derived (and cached) from lengths when not supplied at construction.
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets

    def offsets_or_none(self) -> Optional[torch.Tensor]:
        # Non-computing accessor: returns only what is already cached.
        return self._offsets

    def values(self) -> torch.Tensor:
        return self._values

    def weights(self) -> torch.Tensor:
        # Delegates to a helper that is expected to raise when unweighted.
        return _get_weights_or_throw(self._weights)

    def weights_or_none(self) -> Optional[torch.Tensor]:
        return self._weights

    def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
        # Returns a new JaggedTensor; optional members move only when present.
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        return JaggedTensor(
            values=self._values.to(device, non_blocking=non_blocking),
            weights=(
                weights.to(device, non_blocking=non_blocking)
                if weights is not None
                else None
            ),
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
        )

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Prevent the CUDA caching allocator from reusing these tensors'
        # memory while `stream` may still be using them.
        self._values.record_stream(stream)
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        if weights is not None:
            weights.record_stream(stream)
        if lengths is not None:
            lengths.record_stream(stream)
        if offsets is not None:
            offsets.record_stream(stream)

    def __str__(self) -> str:
        offsets = self.offsets()
        if self._weights is None:
            return (
                "JaggedTensor({\n    "
                + _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
                + "\n})\n"
            )
        return (
            "JaggedTensor({\n"
            + '    "values": '
            + _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
            + ',\n    "weights": '
            + _jagged_values_string(
                _get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
            )
            + "\n})\n"
        )
def position_weighted_module_update_features(
    features: Dict[str, JaggedTensor],
    weighted_features: Dict[str, JaggedTensor],
) -> Dict[str, JaggedTensor]:
    """Merge `weighted_features` into `features` in place and return `features`.

    Entries in `weighted_features` overwrite same-keyed entries in `features`.
    """
    for feature_name, jagged in weighted_features.items():
        features[feature_name] = jagged
    return features
8,873 | import abc
from collections import OrderedDict
from typing import Any, Dict, Iterator, List, Optional, Tuple
import torch
import torch.nn as nn
from torchrec.fx.tracer import is_fx_tracing
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
def offsets_to_range_traceble(
    offsets: torch.Tensor, values: torch.Tensor
) -> torch.Tensor:
    """Expand `offsets` into a per-element range tensor via the fbgemm
    `offsets_range` op, in a form that stays fx-traceable."""
    total_elements = torch.numel(values)
    return torch.ops.fbgemm.offsets_range(offsets.long(), total_elements)
8,874 | import abc
from collections import defaultdict
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from torchrec.modules.embedding_configs import BaseEmbeddingConfig
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
# pyre-ignore
class JaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""
Represents an (optionally weighted) jagged tensor.
A `JaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. See `KeyedJaggedTensor` for full example.
Implementation is torch.jit.script-able.
NOTE:
We will NOT do input validation as it's expensive, you should always pass in the
valid lengths, offsets, etc.
Args:
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if values have weights. Tensor with same shape
as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
"""
_fields = ["_values", "_weights", "_lengths", "_offsets"]
def __init__(
self,
values: torch.Tensor,
weights: Optional[torch.Tensor] = None,
lengths: Optional[torch.Tensor] = None,
offsets: Optional[torch.Tensor] = None,
) -> None:
self._values: torch.Tensor = values
self._weights: Optional[torch.Tensor] = weights
_assert_offsets_or_lengths_is_provided(offsets, lengths)
if offsets is not None:
_assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
if lengths is not None:
_assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
self._lengths: Optional[torch.Tensor] = lengths
self._offsets: Optional[torch.Tensor] = offsets
def empty(
is_weighted: bool = False,
device: Optional[torch.device] = None,
values_dtype: Optional[torch.dtype] = None,
weights_dtype: Optional[torch.dtype] = None,
lengths_dtype: torch.dtype = torch.int32,
) -> "JaggedTensor":
weights = (
torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
)
return JaggedTensor(
values=torch.empty(0, dtype=values_dtype, device=device),
offsets=torch.empty(0, dtype=lengths_dtype, device=device),
lengths=torch.empty(0, dtype=lengths_dtype, device=device),
weights=weights,
)
def from_dense_lengths(
values: torch.Tensor,
lengths: torch.Tensor,
weights: Optional[torch.Tensor] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` is still of shape (B,).
"""
mask2d = (
_arange(end=values.size(1), device=values.device).expand(values.size(0), -1)
) < lengths.unsqueeze(-1)
return JaggedTensor(
values=values[mask2d],
weights=_optional_mask(weights, mask2d),
lengths=lengths,
)
def from_dense(
values: List[torch.Tensor],
weights: Optional[List[torch.Tensor]] = None,
) -> "JaggedTensor":
"""
Constructs `JaggedTensor` from dense values/weights of shape (B, N,).
Note that `lengths` and `offsets` are still of shape (B,).
Args:
values (List[torch.Tensor]): a list of tensors for dense representation
weights (Optional[List[torch.Tensor]]): if values have weights, tensor with
the same shape as values.
Returns:
JaggedTensor: JaggedTensor created from 2D dense tensor.
Example::
values = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
weights = [
torch.Tensor([1.0]),
torch.Tensor(),
torch.Tensor([7.0, 8.0]),
torch.Tensor([10.0, 11.0, 12.0]),
]
j1 = JaggedTensor.from_dense(
values=values,
weights=weights,
)
# j1 = [[1.0], [], [7.0], [8.0], [10.0, 11.0, 12.0]]
"""
values_tensor = torch.cat(values, dim=0)
lengths = torch.tensor(
[value.size(0) for value in values],
dtype=torch.int32,
device=values_tensor.device,
)
weights_tensor = torch.cat(weights, dim=0) if weights is not None else None
return JaggedTensor(
values=values_tensor,
weights=weights_tensor,
lengths=lengths,
)
def to_dense(self) -> List[torch.Tensor]:
"""
Constructs a dense-representation of the JT's values.
Returns:
List[torch.Tensor]: list of tensors.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
values_list = jt.to_dense()
# values_list = [
# torch.tensor([1.0, 2.0]),
# torch.tensor([]),
# torch.tensor([3.0]),
# torch.tensor([4.0]),
# torch.tensor([5.0]),
# torch.tensor([6.0, 7.0, 8.0]),
# ]
"""
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.values()[offset:next_offset])
return tensor_list
def to_dense_weights(self) -> Optional[List[torch.Tensor]]:
"""
Constructs a dense-representation of the JT's weights.
Returns:
Optional[List[torch.Tensor]]: list of tensors, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
weights_list = jt.to_dense_weights()
# weights_list = [
# torch.tensor([0.1, 0.2]),
# torch.tensor([]),
# torch.tensor([0.3]),
# torch.tensor([0.4]),
# torch.tensor([0.5]),
# torch.tensor([0.6, 0.7, 0.8]),
# ]
"""
if self.weights_or_none() is None:
return None
tensor_list = []
for index in range(self.offsets().size(0) - 1):
offset = self.offsets()[index].item()
next_offset = self.offsets()[index + 1].item()
tensor_list.append(self.weights()[offset:next_offset])
return tensor_list
def to_padded_dense(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> torch.Tensor:
"""
Constructs a 2D dense tensor from the JT's values of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
torch.Tensor: 2d dense tensor.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, offsets=offsets)
dt = jt.to_padded_dense(
desired_length=2,
padding_value=10.0,
)
# dt = [
# [1.0, 2.0],
# [10.0, 10.0],
# [3.0, 10.0],
# [4.0, 10.0],
# [5.0, 10.0],
# [6.0, 7.0],
# ]
"""
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.values(), [self.offsets()], [N], padding_value
)
def to_padded_dense_weights(
self,
desired_length: Optional[int] = None,
padding_value: float = 0.0,
) -> Optional[torch.Tensor]:
"""
Constructs a 2D dense tensor from the JT's weights of shape (B, N,).
Note that `B` is the length of self.lengths() and `N` is the longest feature
length or `desired_length`.
If `desired_length` > `length` we will pad with `padding_value`, otherwise we
will select the last value at `desired_length`.
Args:
desired_length (int): the length of the tensor.
padding_value (float): padding value if we need to pad.
Returns:
Optional[torch.Tensor]: 2d dense tensor, `None` if no weights.
Example::
values = torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
weights = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
offsets = torch.IntTensor([0, 2, 2, 3, 4, 5, 8])
jt = JaggedTensor(values=values, weights=weights, offsets=offsets)
d_wt = jt.to_padded_dense_weights(
desired_length=2,
padding_value=1.0,
)
# d_wt = [
# [0.1, 0.2],
# [1.0, 1.0],
# [0.3, 1.0],
# [0.4, 1.0],
# [0.5, 1.0],
# [0.6, 0.7],
# ]
"""
if self.weights_or_none() is None:
return None
if desired_length is None:
N = int(torch.max(self.lengths()).item())
else:
N = desired_length
return torch.ops.fbgemm.jagged_to_padded_dense(
self.weights(), [self.offsets()], [N], padding_value
)
def lengths(self) -> torch.Tensor:
    """Return per-row lengths, deriving (and caching) them from offsets if needed."""
    # Lazily materialize from offsets on first access, then memoize on self.
    self._lengths = _maybe_compute_lengths(self._lengths, self._offsets)
    return self._lengths
def lengths_or_none(self) -> Optional[torch.Tensor]:
    """Return the cached lengths tensor, or ``None`` if never materialized."""
    return self._lengths
def offsets(self) -> torch.Tensor:
    """Return row offsets, deriving (and caching) them from lengths if needed."""
    # Lazily materialize from lengths on first access, then memoize on self.
    self._offsets = _maybe_compute_offsets(self._lengths, self._offsets)
    return self._offsets
def offsets_or_none(self) -> Optional[torch.Tensor]:
    """Return the cached offsets tensor, or ``None`` if never materialized."""
    return self._offsets
def values(self) -> torch.Tensor:
    """Return the flat values tensor backing this JaggedTensor."""
    return self._values
def weights(self) -> torch.Tensor:
    """Return the weights tensor, raising if this JaggedTensor has none."""
    return _get_weights_or_throw(self._weights)
def weights_or_none(self) -> Optional[torch.Tensor]:
    """Return the weights tensor, or ``None`` when this JaggedTensor is unweighted."""
    return self._weights
def to(self, device: torch.device, non_blocking: bool = False) -> "JaggedTensor":
    """
    Return a copy of this JaggedTensor with all component tensors on ``device``.

    Optional components (weights, lengths, offsets) are moved only when
    present; absent components remain ``None``.

    Args:
        device (torch.device): target device.
        non_blocking (bool): forwarded to ``Tensor.to`` to allow async copies.

    Returns:
        JaggedTensor: a new JaggedTensor whose tensors live on ``device``.
    """

    def _move(t: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
        # Only move tensors that actually exist.
        return t.to(device, non_blocking=non_blocking) if t is not None else None

    return JaggedTensor(
        values=self._values.to(device, non_blocking=non_blocking),
        weights=_move(self._weights),
        lengths=_move(self._lengths),
        offsets=_move(self._offsets),
    )
def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
    """
    Mark every constituent tensor as used on ``stream``.

    See ``torch.Tensor.record_stream``: this prevents the CUDA caching
    allocator from reusing the tensors' memory before work queued on
    ``stream`` has completed.
    """
    self._values.record_stream(stream)
    for maybe_tensor in (self._weights, self._lengths, self._offsets):
        if maybe_tensor is not None:
            maybe_tensor.record_stream(stream)
def __str__(self) -> str:
    # Human-readable rendering of the jagged structure. The exact spacing of
    # the output is relied upon by doctests/snapshots elsewhere — do not
    # reformat these literals.
    offsets = self.offsets()
    # Unweighted: render only the values, row by row.
    if self._weights is None:
        return (
            "JaggedTensor({\n  "
            + _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
            + "\n})\n"
        )
    # Weighted: render values and weights as two labelled sections sharing
    # the same row structure (offsets).
    return (
        "JaggedTensor({\n"
        + '  "values": '
        + _jagged_values_string(self._values, offsets, 0, len(offsets) - 1)
        + ',\n  "weights": '
        + _jagged_values_string(
            _get_weights_or_throw(self._weights), offsets, 0, len(offsets) - 1
        )
        + "\n})\n"
    )
The provided code snippet includes necessary dependencies for implementing the `apply_mc_method_to_jt_dict` function. Write a Python function `def apply_mc_method_to_jt_dict( method: str, features_dict: Dict[str, JaggedTensor], table_to_features: Dict[str, List[str]], managed_collisions: nn.ModuleDict, ) -> Dict[str, JaggedTensor]` to solve the following problem:
Applies an MC method to a dictionary of JaggedTensors, returning the updated dictionary with the same ordering
Here is the function:
def apply_mc_method_to_jt_dict(
    method: str,
    features_dict: Dict[str, JaggedTensor],
    table_to_features: Dict[str, List[str]],
    managed_collisions: nn.ModuleDict,
) -> Dict[str, JaggedTensor]:
    """
    Applies an MC method to a dictionary of JaggedTensors, returning the updated dictionary with same ordering

    For each table, the named ``method`` of its managed-collision module is
    invoked with that table's slice of features, and the results overwrite
    the corresponding entries of a shallow copy of ``features_dict`` (the
    original key ordering is preserved).

    Args:
        method (str): attribute name of the MC-module method to call.
        features_dict (Dict[str, JaggedTensor]): per-feature jagged tensors.
        table_to_features (Dict[str, List[str]]): feature names per table.
        managed_collisions (nn.ModuleDict): MC module per table name.

    Returns:
        Dict[str, JaggedTensor]: updated dictionary with the same ordering.
    """
    updated: Dict[str, JaggedTensor] = features_dict.copy()
    for table_name, feature_names in table_to_features.items():
        # Slice out just this table's features for the MC module call.
        table_input: Dict[str, JaggedTensor] = {
            name: features_dict[name] for name in feature_names
        }
        mc_fn = getattr(managed_collisions[table_name], method)
        updated.update(mc_fn(table_input))
    return updated
8,875 | import abc
from collections import defaultdict
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from torchrec.modules.embedding_configs import BaseEmbeddingConfig
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
The provided code snippet includes necessary dependencies for implementing the `dynamic_threshold_filter` function. Write a Python function `def dynamic_threshold_filter( id_counts: torch.Tensor, threshold_skew_multiplier: float = 10.0, ) -> Tuple[torch.Tensor, torch.Tensor]` to solve the following problem:
Threshold is total_count / num_ids * threshold_skew_multiplier. An id is added if its count is strictly greater than the threshold.
Here is the function:
def dynamic_threshold_filter(
    id_counts: torch.Tensor,
    threshold_skew_multiplier: float = 10.0,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Keep ids whose count strictly exceeds a skewed uniform-share threshold.

    The threshold is ``total_count / num_ids * threshold_skew_multiplier``,
    i.e. the uniform share of the total count scaled by the multiplier.

    Args:
        id_counts (torch.Tensor): per-id occurrence counts.
        threshold_skew_multiplier (float): scale applied to the uniform share.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: boolean mask of kept ids and the
        threshold used.
    """
    num_ids = id_counts.numel()
    total_count = id_counts.sum()
    # Uniform share (1 / num_ids) skewed by the multiplier, scaled by the
    # total mass of counts.
    threshold = (1 / num_ids) * threshold_skew_multiplier * total_count
    return id_counts > threshold, threshold
8,876 | import abc
from collections import defaultdict
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from torchrec.modules.embedding_configs import BaseEmbeddingConfig
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
The provided code snippet includes necessary dependencies for implementing the `average_threshold_filter` function. Write a Python function `def average_threshold_filter( id_counts: torch.Tensor, ) -> Tuple[torch.Tensor, torch.Tensor]` to solve the following problem:
Threshold is average of id_counts. An id is added if its count is strictly greater than the mean.
Here is the function:
def average_threshold_filter(
    id_counts: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Keep ids whose count strictly exceeds the mean count.

    Args:
        id_counts (torch.Tensor): per-id occurrence counts.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: boolean mask of kept ids and the
        mean used as the threshold.
    """
    # ``mean`` requires a floating dtype; promote integral counts first.
    counts = id_counts if id_counts.dtype == torch.float else id_counts.float()
    threshold = counts.mean()
    return counts > threshold, threshold
8,877 | import abc
from collections import defaultdict
from typing import Callable, Dict, List, NamedTuple, Optional, Tuple, Union
import torch
from torch import nn
from torchrec.modules.embedding_configs import BaseEmbeddingConfig
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor
The provided code snippet includes necessary dependencies for implementing the `probabilistic_threshold_filter` function. Write a Python function `def probabilistic_threshold_filter( id_counts: torch.Tensor, per_id_probability: float = 0.01, ) -> Tuple[torch.Tensor, torch.Tensor]` to solve the following problem:
Each id has probability per_id_probability of being added per occurrence. For example, if per_id_probability is 0.01 and an id appears 100 times, then it has roughly a 63% chance of being added. More precisely, the id score is 1 - (1 - per_id_probability) ^ id_count, and for a randomly generated threshold, the id score is the chance of it being added.
Here is the function:
def probabilistic_threshold_filter(
    id_counts: torch.Tensor,
    per_id_probability: float = 0.01,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Keep each id with probability ``1 - (1 - per_id_probability) ** count``.

    Each occurrence of an id independently has ``per_id_probability`` chance
    of admitting it; e.g. with ``per_id_probability=0.01`` an id seen 100
    times has roughly a 63% chance of being kept. A uniform random threshold
    is drawn per id and the id is kept when its score strictly exceeds it.

    Args:
        id_counts (torch.Tensor): per-id occurrence counts.
        per_id_probability (float): admission chance per single occurrence.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: boolean mask of kept ids and the
        random thresholds drawn.
    """
    keep_complement = torch.full_like(
        id_counts, 1 - per_id_probability, dtype=torch.float
    )
    # P(keep) = 1 - P(no single occurrence admits it) = 1 - (1 - p) ** count.
    id_scores = 1 - torch.pow(keep_complement, id_counts)
    threshold: torch.Tensor = torch.rand(id_counts.size(), device=id_counts.device)
    return id_scores > threshold, threshold
8,878 | from typing import List
import torch
from torch import nn
from torch.fx import wrap
def _get_flatten_input(inputs: List[torch.Tensor]) -> torch.Tensor:
return torch.cat(
[input.flatten(1) for input in inputs],
dim=1,
) | null |
8,879 | import copy
import itertools
from collections import defaultdict
from typing import Any, cast, Dict, Iterator, List, Optional, Set, Tuple, Type
import torch
import torch.nn as nn
import torchrec.optim as trec_optim
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
EmbeddingLocation,
SplitTableBatchedEmbeddingBagsCodegen,
)
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DataType,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.optim.fused import FusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
def convert_optimizer_type_and_kwargs(
    optimizer_type: Type[torch.optim.Optimizer],
    optimizer_kwargs: Dict[str, Any],
) -> Optional[Tuple[EmbOptimType, Dict[str, Any]]]:
    """
    Translate a canonical torch optimizer spec into its FBGEMM equivalent.

    Renames the ``lr`` kwarg to FBGEMM's ``learning_rate`` spelling and maps
    the optimizer class onto the matching ``EmbOptimType``.

    Args:
        optimizer_type: torch optimizer class to fuse.
        optimizer_kwargs: kwargs for that optimizer. Not mutated — a deep
            copy is adjusted and returned.

    Returns:
        The ``(EmbOptimType, kwargs)`` pair, or ``None`` when the optimizer
        has no fused FBGEMM counterpart.
    """
    fused_kwargs = copy.deepcopy(optimizer_kwargs)
    # FBGEMM spells the learning-rate kwarg differently.
    if "lr" in fused_kwargs:
        fused_kwargs["learning_rate"] = fused_kwargs.pop("lr")
    emb_type_by_optimizer = {
        torch.optim.SGD: EmbOptimType.EXACT_SGD,
        torch.optim.Adagrad: EmbOptimType.EXACT_ADAGRAD,
        trec_optim.RowWiseAdagrad: EmbOptimType.EXACT_ROWWISE_ADAGRAD,
        torch.optim.Adam: EmbOptimType.ADAM,
    }
    if optimizer_type in emb_type_by_optimizer:
        return (emb_type_by_optimizer[optimizer_type], fused_kwargs)
    return None
8,880 | import copy
import itertools
from collections import defaultdict
from typing import Any, cast, Dict, Iterator, List, Optional, Set, Tuple, Type
import torch
import torch.nn as nn
import torchrec.optim as trec_optim
from fbgemm_gpu.split_embedding_configs import EmbOptimType
from fbgemm_gpu.split_table_batched_embeddings_ops_training import (
ComputeDevice,
EmbeddingLocation,
SplitTableBatchedEmbeddingBagsCodegen,
)
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DataType,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.optim.fused import FusedOptimizer, FusedOptimizerModule
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizer
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
class FusedEmbeddingBagCollection(
    EmbeddingBagCollectionInterface, FusedOptimizerModule
):
    """
    FusedEmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`).
    It utilizes a technique called Optimizer fusion (register the optimizer with model). The semantics
    of this is that during the backwards pass, the registered optimizer will be called.
    It processes sparse data in the form of `KeyedJaggedTensor` with values of the form
    [F X B X L] where:
    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)
    and outputs a `KeyedTensor` with values of the form [B x F x D] where:
    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size
    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        optimizer (Type[torch.optim.Optimizer]): fusion optimizer type
        optimizer_kwargs: Dict[str, Any]: fusion optimizer kwargs
        device (Optional[torch.device]): compute device.
    Example::
        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=4, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=8, num_embeddings=10, feature_names=["f2"]
        )
        ebc = FusedEmbeddingBagCollection(tables=[table_0, table_1], optimizer_type=torch.optim.SGD, optimizer_kwargs={"lr": .01})
        # 0 1 2 <-- batch
        # "f1" [0,1] None [2]
        # "f2" [3] [4] [5,6,7]
        # ^
        # feature
        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )
        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        tensor([[ 0.2093, 0.1395, 0.1571, 0.3583, 0.0421, 0.0037, -0.0692, 0.0663,
        0.2166, -0.3150, -0.2771, -0.0301],
        [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0165, -0.1225, 0.2483, 0.0624,
        -0.1168, -0.0509, -0.1309, 0.3059],
        [ 0.0811, -0.1779, -0.1443, 0.1097, -0.4410, -0.4036, 0.4458, -0.2735,
        -0.3080, -0.2102, -0.0564, 0.5583]], grad_fn=<CatBackward0>)
        print(pooled_embeddings.keys())
        ['f1', 'f2']
        print(pooled_embeddings.offset_per_key())
        [0, 4, 12]
    """
    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        optimizer_type: Type[torch.optim.Optimizer],
        optimizer_kwargs: Dict[str, Any],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        location: Optional[EmbeddingLocation] = None,
    ) -> None:
        super().__init__()
        self._optimizer_type = optimizer_type
        self._optimizer_kwargs = optimizer_kwargs
        self._device: torch.device = (
            device if device is not None else torch.device("cpu")
        )
        # Translate the canonical torch optimizer into its fused FBGEMM
        # counterpart; unsupported optimizers cannot be fused at all.
        emb_optim_and_kwargs = convert_optimizer_type_and_kwargs(
            optimizer_type, optimizer_kwargs
        )
        if emb_optim_and_kwargs is None:
            raise ValueError(
                f"Cannot fuse optimizer_type={optimizer_type} with kwargs {optimizer_kwargs}"
            )
        (emb_optim_type, emb_opt_kwargs) = emb_optim_and_kwargs
        # GPU-resident (or UVM-managed) table placements require a CUDA or
        # meta device; validate up front before any table is created.
        if location in [
            EmbeddingLocation.DEVICE,
            EmbeddingLocation.MANAGED,
            EmbeddingLocation.MANAGED_CACHING,
        ]:
            assert device is not None and device.type in [
                "cuda",
                "meta",
            ], f"Using location={location} requires device=cuda or meta"
        if device is None:
            device = torch.device("cpu")
        # Derive a sensible default placement from the device when the
        # caller did not pick one explicitly.
        if location is None:
            if device.type in ["cpu", "meta"]:
                location = EmbeddingLocation.HOST
            elif device.type == "cuda":
                location = EmbeddingLocation.DEVICE
            else:
                raise ValueError("EmbeddingLocation could not be set")
        self._is_weighted = is_weighted
        self._embedding_bag_configs = tables
        # Registering in a List instead of ModuleList because we want don't want them to be auto-registered.
        # Their states will be modified via self.embedding_bags
        self._emb_modules: List[nn.Module] = []
        # Tables sharing (pooling, data_type) are batched into one fused
        # FBGEMM kernel; group them by that key.
        self._key_to_tables: Dict[
            Tuple[PoolingType, DataType], List[EmbeddingBagConfig]
        ] = defaultdict(list)
        self._length_per_key: List[int] = []
        for table in tables:
            self._length_per_key.extend(
                [table.embedding_dim] * len(table.feature_names)
            )
            key = (table.pooling, table.data_type)
            self._key_to_tables[key].append(table)
        optims = []
        for key, tables in self._key_to_tables.items():
            (pooling, data_type) = key
            # One fused lookup module (and fused optimizer) per group.
            emb_module = _BatchedFusedEmbeddingLookups(
                cast(List[BaseEmbeddingConfig], tables),
                data_type=data_type,
                pooling=pooling,
                optimizer_type=emb_optim_type,
                optimizer_kwargs=emb_opt_kwargs,
                device=device,
                embedding_location=location,
            )
            self._emb_modules.append(emb_module)
            params: Dict[str, torch.Tensor] = {}
            for param_key, weight in emb_module.fused_optimizer().params.items():
                params[f"embedding_bags.{param_key}"] = weight
            optims.append(("", emb_module.fused_optimizer()))
        self._optim: CombinedOptimizer = CombinedOptimizer(optims)
        self._embedding_names: List[str] = list(
            itertools.chain(*get_embedding_names_by_table(self._embedding_bag_configs))
        )
        # We map over the parameters from FBGEMM backed kernels to the canonical nn.EmbeddingBag
        # representation. This provides consistency between this class and the EmbeddingBagCollection's
        # nn.Module API calls (state_dict, named_modules, etc)
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        for (_key, tables), emb_module in zip(
            self._key_to_tables.items(), self._emb_modules
        ):
            for embedding_config, weight in zip(
                tables,
                emb_module.split_embedding_weights(),
                # torch._tensor.Tensor]` is not a function.
            ):
                self.embedding_bags[embedding_config.name] = torch.nn.Module()
                self.embedding_bags[embedding_config.name].register_parameter(
                    "weight", torch.nn.Parameter(weight)
                )
    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].
        Returns:
            KeyedTensor
        """
        assert features is not None
        feature_dict = features.to_dict()
        embeddings = []
        # Each fused kernel handles one (pooling, dtype) group: collect the
        # group's features into one flat indices/lengths batch per kernel.
        for emb_op, (_key, tables) in zip(
            self._emb_modules, self._key_to_tables.items()
        ):
            indicies = []
            lengths = []
            offsets = []
            weights = []
            for table in tables:
                for feature in table.feature_names:
                    f = feature_dict[feature]
                    indicies.append(f.values())
                    lengths.append(f.lengths())
                    if self._is_weighted:
                        weights.append(f.weights())
            indicies = torch.cat(indicies)
            lengths = torch.cat(lengths)
            # FBGEMM kernels consume offsets (inclusive cumsum of lengths).
            offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            if self._is_weighted:
                weights = torch.cat(weights)
            embeddings.append(
                emb_op(
                    indicies.int(),
                    offsets.int(),
                    weights if self._is_weighted else None,
                )
            )
        # Concatenate per-group pooled outputs along the embedding dim to
        # form the [B, sum(F*D)] values of the KeyedTensor.
        embeddings = torch.cat(embeddings, dim=1)
        return KeyedTensor(
            keys=self._embedding_names,
            values=embeddings,
            length_per_key=self._length_per_key,
        )
    def _get_name(self) -> str:
        return "FusedEmbeddingBagCollection"
    def device(self) -> torch.device:
        return self._device
    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        return self._embedding_bag_configs
    def is_weighted(self) -> bool:
        return self._is_weighted
    def optimizer_type(self) -> Type[torch.optim.Optimizer]:
        return self._optimizer_type
    def optimizer_kwargs(self) -> Dict[str, Any]:
        return self._optimizer_kwargs
    def fused_optimizer(self) -> KeyedOptimizer:
        return self._optim
class FusedEmbeddingCollection(EmbeddingCollectionInterface, FusedOptimizerModule):
    """
    EmbeddingCollection represents a unsharded collection of non-pooled embeddings. The semantics
    of this module is that during the backwards pass, the registered optimizer will be called.
    It processes sparse data in the form of `KeyedJaggedTensor` of the form [F X B X L]
    where:
    * F: features (keys)
    * B: batch size
    * L: length of sparse features (variable)
    and outputs `Dict[feature (key), JaggedTensor]`.
    Each `JaggedTensor` contains values of the form (B * L) X D
    where:
    * B: batch size
    * L: length of sparse features (jagged)
    * D: each feature's (key's) embedding dimension and lengths are of the form L
    Args:
        tables (List[EmbeddingConfig]): list of embedding tables.
        device (Optional[torch.device]): default compute device.
        need_indices (bool): if we need to pass indices to the final lookup dict.
    Example::
        e1_config = EmbeddingConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        e2_config = EmbeddingConfig(
            name="t2", embedding_dim=3, num_embeddings=10, feature_names=["f2"]
        )
        ec = EmbeddingCollection(tables=[e1_config, e2_config])
        # 0 1 2 <-- batch
        # 0 [0,1] None [2]
        # 1 [3] [4] [5,6,7]
        # ^
        # feature
        features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )
        feature_embeddings = ec(features)
        print(feature_embeddings['f2'].values())
        tensor([[-0.2050, 0.5478, 0.6054],
        [ 0.7352, 0.3210, -3.0399],
        [ 0.1279, -0.1756, -0.4130],
        [ 0.7519, -0.4341, -0.0499],
        [ 0.9329, -1.0697, -0.8095]], grad_fn=<EmbeddingBackward>)
    """
    # noqa lint
    def __init__(
        self,
        tables: List[EmbeddingConfig],
        optimizer_type: Type[torch.optim.Optimizer],
        optimizer_kwargs: Dict[str, Any],
        device: Optional[torch.device] = None,
        need_indices: bool = False,
        location: Optional[EmbeddingLocation] = None,
    ) -> None:
        super().__init__()
        self._optimizer_type = optimizer_type
        self._optimizer_kwargs = optimizer_kwargs
        # Translate the canonical torch optimizer into its fused FBGEMM
        # counterpart; unsupported optimizers cannot be fused at all.
        emb_optim_and_kwargs = convert_optimizer_type_and_kwargs(
            optimizer_type, optimizer_kwargs
        )
        if emb_optim_and_kwargs is None:
            raise ValueError(
                f"Cannot fuse optimizer_type={optimizer_type} with kwargs {optimizer_kwargs}"
            )
        (emb_optim_type, emb_opt_kwargs) = emb_optim_and_kwargs
        # GPU-resident (or UVM-managed) table placements require a CUDA or
        # meta device.
        if location in [
            EmbeddingLocation.DEVICE,
            EmbeddingLocation.MANAGED,
            EmbeddingLocation.MANAGED_CACHING,
        ]:
            assert device is not None and device.type in [
                "cuda",
                "meta",
            ], f"Using location={location} requires device=cuda or meta"
        if device is None:
            device = torch.device("cpu")
        assert device.type in [
            "cuda",
            "meta",
        ], "FusedEmbeddingCollection is only supported for device in [CUDA, meta] currently. There are plans to support device=CPU."
        if location is None:
            if device.type in ["cpu", "meta"]:
                location = EmbeddingLocation.HOST
            elif device.type == "cuda":
                location = EmbeddingLocation.DEVICE
            else:
                raise ValueError("EmbeddingLocation could not be set")
        self._embedding_configs = tables
        self._need_indices: bool = need_indices
        self._embedding_dim: int = -1
        # Registering in a List instead of ModuleList because we want don't want them to be auto-registered.
        # Their states will be modified via self.embedding_bags
        self._emb_modules: List[nn.Module] = []
        # Tables sharing a data type are batched into one fused kernel.
        self._key_to_tables: Dict[DataType, List[EmbeddingConfig]] = defaultdict(list)
        seen_features = set()
        # Features used by more than one table are disambiguated later with
        # a "feature@table" suffix in forward().
        self._shared_features: Set[str] = set()
        for table in tables:
            key = table.data_type
            self._key_to_tables[key].append(table)
            if self._embedding_dim == -1:
                self._embedding_dim = table.embedding_dim
            elif self._embedding_dim != table.embedding_dim:
                raise ValueError(
                    "All tables in a EmbeddingCollection are required to have same embedding dimension."
                    + f" Violating case: {table}'s embedding_dim {table.embedding_dim} !="
                    + f" {self._embedding_dim}"
                )
            for feature in table.feature_names:
                if feature in seen_features:
                    self._shared_features.add(feature)
                else:
                    seen_features.add(feature)
        optims = []
        for key, tables in self._key_to_tables.items():
            data_type = key
            # Non-pooled lookup: PoolingType.NONE keeps per-id rows.
            emb_module = _BatchedFusedEmbeddingLookups(
                cast(List[BaseEmbeddingConfig], tables),
                data_type=data_type,
                pooling=PoolingType.NONE,
                optimizer_type=emb_optim_type,
                optimizer_kwargs=emb_opt_kwargs,
                device=device,
                embedding_location=location,
            )
            self._emb_modules.append(emb_module)
            params: Dict[str, torch.Tensor] = {}
            for param_key, weight in emb_module.fused_optimizer().params.items():
                params[f"embeddings.{param_key}"] = weight
            optims.append(("", emb_module.fused_optimizer()))
        self._optim: CombinedOptimizer = CombinedOptimizer(optims)
        self._embedding_names: List[str] = list(
            itertools.chain(*get_embedding_names_by_table(self._embedding_configs))
        )
        self._embedding_names_by_table: List[List[str]] = get_embedding_names_by_table(
            self._embedding_configs,
        )
        # We map over the parameters from FBGEMM backed kernels to the canonical nn.EmbeddingBag
        # representation. This provides consistency between this class and the EmbeddingBagCollection's
        # nn.Module API calls (state_dict, named_modules, etc)
        self.embeddings: nn.ModuleDict = nn.ModuleDict()
        for (_key, tables), emb_module in zip(
            self._key_to_tables.items(), self._emb_modules
        ):
            for embedding_config, weight in zip(
                tables,
                emb_module.split_embedding_weights(),
                # torch._tensor.Tensor]` is not a function.
            ):
                self.embeddings[embedding_config.name] = torch.nn.Module()
                self.embeddings[embedding_config.name].register_parameter(
                    "weight", torch.nn.Parameter(weight)
                )
    def forward(self, features: KeyedJaggedTensor) -> Dict[str, JaggedTensor]:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].
        Returns:
            Dict[str, JaggedTensor]
        """
        assert features is not None
        feature_dict = features.to_dict()
        feature_embeddings: Dict[str, JaggedTensor] = {}
        # One fused kernel per data-type group: batch the group's features
        # into a single flat lookup, then split the result back per feature.
        for emb_op, (_key, tables) in zip(
            self._emb_modules, self._key_to_tables.items()
        ):
            indicies = []
            lengths = []
            offsets = []
            feature_names = []
            feature_lengths = []
            feature_values = []
            splits = []
            for table in tables:
                for feature in table.feature_names:
                    f = feature_dict[feature]
                    indicies.append(f.values())
                    lengths.append(f.lengths())
                    # Disambiguate features used by multiple tables.
                    if feature in self._shared_features:
                        feature = f"{feature}@{table.name}"
                    feature_names.append(feature)
                    feature_values.append(f.values())
                    feature_lengths.append(f.lengths())
                    # Number of looked-up rows belonging to this feature,
                    # used to split the batched output back apart.
                    splits.append(torch.sum(feature_lengths[-1]))
            indicies = torch.cat(indicies)
            lengths = torch.cat(lengths)
            offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            lookups = emb_op(indicies.int(), offsets.int(), weights=None)
            lookups = torch.split(lookups, split_size_or_sections=splits)
            for feature, lookup, feature_length, values in zip(
                feature_names, lookups, feature_lengths, feature_values
            ):
                feature_embeddings[feature] = JaggedTensor(
                    values=lookup,
                    lengths=feature_length,
                    # hack to return kJT positional indicies in return type.
                    weights=values if self.need_indices() else None,
                )
        return feature_embeddings
    def _get_name(self) -> str:
        return "FusedEmbeddingCollection"
    def embedding_configs(self) -> List[EmbeddingConfig]:
        return self._embedding_configs
    def embedding_names_by_table(self) -> List[List[str]]:
        return self._embedding_names_by_table
    def embedding_dim(self) -> int:
        return self._embedding_dim
    def optimizer_type(self) -> Type[torch.optim.Optimizer]:
        return self._optimizer_type
    def optimizer_kwargs(self) -> Dict[str, Any]:
        return self._optimizer_kwargs
    def fused_optimizer(self) -> KeyedOptimizer:
        return self._optim
    def need_indices(self) -> bool:
        return self._need_indices
class EmbeddingBagCollection(EmbeddingBagCollectionInterface):
    """
    EmbeddingBagCollection represents a collection of pooled embeddings (`EmbeddingBags`).
    It processes sparse data in the form of `KeyedJaggedTensor` with values of the form
    [F X B X L] where:
    * F: features (keys)
    * B: batch size
    * L: length of sparse features (jagged)
    and outputs a `KeyedTensor` with values of the form [B * (F * D)] where:
    * F: features (keys)
    * D: each feature's (key's) embedding dimension
    * B: batch size
    Args:
        tables (List[EmbeddingBagConfig]): list of embedding tables.
        is_weighted (bool): whether input `KeyedJaggedTensor` is weighted.
        device (Optional[torch.device]): default compute device.
    Example::
        table_0 = EmbeddingBagConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        table_1 = EmbeddingBagConfig(
            name="t2", embedding_dim=4, num_embeddings=10, feature_names=["f2"]
        )
        ebc = EmbeddingBagCollection(tables=[table_0, table_1])
        # 0 1 2 <-- batch
        # "f1" [0,1] None [2]
        # "f2" [3] [4] [5,6,7]
        # ^
        # feature
        features = KeyedJaggedTensor(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )
        pooled_embeddings = ebc(features)
        print(pooled_embeddings.values())
        tensor([[-0.8899, -0.1342, -1.9060, -0.0905, -0.2814, -0.9369, -0.7783],
        [ 0.0000, 0.0000, 0.0000, 0.1598, 0.0695, 1.3265, -0.1011],
        [-0.4256, -1.1846, -2.1648, -1.0893, 0.3590, -1.9784, -0.7681]],
        grad_fn=<CatBackward0>)
        print(pooled_embeddings.keys())
        ['f1', 'f2']
        print(pooled_embeddings.offset_per_key())
        tensor([0, 3, 7])
    """
    def __init__(
        self,
        tables: List[EmbeddingBagConfig],
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self._is_weighted = is_weighted
        self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
        self._embedding_bag_configs = tables
        self._lengths_per_embedding: List[int] = []
        self._dtypes: List[int] = []
        table_names = set()
        for embedding_config in tables:
            # Table names key the ModuleDict, so they must be unique.
            if embedding_config.name in table_names:
                raise ValueError(f"Duplicate table name {embedding_config.name}")
            table_names.add(embedding_config.name)
            # NOTE(review): only FP32/FP16 are distinguished here; other
            # DataType values silently fall through to float16 — confirm
            # that is intended.
            dtype = (
                torch.float32
                if embedding_config.data_type == DataType.FP32
                else torch.float16
            )
            # NOTE(review): `pooling_type_to_str`, `reorder_inverse_indices`
            # and `process_pooled_embeddings` are not among the imports
            # visible in this chunk — confirm they are imported elsewhere in
            # this module.
            self.embedding_bags[embedding_config.name] = nn.EmbeddingBag(
                num_embeddings=embedding_config.num_embeddings,
                embedding_dim=embedding_config.embedding_dim,
                mode=pooling_type_to_str(embedding_config.pooling),
                device=device,
                include_last_offset=True,
                dtype=dtype,
            )
            # Inherit the device of the first created bag when none given.
            if device is None:
                device = self.embedding_bags[embedding_config.name].weight.device
            self._dtypes.append(embedding_config.data_type.value)
            # A table with no feature names serves a single feature named
            # after the table itself.
            if not embedding_config.feature_names:
                embedding_config.feature_names = [embedding_config.name]
            self._lengths_per_embedding.extend(
                len(embedding_config.feature_names) * [embedding_config.embedding_dim]
            )
        self._device: torch.device = device or torch.device("cpu")
        self._embedding_names: List[str] = [
            embedding
            for embeddings in get_embedding_names_by_table(tables)
            for embedding in embeddings
        ]
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]
        self.reset_parameters()
    def forward(self, features: KeyedJaggedTensor) -> KeyedTensor:
        """
        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].
        Returns:
            KeyedTensor
        """
        flat_feature_names: List[str] = []
        for names in self._feature_names:
            flat_feature_names.extend(names)
        inverse_indices = reorder_inverse_indices(
            inverse_indices=features.inverse_indices_or_none(),
            feature_names=flat_feature_names,
        )
        pooled_embeddings: List[torch.Tensor] = []
        feature_dict = features.to_dict()
        # One EmbeddingBag per table; each table may serve several features.
        for i, embedding_bag in enumerate(self.embedding_bags.values()):
            for feature_name in self._feature_names[i]:
                f = feature_dict[feature_name]
                per_sample_weights: Optional[torch.Tensor] = None
                if self._is_weighted:
                    # Match the bag's dtype for FP16 tables.
                    per_sample_weights = (
                        f.weights().half()
                        if self._dtypes[i] == DataType.FP16.value
                        else f.weights()
                    )
                # Output is promoted to float32 regardless of table dtype.
                res = embedding_bag(
                    input=f.values(),
                    offsets=f.offsets(),
                    per_sample_weights=(
                        per_sample_weights if self._is_weighted else None
                    ),
                ).float()
                pooled_embeddings.append(res)
        return KeyedTensor(
            keys=self._embedding_names,
            values=process_pooled_embeddings(
                pooled_embeddings=pooled_embeddings,
                inverse_indices=inverse_indices,
            ),
            length_per_key=self._lengths_per_embedding,
        )
    def is_weighted(self) -> bool:
        return self._is_weighted
    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        return self._embedding_bag_configs
    def device(self) -> torch.device:
        return self._device
    def reset_parameters(self) -> None:
        # Meta tensors carry no data, so there is nothing to initialize.
        if (isinstance(self.device, torch.device) and self.device.type == "meta") or (
            isinstance(self.device, str) and self.device == "meta"
        ):
            return
        # Initialize embedding bags weights with init_fn
        for table_config in self._embedding_bag_configs:
            assert table_config.init_fn is not None
            param = self.embedding_bags[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)
class EmbeddingCollection(EmbeddingCollectionInterface):
    """
    EmbeddingCollection represents a collection of non-pooled embeddings.

    It processes sparse data in the form of `KeyedJaggedTensor` of the form [F X B X L]
    where:

    * F: features (keys)
    * B: batch size
    * L: length of sparse features (variable)

    and outputs `Dict[feature (key), JaggedTensor]`.
    Each `JaggedTensor` contains values of the form (B * L) X D
    where:

    * B: batch size
    * L: length of sparse features (jagged)
    * D: each feature's (key's) embedding dimension and lengths are of the form L

    Args:
        tables (List[EmbeddingConfig]): list of embedding tables.
        device (Optional[torch.device]): default compute device.
        need_indices (bool): if we need to pass indices to the final lookup dict.

    Example::

        e1_config = EmbeddingConfig(
            name="t1", embedding_dim=3, num_embeddings=10, feature_names=["f1"]
        )
        e2_config = EmbeddingConfig(
            name="t2", embedding_dim=3, num_embeddings=10, feature_names=["f2"]
        )

        ec = EmbeddingCollection(tables=[e1_config, e2_config])

        #     0       1        2  <-- batch
        # 0   [0,1] None    [2]
        # 1   [3]    [4]    [5,6,7]
        # ^
        # feature

        features = KeyedJaggedTensor.from_offsets_sync(
            keys=["f1", "f2"],
            values=torch.tensor([0, 1, 2, 3, 4, 5, 6, 7]),
            offsets=torch.tensor([0, 2, 2, 3, 4, 5, 8]),
        )
        feature_embeddings = ec(features)
        print(feature_embeddings['f2'].values())
        tensor([[-0.2050,  0.5478,  0.6054],
        [ 0.7352,  0.3210, -3.0399],
        [ 0.1279, -0.1756, -0.4130],
        [ 0.7519, -0.4341, -0.0499],
        [ 0.9329, -1.0697, -0.8095]], grad_fn=<EmbeddingBackward>)
    """

    def __init__(  # noqa C901
        self,
        tables: List[EmbeddingConfig],
        device: Optional[torch.device] = None,
        need_indices: bool = False,
    ) -> None:
        super().__init__()
        torch._C._log_api_usage_once(f"torchrec.modules.{self.__class__.__name__}")
        self.embeddings: nn.ModuleDict = nn.ModuleDict()
        self._embedding_configs = tables
        self._embedding_dim: int = -1
        self._need_indices: bool = need_indices
        self._device: torch.device = (
            device if device is not None else torch.device("cpu")
        )
        table_names = set()
        for config in tables:
            if config.name in table_names:
                raise ValueError(f"Duplicate table name {config.name}")
            table_names.add(config.name)
            # The first table fixes the collection-wide embedding dim; every
            # subsequent table must match it.
            self._embedding_dim = (
                config.embedding_dim if self._embedding_dim < 0 else self._embedding_dim
            )
            if self._embedding_dim != config.embedding_dim:
                raise ValueError(
                    "All tables in a EmbeddingCollection are required to have same embedding dimension."
                    + f" Violating case: {config.name}'s embedding_dim {config.embedding_dim} !="
                    + f" {self._embedding_dim}"
                )
            # Only FP32 vs FP16 are distinguished here; other DataTypes fall
            # through to float16.
            dtype = (
                torch.float32 if config.data_type == DataType.FP32 else torch.float16
            )
            self.embeddings[config.name] = nn.Embedding(
                num_embeddings=config.num_embeddings,
                embedding_dim=config.embedding_dim,
                device=device,
                dtype=dtype,
            )
            if config.init_fn is not None:
                config.init_fn(self.embeddings[config.name].weight)
            # NOTE: mutates the caller-provided config when feature_names is empty.
            if not config.feature_names:
                config.feature_names = [config.name]
        self._embedding_names_by_table: List[List[str]] = get_embedding_names_by_table(
            tables
        )
        self._feature_names: List[List[str]] = [table.feature_names for table in tables]

    def forward(
        self,
        features: KeyedJaggedTensor,
    ) -> Dict[str, JaggedTensor]:
        """
        Look up embeddings for every feature of every table.

        Args:
            features (KeyedJaggedTensor): KJT of form [F X B X L].

        Returns:
            Dict[str, JaggedTensor]: one JaggedTensor of shape (B * L) X D per
            embedding name.
        """
        feature_embeddings: Dict[str, JaggedTensor] = {}
        jt_dict: Dict[str, JaggedTensor] = features.to_dict()
        for i, emb_module in enumerate(self.embeddings.values()):
            feature_names = self._feature_names[i]
            embedding_names = self._embedding_names_by_table[i]
            for j, embedding_name in enumerate(embedding_names):
                feature_name = feature_names[j]
                f = jt_dict[feature_name]
                # NOTE: output is upcast to fp32 even for fp16 tables.
                lookup = emb_module(
                    input=f.values(),
                ).float()
                feature_embeddings[embedding_name] = JaggedTensor(
                    values=lookup,
                    lengths=f.lengths(),
                    # when need_indices, the original ids ride along in `weights`
                    weights=f.values() if self._need_indices else None,
                )
        return feature_embeddings

    def need_indices(self) -> bool:
        """Whether lookups carry their source indices in the output `weights`."""
        return self._need_indices

    def embedding_dim(self) -> int:
        """Shared embedding dimension of all tables in this collection."""
        return self._embedding_dim

    def embedding_configs(self) -> List[EmbeddingConfig]:
        """Return the table configs this collection was built from."""
        return self._embedding_configs

    def embedding_names_by_table(self) -> List[List[str]]:
        """Per-table list of output embedding names."""
        return self._embedding_names_by_table

    def device(self) -> torch.device:
        """Return the default compute device configured for this collection."""
        return self._device

    def reset_parameters(self) -> None:
        """Re-run each table's ``init_fn`` on its weights; no-op on meta device."""
        # NOTE(review): as in EmbeddingBagCollection, this assumes `device` is a
        # @property in the full file -- confirm.
        if (isinstance(self.device, torch.device) and self.device.type == "meta") or (
            isinstance(self.device, str) and self.device == "meta"
        ):
            return
        # Initialize embedding weights with init_fn
        for table_config in self._embedding_configs:
            assert table_config.init_fn is not None
            param = self.embeddings[f"{table_config.name}"].weight
            # pyre-ignore
            table_config.init_fn(param)
The provided code snippet includes necessary dependencies for implementing the `fuse_embedding_optimizer` function. Write a Python function `def fuse_embedding_optimizer( model: nn.Module, optimizer_type: Type[torch.optim.Optimizer], optimizer_kwargs: Dict[str, Any], device: torch.device, location: Optional[EmbeddingLocation] = None, ) -> nn.Module` to solve the following problem:
Recursively replaces EmbeddingBagCollection and EmbeddingCollection with FusedEmbeddingBagCollection and FusedEmbeddingCollection in a model subtree. The fused modules will be initialized using the passed-in optimizer parameters and model location. Args: model (nn.Module): optimizer_type (Type[torch.optim.Optimizer]): optimizer_kwargs (Dict[str, Any]): device (Optional[torch.device]): location (Optional[EmbeddingLocation]): GPU location placement Returns: nn.Module: input nn.Module with fused embedding modules Example:: ebc = EmbeddingBagCollection() my_model = ExampleModel(ebc) my_model = fuse_embedding_optimizer(my_model, optimizer_type=torch.optim.SGD, optimizer_kwargs={"lr": .01}) kjt = KeyedJaggedTensor() output = my_model(kjt)
Here is the function:
def fuse_embedding_optimizer(
    model: nn.Module,
    optimizer_type: Type[torch.optim.Optimizer],
    optimizer_kwargs: Dict[str, Any],
    device: torch.device,
    location: Optional[EmbeddingLocation] = None,
) -> nn.Module:
    """
    Recursively replaces EmbeddingBagCollection and EmbeddingCollection with
    FusedEmbeddingBagCollection and FusedEmbeddingCollection in a model subtree.

    The fused modules will be initialized using the passed-in optimizer
    parameters and model location.

    Args:
        model (nn.Module): model (or subtree) to rewrite.
        optimizer_type (Type[torch.optim.Optimizer]): optimizer class to fuse in.
        optimizer_kwargs (Dict[str, Any]): keyword args for the optimizer (e.g. lr).
        device (torch.device): device on which fused modules are created.
        location (Optional[EmbeddingLocation]): GPU location placement.

    Returns:
        nn.Module: the input module with fused embedding modules. If `model`
        itself is an EmbeddingBagCollection/EmbeddingCollection, a NEW fused
        module is returned; otherwise `model` is rewritten in place and returned.

    Example::

        ebc = EmbeddingBagCollection()
        my_model = ExampleModel(ebc)

        my_model = fuse_embedding_optimizer(
            my_model,
            optimizer_type=torch.optim.SGD,
            optimizer_kwargs={"lr": .01},
            device=torch.device("cpu"),
        )

        kjt = KeyedJaggedTensor()
        output = my_model(kjt)
    """
    # Replace all EBCs and ECs in a with a corresponding FusedEmbeddingModule.

    # check if top-level module is EBC/EC: there is no parent to setattr on,
    # so the fused replacement is returned directly.
    if isinstance(model, EmbeddingBagCollection):
        return FusedEmbeddingBagCollection(
            model.embedding_bag_configs(),
            optimizer_type=optimizer_type,
            optimizer_kwargs=optimizer_kwargs,
            device=device,
            location=location,
        )

    if isinstance(model, EmbeddingCollection):
        return FusedEmbeddingCollection(
            model.embedding_configs(),
            optimizer_type=optimizer_type,
            optimizer_kwargs=optimizer_kwargs,
            device=device,
            location=location,
        )

    def replace(_model: nn.Module) -> None:
        # Depth-first traversal: EBC/EC children are swapped for their fused
        # equivalents; any other child is recursed into.
        for child_name, child in _model.named_children():
            if isinstance(child, EmbeddingBagCollection):
                setattr(
                    _model,
                    child_name,
                    FusedEmbeddingBagCollection(
                        tables=child.embedding_bag_configs(),
                        optimizer_type=optimizer_type,
                        optimizer_kwargs=optimizer_kwargs,
                        device=device,
                        location=location,
                    ),
                )
            elif isinstance(child, EmbeddingCollection):
                setattr(
                    _model,
                    child_name,
                    FusedEmbeddingCollection(
                        tables=child.embedding_configs(),
                        optimizer_type=optimizer_type,
                        optimizer_kwargs=optimizer_kwargs,
                        device=device,
                        location=location,
                    ),
                )
            else:
                replace(child)

    replace(model)
    return model
8,881 | from dataclasses import dataclass, field
from enum import Enum, unique
from functools import partial
from math import sqrt
from typing import Callable, Dict, List, NamedTuple, Optional
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_training import PoolingMode
from torchrec.types import DataType
class PoolingType(Enum):
    """Reduction applied across each feature's jagged values."""

    SUM = "SUM"
    MEAN = "MEAN"
    NONE = "NONE"
def pooling_type_to_pooling_mode(pooling_type: PoolingType) -> PoolingMode:
    """Map a torchrec ``PoolingType`` to the fbgemm ``PoolingMode`` equivalent.

    Args:
        pooling_type (PoolingType): SUM, MEAN or NONE.

    Returns:
        PoolingMode: the matching fbgemm pooling mode.

    Raises:
        ValueError: if ``pooling_type`` is not a recognized pooling type.
    """
    # NOTE: compares by `.value` rather than enum identity (presumably to
    # tolerate enum members re-imported under a different module path) --
    # preserved as-is.
    if pooling_type.value == PoolingType.SUM.value:
        return PoolingMode.SUM
    elif pooling_type.value == PoolingType.MEAN.value:
        return PoolingMode.MEAN
    elif pooling_type.value == PoolingType.NONE.value:
        return PoolingMode.NONE
    else:
        # ValueError (was bare Exception): narrower and consistent with the
        # sibling pooling_type_to_str; still caught by callers catching Exception.
        raise ValueError(f"Invalid pooling type {pooling_type}")
8,882 | from dataclasses import dataclass, field
from enum import Enum, unique
from functools import partial
from math import sqrt
from typing import Callable, Dict, List, NamedTuple, Optional
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_training import PoolingMode
from torchrec.types import DataType
class PoolingType(Enum):
    """Reduction applied across each feature's jagged values."""

    SUM = "SUM"
    MEAN = "MEAN"
    NONE = "NONE"
def pooling_type_to_str(pooling_type: PoolingType) -> str:
    """Return the lowercase string name of a pooling type ("sum" / "mean").

    Raises:
        ValueError: for any pooling type other than SUM or MEAN (incl. NONE).
    """
    # Keyed by .value so that enum members from a re-imported module still match.
    label_by_value = {
        PoolingType.SUM.value: "sum",
        PoolingType.MEAN.value: "mean",
    }
    label = label_by_value.get(pooling_type.value)
    if label is None:
        raise ValueError(f"Unsupported pooling type {pooling_type}")
    return label
8,883 | from dataclasses import dataclass, field
from enum import Enum, unique
from functools import partial
from math import sqrt
from typing import Callable, Dict, List, NamedTuple, Optional
import torch
from fbgemm_gpu.split_embedding_configs import SparseType
from fbgemm_gpu.split_table_batched_embeddings_ops_training import PoolingMode
from torchrec.types import DataType
class DataType(Enum):
def __str__(self) -> str:
def data_type_to_dtype(data_type: DataType) -> torch.dtype:
    """Translate a torchrec ``DataType`` member into the matching ``torch.dtype``.

    Raises:
        ValueError: if ``data_type`` has no torch.dtype counterpart.
    """
    # Lookup table keyed by .value (tolerates enum re-imports); replaces a long
    # if/elif chain with identical outcomes.
    dtype_by_value = {
        DataType.FP32.value: torch.float32,
        DataType.FP16.value: torch.float16,
        DataType.BF16.value: torch.bfloat16,
        DataType.INT64.value: torch.int64,
        DataType.INT32.value: torch.int32,
        DataType.INT8.value: torch.int8,
        DataType.UINT8.value: torch.uint8,
        DataType.INT4.value: torch.quint4x2,
        DataType.INT2.value: torch.quint2x4,
    }
    try:
        return dtype_by_value[data_type.value]
    except KeyError:
        raise ValueError(f"DataType {data_type} cannot be converted to dtype") from None
8,884 | from typing import Dict, List, Set, Union
import torch
import torch.nn as nn
from torchrec.modules.embedding_modules import EmbeddingBagCollection
from torchrec.modules.feature_processor_ import (
FeatureProcessor,
FeatureProcessorsCollection,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor, KeyedTensor
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        """Build a KJT from values plus lengths or offsets (see class docstring).

        `stride` and `stride_per_key_per_rank` are mutually exclusive; the
        trailing cache arguments let callers pre-seed the lazy fields.
        """
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer tensors (or empty); only one of the
        # two is needed -- the other is derived lazily on access.
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # degenerate variable-stride case: every key shares one stride
                self._stride = self.stride_per_key()[0]
        else:
            # fixed stride: infer it from lengths/offsets when not given
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
    def from_offsets_sync(
        keys: List[str],
        values: torch.Tensor,
        offsets: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Construct a KJT from cumulative offsets and eagerly fill its caches.

        NOTE(review): takes no `self`/`cls`; presumably decorated @staticmethod
        in the full file -- confirm.
        """
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            offsets=offsets,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        # sync() precomputes length_per_key/offset_per_key up front
        return kjt.sync()
    def from_lengths_sync(
        keys: List[str],
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Construct a KJT from per-slice lengths and eagerly fill its caches.

        NOTE(review): takes no `self`/`cls`; presumably decorated @staticmethod
        in the full file -- confirm.
        """
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        # sync() precomputes length_per_key/offset_per_key up front
        return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """Concatenate KJTs along the key dimension into a single KJT.

        All inputs must agree on weightedness and on variable-vs-fixed stride.

        Raises:
            ValueError: on an empty list or a weighted/unweighted mix.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")

        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        # all-or-nothing: mixing variable- and fixed-stride KJTs is unsupported
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            # length_per_key can only be carried over if EVERY input has it cached
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "KeyedJaggedTensor":
        """Build a zero-key, zero-value KJT (stride 0) with the given dtypes.

        NOTE(review): takes no `self`/`cls`; presumably @staticmethod in the
        full file -- confirm.
        """
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        return KeyedJaggedTensor(
            # jit.annotate keeps the empty list typed for TorchScript
            keys=torch.jit.annotate(List[str], []),
            values=torch.empty(0, dtype=values_dtype, device=device),
            weights=weights,
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            stride=0,
        )
    def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
        """Build an empty KJT matching `kjt`'s device, dtypes and stride config.

        NOTE(review): takes no `self`/`cls`; presumably @staticmethod in the
        full file -- confirm.
        """
        stride, stride_per_key_per_rank = (
            (None, kjt.stride_per_key_per_rank())
            if kjt.variable_stride_per_key()
            else (kjt.stride(), None)
        )
        return KeyedJaggedTensor(
            keys=[],
            values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
            weights=(
                None
                if kjt.weights_or_none() is None
                else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
            ),
            lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
        )
    def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
        """
        Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
        but this function will ONLY work if the JaggedTensors all
        have the same "implicit" batch_size dimension.

        Basically, we can visualize JaggedTensors as 2-D tensors
        of the format of [batch_size x variable_feature_dim].
        In case, we have some batch without a feature value,
        the input JaggedTensor could just not include any values.

        But KeyedJaggedTensor (by default) typically pad "None"
        so that all the JaggedTensors stored in the KeyedJaggedTensor
        have the same batch_size dimension. That is, in the case,
        the JaggedTensor input didn't automatically pad
        for the empty batches, this function would error / not work.

        Consider the visualization of the following KeyedJaggedTensor:
        #        0       1        2  <-- dim_1
        # "Feature0"   [V0,V1] None    [V2]
        # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        #   ^
        #  dim_0

        Notice that the inputs for this KeyedJaggedTensor would have looked like:
            values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7]  # V == any tensor datatype
            weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7]  # W == any tensor datatype
            lengths: torch.Tensor = [2, 0, 1, 1, 1, 3]  # representing the jagged slice
            offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8]  # offsets from 0 for each jagged slice
            keys: List[str] = ["Feature0", "Feature1"]  # correspond to each value of dim_0
            index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1}  # index for each key
            offset_per_key: List[int] = [0, 3, 8]  # start offset for each key and final offset

        Now if the input jt_dict = {
            # "Feature0"   [V0,V1] [V2]
            # "Feature1"   [V3]    [V4]    [V5,V6,V7]
        } and the "None" is left out from each JaggedTensor,
        then this function would fail as we would not correctly
        be able to pad "None" as it does not technically know
        the correct batch / place to pad within the JaggedTensor.

        Essentially, the lengths Tensor inferred by this function
        would be [2, 1, 1, 1, 3] indicating variable batch_size
        dim_1 violates the existing assumption / precondition
        that KeyedJaggedTensor's should have fixed batch_size dimension.

        NOTE(review): an empty jt_dict reaches `stride_per_key[0]` below and
        raises IndexError rather than producing an empty KJT -- confirm intended.
        """
        kjt_keys = list(jt_dict.keys())
        kjt_vals_list: List[torch.Tensor] = []
        kjt_lens_list: List[torch.Tensor] = []
        kjt_weights_list: List[torch.Tensor] = []
        stride_per_key: List[int] = []
        for jt in jt_dict.values():
            # per-key stride is inferred from the number of jagged slices
            stride_per_key.append(len(jt.lengths()))
            kjt_vals_list.append(jt.values())
            kjt_lens_list.append(jt.lengths())
            weight = jt.weights_or_none()
            if weight is not None:
                kjt_weights_list.append(weight)
        kjt_vals = torch.concat(kjt_vals_list)
        kjt_lens = torch.concat(kjt_lens_list)
        kjt_weights = (
            torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
        )
        # uniform stride -> fixed-stride KJT; otherwise single-rank variable stride
        kjt_stride, kjt_stride_per_key_per_rank = (
            (stride_per_key[0], None)
            if all(s == stride_per_key[0] for s in stride_per_key)
            else (None, [[stride] for stride in stride_per_key])
        )
        kjt = KeyedJaggedTensor(
            keys=kjt_keys,
            values=kjt_vals,
            weights=kjt_weights,
            lengths=kjt_lens,
            stride=kjt_stride,
            stride_per_key_per_rank=kjt_stride_per_key_per_rank,
        ).sync()
        return kjt
    def sync(self) -> "KeyedJaggedTensor":
        """Eagerly materialize the lazy per-key length/offset caches."""
        self.length_per_key()
        self.offset_per_key()
        return self

    def unsync(self) -> "KeyedJaggedTensor":
        """Drop the per-key caches so they are recomputed lazily."""
        self._length_per_key = None
        self._offset_per_key = None
        return self

    def device(self) -> torch.device:
        """Device of the values tensor (member tensors assumed co-located)."""
        return self._values.device

    def lengths(self) -> torch.Tensor:
        """Jagged slice lengths; derived from offsets on first access, cached."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths

    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Cached lengths, or None if not yet computed."""
        return self._lengths

    def offsets(self) -> torch.Tensor:
        """Cumulative offsets; derived from lengths on first access, cached."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets

    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Cached offsets, or None if not yet computed."""
        return self._offsets

    def keys(self) -> List[str]:
        """Feature keys, in order."""
        return self._keys

    def values(self) -> torch.Tensor:
        """Flat values tensor covering all keys."""
        return self._values

    def weights(self) -> torch.Tensor:
        """Weights tensor; raises if this KJT is unweighted."""
        return _get_weights_or_throw(self._weights)

    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Weights tensor, or None for an unweighted KJT."""
        return self._weights

    def stride(self) -> int:
        """Batch size (examples per key) for fixed-stride KJTs."""
        return self._stride

    def stride_per_key(self) -> List[int]:
        """Total stride (summed over ranks) for each key."""
        return self._stride_per_key

    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Per-key, per-rank batch sizes (outer list: keys; inner: ranks)."""
        return self._stride_per_key_per_rank

    def variable_stride_per_key(self) -> bool:
        """True when keys may carry different strides."""
        return self._variable_stride_per_key

    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices for expanding deduplicated output; raises if absent."""
        return _get_inverse_indices_or_throw(self._inverse_indices)

    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Inverse indices, or None if not set."""
        return self._inverse_indices

    def _key_indices(self) -> Dict[str, int]:
        """Lazily-built map from key name to its position; cached."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Number of values belonging to each key (computed lazily, cached)."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key

    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key lengths, or None if not yet computed."""
        return self._length_per_key

    def offset_per_key(self) -> List[int]:
        """Start offset of each key in `values` plus a final end offset (cached)."""
        # helper returns both caches so length_per_key is refreshed for free
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key

    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key offsets, or None if not yet computed."""
        return self._offset_per_key

    def lengths_offset_per_key(self) -> List[int]:
        """Cumulative stride per key: each key's index range within `lengths`."""
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
    def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
        """Split into one KJT per segment of consecutive keys.

        Args:
            segments (List[int]): number of keys per output KJT; assumed to sum
                to the total key count (not validated here).
        """
        split_list: List[KeyedJaggedTensor] = []
        start = 0
        start_offset = 0
        _length_per_key = self.length_per_key()
        _offset_per_key = self.offset_per_key()
        for segment in segments:
            end = start + segment
            end_offset = _offset_per_key[end]
            keys: List[str] = self._keys[start:end]
            stride, stride_per_key_per_rank = (
                (None, self.stride_per_key_per_rank()[start:end])
                if self.variable_stride_per_key()
                else (self._stride, None)
            )
            if segment == len(self._keys):
                # segment covers every key: pass tensors and caches through
                # without any slicing
                # no torch slicing required
                split_list.append(
                    KeyedJaggedTensor(
                        keys=self._keys,
                        values=self._values,
                        weights=self.weights_or_none(),
                        lengths=self._lengths,
                        offsets=self._offsets,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=self._length_per_key,
                        offset_per_key=self._offset_per_key,
                        index_per_key=self._index_per_key,
                        jt_dict=self._jt_dict,
                        inverse_indices=None,
                    )
                )
            elif segment == 0:
                # empty segment: emit a KJT with empty tensors on the same device
                empty_int_list: List[int] = torch.jit.annotate(List[int], [])
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=torch.tensor(
                            empty_int_list,
                            device=self.device(),
                            dtype=self._values.dtype,
                        ),
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else torch.tensor(
                                empty_int_list,
                                device=self.device(),
                                dtype=self.weights().dtype,
                            )
                        ),
                        lengths=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        offsets=torch.tensor(
                            empty_int_list, device=self.device(), dtype=torch.int
                        ),
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=None,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            else:
                # general case: slice values/weights by offsets, lengths by stride
                split_length_per_key = _length_per_key[start:end]
                if not torch.jit.is_scripting() and is_torchdynamo_compiling():
                    # Checks for dynamo dynamic shapes tracing
                    torch._check_is_size(start_offset)
                    torch._check_is_size(end_offset)
                    torch._check_is_size(end_offset - start_offset)
                    torch._check(start_offset <= self._values.size(0))
                    torch._check(end_offset <= self._values.size(0))
                    torch._check(end_offset >= start_offset)
                split_list.append(
                    KeyedJaggedTensor(
                        keys=keys,
                        values=self._values[start_offset:end_offset],
                        weights=(
                            None
                            if self.weights_or_none() is None
                            else self.weights()[start_offset:end_offset]
                        ),
                        lengths=self.lengths()[
                            self.lengths_offset_per_key()[
                                start
                            ] : self.lengths_offset_per_key()[end]
                        ],
                        offsets=None,
                        stride=stride,
                        stride_per_key_per_rank=stride_per_key_per_rank,
                        length_per_key=split_length_per_key,
                        offset_per_key=None,
                        index_per_key=None,
                        jt_dict=None,
                        inverse_indices=None,
                    )
                )
            start = end
            start_offset = end_offset
        return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Reorder keys (and their values/lengths/weights) by `indices`.

        Args:
            indices (List[int]): permutation over key positions.
            indices_tensor (Optional[torch.Tensor]): optional pre-built device
                copy of `indices`, skipping the host-to-device transfer.
            include_inverse_indices (bool): carry inverse indices to the result.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        # gather the permuted metadata on the host side
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # variable stride: segment-wise permutes for lengths and values
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # fixed stride: single fused fbgemm permute over the 2-D lengths view
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
    def flatten_lengths(self) -> "KeyedJaggedTensor":
        """Return a KJT sharing values/weights but with `lengths` viewed as 1-D.

        Offsets and most lazy caches are intentionally dropped (length_per_key
        is kept).
        """
        stride, stride_per_key_per_rank = (
            (None, self.stride_per_key_per_rank())
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values,
            weights=self._weights,
            lengths=self.lengths().view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=self.length_per_key(),
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=None,
        )
    def __getitem__(self, key: str) -> JaggedTensor:
        """Return the JaggedTensor slice for `key` (KeyError on unknown key)."""
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # offset_per_key carries a trailing end offset, so the empty-slice
        # fallback should be unreachable for a valid key -- defensive only.
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )
    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Decompose into one JaggedTensor per key (computed once, then cached)."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        """Propagate Tensor.record_stream to every member tensor present."""
        self._values.record_stream(stream)
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        if weights is not None:
            weights.record_stream(stream)
        if lengths is not None:
            lengths.record_stream(stream)
        if offsets is not None:
            offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """Return a copy of this KJT with all tensors moved to ``device``.

        ``dtype``, when given, is applied to the weights tensor only; values,
        lengths and offsets keep their dtypes. Cached metadata
        (length/offset/index per key, jt_dict) is carried over unchanged.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        # Forward exactly one stride representation, matching this KJT's mode.
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            # Only the tensor half of (names, tensor) moves devices.
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Render a human-readable multi-line representation, one key per line."""
        # No keys, or neither offsets nor lengths available: nothing to render.
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    " "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        # [start, end) stride window for this key.
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
def pin_memory(self) -> "KeyedJaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
stride, stride_per_key_per_rank = (
(None, self._stride_per_key_per_rank)
if self.variable_stride_per_key()
else (self._stride, None)
)
inverse_indices = self._inverse_indices
if inverse_indices is not None:
inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
return KeyedJaggedTensor(
keys=self._keys,
values=self._values.pin_memory(),
weights=weights.pin_memory() if weights is not None else None,
lengths=lengths.pin_memory() if lengths is not None else None,
offsets=offsets.pin_memory() if offsets is not None else None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self._length_per_key,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
jt_dict=None,
inverse_indices=inverse_indices,
)
def dist_labels(self) -> List[str]:
labels = ["lengths", "values"]
if self.variable_stride_per_key():
labels.append("strides")
if self.weights_or_none() is not None:
labels.append("weights")
return labels
    def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
        """Compute all-to-all split sizes, one inner list per ``dist_labels()`` entry.

        ``key_splits`` gives the number of keys per destination. Returns
        splits for lengths (batch sizes) and values (element counts), plus
        strides (the key splits themselves) for variable-stride KJTs and
        weight splits when weights are present.
        """
        batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
        length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
        splits = [batch_size_per_split, length_per_split]
        if self.variable_stride_per_key():
            splits.append(key_splits)
        if self.weights_or_none() is not None:
            # Weights are 1:1 with values, so they share the value splits.
            splits.append(length_per_split)
        return splits
    def dist_tensors(self) -> List[torch.Tensor]:
        """Tensors to exchange during all-to-all, ordered to match ``dist_labels()``."""
        tensors = [self.lengths(), self.values()]
        if self.variable_stride_per_key():
            # Materialize per-key strides as a tensor on this KJT's device.
            strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
            tensors.append(strides)
        if self.weights_or_none() is not None:
            tensors.append(self.weights())
        return tensors
    def dist_init(
        keys: List[str],
        tensors: List[torch.Tensor],
        variable_stride_per_key: bool,
        num_workers: int,
        recat: Optional[torch.Tensor],
        stride_per_rank: Optional[List[int]],
        stagger: int = 1,
    ) -> "KeyedJaggedTensor":
        """Reassemble a KJT from tensors received in an all-to-all exchange.

        NOTE(review): takes no ``self`` — presumably decorated ``@staticmethod``
        in the original class; the decorator is not visible in this excerpt.

        ``tensors`` is ordered as produced by ``dist_tensors()``:
        [lengths, values] plus optional strides (variable-stride mode) and
        optional weights — hence the 2/3/4 length check below. ``recat``
        (when non-empty) permutes the received data from rank-major back to
        key-major order; ``stagger`` re-groups per-rank strides for staggered
        shards. Returns a synced KJT (cached lengths/offsets computed).
        """
        assert len(tensors) in [2, 3, 4]
        lengths = tensors[0]
        values = tensors[1]
        stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
        # Weights, if sent, are always the last tensor.
        weights = (
            tensors[-1]
            if (variable_stride_per_key and len(tensors) == 4)
            or (not variable_stride_per_key and len(tensors) == 3)
            else None
        )
        if variable_stride_per_key:
            assert stride_per_rank_per_key is not None
            # Received strides are rank-major; transpose to per-key lists.
            stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
                num_workers, len(keys)
            ).T.tolist()
            strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
                stride_per_rank_per_key
            ).tolist()
            cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
            # Per-(rank,key) segment sizes in the values tensor.
            length_per_key = (
                cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
            )
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    lengths, _ = _permute_tensor_by_segments(
                        lengths,
                        stride_per_rank_per_key,
                        recat,
                        None,
                    )
                    values, weights = _permute_tensor_by_segments(
                        values,
                        length_per_key,
                        recat,
                        weights,
                    )
            if not stride_per_key_per_rank:
                stride_per_key_per_rank = [[0]] * len(keys)
            if stagger > 1:
                # Undo staggered sharding: interleave each key's per-rank
                # strides by local world size.
                stride_per_key_per_rank_stagger: List[List[int]] = []
                local_world_size = num_workers // stagger
                for i in range(len(keys)):
                    stride_per_rank_stagger: List[int] = []
                    for j in range(local_world_size):
                        stride_per_rank_stagger.extend(
                            stride_per_key_per_rank[i][j::local_world_size]
                        )
                    stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
                stride_per_key_per_rank = stride_per_key_per_rank_stagger
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride_per_key_per_rank=stride_per_key_per_rank,
            )
            return kjt.sync()
        else:
            assert stride_per_rank is not None
            with record_function("## all2all_data:recat_values ##"):
                if recat is not None and recat.numel() > 0:
                    stride = stride_per_rank[0]
                    # Uniform batch size per rank permits the faster 2-D permute.
                    if all(s == stride for s in stride_per_rank):
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_2D_sparse_data(
                            recat,
                            lengths.view(-1, stride),
                            values,
                            weights,
                            values.numel(),
                        )
                        lengths = lengths.view(-1)
                    else:  # variable batch size per rank
                        (
                            lengths,
                            values,
                            weights,
                        ) = torch.ops.fbgemm.permute_1D_sparse_data(
                            recat,
                            lengths.view(-1),
                            values,
                            weights,
                            values.numel(),
                        )
            kjt = KeyedJaggedTensor(
                keys=keys,
                values=values,
                weights=weights,
                lengths=lengths,
                stride=sum(stride_per_rank),
            )
            return kjt.sync()
def apply_feature_processors_to_kjt(
    features: KeyedJaggedTensor,
    feature_processors: Dict[str, nn.Module],
) -> KeyedJaggedTensor:
    """Run per-feature processors over a KJT and merge the resulting weights.

    Each feature with a processor gets that processor's output weights;
    features without one get weights of 1.0 (one per value). Returns a new
    KJT sharing the original values/lengths with the combined weights.
    """
    features_dict = features.to_dict()
    weights_per_key: List[torch.Tensor] = []
    for key in features.keys():
        jt = features_dict[key]
        processor = feature_processors.get(key)
        if processor is not None:
            weights_per_key.append(processor(jt).weights())
        else:
            # No processor registered: neutral weight per value.
            weights_per_key.append(
                torch.ones(jt.values().shape[0], device=jt.values().device),
            )
    return KeyedJaggedTensor(
        keys=features.keys(),
        values=features.values(),
        weights=(
            torch.cat(weights_per_key)
            if weights_per_key
            else features.weights_or_none()
        ),
        lengths=features.lengths(),
        offsets=features._offsets,
        stride=features._stride,
        length_per_key=features._length_per_key,
        offset_per_key=features._offset_per_key,
        index_per_key=features._index_per_key,
    )
8,885 | import functools
import inspect
from typing import Any, Callable
import torch
import torch.utils.hooks as hooks
from torch.nn.modules.lazy import _LazyProtocol, LazyModuleMixin
from torch.nn.modules.module import (
_global_backward_hooks,
_global_forward_hooks,
_global_forward_pre_hooks,
)
def _apply_functions_after_first_forward(
module: torch.nn.Module,
# pyre-ignore[2]
input: Any,
# pyre-ignore[2]
output: Any,
) -> None:
_functions_to_lazy_apply = getattr(module, "_functions_to_lazy_apply", None)
if _functions_to_lazy_apply is not None:
for fn in _functions_to_lazy_apply:
module.apply(fn)
delattr(module, "_functions_to_lazy_apply")
module._lazy_apply_hook.remove()
delattr(module, "_lazy_apply_hook")
The provided code snippet includes necessary dependencies for implementing the `lazy_apply` function. Write a Python function `def lazy_apply( module: torch.nn.Module, fn: Callable[[torch.nn.Module], None] ) -> torch.nn.Module` to solve the following problem:
Attaches a function to a module, which will be applied recursively to every submodule (as returned by `.children()`) of the module as well as the module itself right after the first forward pass (i.e. after all submodules and parameters have been initialized). Typical use includes initializing the numerical value of the parameters of a lazy module (i.e. modules inherited from `LazyModuleMixin`). NOTE: `lazy_apply()` can be used on both lazy and non-lazy modules. Args: module (torch.nn.Module): module to recursively apply `fn` on. fn (Callable[[torch.nn.Module], None]): function to be attached to `module` and later be applied to each submodule of `module` and the `module` itself. Returns: torch.nn.Module: `module` with `fn` attached. Example:: @torch.no_grad() def init_weights(m): print(m) if type(m) == torch.nn.LazyLinear: m.weight.fill_(1.0) print(m.weight) linear = torch.nn.LazyLinear(2) lazy_apply(linear, init_weights) # doesn't run `init_weights` immediately input = torch.randn(2, 10) linear(input) # runs `init_weights` only once, right after first forward pass seq = torch.nn.Sequential(torch.nn.LazyLinear(2), torch.nn.LazyLinear(2)) lazy_apply(seq, init_weights) # doesn't run `init_weights` immediately input = torch.randn(2, 10) seq(input) # runs `init_weights` only once, right after first forward pass
Here is the function:
def lazy_apply(
    module: torch.nn.Module, fn: Callable[[torch.nn.Module], None]
) -> torch.nn.Module:
    """Queue ``fn`` to run recursively on ``module`` right after its first forward.

    The function is recorded on ``module`` together with a one-shot forward
    hook. Immediately after the module's first forward pass — i.e. once all
    submodules and parameters have been initialized — every queued function
    is applied (via ``module.apply``) to the module and each of its
    submodules, and the bookkeeping attributes are removed.

    Typical use is initializing parameter values of lazy modules (those
    inheriting from ``LazyModuleMixin``), though it works on non-lazy
    modules too.

    Args:
        module (torch.nn.Module): module to recursively apply ``fn`` on.
        fn (Callable[[torch.nn.Module], None]): function later applied to
            each submodule of ``module`` and to ``module`` itself.

    Returns:
        torch.nn.Module: the same ``module``, with ``fn`` queued.

    Example::

        @torch.no_grad()
        def init_weights(m):
            if type(m) == torch.nn.LazyLinear:
                m.weight.fill_(1.0)

        linear = torch.nn.LazyLinear(2)
        lazy_apply(linear, init_weights)  # doesn't run `init_weights` yet
        linear(torch.randn(2, 10))  # runs it once, right after this forward
    """
    # Install the one-shot hook only once, however many functions are queued.
    if not hasattr(module, "_lazy_apply_hook"):
        module._lazy_apply_hook = module.register_forward_hook(
            _apply_functions_after_first_forward
        )
    if not hasattr(module, "_functions_to_lazy_apply"):
        module._functions_to_lazy_apply = []
    module._functions_to_lazy_apply.append(fn)
    return module
8,886 | import abc
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torchrec.modules.embedding_configs import (
DataType,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_str,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
def reorder_inverse_indices(
    inverse_indices: Optional[Tuple[List[str], torch.Tensor]],
    feature_names: List[str],
) -> torch.Tensor:
    """Select rows of the inverse-indices tensor in ``feature_names`` order.

    ``inverse_indices`` pairs a list of feature names with a tensor whose
    rows are aligned to those names. Feature names may be disambiguated as
    "<feature>@<table>"; matching is done on the feature part only.
    Returns an empty tensor when ``inverse_indices`` is None.
    """
    if inverse_indices is None:
        return torch.empty(0)
    names, indices_tensor = inverse_indices
    position = {name: i for i, name in enumerate(names)}
    order = torch.tensor(
        [position[name.split("@")[0]] for name in feature_names],
        device=indices_tensor.device,
    )
    return torch.index_select(indices_tensor, 0, order)
8,887 | import abc
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torchrec.modules.embedding_configs import (
DataType,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_str,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
def process_pooled_embeddings(
    pooled_embeddings: List[torch.Tensor],
    inverse_indices: torch.Tensor,
) -> torch.Tensor:
    """Optionally re-index pooled embeddings, then concatenate along dim 1.

    When ``inverse_indices`` is non-empty, each pooled tensor is gathered
    along dim 0 by its corresponding row of indices (fbgemm grouped
    index-select); otherwise tensors are concatenated as-is.
    """
    if inverse_indices.numel() > 0:
        index_lists = list(torch.unbind(inverse_indices))
        pooled_embeddings = torch.ops.fbgemm.group_index_select_dim0(
            pooled_embeddings, index_lists
        )
    return torch.cat(pooled_embeddings, dim=1)
8,888 | import abc
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from torchrec.modules.embedding_configs import (
DataType,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_str,
)
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
class EmbeddingBagConfig(BaseEmbeddingConfig):
pooling: PoolingType = PoolingType.SUM
class EmbeddingConfig(BaseEmbeddingConfig):
pass
def get_embedding_names_by_table(
    tables: Union[List[EmbeddingBagConfig], List[EmbeddingConfig]],
) -> List[List[str]]:
    """Compute per-table embedding output names, disambiguating shared features.

    A feature appearing in more than one table is "shared" and is emitted as
    "<feature>@<table name>"; a feature unique to one table keeps its plain
    name. Returns one list of names per table, in input order.
    """
    # seen[feature] ends up True iff the feature occurs in 2+ tables.
    seen: Dict[str, bool] = {}
    for config in tables:
        for feature in config.feature_names:
            seen[feature] = feature in seen
    return [
        [
            f"{feature}@{config.name}" if seen[feature] else feature
            for feature in config.feature_names
        ]
        for config in tables
    ]
8,889 | import abc
import json
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Tuple, Type
import torch
import torch.nn as nn
import torch.quantization as quant
import torchrec as trec
import torchrec.quant as trec_quant
from torchrec.modules.embedding_configs import QuantConfig
from torchrec.modules.embedding_modules import (
EmbeddingBagCollectionInterface,
EmbeddingCollectionInterface,
)
def trim_torch_package_prefix_from_typename(typename: str) -> str:
    """Strip a leading ``<torch_package_N>`` component from a qualified type name.

    E.g. "<torch_package_0>.module.Class" -> "module.Class". Names without
    the prefix are returned unchanged.
    """
    if typename.startswith("<torch_package_"):
        # Drop everything up to and including the first dot.
        _, _, typename = typename.partition(".")
    return typename
8,890 | import abc
import json
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Tuple, Type
import torch
import torch.nn as nn
import torch.quantization as quant
import torchrec as trec
import torchrec.quant as trec_quant
from torchrec.modules.embedding_configs import QuantConfig
from torchrec.modules.embedding_modules import (
EmbeddingBagCollectionInterface,
EmbeddingCollectionInterface,
)
class QuantConfig(NamedTuple):
def quantize_embeddings(
    module: nn.Module,
    dtype: torch.dtype,
    inplace: bool,
    additional_qconfig_spec_keys: Optional[List[Type[nn.Module]]] = None,
    additional_mapping: Optional[Dict[Type[nn.Module], Type[nn.Module]]] = None,
    output_dtype: torch.dtype = torch.float,
    per_table_weight_dtype: Optional[Dict[str, torch.dtype]] = None,
) -> nn.Module:
    """Dynamically quantize embedding modules inside ``module``.

    Swaps every ``trec.EmbeddingBagCollection`` (plus any extra module types
    supplied) for its quantized counterpart, with weights stored as ``dtype``
    and activations emitted as ``output_dtype``.

    Args:
        module: model to quantize.
        dtype: target weight dtype (e.g. torch.qint8).
        inplace: mutate ``module`` instead of deep-copying it.
        additional_qconfig_spec_keys: extra module types to quantize with the
            same qconfig.
        additional_mapping: extra {float type: quantized type} swaps.
        output_dtype: dtype of quantized-module outputs.
        per_table_weight_dtype: optional per-table weight dtype overrides.

    Returns:
        The quantized model (same object when ``inplace`` is True).
    """
    qconfig = QuantConfig(
        activation=quant.PlaceholderObserver.with_args(dtype=output_dtype),
        weight=quant.PlaceholderObserver.with_args(dtype=dtype),
        per_table_weight_dtype=per_table_weight_dtype,
    )
    qconfig_spec: Dict[Type[nn.Module], QuantConfig] = {
        trec.EmbeddingBagCollection: qconfig,
    }
    mapping: Dict[Type[nn.Module], Type[nn.Module]] = {
        trec.EmbeddingBagCollection: trec_quant.EmbeddingBagCollection,
    }
    if additional_qconfig_spec_keys is not None:
        for t in additional_qconfig_spec_keys:
            qconfig_spec[t] = qconfig
    if additional_mapping is not None:
        mapping.update(additional_mapping)
    return quant.quantize_dynamic(
        module,
        qconfig_spec=qconfig_spec,
        mapping=mapping,
        inplace=inplace,
    )
8,891 | import abc
import json
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional, Tuple, Type
import torch
import torch.nn as nn
import torch.quantization as quant
import torchrec as trec
import torchrec.quant as trec_quant
from torchrec.modules.embedding_configs import QuantConfig
from torchrec.modules.embedding_modules import (
EmbeddingBagCollectionInterface,
EmbeddingCollectionInterface,
)
def quantize_feature(
module: torch.nn.Module, inputs: Tuple[torch.Tensor, ...]
) -> Tuple[torch.Tensor, ...]:
class PredictModule(nn.Module):
def __init__(
self,
module: nn.Module,
) -> None:
def predict_module(
self,
) -> nn.Module:
def predict_forward(self, batch: Dict[str, torch.Tensor]) -> Any:
def forward(self, batch: Dict[str, torch.Tensor]) -> Any:
def state_dict(
self,
destination: Optional[Dict[str, Any]] = None,
prefix: str = "",
keep_vars: bool = False,
) -> Dict[str, Any]:
class EmbeddingBagCollectionInterface(abc.ABC, nn.Module):
def forward(
self,
features: KeyedJaggedTensor,
) -> KeyedTensor:
def embedding_bag_configs(
self,
) -> List[EmbeddingBagConfig]:
def is_weighted(self) -> bool:
class EmbeddingCollectionInterface(abc.ABC, nn.Module):
def forward(
self,
features: KeyedJaggedTensor,
) -> Dict[str, JaggedTensor]:
def embedding_configs(
self,
) -> List[EmbeddingConfig]:
def need_indices(self) -> bool:
def embedding_dim(self) -> int:
def embedding_names_by_table(self) -> List[List[str]]:
def quantize_dense(
    predict_module: PredictModule,
    dtype: torch.dtype,
    # NOTE(review): mutable default argument; harmless here since it is only
    # read, but an Optional[...] = None default would be safer.
    additional_embedding_module_type: List[Type[nn.Module]] = [],
) -> nn.Module:
    """Lower the dense (non-embedding) parts of a predict module to ``dtype``.

    Every direct child of the inner predict module that is not an embedding
    collection (or one of ``additional_embedding_module_type``) is converted
    to half precision and given a pre-hook that casts incoming float tensors.
    Only ``torch.half`` is supported.
    """
    module = predict_module.predict_module
    reassign = {}
    for name, mod in module.named_children():
        # both fused modules and observed custom modules are
        # swapped as one unit
        if not (
            isinstance(mod, EmbeddingBagCollectionInterface)
            or isinstance(mod, EmbeddingCollectionInterface)
            or any([type(mod) is clazz for clazz in additional_embedding_module_type])
        ):
            if dtype == torch.half:
                new_mod = mod.half()
                # Cast inputs on the way in so callers can keep passing fp32.
                new_mod.register_forward_pre_hook(quantize_feature)
                reassign[name] = new_mod
            else:
                raise NotImplementedError(
                    "only fp16 is supported for non-embedding module lowering"
                )
    for key, value in reassign.items():
        module._modules[key] = value
    return predict_module
8,892 | import abc
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Type, TypeVar, Union
import torch
from torch.package import PackageExporter
from torchrec.inference.modules import PredictFactory
try:
# pyre-fixme[21]: Could not find module `torch_package_importer`.
import torch_package_importer # @manual
except ImportError:
pass
def load_config_text(name: str) -> str:
    """Load text resource ``name`` from the torch.package "__configs" directory."""
    return torch_package_importer.load_text("__configs", name)
8,893 | import abc
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Type, TypeVar, Union
import torch
from torch.package import PackageExporter
from torchrec.inference.modules import PredictFactory
T = TypeVar("T")
try:
# pyre-fixme[21]: Could not find module `torch_package_importer`.
import torch_package_importer # @manual
except ImportError:
pass
def load_pickle_config(name: str, clazz: Type[T]) -> T:
    """Load pickled config ``name`` from the torch.package "__configs" directory.

    Raises AssertionError if the unpickled object is not an instance of
    ``clazz``, so callers get a typed result.
    """
    loaded_obj = torch_package_importer.load_pickle("__configs", name)
    assert isinstance(
        loaded_obj, clazz
    ), f"The loaded config {type(loaded_obj)} is not of type {clazz}"
    return loaded_obj
8,894 | import argparse
import logging
import grpc
import torch
from gen.torchrec.inference import predictor_pb2, predictor_pb2_grpc
from torch.utils.data import DataLoader
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES
from torchrec.datasets.random import RandomRecDataset
from torchrec.datasets.utils import Batch
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]
class RandomRecDataset(IterableDataset[Batch]):
    """
    Random iterable dataset used to generate batches for recommender systems
    (RecSys). Currently produces unweighted sparse features only. TODO: Add
    weighted sparse features.

    Args:
        keys (List[str]): List of feature names for sparse features.
        batch_size (int): batch size.
        hash_size (Optional[int]): Max sparse id value. All sparse IDs will be taken
            modulo this value.
        hash_sizes (Optional[List[int]]): Max sparse id value per feature in keys. Each
            sparse ID will be taken modulo the corresponding value from this argument.
            Note, if this is used, hash_size will be ignored.
        ids_per_feature (int): Number of IDs per sparse feature.
        ids_per_features (int): Number of IDs per sparse feature in each key. Note,
            if this is used, ids_per_feature will be ignored.
        num_dense (int): Number of dense features.
        manual_seed (int): Seed for deterministic behavior.
        num_batches (Optional[int]): Num batches to generate before raising
            StopIteration. None means unbounded.
        num_generated_batches (int): Num batches to cache. If num_batches >
            num_generated_batches, then we will cycle to the first generated batch.
            If this value is negative, batches will be generated on the fly.
        min_ids_per_feature (int): Minimum number of IDs per features.

    Example::

        dataset = RandomRecDataset(
            keys=["feat1", "feat2"],
            batch_size=16,
            hash_size=100_000,
            ids_per_feature=1,
            num_dense=13,
        ),
        example = next(iter(dataset))
    """

    def __init__(
        self,
        keys: List[str],
        batch_size: int,
        hash_size: Optional[int] = 100,
        hash_sizes: Optional[List[int]] = None,
        ids_per_feature: Optional[int] = 2,
        ids_per_features: Optional[List[int]] = None,
        num_dense: int = 50,
        manual_seed: Optional[int] = None,
        num_batches: Optional[int] = None,
        num_generated_batches: int = 10,
        min_ids_per_feature: Optional[int] = None,
        min_ids_per_features: Optional[List[int]] = None,
    ) -> None:
        super().__init__()

        # Normalize the scalar/per-key variants: per-key lists win when given.
        if hash_sizes is None:
            hash_size = hash_size or 100
            hash_sizes = [hash_size] * len(keys)

        assert hash_sizes is not None
        assert len(hash_sizes) == len(
            keys
        ), "length of hash_sizes must be equal to the number of keys"

        if ids_per_features is None:
            ids_per_feature = ids_per_feature or 2
            ids_per_features = [ids_per_feature] * len(keys)

        assert ids_per_features is not None

        if min_ids_per_features is None:
            min_ids_per_feature = (
                min_ids_per_feature
                if min_ids_per_feature is not None
                else ids_per_feature
            )
            assert min_ids_per_feature is not None
            min_ids_per_features = [min_ids_per_feature] * len(keys)

        assert len(ids_per_features) == len(
            keys
        ), "length of ids_per_features must be equal to the number of keys"

        # The generator itself is unbounded (num_batches=None); the limit is
        # applied in __iter__ via islice.
        self.batch_generator = _RandomRecBatch(
            keys=keys,
            batch_size=batch_size,
            hash_sizes=hash_sizes,
            ids_per_features=ids_per_features,
            num_dense=num_dense,
            manual_seed=manual_seed,
            num_batches=None,
            num_generated_batches=num_generated_batches,
            min_ids_per_features=min_ids_per_features,
        )
        # Bug fix: the original `num_batches if not None else sys.maxsize`
        # always evaluated to `num_batches` (`not None` is True), so a None
        # num_batches leaked through and broke __len__/__iter__. Treat None
        # as "effectively unbounded".
        self.num_batches: int = (
            num_batches if num_batches is not None else sys.maxsize
        )

    def __iter__(self) -> Iterator[Batch]:
        """Yield up to ``num_batches`` batches from the cyclic generator."""
        return itertools.islice(iter(self.batch_generator), self.num_batches)

    def __len__(self) -> int:
        """Number of batches this dataset will yield (sys.maxsize if unbounded)."""
        return self.num_batches
class Batch(Pipelineable):
    """A single training batch: dense features, sparse (jagged) features, labels.

    NOTE(review): instances are built with keyword construction below, so the
    original presumably declares this as a @dataclass; the decorator is not
    visible in this excerpt — confirm against the full source.
    """

    # Dense float features; presumably (batch_size, num_dense) — TODO confirm.
    dense_features: torch.Tensor
    # Sparse categorical features packed as a KeyedJaggedTensor.
    sparse_features: KeyedJaggedTensor
    # Supervision labels; presumably (batch_size,) — TODO confirm.
    labels: torch.Tensor

    def to(self, device: torch.device, non_blocking: bool = False) -> "Batch":
        # Returns a new Batch with every member moved to `device`.
        return Batch(
            dense_features=self.dense_features.to(
                device=device, non_blocking=non_blocking
            ),
            sparse_features=self.sparse_features.to(
                device=device, non_blocking=non_blocking
            ),
            labels=self.labels.to(device=device, non_blocking=non_blocking),
        )

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Mark members as in-use on `stream` for CUDA caching-allocator safety.
        self.dense_features.record_stream(stream)
        self.sparse_features.record_stream(stream)
        self.labels.record_stream(stream)

    def pin_memory(self) -> "Batch":
        # Pin host memory to enable fast async host-to-device copies.
        return Batch(
            dense_features=self.dense_features.pin_memory(),
            sparse_features=self.sparse_features.pin_memory(),
            labels=self.labels.pin_memory(),
        )
def create_training_batch(args: argparse.Namespace) -> Batch:
    """Produce one random Batch shaped by CLI args (batch_size, num_embedding_features).

    Builds a single-batch RandomRecDataset over the default DLRM feature names
    and returns its first batch via a zero-worker DataLoader.
    """
    return next(
        iter(
            DataLoader(
                RandomRecDataset(
                    keys=DEFAULT_CAT_NAMES,
                    batch_size=args.batch_size,
                    hash_size=args.num_embedding_features,
                    ids_per_feature=1,
                    num_dense=len(DEFAULT_INT_NAMES),
                ),
                batch_sampler=None,
                pin_memory=False,
                num_workers=0,
            )
        )
    )
8,895 | import argparse
import logging
import grpc
import torch
from gen.torchrec.inference import predictor_pb2, predictor_pb2_grpc
from torch.utils.data import DataLoader
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES
from torchrec.datasets.random import RandomRecDataset
from torchrec.datasets.utils import Batch
class Batch(Pipelineable):
    """A single training batch: dense features, sparse (jagged) features, labels.

    NOTE(review): instances are built with keyword construction below, so the
    original presumably declares this as a @dataclass; the decorator is not
    visible in this excerpt — confirm against the full source.
    """

    # Dense float features; presumably (batch_size, num_dense) — TODO confirm.
    dense_features: torch.Tensor
    # Sparse categorical features packed as a KeyedJaggedTensor.
    sparse_features: KeyedJaggedTensor
    # Supervision labels; presumably (batch_size,) — TODO confirm.
    labels: torch.Tensor

    def to(self, device: torch.device, non_blocking: bool = False) -> "Batch":
        # Returns a new Batch with every member moved to `device`.
        return Batch(
            dense_features=self.dense_features.to(
                device=device, non_blocking=non_blocking
            ),
            sparse_features=self.sparse_features.to(
                device=device, non_blocking=non_blocking
            ),
            labels=self.labels.to(device=device, non_blocking=non_blocking),
        )

    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        # Mark members as in-use on `stream` for CUDA caching-allocator safety.
        self.dense_features.record_stream(stream)
        self.sparse_features.record_stream(stream)
        self.labels.record_stream(stream)

    def pin_memory(self) -> "Batch":
        # Pin host memory to enable fast async host-to-device copies.
        return Batch(
            dense_features=self.dense_features.pin_memory(),
            sparse_features=self.sparse_features.pin_memory(),
            labels=self.labels.pin_memory(),
        )
def create_request(
    batch: Batch, args: argparse.Namespace
) -> predictor_pb2.PredictionRequest:
    """Serialize a Batch into a PredictionRequest protobuf.

    Dense features and sparse values/lengths are sent as raw little-endian
    byte blobs (numpy ``tobytes``); the remaining feature groups are sent as
    empty placeholders with num_features=0.
    """

    def to_bytes(tensor: torch.Tensor) -> bytes:
        # Raw buffer of the tensor's host copy; dtype/shape are implied by args.
        return tensor.cpu().numpy().tobytes()

    float_features = predictor_pb2.FloatFeatures(
        num_features=args.num_float_features,
        values=to_bytes(batch.dense_features),
    )
    id_list_features = predictor_pb2.SparseFeatures(
        num_features=args.num_id_list_features,
        values=to_bytes(batch.sparse_features.values()),
        lengths=to_bytes(batch.sparse_features.lengths()),
    )
    # Unused feature groups are still required by the request schema.
    id_score_list_features = predictor_pb2.SparseFeatures(num_features=0)
    embedding_features = predictor_pb2.FloatFeatures(num_features=0)
    unary_features = predictor_pb2.SparseFeatures(num_features=0)

    return predictor_pb2.PredictionRequest(
        batch_size=args.batch_size,
        float_features=float_features,
        id_list_features=id_list_features,
        id_score_list_features=id_score_list_features,
        embedding_features=embedding_features,
        unary_features=unary_features,
    )
8,896 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_calibration(
    calibration_num: torch.Tensor, calibration_denom: torch.Tensor
) -> torch.Tensor:
    """Elementwise calibration ratio num/denom as double, 0 where denom <= 0.

    The raw ratio is computed unconditionally (matching the original); the
    ``torch.where`` mask replaces results for non-positive denominators.
    """
    ratio = calibration_num / calibration_denom
    guarded = torch.where(calibration_denom <= 0.0, 0.0, ratio)
    return guarded.double()
8,897 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
CALIBRATION_NUM = "calibration_num"
CALIBRATION_DENOM = "calibration_denom"
def get_calibration_states(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> Dict[str, torch.Tensor]:
    """Per-task weighted sums that feed the calibration metric.

    Numerator: weighted sum of predictions; denominator: weighted sum of
    labels — both reduced over the last (example) dimension.
    """
    weighted_predictions = torch.sum(predictions * weights, dim=-1)
    weighted_labels = torch.sum(labels * weights, dim=-1)
    return {
        CALIBRATION_NUM: weighted_predictions,
        CALIBRATION_DENOM: weighted_labels,
    }
8,898 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _validate_model_outputs(
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
session_ids: torch.Tensor,
) -> None:
assert predictions.shape == labels.shape == weights.shape == session_ids.shape
assert (
predictions.dim() == 2 and predictions.shape[0] > 0 and predictions.shape[1] > 0
) | null |
8,899 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
SUM_NDCG = "sum_ndcg"
NUM_SESSIONS = "num_sessions"
def session_ids_to_lengths(
    session_ids: torch.Tensor, device: torch.device
) -> torch.Tensor:
    """
    Run-length encode consecutive equal session ids into a lengths tensor.
    Used by all session-wise loss computations.

    Args:
        session_ids: sequence of session ids, e.g. ["1", "2", "2"]
        device: device for the resulting lengths tensor

    Returns:
        session_lengths (torch.Tensor): int tensor of run lengths,
        e.g. tensor([1, 2]). For empty input, ``torch.zeros(1)`` is returned
        (float, default device — preserved existing behavior).
    """
    if len(session_ids) == 0:
        return torch.zeros(1)
    run_lengths: List[int] = [1]
    # Compare each element with its successor; equal -> extend current run.
    for prev, cur in zip(session_ids, session_ids[1:]):
        if prev == cur:
            run_lengths[-1] += 1
        else:
            run_lengths.append(1)
    return torch.tensor(run_lengths, dtype=torch.int, device=device)
def compute_lambda_ndcg(
    prediction: torch.Tensor,
    label: torch.Tensor,
    weight: torch.Tensor,
    session_lengths: torch.Tensor,
    use_exp_gain: bool,
) -> torch.Tensor:
    """
    Compute the sum lambda NDCG loss from a group of sessions.

    Args:
        prediction(torch.Tensor): a tensor of predicted scores
        label(torch.Tensor): a tensor of labels
        weight(torch.Tensor): a tensor of weights
        session_lengths(torch.Tensor): a tensor of session lengths converted from
            session_ids
        use_exp_gain(bool): whether to use exponential gain or not

    Returns:
        sum_loss(torch.Tensor): a tensor of the sum of the ndcg loss
    """
    loss = torch.zeros_like(session_lengths, dtype=torch.double)
    cur_index = int(0)
    # Walk contiguous session segments; each session's loss is computed by
    # compute_lambda_ndcg_by_session (defined elsewhere in this module).
    for i, session_length in enumerate(session_lengths):
        data_indexes = torch.arange(
            cur_index,
            cur_index + int(session_length),
            dtype=torch.long,
            device=prediction.device,
        )
        session_loss = compute_lambda_ndcg_by_session(
            prediction=torch.take(prediction, data_indexes),
            label=torch.take(label, data_indexes),
            use_exp_gain=use_exp_gain,
        )
        # A session's weight is the max of its element weights.
        loss[i] = session_loss * torch.max(torch.take(weight, data_indexes))
        cur_index += session_length
    return torch.sum(loss)
def get_ndcg_states(
    labels: torch.Tensor,
    predictions: torch.Tensor,
    weights: torch.Tensor,
    session_ids: torch.Tensor,
    exponential_gain: bool,
) -> Dict[str, torch.Tensor]:
    """Compute per-task NDCG accumulator states (SUM_NDCG, NUM_SESSIONS).

    NOTE(review): NUM_SESSIONS is taken from the *last* task's
    ``session_lengths``, which is only correct if every task has the same
    session count — confirm with callers. Also ``labels.shape[0] == 0``
    would raise NameError on ``session_lengths``; presumably n_tasks >= 1
    is guaranteed upstream (see _validate_model_outputs).
    """
    n_tasks = labels.shape[0]
    sum_ndcg = torch.zeros(n_tasks, dtype=torch.double).to(labels.device)
    for i in range(n_tasks):
        session_lengths = session_ids_to_lengths(
            session_ids=session_ids[i], device=predictions.device
        )
        sum_ndcg[i] = compute_lambda_ndcg(
            prediction=predictions[i],
            label=labels[i],
            weight=weights[i],
            session_lengths=session_lengths,
            use_exp_gain=exponential_gain,
        )
    return {SUM_NDCG: sum_ndcg, NUM_SESSIONS: session_lengths.shape[-1]}
8,900 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_ndcg(sum_ndcg: torch.Tensor, num_sessions: torch.Tensor) -> torch.Tensor:
    """Return the mean NDCG: accumulated NDCG sum divided by the session count."""
    return torch.div(sum_ndcg, num_sessions)
8,901 | from enum import Enum
from typing import Optional
class MetricNameBase(StrValueMixin, Enum):
    """Base class for metric-name enums; StrValueMixin makes members format as their string values."""
    pass
class MetricNamespaceBase(StrValueMixin, Enum):
    """Base class for metric-namespace enums."""
    pass
class MetricPrefix(StrValueMixin, Enum):
    """Prefix selecting the default, lifetime, or sliding-window variant of a metric key."""
    DEFAULT = ""
    LIFETIME = "lifetime_"
    WINDOW = "window_"
The provided code snippet includes the necessary dependencies for implementing the `task_wildcard_metrics_pattern` function. Write a Python function `def task_wildcard_metrics_pattern( namespace: MetricNamespaceBase, metric_name: MetricNameBase, metric_prefix: MetricPrefix = MetricPrefix.DEFAULT, ) -> str` to solve the following problem:
Get the re (regular expression) pattern that matches a set of metrics regardless of task name. The motivation for this API comes from past bugs in which tools hard-coded the patterns; when the naming changed, tests broke.
Here is the function:
def task_wildcard_metrics_pattern(
    namespace: MetricNamespaceBase,
    metric_name: MetricNameBase,
    metric_prefix: MetricPrefix = MetricPrefix.DEFAULT,
) -> str:
    r"""Build a regex matching a metric key for *any* task name.

    Tools used to hard-code metric-key patterns and broke when the naming
    scheme changed; deriving the pattern here keeps them in sync.
    """
    pattern = rf"{namespace}-.+\|{metric_prefix}{metric_name}"
    return pattern
8,902 | from enum import Enum
from typing import Optional
class MetricNameBase(StrValueMixin, Enum):
pass
class MetricNamespaceBase(StrValueMixin, Enum):
pass
class MetricPrefix(StrValueMixin, Enum):
DEFAULT = ""
LIFETIME = "lifetime_"
WINDOW = "window_"
def compose_metric_namespace(
    namespace: MetricNamespaceBase,
    task_name: str,
) -> str:
    r"""Join a metric namespace and a task name into the full metric namespace."""
    return "-".join((f"{namespace}", task_name))
def compose_customized_metric_key(
    namespace: str,
    metric_name: str,
    description: Optional[str] = None,
) -> str:
    r"""Join an unrestricted (string) namespace and metric name into a metric key.

    Only compose_metric_key() and state metrics — whose keys are not known
    ahead of time — should call this directly.
    """
    suffix = description if description else ""
    return f"{namespace}|{metric_name}{suffix}"
The provided code snippet includes the necessary dependencies for implementing the `compose_metric_key` function. Write a Python function `def compose_metric_key( namespace: MetricNamespaceBase, task_name: str, metric_name: MetricNameBase, metric_prefix: MetricPrefix = MetricPrefix.DEFAULT, description: Optional[str] = None, ) -> str` to solve the following problem:
Get the metric key based on the input parameters.
Here is the function:
def compose_metric_key(
    namespace: MetricNamespaceBase,
    task_name: str,
    metric_name: MetricNameBase,
    metric_prefix: MetricPrefix = MetricPrefix.DEFAULT,
    description: Optional[str] = None,
) -> str:
    r"""Build the canonical metric key for one task/metric combination."""
    full_namespace = compose_metric_namespace(namespace, task_name)
    prefixed_name = f"{metric_prefix}{metric_name}"
    return compose_customized_metric_key(full_namespace, prefixed_name, description)
8,903 | from typing import Any, Dict, List, Optional, Type
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _compute_cross_entropy_norm(
mean_label: torch.Tensor,
pos_labels: torch.Tensor,
neg_labels: torch.Tensor,
eta: float,
) -> torch.Tensor:
mean_label = mean_label.double()
mean_label.clamp_(min=eta, max=1 - eta)
return -pos_labels * torch.log2(mean_label) - neg_labels * torch.log2(
1.0 - mean_label
)
def compute_ne_helper(
    ce_sum: torch.Tensor,
    weighted_num_samples: torch.Tensor,
    pos_labels: torch.Tensor,
    neg_labels: torch.Tensor,
    eta: float,
) -> torch.Tensor:
    """Normalized entropy: observed cross-entropy over the mean-label baseline."""
    baseline = _compute_cross_entropy_norm(
        pos_labels / weighted_num_samples, pos_labels, neg_labels, eta
    )
    return ce_sum / baseline
8,904 | from typing import Any, Dict, List, Optional, Type
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_logloss(
    ce_sum: torch.Tensor,
    pos_labels: torch.Tensor,
    neg_labels: torch.Tensor,
    eta: float,
) -> torch.Tensor:
    """Mean log-loss: total cross-entropy over the weighted label count.

    The denominator is clamped to at least ``eta`` so an all-zero-weight
    batch does not divide by zero. Broadcasting handles per-task tensors.
    """
    denominator = torch.clamp(pos_labels + neg_labels, min=eta)
    return ce_sum / denominator
8,905 | from typing import Any, Dict, List, Optional, Type
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _compute_cross_entropy_norm(
mean_label: torch.Tensor,
pos_labels: torch.Tensor,
neg_labels: torch.Tensor,
eta: float,
) -> torch.Tensor:
def compute_ne(
    ce_sum: torch.Tensor,
    weighted_num_samples: torch.Tensor,
    pos_labels: torch.Tensor,
    neg_labels: torch.Tensor,
    num_groups: int,
    eta: float,
) -> torch.Tensor:
    """Per-group normalized entropy; returns a tensor of size (num_groups,)."""
    result_ne = torch.zeros(num_groups)
    for group_idx in range(num_groups):
        baseline = _compute_cross_entropy_norm(
            pos_labels[group_idx] / weighted_num_samples[group_idx],
            pos_labels[group_idx],
            neg_labels[group_idx],
            eta,
        )
        result_ne[group_idx] = ce_sum[group_idx] / baseline
    return result_ne
8,906 | from typing import Any, Dict, List, Optional, Type
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_cross_entropy(
    labels: torch.Tensor,
    predictions: torch.Tensor,
    weights: torch.Tensor,
    eta: float,
) -> torch.Tensor:
    """Weighted element-wise binary cross-entropy in bits (log base 2).

    Args:
        labels: ground-truth labels in [0, 1].
        predictions: predicted probabilities; clamped to [eta, 1 - eta] so
            the logarithms stay finite.
        weights: per-example weights.
        eta: clamping epsilon.

    Returns:
        torch.Tensor: per-example weighted cross-entropy values (float64).
    """
    # Out-of-place clamp: `.double()` returns the *same* tensor when the
    # input is already float64, so the previous in-place `clamp_` could
    # silently mutate the caller's predictions.
    predictions = predictions.double().clamp(min=eta, max=1 - eta)
    cross_entropy = -weights * labels * torch.log2(predictions) - weights * (
        1.0 - labels
    ) * torch.log2(1.0 - predictions)
    return cross_entropy
def get_segemented_ne_states(
    labels: torch.Tensor,
    predictions: torch.Tensor,
    weights: torch.Tensor,
    grouping_keys: torch.Tensor,
    eta: float,
    num_groups: int,
) -> Dict[str, torch.Tensor]:
    """Accumulate the per-group NE states for one batch.

    ``grouping_keys`` assigns each example to a group index; groups absent
    from the batch keep zero-valued states. Every returned tensor has size
    (num_groups,).
    """
    cross_entropy = torch.zeros(num_groups).to(labels.device)
    weighted_num_samples = torch.zeros(num_groups).to(labels.device)
    pos_labels = torch.zeros(num_groups).to(labels.device)
    neg_labels = torch.zeros(num_groups).to(labels.device)
    for group in torch.unique(grouping_keys):
        in_group = grouping_keys == group
        labels_g = labels[in_group]
        predictions_g = predictions[in_group]
        weights_g = weights[in_group]
        cross_entropy[group] = torch.sum(
            compute_cross_entropy(
                labels=labels_g,
                predictions=predictions_g,
                weights=weights_g,
                eta=eta,
            ),
            dim=-1,
        ).item()
        weighted_num_samples[group] = torch.sum(weights_g, dim=-1).item()
        pos_labels[group] = torch.sum(weights_g * labels_g, dim=-1).item()
        neg_labels[group] = torch.sum(weights_g * (1.0 - labels_g), dim=-1).item()
    return {
        "cross_entropy_sum": cross_entropy,
        "weighted_num_samples": weighted_num_samples,
        "pos_labels": pos_labels,
        "neg_labels": neg_labels,
    }
8,907 | from typing import Any, Dict, List, Optional, Type
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _state_reduction_sum(state: torch.Tensor) -> torch.Tensor:
return state.sum(dim=0) | null |
8,908 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_multiclass_recall_at_k(
    tp_at_k: torch.Tensor,
    total_weights: torch.Tensor,
) -> torch.Tensor:
    """Recall@k per class: true positives at k over the total example weight."""
    denominator = total_weights.unsqueeze(-1)
    return tp_at_k / denominator
8,909 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_true_positives_at_k(
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
    n_classes: int,
) -> torch.Tensor:
    """
    Return the weighted cumulative true positives at each k.

    At k = 0 an example counts as a true positive when its top-ranked class
    equals its label; at k = 1 when either of the top two ranked classes
    does, and so on. Column k therefore holds the weighted number of
    examples whose label appears within the top k + 1 predictions.

    Args:
        predictions (Tensor): class scores, shape (n_sample, n_class) or (n_task, n_sample, n_class).
        labels (Tensor): ground-truth class ids, shape (n_sample,) or (n_task, n_sample).
        weights (Tensor): per-example weights, shape (n_sample,) or (n_task, n_sample).
        n_classes (int): number of classes.

    Returns:
        Tensor: cumulative weighted true positives, shape (n_class,) or (n_task, n_class).

    Example:
        >>> predictions = torch.tensor([[0.9, 0.1, 0, 0, 0], [0.1, 0.2, 0.25, 0.15, 0.3], [0, 1.0, 0, 0, 0], [0, 0, 0.2, 0.7, 0.1]])
        >>> labels = torch.tensor([0, 3, 1, 2])
        >>> weights = torch.tensor([1, 0.25, 0.5, 0.25])
        >>> compute_true_positives_at_k(predictions, labels, weights, 5)
        tensor([1.5000, 1.7500, 1.7500, 2.0000, 2.0000])
    """
    ranked_classes = torch.argsort(predictions, dim=-1, descending=True)
    if predictions.ndim == 2:
        running = torch.zeros(1, device=predictions.device)
    else:
        running = torch.zeros(predictions.shape[0], 1, device=predictions.device)
    columns = []
    for k in range(n_classes):
        hits = torch.unsqueeze(labels, dim=-1) == ranked_classes[..., k : k + 1]
        running += (hits * torch.unsqueeze(weights, dim=-1)).sum(dim=-2)
        # Snapshot the running total; `running` keeps accumulating in place.
        columns.append(running.clone())
    if not columns:
        return torch.tensor([], device=predictions.device)
    return torch.cat(columns, dim=-1)
def get_multiclass_recall_states(
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
    n_classes: int,
) -> Dict[str, torch.Tensor]:
    """Accumulator states for multiclass recall@k: tp counts and total weight."""
    return {
        "tp_at_k": compute_true_positives_at_k(predictions, labels, weights, n_classes),
        "total_weights": torch.sum(weights, dim=-1),
    }
8,910 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_accuracy(
    accuracy_sum: torch.Tensor, weighted_num_samples: torch.Tensor
) -> torch.Tensor:
    """Weighted accuracy; tasks with zero total weight report 0 instead of NaN/inf."""
    ratio = accuracy_sum / weighted_num_samples
    zero = torch.zeros_like(ratio)
    return torch.where(weighted_num_samples == 0.0, zero, ratio).double()
8,911 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_accuracy_sum(
    labels: torch.Tensor,
    predictions: torch.Tensor,
    weights: torch.Tensor,
    threshold: float = 0.5,
) -> torch.Tensor:
    """Weighted count of examples whose thresholded prediction matches the label."""
    hard_predictions = predictions.double() >= threshold
    correct = hard_predictions == labels
    return torch.sum(weights * correct, dim=-1)
def get_accuracy_states(
    labels: torch.Tensor,
    predictions: torch.Tensor,
    weights: Optional[torch.Tensor],
    threshold: float = 0.5,
) -> Dict[str, torch.Tensor]:
    """Accumulator states for accuracy; missing weights default to all ones."""
    effective_weights = torch.ones_like(predictions) if weights is None else weights
    return {
        "accuracy_sum": compute_accuracy_sum(
            labels, predictions, effective_weights, threshold
        ),
        "weighted_num_samples": torch.sum(effective_weights, dim=-1),
    }
8,912 | import abc
import logging
import time
from typing import Any, Dict, List, Optional, Type, Union
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.profiler import record_function
from torchrec.metrics.accuracy import AccuracyMetric
from torchrec.metrics.auc import AUCMetric
from torchrec.metrics.auprc import AUPRCMetric
from torchrec.metrics.calibration import CalibrationMetric
from torchrec.metrics.ctr import CTRMetric
from torchrec.metrics.mae import MAEMetric
from torchrec.metrics.metrics_config import (
MetricsConfig,
RecMetricEnum,
RecMetricEnumBase,
RecTaskInfo,
StateMetricEnum,
)
from torchrec.metrics.metrics_namespace import (
compose_customized_metric_key,
compose_metric_namespace,
MetricNamespace,
)
from torchrec.metrics.model_utils import parse_task_model_outputs
from torchrec.metrics.mse import MSEMetric
from torchrec.metrics.multiclass_recall import MulticlassRecallMetric
from torchrec.metrics.ndcg import NDCGMetric
from torchrec.metrics.ne import NEMetric
from torchrec.metrics.rauc import RAUCMetric
from torchrec.metrics.rec_metric import RecMetric, RecMetricList
from torchrec.metrics.recall_session import RecallSessionMetric
from torchrec.metrics.scalar import ScalarMetric
from torchrec.metrics.segmented_ne import SegmentedNEMetric
from torchrec.metrics.throughput import ThroughputMetric
from torchrec.metrics.tower_qps import TowerQPSMetric
from torchrec.metrics.weighted_avg import WeightedAvgMetric
from torchrec.metrics.xauc import XAUCMetric
class StateMetric(abc.ABC):
    """
    The interface of state metrics for a component (e.g., optimizer, qat).
    """
    # NOTE(review): presumably meant to be abstract (@abc.abstractmethod);
    # no decorator is present here — confirm against the full file.
    def get_metrics(self) -> Dict[str, MetricValue]:
        """Return the component's current metric values keyed by metric name."""
        pass
class RecMetricModule(nn.Module):
    r"""
    For the current recommendation models, we assume there will be three
    types of metrics, 1.) RecMetric, 2.) Throughput, 3.) StateMetric.
    RecMetric is a metric that is computed from the model outputs (labels,
    predictions, weights).
    Throughput is being a standalone type as its unique characteristic, time-based.
    StateMetric is a metric that is computed based on a model component
    (e.g., Optimizer) internal logic.
    Args:
        batch_size (int): batch size used by this trainer.
        world_size (int): the number of trainers.
        rec_tasks (Optional[List[RecTaskInfo]]): the information of the model tasks.
        rec_metrics (Optional[RecMetricList]): the list of the RecMetrics.
        throughput_metric (Optional[ThroughputMetric]): the ThroughputMetric.
        state_metrics (Optional[Dict[str, StateMetric]]): the dict of StateMetrics.
        compute_interval_steps (int): the intervals between two compute calls in the unit of batch number
        min_compute_interval (float): minimum wall-clock seconds between computes; used
            with max_compute_interval to auto-tune compute_interval_steps.
        max_compute_interval (float): maximum wall-clock seconds between computes.
        memory_usage_limit_mb (float): the memory usage limit for OOM check
    Call Args:
        Not supported.
    Returns:
        Not supported.
    Example:
        >>> config = dataclasses.replace(
        >>>    DefaultMetricsConfig, state_metrics=[StateMetricEnum.OPTIMIZERS]
        >>> )
        >>>
        >>> metricModule = generate_metric_module(
        >>>    metric_class=RecMetricModule,
        >>>    metrics_config=config,
        >>>    batch_size=128,
        >>>    world_size=64,
        >>>    my_rank=0,
        >>>    state_metrics_mapping={StateMetricEnum.OPTIMIZERS: mock_optimizer},
        >>>    device=torch.device("cpu"),
        >>>    pg=dist.new_group([0]),
        >>> )
    """
    batch_size: int
    world_size: int
    rec_tasks: List[RecTaskInfo]
    rec_metrics: RecMetricList
    throughput_metric: Optional[ThroughputMetric]
    state_metrics: Dict[str, StateMetric]
    memory_usage_limit_mb: float
    memory_usage_mb_avg: float  # running average of memory usage over compute() calls
    oom_count: int  # number of times the memory limit was exceeded
    compute_count: int  # number of compute() calls so far
    last_compute_time: float  # wall-clock time of previous compute(); -1.0 before the first
    # TODO(chienchin): Reorganize the argument to directly accept a MetricsConfig.
    def __init__(
        self,
        batch_size: int,
        world_size: int,
        rec_tasks: Optional[List[RecTaskInfo]] = None,
        rec_metrics: Optional[RecMetricList] = None,
        throughput_metric: Optional[ThroughputMetric] = None,
        state_metrics: Optional[Dict[str, StateMetric]] = None,
        compute_interval_steps: int = 100,
        min_compute_interval: float = 0.0,
        max_compute_interval: float = float("inf"),
        memory_usage_limit_mb: float = 512,
    ) -> None:
        super().__init__()
        self.rec_tasks = rec_tasks if rec_tasks else []
        self.rec_metrics = rec_metrics if rec_metrics else RecMetricList([])
        self.throughput_metric = throughput_metric
        self.state_metrics = state_metrics if state_metrics else {}
        self.trained_batches: int = 0
        self.batch_size = batch_size
        self.world_size = world_size
        self.memory_usage_limit_mb = memory_usage_limit_mb
        self.memory_usage_mb_avg = 0.0
        self.oom_count = 0
        self.compute_count = 0
        self.compute_interval_steps = compute_interval_steps
        self.min_compute_interval = min_compute_interval
        self.max_compute_interval = max_compute_interval
        # Both bounds unset -> disable interval auto-tuning (-1.0 sentinel).
        if self.min_compute_interval == 0.0 and self.max_compute_interval == float(
            "inf"
        ):
            self.min_compute_interval = -1.0
            self.max_compute_interval = -1.0
        else:
            if self.max_compute_interval <= 0.0:
                raise ValueError("Max compute interval should not be smaller than 0.0.")
            if self.min_compute_interval < 0.0:
                raise ValueError("Min compute interval should not be smaller than 0.0.")
        # Non-persistent buffer used to all-reduce the auto-tuned interval so
        # every rank ends up computing on the same step.
        self.register_buffer(
            "_compute_interval_steps",
            torch.zeros(1, dtype=torch.int32),
            persistent=False,
        )
        self.last_compute_time = -1.0
    def get_memory_usage(self) -> int:
        r"""Total memory of unique RecMetric tensors in bytes"""
        # A dict keyed by tensor dedupes tensors shared between metrics.
        total = {}
        for metric in self.rec_metrics.rec_metrics:
            total.update(metric.get_memory_usage())
        return sum(total.values())
    def check_memory_usage(self, compute_count: int) -> None:
        """Warn when metric-state memory exceeds the limit or grows abnormally,
        and fold the current usage into the running average."""
        memory_usage_mb = self.get_memory_usage() / (10**6)
        if memory_usage_mb > self.memory_usage_limit_mb:
            self.oom_count += 1
            # NOTE(review): message has formatting typos ("limit{...}" missing a
            # space and "the f{...}th" has a stray "f") — left as-is here.
            logger.warning(
                f"MetricModule is using {memory_usage_mb}MB. "
                f"This is larger than the limit{self.memory_usage_limit_mb}MB. "
                f"This is the f{self.oom_count}th OOM."
            )
        if (
            compute_count > MEMORY_AVG_WARNING_WARMUP
            and memory_usage_mb
            > self.memory_usage_mb_avg * ((100 + MEMORY_AVG_WARNING_PERCENTAGE) / 100)
        ):
            logger.warning(
                f"MetricsModule is using more than {MEMORY_AVG_WARNING_PERCENTAGE}% of "
                f"the average memory usage. Current usage: {memory_usage_mb}MB."
            )
        # Incremental running-average update.
        self.memory_usage_mb_avg = (
            self.memory_usage_mb_avg * (compute_count - 1) + memory_usage_mb
        ) / compute_count
    def _update_rec_metrics(
        self, model_out: Dict[str, torch.Tensor], **kwargs: Any
    ) -> None:
        r"""the internal update function to parse the model output.
        Override this function if the implementation cannot support
        the model output format.
        """
        if self.rec_metrics and self.rec_tasks:
            labels, predictions, weights, required_inputs = parse_task_model_outputs(
                self.rec_tasks, model_out, self.get_required_inputs()
            )
            if required_inputs:
                kwargs["required_inputs"] = required_inputs
            self.rec_metrics.update(
                predictions=predictions,
                labels=labels,
                weights=weights,
                **kwargs,
            )
    def update(self, model_out: Dict[str, torch.Tensor], **kwargs: Any) -> None:
        r"""update() is called per batch, usually right after forward() to
        update the local states of metrics based on the model_output.
        Throughput.update() is also called due to the implementation sliding window
        throughput.
        """
        with record_function("## RecMetricModule:update ##"):
            self._update_rec_metrics(model_out, **kwargs)
            if self.throughput_metric:
                self.throughput_metric.update()
            self.trained_batches += 1
    def _adjust_compute_interval(self) -> None:
        """
        Adjust the compute interval (in batches) based on the first two time
        elapsed between the first two compute().
        """
        if self.last_compute_time > 0 and self.min_compute_interval >= 0:
            now = time.time()
            interval = now - self.last_compute_time
            if not (self.max_compute_interval >= interval >= self.min_compute_interval):
                per_step_time = interval / self.compute_interval_steps
                assert (
                    self.max_compute_interval != float("inf")
                    or self.min_compute_interval != 0.0
                ), (
                    "The compute time interval is "
                    f"[{self.max_compute_interval}, {self.min_compute_interval}]. "
                    "Something is not correct of this range. __init__() should have "
                    "captured this earlier."
                )
                if self.max_compute_interval == float("inf"):
                    # The `per_step_time` is not perfectly measured -- each
                    # step training time can vary. Since max_compute_interval
                    # is set to infinite, adding 1.0 to the `min_compute_interval`
                    # increase the chance that the final compute interval is
                    # indeed larger than `min_compute_interval`.
                    self._compute_interval_steps[0] = int(
                        (self.min_compute_interval + 1.0) / per_step_time
                    )
                elif self.min_compute_interval == 0.0:
                    # Similar to the above if, subtracting 1.0 from
                    # `max_compute_interval` to compute `_compute_interval_steps`
                    # can increase the chance that the final compute interval
                    # is indeed smaller than `max_compute_interval`
                    offset = 0.0 if self.max_compute_interval <= 1.0 else 1.0
                    self._compute_interval_steps[0] = int(
                        (self.max_compute_interval - offset) / per_step_time
                    )
                else:
                    # Both bounds set: target the midpoint of the window.
                    self._compute_interval_steps[0] = int(
                        (self.max_compute_interval + self.min_compute_interval)
                        / 2
                        / per_step_time
                    )
                # Agree on the largest interval across ranks so every trainer
                # computes on the same batch.
                dist.all_reduce(self._compute_interval_steps, op=dist.ReduceOp.MAX)
                self.compute_interval_steps = int(self._compute_interval_steps.item())
                # Tune only once: reset the sentinels afterwards.
                self.min_compute_interval = -1.0
                self.max_compute_interval = -1.0
        self.last_compute_time = time.time()
    def should_compute(self) -> bool:
        """True when the trained-batch count hits the compute interval."""
        return self.trained_batches % self.compute_interval_steps == 0
    def compute(self) -> Dict[str, MetricValue]:
        r"""compute() is called when the global metrics are required, usually
        right before logging the metrics results to the data sink.
        """
        self.compute_count += 1
        self.check_memory_usage(self.compute_count)
        with record_function("## RecMetricModule:compute ##"):
            ret: Dict[str, MetricValue] = {}
            if self.rec_metrics:
                self._adjust_compute_interval()
                ret.update(self.rec_metrics.compute())
            if self.throughput_metric:
                ret.update(self.throughput_metric.compute())
            if self.state_metrics:
                for namespace, component in self.state_metrics.items():
                    ret.update(
                        {
                            f"{compose_customized_metric_key(namespace, metric_name)}": metric_value
                            for metric_name, metric_value in component.get_metrics().items()
                        }
                    )
        return ret
    def local_compute(self) -> Dict[str, MetricValue]:
        r"""local_compute() is called when per-trainer metrics are required.
        It can be used for debugging. Currently only rec_metrics is supported.
        """
        ret: Dict[str, MetricValue] = {}
        if self.rec_metrics:
            ret.update(self.rec_metrics.local_compute())
        return ret
    def sync(self) -> None:
        # Sync the distributed metric states across ranks.
        self.rec_metrics.sync()
    def unsync(self) -> None:
        # Revert to per-rank (local) metric states.
        self.rec_metrics.unsync()
    def reset(self) -> None:
        # Clear all accumulated metric states.
        self.rec_metrics.reset()
    def get_required_inputs(self) -> Optional[List[str]]:
        # Extra model-output keys the configured metrics need, if any.
        return self.rec_metrics.get_required_inputs()
def _generate_rec_metrics(
    metrics_config: MetricsConfig,
    world_size: int,
    my_rank: int,
    batch_size: int,
    process_group: Optional[dist.ProcessGroup] = None,
) -> RecMetricList:
    """Instantiate every RecMetric declared in ``metrics_config``.

    Each metric definition must provide exactly one of ``rec_tasks`` or
    ``rec_task_indices`` (indices resolve into ``metrics_config.rec_tasks``).

    Raises:
        ValueError: if a metric definition specifies both or neither.
    """
    rec_metrics = []
    for metric_enum, metric_def in metrics_config.rec_metrics.items():
        extra_kwargs: Dict[str, Any] = (
            metric_def.arguments
            if metric_def and metric_def.arguments is not None
            else {}
        )
        if metric_def.rec_tasks and metric_def.rec_task_indices:
            raise ValueError(
                "Only one of RecMetricDef.rec_tasks and RecMetricDef.rec_task_indices "
                "should be specified."
            )
        if metric_def.rec_tasks:
            rec_tasks: List[RecTaskInfo] = metric_def.rec_tasks
        elif metric_def.rec_task_indices:
            rec_tasks = [
                metrics_config.rec_tasks[idx] for idx in metric_def.rec_task_indices
            ]
        else:
            raise ValueError(
                "One of RecMetricDef.rec_tasks and RecMetricDef.rec_task_indices "
                "should be a non-empty list"
            )
        metric_cls = REC_METRICS_MAPPING[metric_enum]
        rec_metrics.append(
            metric_cls(
                world_size=world_size,
                my_rank=my_rank,
                batch_size=batch_size,
                tasks=rec_tasks,
                compute_mode=metrics_config.rec_compute_mode,
                window_size=metric_def.window_size,
                fused_update_limit=metrics_config.fused_update_limit,
                compute_on_all_ranks=metrics_config.compute_on_all_ranks,
                should_validate_update=metrics_config.should_validate_update,
                process_group=process_group,
                **extra_kwargs,
            )
        )
    return RecMetricList(rec_metrics)
def _generate_state_metrics(
    metrics_config: MetricsConfig,
    state_metrics_mapping: Dict[StateMetricEnum, StateMetric],
) -> Dict[str, StateMetric]:
    """Map each configured StateMetric to its fully-qualified namespace key.

    Raises:
        ValueError: if a configured enum has no registered namespace.
    """
    state_metrics: Dict[str, StateMetric] = {}
    for metric_enum in metrics_config.state_metrics:
        metric_namespace: Optional[MetricNamespace] = (
            STATE_METRICS_NAMESPACE_MAPPING.get(metric_enum, None)
        )
        if metric_namespace is None:
            raise ValueError(f"Unknown StateMetrics {metric_enum}")
        namespace_key = compose_metric_namespace(
            metric_namespace, str(metric_namespace)
        )
        state_metrics[namespace_key] = state_metrics_mapping[metric_enum]
    return state_metrics
class StateMetricEnum(StrValueMixin, Enum):
    """Components whose internal state can be exported as metrics."""
    OPTIMIZERS = "optimizers"
    MODEL_CONFIGURATOR = "model_configurator"
class MetricsConfig:
    """The dataclass that lists all the configurations to be used by the
    MetricModule.
    Args:
        rec_tasks (List[RecTaskInfo]): the list of RecTasks that will be shared
            by all the metrics.
        rec_metrics (Dict[RecMetricEnum, RecMetricDef]): the configurations of
            the RecMetric objects.
        throughput_metric: (Optional[ThroughputDef]): the configurations of the ThroughputMetric
            object.
        rec_compute_mode (RecComputeMode): the computation mode for the
            RecMetric objects. This will be applied to all the RecMetric
            objects defined by ``rec_metrics``.
        fused_update_limit (int): the maximum updates that can be fused. The
            default is 0 which means no fusion. Setting this field to 1 is
            logically identical to 0. If this field is larger than 1,
            RecMetrics will perform the actual update every ``update()`` calls.
        state_metrics (List[StateMetricEnum]): indicates what state_metrics
            will be enabled.
        compute_interval_steps(int): computing metrics every step can be
            expensive. This field is used to specify the computation interval
            in batch count. `should_compute()` return True if the current
            trained batch count match the setting.
        min_compute_interval(float): minimum compute interval in seconds.
            If this value is set (should be larger than 0), MetricModule will
            adjust `compute_interval_steps` after the second compute() is called.
        max_compute_interval(float): maximum compute interval in seconds.
            If this value is set (should be larger than 0), MetricModule will
            adjust `compute_interval_steps` after the second compute() is called.
        compute_on_all_ranks (bool): whether to compute rec metrics on all ranks.
            If False, only compute on rank 0.
        should_validate_update (bool): whether to check the inputs of update() and skip
            update if the inputs are invalid. Invalid inputs include the case where all
            examples have 0 weights for a batch.
    """
    # NOTE(review): fields use dataclasses.field(...), so a @dataclass
    # decorator is presumably applied in the full file — confirm.
    rec_tasks: List[RecTaskInfo] = field(default_factory=list)
    rec_metrics: Dict[RecMetricEnum, RecMetricDef] = field(default_factory=dict)
    throughput_metric: Optional[ThroughputDef] = None
    rec_compute_mode: RecComputeMode = RecComputeMode.UNFUSED_TASKS_COMPUTATION
    fused_update_limit: int = 0
    state_metrics: List[StateMetricEnum] = field(default_factory=list)
    compute_interval_steps: int = 100
    min_compute_interval: float = 0.0
    max_compute_interval: float = float("inf")
    compute_on_all_ranks: bool = False
    should_validate_update: bool = False
class ThroughputMetric(nn.Module):
    """
    The module to calculate throughput. Throughput is defined as the trained examples
    across all ranks per second. For example, if the batch size on each rank is 512
    and there are 32 ranks, throughput is 512 * 32 / time_to_train_one_step.
    Args:
        batch_size (int): batch size for the trainer
        world_size (int): the number of trainers
        window_seconds (int): Throughput use time-based window for window_throughput. This
            argument specify the window size in seconds.
        warmup_steps (int): the number of warmup batches. No Throughput will be calculated
            before the warmup batches count reached.
    Call Args:
        Not supported.
    Returns:
        Not supported.
    Example::
        throughput = ThroughputMetric(
            batch_size=128,
            world_size=4,
            window_seconds=100,
            warmup_steps=100
        )
    """
    _namespace: MetricNamespace = MetricNamespace.THROUGHPUT
    _metric_name: MetricName = MetricName.THROUGHPUT
    _batch_examples: int  # examples processed per step across all ranks
    _window_seconds: int
    _warmup_steps: int
    _window_time_lapse_buffer: Deque[float]  # per-step durations inside the window
    _window_time_lapse: float  # sum of the buffered durations
    _previous_ts: float
    _lifetime_throughput_key: str
    _window_throughput_key: str
    _total_examples_key: str
    _steps: int
    def __init__(
        self,
        *,
        batch_size: int,
        world_size: int,
        window_seconds: int,
        warmup_steps: int = 100,
    ) -> None:
        super().__init__()
        if window_seconds < 1:
            raise ValueError(
                "window_seconds must be at least 1 to give window throughput "
                "the minimum time window"
            )
        if warmup_steps < 1:
            raise ValueError(
                "warmup_steps must be at least 1 to give throughput a "
                "reasonable begin time."
            )
        if window_seconds > MAX_WINDOW_TS:
            # NOTE(review): logger.warn is a deprecated alias of logger.warning.
            logger.warn(
                f"window_seconds is greater than {MAX_WINDOW_TS}, capping to {MAX_WINDOW_TS} to make sure window_qps is not staled"
            )
            window_seconds = MAX_WINDOW_TS
        self._batch_examples = batch_size * world_size
        self._window_seconds = window_seconds
        self._warmup_steps = warmup_steps
        # Buffers so the counters travel with state_dict / device moves.
        self.register_buffer("total_examples", torch.tensor(0, dtype=torch.long))
        self.register_buffer("warmup_examples", torch.tensor(0, dtype=torch.long))
        self.register_buffer(
            "time_lapse_after_warmup", torch.tensor(0, dtype=torch.double)
        )
        self._window_time_lapse_buffer = deque(maxlen=MAX_WINDOW_TS)
        self._window_time_lapse = 0
        self._previous_ts = 0
        self._lifetime_throughput_key = compose_metric_key(
            self._namespace,
            str(self._namespace),
            self._metric_name,
            MetricPrefix.LIFETIME,
        )
        self._window_throughput_key = compose_metric_key(
            self._namespace,
            str(self._namespace),
            self._metric_name,
            MetricPrefix.WINDOW,
        )
        self._total_examples_key = compose_metric_key(
            self._namespace,
            str(self._namespace),
            MetricName.TOTAL_EXAMPLES,
        )
        self._steps = 0
    def _check_window(self) -> None:
        # Evict the oldest step durations until the window fits window_seconds.
        while self._window_time_lapse > self._window_seconds:
            self._window_time_lapse -= self._window_time_lapse_buffer.popleft()
    def update(self) -> None:
        """Record one trained batch; starts timing only after warmup completes."""
        ts = time.monotonic()
        self._steps += 1
        self.total_examples += self._batch_examples
        if self._steps <= self._warmup_steps:
            self.warmup_examples += self._batch_examples
            if self._steps == self._warmup_steps:
                # Anchor the timer at the end of warmup.
                self._previous_ts = ts
        else:
            time_lapse = ts - self._previous_ts
            self.time_lapse_after_warmup += time_lapse
            self._window_time_lapse += time_lapse
            self._window_time_lapse_buffer.append(time_lapse)
            self._check_window()
            self._previous_ts = ts
    def compute(self) -> Dict[str, torch.Tensor]:
        """Return total examples plus lifetime/window throughput once past warmup."""
        ret = {self._total_examples_key: self.total_examples}
        if self._steps > self._warmup_steps and (
            not math.isclose(self.time_lapse_after_warmup.item(), 0)
        ):
            lifetime_throughput = (
                self.total_examples - self.warmup_examples
            ) / self.time_lapse_after_warmup
            if not math.isclose(self._window_time_lapse, 0):
                window_throughput = (
                    len(self._window_time_lapse_buffer)
                    * self._batch_examples
                    / self._window_time_lapse
                )
            else:
                window_throughput = 0.0
            if not math.isclose(lifetime_throughput.item(), 0):
                # NOTE(review): torch.tensor(...) on a tensor copies (and may
                # warn on newer PyTorch); lifetime_throughput is already a tensor.
                ret.update(
                    {
                        self._lifetime_throughput_key: torch.tensor(
                            lifetime_throughput, dtype=torch.double
                        ),
                        self._window_throughput_key: torch.tensor(
                            window_throughput, dtype=torch.double
                        ),
                    }
                )
        return ret
def generate_metric_module(
    metric_class: Type[RecMetricModule],
    metrics_config: MetricsConfig,
    batch_size: int,
    world_size: int,
    my_rank: int,
    state_metrics_mapping: Dict[StateMetricEnum, StateMetric],
    device: torch.device,
    process_group: Optional[dist.ProcessGroup] = None,
) -> RecMetricModule:
    """Assemble a ``RecMetricModule`` from a ``MetricsConfig``.

    Builds the per-task rec metrics, an optional throughput metric, and any
    state metrics, instantiates ``metric_class`` with them, and moves the
    resulting module to ``device``.
    """
    rec_metrics = _generate_rec_metrics(
        metrics_config, world_size, my_rank, batch_size, process_group
    )
    # Throughput tracking is optional and driven entirely by the config.
    throughput_metric = (
        ThroughputMetric(
            batch_size=batch_size,
            world_size=world_size,
            window_seconds=metrics_config.throughput_metric.window_size,
        )
        if metrics_config.throughput_metric
        else None
    )
    state_metrics = _generate_state_metrics(metrics_config, state_metrics_mapping)
    module = metric_class(
        batch_size=batch_size,
        world_size=world_size,
        rec_tasks=metrics_config.rec_tasks,
        rec_metrics=rec_metrics,
        throughput_metric=throughput_metric,
        state_metrics=state_metrics,
        compute_interval_steps=metrics_config.compute_interval_steps,
        min_compute_interval=metrics_config.min_compute_interval,
        max_compute_interval=metrics_config.max_compute_interval,
    )
    module.to(device)
    return module
8,913 | from functools import partial
from typing import Any, cast, Dict, List, Optional, Type
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _compute_auprc_helper(
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
) -> torch.Tensor:
    """Weighted AUPRC (area under the precision-recall curve) for a single
    task, from 1-D prediction/label/weight tensors."""
    order = torch.argsort(predictions, descending=True, dim=-1)
    scores = predictions.index_select(0, order)
    labels_sorted = labels.index_select(0, order)
    weights_sorted = weights.index_select(0, order)
    # Keep only the last example of each run of tied scores.
    mask = F.pad(scores.diff(dim=0) != 0, [0, 1], value=1.0)
    num_tp = (weights_sorted * labels_sorted).cumsum(dim=0)[mask]
    num_fp = (weights_sorted * (1.0 - labels_sorted)).cumsum(dim=0)[mask]
    precision = (num_tp / (num_tp + num_fp)).flip(0)
    recall = (num_tp / num_tp[-1]).flip(0)
    # Append (recall=0, precision=1) so the curve starts on the y-axis.
    precision = torch.cat([precision, precision.new_ones(1)])
    recall = torch.cat([recall, recall.new_zeros(1)])
    # NaN recalls (no positive examples at all) are treated as 1.0.
    if torch.isnan(recall[0]):
        recall = torch.nan_to_num(recall, 1.0)
    return _riemann_integral(recall, precision)
The provided code snippet includes necessary dependencies for implementing the `compute_auprc` function. Write a Python function `def compute_auprc( n_tasks: int, predictions: torch.Tensor, labels: torch.Tensor, weights: torch.Tensor, ) -> torch.Tensor` to solve the following problem:
Computes AUPRC (Area Under the Precision-Recall Curve) for binary classification. Args: n_tasks (int): number of tasks. predictions (torch.Tensor): tensor of size (n_tasks, n_examples). labels (torch.Tensor): tensor of size (n_tasks, n_examples). weights (torch.Tensor): tensor of size (n_tasks, n_examples).
Here is the function:
def compute_auprc(
    n_tasks: int,
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
) -> torch.Tensor:
    """
    Computes AUPRC (area under the precision-recall curve) for binary
    classification, one value per task.

    Args:
        n_tasks (int): number of tasks (unused; the input rows drive the loop).
        predictions (torch.Tensor): tensor of size (n_tasks, n_examples).
        labels (torch.Tensor): tensor of size (n_tasks, n_examples).
        weights (torch.Tensor): tensor of size (n_tasks, n_examples).
    """
    per_task = [
        _compute_auprc_helper(task_preds, task_labels, task_weights).view(1)
        for task_preds, task_labels, task_weights in zip(predictions, labels, weights)
    ]
    return torch.cat(per_task)
8,914 | from functools import partial
from typing import Any, cast, Dict, List, Optional, Type
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _compute_auprc_helper(
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
) -> torch.Tensor:
    """Weighted AUPRC (area under the precision-recall curve) for one task,
    computed from 1-D prediction/label/weight tensors."""
    sorted_indices = torch.argsort(predictions, descending=True, dim=-1)
    threshold = torch.index_select(predictions, dim=0, index=sorted_indices)
    sorted_labels = torch.index_select(labels, dim=0, index=sorted_indices)
    sorted_weights = torch.index_select(weights, dim=0, index=sorted_indices)
    # Keep only the last element of each run of tied thresholds.
    mask = F.pad(threshold.diff(dim=0) != 0, [0, 1], value=1.0)
    num_tp = torch.cumsum(sorted_weights * sorted_labels, dim=0)[mask]
    num_fp = torch.cumsum(sorted_weights * (1.0 - sorted_labels), dim=0)[mask]
    precision = (num_tp / (num_tp + num_fp)).flip(0)
    recall = (num_tp / num_tp[-1]).flip(0)
    # The last precision and recall values are 1.0 and 0.0 without a corresponding threshold.
    # This ensures that the graph starts on the y-axis.
    precision = torch.cat([precision, precision.new_ones(1)])
    recall = torch.cat([recall, recall.new_zeros(1)])
    # If recalls are NaNs (num_tp[-1] == 0, i.e. no positives), set NaNs to 1.0s.
    if torch.isnan(recall[0]):
        recall = torch.nan_to_num(recall, 1.0)
    # NOTE(review): _riemann_integral is defined elsewhere in this module.
    auprc = _riemann_integral(recall, precision)
    return auprc
The provided code snippet includes necessary dependencies for implementing the `compute_auprc_per_group` function. Write a Python function `def compute_auprc_per_group( n_tasks: int, predictions: torch.Tensor, labels: torch.Tensor, weights: torch.Tensor, grouping_keys: torch.Tensor, ) -> torch.Tensor` to solve the following problem:
Computes AUPRC (Area Under the Precision-Recall Curve) for binary classification for groups of predictions/labels. Args: n_tasks (int): number of tasks predictions (torch.Tensor): tensor of size (n_tasks, n_examples) labels (torch.Tensor): tensor of size (n_tasks, n_examples) weights (torch.Tensor): tensor of size (n_tasks, n_examples) grouping_keys (torch.Tensor): tensor of size (n_examples,) Returns: torch.Tensor: tensor of size (n_tasks,), average of AUPRCs per group.
Here is the function:
def compute_auprc_per_group(
    n_tasks: int,
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
    grouping_keys: torch.Tensor,
) -> torch.Tensor:
    """
    Computes AUPRC (area under the precision-recall curve) for binary
    classification for groups of predictions/labels.

    Args:
        n_tasks (int): number of tasks (unused; the input rows drive the loop)
        predictions (torch.Tensor): tensor of size (n_tasks, n_examples)
        labels (torch.Tensor): tensor of size (n_tasks, n_examples)
        weights (torch.Tensor): tensor of size (n_tasks, n_examples)
        grouping_keys (torch.Tensor): tensor of size (n_examples,)
    Returns:
        torch.Tensor: tensor of size (n_tasks,), average of AUPRCs per group.
    """
    auprcs = []
    if grouping_keys.numel() != 0 and grouping_keys[0] == -1:
        # we added padding as the first elements during init to avoid floating point exception in sync()
        # removing the paddings to avoid numerical errors.
        grouping_keys = grouping_keys[1:]
        predictions = predictions[:, 1:]
        labels = labels[:, 1:]
        weights = weights[:, 1:]
    # get unique group indices
    group_indices = torch.unique(grouping_keys)
    for predictions_i, labels_i, weights_i in zip(predictions, labels, weights):
        # Loop over each group, accumulating one AUPRC per group
        auprc_groups_sum = torch.tensor([0], dtype=torch.float32)
        for group_idx in group_indices:
            # get predictions, labels, and weights for this group
            group_mask = grouping_keys == group_idx
            grouped_predictions = predictions_i[group_mask]
            grouped_labels = labels_i[group_mask]
            grouped_weights = weights_i[group_mask]
            auprc = _compute_auprc_helper(
                grouped_predictions, grouped_labels, grouped_weights
            )
            # Move the accumulator onto the result's device before adding.
            auprc_groups_sum = auprc_groups_sum.to(auprc.device)
            auprc_groups_sum += auprc.view(1)
        # 0.5 is the no-signal default when there are no groups at all.
        avg_auprc = (
            auprc_groups_sum / len(group_indices)
            if len(group_indices) > 0
            else torch.tensor([0.5], dtype=torch.float32)
        )
        auprcs.append(avg_auprc)
    return torch.cat(auprcs)
8,915 | from functools import partial
from typing import Any, cast, Dict, List, Optional, Type
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _state_reduction(state: List[torch.Tensor], dim: int = 1) -> List[torch.Tensor]:
return [torch.cat(state, dim=dim)] | null |
8,916 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_ctr(ctr_num: torch.Tensor, ctr_denom: torch.Tensor) -> torch.Tensor:
    """Elementwise CTR = num / denom, with 0.0 wherever denom is zero (double)."""
    ratio = torch.where(ctr_denom == 0.0, 0.0, ctr_num / ctr_denom)
    return ratio.double()
8,917 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
CTR_NUM = "ctr_num"
CTR_DENOM = "ctr_denom"


def get_ctr_states(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> Dict[str, torch.Tensor]:
    """Per-task CTR accumulator states: weighted positive-label sum and total
    weight, reduced over the last dim.

    ``predictions`` is accepted for interface uniformity but is unused.
    """
    numerator = (labels * weights).sum(dim=-1)
    denominator = weights.sum(dim=-1)
    return {CTR_NUM: numerator, CTR_DENOM: denominator}
8,918 | import logging
from typing import Any, cast, Dict, List, Optional, Set, Type, Union
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecTaskInfo, SessionMetricDef
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecComputeMode,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _validate_model_outputs(
labels: torch.Tensor,
predictions: torch.Tensor,
weights: torch.Tensor,
sessions: torch.Tensor,
) -> None:
# check if tensors are of the same shape
assert labels.dim() == 2
assert labels.shape == predictions.shape
assert labels.shape == weights.shape
assert labels.shape == sessions.shape | null |
8,919 | import logging
from typing import Any, cast, Dict, List, Optional, Set, Type, Union
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecTaskInfo, SessionMetricDef
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecComputeMode,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def ranking_within_session(
    predictions: torch.Tensor,
    session: torch.Tensor,
) -> torch.Tensor:
    """Rank each prediction against the other members of its session.

    Rank 0 is the highest score within a session; tied scores receive the
    same rank (the rank of the best of the tied group).
    """
    n_tasks = predictions.size(0)
    same_session = session.view(-1, n_tasks) == session.view(n_tasks, -1)
    score_ge = predictions.view(-1, n_tasks) >= predictions.view(n_tasks, -1)
    # Session size minus the number of session members this score ties/beats.
    session_size = torch.sum(same_session, dim=-1)
    beaten_or_tied = torch.sum(same_session & score_ge, dim=-1)
    return session_size - beaten_or_tied
8,920 | import logging
from typing import Any, cast, Dict, List, Optional, Set, Type, Union
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecTaskInfo, SessionMetricDef
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecComputeMode,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _calc_num_true_pos(
labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> torch.Tensor:
# predictions are expected to be 0 or 1 integers.
num_true_pos = torch.sum(weights * labels * (predictions == 1).double(), dim=-1)
return num_true_pos | null |
8,921 | import logging
from typing import Any, cast, Dict, List, Optional, Set, Type, Union
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecTaskInfo, SessionMetricDef
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecComputeMode,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _calc_num_false_neg(
labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> torch.Tensor:
# predictions are expected to be 0 or 1 integers.
num_false_neg = torch.sum(weights * labels * (predictions == 0).double(), dim=-1)
return num_false_neg | null |
8,922 | import logging
from typing import Any, cast, Dict, List, Optional, Set, Type, Union
import torch
from torch import distributed as dist
from torchrec.metrics.metrics_config import RecTaskInfo, SessionMetricDef
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecComputeMode,
RecMetric,
RecMetricComputation,
RecMetricException,
)
logger: logging.Logger = logging.getLogger(__name__)
def _calc_recall(
num_true_pos: torch.Tensor, num_false_neg: torch.Tensor
) -> torch.Tensor:
# if num_true_pos + num_false_neg == 0 then we set recall = NaN by default.
recall = torch.tensor([float("nan")])
if (num_true_pos + num_false_neg).item() != 0:
recall = num_true_pos / (num_true_pos + num_false_neg)
else:
logger.warning(
"Recall = NaN. Likely, it means that there were no positive examples passed to the metric yet."
" Please, debug if you expect every batch to include positive examples."
)
return recall | null |
8,923 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_mae(
    error_sum: torch.Tensor, weighted_num_samples: torch.Tensor
) -> torch.Tensor:
    """MAE = error_sum / weighted_num_samples, 0.0 where the weight sum is zero."""
    mae = torch.where(
        weighted_num_samples == 0.0, 0.0, error_sum / weighted_num_samples
    )
    return mae.double()
8,924 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_error_sum(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> torch.Tensor:
    """Weighted sum of absolute errors along the last dim; predictions are
    cast to double before the subtraction."""
    abs_error = torch.abs(labels - predictions.double())
    return torch.sum(weights * abs_error, dim=-1)
def get_mae_states(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> Dict[str, torch.Tensor]:
    """Accumulator states for MAE: weighted absolute-error sum and weight sum."""
    return {
        "error_sum": compute_error_sum(labels, predictions, weights),
        "weighted_num_samples": weights.sum(dim=-1),
    }
8,925 | from typing import Dict, List, Optional, Tuple
import torch
from torchrec.metrics.rec_metric import RecTaskInfo
def is_empty_signals(
    labels: torch.Tensor,
    predictions: torch.Tensor,
    weights: torch.Tensor,
) -> bool:
    """True only when labels, predictions AND weights are all empty tensors."""
    return all(
        torch.numel(tensor) <= 0 for tensor in (labels, predictions, weights)
    )
def parse_model_outputs(
    label_name: str,
    prediction_name: str,
    weight_name: str,
    model_out: Dict[str, torch.Tensor],
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
    """Extract (labels, predictions, weights) for one task from a model-output dict.

    An empty ``prediction_name`` marks a label-only task: ``weight_name`` must
    then also be empty and ``(labels, None, None)`` is returned. Otherwise the
    three signals are squeezed, shape-checked against each other, and promoted
    to at least rank 1.
    """
    labels = model_out[label_name].squeeze()
    if not prediction_name:
        assert not weight_name, "weight name must be empty if prediction name is empty"
        return (labels, None, None)
    assert isinstance(labels, torch.Tensor)
    predictions = model_out[prediction_name].squeeze()
    assert isinstance(predictions, torch.Tensor)
    weights = model_out[weight_name].squeeze()
    assert isinstance(weights, torch.Tensor)
    if not is_empty_signals(labels, predictions, weights):
        if labels.dim() == predictions.dim():
            # Same rank: all three must carry the same number of elements.
            assert (torch.numel(labels) == torch.numel(predictions)) and (
                torch.numel(labels) == torch.numel(weights)
            ), (
                "Expect the same number of elements in labels, predictions, and weights. "
                f"Instead got {torch.numel(labels)}, {torch.numel(predictions)}, "
                f"{torch.numel(weights)}"
            )
        else:  # For multiclass models, labels.size() = (batch_size), and predictions.size() = (batch_size, number_of_classes)
            assert torch.numel(labels) == torch.numel(predictions) / predictions.size()[
                -1
            ] and torch.numel(labels) == torch.numel(weights)
        # non-empty tensors need to have rank 1; a single example squeezes
        # down to rank 0, so restore rank 1 here.
        if len(labels.size()) == 0:
            labels = labels.unsqueeze(0)
            predictions = predictions.unsqueeze(0)
            weights = weights.unsqueeze(0)
    return labels, predictions, weights
def parse_required_inputs(
    model_out: Dict[str, torch.Tensor],
    required_inputs_list: List[str],
    ndcg_transform_input: bool = False,
) -> Dict[str, torch.Tensor]:
    """Collect the extra input features named in ``required_inputs_list``.

    When ``ndcg_transform_input`` is set, list-valued features are first
    converted in-place in ``model_out`` via ``session_ids_to_tensor``.
    NOTE(review): ``session_ids_to_tensor`` is defined elsewhere — presumably
    it maps a list of session ids to a tensor; confirm against its definition.
    """
    required_inputs: Dict[str, torch.Tensor] = {}
    for feature in required_inputs_list:
        # convert feature defined from config only
        if ndcg_transform_input:
            model_out[feature] = (
                # pyre-ignore[6]
                session_ids_to_tensor(model_out[feature])
                if isinstance(model_out[feature], list)
                else model_out[feature]
            )
        required_inputs[feature] = model_out[feature].squeeze()
        assert isinstance(required_inputs[feature], torch.Tensor)
    return required_inputs
def parse_task_model_outputs(
    tasks: List[RecTaskInfo],
    model_out: Dict[str, torch.Tensor],
    required_inputs_list: Optional[List[str]] = None,
) -> Tuple[
    Dict[str, torch.Tensor],
    Dict[str, torch.Tensor],
    Dict[str, torch.Tensor],
    Dict[str, torch.Tensor],
]:
    """Split a model-output dict into per-task label/prediction/weight maps
    plus any required extra inputs.

    Tasks with empty predictions/weights contribute only their labels (when
    non-empty). Returns four dicts: the first three keyed by task name, the
    last keyed by feature name.
    """
    all_labels: Dict[str, torch.Tensor] = {}
    all_predictions: Dict[str, torch.Tensor] = {}
    all_weights: Dict[str, torch.Tensor] = {}
    all_required_inputs: Dict[str, torch.Tensor] = {}
    # Convert session_ids to tensor if NDCG metric
    ndcg_transform_input = False
    for task in tasks:
        labels, predictions, weights = parse_model_outputs(
            task.label_name, task.prediction_name, task.weight_name, model_out
        )
        if predictions is not None and weights is not None:
            # Full task: keep the triple only if the signals are non-empty.
            if not is_empty_signals(labels, predictions, weights):
                all_labels[task.name] = labels
                all_predictions[task.name] = predictions
                all_weights[task.name] = weights
        else:
            # Label-only task: keep labels when present.
            if torch.numel(labels) > 0:
                all_labels[task.name] = labels
        # NDCG tasks carry list-valued session ids that need tensor conversion.
        if task.name and task.name.startswith("ndcg"):
            ndcg_transform_input = True
    if required_inputs_list is not None:
        all_required_inputs = parse_required_inputs(
            model_out, required_inputs_list, ndcg_transform_input
        )
    return all_labels, all_predictions, all_weights, all_required_inputs
8,926 | import time
from typing import Any, cast, Dict, List, Optional, Type
import torch
import torch.distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
RecModelOutput,
)
def _compute_tower_qps(
num_examples: torch.Tensor, time_lapse: torch.Tensor
) -> torch.Tensor:
return torch.where(time_lapse <= 0.0, 0.0, num_examples / time_lapse).double() | null |
8,927 | import time
from typing import Any, cast, Dict, List, Optional, Type
import torch
import torch.distributed as dist
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
RecModelOutput,
)
def _max_reduction(state: torch.Tensor) -> torch.Tensor:
return torch.max(state, dim=0).values | null |
8,928 | from functools import partial
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torchmetrics.utilities.distributed import gather_all_tensors
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _concat_if_needed(
predictions: List[torch.Tensor],
labels: List[torch.Tensor],
weights: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
This check exists because of how the state is organized due to quirks in RecMetrics.
Since we do not do tensor concatenatation in the compute or update call, there are cases (in non-distributed settings)
where the tensors from updates are not concatted into a single tensor. Which is determined by the length of the list.
"""
preds_t, labels_t, weights_t = None, None, None
if len(predictions) > 1:
preds_t = torch.cat(predictions, dim=-1)
labels_t = torch.cat(labels, dim=-1)
weights_t = torch.cat(weights, dim=-1)
else:
preds_t = predictions[0]
labels_t = labels[0]
weights_t = weights[0]
return preds_t, labels_t, weights_t
def _compute_auc_helper(
predictions: torch.Tensor,
labels: torch.Tensor,
weights: torch.Tensor,
apply_bin: bool = False,
) -> torch.Tensor:
sorted_indices = torch.argsort(predictions, descending=True, dim=-1)
sorted_labels = torch.index_select(labels, dim=0, index=sorted_indices)
if apply_bin:
# TODO - [add flag to set bining dyamically] for use with soft labels, >=0.039 --> 1, <0.039 --> 0
sorted_labels = torch.ge(sorted_labels, 0.039).to(dtype=sorted_labels.dtype)
sorted_weights = torch.index_select(weights, dim=0, index=sorted_indices)
cum_fp = torch.cumsum(sorted_weights * (1.0 - sorted_labels), dim=0)
cum_tp = torch.cumsum(sorted_weights * sorted_labels, dim=0)
auc = torch.where(
cum_fp[-1] * cum_tp[-1] == 0,
0.5, # 0.5 is the no-signal default value for auc.
torch.trapz(cum_tp, cum_fp) / cum_fp[-1] / cum_tp[-1],
)
return auc
The provided code snippet includes necessary dependencies for implementing the `compute_auc` function. Write a Python function `def compute_auc( n_tasks: int, predictions: List[torch.Tensor], labels: List[torch.Tensor], weights: List[torch.Tensor], apply_bin: bool = False, ) -> torch.Tensor` to solve the following problem:
Computes AUC (Area Under the Curve) for binary classification. Args: n_tasks (int): number of tasks. predictions (List[torch.Tensor]): tensor of size (n_tasks, n_examples). labels (List[torch.Tensor]): tensor of size (n_tasks, n_examples). weights (List[torch.Tensor]): tensor of size (n_tasks, n_examples).
Here is the function:
def compute_auc(
    n_tasks: int,
    predictions: List[torch.Tensor],
    labels: List[torch.Tensor],
    weights: List[torch.Tensor],
    apply_bin: bool = False,
) -> torch.Tensor:
    """
    Computes AUC (area under the ROC curve) for binary classification,
    one value per task.

    Args:
        n_tasks (int): number of tasks (unused; the task rows drive the loop).
        predictions (List[torch.Tensor]): shards of size (n_tasks, n_examples).
        labels (List[torch.Tensor]): shards of size (n_tasks, n_examples).
        weights (List[torch.Tensor]): shards of size (n_tasks, n_examples).
        apply_bin (bool): binarize soft labels before computing AUC.
    """
    preds_t, labels_t, weights_t = _concat_if_needed(predictions, labels, weights)
    per_task = [
        _compute_auc_helper(task_preds, task_labels, task_weights, apply_bin).view(1)
        for task_preds, task_labels, task_weights in zip(preds_t, labels_t, weights_t)
    ]
    return torch.cat(per_task)
8,929 | from functools import partial
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torchmetrics.utilities.distributed import gather_all_tensors
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _concat_if_needed(
predictions: List[torch.Tensor],
labels: List[torch.Tensor],
weights: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
This check exists because of how the state is organized due to quirks in RecMetrics.
Since we do not do tensor concatenatation in the compute or update call, there are cases (in non-distributed settings)
where the tensors from updates are not concatted into a single tensor. Which is determined by the length of the list.
"""
preds_t, labels_t, weights_t = None, None, None
if len(predictions) > 1:
preds_t = torch.cat(predictions, dim=-1)
labels_t = torch.cat(labels, dim=-1)
weights_t = torch.cat(weights, dim=-1)
else:
preds_t = predictions[0]
labels_t = labels[0]
weights_t = weights[0]
return preds_t, labels_t, weights_t
def _compute_auc_helper(
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
    apply_bin: bool = False,
) -> torch.Tensor:
    """Weighted ROC-AUC for one task via trapezoidal integration of the
    cumulative weighted TP count against the cumulative weighted FP count."""
    sorted_indices = torch.argsort(predictions, descending=True, dim=-1)
    sorted_labels = torch.index_select(labels, dim=0, index=sorted_indices)
    if apply_bin:
        # Binarize soft labels: >= 0.039 -> 1, < 0.039 -> 0.
        # TODO - add a flag to set the binning threshold dynamically.
        sorted_labels = torch.ge(sorted_labels, 0.039).to(dtype=sorted_labels.dtype)
    sorted_weights = torch.index_select(weights, dim=0, index=sorted_indices)
    cum_fp = torch.cumsum(sorted_weights * (1.0 - sorted_labels), dim=0)
    cum_tp = torch.cumsum(sorted_weights * sorted_labels, dim=0)
    auc = torch.where(
        cum_fp[-1] * cum_tp[-1] == 0,
        0.5,  # 0.5 is the no-signal default value for auc.
        torch.trapz(cum_tp, cum_fp) / cum_fp[-1] / cum_tp[-1],
    )
    return auc
The provided code snippet includes necessary dependencies for implementing the `compute_auc_per_group` function. Write a Python function `def compute_auc_per_group( n_tasks: int, predictions: List[torch.Tensor], labels: List[torch.Tensor], weights: List[torch.Tensor], grouping_keys: torch.Tensor, ) -> torch.Tensor` to solve the following problem:
Computes AUC (Area Under the Curve) for binary classification for groups of predictions/labels. Args: n_tasks (int): number of tasks predictions (List[torch.Tensor]): tensor of size (n_tasks, n_examples) labels (List[torch.Tensor]: tensor of size (n_tasks, n_examples) weights (List[torch.Tensor]): tensor of size (n_tasks, n_examples) grouping_keys (torch.Tensor): tensor of size (n_examples,) Returns: torch.Tensor: tensor of size (n_tasks,), average of AUCs per group.
Here is the function:
def compute_auc_per_group(
    n_tasks: int,
    predictions: List[torch.Tensor],
    labels: List[torch.Tensor],
    weights: List[torch.Tensor],
    grouping_keys: torch.Tensor,
) -> torch.Tensor:
    """
    Computes AUC (Area Under the Curve) for binary classification for groups of predictions/labels.
    Args:
        n_tasks (int): number of tasks (unused; the task rows drive the loop)
        predictions (List[torch.Tensor]): tensor of size (n_tasks, n_examples)
        labels (List[torch.Tensor]: tensor of size (n_tasks, n_examples)
        weights (List[torch.Tensor]): tensor of size (n_tasks, n_examples)
        grouping_keys (torch.Tensor): tensor of size (n_examples,)
    Returns:
        torch.Tensor: tensor of size (n_tasks,), average of AUCs per group.
    """
    preds_t, labels_t, weights_t = _concat_if_needed(predictions, labels, weights)
    aucs = []
    if grouping_keys.numel() != 0 and grouping_keys[0] == -1:
        # we added padding as the first elements during init to avoid floating point exception in sync()
        # removing the paddings to avoid numerical errors.
        grouping_keys = grouping_keys[1:]
    # get unique group indices
    group_indices = torch.unique(grouping_keys)
    for predictions_i, labels_i, weights_i in zip(preds_t, labels_t, weights_t):
        # Loop over each group, accumulating one AUC per group
        auc_groups_sum = torch.tensor([0], dtype=torch.float32)
        for group_idx in group_indices:
            # get predictions, labels, and weights for this group
            group_mask = grouping_keys == group_idx
            grouped_predictions = predictions_i[group_mask]
            grouped_labels = labels_i[group_mask]
            grouped_weights = weights_i[group_mask]
            auc = _compute_auc_helper(
                grouped_predictions, grouped_labels, grouped_weights
            )
            # Move the accumulator onto the result's device before adding.
            auc_groups_sum = auc_groups_sum.to(auc.device)
            auc_groups_sum += auc.view(1)
        # 0.5 is the no-signal default when there are no groups at all.
        avg_auc = (
            auc_groups_sum / len(group_indices)
            if len(group_indices) > 0
            else torch.tensor([0.5], dtype=torch.float32)
        )
        aucs.append(avg_auc)
    return torch.cat(aucs)
8,930 | from functools import partial
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torchmetrics.utilities.distributed import gather_all_tensors
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _state_reduction(state: List[torch.Tensor], dim: int = 1) -> List[torch.Tensor]:
return [torch.cat(state, dim=dim)] | null |
8,931 | from functools import partial
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torchmetrics.utilities.distributed import gather_all_tensors
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _concat_if_needed(
predictions: List[torch.Tensor],
labels: List[torch.Tensor],
weights: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
This check exists because of how the state is organized due to quirks in RecMetrics.
Since we do not do tensor concatenatation in the compute or update call, there are cases (in non-distributed settings)
where the tensors from updates are not concatted into a single tensor. Which is determined by the length of the list.
"""
preds_t, labels_t, weights_t = None, None, None
if len(predictions) > 1:
preds_t = torch.cat(predictions, dim=-1)
labels_t = torch.cat(labels, dim=-1)
weights_t = torch.cat(weights, dim=-1)
else:
preds_t = predictions[0]
labels_t = labels[0]
weights_t = weights[0]
return preds_t, labels_t, weights_t
def _compute_rauc_helper(
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
) -> torch.Tensor:
    """
    Compute RAUC for a single task as 1 minus the reverse-pair measure of the
    label sequence ordered by prediction.

    NOTE(review): ``weights`` is accepted but never used here — confirm
    whether a weighted RAUC was intended.
    """
    # Order label values by ascending (prediction, label); a perfectly
    # monotone prediction/label relationship yields zero reverse pairs.
    array = [
        x
        for x, _ in sorted(
            zip(labels.tolist(), predictions.tolist()), key=lambda x: (x[1], x[0])
        )
    ]
    # `count_reverse_pairs_divide_and_conquer` is defined elsewhere in this
    # module; presumably it returns the fraction of out-of-order pairs —
    # TODO confirm against its definition.
    return torch.tensor(1 - count_reverse_pairs_divide_and_conquer(array))
The provided code snippet includes necessary dependencies for implementing the `compute_rauc` function. Write a Python function `def compute_rauc( n_tasks: int, predictions: List[torch.Tensor], labels: List[torch.Tensor], weights: List[torch.Tensor], ) -> torch.Tensor` to solve the following problem:
Computes RAUC (Regression AUC) for regression tasks. Args: predictions (List[torch.Tensor]): tensor of size (n_tasks, n_examples). labels (List[torch.Tensor]): tensor of size (n_tasks, n_examples). weights (List[torch.Tensor]): tensor of size (n_tasks, n_examples).
Here is the function:
def compute_rauc(
    n_tasks: int,
    predictions: List[torch.Tensor],
    labels: List[torch.Tensor],
    weights: List[torch.Tensor],
) -> torch.Tensor:
    """
    Computes RAUC (Regression AUC) for regression tasks.

    Args:
        n_tasks (int): number of tasks.
        predictions (List[torch.Tensor]): tensor of size (n_tasks, n_examples).
        labels (List[torch.Tensor]): tensor of size (n_tasks, n_examples).
        weights (List[torch.Tensor]): tensor of size (n_tasks, n_examples).

    Returns:
        torch.Tensor: tensor of size (n_tasks,), one RAUC per task.
    """
    preds_t, labels_t, weights_t = _concat_if_needed(predictions, labels, weights)
    raucs = []
    # One RAUC per task row; each scalar is reshaped to 1-d so they concat.
    for predictions_i, labels_i, weights_i in zip(preds_t, labels_t, weights_t):
        rauc = _compute_rauc_helper(predictions_i, labels_i, weights_i)
        raucs.append(rauc.view(1))
    return torch.cat(raucs)
8,932 | from functools import partial
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Type
import torch
import torch.distributed as dist
from torchmetrics.utilities.distributed import gather_all_tensors
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _concat_if_needed(
predictions: List[torch.Tensor],
labels: List[torch.Tensor],
weights: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
This check exists because of how the state is organized due to quirks in RecMetrics.
Since we do not do tensor concatenatation in the compute or update call, there are cases (in non-distributed settings)
where the tensors from updates are not concatted into a single tensor. Which is determined by the length of the list.
"""
preds_t, labels_t, weights_t = None, None, None
if len(predictions) > 1:
preds_t = torch.cat(predictions, dim=-1)
labels_t = torch.cat(labels, dim=-1)
weights_t = torch.cat(weights, dim=-1)
else:
preds_t = predictions[0]
labels_t = labels[0]
weights_t = weights[0]
return preds_t, labels_t, weights_t
def _compute_rauc_helper(
    predictions: torch.Tensor,
    labels: torch.Tensor,
    weights: torch.Tensor,
) -> torch.Tensor:
    """
    Compute RAUC for a single task as 1 minus the reverse-pair measure of the
    label sequence ordered by prediction.

    NOTE(review): ``weights`` is accepted but never used here — confirm
    whether a weighted RAUC was intended.
    """
    # Order label values by ascending (prediction, label); a perfectly
    # monotone prediction/label relationship yields zero reverse pairs.
    array = [
        x
        for x, _ in sorted(
            zip(labels.tolist(), predictions.tolist()), key=lambda x: (x[1], x[0])
        )
    ]
    # `count_reverse_pairs_divide_and_conquer` is defined elsewhere in this
    # module; presumably it returns the fraction of out-of-order pairs —
    # TODO confirm against its definition.
    return torch.tensor(1 - count_reverse_pairs_divide_and_conquer(array))
The provided code snippet includes necessary dependencies for implementing the `compute_rauc_per_group` function. Write a Python function `def compute_rauc_per_group( n_tasks: int, predictions: List[torch.Tensor], labels: List[torch.Tensor], weights: List[torch.Tensor], grouping_keys: torch.Tensor, ) -> torch.Tensor` to solve the following problem:
Computes RAUC (Regression AUC) for regression tasks for groups of predictions/labels. Args: n_tasks (int): number of tasks predictions (List[torch.Tensor]): tensor of size (n_tasks, n_examples) labels (List[torch.Tensor]): tensor of size (n_tasks, n_examples) weights (List[torch.Tensor]): tensor of size (n_tasks, n_examples) grouping_keys (torch.Tensor): tensor of size (n_examples,) Returns: torch.Tensor: tensor of size (n_tasks,), average of RAUCs per group.
Here is the function:
def compute_rauc_per_group(
    n_tasks: int,
    predictions: List[torch.Tensor],
    labels: List[torch.Tensor],
    weights: List[torch.Tensor],
    grouping_keys: torch.Tensor,
) -> torch.Tensor:
    """
    Computes RAUC (Regression AUC) for regression tasks for groups of predictions/labels.

    Args:
        n_tasks (int): number of tasks.
        predictions (List[torch.Tensor]): tensor of size (n_tasks, n_examples)
        labels (List[torch.Tensor]): tensor of size (n_tasks, n_examples)
        weights (List[torch.Tensor]): tensor of size (n_tasks, n_examples)
        grouping_keys (torch.Tensor): tensor of size (n_examples,)

    Returns:
        torch.Tensor: tensor of size (n_tasks,), average of RAUCs per group.
        Tasks with no groups fall back to 0.5 (the random-ordering baseline).
    """
    preds_t, labels_t, weights_t = _concat_if_needed(predictions, labels, weights)
    raucs = []
    if grouping_keys.numel() != 0 and grouping_keys[0] == -1:
        # we added padding as the first elements during init to avoid floating point exception in sync()
        # removing the paddings to avoid numerical errors.
        grouping_keys = grouping_keys[1:]
    # get unique group indices
    group_indices = torch.unique(grouping_keys)
    for predictions_i, labels_i, weights_i in zip(preds_t, labels_t, weights_t):
        # Loop over each group, accumulating that group's RAUC.
        rauc_groups_sum = torch.tensor([0], dtype=torch.float32)
        for group_idx in group_indices:
            # get predictions, labels, and weights for this group
            group_mask = grouping_keys == group_idx
            grouped_predictions = predictions_i[group_mask]
            grouped_labels = labels_i[group_mask]
            grouped_weights = weights_i[group_mask]
            rauc = _compute_rauc_helper(
                grouped_predictions, grouped_labels, grouped_weights
            )
            # Keep the accumulator on the same device as the helper's output.
            rauc_groups_sum = rauc_groups_sum.to(rauc.device)
            rauc_groups_sum += rauc.view(1)
        avg_rauc = (
            rauc_groups_sum / len(group_indices)
            if len(group_indices) > 0
            else torch.tensor([0.5], dtype=torch.float32)
        )
        raucs.append(avg_rauc)
    return torch.cat(raucs)
8,934 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_mse(
    error_sum: torch.Tensor, weighted_num_samples: torch.Tensor
) -> torch.Tensor:
    """Return the weighted mean squared error as float64, with 0 wherever
    the weighted sample count is zero (avoids propagating inf/NaN)."""
    no_samples = weighted_num_samples == 0.0
    mean_error = torch.where(no_samples, 0.0, error_sum / weighted_num_samples)
    return mean_error.double()
8,935 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_rmse(
    error_sum: torch.Tensor, weighted_num_samples: torch.Tensor
) -> torch.Tensor:
    """Return the weighted root mean squared error as float64, with 0
    wherever the weighted sample count is zero."""
    no_samples = weighted_num_samples == 0.0
    rmse = torch.where(no_samples, 0.0, torch.sqrt(error_sum / weighted_num_samples))
    return rmse.double()
8,936 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_error_sum(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> torch.Tensor:
    """Return the weighted sum of squared errors along the example axis,
    computed in float64 for numerical stability."""
    residual = labels - predictions.double()
    return torch.sum(weights * residual * residual, dim=-1)
def get_mse_states(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> Dict[str, torch.Tensor]:
    """Build the running-state dict consumed by the MSE metric computation."""
    states = {
        "error_sum": compute_error_sum(labels, predictions, weights),
        "weighted_num_samples": weights.sum(dim=-1),
    }
    return states
8,937 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def _compute_cross_entropy_norm(
mean_label: torch.Tensor,
pos_labels: torch.Tensor,
neg_labels: torch.Tensor,
eta: float,
) -> torch.Tensor:
mean_label = mean_label.double()
mean_label.clamp_(min=eta, max=1 - eta)
return -pos_labels * torch.log2(mean_label) - neg_labels * torch.log2(
1.0 - mean_label
)
def compute_ne(
    ce_sum: torch.Tensor,
    weighted_num_samples: torch.Tensor,
    pos_labels: torch.Tensor,
    neg_labels: torch.Tensor,
    eta: float,
    allow_missing_label_with_zero_weight: bool = False,
) -> torch.Tensor:
    """Return normalized entropy: cross-entropy sum divided by the entropy
    of the observed mean label.

    When ``allow_missing_label_with_zero_weight`` is set and any task has a
    zero weighted sample count, a dummy value of ``eta`` is returned instead
    of propagating NaN.
    """
    if allow_missing_label_with_zero_weight and not weighted_num_samples.all():
        # Dummy value instead of NaN when some task saw no weighted samples.
        return torch.tensor([eta])
    # All weighted_num_samples entries are non-zero on this path.
    normalizer = _compute_cross_entropy_norm(
        pos_labels / weighted_num_samples, pos_labels, neg_labels, eta
    )
    return ce_sum / normalizer
8,938 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_logloss(
    ce_sum: torch.Tensor,
    pos_labels: torch.Tensor,
    neg_labels: torch.Tensor,
    eta: float,
) -> torch.Tensor:
    """Return the cross-entropy sum averaged over the total label mass,
    with the denominator clamped to at least ``eta`` to avoid dividing
    by zero."""
    denominator = (pos_labels + neg_labels).clamp(min=eta)
    return ce_sum / denominator
8,939 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_cross_entropy(
    labels: torch.Tensor,
    predictions: torch.Tensor,
    weights: torch.Tensor,
    eta: float,
) -> torch.Tensor:
    """Return the per-example weighted binary cross entropy (base-2 logs),
    with predictions clamped into [eta, 1 - eta] before taking logs."""
    preds = predictions.double()
    preds.clamp_(min=eta, max=1 - eta)
    pos_part = weights * labels * torch.log2(preds)
    neg_part = weights * (1.0 - labels) * torch.log2(1.0 - preds)
    return -pos_part - neg_part
def get_ne_states(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor, eta: float
) -> Dict[str, torch.Tensor]:
    """Build the running-state dict consumed by the NE metric computation."""
    cross_entropy = compute_cross_entropy(labels, predictions, weights, eta)
    positive_mass = weights * labels
    negative_mass = weights * (1.0 - labels)
    return {
        "cross_entropy_sum": cross_entropy.sum(dim=-1),
        "weighted_num_samples": weights.sum(dim=-1),
        "pos_labels": positive_mass.sum(dim=-1),
        "neg_labels": negative_mass.sum(dim=-1),
    }
8,940 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_xauc(
    error_sum: torch.Tensor, weighted_num_pairs: torch.Tensor
) -> torch.Tensor:
    """Return the fraction of concordant pairs as float64, with 0 wherever
    there were no weighted pairs to compare."""
    no_pairs = weighted_num_pairs == 0.0
    ratio = torch.where(no_pairs, 0.0, error_sum / weighted_num_pairs)
    return ratio.double()
8,941 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
RecMetricException,
)
def compute_error_sum(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> torch.Tensor:
    """
    Count, per task, the weighted number of concordant (or exactly tied)
    prediction/label pairs.

    Args:
        labels: tensor of size (n_tasks, n_examples).
        predictions: tensor of size (n_tasks, n_examples); cast to float64.
        weights: tensor of size (n_tasks, n_examples); a pair contributes
            the product of its two weights.

    Returns:
        torch.Tensor: tensor of size (n_tasks,) with the weighted pair counts.
    """
    predictions = predictions.double()
    errors = []
    for predictions_i, labels_i, weights_i in zip(predictions, labels, weights):
        # indexing="ij" is the historical default; passing it explicitly
        # preserves behavior and silences the torch.meshgrid deprecation
        # warning for the implicit default.
        preds_x, preds_y = torch.meshgrid(predictions_i, predictions_i, indexing="ij")
        labels_x, labels_y = torch.meshgrid(labels_i, labels_i, indexing="ij")
        weights_x, weights_y = torch.meshgrid(weights_i, weights_i, indexing="ij")
        weights_flag = weights_x * weights_y
        # Concordant: predictions and labels ordered the same way.
        match = torch.logical_or(
            torch.logical_and(preds_x > preds_y, labels_x > labels_y),
            torch.logical_and(preds_x < preds_y, labels_x < labels_y),
        )
        # Exact ties on both predictions and labels also count as a match.
        match = (
            weights_flag
            * torch.logical_or(
                match, torch.logical_and(preds_x == preds_y, labels_x == labels_y)
            ).double()
        )
        # Strict upper triangle: count each unordered pair exactly once.
        errors.append(torch.sum(torch.triu(match, diagonal=1)).view(1))
    return torch.cat(errors)
def compute_weighted_num_pairs(weights: torch.Tensor) -> torch.Tensor:
    """
    Return, per task, the total pair weight: the sum over all unordered
    example pairs of the product of their weights.

    Args:
        weights: tensor of size (n_tasks, n_examples).

    Returns:
        torch.Tensor: tensor of size (n_tasks,).
    """
    num_pairs = []
    for weight_i in weights:
        # indexing="ij" is the historical default; passing it explicitly
        # preserves behavior and silences the torch.meshgrid deprecation
        # warning for the implicit default.
        weights_x, weights_y = torch.meshgrid(weight_i, weight_i, indexing="ij")
        weights_flag = weights_x * weights_y
        # Strict upper triangle: count each unordered pair exactly once.
        num_pairs.append(torch.sum(torch.triu(weights_flag, diagonal=1)).view(1))
    return torch.cat(num_pairs)
def get_xauc_states(
    labels: torch.Tensor, predictions: torch.Tensor, weights: torch.Tensor
) -> Dict[str, torch.Tensor]:
    """Build the running-state dict consumed by the XAUC metric computation."""
    states: Dict[str, torch.Tensor] = {}
    states["error_sum"] = compute_error_sum(labels, predictions, weights)
    states["weighted_num_pairs"] = compute_weighted_num_pairs(weights)
    return states
8,942 | from typing import Any, cast, Dict, List, Optional, Type
import torch
from torchrec.metrics.metrics_namespace import MetricName, MetricNamespace, MetricPrefix
from torchrec.metrics.rec_metric import (
MetricComputationReport,
RecMetric,
RecMetricComputation,
)
def get_mean(value_sum: torch.Tensor, num_samples: torch.Tensor) -> torch.Tensor:
    """Return the elementwise mean given a running sum and a sample count."""
    mean = value_sum / num_samples
    return mean
8,943 | from typing import Optional, Union
import torch
from torch import nn
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.distributed.quant_embeddingbag import ShardedQuantEmbeddingBagCollection
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
EmbeddingCollection as QuantEmbeddingCollection,
)
def populate_fx_names(
    quant_ebc: Union[QuantEmbeddingBagCollection, ShardedQuantEmbeddingBagCollection]
) -> None:
    """
    Assigns fx path to non registered lookup modules. This allows the Torchrec tracer to fallback to
    emb_module._fx_path for table batched embeddings.

    Any module that is neither of the two supported types is silently ignored.
    """
    if isinstance(quant_ebc, QuantEmbeddingBagCollection):
        # Unsharded case: one path per grouped embedding module, named after
        # the comma-joined table names it serves.
        for emb_configs, emb_module in zip(
            quant_ebc._key_to_tables, quant_ebc._emb_modules
        ):
            table_names = []
            for config in emb_configs:
                table_names.append(config.name)
            joined_table_names = ",".join(table_names)
            emb_module._fx_path = f"emb_module.{joined_table_names}"
    elif isinstance(quant_ebc, ShardedQuantEmbeddingBagCollection):
        # Sharded case: paths encode sharding index, rank, and TBE group so a
        # trace can address each lookup/dist module deterministically.
        for i, (emb_module, emb_dist_module) in enumerate(
            zip(quant_ebc._lookups, quant_ebc._output_dists)
        ):
            embedding_fx_path = f"embedding_lookup.sharding_{i}"
            emb_module._fx_path = embedding_fx_path
            emb_dist_module._fx_path = f"embedding_dist.{i}"
            for rank, rank_module in enumerate(emb_module._embedding_lookups_per_rank):
                rank_fx_path = f"{embedding_fx_path}.rank_{rank}"
                rank_module._fx_path = rank_fx_path
                for group, group_module in enumerate(rank_module._emb_modules):
                    group_module._fx_path = f"{rank_fx_path}.group_{group}"
                    # The inner table-batched embedding gets its own ".tbe" leaf.
                    group_module._emb_module._fx_path = (
                        f"{rank_fx_path}.group_{group}.tbe"
                    )
class ShardedQuantEmbeddingBagCollection(
    ShardedQuantEmbeddingModuleState[
        ListOfKJTList,
        List[List[torch.Tensor]],
        KeyedTensor,
        NullShardedModuleContext,
    ],
):
    """
    Sharded implementation of `EmbeddingBagCollection`.
    This is part of the public API to allow for manual data dist pipelining.

    Builds inference shardings (via `create_infer_embedding_bag_sharding`) and
    quantized table-batched lookups; input/output dists are created lazily on
    the first forward pass, once the feature keys and device are known.
    """

    def __init__(
        self,
        module: EmbeddingBagCollectionInterface,
        table_name_to_parameter_sharding: Dict[str, ParameterSharding],
        env: ShardingEnv,
        fused_params: Optional[Dict[str, Any]] = None,
        device: Optional[torch.device] = None,
    ) -> None:
        super().__init__()
        self._embedding_bag_configs: List[EmbeddingBagConfig] = (
            module.embedding_bag_configs()
        )
        # Bucket the plan's per-table shardings by sharding type.
        self._sharding_type_to_sharding_infos: Dict[
            str, List[EmbeddingShardingInfo]
        ] = create_sharding_infos_by_sharding(
            module, table_name_to_parameter_sharding, "embedding_bags.", fused_params
        )
        self._sharding_type_to_sharding: Dict[
            str,
            EmbeddingSharding[
                NullShardingContext,
                KJTList,
                List[torch.Tensor],
                torch.Tensor,
            ],
        ] = {
            sharding_type: create_infer_embedding_bag_sharding(
                sharding_type, embedding_confings, env
            )
            for sharding_type, embedding_confings in self._sharding_type_to_sharding_infos.items()
        }
        self._device = device
        self._is_weighted: bool = module.is_weighted()
        self._input_dists: List[nn.Module] = []
        self._lookups: List[nn.Module] = []
        self._create_lookups(fused_params, device)

        # Ensure output dist is set for post processing from an inference runtime (ie. setting device from runtime).
        self._output_dists: torch.nn.ModuleList = torch.nn.ModuleList()

        self._embedding_names: List[str] = []
        self._embedding_dims: List[int] = []
        self._feature_splits: List[int] = []
        self._features_order: List[int] = []

        # forward pass flow control
        self._has_uninitialized_input_dist: bool = True
        self._has_uninitialized_output_dist: bool = True
        self._has_features_permute: bool = True

        tbes: Dict[IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig] = (
            get_tbes_to_register_from_iterable(self._lookups)
        )

        self._tbes_configs: Dict[
            IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig
        ] = tbes

        # Optional registration of TBEs for model post processing utilities
        if is_fused_param_register_tbe(fused_params):
            self.tbes: torch.nn.ModuleList = torch.nn.ModuleList(tbes.keys())

        quant_state_dict_split_scale_bias = (
            is_fused_param_quant_state_dict_split_scale_bias(fused_params)
        )

        if quant_state_dict_split_scale_bias:
            self._initialize_torch_state(
                tbes=tbes,
                table_name_to_parameter_sharding=table_name_to_parameter_sharding,
                tables_weights_prefix="embedding_bags",
            )
        else:
            table_wise_sharded_only: bool = all(
                sharding_type == ShardingType.TABLE_WISE.value
                for sharding_type in self._sharding_type_to_sharding.keys()
            )
            assert (
                table_wise_sharded_only
            ), "ROW_WISE,COLUMN_WISE shardings can be used only in 'quant_state_dict_split_scale_bias' mode, specify fused_params[FUSED_PARAM_QUANT_STATE_DICT_SPLIT_SCALE_BIAS]=True to __init__ argument"

            # Surface each table's weight buffer under
            # `embedding_bags.<table>.weight`, mirroring the unsharded module.
            self.embedding_bags: nn.ModuleDict = nn.ModuleDict()
            for table in self._embedding_bag_configs:
                self.embedding_bags[table.name] = torch.nn.Module()

            for _sharding_type, lookup in zip(
                self._sharding_type_to_sharding.keys(), self._lookups
            ):
                lookup_state_dict = lookup.state_dict()
                for key in lookup_state_dict:
                    if key.endswith(".weight"):
                        table_name = key[: -len(".weight")]
                        self.embedding_bags[table_name].register_buffer(
                            "weight", lookup_state_dict[key]
                        )

    def tbes_configs(
        self,
    ) -> Dict[IntNBitTableBatchedEmbeddingBagsCodegen, GroupedEmbeddingConfig]:
        """Returns the TBE modules mapped to their grouped embedding configs."""
        return self._tbes_configs

    def sharding_type_to_sharding_infos(self) -> Dict[str, List[EmbeddingShardingInfo]]:
        """Returns per-sharding-type lists of table sharding infos."""
        return self._sharding_type_to_sharding_infos

    def embedding_bag_configs(self) -> List[EmbeddingBagConfig]:
        """Returns the configs of all embedding bag tables in this collection."""
        return self._embedding_bag_configs

    def _create_input_dist(
        self,
        input_feature_names: List[str],
        features_device: torch.device,
        input_dist_device: Optional[torch.device] = None,
    ) -> None:
        """Create one input dist per sharding and record the feature
        permutation needed to map incoming KJT keys to sharding order."""
        feature_names: List[str] = []
        for sharding in self._sharding_type_to_sharding.values():
            self._input_dists.append(
                sharding.create_input_dist(device=input_dist_device)
            )
            feature_names.extend(sharding.feature_names())
            self._feature_splits.append(len(sharding.feature_names()))
        if feature_names == input_feature_names:
            # Input keys already arrive in sharding order; skip the permute.
            self._has_features_permute = False
        else:
            for f in feature_names:
                self._features_order.append(input_feature_names.index(f))
            self.register_buffer(
                "_features_order_tensor",
                torch.tensor(
                    self._features_order, device=features_device, dtype=torch.int32
                ),
                persistent=False,
            )

    def _create_lookups(
        self,
        fused_params: Optional[Dict[str, Any]],
        device: Optional[torch.device] = None,
    ) -> None:
        """Create one embedding lookup module per sharding."""
        for sharding in self._sharding_type_to_sharding.values():
            self._lookups.append(
                sharding.create_lookup(
                    device=device,
                    fused_params=fused_params,
                )
            )

    def _create_output_dist(self, device: Optional[torch.device] = None) -> None:
        """Create one output dist per sharding and collect the embedding
        names/dims used to assemble the final KeyedTensor."""
        for sharding in self._sharding_type_to_sharding.values():
            self._output_dists.append(sharding.create_output_dist(device))
            self._embedding_names.extend(sharding.embedding_names())
            self._embedding_dims.extend(sharding.embedding_dims())

    # pyre-ignore [14]
    # pyre-ignore
    def input_dist(
        self, ctx: NullShardedModuleContext, features: KeyedJaggedTensor
    ) -> ListOfKJTList:
        """Lazily initialize dists on first call, permute/split the incoming
        features per sharding, and run each sharding's input dist."""
        if self._has_uninitialized_input_dist:
            self._create_input_dist(
                features.keys(),
                features.device(),
                self._device,
            )
            self._has_uninitialized_input_dist = False
        if self._has_uninitialized_output_dist:
            self._create_output_dist(features.device())
            self._has_uninitialized_output_dist = False
        with torch.no_grad():
            if self._has_features_permute:
                features = features.permute(
                    self._features_order,
                    self._features_order_tensor,
                )
            else:
                features = flatten_feature_lengths(features)
            features_by_shards = (
                [features]
                if len(self._feature_splits) == 1
                else features.split(self._feature_splits)
            )
            return ListOfKJTList(
                [
                    self._input_dists[i].forward(features_by_shards[i])
                    for i in range(len(self._input_dists))
                ]
            )

    def compute(
        self,
        ctx: NullShardedModuleContext,
        dist_input: ListOfKJTList,
    ) -> List[List[torch.Tensor]]:
        """Run each sharding's lookup over its distributed input."""
        # syntax for torchscript
        return [lookup.forward(dist_input[i]) for i, lookup in enumerate(self._lookups)]

    # pyre-ignore
    def output_dist(
        self,
        ctx: NullShardedModuleContext,
        output: List[List[torch.Tensor]],
    ) -> KeyedTensor:
        """Run per-sharding output dists and assemble the final KeyedTensor."""
        return construct_output_kt(
            embeddings=[
                dist.forward(output[i]) for i, dist in enumerate(self._output_dists)
            ],
            embedding_dims=self._embedding_dims,
            embedding_names=self._embedding_names,
        )

    # pyre-ignore
    def compute_and_output_dist(
        self, ctx: NullShardedModuleContext, input: ListOfKJTList
    ) -> KeyedTensor:
        """Fused compute + output dist step (used by pipelining)."""
        return self.output_dist(ctx, self.compute(ctx, input))

    # pyre-ignore
    def forward(self, *input, **kwargs) -> KeyedTensor:
        """Full path: input dist -> lookup -> output dist."""
        ctx = self.create_context()
        dist_input = self.input_dist(ctx, *input, **kwargs)
        return self.compute_and_output_dist(ctx, dist_input)

    def copy(self, device: torch.device) -> nn.Module:
        """Materialize output dists on `device` before copying the module."""
        if self._has_uninitialized_output_dist:
            self._create_output_dist(device)
            self._has_uninitialized_output_dist = False
        return super().copy(device)

    def shardings(self) -> Dict[str, FeatureShardingMixIn]:
        # pyre-ignore [7]
        return self._sharding_type_to_sharding

    def create_context(self) -> NullShardedModuleContext:
        """Returns a fresh (empty) sharded-module context."""
        if is_torchdynamo_compiling():
            # Context creation is not supported by dynamo yet.
            # Context is not needed for TW sharding =>
            # Unblocking dynamo TW with None.
            # pyre-ignore
            return None
        return NullShardedModuleContext()
def recursive_populate_fx_names(module: nn.Module) -> None:
    """Walk the module tree and assign fx paths to every quant EBC
    (sharded or not); stops descending once a match is handled."""
    if isinstance(
        module, (QuantEmbeddingBagCollection, ShardedQuantEmbeddingBagCollection)
    ):
        populate_fx_names(module)
        return
    for child in module.children():
        recursive_populate_fx_names(child)
8,944 | from typing import Optional, Union
import torch
from torch import nn
from torchrec.distributed.model_parallel import DistributedModelParallel
from torchrec.distributed.quant_embeddingbag import ShardedQuantEmbeddingBagCollection
from torchrec.quant.embedding_modules import (
EmbeddingBagCollection as QuantEmbeddingBagCollection,
EmbeddingCollection as QuantEmbeddingCollection,
)
def _meta_to_cpu_placement(
    module: nn.Module, root_module: nn.Module, name: Optional[str] = None
) -> None:
    """
    Recursively replace meta-device quant embedding modules with CPU copies.

    Walks ``module``'s children; whenever a ``QuantEmbeddingBagCollection`` or
    ``QuantEmbeddingCollection`` still lives on the ``meta`` device, an
    equivalently-configured module is constructed on CPU and swapped into its
    parent (``root_module``) under the same attribute ``name``.
    """
    if (
        name is not None
        and isinstance(module, QuantEmbeddingBagCollection)
        and module.device.type == "meta"
    ):
        # Rebuild the EBC on CPU with identical tables/dtype/TBE settings.
        qebc_cpu = QuantEmbeddingBagCollection(
            tables=module.embedding_bag_configs(),
            is_weighted=module.is_weighted(),
            device=torch.device("cpu"),
            output_dtype=module.output_dtype(),
            register_tbes=module.register_tbes,
            row_alignment=module.row_alignment,
        )
        setattr(root_module, name, qebc_cpu)
    elif (
        name is not None
        and isinstance(module, QuantEmbeddingCollection)
        and module.device.type == "meta"
    ):
        # Same swap for sequence embeddings (EmbeddingCollection variant).
        qec_cpu = QuantEmbeddingCollection(
            tables=module.embedding_configs(),
            device=torch.device("cpu"),
            need_indices=module.need_indices(),
            output_dtype=module.output_dtype(),
            register_tbes=module.register_tbes,
            row_alignment=module.row_alignment,
        )
        setattr(root_module, name, qec_cpu)
    else:
        # Not a meta quant module: recurse into children, passing each child's
        # attribute name so a matching descendant can be swapped on its parent.
        for name, submodule in module.named_children():
            _meta_to_cpu_placement(submodule, module, name)
class DistributedModelParallel(nn.Module, FusedOptimizerModule):
"""
Entry point to model parallelism.
Args:
module (nn.Module): module to wrap.
env (Optional[ShardingEnv]): sharding environment that has the process group.
device (Optional[torch.device]): compute device, defaults to cpu.
plan (Optional[ShardingPlan]): plan to use when sharding, defaults to
`EmbeddingShardingPlanner.collective_plan()`.
sharders (Optional[List[ModuleSharder[nn.Module]]]): `ModuleSharders` available
to shard with, defaults to `EmbeddingBagCollectionSharder()`.
init_data_parallel (bool): data-parallel modules can be lazy, i.e. they delay
parameter initialization until the first forward pass. Pass `True` to delay
initialization of data parallel modules. Do first forward pass and then call
DistributedModelParallel.init_data_parallel().
init_parameters (bool): initialize parameters for modules still on meta device.
data_parallel_wrapper (Optional[DataParallelWrapper]): custom wrapper for data
parallel modules.
Example::
def init_weights(m):
if isinstance(m, nn.Linear):
m.weight.fill_(1.0)
elif isinstance(m, EmbeddingBagCollection):
for param in m.parameters():
init.kaiming_normal_(param)
m = MyModel(device='meta')
m = DistributedModelParallel(m)
m.apply(init_weights)
"""
def __init__(
self,
module: nn.Module,
env: Optional[ShardingEnv] = None,
device: Optional[torch.device] = None,
plan: Optional[ShardingPlan] = None,
sharders: Optional[List[ModuleSharder[torch.nn.Module]]] = None,
init_data_parallel: bool = True,
init_parameters: bool = True,
data_parallel_wrapper: Optional[DataParallelWrapper] = None,
) -> None:
super().__init__()
torch._C._log_api_usage_once(f"torchrec.distributed.{self.__class__.__name__}")
self.init_parameters = init_parameters
self._ddp_wrapped: bool = False
if env is None:
pg = dist.GroupMember.WORLD
assert pg is not None, "Process group is not initialized"
env = ShardingEnv.from_process_group(pg)
self._env: ShardingEnv = env
if device is None:
device = torch.device("cpu")
self.device: torch.device = device
if sharders is None:
sharders = get_default_sharders()
self._sharder_map: Dict[Type[nn.Module], ModuleSharder[nn.Module]] = {
sharder.module_type: sharder for sharder in sharders
}
if data_parallel_wrapper is None:
data_parallel_wrapper = DefaultDataParallelWrapper()
self._data_parallel_wrapper: DataParallelWrapper = data_parallel_wrapper
if plan is None:
planner = EmbeddingShardingPlanner(
topology=Topology(
local_world_size=get_local_size(self._env.world_size),
world_size=self._env.world_size,
compute_device=self.device.type,
)
)
pg = self._env.process_group
if pg is not None:
plan = planner.collective_plan(module, sharders, pg)
else:
plan = planner.plan(module, sharders)
self._plan: ShardingPlan = plan
self._dmp_wrapped_module: nn.Module = self._init_dmp(module)
self._optim: CombinedOptimizer = self._init_optim(self._dmp_wrapped_module)
if init_parameters:
self._init_parameters(self.module)
if init_data_parallel:
self.init_data_parallel()
def module(self) -> nn.Module:
"""
Property to directly access sharded module, which will not be wrapped in DDP,
FSDP, DMP, or any other parallelism wrappers.
"""
return get_unwrapped_module(self)
def module(self, value: nn.Module) -> None:
if isinstance(self.module, DistributedDataParallel) or isinstance(
self.module, FullyShardedDataParallel
):
raise RuntimeError(
"module can't be set after calling init_data_parallel(...)"
)
else:
self._dmp_wrapped_module = value
# pyre-ignore [2, 3]
def forward(self, *args, **kwargs) -> Any:
return self._dmp_wrapped_module(*args, **kwargs)
def init_data_parallel(self) -> None:
"""
See init_data_parallel c-tor argument for usage.
It's safe to call this method multiple times.
"""
if not self._ddp_wrapped:
# Allocate any 'meta' tensors
if self.init_parameters:
self._init_parameters(self._dmp_wrapped_module)
self._data_parallel_wrapper.wrap(self, self._env, self.device)
self._ddp_wrapped = True
def copy(
self,
device: torch.device,
) -> "DistributedModelParallel":
"""
Recursively copy submodules to new device by calling per-module customized copy
process, since some modules needs to use the original references (like
`ShardedModule` for inference).
"""
assert isinstance(device, torch.device)
# dmp code deep copy
with sharded_model_copy(device=None):
copy_dmp = copy.deepcopy(self)
# tensor resident module deep copy
copy_dmp_wrapped_module = copy_to_device(
self._dmp_wrapped_module, self.device, device
)
copy_dmp._dmp_wrapped_module = copy_dmp_wrapped_module
return copy_dmp
def _init_dmp(self, module: nn.Module) -> nn.Module:
return self._shard_modules_impl(module)
def _init_optim(self, module: nn.Module) -> CombinedOptimizer:
# pyre-ignore [6]
return CombinedOptimizer(self._fused_optim_impl(module, []))
def _fused_optim_impl(
self,
module: nn.Module,
fused_optims: List[Tuple[str, KeyedOptimizer]],
path: str = "",
) -> List[Tuple[str, KeyedOptimizer]]:
if isinstance(module, FusedOptimizerModule):
fused_optims.append((path, module.fused_optimizer))
return fused_optims
for name, child in module.named_children():
self._fused_optim_impl(
child,
fused_optims,
path + "." + name if path else name,
)
return fused_optims
def _shard_modules_impl(
self,
module: nn.Module,
path: str = "",
) -> nn.Module:
# pre-sharded module
if isinstance(module, ShardedModule):
return module
# shardable module
module_sharding_plan = self._plan.get_plan_for_module(path)
if module_sharding_plan:
sharder_key = type(module)
module = self._sharder_map[sharder_key].shard(
module,
module_sharding_plan,
self._env,
self.device,
)
return module
for name, child in module.named_children():
child = self._shard_modules_impl(
child,
path + "." + name if path else name,
)
setattr(module, name, child)
return module
def _init_parameters(self, module: nn.Module) -> None:
def init_parameters(module: nn.Module) -> None:
# Allocate parameters and buffers if over 'meta' device.
has_meta_param = False
for name, param in module._parameters.items():
if isinstance(param, torch.Tensor) and param.device.type == "meta":
module._parameters[name] = nn.Parameter(
torch.empty_like(param, device=self.device),
requires_grad=param.requires_grad,
)
has_meta_param = True
for name, buffer in module._buffers.items():
if isinstance(buffer, torch.Tensor) and buffer.device.type == "meta":
module._buffers[name] = torch.zeros_like(buffer, device=self.device)
# Init parameters if at least one parameter is over 'meta' device.
if has_meta_param and hasattr(module, "reset_parameters"):
module.reset_parameters()
module.apply(init_parameters)
def sparse_grad_parameter_names(
self, destination: Optional[List[str]] = None, prefix: str = ""
) -> List[str]:
destination = [] if destination is None else destination
return self._sparse_grad_parameter_names(self.module, destination, prefix)
def _sparse_grad_parameter_names(
self, module: nn.Module, destination: List[str], prefix: str = ""
) -> List[str]:
module = get_unwrapped_module(module)
if isinstance(module, ShardedModule):
pass
elif isinstance(module, nn.Embedding):
if module.sparse:
destination.append(append_prefix(prefix, "weight"))
elif isinstance(module, nn.EmbeddingBag):
if module.sparse:
destination.append(append_prefix(prefix, "weight"))
else:
for name, child in module.named_children():
self._sparse_grad_parameter_names(
child, destination, append_prefix(prefix, name)
)
return destination
    # pyre-ignore [14]
    def state_dict(
        self,
        destination: Optional[Dict[str, Any]] = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Dict[str, Any]:
        """Return the wrapped module's state dict with DDP key prefixes removed.

        The inner module may be wrapped in ``DistributedDataParallel``; any
        ``prefix + _DDP_STATE_DICT_PREFIX`` on keys is stripped, then
        ``prefix`` is re-applied so keys match the unwrapped model.
        """
        state_dict = get_module(self).state_dict(
            destination=destination, prefix=prefix, keep_vars=keep_vars
        )
        torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
            state_dict, prefix + _DDP_STATE_DICT_PREFIX
        )
        add_prefix_to_state_dict(state_dict, prefix)
        return state_dict
    # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module`
    # inconsistently.
    def load_state_dict(
        self,
        state_dict: "OrderedDict[str, torch.Tensor]",
        prefix: str = "",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Load ``state_dict`` into the wrapped module tree.

        Delegates to :meth:`_load_state_dict`, which handles DDP/Sharded
        module wrappers and per-child prefix filtering.
        """
        return self._load_state_dict(self, state_dict, prefix, strict)
    def _load_state_dict(
        self,
        module: nn.Module,
        state_dict: "OrderedDict[str, torch.Tensor]",
        prefix: str = "",
        strict: bool = True,
    ) -> _IncompatibleKeys:
        """Recursively load ``state_dict`` into ``module``.

        DDP wrappers have their key prefix normalized; ``ShardedModule``s
        load via their own ``load_state_dict``; all other modules load their
        local tensors and then recurse into children with a filtered,
        re-rooted state dict.
        """
        missing_keys = []
        unexpected_keys = []
        module = get_module(module)
        if isinstance(module, DistributedDataParallel):
            # Strip the caller's prefix, then re-add it including the DDP
            # wrapper segment so keys line up with DDP's parameter names.
            torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
                state_dict, prefix
            )
            add_prefix_to_state_dict(state_dict, prefix + _DDP_STATE_DICT_PREFIX)
        if isinstance(module, ShardedModule):
            # Sharded modules fully own their (sharded) state loading.
            return module.load_state_dict(state_dict, strict=strict)
        else:
            # Load this module's own tensors, then descend into children.
            module._load_from_state_dict(
                state_dict, prefix, {}, strict, missing_keys, unexpected_keys, []
            )
            for name, child in module.named_children():
                m_keys, u_keys = self._load_state_dict(
                    child,
                    filter_state_dict(state_dict, prefix + name),
                    "",
                    strict,
                )
                missing_keys.extend(m_keys)
                unexpected_keys.extend(u_keys)
        return _IncompatibleKeys(
            missing_keys=missing_keys, unexpected_keys=unexpected_keys
        )
    def _named_parameters(
        self,
        module: nn.Module,
        prefix: str = "",
        recurse: bool = True,
        strip_ddp: bool = True,
    ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
        """Yield ``(name, parameter)`` pairs across the (possibly wrapped) tree.

        ``ShardedModule``s report their parameters via their own
        ``named_parameters``; for other modules only local (non-recursive)
        parameters are yielded here and children are visited explicitly so
        that wrapper stripping applies at every level.
        """
        if strip_ddp:
            module = get_unwrapped_module(module)
        if isinstance(module, ShardedModule):
            yield from module.named_parameters(prefix, recurse)
        else:
            # recurse=False: children handled by the explicit loop below.
            yield from module.named_parameters(prefix, recurse=False)
            for name, child in module.named_children():
                yield from self._named_parameters(
                    child,
                    append_prefix(prefix, name),
                    recurse,
                    strip_ddp,
                )
    def named_parameters(
        self,
        prefix: str = "",
        recurse: bool = True,
        remove_duplicate: bool = True,
    ) -> Iterator[Tuple[str, torch.nn.Parameter]]:
        """Yield ``(name, parameter)`` pairs of the wrapped module.

        With ``remove_duplicate=True`` (default) a parameter shared under
        several names is yielded only once; otherwise the memo stays empty
        and every occurrence is yielded.
        """
        gen = self._named_parameters(
            self.module,
            prefix,
            recurse,
        )
        memo = set()
        for key, param in gen:
            if param in memo:
                continue
            if remove_duplicate:
                memo.add(param)
            yield key, param
def bare_named_parameters(
self,
prefix: str = "",
recurse: bool = True,
) -> Iterator[Tuple[str, torch.nn.Parameter]]:
gen = self._named_parameters(
self.module,
prefix,
recurse,
)
memo = set()
for key, param in gen:
if param in memo:
continue
memo.add(param)
yield key, param
def _sharded_parameter_names(module: nn.Module, prefix: str = "") -> Iterator[str]:
module = get_unwrapped_module(module)
if isinstance(module, ShardedModule):
yield from module.sharded_parameter_names(prefix)
else:
for name, child in module.named_children():
yield from DistributedModelParallel._sharded_parameter_names(
child, append_prefix(prefix, name)
)
    def _named_buffers(
        self, module: nn.Module, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        """Yield ``(name, buffer)`` pairs across the (possibly wrapped) tree.

        Mirrors :meth:`_named_parameters`: sharded modules report their own
        buffers; other modules yield local buffers and recurse explicitly.
        """
        module = get_unwrapped_module(module)
        if isinstance(module, ShardedModule):
            yield from module.named_buffers(prefix, recurse)
        else:
            # recurse=False: children handled by the explicit loop below.
            yield from module.named_buffers(prefix, recurse=False)
            for name, child in module.named_children():
                yield from self._named_buffers(
                    child, append_prefix(prefix, name), recurse
                )
def named_buffers(
self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
) -> Iterator[Tuple[str, torch.Tensor]]:
gen = self._named_buffers(self.module, prefix, recurse)
memo = set()
for key, param in gen:
if param in memo:
continue
if remove_duplicate:
memo.add(param)
yield key, param
    def fused_optimizer(self) -> KeyedOptimizer:
        """Return the underlying fused ``KeyedOptimizer`` (``self._optim``)."""
        return self._optim
    def plan(self) -> ShardingPlan:
        """Return the ``ShardingPlan`` stored on this instance (``self._plan``)."""
        return self._plan
def _reset_parameters(module: nn.Module) -> None:
for _, m in module.named_modules():
if hasattr(m, "reset_parameters"):
m.reset_parameters()
def meta_to_cpu_placement(module: torch.nn.Module) -> None:
    """Move 'meta'-placed tensors of ``module`` to CPU placement.

    A ``DistributedModelParallel`` wrapper is read through ``.module`` and
    written through ``._dmp_wrapped_module``; a bare sharded module is
    updated in place.
    """
    if not hasattr(module, "_dmp_wrapped_module"):
        # shard module case
        _meta_to_cpu_placement(module, module)
        return
    # for placement update of dmp module, we need to fetch .module (read
    # access) and write to .dmp_wrapped_module (write access)
    assert type(module) == DistributedModelParallel
    _meta_to_cpu_placement(module.module, module, "_dmp_wrapped_module")
8,945 | import copy
import itertools
from collections import defaultdict
from typing import Callable, cast, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
)
from torch import Tensor
from torchrec.distributed.utils import none_throws
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
QuantConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection as OriginalEmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
FeatureProcessedEmbeddingBagCollection as OriginalFeatureProcessedEmbeddingBagCollection,
)
from torchrec.modules.utils import construct_jagged_tensors_inference
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import ModuleNoCopyMixin
def _get_batching_hinted_output(lengths: Tensor, output: Tensor) -> Tensor:
    # this is a fx rule to help with batching hinting jagged sequence tensor coalescing.
    """Identity on ``output``; exists solely as an FX-visible marker op.

    ``lengths`` is unused at runtime — pairing it with ``output`` in the
    traced graph lets FX passes associate the two tensors for batch hinting.
    Do not simplify/inline this function: graph rewrites match on its name.
    """
    return output
8,946 | import copy
import itertools
from collections import defaultdict
from typing import Callable, cast, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
)
from torch import Tensor
from torchrec.distributed.utils import none_throws
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
QuantConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection as OriginalEmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
FeatureProcessedEmbeddingBagCollection as OriginalFeatureProcessedEmbeddingBagCollection,
)
from torchrec.modules.utils import construct_jagged_tensors_inference
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import ModuleNoCopyMixin
# pyre-ignore
class KeyedJaggedTensor(Pipelineable, metaclass=JaggedTensorMeta):
"""Represents an (optionally weighted) keyed jagged tensor.
A `KeyedJaggedTensor` is a tensor with a *jagged dimension* which is dimension whose
slices may be of different lengths. Keyed on first dimension and jagged on the last
dimension.
Implementation is torch.jit.script-able.
Args:
keys (List[str]): keys to the jagged Tensor.
values (torch.Tensor): values tensor in dense representation.
weights (Optional[torch.Tensor]): if the values have weights. Tensor with the
same shape as values.
lengths (Optional[torch.Tensor]): jagged slices, represented as lengths.
offsets (Optional[torch.Tensor]): jagged slices, represented as cumulative
offsets.
stride (Optional[int]): number of examples per batch.
stride_per_key_per_rank (Optional[List[List[int]]]): batch size
(number of examples) per key per rank, with the outer list representing the
keys and the inner list representing the values.
Each value in the inner list represents the number of examples in the batch
from the rank of its index in a distributed context.
length_per_key (Optional[List[int]]): start length for each key.
offset_per_key (Optional[List[int]]): start offset for each key and final
offset.
index_per_key (Optional[Dict[str, int]]): index for each key.
jt_dict (Optional[Dict[str, JaggedTensor]]):
inverse_indices (Optional[Tuple[List[str], torch.Tensor]]): inverse indices to
expand deduplicated embedding output for variable stride per key.
Example::
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
dim_0: keyed dimension (ie. `Feature0`, `Feature1`)
dim_1: optional second dimension (ie. batch size)
dim_2: The jagged dimension which has slice lengths between 0-3 in the above example
# We represent this data with following inputs:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
"""
# This is the subset of fields on KJT which are required (all other fields
# can be derived from these fields, and are only cached)
_fields = [
"_values",
"_weights",
"_lengths",
"_offsets",
]
    def __init__(
        self,
        keys: List[str],
        values: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        lengths: Optional[torch.Tensor] = None,
        offsets: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        # Below exposed to ensure torch.script-able
        length_per_key: Optional[List[int]] = None,
        offset_per_key: Optional[List[int]] = None,
        index_per_key: Optional[Dict[str, int]] = None,
        jt_dict: Optional[Dict[str, JaggedTensor]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> None:
        """See the class docstring for the meaning of each argument."""
        self._keys: List[str] = keys
        self._values: torch.Tensor = values
        self._weights: Optional[torch.Tensor] = weights
        # lengths/offsets must be integer tensors (or empty).
        if offsets is not None:
            _assert_tensor_has_no_elements_or_has_integers(offsets, "offsets")
        if lengths is not None:
            _assert_tensor_has_no_elements_or_has_integers(lengths, "lengths")
        self._lengths: Optional[torch.Tensor] = lengths
        self._offsets: Optional[torch.Tensor] = offsets
        self._stride_per_key_per_rank: List[List[int]] = []
        self._stride_per_key: List[int] = []
        self._variable_stride_per_key: bool = False
        self._stride: int = -1
        if stride_per_key_per_rank is not None:
            # Variable-batch-size path: per-key (per-rank) strides; the two
            # stride forms are mutually exclusive.
            if stride is not None:
                raise ValueError(
                    "Cannot initialize KJT with both `stride` and `stride_per_key_per_rank`"
                )
            self._stride_per_key_per_rank = stride_per_key_per_rank
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
            self._variable_stride_per_key = True
            if not stride_per_key_per_rank:
                self._stride = 0
            elif all(s == self.stride_per_key()[0] for s in self.stride_per_key()):
                # All keys share one stride; cache it as the scalar stride too.
                self._stride = self.stride_per_key()[0]
        else:
            # Fixed-batch-size path: derive stride from lengths/offsets when
            # not given (scripted variant needed under torch.jit tracing).
            if torch.jit.is_tracing():
                stride = _maybe_compute_stride_kjt_scripted(
                    keys, stride, lengths, offsets
                )[0]
            else:
                stride = _maybe_compute_stride_kjt(keys, stride, lengths, offsets)
            self._stride = stride
            self._stride_per_key_per_rank = [[stride]] * len(self._keys)
            self._stride_per_key = [sum(s) for s in self._stride_per_key_per_rank]
        # lazy fields
        self._length_per_key: Optional[List[int]] = length_per_key
        self._offset_per_key: Optional[List[int]] = offset_per_key
        self._index_per_key: Optional[Dict[str, int]] = index_per_key
        self._jt_dict: Optional[Dict[str, JaggedTensor]] = jt_dict
        self._inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = (
            inverse_indices
        )
        self._lengths_offset_per_key: List[int] = []
    def from_offsets_sync(
        keys: List[str],
        values: torch.Tensor,
        offsets: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Construct a KJT from cumulative ``offsets`` and eagerly compute
        the per-key length/offset caches (see :meth:`sync`)."""
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            offsets=offsets,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        return kjt.sync()
    def from_lengths_sync(
        keys: List[str],
        values: torch.Tensor,
        lengths: torch.Tensor,
        weights: Optional[torch.Tensor] = None,
        stride: Optional[int] = None,
        stride_per_key_per_rank: Optional[List[List[int]]] = None,
        inverse_indices: Optional[Tuple[List[str], torch.Tensor]] = None,
    ) -> "KeyedJaggedTensor":
        """Construct a KJT from per-slice ``lengths`` and eagerly compute
        the per-key length/offset caches (see :meth:`sync`)."""
        kjt = KeyedJaggedTensor(
            keys=keys,
            values=values,
            weights=weights,
            lengths=lengths,
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            inverse_indices=inverse_indices,
        )
        return kjt.sync()
    def concat(
        kjt_list: List["KeyedJaggedTensor"],
    ) -> "KeyedJaggedTensor":
        """Concatenate several KJTs along the key dimension into one KJT.

        Inputs must agree on weightedness, on variable-vs-fixed stride, and
        (in the fixed-stride case) on the stride value itself.

        Raises:
            ValueError: if ``kjt_list`` is empty or mixes weighted/unweighted.
        """
        if len(kjt_list) == 0:
            raise ValueError("Can't concat empty KJT list")
        is_weighted: bool = kjt_list[0].weights_or_none() is not None
        has_length_per_key: bool = True
        length_per_key: List[int] = []
        keys: List[str] = []
        value_list: List[torch.Tensor] = []
        weight_list: List[torch.Tensor] = []
        length_list: List[torch.Tensor] = []
        stride_per_key_per_rank: List[List[int]] = []
        stride: Optional[int] = None
        variable_stride_per_key_list = [
            kjt.variable_stride_per_key() for kjt in kjt_list
        ]
        assert all(variable_stride_per_key_list) or not any(
            variable_stride_per_key_list
        ), "variable stride per key must be consistent for all KJTs"
        variable_stride_per_key = all(variable_stride_per_key_list)
        for kjt in kjt_list:
            curr_is_weighted: bool = kjt.weights_or_none() is not None
            if is_weighted != curr_is_weighted:
                raise ValueError("Can't merge weighted KJT with unweighted KJT")
            _length_per_key: Optional[List[int]] = None
            if kjt._length_per_key is None:
                # One input lacks the cached lengths: stop propagating the cache.
                has_length_per_key = False
            else:
                _length_per_key = kjt._length_per_key
            if has_length_per_key and _length_per_key is not None:
                length_per_key += _length_per_key
            keys += kjt.keys()
            value_list.append(kjt.values())
            if is_weighted:
                weight_list.append(kjt.weights())
            length_list.append(kjt.lengths())
            if variable_stride_per_key:
                stride_per_key_per_rank += kjt.stride_per_key_per_rank()
            elif stride is None:
                stride = kjt.stride()
            else:
                assert stride == kjt.stride(), "strides must be consistent for all KJTs"
        return KeyedJaggedTensor(
            keys=keys,
            values=torch.cat(value_list, dim=0),
            weights=torch.cat(weight_list, dim=0) if is_weighted else None,
            lengths=torch.cat(length_list, dim=0),
            stride=stride,
            stride_per_key_per_rank=(
                stride_per_key_per_rank if variable_stride_per_key else None
            ),
            length_per_key=length_per_key if has_length_per_key else None,
        )
    def empty(
        is_weighted: bool = False,
        device: Optional[torch.device] = None,
        values_dtype: Optional[torch.dtype] = None,
        weights_dtype: Optional[torch.dtype] = None,
        lengths_dtype: torch.dtype = torch.int32,
    ) -> "KeyedJaggedTensor":
        """Create a KJT with no keys and empty values/lengths tensors."""
        weights = (
            torch.empty(0, dtype=weights_dtype, device=device) if is_weighted else None
        )
        # torch.jit.annotate keeps the empty keys list scriptable.
        return KeyedJaggedTensor(
            keys=torch.jit.annotate(List[str], []),
            values=torch.empty(0, dtype=values_dtype, device=device),
            weights=weights,
            lengths=torch.empty(0, dtype=lengths_dtype, device=device),
            stride=0,
        )
def empty_like(kjt: "KeyedJaggedTensor") -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, kjt.stride_per_key_per_rank())
if kjt.variable_stride_per_key()
else (kjt.stride(), None)
)
return KeyedJaggedTensor(
keys=[],
values=torch.empty(0, device=kjt.device(), dtype=kjt.values().dtype),
weights=(
None
if kjt.weights_or_none() is None
else torch.empty(0, device=kjt.device(), dtype=kjt.weights().dtype)
),
lengths=torch.empty(0, device=kjt.device(), dtype=kjt.lengths().dtype),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
)
def from_jt_dict(jt_dict: Dict[str, JaggedTensor]) -> "KeyedJaggedTensor":
"""
Constructs a KeyedJaggedTensor from a Dict[str, JaggedTensor],
but this function will ONLY work if the JaggedTensors all
have the same "implicit" batch_size dimension.
Basically, we can visualize JaggedTensors as 2-D tensors
of the format of [batch_size x variable_feature_dim].
In case, we have some batch without a feature value,
the input JaggedTensor could just not include any values.
But KeyedJaggedTensor (by default) typically pad "None"
so that all the JaggedTensors stored in the KeyedJaggedTensor
have the same batch_size dimension. That is, in the case,
the JaggedTensor input didn't automatically pad
for the empty batches, this function would error / not work.
Consider the visualization of the following KeyedJaggedTensor:
# 0 1 2 <-- dim_1
# "Feature0" [V0,V1] None [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
# ^
# dim_0
Notice that the inputs for this KeyedJaggedTensor would have looked like:
values: torch.Tensor = [V0, V1, V2, V3, V4, V5, V6, V7] # V == any tensor datatype
weights: torch.Tensor = [W0, W1, W2, W3, W4, W5, W6, W7] # W == any tensor datatype
lengths: torch.Tensor = [2, 0, 1, 1, 1, 3] # representing the jagged slice
offsets: torch.Tensor = [0, 2, 2, 3, 4, 5, 8] # offsets from 0 for each jagged slice
keys: List[str] = ["Feature0", "Feature1"] # correspond to each value of dim_0
index_per_key: Dict[str, int] = {"Feature0": 0, "Feature1": 1} # index for each key
offset_per_key: List[int] = [0, 3, 8] # start offset for each key and final offset
Now if the input jt_dict = {
# "Feature0" [V0,V1] [V2]
# "Feature1" [V3] [V4] [V5,V6,V7]
} and the "None" is left out from each JaggedTensor,
then this function would fail as we would not correctly
be able to pad "None" as it does not technically know
the correct batch / place to pad within the JaggedTensor.
Essentially, the lengths Tensor inferred by this function
would be [2, 1, 1, 1, 3] indicating variable batch_size
dim_1 violates the existing assumption / precondition
that KeyedJaggedTensor's should have fixed batch_size dimension.
"""
kjt_keys = list(jt_dict.keys())
kjt_vals_list: List[torch.Tensor] = []
kjt_lens_list: List[torch.Tensor] = []
kjt_weights_list: List[torch.Tensor] = []
stride_per_key: List[int] = []
for jt in jt_dict.values():
stride_per_key.append(len(jt.lengths()))
kjt_vals_list.append(jt.values())
kjt_lens_list.append(jt.lengths())
weight = jt.weights_or_none()
if weight is not None:
kjt_weights_list.append(weight)
kjt_vals = torch.concat(kjt_vals_list)
kjt_lens = torch.concat(kjt_lens_list)
kjt_weights = (
torch.concat(kjt_weights_list) if len(kjt_weights_list) > 0 else None
)
kjt_stride, kjt_stride_per_key_per_rank = (
(stride_per_key[0], None)
if all(s == stride_per_key[0] for s in stride_per_key)
else (None, [[stride] for stride in stride_per_key])
)
kjt = KeyedJaggedTensor(
keys=kjt_keys,
values=kjt_vals,
weights=kjt_weights,
lengths=kjt_lens,
stride=kjt_stride,
stride_per_key_per_rank=kjt_stride_per_key_per_rank,
).sync()
return kjt
def sync(self) -> "KeyedJaggedTensor":
self.length_per_key()
self.offset_per_key()
return self
def unsync(self) -> "KeyedJaggedTensor":
self._length_per_key = None
self._offset_per_key = None
return self
    def device(self) -> torch.device:
        """Device of the values tensor."""
        return self._values.device
    def lengths(self) -> torch.Tensor:
        """Jagged slice lengths, computed from offsets (and cached) if absent."""
        _lengths = _maybe_compute_lengths(self._lengths, self._offsets)
        self._lengths = _lengths
        return _lengths
    def lengths_or_none(self) -> Optional[torch.Tensor]:
        """Lengths tensor if set/cached, else ``None`` (no computation)."""
        return self._lengths
    def offsets(self) -> torch.Tensor:
        """Cumulative offsets, computed from lengths (and cached) if absent."""
        _offsets = _maybe_compute_offsets(self._lengths, self._offsets)
        self._offsets = _offsets
        return _offsets
    def offsets_or_none(self) -> Optional[torch.Tensor]:
        """Offsets tensor if set/cached, else ``None`` (no computation)."""
        return self._offsets
    def keys(self) -> List[str]:
        """List of feature keys."""
        return self._keys
    def values(self) -> torch.Tensor:
        """Dense values tensor."""
        return self._values
    def weights(self) -> torch.Tensor:
        """Weights tensor; raises if this KJT is unweighted."""
        return _get_weights_or_throw(self._weights)
    def weights_or_none(self) -> Optional[torch.Tensor]:
        """Weights tensor or ``None`` for an unweighted KJT."""
        return self._weights
    def stride(self) -> int:
        """Scalar batch size (examples per batch)."""
        return self._stride
    def stride_per_key(self) -> List[int]:
        """Total batch size per key (summed over ranks)."""
        return self._stride_per_key
    def stride_per_key_per_rank(self) -> List[List[int]]:
        """Batch size per key (outer list) per rank (inner list)."""
        return self._stride_per_key_per_rank
    def variable_stride_per_key(self) -> bool:
        """Whether this KJT uses the variable (per-key) stride representation."""
        return self._variable_stride_per_key
    def inverse_indices(self) -> Tuple[List[str], torch.Tensor]:
        """Inverse indices ``(keys, tensor)``; raises if not present."""
        return _get_inverse_indices_or_throw(self._inverse_indices)
    def inverse_indices_or_none(self) -> Optional[Tuple[List[str], torch.Tensor]]:
        """Inverse indices if present, else ``None``."""
        return self._inverse_indices
    def _key_indices(self) -> Dict[str, int]:
        """Mapping key -> position in ``self._keys`` (computed lazily, cached)."""
        _index_per_key: Dict[str, int] = _maybe_compute_index_per_key(
            self._keys,
            self._index_per_key,
        )
        self._index_per_key = _index_per_key
        return _index_per_key
    def length_per_key(self) -> List[int]:
        """Total number of values per key (computed lazily, cached)."""
        _length_per_key = _maybe_compute_length_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        return _length_per_key
    def length_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key lengths, or ``None`` if not computed yet."""
        return self._length_per_key
    def offset_per_key(self) -> List[int]:
        """Start offset of each key into ``values`` plus a final end offset
        (computed lazily; also refreshes the per-key length cache)."""
        _length_per_key, _offset_per_key = _maybe_compute_offset_per_key(
            keys=self._keys,
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            variable_stride_per_key=self.variable_stride_per_key(),
            length_per_key=self._length_per_key,
            offset_per_key=self._offset_per_key,
            lengths=self._lengths,
            offsets=self._offsets,
            values=self._values,
        )
        self._length_per_key = _length_per_key
        self._offset_per_key = _offset_per_key
        return _offset_per_key
    def offset_per_key_or_none(self) -> Optional[List[int]]:
        """Cached per-key offsets, or ``None`` if not computed yet."""
        return self._offset_per_key
    def lengths_offset_per_key(self) -> List[int]:
        """Start index of each key into the ``lengths`` tensor (cumulative
        sum of ``stride_per_key``; computed lazily, cached)."""
        if not self._lengths_offset_per_key:
            self._lengths_offset_per_key = _cumsum(self.stride_per_key())
        return self._lengths_offset_per_key
def split(self, segments: List[int]) -> List["KeyedJaggedTensor"]:
split_list: List[KeyedJaggedTensor] = []
start = 0
start_offset = 0
_length_per_key = self.length_per_key()
_offset_per_key = self.offset_per_key()
for segment in segments:
end = start + segment
end_offset = _offset_per_key[end]
keys: List[str] = self._keys[start:end]
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank()[start:end])
if self.variable_stride_per_key()
else (self._stride, None)
)
if segment == len(self._keys):
# no torch slicing required
split_list.append(
KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self.weights_or_none(),
lengths=self._lengths,
offsets=self._offsets,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self._length_per_key,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
jt_dict=self._jt_dict,
inverse_indices=None,
)
)
elif segment == 0:
empty_int_list: List[int] = torch.jit.annotate(List[int], [])
split_list.append(
KeyedJaggedTensor(
keys=keys,
values=torch.tensor(
empty_int_list,
device=self.device(),
dtype=self._values.dtype,
),
weights=(
None
if self.weights_or_none() is None
else torch.tensor(
empty_int_list,
device=self.device(),
dtype=self.weights().dtype,
)
),
lengths=torch.tensor(
empty_int_list, device=self.device(), dtype=torch.int
),
offsets=torch.tensor(
empty_int_list, device=self.device(), dtype=torch.int
),
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=None,
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
)
else:
split_length_per_key = _length_per_key[start:end]
if not torch.jit.is_scripting() and is_torchdynamo_compiling():
# Checks for dynamo dynamic shapes tracing
torch._check_is_size(start_offset)
torch._check_is_size(end_offset)
torch._check_is_size(end_offset - start_offset)
torch._check(start_offset <= self._values.size(0))
torch._check(end_offset <= self._values.size(0))
torch._check(end_offset >= start_offset)
split_list.append(
KeyedJaggedTensor(
keys=keys,
values=self._values[start_offset:end_offset],
weights=(
None
if self.weights_or_none() is None
else self.weights()[start_offset:end_offset]
),
lengths=self.lengths()[
self.lengths_offset_per_key()[
start
] : self.lengths_offset_per_key()[end]
],
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=split_length_per_key,
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
)
start = end
start_offset = end_offset
return split_list
    def permute(
        self,
        indices: List[int],
        indices_tensor: Optional[torch.Tensor] = None,
        include_inverse_indices: bool = False,
    ) -> "KeyedJaggedTensor":
        """Return a new KJT with keys reordered per ``indices``.

        ``indices_tensor`` may be supplied to avoid re-materializing the
        index list on device. Variable-stride KJTs are permuted with
        segment-permute helpers; fixed-stride ones use
        ``fbgemm.permute_2D_sparse_data``.
        """
        if indices_tensor is None:
            indices_tensor = torch.tensor(
                indices, dtype=torch.int, device=self.device()
            )
        length_per_key = self.length_per_key()
        permuted_keys: List[str] = []
        permuted_stride_per_key_per_rank: List[List[int]] = []
        permuted_length_per_key: List[int] = []
        permuted_lengths_sum = 0
        for index in indices:
            key = self.keys()[index]
            permuted_keys.append(key)
            permuted_stride_per_key_per_rank.append(
                self.stride_per_key_per_rank()[index]
            )
            permuted_length_per_key.append(length_per_key[index])
            permuted_lengths_sum += length_per_key[index]
        if self.variable_stride_per_key():
            # Permute lengths by per-key stride segments and values/weights
            # by per-key length segments.
            length_per_key_tensor = _pin_and_move(
                torch.tensor(self.length_per_key()), self.device()
            )
            stride_per_key_tensor = _pin_and_move(
                torch.tensor(self.stride_per_key()), self.device()
            )
            permuted_lengths, _ = _permute_tensor_by_segments(
                self.lengths(),
                stride_per_key_tensor,
                indices_tensor,
                None,
            )
            permuted_values, permuted_weights = _permute_tensor_by_segments(
                self.values(),
                length_per_key_tensor,
                indices_tensor,
                self.weights_or_none(),
            )
        else:
            # Fixed stride: lengths form a (num_keys, stride) matrix that
            # fbgemm can permute together with values/weights.
            (
                permuted_lengths,
                permuted_values,
                permuted_weights,
            ) = torch.ops.fbgemm.permute_2D_sparse_data(
                indices_tensor,
                self.lengths().view(len(self._keys), -1),
                self.values(),
                self.weights_or_none(),
                permuted_lengths_sum,
            )
        stride, optional_permuted_stride_per_key_per_rank = (
            (None, permuted_stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        kjt = KeyedJaggedTensor(
            keys=permuted_keys,
            values=permuted_values,
            weights=permuted_weights,
            lengths=permuted_lengths.view(-1),
            offsets=None,
            stride=stride,
            stride_per_key_per_rank=optional_permuted_stride_per_key_per_rank,
            length_per_key=permuted_length_per_key if len(permuted_keys) > 0 else None,
            offset_per_key=None,
            index_per_key=None,
            jt_dict=None,
            inverse_indices=(
                self.inverse_indices_or_none() if include_inverse_indices else None
            ),
        )
        return kjt
def flatten_lengths(self) -> "KeyedJaggedTensor":
stride, stride_per_key_per_rank = (
(None, self.stride_per_key_per_rank())
if self.variable_stride_per_key()
else (self._stride, None)
)
return KeyedJaggedTensor(
keys=self._keys,
values=self._values,
weights=self._weights,
lengths=self.lengths().view(-1),
offsets=None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self.length_per_key(),
offset_per_key=None,
index_per_key=None,
jt_dict=None,
inverse_indices=None,
)
    def __getitem__(self, key: str) -> JaggedTensor:
        """Return the :class:`JaggedTensor` for ``key``.

        Slices values/weights by the key's value offsets and lengths by the
        key's lengths offsets. Raises ``KeyError`` for an unknown key (via
        the key-index lookup).
        """
        offset_per_key = self.offset_per_key()
        index = self._key_indices()[key]
        start_offset = offset_per_key[index]
        # Defensive fallback: an out-of-range index+1 yields an empty slice.
        end_offset = (
            offset_per_key[index + 1]
            if index + 1 < len(offset_per_key)
            else start_offset
        )
        return JaggedTensor(
            values=self._values[start_offset:end_offset],
            weights=(
                None
                if self.weights_or_none() is None
                else self.weights()[start_offset:end_offset]
            ),
            lengths=self.lengths()[
                self.lengths_offset_per_key()[index] : self.lengths_offset_per_key()[
                    index + 1
                ]
            ],
            offsets=None,
        )
    def to_dict(self) -> Dict[str, JaggedTensor]:
        """Return (and cache) a mapping key -> :class:`JaggedTensor`."""
        _jt_dict = _maybe_compute_kjt_to_jt_dict(
            stride=self.stride(),
            stride_per_key=self.stride_per_key(),
            keys=self.keys(),
            length_per_key=self.length_per_key(),
            lengths=self.lengths(),
            values=self.values(),
            variable_stride_per_key=self.variable_stride_per_key(),
            weights=self.weights_or_none(),
            jt_dict=self._jt_dict,
        )
        self._jt_dict = _jt_dict
        return _jt_dict
    def record_stream(self, stream: torch.cuda.streams.Stream) -> None:
        """Call ``record_stream(stream)`` on every constituent tensor
        (values, and weights/lengths/offsets when present)."""
        self._values.record_stream(stream)
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        if weights is not None:
            weights.record_stream(stream)
        if lengths is not None:
            lengths.record_stream(stream)
        if offsets is not None:
            offsets.record_stream(stream)
    def to(
        self,
        device: torch.device,
        non_blocking: bool = False,
        dtype: Optional[torch.dtype] = None,
    ) -> "KeyedJaggedTensor":
        """Return a copy with all tensors moved to ``device``.

        ``dtype`` (if given) is applied to the weights tensor only. Cached
        per-key metadata is carried over unchanged; inverse-index tensors are
        moved as well.
        """
        weights = self._weights
        lengths = self._lengths
        offsets = self._offsets
        stride, stride_per_key_per_rank = (
            (None, self._stride_per_key_per_rank)
            if self.variable_stride_per_key()
            else (self._stride, None)
        )
        length_per_key = self._length_per_key
        offset_per_key = self._offset_per_key
        index_per_key = self._index_per_key
        jt_dict = self._jt_dict
        inverse_indices = self._inverse_indices
        if inverse_indices is not None:
            inverse_indices = (
                inverse_indices[0],
                inverse_indices[1].to(device, non_blocking=non_blocking),
            )
        if weights is not None:
            # dtype applies only to weights.
            if dtype is not None:
                weights = weights.to(
                    dtype=dtype, device=device, non_blocking=non_blocking
                )
            else:
                weights = weights.to(device=device, non_blocking=non_blocking)
        return KeyedJaggedTensor(
            keys=self._keys,
            values=self._values.to(device, non_blocking=non_blocking),
            weights=weights,
            lengths=(
                lengths.to(device, non_blocking=non_blocking)
                if lengths is not None
                else None
            ),
            offsets=(
                offsets.to(device, non_blocking=non_blocking)
                if offsets is not None
                else None
            ),
            stride=stride,
            stride_per_key_per_rank=stride_per_key_per_rank,
            length_per_key=length_per_key,
            offset_per_key=offset_per_key,
            index_per_key=index_per_key,
            jt_dict=jt_dict,
            inverse_indices=inverse_indices,
        )
    def __str__(self) -> str:
        """Render one line per key via ``_jagged_tensor_string``; an empty
        or lengths-/offsets-free KJT prints as ``KeyedJaggedTensor()``."""
        # Note precedence: empty-keys OR (no offsets AND no lengths).
        if len(self._keys) == 0 or self._offsets is None and self._lengths is None:
            return "KeyedJaggedTensor()\n"
        offsets = self.offsets()
        return (
            "KeyedJaggedTensor({\n"
            + ",\n".join(
                [
                    "    "
                    + _jagged_tensor_string(
                        self._keys[index],
                        self._values,
                        self._weights,
                        offsets,
                        sum(self.stride_per_key()[:index]),
                        sum(self.stride_per_key()[: index + 1]),
                    )
                    for index in range(len(self._keys))
                ]
            )
            + "\n})\n"
        )
def pin_memory(self) -> "KeyedJaggedTensor":
weights = self._weights
lengths = self._lengths
offsets = self._offsets
stride, stride_per_key_per_rank = (
(None, self._stride_per_key_per_rank)
if self.variable_stride_per_key()
else (self._stride, None)
)
inverse_indices = self._inverse_indices
if inverse_indices is not None:
inverse_indices = (inverse_indices[0], inverse_indices[1].pin_memory())
return KeyedJaggedTensor(
keys=self._keys,
values=self._values.pin_memory(),
weights=weights.pin_memory() if weights is not None else None,
lengths=lengths.pin_memory() if lengths is not None else None,
offsets=offsets.pin_memory() if offsets is not None else None,
stride=stride,
stride_per_key_per_rank=stride_per_key_per_rank,
length_per_key=self._length_per_key,
offset_per_key=self._offset_per_key,
index_per_key=self._index_per_key,
jt_dict=None,
inverse_indices=inverse_indices,
)
    def dist_labels(self) -> List[str]:
        """Labels of the tensors exchanged during distribution, in the same
        order as :meth:`dist_tensors` / :meth:`dist_splits` produce them."""
        labels = ["lengths", "values"]
        if self.variable_stride_per_key():
            labels.append("strides")
        if self.weights_or_none() is not None:
            labels.append("weights")
        return labels
    def dist_splits(self, key_splits: List[int]) -> List[List[int]]:
        """Per-destination split sizes for each dist tensor, given a
        partition of the keys (order matches :meth:`dist_labels`)."""
        batch_size_per_split = _sum_by_splits(self.stride_per_key(), key_splits)
        length_per_split = _sum_by_splits(self.length_per_key(), key_splits)
        splits = [batch_size_per_split, length_per_split]
        if self.variable_stride_per_key():
            splits.append(key_splits)
        if self.weights_or_none() is not None:
            # weights are split exactly like values.
            splits.append(length_per_split)
        return splits
def dist_tensors(self) -> List[torch.Tensor]:
tensors = [self.lengths(), self.values()]
if self.variable_stride_per_key():
strides = _pin_and_move(torch.tensor(self.stride_per_key()), self.device())
tensors.append(strides)
if self.weights_or_none() is not None:
tensors.append(self.weights())
return tensors
def dist_init(
keys: List[str],
tensors: List[torch.Tensor],
variable_stride_per_key: bool,
num_workers: int,
recat: Optional[torch.Tensor],
stride_per_rank: Optional[List[int]],
stagger: int = 1,
) -> "KeyedJaggedTensor":
assert len(tensors) in [2, 3, 4]
lengths = tensors[0]
values = tensors[1]
stride_per_rank_per_key = tensors[2] if variable_stride_per_key else None
weights = (
tensors[-1]
if (variable_stride_per_key and len(tensors) == 4)
or (not variable_stride_per_key and len(tensors) == 3)
else None
)
if variable_stride_per_key:
assert stride_per_rank_per_key is not None
stride_per_key_per_rank: List[List[int]] = stride_per_rank_per_key.view(
num_workers, len(keys)
).T.tolist()
strides_cumsum: List[int] = torch.ops.fbgemm.asynchronous_complete_cumsum(
stride_per_rank_per_key
).tolist()
cumsum_lengths = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
length_per_key = (
cumsum_lengths[strides_cumsum[1:]] - cumsum_lengths[strides_cumsum[:-1]]
)
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
lengths, _ = _permute_tensor_by_segments(
lengths,
stride_per_rank_per_key,
recat,
None,
)
values, weights = _permute_tensor_by_segments(
values,
length_per_key,
recat,
weights,
)
if not stride_per_key_per_rank:
stride_per_key_per_rank = [[0]] * len(keys)
if stagger > 1:
stride_per_key_per_rank_stagger: List[List[int]] = []
local_world_size = num_workers // stagger
for i in range(len(keys)):
stride_per_rank_stagger: List[int] = []
for j in range(local_world_size):
stride_per_rank_stagger.extend(
stride_per_key_per_rank[i][j::local_world_size]
)
stride_per_key_per_rank_stagger.append(stride_per_rank_stagger)
stride_per_key_per_rank = stride_per_key_per_rank_stagger
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride_per_key_per_rank=stride_per_key_per_rank,
)
return kjt.sync()
else:
assert stride_per_rank is not None
with record_function("## all2all_data:recat_values ##"):
if recat is not None and recat.numel() > 0:
stride = stride_per_rank[0]
if all(s == stride for s in stride_per_rank):
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_2D_sparse_data(
recat,
lengths.view(-1, stride),
values,
weights,
values.numel(),
)
lengths = lengths.view(-1)
else: # variable batch size per rank
(
lengths,
values,
weights,
) = torch.ops.fbgemm.permute_1D_sparse_data(
recat,
lengths.view(-1),
values,
weights,
values.numel(),
)
kjt = KeyedJaggedTensor(
keys=keys,
values=values,
weights=weights,
lengths=lengths,
stride=sum(stride_per_rank),
)
return kjt.sync()
def _get_feature_length(feature: KeyedJaggedTensor) -> Tensor:
return feature.lengths() | null |
8,947 | import copy
import itertools
from collections import defaultdict
from typing import Callable, cast, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
)
from torch import Tensor
from torchrec.distributed.utils import none_throws
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
QuantConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection as OriginalEmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
FeatureProcessedEmbeddingBagCollection as OriginalFeatureProcessedEmbeddingBagCollection,
)
from torchrec.modules.utils import construct_jagged_tensors_inference
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import ModuleNoCopyMixin
MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS: str = (
"__quant_state_dict_split_scale_bias"
)
def quant_prep_enable_quant_state_dict_split_scale_bias(module: nn.Module) -> None:
setattr(module, MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS, True) | null |
8,948 | import copy
import itertools
from collections import defaultdict
from typing import Callable, cast, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
)
from torch import Tensor
from torchrec.distributed.utils import none_throws
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
QuantConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection as OriginalEmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
FeatureProcessedEmbeddingBagCollection as OriginalFeatureProcessedEmbeddingBagCollection,
)
from torchrec.modules.utils import construct_jagged_tensors_inference
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import ModuleNoCopyMixin
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS: str = (
"__quant_state_dict_split_scale_bias"
)
def for_each_module_of_type_do(
module: nn.Module,
module_types: List[Type[torch.nn.Module]],
op: Callable[[torch.nn.Module], None],
) -> None:
for m in module.modules():
if any([isinstance(m, t) for t in module_types]):
op(m)
def quant_prep_enable_quant_state_dict_split_scale_bias_for_types(
module: nn.Module, module_types: List[Type[torch.nn.Module]]
) -> None:
for_each_module_of_type_do(
module,
module_types,
lambda m: setattr(m, MODULE_ATTR_QUANT_STATE_DICT_SPLIT_SCALE_BIAS, True),
) | null |
8,949 | import copy
import itertools
from collections import defaultdict
from typing import Callable, cast, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
)
from torch import Tensor
from torchrec.distributed.utils import none_throws
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
QuantConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection as OriginalEmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
FeatureProcessedEmbeddingBagCollection as OriginalFeatureProcessedEmbeddingBagCollection,
)
from torchrec.modules.utils import construct_jagged_tensors_inference
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import ModuleNoCopyMixin
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
MODULE_ATTR_REGISTER_TBES_BOOL: str = "__register_tbes_in_named_modules"
def for_each_module_of_type_do(
module: nn.Module,
module_types: List[Type[torch.nn.Module]],
op: Callable[[torch.nn.Module], None],
) -> None:
for m in module.modules():
if any([isinstance(m, t) for t in module_types]):
op(m)
def quant_prep_enable_register_tbes(
module: nn.Module, module_types: List[Type[torch.nn.Module]]
) -> None:
for_each_module_of_type_do(
module,
module_types,
lambda m: setattr(m, MODULE_ATTR_REGISTER_TBES_BOOL, True),
) | null |
8,950 | import copy
import itertools
from collections import defaultdict
from typing import Callable, cast, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
)
from torch import Tensor
from torchrec.distributed.utils import none_throws
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
QuantConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection as OriginalEmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
FeatureProcessedEmbeddingBagCollection as OriginalFeatureProcessedEmbeddingBagCollection,
)
from torchrec.modules.utils import construct_jagged_tensors_inference
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import ModuleNoCopyMixin
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
MODULE_ATTR_ROW_ALIGNMENT_INT: str = "__register_row_alignment_in_named_modules"
def for_each_module_of_type_do(
module: nn.Module,
module_types: List[Type[torch.nn.Module]],
op: Callable[[torch.nn.Module], None],
) -> None:
for m in module.modules():
if any([isinstance(m, t) for t in module_types]):
op(m)
def quant_prep_customize_row_alignment(
module: nn.Module, module_types: List[Type[torch.nn.Module]], row_alignment: int
) -> None:
for_each_module_of_type_do(
module,
module_types,
lambda m: setattr(m, MODULE_ATTR_ROW_ALIGNMENT_INT, row_alignment),
) | null |
8,951 | import copy
import itertools
from collections import defaultdict
from typing import Callable, cast, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
)
from torch import Tensor
from torchrec.distributed.utils import none_throws
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
QuantConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection as OriginalEmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
FeatureProcessedEmbeddingBagCollection as OriginalFeatureProcessedEmbeddingBagCollection,
)
from torchrec.modules.utils import construct_jagged_tensors_inference
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import ModuleNoCopyMixin
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
def pruned_num_embeddings(pruning_indices_mapping: Tensor) -> int:
return int(torch.max(pruning_indices_mapping).item()) + 1
DATA_TYPE_NUM_BITS: Dict[DataType, int] = {
DataType.FP32: 32,
DataType.FP16: 16,
DataType.BF16: 16,
DataType.INT8: 8,
DataType.UINT8: 8,
DataType.INT4: 4,
DataType.INT2: 2,
}
def quantize_state_dict(
module: nn.Module,
table_name_to_quantized_weights: Dict[str, Tuple[Tensor, Tensor]],
table_name_to_data_type: Dict[str, DataType],
table_name_to_pruning_indices_mapping: Optional[Dict[str, Tensor]] = None,
) -> torch.device:
device = torch.device("cpu")
if not table_name_to_pruning_indices_mapping:
table_name_to_pruning_indices_mapping = {}
for key, tensor in module.state_dict().items():
# Extract table name from state dict key.
# e.g. ebc.embedding_bags.t1.weight
splits = key.split(".")
assert splits[-1] == "weight"
table_name = splits[-2]
data_type = table_name_to_data_type[table_name]
num_rows = tensor.shape[0]
pruning_indices_mapping: Optional[Tensor] = None
if table_name in table_name_to_pruning_indices_mapping:
pruning_indices_mapping = table_name_to_pruning_indices_mapping[table_name]
if pruning_indices_mapping is not None:
num_rows = pruned_num_embeddings(pruning_indices_mapping)
device = tensor.device
num_bits = DATA_TYPE_NUM_BITS[data_type]
if tensor.is_meta:
quant_weight = torch.empty(
(num_rows, (tensor.shape[1] * num_bits) // 8),
device="meta",
dtype=torch.uint8,
)
if (
data_type == DataType.INT8
or data_type == DataType.INT4
or data_type == DataType.INT2
):
scale_shift = torch.empty(
(num_rows, 4),
device="meta",
dtype=torch.uint8,
)
else:
scale_shift = None
else:
if pruning_indices_mapping is not None:
rows_mask = pruning_indices_mapping.gt(-1)
tensor = tensor[rows_mask, :]
if tensor.dtype == torch.float or tensor.dtype == torch.float16:
if data_type == DataType.FP16:
if tensor.dtype == torch.float:
tensor = tensor.half()
quant_res = tensor.view(torch.uint8)
else:
quant_res = (
torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
tensor, num_bits
)
)
else:
raise Exception("Unsupported dtype: {tensor.dtype}")
if (
data_type == DataType.INT8
or data_type == DataType.INT4
or data_type == DataType.INT2
):
quant_weight, scale_shift = (
quant_res[:, :-4],
quant_res[:, -4:],
)
else:
quant_weight, scale_shift = quant_res, None
table_name_to_quantized_weights[table_name] = (quant_weight, scale_shift)
return device | null |
8,952 | import copy
import itertools
from collections import defaultdict
from typing import Callable, cast, Dict, List, Optional, Tuple, Type, Union
import torch
import torch.nn as nn
from fbgemm_gpu.split_table_batched_embeddings_ops_inference import (
EmbeddingLocation,
IntNBitTableBatchedEmbeddingBagsCodegen,
PoolingMode,
)
from torch import Tensor
from torchrec.distributed.utils import none_throws
from torchrec.modules.embedding_configs import (
BaseEmbeddingConfig,
DATA_TYPE_NUM_BITS,
data_type_to_sparse_type,
DataType,
dtype_to_data_type,
EmbeddingBagConfig,
EmbeddingConfig,
pooling_type_to_pooling_mode,
PoolingType,
QuantConfig,
)
from torchrec.modules.embedding_modules import (
EmbeddingBagCollection as OriginalEmbeddingBagCollection,
EmbeddingBagCollectionInterface,
EmbeddingCollection as OriginalEmbeddingCollection,
EmbeddingCollectionInterface,
get_embedding_names_by_table,
)
from torchrec.modules.feature_processor_ import FeatureProcessorsCollection
from torchrec.modules.fp_embedding_modules import (
FeatureProcessedEmbeddingBagCollection as OriginalFeatureProcessedEmbeddingBagCollection,
)
from torchrec.modules.utils import construct_jagged_tensors_inference
from torchrec.sparse.jagged_tensor import JaggedTensor, KeyedJaggedTensor, KeyedTensor
from torchrec.tensor_types import UInt2Tensor, UInt4Tensor
from torchrec.types import ModuleNoCopyMixin
try:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
except OSError:
pass
def dtype_to_data_type(dtype: torch.dtype) -> DataType:
if dtype == torch.float:
return DataType.FP32
elif dtype == torch.float16 or dtype == torch.half:
return DataType.FP16
elif dtype == torch.bfloat16:
return DataType.BF16
elif dtype in {torch.int, torch.int32}:
return DataType.INT32
elif dtype in {torch.long, torch.int64}:
return DataType.INT64
elif dtype in {torch.quint8, torch.qint8, torch.int8}:
return DataType.INT8
elif dtype == torch.uint8:
return DataType.UINT8
elif dtype == torch.quint4x2:
return DataType.INT4
elif dtype == torch.quint2x4:
return DataType.INT2
else:
raise Exception(f"Invalid data type {dtype}")
class BaseEmbeddingConfig:
num_embeddings: int
embedding_dim: int
name: str = ""
data_type: DataType = DataType.FP32
feature_names: List[str] = field(default_factory=list)
weight_init_max: Optional[float] = None
weight_init_min: Optional[float] = None
# if table is pruned:
# num_embeddings is pruned number of embeddings,
# pruning_indices_mapping is not None and contains remapping tensor of shape=[unpruned_num_embeddings], dtype=int32
pruning_indices_remapping: Optional[torch.Tensor] = None
init_fn: Optional[Callable[[torch.Tensor], Optional[torch.Tensor]]] = None
# when the position_weighted feature is in this table config,
# enable this flag to support rw_sharding
need_pos: bool = False
def get_weight_init_max(self) -> float:
if self.weight_init_max is None:
return sqrt(1 / self.num_embeddings)
else:
return self.weight_init_max
def get_weight_init_min(self) -> float:
if self.weight_init_min is None:
return -sqrt(1 / self.num_embeddings)
else:
return self.weight_init_min
def num_features(self) -> int:
return len(self.feature_names)
def __post_init__(self) -> None:
if self.init_fn is None:
self.init_fn = partial(
torch.nn.init.uniform_,
a=self.get_weight_init_min(),
b=self.get_weight_init_max(),
)
class QuantConfig(NamedTuple):
activation: torch.quantization.PlaceholderObserver
weight: torch.quantization.PlaceholderObserver
per_table_weight_dtype: Optional[Dict[str, torch.dtype]] = None
def _update_embedding_configs(
embedding_configs: List[BaseEmbeddingConfig],
quant_config: Union[QuantConfig, torch.quantization.QConfig],
) -> None:
per_table_weight_dtype = (
quant_config.per_table_weight_dtype
if isinstance(quant_config, QuantConfig) and quant_config.per_table_weight_dtype
else {}
)
for config in embedding_configs:
config.data_type = dtype_to_data_type(
per_table_weight_dtype[config.name]
if config.name in per_table_weight_dtype
else quant_config.weight().dtype
) | null |
8,953 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
from torch.fx._compatibility import compatibility
from torch.fx.graph import Graph
from torch.fx.node import Argument
from torchrec.distributed.types import LazyAwaitable, NoWait
from torchrec.fx.utils import dmp_fx_trace_forward
_is_fx_tracing_flag = False
def is_fx_tracing() -> bool:
return _is_fx_tracing_flag | null |
8,954 | from typing import Any, Callable, Dict, List, Optional, Union
import torch
from torch.fx._compatibility import compatibility
from torch.fx.graph import Graph
from torch.fx.node import Argument
from torchrec.distributed.types import LazyAwaitable, NoWait
from torchrec.fx.utils import dmp_fx_trace_forward
class Tracer(torch.fx.Tracer):
"""
Custom FX tracer for torchrec
See `Torch.FX documentation <https://pytorch.org/docs/stable/fx.html>`_
We create a custom FX tracer to trace torchrec based models. The custom tracer
handles python generic types (i.e. NoWait[T], Awaitable[T]) and lower it to
TorchScript if needed
"""
def __init__(self, leaf_modules: Optional[List[str]] = None) -> None:
super().__init__()
self._leaf_modules: List[str] = leaf_modules if leaf_modules is not None else []
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name: str) -> bool:
"""
Override FX definition to include quantized embedding bags
"""
if type(m).__name__ in self._leaf_modules:
return True
return super().is_leaf_module(m, module_qualified_name)
def trace(
self,
# pyre-ignore[2]: Missing parameter annotation [2]: Parameter `root` must have a type that does not contain `Any`
root: Union[torch.nn.Module, Callable[..., Any]],
concrete_args: Optional[Dict[str, Any]] = None,
) -> Graph:
global _is_fx_tracing_flag
old_is_fx_tracing_flag = _is_fx_tracing_flag
_is_fx_tracing_flag = True
try:
# TODO(ivankobzarev): support DMP not only on the root level
from torchrec.distributed.model_parallel import DistributedModelParallel
if isinstance(root, DistributedModelParallel):
# In the case where the module is wrapped in DMP, you need to replace DMP's forward
# call with a new signature, one with explicit args, because fx can't handle variable args.
# Furthermore, we need to provide the `fn_root` argument because when tracing a function,
# fx uses an empty module as the root (unless one is explicitly provided), which leads to
# issues with path_of_module and named_buffers.
# TODO(shababayub): This can be removed if we either stop supporting dmp wrapping
# for fx trace or strip dmp name in named_modules path (much like named_buffers).
if isinstance(root, torch.nn.Module):
for prefix, module in root.named_modules():
# TODO(T140754678): Remove this workaround to _fx_path
module._fx_path = prefix
dmp = root
graph = super().trace(
root=dmp_fx_trace_forward(dmp, self),
concrete_args=concrete_args,
)
self.root._dmp_wrapped_module = dmp._dmp_wrapped_module
else:
# Unwrapped dmp modules and composibility api will enter here.
graph = super().trace(
root=root,
concrete_args=concrete_args,
)
finally:
_is_fx_tracing_flag = old_is_fx_tracing_flag
return graph
# pyre-ignore[2]
def create_arg(self, a: Any) -> Argument:
"""
A method to specify the behavior of tracing when preparing values to
be used as arguments to nodes in the ``Graph``.
Adds support for the NoWait type in addition to the default tracer
Args:
a (Any): The value to be emitted as an ``Argument`` in the ``Graph``.
Returns:
Argument: The value ``a`` converted into the appropriate ``Argument``
"""
if isinstance(a, NoWait):
return self.create_node(
"call_function",
target=NoWait,
args=self.create_arg((a._obj,)),
kwargs={},
type_expr=NoWait,
)
# jit script has explicit convertions to torch.device from str
if isinstance(a, torch.device):
return super().create_arg(f"{a.type}:{a.index}")
# Not equivalent to when LazyAwaitable.wait() is called in eager. Here can be called earlier, as attr was not requested and this is not guranteed to be torch function
# TODO(ivankobzarev): support equivalent timing of LazyAwaitable
if isinstance(a, LazyAwaitable):
if a._result is None:
a._result = a.wait()
return super().create_arg(a._result)
return super().create_arg(a)
def path_of_module(self, mod: torch.nn.Module) -> str:
"""
Allows trace-ability of non registered modules. This is typically used for Table Batched Embeddings
made to look like nn.EmbeddingBags
"""
if hasattr(mod, "_fx_path"):
return mod._fx_path
else:
return super().path_of_module(mod)
The provided code snippet includes necessary dependencies for implementing the `symbolic_trace` function. Write a Python function `def symbolic_trace( # pyre-ignore[24] root: Union[torch.nn.Module, Callable], concrete_args: Optional[Dict[str, Any]] = None, leaf_modules: Optional[List[str]] = None, ) -> torch.fx.GraphModule` to solve the following problem:
Symbolic tracing API Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule`` constructed by recording operations seen while tracing through ``root``. ``concrete_args`` allows you to partially specialize your function, whether it's to remove control flow or data structures. Args: root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted into a Graph representation. concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized Returns: GraphModule: a Module created from the recorded operations from ``root``.
Here is the function:
def symbolic_trace(
# pyre-ignore[24]
root: Union[torch.nn.Module, Callable],
concrete_args: Optional[Dict[str, Any]] = None,
leaf_modules: Optional[List[str]] = None,
) -> torch.fx.GraphModule:
"""
Symbolic tracing API
Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule``
constructed by recording operations seen while tracing through ``root``.
``concrete_args`` allows you to partially specialize your function, whether it's to remove control flow or data structures.
Args:
root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted
into a Graph representation.
concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized
Returns:
GraphModule: a Module created from the recorded operations from ``root``.
"""
tracer = Tracer(leaf_modules)
graph = tracer.trace(root, concrete_args)
return torch.fx.GraphModule(root, graph) | Symbolic tracing API Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule`` constructed by recording operations seen while tracing through ``root``. ``concrete_args`` allows you to partially specialize your function, whether it's to remove control flow or data structures. Args: root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted into a Graph representation. concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized Returns: GraphModule: a Module created from the recorded operations from ``root``. |
8,955 | import inspect
from typing import Any, Dict, List, Set
import torch
from torch.fx._symbolic_trace import is_fx_tracing
def fake_range():
# pyre-fixme[16]: Module `_C` has no attribute `_jit_tree_views`.
return torch._C._jit_tree_views.SourceRangeFactory("", None, 0, 0).make_raw_range(
0, 1
)
def dmp_fx_trace_forward( # noqa: C901
# pyre-ignore
dmp,
tracer: torch.fx.Tracer,
):
func = dmp._dmp_wrapped_module.forward
sign: inspect.Signature = inspect.signature(func)
module_to_type_str: Dict[str, Set[str]] = {}
def add_if_missing(module: str, type_str: str) -> None:
if module not in module_to_type_str:
_set = set()
_set.add(type_str)
module_to_type_str[module] = _set
else:
s = module_to_type_str[module]
if type_str not in s:
s.add(type_str)
def torch_no_import(t: torch.Type) -> bool:
return isinstance(
t, (torch.FloatType, torch.IntType, torch.ComplexType, torch.StringType)
)
def torch_typing(t: torch.Type) -> bool:
return isinstance(
t,
(
torch.TupleType,
torch.ListType,
torch.DictType,
torch.OptionalType,
torch.AnyType,
),
)
exec_imports = []
args_call = ", ".join([f"{p.name}" for p in sign.parameters.values()])
types = []
try:
args_decls: List[str] = []
for p in sign.parameters.values():
pann = p.annotation
ptype = torch.jit.annotations.try_ann_to_type(pann, fake_range())
types.append(ptype)
args_decls.append(f"{p.name}: {ptype}")
while len(types) > 0:
t = types.pop()
if torch_no_import(t):
continue
t_base_name = f"{t}".split("[")[0]
if torch_typing(t):
add_if_missing("typing", t_base_name)
else:
if hasattr(t, "__module__") and not torch_no_import(t):
m = t.__module__
add_if_missing(f"{m}", f"{t}".split("[")[0])
if hasattr(t, "containedTypes"):
contained_types = getattr(t, "containedTypes", None)()
for ctype in contained_types:
types.append(ctype)
if hasattr(t, "getElementType"):
el_type = getattr(t, "getElementType", None)()
args_decl = ", ".join(args_decls)
for m, s in module_to_type_str.items():
ts = ", ".join(s)
exec_imports.append(f"from {m} import {ts}")
except Exception as e:
print(f"Exception:{e}")
# Catching here if source is not available to proceed hoping that jit will infer correct types without annotations.
# Often it fails here when can not access to dataclass generated __init__
args_decl = args_call
exec_def_fn_name = "__fx_forward"
exec_dmp_wrapper_local_name = "_dmp_wrapped_module_local"
_dmp_wrapped_module_local = dmp
locals_dict = locals()
exec_def = f"def {exec_def_fn_name}({args_decl}):\n return {exec_dmp_wrapper_local_name}({args_call})"
exec_imports_str = "\n".join(exec_imports)
pycode = f"{exec_imports_str}\n{exec_def}"
exec(pycode, locals_dict) # noqa: P204 Allow use of exec
wrapper = locals_dict[exec_def_fn_name]
wrapper.__signature__ = sign
return wrapper | null |
8,956 | import inspect
from typing import Any, Dict, List, Set
import torch
from torch.fx._symbolic_trace import is_fx_tracing
def _fx_marker(s: str, any_proxy_unused: Any) -> None:
pass
def fx_marker(s: str, any_proxy_unused: Any) -> None:
if is_fx_tracing():
_fx_marker(s, any_proxy_unused) | null |
8,957 | import inspect
from typing import Any, Dict, List, Set
import torch
from torch.fx._symbolic_trace import is_fx_tracing
def _fx_marker(s: str, any_proxy_unused: Any) -> None:
pass
def is_marker_node(node: torch.fx.Node, marker_name: str) -> bool:
# bool() syntax for pyre
return bool(
node.op == "call_function"
and node.target == _fx_marker
and isinstance(node.args[0], str)
and node.args[0] == marker_name
) | null |
8,958 | import ast
import json
from argparse import ArgumentParser, Namespace
from typing import Any, Dict, List, Optional, Tuple
def check_class_definition(python_path: str, node: ast.ClassDef) -> None:
"""
This function will run set of sanity checks against class definitions
and their docstrings.
Args:
python_path: Path to the file that is getting checked
node: AST node with the ClassDef that needs to be checked
Returns:
None
"""
assert (
type(node) == ast.ClassDef
), "Received invalid node type. Expected ClassDef, got: {}".format(type(node))
is_TorchRec_module = False
is_test_file = "tests" in python_path
for base in node.bases:
# For now only names and attributes are supported
if type(base) != ast.Name and type(base) != ast.Attribute: # pragma: nocover
continue
# We assume that TorchRec module has one of the following inheritance patterns:
# 1. `class SomeTorchRecModule(LazyModuleExtensionMixin, torch.nn.Module)`
# 2. `class SomeTorchRecModule(torch.nn.Module)`
# pyre-ignore[16]: `_ast.expr` has no attribute `id`.
if hasattr(base, "id") and base.id == "LazyModuleExtensionMixin":
is_TorchRec_module = True
break
# pyre-ignore[16]: `_ast.expr` has no attribute `id`.
elif hasattr(base, "attr") and base.attr == "Module":
is_TorchRec_module = True
break
if not is_TorchRec_module or is_test_file:
return
docstring: Optional[str] = ast.get_docstring(node)
if docstring is None:
print_error_message(
python_path,
node,
"No docstring found in a TorchRec module",
"TorchRec modules are required to have a docstring describing how "
"to use them. Given Module don't have a docstring, please fix this.",
)
return
# Check presence of the example:
if "Example:" not in docstring:
print_error_message(
python_path,
node,
"No runnable example in a TorchRec module",
"TorchRec modules are required to have runnable examples in "
'"Example:" section. Please fix the docstring',
)
# Check correctness of the Args for a class definition:
required_keywords = ["Args:"]
missing_keywords = []
for keyword in required_keywords:
if keyword not in docstring:
missing_keywords.append(keyword)
if len(missing_keywords) > 0:
print_error_message(
python_path,
node,
"Missing required keywords from TorchRec module",
"TorchRec modules are required to description of their args and "
'results in "Args:". '
"Missing keywords: {}.".format(missing_keywords),
)
# Check actual args from the functions
# pyre-ignore[33]: Explicit annotation for `functions` cannot contain `Any`.
functions: Dict[str, Tuple[List[Any], List[Any]]] = {}
function_sub_nodes = {}
for sub_node in node.body:
if type(sub_node) == ast.FunctionDef:
assert isinstance(sub_node, ast.FunctionDef)
functions[sub_node.name] = get_function_args(sub_node)
function_sub_nodes[sub_node.name] = sub_node
def check_function(function_name: str) -> None:
    """Validate that the CLASS docstring documents ``function_name``'s arguments.

    For ``__init__`` this additionally enforces the constructor-argument
    budget (``MAX_NUM_ARGS_IN_MODULE_CTOR``). Problems are reported via
    ``print_error_message``; nothing is returned.

    Args:
        function_name: Name of a method previously collected in ``functions``.

    Returns:
        None.
    """
    if function_name not in functions:
        return
    required_args, optional_args = functions[function_name]
    if function_name == "__init__":
        # NOTE: -1 to not count the `self` argument.
        num_args = len(required_args) + len(optional_args) - 1
        if num_args > MAX_NUM_ARGS_IN_MODULE_CTOR:
            print_error_message(
                python_path,
                node,
                "TorchRec module has too many constructor arguments",
                "TorchRec module can have at most {} constructor arguments, but this module has {}.".format(
                    MAX_NUM_ARGS_IN_MODULE_CTOR,
                    num_args,
                ),
            )
    # The original re-checked `function_name in functions` here; that is
    # redundant because we already returned above when the name is absent.
    assert docstring is not None
    missing_required_args = []
    missing_optional_args = []
    for arg in required_args:
        # `self` and `net` are exempt from documentation requirements.
        if arg == "self" or arg == "net":
            continue
        if arg not in docstring:
            missing_required_args.append(arg)
    for arg in optional_args:
        if arg not in docstring:
            missing_optional_args.append(arg)
    if len(missing_required_args) > 0 or len(missing_optional_args) > 0:
        print_error_message(
            python_path,
            node,
            "Missing docstring descriptions for {} function arguments.".format(
                function_name
            ),
            (
                "Missing descriptions for {} function arguments. "
                "Missing required args: {}, missing optional args: {}"
            ).format(
                function_name,
                missing_required_args,
                missing_optional_args,
            ),
        )
# pyre-ignore[53]
def check_function_docstring(function_name: str) -> None:
    """Verify that ``function_name`` carries its OWN docstring, that the
    docstring mentions every argument, and that it has a "Returns:" section.

    Each violation is reported through ``print_error_message``.
    """
    if function_name not in functions:
        return
    fn_doc: Optional[str] = ast.get_docstring(function_sub_nodes[function_name])
    if fn_doc is None:
        print_error_message(
            python_path,
            node,
            "Missing docstring for {} function".format(function_name),
            "Missing docstring for {} function".format(function_name),
        )
        return
    required_args, optional_args = functions[function_name]
    # `self` and `net` never require documentation; every other argument
    # name must appear somewhere in the function's docstring.
    missing_required_args = [
        arg
        for arg in required_args
        if arg != "self" and arg != "net" and arg not in fn_doc
    ]
    missing_optional_args = [arg for arg in optional_args if arg not in fn_doc]
    if missing_required_args or missing_optional_args:
        print_error_message(
            python_path,
            node,
            "Missing docstring descriptions for {} function arguments.".format(
                function_name
            ),
            (
                "Missing descriptions for {} function arguments. "
                "Missing required args: {}, missing optional args: {}"
            ).format(
                function_name,
                missing_required_args,
                missing_optional_args,
            ),
        )
    if "Returns:" not in fn_doc:
        print_error_message(
            python_path,
            node,
            "Missing docstring descriptions for {} function arguments.".format(
                function_name
            ),
            (
                "Missing descriptions for {} function arguments. "
                "Missing Returns section"
            ).format(
                function_name,
            ),
        )
check_function("__init__")
check_function_docstring("forward")
def read_file(path: str) -> str:  # pragma: nocover
    """
    Read and return the entire contents of a file.

    Kept as a standalone function purely to simplify testing (it can be
    monkey-patched). Uses a context manager so the file handle is always
    closed — the original ``open(path).read()`` leaked the handle until
    garbage collection.

    Args:
        path: File to read.

    Returns:
        content(str): Content of given file.
    """
    with open(path) as f:
        return f.read()
The provided code snippet includes necessary dependencies for implementing the `linter_one_file` function. Write a Python function `def linter_one_file(python_path: str) -> None` to solve the following problem:
This function will check all Modules defined in the given file for a valid documentation based on the AST. Input args: python_path: Path to the file that needs to be verified with the linter. Returns: None
Here is the function:
def linter_one_file(python_path: str) -> None:
    """
    Check all Modules defined in the given file for valid documentation,
    based on the AST.

    Args:
        python_path: Path to the file that needs to be verified with the linter.

    Returns:
        None
    """
    python_path = python_path.strip()
    try:
        # Only top-level class definitions are linted (nested classes are
        # intentionally skipped, matching the original behavior).
        for node in ast.parse(read_file(python_path)).body:
            # isinstance narrows the type for the checker, replacing the
            # original `type(node) == ast.ClassDef` plus redundant assert.
            if isinstance(node, ast.ClassDef):
                check_class_definition(python_path, node)
    except SyntaxError as e:  # pragma: nocover
        # The target file could not be parsed; emit a structured lint
        # warning instead of crashing the whole linter run.
        lint_item = {
            "path": python_path,
            "line": e.lineno,
            "char": e.offset,
            "severity": "warning",
            "name": "syntax-error",
            "description": (
                f"There is a linter parser error with message: {e.msg}. "
                "Please report the diff to torchrec oncall"
            ),
            "bypassChangedLineFiltering": True,
        }
        print(json.dumps(lint_item))
8,959 | import ast
import json
from argparse import ArgumentParser, Namespace
from typing import Any, Dict, List, Optional, Tuple
def _make_argparse() -> ArgumentParser: # pragma: nocover
parser = ArgumentParser(
description="TorchRec docstring linter", fromfile_prefix_chars="@"
)
parser.add_argument("source_files", nargs="+", help="Path to python source files")
return parser
def _parse_args() -> Namespace:  # pragma: nocover
    """Parse process argv with the linter's argument parser."""
    return _make_argparse().parse_args()
8,960 | import itertools
from typing import List, Tuple
import torch
import torch._prims_common as utils
def down_size(N: int, size: torch.Size) -> Tuple[int, ...]:
    """Shrink the last dimension of ``size`` by a factor of ``N``.

    Presumably maps a logical shape to the physical shape when N sub-byte
    elements are packed into one storage element — TODO confirm with callers.

    Args:
        N: Packing factor; ``size[-1]`` must be divisible by it.
        size: Original shape (rank >= 1).

    Returns:
        A tuple equal to ``size`` with its last dimension divided by ``N``.
        (Annotation fixed: the original ``Tuple[int, int]`` was wrong — the
        result keeps the rank of ``size``, which is not necessarily 2.)

    Raises:
        AssertionError: If ``size[-1]`` is not divisible by ``N``.
    """
    assert size[-1] % N == 0, f"{size} last dim not divisible by {N}"
    return (*size[:-1], size[-1] // N)
8,961 | import itertools
from typing import List, Tuple
import torch
import torch._prims_common as utils
def up_size(N: int, size: torch.Size) -> Tuple[int, ...]:
    """Grow the last dimension of ``size`` by a factor of ``N``.

    Inverse of ``down_size``; no divisibility requirement applies.

    Args:
        N: Multiplication factor for the last dimension.
        size: Original shape (rank >= 1).

    Returns:
        A tuple equal to ``size`` with its last dimension multiplied by ``N``.
        (Annotation fixed: the original ``Tuple[int, int]`` was wrong — the
        result keeps the rank of ``size``, which is not necessarily 2.)
    """
    return (*size[:-1], size[-1] * N)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.