repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/colossalai_layer/__init__.py
colossalai/legacy/nn/layer/colossalai_layer/__init__.py
"""Unified Colossal-AI layer wrappers.

Re-exports the mode-dispatching layer classes (each selects a concrete
tensor-parallel implementation at construction time) together with the
``partition_batch`` helper.
"""

from ._utils import partition_batch
from .dropout import Dropout
from .embedding import Embedding, PatchEmbedding
from .linear import Classifier, Linear
from .normalization import LayerNorm

# Public API of this subpackage.
__all__ = ["Linear", "Classifier", "Embedding", "PatchEmbedding", "LayerNorm", "Dropout", "partition_batch"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/colossalai_layer/embedding.py
colossalai/legacy/nn/layer/colossalai_layer/embedding.py
import math
from typing import Callable

from torch import dtype, nn

from colossalai.accelerator import get_accelerator
from colossalai.nn import init

from ..parallel_1d import Embedding1D, PatchEmbedding1D, VocabParallelEmbedding1D
from ..parallel_2d import Embedding2D, PatchEmbedding2D, VocabParallelEmbedding2D
from ..parallel_2p5d import Embedding2p5D, PatchEmbedding2p5D, VocabParallelEmbedding2p5D
from ..parallel_3d import Embedding3D, PatchEmbedding3D, VocabParallelEmbedding3D
from ..utils import get_tensor_parallel_mode
from ..vanilla import VanillaPatchEmbedding
from ._utils import ColossalaiModule

# Dispatch tables mapping a tensor-parallel mode string to the layer class.
_parallel_embedding = {
    "1d": Embedding1D,
    "2d": Embedding2D,
    "2.5d": Embedding2p5D,
    "3d": Embedding3D,
}

_vocab_parallel_embedding = {
    "1d": VocabParallelEmbedding1D,
    "2d": VocabParallelEmbedding2D,
    "2.5d": VocabParallelEmbedding2p5D,
    "3d": VocabParallelEmbedding3D,
}

_parallel_patchembedding = {
    None: VanillaPatchEmbedding,
    "1d": PatchEmbedding1D,
    "2d": PatchEmbedding2D,
    "2.5d": PatchEmbedding2p5D,
    "3d": PatchEmbedding3D,
}


class Embedding(ColossalaiModule):
    r"""Embedding for colossalai.

    Selects an embedding implementation based on the active tensor-parallel mode:
    a plain ``torch.nn.Embedding`` when no tensor parallelism is configured,
    otherwise a mode-specific class, switching to the vocab-parallel variant once
    ``num_embeddings`` exceeds ``vocab_parallel_limit``.

    Args:
        num_embeddings (int): number of embeddings.
        embedding_dim (int): dimension of embedding.
        padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient;
            therefore, the embedding vector at padding_idx is not updated during training,
            i.e. it remains as a fixed "pad", defaults to None.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to normal initializer.
        vocab_parallel_limit (int, optional): threshold above which the vocab-parallel
            implementation is used, defaults to 2048.

    Extra ``args``/``kwargs`` are forwarded to the underlying embedding and may
    include ``max_norm``, ``norm_type``, ``scale_grad_by_freq`` and ``sparse``; see
    `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: int = None,
        dtype: dtype = None,
        weight_initializer: Callable = init.normal_(),
        vocab_parallel_limit: int = 2048,
        *args,
        **kwargs,
    ) -> None:
        parallel_mode = get_tensor_parallel_mode()
        if parallel_mode is None:
            # No tensor parallelism: plain torch embedding moved to the current device.
            inner = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx, *args, **kwargs)
            inner = inner.to(dtype).to(get_accelerator().get_current_device())
            weight_initializer(inner.weight, fan_in=num_embeddings, fan_out=embedding_dim)
        else:
            # Small vocabularies use the plain parallel embedding; large ones are
            # partitioned along the vocabulary dimension.
            table = _parallel_embedding if num_embeddings <= vocab_parallel_limit else _vocab_parallel_embedding
            inner = table[parallel_mode](
                num_embeddings,
                embedding_dim,
                padding_idx=padding_idx,
                dtype=dtype,
                weight_initializer=weight_initializer,
                *args,
                **kwargs,
            )
        super().__init__(inner)


class PatchEmbedding(ColossalaiModule):
    """2D Image to Patch Embedding.

    Dispatches to the patch-embedding implementation matching the active
    tensor-parallel mode (vanilla implementation when no mode is set).

    Args:
        img_size (int): image size.
        patch_size (int): patch size.
        in_chans (int): number of channels of input image.
        embed_size (int): size of embedding.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        flatten (bool, optional): whether to flatten output tensor, defaults to True.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.
        position_embed_initializer (:class:`typing.Callable`, optional):
            The initializer of position embedding, defaults to zeros initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(
        self,
        img_size: int,
        patch_size: int,
        in_chans: int,
        embed_size: int,
        dtype: dtype = None,
        flatten: bool = True,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
        position_embed_initializer: Callable = init.zeros_(),
    ) -> None:
        patch_embed_cls = _parallel_patchembedding[get_tensor_parallel_mode()]
        super().__init__(
            patch_embed_cls(
                img_size,
                patch_size,
                in_chans,
                embed_size,
                dtype=dtype,
                flatten=flatten,
                weight_initializer=weight_initializer,
                bias_initializer=bias_initializer,
                position_embed_initializer=position_embed_initializer,
            )
        )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/colossalai_layer/normalization.py
colossalai/legacy/nn/layer/colossalai_layer/normalization.py
from torch import nn

from colossalai.accelerator import get_accelerator

from ..parallel_1d import LayerNorm1D
from ..parallel_2d import LayerNorm2D
from ..parallel_2p5d import LayerNorm2p5D
from ..parallel_3d import LayerNorm3D
from ..utils import get_tensor_parallel_mode
from ..vanilla import VanillaLayerNorm
from ._utils import ColossalaiModule

# Tensor-parallel mode -> layer-norm implementation.
_parallel_layernorm = {
    None: VanillaLayerNorm,
    "1d": LayerNorm1D,
    "2d": LayerNorm2D,
    "2.5d": LayerNorm2p5D,
    "3d": LayerNorm3D,
}


class LayerNorm(ColossalaiModule):
    r"""Layer Normalization for colossalai.

    Chooses ``torch.nn.LayerNorm`` when no tensor-parallel mode is active,
    otherwise the mode-specific parallel implementation.

    Args:
        normalized_shape (int): input shape from an expected input of size.
            :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1]
            \times \ldots \times \text{normalized_shape}[-1]]`
            If a single integer is used, it is treated as a singleton list, and this module will
            normalize over the last dimension which is expected to be of that specific size.
        eps (float): a value added to the denominator for numerical stability, defaults to 1e-05.
        bias (bool, optional): Whether to add a bias, defaults to ``True``.
            NOTE(review): ``bias`` is accepted but not forwarded on either branch
            below — confirm whether the underlying layers are expected to read it.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
    """

    def __init__(self, normalized_shape: int, eps=1e-05, bias=True, dtype=None) -> None:
        mode = get_tensor_parallel_mode()
        if mode is None:
            # Plain torch layer norm, cast and moved to the current accelerator device.
            norm = nn.LayerNorm(normalized_shape, eps=eps).to(dtype).to(get_accelerator().get_current_device())
        else:
            norm = _parallel_layernorm[mode](normalized_shape, eps=eps, dtype=dtype)
        super().__init__(norm)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/colossalai_layer/_utils.py
colossalai/legacy/nn/layer/colossalai_layer/_utils.py
import torch.nn as nn
from torch import Tensor

from ..parallel_2d._operation import split_batch_2d
from ..parallel_2p5d._operation import split_batch_2p5d
from ..parallel_3d._operation import split_batch_3d
from ..utils import get_tensor_parallel_mode

# Batch-splitting op per tensor-parallel mode; modes absent here (None, "1d")
# perform no batch split.
_parallel_split_batch = {"2d": split_batch_2d, "2.5d": split_batch_2p5d, "3d": split_batch_3d}


def partition_batch(input_) -> Tensor:
    """Split ``input_`` along the batch dimension for the active tensor-parallel mode.

    Args:
        input_: a tensor, or a dict whose values are tensors (each value is split).

    Returns:
        The split tensor(s), or ``input_`` unchanged when the active mode has no
        batch-split operation registered.
    """
    tensor_parallel_mode = get_tensor_parallel_mode()
    split = _parallel_split_batch.get(tensor_parallel_mode)
    if split is None:
        return input_
    if isinstance(input_, dict):
        return {k: split(v) for k, v in input_.items()}
    return split(input_)


class ColossalaiModule(nn.Module):
    """Thin wrapper owning a single submodule, proxying attribute access to it.

    Any extra keyword arguments are attached to the wrapper as attributes.
    """

    def __init__(self, module: nn.Module, **kwargs):
        super().__init__()
        self.module = module
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __getattr__(self, name: str):
        # Only reached when normal attribute lookup fails. "module" itself lives
        # in nn.Module's _modules registry, so resolve it via the base class;
        # everything else is delegated to the wrapped module (EAFP instead of the
        # previous hasattr+getattr double lookup).
        if name == "module":
            return super().__getattr__(name)
        try:
            return getattr(self.module, name)
        except AttributeError:
            pass
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, name))

    def forward(self, *args, **kwargs):
        # Forward keyword arguments too: the previous *args-only signature
        # silently dropped kwargs, breaking wrapped modules whose forward()
        # accepts keyword arguments. Positional-only callers are unaffected.
        return self.module(*args, **kwargs)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/colossalai_layer/linear.py
colossalai/legacy/nn/layer/colossalai_layer/linear.py
import inspect
import math
from typing import Callable

from torch import dtype, nn

from colossalai.nn import init

from ..parallel_1d import *
from ..parallel_2d import *
from ..parallel_2p5d import *
from ..parallel_3d import *
from ..utils import get_tensor_parallel_mode
from ..vanilla import *
from ._utils import ColossalaiModule

# Tensor-parallel mode -> implementation class.
_parallel_linear = {None: VanillaLinear, "1d": Linear1D, "2d": Linear2D, "2.5d": Linear2p5D, "3d": Linear3D}

_parallel_classifier = {
    None: VanillaClassifier,
    "1d": Classifier1D,
    "2d": Classifier2D,
    "2.5d": Classifier2p5D,
    "3d": Classifier3D,
}

_vocab_parallel_classifier = {
    "1d": VocabParallelClassifier1D,
    "2d": VocabParallelClassifier2D,
    "2.5d": VocabParallelClassifier2p5D,
    "3d": VocabParallelClassifier3D,
}

# Sentinel distinguishing "caller did not pass gather_output" from an explicit value.
_UNSET = object()


class Linear(ColossalaiModule):
    """Linear layer of colossalai.

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias,
            defaults to ``True``.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.

    Note: ``kwargs`` would contain different parameters when you use different parallelisms.
    The ``kwargs`` should contain parameters below:
    ::

        Linear1D:
            gather_output: bool (optional, default to be false)
            skip_bias_add: bool (optional, default to be false)
        Linear2D:
            skip_bias_add: bool (optional, default to be false)
        Linear2p5D:
            skip_bias_add: bool (optional, default to be false)
        Linear3D:
            None

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        dtype: dtype = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
        **kwargs,
    ) -> None:
        tensor_parallel = get_tensor_parallel_mode()
        linear_cls = _parallel_linear[tensor_parallel]
        # ``gather_output`` is only understood by some implementations (e.g. Linear1D).
        # Pop it unconditionally so other classes never receive an unexpected keyword,
        # but forward it only when the caller actually supplied it: previously an
        # omitted ``gather_output`` was forwarded as ``None``, silently overriding
        # the target class's own default.
        gather_output = kwargs.pop("gather_output", _UNSET)
        if gather_output is not _UNSET and "gather_output" in inspect.signature(linear_cls.__init__).parameters:
            kwargs["gather_output"] = gather_output
        layer = linear_cls(
            in_features,
            out_features,
            bias=bias,
            dtype=dtype,
            weight_initializer=weight_initializer,
            bias_initializer=bias_initializer,
            **kwargs,
        )
        super().__init__(layer)


class Classifier(ColossalaiModule):
    """Classifier layer of colossalai.

    Args:
        in_features (int): size of each input sample.
        num_classes (int): number of classes.
        weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias,
            defaults to ``True``.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.
        vocab_parallel_limit (int, optional): threshold above which the vocab-parallel
            classifier is used, defaults to 2048.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(
        self,
        in_features: int,
        num_classes: int,
        weight: nn.Parameter = None,
        bias: bool = True,
        dtype: dtype = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
        vocab_parallel_limit: int = 2048,
    ) -> None:
        tensor_parallel = get_tensor_parallel_mode()
        # Small class counts (or no tensor parallelism) use the plain classifier;
        # above the limit the class dimension is partitioned across ranks.
        if num_classes <= vocab_parallel_limit or tensor_parallel is None:
            layer = _parallel_classifier[tensor_parallel](
                in_features,
                num_classes,
                weight=weight,
                bias=bias,
                dtype=dtype,
                weight_initializer=weight_initializer,
                bias_initializer=bias_initializer,
            )
        else:
            layer = _vocab_parallel_classifier[tensor_parallel](
                in_features,
                num_classes,
                weight=weight,
                bias=bias,
                dtype=dtype,
                weight_initializer=weight_initializer,
                bias_initializer=bias_initializer,
            )
        super().__init__(layer)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_1d/_operation.py
colossalai/legacy/nn/layer/parallel_1d/_operation.py
import torch
import torch.distributed as dist

from colossalai.legacy.core import global_context as gpc

# Optional fused mixed-precision layer-norm CUDA extension; fall back to None
# when unavailable. Previously a bare ``except:``, which also swallowed
# KeyboardInterrupt/SystemExit.
try:
    import fused_mix_prec_layer_norm_cuda
except Exception:
    fused_mix_prec_layer_norm_cuda = None


class FusedLayerNormAffineFunction1D(torch.autograd.Function):
    r"""Layernorm

    Args:
        input: input matrix.
        weight: weight matrix.
        bias: bias matrix.
        normalized_shape: input shape from an expected input of size.
            :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1]
            \times \ldots \times \text{normalized_shape}[-1]]`
            If a single integer is used, it is treated as a singleton list, and this module will
            normalize over the last dimension which is expected to be of that specific size.
        eps: a value added to the denominator for numerical stability
    """

    @staticmethod
    def forward(ctx, input, weight, bias, normalized_shape, eps):
        ctx.normalized_shape = normalized_shape
        ctx.eps = eps
        # The fused kernel requires contiguous tensors.
        input_ = input.contiguous()
        weight_ = weight.contiguous()
        bias_ = bias.contiguous()
        output, mean, invvar = fused_mix_prec_layer_norm_cuda.forward_affine(
            input_, ctx.normalized_shape, weight_, bias_, ctx.eps
        )
        # mean/invvar are required by the fused backward kernel.
        ctx.save_for_backward(input_, weight_, bias_, mean, invvar)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input_, weight_, bias_, mean, invvar = ctx.saved_tensors
        grad_input, grad_weight, grad_bias = fused_mix_prec_layer_norm_cuda.backward_affine(
            grad_output.contiguous(), mean, invvar, input_, ctx.normalized_shape, weight_, bias_, ctx.eps
        )
        # Trailing Nones correspond to the non-tensor forward args (normalized_shape, eps).
        return grad_input, grad_weight, grad_bias, None, None


class LinearWithAsyncCommunication(torch.autograd.Function):
    """
    Linear layer execution with asynchronous communication in backprop.
    """

    @staticmethod
    def forward(ctx, input_, weight, bias, parallel_mode, async_grad_allreduce):
        ctx.save_for_backward(input_, weight)
        ctx.use_bias = bias is not None
        ctx.parallel_mode = parallel_mode
        ctx.async_grad_allreduce = async_grad_allreduce

        output = torch.matmul(input_, weight.t())
        if bias is not None:
            output = output + bias
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.saved_tensors
        use_bias = ctx.use_bias

        total_input = input
        grad_input = grad_output.matmul(weight)

        # Convert the tensor shapes to 2D for execution compatibility.
        # NOTE(review): this assumes a 3-D grad_output/input layout — confirm callers.
        grad_output = grad_output.view(grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2])
        total_input = total_input.view(total_input.shape[0] * total_input.shape[1], total_input.shape[2])

        if ctx.async_grad_allreduce:
            # Asynchronous all-reduce, overlapped with the weight-grad matmul below.
            handle = dist.all_reduce(grad_input, group=gpc.get_group(ctx.parallel_mode), async_op=True)
            # Delay the start of weight gradient computation shortly (3us) to have
            # all-reduce scheduled first and have GPU resources allocated
            # TODO: This seems to only work if you add torch.cuda.Event.wait()
            # _ = torch.zeros(1, device=grad_output.device)

        grad_weight = grad_output.t().matmul(total_input)
        grad_bias = grad_output.sum(dim=0) if use_bias else None

        if ctx.async_grad_allreduce:
            handle.wait()

        # Trailing Nones match the non-tensor forward args (parallel_mode, async flag).
        return grad_input, grad_weight, grad_bias, None, None, None


def linear_with_async_comm(input_, weight, bias, parallel_mode, async_grad_allreduce):
    """Functional wrapper around :class:`LinearWithAsyncCommunication`."""
    return LinearWithAsyncCommunication.apply(input_, weight, bias, parallel_mode, async_grad_allreduce)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_1d/__init__.py
colossalai/legacy/nn/layer/parallel_1d/__init__.py
"""Public exports for the 1-D tensor-parallel layer implementations."""

from .layers import (
    Classifier1D,
    Dropout1D,
    Embedding1D,
    LayerNorm1D,
    Linear1D,
    Linear1D_Col,
    Linear1D_Row,
    PatchEmbedding1D,
    VocabParallelClassifier1D,
    VocabParallelEmbedding1D,
)

# Public API of this subpackage.
__all__ = [
    "Linear1D",
    "Linear1D_Col",
    "Linear1D_Row",
    "Embedding1D",
    "Dropout1D",
    "Classifier1D",
    "VocabParallelClassifier1D",
    "VocabParallelEmbedding1D",
    "LayerNorm1D",
    "PatchEmbedding1D",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_1d/_utils.py
colossalai/legacy/nn/layer/parallel_1d/_utils.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
import torch.distributed as dist

from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.global_variables import tensor_parallel_env as env

from ..utils import divide


def set_parallel_input(input_parallel: bool):
    """Record whether subsequent 1D layers receive an already-split input."""
    env.parallel_input_1d = input_parallel


def get_parallel_input():
    """Return the flag last stored by :func:`set_parallel_input`."""
    return env.parallel_input_1d


def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank):
    """Return the [first, last) vocab indices owned by ``rank`` for a fixed partition size."""
    index_f = rank * per_partition_vocab_size
    index_l = index_f + per_partition_vocab_size
    return index_f, index_l


def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
    """Return the [first, last) vocab indices owned by ``rank`` for an evenly divided vocabulary."""
    per_partition_vocab_size = divide(global_vocab_size, world_size)
    return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank)


def _reduce(input_, parallel_mode):
    """All-reduce ``input_`` (in place) across ``parallel_mode``."""
    # skip if only one rank involved
    if gpc.get_world_size(parallel_mode) == 1:
        return input_
    # CPU tensors must use the CPU process group.
    group = gpc.get_cpu_group(parallel_mode) if input_.device.type == "cpu" else gpc.get_group(parallel_mode)
    dist.all_reduce(input_, group=group)

    return input_


def _split(input_, parallel_mode, dim=-1):
    """Return this rank's chunk of ``input_`` split evenly along ``dim``."""
    # skip if only one rank involved
    world_size = gpc.get_world_size(parallel_mode)
    if world_size == 1:
        return input_

    # Split along last dimension.
    dim_size = input_.size(dim)
    assert dim_size % world_size == 0, (
        f"The dimension to split ({dim_size}) is not a multiple of world size ({world_size}), "
        f"cannot split tensor evenly"
    )

    tensor_list = torch.split(input_, dim_size // world_size, dim=dim)
    rank = gpc.get_local_rank(parallel_mode)
    output = tensor_list[rank].contiguous()

    return output


def _gather(input_, parallel_mode, dim=-1):
    """All-gather ``input_`` across ``parallel_mode`` and concatenate along ``dim``."""
    # skip if only one rank involved
    world_size = gpc.get_world_size(parallel_mode)
    if world_size == 1:
        return input_

    # all gather
    rank = gpc.get_local_rank(parallel_mode)
    tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
    tensor_list[rank] = input_
    group = gpc.get_cpu_group(parallel_mode) if input_.device.type == "cpu" else gpc.get_group(parallel_mode)
    torch.distributed.all_gather(tensor_list, input_, group=group)

    # concat
    output = torch.cat(tensor_list, dim=dim).contiguous()

    return output


class _ReduceGrad(torch.autograd.Function):
    """
    Pass the input to the model parallel region.

    Forward is the identity; backward all-reduces the gradient.

    Args:
        input_: input matrix.
        parallel_mode: parallel mode.
    """

    @staticmethod
    def symbolic(graph, input_, parallel_mode):
        # ONNX export calls symbolic() with all of forward()'s inputs; the previous
        # (graph, input_) signature raised a TypeError during export.
        return input_

    @staticmethod
    def forward(ctx, input_, parallel_mode):
        ctx.mode = parallel_mode
        return input_

    @staticmethod
    def backward(ctx, grad_output):
        return _reduce(grad_output, ctx.mode), None


class _ReduceInput(torch.autograd.Function):
    """
    All-reduce the input from the model parallel region.

    Args:
        input_: input matrix.
        parallel_mode: parallel mode.
    """

    @staticmethod
    def symbolic(graph, input_, parallel_mode):
        # Previously called _reduce(input_) without the required parallel_mode,
        # which raised a TypeError during ONNX export.
        return _reduce(input_, parallel_mode)

    @staticmethod
    def forward(ctx, input_, parallel_mode):
        return _reduce(input_, parallel_mode)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output, None


class _SplitForwardGatherBackward(torch.autograd.Function):
    """
    Split the input and keep only the corresponding chuck to the rank.

    Args:
        input_: input matrix.
        parallel_mode: parallel mode.
        dim: dimension
    """

    @staticmethod
    def symbolic(graph, input_, parallel_mode, dim):
        # Previously called _split(input_) without parallel_mode/dim (TypeError on export).
        return _split(input_, parallel_mode, dim)

    @staticmethod
    def forward(ctx, input_, parallel_mode, dim):
        ctx.mode = parallel_mode
        ctx.dim = dim
        return _split(input_, parallel_mode, dim)

    @staticmethod
    def backward(ctx, grad_output):
        return _gather(grad_output, ctx.mode, ctx.dim), None, None


class _GatherForwardSplitBackward(torch.autograd.Function):
    """Gather the input from model parallel region and concatenate.

    Args:
        input_: input matrix.
        parallel_mode: parallel mode.
        dim: dimension
    """

    @staticmethod
    def symbolic(graph, input_, parallel_mode, dim):
        # Previously called _gather(input_) without parallel_mode/dim (TypeError on export).
        return _gather(input_, parallel_mode, dim)

    @staticmethod
    def forward(ctx, input_, parallel_mode, dim):
        ctx.mode = parallel_mode
        ctx.dim = dim
        return _gather(input_, parallel_mode, dim)

    @staticmethod
    def backward(ctx, grad_output):
        return _split(grad_output, ctx.mode, ctx.dim), None, None


def reduce_grad(input_, parallel_mode):
    """Identity in forward; all-reduce the gradient in backward."""
    return _ReduceGrad.apply(input_, parallel_mode)


def reduce_input(input_, parallel_mode):
    """All-reduce in forward; identity gradient in backward."""
    return _ReduceInput.apply(input_, parallel_mode)


def split_forward_gather_backward(input_, parallel_mode, dim):
    """Split along ``dim`` in forward; all-gather the gradient in backward."""
    return _SplitForwardGatherBackward.apply(input_, parallel_mode, dim)


def gather_forward_split_backward(input_, parallel_mode, dim):
    """All-gather along ``dim`` in forward; split the gradient in backward."""
    return _GatherForwardSplitBackward.apply(input_, parallel_mode, dim)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_1d/layers.py
colossalai/legacy/nn/layer/parallel_1d/layers.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import math from collections import OrderedDict from typing import Callable, Tuple import torch import torch.nn.functional as F from torch import Tensor from torch.nn.parameter import Parameter from colossalai.accelerator import get_accelerator from colossalai.legacy.communication import broadcast from colossalai.legacy.context import ParallelMode, seed from colossalai.legacy.context.parallel_context import global_context as gpc from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.registry import LAYERS from colossalai.legacy.utils.checkpointing import ( broadcast_state_dict, gather_tensor_parallel_state_dict, partition_tensor_parallel_state_dict, ) from colossalai.nn import init as init from colossalai.nn.layer.layernorm import MixedFusedLayerNorm as LayerNorm from ..base_layer import ParallelLayer from ..colossalai_layer._utils import ColossalaiModule from ..utils import divide, set_tensor_parallel_attribute_by_partition from ..vanilla import VanillaPatchEmbedding from ._operation import linear_with_async_comm from ._utils import ( gather_forward_split_backward, get_parallel_input, reduce_grad, reduce_input, set_parallel_input, split_forward_gather_backward, ) Fast_LN = None try: from apex.contrib.layer_norm.layer_norm import FastLayerNorm Fast_LN = FastLayerNorm except ImportError: pass @LAYERS.register_module class Linear1D(ColossalaiModule): r"""Linear layer for 1D parallelism. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. gather_output (bool, optional): Whether to call all-gather on output, defaults to False. 
skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion, defaults to False weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, gather_output: bool = False, skip_bias_add: bool = False, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): parallel_input = get_parallel_input() if not parallel_input and not gather_output: layer = Linear1D_Col( in_features, out_features, bias=bias, dtype=dtype, skip_bias_add=skip_bias_add, weight_initializer=weight_initializer, bias_initializer=bias_initializer, ) else: layer = Linear1D_Row( in_features, out_features, bias=bias, dtype=dtype, parallel_input=parallel_input, skip_bias_add=skip_bias_add, weight_initializer=weight_initializer, bias_initializer=bias_initializer, ) super().__init__(layer) @LAYERS.register_module class LayerNorm1D(ColossalaiModule): r""" Layer Normalization for colossalai Args: normalized_shape (int): input shape from an expected input of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]` If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps (float): a value added to the denominator for numerical stability, defaults to 1e-05. bias (bool, optional): Whether to add a bias, defaults to ``True``. 
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. """ _fast_ln_supported_sizes = [ 1024, 1536, 2048, 2304, 3072, 3840, 4096, 5120, 6144, 8192, 10240, 12288, 12800, 15360, 16384, 18432, 20480, 24576, 25600, 30720, 32768, 40960, 49152, 65536, ] def __init__(self, normalized_shape: int, eps=1e-05, bias=True, dtype=None): if Fast_LN is not None and normalized_shape in self._fast_ln_supported_sizes: norm = Fast_LN(normalized_shape, eps=eps).to(dtype) else: norm = None try: from apex.normalization import FusedLayerNorm norm = FusedLayerNorm(normalized_shape, eps=eps).to(dtype) except ImportError: norm = LayerNorm(normalized_shape, eps=eps).to(dtype) super().__init__(norm) def _load_from_state_dict(self, state_dict, prefix, *args): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias local_state = broadcast_state_dict(local_state, ParallelMode.PARALLEL_1D) super()._load_from_state_dict(local_state, prefix, *args) def _save_to_state_dict(self, destination, prefix, keep_vars): if gpc.get_local_rank(ParallelMode.TENSOR) == 0: super()._save_to_state_dict(destination, prefix, keep_vars) @LAYERS.register_module class Classifier1D(ParallelLayer): r"""RowLinear with given weight. Classifier of 1D parallelism. Args: in_features (int): size of each input sample. num_classes (int): number of classes. weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. 
weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, num_classes: int, weight: Parameter = None, bias: bool = True, dtype: torch.dtype = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() self.in_features = in_features self.num_classes = num_classes self.parallel_input = get_parallel_input() # Divide the weight matrix along the last dimension. self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size) # Parameters. # Initialize weight. factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} if weight is not None: self.weight = weight self.has_weight = False else: self.weight = Parameter(torch.empty(self.num_classes, self.input_size_per_partition, **factory_kwargs)) self.has_weight = True if bias: self.bias = Parameter(torch.empty(self.num_classes, **factory_kwargs)) else: self.bias = None with seed(ParallelMode.TENSOR): self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() set_parallel_input(False) env.vocab_parallel = False def reset_parameters(self, weight_initializer, bias_initializer) -> None: fan_in, fan_out = self.in_features, self.num_classes if self.has_weight: weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0], ParallelMode.PARALLEL_1D) def _set_tensor_parallel_attributes(self): if self.has_weight: num_partition = 
gpc.get_world_size(ParallelMode.TENSOR) set_tensor_parallel_attribute_by_partition(self.weight, num_partition) def _load_from_global_state_dict(self, state_dict, prefix, *args): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight if self.has_weight: weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_1D, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, ) super()._load_from_global_state_dict(local_state, prefix, *args) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict() if self.has_weight: local_state[weight_key] = self.weight if self.bias is not None: local_state[bias_key] = self.bias local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_1D, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, keep_vars=keep_vars, ) destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: # Set up backprop all-reduce. if self.parallel_input: assert ( input_.shape[-1] == self.weight.shape[-1] ), "Invalid shapes in Classifier1D forward: input={}, weight={}. Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[-1] ) input_ = input_ else: assert ( divide(input_.shape[-1], gpc.tensor_parallel_size) == self.weight.shape[-1] ), "Invalid shapes in Classifier1D forward: input={}, weight={}. 
Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[-1] * gpc.tensor_parallel_size ) input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1) output_parallel = F.linear(input_, self.weight) output = reduce_input(output_parallel, ParallelMode.PARALLEL_1D) if self.bias is not None: output = output + self.bias return output @LAYERS.register_module class VocabParallelClassifier1D(ParallelLayer): r"""ColLinear with given weight. Classifier of 1D parallelism. Args: in_features (int): size of each input sample. num_classes (int): number of classes. weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, num_classes: int, weight: Parameter = None, bias: bool = True, dtype: torch.dtype = None, gather_output: bool = False, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() self.in_features = in_features self.num_classes = num_classes self.gather_output = gather_output self.parallel_input = get_parallel_input() # Divide the weight matrix along the last dimension. self.num_classes_per_partition = divide(num_classes, gpc.tensor_parallel_size) # Parameters. # Initialize weight. 
factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} if weight is not None: self.weight = weight self.has_weight = False else: self.weight = Parameter(torch.empty(self.num_classes_per_partition, self.in_features, **factory_kwargs)) self.has_weight = True if bias: self.bias = Parameter(torch.empty(self.num_classes_per_partition, **factory_kwargs)) else: self.bias = None with seed(ParallelMode.TENSOR): self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() set_parallel_input(False) env.vocab_parallel = True def reset_parameters(self, weight_initializer, bias_initializer) -> None: fan_in, fan_out = self.in_features, self.num_classes if self.has_weight: weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) def _set_tensor_parallel_attributes(self): num_partition = gpc.get_world_size(ParallelMode.TENSOR) if self.has_weight: set_tensor_parallel_attribute_by_partition(self.weight, num_partition) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, num_partition) def _load_from_global_state_dict(self, state_dict, prefix, *args): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight if self.has_weight: weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_1D, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = 
OrderedDict() if self.has_weight: local_state[weight_key] = self.weight if self.bias is not None: local_state[bias_key] = self.bias local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_1D, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: assert ( input_.shape[-1] == self.weight.shape[-1] ), "Invalid shapes in VocabParallelClassifier1D forward: input={}, weight={}. Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[-1] ) # Set up backprop all-reduce. input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D) # Matrix multiply. output_parallel = F.linear(input_parallel, self.weight, self.bias) if self.gather_output: # All-gather across the partitions. output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1) else: output = output_parallel return output @LAYERS.register_module class Linear1D_Col(ParallelLayer): r"""Linear layer with column parallelism. The linear layer is defined as :math:`Y = XA + b`. A is parallelized along its second dimension as :math:`A = [A_1, ..., A_p]`. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. gather_output (bool, optional): If true, call all-gather on output and make Y available to all GPUs, otherwise, every GPU will have its output which is :math:`Y_i = XA_i`, defaults to False skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion, defaults to False weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. 
bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, gather_output: bool = False, skip_bias_add: bool = False, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() # Keep input parameters self.in_features = in_features self.out_features = out_features self.gather_output = gather_output self.skip_bias_add = skip_bias_add if skip_bias_add and not bias: raise ValueError("cannot skip bias addition if bias is None") self.out_features_per_partition = divide(out_features, gpc.tensor_parallel_size) # Parameters. # Initialize weight. factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} self.weight = Parameter(torch.empty(self.out_features_per_partition, self.in_features, **factory_kwargs)) if bias: self.bias = Parameter(torch.empty(self.out_features_per_partition, **factory_kwargs)) else: self.bias = None with seed(ParallelMode.TENSOR): self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() is_parallel_output = not self.gather_output set_parallel_input(is_parallel_output) def reset_parameters(self, weight_initializer, bias_initializer) -> None: fan_in, fan_out = self.in_features, self.out_features weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) def _set_tensor_parallel_attributes(self): num_partition = gpc.get_world_size(ParallelMode.TENSOR) set_tensor_parallel_attribute_by_partition(self.weight, num_partition) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, num_partition) def 
_load_from_global_state_dict(self, state_dict, prefix, *args): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_1D, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is not None: local_state[bias_key] = self.bias local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_1D, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) destination.update(local_state) def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]: assert ( input_.shape[-1] == self.weight.shape[-1] ), "Invalid shapes in Linear1D_Col forward: input={}, weight={}. Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[-1] ) # Set up backprop all-reduce. # input_parallel = reduce_grad(input_, ParallelMode.PARALLEL_1D) input_parallel = input_ # Matrix multiply. bias = self.bias if not self.skip_bias_add else None # output_parallel = F.linear(input_parallel, self.weight, bias) output_parallel = linear_with_async_comm(input_parallel, self.weight, bias, ParallelMode.PARALLEL_1D, True) if self.gather_output: # All-gather across the partitions. 
output = gather_forward_split_backward(output_parallel, ParallelMode.PARALLEL_1D, dim=-1) else: output = output_parallel if self.skip_bias_add: return output, self.bias else: return output @LAYERS.register_module class Linear1D_Row(ParallelLayer): r"""Linear layer with row parallelism Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. parallel_input (bool, optional): If set to ``True``, it's assumed that the input is split, defaults to False. skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion, defaults to False weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, parallel_input: bool = True, skip_bias_add: bool = False, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), stream_chunk_num: int = 1, ): super().__init__() self.stream_chunk_num = stream_chunk_num # Keep input parameters self.in_features = in_features self.out_features = out_features self.parallel_input = parallel_input self.skip_bias_add = skip_bias_add if skip_bias_add and not bias: raise ValueError("cannot skip bias addition if bias is None") # Divide the weight matrix along the last dimension. 
self.input_size_per_partition = divide(in_features, gpc.tensor_parallel_size) # Parameters. # Initialize weight. factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} self.weight = Parameter(torch.empty(self.out_features, self.input_size_per_partition, **factory_kwargs)) if self.stream_chunk_num > 1: # TODO() work for inference only self.chunk_weight() if bias: self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs)) else: self.bias = None with seed(ParallelMode.TENSOR): self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() set_parallel_input(False) def chunk_weight(self): self.weight_list = torch.chunk(self.weight, self.stream_chunk_num, dim=0) def reset_parameters(self, weight_initializer, bias_initializer) -> None: fan_in, fan_out = self.in_features, self.out_features weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.PARALLEL_1D)[0], ParallelMode.PARALLEL_1D) def _set_tensor_parallel_attributes(self): num_partition = gpc.get_world_size(ParallelMode.TENSOR) set_tensor_parallel_attribute_by_partition(self.weight, num_partition) def _load_from_global_state_dict(self, state_dict, prefix, *args): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_1D, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, ) super()._load_from_global_state_dict(local_state, prefix, *args) def _save_to_global_state_dict(self, 
destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is not None: local_state[bias_key] = self.bias local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_1D, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, keep_vars=keep_vars, ) destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: # Set up backprop all-reduce. if self.parallel_input: assert ( input_.shape[-1] == self.weight.shape[-1] ), "Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[-1] ) input_ = input_ else: assert ( divide(input_.shape[-1], gpc.tensor_parallel_size) == self.weight.shape[-1] ), "Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[-1] * gpc.tensor_parallel_size ) input_ = split_forward_gather_backward(input_, ParallelMode.PARALLEL_1D, dim=-1) if self.stream_chunk_num > 1: if self.training: raise RuntimeError("use stream_chunk_num=1 in Linear1D_Row for training!") with torch.no_grad(): output_parallel_list = [None for i in range(self.stream_chunk_num)] handle_list = [] for i in range(self.stream_chunk_num): output_parallel_list[i] = F.linear(input_, self.weight_list[i]) handle = torch.distributed.all_reduce( output_parallel_list[i], group=gpc.get_group(ParallelMode.PARALLEL_1D), async_op=True ) handle_list.append(handle) # output_parallel_list[i] = reduce_input(output_parallel_list[i], ParallelMode.PARALLEL_1D) for handle in handle_list: handle.wait() output = torch.cat(output_parallel_list, dim=-1) else: output_parallel = F.linear(input_, self.weight) # output_parallel = linear_with_async_comm(input_, self.weight, None, ParallelMode.PARALLEL_1D, False) output = reduce_input(output_parallel, 
ParallelMode.PARALLEL_1D) if not self.skip_bias_add: if self.bias is not None: output = output + self.bias return output else: return output, self.bias @LAYERS.register_module class Embedding1D(ParallelLayer): r"""Embedding for 1D parallelism. Args: num_embeddings (int): number of embeddings. embedding_dim (int): dimension of embedding. padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed “pad”, defaults to None. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): he initializer of weight, defaults to normal initializer. The ``args`` and ``kwargs`` used in :class:`torch.nn.functional.embedding` should contain: :: max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2. scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False. sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False. More details about ``args`` and ``kwargs`` could be found in
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_3d/_operation.py
colossalai/legacy/nn/layer/parallel_3d/_operation.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Optional, Tuple import torch from torch import Tensor from torch.cuda.amp import custom_bwd, custom_fwd from colossalai.legacy.communication import all_gather, all_reduce, broadcast, reduce, reduce_scatter from colossalai.legacy.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from ._utils import get_parallel_mode_from_env, push_async_grad class _Linear3D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx, input_: Tensor, weight: Tensor, weight_id: int, input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode, ) -> Tensor: ctx.weight_id = weight_id ctx.input_parallel_mode = input_parallel_mode ctx.weight_parallel_mode = weight_parallel_mode ctx.output_parallel_mode = output_parallel_mode input_ = all_gather(input_, 0, input_parallel_mode) weight = all_gather(weight, 0, weight_parallel_mode) ctx.save_for_backward(input_, weight) output = torch.matmul(input_, weight) output = reduce_scatter(output, 0, output_parallel_mode) return output @staticmethod @custom_bwd def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]: input_, weight = ctx.saved_tensors output_grad = all_gather(output_grad, 0, ctx.output_parallel_mode) input_grad = torch.matmul(output_grad, weight.transpose(0, 1)) input_grad, input_op = reduce_scatter(input_grad, 0, ctx.input_parallel_mode, async_op=True) weight_grad = torch.matmul( input_.reshape(-1, input_.shape[-1]).transpose(0, 1), output_grad.reshape(-1, output_grad.shape[-1]) ) weight_grad, op = reduce_scatter(weight_grad, 0, ctx.weight_parallel_mode, async_op=True) weight_grad = push_async_grad(op, weight_grad, ctx.weight_id) input_op.wait() return input_grad, weight_grad, None, None, None, None def linear_3d( input_: Tensor, weight: Tensor, input_parallel_mode: 
ParallelMode, weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode, ) -> Tensor: r"""Linear layer for 3D parallelism. Args: input_ (:class:`torch.tensor`): input matrix. weight (:class:`torch.tensor`): matrix of weight. input_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): input parallel mode. weight_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): weight parallel mode. output_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): output parallel mode. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _Linear3D.apply( input_, weight, id(weight), input_parallel_mode, weight_parallel_mode, output_parallel_mode, ) class _Classifier3D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx, input_: Tensor, weight: Tensor, bias: Optional[Tensor], weight_id: int, bias_id: Optional[int], input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode, ) -> Tensor: ctx.use_bias = bias is not None ctx.weight_id = weight_id src_rank = gpc.get_ranks_in_group(input_parallel_mode)[gpc.get_local_rank(output_parallel_mode)] weight = broadcast(weight, src_rank, input_parallel_mode) ctx.save_for_backward(input_, weight) output = torch.matmul(input_, weight.transpose(0, 1)) output = all_reduce(output, output_parallel_mode) if bias is not None: ctx.bias_id = bias_id output += bias ctx.src_rank = src_rank ctx.input_parallel_mode = input_parallel_mode ctx.weight_parallel_mode = weight_parallel_mode ctx.output_parallel_mode = output_parallel_mode return output @staticmethod @custom_bwd def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]: input_, weight = ctx.saved_tensors weight_grad = torch.matmul( 
output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), input_.reshape(-1, input_.shape[-1]) ) weight_grad = reduce(weight_grad, ctx.src_rank, ctx.input_parallel_mode) if gpc.get_local_rank(ctx.input_parallel_mode) == gpc.get_local_rank(ctx.output_parallel_mode): weight_grad, op = all_reduce(weight_grad, ctx.weight_parallel_mode, async_op=True) weight_grad = push_async_grad(op, weight_grad, ctx.weight_id) else: weight_grad = None if ctx.use_bias: bias_grad = torch.sum(output_grad, dim=tuple(range(len(output_grad.shape))[:-1])) bias_grad = all_reduce(bias_grad, ctx.input_parallel_mode) bias_grad, op = all_reduce(bias_grad, ctx.weight_parallel_mode, async_op=True) bias_grad = push_async_grad(op, bias_grad, ctx.bias_id) else: bias_grad = None input_grad = torch.matmul(output_grad, weight) return input_grad, weight_grad, bias_grad, None, None, None, None, None def classifier_3d( input_: Tensor, weight: Tensor, bias: Optional[Tensor], input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode, ) -> Tensor: r"""3D parallel classifier. Args: input_ (:class:`torch.tensor`): input matrix. weight (:class:`torch.tensor`): matrix of weight. bias (:class:`torch.tensor`): matrix of bias. input_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): input parallel mode. weight_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): weight parallel mode. output_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): output parallel mode. Note: The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _Classifier3D.apply( input_, weight, bias, id(weight), id(bias) if bias is not None else None, input_parallel_mode, weight_parallel_mode, output_parallel_mode, ) class _VocabParallelClassifier3D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx, input_: Tensor, weight: Tensor, bias: Optional[Tensor], weight_id: int, bias_id: Optional[int], input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode, ) -> Tensor: ctx.use_bias = bias is not None ctx.weight_id = weight_id input_ = all_gather(input_, 0, input_parallel_mode) weight = all_gather(weight, 0, weight_parallel_mode).transpose(0, 1) ctx.save_for_backward(input_, weight) output = torch.matmul(input_, weight) output = reduce_scatter(output, 0, output_parallel_mode) if bias is not None: ctx.bias_id = bias_id output += bias ctx.input_parallel_mode = input_parallel_mode ctx.weight_parallel_mode = weight_parallel_mode ctx.output_parallel_mode = output_parallel_mode return output @staticmethod @custom_bwd def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]: input_, weight = ctx.saved_tensors output_grad = all_gather(output_grad, 0, ctx.output_parallel_mode) input_grad = torch.matmul(output_grad, weight.transpose(0, 1)) input_grad, input_op = reduce_scatter(input_grad, 0, ctx.input_parallel_mode, async_op=True) weight_grad = torch.matmul( input_.reshape(-1, input_.shape[-1]).transpose(0, 1), output_grad.reshape(-1, output_grad.shape[-1]) ) weight_grad, op = reduce_scatter(weight_grad.transpose(0, 1), 0, ctx.weight_parallel_mode, async_op=True) weight_grad = push_async_grad(op, weight_grad, ctx.weight_id) if ctx.use_bias: bias_grad = torch.sum(output_grad, dim=tuple(range(len(output_grad.shape))[:-1])) bias_grad, op = all_reduce(bias_grad, 
ctx.weight_parallel_mode, async_op=True) bias_grad = push_async_grad(op, bias_grad, ctx.bias_id) else: bias_grad = None input_op.wait() return input_grad, weight_grad, bias_grad, None, None, None, None, None def vocab_parallel_classifier_3d( input_: Tensor, weight: Tensor, bias: Optional[Tensor], input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode, output_parallel_mode: ParallelMode, ) -> Tensor: r"""3D vocab parallel classifier. Args: input_ (:class:`torch.tensor`): input matrix. weight (:class:`torch.tensor`): matrix of weight. bias (:class:`torch.tensor`): matrix of bias. input_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): input parallel mode. weight_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): weight parallel mode. output_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): output parallel mode. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _VocabParallelClassifier3D.apply( input_, weight, bias, id(weight), id(bias) if bias is not None else None, input_parallel_mode, weight_parallel_mode, output_parallel_mode, ) @torch.jit.script def norm_forward(x: Tensor, mean: Tensor, sqr_mean: Tensor, weight: Tensor, bias: Tensor, eps: float): mu = x - mean var = sqr_mean - mean**2 sigma = torch.sqrt(var + eps) z = mu / sigma output = weight * z + bias return output, mu, sigma @torch.jit.script def norm_backward(grad: Tensor, mu: Tensor, sigma: Tensor, weight: Tensor): # dbias, dweight = grad, grad * mu / sigma dz = grad * weight dmu = dz / sigma dvar = dz * mu * (-0.5) * sigma ** (-3) dmean = -dmu dvar = torch.sum(dvar, -1, keepdim=True) dmean = torch.sum(dmean, -1, keepdim=True) return dmu, dmean, dvar class _Layernorm3D(torch.autograd.Function): @staticmethod 
    @custom_fwd(cast_inputs=torch.float32)  # keep the statistics in fp32 under AMP
    def forward(
        ctx,
        input_: Tensor,
        weight: Tensor,
        bias: Tensor,
        weight_id: int,
        bias_id: int,
        normalized_shape: int,
        eps: float,
        output_parallel_mode: ParallelMode,
        input_x_weight_parallel_mode: ParallelMode,
    ) -> Tensor:
        ctx.weight_id = weight_id
        ctx.bias_id = bias_id
        # Local partial sums over this rank's shard of the hidden dim; one
        # all-reduce over the output group (both stacked together) yields the
        # global mean and mean-of-squares after dividing by the full width.
        sum_ = torch.sum(input_, dim=-1, keepdim=True)
        sqr_sum = torch.sum(input_**2, dim=-1, keepdim=True)
        mean, sqr_mean = all_reduce(torch.stack((sum_, sqr_sum)), output_parallel_mode) / normalized_shape
        output, mu, sigma = norm_forward(input_, mean, sqr_mean, weight, bias, eps)
        ctx.save_for_backward(mu, sigma, weight)

        ctx.normalized_shape = normalized_shape
        ctx.output_parallel_mode = output_parallel_mode
        ctx.input_x_weight_parallel_mode = input_x_weight_parallel_mode

        return output

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        mu, sigma, weight = ctx.saved_tensors
        # weight/bias grads: reduce over all leading (non-hidden) dims, then
        # async all-reduce over the input-x-weight group; push_async_grad
        # returns a zero placeholder, the real grad is added later by the
        # parameter's registered hook.
        bias_grad, weight_grad = output_grad, output_grad * mu / sigma
        bias_grad = torch.sum(bias_grad, dim=tuple(range(len(bias_grad.shape))[:-1]))
        bias_grad, op = all_reduce(bias_grad, ctx.input_x_weight_parallel_mode, async_op=True)
        bias_grad = push_async_grad(op, bias_grad, ctx.bias_id)
        weight_grad = torch.sum(weight_grad, dim=tuple(range(len(weight_grad.shape))[:-1]))
        weight_grad, op = all_reduce(weight_grad, ctx.input_x_weight_parallel_mode, async_op=True)
        weight_grad = push_async_grad(op, weight_grad, ctx.weight_id)

        # Input grad: local pieces from norm_backward, with dvar/dmean
        # all-reduced across the hidden-dim shards (output group).
        dmu, dmean, dvar = norm_backward(output_grad, mu, sigma, weight)
        dvar, dmean = all_reduce(torch.stack((dvar, dmean)), ctx.output_parallel_mode)
        input_grad = dmu + (dmean + 2 * dvar * mu) / ctx.normalized_shape

        return input_grad, weight_grad, bias_grad, None, None, None, None, None, None, None, None


def layernorm_3d(
    input_: Tensor,
    weight: Tensor,
    bias: Tensor,
    normalized_shape: int,
    eps: float,
    output_parallel_mode: ParallelMode,
    input_x_weight_parallel_mode: ParallelMode,
) -> Tensor:
    r"""3D parallel Layernorm.

    Args:
        input_ (:class:`torch.tensor`): input matrix.
        weight (:class:`torch.tensor`): matrix of weight.
        bias (:class:`torch.tensor`): matrix of bias.
        normalized_shape (int): input shape from an expected input of size.
            :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1]
            \times \ldots \times \text{normalized_shape}[-1]]`
            If a single integer is used, it is treated as a singleton list, and this module will
            normalize over the last dimension which is expected to be of that specific size.
        eps (float): a value added to the denominator for numerical stability
        output_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): output parallel mode.
        input_x_weight_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): input x weight parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    return _Layernorm3D.apply(
        input_,
        weight,
        bias,
        id(weight),
        id(bias),
        normalized_shape,
        eps,
        output_parallel_mode,
        input_x_weight_parallel_mode,
    )


def split_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""Splits 3D parallel tensor in specified dimension.

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        dim (int): Specified dimension in which to split.
        parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`, optional): Parallel mode.

    Returns:
        :class:`torch.tensor`: The tensor has been split.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
""" dim_size = tensor.size(dim) world_size = gpc.get_world_size(parallel_mode) assert dim_size % world_size == 0, ( f"The dimension {dim} to split, size ({dim_size}) is not a multiple of world size ({world_size}), " f"cannot split tensor evenly" ) if tensor.size(dim) <= 1: return tensor output = torch.chunk(tensor, gpc.get_world_size(parallel_mode), dim=dim)[ gpc.get_local_rank(parallel_mode) ].contiguous() return output def split_batch_3d( input_: Tensor, dim: int = 0, input_parallel_mode: ParallelMode = ParallelMode.PARALLEL_3D_INPUT, weight_parallel_mode: ParallelMode = ParallelMode.PARALLEL_3D_WEIGHT, ) -> Tensor: r"""Splits 3D tensor in batch. Args: input_ (:class:`torch.tensor`): Input tensor. dim (int): Specified dimension in which to split. input_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`, optional): input parallel mode. weight_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`, optional): weight parallel mode. Returns: :class:`torch.tensor`: The tensor has been split. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. 
""" if input_.size(dim) <= 1: return input_ weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D) input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D) weight_world_size = gpc.get_world_size(weight_parallel_mode) input_world_size = gpc.get_world_size(input_parallel_mode) output = torch.chunk(input_, weight_world_size, dim=dim)[gpc.get_local_rank(weight_parallel_mode)].contiguous() output = torch.chunk(output, input_world_size, dim=dim)[gpc.get_local_rank(input_parallel_mode)].contiguous() return output class _ReduceTensor3D(torch.autograd.Function): @staticmethod def forward(ctx, input_, parallel_mode): return all_reduce(input_, parallel_mode) @staticmethod def backward(ctx, output_grad): return output_grad, None def reduce_tensor_3d(tensor: Tensor, parallel_mode: ParallelMode) -> Tensor: r"""All-reduce the input Args: tensor (:class:`torch.tensor`): Input tensor. parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): Parallel mode. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. """ return _ReduceTensor3D.apply(tensor, parallel_mode) class _AllGatherTensor3D(torch.autograd.Function): @staticmethod def forward(ctx, input_, dim, parallel_mode): ctx.dim = dim ctx.parallel_mode = parallel_mode output = all_gather(input_, dim, parallel_mode) return output @staticmethod def backward(ctx, output_grad): input_grad = reduce_scatter(output_grad, ctx.dim, ctx.parallel_mode) return input_grad, None, None def all_gather_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor: r"""All-reduce the gradient in backward pass. Args: tensor (:class:`torch.tensor`): Input tensor. dim (int): Dimension to gather. parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): Parallel mode. 
    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    return _AllGatherTensor3D.apply(tensor, dim, parallel_mode)


class _ReduceScatterTensor3D(torch.autograd.Function):
    """Reduce-scatter in forward; all-gather the gradient in backward."""

    @staticmethod
    def forward(ctx, input_, dim, parallel_mode):
        ctx.dim = dim
        ctx.parallel_mode = parallel_mode
        return reduce_scatter(input_, dim, parallel_mode)

    @staticmethod
    def backward(ctx, output_grad):
        input_grad = all_gather(output_grad, ctx.dim, ctx.parallel_mode)
        return input_grad, None, None


def reduce_scatter_tensor_3d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor:
    r"""Reduce-scatter the input.

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        dim (int): Dimension to scatter.
        parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): Parallel mode.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    # Each rank must receive an equal shard along `dim`.
    dim_size = tensor.size(dim)
    world_size = gpc.get_world_size(parallel_mode)
    assert (
        dim_size % world_size == 0
    ), f"The batch size ({dim_size}) is not a multiple of square of 3D depth ({world_size})."

    return _ReduceScatterTensor3D.apply(tensor, dim, parallel_mode)


class _ReduceByBatch3D(torch.autograd.Function):
    """All-reduce over both batch groups; optionally divide by the group sizes."""

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)  # accumulate the reduction in fp32 under AMP
    def forward(
        ctx,
        input_: Tensor,
        input_parallel_mode: ParallelMode,
        weight_parallel_mode: ParallelMode,
        reduce_mean: bool = False,
    ) -> Tensor:
        # Sum over both groups that partition the batch.
        output = all_reduce(input_, input_parallel_mode)
        output = all_reduce(output, weight_parallel_mode)
        ctx.reduce_mean = reduce_mean
        if reduce_mean:
            reduce_size = gpc.get_world_size(input_parallel_mode) * gpc.get_world_size(weight_parallel_mode)
            ctx.reduce_size = reduce_size
            return output.clone() / reduce_size
        return output.clone()

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad: Tensor) -> Tuple[Tensor, ...]:
        # Gradient of a sum is the identity; mirror the mean scaling if used.
        if ctx.reduce_mean:
            return output_grad / ctx.reduce_size, None, None, None
        else:
            return output_grad, None, None, None


def reduce_by_batch_3d(
    tensor: Tensor, input_parallel_mode: ParallelMode, weight_parallel_mode: ParallelMode, reduce_mean: bool = False
) -> Tensor:
    r"""All-reduce the input from the model parallel region.

    Args:
        tensor (:class:`torch.tensor`): Input tensor.
        input_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): input parallel mode.
        weight_parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`): weight parallel mode.
        reduce_mean (bool, optional): If set to ``True``, it will divide the output by
            (input parallel size * weight parallel size), default to False.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    return _ReduceByBatch3D.apply(tensor, input_parallel_mode, weight_parallel_mode, reduce_mean)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_3d/__init__.py
colossalai/legacy/nn/layer/parallel_3d/__init__.py
from ._operation import reduce_by_batch_3d, split_batch_3d, split_tensor_3d from .layers import ( Classifier3D, Embedding3D, LayerNorm3D, Linear3D, PatchEmbedding3D, VocabParallelClassifier3D, VocabParallelEmbedding3D, ) __all__ = [ "reduce_by_batch_3d", "split_tensor_3d", "split_batch_3d", "Linear3D", "LayerNorm3D", "PatchEmbedding3D", "Classifier3D", "Embedding3D", "VocabParallelEmbedding3D", "VocabParallelClassifier3D", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_3d/_utils.py
colossalai/legacy/nn/layer/parallel_3d/_utils.py
from collections import OrderedDict
from functools import partial

import torch
from torch import Tensor

from colossalai.legacy.constants import (
    INPUT_GROUP_3D,
    INPUT_X_WEIGHT_3D,
    OUTPUT_GROUP_3D,
    OUTPUT_X_WEIGHT_3D,
    WEIGHT_GROUP_3D,
)
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.global_variables import tensor_parallel_env as env


def get_depth_from_env() -> int:
    """Return the 3D tensor-parallel depth stored in the global tensor-parallel env.

    Raises:
        EnvironmentError: if the depth is not configured in the environment.
    """
    try:
        depth = env.depth_3d
        assert depth > 0, "DEPTH must be greater than zero"
        return depth
    # A plain attribute access on `env` raises AttributeError (not KeyError)
    # when the setting is missing, so catch both to be safe.
    except (KeyError, AttributeError):
        raise EnvironmentError(
            "DEPTH is not found in the current environment, "
            "please make sure that you have used the correct process group initializer"
        )


def get_parallel_mode_from_env(group):
    """Look up the ParallelMode stored in the env under the given 3D group name."""
    assert group in [
        INPUT_GROUP_3D,
        WEIGHT_GROUP_3D,
        OUTPUT_GROUP_3D,
        INPUT_X_WEIGHT_3D,
        OUTPUT_X_WEIGHT_3D,
    ], f"{group} is not valid for 3D tensor parallelism."
    return getattr(env, group)


def swap_in_out_group():
    """Swap the input and output 3D groups (and their x-weight groups) in the env."""
    env.input_group_3d, env.output_group_3d = env.output_group_3d, env.input_group_3d
    env.input_x_weight_group_3d, env.output_x_weight_group_3d = (
        env.output_x_weight_group_3d,
        env.input_x_weight_group_3d,
    )


def dbg_check_shape(tensor: Tensor, shape: tuple):
    """Debug helper: print the shape on global rank 0 and assert it matches `shape`."""
    rank = gpc.get_global_rank()
    if rank == 0:
        print(tensor.shape)
    assert tensor.shape == shape, "{} does not match {}".format(tensor.shape, shape)


class AsyncGradientBucket(object):
    """Holds in-flight (async_op, gradient) pairs keyed by parameter id.

    Gradients produced by asynchronous collectives are parked here; they are
    collected later by the parameter's gradient hook (see
    ``register_async_grad_hook``) or flushed in bulk by ``synchronize``.
    """

    def __init__(self):
        self.bucket = OrderedDict()

    def __len__(self):
        return len(self.bucket)

    def push(self, async_op, grad_tensor, param_id):
        """Park an async gradient and return a zero placeholder of the same shape.

        The placeholder is what autograd accumulates immediately; the real
        gradient is added once the async op is waited on and popped.
        """
        self.bucket[param_id] = (async_op, grad_tensor)
        # zeros_like already preserves dtype and device.
        return torch.zeros_like(grad_tensor)

    def pop(self, param_id):
        """Wait for and return the parked gradient for `param_id`, or None if absent."""
        grad = None
        if param_id in self.bucket:
            op, grad = self.bucket.pop(param_id)
            if op is not None:
                op.wait()
        return grad

    def synchronize(self, params):
        """Wait on and apply all parked gradients belonging to `params` in place."""
        for p in params:
            i = id(p)
            if i in self.bucket:
                op, grad = self.bucket.pop(i)
                if op is not None:
                    op.wait()
                p.grad.add_(grad)


# Module-level singleton shared by all 3D-parallel layers.
_async_grad_bucket = AsyncGradientBucket()


def push_async_grad(op, grad, param_id):
    """Park `grad` (guarded by async handle `op`) and return a zero placeholder."""
    return _async_grad_bucket.push(op, grad, param_id)


def pop_async_grad(param_id):
    """Retrieve the completed async gradient for `param_id` (None if none parked)."""
    return _async_grad_bucket.pop(param_id)


def _async_grad_hook(grad, param_id):
    # Fold the parked async gradient into the one autograd just produced.
    grad.add_(pop_async_grad(param_id))
    return grad


def register_async_grad_hook(param):
    """Register a gradient hook that merges this param's parked async gradient."""
    param.register_hook(partial(_async_grad_hook, param_id=id(param)))


def synchronize(params=()):
    """Flush parked gradients for `params`, sync CUDA, and fail on leftovers.

    Args:
        params: iterable of parameters whose async gradients should be applied.

    Raises:
        RuntimeError: if any asynchronous gradient remains uncollected.
    """
    _async_grad_bucket.synchronize(params)
    torch.cuda.default_stream().synchronize()
    if len(_async_grad_bucket) > 0:
        raise RuntimeError(f"{len(_async_grad_bucket)} asynchronous gradient(s) not collected.")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_3d/layers.py
colossalai/legacy/nn/layer/parallel_3d/layers.py
import math from collections import OrderedDict from typing import Callable import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torch.nn import Parameter from colossalai.accelerator import get_accelerator from colossalai.legacy.communication import all_reduce, broadcast from colossalai.legacy.constants import ( INPUT_GROUP_3D, INPUT_X_WEIGHT_3D, OUTPUT_GROUP_3D, OUTPUT_X_WEIGHT_3D, WEIGHT_GROUP_3D, ) from colossalai.legacy.context import ParallelMode, seed from colossalai.legacy.core import global_context as gpc from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.nn.layer.base_layer import ParallelLayer from colossalai.legacy.registry import LAYERS from colossalai.legacy.utils.checkpointing import ( broadcast_state_dict, gather_tensor_parallel_state_dict, partition_tensor_parallel_state_dict, ) from colossalai.nn import init as init from ..utils import divide, set_tensor_parallel_attribute_by_partition, to_2tuple from ._operation import ( all_gather_tensor_3d, classifier_3d, layernorm_3d, linear_3d, reduce_scatter_tensor_3d, split_batch_3d, split_tensor_3d, vocab_parallel_classifier_3d, ) from ._utils import get_depth_from_env, get_parallel_mode_from_env, register_async_grad_hook, swap_in_out_group @LAYERS.register_module class LayerNorm3D(ParallelLayer): r"""Layer Normalization for 3D parallelism. Args: normalized_shape (int): input shape from an expected input of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]` If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps (float, optional): a value added to the denominator for numerical stability, defaults to 1e-12. bias (bool, optional): Whether to add a bias, defaults to ``True``. 
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. """ def __init__(self, normalized_shape: int, eps: float = 1e-12, bias=True, dtype=None): super().__init__() self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D) self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D) self.output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D) self.input_x_weight_parallel_mode = get_parallel_mode_from_env(INPUT_X_WEIGHT_3D) self.depth = get_depth_from_env() self.normalized_shape = normalized_shape self.normalized_shape_per_partition = divide(normalized_shape, self.depth) self.weight = Parameter( torch.ones(self.normalized_shape_per_partition, device=get_accelerator().get_current_device(), dtype=dtype) ) if bias: self.bias = Parameter( torch.zeros( self.normalized_shape_per_partition, device=get_accelerator().get_current_device(), dtype=dtype ) ) else: self.bias = None self.variance_epsilon = eps self.reset_parameters() self._set_tensor_parallel_attributes() def _set_tensor_parallel_attributes(self) -> None: set_tensor_parallel_attribute_by_partition(self.weight, self.depth) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, self.depth) def reset_parameters(self) -> None: init.ones_()(self.weight) register_async_grad_hook(self.weight) if self.bias is not None: init.zeros_()(self.bias) register_async_grad_hook(self.bias) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight.transpose(0, 1) # bias bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # partition in output groups if gpc.get_local_rank(self.input_parallel_mode) == 0 and gpc.get_local_rank(self.weight_parallel_mode) == 0: 
local_state = partition_tensor_parallel_state_dict( local_state, self.output_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={ weight_key: True, bias_key: True, }, ) # broadcast in input groups if gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = broadcast_state_dict(local_state, self.input_parallel_mode) # broadcast in weight groups local_state = broadcast_state_dict(local_state, self.weight_parallel_mode) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is not None: local_state[bias_key] = self.bias # gather in output groups if gpc.get_local_rank(self.input_parallel_mode) == 0 and gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = gather_tensor_parallel_state_dict( local_state, self.output_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: return layernorm_3d( input_, self.weight, self.bias, self.normalized_shape, self.variance_epsilon, self.output_parallel_mode, self.input_x_weight_parallel_mode, ) @LAYERS.register_module class Linear3D(ParallelLayer): r"""Linear layer for 3D parallelism. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. 
bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, skip_bias_add: bool = False, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() self.in_features = in_features self.out_features = out_features self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D) self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D) self.output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D) self.output_x_weight_parallel_mode = get_parallel_mode_from_env(OUTPUT_X_WEIGHT_3D) self.depth = get_depth_from_env() self.skip_bias_add = skip_bias_add self.in_features_per_partition = divide(in_features, self.depth**2) self.out_features_per_partition = divide(out_features, self.depth) self.bias_features_per_partition = divide(out_features, self.depth) self.weight = Parameter( torch.empty( self.in_features_per_partition, self.out_features_per_partition, device=get_accelerator().get_current_device(), dtype=dtype, ) ) if bias: self.bias = Parameter( torch.zeros( self.bias_features_per_partition, device=get_accelerator().get_current_device(), dtype=dtype ) ) else: self.bias = None self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() swap_in_out_group() def _set_tensor_parallel_attributes(self) -> None: set_tensor_parallel_attribute_by_partition(self.weight, self.depth**3) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, self.depth) def _sync_grad_hook(self, grad) -> Tensor: grad = all_reduce(grad.clone(), self.output_x_weight_parallel_mode) return grad def reset_parameters(self, 
weight_initializer, bias_initializer) -> None: with seed(ParallelMode.TENSOR): fan_in, fan_out = self.in_features, self.out_features weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) register_async_grad_hook(self.weight) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) broadcast( self.bias, gpc.get_ranks_in_group(self.output_x_weight_parallel_mode)[0], self.output_x_weight_parallel_mode, ) self.bias.register_hook(self._sync_grad_hook) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight.transpose(0, 1) # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # partition in output groups if gpc.get_local_rank(self.input_parallel_mode) == 0 and gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = partition_tensor_parallel_state_dict( local_state, self.output_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, ) # partition in input groups if gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = partition_tensor_parallel_state_dict( local_state, self.input_parallel_mode, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) # partition in weight groups local_state = partition_tensor_parallel_state_dict( local_state, self.weight_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is 
not None: local_state[bias_key] = self.bias # gather in weight groups local_state = gather_tensor_parallel_state_dict( local_state, self.weight_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, keep_vars=keep_vars, ) # gather in input groups if gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = gather_tensor_parallel_state_dict( local_state, self.input_parallel_mode, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) # gather in output groups if gpc.get_local_rank(self.input_parallel_mode) == 0 and gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = gather_tensor_parallel_state_dict( local_state, self.output_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: local_state[weight_key] = local_state[weight_key].transpose(0, 1) destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: output = linear_3d( input_, self.weight, self.input_parallel_mode, self.weight_parallel_mode, self.output_parallel_mode, ) if not self.skip_bias_add: if self.bias is not None: output = output + self.bias return output else: return output, self.bias @LAYERS.register_module class Classifier3D(ParallelLayer): r"""Classifier for 3D parallelism. Args: in_features (int): size of each input sample. num_classes (int): number of classes. weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. 
bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, num_classes: int, weight: Parameter = None, bias: bool = True, dtype: torch.dtype = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() self.in_features = in_features self.num_classes = num_classes self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D) self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D) self.output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D) self.depth = get_depth_from_env() self.in_features_per_partition = divide(in_features, self.depth) if weight is not None: self.weight = weight self.has_weight = False else: self.weight = Parameter( torch.empty( self.num_classes, self.in_features_per_partition, device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.has_weight = True if bias: self.bias = Parameter( torch.zeros(self.num_classes, device=get_accelerator().get_current_device(), dtype=dtype) ) else: self.bias = None self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() def _set_tensor_parallel_attributes(self) -> None: if self.has_weight: set_tensor_parallel_attribute_by_partition(self.weight, self.depth) def reset_parameters(self, weight_initializer, bias_initializer) -> None: with seed(ParallelMode.TENSOR): fan_in, fan_out = self.in_features, self.num_classes if self.has_weight: weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) broadcast(self.weight, gpc.get_ranks_in_group(self.weight_parallel_mode)[0], self.weight_parallel_mode) register_async_grad_hook(self.weight) if self.bias is not None: bias_initializer(self.bias, 
fan_in=fan_in) broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.TENSOR)[0], ParallelMode.TENSOR) register_async_grad_hook(self.bias) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight if self.has_weight: weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # partition in output groups if gpc.get_local_rank(self.input_parallel_mode) == 0 and gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = partition_tensor_parallel_state_dict( local_state, self.output_parallel_mode, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, ) # broadcast in input groups if gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = broadcast_state_dict(local_state, self.input_parallel_mode) # broadcast in weight groups local_state = broadcast_state_dict(local_state, self.weight_parallel_mode) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict() if self.has_weight: local_state[weight_key] = self.weight if self.bias is not None: local_state[bias_key] = self.bias # gather in output groups if gpc.get_local_rank(self.input_parallel_mode) == 0 and gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = gather_tensor_parallel_state_dict( local_state, self.output_parallel_mode, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: 
return classifier_3d( input_, self.weight, self.bias, self.input_parallel_mode, self.weight_parallel_mode, self.output_parallel_mode, ) @LAYERS.register_module class VocabParallelClassifier3D(ParallelLayer): r"""Vocab parallel classifier layer for 3D parallelism. Args: in_features (int): size of each input sample. num_classes (int): number of classes. weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. 
""" def __init__( self, in_features: int, num_classes: int, weight: Parameter = None, bias: bool = True, dtype: torch.dtype = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() self.in_features = in_features self.num_classes = num_classes self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D) self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D) self.output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D) self.output_x_weight_parallel_mode = get_parallel_mode_from_env(OUTPUT_X_WEIGHT_3D) self.depth = get_depth_from_env() self.in_features_per_partition = divide(in_features, self.depth) self.out_features_per_partition = divide(num_classes, self.depth**2) self.bias_features_per_partition = divide(num_classes, self.depth) if weight is not None: self.weight = weight self.has_weight = False else: self.weight = Parameter( torch.empty( self.out_features_per_partition, self.in_features_per_partition, device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.has_weight = True if bias: self.bias = Parameter( torch.zeros( self.bias_features_per_partition, device=get_accelerator().get_current_device(), dtype=dtype ) ) else: self.bias = None self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() swap_in_out_group() env.vocab_parallel = True def _set_tensor_parallel_attributes(self) -> None: if self.has_weight: set_tensor_parallel_attribute_by_partition(self.weight, self.depth**3) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, self.depth) def reset_parameters(self, weight_initializer, bias_initializer) -> None: with seed(ParallelMode.TENSOR): fan_in, fan_out = self.in_features, self.num_classes if self.has_weight: weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) register_async_grad_hook(self.weight) if self.bias is not None: 
bias_initializer(self.bias, fan_in=fan_in) broadcast( self.bias, gpc.get_ranks_in_group(self.output_x_weight_parallel_mode)[0], self.output_x_weight_parallel_mode, ) register_async_grad_hook(self.bias) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight if self.has_weight: weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # partition in output groups if gpc.get_local_rank(self.input_parallel_mode) == 0 and gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = partition_tensor_parallel_state_dict( local_state, self.output_parallel_mode, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, ) # partition in input groups if gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = partition_tensor_parallel_state_dict( local_state, self.input_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) # partition in weight groups local_state = partition_tensor_parallel_state_dict( local_state, self.weight_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is not None: local_state[bias_key] = self.bias # gather in weight groups local_state = gather_tensor_parallel_state_dict( local_state, self.weight_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, 
keep_vars=keep_vars, ) # gather in input groups if gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = gather_tensor_parallel_state_dict( local_state, self.input_parallel_mode, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) # gather in output groups if gpc.get_local_rank(self.input_parallel_mode) == 0 and gpc.get_local_rank(self.weight_parallel_mode) == 0: local_state = gather_tensor_parallel_state_dict( local_state, self.output_parallel_mode, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: return vocab_parallel_classifier_3d( input_, self.weight, self.bias, self.input_parallel_mode, self.weight_parallel_mode, self.output_parallel_mode, ) @LAYERS.register_module class PatchEmbedding3D(ParallelLayer): r"""2D Image to Patch Embedding. Args: img_size (int): image size. patch_size (int): patch size. in_chans (int): number of channels of input image. embed_size (int): size of embedding. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. flatten (bool, optional): whether to flatten output tensor, defaults to True. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. position_embed_initializer (:class:`typing.Callable`, optional): The initializer of position embedding, defaults to zeros initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. 
""" def __init__( self, img_size: int, patch_size: int, in_chans: int, embed_size: int, flatten: bool = True, dtype: torch.dtype = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), position_embed_initializer: Callable = init.zeros_(), ): super().__init__() self.depth = get_depth_from_env() self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D) self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D) self.output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D) self.input_x_weight_parallel_mode = get_parallel_mode_from_env(INPUT_X_WEIGHT_3D) img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.embed_size = embed_size embed_size_per_partition = embed_size // self.depth self.flatten = flatten self.weight = nn.Parameter( torch.empty( (embed_size_per_partition, in_chans, *self.patch_size), device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.bias = nn.Parameter( torch.empty(embed_size_per_partition, device=get_accelerator().get_current_device(), dtype=dtype) ) self.cls_token = nn.Parameter( torch.zeros((1, 1, embed_size_per_partition), device=get_accelerator().get_current_device(), dtype=dtype) ) self.pos_embed = nn.Parameter( torch.zeros( (1, self.num_patches + 1, embed_size_per_partition), device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.reset_parameters(weight_initializer, bias_initializer, position_embed_initializer) self._set_tensor_parallel_attributes() def _set_tensor_parallel_attributes(self) -> None: set_tensor_parallel_attribute_by_partition(self.weight, self.depth) set_tensor_parallel_attribute_by_partition(self.bias, self.depth) set_tensor_parallel_attribute_by_partition(self.cls_token, 
self.depth) set_tensor_parallel_attribute_by_partition(self.pos_embed, self.depth) def _sync_grad_hook(self, grad) -> Tensor: grad = all_reduce(grad.clone(), self.input_x_weight_parallel_mode) return grad def reset_parameters(self, weight_initializer, bias_initializer, position_embed_initializer) -> None: with seed(ParallelMode.TENSOR): fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) fan_out = self.embed_size weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) bias_initializer(self.bias, fan_in=fan_in) position_embed_initializer(self.pos_embed) src_rank = gpc.get_ranks_in_group(self.input_x_weight_parallel_mode)[0] broadcast(self.weight, src_rank, self.input_x_weight_parallel_mode) broadcast(self.bias, src_rank, self.input_x_weight_parallel_mode) broadcast(self.pos_embed, src_rank, self.input_x_weight_parallel_mode) self.weight.register_hook(self._sync_grad_hook) self.bias.register_hook(self._sync_grad_hook) self.cls_token.register_hook(self._sync_grad_hook) self.pos_embed.register_hook(self._sync_grad_hook) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" cls_token_key = prefix + "cls_token" pos_embed_key = prefix + "pos_embed" if gpc.get_local_rank(ParallelMode.TENSOR) == 0:
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_2p5d/_operation.py
colossalai/legacy/nn/layer/parallel_2p5d/_operation.py
from typing import Any, Tuple import torch import torch.distributed as dist from torch import Tensor from torch.cuda.amp import custom_bwd, custom_fwd from colossalai.accelerator import get_accelerator from colossalai.legacy.communication.collective import all_gather, all_reduce, reduce_scatter from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc def get_parallel_group(parallel_mode: ParallelMode): return gpc.get_group(parallel_mode) def get_global_rank(): return gpc.get_global_rank() def get_parallel_rank(parallel_mode: ParallelMode): return gpc.get_local_rank(parallel_mode) class _Classifier2p5D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, A: Tensor, B: Tensor, bias, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: A = A.clone().detach() A_shape = A.shape A = A.reshape((-1, A_shape[-1])) B_shape = B.shape B = B.reshape((-1, B_shape[-1])) B_temp = all_gather(B, -1, col_parallel_mode) if ctx: ctx.save_for_backward(A, B_temp) C = torch.matmul(A, B_temp.transpose(0, 1)) C = all_reduce(C, row_parallel_mode) ctx.use_bias = bias is not None if bias is not None: C = C + bias out = C.reshape(out_shape) if ctx: ctx.tesseract_dim = tesseract_dim ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode ctx.A_shape = A_shape ctx.B_shape = B_shape ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size return out @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: A, B = ctx.saved_tensors 
with torch.no_grad(): A_grad = torch.matmul(output_grad, B) A_grad = A_grad.reshape(ctx.A_shape) B_grad = torch.matmul(output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), A) B_grad = reduce_scatter(B_grad, -1, ctx.col_parallel_mode) B_grad = B_grad.reshape(ctx.B_shape) if ctx.use_bias: bias_grad = torch.sum(output_grad, dim=tuple(range(output_grad.ndim - 1))) bias_grad = all_reduce(bias_grad, ctx.col_parallel_mode) else: bias_grad = None return A_grad, B_grad, bias_grad, None, None, None, None, None, None, None, None, None, None def classifier_2p5d( A: Tensor, B: Tensor, bias, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: r"""Classifier. Args: A (:class:`torch.tensor`): matrix :math:`A`. B (:class:`torch.tensor`): matrix :math:`B`. bias (:class:`torch.tensor`): matrix of bias. tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism. out_shape (:class:`torch.size`): shape of output tensor. row_rank (int): the rank of row. col_rank (int): the rank of column. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _Classifier2p5D.apply( A, B, bias, tesseract_dim, out_shape, row_rank, col_rank, row_parallel_mode, col_parallel_mode, data_parallel_rank, pipeline_parallel_rank, pipeline_parallel_size, tensor_parallel_size, ) class Matmul_AB_2p5D(torch.autograd.Function): r"""Matrix multiplication for :math:`C = AB`. Args: A (:class:`torch.tensor`): matrix :math:`A`. B (:class:`torch.tensor`): matrix :math:`B`. tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism. out_shape (:class:`torch.size`): shape of output tensor. row_rank (int): the rank of row. col_rank (int): the rank of column. dep_rank (int): the rank of depth. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: # A: [b / dq, s, h / q] -> [(b * s) / dq, h / q] # B: [h / dq, s / q] # C: [b / dq, s, s / q] -> [(b * s) / dq, s / q] assert A.shape[-1] == B.shape[-2], "Invalid shapes: A={}, B={} for AB.".format(A.shape, B.shape) if ctx: ctx.save_for_backward(A, B) A_shape = A.shape A = A.reshape((-1, A_shape[-1])) B_shape = B.shape B = B.reshape((-1, B_shape[-1])) C_shape = (A.shape[0], B.shape[-1]) C = torch.zeros(C_shape, dtype=A.dtype, device=get_accelerator().get_current_device()) # use circular buffer to store the communication tensor # 2 is enough for all cases A_list = [torch.empty_like(A) for _ in range(2)] B_list = [torch.empty_like(B) for _ in range(2)] row_group = gpc.get_group(row_parallel_mode) col_group = gpc.get_group(col_parallel_mode) src_a = ( tesseract_dim * row_rank + tesseract_dim**2 * dep_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) src_b = ( col_rank + tesseract_dim**2 * dep_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) opa = [None] * 2 opb = [None] * 2 A_list[0].copy_(A) B_list[0].copy_(B) opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True) opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True) cur = 0 for i in range(tesseract_dim): if i != tesseract_dim - 1: A_list[1 - cur].copy_(A) opa[1 - cur] = 
dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True) B_list[1 - cur].copy_(B) opb[1 - cur] = dist.broadcast( B_list[1 - cur], src=src_b + tesseract_dim, group=col_group, async_op=True ) if opa[cur] is not None: opa[cur].wait() if opb[cur] is not None: opb[cur].wait() torch.addmm(C, A_list[cur], B_list[cur], out=C) cur = 1 - cur src_a += 1 src_b += tesseract_dim out = C.reshape(out_shape) if ctx: ctx.tesseract_dim = tesseract_dim ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.dep_rank = dep_rank ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode ctx.A_shape = A_shape ctx.B_shape = B_shape ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size return out @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: A, B = ctx.saved_tensors with torch.no_grad(): A_grad = Matmul_ABT_2p5D.apply( output_grad, B, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank, ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) B_grad = Matmul_ATB_2p5D.apply( A, output_grad, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank, ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None class Matmul_ABT_2p5D(torch.autograd.Function): r"""Matrix multiplication for :math:`C = AB^T`. Args: A (:class:`torch.tensor`): matrix :math:`A`. B (:class:`torch.tensor`): matrix :math:`B`. tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism. out_shape (:class:`torch.size`): shape of output tensor. 
row_rank (int): the rank of row. col_rank (int): the rank of column. dep_rank (int): the rank of depth. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: assert A.shape[-1] == B.shape[-1], "Invalid shapes: A={}, B={} for ABT.".format(A.shape, B.shape) if ctx: ctx.save_for_backward(A, B) A_shape = A.shape A = A.reshape((-1, A_shape[-1])) B_shape = B.shape B = B.reshape((-1, B_shape[-1])) C_shape = (A.shape[0], B.shape[0]) C = torch.empty(C_shape, dtype=A.dtype, device=get_accelerator().get_current_device()) # use circular buffer to store the communication tensor # 2 is enough for all cases B_list = [torch.empty_like(B) for _ in range(2)] C_list = [torch.empty_like(C) for _ in range(2)] row_group = gpc.get_group(row_parallel_mode) col_group = gpc.get_group(col_parallel_mode) src_b = ( col_rank + tesseract_dim**2 * dep_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) src_c = ( tesseract_dim * row_rank + tesseract_dim**2 * dep_rank + data_parallel_rank * 
pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) opb = [None] * 2 opr = [None] * 2 B_list[0].copy_(B) opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True) cur = 0 for i in range(tesseract_dim): if i != tesseract_dim - 1: B_list[1 - cur].copy_(B) opb[1 - cur] = dist.broadcast( B_list[1 - cur], src=src_b + tesseract_dim, group=col_group, async_op=True ) if opr[cur] is not None: opr[cur].wait() if i - 2 == col_rank: C.copy_(C_list[cur]) if opb[cur] is not None: opb[cur].wait() torch.matmul(A, B_list[cur].transpose(0, 1), out=C_list[cur]) opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=row_group, async_op=True) cur = 1 - cur src_b += tesseract_dim src_c += 1 for op in opr: op.wait() if tesseract_dim - 2 == col_rank: C.copy_(C_list[cur]) if tesseract_dim - 1 == col_rank: C.copy_(C_list[1 - cur]) out = C.reshape(out_shape) if ctx: ctx.tesseract_dim = tesseract_dim ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.dep_rank = dep_rank ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode ctx.A_shape = A_shape ctx.B_shape = B_shape ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size return out @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: A, B = ctx.saved_tensors with torch.no_grad(): A_grad = Matmul_AB_2p5D.apply( output_grad, B, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank, ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) B_grad = Matmul_ATB_2p5D.apply( output_grad, A, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank, ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, 
ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None class Matmul_ATB_2p5D(torch.autograd.Function): r"""Matrix multiplication for :math:`C = A^TB` Args: A (:class:`torch.tensor`): matrix :math:`A`. B (:class:`torch.tensor`): matrix :math:`B`. tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism. out_shape (:class:`torch.size`): shape of output tensor. row_rank (int): the rank of row. col_rank (int): the rank of column. dep_rank (int): the rank of depth. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, A: Tensor, B: Tensor, tesseract_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, dep_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ): assert A.shape[-2] == B.shape[-2], "Invalid shapes: A={}, B={} for ATB.".format(A.shape, B.shape) if ctx: ctx.save_for_backward(A, B) A_shape = A.shape A = A.reshape((-1, A_shape[-1])) B_shape = B.shape B = B.reshape((-1, B_shape[-1])) C_shape = (A.shape[-1], B.shape[-1]) C = torch.empty(C_shape, dtype=A.dtype, device=get_accelerator().get_current_device()) # use circular buffer to store the communication tensor # 2 is enough for all cases A_list = [torch.empty_like(A) for _ in range(2)] C_list = [torch.empty_like(C) for _ in range(2)] row_group = gpc.get_group(row_parallel_mode) col_group = gpc.get_group(col_parallel_mode) src_a = ( tesseract_dim * row_rank + tesseract_dim**2 * dep_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) src_c = ( col_rank + tesseract_dim**2 * dep_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) opa = [None] * 2 opr = [None] * 2 A_list[0].copy_(A) opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True) cur = 0 for i in range(tesseract_dim): if i != tesseract_dim - 1: A_list[1 - cur].copy_(A) opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True) if opr[cur] is not None: opr[cur].wait() if i - 2 == row_rank: C.copy_(C_list[cur]) if opa[cur] is not None: opa[cur].wait() torch.matmul(A_list[cur].transpose(0, 1), 
B, out=C_list[cur]) opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=col_group, async_op=True) cur = 1 - cur src_a += 1 src_c += tesseract_dim for op in opr: op.wait() if tesseract_dim - 2 == row_rank: C.copy_(C_list[cur]) if tesseract_dim - 1 == row_rank: C.copy_(C_list[1 - cur]) out = C.reshape(out_shape) if ctx: ctx.tesseract_dim = tesseract_dim ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.dep_rank = dep_rank ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode ctx.A_shape = A_shape ctx.B_shape = B_shape ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size return out @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: A, B = ctx.saved_tensors with torch.no_grad(): A_grad = Matmul_ABT_2p5D.apply( B, output_grad, ctx.tesseract_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank, ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) B_grad = Matmul_AB_2p5D.apply( A, output_grad, ctx.tesseract_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank, ctx.dep_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None, None, None, None class _Add_Bias_2p5D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, input: Tensor, bias: Tensor, output_size_per_partition: int, tesseract_dim: int, row_rank: int, col_rank: int, dep_rank: int, col_parallel_mode: ParallelMode, skip_bias_add: bool, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: if row_rank == 0: 
bias_temp = bias.clone() else: bias_temp = torch.zeros( output_size_per_partition, dtype=bias.dtype, device=get_accelerator().get_current_device() ) src_rank = ( col_rank + dep_rank * tesseract_dim**2 + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) dist.broadcast(bias_temp, src=src_rank, group=get_parallel_group(col_parallel_mode)) ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.dep_rank = dep_rank ctx.tesseract_dim = tesseract_dim ctx.col_parallel_mode = col_parallel_mode ctx.bias = skip_bias_add ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size if skip_bias_add: return bias_temp else: output = input + bias_temp return output @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: row_rank = ctx.row_rank col_rank = ctx.col_rank dep_rank = ctx.dep_rank tesseract_dim = ctx.tesseract_dim col_parallel_mode = ctx.col_parallel_mode data_parallel_rank = ctx.data_parallel_rank pipeline_parallel_rank = ctx.pipeline_parallel_rank pipeline_parallel_size = ctx.pipeline_parallel_size tensor_parallel_size = ctx.tensor_parallel_size if ctx.bias: dst_rank = ( col_rank + dep_rank * (tesseract_dim**2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) dist.reduce(output_grad, dst=dst_rank, group=get_parallel_group(col_parallel_mode)) if row_rank == 0: return ( None, output_grad, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ) else: grad_tmp = torch.zeros_like(output_grad) return ( None, grad_tmp, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ) else: reduce_dim = tuple(range(output_grad.ndim - 1)) reduce = torch.sum(output_grad, dim=reduce_dim) dst_rank = ( col_rank + dep_rank * 
(tesseract_dim**2) + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) dist.reduce(reduce, dst=dst_rank, group=get_parallel_group(col_parallel_mode)) if row_rank == 0: return ( output_grad, reduce, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ) else: reduce_tmp = torch.zeros_like(reduce) return ( output_grad, reduce_tmp, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, ) def add_bias_2p5d( input: Tensor, bias: Tensor, output_size_per_partition: int, tesseract_dim: int, row_rank: int, col_rank: int, dep_rank: int, col_parallel_mode: ParallelMode, skip_bias_add: bool, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: r"""Matrix add bias: :math:`C = A + b`. Args: input (:class:`torch.tensor`): matrix :math:`A`. bias (:class:`torch.tensor`): matrix :math:`B`. tesseract_dim (int): dimension of TESSERACT fo 2.5D parallelism. output_size_per_partition (int): output size in each partition. row_rank (int): the rank of row. col_rank (int): the rank of column. dep_rank (int): the rank of depth. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _Add_Bias_2p5D.apply( input, bias, output_size_per_partition, tesseract_dim, row_rank, col_rank, dep_rank, col_parallel_mode, skip_bias_add, data_parallel_rank, pipeline_parallel_rank, pipeline_parallel_size, tensor_parallel_size, ) class _Layernorm2p5D(torch.autograd.Function): r"""Layernorm. Args: input (:class:`torch.tensor`): input matrix. E_x (:class:`torch.tensor`): mean. Var_x (:class:`torch.tensor`): variance. hidden_size (int): hidden size. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ @staticmethod @custom_fwd(cast_inputs=torch.float32) def forward( ctx: Any, input: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int, row_parallel_mode: ParallelMode ) -> Tensor: input = input - E_x # in here, input = x - E[x], Var_x = 1 / sqrt(Var[x] + eps) ctx.hidden_size = hidden_size output = input * Var_x ctx.save_for_backward(output, Var_x) ctx.row_parallel_mode = row_parallel_mode return output @staticmethod @custom_bwd def backward(ctx, output_grad): row_parallel_mode = ctx.row_parallel_mode x, Var_x = ctx.saved_tensors # in here, Var_x = 1 / sqrt(Var[x] + eps), x = (x - E[x]) * Var_x with torch.no_grad(): output_grad_sum = torch.sum(output_grad, dim=-1, keepdim=True) torch.distributed.all_reduce(output_grad_sum, group=get_parallel_group(row_parallel_mode)) output_grad_sum /= ctx.hidden_size output_grad_mul_x_sum = torch.sum(output_grad * x, dim=-1, keepdim=True) torch.distributed.all_reduce(output_grad_mul_x_sum, group=get_parallel_group(row_parallel_mode)) output_grad_mul_x_sum /= ctx.hidden_size input_grad = output_grad.clone() 
input_grad -= x * output_grad_mul_x_sum input_grad -= output_grad_sum input_grad *= Var_x return input_grad, None, None, None, None, None, None def layernorm_2p5d( input: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int, row_parallel_mode: ParallelMode ) -> Tensor: r"""Layernorm. Args: input (:class:`torch.tensor`): input matrix. E_x (:class:`torch.tensor`): mean. Var_x (:class:`torch.tensor`): variance. hidden_size (int): hidden size. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_2p5d/__init__.py
colossalai/legacy/nn/layer/parallel_2p5d/__init__.py
from ._operation import reduce_by_batch_2p5d, split_batch_2p5d from .layers import ( Classifier2p5D, Embedding2p5D, LayerNorm2p5D, Linear2p5D, PatchEmbedding2p5D, VocabParallelClassifier2p5D, VocabParallelEmbedding2p5D, ) __all__ = [ "split_batch_2p5d", "reduce_by_batch_2p5d", "Linear2p5D", "LayerNorm2p5D", "Classifier2p5D", "PatchEmbedding2p5D", "Embedding2p5D", "VocabParallelClassifier2p5D", "VocabParallelEmbedding2p5D", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_2p5d/_utils.py
colossalai/legacy/nn/layer/parallel_2p5d/_utils.py
from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.global_variables import tensor_parallel_env as env def get_tesseract_dim_dep_from_env(): try: tesseract_dim = env.tesseract_dim tesseract_dep = env.tesseract_dep assert tesseract_dim > 0, "TESSERACT_DIM must be larger than zero" assert tesseract_dep > 0, "TESSERACT_DEP must be larger than zero" return tesseract_dim, tesseract_dep except KeyError: raise EnvironmentError( "TESSERACT_DIM or TESSERACT_DEP is not found in the current environment, " "please make sure that you have used the correct process group initializer" ) def assert_tesseract_initialization(): assert ( gpc.is_initialized(ParallelMode.PARALLEL_2P5D_COL) and gpc.is_initialized(ParallelMode.PARALLEL_2P5D_ROW) and gpc.is_initialized(ParallelMode.PARALLEL_2P5D_DEP) and gpc.is_initialized(ParallelMode.PARALLEL_2P5D_XZ) ), ( "Both PARALLEL_2P5D_COL, PARALLEL_2P5D_ROW, PARALLEL_2P5D_DEP and PARALLEL_2P5D_XZ " "must be initialized by the process group initializer" )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_2p5d/layers.py
colossalai/legacy/nn/layer/parallel_2p5d/layers.py
import math from collections import OrderedDict from typing import Callable import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torch.nn import Parameter from colossalai.accelerator import get_accelerator from colossalai.legacy.communication import broadcast from colossalai.legacy.context import ParallelMode, seed from colossalai.legacy.core import global_context as gpc from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.registry import LAYERS from colossalai.legacy.utils.checkpointing import ( broadcast_state_dict, gather_tensor_parallel_state_dict, partition_tensor_parallel_state_dict, ) from colossalai.nn import init as init from ..base_layer import ParallelLayer from ..utils import divide, set_tensor_parallel_attribute_by_partition, to_2tuple from ._operation import ( Matmul_AB_2p5D, Matmul_ABT_2p5D, add_bias_2p5d, all_gather_tensor_2p5d, classifier_2p5d, layernorm_2p5d, reduce_scatter_tensor_2p5d, split_batch_2p5d, ) from ._utils import assert_tesseract_initialization, get_tesseract_dim_dep_from_env @LAYERS.register_module class Linear2p5D(ParallelLayer): r"""Linear layer for 2.5D parallelism. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion, defaults to False. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. 
More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, skip_bias_add: bool = False, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() self.in_features = in_features self.out_features = out_features self.skip_bias_add = skip_bias_add # parallel setting assert_tesseract_initialization() self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) self.tesseract_dim, _ = get_tesseract_dim_dep_from_env() # partitioning dimension self.input_size_per_partition = divide(in_features, self.tesseract_dim) self.hidden_size_per_partition = divide(out_features, self.tesseract_dim) # create weight, shape: [k/q, h/q] factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} self.weight = Parameter( torch.empty(self.input_size_per_partition, self.hidden_size_per_partition, **factory_kwargs) ) # create bias, shape: [h/q] if bias: self.bias = Parameter(torch.empty(self.hidden_size_per_partition, **factory_kwargs)) else: self.register_parameter("bias", None) # initialize parameters with seed(ParallelMode.TENSOR): self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() def _set_tensor_parallel_attributes(self): set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, self.tesseract_dim) def reset_parameters(self, weight_initializer, bias_initializer) -> None: fan_in, fan_out = self.in_features, self.out_features weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is 
not None: bias_initializer(self.bias, fan_in=fan_in) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight.transpose(0, 1) # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # broadcast in dep groups if ( gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) == 0 and gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) == 0 ): broadcast_state_dict(local_state, ParallelMode.PARALLEL_2P5D_DEP) # partition in column groups if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) == 0: local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_COL, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, ) # partition in row groups local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_ROW, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) == 0: weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is not None: local_state[bias_key] = self.bias # gather in row groups local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_ROW, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) # gather in column groups if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) == 0: local_state = gather_tensor_parallel_state_dict( local_state, 
ParallelMode.PARALLEL_2P5D_COL, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: False}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: local_state[weight_key] = local_state[weight_key].transpose(0, 1) destination.update(local_state) def forward(self, x: Tensor) -> Tensor: # input: [m/dq, n/q, k/q] # output: [m/dq, n/q, h/q] out_shape = x.shape[:-1] + (self.hidden_size_per_partition,) output = Matmul_AB_2p5D.apply( x, self.weight, self.tesseract_dim, out_shape, self.row_rank, self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_ROW, ParallelMode.PARALLEL_2P5D_COL, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) if self.bias is not None: if self.skip_bias_add: bias = add_bias_2p5d( None, self.bias, self.hidden_size_per_partition, self.tesseract_dim, self.row_rank, self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL, True, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) return output, bias else: output = add_bias_2p5d( output, self.bias, self.hidden_size_per_partition, self.tesseract_dim, self.row_rank, self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL, False, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) return output else: return output @LAYERS.register_module class LayerNorm2p5D(ParallelLayer): r"""Layer Normalization for 2.5D parallelism. Args: normalized_shape (int): input shape from an expected input of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]` If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. 
eps (float, optional): a value added to the denominator for numerical stability, defaults to 1e-05. bias (bool, optional): Whether to add a bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. """ def __init__(self, normalized_shape: int, eps: float = 1e-05, bias=True, dtype=None): super().__init__() # layer norm config self.normalized_shape = normalized_shape self.variance_epsilon = eps # parallel setting assert_tesseract_initialization() self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW) self.dep_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_DEP) self.tesseract_dim, _ = get_tesseract_dim_dep_from_env() # partitioning dimension self.partitioned_partition = divide(normalized_shape, self.tesseract_dim) # * # create parameters factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} self.weight = Parameter(torch.ones(self.partitioned_partition, **factory_kwargs)) if bias: self.bias = Parameter(torch.zeros(self.partitioned_partition, **factory_kwargs)) else: self.bias = None self._set_tensor_parallel_attribute() def _set_tensor_parallel_attribute(self): set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, self.tesseract_dim) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # partition in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) == 0: local_state = partition_tensor_parallel_state_dict( local_state, 
ParallelMode.PARALLEL_2P5D_ROW, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) # partition in column groups local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_COL, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is not None: local_state[bias_key] = self.bias # gather in column groups local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_COL, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) # gather in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) == 0: local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_ROW, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, x: Tensor) -> Tensor: with torch.no_grad(): E_x = torch.sum(x, dim=-1, keepdim=True) # [b/q, s, 1] torch.distributed.all_reduce(E_x, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW)) E_x /= self.normalized_shape # Var_x in the block below is the sum of input^2 Var_x = torch.sum(x * x, dim=-1, keepdim=True) # [b/q, s, 1] torch.distributed.all_reduce(Var_x, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW)) Var_x /= self.normalized_shape Var_x = Var_x - E_x * E_x # variance of x [b/q, s, 1] # this time 1/sqrt(Var_x + epsilon) Var_x = 1.0 / torch.sqrt(Var_x + self.variance_epsilon) output = layernorm_2p5d(x, E_x, Var_x, self.normalized_shape, ParallelMode.PARALLEL_2P5D_ROW) scale = add_bias_2p5d( None, self.weight, 
self.partitioned_partition, self.tesseract_dim, self.row_rank, self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL, True, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) if self.bias is not None: bias = add_bias_2p5d( None, self.bias, self.partitioned_partition, self.tesseract_dim, self.row_rank, self.col_rank, self.dep_rank, ParallelMode.PARALLEL_2P5D_COL, True, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) output = torch.addcmul(bias, scale, output) else: output = torch.mul(scale, output) return output @LAYERS.register_module class PatchEmbedding2p5D(ParallelLayer): r"""2D Image to Patch Embedding. Args: img_size (int): image size. patch_size (int): patch size. in_chans (int): number of channels of input image. embed_size (int): size of embedding. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. flatten (bool, optional): whether to flatten output tensor, defaults to True. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. position_embed_initializer (:class:`typing.Callable`, optional): The initializer of position embedding, defaults to zeros initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. 
""" def __init__( self, img_size: int, patch_size: int, in_chans: int, embed_size: int, flatten: bool = True, dtype: torch.dtype = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), position_embed_initializer: Callable = init.zeros_(), ): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) assert_tesseract_initialization() self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.flatten = flatten self.embed_size = embed_size self.embed_size_per_partition = embed_size // self.tesseract_dim**2 with seed(ParallelMode.TENSOR): self.weight = Parameter( torch.empty( (self.embed_size_per_partition, in_chans, *self.patch_size), device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.bias = Parameter( torch.empty(self.embed_size_per_partition, device=get_accelerator().get_current_device(), dtype=dtype) ) self.cls_token = Parameter( torch.zeros( (1, 1, self.embed_size_per_partition), device=get_accelerator().get_current_device(), dtype=dtype ) ) self.pos_embed = Parameter( torch.zeros( (1, self.num_patches + 1, self.embed_size_per_partition), device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.reset_parameters(weight_initializer, bias_initializer, position_embed_initializer) self._set_tensor_parallel_attribute() def _set_tensor_parallel_attribute(self): set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2) set_tensor_parallel_attribute_by_partition(self.bias, self.tesseract_dim**2) set_tensor_parallel_attribute_by_partition(self.cls_token, self.tesseract_dim**2) set_tensor_parallel_attribute_by_partition(self.pos_embed, self.tesseract_dim**2) def reset_parameters(self, weight_initializer, 
bias_initializer, position_embed_initializer): with seed(ParallelMode.TENSOR): fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) fan_out = self.embed_size weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) bias_initializer(self.bias, fan_in=fan_in) position_embed_initializer(self.pos_embed) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" cls_token_key = prefix + "cls_token" pos_embed_key = prefix + "pos_embed" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # cls token cls_token = state_dict.pop(cls_token_key, None) if cls_token is not None: local_state[cls_token_key] = cls_token # pos embed pos_embed = state_dict.pop(pos_embed_key, None) if pos_embed is not None: local_state[pos_embed_key] = pos_embed # partition in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) == 0: local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_ROW, dims={weight_key: 0, bias_key: 0, cls_token_key: -1, pos_embed_key: -1}, partition_states={weight_key: True, bias_key: True, cls_token_key: True, pos_embed_key: True}, ) # partition in column groups local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_COL, dims={weight_key: 0, bias_key: 0, cls_token_key: -1, pos_embed_key: -1}, partition_states={weight_key: True, bias_key: True, cls_token_key: True, pos_embed_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" cls_token_key = prefix + "cls_token" pos_embed_key = prefix + "pos_embed" local_state = 
OrderedDict( {weight_key: self.weight, bias_key: self.bias, cls_token_key: self.cls_token, pos_embed_key: self.pos_embed} ) # gather in column groups local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_COL, dims={weight_key: 0, bias_key: 0, cls_token_key: -1, pos_embed_key: -1}, partition_states={weight_key: True, bias_key: True, cls_token_key: True, pos_embed_key: True}, keep_vars=keep_vars, ) # gather in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) == 0: local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_ROW, dims={weight_key: 0, bias_key: 0, cls_token_key: -1, pos_embed_key: -1}, partition_states={weight_key: True, bias_key: True, cls_token_key: True, pos_embed_key: True}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: input_ = split_batch_2p5d(input_, 0) B, C, H, W = input_.shape assert ( H == self.img_size[0] and W == self.img_size[1] ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." weight = all_gather_tensor_2p5d(self.weight, 0, ParallelMode.PARALLEL_2P5D_COL) bias = all_gather_tensor_2p5d(self.bias, 0, ParallelMode.PARALLEL_2P5D_COL) output = F.conv2d(input_, weight, bias, stride=self.patch_size) if self.flatten: output = output.flatten(2).transpose(1, 2) # BCHW -> BNC cls_token = all_gather_tensor_2p5d(self.cls_token, -1, ParallelMode.PARALLEL_2P5D_COL) pos_embed = all_gather_tensor_2p5d(self.pos_embed, -1, ParallelMode.PARALLEL_2P5D_COL) cls_token = cls_token.expand(output.shape[0], -1, -1) output = torch.cat((cls_token, output), dim=1) output = output + pos_embed return output @LAYERS.register_module class Embedding2p5D(ParallelLayer): r"""Embedding for 2.5D parallelism. Args: num_embeddings (int): number of embeddings. embedding_dim (int): dimension of embedding. 
padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed “pad”, defaults to None. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): he initializer of weight, defaults to normal initializer. The ``args`` and ``kwargs`` used in :class:``torch.nn.functional.embedding`` should contain: :: max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2. scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False. sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False. More details about ``args`` and ``kwargs`` could be found in `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_. 
More details about initializer please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_ """ def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: int = None, dtype: torch.dtype = None, weight_initializer: Callable = init.normal_(), *args, **kwargs, ): super().__init__() assert_tesseract_initialization() self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() self.num_embeddings = num_embeddings self.embed_dim = embedding_dim embed_dim_per_partition = embedding_dim // self.tesseract_dim**2 self.padding_idx = padding_idx self.embed_args = args self.embed_kwargs = kwargs self.weight = Parameter( torch.empty( (num_embeddings, embed_dim_per_partition), device=get_accelerator().get_current_device(), dtype=dtype ) ) self.reset_parameters(weight_initializer) self._set_tensor_parallel_attributes() def _set_tensor_parallel_attributes(self): set_tensor_parallel_attribute_by_partition(self.weight, self.tesseract_dim**2) def reset_parameters(self, weight_initializer) -> None: with seed(ParallelMode.TENSOR): fan_in, fan_out = self.num_embeddings, self.embed_dim weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) self._fill_padding_idx_with_zero() def _fill_padding_idx_with_zero(self) -> None: if self.padding_idx is not None: with torch.no_grad(): self.weight[self.padding_idx].fill_(0) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # partition in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) == 0: local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_ROW, dims={weight_key: -1}, partition_states={weight_key: True}, ) # partition in column groups local_state = 
partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_COL, dims={weight_key: -1}, partition_states={weight_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" local_state = OrderedDict({weight_key: self.weight}) # gather in column groups local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_COL, dims={weight_key: -1}, partition_states={weight_key: True}, keep_vars=keep_vars, ) # gather in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL) == 0: local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2P5D_ROW, dims={weight_key: -1}, partition_states={weight_key: True}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: input_ = split_batch_2p5d(input_, 0) weight = all_gather_tensor_2p5d(self.weight, -1, ParallelMode.PARALLEL_2P5D_COL) output = F.embedding(input_, weight, self.padding_idx, *self.embed_args, **self.embed_kwargs) return output @LAYERS.register_module class VocabParallelEmbedding2p5D(ParallelLayer): """Embedding parallelized in the vocabulary dimension. Args: num_embeddings (int): number of embeddings. embedding_dim (int): dimension of embedding. padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed “pad”, defaults to None. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): he initializer of weight, defaults to normal initializer. 
The ``args`` and ``kwargs`` used in :class:``torch.nn.functional.embedding`` should contain: :: max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2. scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False. sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False. More details about ``args`` and ``kwargs`` could be found in `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_. More details about initializer please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: int = None, dtype: torch.dtype = None, weight_initializer: Callable = init.normal_(), *args, **kwargs, ): super().__init__() self.num_embeddings = num_embeddings self.embed_dim = embedding_dim self.padding_idx = padding_idx self.embed_args = args self.embed_kwargs = kwargs assert_tesseract_initialization() self.tesseract_dim, self.tesseract_dep = get_tesseract_dim_dep_from_env() self.num_embeddings_per_partition = divide(self.num_embeddings, self.tesseract_dim) self.embed_dim_per_partition = divide(self.embed_dim, self.tesseract_dim) tensor_parallel_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_COL)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/metric/accuracy_2p5d.py
colossalai/legacy/nn/metric/accuracy_2p5d.py
import torch from torch import nn from colossalai.legacy.nn.layer.parallel_2p5d import reduce_by_batch_2p5d, split_batch_2p5d from ._utils import calc_acc class Accuracy2p5D(nn.Module): """Accuracy for 2p5D parallelism""" def __init__(self): super().__init__() def forward(self, logits, targets): """Calculate the accuracy of predicted labels. Args: logits (:class:`torch.tensor`): Predicted labels. targets (:class:`torch.tensor`): True labels from data. Returns: float: the accuracy of prediction. """ with torch.no_grad(): targets = split_batch_2p5d(targets) correct = calc_acc(logits, targets) correct = reduce_by_batch_2p5d(correct) return correct
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/metric/accuracy_3d.py
colossalai/legacy/nn/metric/accuracy_3d.py
import torch from torch import nn from colossalai.legacy.constants import INPUT_GROUP_3D, WEIGHT_GROUP_3D from colossalai.legacy.nn.layer.parallel_3d import reduce_by_batch_3d, split_tensor_3d from colossalai.legacy.nn.layer.parallel_3d._utils import get_parallel_mode_from_env from ._utils import calc_acc class Accuracy3D(nn.Module): """Accuracy for 3D parallelism""" def __init__(self): super().__init__() self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D) self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D) def forward(self, logits, targets): """Calculate the accuracy of predicted labels. Args: logits (:class:`torch.tensor`): Predicted labels. targets (:class:`torch.tensor`): True labels from data. Returns: float: the accuracy of prediction. """ with torch.no_grad(): targets = split_tensor_3d(targets, 0, self.weight_parallel_mode) targets = split_tensor_3d(targets, 0, self.input_parallel_mode) correct = calc_acc(logits, targets) correct = reduce_by_batch_3d(correct, self.input_parallel_mode, self.weight_parallel_mode) return correct
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/metric/__init__.py
colossalai/legacy/nn/metric/__init__.py
from torch import nn from colossalai.legacy.nn.layer.utils import get_tensor_parallel_mode from ._utils import calc_acc from .accuracy_2d import Accuracy2D from .accuracy_2p5d import Accuracy2p5D from .accuracy_3d import Accuracy3D _parallel_accuracy = { "2d": Accuracy2D, "2.5d": Accuracy2p5D, "3d": Accuracy3D, } class Accuracy(nn.Module): def __init__(self): super().__init__() tensor_parallel = get_tensor_parallel_mode() if tensor_parallel not in _parallel_accuracy: self.acc = calc_acc else: self.acc = _parallel_accuracy[tensor_parallel]() def forward(self, *args): return self.acc(*args)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/metric/_utils.py
colossalai/legacy/nn/metric/_utils.py
import torch def calc_acc(logits, targets): preds = torch.argmax(logits, dim=-1) correct = torch.sum(targets == preds) return correct
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/metric/accuracy_2d.py
colossalai/legacy/nn/metric/accuracy_2d.py
import torch from torch import nn from colossalai.legacy.nn.layer.parallel_2d import reduce_by_batch_2d, split_batch_2d from ._utils import calc_acc class Accuracy2D(nn.Module): """Accuracy for 2D parallelism""" def __init__(self): super().__init__() def forward(self, logits, targets): """Calculate the accuracy of predicted labels. Args: logits (:class:`torch.tensor`): Predicted labels. targets (:class:`torch.tensor`): True labels from data. Returns: float: the accuracy of prediction. """ with torch.no_grad(): targets = split_batch_2d(targets) correct = calc_acc(logits, targets) correct = reduce_by_batch_2d(correct) return correct
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/reducer.py
colossalai/legacy/nn/parallel/reducer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. import functools from typing import Callable, Dict, List, Optional, Tuple import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ProcessGroup class Bucket: def __init__(self, size: int, dtype: torch.dtype, device: torch.device, group: ProcessGroup): self.buffer = torch.zeros(size, dtype=dtype, device=device) self.group = group self.offset = 0 self.callbacks: List[Callable] = [] def flush(self) -> None: """Flush content of the bucket.""" if self.offset == 0: assert len(self.callbacks) == 0 return # reduce-scatter bucket dist.all_reduce(self.buffer[: self.offset], group=self.group) # execute post-reduction callbacks for callback_fn in self.callbacks: callback_fn() # reuse input bucket but allocate a fresh output shard self.offset = 0 self.callbacks.clear() self.buffer = torch.zeros_like(self.buffer) def alloc(self) -> None: if self.buffer.storage().size() == 0: self.buffer.storage().resize_(self.buffer.numel()) def free(self) -> None: assert self.offset == 0 and self.callbacks == [], "Incorrect call of teardown" self.buffer.storage().resize_(0) def append(self, tensor: Tensor, callback_fn: Callable): tensor_size = tensor.numel() offset = self.offset self.buffer[offset : offset + tensor_size].copy_(tensor.flatten()) self.offset += tensor_size # callback will be given the reduced result if callback_fn is not None: result_view = self.buffer[offset : offset + tensor_size].view(tensor.shape) self.callbacks.append(functools.partial(callback_fn, result_view)) @property def avail_size(self) -> int: return self.buffer.size(0) - self.offset class Reducer: def __init__(self, bucket_size_mb: int = 25): self.bucket_size_mb = bucket_size_mb self.buckets: Dict[Tuple[torch.dtype, torch.device, ProcessGroup], Bucket] = {} @torch.no_grad() def all_reduce_async( 
self, tensor: Tensor, group: ProcessGroup, callback_fn: Optional[Callable] = None, ) -> None: bucket_size = self._get_bucket_size(tensor.element_size()) if tensor.numel() >= bucket_size: dist.all_reduce(tensor, group=group) if callback_fn is not None: callback_fn(tensor) return bucket = self._get_bucket(tensor, group) if tensor.numel() > bucket.avail_size: # not enough space remaining in bucket, flush it now bucket.flush() bucket.append(tensor, callback_fn) @torch.no_grad() def flush(self) -> None: for bucket in self.buckets.values(): bucket.flush() @torch.no_grad() def free(self) -> None: for bucket in self.buckets.values(): bucket.free() @functools.lru_cache() def _get_bucket_size(self, element_size: int) -> int: if self.bucket_size_mb <= 0: # Values <= 0 disable bucketing. return 0 MB = 1024 * 1024 bucket_size = self.bucket_size_mb * MB / element_size return int(bucket_size) def _get_bucket(self, tensor: Tensor, group: ProcessGroup) -> Bucket: key = (tensor.dtype, tensor.device, group) if key not in self.buckets: bucket_size = self._get_bucket_size(tensor.element_size()) self.buckets[key] = Bucket(bucket_size, tensor.dtype, tensor.device, group) self.buckets[key].alloc() return self.buckets[key]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/data_parallel.py
colossalai/legacy/nn/parallel/data_parallel.py
from collections import OrderedDict from functools import partial from typing import Iterable, Optional, Set import torch import torch.distributed as dist from colossalai.legacy.tensor import ProcessGroup as ColoProcessGroup from colossalai.utils import is_ddp_ignored from .reducer import Reducer def free_storage(data: torch.Tensor) -> None: """Free underlying storage of a Tensor.""" if data.storage().size() > 0: # Since we're modifying the Tensor's Storage directly, make sure the Tensor # is the sole occupant of the Storage. assert data.storage_offset() == 0 data.storage().resize_(0) def _cast_float(args, dtype: torch.dtype): if isinstance(args, torch.Tensor) and torch.is_floating_point(args): args = args.to(dtype) elif isinstance(args, (list, tuple)): args = type(args)(_cast_float(t, dtype) for t in args) elif isinstance(args, dict): args = {k: _cast_float(v, dtype) for k, v in args.items()} return args class ColoDDP(torch.nn.Module): """Distributed data parallel for ColoTensor. Nested ColoDDP is not supported now. Example: >>> from colossalai.legacy.core import global_context as gpc >>> from colossalai.legacy.context import ParallelMode >>> model = torch.nn.Linear(20, 1) >>> pg = ProcessGroup(tp_degree = world_size//2) >>> model = ColoDDP(model, pg) >>> logits = model(x) >>> loss = criterion(logits, labels) >>> model.backward(loss) Args: module (torch.nn.Module): Module to apply DDP. process_group (Optional[dist.ProcessGroup], optional): The process group which DDP uses. If it's None, the default data parallel group will be used. Defaults to None. 
""" def __init__( self, module: torch.nn.Module, process_group: ColoProcessGroup, bucket_cap_mb: int = 25, rebuild_bucket: bool = True, ) -> None: assert not isinstance(module, ColoDDP) super().__init__() self.module = module self.comm_stream: torch.cuda.Stream = torch.cuda.Stream() assert process_group self.process_group = process_group self.dp_world_size = self.process_group.dp_world_size() self.reducer = Reducer(bucket_cap_mb) self.rebuild_bucket = rebuild_bucket for p in module.parameters(): if is_ddp_ignored(p): continue if p.requires_grad: p.register_hook(partial(self.grad_handle, p)) def parameters(self, recurse: bool = True): return self.module.parameters(recurse) def named_parameters(self, prefix: str = "", recurse: bool = True): return self.module.named_parameters(prefix, recurse) def named_buffers(self, prefix: str = "", recurse: bool = True): return self.module.named_buffers(prefix, recurse) def named_children(self): return self.module.named_children() def named_modules( self, memo: Optional[Set[torch.nn.Module]] = None, prefix: str = "", remove_duplicate: bool = True ): return self.module.named_modules(memo, prefix, remove_duplicate) def forward(self, *args, **kwargs): self.module.zero_grad(set_to_none=True) return self.module(*args, **kwargs) def backward(self, loss: torch.Tensor): loss.backward() with torch.cuda.stream(self.comm_stream): self.reducer.flush() torch.cuda.current_stream().wait_stream(self.comm_stream) if self.rebuild_bucket: self.reducer.free() for p in self.module.parameters(): if is_ddp_ignored(p): continue if p.grad.device.type != "cpu": p.grad = p._saved_grad def grad_handle(self, p, grad): if grad.device.type != "cpu": empty_grad = torch.empty_like(grad) free_storage(empty_grad) if self.dp_world_size > 1: grad = grad / self.dp_world_size self.comm_stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(self.comm_stream): self.reducer.all_reduce_async( grad, group=self.process_group.dp_process_group(), 
callback_fn=partial(self._save_grad, p) ) grad.record_stream(self.comm_stream) else: ColoDDP._save_grad(p, grad) return empty_grad else: # TODO(jiaruifang) fixme self.process_group.set_cpu_groups() dist.all_reduce(grad, group=self.process_group.cpu_dp_process_group()) return grad @staticmethod def _save_grad(p, grad): if hasattr(p, "_saved_grad"): p._saved_grad.add_(grad) else: p._saved_grad = grad def zero_grad(self, set_to_none: bool = False) -> None: self.module.zero_grad(set_to_none=True) for p in self.module.parameters(): if getattr(p, "_saved_grad", None) is not None: if set_to_none: p._saved_grad = None else: if p._saved_grad.grad_fn is not None: p._saved_grad.detach_() else: p._saved_grad.requires_grad_(False) p._saved_grad.zero_() @staticmethod def set_params_to_ignore(params_to_ignore: Iterable[torch.Tensor]) -> None: """Sets parameters to be ignored by DDP. This method must be called before initializing ColoDDP. Example: >>> params_to_ignore = [] >>> for p in module.parameters(): >>> if should_ignore(p): >>> params_to_ignore.append(p) >>> ColoDDP.set_params_to_ignore(params_to_ignore) >>> module = ColoDDP(module) Args: params_to_ignore (Iterable[torch.Tensor]): A list of parameters to be ignored. """ for p in params_to_ignore: p._ddp_to_ignore = True def state_dict(self, destination=None, prefix="", keep_vars=False): return self.module.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars) def load_state_dict(self, state_dict: "OrderedDict[str, torch.Tensor]", strict: bool = True): return self.module.load_state_dict(state_dict, strict)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/__init__.py
colossalai/legacy/nn/parallel/__init__.py
from .data_parallel import ColoDDP __all__ = [ "ColoDDP", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/module_utils.py
colossalai/legacy/nn/parallel/layers/module_utils.py
from typing import Dict import torch from colossalai.legacy.tensor import ComputeSpec, ProcessGroup from colossalai.tensor import ColoParameter from . import ColoModule _COLOSSAL_MODULES: Dict[type, ColoModule] = {} def register_colo_module(module_type: type, colo_module: ColoModule): global _COLOSSAL_MODULES _COLOSSAL_MODULES[module_type] = colo_module def is_colo_module(module: torch.nn.Module): global _COLOSSAL_MODULES for module_type in _COLOSSAL_MODULES.keys(): if isinstance(module, module_type): return True return False def get_colo_module(module: torch.nn.Module): global _COLOSSAL_MODULES if is_colo_module(module): for module_type, colo_module in _COLOSSAL_MODULES.items(): if isinstance(module, module_type): return colo_module else: return None def check_colo_module(module: torch.nn.Module, pg: ProcessGroup, recursive=True): if is_colo_module(module): colo_module = get_colo_module(module) param_names = colo_module.get_param_names() compute_pattern = None for param_name in param_names: param = module.get_parameter(param_name) if not isinstance(param, ColoParameter): raise Exception(f"Invalid ColoParameter spec: {param} in {module} is not a ColoParameter.") if param.has_compute_spec(): cur_compute_pattern = param.compute_spec.compute_pattern if compute_pattern is None: compute_pattern = cur_compute_pattern else: if cur_compute_pattern != compute_pattern: raise Exception( f"Invalid ColoParameter spec: Params in {module} have different compute_pattern." ) else: continue if compute_pattern is not None: colo_module.register(compute_pattern, pg) if not colo_module.has_compute_pattern(compute_pattern): raise Exception( f"Invalid ColoParameter spec: ComputePattern {compute_pattern} in {module} is not allowed." 
) match_specs = False allowed_specs = colo_module.get_dist_specs(compute_pattern) for _, param_specs in allowed_specs.items(): cur_match = True for param_name, dist_spec in param_specs.items(): param = module.get_parameter(param_name) if param.has_compute_spec(): if dist_spec != param.dist_spec: cur_match = False break else: if dist_spec is not None: cur_match = False break if cur_match == True: match_specs = True break if match_specs == False: raise Exception(f"Invalid ColoParameter spec: Params in {module} are incorrectly sharded.") if recursive == True: for submodule in module.children(): check_colo_module(submodule, pg=pg, recursive=True) def init_colo_module( module: torch.nn.Module, compute_spec: ComputeSpec, pg: ProcessGroup, recursive=True, mode="default" ): compute_pattern = compute_spec.compute_pattern if is_colo_module(module): # for each param # set its process_group, dist_spec and compute_spec colo_module = get_colo_module(module) colo_module.register(compute_pattern, pg) if not colo_module.has_compute_pattern_with_mode(compute_pattern, mode=mode): raise NotImplementedError # a set for modules which update at least one param in the init process. # these modules need to be checked whether all params still match one of the valid compute pattern. modules_update_param = {module} for param_name, dist_spec in colo_module.get_dist_specs_with_mode(compute_pattern, mode=mode).items(): if dist_spec is None: continue param = module.get_parameter(param_name) if isinstance(param, ColoParameter): param.set_process_group(pg) param.set_dist_spec(dist_spec) param.compute_spec = compute_spec for mod in param.shared_param_modules: modules_update_param.add(mod) for mod in modules_update_param: check_colo_module(mod, pg, recursive=False) if recursive == True: for submodule in module.children(): init_colo_module(submodule, compute_spec, pg=pg, recursive=True, mode=mode)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/__init__.py
colossalai/legacy/nn/parallel/layers/__init__.py
from .cache_embedding import ( CachedEmbeddingBag, CachedParamMgr, EvictionStrategy, LimitBuffIndexCopyer, ParallelCachedEmbeddingBag, ParallelCachedEmbeddingBagTablewise, ParallelCachedEmbeddingBagTablewiseSpiltCache, TablewiseEmbeddingBagConfig, ) from .colo_module import ColoModule from .embedding import ColoEmbedding from .linear import ColoLinear from .module_utils import check_colo_module, get_colo_module, init_colo_module, is_colo_module, register_colo_module __all__ = [ "ColoModule", "register_colo_module", "is_colo_module", "get_colo_module", "init_colo_module", "check_colo_module", "ColoLinear", "ColoEmbedding", "CachedEmbeddingBag", "ParallelCachedEmbeddingBag", "CachedParamMgr", "LimitBuffIndexCopyer", "EvictionStrategy", "ParallelCachedEmbeddingBagTablewise", "TablewiseEmbeddingBagConfig", "ParallelCachedEmbeddingBagTablewiseSpiltCache", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/colo_module.py
colossalai/legacy/nn/parallel/layers/colo_module.py
from typing import Dict, List from colossalai.legacy.tensor import ComputePattern from colossalai.legacy.tensor.distspec import _DistSpec class ColoModule(object): def __init__(self): self._shard_params: List[str] = [] self._allowed_patterns: Dict[ComputePattern, Dict[str, Dict[str, _DistSpec]]] = {} def _register_shard_params(self, params: List[str]): self._shard_params = params def _register_allowed_patterns( self, compute_pattern: ComputePattern, dist_specs: Dict[str, _DistSpec], mode="default" ): assert ( list(dist_specs.keys()).sort() == self._shard_params.sort() ), "Every registered param should have dist_spec." if not compute_pattern in self._allowed_patterns: self._allowed_patterns[compute_pattern] = {} self._allowed_patterns[compute_pattern][mode] = dist_specs def _set_default(self, compute_pattern: ComputePattern, target_mode): self._allowed_patterns[compute_pattern]["default"] = self._allowed_patterns[compute_pattern][target_mode] def has_compute_pattern(self, compute_pattern: ComputePattern): return compute_pattern in self._allowed_patterns def get_dist_specs(self, compute_pattern: ComputePattern): assert self.has_compute_pattern(compute_pattern) return self._allowed_patterns[compute_pattern] def has_compute_pattern_with_mode(self, compute_pattern: ComputePattern, mode="default"): return compute_pattern in self._allowed_patterns and mode in self._allowed_patterns[compute_pattern] def get_dist_specs_with_mode(self, compute_pattern: ComputePattern, mode="default"): assert self.has_compute_pattern_with_mode(compute_pattern, mode) return self._allowed_patterns[compute_pattern][mode] def get_param_names(self): return self._shard_params def register(self, compute_pattern, pg): raise NotImplementedError
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/embedding.py
colossalai/legacy/nn/parallel/layers/embedding.py
from colossalai.legacy.tensor import ComputePattern, ProcessGroup, ShardSpec from .colo_module import ColoModule class ColoEmbedding(ColoModule): def __init__(self): super(ColoEmbedding, self).__init__() self._register_shard_params(["weight"]) def register(self, compute_pattern, pg: ProcessGroup): if not compute_pattern in self._allowed_patterns: if ComputePattern.TP1D == compute_pattern: self._set_TP1D(pg) def _set_TP1D(self, pg: ProcessGroup): # TP1D Row Linear _compute_pattern = ComputePattern.TP1D self._register_allowed_patterns( compute_pattern=_compute_pattern, dist_specs={ "weight": ShardSpec([0], [pg.tp_world_size()]), }, mode="row", ) # TP1D Col Linear self._register_allowed_patterns( compute_pattern=_compute_pattern, dist_specs={ "weight": ShardSpec([-1], [pg.tp_world_size()]), }, mode="col", ) self._set_default(compute_pattern=_compute_pattern, target_mode="row")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/linear.py
colossalai/legacy/nn/parallel/layers/linear.py
from colossalai.legacy.tensor import ComputePattern, ProcessGroup, ShardSpec from .colo_module import ColoModule class ColoLinear(ColoModule): def __init__(self): super(ColoLinear, self).__init__() self._register_shard_params(["weight", "bias"]) def register(self, compute_pattern, pg: ProcessGroup): if not compute_pattern in self._allowed_patterns: if ComputePattern.TP1D == compute_pattern: self._set_TP1D(pg) def _set_TP1D(self, pg): # TP1D Row Linear _compute_pattern = ComputePattern.TP1D self._register_allowed_patterns( compute_pattern=_compute_pattern, dist_specs={"weight": ShardSpec([-1], [pg.tp_world_size()]), "bias": None}, mode="row", ) # TP1D Col Linear self._register_allowed_patterns( compute_pattern=_compute_pattern, dist_specs={"weight": ShardSpec([0], [pg.tp_world_size()]), "bias": ShardSpec([0], [pg.tp_world_size()])}, mode="col", ) self._set_default(compute_pattern=_compute_pattern, target_mode="row")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/parallel_cached_embedding.py
colossalai/legacy/nn/parallel/layers/cache_embedding/parallel_cached_embedding.py
from typing import List, Optional, Tuple import torch import torch.nn.functional as F from colossalai.legacy.nn._ops._utils import dual_all_to_all from colossalai.legacy.tensor import ColoTensorSpec, ComputePattern, ProcessGroup, ShardSpec from colossalai.tensor import ColoTensor from .cache_mgr import EvictionStrategy from .cached_embedding import CachedEmbeddingBag def get_partition(embedding_dim, rank, world_size) -> Tuple[int, int, bool]: if world_size == 1: return 0, embedding_dim, True assert embedding_dim >= world_size, ( f"Embedding dimension {embedding_dim} must be larger than the world size " f"{world_size} of the process group" ) chunk_size = embedding_dim // world_size threshold = embedding_dim % world_size # if embedding dim is divisible by world size if threshold == 0: return rank * chunk_size, (rank + 1) * chunk_size, True # align with the split strategy of torch.tensor_split size_list = [chunk_size + 1 if i < threshold else chunk_size for i in range(world_size)] offset = sum(size_list[:rank]) return offset, offset + size_list[rank], False class ParallelCachedEmbeddingBag(CachedEmbeddingBag): def __init__( self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, mode="mean", include_last_offset=False, dtype=None, device=None, cache_ratio=0.01, ids_freq_mapping=None, warmup_ratio=0.7, buffer_size=50_000, pin_weight=False, evict_strategy: EvictionStrategy = EvictionStrategy.DATASET, ): self.rank = torch.distributed.get_rank() self.world_size = torch.distributed.get_world_size() self.partition_start_index, self.partition_end_index, divisible = get_partition( embedding_dim, self.rank, self.world_size ) self.embedding_dim_per_partition = self.partition_end_index - self.partition_start_index super(ParallelCachedEmbeddingBag, self).__init__( num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight, mode, include_last_offset, dtype, 
device, cache_ratio, ids_freq_mapping, warmup_ratio, buffer_size, pin_weight, evict_strategy, ) self.cache_op = True def _weight_alloc(self, dtype, device): weight = torch.empty(self.num_embeddings, self.embedding_dim_per_partition, device=device, dtype=dtype) with torch.no_grad(): weight.data.uniform_(-1 / self.num_embeddings, 1 / self.num_embeddings) if self.padding_idx is not None: weight[self.padding_idx].fill_(0) colo_tensor_spec = ColoTensorSpec( pg=ProcessGroup(tp_degree=self.world_size), dist_attr=ShardSpec(dims=[-1], num_partitions=[self.world_size]), compute_attr=ComputePattern.TP1D, ) return ColoTensor.from_torch_tensor(weight, spec=colo_tensor_spec) def forward( self, indices, offsets=None, per_sample_weights=None, shape_hook=None, scatter_dim=0, gather_dim=-1, ): if self.cache_op: with torch.no_grad(): indices = self.cache_weight_mgr.prepare_ids(indices) output_shard = F.embedding_bag( indices.cuda(), self.cache_weight_mgr.cuda_cached_weight, offsets, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse, per_sample_weights, self.include_last_offset, self.padding_idx, ) if shape_hook is not None: output_shard = shape_hook(output_shard) output_full = dual_all_to_all( output_shard, self.weight.get_process_group(), scatter_dim=scatter_dim, gather_dim=gather_dim ) return output_full def set_cache_op(self, cache_op: bool = True): self.cache_op = cache_op @classmethod def from_pretrained( cls, embedding: torch.Tensor, freeze: bool = True, padding_idx: Optional[int] = None, max_norm: Optional[float] = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, mode: str = "mean", include_last_offset: bool = False, cuda_row_num: int = 100_000, ids_freq_mapping: Optional[List[int]] = None, warmup_ratio: float = 0.7, buffer_size: int = 0, ) -> "ParallelCachedEmbeddingBag": rows, cols = embedding.shape embedding_bag = cls( rows, cols, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, embedding, mode, 
include_last_offset, cuda_row_num=cuda_row_num, ids_freq_mapping=ids_freq_mapping, warmup_ratio=warmup_ratio, buffer_size=buffer_size, ) embedding_bag.cache_weight_mgr.cuda_cached_weight.requires_grad_ = not freeze return embedding_bag def print_comm_stats_(self): self.cache_weight_mgr.print_comm_stats() def element_size(self): return self.weight.element_size()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/cached_embedding.py
colossalai/legacy/nn/parallel/layers/cache_embedding/cached_embedding.py
from typing import Iterator, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch.nn.parameter import Parameter from .base_embedding import BaseEmbeddingBag from .cache_mgr import CachedParamMgr, EvictionStrategy class CachedEmbeddingBag(BaseEmbeddingBag): """CachedEmbeddingBag Cached Embedding. Apply a GPU-based software cache approaches to dynamically manage the embedding table in the CPU and GPU memory space. It can leverage the id's frequency statistics of the target dataset, by passing a frequency list to param `ids_freq_mapping`. You can also apply a naive LFU cache eviction strategy by setting `evict_strategy` as EvictionStrategy.LFU. Args: num_embeddings (int): size of the dictionary of embeddings embedding_dim (int): the size of each embedding vector padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed “pad”. For a newly constructed EmbeddingBag, the embedding vector at padding_idx will default to all zeros, but can be updated to another value to be used as the padding vector. Note that the embedding vector at padding_idx is excluded from the reduction. max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm norm_type (str, optional): The p of the p-norm to compute for the max_norm option. Defaults to 2. scale_grad_by_freq (bool, optional): if given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False. Note: this option is not supported when mode="max". Defaults to False. sparse (bool, optional): if True, gradient w.r.t. weight matrix will be a sparse tensor. See Notes for more details regarding sparse gradients. Note: this option is not supported when mode="max".. Defaults to False. _weight (torch.Tensor, optional): an embedding weight tensor. 
Concatenate multiple tables in a embedding bag as a single one. Defaults to None. mode (str, optional): "sum", "mean" or "max". Specifies the way to reduce the bag. "sum" computes the weighted sum, taking per_sample_weights into consideration. "mean" computes the average of the values in the bag, "max" computes the max value over each bag. Default: "mean". Defaults to 'mean'. include_last_offset (bool, optional): if True, offsets has one additional element, where the last element is equivalent to the size of indices. This matches the CSR format.. Defaults to False. dtype (torch.dtype, optional): data type of the cpu weight initialization. Defaults to None meaning float32. device (torch.device, optional): device type to the cpu weight. Defaults to None meaning cpu. cache_ratio (float, float): cache ratio of the #cuda_weight_row / #cpu_weight_row ids_freq_mapping (Union[List, torch.Tensor], optional): the frequency of each embedding vector occurs in dataset. Defaults to None. warmup_ratio (float, optional): the ratio of cuda cache is warmuped with. Defaults to 0.7. buffer_size (int, optional): the max number of vectors in transmitter buffer. If set to 0, the buffer is not used. Defaults to 0. pin_weight (bool, optional): pin the cpu weight. Defaults to False. evict_strategy (EvictionStrategy, optional): evict strategy of the software cache. Defaults to EvictionStrategy.DATASET. 
""" def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: int = None, max_norm: float = None, norm_type: float = 2.0, scale_grad_by_freq: bool = False, sparse: bool = False, _weight: Optional[torch.Tensor] = None, mode: str = "mean", include_last_offset: bool = False, dtype: Optional[torch.dtype] = None, device: Optional[torch.device] = None, cache_ratio: float = 0.01, ids_freq_mapping: Optional[Union[List, torch.Tensor]] = None, warmup_ratio: float = 0.7, buffer_size: int = 0, pin_weight: bool = False, evict_strategy: EvictionStrategy = EvictionStrategy.LFU, ): super(CachedEmbeddingBag, self).__init__( num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, mode, include_last_offset, ) assert cache_ratio <= 1.0, f"cache ratio {cache_ratio} must less than 1.0" self.evict_strategy = evict_strategy if _weight is None: _weight = self._weight_alloc(dtype, device) cuda_row_num = int(num_embeddings * cache_ratio) # configure weight & cache self._preprocess(_weight, cuda_row_num, ids_freq_mapping, warmup_ratio, buffer_size, pin_weight) self.cache_op = True def set_cache_mgr_async_copy(self, flag): self.cache_weight_mgr._async_copy = flag def _weight_alloc(self, dtype, device): weight = torch.empty(self.num_embeddings, self.embedding_dim, dtype=dtype, device=device) with torch.no_grad(): weight.data.uniform_(-1 / self.num_embeddings, 1 / self.num_embeddings) if self.padding_idx is not None: weight[self.padding_idx].fill_(0) return weight def _preprocess( self, weight, cuda_row_num: int, ids_freq_mapping: Optional[List[int]] = None, warmup_ratio=0.7, buffer_size=50_000, pin_weight=False, ): """ Called after initialized. Reorder the weight rows according to the ids_freq_mapping. Then, let the weights of the Module be managed by a CachedParamMgr. 
Args: cuda_row_num (int): number of rows can be hosted in CUDA memory ids_freq_mapping (List[int]): a list, idx is id number, value is freq warmup_ratio (float): the amount of rows preloaded in cuda cache """ self.cache_weight_mgr = CachedParamMgr( weight, cuda_row_num, buffer_size, pin_weight, evict_strategy=self.evict_strategy ) self.cache_weight_mgr.reorder(ids_freq_mapping, warmup_ratio) def forward(self, input, offsets=None, per_sample_weights=None, shape_hook=None): if self.cache_op: with torch.no_grad(): input = self.cache_weight_mgr.prepare_ids(input) embeddings = F.embedding_bag( input.cuda(), self.cache_weight_mgr.cuda_cached_weight, offsets, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse, per_sample_weights, self.include_last_offset, self.padding_idx, ) if shape_hook is not None: embeddings = shape_hook(embeddings) return embeddings @property def weight(self): return self.cache_weight_mgr.weight def named_parameters(self, prefix: str = "", recurse: bool = True) -> Iterator[Tuple[str, Parameter]]: yield "weight", self.cache_weight_mgr.cuda_cached_weight def parameters(self, recurse: bool = True) -> Iterator[Parameter]: yield self.cache_weight_mgr.cuda_cached_weight def set_cache_op(self, cache_op: bool = True): self.cache_op = cache_op ############################# Perf Log ################################### @property def num_hits_history(self): return self.cache_weight_mgr.num_hits_history @property def num_miss_history(self): return self.cache_weight_mgr.num_miss_history @property def num_write_back_history(self): return self.cache_weight_mgr.num_write_back_history @property def swap_in_bandwidth(self): if self.cache_weight_mgr._cpu_to_cuda_numel > 0: return ( self.cache_weight_mgr._cpu_to_cuda_numel * self.cache_weight_mgr.elem_size_in_byte / 1e6 / self.cache_weight_mgr._cpu_to_cuda_elapse ) else: return 0 @property def swap_out_bandwidth(self): if self.cache_weight_mgr._cuda_to_cpu_numel > 0: return ( 
self.cache_weight_mgr._cuda_to_cpu_numel * self.cache_weight_mgr.elem_size_in_byte / 1e6 / self.cache_weight_mgr._cuda_to_cpu_elapse ) return 0
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/copyer.py
colossalai/legacy/nn/parallel/layers/cache_embedding/copyer.py
import torch from torch import LongTensor class LimitBuffIndexCopyer(object): """LimitBuffIndexCopyer Index Copy using limited temp buffer on CUDA. Args: size (int): buffer size """ def __init__(self, size: int) -> None: self._buff_size = size @torch.no_grad() def index_copy(self, dim: int, src_index: LongTensor, tgt_index: LongTensor, src: torch.Tensor, tgt: torch.Tensor): """copy src tensor[src_index] -(index_select)-> tmp -(index_copy_)-> tgt tensor [tgt_index] The valid rows in the src tensor are continuous, while rows in tgt tensor is scattered. Args: dim (int): dimension along which to index src_index (int): indices of src tensor to select from tgt_index (int): indices of tgt tensor to select from src (torch.Tensor): the tensor containing values to copy tgt (torch.Tensor): the tensor to be copied """ # tgt.index_copy_(dim, index, src) assert dim == 0, "only support index_copy on dim 0" assert tgt.dim() == 2 assert src.dim() == 2 tgt_device = tgt.device src_device = src.device assert src_index.numel() == tgt_index.numel() dim_size = src_index.numel() src_index = src_index.to(src_device) for begin_pos in range(0, dim_size, self._buff_size): cur_len = min(self._buff_size, dim_size - begin_pos) src_idx_piece = src_index.narrow(0, begin_pos, cur_len) if src_device.type == "cpu" and tgt_device.type == "cuda": cpu_tmp_buffer = src.index_select(dim, src_idx_piece).pin_memory() tmp_buffer = torch.empty_like(cpu_tmp_buffer, device=tgt_device) tmp_buffer.copy_(cpu_tmp_buffer) else: tmp_buffer = src.index_select(dim, src_idx_piece).to(tgt_device) tgt_idx_piece = tgt_index.narrow(0, begin_pos, cur_len) tgt.index_copy_(dim, tgt_idx_piece, tmp_buffer)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/cache_mgr.py
colossalai/legacy/nn/parallel/layers/cache_embedding/cache_mgr.py
import sys
from contextlib import contextmanager
from enum import Enum
from typing import List, Optional

import numpy as np
import torch
from contexttimer import Timer
from torch.profiler import record_function

from .copyer import LimitBuffIndexCopyer


class EvictionStrategy(Enum):
    # classic least-frequently-used eviction, tracked per cached row
    LFU = 1
    # dataset aware eviction strategy
    DATASET = 2


def _wait_for_data(t, stream: Optional[torch.cuda.streams.Stream]) -> None:
    # Block the current stream until `stream` finishes producing `t`,
    # then record `t` on the current stream so the caching allocator
    # does not recycle its memory while it is still in use here.
    if stream is None:
        return
    torch.cuda.current_stream().wait_stream(stream)
    # As mentioned in https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html,
    # PyTorch uses the "caching allocator" for memory allocation for tensors. When a tensor is
    # freed, its memory is likely to be reused by newly constructed tensors. By default,
    # this allocator traces whether a tensor is still in use by only the CUDA stream where it
    # was created. When a tensor is used by additional CUDA streams, we need to call record_stream
    # to tell the allocator about all these streams. Otherwise, the allocator might free the
    # underlying memory of the tensor once it is no longer used by the creator stream. This is
    # a notable programming trick when we write programs using multi CUDA streams.
    cur_stream = torch.cuda.current_stream()
    assert isinstance(t, torch.Tensor)
    t.record_stream(cur_stream)


class CachedParamMgr(torch.nn.Module):
    """
    Manage Embedding Weights on CPU and CUDA memory uses a software cache.
    CPU maintains the entire original weight.
    CUDA maintains a fraction of the weights used in the upcoming computation. The row number in CUDA is controlled by `cuda_row_num`.
    During training, GPU needs to transmit embedding rows between CPU and GPU.
    Args:
        weight (torch.Tensor): the weight of the Embedding layer.
        cuda_row_num (int, optional): the number of rows cached in CUDA memory. Defaults to 0.
        buffer_size (int, optional): the number of rows in a data transmitter buffer. Defaults to 50_000.
        pin_weight (bool, optional): use pin memory to store the cpu weight. If set `True`, the cpu memory usage will increase largely. Defaults to False.
        evict_strategy (EvictionStrategy, optional): the eviction strategy. There are two options.
        `EvictionStrategy.LFU`: use the least frequently used cache.
        `EvictionStrategy.DATASET`: use the stats collected from the target dataset. It usually leads to less cpu-gpu communication volume.
        Defaults to EvictionStrategy.DATASET.
    """

    def __init__(
        self,
        weight: torch.Tensor,
        cuda_row_num: int = 0,
        buffer_size: int = 0,
        pin_weight: bool = True,
        evict_strategy: EvictionStrategy = EvictionStrategy.DATASET,
        async_copy: bool = False,
    ) -> None:
        super(CachedParamMgr, self).__init__()
        self.buffer_size = buffer_size
        # assumes weight is 2-D: (num_embeddings, embedding_dim)
        self.num_embeddings, self.embedding_dim = weight.shape
        self.cuda_row_num = cuda_row_num
        self._cuda_available_row_num = self.cuda_row_num
        self.pin_weight = pin_weight
        self.elem_size_in_byte = weight.element_size()

        # weight configure
        self._init_weight(weight)

        # Perf log
        self.num_hits_history = []
        self.num_miss_history = []
        self.num_write_back_history = []

        self._evict_strategy = evict_strategy

        self._async_copy = async_copy

        if self._async_copy:
            # dedicated stream used to overlap CPU->GPU copies with compute
            self._memcpy_stream = torch.cuda.Stream()

            print("use async copy")

        if self._evict_strategy == EvictionStrategy.LFU:
            # cache_row_idx -> frequency, freq of the cache rows.
            # classic lfu cache. evict the minimal freq value row in cuda cache.
            # sys.maxsize marks an empty/unused slot so it is never picked by topk(largest=False)
            self.register_buffer(
                "freq_cnter",
                torch.empty(self.cuda_row_num, device=torch.cuda.current_device(), dtype=torch.long).fill_(sys.maxsize),
                persistent=False,
            )
        self._elapsed_dict = {}
        self._show_cache_miss = True
        self._reset_comm_stats()

    def _reset_comm_stats(self):
        # zero all per-phase timers and CPU<->GPU traffic counters
        for k in self._elapsed_dict.keys():
            self._elapsed_dict[k] = 0

        self._cpu_to_cuda_numel = 0
        self._cuda_to_cpu_numel = 0
        if self._show_cache_miss:
            self._cache_miss = 0
            self._total_cache = 0

    @contextmanager
    def timer(self, name):
        # Accumulate wall time of the wrapped region under `name`.
        # The synchronize makes the measurement include pending CUDA work.
        with Timer() as t:
            yield
            torch.cuda.synchronize()
            if name not in self._elapsed_dict.keys():
                self._elapsed_dict[name] = 0
            self._elapsed_dict[name] += t.elapsed

    def _find_evict_gpu_idxs(self, evict_num: int) -> torch.Tensor:
        """_find_evict_gpu_idxs
        Find the gpu idxs to be evicted, according to their freq.
        Args:
            evict_num (int): how many rows has to be evicted
        Returns:
            torch.Tensor: a list tensor (1D), contains the gpu_row_idxs.
        """
        if self._evict_strategy == EvictionStrategy.LFU:
            # find the minimal evict_num freq entries in cached_idx_map
            _, evict_gpu_row_idxs = torch.topk(self.freq_cnter, evict_num, largest=False)
            return evict_gpu_row_idxs
        elif self._evict_strategy == EvictionStrategy.DATASET:
            # cached_idx_map itself implies the priority of eviction.
            # The value of self.cached_idx_map represents cpu_row_idx.
            # The larger it is, the less frequently it will appear in the dataset,
            # and the higher its eviction priority will be.
            _, evict_gpu_row_idxs = torch.topk(self.cached_idx_map, evict_num, largest=True)
            return evict_gpu_row_idxs
        else:
            raise TypeError

    def _init_weight(self, weight):
        # Set up the CUDA cache tensor and the bookkeeping buffers that map
        # between original ids, cpu rows and cached gpu rows.
        if self.cuda_row_num > 0:
            # Enable cache with introducing auxiliary data structures
            self.cuda_cached_weight = torch.nn.Parameter(
                torch.zeros(
                    self.cuda_row_num, self.embedding_dim, device=torch.cuda.current_device(), dtype=weight.dtype
                )
            )

            # pin memory cpu for higher CPU-GPU copy bandwidth
            self.weight = weight.pin_memory() if self.pin_weight else weight
            # map original id to new id with respect to frequency
            # id -> cpu_row_idx
            self.register_buffer(
                "idx_map",
                torch.arange(self.num_embeddings, dtype=torch.long, device=torch.cuda.current_device()),
                persistent=False,
            )

            # cached_idx_map: gpu_row_idx -> cpu_row_idx
            self.register_buffer(
                "cached_idx_map",
                torch.empty(self.cuda_row_num, device=torch.cuda.current_device(), dtype=torch.long).fill_(-1),
                persistent=False,
            )

            # cpu_row_id -> gpu_row_idx.
            # gpu_row_idx as -1 means cpu_row_id not in CUDA.
            self.register_buffer(
                "inverted_cached_idx",
                torch.zeros(self.num_embeddings, device=torch.cuda.current_device(), dtype=torch.long).fill_(-1),
                persistent=False,
            )

            self.evict_backlist = torch.tensor([], device=torch.cuda.current_device())

            # index copy buffer size should less than 10% of cuda weight.
            if self.buffer_size > 0:
                self.limit_buff_index_copyer = LimitBuffIndexCopyer(self.buffer_size)

        else:
            # Disable cache so that FreqCacheEmbedding is compatible with vanilla EmbeddingBag
            # self.weight = torch.nn.Parameter(weight)
            # self.cuda_cached_weight = self.weight
            raise NotImplementedError()

    def cpu_weight_data(self, row_idx: int) -> torch.Tensor:
        """
        access a row of CPU weight.
        Args:
            row_idx (int): the idx of rows
        Returns:
            torch.Tensor: a piece of memory in CPU weight corresponding to row id's payload. The tensor is 1-D.
        """

        return (
            self.weight.data.view(-1)
            .narrow(0, int(row_idx) * self.embedding_dim, self.embedding_dim)
            .view(1, self.embedding_dim)
        )

    @property
    def cuda_available_row_num(self):
        # number of currently free slots in the CUDA cache
        return self._cuda_available_row_num

    @torch.no_grad()
    def reorder(self, ids_freq_mapping: Optional[List[int]] = None, warmup_ratio=0.7):
        """reorder
        reorder the weight according to ids' frequency in dataset before training.
        Execute only once before training, also known as warmup phase.

        Note:
            If you would like to use the DATASET as the eviction strategy, you must call this function.
        Note:
            If you are use the LFU as the eviction strategy, you can skip this function. If you still use this function. It will initialize
            The frequency in LFU cache using the dataset statistics.
        Args:
            ids_freq_mapping (List[int]): a list, whose offset is id number, value is freq. if None then not reorder the cpu weight.
            warmup_ratio (float): the amount of chunks preloaded in cuda cache
        """
        # reorder phase: reorder the cpu weight according to their freq stats in the target dataset.
        # reorder only works for DATASET eviction strategy.

        if ids_freq_mapping is not None and not isinstance(ids_freq_mapping, torch.Tensor):
            ids_freq_mapping = torch.tensor(ids_freq_mapping)

        if self._evict_strategy == EvictionStrategy.DATASET:
            if ids_freq_mapping is not None:
                # double argsort yields the rank of each id by frequency; idx_map then
                # sends an original id to its frequency rank (most frequent -> 0)
                tmp_idx = torch.argsort(ids_freq_mapping, descending=True)
                sorted_idx = torch.argsort(tmp_idx)
                self.idx_map.data.copy_(sorted_idx)

        # warmup phase: copy #preload_row_num rows from cpu to gpu.
        preload_row_num = min(int(np.ceil(self.cuda_row_num * warmup_ratio)), self.num_embeddings)
        if preload_row_num > 0:
            with Timer() as timer:
                # extract rows from cpu weight
                if self._evict_strategy == EvictionStrategy.LFU and ids_freq_mapping is not None:
                    freq_value, preload_cpu_ids = torch.topk(ids_freq_mapping, preload_row_num, dim=0, largest=True)
                    preload_cuda_row_idxs = torch.arange(preload_row_num).cuda()
                else:
                    preload_cpu_ids = torch.arange(preload_row_num)
                    preload_cuda_row_idxs = preload_cpu_ids.cuda()
                if self.buffer_size > 0:
                    self.limit_buff_index_copyer.index_copy(
                        0,
                        src_index=preload_cpu_ids,
                        tgt_index=preload_cuda_row_idxs,
                        src=self.weight.view(self.num_embeddings, -1),
                        tgt=self.cuda_cached_weight.view(self.cuda_row_num, -1),
                    )
                else:
                    preload_rows = self.weight.view(self.num_embeddings, -1).index_select(0, preload_cpu_ids).cuda()
                    self.cuda_cached_weight.view(self.cuda_row_num, -1).index_copy_(
                        0, preload_cuda_row_idxs, preload_rows
                    )

                # update auxiliary info
                self.cached_idx_map[preload_cuda_row_idxs] = preload_cpu_ids.cuda()
                self.inverted_cached_idx[preload_cpu_ids] = preload_cuda_row_idxs
                self._cuda_available_row_num -= preload_row_num

                if self._evict_strategy == EvictionStrategy.LFU:
                    # if the ids_freq_mapping is not None, we initialize the embedding row's freq value in LFU as its freq in dataset.
                    if ids_freq_mapping is None:
                        self.freq_cnter.index_fill_(0, preload_cuda_row_idxs, 0)
                    else:
                        self.freq_cnter[preload_cuda_row_idxs] = freq_value.cuda()

            print(f"Cache warmup finished cost {timer.elapsed} sec.")

    def flush(self):
        """flush all CUDA rows to CPU.
        The function is usually called after training finished.
        """
        slots = torch.nonzero(self.cached_idx_map > -1).squeeze(1)
        row_ids = self.cached_idx_map[slots]
        rows = self.cuda_cached_weight.view(self.cuda_row_num, -1).index_select(0, slots).cpu()
        self.weight.view(self.num_embeddings, -1).index_copy_(0, row_ids.cpu(), rows)
        # reset bookkeeping: every cache slot becomes free again
        self.cached_idx_map.index_fill_(0, slots, -1)
        self.inverted_cached_idx.index_fill_(0, row_ids, -1)
        self._cuda_available_row_num += slots.numel()

        if self._show_cache_miss:
            self._cache_miss = 0
            self._total_cache = 0

        if self._evict_strategy == EvictionStrategy.LFU:
            self.freq_cnter.fill_(sys.maxsize)
        assert self._cuda_available_row_num == self.cuda_row_num
        assert torch.all(self.inverted_cached_idx == -1).item()
        assert torch.all(self.cached_idx_map == -1).item()

    def print_comm_stats(self):
        # Report effective CPU<->GPU bandwidth, traffic volume, per-phase timers
        # and the overall cache miss ratio collected so far.
        if self._cuda_to_cpu_numel > 0 and "3_evict_out" in self._elapsed_dict:
            elapsed = self._elapsed_dict["3_evict_out"]
            print(
                f"CUDA->CPU BWD {self._cuda_to_cpu_numel * self.elem_size_in_byte / 1e6 / elapsed} MB/s {self._cuda_to_cpu_numel / 1e6} M elem"
            )
            print(f"cuda_to_cpu_elapse {elapsed} sec")
        if self._cpu_to_cuda_numel > 0 and "5_evict_in" in self._elapsed_dict:
            elapsed = self._elapsed_dict["5_evict_in"]
            print(
                f"CPU->CUDA BWD {self._cpu_to_cuda_numel * self.elem_size_in_byte / 1e6 / elapsed} MB/s {self._cpu_to_cuda_numel / 1e6} M elem"
            )
            print(f"cpu_to_cuda_elapse {elapsed} sec")

        for k, v in self._elapsed_dict.items():
            print(f"{k}: {v}")

        print(f"cache miss ratio {self._cache_miss / self._total_cache}")

    @torch.no_grad()
    def _id_to_cached_cuda_id(self, ids: torch.Tensor) -> torch.Tensor:
        """
        convert ids to indices in self.cuda_cached_weight.
        Implemented with parallel operations on GPU.
        Args:
            ids (torch.Tensor): ids from the dataset
        Returns:
            torch.Tensor: contains indices in self.cuda_cached_weight
        """
        ids = self.idx_map.index_select(0, ids.view(-1))
        ret = self.inverted_cached_idx.index_select(0, ids)
        return ret

    @torch.no_grad()
    def prepare_ids(self, ids: torch.Tensor) -> torch.Tensor:
        """
        move the cpu embedding rows w.r.t. ids into CUDA memory
        Args:
            ids (torch.Tensor): the ids to be computed
        Returns:
            torch.Tensor: indices on the cuda_cached_weight.
        """
        torch.cuda.synchronize()
        with self.timer("cache_op") as gtimer:
            # identify cpu rows to cache
            with self.timer("1_identify_cpu_row_idxs") as timer:
                with record_function("(cache) get unique indices"):
                    if self._evict_strategy == EvictionStrategy.LFU:
                        cpu_row_idxs, repeat_times = torch.unique(ids, return_counts=True)
                    else:
                        cpu_row_idxs, repeat_times = torch.unique(self.idx_map.index_select(0, ids), return_counts=True)

                    assert len(cpu_row_idxs) <= self.cuda_row_num, (
                        f"You move {len(cpu_row_idxs)} embedding rows from CPU to CUDA. "
                        f"It is larger than the capacity of the cache, which at most contains {self.cuda_row_num} rows, "
                        f"Please increase cuda_row_num or decrease the training batch size."
                    )
                    self.evict_backlist = cpu_row_idxs
                    # rows requested this step but not yet in the CUDA cache
                    tmp = torch.isin(cpu_row_idxs, self.cached_idx_map, invert=True)
                    comm_cpu_row_idxs = cpu_row_idxs[tmp]

                    if self._show_cache_miss:
                        self._cache_miss += torch.sum(repeat_times[tmp])
                        self._total_cache += ids.numel()

            self.num_hits_history.append(len(cpu_row_idxs) - len(comm_cpu_row_idxs))
            self.num_miss_history.append(len(comm_cpu_row_idxs))
            self.num_write_back_history.append(0)

            # make sure the cuda rows will not be evicted!
            with record_function("(cache) prepare_rows_on_cuda"):
                with self.timer("prepare_rows_on_cuda") as timer:
                    self._prepare_rows_on_cuda(comm_cpu_row_idxs)

            self.evict_backlist = torch.tensor([], device=cpu_row_idxs.device, dtype=cpu_row_idxs.dtype)

            with self.timer("6_update_cache") as timer:
                with record_function("6_update_cache"):
                    gpu_row_idxs = self._id_to_cached_cuda_id(ids)

                    # update for LFU.
                    if self._evict_strategy == EvictionStrategy.LFU:
                        unique_gpu_row_idxs = self.inverted_cached_idx[cpu_row_idxs]
                        self.freq_cnter.scatter_add_(0, unique_gpu_row_idxs, repeat_times)

        return gpu_row_idxs

    def _row_in_cuda(self, row_id: int) -> bool:
        # a row is cached iff its inverted index entry is a valid slot (not -1)
        return self.inverted_cached_idx[row_id] != -1

    @torch.no_grad()
    def _prepare_rows_on_cuda(self, cpu_row_idxs: torch.Tensor) -> None:
        """prepare rows in cpu_row_idxs on CUDA memory
        Args:
            cpu_row_idxs (torch.Tensor): the rows to be placed on CUDA
        """
        evict_num = cpu_row_idxs.numel() - self.cuda_available_row_num

        cpu_row_idxs_copy = cpu_row_idxs.cpu()

        # move evict in rows to gpu
        if self._async_copy:
            if self.buffer_size == 0:
                evict_in_rows_gpu = (
                    self.weight.view(self.num_embeddings, -1).index_select(0, cpu_row_idxs_copy).pin_memory()
                )
                with torch.cuda.stream(self._memcpy_stream):
                    evict_in_rows_gpu = evict_in_rows_gpu.to(torch.cuda.current_device(), non_blocking=True)
            else:
                # NOTE(review): `raise NotImplemented` raises a TypeError in Python 3;
                # this was presumably meant to be `raise NotImplementedError` — confirm upstream.
                raise NotImplemented

        if evict_num > 0:
            with self.timer("2_identify_cuda_row_idxs") as timer:
                # rows that are needed by the current step must not be chosen as victims
                mask_cpu_row_idx = torch.isin(self.cached_idx_map, self.evict_backlist)
                invalid_idxs = torch.nonzero(mask_cpu_row_idx).squeeze(1)
                if self._evict_strategy == EvictionStrategy.DATASET:
                    # mask method.
                    # set cached_idx_map[invalid_idxs] to -2.
                    # so those idxs will be sorted to end, therefore not being chosen as victim
                    backup_idxs = self.cached_idx_map[mask_cpu_row_idx].clone()
                    self.cached_idx_map.index_fill_(0, invalid_idxs, -2)

                    with self.timer("2_1_find_evict_gpu_idxs") as timer:
                        evict_gpu_row_idxs = self._find_evict_gpu_idxs(evict_num)

                    # move evict out rows to cpu
                    if self._async_copy:
                        evict_out_rows_gpu = self.cuda_cached_weight.view(self.cuda_row_num, -1).index_select(
                            0, evict_gpu_row_idxs
                        )
                        evict_out_rows_cpu = torch.empty_like(evict_out_rows_gpu, device="cpu", pin_memory=True)
                        with torch.cuda.stream(None):
                            evict_out_rows_cpu.copy_(evict_out_rows_gpu, non_blocking=True)
                    self.cached_idx_map.index_copy_(0, invalid_idxs, backup_idxs)

                elif self._evict_strategy == EvictionStrategy.LFU:
                    # temporarily set protected rows' freq to maxsize so topk(largest=False)
                    # never selects them, then restore the real freqs afterwards
                    with self.timer("2_1_backup_freqs") as timer:
                        backup_freqs = self.freq_cnter[invalid_idxs].clone()
                        self.freq_cnter.index_fill_(0, invalid_idxs, sys.maxsize)

                    with self.timer("2_2_find_evict_gpu_idxs") as timer:
                        evict_gpu_row_idxs = self._find_evict_gpu_idxs(evict_num)

                    if self._async_copy:
                        evict_out_rows_gpu = self.cuda_cached_weight.view(self.cuda_row_num, -1).index_select(
                            0, evict_gpu_row_idxs
                        )
                        evict_out_rows_cpu = torch.empty_like(evict_out_rows_gpu, device="cpu", pin_memory=True)
                        with torch.cuda.stream(None):
                            evict_out_rows_cpu.copy_(evict_out_rows_gpu, non_blocking=True)

                    with self.timer("2_3_revert_freqs") as timer:
                        self.freq_cnter.index_copy_(0, invalid_idxs, backup_freqs)

                evict_info = self.cached_idx_map[evict_gpu_row_idxs]

            with self.timer("3_evict_out") as timer:
                if self.buffer_size > 0:
                    self.limit_buff_index_copyer.index_copy(
                        0,
                        src_index=evict_gpu_row_idxs,
                        tgt_index=evict_info.cpu(),
                        src=self.cuda_cached_weight.view(self.cuda_row_num, -1),
                        tgt=self.weight.view(self.num_embeddings, -1),
                    )
                else:
                    # allocate tmp memory on CPU and copy rows on CUDA to CPU.
                    # TODO async gpu -> cpu
                    if self._async_copy:
                        _wait_for_data(evict_out_rows_cpu, None)
                    else:
                        with self.timer("3_1_evict_out_index_select") as timer:
                            evict_out_rows_cpu = self.cuda_cached_weight.view(self.cuda_row_num, -1).index_select(
                                0, evict_gpu_row_idxs
                            )
                        with self.timer("3_2_evict_out_gpu_to_cpu_copy") as timer:
                            evict_out_rows_cpu = evict_out_rows_cpu.cpu()

                    with self.timer("3_2_evict_out_cpu_copy") as timer:
                        self.weight.view(self.num_embeddings, -1).index_copy_(0, evict_info.cpu(), evict_out_rows_cpu)

                self.cached_idx_map.index_fill_(0, evict_gpu_row_idxs, -1)
                self.inverted_cached_idx.index_fill_(0, evict_info, -1)
                # self.freq_cnter.index_fill(0, evict_gpu_row_idxs, sys.maxsize) # unnecessary
                self._cuda_available_row_num += evict_num

                weight_size = evict_gpu_row_idxs.numel() * self.embedding_dim
                self._cuda_to_cpu_numel += weight_size
                # print(f"evict embedding weight: {weight_size*self.elem_size_in_byte/1e6:.2f} MB")

        # slots of cuda weight to evict in
        with self.timer("4_identify_cuda_slot") as timer:
            slots = torch.nonzero(self.cached_idx_map == -1).squeeze(1)[: cpu_row_idxs.numel()]

        # TODO wait for optimize
        with self.timer("5_evict_in") as timer:
            # Here also allocate extra memory on CUDA. #cpu_row_idxs
            if self.buffer_size > 0:
                self.limit_buff_index_copyer.index_copy(
                    0,
                    src_index=cpu_row_idxs_copy,
                    tgt_index=slots,
                    src=self.weight.view(self.num_embeddings, -1),
                    tgt=self.cuda_cached_weight.view(self.cuda_row_num, -1),
                )
            else:
                if self._async_copy:
                    _wait_for_data(evict_in_rows_gpu, self._memcpy_stream)
                else:
                    with self.timer("5_1_evict_in_index_select") as timer:
                        # narrow index select to a subset of self.weight
                        # tmp = torch.narrow(self.weight.view(self.num_embeddings, -1), 0, min(cpu_row_idxs).cpu(), max(cpu_row_idxs) - min(cpu_row_idxs) + 1)
                        # evict_in_rows_gpu = tmp.index_select(0, cpu_row_idxs_copy - min(cpu_row_idxs).cpu())
                        evict_in_rows_gpu = (
                            self.weight.view(self.num_embeddings, -1).index_select(0, cpu_row_idxs_copy).pin_memory()
                        )

                    with self.timer("5_2_evict_in_gpu_to_cpu_copy") as timer:
                        evict_in_rows_gpu = evict_in_rows_gpu.cuda()

                with self.timer("5_3_evict_in_index_copy") as timer:
                    self.cuda_cached_weight.view(self.cuda_row_num, -1).index_copy_(0, slots, evict_in_rows_gpu)

        with self.timer("6_update_cache") as timer:
            self.cached_idx_map[slots] = cpu_row_idxs
            self.inverted_cached_idx.index_copy_(0, cpu_row_idxs, slots)
            if self._evict_strategy == EvictionStrategy.LFU:
                self.freq_cnter.index_fill_(0, slots, 0)
            self._cuda_available_row_num -= cpu_row_idxs.numel()

        weight_size = cpu_row_idxs.numel() * self.embedding_dim
        self._cpu_to_cuda_numel += weight_size
        # print(f"admit embedding weight: {weight_size*self.elem_size_in_byte/1e6:.2f} MB")

    def _find_free_cuda_row(self) -> int:
        # return the first free cache slot, or -1 when the cache is full
        if self._cuda_available_row_num == 0:
            return -1
        candidates = torch.nonzero(self.cached_idx_map == -1).squeeze(1)
        return candidates[0].item()

    def _evict(self) -> int:
        """
        deprecated
        evict one row from cuda to cpu.
        Returns:
        (int) : the slot id be evicted.
        """
        mask = torch.logical_or(torch.isin(self.cached_idx_map, self.evict_backlist), self.cached_idx_map == -1)
        buf = self.cached_idx_map[mask].clone()
        idx = torch.nonzero(mask).squeeze(1)
        self.cached_idx_map.index_fill_(0, idx, -1)
        max_row, max_cpu_row_idx = torch.max(self.cached_idx_map, dim=0)
        # NOTE(review): despite its name, `max_gpu_row_idx` holds a cpu row id
        # (cached_idx_map values are cpu_row_idx) — verify naming upstream.
        max_gpu_row_idx = self.cached_idx_map[max_cpu_row_idx]

        if max_gpu_row_idx == -1:
            raise RuntimeError("Can not evict a row")

        max_gpu_row_idx = max_gpu_row_idx.item()
        max_offset = self.inverted_cached_idx[max_gpu_row_idx]
        # recover
        self.cached_idx_map.index_copy_(0, idx, buf)

        with Timer() as timer:
            cuda_tensor = torch.narrow(
                self.cuda_cached_weight.view(-1), 0, max_offset * self.embedding_dim, self.embedding_dim
            ).view(1, self.embedding_dim)
            self.cpu_weight_data(max_gpu_row_idx).data.copy_(cuda_tensor)

        # update inverted_cached_idx, min_slot_id is evicted from cuda
        self.cached_idx_map[max_cpu_row_idx] = -1
        if self._evict_strategy == EvictionStrategy.LFU:
            self.freq_cnter[max_cpu_row_idx] = sys.maxsize
        self.inverted_cached_idx[max_gpu_row_idx] = -1

        self._cuda_available_row_num += 1

        self._cuda_to_cpu_numel += self.embedding_dim
        # self.num_write_back_history[-1] += 1
        return max_cpu_row_idx

    @torch.no_grad()
    def _admit(self, row_id: int):
        """
        deprecated
        move in row_id to CUDA
        Args:
            row_id (int): the id of row to be moved in
        """
        # find a free slot in partial cuda weight
        slot_id = self._find_free_cuda_row()

        if slot_id == -1:
            # evict one row
            slot_id = self._evict()
        slot_offset = slot_id
        # copy payload from cpu to cuda
        with Timer() as timer:
            cuda_tensor = torch.narrow(
                self.cuda_cached_weight.view(-1), 0, slot_offset * self.embedding_dim, self.embedding_dim
            ).view(1, self.embedding_dim)
            cuda_tensor.data.copy_(self.cpu_weight_data(row_id))

        # update the inverted_cached_idx
        self.cached_idx_map[slot_id] = row_id
        if self._evict_strategy == EvictionStrategy.LFU:
            self.freq_cnter[slot_id] = 0
        self.inverted_cached_idx[row_id] = slot_offset

        self._cuda_available_row_num -= 1

        self._cpu_to_cuda_numel += self.embedding_dim
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/parallel_cached_embedding_tablewise_split_cache.py
colossalai/legacy/nn/parallel/layers/cache_embedding/parallel_cached_embedding_tablewise_split_cache.py
import abc
from typing import List

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.profiler import record_function

from colossalai.legacy.nn._ops._utils import dual_all_to_all_tablewise
from colossalai.legacy.tensor import ProcessGroup

from .cache_mgr import EvictionStrategy
from .cached_embedding import CachedEmbeddingBag
from .embedding_config import TablewiseEmbeddingBagConfig


class ParallelCachedEmbeddingBagTablewiseSpiltCache(abc.ABC, nn.Module):
    """
    every table assigned to this class instance is managed by a CachedEmbeddingBag.
    """

    def __init__(
        self,
        embedding_bag_config_list: List[TablewiseEmbeddingBagConfig],
        embedding_dim: int,
        padding_idx=None,
        max_norm=None,
        norm_type=2.0,
        scale_grad_by_freq=False,
        sparse=False,
        mode="mean",
        include_last_offset=False,
        dtype=None,
        device=None,
        warmup_ratio=0.7,
        pin_weight=False,
        evict_strategy: EvictionStrategy = EvictionStrategy.LFU,
    ):
        super(ParallelCachedEmbeddingBagTablewiseSpiltCache, self).__init__()
        self.rank = dist.get_rank()
        self.world_size = dist.get_world_size()
        # per-table placement and sizing metadata shared by every rank
        self.rank_of_tables = [config.assigned_rank for config in embedding_bag_config_list]
        self.global_table_num_embeddings_list = [config.num_embeddings for config in embedding_bag_config_list]
        self.global_tables_num = len(embedding_bag_config_list)
        # prefix sums: global_tables_offsets[t] is the first global id owned by table t
        self.global_tables_offsets = torch.cumsum(torch.tensor([0] + self.global_table_num_embeddings_list), 0).cuda()
        self.assigned_table_list: List[int] = []
        for i, rank in enumerate(self.rank_of_tables):
            if rank == self.rank:
                self.assigned_table_list.append(i)
        self.include_last_offset = include_last_offset
        self.pg = ProcessGroup(tp_degree=self.world_size)

        # prepare CachedEmbeddingBag list
        # one independent bag (each with its own software cache) per locally assigned table
        self.cached_embedding_bag_list: nn.ModuleList = nn.ModuleList()
        for config in embedding_bag_config_list:
            if config.assigned_rank != self.rank:
                continue
            self.cached_embedding_bag_list.append(
                CachedEmbeddingBag(
                    num_embeddings=config.num_embeddings,
                    embedding_dim=embedding_dim,
                    padding_idx=padding_idx,
                    max_norm=max_norm,
                    norm_type=norm_type,
                    scale_grad_by_freq=scale_grad_by_freq,
                    sparse=sparse,
                    _weight=config.initial_weight,
                    mode=mode,
                    include_last_offset=include_last_offset,
                    dtype=dtype,
                    device=device,
                    cuda_row_num=config.cuda_row_num,
                    ids_freq_mapping=config.ids_freq_mapping,
                    warmup_ratio=warmup_ratio,
                    buffer_size=config.buffer_size,
                    pin_weight=pin_weight,
                    evict_strategy=evict_strategy,
                )
            )

        # prepare list shape for all_to_all output
        self.embedding_dim_per_rank = [0 for i in range(self.world_size)]
        for rank in self.rank_of_tables:
            self.embedding_dim_per_rank[rank] += embedding_dim

    def forward(self, indices: torch.Tensor, offsets: torch.Tensor = None, per_sample_weights=None, shape_hook=None):
        # Slice the per-table portion of the flattened (indices, offsets) batch,
        # run each locally assigned bag, then exchange partial outputs via all_to_all.
        # assumes offsets is laid out as global_tables_num consecutive per-table
        # segments of equal batch_size — TODO confirm against the data loader.
        # determine indices to handle
        batch_size = (offsets.shape[0]) // self.global_tables_num
        local_output_list = []
        for i, handle_table in enumerate(self.assigned_table_list):
            with record_function("(tablewise) prepare indices and offsets"):
                with record_function("part 1"):
                    indices_start_position = offsets[batch_size * handle_table]
                    if (not self.include_last_offset) and (batch_size * (handle_table + 1) >= indices.shape[0]):
                        # till the end special case
                        indices_end_position = indices.shape[0]
                    else:
                        indices_end_position = offsets[batch_size * (handle_table + 1)]
                with record_function("part 2"):
                    # shift global ids into this table's local id space
                    # local_indices = indices[indices_start_position:indices_end_position] - self.global_tables_offsets[handle_table]
                    local_indices = indices.narrow(
                        0, indices_start_position, indices_end_position - indices_start_position
                    ).sub(self.global_tables_offsets[handle_table])
                    if self.include_last_offset:
                        # local_offsets = offsets[batch_size * handle_table:batch_size * (handle_table + 1) + 1] - offsets[batch_size * (handle_table)]
                        local_offsets = offsets.narrow(0, batch_size * handle_table, batch_size + 1).sub(
                            offsets[batch_size * (handle_table)]
                        )
                    else:
                        # local_offsets = offsets[batch_size * handle_table:batch_size * (handle_table + 1)] - offsets[batch_size * (handle_table)]
                        local_offsets = offsets.narrow(0, batch_size * handle_table, batch_size).sub(
                            offsets[batch_size * (handle_table)]
                        )
                local_per_sample_weights = None
                if per_sample_weights != None:
                    local_per_sample_weights = per_sample_weights[indices_start_position:indices_end_position]
            with record_function("(tablewise) tablewise forward"):
                local_output_list.append(
                    self.cached_embedding_bag_list[i](local_indices, local_offsets, local_per_sample_weights)
                )

        # get result of shape = (batch_size, (len(assigned_table_list)*embedding_dim))
        local_output = torch.cat(local_output_list, 1)
        # then concatenate those local_output on the second dimension.
        # use all_to_all
        remains = batch_size % self.world_size
        scatter_strides = [batch_size // self.world_size + int(i < remains) for i in range(self.world_size)]
        output_full = dual_all_to_all_tablewise(local_output, self.pg, scatter_strides, self.embedding_dim_per_rank)
        if shape_hook is not None:
            output_full = shape_hook(output_full)
        return output_full

    def element_size(self):
        # bytes per element of the underlying cpu weight; 0 when this rank owns no table
        if len(self.assigned_table_list) == 0:
            return 0
        return self.cached_embedding_bag_list[0].cache_weight_mgr.weight.element_size()

    def print_comm_stats_(self):
        # aggregate CPU<->GPU traffic counters across all locally managed bags
        cuda_to_cpu_elem_num = 0
        cpu_to_cuda_elem_num = 0
        for cached_embedding_bag in self.cached_embedding_bag_list:
            cuda_to_cpu_elem_num += cached_embedding_bag.cache_weight_mgr._cuda_to_cpu_numel
            cpu_to_cuda_elem_num += cached_embedding_bag.cache_weight_mgr._cpu_to_cuda_numel
        print(f"CUDA->CPU num: {cuda_to_cpu_elem_num / 1e6} M elem")
        print(f"CPU->CUDA num: {cpu_to_cuda_elem_num / 1e6} M elem")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/embedding_config.py
colossalai/legacy/nn/parallel/layers/cache_embedding/embedding_config.py
import torch


class TablewiseEmbeddingBagConfig:
    """Per-table configuration record for tablewise-parallel embedding bags.

    Plain data holder: one instance describes a single embedding table
    (its size, cache budget, owning rank and optional warm-start data).

    example:
    def prepare_tablewise_config(args, cache_ratio, ...):
        embedding_bag_config_list: List[TablewiseEmbeddingBagConfig] = []
        ...
        return embedding_bag_config_list
    """

    def __init__(
        self,
        num_embeddings: int,
        cuda_row_num: int,
        assigned_rank: int = 0,
        buffer_size=50_000,
        ids_freq_mapping=None,
        initial_weight: torch.tensor = None,
        name: str = "",
    ):
        # identification / placement
        self.name = name
        self.assigned_rank = assigned_rank
        # table geometry and cache budget
        self.num_embeddings = num_embeddings
        self.cuda_row_num = cuda_row_num
        self.buffer_size = buffer_size
        # optional warm-start data
        self.ids_freq_mapping = ids_freq_mapping
        self.initial_weight = initial_weight
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/parallel_cached_embedding_tablewise.py
colossalai/legacy/nn/parallel/layers/cache_embedding/parallel_cached_embedding_tablewise.py
from typing import List import torch import torch.distributed as dist import torch.nn.functional as F from colossalai.legacy.nn._ops._utils import dual_all_to_all_tablewise from colossalai.legacy.tensor import ProcessGroup from .cache_mgr import EvictionStrategy from .cached_embedding import CachedEmbeddingBag from .embedding_config import TablewiseEmbeddingBagConfig class ParallelCachedEmbeddingBagTablewise(CachedEmbeddingBag): """ all tables assigned to this class instance are managed by a single CachedEmbeddingBag. Those parameters in TablewiseEmbeddingBagConfig are ignored: cuda_row_num, buffer_size, initial_weight. """ def __init__( self, embedding_bag_config_list: List[TablewiseEmbeddingBagConfig], embedding_dim: int, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, _weight=None, mode="mean", include_last_offset=False, dtype=None, device=None, cache_ratio=0.01, warmup_ratio=0.7, buffer_size=50_000, pin_weight=False, evict_strategy: EvictionStrategy = EvictionStrategy.LFU, ): self.rank = dist.get_rank() self.world_size = dist.get_world_size() self.rank_of_tables = [config.assigned_rank for config in embedding_bag_config_list] self.global_table_num_embeddings_list = [config.num_embeddings for config in embedding_bag_config_list] self.global_tables_num = len(embedding_bag_config_list) self.global_tables_offsets = torch.cumsum(torch.tensor([0] + self.global_table_num_embeddings_list), 0).cuda() self.assigned_table_list: List[int] = [] self.pg = ProcessGroup(tp_degree=self.world_size) self.num_embeddings = 0 for i, rank in enumerate(self.rank_of_tables): if rank == self.rank: self.assigned_table_list.append(i) self.num_embeddings += self.global_table_num_embeddings_list[i] self.include_last_offset = include_last_offset ids_freq_mapping = [] for config in embedding_bag_config_list: if config.assigned_rank == self.rank: if config.ids_freq_mapping != None: ids_freq_mapping.extend(config.ids_freq_mapping) else: ids_freq_mapping = 
None break self.cache_ratio = cache_ratio # table-associate cache int(cache_ratio * self.num_embeddings) super(ParallelCachedEmbeddingBagTablewise, self).__init__( self.num_embeddings, embedding_dim, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse, _weight, mode, include_last_offset, dtype, device, cache_ratio, ids_freq_mapping, warmup_ratio, buffer_size, pin_weight, evict_strategy, ) # for assigned tables reconnection: self.idx_offset_list = [] offset_cumsum = 0 for table_i, table_num_embeddings in enumerate(self.global_table_num_embeddings_list): if self.rank_of_tables[table_i] == self.rank: self.idx_offset_list.append(offset_cumsum) else: offset_cumsum += table_num_embeddings # prepare list shape for all_to_all output self.embedding_dim_per_rank = [0 for i in range(self.world_size)] for rank in self.rank_of_tables: self.embedding_dim_per_rank[rank] += embedding_dim self.cache_op = True def forward( self, indices: torch.Tensor, offsets: torch.Tensor = None, per_sample_weights=None, shape_hook=None, already_split_along_rank=True, ): if not already_split_along_rank: # not recommanded. it takes time. batch_size = (offsets.shape[0]) // self.global_tables_num local_indices, local_offsets, local_per_sample_weights = self.split_along_rank( batch_size, indices, offsets, per_sample_weights ) else: # recommanded. 
batch_size = (offsets.shape[0]) // len(self.assigned_table_list) local_indices, local_offsets, local_per_sample_weights = indices, offsets, per_sample_weights if self.cache_op: with torch.no_grad(): indices = self.cache_weight_mgr.prepare_ids(local_indices) local_output = F.embedding_bag( indices.cuda(), self.cache_weight_mgr.cuda_cached_weight, local_offsets, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.mode, self.sparse, local_per_sample_weights, self.include_last_offset, self.padding_idx, ) local_output = torch.cat(local_output.split(batch_size), 1) remains = batch_size % self.world_size scatter_strides = [batch_size // self.world_size + int(i < remains) for i in range(self.world_size)] output_full = dual_all_to_all_tablewise(local_output, self.pg, scatter_strides, self.embedding_dim_per_rank) if shape_hook is not None: output_full = shape_hook(output_full) return output_full def split_along_rank( self, batch_size, indices: torch.Tensor, offsets: torch.Tensor = None, per_sample_weights=None ): """ if input indices and offsets haven't been splitted along assigned rank, this function will do it. it takes time. please consider splitting data during batch loading. """ local_indices_list: List(torch.Tensor) = [] local_offsets_list: List(torch.Tensor) = [] if per_sample_weights != None: local_per_sample_weights_list: List(torch.Tensor) = [] offset_pre_end = 0 # local_offsets trick for i, handle_table in enumerate(self.assigned_table_list): indices_start_position = offsets[batch_size * handle_table] if (not self.include_last_offset) and (batch_size * (handle_table + 1) >= indices.shape[0]): # till-the-end special case indices_end_position = indices.shape[0] else: indices_end_position = offsets[batch_size * (handle_table + 1)] # alternative approach: reduce malloc """ # 1. 
local_indices_list: local_indices = indices.narrow(0, indices_start_position, indices_end_position - indices_start_position) torch.sub(local_indices, self.idx_offset_list[i], out=local_indices) local_indices_list.append(local_indices) # 2. local_offsets_list: if i + 1 == len(self.assigned_table_list): # till-the-end special case if not self.include_last_offset: local_offsets = offsets.narrow(0, batch_size * handle_table, batch_size) else: local_offsets = offsets.narrow(0, batch_size * handle_table, batch_size + 1) torch.add(local_offsets, offset_pre_end - offsets[batch_size * handle_table], out=local_offsets) local_offsets_list.append(local_offsets) else: temp_holder = offsets[batch_size * handle_table].item() local_offsets = offsets.narrow(0, batch_size * handle_table, batch_size) torch.add(local_offsets, offset_pre_end - offsets[batch_size * handle_table], out=local_offsets) offset_pre_end = offsets[batch_size * (handle_table + 1)] + offset_pre_end - temp_holder local_offsets_list.append(local_offsets) """ # 1. local_indices_list: local_indices_list.append( indices.narrow(0, indices_start_position, indices_end_position - indices_start_position).sub( self.idx_offset_list[i] ) ) # 2. local_offsets_list: if i + 1 == len(self.assigned_table_list): # till-the-end special case if not self.include_last_offset: local_offsets = offsets.narrow(0, batch_size * handle_table, batch_size).add( offset_pre_end - offsets[batch_size * (handle_table)] ) else: local_offsets = offsets.narrow(0, batch_size * handle_table, batch_size + 1).add( offset_pre_end - offsets[batch_size * (handle_table)] ) local_offsets_list.append(local_offsets) else: local_offsets = offsets.narrow(0, batch_size * handle_table, batch_size + 1).add( offset_pre_end - offsets[batch_size * (handle_table)] ) offset_pre_end = local_offsets[-1] local_offsets_list.append(local_offsets[:-1]) # 3. 
local_per_sample_weights_list: if per_sample_weights != None: local_per_sample_weights_list.append(per_sample_weights[indices_start_position:indices_end_position]) local_indices = torch.cat(local_indices_list, 0) local_offsets = torch.cat(local_offsets_list, 0) local_per_sample_weights = None if per_sample_weights != None: local_per_sample_weights = torch.cat(local_per_sample_weights_list, 0) return local_indices, local_offsets, local_per_sample_weights def set_cache_op(self, cache_op: bool = True): self.cache_op = cache_op def print_comm_stats_(self): self.cache_weight_mgr.print_comm_stats() def element_size(self): return self.weight.element_size()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/base_embedding.py
colossalai/legacy/nn/parallel/layers/cache_embedding/base_embedding.py
import abc import torch.nn as nn class BaseEmbeddingBag(abc.ABC, nn.Module): def __init__( self, num_embeddings, embedding_dim, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False, sparse=False, mode="mean", include_last_offset=False, ): super(BaseEmbeddingBag, self).__init__() self.num_embeddings = num_embeddings self.embedding_dim = embedding_dim if padding_idx is not None: if padding_idx > 0: assert padding_idx < self.num_embeddings, "Padding_idx must be within num_embeddings" elif padding_idx < 0: assert padding_idx >= -self.num_embeddings, "Padding_idx must be within num_embeddings" padding_idx = self.num_embeddings + padding_idx self.padding_idx = padding_idx self.max_norm = max_norm self.norm_type = norm_type self.scale_grad_by_freq = scale_grad_by_freq self.sparse = sparse # Specific to embedding bag self.mode = mode self.include_last_offset = include_last_offset
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/parallel/layers/cache_embedding/__init__.py
colossalai/legacy/nn/parallel/layers/cache_embedding/__init__.py
from .cache_mgr import CachedParamMgr, EvictionStrategy from .cached_embedding import CachedEmbeddingBag from .copyer import LimitBuffIndexCopyer from .embedding_config import TablewiseEmbeddingBagConfig from .parallel_cached_embedding import ParallelCachedEmbeddingBag from .parallel_cached_embedding_tablewise import ParallelCachedEmbeddingBagTablewise from .parallel_cached_embedding_tablewise_split_cache import ParallelCachedEmbeddingBagTablewiseSpiltCache __all__ = [ "CachedParamMgr", "LimitBuffIndexCopyer", "CachedEmbeddingBag", "ParallelCachedEmbeddingBag", "EvictionStrategy", "ParallelCachedEmbeddingBagTablewise", "TablewiseEmbeddingBagConfig", "ParallelCachedEmbeddingBagTablewiseSpiltCache", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/_base_engine.py
colossalai/legacy/engine/_base_engine.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # this code is inspired by the DeepSpeed library and implemented with our own design from scratch from typing import Iterable, List, Optional, Type from torch import Tensor from torch.nn import Module from torch.nn.modules.loss import _Loss from colossalai.interface import OptimizerWrapper from colossalai.legacy.engine.gradient_handler import BaseGradientHandler from colossalai.legacy.engine.schedule import ( BaseSchedule, InterleavedPipelineSchedule, NonPipelineSchedule, PipelineSchedule, ) from colossalai.legacy.zero.gemini import BaseOpHook, register_ophooks_recursively from colossalai.logging import get_dist_logger class Engine: """Basic engine class for training and evaluation. It runs a specific process method :meth:`step` which is based on the given :attr:`schedule` over each batch of a dataset. It controls a iteration in training. Args: model (``torch.nn.Module``): The neural network model. optimizer (``colossalai.interface.OptimizerWrapper``): Optimizer for updating the parameters. criterion (``torch.nn.modules.loss._Loss``, optional): Loss function for calculating loss. gradient_handlers (List[``BaseGradientHandler``], optional): A list of gradient handler used in backward. clip_grad_norm (float, optional): The norm of gradient clipping. ophook_list (list): List of ophook. verbose (bool): whether to display log info. schedule (''BaseSchedule''): Runtime schedule. Examples: >>> # define model, criterion, optimizer, lr_scheduler, train_dataloader for your training >>> model = ... >>> criterion = ... >>> optimizer = ... >>> train_dataloader = ... 
>>> engine, _, _, _ = colossalai.initialize(model, optimizer, criterion) >>> engine.train() >>> for inputs, labels in train_dataloader >>> # set gradients to zero >>> engine.zero_grad() >>> # run forward pass >>> outputs = engine(inputs) >>> # compute loss value and run backward pass >>> loss = engine.criterion(outputs, labels) >>> engine.backward(loss) >>> # update parameters >>> engine.step() The example of using Engine in training could be find in `Training with engine and trainer <https://www.colossalai.org/docs/basics/engine_trainer>`_. and `Run resnet cifar10 with engine <https://github.com/hpcaitech/ColossalAI-Examples/blob/main/image/resnet/run_resnet_cifar10_with_engine.py>`_. """ def __init__( self, model: Module, optimizer: "OptimizerWrapper", criterion: Optional[_Loss] = None, gradient_handlers: Optional[List[BaseGradientHandler]] = None, clip_grad_norm: float = 0.0, ophook_list: Optional[List[BaseOpHook]] = None, verbose: bool = True, schedule: Optional[BaseSchedule] = None, ): self._model = model self._optimizer = optimizer self._criterion = criterion self._clip_grad_norm = clip_grad_norm self._verbose = verbose self._logger = get_dist_logger() # state self.training = True # default # build gradient handler if gradient_handlers: self._gradient_handlers = gradient_handlers else: self._gradient_handlers = [] if ophook_list is None: self._ophook_list = [] else: self._ophook_list = ophook_list # build schedule if schedule: assert isinstance( schedule, BaseSchedule ), f"expected schedule to be of type BaseSchedule, but got {type(schedule)}" self._schedule = schedule else: self._schedule = NonPipelineSchedule() if self.uses_pipeline: self._schedule.pre_processing(self) # register hook if any if len(self._ophook_list) > 0: register_ophooks_recursively(self._model, self._ophook_list) @property def ophooks(self): """show current activated ophooks""" return self._ophook_list @property def model(self): """Model attached to the engine""" return self._model 
@property def optimizer(self): """Optimizer attached to the engine""" return self._optimizer @property def criterion(self): """Criterion attached to the engine""" return self._criterion @property def schedule(self): """Schedule attached to the engine""" return self._schedule @property def uses_pipeline(self): """show the pipeline parallel used or not""" return isinstance(self._schedule, (PipelineSchedule, InterleavedPipelineSchedule)) def add_hook(self, ophook: Type[BaseOpHook]) -> None: """add necessary hook""" # whether this hook exist for h in self._ophook_list: if type(h) == type(ophook): logger = get_dist_logger() logger.warning(f"duplicate hooks, at least two instance of {type(ophook)}") self._ophook_list.append(ophook) register_ophooks_recursively(self._model, self._ophook_list) def remove_hook(self, ophook: Type[BaseOpHook]) -> None: """remove hook""" logger = get_dist_logger() logger.warning(f"removing hooks is currently not supported") def zero_grad(self): """Set the gradient of parameters to zero""" self.optimizer.zero_grad() def step(self): """Execute parameter update""" self._all_reduce_gradients() self.optimizer.clip_grad_by_norm(self._clip_grad_norm) return self.optimizer.step() def backward(self, loss: Tensor): """Start backward propagation given the loss value computed by a loss function. Args: loss (:class:`torch.Tensor`): Loss value computed by a loss function. """ ret = self.optimizer.backward(loss) for ophook in self._ophook_list: ophook.post_iter() return ret def backward_by_grad(self, tensor, grad): """Start backward propagation given the gradient of the output tensor. Args: tensor (:class:`torch.Tensor`): Output tensor. grad (:class:`torch.Tensor`): Gradient passed back to the output. """ ret = self.optimizer.backward_by_grad(tensor, grad) for ophook in self._ophook_list: ophook.post_iter() return ret def __call__(self, *args, **kwargs): """Run the forward step for the model. 
Returns: Tuple[:class:`torch.Tensor`] or :class:`torch.Tensor`: Output of the model. """ return self.model(*args, **kwargs) def _all_reduce_gradients(self): """Handles all-reduce operations of gradients across different parallel groups.""" for handler in self._gradient_handlers: handler.handle_gradient() def execute_schedule(self, data_iter: Iterable, **kwargs): """Run the forward, loss computation, and backward for the model. Returns a tuple of (output, label, loss). Returns: Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss). """ output, label, loss = self._schedule.forward_backward_step(self, data_iter, **kwargs) return output, label, loss def train(self): """Sets the model to training mode.""" self.training = True self._model.train() def eval(self): """Sets the model to evaluation mode.""" self.training = False self._model.eval()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/__init__.py
colossalai/legacy/engine/__init__.py
from ._base_engine import Engine from .gradient_handler import * __all__ = ["Engine"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_handler/_zero_gradient_handler.py
colossalai/legacy/engine/gradient_handler/_zero_gradient_handler.py
from colossalai.legacy.registry import GRADIENT_HANDLER from ._base_gradient_handler import BaseGradientHandler @GRADIENT_HANDLER.register_module class ZeROGradientHandler(BaseGradientHandler): """A helper class to handle all-reduce operations in a data parallel group. A all-reduce collective communication will be operated in :func:`handle_gradient` among a data parallel group. This class is specialized with ZeRO optimization. Args: model (Module): Model where the gradients accumulate. optimizer (Optimizer): Optimizer for updating the parameters. """ def handle_gradient(self): """A method running a all-reduce operation in a data parallel group.""" self._optimizer.sync_grad()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_handler/_moe_gradient_handler.py
colossalai/legacy/engine/gradient_handler/_moe_gradient_handler.py
from colossalai.context.moe_context import MOE_CONTEXT from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.registry import GRADIENT_HANDLER from colossalai.utils.moe import get_moe_epsize_param_dict from ._base_gradient_handler import BaseGradientHandler from .utils import bucket_allreduce @GRADIENT_HANDLER.register_module class MoeGradientHandler(BaseGradientHandler): """A helper class to handle all-reduce operations in a data parallel group and moe model parallel. A all-reduce collective communication will be operated in :func:`handle_gradient` among a data parallel group. For better performance, it bucketizes the gradients of all parameters that are the same type to improve the efficiency of communication. Args: model (Module): Model where the gradients accumulate. optimizer (Optimizer): Optimizer for updating the parameters. """ def __init__(self, model, optimizer=None): super().__init__(model, optimizer) def handle_gradient(self): """A method running an all-reduce operation in a data parallel group. Then running an all-reduce operation for all parameters in experts across moe model parallel group """ global_data = gpc.data_parallel_size if global_data > 1: epsize_param_dict = get_moe_epsize_param_dict(self._model) # epsize is 1, indicating the params are replicated among processes in data parallelism # use the ParallelMode.DATA to get data parallel group # reduce gradients for all parameters in data parallelism if 1 in epsize_param_dict: bucket_allreduce(param_list=epsize_param_dict[1], group=gpc.get_group(ParallelMode.DATA)) for ep_size in epsize_param_dict: if ep_size != 1 and ep_size != MOE_CONTEXT.world_size: bucket_allreduce( param_list=epsize_param_dict[ep_size], group=MOE_CONTEXT.parallel_info_dict[ep_size].dp_group )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_handler/_sequence_parallel_gradient_handler.py
colossalai/legacy/engine/gradient_handler/_sequence_parallel_gradient_handler.py
from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.registry import GRADIENT_HANDLER from ._base_gradient_handler import BaseGradientHandler from .utils import bucket_allreduce @GRADIENT_HANDLER.register_module class SequenceParallelGradientHandler(BaseGradientHandler): """A helper class to handle all-reduce operations in a data parallel group. A all-reduce collective communication will be operated in :func:`handle_gradient` among a data parallel group. For better performance, it bucketizes the gradients of all parameters that are the same type to improve the efficiency of communication. Args: model (Module): Model where the gradients accumulate. optimizer (Optimizer): Optimizer for updating the parameters. """ def handle_gradient(self): """A method running a all-reduce operation in a data parallel group.""" if gpc.get_world_size(ParallelMode.SEQUENCE_DP) > 1: bucket_allreduce(param_list=self._model.parameters(), group=gpc.get_group(ParallelMode.SEQUENCE_DP))
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_handler/_pipeline_parallel_gradient_handler.py
colossalai/legacy/engine/gradient_handler/_pipeline_parallel_gradient_handler.py
#!/usr/bin/env python from collections import defaultdict import torch import torch.distributed as dist from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from colossalai.legacy.core import global_context as gpc from colossalai.legacy.registry import GRADIENT_HANDLER from ._base_gradient_handler import BaseGradientHandler @GRADIENT_HANDLER.register_module class PipelineSharedModuleGradientHandler(BaseGradientHandler): """A helper class to handle all-reduce operations in sub parallel groups. A all-reduce collective communication will be operated in :func:`handle_gradient` among all sub pipeline parallel groups. For better performance, it bucketizes the gradients of all parameters that are the same type to improve the efficiency of communication. Args: model (Module): Model where the gradients accumulate. optimizer (Optimizer): Optimizer for updating the parameters. """ def handle_gradient(self): """A method running a all-reduce operation in sub pipeline parallel groups.""" if gpc.pipeline_parallel_size > 1: # bucketize and all-reduce buckets = defaultdict(lambda: defaultdict(list)) # Pack the buckets. for param in self._model.parameters(): group = getattr(param, "pipeline_shared_module_pg", None) if ( param.requires_grad and group is not None and ( (hasattr(param, "colo_attr") and not param.colo_attr.saved_grad.is_null()) or param.grad is not None ) ): tp = param.data.type() buckets[group][tp].append(param) # For each bucket, all-reduce and copy all-reduced grads. for group, group_buckets in buckets.items(): for tp, bucket in group_buckets.items(): grads = [ param.colo_attr.grad_payload if hasattr(param, "colo_attr") else param.grad.data for param in bucket ] coalesced = _flatten_dense_tensors(grads).to(torch.cuda.current_device()) dist.all_reduce(coalesced, op=dist.ReduceOp.SUM, group=group) for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)): buf.copy_(synced)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_handler/utils.py
colossalai/legacy/engine/gradient_handler/utils.py
from typing import Iterable import torch.distributed as dist import torch.nn as nn from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors def bucket_allreduce(param_list: Iterable[nn.Parameter], group=None): # get communication world size comm_size = dist.get_world_size(group) # bucketize and all-reduce buckets = {} # Pack the buckets. for param in param_list: if param.requires_grad and param.grad is not None: tp = param.data.type() if tp not in buckets: buckets[tp] = [] buckets[tp].append(param) # For each bucket, all-reduce and copy all-reduced grads. for tp in buckets: bucket = buckets[tp] grads = [param.grad.data for param in bucket] coalesced = _flatten_dense_tensors(grads) coalesced /= comm_size dist.all_reduce(coalesced, group=group) for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)): buf.copy_(synced)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_handler/__init__.py
colossalai/legacy/engine/gradient_handler/__init__.py
from ._base_gradient_handler import BaseGradientHandler from ._data_parallel_gradient_handler import DataParallelGradientHandler from ._pipeline_parallel_gradient_handler import PipelineSharedModuleGradientHandler from ._sequence_parallel_gradient_handler import SequenceParallelGradientHandler from ._zero_gradient_handler import ZeROGradientHandler __all__ = [ "BaseGradientHandler", "DataParallelGradientHandler", "ZeROGradientHandler", "PipelineSharedModuleGradientHandler", "SequenceParallelGradientHandler", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_handler/_data_parallel_gradient_handler.py
colossalai/legacy/engine/gradient_handler/_data_parallel_gradient_handler.py
from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.registry import GRADIENT_HANDLER from ._base_gradient_handler import BaseGradientHandler from .utils import bucket_allreduce @GRADIENT_HANDLER.register_module class DataParallelGradientHandler(BaseGradientHandler): """A helper class to handle all-reduce operations in a data parallel group. A all-reduce collective communication will be operated in :func:`handle_gradient` among a data parallel group. For better performance, it bucketizes the gradients of all parameters that are the same type to improve the efficiency of communication. Args: model (Module): Model where the gradients accumulate. optimizer (Optimizer): Optimizer for updating the parameters. """ def handle_gradient(self): """A method running a all-reduce operation in a data parallel group.""" # TODO: add memory buffer if gpc.data_parallel_size > 1: bucket_allreduce(param_list=self._model.parameters(), group=gpc.get_group(ParallelMode.DATA))
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_handler/_base_gradient_handler.py
colossalai/legacy/engine/gradient_handler/_base_gradient_handler.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod class BaseGradientHandler(ABC): """A basic helper class to handle all-reduce operations of gradients across different parallel groups before optimization. Args: model (Module): Model where the gradients accumulate. optimizer (Optimizer): Optimizer for updating the parameters. """ def __init__(self, model, optimizer): self._model = model self._optimizer = optimizer @abstractmethod def handle_gradient(self): """A method to accumulate gradients across different parallel groups. Users should write their own functions or just use the functions in pre-defined subclasses. """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/schedule/_pipeline_schedule_v2.py
colossalai/legacy/engine/schedule/_pipeline_schedule_v2.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Iterable, Tuple import torch.cuda import colossalai.legacy.communication.p2p_v2 as comm from colossalai.accelerator import get_accelerator from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.engine import Engine from ._pipeline_schedule import PipelineSchedule def pack_return_tensors(return_tensors): output, label = tuple(zip(*return_tensors)) if isinstance(output[0], torch.Tensor): output = torch.cat(output, dim=0) elif isinstance(output[0], (list, tuple)): output = tuple(torch.cat(tensors, dim=0) for tensors in zip(*output)) else: raise TypeError(f"Output of model must be tensor or list/tuple of tensors") if isinstance(label[0], torch.Tensor): label = torch.cat(label, dim=0) else: merged_label = {k: [] for k in label[0].keys()} for d in label: for k, v in d.items(): merged_label[k].append(v) label = {k: torch.cat(v, dim=0) for k, v in merged_label.items()} return output, label class PipelineScheduleV2(PipelineSchedule): """Derived class of PipelineSchedule, the only difference is that forward_backward_step is reconstructed with p2p_v2 Args: num_microbatches (int): The number of microbatches. data_process_func (Callable, optional): The preprocessing function which receives a batch of data, and it will be executed in `load_batch`. tensor_shape (torch.Size, optional): Specified shape in pipeline communication. scatter_gather_tensors (bool, optional): If set to `True`, communication will be reduced over pipeline when using 1D tensor parallelization. 
Example: # this shows an example of customized data_process_func def data_process_func(stage_output, dataloader_output): output1, output2 = stage_output item1, item2, item3 = dataloader_output # assume item2 is not needed data = (output1, output2, item1) label = item3 return data, label """ def forward_backward_step( self, engine: Engine, data_iter: Iterable, forward_only=False, return_loss=True, return_output_label=True ) -> Tuple[torch.Tensor]: """Runs non-interleaved 1F1B schedule, with communication between pipeline stages. Returns a tuple with losses if the last stage, an empty tuple otherwise. Args: engine (colossalai.legacy.engine.Engine): Colossalai engine for training and inference. data_iter (Iterable): Dataloader as the form of an iterator, obtained by calling iter(dataloader). forward_only (bool, optional): Whether run forward step only. Default is false. If true, no backward will be run. return_loss (bool, optional): Whether returns the loss value. Default is true. return_output_label (bool, optional): If False, the output and label won't be returned. Returns: Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None. """ assert ( forward_only or return_loss ), "The argument 'return_loss' has to be True when 'forward_only' is False, but got False." 
self.load_batch(data_iter) # num_warmup_microbatches is the step when not all the processes are working num_warmup_microbatches = ( gpc.get_world_size(ParallelMode.PIPELINE) - gpc.get_local_rank(ParallelMode.PIPELINE) - 1 ) num_warmup_microbatches = min(num_warmup_microbatches, self.num_microbatches) num_microbatches_remaining = self.num_microbatches - num_warmup_microbatches # Input, output tensors only need to be saved when doing backward passes input_objs = None output_objs = None # local_rank = gpc.get_local_rank(ParallelMode.PIPELINE) if not forward_only: input_objs = [] output_objs = [] return_tensors = [] if return_loss and gpc.is_pipeline_last_stage(ignore_virtual=True): accum_loss = torch.zeros(1, device=get_accelerator().get_current_device()) else: accum_loss = None # Run warmup forward passes. for i in range(num_warmup_microbatches): input_obj = comm.recv_forward() output_obj = self._forward_step( engine, input_obj, return_tensors, return_output_label=return_output_label, accum_loss=accum_loss ) comm.send_forward(output_obj) if not forward_only: input_objs.append(input_obj) output_objs.append(output_obj) # Before running 1F1B, need to receive first forward tensor. # If all microbatches are run in warmup / cooldown phase, then no need to # receive this tensor here. if num_microbatches_remaining > 0: input_obj = comm.recv_forward() # Run 1F1B in steady state. for i in range(num_microbatches_remaining): last_iteration = i == (num_microbatches_remaining - 1) output_obj = self._forward_step( engine, input_obj, return_tensors, return_output_label=return_output_label, accum_loss=accum_loss ) if forward_only: comm.send_forward(output_obj) if not last_iteration: input_obj = comm.recv_forward() else: # TODO adjust here comm.send_forward(output_obj) output_obj_grad = comm.recv_backward() # Add input_obj and output_obj to end of list. 
input_objs.append(input_obj) output_objs.append(output_obj) # Pop output_obj and output_obj from the start of the list for # the backward pass. input_obj = input_objs.pop(0) output_obj = output_objs.pop(0) input_obj_grad = self._backward_step(engine, input_obj, output_obj, output_obj_grad) if last_iteration: input_obj = None comm.send_backward(input_obj_grad) else: input_obj = comm.recv_forward() comm.send_backward(input_obj_grad) # Run cooldown backward passes. if not forward_only: for i in range(num_warmup_microbatches): input_obj = input_objs.pop(0) output_obj = output_objs.pop(0) output_obj_grad = comm.recv_backward() input_obj_grad = self._backward_step(engine, input_obj, output_obj, output_obj_grad) comm.send_backward(input_obj_grad) if len(return_tensors) > 0: output, label = pack_return_tensors(return_tensors) return output, label, accum_loss else: return None, None, accum_loss
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/schedule/_pipeline_schedule.py
colossalai/legacy/engine/schedule/_pipeline_schedule.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect from typing import Callable, List, Tuple, Union import torch.cuda import colossalai.legacy.communication as comm from colossalai.accelerator import get_accelerator from colossalai.legacy.amp.naive_amp import NaiveAMPModel from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.utils import switch_virtual_pipeline_parallel_rank from colossalai.logging import get_dist_logger from ._base_schedule import BaseSchedule def get_tensor_shape(): if hasattr(gpc.config, "TENSOR_SHAPE"): return gpc.config.TENSOR_SHAPE if not gpc.is_initialized(ParallelMode.PIPELINE): return None if ( hasattr(gpc.config, "SEQ_LENGTH") and hasattr(gpc.config, "GLOBAL_BATCH_SIZE") and hasattr(gpc.config, "GLOBAL_BATCH_SIZE") and hasattr(gpc.config, "HIDDEN_SIZE") ): if gpc.is_initialized(ParallelMode.DATA): dp_size = gpc.get_world_size(ParallelMode.DATA) else: dp_size = 1 if gpc.is_initialized(ParallelMode.SEQUENCE): seq_size = gpc.get_world_size(ParallelMode.SEQUENCE) else: seq_size = 1 tensor_shape = ( gpc.config.SEQ_LENGTH // seq_size, gpc.config.GLOBAL_BATCH_SIZE // dp_size // gpc.config.NUM_MICRO_BATCHES, gpc.config.HIDDEN_SIZE, ) return tensor_shape else: return None def pack_return_tensors(return_tensors): output, label = tuple(zip(*return_tensors)) if isinstance(output[0], torch.Tensor): output = torch.cat(output, dim=0) elif isinstance(output[0], (list, tuple)): output = tuple(torch.cat(tensors, dim=0) for tensors in zip(*output)) else: raise TypeError(f"Output of model must be tensor or list/tuple of tensors") if isinstance(label[0], torch.Tensor): label = torch.cat(label, dim=0) else: merged_label = {k: [] for k in label[0].keys()} for d in label: for k, v in d.items(): merged_label[k].append(v) label = {k: torch.cat(v, dim=0) for k, v in merged_label.items()} return output, label class PipelineSchedule(BaseSchedule): """A helper 
schedule class for pipeline parallelism running environment. It uses non-interleaved 1F1B strategy. Other properties are similar as :class:`NonPipelineSchedule`. Args: num_microbatches (int): The number of microbatches. data_process_func (Callable, optional): The preprocessing function which receives a batch of data, and it will be executed in `load_batch`. tensor_shape (torch.Size, optional): Specified shape in pipeline communication. scatter_gather_tensors (bool, optional): If set to `True`, communication will be reduced over pipeline when using 1D tensor parallelization. Example: # this shows an example of customized data_process_func def data_process_func(stage_output, dataloader_output): output1, output2 = stage_output item1, item2, item3 = dataloader_output # assume item2 is not needed data = (output1, output2, item1) label = item3 return data, label """ def __init__( self, num_microbatches, data_process_func: Callable = None, tensor_shape: Union[torch.Size, List[int], Tuple[int]] = None, scatter_gather_tensors: bool = False, ): # we need to make sure that the signature of the data_process_func is valid if data_process_func: sig = inspect.signature(data_process_func) assert len(sig.parameters) == 2, ( "The data_process_func only takes in two parameters for NonPipelineSchedule, " "which is the tensors passed by the previous pipeline stage and the dataloader output from this stage, " "i.e. data_process_func(stage_output, dataloader_output)." ) super().__init__(data_process_func=data_process_func) assert num_microbatches > 0, f"expected num_microbatches to be larger then 1, but got {num_microbatches}" self.num_microbatches = num_microbatches self.dtype = torch.float assert not isinstance( tensor_shape, int ), "tensor_shape type should be one of Union[torch.Size, List[int], Tuple[int]]." 
if tensor_shape is None: self.tensor_shape = tensor_shape elif isinstance(tensor_shape, torch.Size): self.tensor_shape = tensor_shape else: self.tensor_shape = torch.Size(tensor_shape) self.scatter_gather_tensors = False if gpc.is_initialized(ParallelMode.PARALLEL_1D) and gpc.get_world_size(ParallelMode.PARALLEL_1D) > 1: self.scatter_gather_tensors = scatter_gather_tensors self._logger = get_dist_logger() # cache for the batch data self.batch_data = None def load_batch(self, data_iter): # Pipeline schedule just puts data in memory batch_data = super().load_batch(data_iter, to_gpu=False) self.microbatch_offset = 0 assert self.batch_size % self.num_microbatches == 0, "Batch size should divided by the number of microbatches" self.microbatch_size = self.batch_size // self.num_microbatches self.batch_data = batch_data def _get_data_slice(self, data, offset): if isinstance(data, torch.Tensor): return data[offset : offset + self.microbatch_size] elif isinstance(data, (list, tuple)): data_dict = {} for element in data: if isinstance(element, dict): data_dict.update({k: v[offset : offset + self.microbatch_size] for k, v in element.items()}) elif data_dict: data_dict["label"] = element[offset : offset + self.microbatch_size] if data_dict: return data_dict return [val[offset : offset + self.microbatch_size] for val in data] elif isinstance(data, dict): return {k: v[offset : offset + self.microbatch_size] for k, v in data.items()} else: raise TypeError(f"Expected data to be of type torch.Tensor, list, tuple, or dict, but got {type(data)}") def load_micro_batch(self): micro_batch_data = self._get_data_slice(self.batch_data, self.microbatch_offset) self.microbatch_offset += self.microbatch_size return self._move_to_device(micro_batch_data) def pre_processing(self, engine): from colossalai.legacy.zero import ShardedModelV2 # TODO: remove this after testing new zero with pipeline parallelism model = engine.model if isinstance(model, NaiveAMPModel): self.dtype = torch.half model = 
model.model if isinstance(model, ShardedModelV2): self.dtype = torch.half model = model.module # sig = inspect.signature(model.forward) # for p in sig.parameters.values(): # assert p.kind != inspect.Parameter.VAR_POSITIONAL, '*args is not supported' @staticmethod def _call_engine(model, data): if data is not None: if isinstance(data, torch.Tensor): return model(data) elif isinstance(data, (list, tuple)): return model(*data) elif isinstance(data, dict): stage_output = None if "stage_output" in data: stage_output = data.pop("stage_output") if stage_output is None: return model(**data) elif isinstance(stage_output, torch.Tensor): return model(stage_output, **data) elif isinstance(stage_output, (tuple, list)): return model(*stage_output, **data) else: raise TypeError( f"Expected stage_output to be of type torch.Tensor, list, or tuple, but got {type(stage_output)}" ) else: raise TypeError(f"Expected data to be of type torch.Tensor, list, tuple, or dict, but got {type(data)}") def _get_actual_forward_func(self, module): if isinstance(module, NaiveAMPModel): sig = inspect.signature(module.model.forward) elif hasattr(module, "colo_attr"): sig = inspect.signature(module.module.forward) else: sig = inspect.signature(module.forward) return sig def _get_data_label_for_current_step(self, stage_output, micro_batch_data, criterion, model): if self.data_process_func: # use customized function to get data and label data, label = self.data_process_func(stage_output, micro_batch_data) else: if isinstance(micro_batch_data, (tuple, list)): if gpc.is_first_rank(ParallelMode.PIPELINE): # for the first stage, we use the data from the # dataloader output by default data, label = micro_batch_data else: # for non-first stage, we use the output passed # by the previous as the model input data = stage_output _, label = micro_batch_data elif isinstance(micro_batch_data, dict): data = {} data["stage_output"] = stage_output if "label" in micro_batch_data: label = micro_batch_data.pop("label") 
else: label = None load_data = micro_batch_data data.update(load_data) return data, label def _forward_step(self, engine, input_obj, return_tensors, return_output_label=True, accum_loss=None): """Forward step for passed-in model. If it is the first stage, the input tensor is obtained from data_iterator, otherwise the passed-in input_obj is used. Returns output tensor. This is a helper function and can be ignored by users. Args: engine (colossalai.legacy.engine.Engine): Colossalai engine for training and inference. input_obj (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Input tensor for this pipeline stage. return_tensors (List[:class:`torch.Tensor`]): A list of tensors to return. return_output_label (bool, optional): Whether returns output labels. accum_loss (optional): Where accumulated loss stores. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: output or the loss value of the current pipeline stage. """ micro_batch_data = self.load_micro_batch() data, label = self._get_data_label_for_current_step(input_obj, micro_batch_data, engine.criterion, engine.model) output_obj = self._call_engine(engine.model, data) if gpc.is_last_rank(ParallelMode.PIPELINE): if return_output_label: return_tensors.append((output_obj, label)) if accum_loss is not None: loss_reduced = self._call_engine_criterion(engine, output_obj, label) / self.num_microbatches accum_loss.add_(loss_reduced.detach()) return loss_reduced else: # forward only, it's useless since backward is not needed return output_obj else: if isinstance(output_obj, torch.Tensor): self._logger.debug( f"Global rank {gpc.get_global_rank()}, pipeline rank {gpc.get_local_rank(ParallelMode.PIPELINE)} forward output tensor {output_obj.shape}, dtype {output_obj.dtype}" ) return output_obj def _backward_step(self, engine, input_obj, output_obj, output_obj_grad): """Backward step through the passed-in output tensor. 
If it is the last stage, the output_obj_grad is None, otherwise it is the gradients with respect to stage's output tensor. Returns the gradients with respect to the input tensor (None if first stage). This is a helper function and can be ignored by users. Args: engine (colossalai.legacy.engine.Engine): Colossalai engine for training and inference. input_obj (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): input tensor for this pipeline stage. output_obj (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): output tensor for this pipeline stage. output_obj_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): gradient of output tensor for this pipeline stage. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: gradient of input tensor. """ # Retain the grad on the input_obj. if input_obj is not None: if isinstance(input_obj, torch.Tensor): input_obj.retain_grad() else: for in_tensor in input_obj: if in_tensor is not None: in_tensor.retain_grad() # Backward pass. if output_obj_grad is None: engine.backward(output_obj) else: engine.backward_by_grad(output_obj, output_obj_grad) # Collect the grad of the input_obj. input_obj_grad = None if input_obj is not None: if isinstance(input_obj, torch.Tensor): input_obj_grad = input_obj.grad else: input_obj_grad = [] for in_tensor in input_obj: input_obj_grad.append(in_tensor.grad) return input_obj_grad def forward_backward_step(self, engine, data_iter, forward_only=False, return_loss=True, return_output_label=True): """Runs non-interleaved 1F1B schedule, with communication between pipeline stages. Returns a tuple with losses if the last stage, an empty tuple otherwise. Args: engine (colossalai.legacy.engine.Engine): Colossalai engine for training and inference. data_iter (Iterable): Dataloader as the form of an iterator, obtained by calling iter(dataloader). forward_only (bool, optional): Whether run forward step only. Default is false. If true, no backward will be run. 
return_loss (bool, optional): Whether returns the loss value. Default is true. return_output_label (bool, optional): If False, the output and label won't be returned. Returns: Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None. """ assert ( forward_only or return_loss ), "The argument 'return_loss' has to be True when 'forward_only' is False, but got False." self.load_batch(data_iter) num_warmup_microbatches = ( gpc.get_world_size(ParallelMode.PIPELINE) - gpc.get_local_rank(ParallelMode.PIPELINE) - 1 ) num_warmup_microbatches = min(num_warmup_microbatches, self.num_microbatches) num_microbatches_remaining = self.num_microbatches - num_warmup_microbatches # Input, output tensors only need to be saved when doing backward passes input_objs = None output_objs = None if not forward_only: input_objs = [] output_objs = [] return_tensors = [] if return_loss and gpc.is_pipeline_last_stage(ignore_virtual=True): accum_loss = torch.zeros(1, device=get_accelerator().get_current_device()) else: accum_loss = None # Used for tensor meta information communication ft_shapes = self.tensor_shape bt_shapes = None fs_checker = self.tensor_shape is None # Run warmup forward passes. 
for i in range(num_warmup_microbatches): if not gpc.is_first_rank(ParallelMode.PIPELINE): ft_shapes = comm.recv_obj_meta(ft_shapes) input_obj = comm.recv_forward( ft_shapes, dtype=self.dtype, scatter_gather_tensors=self.scatter_gather_tensors ) output_obj = self._forward_step( engine, input_obj, return_tensors, return_output_label=return_output_label, accum_loss=accum_loss ) if not gpc.is_last_rank(ParallelMode.PIPELINE): if isinstance(output_obj, torch.Tensor): bt_shapes = output_obj.shape else: bt_shapes = [] for out_tensor in output_obj: bt_shapes.append(out_tensor.shape) fs_checker = comm.send_obj_meta(output_obj, fs_checker) comm.send_forward(output_obj, scatter_gather_tensors=self.scatter_gather_tensors) if not forward_only: input_objs.append(input_obj) output_objs.append(output_obj) # Before running 1F1B, need to receive first forward tensor. # If all microbatches are run in warmup / cooldown phase, then no need to # receive this tensor here. if num_microbatches_remaining > 0: if not gpc.is_first_rank(ParallelMode.PIPELINE): ft_shapes = comm.recv_obj_meta(ft_shapes) input_obj = comm.recv_forward( ft_shapes, dtype=self.dtype, scatter_gather_tensors=self.scatter_gather_tensors ) # Run 1F1B in steady state. for i in range(num_microbatches_remaining): last_iteration = i == (num_microbatches_remaining - 1) output_obj = self._forward_step( engine, input_obj, return_tensors, return_output_label=return_output_label, accum_loss=accum_loss ) if forward_only: comm.send_forward(output_obj, scatter_gather_tensors=self.scatter_gather_tensors) if not last_iteration: input_obj = comm.recv_forward( ft_shapes, dtype=self.dtype, scatter_gather_tensors=self.scatter_gather_tensors ) else: output_obj_grad = comm.send_forward_recv_backward( output_obj, bt_shapes, dtype=self.dtype, scatter_gather_tensors=self.scatter_gather_tensors ) # Add input_obj and output_obj to end of list. 
input_objs.append(input_obj) output_objs.append(output_obj) # Pop output_obj and output_obj from the start of the list for # the backward pass. input_obj = input_objs.pop(0) output_obj = output_objs.pop(0) input_obj_grad = self._backward_step(engine, input_obj, output_obj, output_obj_grad) if last_iteration: input_obj = None comm.send_backward(input_obj_grad, scatter_gather_tensors=self.scatter_gather_tensors) else: input_obj = comm.send_backward_recv_forward( input_obj_grad, ft_shapes, dtype=self.dtype, scatter_gather_tensors=self.scatter_gather_tensors ) # Run cooldown backward passes. if not forward_only: for i in range(num_warmup_microbatches): input_obj = input_objs.pop(0) output_obj = output_objs.pop(0) output_obj_grad = comm.recv_backward( bt_shapes, dtype=self.dtype, scatter_gather_tensors=self.scatter_gather_tensors ) input_obj_grad = self._backward_step(engine, input_obj, output_obj, output_obj_grad) comm.send_backward(input_obj_grad, scatter_gather_tensors=self.scatter_gather_tensors) if len(return_tensors) > 0: output, label = pack_return_tensors(return_tensors) return output, label, accum_loss else: return None, None, accum_loss class InterleavedPipelineSchedule(PipelineSchedule): def __init__( self, num_microbatches: int, num_model_chunks: int, data_process_func: Callable = None, tensor_shape: Union[torch.Size, List[int], Tuple[int]] = None, scatter_gather_tensors: bool = False, ): """A helper schedule class for pipeline parallelism running environment. It uses interleaved 1F1B strategy. Other properties are similar as :class:`NonPipelineSchedule`. Args: num_microbatches (int): The number of microbatches. num_model_chunks (int): The number of model chunks. data_process_func (Callable, optional): The preprocessing function which receives a batch of data, and it will be executed in `load_batch`. tensor_shape (torch.Size, optional): Specified shape in pipeline communication. 
scatter_gather_tensors (bool, optional): If set to `True`, communication will be reduced over pipeline when using 1D tensor parallelization. """ assert ( num_microbatches % gpc.get_world_size(ParallelMode.PIPELINE) == 0 ), "num_microbatches must be an integer multiple of pipeline parallel world size" assert ( isinstance(num_model_chunks, int) and num_model_chunks > 0 ), f"expected num_model_chunks to be an integer and larger than 0, but got {num_model_chunks}" super().__init__( num_microbatches, data_process_func=data_process_func, tensor_shape=tensor_shape, scatter_gather_tensors=scatter_gather_tensors, ) gpc.set_virtual_pipeline_parallel_size(num_model_chunks) gpc.set_virtual_pipeline_parallel_rank(0) self.num_model_chunks = num_model_chunks def pre_processing(self, engine): from colossalai.zero.sharded_model.sharded_model_v2 import ShardedModelV2 if isinstance(engine.model, ShardedModelV2): self.dtype = torch.half elif isinstance(engine.model[0], NaiveAMPModel): self.dtype = torch.half for model in engine.model: if isinstance(model, NaiveAMPModel): model = model.model sig = inspect.signature(model.forward) for p in sig.parameters.values(): assert p.kind != inspect.Parameter.VAR_POSITIONAL, "*args is not supported" def load_batch(self, data_iter): super().load_batch(data_iter) # overwrite microbatch_offset, since model chunks load the same microbatch, and should tract the offset self.microbatch_offset = [0 for _ in range(self.num_model_chunks)] def load_micro_batch(self, model_chunk_id): data = self._get_data_slice(self.batch_data, self.microbatch_offset[model_chunk_id]) self.microbatch_offset[model_chunk_id] += self.microbatch_size return self._move_to_device(data) def _forward_step( self, engine, model_chunk_id, input_obj, return_tensors, return_output_label=True, accum_loss=None ): """Forward step for passed-in model. If it is the first stage, the input tensor is obtained from data_iterator, otherwise the passed-in input_obj is used. Returns output tensor. 
This is a helper function and can be ignored by users. Args: engine (colossalai.legacy.engine.Engine): Colossalai engine for training and inference. model_chunk_id (int): The id of model chunks. input_obj (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Input tensor for this pipeline stage. return_tensors (List[:class:`torch.Tensor`]): A list of tensors to return. return_output_label (bool, optional): Whether returns output labels. accum_loss (optional): Where accumulated loss stores. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: output or the loss value of the current pipeline stage. """ micro_batch_data = self.load_micro_batch(model_chunk_id) data, label = self._get_data_label_for_current_step( input_obj, micro_batch_data, engine.criterion, engine.model[model_chunk_id] ) output_obj = self._call_engine(engine.model[model_chunk_id], data) if gpc.is_pipeline_last_stage(): if return_output_label: return_tensors.append((output_obj, label)) if accum_loss is not None: loss_reduced = self._call_engine_criterion(engine, output_obj, label) / self.num_microbatches accum_loss.add_(loss_reduced.detach()) return loss_reduced else: # forward only, it's useless since backward is not needed return output_obj else: if isinstance(output_obj, torch.Tensor): self._logger.debug( f"Global rank {gpc.get_global_rank()}, pipeline rank {gpc.get_local_rank(ParallelMode.PIPELINE)} forward output tensor {output_obj.shape}, dtype {output_obj.dtype}" ) return output_obj def forward_backward_step(self, engine, data_iter, forward_only=False, return_loss=True, return_output_label=True): """Run interleaved 1F1B schedule (model split into model chunks), with communication between pipeline stages as needed. Args: engine (colossalai.legacy.engine.Engine): Colossalai engine for training and inference. data_iter (Iterable): Dataloader as the form of an iterator, obtained by calling iter(dataloader). forward_only (bool, optional): Whether run forward step only. 
Default is false. If true, no backward will be run. return_loss (bool, optional): Whether returns the loss value. Default is true. return_output_label (bool, optional): If False, the output and label won't be returned. Returns: Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None. The loss would be returned only in the last stage. """ assert ( forward_only or return_loss ), "The argument 'return_loss' has to be True when 'forward_only' is False, but got False." self.load_batch(data_iter) model = engine.model input_objs = [[] for _ in range(len(model))] output_objs = [[] for _ in range(len(model))] return_tensors = [] if not forward_only: output_obj_grads = [[] for _ in range(len(model))] if return_loss and gpc.is_pipeline_last_stage(ignore_virtual=True): accum_loss = torch.zeros(1, device=get_accelerator().get_current_device()) else: accum_loss = None # Used for obj meta information communication input_obj_shapes = [self.tensor_shape for _ in range(len(model))] output_obj_shapes = [None for _ in range(len(model))] send_tensor_shape_flags = [self.tensor_shape is None for _ in range(len(model))] pipeline_parallel_size = gpc.get_world_size(ParallelMode.PIPELINE) pipeline_parallel_rank = gpc.get_local_rank(ParallelMode.PIPELINE) # Compute number of warmup and remaining microbatches. num_model_chunks = len(model) num_microbatches = self.num_microbatches * num_model_chunks all_warmup_microbatches = False if forward_only: num_warmup_microbatches = num_microbatches else: # Run all forward passes and then all backward passes if number of # microbatches is just the number of pipeline stages. # Otherwise, perform (num_model_chunks-1)*pipeline_parallel_size on # all workers, followed by more microbatches after depending on # stage ID (more forward passes for earlier stages, later stages can # immediately start with 1F1B). 
if self.num_microbatches == pipeline_parallel_size: num_warmup_microbatches = num_microbatches all_warmup_microbatches = True else: num_warmup_microbatches = (pipeline_parallel_size - pipeline_parallel_rank - 1) * 2 num_warmup_microbatches += (num_model_chunks - 1) * pipeline_parallel_size num_warmup_microbatches = min(num_warmup_microbatches, num_microbatches) num_microbatches_remaining = num_microbatches - num_warmup_microbatches def get_model_chunk_id(microbatch_id, forward): """Helper method to get the model chunk ID given the iteration number.""" microbatch_id_in_group = microbatch_id % (pipeline_parallel_size * num_model_chunks) model_chunk_id = microbatch_id_in_group // pipeline_parallel_size if not forward: model_chunk_id = num_model_chunks - model_chunk_id - 1 return model_chunk_id def _forward_step_helper(microbatch_id): """Helper method to run forward step with model split into chunks (run set_virtual_pipeline_model_parallel_rank() before calling forward_step()).""" model_chunk_id = get_model_chunk_id(microbatch_id, forward=True) gpc.set_virtual_pipeline_parallel_rank(model_chunk_id) # forward step if gpc.is_pipeline_first_stage(): if len(input_objs[model_chunk_id]) == len(output_objs[model_chunk_id]): input_objs[model_chunk_id].append(None) input_obj = input_objs[model_chunk_id][-1] output_obj = self._forward_step( engine, model_chunk_id, input_obj, return_tensors, return_output_label=return_output_label, accum_loss=accum_loss, ) output_objs[model_chunk_id].append(output_obj) # if forward-only, no need to save tensors for a backward pass if forward_only: input_objs[model_chunk_id].pop() output_objs[model_chunk_id].pop() return output_obj def _backward_step_helper(microbatch_id): """Helper method to run backward step with model split into chunks (run set_virtual_pipeline_model_parallel_rank() before calling backward_step()).""" model_chunk_id = get_model_chunk_id(microbatch_id, forward=False) gpc.set_virtual_pipeline_parallel_rank(model_chunk_id) if 
gpc.is_pipeline_last_stage(): if len(output_obj_grads[model_chunk_id]) == 0: output_obj_grads[model_chunk_id].append(None) input_obj = input_objs[model_chunk_id].pop(0) output_obj = output_objs[model_chunk_id].pop(0) output_obj_grad = output_obj_grads[model_chunk_id].pop(0) input_obj_grad = self._backward_step(engine, input_obj, output_obj, output_obj_grad) return input_obj_grad # Run warmup forward passes. gpc.set_virtual_pipeline_parallel_rank(0) if not gpc.is_pipeline_first_stage(): input_obj_shapes[0] = comm.recv_obj_meta(input_obj_shapes[0]) input_objs[0].append( comm.recv_forward(input_obj_shapes[0], dtype=self.dtype, scatter_gather_tensors=self.scatter_gather_tensors) ) for k in range(num_warmup_microbatches): model_chunk_id = get_model_chunk_id(k, forward=True) output_obj = _forward_step_helper(k) if not gpc.is_pipeline_last_stage(): if isinstance(output_obj, torch.Tensor): output_obj_shapes[model_chunk_id] = output_obj.shape else: output_obj_shapes[model_chunk_id] = [] for out_tensor in output_obj: output_obj_shapes[model_chunk_id].append(out_tensor.shape) send_tensor_shape_flags[model_chunk_id] = comm.send_obj_meta( output_obj, send_tensor_shape_flags[model_chunk_id] ) # Determine if tensor should be received from previous stage. next_forward_model_chunk_id = get_model_chunk_id(k + 1, forward=True) recv_prev = True
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/schedule/_non_pipeline_schedule.py
colossalai/legacy/engine/schedule/_non_pipeline_schedule.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect from typing import Callable, Iterable import torch from colossalai.utils import conditional_context from ._base_schedule import BaseSchedule class NonPipelineSchedule(BaseSchedule): """A helper schedule class for no pipeline parallelism running environment. During one process, it loads a batch of dataset and feeds it to the model. After getting the output and calculating the loss, it will use :meth:`step` to update the parameters if it is in training mode. Args: data_process_func (Callable, optional): The preprocessing function which receives a batch of data and returns a tuple in the form of (data, label). and it will be executed in load_batch. Example: # this shows an example of customized data_process_func def data_process_func(dataloader_output): item1, item2, item3 = dataloader_output data = (item1, item2) label = item3 return data, label """ def __init__(self, data_process_func: Callable = None): # check that non-pipeline schedule data process func only takes in one parameter # which is the batch data if data_process_func: sig = inspect.signature(data_process_func) assert len(sig.parameters) == 1, ( "The data_process_func only takes in one parameter for NonPipelineSchedule, " "which is a tuple of tensors for the current batch, " "i.e. data_process_func(dataloader_output)." ) super().__init__(data_process_func) def forward_backward_step( self, engine, data_iter: Iterable, forward_only: bool = False, return_loss: bool = True, return_output_label: bool = True, ): """The process function that loads a batch of dataset and feeds it to the model. The returned labels and loss will None if :attr:`return_loss` is False. Args: engine (colossalai.legacy.engine.Engine): Colossalai engine for training and inference. data_iter (Iterable): Dataloader as the form of an iterator, obtained by calling iter(dataloader). 
forward_only (bool, optional): If True, the model is run for the forward pass, else back propagation will be executed. return_loss (bool, optional): Loss will be returned if True. return_output_label (bool, optional): Output and label will be returned if True. Returns: Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None. """ assert ( forward_only or return_loss ), "The argument 'return_loss' has to be True when 'forward_only' is False, but got False." batch_data = self.load_batch(data_iter) if self.data_process_func: data, label = self.data_process_func(batch_data) else: # if not batch data process func is given, # then we regard the batch data as a simple tuple of (data, label) data, label = batch_data # forward with conditional_context(torch.no_grad(), enable=forward_only): output = self._call_engine(engine, data) if return_loss: loss = self._call_engine_criterion(engine, output, label) if not forward_only: engine.backward(loss) if return_output_label: if return_loss: return output, label, loss else: return output, label, None else: if return_loss: return None, None, loss else: return None, None, None
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/schedule/__init__.py
colossalai/legacy/engine/schedule/__init__.py
from ._base_schedule import BaseSchedule from ._non_pipeline_schedule import NonPipelineSchedule from ._pipeline_schedule import InterleavedPipelineSchedule, PipelineSchedule, get_tensor_shape __all__ = ["BaseSchedule", "NonPipelineSchedule", "PipelineSchedule", "InterleavedPipelineSchedule", "get_tensor_shape"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/schedule/_base_schedule.py
colossalai/legacy/engine/schedule/_base_schedule.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from abc import ABC, abstractmethod
from typing import Callable, Iterable

import torch

from colossalai.accelerator import get_accelerator
from colossalai.logging import get_dist_logger


class BaseSchedule(ABC):
    """A basic helper class to control the process of training or evaluation.

    It mainly composes of forward_backward_step for gradient backward and
    optimizer_step for parameters update.
    For the convenience to enable FP16, we aggregate all codes that contain the
    control of FP16 in class schedule.

    Args:
        data_process_func (Callable, optional): The preprocessing function which receives a batch of data
            and arranges them into data and label.
    """

    def __init__(self, data_process_func: Callable = None):
        self.logger = get_dist_logger()
        self.data_process_func = data_process_func

    @staticmethod
    def _move_tensor(element):
        # Move a single tensor to the current accelerator device (detached);
        # non-tensor values pass through unchanged.
        if torch.is_tensor(element):
            if not element.is_cuda:
                return element.to(get_accelerator().get_current_device()).detach()
        return element

    def _move_to_device(self, data):
        """Recursively move a batch (tensor / list / tuple / dict) onto the accelerator."""
        if isinstance(data, torch.Tensor):
            data = data.to(get_accelerator().get_current_device())
        elif isinstance(data, (list, tuple)):
            data_to_return = []
            for element in data:
                if isinstance(element, dict):
                    data_to_return.append({k: self._move_tensor(v) for k, v in element.items()})
                else:
                    data_to_return.append(self._move_tensor(element))
            data = data_to_return
        elif isinstance(data, dict):
            data = {k: self._move_tensor(v) for k, v in data.items()}
        else:
            raise TypeError(
                f"Expected batch data to be of type torch.Tensor, list, tuple, or dict, but got {type(data)}"
            )
        return data

    def _get_batch_size(self, data):
        # Infer the batch size from the first tensor found in the batch
        # structure. NOTE(review): unsupported types fall through and return
        # None implicitly — confirm callers tolerate that.
        if isinstance(data, torch.Tensor):
            return data.size(0)
        elif isinstance(data, (list, tuple)):
            if isinstance(data[0], dict):
                return data[0][list(data[0].keys())[0]].size(0)
            return data[0].size(0)
        elif isinstance(data, dict):
            return data[list(data.keys())[0]].size(0)

    def load_batch(self, data_iter, to_gpu=True):
        """Loads a batch from data iterator. It returns the data and labels which are
        already in the same GPU as where the model's.

        Args:
            data_iter (Iterable): Data iterator from which get a batch of data, obtained by calling iter(dataloader).
            to_gpu (bool, optional): Whether the data should be moved to GPU

        Returns:
            Tuple (:class:`Tensor`, :class:`torch.Tensor`): A tuple of (data, label).
        """
        if data_iter is None:
            raise RuntimeError("Dataloader is not defined.")
        batch_data = next(data_iter)

        if to_gpu:
            batch_data = self._move_to_device(batch_data)
        self.batch_size = self._get_batch_size(batch_data)
        return batch_data

    def pre_processing(self, engine):
        """To perform actions before running the schedule."""

    @abstractmethod
    def forward_backward_step(
        self,
        engine,
        data_iter: Iterable,
        forward_only: bool,
        return_loss: bool = True,
        return_output_label: bool = True,
    ):
        """The process function over a batch of dataset for training or evaluation.

        Args:
            engine (colossalai.legacy.engine.Engine): Colossalai engine for training and inference.
            data_iter (Iterable): Data iterator from which get a batch of data, obtained by calling iter(dataloader).
            forward_only (bool): If True, the process won't include backward.
            return_loss (bool, optional): If False, the loss won't be returned.
            return_output_label (bool, optional): If False, the output and label won't be returned.
        """

    @staticmethod
    def _call_engine(engine, inputs):
        """Dispatch *inputs* to the engine, unpacking sequences and mappings."""
        if isinstance(inputs, torch.Tensor):
            return engine(inputs)
        elif isinstance(inputs, (list, tuple)):
            return engine(*inputs)
        elif isinstance(inputs, dict):
            return engine(**inputs)
        else:
            # Fix: the original constructed a TypeError without `raise`, so
            # unsupported input types silently returned None.
            raise TypeError(
                f"Expected engine inputs to be of type torch.Tensor, list, tuple, or dict, but got {type(inputs)}"
            )

    @staticmethod
    def _call_engine_criterion(engine, outputs, labels):
        """Dispatch model outputs and labels to the engine's criterion."""
        assert isinstance(
            outputs, (torch.Tensor, list, tuple, dict)
        ), f"Expect output of model is (torch.Tensor, list, tuple), got {type(outputs)}"
        # Normalize single tensors to 1-tuples so they can be star-unpacked.
        if isinstance(outputs, torch.Tensor):
            outputs = (outputs,)
        if isinstance(labels, torch.Tensor):
            labels = (labels,)

        if isinstance(outputs, (tuple, list)) and isinstance(labels, (tuple, list)):
            return engine.criterion(*outputs, *labels)
        elif isinstance(outputs, (tuple, list)) and isinstance(labels, dict):
            return engine.criterion(*outputs, **labels)
        elif isinstance(outputs, dict) and isinstance(labels, dict):
            return engine.criterion(**outputs, **labels)
        elif isinstance(outputs, dict) and isinstance(labels, (list, tuple)):
            raise ValueError(f"Expected labels to be a dict when the model outputs are dict, but got {type(labels)}")
        else:
            # Fix: the original message was a garbled non-f-string containing
            # literal quote/backslash artifacts and never interpolated the types.
            raise TypeError(
                "Expected model outputs and labels to be of type torch.Tensor "
                "(which is auto-converted to tuple), list, tuple, or dict, "
                f"but got {type(outputs)} (model outputs) and {type(labels)} (labels)"
            )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_accumulation/_gradient_accumulation.py
colossalai/legacy/engine/gradient_accumulation/_gradient_accumulation.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from typing import Any, Iterable, Tuple, Union

import torch.nn as nn
from torch import Tensor
from torch.nn.parallel.distributed import DistributedDataParallel
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import DataLoader

from colossalai.interface import OptimizerWrapper
from colossalai.legacy.engine import BaseGradientHandler
from colossalai.utils import conditional_context


class GradAccumOptimizer(OptimizerWrapper):
    """A wrapper for the optimizer to enable gradient accumulation by skipping the steps
    before accumulation size is reached.

    Args:
        optim (:class:`torch.optim.Optimizer`): Your optimizer object for gradient accumulation.
        accumulate_size (int): The number of steps to accumulate gradients.
        model (:class:`torch.nn.Module`):
            Your model object to check if it is DistributedDataParallel for special handling of no_sync() context.
    """

    def __init__(self, optim: Optimizer, accumulate_size: int, model: nn.Module = None):
        super().__init__(optim)
        self.accumulate_size = accumulate_size
        # Counts backward() calls within the current accumulation cycle;
        # reset to 0 by step() once the cycle completes.
        self.accumulate_step = 0

        # handle pytorch ddp auto all reduce
        self.model = model
        self.is_torch_ddp = isinstance(self.model, DistributedDataParallel)

    def zero_grad(self, *args, **kwargs) -> None:
        """
        Set all gradients to zero.

        Args:
            *args: positional arguments for the optimizer wrapped
            **kwargs: keyword arguments for the optimizer wrapped
        """
        # Only clear gradients at the start of a new accumulation cycle;
        # mid-cycle zeroing would discard the partial accumulation.
        if self.accumulate_step == 0:
            self.optim.zero_grad(*args, **kwargs)

    def step(self, *args, **kwargs) -> None:
        """
        Update the model parameters.

        Args:
            *args: positional arguments for the optimizer wrapped
            **kwargs: keyword arguments for the optimizer wrapped
        """
        # Skip the parameter update until accumulate_size backward passes
        # have been performed; then reset the counter and step once.
        if self.accumulate_step < self.accumulate_size:
            return None
        else:
            self.accumulate_step = 0
            return self.optim.step(*args, **kwargs)

    def clip_grad_norm(self, model: nn.Module, max_norm: float) -> None:
        """
        Clip gradients by norm.

        Args:
            model (:class:`torch.nn.Module`): a torch module instance
            max_norm (float): the max norm for gradient clipping
        """
        # Clipping is only meaningful on the final micro-step, when the full
        # accumulated gradient is present.
        if self.accumulate_step < self.accumulate_size:
            pass
        else:
            self.optim.clip_grad_by_norm(max_norm)

    def backward(self, loss: Tensor) -> None:
        """Execute backward pass.

        Args:
            loss (:class:`torch.Tensor`): the loss value.
        """
        self.accumulate_step += 1

        if self.is_torch_ddp:
            # Suppress DDP's gradient all-reduce on every micro-step except
            # the last one of the cycle to avoid redundant communication.
            no_sync = self.accumulate_step < self.accumulate_size
            with conditional_context(self.model.no_sync(), enable=no_sync):
                scaled_loss = loss / self.accumulate_size
                self.optim.backward(scaled_loss)
        else:
            scaled_loss = loss / self.accumulate_size
            self.optim.backward(scaled_loss)

    def backward_by_grad(self, tensor: Tensor, grad: Tensor) -> None:
        """Execute backward pass given the gradients of the output.

        Args:
            tensor (:class:`torch.Tensor`): the output tensor.
            grad (:class:`torch.Tensor`): the output gradient.
        """
        self.accumulate_step += 1
        no_sync = self.is_torch_ddp and self.accumulate_step < self.accumulate_size

        if no_sync:
            with self.model.no_sync():
                self.optim.backward_by_grad(tensor, grad)
        else:
            self.optim.backward_by_grad(tensor, grad)


class GradAccumDataloader:
    """A wrapper for dataloader to enable gradient accumulation by dropping the last incomplete steps.

    Note:
        The dataloader would drop the last incomplete steps for gradient accumulation.
        For example, if a dataloader has 10 batches of data and accumulate size is 4. The model parameters will
        be updated only twice at step 4 and step 8. The last two batches of data do not form a complete 4-step cycle.
        Thus, they will be automatically skipped by this class. If the dataloader is not standard PyTorch dataloader,
        (e.g. Dali dataloader), this class will automatically consume (load data for nothing) the remaining 2 batches.

    Args:
        dataloader (``Iterable``): Your dataloader object for gradient accumulation.
        accumulate_size (int): The number of steps to accumulate gradients.
    """

    def __init__(self, dataloader: Iterable, accumulate_size: int) -> None:
        self.dataloader = dataloader
        # Non-PyTorch loaders (e.g. Dali) must be drained manually at the
        # end of the epoch; standard DataLoader re-creates its iterator.
        self.consume_remain_data = not isinstance(dataloader, DataLoader)
        # Truncate to the largest multiple of accumulate_size.
        self.steps_per_epoch = len(dataloader) - len(dataloader) % accumulate_size

    def __getattr__(self, __name: str) -> Any:
        # Delegate unknown attributes to the wrapped dataloader.
        return getattr(self.dataloader, __name)

    def __len__(self) -> int:
        return self.steps_per_epoch

    def __iter__(self) -> Iterable:
        self._cur_step = 0
        self._dataiter = iter(self.dataloader)
        return self

    def __next__(self) -> Union[Tensor, Tuple[Tensor]]:
        if self._cur_step < self.steps_per_epoch:
            self._cur_step += 1
            data = next(self._dataiter)

            if self._cur_step == self.steps_per_epoch and self.consume_remain_data:
                # this is to handle non standard pytorch dataloader
                # such as dali dataloader
                while True:
                    try:
                        _ = next(self._dataiter)
                    except StopIteration:
                        break
            return data
        else:
            raise StopIteration


class GradAccumLrSchedulerByStep(_LRScheduler):
    """A wrapper for the LR scheduler to enable gradient accumulation by skipping the steps
    before accumulation size is reached.

    Args:
        lr_scheduler (:class:`torch.optim.lr_scheduler._LRScheduler`):
            Your ``lr_scheduler`` object for gradient accumulation.
        accumulate_size (int): The number of steps to accumulate gradients.
    """

    def __init__(self, lr_scheduler: _LRScheduler, accumulate_size: int) -> None:
        # NOTE(review): _LRScheduler.__init__ is deliberately not called; this
        # wrapper delegates everything to the inner scheduler via __getattr__.
        self.lr_scheduler = lr_scheduler
        self.accumulate_size = accumulate_size
        self.accumulate_step = 0

    @staticmethod
    def compute_effective_steps_per_epoch(dataloader: Iterable, accumulate_size: int) -> int:
        """
        Computes the number of effective training iterations. An effective iteration is defined
        as the aggregation of <accumulate_size> iterations. For examples, if accumulate_size = 4,
        then 4 iterations are considered as one effective iteration.

        Args:
            dataloader (``Iterable``): Your dataloader object for gradient accumulation.
            accumulate_size (int): The number of steps to accumulate gradients.
        """
        return len(dataloader) // accumulate_size

    def __getattr__(self, __name: str) -> Any:
        # Delegate unknown attributes to the wrapped scheduler.
        return getattr(self.lr_scheduler, __name)

    def step(self, *args, **kwargs) -> None:
        """
        Update the learning rate.

        Args:
            *args: positional arguments for the lr scheduler wrapped.
            **kwargs: keyword arguments for the lr scheduler wrapped.
        """
        # Advance the inner scheduler only once per completed accumulation cycle.
        self.accumulate_step += 1
        if self.accumulate_step < self.accumulate_size:
            pass
        else:
            self.accumulate_step = 0
            self.lr_scheduler.step(*args, **kwargs)

    def get_lr(self) -> Tensor:
        """
        Compute the next learning rate.

        Returns:
            Tensor: the upcoming learning rate.
        """
        return self.lr_scheduler.get_lr()

    def get_last_lr(self) -> Tensor:
        """
        Returns the current learning rate.

        Returns:
            Tensor: the current learning rate.
        """
        return self.lr_scheduler.get_last_lr()

    def print_lr(self, *args, **kwargs) -> None:
        """
        Print the learning rate.

        Args:
            *args: positional arguments for the lr scheduler wrapped.
            **kwargs: keyword arguments for the lr scheduler wrapped.
        """
        self.lr_scheduler.print_lr(*args, **kwargs)

    def state_dict(self) -> dict:
        """
        Returns the states of the lr scheduler as dictionary.

        Returns:
            dict: the states of the lr scheduler.
        """
        return self.lr_scheduler.state_dict()

    def load_state_dict(self, state_dict: dict) -> None:
        """
        Load the states of the lr scheduler from a dictionary object.

        Args:
            state_dict (dict): the states of the lr scheduler.
        """
        self.lr_scheduler.load_state_dict(state_dict)


class GradAccumGradientHandler:
    r"""A wrapper for the gradient handler to enable gradient accumulation by skipping the steps
    before accumulation size is reached.

    Args:
        grad_handler (:class:`colossalai.legacy.engine.BaseGradientHandler`):
            Your ``gradient_handler`` object for gradient accumulation, would be called when achieving `accumulate_size`.
        accumulate_size (int): The number of steps to accumulate gradients.

    More details about ``gradient_handlers`` could be found in
    `Gradient_handler <https://github.com/hpcaitech/ColossalAI/tree/main/colossalai/engine/gradient_handler>`_.
    """

    def __init__(self, grad_handler: BaseGradientHandler, accumulate_size: int) -> None:
        assert isinstance(
            grad_handler, BaseGradientHandler
        ), f"expected grad_handler to be type BaseGradientHandler, but got {type(grad_handler)}"
        self.grad_handler = grad_handler
        self.accumulate_size = accumulate_size
        self.accumulate_step = 0

    def handle_gradient(self) -> None:
        """
        Handle gradients reduction only in the last gradient accumulation step.
        """
        self.accumulate_step += 1
        if self.accumulate_step < self.accumulate_size:
            pass
        else:
            self.accumulate_step = 0
            self.grad_handler.handle_gradient()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/engine/gradient_accumulation/__init__.py
colossalai/legacy/engine/gradient_accumulation/__init__.py
from typing import Iterable, List

import torch.nn as nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler

from colossalai.legacy.engine import BaseGradientHandler

from ._gradient_accumulation import (
    GradAccumDataloader,
    GradAccumGradientHandler,
    GradAccumLrSchedulerByStep,
    GradAccumOptimizer,
)

__all__ = [
    "accumulate_gradient",
    "GradAccumDataloader",
    "GradAccumOptimizer",
    "GradAccumLrSchedulerByStep",
    "GradAccumGradientHandler",
]


def accumulate_gradient(
    model: nn.Module,
    optimizer: Optimizer,
    dataloader: Iterable,
    accumulate_size: int,
    gradient_handlers: List[BaseGradientHandler] = None,
    lr_scheduler: _LRScheduler = None,
):
    r"""Wrap the training objects with their gradient-accumulation counterparts.

    Args:
        model (:class:`torch.nn.Module`): your model object for gradient accumulation.
        optimizer (:class:`torch.optim.Optimizer`): your optimizer object for gradient accumulation.
        dataloader (:class:`torch.utils.data.DataLoader` or iterable objects):
            your dataloader object, would be called like iter(dataloader)
        accumulate_size (int): the number of steps to accumulate gradients
        gradient_handlers (List[:class:`colossalai.legacy.engine.BaseGradientHandler`]):
            list of gradient handler objects. Default is None.
        lr_scheduler (`torch.optim.lr_scheduler` or `colossalai.nn.lr_scheduler`):
            your ``lr_scheduler`` object for gradient accumulation. Defaults to None.

    Returns:
        A tuple ``(optimizer, dataloader, gradient_handlers, lr_scheduler)`` of the
        wrapped objects; handler list and scheduler stay ``None`` if not supplied.

    More details about `gradient_handlers` could be found in
    `Gradient_handler <https://github.com/hpcaitech/ColossalAI/tree/main/colossalai/engine/gradient_handler>`_.

    More details about `lr_scheduler` could be found
    `lr_scheduler <https://github.com/hpcaitech/ColossalAI/tree/main/colossalai/nn/lr_scheduler>`_. and
    `how to adjust learning rate <https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate>`_.
    """
    wrapped_optimizer = GradAccumOptimizer(optimizer, accumulate_size=accumulate_size, model=model)
    wrapped_dataloader = GradAccumDataloader(dataloader, accumulate_size=accumulate_size)

    wrapped_handlers = gradient_handlers
    if gradient_handlers is not None:
        # Wrap each handler so reduction fires only on the last micro-step.
        wrapped_handlers = []
        for handler in gradient_handlers:
            wrapped_handlers.append(GradAccumGradientHandler(handler, accumulate_size))

    wrapped_scheduler = lr_scheduler
    if lr_scheduler is not None:
        wrapped_scheduler = GradAccumLrSchedulerByStep(lr_scheduler, accumulate_size=accumulate_size)

    return wrapped_optimizer, wrapped_dataloader, wrapped_handlers, wrapped_scheduler
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/async_engine.py
colossalai/legacy/inference/async_engine.py
import asyncio

from colossalai.inference.dynamic_batching.ray_dist_init import Driver

from .dynamic_batching.io_struct import RequestOutput
from .dynamic_batching.sampling_params import SamplingParams


class RequestTracker:
    """
    A class for trace down all the requests, abstraction for async
    """

    def __init__(self) -> None:
        # Queue of pending request ids; only ever fed via add_request.
        self._requests: asyncio.Queue[str] = asyncio.Queue()
        # Queue of finished outputs (or a StopIteration sentinel from add_stop).
        self._finished_requests: asyncio.Queue[RequestOutput] = asyncio.Queue()
        self.new_requests_event = None

    def __contains__(self, item):
        # Fix: `item in asyncio.Queue` raises TypeError — asyncio.Queue
        # implements neither __contains__ nor __iter__. Test membership
        # against the queue's backing deque instead.
        # NOTE(review): `_queue` is a private attribute of asyncio.Queue;
        # consider maintaining an explicit set of pending ids instead.
        return item in self._requests._queue

    def init_event(self):
        self.new_requests_event = asyncio.Event()

    def add_request(self, request_id: str):
        """Add a request to be sent to the engine on the next background
        loop iteration."""
        self._requests.put_nowait(request_id)
        self.new_requests_event.set()  # NOTE: we may find a better way to clear this event

    def add_stop(self):
        """
        Add a StopIteration flag to stop async generator.
        """
        self._finished_requests.put_nowait(StopIteration)
        self.new_requests_event.clear()

    def process_request_output(self, request_output: RequestOutput) -> None:
        """Process a request output from the engine."""
        self._finished_requests.put_nowait(request_output)

    async def wait_for_new_requests(self):
        await self.new_requests_event.wait()

    def __aiter__(self):
        return self

    async def __anext__(self) -> RequestOutput:
        result = await self._finished_requests.get()
        # The StopIteration sentinel (queued by add_stop) terminates the
        # async generator cleanly.
        if result is StopIteration:
            raise StopAsyncIteration
        return result


class Async_Engine:
    """
    Use an engine to launch RAY Driver --> RAY Worker --> Async_Manager
    Background loop: inference reqs in waiting list (Listen)
    Request Tracker: manage incoming requests and restore finished ones
    Generate: exposed func for add new input and return finished ones
    """

    def __init__(
        self,
        router_config,
        engine_config,
        start_engine_loop: bool = True,
    ) -> None:
        self.driver = Driver(router_config=router_config, engine_config=engine_config)
        self.background_loop = None
        self.start_engine_loop = start_engine_loop
        self._request_tracker = RequestTracker()

    def _step(self):
        """
        Logic for handling requests
        """
        request_outputs = self.driver.step()
        if request_outputs is not None:
            for request_output in request_outputs:
                self._request_tracker.process_request_output(request_output)
            self._request_tracker.add_stop()

    def abort_request(self, request_id: str):
        self.driver.abort(request_id)

    def _has_requests_in_progress(self):
        return self.driver.is_running()

    async def run_loop_fwd(self):
        """Background loop: wait for work when idle, otherwise step the driver."""
        # Fix: the original sampled _has_requests_in_progress() once before
        # entering the loop, so after the first wake-up it would spin forever
        # and never wait for new requests again. Re-check every iteration.
        while True:
            if not self._has_requests_in_progress():
                await self._request_tracker.wait_for_new_requests()
            self._step()
            await asyncio.sleep(0)

    @property
    def is_running(self):
        return self.background_loop is not None and not self.background_loop.done()

    def start_background_loop(self):
        if self.is_running:
            raise RuntimeError("Background loop is already running.")

        self._request_tracker.init_event()

        self.background_loop_unshielded = asyncio.get_event_loop().create_task(self.run_loop_fwd())
        self.background_loop = asyncio.shield(self.background_loop_unshielded)

    async def add_request(self, request_id: str, prompt: str, sampling_params: SamplingParams):
        self.driver.add_input(request_id, prompt, sampling_params)
        self._request_tracker.add_request(request_id)

    async def generate(self, request_id: str, prompt: str, sampling_params: SamplingParams):
        """
        The only exposed func, adding new request and return a async generator that yields the existing results.
        """
        try:
            if not self.is_running:
                self.start_background_loop()

            await self.add_request(request_id, prompt, sampling_params)

            async for request_output in self._request_tracker:
                yield request_output

        except (Exception, asyncio.CancelledError) as e:
            # If there is an exception or coroutine is cancelled, abort the request.
            self.abort_request(request_id)
            raise e
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/async_manager.py
colossalai/legacy/inference/async_manager.py
from typing import List

from .dynamic_batching.io_struct import Batch, Req, RequestOutput
from .manager import DynamicBatchManager
from .tensor_parallel import TPInferEngine


class Async_DynamicBatchManager(DynamicBatchManager):
    """Variant of :class:`DynamicBatchManager` whose step logic returns finished
    outputs directly (for the async engine) instead of yielding them."""

    def __init__(
        self,
        tp_engine: TPInferEngine,
        max_total_token_num: int,
        batch_max_tokens: int,
        model: str,
        tokenizer=None,
        eos_id=None,
        log_stats=True,
        log_stats_interval=10,
        running_batch: Batch = None,
        waiting_req_list: List = None,
    ):
        """
        Args:
            tp_engine : The tp engine that dynamic batch manager hold, defined before dynamic batch manager
            max_total_token_num : max_total_token_num for memory manager, default to: max batch size * (max input len + max output len)
            batch_max_tokens : max tokens of one batch, default to (max input + output len) * num_requests
            running_max_req_size : max request size of running batch, equals to MAX_BATCH_SIZE of tp engine
            eos_id : The end token of a seq
            model: the model weight dir path, the app will load config, weights and tokenizer from this dir
            log_stats : whether to log stats
            log_stats_interval : log stats interval
            running_batch : running batch
            waiting_req_list : list of waiting requests, initialized before dynamic batch manager
        """
        # Fix: the original used a mutable default (`waiting_req_list: List = []`)
        # which is shared across all instances.
        if waiting_req_list is None:
            waiting_req_list = []
        super().__init__(
            tp_engine,
            max_total_token_num,
            batch_max_tokens,
            model,
            tokenizer,
            eos_id,
            log_stats,
            log_stats_interval,
            running_batch,
            waiting_req_list,
        )

    def _step(self):
        """
        Logic for handling requests
        """
        has_new_finished = False
        if self.running_batch is None:
            # No batch in flight: try to promote waiting requests to a new batch.
            new_batch = self.req_queue.generate_new_batch(self.running_batch)
            if new_batch is not None:
                self.stats_tool.count_prompt_tokens(new_batch)
                self.running_batch = new_batch
                has_new_finished, outputs = self._prefill_batch(self.running_batch)
                self._filter_running_batch()
                self.has_wait_tokens = 0

        else:
            if self.has_wait_tokens < self.max_wait_tokens:
                # Keep decoding the current batch until the wait budget is spent.
                self.stats_tool.count_output_tokens(self.running_batch)
                has_new_finished, outputs = self._decode_batch(self.running_batch)
                self._filter_running_batch()
                self.has_wait_tokens += 1

            else:
                # Wait budget exhausted: try to admit a mini batch and merge it in.
                new_mini_batch = self.req_queue.generate_new_batch(self.running_batch)
                if new_mini_batch is not None:
                    self.stats_tool.count_prompt_tokens(new_mini_batch)
                    has_new_finished, outputs = self._prefill_batch(new_mini_batch)
                    if not new_mini_batch.is_clear():
                        self._merge_batch(self.running_batch, new_mini_batch)
                        self.running_batch.merge(new_mini_batch)
                    self.has_wait_tokens = 0

                else:
                    self.stats_tool.count_output_tokens(self.running_batch)
                    has_new_finished, outputs = self._decode_batch(self.running_batch)
                    self._filter_running_batch()
                    self.has_wait_tokens += 1

        if has_new_finished:
            return outputs
        return None

    def _prefill_batch(self, batch):
        """
        For all batches, no matter it is a new batch or a mini batch, we need to do prefill first.
        """
        self._init_batch(batch)

        # TODO: figure out if cache and batch id is needed
        ans = self.engine._prefill_batch(batch.batch_id)
        req_to_out_token_id = ans
        self._add_token_id_to_req(batch, req_to_out_token_id)
        has_new_finished_req = batch.mark_finished_req(self.eos_id, self.engine.max_output_len)
        outputs = self._handle_finish_req(batch, has_new_finished_req)
        return has_new_finished_req, outputs
        # delete finished reqs

    def _decode_batch(self, batch: Batch):
        """
        Decoding process
        """
        ans = self.engine._decode_batch(batch.batch_id)
        req_to_out_token_id = ans
        self._add_token_id_to_req(batch, req_to_out_token_id)
        has_new_finished_req = batch.mark_finished_req(self.eos_id, self.engine.max_output_len)
        outputs = self._handle_finish_req(batch, has_new_finished_req)
        return has_new_finished_req, outputs

    def _handle_finish_req(self, batch: Batch, has_new_finished_req):
        """Detach finished requests from *batch* and decode them; returns the
        list of RequestOutput or None when nothing finished."""
        if has_new_finished_req:
            finished_reqs = batch.filter_finished()
            if batch.is_clear():
                self._remove_batch(batch)
            else:
                self._filter_batch(batch)
            return self._output_process(finished_reqs)
        return None

    def _output_process(self, finished_reqs: List[Req]):
        """
        Process the output of a batch.
        """
        outputs = []
        for req in finished_reqs:
            output = self.tokenizer.decode(req.output_ids)
            outputs.append(RequestOutput(req.request_id, req.prompts, req.prompt_ids, output))
        return outputs


def start_dynamic_batching(args, tp_engine, waiting_req_list):
    """Build an Async_DynamicBatchManager from parsed CLI-style args.

    Fix: the original wrapped construction in
    ``except Exception: raise Exception`` which replaced the real error (and
    its traceback) with a bare, message-less Exception. Let failures propagate.
    """
    return Async_DynamicBatchManager(
        tp_engine=tp_engine,
        max_total_token_num=args.max_total_token_num,
        batch_max_tokens=args.batch_max_tokens,
        eos_id=args.eos_id,
        model=args.model,
        log_stats=not args.disable_log_stats,
        log_stats_interval=args.log_stats_interval,
        waiting_req_list=waiting_req_list,
    )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/__init__.py
colossalai/legacy/inference/__init__.py
from .hybridengine import CaiInferEngine from .hybridengine.polices import LlamaModelInferPolicy __all__ = ["CaiInferEngine", "LlamaModelInferPolicy"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/manager.py
colossalai/legacy/inference/manager.py
# Adapted from https://github.com/ModelTC/lightllm

import time
from typing import List

from .dynamic_batching.get_tokenizer import get_tokenizer
from .dynamic_batching.infer_batch import InferBatch
from .dynamic_batching.io_struct import Batch, Req
from .dynamic_batching.req_queue import ReqQueue
from .dynamic_batching.sampling_params import SamplingParams
from .dynamic_batching.stats import Stats
from .tensor_parallel import TPInferEngine


class DynamicBatchManager:
    def __init__(
        self,
        tp_engine: TPInferEngine,
        max_total_token_num,
        batch_max_tokens,
        model,
        tokenizer=None,
        eos_id=None,
        log_stats=True,
        log_stats_interval=10,
        running_batch: Batch = None,
        waiting_req_list: List = None,
    ):
        """
        Args:
            tp_engine : The tp engine that dynamic batch manager hold, defined before dynamic batch manager
            max_total_token_num : max_total_token_num for memory manager, default to: max batch size * (max input len + max output len)
            batch_max_tokens : max tokens of one batch, default to (max input + output len) * num_requests
            running_max_req_size : max request size of running batch, equals to MAX_BATCH_SIZE of tp engine
            eos_id : The end token of a seq
            model: the model weight dir path, the app will load config, weights and tokenizer from this dir
            log_stats : whether to log stats
            log_stats_interval : log stats interval
            running_batch : running batch
            waiting_req_list : list of waiting requests, initialized before dynamic batch manager
        """
        # Fix: the original used a mutable default (`waiting_req_list: List = []`)
        # shared across all instances.
        if waiting_req_list is None:
            waiting_req_list = []
        self.engine = tp_engine
        self.max_total_token_num = max_total_token_num
        running_max_req_size = self.engine.max_batch_size if self.engine is not None else 2
        self.req_queue = ReqQueue(max_total_token_num, batch_max_tokens, running_max_req_size, waiting_req_list)
        # all the inputs should be put into req_queue: waiting req list
        assert max_total_token_num >= self.engine.max_batch_size * (
            self.engine.max_input_len + self.engine.max_output_len
        ), "max_total_token_num should be greater than max_batch_size * (max_input_len+max_output_len)"
        assert (
            batch_max_tokens >= self.engine.max_input_len + self.engine.max_output_len
        ), "batch_max_tokens should be greater than (max_input_len+max_output_len)"
        self.running_batch: Batch = running_batch
        self.eos_id = eos_id
        self.has_wait_tokens = 0
        self.max_wait_tokens = 10
        self.model = model

        self.stats_tool = Stats(log_stats, log_stats_interval)
        self.mem_usage_interval = log_stats_interval * 2
        self.tokenizer = get_tokenizer(tokenizer_name=self.model) if tokenizer is None else tokenizer
        # Fix: identity comparison (`is None`) instead of `== None`.
        if self.eos_id is None:
            self.eos_id = self.tokenizer.eos_token_id

    def add_req(self, request_id: str, prompt_ids: List[int], sampling_params: SamplingParams, prompts: str = ""):
        """
        Add new request to req queue, during initialization all requests are held in waiting list.
        """
        # Clamp the requested generation length to the engine's maximum.
        sampling_params.max_new_tokens = (
            self.engine.max_output_len
            if sampling_params.max_new_tokens > self.engine.max_output_len
            else sampling_params.max_new_tokens
        )
        req = Req(request_id, prompt_ids, sampling_params, prompts)
        self.req_queue.append(req)
        return

    def add_input(self, request_id, prompts, sampling_params):
        """
        Encode and Add new input to req queue. support one sequence input for now.
        """
        prompt_ids = self.tokenizer.encode(prompts)
        prompt_len = len(prompt_ids)
        if prompt_len > self.engine.max_input_len:
            raise ValueError(f"the input prompt token len {prompt_len} is too long > {self.engine.max_input_len}")
        sampling_params.stop_sentences_to_token_ids(self.tokenizer)
        self.add_req(request_id, prompt_ids, sampling_params, prompts)
        return

    def abort(self, request_id):
        """Mark a request as finished/aborted, whether running or still waiting."""
        if self.running_batch is not None:
            for req in self.running_batch.reqs:
                if req.request_id == request_id:
                    req.has_generate_finished = True
                    req.aborted = True
        for req in self.req_queue.waiting_req_list:
            if req.request_id == request_id:
                req.has_generate_finished = True
                req.aborted = True
        return

    def loop_for_fwd(self):
        """
        The main loop for a dynamic batching process.
        """
        counter_count = 0
        # Run while there is a batch in flight or requests still waiting.
        while self.running_batch is not None or self.req_queue.waiting_req_list:
            yield from self._step()
            counter_count += 1
            if self.running_batch is not None:
                if counter_count % self.mem_usage_interval == 0:
                    print(
                        "current batch size:",
                        len(self.running_batch.reqs),
                        "token used ratio:",
                        self.running_batch.calcu_used_tokens() / self.max_total_token_num,
                    )
                self.stats_tool.print_stats()

            if self.running_batch is None:
                time.sleep(0.1)  # 10ms

    def _step(self):
        """
        Logic for handling requests
        """
        if self.running_batch is None:
            # No batch in flight: try to promote waiting requests to a new batch.
            new_batch = self.req_queue.generate_new_batch(self.running_batch)
            if new_batch is not None:
                self.stats_tool.count_prompt_tokens(new_batch)
                self.running_batch = new_batch
                yield from self._prefill_batch(self.running_batch)
                self._filter_running_batch()
                self.has_wait_tokens = 0
            return

        if self.has_wait_tokens < self.max_wait_tokens:
            # Keep decoding the current batch until the wait budget is spent.
            self.stats_tool.count_output_tokens(self.running_batch)
            yield from self._decode_batch(self.running_batch)
            self._filter_running_batch()
            self.has_wait_tokens += 1
            return
        else:
            # Wait budget exhausted: try to admit a mini batch and merge it in.
            new_mini_batch = self.req_queue.generate_new_batch(self.running_batch)
            if new_mini_batch is not None:
                self.stats_tool.count_prompt_tokens(new_mini_batch)
                yield from self._prefill_batch(new_mini_batch)
                if not new_mini_batch.is_clear():
                    self._merge_batch(self.running_batch, new_mini_batch)
                    self.running_batch.merge(new_mini_batch)
                self.has_wait_tokens = 0
            else:
                self.stats_tool.count_output_tokens(self.running_batch)
                yield from self._decode_batch(self.running_batch)
                self._filter_running_batch()
                self.has_wait_tokens += 1
        return

    def _init_batch(self, batch: Batch, dtype="fp16"):
        """Register *batch* with the engine's KV cache (engine.cache[batch_id])."""
        reqs = [r.to_rpc_obj() for r in batch.reqs]
        batch_id = batch.batch_id
        import torch

        if dtype == "fp16":
            dtype = torch.float16
        else:
            assert False, "error dtype"

        batch_data = InferBatch.init_batch(
            batch_id,
            reqs,
            dtype,
            torch.cuda.current_device(),
            self.engine.cache_manager,
            self.engine.model.config.vocab_size,
            self.engine.max_input_len + self.engine.max_output_len,
        )
        self.engine.cache[batch_id] = batch_data

    def _prefill_batch(self, batch):
        """
        For all batches, no matter it is a new batch or a mini batch, we need to do prefill first.
        """
        self._init_batch(batch)

        # TODO: figure out if cache and batch id is needed
        ans = self.engine._prefill_batch(batch.batch_id)
        req_to_out_token_id = ans
        self._add_token_id_to_req(batch, req_to_out_token_id)
        has_new_finished_req = batch.mark_finished_req(self.eos_id, self.engine.max_output_len)
        yield from self._handle_finish_req(batch, has_new_finished_req)
        # delete finished reqs

    def _decode_batch(self, batch: Batch):
        """
        Decoding process
        """
        ans = self.engine._decode_batch(batch.batch_id)
        req_to_out_token_id = ans
        self._add_token_id_to_req(batch, req_to_out_token_id)
        has_new_finished_req = batch.mark_finished_req(self.eos_id, self.engine.max_output_len)
        yield from self._handle_finish_req(batch, has_new_finished_req)

    def _filter_batch(self, batch: Batch):
        """Drop finished requests from the cached engine batch, keeping the rest."""
        batch_id = batch.batch_id
        req_id_list = [r.request_id for r in batch.reqs]
        batch = self.engine.cache.pop(batch_id)
        filter_batch = batch.filter(req_id_list)
        del batch
        self.engine.cache[batch_id] = filter_batch

    def _merge_batch(self, batch1, batch2):
        """
        Merge new mini batch into running batch.
        """
        batch1 = self.engine.cache.pop(batch1.batch_id)
        batch2 = self.engine.cache.pop(batch2.batch_id)

        m_batch = InferBatch.merge(batch1, batch2)
        self.engine.cache[batch1.batch_id] = m_batch
        del batch1
        del batch2

    def _remove_batch(self, batch):
        """
        Remove finished batch.
        """
        batch = self.engine.cache.pop(batch.batch_id)
        batch.free_self()
        del batch

    def _handle_finish_req(self, batch: Batch, has_new_finished_req):
        """Detach finished requests from *batch* and yield their decoded outputs."""
        if has_new_finished_req:
            finished_reqs = batch.filter_finished()
            if batch.is_clear():
                self._remove_batch(batch)
            else:
                self._filter_batch(batch)
            yield from self._output_process(finished_reqs)

    def _filter_running_batch(self):
        # Drop the running batch once every request in it has finished.
        if self.running_batch is not None and self.running_batch.is_clear():
            self.running_batch = None

    def _add_token_id_to_req(self, batch: Batch, req_ans):
        """Append each newly generated token (and its metadata) to its request."""
        for req_id, (new_token_id, new_gen_metadata) in req_ans.items():
            req = batch.id_to_reqs[req_id]
            req.output_ids.append(new_token_id)
            req.output_metadata_list.append(new_gen_metadata)
        return

    def _output_process(self, finished_reqs: List[Req]):
        """
        Process the output of a batch.
        """
        for req in finished_reqs:
            output = self.tokenizer.decode(req.output_ids)
            yield req.prompts + output

    def clean_up(self):
        # this logic should be implemented in the future.
        pass

    def generate(self, request_id, prompts, sampling_params):
        """
        Generate the output of a request.
        """
        self.add_input(request_id, prompts, sampling_params)
        return self.loop_for_fwd()

    def is_running(self):
        return self.running_batch is not None or self.req_queue.waiting_req_list


def start_dynamic_batching(args, tp_engine, waiting_req_list):
    """Build a DynamicBatchManager from parsed CLI-style args.

    Fix: the original wrapped construction in
    ``except Exception: raise Exception`` which replaced the real error (and
    its traceback) with a bare, message-less Exception. Let failures propagate.
    """
    return DynamicBatchManager(
        tp_engine=tp_engine,
        max_total_token_num=args.max_total_token_num,
        batch_max_tokens=args.batch_max_tokens,
        eos_id=args.eos_id,
        model=args.model,
        log_stats=not args.disable_log_stats,
        log_stats_interval=args.log_stats_interval,
        waiting_req_list=waiting_req_list,
    )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/gptq/__init__.py
colossalai/legacy/inference/quant/gptq/__init__.py
from .cai_gptq import HAS_AUTO_GPTQ if HAS_AUTO_GPTQ: from .cai_gptq import CaiGPTQLinearOp, CaiQuantLinear
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/gptq/cai_gptq/gptq_op.py
colossalai/legacy/inference/quant/gptq/cai_gptq/gptq_op.py
import torch from colossalai.kernel.triton import gptq_fused_linear_triton class CaiGPTQLinearOp(torch.nn.Module): def __init__(self, gptq_group_size, gptq_quant_bits): super(CaiGPTQLinearOp, self).__init__() self.group_size = gptq_group_size self.bits = gptq_quant_bits self.maxq = 2**self.bits - 1 self.empty_tensor = torch.zeros(4, device=torch.cuda.current_device()) def forward( self, input: torch.Tensor, weight: torch.Tensor, weight_scales: torch.Tensor, weight_zeros: torch.Tensor, g_idx: torch.Tensor = None, act_type=0, bias: torch.Tensor = None, residual: torch.Tensor = None, qkv_fused=False, ): add_bias = True if bias is None: bias = self.empty_tensor add_bias = False add_residual = True if residual is None: residual = self.empty_tensor add_residual = False x = input.view(-1, input.shape[-1]) out = gptq_fused_linear_triton( x, weight, weight_scales, weight_zeros, bias, residual, self.bits, self.maxq, self.group_size, qkv_fused, add_bias, add_residual, act_type=act_type, g_idx=g_idx, ) if qkv_fused: out = out.view(3, input.shape[0], input.shape[1], weight.shape[-1]) else: out = out.view(input.shape[0], input.shape[1], weight.shape[-1]) return out
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/gptq/cai_gptq/cai_quant_linear.py
colossalai/legacy/inference/quant/gptq/cai_gptq/cai_quant_linear.py
# Adapted from AutoGPTQ auto_gptq: https://github.com/PanQiWei/AutoGPTQ import math import warnings from typing import List, Union import numpy as np import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from colossalai.lazy import LazyInitContext from colossalai.shardformer.layer import ParallelModule from .gptq_op import CaiGPTQLinearOp HAS_GPTQ_CUDA = False try: from colossalai.kernel.op_builder.gptq import GPTQBuilder gptq_cuda = GPTQBuilder().load() HAS_GPTQ_CUDA = True except ImportError: warnings.warn("CUDA gptq is not installed") HAS_GPTQ_CUDA = False class CaiQuantLinear(nn.Module): def __init__(self, bits, groupsize, infeatures, outfeatures, bias, tp_size=1, tp_rank=0, row_split=False): super().__init__() if bits not in [2, 4, 8]: raise NotImplementedError("Only 2,4,8 bits are supported.") self.infeatures = infeatures self.outfeatures = outfeatures self.bits = bits self.maxq = 2**self.bits - 1 self.groupsize = groupsize if groupsize != -1 else infeatures self.register_buffer("qweight", torch.zeros((infeatures // 32 * self.bits, outfeatures), dtype=torch.int32)) self.register_buffer( "qzeros", torch.zeros((math.ceil(infeatures / self.groupsize), outfeatures // 32 * self.bits), dtype=torch.int32), ) self.register_buffer( "scales", torch.zeros((math.ceil(infeatures / self.groupsize), outfeatures), dtype=torch.float16) ) if row_split: self.register_buffer( "g_idx", torch.tensor( [(i + (tp_rank * self.infeatures)) // self.groupsize for i in range(infeatures)], dtype=torch.int32 ), ) else: self.register_buffer( "g_idx", torch.tensor([i // self.groupsize for i in range(infeatures)], dtype=torch.int32) ) if bias: self.register_buffer("bias", torch.zeros((outfeatures), dtype=torch.float16)) else: self.bias = None self.gptq_linear = CaiGPTQLinearOp(groupsize, bits) self.q4 = None self.empty_tensor = torch.empty((1, 1), device="meta") self.tp_size = tp_size self.tp_rank = tp_rank self.row_split = row_split def 
pack(self, linear, scales, zeros, g_idx=None): g_idx = ( g_idx.clone() if g_idx is not None else torch.tensor([i // self.groupsize for i in range(self.infeatures)], dtype=torch.int32) ) scales = scales.t().contiguous() zeros = zeros.t().contiguous() scale_zeros = zeros * scales half_scales = scales.clone().half() # print("scale shape ", scales.shape, scale_zeros.shape, linear.weight.shape) self.scales = scales.clone().half() if linear.bias is not None: self.bias = linear.bias.clone().half() pbits = 32 ptype = torch.int32 unsign_type = np.uint32 sign_type = np.int32 intweight = [] for idx in range(self.infeatures): intweight.append( torch.round((linear.weight.data[:, idx] + scale_zeros[g_idx[idx]]) / half_scales[g_idx[idx]]).to(ptype)[ :, None ] ) intweight = torch.cat(intweight, dim=1) intweight = intweight.t().contiguous() intweight = intweight.numpy().astype(unsign_type) qweight = np.zeros((intweight.shape[0] // pbits * self.bits, intweight.shape[1]), dtype=unsign_type) i = 0 row = 0 while row < qweight.shape[0]: if self.bits in [2, 4, 8]: for j in range(i, i + (pbits // self.bits)): qweight[row] |= intweight[j] << (self.bits * (j - i)) i += pbits // self.bits row += 1 else: raise NotImplementedError("Only 2,4,8 bits are supported.") qweight = qweight.astype(sign_type) qweight1 = torch.from_numpy(qweight) qweight1 = qweight1.contiguous() # .to("cuda") self.qweight.data.copy_(qweight1) qzeros = np.zeros((zeros.shape[0], zeros.shape[1] // pbits * self.bits), dtype=unsign_type) zeros -= 1 zeros = zeros.numpy().astype(unsign_type) i = 0 col = 0 while col < qzeros.shape[1]: if self.bits in [2, 4, 8]: for j in range(i, i + (pbits // self.bits)): qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i)) i += pbits // self.bits col += 1 else: raise NotImplementedError("Only 2,4,8 bits are supported.") qzeros = qzeros.astype(sign_type) qzeros = torch.from_numpy(qzeros) qzeros = qzeros self.qzeros.data.copy_(qzeros) if torch.equal(self.g_idx.to(g_idx.device), g_idx): 
self.g_idx = None else: self.g_idx = g_idx def init_q4(self): assert self.qweight.device.type == "cuda" self.q4_width = self.qweight.shape[1] if self.g_idx is not None: if self.row_split and torch.equal( self.g_idx, torch.tensor( [(i + (self.tp_rank * self.infeatures)) // self.groupsize for i in range(self.infeatures)], dtype=torch.int32, device=self.g_idx.device, ), ): self.g_idx = None elif torch.equal( self.g_idx, torch.tensor( [i // self.groupsize for i in range(self.infeatures)], dtype=torch.int32, device=self.g_idx.device ), ): self.g_idx = None if self.g_idx is not None: g_idx = self.g_idx.to("cpu") else: g_idx = self.empty_tensor self.q4 = gptq_cuda.make_q4(self.qweight, self.qzeros, self.scales, g_idx, torch.cuda.current_device()) torch.cuda.synchronize() def forward(self, x): outshape = x.shape[:-1] + (self.outfeatures,) if HAS_GPTQ_CUDA and self.bits == 4: if self.q4 is None: self.init_q4() x = x.view(-1, x.shape[-1]) output = torch.empty((x.shape[0], self.outfeatures), dtype=torch.float16, device=x.device) gptq_cuda.q4_matmul(x.half(), self.q4, output) if self.bias is not None and (not self.row_split or self.tp_size == 1): output.add_(self.bias) else: if self.bias is not None and (not self.row_split or self.tp_size == 1): bias = self.bias else: bias = None output = self.gptq_linear( x, self.qweight, self.scales, self.qzeros, g_idx=self.g_idx, bias=bias, ) return output.view(outshape) def split_column_copy(gptq_linear, cai_linear, tp_size=1, tp_rank=0, split_num=1): qweights = gptq_linear.qweight.split(gptq_linear.out_features // split_num, dim=-1) qzeros = gptq_linear.qzeros.split(gptq_linear.out_features // (32 // cai_linear.bits) // split_num, dim=-1) scales = gptq_linear.scales.split(gptq_linear.out_features // split_num, dim=-1) g_idx = gptq_linear.g_idx if gptq_linear.bias is not None: bias = gptq_linear.bias.split(gptq_linear.out_features // split_num, dim=-1) cai_split_out_features = cai_linear.outfeatures // split_num zero_split_block = 
cai_linear.outfeatures // (32 // cai_linear.bits) // split_num for i in range(split_num): cai_linear.qweight[:, i * cai_split_out_features : (i + 1) * cai_split_out_features] = qweights[i][ :, tp_rank * cai_split_out_features : (tp_rank + 1) * cai_split_out_features ] cai_linear.qzeros[:, i * zero_split_block : (i + 1) * zero_split_block] = qzeros[i][ :, tp_rank * zero_split_block : (tp_rank + 1) * zero_split_block ] cai_linear.scales[:, i * cai_split_out_features : (i + 1) * cai_split_out_features] = scales[i][ :, tp_rank * cai_split_out_features : (tp_rank + 1) * cai_split_out_features ] if cai_linear.bias is not None: cai_linear.bias[i * cai_split_out_features : (i + 1) * cai_split_out_features] = bias[i][ tp_rank * cai_split_out_features : (tp_rank + 1) * cai_split_out_features ] cai_linear.g_idx.copy_(g_idx) def split_row_copy(gptq_linear, cai_linear, tp_rank=0, split_num=1): qweights = gptq_linear.qweight.split(gptq_linear.in_features // split_num, dim=0) qzeros = gptq_linear.qzeros.split(gptq_linear.in_features // split_num, dim=0) scales = gptq_linear.scales.split(gptq_linear.in_features // split_num, dim=0) g_idxs = gptq_linear.g_idx.split(gptq_linear.in_features // split_num, dim=0) cai_split_in_features = cai_linear.infeatures // (32 // cai_linear.bits) // split_num zero_split_block = cai_linear.infeatures // cai_linear.groupsize // split_num idx_split_features = cai_linear.infeatures // split_num for i in range(split_num): cai_linear.qweight[i * cai_split_in_features : (i + 1) * cai_split_in_features, :] = qweights[i][ tp_rank * cai_split_in_features : (tp_rank + 1) * cai_split_in_features, : ] cai_linear.qzeros[i * zero_split_block : (i + 1) * zero_split_block, :] = qzeros[i][ tp_rank * zero_split_block : (tp_rank + 1) * zero_split_block, : ] cai_linear.scales[i * zero_split_block : (i + 1) * zero_split_block, :] = scales[i][ tp_rank * zero_split_block : (tp_rank + 1) * zero_split_block, : ] cai_linear.g_idx[i * idx_split_features : (i + 1) * 
idx_split_features] = g_idxs[i][ tp_rank * idx_split_features : (tp_rank + 1) * idx_split_features ] if cai_linear.bias is not None: cai_linear.bias.copy_(gptq_linear.bias) class RowCaiQuantLinear(CaiQuantLinear, ParallelModule): def __init__(self, bits, groupsize, infeatures, outfeatures, bias, tp_size=1, tp_rank=0, row_split=False): super().__init__( bits, groupsize, infeatures, outfeatures, bias, tp_size=tp_size, tp_rank=tp_rank, row_split=row_split ) self.process_group = None @staticmethod def from_native_module( module: nn.Module, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs ) -> ParallelModule: LazyInitContext.materialize(module) # get the attributes in_features = module.in_features # ensure only one process group is passed if isinstance(process_group, (list, tuple)): assert len(process_group) == 1, f"Expected only one process group, got {len(process_group)}." process_group = process_group[0] tp_size = dist.get_world_size(process_group) tp_rank = dist.get_rank(process_group) if in_features < tp_size: return module if in_features % tp_size != 0: raise ValueError( f"The size of in_features:{in_features} is not integer multiples of tensor parallel size: {tp_size}!" 
) linear_1d = RowCaiQuantLinear( module.bits, module.group_size, module.in_features // tp_size, module.out_features, module.bias is not None, tp_size=tp_size, tp_rank=tp_rank, row_split=True, ) linear_1d.process_group = process_group split_row_copy(module, linear_1d, tp_rank=tp_rank, **kwargs) return linear_1d def forward(self, x): output = super().forward(x) if self.tp_size > 1: dist.all_reduce(output, op=dist.ReduceOp.SUM, group=self.process_group) if self.bias is not None: output.add_(self.bias) return output class ColCaiQuantLinear(CaiQuantLinear, ParallelModule): def __init__(self, bits, groupsize, infeatures, outfeatures, bias, tp_size=1, tp_rank=0, row_split=False): super().__init__( bits, groupsize, infeatures, outfeatures, bias, tp_size=tp_size, tp_rank=tp_rank, row_split=row_split ) self.process_group = None @staticmethod def from_native_module( module: nn.Module, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs ) -> ParallelModule: LazyInitContext.materialize(module) # get the attributes in_features = module.in_features # ensure only one process group is passed if isinstance(process_group, (list, tuple)): assert len(process_group) == 1, f"Expected only one process group, got {len(process_group)}." process_group = process_group[0] tp_size = dist.get_world_size(process_group) tp_rank = dist.get_rank(process_group) if in_features < tp_size: return module if in_features % tp_size != 0: raise ValueError( f"The size of in_features:{in_features} is not integer multiples of tensor parallel size: {tp_size}!" ) linear_1d = ColCaiQuantLinear( module.bits, module.group_size, module.in_features, module.out_features // tp_size, module.bias is not None, tp_size=tp_size, tp_rank=tp_rank, ) linear_1d.process_group = process_group split_column_copy(module, linear_1d, tp_rank=tp_rank, **kwargs) return linear_1d
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/gptq/cai_gptq/__init__.py
colossalai/legacy/inference/quant/gptq/cai_gptq/__init__.py
import warnings HAS_AUTO_GPTQ = False try: import auto_gptq HAS_AUTO_GPTQ = True except ImportError: warnings.warn("please install auto-gptq from https://github.com/PanQiWei/AutoGPTQ") HAS_AUTO_GPTQ = False if HAS_AUTO_GPTQ: from .cai_quant_linear import CaiQuantLinear, ColCaiQuantLinear, RowCaiQuantLinear from .gptq_op import CaiGPTQLinearOp
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/smoothquant/__init__.py
colossalai/legacy/inference/quant/smoothquant/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/smoothquant/models/base_model.py
colossalai/legacy/inference/quant/smoothquant/models/base_model.py
# Adapted from AutoGPTQ: https://github.com/PanQiWei/AutoGPTQ # Adapted from smoothquant: https://github.com/mit-han-lab/smoothquant/blob/main/smoothquant/calibration.py # Adapted from smoothquant: https://github.com/mit-han-lab/smoothquant/blob/main/smoothquant/smooth.py import os import warnings from abc import abstractmethod from functools import partial from os.path import isdir, isfile, join from typing import Dict, List, Optional, Union import accelerate import numpy as np import torch import torch.nn as nn import transformers from safetensors.torch import save_file as safe_save from tqdm import tqdm from transformers import AutoConfig, AutoModelForCausalLM, PreTrainedModel from transformers.modeling_utils import no_init_weights from transformers.utils.generic import ContextManagers from transformers.utils.hub import PushToHubMixin, cached_file from colossalai.inference.tensor_parallel.batch_infer_state import BatchInferState from colossalai.inference.tensor_parallel.kvcache_manager import MemoryManager SUPPORTED_MODELS = ["llama"] class BaseSmoothForCausalLM(nn.Module, PushToHubMixin): layer_type: str = None def __init__(self, model: PreTrainedModel, quantized: bool = False): super().__init__() self.model = model self.model_type = self.model.config.model_type self._quantized = quantized self.config = self.model.config self.cache_manager = None self.max_total_token_num = 0 @property def quantized(self): return self._quantized def init_cache_manager(self, max_total_token_num=2048): if self.config.model_type == "llama": head_num = self.config.num_key_value_heads layer_num = self.config.num_hidden_layers head_dim = self.config.hidden_size // head_num self.cache_manager = MemoryManager(max_total_token_num, torch.int8, head_num, head_dim, layer_num) self.max_total_token_num = max_total_token_num def init_batch_state(self, max_output_len=256, **kwargs): input_ids = kwargs["input_ids"] batch_size = len(input_ids) seq_start_indexes = torch.zeros(batch_size, 
dtype=torch.int32, device="cuda") seq_lengths = torch.zeros(batch_size, dtype=torch.int32, device="cuda") start_index = 0 max_len_in_batch = -1 for i in range(batch_size): seq_len = len(input_ids[i]) seq_lengths[i] = seq_len seq_start_indexes[i] = start_index start_index += seq_len max_len_in_batch = seq_len if seq_len > max_len_in_batch else max_len_in_batch if "max_total_token_num" in kwargs.keys(): max_total_token_num = kwargs["max_total_token_num"] self.init_cache_manager(max_total_token_num) if "max_new_tokens" in kwargs.keys(): max_output_len = kwargs["max_new_tokens"] if batch_size * (max_len_in_batch + max_output_len) > self.max_total_token_num: max_total_token_num = batch_size * (max_len_in_batch + max_output_len) warnings.warn(f"reset max tokens to {max_total_token_num}") self.init_cache_manager(max_total_token_num) block_loc = torch.empty((batch_size, max_len_in_batch + max_output_len), dtype=torch.long, device="cuda") batch_infer_state = BatchInferState(batch_size, max_len_in_batch) batch_infer_state.seq_len = seq_lengths.to("cuda") batch_infer_state.start_loc = seq_start_indexes.to("cuda") batch_infer_state.block_loc = block_loc batch_infer_state.decode_layer_id = 0 batch_infer_state.is_context_stage = True batch_infer_state.set_cache_manager(self.cache_manager) batch_infer_state.cache_manager.free_all() return batch_infer_state @abstractmethod @torch.inference_mode() def quantize( self, examples: List[Dict[str, Union[List[int], torch.LongTensor]]], ): if self.quantized: raise EnvironmentError("can't execute quantize because the model is quantized.") def forward(self, *args, **kwargs): return self.model(*args, **kwargs) def generate(self, **kwargs): """shortcut for model.generate""" batch_infer_state = self.init_batch_state(**kwargs) if self.config.model_type == "llama": setattr(self.model.model, "infer_state", batch_infer_state) with torch.inference_mode(): return self.model.generate(**kwargs) def prepare_inputs_for_generation(self, *args, **kwargs): 
"""shortcut for model.prepare_inputs_for_generation""" return self.model.prepare_inputs_for_generation(*args, **kwargs) def collect_act_scales(self, model, tokenizer, dataset, device, num_samples=512, seq_len=512): for text in tqdm(dataset): input_ids = tokenizer(text, return_tensors="pt", max_length=seq_len, truncation=True).input_ids.to(device) model(input_ids) def collect_act_dict(self, model, tokenizer, dataset, act_dict, device, num_samples=512, seq_len=512): pbar = tqdm(dataset) for text in pbar: input_ids = tokenizer(text, return_tensors="pt", max_length=seq_len, truncation=True).input_ids.to(device) model(input_ids) mean_scale = np.mean([v["input"] for v in act_dict.values()]) pbar.set_description(f"Mean input scale: {mean_scale:.2f}") # Adatped from https://github.com/mit-han-lab/smoothquant/blob/main/smoothquant/calibration.py def get_act_scales(self, model, tokenizer, dataset, num_samples=512, seq_len=512): model.eval() device = next(model.parameters()).device act_scales = {} def stat_tensor(name, tensor): hidden_dim = tensor.shape[-1] tensor = tensor.view(-1, hidden_dim).abs().detach() comming_max = torch.max(tensor, dim=0)[0].float().cpu() if name in act_scales: act_scales[name] = torch.max(act_scales[name], comming_max) else: act_scales[name] = comming_max def stat_input_hook(m, x, y, name): if isinstance(x, tuple): x = x[0] stat_tensor(name, x) hooks = [] for name, m in model.named_modules(): if isinstance(m, nn.Linear): hooks.append(m.register_forward_hook(partial(stat_input_hook, name=name))) self.collect_act_scales(model, tokenizer, dataset, device, num_samples, seq_len) for h in hooks: h.remove() return act_scales # Adapted from https://github.com/mit-han-lab/smoothquant/blob/main/smoothquant/smooth.py @torch.no_grad() def smooth_ln_fcs(self, ln, fcs, act_scales, alpha=0.5): if not isinstance(fcs, list): fcs = [fcs] for fc in fcs: assert isinstance(fc, nn.Linear) assert ln.weight.numel() == fc.in_features == act_scales.numel() device, dtype = 
fcs[0].weight.device, fcs[0].weight.dtype act_scales = act_scales.to(device=device, dtype=dtype) weight_scales = torch.cat([fc.weight.abs().max(dim=0, keepdim=True)[0] for fc in fcs], dim=0) weight_scales = weight_scales.max(dim=0)[0].clamp(min=1e-5) scales = (act_scales.pow(alpha) / weight_scales.pow(1 - alpha)).clamp(min=1e-5).to(device).to(dtype) ln.weight.div_(scales) if hasattr(ln, "bias"): ln.bias.div_(scales) for fc in fcs: fc.weight.mul_(scales.view(1, -1)) @classmethod def create_quantized_model(model): raise NotImplementedError("Not implement create_quantized_model method") # Adapted from AutoGPTQ: https://github.com/PanQiWei/AutoGPTQ/blob/main/auto_gptq/modeling/_base.py def save_quantized( self, save_dir: str, model_basename: str, use_safetensors: bool = False, safetensors_metadata: Optional[Dict[str, str]] = None, ): """save quantized model and configs to local disk""" os.makedirs(save_dir, exist_ok=True) if not self.quantized: raise EnvironmentError("can only save quantized model, please execute .quantize first.") self.model.to("cpu") model_base_name = model_basename # or f"smooth-" if use_safetensors: model_save_name = model_base_name + ".safetensors" state_dict = self.model.state_dict() state_dict = {k: v.clone().contiguous() for k, v in state_dict.items()} if safetensors_metadata is None: safetensors_metadata = {} elif not isinstance(safetensors_metadata, dict): raise TypeError("safetensors_metadata must be a dictionary.") else: print(f"Received safetensors_metadata: {safetensors_metadata}") new_safetensors_metadata = {} converted_keys = False for key, value in safetensors_metadata.items(): if not isinstance(key, str) or not isinstance(value, str): converted_keys = True try: new_key = str(key) new_value = str(value) except Exception as e: raise TypeError( f"safetensors_metadata: both keys and values must be strings and an error occured when trying to convert them: {e}" ) if new_key in new_safetensors_metadata: print( f"After converting 
safetensors_metadata keys to strings, the key '{new_key}' is duplicated. Ensure that all your metadata keys are strings to avoid overwriting." ) new_safetensors_metadata[new_key] = new_value safetensors_metadata = new_safetensors_metadata if converted_keys: print( f"One or more safetensors_metadata keys or values had to be converted to str(). Final safetensors_metadata: {safetensors_metadata}" ) # Format is required to enable Accelerate to load the metadata # otherwise it raises an OSError safetensors_metadata["format"] = "pt" safe_save(state_dict, join(save_dir, model_save_name), safetensors_metadata) else: model_save_name = model_base_name + ".bin" torch.save(self.model.state_dict(), join(save_dir, model_save_name)) self.model.config.save_pretrained(save_dir) # Adapted from AutoGPTQ: https://github.com/PanQiWei/AutoGPTQ/blob/main/auto_gptq/modeling/_base.py def save_pretrained( self, save_dir: str, use_safetensors: bool = False, safetensors_metadata: Optional[Dict[str, str]] = None, **kwargs, ): """alias of save_quantized""" warnings.warn("you are using save_pretrained, which will re-direct to save_quantized.") self.save_quantized(save_dir, use_safetensors, safetensors_metadata) # Adapted from AutoGPTQ: https://github.com/PanQiWei/AutoGPTQ/blob/main/auto_gptq/modeling/_base.py @classmethod def from_pretrained( cls, pretrained_model_name_or_path: str, max_memory: Optional[dict] = None, trust_remote_code: bool = False, torch_dtype: torch.dtype = torch.float16, **model_init_kwargs, ): if not torch.cuda.is_available(): raise EnvironmentError("Load pretrained model to do quantization requires CUDA available.") def skip(*args, **kwargs): pass torch.nn.init.kaiming_uniform_ = skip torch.nn.init.uniform_ = skip torch.nn.init.normal_ = skip # Parameters related to loading from Hugging Face Hub cache_dir = model_init_kwargs.pop("cache_dir", None) force_download = model_init_kwargs.pop("force_download", False) resume_download = model_init_kwargs.pop("resume_download", 
False) proxies = model_init_kwargs.pop("proxies", None) local_files_only = model_init_kwargs.pop("local_files_only", False) use_auth_token = model_init_kwargs.pop("use_auth_token", None) revision = model_init_kwargs.pop("revision", None) subfolder = model_init_kwargs.pop("subfolder", "") model_init_kwargs.pop("_commit_hash", None) cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "resume_download": resume_download, "local_files_only": local_files_only, "use_auth_token": use_auth_token, "revision": revision, "subfolder": subfolder, } config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=True, **cached_file_kwargs) if config.model_type not in SUPPORTED_MODELS: raise TypeError(f"{config.model_type} isn't supported yet.") # enforce some values despite user specified model_init_kwargs["torch_dtype"] = torch_dtype model_init_kwargs["trust_remote_code"] = trust_remote_code if max_memory: if "disk" in max_memory: raise NotImplementedError("disk offload not support yet.") with accelerate.init_empty_weights(): model = AutoModelForCausalLM.from_config(config, trust_remote_code=True) model.tie_weights() max_memory = accelerate.utils.get_balanced_memory( model, max_memory=max_memory, no_split_module_classes=[cls.layer_type], dtype=model_init_kwargs["torch_dtype"], low_zero=False, ) model_init_kwargs["device_map"] = accelerate.infer_auto_device_map( model, max_memory=max_memory, no_split_module_classes=[cls.layer_type], dtype=model_init_kwargs["torch_dtype"], ) model_init_kwargs["low_cpu_mem_usage"] = True del model else: model_init_kwargs["device_map"] = None model_init_kwargs["low_cpu_mem_usage"] = False torch.cuda.empty_cache() merged_kwargs = {**model_init_kwargs, **cached_file_kwargs} model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, **merged_kwargs) model_config = model.config.to_dict() seq_len_keys = ["max_position_embeddings", "seq_length", "n_positions"] if any([k 
in model_config for k in seq_len_keys]): for key in seq_len_keys: if key in model_config: model.seqlen = model_config[key] break else: warnings.warn("can't get model's sequence length from model config, will set to 4096.") model.seqlen = 4096 model.eval() return cls(model, False) # Adapted from AutoGPTQ: https://github.com/PanQiWei/AutoGPTQ/blob/main/auto_gptq/modeling/_base.py @classmethod def from_quantized( cls, model_name_or_path: Optional[str], model_basename: Optional[str] = None, device_map: Optional[Union[str, Dict[str, Union[int, str]]]] = None, max_memory: Optional[dict] = None, device: Optional[Union[str, int]] = None, low_cpu_mem_usage: bool = False, torch_dtype: Optional[torch.dtype] = None, use_safetensors: bool = False, trust_remote_code: bool = False, **kwargs, ): """load quantized model from local disk""" # Parameters related to loading from Hugging Face Hub cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) subfolder = kwargs.pop("subfolder", "") commit_hash = kwargs.pop("_commit_hash", None) cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "resume_download": resume_download, "local_files_only": local_files_only, "use_auth_token": use_auth_token, "revision": revision, "subfolder": subfolder, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, } # == step1: prepare configs and file names == # config = AutoConfig.from_pretrained( model_name_or_path, trust_remote_code=trust_remote_code, **cached_file_kwargs ) if config.model_type not in SUPPORTED_MODELS: raise TypeError(f"{config.model_type} isn't supported yet.") extensions = [] if use_safetensors: extensions.append(".safetensors") else: 
extensions += [".bin", ".pt"] model_name_or_path = str(model_name_or_path) is_local = isdir(model_name_or_path) resolved_archive_file = None if is_local: model_save_name = join(model_name_or_path, model_basename) for ext in extensions: if isfile(model_save_name + ext): resolved_archive_file = model_save_name + ext break else: # remote for ext in extensions: resolved_archive_file = cached_file(model_name_or_path, model_basename + ext, **cached_file_kwargs) if resolved_archive_file is not None: break if resolved_archive_file is None: # Could not find a model file to use raise FileNotFoundError(f"Could not find model in {model_name_or_path}") model_save_name = resolved_archive_file # == step2: convert model to quantized-model (replace Linear) == # def skip(*args, **kwargs): pass torch.nn.init.kaiming_uniform_ = skip torch.nn.init.uniform_ = skip torch.nn.init.normal_ = skip transformers.modeling_utils._init_weights = False init_contexts = [no_init_weights()] if low_cpu_mem_usage: init_contexts.append(accelerate.init_empty_weights(include_buffers=True)) with ContextManagers(init_contexts): model = AutoModelForCausalLM.from_config( config, trust_remote_code=trust_remote_code, torch_dtype=torch_dtype ) cls.create_quantized_model(model) model.tie_weights() # == step3: load checkpoint to quantized-model == # accelerate.utils.modeling.load_checkpoint_in_model( model, checkpoint=model_save_name, offload_state_dict=True, offload_buffers=True ) # == step4: set seqlen == # model_config = model.config.to_dict() seq_len_keys = ["max_position_embeddings", "seq_length", "n_positions"] if any([k in model_config for k in seq_len_keys]): for key in seq_len_keys: if key in model_config: model.seqlen = model_config[key] break else: warnings.warn("can't get model's sequence length from model config, will set to 4096.") model.seqlen = 4096 return cls( model, True, ) def __getattr__(self, item): try: return super().__getattr__(item) except: return getattr(self.model, item) __all__ = 
["BaseSmoothForCausalLM"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/smoothquant/models/llama.py
colossalai/legacy/inference/quant/smoothquant/models/llama.py
import math import os import types from collections import defaultdict from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from torch_int.nn.bmm import BMM_S8T_S8N_F32T, BMM_S8T_S8N_S8T from transformers import PreTrainedModel from transformers.modeling_outputs import BaseModelOutputWithPast from transformers.models.llama.configuration_llama import LlamaConfig from transformers.models.llama.modeling_llama import ( LLAMA_INPUTS_DOCSTRING, LlamaAttention, LlamaDecoderLayer, LlamaMLP, LlamaRotaryEmbedding, repeat_kv, rotate_half, ) from transformers.utils import add_start_docstrings_to_model_forward from colossalai.inference.tensor_parallel.batch_infer_state import BatchInferState from colossalai.kernel.triton import ( copy_kv_cache_to_dest, int8_rotary_embedding_fwd, smooth_llama_context_attn_fwd, smooth_token_attention_fwd, ) from .base_model import BaseSmoothForCausalLM from .linear import W8A8B8O8Linear, W8A8BFP32O32LinearSiLU, W8A8BFP32OFP32Linear class LLamaSmoothquantAttention(nn.Module): def __init__( self, hidden_size: int, num_heads: int, ): super().__init__() self.hidden_size = hidden_size self.num_heads = num_heads self.head_dim = hidden_size // num_heads if (self.head_dim * num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {num_heads})." 
) self.qk_bmm = BMM_S8T_S8N_F32T(1.0) self.pv_bmm = BMM_S8T_S8N_S8T(1.0) self.k_proj = W8A8B8O8Linear(hidden_size, hidden_size) self.v_proj = W8A8B8O8Linear(hidden_size, hidden_size) self.q_proj = W8A8B8O8Linear(hidden_size, hidden_size) self.o_proj = W8A8BFP32OFP32Linear(hidden_size, hidden_size) self.register_buffer("q_output_scale", torch.tensor([1.0])) self.register_buffer("k_output_scale", torch.tensor([1.0])) self.register_buffer("v_output_scale", torch.tensor([1.0])) self.register_buffer("q_rotary_output_scale", torch.tensor([1.0])) self.register_buffer("k_rotary_output_scale", torch.tensor([1.0])) self.register_buffer("out_input_scale", torch.tensor([1.0])) self.register_buffer("attn_input_scale", torch.tensor([1.0])) self._init_rope() self.num_key_value_heads = num_heads def _init_rope(self): self.rotary_emb = LlamaRotaryEmbedding( self.head_dim, max_position_embeddings=2048, base=10000.0, ) @staticmethod def pack( module: LlamaAttention, attn_input_scale: float, q_output_scale: float, k_output_scale: float, v_output_scale: float, q_rotary_output_scale: float, k_rotary_output_scale: float, out_input_scale: float, ): int8_module = LLamaSmoothquantAttention(module.hidden_size, module.num_heads) int8_module.attn_input_scale = torch.tensor([attn_input_scale]) int8_module.q_output_scale = torch.tensor([q_output_scale]) int8_module.k_output_scale = torch.tensor([k_output_scale]) int8_module.v_output_scale = torch.tensor([v_output_scale]) int8_module.q_rotary_output_scale = torch.tensor([q_rotary_output_scale]) int8_module.k_rotary_output_scale = torch.tensor([k_rotary_output_scale]) int8_module.q_proj = W8A8B8O8Linear.from_float(module.q_proj, attn_input_scale, q_output_scale) int8_module.k_proj = W8A8B8O8Linear.from_float(module.k_proj, attn_input_scale, k_output_scale) int8_module.v_proj = W8A8B8O8Linear.from_float(module.v_proj, attn_input_scale, v_output_scale) int8_module.o_proj = W8A8BFP32OFP32Linear.from_float(module.o_proj, out_input_scale) 
int8_module.out_input_scale = torch.tensor([out_input_scale]) return int8_module def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() @torch.no_grad() def forward( self, hidden_states: torch.Tensor, rotary_emb: Tuple[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, padding_mask: Optional[torch.LongTensor] = None, infer_state: Optional[BatchInferState] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) cos = rotary_emb[0] sin = rotary_emb[1] int8_rotary_embedding_fwd( query_states.view(-1, self.num_heads, self.head_dim), cos, sin, self.q_output_scale.item(), self.q_rotary_output_scale.item(), ) int8_rotary_embedding_fwd( key_states.view(-1, self.num_heads, self.head_dim), cos, sin, self.k_output_scale.item(), self.k_rotary_output_scale.item(), ) def _copy_kv_to_mem_cache(layer_id, key_buffer, value_buffer, context_mem_index, mem_manager): copy_kv_cache_to_dest(key_buffer, context_mem_index, mem_manager.key_buffer[layer_id]) copy_kv_cache_to_dest(value_buffer, context_mem_index, mem_manager.value_buffer[layer_id]) return query_states = query_states.view(-1, self.num_heads, self.head_dim) key_states = key_states.view(-1, self.num_heads, self.head_dim) value_states = value_states.view(-1, self.num_heads, self.head_dim) if infer_state.is_context_stage: # first token generation # copy key and value calculated in current step to memory manager _copy_kv_to_mem_cache( infer_state.decode_layer_id, key_states, value_states, infer_state.context_mem_index, infer_state.cache_manager, ) attn_output = 
torch.empty_like(query_states) smooth_llama_context_attn_fwd( query_states, key_states, value_states, attn_output, self.q_rotary_output_scale.item(), self.k_rotary_output_scale.item(), self.v_output_scale.item(), self.out_input_scale.item(), infer_state.start_loc, infer_state.seq_len, q_len, ) else: if infer_state.decode_is_contiguous: # if decode is contiguous, then we copy to key cache and value cache in cache manager directly cache_k = infer_state.cache_manager.key_buffer[infer_state.decode_layer_id][ infer_state.decode_mem_start : infer_state.decode_mem_end, :, : ] cache_v = infer_state.cache_manager.value_buffer[infer_state.decode_layer_id][ infer_state.decode_mem_start : infer_state.decode_mem_end, :, : ] cache_k.copy_(key_states) cache_v.copy_(value_states) else: # if decode is not contiguous, use triton kernel to copy key and value cache # k, v shape: [batch_size, num_heads, head_dim/embed_size_per_head _copy_kv_to_mem_cache( infer_state.decode_layer_id, key_states, value_states, infer_state.decode_mem_index, infer_state.cache_manager, ) # (batch_size, seqlen, nheads, headdim) attn_output = torch.empty_like(query_states) smooth_token_attention_fwd( query_states, infer_state.cache_manager.key_buffer[infer_state.decode_layer_id], infer_state.cache_manager.value_buffer[infer_state.decode_layer_id], attn_output, self.q_rotary_output_scale.item(), self.k_rotary_output_scale.item(), self.v_output_scale.item(), self.out_input_scale.item(), infer_state.block_loc, infer_state.start_loc, infer_state.seq_len, infer_state.max_len_in_batch, ) attn_output = attn_output.view(bsz, q_len, self.num_heads * self.head_dim) attn_output = self.o_proj(attn_output) return attn_output, None, None class LlamaLayerNormQ(torch.nn.Module): def __init__(self, dim, eps=1e-5): super().__init__() self.input_scale = 1.0 self.variance_epsilon = eps self.register_buffer("weight", torch.ones(dim, dtype=torch.float32)) def forward(self, x): ln_output_fp = torch.nn.functional.layer_norm(x, 
x.shape[-1:], self.weight, None, self.variance_epsilon) ln_output_int8 = ln_output_fp.round().clamp(-128, 127).to(torch.int8) return ln_output_int8 @staticmethod def from_float(module: torch.nn.LayerNorm, output_scale: float): assert module.weight.shape[0] == module.weight.numel() q_module = LlamaLayerNormQ(module.weight.shape[0], module.variance_epsilon) q_module.weight = module.weight / output_scale return q_module class LlamaSmoothquantMLP(nn.Module): def __init__(self, intermediate_size, hidden_size): super().__init__() self.gate_proj = W8A8BFP32O32LinearSiLU(hidden_size, intermediate_size) self.up_proj = W8A8BFP32OFP32Linear(hidden_size, intermediate_size) self.down_proj = W8A8BFP32OFP32Linear(intermediate_size, hidden_size) self.register_buffer("down_proj_input_scale", torch.tensor([1.0])) @staticmethod def pack( mlp_module: LlamaMLP, gate_proj_input_scale: float, up_proj_input_scale: float, down_proj_input_scale: float, ): int8_module = LlamaSmoothquantMLP( mlp_module.intermediate_size, mlp_module.hidden_size, ) int8_module.gate_proj = W8A8BFP32O32LinearSiLU.from_float(mlp_module.gate_proj, gate_proj_input_scale) int8_module.up_proj = W8A8BFP32OFP32Linear.from_float(mlp_module.up_proj, up_proj_input_scale) int8_module.down_proj = W8A8BFP32OFP32Linear.from_float(mlp_module.down_proj, down_proj_input_scale) int8_module.down_proj_input_scale = torch.tensor([down_proj_input_scale]) return int8_module def forward( self, hidden_states: torch.Tensor, ): x_shape = hidden_states.shape gate_out = self.gate_proj(hidden_states) up_out = self.up_proj(hidden_states) inter_out = gate_out * up_out inter_out = inter_out.div_(self.down_proj_input_scale.item()).round().clamp(-128, 127).to(torch.int8) down_out = self.down_proj(inter_out) down_out = down_out.view(*x_shape[:-1], -1) return down_out class LlamaSmoothquantDecoderLayer(nn.Module): def __init__(self, config: LlamaConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = 
LLamaSmoothquantAttention(config.hidden_size, config.num_attention_heads) self.mlp = LlamaSmoothquantMLP(config.intermediate_size, config.hidden_size) self.input_layernorm = LlamaLayerNormQ(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = LlamaLayerNormQ(config.hidden_size, eps=config.rms_norm_eps) @staticmethod def pack( module: LlamaDecoderLayer, attn_input_scale: float, q_output_scale: float, k_output_scale: float, v_output_scale: float, q_rotary_output_scale: float, k_rotary_output_scale: float, out_input_scale: float, gate_input_scale: float, up_input_scale: float, down_input_scale: float, ): config = module.self_attn.config int8_decoder_layer = LlamaSmoothquantDecoderLayer(config) int8_decoder_layer.input_layernorm = LlamaLayerNormQ.from_float(module.input_layernorm, attn_input_scale) int8_decoder_layer.self_attn = LLamaSmoothquantAttention.pack( module.self_attn, attn_input_scale, q_output_scale, k_output_scale, v_output_scale, q_rotary_output_scale, k_rotary_output_scale, out_input_scale, ) int8_decoder_layer.post_attention_layernorm = LlamaLayerNormQ.from_float( module.post_attention_layernorm, gate_input_scale ) int8_decoder_layer.mlp = LlamaSmoothquantMLP.pack( module.mlp, gate_input_scale, up_input_scale, down_input_scale, ) return int8_decoder_layer def forward( self, hidden_states: torch.Tensor, rotary_emb: Tuple[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, padding_mask: Optional[torch.LongTensor] = None, infer_state: Optional[BatchInferState] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, 
tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, rotary_emb=rotary_emb, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, infer_state=infer_state, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states return hidden_states, None, None class LlamaApplyRotary(nn.Module): def __init__(self): super().__init__() def forward(self, x, cos, sin, position_ids): # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. 
cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] x_embed = (x * cos) + (rotate_half(x) * sin) return x_embed # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py def llama_decoder_layer_forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, padding_mask: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() if self.config.pretraining_tp > 1: key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp query_slices = self.q_proj.weight.split((self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0) key_slices = self.k_proj.weight.split(key_value_slicing, dim=0) value_slices = self.v_proj.weight.split(key_value_slicing, dim=0) query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)] query_states = torch.cat(query_states, dim=-1) key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)] key_states = torch.cat(key_states, dim=-1) value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)] value_states = torch.cat(value_states, dim=-1) else: query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = 
value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states = self.q_apply_rotary(query_states, cos, sin, position_ids) key_states = self.k_apply_rotary(key_states, cos, sin, position_ids) if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) if self.config.pretraining_tp > 1: attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2) o_proj_slices = 
self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1) attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)]) else: attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value def init_to_get_rotary(config, base=10000, use_elem=False): """ This function initializes the rotary positional embedding, it is compatible for all models and is called in ShardFormer Args: base : calculation arg use_elem : activated when using chatglm-based models """ config.head_dim_ = config.hidden_size // config.num_attention_heads if not hasattr(config, "rope_scaling"): rope_scaling_factor = 1.0 else: rope_scaling_factor = config.rope_scaling.factor if config.rope_scaling is not None else 1.0 if hasattr(config, "max_sequence_length"): max_seq_len = config.max_sequence_length elif hasattr(config, "max_position_embeddings"): max_seq_len = config.max_position_embeddings * rope_scaling_factor else: max_seq_len = 2048 * rope_scaling_factor base = float(base) # NTK ref: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ try: ntk_alpha = float(os.environ.get("INFER_NTK_ALPHA", 1)) assert ntk_alpha >= 1 if ntk_alpha > 1: print(f"Note: NTK enabled, alpha set to {ntk_alpha}") max_seq_len *= ntk_alpha base = base * (ntk_alpha ** (config.head_dim_ / (config.head_dim_ - 2))) # Base change formula except: pass n_elem = config.head_dim_ if use_elem: n_elem //= 2 inv_freq = 1.0 / (base ** (torch.arange(0, n_elem, 2, device="cpu", dtype=torch.float32) / n_elem)) t = torch.arange(max_seq_len + 1024 * 64, device="cpu", dtype=torch.float32) / rope_scaling_factor freqs = torch.outer(t, inv_freq) _cos_cached = torch.cos(freqs).to(torch.float) _sin_cached = torch.sin(freqs).to(torch.float) return _cos_cached, _sin_cached # Adapted from 
https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) def llama_model_forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") infer_state = self.infer_state if infer_state.is_context_stage: past_key_values_length = 0 else: past_key_values_length = infer_state.max_len_in_batch - 1 seq_length_with_past = seq_length + past_key_values_length # NOTE: differentiate with prefill stage # block_loc require different value-assigning method for two different stage # NOTE: differentiate with prefill stage # block_loc require different value-assigning method for two different stage if infer_state.is_context_stage: infer_state.context_mem_index = 
infer_state.cache_manager.alloc(infer_state.total_token_num) infer_state.init_block_loc( infer_state.block_loc, infer_state.seq_len, seq_length, infer_state.context_mem_index ) else: alloc_mem = infer_state.cache_manager.alloc_contiguous(batch_size) if alloc_mem is not None: infer_state.decode_is_contiguous = True infer_state.decode_mem_index = alloc_mem[0] infer_state.decode_mem_start = alloc_mem[1] infer_state.decode_mem_end = alloc_mem[2] infer_state.block_loc[:, seq_length_with_past - 1] = infer_state.decode_mem_index else: print(f" *** Encountered allocation non-contiguous") print(f" infer_state.cache_manager.max_len_in_batch: {infer_state.max_len_in_batch}") infer_state.decode_is_contiguous = False alloc_mem = infer_state.cache_manager.alloc(batch_size) infer_state.decode_mem_index = alloc_mem infer_state.block_loc[:, seq_length_with_past - 1] = infer_state.decode_mem_index if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) # embed positions if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device) padding_mask = None else: if 0 in attention_mask: padding_mask = attention_mask else: padding_mask = None attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: raise NotImplementedError("not implement gradient_checkpointing and training options ") if past_key_values_length == 0: position_cos = torch.index_select(self._cos_cached, 0, position_ids.view(-1)).view( 
position_ids.view(-1).shape[0], -1 ) position_sin = torch.index_select(self._sin_cached, 0, position_ids.view(-1)).view( position_ids.view(-1).shape[0], -1 ) else: position_cos = torch.index_select(self._cos_cached, 0, position_ids.view(-1)).view(batch_size, -1) position_sin = torch.index_select(self._sin_cached, 0, position_ids.view(-1)).view(batch_size, -1) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None infer_state.decode_layer_id = 0 for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None layer_outputs = decoder_layer( hidden_states, rotary_emb=(position_cos, position_sin), attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, padding_mask=padding_mask, infer_state=infer_state, ) hidden_states = layer_outputs[0] infer_state.decode_layer_id += 1 if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) infer_state.is_context_stage = False infer_state.start_loc = infer_state.start_loc + torch.arange(0, batch_size, dtype=torch.int32, device="cuda") infer_state.seq_len += 1 infer_state.max_len_in_batch += 1 next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class 
SmoothLlamaForCausalLM(BaseSmoothForCausalLM): layer_type = "LlamaDecoderLayer" def __init__(self, model: PreTrainedModel, quantized: bool = False): super().__init__(model, quantized) # Adatped from https://github.com/mit-han-lab/smoothquant/blob/main/smoothquant/calibration.py def get_act_dict( self, tokenizer, dataset, num_samples=512, seq_len=512, ): llama_model = self.model llama_model.eval() device = next(llama_model.parameters()).device # print("model:", llama_model) act_dict = defaultdict(dict) def stat_io_hook(m, x, y, name): if isinstance(x, tuple): x = x[0] if name not in act_dict or "input" not in act_dict[name]: act_dict[name]["input"] = x.detach().abs().max().item() else: act_dict[name]["input"] = max(act_dict[name]["input"], x.detach().abs().max().item()) if isinstance(y, tuple): y = y[0] if name not in act_dict or "output" not in act_dict[name]: act_dict[name]["output"] = y.detach().abs().max().item() else: act_dict[name]["output"] = max(act_dict[name]["output"], y.detach().abs().max().item()) for name, m in llama_model.named_modules(): if isinstance(m, LlamaAttention): setattr(m, "q_apply_rotary", LlamaApplyRotary()) setattr(m, "k_apply_rotary", LlamaApplyRotary()) m.forward = types.MethodType(llama_decoder_layer_forward, m) hooks = [] for name, m in llama_model.named_modules(): if isinstance(m, LlamaApplyRotary): hooks.append(m.register_forward_hook(partial(stat_io_hook, name=name))) if isinstance(m, torch.nn.Linear): hooks.append(m.register_forward_hook(partial(stat_io_hook, name=name))) self.collect_act_dict(llama_model, tokenizer, dataset, act_dict, device, num_samples, seq_len) for hook in hooks: hook.remove() return act_dict def smooth_fn(self, scales, alpha=0.5): model = self.model for name, module in model.named_modules(): if isinstance(module, LlamaDecoderLayer): attn_ln = module.input_layernorm qkv = [module.self_attn.q_proj, module.self_attn.k_proj, module.self_attn.v_proj] qkv_input_scales = scales[name + ".self_attn.q_proj"] 
self.smooth_ln_fcs(attn_ln, qkv, qkv_input_scales, alpha) def create_quantized_model(model): llama_config = model.config for i, layer in enumerate(model.model.layers): model.model.layers[i] = LlamaSmoothquantDecoderLayer(llama_config) model.model.forward = types.MethodType(llama_model_forward, model.model) cos, sin = init_to_get_rotary(llama_config) model.model.register_buffer("_cos_cached", cos) model.model.register_buffer("_sin_cached", sin) def quantized( self, tokenizer, dataset, num_samples=512, seq_len=512, alpha=0.5, ): llama_model = self.model
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/smoothquant/models/__init__.py
colossalai/legacy/inference/quant/smoothquant/models/__init__.py
try: import torch_int HAS_TORCH_INT = True except ImportError: HAS_TORCH_INT = False raise ImportError( "Not install torch_int. Please install torch_int from https://github.com/Guangxuan-Xiao/torch-int" ) if HAS_TORCH_INT: from .llama import LLamaSmoothquantAttention, LlamaSmoothquantMLP
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/quant/smoothquant/models/linear.py
colossalai/legacy/inference/quant/smoothquant/models/linear.py
# modified from torch-int: https://github.com/Guangxuan-Xiao/torch-int/blob/main/torch_int/nn/linear.py import torch from torch_int._CUDA import linear_a8_w8_b8_o8, linear_a8_w8_bfp32_ofp32 from torch_int.functional.quantization import quantize_per_tensor_absmax try: from colossalai.kernel.op_builder.smoothquant import SmoothquantBuilder smoothquant_cuda = SmoothquantBuilder().load() HAS_SMOOTHQUANT_CUDA = True except ImportError: HAS_SMOOTHQUANT_CUDA = False raise ImportError("CUDA smoothquant linear is not installed") class W8A8BFP32O32LinearSiLU(torch.nn.Module): def __init__(self, in_features, out_features, alpha=1.0, beta=1.0): super().__init__() self.in_features = in_features self.out_features = out_features self.register_buffer( "weight", torch.randint( -127, 127, (self.out_features, self.in_features), dtype=torch.int8, requires_grad=False, ), ) self.register_buffer( "bias", torch.zeros((1, self.out_features), dtype=torch.float, requires_grad=False), ) self.register_buffer("a", torch.tensor(alpha)) def to(self, *args, **kwargs): super().to(*args, **kwargs) self.weight = self.weight.to(*args, **kwargs) self.bias = self.bias.to(*args, **kwargs) return self @torch.no_grad() def forward(self, x): x_shape = x.shape x = x.view(-1, x_shape[-1]) y = smoothquant_cuda.linear_silu_a8_w8_bfp32_ofp32(x, self.weight, self.bias, self.a.item(), 1.0) y = y.view(*x_shape[:-1], -1) return y @staticmethod def from_float(module: torch.nn.Linear, input_scale): int8_module = W8A8BFP32O32LinearSiLU(module.in_features, module.out_features) int8_weight, weight_scale = quantize_per_tensor_absmax(module.weight) alpha = input_scale * weight_scale int8_module.weight = int8_weight if module.bias is not None: int8_module.bias.data.copy_(module.bias.to(torch.float)) int8_module.a = alpha return int8_module # modified from torch-int: https://github.com/Guangxuan-Xiao/torch-int/blob/main/torch_int/nn/linear.py class W8A8B8O8Linear(torch.nn.Module): # For qkv_proj def __init__(self, 
in_features, out_features, alpha=1.0, beta=1.0): super().__init__() self.in_features = in_features self.out_features = out_features self.register_buffer( "weight", torch.randint( -127, 127, (self.out_features, self.in_features), dtype=torch.int8, requires_grad=False, ), ) self.register_buffer( "bias", torch.zeros((1, self.out_features), dtype=torch.int8, requires_grad=False), ) self.register_buffer("a", torch.tensor(alpha)) self.register_buffer("b", torch.tensor(beta)) def to(self, *args, **kwargs): super().to(*args, **kwargs) self.weight = self.weight.to(*args, **kwargs) self.bias = self.bias.to(*args, **kwargs) return self @torch.no_grad() def forward(self, x): x_shape = x.shape x = x.view(-1, x_shape[-1]) y = linear_a8_w8_b8_o8(x, self.weight, self.bias, self.a.item(), self.b.item()) y = y.view(*x_shape[:-1], -1) return y @staticmethod def from_float(module: torch.nn.Linear, input_scale, output_scale): int8_module = W8A8B8O8Linear(module.in_features, module.out_features) int8_weight, weight_scale = quantize_per_tensor_absmax(module.weight) alpha = input_scale * weight_scale / output_scale int8_module.weight = int8_weight int8_module.a = alpha if module.bias is not None: int8_bias, bias_scale = quantize_per_tensor_absmax(module.bias) int8_module.bias = int8_bias beta = bias_scale / output_scale int8_module.b = beta return int8_module # modified from torch-int: https://github.com/Guangxuan-Xiao/torch-int/blob/main/torch_int/nn/linear.py class W8A8BFP32OFP32Linear(torch.nn.Module): # For fc2 and out_proj def __init__(self, in_features, out_features, alpha=1.0, beta=1.0): super().__init__() self.in_features = in_features self.out_features = out_features self.register_buffer( "weight", torch.randint( -127, 127, (self.out_features, self.in_features), dtype=torch.int8, requires_grad=False, ), ) self.register_buffer( "bias", torch.zeros(self.out_features, dtype=torch.float32, requires_grad=False), ) self.register_buffer("a", torch.tensor(alpha)) def _apply(self, fn): # 
prevent the bias from being converted to half super()._apply(fn) self.bias = self.bias.to(torch.float32) return self def to(self, *args, **kwargs): super().to(*args, **kwargs) self.weight = self.weight.to(*args, **kwargs) self.bias = self.bias.to(*args, **kwargs) self.bias = self.bias.to(torch.float32) return self @torch.no_grad() def forward(self, x): x_shape = x.shape x = x.view(-1, x_shape[-1]) y = linear_a8_w8_bfp32_ofp32(x, self.weight, self.bias, self.a.item(), 1) y = y.view(*x_shape[:-1], -1) return y @staticmethod def from_float(module: torch.nn.Linear, input_scale): int8_module = W8A8BFP32OFP32Linear(module.in_features, module.out_features) int8_weight, weight_scale = quantize_per_tensor_absmax(module.weight) alpha = input_scale * weight_scale int8_module.weight = int8_weight int8_module.a = alpha int8_module.input_scale = input_scale int8_module.weight_scale = weight_scale if module.bias is not None: int8_module.bias = module.bias.to(torch.float32) return int8_module
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/kvcache_manager.py
colossalai/legacy/inference/tensor_parallel/kvcache_manager.py
""" Refered/Modified from lightllm/common/mem_manager.py of the ModelTC/lightllm GitHub repository https://github.com/ModelTC/lightllm/blob/050af3ce65edca617e2f30ec2479397d5bb248c9/lightllm/common/mem_manager.py we slightly changed it to make it suitable for our colossal-ai shardformer TP-engine design. """ import torch from transformers.utils import logging class MemoryManager: r""" Manage token block indexes and allocate physical memory for key and value cache Args: size: maximum token number used as the size of key and value buffer dtype: data type of cached key and value head_num: number of heads the memory manager is responsible for head_dim: embedded size per head layer_num: the number of layers in the model device: device used to store the key and value cache """ def __init__( self, size: int, dtype: torch.dtype, head_num: int, head_dim: int, layer_num: int, device: torch.device = torch.device("cuda"), ): self.logger = logging.get_logger(__name__) self.available_size = size self.max_len_in_batch = 0 self._init_mem_states(size, device) self._init_kv_buffers(size, device, dtype, head_num, head_dim, layer_num) def _init_mem_states(self, size, device): """Initialize tensors used to manage memory states""" self.mem_state = torch.ones((size,), dtype=torch.bool, device=device) self.mem_cum_sum = torch.empty((size,), dtype=torch.int32, device=device) self.indexes = torch.arange(0, size, dtype=torch.long, device=device) def _init_kv_buffers(self, size, device, dtype, head_num, head_dim, layer_num): """Initialize key buffer and value buffer on specified device""" self.key_buffer = [ torch.empty((size, head_num, head_dim), dtype=dtype, device=device) for _ in range(layer_num) ] self.value_buffer = [ torch.empty((size, head_num, head_dim), dtype=dtype, device=device) for _ in range(layer_num) ] @torch.no_grad() def alloc(self, required_size): """allocate space of required_size by providing indexes representing available physical spaces""" if required_size > 
self.available_size: self.logger.warning(f"No enough cache: required_size {required_size} " f"left_size {self.available_size}") return None torch.cumsum(self.mem_state, dim=0, dtype=torch.int32, out=self.mem_cum_sum) select_index = torch.logical_and(self.mem_cum_sum <= required_size, self.mem_state == 1) select_index = self.indexes[select_index] self.mem_state[select_index] = 0 self.available_size -= len(select_index) return select_index @torch.no_grad() def alloc_contiguous(self, required_size): """allocate contiguous space of required_size""" if required_size > self.available_size: self.logger.warning(f"No enough cache: required_size {required_size} " f"left_size {self.available_size}") return None torch.cumsum(self.mem_state, dim=0, dtype=torch.int32, out=self.mem_cum_sum) sum_size = len(self.mem_cum_sum) loc_sums = ( self.mem_cum_sum[required_size - 1 :] - self.mem_cum_sum[0 : sum_size - required_size + 1] + self.mem_state[0 : sum_size - required_size + 1] ) can_used_loc = self.indexes[0 : sum_size - required_size + 1][loc_sums == required_size] if can_used_loc.shape[0] == 0: self.logger.info( f"No enough contiguous cache: required_size {required_size} " f"left_size {self.available_size}" ) return None start_loc = can_used_loc[0] select_index = self.indexes[start_loc : start_loc + required_size] self.mem_state[select_index] = 0 self.available_size -= len(select_index) start = start_loc.item() end = start + required_size return select_index, start, end @torch.no_grad() def free(self, free_index): """free memory by updating memory states based on given indexes""" self.available_size += free_index.shape[0] self.mem_state[free_index] = 1 @torch.no_grad() def free_all(self): """free all memory by updating memory states""" self.available_size = len(self.mem_state) self.mem_state[:] = 1 self.max_len_in_batch = 0 self.logger.info("freed all space of memory manager")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/__init__.py
colossalai/legacy/inference/tensor_parallel/__init__.py
"""Public API of the legacy tensor-parallel inference package: the TP engine and its KV-cache manager."""
from .engine import TPInferEngine
from .kvcache_manager import MemoryManager

__all__ = ["MemoryManager", "TPInferEngine"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/batch_infer_state.py
colossalai/legacy/inference/tensor_parallel/batch_infer_state.py
# might want to consider combine with InferenceConfig in colossalai/ppinference/inference_config.py later
from dataclasses import dataclass

import torch
from transformers.tokenization_utils_base import BatchEncoding

from .kvcache_manager import MemoryManager


# adapted from: lightllm/server/router/model_infer/infer_batch.py
@dataclass
class BatchInferState:
    r"""
    Information to be passed and used for a batch of inputs during
    a single model forward
    """

    batch_size: int
    max_len_in_batch: int

    # KV-cache manager shared with the engine; set via set_cache_manager()
    cache_manager: MemoryManager = None
    # (batch_size, max_input_len + max_output_len) mapping from (sequence, position)
    # to a physical slot index in the KV cache
    block_loc: torch.Tensor = None
    # per-sequence start offset into the flattened token layout
    start_loc: torch.Tensor = None
    # per-sequence current length
    seq_len: torch.Tensor = None
    past_key_values_len: int = None

    # True during the prefill (context) stage, False during token-by-token decode
    is_context_stage: bool = False
    context_mem_index: torch.Tensor = None
    decode_is_contiguous: bool = None
    decode_mem_start: int = None
    decode_mem_end: int = None
    decode_mem_index: torch.Tensor = None
    decode_layer_id: int = None

    device: torch.device = torch.device("cuda")

    @property
    def total_token_num(self):
        """Total number of tokens across all sequences in the batch (sum of seq_len)."""
        # return self.batch_size * self.max_len_in_batch
        assert self.seq_len is not None and self.seq_len.size(0) > 0
        return int(torch.sum(self.seq_len))

    def set_cache_manager(self, manager: MemoryManager):
        self.cache_manager = manager

    # adapted from: https://github.com/ModelTC/lightllm/blob/28c1267cfca536b7b4f28e921e03de735b003039/lightllm/common/infer_utils.py#L1
    @staticmethod
    def init_block_loc(
        b_loc: torch.Tensor, seq_len: torch.Tensor, max_len_in_batch: int, alloc_mem_index: torch.Tensor
    ):
        """in-place update block loc mapping based on the sequence length of the inputs in current batch"""
        start_index = 0
        seq_len_numpy = seq_len.cpu().numpy()
        for i, cur_seq_len in enumerate(seq_len_numpy):
            # right-align each sequence's allocated slots within its row of b_loc
            b_loc[i, max_len_in_batch - cur_seq_len : max_len_in_batch] = alloc_mem_index[
                start_index : start_index + cur_seq_len
            ]
            start_index += cur_seq_len
        return

    @classmethod
    def init_from_batch(
        cls,
        batch: torch.Tensor,
        max_input_len: int,
        max_output_len: int,
        cache_manager: MemoryManager,
    ):
        """Build a BatchInferState (prefill-stage) from a tokenized batch.

        Accepts a BatchEncoding/dict (with input_ids + attention_mask), a list of
        token-id lists, or a tensor. Without an attention mask, every sequence is
        assumed to have the padded maximum length.
        """
        if not isinstance(batch, (BatchEncoding, dict, list, torch.Tensor)):
            raise TypeError(f"batch type {type(batch)} is not supported in prepare_batch_state")

        input_ids_list = None
        attention_mask = None

        if isinstance(batch, (BatchEncoding, dict)):
            input_ids_list = batch["input_ids"]
            attention_mask = batch["attention_mask"]
        else:
            input_ids_list = batch
        if isinstance(input_ids_list[0], int):  # for a single input
            input_ids_list = [input_ids_list]
            attention_mask = [attention_mask] if attention_mask is not None else attention_mask

        batch_size = len(input_ids_list)

        seq_start_indexes = torch.zeros(batch_size, dtype=torch.int32, device="cuda")
        seq_lengths = torch.zeros(batch_size, dtype=torch.int32, device="cuda")
        start_index = 0

        max_len_in_batch = -1
        if isinstance(batch, (BatchEncoding, dict)):
            # true per-sequence lengths come from the attention mask
            for i, attn_mask in enumerate(attention_mask):
                curr_seq_len = len(attn_mask)
                seq_lengths[i] = curr_seq_len
                seq_start_indexes[i] = start_index
                start_index += curr_seq_len
                max_len_in_batch = curr_seq_len if curr_seq_len > max_len_in_batch else max_len_in_batch
        else:
            # no mask available: assume every sequence has the padded maximum length
            length = max(len(input_id) for input_id in input_ids_list)
            for i, input_ids in enumerate(input_ids_list):
                curr_seq_len = length
                seq_lengths[i] = curr_seq_len
                seq_start_indexes[i] = start_index
                start_index += curr_seq_len
                max_len_in_batch = curr_seq_len if curr_seq_len > max_len_in_batch else max_len_in_batch
        block_loc = torch.zeros((batch_size, max_input_len + max_output_len), dtype=torch.long, device="cuda")

        return cls(
            batch_size=batch_size,
            max_len_in_batch=max_len_in_batch,
            seq_len=seq_lengths.to("cuda"),
            start_loc=seq_start_indexes.to("cuda"),
            block_loc=block_loc,
            decode_layer_id=0,
            past_key_values_len=0,
            is_context_stage=True,
            cache_manager=cache_manager,
        )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/engine.py
colossalai/legacy/inference/tensor_parallel/engine.py
import warnings  # FIX: was missing, yet _post_init_gptq_buffer calls warnings.warn -> NameError
from typing import Any, Callable, List, Optional, Union

import torch
import torch.nn as nn
from transformers import BloomForCausalLM, LlamaForCausalLM
from transformers.generation import GenerationConfig
from transformers.generation.stopping_criteria import StoppingCriteriaList
from transformers.tokenization_utils_base import BatchEncoding

from colossalai.shardformer import ShardConfig, ShardFormer
from colossalai.shardformer.policies.auto_policy import get_autopolicy

from .batch_infer_state import BatchInferState
from .kvcache_manager import MemoryManager

# from dynamic_batching.infer_batch import InferBatch

DP_AXIS, PP_AXIS, TP_AXIS = 0, 1, 2

_supported_models = [
    "LlamaForCausalLM",
    "LlamaModel",
    "BloomForCausalLM",
    "ChatGLMModel",
    "ChatGLMForConditionalGeneration",
    "LlamaGPTQForCausalLM",
    "BloomGPTQForCausalLM",
]


class TPInferEngine:
    """Engine class for tensor parallel inference.

    Args:
        model (Module): original model, e.g. huggingface CausalLM
        shard_config (ShardConfig): The config for sharding original model
        max_batch_size (int): maximum batch size
        max_input_len (int): maximum input length of sequence
        max_output_len (int): maximum output length of output tokens
        dtype (torch.dtype): datatype used to init KV cache space
        device (str): device the KV cache of engine to be initialized on

    Examples:
        >>> # define model and shard config for your inference
        >>> model = ...
        >>> generate_kwargs = ...
        >>> shard_config = ShardConfig(enable_tensor_parallelism=True, extra_kwargs={"inference_only": True})
        >>> infer_engine = TPInferEngine(model, shard_config, MAX_BATCH_SIZE, MAX_INPUT_LEN, MAX_OUTPUT_LEN)
        >>> outputs = infer_engine.generate(input_ids, **generate_kwargs)
    """

    def __init__(
        self,
        model: nn.Module,
        shard_config: ShardConfig,
        max_batch_size: int,
        max_input_len: int,
        max_output_len: int,
        dtype: torch.dtype = torch.float16,
        device: str = "cuda",
    ) -> None:
        self.max_batch_size = max_batch_size
        self.max_input_len = max_input_len
        self.max_output_len = max_output_len
        # KV-cache capacity: every sequence may occupy input + output slots
        self.max_total_token_num = self.max_batch_size * (self.max_input_len + self.max_output_len)

        # Constraints relatable with specs of devices and model
        # This may change into an optional arg in the future
        assert self.max_batch_size <= 64, "Max batch size exceeds the constraint"
        assert self.max_input_len + self.max_output_len <= 4096, "Max length exceeds the constraint"

        self.dtype = dtype

        self.head_dim = model.config.hidden_size // model.config.num_attention_heads
        self.head_num = model.config.num_attention_heads
        num_hidden_layers = (
            model.config.num_hidden_layers if hasattr(model.config, "num_hidden_layers") else model.config.num_layers
        )
        self.layer_num = num_hidden_layers
        self.multi_query_group_num = model.config.num_attention_heads
        # default to attention_heads
        if hasattr(model.config, "multi_query_attention"):
            self.multi_query_attention = getattr(model.config, "multi_query_attention")

        # MQA/GQA configs override the default group count (last match wins)
        if hasattr(model.config, "multi_query_group_num"):
            self.multi_query_group_num = getattr(model.config, "multi_query_group_num")

        if hasattr(model.config, "num_key_value_heads"):
            self.multi_query_group_num = getattr(model.config, "num_key_value_heads")

        self.tp_size = -1  # to be set with given shard config in self.prepare_shard_config
        self.cache_manager = None

        # GPTQ buffer bookkeeping, filled lazily in _post_init_gptq_buffer
        self.max_dq_buffer_size = 1
        self.max_inner_outer_dim = 1
        self.gptq_temp_state_buffer = None
        self.gptq_temp_dq_buffer = None
        self.bits = -1
        self.use_act_order = False

        self.shard_config = shard_config
        self.model = None
        self.cache = {}  # batch_id -> dynamic-batching batch, used by self.forward

        # optimize the original model by sharding with ShardFormer
        self._optimize_model(model=model.to(device))

    def _init_manager(self) -> None:
        """Create the MemoryManager sized for the (sharded) number of KV heads."""
        assert self.tp_size >= 1, "TP size not initialized without providing a valid ShardConfig"
        assert self.head_num % self.tp_size == 0, f"Cannot shard {self.head_num} heads with tp size {self.tp_size}"
        self.head_num //= self.tp_size  # update sharded number of heads
        if hasattr(self, "multi_query_attention"):
            # NOTE the logic of MQA tensor parallelism should be specified.
            assert (
                self.multi_query_group_num % self.tp_size == 0
            ), f"Cannot shard {self.multi_query_group_num} query groups with tp size {self.tp_size}"
            self.cache_manager = MemoryManager(
                self.max_total_token_num,
                self.dtype,
                self.multi_query_group_num // self.tp_size,
                self.head_dim,
                self.layer_num,
            )
        else:
            self.cache_manager = MemoryManager(
                self.max_total_token_num, self.dtype, self.head_num, self.head_dim, self.layer_num
            )

    def _post_init_gptq_buffer(self, model: nn.Module) -> None:
        """Size and register the temp buffers required by the CUDA GPTQ kernels (no-op unless 4-bit GPTQ CUDA is available)."""
        from colossalai.inference.quant.gptq.cai_gptq import CaiQuantLinear

        HAS_GPTQ_CUDA = False
        try:
            from colossalai.kernel.op_builder.gptq import GPTQBuilder

            gptq_cuda = GPTQBuilder().load()
            HAS_GPTQ_CUDA = True
        except ImportError:
            warnings.warn("CUDA gptq is not installed")
            HAS_GPTQ_CUDA = False

        for _name, submodule in model.named_modules():
            if isinstance(submodule, CaiQuantLinear):
                self.max_dq_buffer_size = max(self.max_dq_buffer_size, submodule.qweight.numel() * 8)

                if self.use_act_order:
                    self.max_inner_outer_dim = max(
                        self.max_inner_outer_dim, submodule.infeatures, submodule.outfeatures
                    )
                self.bits = submodule.bits
        if not (HAS_GPTQ_CUDA and self.bits == 4):
            return

        max_input_len = 1
        if self.use_act_order:
            max_input_len = self.max_input_len
        # The temp_state buffer is required to reorder X in the act-order case.
        # The temp_dq buffer is required to dequantize weights when using cuBLAS, typically for the prefill.
        self.gptq_temp_state_buffer = torch.zeros(
            (max_input_len, self.max_inner_outer_dim), dtype=torch.float16, device=torch.cuda.current_device()
        )
        self.gptq_temp_dq_buffer = torch.zeros(
            (1, self.max_dq_buffer_size), dtype=torch.float16, device=torch.cuda.current_device()
        )

        gptq_cuda.prepare_buffers(
            torch.device(torch.cuda.current_device()), self.gptq_temp_state_buffer, self.gptq_temp_dq_buffer
        )
        # Using the default from exllama repo here.
        matmul_recons_thd = 8
        matmul_fused_remap = False
        matmul_no_half2 = False
        gptq_cuda.set_tuning_params(matmul_recons_thd, matmul_fused_remap, matmul_no_half2)

        torch.cuda.empty_cache()

    def _optimize_model(self, model: nn.Module) -> None:
        """
        Optimize the original model by sharding with ShardFormer.
        In further generation, use the sharded model instead of original model.
        """
        # NOTE we will change to use an inference config later with additional attrs we want
        assert self.shard_config.extra_kwargs["inference_only"] is True
        shardformer = ShardFormer(shard_config=self.shard_config)
        self._prepare_with_shard_config(shard_config=self.shard_config)
        self._shard_model_by(shardformer, model)

    def _prepare_with_shard_config(self, shard_config: Optional[ShardConfig] = None) -> ShardConfig:
        """Prepare the engine with a given ShardConfig.

        Args:
            shard_config (ShardConfig): shard config given to specify settings of the engine.
                If not provided, a default ShardConfig with tp size 1 will be created.
        """
        self.tp_size = 1
        if shard_config is None:
            shard_config = ShardConfig(
                tensor_parallel_process_group=None,
                pipeline_stage_manager=None,
                enable_tensor_parallelism=False,
                enable_fused_normalization=False,
                enable_all_optimization=False,
                enable_flash_attention=False,
                enable_jit_fused=False,
                extra_kwargs={"inference_only": True},
            )
        else:
            shard_config.extra_kwargs = {"inference_only": True}
            # pipeline parallelism is not supported by this engine
            shard_config.pipeline_stage_manager = None
            if shard_config.enable_tensor_parallelism:
                self.tp_size = shard_config.tensor_parallel_size

        self._init_manager()

        return shard_config

    def _shard_model_by(self, shardformer: ShardFormer, model: nn.Module) -> None:
        """Shard original model by the given ShardFormer and store the sharded model."""
        assert (
            self.tp_size == shardformer.shard_config.tensor_parallel_size
        ), "Discrepancy between the tp size of TPInferEngine and the tp size of shard config"
        model_name = model.__class__.__name__
        assert model_name in self.supported_models, f"Unsupported model cls {model_name} for TP inference."
        if self.shard_config.extra_kwargs.get("inference_gptq", False):
            # GPTQ wrappers hold the real transformer under .model
            model = model.model
        policy = get_autopolicy(model, shard_config=self.shard_config)
        self.model, _ = shardformer.optimize(model, policy)

        if self.shard_config.extra_kwargs.get("inference_gptq", False):
            self._post_init_gptq_buffer(self.model)

        self.model = self.model.cuda()

    @property
    def supported_models(self) -> List[str]:
        return _supported_models

    def generate(self, input_tokens: Union[BatchEncoding, dict, list, torch.Tensor], **generate_kwargs) -> torch.Tensor:
        """Generate token sequence.

        Args:
            input_tokens: could be one of the following types
                1. BatchEncoding or dict (e.g. tokenizer batch_encode)
                2. list of input token ids (e.g. appended result of tokenizer encode)
                3. torch.Tensor (e.g. tokenizer encode with return_tensors='pt')
        Returns:
            torch.Tensor: The returned sequence is given inputs + generated_tokens.
        """
        if isinstance(input_tokens, torch.Tensor):
            input_tokens = dict(input_ids=input_tokens, attention_mask=torch.ones_like(input_tokens, dtype=torch.bool))
        for t in input_tokens:
            if torch.is_tensor(input_tokens[t]):
                input_tokens[t] = input_tokens[t].cuda()
        if "max_new_tokens" not in generate_kwargs:
            generate_kwargs.update(max_new_tokens=self.max_output_len)

        return self._generate_by_set_infer_state(input_tokens, **generate_kwargs)

    def prepare_batch_state(self, inputs) -> BatchInferState:
        """
        Create and prepare BatchInferState used for inference during model forward,
        by processing each sequence of the given inputs.

        Args:
            inputs: should be one of the following types
                1. BatchEncoding or dict (e.g. tokenizer batch_encode)
                2. list of input token ids (e.g. appended result of tokenizer encode)
                3. torch.Tensor (e.g. tokenizer encode with return_tensors='pt')
                NOTE For torch.Tensor inputs representing a batch of inputs, we are unable to retrieve
                    the actual length (e.g. number of tokens) of each input without attention mask
                    Hence, for torch.Tensor with shape [bs, l] where bs > 1, we will assume
                    all the inputs in the batch has the maximum length l
        Returns:
            BatchInferState: the states for the current batch during inference
        """
        if not isinstance(inputs, (BatchEncoding, dict, list, torch.Tensor)):
            raise TypeError(f"inputs type {type(inputs)} is not supported in prepare_batch_state")

        input_ids_list = None
        attention_mask = None

        if isinstance(inputs, (BatchEncoding, dict)):
            input_ids_list = inputs["input_ids"]
            attention_mask = inputs["attention_mask"]
        else:
            input_ids_list = inputs
        if isinstance(input_ids_list[0], int):  # for a single input
            input_ids_list = [input_ids_list]
            attention_mask = [attention_mask] if attention_mask is not None else attention_mask

        batch_size = len(input_ids_list)

        seq_start_indexes = torch.zeros(batch_size, dtype=torch.int32, device="cuda")
        seq_lengths = torch.zeros(batch_size, dtype=torch.int32, device="cuda")
        start_index = 0

        max_len_in_batch = -1
        if isinstance(inputs, (BatchEncoding, dict)):
            for i, attn_mask in enumerate(attention_mask):
                curr_seq_len = len(attn_mask)
                seq_lengths[i] = curr_seq_len
                seq_start_indexes[i] = start_index
                start_index += curr_seq_len
                max_len_in_batch = curr_seq_len if curr_seq_len > max_len_in_batch else max_len_in_batch
        else:
            # no mask: assume all sequences are padded to the same (max) length
            length = max(len(input_id) for input_id in input_ids_list)
            for i, input_ids in enumerate(input_ids_list):
                curr_seq_len = length
                seq_lengths[i] = curr_seq_len
                seq_start_indexes[i] = start_index
                start_index += curr_seq_len
                max_len_in_batch = curr_seq_len if curr_seq_len > max_len_in_batch else max_len_in_batch
        # NOTE(review): uninitialized here (torch.empty), unlike BatchInferState.init_from_batch
        # which zero-fills; relies on the model forward to populate it — confirm intended.
        block_loc = torch.empty((batch_size, self.max_input_len + self.max_output_len), dtype=torch.long, device="cuda")
        batch_infer_state = BatchInferState(batch_size, max_len_in_batch)
        batch_infer_state.seq_len = seq_lengths.to("cuda")
        batch_infer_state.start_loc = seq_start_indexes.to("cuda")
        batch_infer_state.block_loc = block_loc
        batch_infer_state.decode_layer_id = 0
        batch_infer_state.past_key_values_len = 0
        batch_infer_state.is_context_stage = True
        batch_infer_state.set_cache_manager(self.cache_manager)
        return batch_infer_state

    @torch.no_grad()
    def _generate_by_set_infer_state(self, input_tokens, **generate_kwargs) -> torch.Tensor:
        """
        Generate output tokens by setting BatchInferState as an attribute to the model and calling model.generate

        Args:
            inputs: should be one of the following types
                1. BatchEncoding or dict (e.g. tokenizer batch_encode)
                2. list of input token ids (e.g. appended result of tokenizer encode)
                3. torch.Tensor (e.g. tokenizer encode with return_tensors='pt')
        """
        # for testing, always use sharded model
        assert self.model is not None, "sharded model does not exist"

        batch_infer_state = self.prepare_batch_state(input_tokens)
        assert batch_infer_state.max_len_in_batch <= self.max_input_len, "max length in batch exceeds limit"

        # set BatchInferState for the current batch as attr to model
        # NOTE this is not a preferable way to pass BatchInferState during inference
        #   we might want to rewrite generate function (e.g. _generate_by_pass_infer_state)
        #   and pass BatchInferState via model forward
        model = self.model
        if isinstance(model, LlamaForCausalLM):
            model = self.model.model
        elif isinstance(model, BloomForCausalLM):
            model = self.model.transformer
        setattr(model, "infer_state", batch_infer_state)

        outputs = self.model.generate(**input_tokens, **generate_kwargs, early_stopping=False)

        # NOTE In future development, we're going to let the scheduler to handle the cache,
        #   instead of freeing space explicitly at the end of generation
        self.cache_manager.free_all()

        return outputs

    # TODO might want to implement the func that generates output tokens by passing BatchInferState
    #   as an arg into model.forward.
    #   It requires rewriting model generate and replacing model forward.
    @torch.no_grad()
    def _generate_by_pass_infer_state(
        self,
        input_tokens,
        max_out_length: int,
        generation_config: Optional[GenerationConfig] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        prepare_inputs_fn: Optional[Callable[[torch.Tensor, Any], dict]] = None,
        **model_kwargs,
    ) -> torch.Tensor:
        raise NotImplementedError("generate by passing BatchInferState is not implemented.")

    # might want to use in rewritten generate method: use after model.forward
    # BatchInferState is created and kept during generation
    # after each iter of model forward, we should update BatchInferState
    def _update_batch_state(self, infer_state: Optional[BatchInferState]) -> None:
        """Advance per-sequence offsets after every sequence gained one token: start_loc[i] += i, seq_len += 1."""
        batch_size = infer_state.batch_size
        device = infer_state.start_loc.device
        infer_state.start_loc = infer_state.start_loc + torch.arange(0, batch_size, dtype=torch.int32, device=device)
        infer_state.seq_len += 1

    @torch.no_grad()
    def forward(self, batch_id, is_prefill):
        """
        Forward is used in Dynamic Batching Manager
        """
        batch = self.cache.pop(batch_id)
        if is_prefill:
            input_ = torch.tensor(batch.all_input_ids).cuda()
        else:
            # decode stage: one new token per sequence
            input_ = batch.input_ids.reshape(len(batch), 1)

        batch_args = {
            "batch_size": len(batch),
            "max_len_in_batch": batch.nopad_max_len_in_batch,
            "block_loc": batch.nopad_b_loc,
            "start_loc": batch.nopad_b_start_loc,
            "seq_len": batch.nopad_b_seq_len,
            "cache_manager": batch.cache_manager,
            "is_context_stage": is_prefill,
        }
        infer_state = BatchInferState(**batch_args)
        model = self.model
        if isinstance(model, LlamaForCausalLM):
            model = self.model.model
        elif isinstance(model, BloomForCausalLM):
            model = self.model.transformer

        setattr(model, "infer_state", infer_state)
        output = self.model.forward(input_ids=input_)
        logits = output.logits  # bsz, seq_len, vocab_size
        # greedy sampling over the last position's distribution
        prob_out = torch.softmax(
            logits[
                :,
                -1,
            ],
            dim=-1,
        ).squeeze(1)
        # prob_out: bsz, vocab_size
        predict_ids = torch.argmax(prob_out, dim=-1, keepdim=True)
        prob_out = torch.log(prob_out).detach().cpu().numpy()
        predict_ids = predict_ids.detach().cpu().numpy()  # [ batch_size, 1 ]

        output_dict = {}
        new_input_ids = []
        for i, (r, all_input_ids, next_token_id, next_token_logprob) in enumerate(
            zip(batch.requests, batch.all_input_ids, predict_ids, prob_out)
        ):
            next_token_id = int(next_token_id)
            next_token_logprob = next_token_logprob[next_token_id]
            all_input_ids.append(next_token_id)
            new_input_ids.append(next_token_id)
            batch.all_input_ids[i] = all_input_ids
            batch.input_lengths[i] += 1
            batch.out_token_id_counts[i][next_token_id] += 1
            metadata = {
                "id": int(next_token_id),
                "logprob": float(next_token_logprob),
            }
            output_dict[r["request_id"]] = (int(next_token_id), metadata)

        batch.input_ids = torch.tensor(new_input_ids, dtype=torch.long).cuda()
        batch.nopad_total_token_num += len(batch)
        batch.nopad_max_len_in_batch += 1
        # NOTE: we may replace this
        self.cache[batch.batch_id] = batch
        return output_dict

    @torch.no_grad()
    def _prefill_batch(self, batch_id):
        return self.forward(batch_id, is_prefill=True)

    @torch.no_grad()
    def _decode_batch(self, batch_id):
        return self.forward(batch_id, is_prefill=False)

    # might want to create a sequence pool
    #   add a single request/sequence/input text at a time and record its length
    #   In other words, store the actual length of input tokens representing a single input text
    #   E.g. "Introduce landmarks in Beijing"
    #       => add request
    #       => record token length and other necessary information to be used
    #       => engine hold all these necessary information until `generate` (or other name) is called,
    #       => put information already recorded in batchinferstate and pass it to model forward
    #       => clear records in engine
    def add_request():
        raise NotImplementedError()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/policies/llama.py
colossalai/legacy/inference/tensor_parallel/policies/llama.py
from functools import partial

import torch
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel, LlamaRMSNorm

from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription

# import colossalai
from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy

from ..modeling._utils import init_to_get_rotary
from ..modeling.llama import LlamaInferenceForwards

try:
    from lightllm.models.llama.triton_kernel.rmsnorm import rmsnorm_forward as lightllm_rmsnorm_forward

    HAS_TRITON_RMSNORM = True
# FIX: was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt;
# any failure to import the optional lightllm/triton kernel just disables the fast path.
except Exception:
    print("you should install triton from https://github.com/openai/triton")
    HAS_TRITON_RMSNORM = False


def get_triton_rmsnorm_forward():
    """Return a drop-in replacement for LlamaRMSNorm.forward backed by lightllm's
    Triton rmsnorm kernel, or None when the kernel is unavailable."""
    if HAS_TRITON_RMSNORM:

        def _triton_rmsnorm_forward(self: LlamaRMSNorm, hidden_states: torch.Tensor):
            return lightllm_rmsnorm_forward(hidden_states, self.weight.data, self.variance_epsilon)

        return _triton_rmsnorm_forward
    else:
        return None


class LlamaModelInferPolicy(LlamaForCausalLMPolicy):
    """Shardformer policy that swaps Llama forwards for KV-cache-aware inference
    forwards and, when requested, GPTQ-quantized linear layers."""

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        policy = super().module_policy()

        if self.shard_config.extra_kwargs.get("inference_gptq", False):
            from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear

            decoder_attribute_replacement = {
                "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,
                "self_attn.num_heads": self.model.config.num_attention_heads
                // self.shard_config.tensor_parallel_size,
            }
            # column-parallel for q/k/v and the MLP up/gate projections,
            # row-parallel for the output projections
            policy[LlamaDecoderLayer] = ModulePolicyDescription(
                attribute_replacement=decoder_attribute_replacement,
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="self_attn.q_proj",
                        target_module=ColCaiQuantLinear,
                        kwargs={"split_num": 1},
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.k_proj",
                        target_module=ColCaiQuantLinear,
                        kwargs={"split_num": 1},
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.v_proj",
                        target_module=ColCaiQuantLinear,
                        kwargs={"split_num": 1},
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.o_proj",
                        target_module=RowCaiQuantLinear,
                        kwargs={"split_num": 1},
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.gate_proj",
                        target_module=ColCaiQuantLinear,
                        kwargs={"split_num": 1},
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.up_proj",
                        target_module=ColCaiQuantLinear,
                        kwargs={"split_num": 1},
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.down_proj",
                        target_module=RowCaiQuantLinear,
                        kwargs={"split_num": 1},
                    ),
                ],
            )

        self.shard_config._infer()

        infer_forward = LlamaInferenceForwards.llama_model_forward
        method_replacement = {"forward": partial(infer_forward)}
        self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)

        infer_forward = LlamaInferenceForwards.llama_decoder_layer_forward
        method_replacement = {"forward": partial(infer_forward)}
        self.append_or_create_method_replacement(
            description=method_replacement, policy=policy, target_key=LlamaDecoderLayer
        )

        infer_forward = LlamaInferenceForwards.llama_flash_attn_kvcache_forward
        method_replacement = {"forward": partial(infer_forward)}
        self.append_or_create_method_replacement(
            description=method_replacement, policy=policy, target_key=LlamaAttention
        )

        # optional: use the Triton rmsnorm kernel when lightllm is installed
        infer_forward = None
        if HAS_TRITON_RMSNORM:
            infer_forward = get_triton_rmsnorm_forward()

        if infer_forward is not None:
            method_replacement = {"forward": partial(infer_forward)}
            self.append_or_create_method_replacement(
                description=method_replacement, policy=policy, target_key=LlamaRMSNorm
            )

        return policy

    def postprocess(self):
        # precompute rotary embedding tables on the sharded model
        init_to_get_rotary(self.model.model)
        return self.model
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/policies/chatglm2.py
colossalai/legacy/inference/tensor_parallel/policies/chatglm2.py
from functools import partial

from colossalai.shardformer.modeling.chatglm2_6b.modeling_chatglm import (
    ChatGLMForConditionalGeneration,
    ChatGLMModel,
    GLMBlock,
    GLMTransformer,
    SelfAttention,
)
from colossalai.shardformer.policies.chatglm2 import ChatGLMModelPolicy

from ..modeling._utils import init_to_get_rotary
from ..modeling.chatglm2 import ChatGLM2InferenceForwards

# NOTE: the original guarded this assignment with a try/except whose body performed
# no import, so the except branch was unreachable dead code and the flag was always
# True. Keep the flag (it is part of the module surface) without the vacuous guard.
HAS_TRITON_RMSNORM = True


class ChatGLM2InferPolicy(ChatGLMModelPolicy):
    """Policy that installs KV-cache-aware inference forwards on ChatGLM2 modules."""

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        policy = super().module_policy()
        # Flag inference mode on the shard config before registering forwards.
        self.shard_config._infer()

        model_infer_forward = ChatGLM2InferenceForwards.chatglm_model_forward
        method_replacement = {"forward": model_infer_forward}
        self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=ChatGLMModel)

        encoder_infer_forward = ChatGLM2InferenceForwards.chatglm_encoder_forward
        method_replacement = {"forward": encoder_infer_forward}
        self.append_or_create_method_replacement(
            description=method_replacement, policy=policy, target_key=GLMTransformer
        )

        encoder_layer_infer_forward = ChatGLM2InferenceForwards.chatglm_glmblock_forward
        method_replacement = {"forward": encoder_layer_infer_forward}
        self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=GLMBlock)

        attn_infer_forward = ChatGLM2InferenceForwards.chatglm_flash_attn_kvcache_forward
        method_replacement = {"forward": attn_infer_forward}
        self.append_or_create_method_replacement(
            description=method_replacement, policy=policy, target_key=SelfAttention
        )

        if self.shard_config.enable_tensor_parallelism:
            # Multi-query KV groups are split across tensor-parallel ranks.
            policy[GLMBlock].attribute_replacement["self_attention.num_multi_query_groups_per_partition"] = (
                self.model.config.multi_query_group_num // self.shard_config.tensor_parallel_size
            )

        # for rmsnorm and others, we need to check the shape
        return policy

    def postprocess(self):
        # Precompute the rotary cos/sin caches used by the inference forwards.
        init_to_get_rotary(self.model)
        return self.model


class ChatGLM2ForConditionalGenerationInferPolicy(ChatGLM2InferPolicy):
    """Extends the base inference policy with the conditional-generation head forward."""

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        policy = super().module_policy()
        model_infer_forward = ChatGLM2InferenceForwards.chatglm_for_conditional_generation_forward
        # The original wrapped this in a no-op `partial(...)`; the bare function is equivalent.
        method_replacement = {"forward": model_infer_forward}
        self.append_or_create_method_replacement(
            description=method_replacement, policy=policy, target_key=ChatGLMForConditionalGeneration
        )
        return policy

    def postprocess(self):
        return super().postprocess()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/policies/bloom.py
colossalai/legacy/inference/tensor_parallel/policies/bloom.py
from functools import partial

import torch
from torch.nn import LayerNorm

import colossalai.shardformer.layer as col_nn
from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription
from colossalai.shardformer.policies.bloom import BloomForCausalLMPolicy

from ..modeling.bloom import BloomInferenceForwards

try:
    from colossalai.kernel.triton import layer_norm

    HAS_TRITON_NORM = True
except Exception:  # was a bare `except`; keep the best-effort fallback but stop swallowing KeyboardInterrupt
    print("Some of our kernels require triton. You might want to install triton from https://github.com/openai/triton")
    HAS_TRITON_NORM = False


def get_triton_layernorm_forward():
    """Return a drop-in replacement for ``torch.nn.LayerNorm.forward`` backed by the
    triton kernel, or ``None`` when triton is unavailable."""
    if HAS_TRITON_NORM:

        def _triton_layernorm_forward(self: LayerNorm, hidden_states: torch.Tensor):
            return layer_norm(hidden_states, self.weight.data, self.bias, self.eps)

        return _triton_layernorm_forward
    else:
        return None


class BloomModelInferPolicy(BloomForCausalLMPolicy):
    """Policy that installs KV-cache-aware inference forwards on Bloom modules and,
    when ``inference_gptq`` is requested, GPTQ-quantized linear layers."""

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        from transformers.models.bloom.modeling_bloom import BloomAttention, BloomBlock, BloomForCausalLM, BloomModel

        policy = super().module_policy()
        if self.shard_config.extra_kwargs.get("inference_gptq", False):
            from colossalai.inference.quant.gptq.cai_gptq import ColCaiQuantLinear, RowCaiQuantLinear

            policy[BloomBlock] = ModulePolicyDescription(
                attribute_replacement={
                    # Attention is split across tensor-parallel ranks.
                    "self_attention.hidden_size": self.model.config.hidden_size
                    // self.shard_config.tensor_parallel_size,
                    "self_attention.split_size": self.model.config.hidden_size
                    // self.shard_config.tensor_parallel_size,
                    "self_attention.num_heads": self.model.config.n_head // self.shard_config.tensor_parallel_size,
                },
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="self_attention.query_key_value",
                        target_module=ColCaiQuantLinear,
                        # fused QKV projection -> 3 logical shards
                        kwargs={"split_num": 3},
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attention.dense", target_module=RowCaiQuantLinear, kwargs={"split_num": 1}
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attention.attention_dropout",
                        target_module=col_nn.DropoutForParallelInput,
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.dense_h_to_4h", target_module=ColCaiQuantLinear, kwargs={"split_num": 1}
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.dense_4h_to_h", target_module=RowCaiQuantLinear, kwargs={"split_num": 1}
                    ),
                ],
            )

        # NOTE set inference mode to shard config
        self.shard_config._infer()

        method_replacement = {
            "forward": BloomInferenceForwards.bloom_for_causal_lm_forward,
            "prepare_inputs_for_generation": BloomInferenceForwards.bloom_for_causal_lm_prepare_inputs_for_generation,
        }
        self.append_or_create_method_replacement(
            description=method_replacement, policy=policy, target_key=BloomForCausalLM
        )

        method_replacement = {"forward": BloomInferenceForwards.bloom_model_forward}
        self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=BloomModel)

        method_replacement = {"forward": BloomInferenceForwards.bloom_block_forward}
        self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=BloomBlock)

        method_replacement = {"forward": BloomInferenceForwards.bloom_attention_forward}
        self.append_or_create_method_replacement(
            description=method_replacement, policy=policy, target_key=BloomAttention
        )

        if HAS_TRITON_NORM:
            infer_method = get_triton_layernorm_forward()
            # The original wrapped this in a no-op `partial(...)`; the bare function is equivalent.
            method_replacement = {"forward": infer_method}
            self.append_or_create_method_replacement(
                description=method_replacement, policy=policy, target_key=LayerNorm
            )

        return policy
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/policies/__init__.py
colossalai/legacy/inference/tensor_parallel/policies/__init__.py
# Re-exports for the legacy tensor-parallel inference policies: one shardformer
# policy per supported architecture, each installing KV-cache-aware forwards.
from .bloom import BloomModelInferPolicy
from .chatglm2 import ChatGLM2InferPolicy
from .llama import LlamaModelInferPolicy

__all__ = ["BloomModelInferPolicy", "LlamaModelInferPolicy", "ChatGLM2InferPolicy"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/modeling/llama.py
colossalai/legacy/inference/tensor_parallel/modeling/llama.py
import math
from typing import List, Optional, Tuple

import torch
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel

from colossalai.inference.tensor_parallel.batch_infer_state import BatchInferState
from colossalai.kernel.triton import llama_context_attn_fwd, token_attention_fwd
from colossalai.kernel.triton.token_attention_kernel import Llama2TokenAttentionForwards

from ._utils import copy_kv_to_mem_cache

try:
    from lightllm.models.llama.triton_kernel.context_flashattention_nopad import (
        context_attention_fwd as lightllm_llama_context_attention_fwd,
    )
    from lightllm.models.llama.triton_kernel.rotary_emb import rotary_emb_fwd as llama_rotary_embedding_fwd

    HAS_LIGHTLLM_KERNEL = True
except Exception:  # was a bare `except`; keep the best-effort fallback but stop swallowing KeyboardInterrupt
    print("please install lightllm from source to run inference: https://github.com/ModelTC/lightllm")
    HAS_LIGHTLLM_KERNEL = False

try:
    from flash_attn import flash_attn_with_kvcache

    HAS_FLASH_KERNEL = True
except Exception:
    HAS_FLASH_KERNEL = False
    # message typo fixed ("attentiom" -> "attention")
    print("please install flash attention from https://github.com/Dao-AILab/flash-attention")


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
    """Apply rotary position embeddings to q/k (HF llama layout)."""
    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def llama_triton_context_attention(
    query_states, key_states, value_states, attn_output, infer_state, num_key_value_groups=1
):
    """Run context (prefill) attention, preferring the lightllm kernel when installed."""
    if HAS_LIGHTLLM_KERNEL is False:
        llama_context_attn_fwd(
            query_states,
            key_states,
            value_states,
            attn_output,
            infer_state.start_loc,
            infer_state.seq_len,
            infer_state.max_len_in_batch,
        )
    else:
        lightllm_llama_context_attention_fwd(
            query_states,
            key_states,
            value_states,
            attn_output,
            infer_state.start_loc,
            infer_state.seq_len,
            infer_state.max_len_in_batch,
        )


def llama_triton_token_attention(query_states, attn_output, infer_state, num_key_value_groups=1):
    """Run decode-stage (single new token) attention against the paged KV cache."""
    assert HAS_LIGHTLLM_KERNEL is True, "You have to install lightllm kernel to run token attention for llama models"
    if num_key_value_groups == 1:
        token_attention_fwd(
            query_states,
            infer_state.cache_manager.key_buffer[infer_state.decode_layer_id],
            infer_state.cache_manager.value_buffer[infer_state.decode_layer_id],
            attn_output,
            infer_state.block_loc,
            infer_state.start_loc,
            infer_state.seq_len,
            infer_state.max_len_in_batch,
        )
    else:
        # Grouped-query attention (llama2-style) needs the GQA-aware kernel.
        Llama2TokenAttentionForwards.token_attn(
            query_states,
            infer_state.cache_manager.key_buffer[infer_state.decode_layer_id],
            infer_state.cache_manager.value_buffer[infer_state.decode_layer_id],
            attn_output,
            infer_state.block_loc,
            infer_state.start_loc,
            infer_state.seq_len,
            infer_state.max_len_in_batch,
            infer_state.other_kv_index,
        )


class LlamaInferenceForwards:
    """
    This class holds forwards for llama inference.
    We intend to replace the forward methods for LlamaModel, LlamaDecoderLayer,
    and LlamaAttention for LlamaForCausalLM.
    """

    @staticmethod
    def llama_model_forward(
        self: LlamaModel,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Replacement for ``LlamaModel.forward`` driven by the ``BatchInferState``
        attached to the model as ``self.infer_state``."""
        infer_state = self.infer_state

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        # NOTE(review): `is_context_stage` is read here as carried over from the
        # previous step, then reassigned just below based on `seq_length`.
        if infer_state.is_context_stage:
            past_key_values_length = 0
        else:
            past_key_values_length = infer_state.max_len_in_batch - 1

        # NOTE: differentiate with prefill stage
        #       block_loc requires a different value-assigning method per stage
        if use_cache and seq_length != 1:
            # NOTE assume prefill stage; allocate memory block
            infer_state.is_context_stage = True  # set prefill stage, notify attention layer
            infer_state.context_mem_index = infer_state.cache_manager.alloc(infer_state.total_token_num)
            infer_state.init_block_loc(
                infer_state.block_loc, infer_state.seq_len, seq_length, infer_state.context_mem_index
            )
        else:
            infer_state.is_context_stage = False
            alloc_mem = infer_state.cache_manager.alloc_contiguous(batch_size)
            if alloc_mem is not None:
                infer_state.decode_is_contiguous = True
                infer_state.decode_mem_index = alloc_mem[0]
                infer_state.decode_mem_start = alloc_mem[1]
                infer_state.decode_mem_end = alloc_mem[2]
                infer_state.block_loc[:, infer_state.max_len_in_batch - 1] = infer_state.decode_mem_index
            else:
                # no f-prefix needed: the first message has no placeholders
                print(" *** Encountered allocation non-contiguous")
                print(f" infer_state.max_len_in_batch : {infer_state.max_len_in_batch}")
                infer_state.decode_is_contiguous = False
                alloc_mem = infer_state.cache_manager.alloc(batch_size)
                infer_state.decode_mem_index = alloc_mem
                infer_state.block_loc[:, infer_state.max_len_in_batch - 1] = infer_state.decode_mem_index

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.repeat(batch_size, 1)
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if infer_state.is_context_stage:
            # Gather per-token rotary cos/sin for the whole prompt.
            infer_state.position_cos = torch.index_select(self._cos_cached, 0, position_ids.view(-1)).view(
                position_ids.view(-1).shape[0], -1
            )
            infer_state.position_sin = torch.index_select(self._sin_cached, 0, position_ids.view(-1)).view(
                position_ids.view(-1).shape[0], -1
            )
        else:
            # Decode stage: only the latest position per sequence is needed.
            seq_len = infer_state.seq_len
            infer_state.position_cos = torch.index_select(self._cos_cached, 0, seq_len - 1).view(seq_len.shape[0], -1)
            infer_state.position_sin = torch.index_select(self._sin_cached, 0, seq_len - 1).view(seq_len.shape[0], -1)
            infer_state.other_kv_index = infer_state.block_loc[0, infer_state.max_len_in_batch - 1].item()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                (batch_size, infer_state.max_len_in_batch), dtype=torch.bool, device=inputs_embeds.device
            )

        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
        )

        hidden_states = inputs_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        infer_state.decode_layer_id = 0
        for idx, decoder_layer in enumerate(self.layers):
            past_key_value = past_key_values[idx] if past_key_values is not None else None
            # NOTE: modify here for passing args to decoder layer
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                infer_state=infer_state,
            )
            infer_state.decode_layer_id += 1
            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

        hidden_states = self.norm(hidden_states)
        next_cache = next_decoder_cache if use_cache else None

        # Advance the batch bookkeeping for the next decoding step.
        infer_state.start_loc += torch.arange(0, batch_size, dtype=torch.int32, device="cuda")
        infer_state.seq_len += 1
        infer_state.max_len_in_batch += 1

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    @staticmethod
    def llama_decoder_layer_forward(
        self: LlamaDecoderLayer,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        infer_state: Optional[BatchInferState] = None,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """Pre-norm transformer block (attention + MLP, each with residual add);
        identical to the HF layer except ``infer_state`` is threaded through."""
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            infer_state=infer_state,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        if use_cache:
            outputs += (present_key_value,)
        return outputs

    @staticmethod
    def llama_flash_attn_kvcache_forward(
        self: LlamaAttention,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        infer_state: Optional[BatchInferState] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Attention forward that reads/writes the shared KV cache instead of
        returning a ``past_key_value`` tuple (always returns ``None`` for it)."""
        assert use_cache is True, "use_cache should be set to True using this llama attention"

        bsz, q_len, _ = hidden_states.size()

        # key/value are kept in [bs, seq_len, num_kv_heads, head_dim] layout here.
        query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim)
        key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim)
        value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim)

        # Rotary embedding is applied in place by the lightllm kernel.
        cos, sin = infer_state.position_cos, infer_state.position_sin
        llama_rotary_embedding_fwd(query_states.view(-1, self.num_heads, self.head_dim), cos, sin)
        llama_rotary_embedding_fwd(key_states.view(-1, self.num_key_value_heads, self.head_dim), cos, sin)

        query_states = query_states.reshape(-1, self.num_heads, self.head_dim)
        key_states = key_states.reshape(-1, self.num_key_value_heads, self.head_dim)
        value_states = value_states.reshape(-1, self.num_key_value_heads, self.head_dim)

        if infer_state.is_context_stage:
            # first token generation: copy key/value of the current step to memory manager
            copy_kv_to_mem_cache(
                infer_state.decode_layer_id,
                key_states,
                value_states,
                infer_state.context_mem_index,
                infer_state.cache_manager,
            )
            attn_output = torch.empty_like(query_states)

            llama_triton_context_attention(
                query_states,
                key_states,
                value_states,
                attn_output,
                infer_state,
                num_key_value_groups=self.num_key_value_groups,
            )
        else:
            if infer_state.decode_is_contiguous:
                # contiguous decode slot: copy straight into the cache buffers
                cache_k = infer_state.cache_manager.key_buffer[infer_state.decode_layer_id][
                    infer_state.decode_mem_start : infer_state.decode_mem_end, :, :
                ]
                cache_v = infer_state.cache_manager.value_buffer[infer_state.decode_layer_id][
                    infer_state.decode_mem_start : infer_state.decode_mem_end, :, :
                ]
                cache_k.copy_(key_states)
                cache_v.copy_(value_states)
            else:
                # scattered slots: use the triton copy kernel
                copy_kv_to_mem_cache(
                    infer_state.decode_layer_id,
                    key_states,
                    value_states,
                    infer_state.decode_mem_index,
                    infer_state.cache_manager,
                )

            if HAS_LIGHTLLM_KERNEL:
                attn_output = torch.empty_like(query_states)
                llama_triton_token_attention(
                    query_states, attn_output, infer_state, num_key_value_groups=self.num_key_value_groups
                )
            else:
                # Removed a no-op statement from the original here
                # (`self.num_heads // self.num_key_value_heads` with the result discarded).
                cache_k = infer_state.cache_manager.key_buffer[infer_state.decode_layer_id]
                cache_v = infer_state.cache_manager.value_buffer[infer_state.decode_layer_id]
                query_states = query_states.view(bsz, -1, self.num_heads, self.head_dim)
                copy_cache_k = cache_k.view(bsz, -1, self.num_key_value_heads, self.head_dim)
                copy_cache_v = cache_v.view(bsz, -1, self.num_key_value_heads, self.head_dim)
                attn_output = flash_attn_with_kvcache(
                    q=query_states,
                    k_cache=copy_cache_k,
                    v_cache=copy_cache_v,
                    softmax_scale=1 / math.sqrt(self.head_dim),
                    causal=True,
                )

        attn_output = attn_output.view(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        # return past_key_value as None
        return attn_output, None, None
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/modeling/chatglm2.py
colossalai/legacy/inference/tensor_parallel/modeling/chatglm2.py
import os from typing import Optional, Tuple import torch from torch.nn import CrossEntropyLoss from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from colossalai.inference.tensor_parallel.batch_infer_state import BatchInferState from colossalai.kernel.triton.token_attention_kernel import Llama2TokenAttentionForwards from colossalai.shardformer.modeling.chatglm2_6b.modeling_chatglm import ( ChatGLMForConditionalGeneration, ChatGLMModel, GLMBlock, GLMTransformer, SelfAttention, split_tensor_along_last_dim, ) from ._utils import copy_kv_to_mem_cache try: from lightllm.models.chatglm2.triton_kernel.rotary_emb import rotary_emb_fwd as chatglm2_rotary_emb_fwd from lightllm.models.llama2.triton_kernel.context_flashattention_nopad import ( context_attention_fwd as lightllm_llama2_context_attention_fwd, ) HAS_LIGHTLLM_KERNEL = True except: print("please install lightllm from source to run inference: https://github.com/ModelTC/lightllm") HAS_LIGHTLLM_KERNEL = False # This func is same as Llama model init_to_get_rotary, we should move them into _utils.py def _init_to_get_rotary(self, base=10000): self.config.head_dim_ = self.config.hidden_size // self.config.num_attention_heads if not hasattr(self.config, "rope_scaling"): rope_scaling_factor = 1.0 else: rope_scaling_factor = self.config.rope_scaling.factor if self.config.rope_scaling is not None else 1.0 if hasattr(self.config, "max_sequence_length"): max_seq_len = self.config.max_sequence_length elif hasattr(self.config, "max_position_embeddings"): max_seq_len = self.config.max_position_embeddings * rope_scaling_factor else: max_seq_len = 2048 * rope_scaling_factor base = float(base) # NTK ref: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ try: ntk_alpha = float(os.environ.get("INFER_NTK_ALPHA", 1)) assert ntk_alpha >= 1 if ntk_alpha > 1: print(f"Note: NTK enabled, alpha set to {ntk_alpha}") max_seq_len *= ntk_alpha base = base * 
(ntk_alpha ** (self.head_dim_ / (self.head_dim_ - 2))) # Base change formula except: pass n_elem = self.config.head_dim_ // 2 inv_freq = 1.0 / (base ** (torch.arange(0, n_elem, 2, device="cpu", dtype=torch.float32) / n_elem)) t = torch.arange(max_seq_len + 1024 * 64, device="cpu", dtype=torch.float32) / rope_scaling_factor freqs = torch.outer(t, inv_freq) self._cos_cached = torch.cos(freqs).to(torch.float16).cuda() self._sin_cached = torch.sin(freqs).to(torch.float16).cuda() return def get_masks(self, input_ids, past_length, padding_mask=None): batch_size, seq_length = input_ids.shape full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device) full_attention_mask.tril_() if past_length: full_attention_mask = torch.cat( ( torch.ones(batch_size, seq_length, past_length, device=input_ids.device), full_attention_mask, ), dim=-1, ) if padding_mask is not None: full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1) if not past_length and padding_mask is not None: full_attention_mask -= padding_mask.unsqueeze(-1) - 1 full_attention_mask = (full_attention_mask < 0.5).bool() full_attention_mask.unsqueeze_(1) return full_attention_mask class ChatGLM2InferenceForwards: """ This class holds forwards for Chatglm2 inference. We intend to replace the forward methods for ChatGLMModel, ChatGLMEecoderLayer, and ChatGLMAttention. 
""" @staticmethod def chatglm_for_conditional_generation_forward( self: ChatGLMForConditionalGeneration, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, return_last_logit: Optional[bool] = False, ): use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict infer_state = self.infer_state if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") if infer_state.is_context_stage: past_key_values_length = 0 else: past_key_values_length = infer_state.max_len_in_batch - 1 seq_length_with_past = seq_length + past_key_values_length # prefill stage at first if use_cache and seq_length != 1: infer_state.is_context_stage = True infer_state.context_mem_index = infer_state.cache_manager.alloc(infer_state.total_token_num) infer_state.init_block_loc( infer_state.block_loc, infer_state.seq_len, seq_length, infer_state.context_mem_index ) else: infer_state.is_context_stage = False alloc_mem = infer_state.cache_manager.alloc_contiguous(batch_size) if alloc_mem is not None: infer_state.decode_is_contiguous = True infer_state.decode_mem_index = alloc_mem[0] infer_state.decode_mem_start = alloc_mem[1] infer_state.decode_mem_end = alloc_mem[2] infer_state.block_loc[:, seq_length_with_past - 1] = 
infer_state.decode_mem_index else: print(f" *** Encountered allocation non-contiguous") print( f" infer_state.cache_manager.past_key_values_length: {infer_state.cache_manager.past_key_values_length}" ) infer_state.decode_is_contiguous = False alloc_mem = infer_state.cache_manager.alloc(batch_size) infer_state.decode_mem_index = alloc_mem # infer_state.decode_key_buffer = torch.empty((batch_size, self.tp_head_num_, self.head_dim_), dtype=torch.float16, device="cuda") # infer_state.decode_value_buffer = torch.empty((batch_size, self.tp_head_num_, self.head_dim_), dtype=torch.float16, device="cuda") infer_state.block_loc[:, seq_length_with_past - 1] = infer_state.decode_mem_index # related to rotary embedding if infer_state.is_context_stage: infer_state.position_cos = torch.index_select(self._cos_cached, 0, position_ids.view(-1)).view( position_ids.view(-1).shape[0], -1 ) infer_state.position_sin = torch.index_select(self._sin_cached, 0, position_ids.view(-1)).view( position_ids.view(-1).shape[0], -1 ) else: seq_len = infer_state.seq_len infer_state.position_cos = torch.index_select(self._cos_cached, 0, seq_len - 1).view(seq_len.shape[0], -1) infer_state.position_sin = torch.index_select(self._sin_cached, 0, seq_len - 1).view(seq_len.shape[0], -1) infer_state.other_kv_index = infer_state.block_loc[0, infer_state.max_len_in_batch - 1].item() transformer_outputs = self.transformer( input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_hidden_states=output_hidden_states, return_dict=return_dict, infer_state=infer_state, ) hidden_states = transformer_outputs[0] if return_last_logit: hidden_states = hidden_states[-1:] lm_logits = self.transformer.output_layer(hidden_states) lm_logits = lm_logits.transpose(0, 1).contiguous() loss = None if labels is not None: lm_logits = lm_logits.to(torch.float32) # Shift so that tokens < n predict n shift_logits = 
lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) lm_logits = lm_logits.to(hidden_states.dtype) loss = loss.to(hidden_states.dtype) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def chatglm_model_forward( self: ChatGLMModel, input_ids, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.BoolTensor] = None, full_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, infer_state: BatchInferState = None, ): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, seq_length = input_ids.shape if inputs_embeds is None: inputs_embeds = self.embedding(input_ids) if self.pre_seq_len is not None: if past_key_values is None: past_key_values = self.get_prompt( batch_size=batch_size, device=input_ids.device, dtype=inputs_embeds.dtype, ) if attention_mask is not None: attention_mask = torch.cat( [ attention_mask.new_ones((batch_size, self.pre_seq_len)), attention_mask, ], dim=-1, ) if full_attention_mask is None: if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1): 
full_attention_mask = get_masks( self, input_ids, infer_state.cache_manager.past_key_values_length, padding_mask=attention_mask ) # Run encoder. hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( inputs_embeds, full_attention_mask, kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states, infer_state=infer_state, ) # update indices # infer_state.block_loc[:, infer_state.max_len_in_batch-1] = infer_state.total_token_num + torch.arange(0, batch_size, dtype=torch.int32, device="cuda") infer_state.start_loc = infer_state.start_loc + torch.arange(0, batch_size, dtype=torch.int32, device="cuda") infer_state.seq_len += 1 infer_state.max_len_in_batch += 1 if not return_dict: return tuple( v for v in [ hidden_states, presents, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @staticmethod def chatglm_encoder_forward( self: GLMTransformer, hidden_states, attention_mask, kv_caches=None, use_cache: Optional[bool] = True, output_hidden_states: Optional[bool] = False, infer_state: Optional[BatchInferState] = None, ): hidden_states = hidden_states.transpose(0, 1).contiguous() if not kv_caches: kv_caches = [None for _ in range(self.num_layers)] presents = () if use_cache else None all_self_attentions = None all_hidden_states = () if output_hidden_states else None infer_state.decode_layer_id = 0 for index in range(self.num_layers): layer = self.layers[index] layer_ret = layer( hidden_states, attention_mask, kv_cache=kv_caches[index], use_cache=use_cache, infer_state=infer_state, ) infer_state.decode_layer_id += 1 hidden_states, kv_cache = layer_ret if use_cache: presents = presents + (kv_cache,) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # Final layer norm. 
hidden_states = hidden_states.transpose(0, 1).contiguous() if self.post_layer_norm: hidden_states = self.final_layernorm(hidden_states) return hidden_states, presents, all_hidden_states, all_self_attentions @staticmethod def chatglm_glmblock_forward( self: GLMBlock, hidden_states, attention_mask, kv_cache=None, use_cache=True, infer_state: Optional[BatchInferState] = None, ): # hidden_states: [s, b, h] # Layer norm at the beginning of the transformer layer. layernorm_output = self.input_layernorm(hidden_states) # Self attention. attention_output, kv_cache = self.self_attention( layernorm_output, attention_mask, kv_cache=kv_cache, use_cache=use_cache, infer_state=infer_state, ) # Residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training) layernorm_input = residual + layernorm_input # Layer norm post the self attention. layernorm_output = self.post_attention_layernorm(layernorm_input) # MLP. mlp_output = self.mlp(layernorm_output) # Second residual connection. 
if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training) output = residual + output return output, kv_cache @staticmethod def chatglm_flash_attn_kvcache_forward( self: SelfAttention, hidden_states, attention_mask, kv_cache=None, use_cache=True, infer_state: Optional[BatchInferState] = None, ): assert use_cache is True, "use_cache should be set to True using this chatglm attention" # hidden_states: original :[sq, b, h] --> this [b, sq, h] batch_size = hidden_states.shape[0] hidden_size = hidden_states.shape[-1] # Attention heads [sq, b, h] --> [sq, b, (np * 3 * hn)] mixed_x_layer = self.query_key_value(hidden_states) if self.multi_query_attention: (query_layer, key_layer, value_layer) = mixed_x_layer.split( [ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, ], dim=-1, ) query_layer = query_layer.view( query_layer.size()[:-1] + ( self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ) ) key_layer = key_layer.view( key_layer.size()[:-1] + ( self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head, ) ) value_layer = value_layer.view( value_layer.size()[:-1] + ( self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head, ) ) else: new_tensor_shape = mixed_x_layer.size()[:-1] + ( self.num_attention_heads_per_partition, 3 * self.hidden_size_per_attention_head, ) mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) cos, sin = infer_state.position_cos, infer_state.position_sin chatglm2_rotary_emb_fwd( query_layer.view(-1, 
self.num_attention_heads_per_partition, self.hidden_size_per_attention_head), cos, sin ) if self.multi_query_attention: chatglm2_rotary_emb_fwd( key_layer.view(-1, self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head), cos, sin, ) else: chatglm2_rotary_emb_fwd( key_layer.view(-1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head), cos, sin, ) # reshape q k v to [bsz*sql, num_heads, head_dim] 2*1 ,32/2 ,128 query_layer = query_layer.reshape( -1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head ) key_layer = key_layer.reshape( -1, self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head ) value_layer = value_layer.reshape( -1, self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head ) if infer_state.is_context_stage: # first token generation: # copy key and value calculated in current step to memory manager copy_kv_to_mem_cache( infer_state.decode_layer_id, key_layer, value_layer, infer_state.context_mem_index, infer_state.cache_manager, ) attn_output = torch.empty_like(query_layer.contiguous().view(-1, self.projection_size)) # NOTE: no bug in context attn fwd (del it ) lightllm_llama2_context_attention_fwd( query_layer, key_layer, value_layer, attn_output.view(-1, self.num_attention_heads_per_partition, self.hidden_size_per_attention_head), infer_state.start_loc, infer_state.seq_len, infer_state.max_len_in_batch, ) else: if infer_state.decode_is_contiguous: # if decode is contiguous, then we copy to key cache and value cache in cache manager directly cache_k = infer_state.cache_manager.key_buffer[infer_state.decode_layer_id][ infer_state.decode_mem_start : infer_state.decode_mem_end, :, : ] cache_v = infer_state.cache_manager.value_buffer[infer_state.decode_layer_id][ infer_state.decode_mem_start : infer_state.decode_mem_end, :, : ] cache_k.copy_(key_layer) cache_v.copy_(value_layer) else: # if decode is not contiguous, use triton kernel to copy 
key and value cache # k, v shape: [batch_size, num_heads, head_dim/embed_size_per_head copy_kv_to_mem_cache( infer_state.decode_layer_id, key_layer, value_layer, infer_state.decode_mem_index, infer_state.cache_manager, ) # second token and follows attn_output = torch.empty_like(query_layer.contiguous().view(-1, self.projection_size)) cache_k = infer_state.cache_manager.key_buffer[infer_state.decode_layer_id][ : infer_state.decode_mem_end, :, : ] cache_v = infer_state.cache_manager.value_buffer[infer_state.decode_layer_id][ : infer_state.decode_mem_end, :, : ] # ================================== # core attention computation is replaced by triton kernel # ================================== Llama2TokenAttentionForwards.token_attn( query_layer, cache_k, cache_v, attn_output, infer_state.block_loc, infer_state.start_loc, infer_state.seq_len, infer_state.max_len_in_batch, infer_state.other_kv_index, ) # print('after attention',torch.isnan(attn_output).any()) # ================= # Output:[b,sq, h] # ================= output = self.dense(attn_output).reshape(batch_size, -1, hidden_size) return output, kv_cache
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/modeling/bloom.py
colossalai/legacy/inference/tensor_parallel/modeling/bloom.py
import math import warnings from typing import Optional, Tuple, Union import torch import torch.distributed as dist from torch.nn import CrossEntropyLoss from torch.nn import functional as F from transformers.models.bloom.modeling_bloom import ( BaseModelOutputWithPastAndCrossAttentions, BloomAttention, BloomBlock, BloomForCausalLM, BloomModel, CausalLMOutputWithCrossAttentions, ) from transformers.utils import logging from colossalai.inference.tensor_parallel.batch_infer_state import BatchInferState from colossalai.kernel.triton import bloom_context_attn_fwd, copy_kv_cache_to_dest, token_attention_fwd try: from lightllm.models.bloom.triton_kernel.context_flashattention_nopad import ( context_attention_fwd as lightllm_bloom_context_attention_fwd, ) HAS_LIGHTLLM_KERNEL = True except: HAS_LIGHTLLM_KERNEL = False def generate_alibi(n_head, dtype=torch.float16): """ This method is adapted from `_generate_alibi` function in `lightllm/models/bloom/layer_weights/transformer_layer_weight.py` of the ModelTC/lightllm GitHub repository. This method is originally the `build_alibi_tensor` function in `transformers/models/bloom/modeling_bloom.py` of the huggingface/transformers GitHub repository. """ def get_slopes_power_of_2(n): start = 2 ** (-(2 ** -(math.log2(n) - 3))) return [start * start**i for i in range(n)] def get_slopes(n): if math.log2(n).is_integer(): return get_slopes_power_of_2(n) else: closest_power_of_2 = 2 ** math.floor(math.log2(n)) slopes_power_of_2 = get_slopes_power_of_2(closest_power_of_2) slopes_double = get_slopes(2 * closest_power_of_2) slopes_combined = slopes_power_of_2 + slopes_double[0::2][: n - closest_power_of_2] return slopes_combined slopes = get_slopes(n_head) return torch.tensor(slopes, dtype=dtype) class BloomInferenceForwards: """ This class serves a micro library for bloom inference forwards. 
We intend to replace the forward methods for BloomForCausalLM, BloomModel, BloomBlock, and BloomAttention, as well as prepare_inputs_for_generation method for BloomForCausalLM. For future improvement, we might want to skip replacing methods for BloomForCausalLM, and call BloomModel.forward iteratively in TpInferEngine """ @staticmethod def bloom_model_forward( self: BloomModel, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, infer_state: Optional[BatchInferState] = None, **deprecated_arguments, ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: logger = logging.get_logger(__name__) if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") # still need to keep past_key_values to fit original forward flow if past_key_values is None: past_key_values = tuple([None] * len(self.h)) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x N x N # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) hidden_states = self.word_embeddings_layernorm(inputs_embeds) presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # NOTE determine if BatchInferState is passed in via arg # if not, get the attr binded to the model # We might wantto remove setattr later if infer_state is None: assert hasattr(self, "infer_state") infer_state = self.infer_state # infer_state.cache_manager = self.cache_manager if infer_state.is_context_stage: past_key_values_length = 0 else: past_key_values_length = infer_state.max_len_in_batch - 1 if use_cache and seq_length != 1: # prefill stage infer_state.is_context_stage = True # set prefill stage, notify attention layer infer_state.context_mem_index = infer_state.cache_manager.alloc(infer_state.total_token_num) BatchInferState.init_block_loc( infer_state.block_loc, infer_state.seq_len, seq_length, infer_state.context_mem_index ) else: infer_state.is_context_stage = False alloc_mem = infer_state.cache_manager.alloc_contiguous(batch_size) if alloc_mem is not None: infer_state.decode_is_contiguous = True infer_state.decode_mem_index = alloc_mem[0] infer_state.decode_mem_start = alloc_mem[1] infer_state.decode_mem_end = alloc_mem[2] infer_state.block_loc[:, infer_state.max_len_in_batch - 1] = infer_state.decode_mem_index else: print(f" *** Encountered allocation non-contiguous") print(f" infer_state.max_len_in_batch : {infer_state.max_len_in_batch}") infer_state.decode_is_contiguous = False alloc_mem = infer_state.cache_manager.alloc(batch_size) infer_state.decode_mem_index = alloc_mem # infer_state.decode_key_buffer = torch.empty((batch_size, self.tp_head_num_, self.head_dim_), dtype=torch.float16, device="cuda") # infer_state.decode_value_buffer = torch.empty((batch_size, self.tp_head_num_, self.head_dim_), dtype=torch.float16, device="cuda") infer_state.block_loc[:, infer_state.max_len_in_batch - 1] = infer_state.decode_mem_index if attention_mask is None: attention_mask = torch.ones((batch_size, infer_state.max_len_in_batch), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) # NOTE revise: we 
might want to store a single 1D alibi(length is #heads) in model, # or store to BatchInferState to prevent re-calculating # When we have multiple process group (e.g. dp together with tp), we need to pass the pg to here # alibi = generate_alibi(self.num_heads).contiguous().cuda() tp_size = dist.get_world_size() curr_tp_rank = dist.get_rank() alibi = ( generate_alibi(self.num_heads * tp_size) .contiguous()[curr_tp_rank * self.num_heads : (curr_tp_rank + 1) * self.num_heads] .cuda() ) causal_mask = self._prepare_attn_mask( attention_mask, input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length, ) infer_state.decode_layer_id = 0 for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: # NOTE: currently our KV cache manager does not handle this condition def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, use_cache=use_cache, output_attentions=output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, alibi, causal_mask, layer_past, head_mask[i], ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, infer_state=infer_state, ) infer_state.decode_layer_id += 1 hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) # Add last hidden state hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # update indices of kv cache block # NOT READY FOR PRIME TIME # might want to remove this part, instead, better to pass the BatchInferState from model 
forward, # and update these information in engine.generate after model foward called infer_state.start_loc = infer_state.start_loc + torch.arange(0, batch_size, dtype=torch.int32, device="cuda") infer_state.seq_len += 1 infer_state.max_len_in_batch += 1 if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, # should always be (None, None, ..., None) hidden_states=all_hidden_states, attentions=all_self_attentions, ) @staticmethod def bloom_for_causal_lm_forward( self: BloomForCausalLM, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, infer_state: Optional[BatchInferState] = None, **deprecated_arguments, ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ logging.get_logger(__name__) if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = BloomInferenceForwards.bloom_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, infer_state=infer_state, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() batch_size, seq_length, vocab_size = shift_logits.shape # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct( shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length) ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def bloom_for_causal_lm_prepare_inputs_for_generation( self: BloomForCausalLM, input_ids: torch.LongTensor, past_key_values: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, **kwargs, ) -> dict: # only last token for input_ids if past is not None if past_key_values: input_ids = input_ids[:, -1].unsqueeze(-1) # NOTE we won't use past key values here # the cache may be in the 
stardard format (e.g. in contrastive search), convert to bloom's format if needed # if past_key_values[0][0].shape[0] == input_ids.shape[0]: # past_key_values = self._convert_to_bloom_cache(past_key_values) # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def bloom_block_forward( self: BloomBlock, hidden_states: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, infer_state: Optional[BatchInferState] = None, ): # hidden_states: [batch_size, seq_length, hidden_size] # Layer norm at the beginning of the transformer layer. layernorm_output = self.input_layernorm(hidden_states) # Layer norm post the self attention. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states # Self attention. attn_outputs = self.self_attention( layernorm_output, residual, layer_past=layer_past, attention_mask=attention_mask, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, infer_state=infer_state, ) attention_output = attn_outputs[0] outputs = attn_outputs[1:] layernorm_output = self.post_attention_layernorm(attention_output) # Get residual if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = attention_output # MLP. 
output = self.mlp(layernorm_output, residual) if use_cache: outputs = (output,) + outputs else: outputs = (output,) + outputs[1:] return outputs # hidden_states, present, attentions @staticmethod def bloom_attention_forward( self: BloomAttention, hidden_states: torch.Tensor, residual: torch.Tensor, alibi: torch.Tensor, attention_mask: torch.Tensor, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, infer_state: Optional[BatchInferState] = None, ): fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] # 3 x [batch_size, seq_length, num_heads, head_dim] (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) batch_size, q_length, H, D_HEAD = query_layer.shape k = key_layer.reshape(-1, H, D_HEAD) # batch_size * q_length, H, D_HEAD, q_lenth == 1 v = value_layer.reshape(-1, H, D_HEAD) # batch_size * q_length, H, D_HEAD, q_lenth == 1 mem_manager = infer_state.cache_manager layer_id = infer_state.decode_layer_id if infer_state.is_context_stage: # context process max_input_len = q_length b_start_loc = infer_state.start_loc b_seq_len = infer_state.seq_len[:batch_size] q = query_layer.reshape(-1, H, D_HEAD) copy_kv_cache_to_dest(k, infer_state.context_mem_index, mem_manager.key_buffer[layer_id]) copy_kv_cache_to_dest(v, infer_state.context_mem_index, mem_manager.value_buffer[layer_id]) # output = self.output[:batch_size*q_length, :, :] output = torch.empty_like(q) if HAS_LIGHTLLM_KERNEL: lightllm_bloom_context_attention_fwd(q, k, v, output, alibi, b_start_loc, b_seq_len, max_input_len) else: bloom_context_attn_fwd(q, k, v, output, b_start_loc, b_seq_len, max_input_len, alibi) context_layer = output.view(batch_size, q_length, H * D_HEAD) else: # query_layer = query_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim) # need shape: batch_size, H, D_HEAD (q_length == 1), input q shape : 
(batch_size, q_length(1), H, D_HEAD) assert q_length == 1, "for non-context process, we only support q_length == 1" q = query_layer.reshape(-1, H, D_HEAD) if infer_state.decode_is_contiguous: # if decode is contiguous, then we copy to key cache and value cache in cache manager directly cache_k = infer_state.cache_manager.key_buffer[layer_id][ infer_state.decode_mem_start : infer_state.decode_mem_end, :, : ] cache_v = infer_state.cache_manager.value_buffer[layer_id][ infer_state.decode_mem_start : infer_state.decode_mem_end, :, : ] cache_k.copy_(k) cache_v.copy_(v) else: # if decode is not contiguous, use triton kernel to copy key and value cache # k, v shape: [batch_size, num_heads, head_dim/embed_size_per_head] copy_kv_cache_to_dest(k, infer_state.decode_mem_index, mem_manager.key_buffer[layer_id]) copy_kv_cache_to_dest(v, infer_state.decode_mem_index, mem_manager.value_buffer[layer_id]) b_start_loc = infer_state.start_loc b_loc = infer_state.block_loc b_seq_len = infer_state.seq_len output = torch.empty_like(q) token_attention_fwd( q, mem_manager.key_buffer[layer_id], mem_manager.value_buffer[layer_id], output, b_loc, b_start_loc, b_seq_len, infer_state.max_len_in_batch, alibi, ) context_layer = output.view(batch_size, q_length, H * D_HEAD) # NOTE: always set present as none for now, instead of returning past key value to the next decoding, # we create the past key value pair from the cache manager present = None # aggregate results across tp ranks. 
See here: https://github.com/pytorch/pytorch/issues/76232 if self.pretraining_tp > 1 and self.slow_but_exact: slices = self.hidden_size / self.pretraining_tp output_tensor = torch.zeros_like(context_layer) for i in range(self.pretraining_tp): output_tensor = output_tensor + F.linear( context_layer[:, :, int(i * slices) : int((i + 1) * slices)], self.dense.weight[:, int(i * slices) : int((i + 1) * slices)], ) else: output_tensor = self.dense(context_layer) # dropout is not required here during inference output_tensor = residual + output_tensor outputs = (output_tensor, present) assert output_attentions is False, "we do not support output_attentions at this time" return outputs
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/modeling/__init__.py
colossalai/legacy/inference/tensor_parallel/modeling/__init__.py
from .bloom import BloomInferenceForwards from .chatglm2 import ChatGLM2InferenceForwards from .llama import LlamaInferenceForwards __all__ = ["BloomInferenceForwards", "LlamaInferenceForwards", "ChatGLM2InferenceForwards"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/tensor_parallel/modeling/_utils.py
colossalai/legacy/inference/tensor_parallel/modeling/_utils.py
""" Utils for model inference """ import os import torch from colossalai.kernel.triton.copy_kv_cache_dest import copy_kv_cache_to_dest def copy_kv_to_mem_cache(layer_id, key_buffer, value_buffer, context_mem_index, mem_manager): """ This function copies the key and value cache to the memory cache Args: layer_id : id of current layer key_buffer : key cache value_buffer : value cache context_mem_index : index of memory cache in kv cache manager mem_manager : cache manager """ copy_kv_cache_to_dest(key_buffer, context_mem_index, mem_manager.key_buffer[layer_id]) copy_kv_cache_to_dest(value_buffer, context_mem_index, mem_manager.value_buffer[layer_id]) def init_to_get_rotary(self, base=10000, use_elem=False): """ This function initializes the rotary positional embedding, it is compatible for all models and is called in ShardFormer Args: self : Model that holds the rotary positional embedding base : calculation arg use_elem : activated when using chatglm-based models """ self.config.head_dim_ = self.config.hidden_size // self.config.num_attention_heads if not hasattr(self.config, "rope_scaling"): rope_scaling_factor = 1.0 else: rope_scaling_factor = self.config.rope_scaling.factor if self.config.rope_scaling is not None else 1.0 if hasattr(self.config, "max_sequence_length"): max_seq_len = self.config.max_sequence_length elif hasattr(self.config, "max_position_embeddings"): max_seq_len = self.config.max_position_embeddings * rope_scaling_factor else: max_seq_len = 2048 * rope_scaling_factor base = float(base) # NTK ref: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ ntk_alpha = os.environ.get("INFER_NTK_ALPHA", None) if ntk_alpha is not None: ntk_alpha = float(ntk_alpha) assert ntk_alpha >= 1, "NTK alpha must be greater than or equal to 1" if ntk_alpha > 1: print(f"Note: NTK enabled, alpha set to {ntk_alpha}") max_seq_len *= ntk_alpha base = base * (ntk_alpha ** (self.head_dim_ / (self.head_dim_ - 2))) # Base 
change formula n_elem = self.config.head_dim_ if use_elem: n_elem //= 2 inv_freq = 1.0 / (base ** (torch.arange(0, n_elem, 2, device="cpu", dtype=torch.float32) / n_elem)) t = torch.arange(max_seq_len + 1024 * 64, device="cpu", dtype=torch.float32) / rope_scaling_factor freqs = torch.outer(t, inv_freq) self._cos_cached = torch.cos(freqs).to(torch.float16).cuda() self._sin_cached = torch.sin(freqs).to(torch.float16).cuda()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/ray_init_config.py
colossalai/legacy/inference/dynamic_batching/ray_init_config.py
import logging import yaml from pydantic import BaseModel logger = logging.getLogger(__name__) class EngineArgsClass(BaseModel): """Config for Engine""" model: str tensor_parallel_size: int = 2 max_batch_size: int = 4 max_input_len: int = 128 max_output_len: int = 32 class RooterArgsClass(BaseModel): """Config for Rooter""" max_total_token_num: int = 42 batch_max_tokens: int = 42 eos_id: int = 0 disable_log_stats: bool = False log_stats_interval: int = 10 model: str class RayInitConfig(BaseModel): """All-together configs without app router config""" engine_config_data: EngineArgsClass router_config_data: RooterArgsClass @classmethod def from_yaml_path(cls, path: str): try: with open(path, "r") as yaml_file: try: config = yaml.safe_load(yaml_file) # serve deployment config engine_config = config.get("engine_config", {}) router_config = config.get("router_config", {}) return cls( engine_config_data=engine_config, router_config_data=router_config, ) except yaml.YAMLError as e: logger.error(f"An Error occurred when parsing yaml: {e}") raise except FileNotFoundError: logger.error(f"The file '{path}' does not exist!") raise except OSError as e: logger.error(f"An Error occurred: {e}") raise
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/ray_dist_init.py
colossalai/legacy/inference/dynamic_batching/ray_dist_init.py
import logging import os from typing import List import ray import ray.util.collective as collective import torch from transformers import AutoModelForCausalLM import colossalai from colossalai.inference.async_manager import start_dynamic_batching from colossalai.inference.dynamic_batching.get_tokenizer import get_tokenizer from colossalai.inference.dynamic_batching.io_struct import RequestOutput from colossalai.inference.dynamic_batching.ray_init_config import EngineArgsClass, RooterArgsClass from colossalai.inference.dynamic_batching.sampling_params import SamplingParams from colossalai.inference.tensor_parallel.engine import TPInferEngine from colossalai.shardformer import ShardConfig from colossalai.testing import free_port ray_serve_logger = logging.getLogger("ray.serve") def log_cuda_info(scope_name: str): ray_serve_logger.info(f" {scope_name}: ray.get_gpu_ids(): {ray.get_gpu_ids()}") ray_serve_logger.info( f" {scope_name}: CUDA_VISIBLE_DEVICES: {os.getenv('CUDA_VISIBLE_DEVICES', 'NO DEVICES FOUND!')}" ) if torch.cuda.is_available(): ray_serve_logger.info( f" {scope_name}: cuda current_device: {torch.cuda.current_device()}, cuda device count: {torch.cuda.device_count()}" ) else: ray_serve_logger.info(f" {scope_name}: cuda is not available!") @ray.remote(num_gpus=1) class Worker: def __init__( self, model_path: str, tensor_parallel_size: int, max_batch_size: int, max_input_len: int, max_output_len: int, router_config: RooterArgsClass, ): log_cuda_info("Worker.init") self.tensor_parallel_size = tensor_parallel_size self.model_path = model_path self.max_batch_size = max_batch_size self.max_input_len = max_input_len self.max_output_len = max_output_len self.router_config = router_config def setup(self, world_size, rank, port): # initialize a ray collective group, otherwise colossalai distributed env won't be built successfully collective.init_collective_group(world_size, rank, "nccl", "default") # initialize and set distributed environment 
colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl") ray_serve_logger.info(f"Worker with rank {rank} (world size {world_size}) setting up..") log_cuda_info("Worker.setup") # Load model self.tokenizer = get_tokenizer(tokenizer_name=self.model_path) if self.tokenizer.pad_token is None: self.tokenizer.pad_token = self.tokenizer.eos_token self.model = AutoModelForCausalLM.from_pretrained( self.model_path, pad_token_id=self.tokenizer.pad_token_id, torch_dtype=torch.float16 ) shard_config = ShardConfig( enable_tensor_parallelism=True if world_size > 1 else False, extra_kwargs={"inference_only": True} ) self.infer_engine = TPInferEngine( self.model, shard_config, self.max_batch_size, self.max_input_len, self.max_output_len ) self.start_dynamic_batching = start_dynamic_batching(self.router_config, self.infer_engine, []) return True # def generate(self, request_id: str, prompt: str, sampling_params: SamplingParams) -> List[str]: # ray_serve_logger.info(f"text: {prompt}") # final_outputs = self.start_dynamic_batching.generate(prompt, sampling_params, request_id) # return final_outputs def add_input(self, request_id: str, prompt: str, sampling_params: SamplingParams): self.start_dynamic_batching.add_input(request_id, prompt, sampling_params) def abort(self, request_id: str): self.start_dynamic_batching.abort(request_id) def step(self) -> List[RequestOutput]: return self.start_dynamic_batching._step() def add_req(self, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str, prompt: str): self.start_dynamic_batching.add_req(prompt_ids, sampling_params, request_id, prompt) def is_running(self): return self.start_dynamic_batching.is_running() class Driver: def __init__(self, router_config: RooterArgsClass, engine_config: EngineArgsClass): log_cuda_info("Driver:init") model_path = engine_config.model tensor_parallel_size = engine_config.tensor_parallel_size self.num_workers = tensor_parallel_size self.workers = [] 
init_rets = [] # Just grab a free port on localhost # NOTE workers in this communication group listen to the same port available_port = free_port() for i in range(self.num_workers): worker_name = "worker_idx_{}".format(i) w = Worker.options(name=worker_name).remote( model_path, self.num_workers, engine_config.max_batch_size, engine_config.max_input_len, engine_config.max_output_len, router_config, ) self.workers.append(w) init_rets.append(w.setup.remote(self.num_workers, i, available_port)) _options = { "group_name": "default_driver", "world_size": self.num_workers, "ranks": [i for i in range(self.num_workers)], "backend": "nccl", } collective.create_collective_group(self.workers, **_options) _ = ray.get(init_rets) def add_input(self, request_id: str, prompt: str, sampling_params: SamplingParams): ray.get([w.add_input.remote(request_id, prompt, sampling_params) for w in self.workers]) def abort(self, request_id: str): ray.get([w.abort.remote(request_id) for w in self.workers]) def step(self): results = ray.get([w.step.remote() for w in self.workers]) outputs = results[0] # get any one of the copies return outputs def add_req(self, request_id: str, prompt_ids: List[int], sampling_params: SamplingParams, prompt: str): ray.get([w.add_req.remote(prompt_ids, sampling_params, request_id, prompt) for w in self.workers]) def is_running(self): results = ray.get([w.is_running.remote() for w in self.workers]) return any(results)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/get_tokenizer.py
colossalai/legacy/inference/dynamic_batching/get_tokenizer.py
""" Motivated by VllM (https://github.com/vllm-project/vllm), This module is trying to resolve the tokenizer issue. license: MIT, see LICENSE for more details. """ from transformers import AutoTokenizer _FAST_LLAMA_TOKENIZER = "hf-internal-testing/llama-tokenizer" def get_tokenizer( tokenizer=None, tokenizer_name: str = "", trust_remote_code: bool = False, use_fast: bool = True, ): if tokenizer is not None: tokenizer = tokenizer else: if "llama" in tokenizer_name.lower() and use_fast == True: print( "For some LLaMA-based models, initializing the fast tokenizer may " "take a long time. To eliminate the initialization time, consider " f"using '{_FAST_LLAMA_TOKENIZER}' instead of the original " "tokenizer. This is done automatically in Colossalai." ) tokenizer_name = _FAST_LLAMA_TOKENIZER try: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, use_fast=use_fast, trust_remote_code=trust_remote_code ) except TypeError: use_fast = False tokenizer = AutoTokenizer.from_pretrained( tokenizer_name, use_fast=use_fast, trust_remote_code=trust_remote_code ) return tokenizer
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/__init__.py
colossalai/legacy/inference/dynamic_batching/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/infer_batch.py
colossalai/legacy/inference/dynamic_batching/infer_batch.py
# Adapted from https://github.com/ModelTC/lightllm

import collections
from dataclasses import dataclass
from typing import Dict, List, Tuple

import numpy as np
import torch

from colossalai.inference.tensor_parallel import MemoryManager


# make batch infer state an attr of InferBatch
class InferSamplingParams:
    """Per-request sampling settings as consumed by the inference worker.

    A ``top_k`` of -1 is the "disabled" sentinel and is expanded to
    ``vocab_size`` (consider every token) so downstream sampling code never
    has to special-case it.
    """

    def __init__(
        self,
        do_sample: bool = False,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        temperature: float = 1.0,
        top_p: float = 1.0,
        top_k: int = -1,
        vocab_size: int = -1,
    ) -> None:
        self.do_sample = do_sample
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.temperature = temperature
        self.top_p = top_p
        self.top_k = top_k
        # -1 means "no top-k restriction": widen to the full vocabulary.
        if self.top_k == -1:
            self.top_k = vocab_size
        return


@dataclass
class InferBatch:
    """State of one running (continuous-batching) inference batch.

    The ``nopad_*`` fields describe the un-padded layout of the KV cache:
    per-request sequence lengths, cumulative start offsets, and the
    per-token cache-slot table ``nopad_b_loc``.  Judging by the slice
    arithmetic used in ``free_self``/``filter``/``merge``, each row of
    ``nopad_b_loc`` stores a request's cache-slot indices right-aligned
    against column ``nopad_max_len_in_batch - 1`` (TODO confirm against the
    attention kernels that consume this table).
    """

    batch_id: int
    requests: List
    requests_idx_mapping: Dict[int, int]  # request_id -> row index within this batch

    input_ids: torch.Tensor

    all_input_ids: List[List[int]]
    input_lengths: List[int]

    out_token_id_counts: List  # per request: token_id -> times generated (for repetition penalties)
    sampling_param_list: List[InferSamplingParams]

    nopad_total_token_num: int
    nopad_max_len_in_batch: int
    nopad_b_loc: torch.Tensor  # (batch, max_total_len + 12) cache-slot index table
    nopad_b_start_loc: torch.Tensor  # per-request start offset into the flattened token stream
    nopad_b_seq_len: torch.Tensor  # per-request current sequence length
    cache_manager: MemoryManager
    max_total_len: int

    @classmethod
    @torch.no_grad()
    def init_batch(
        cls,
        batch_id,
        requests,
        dtype: torch.dtype,  # NOTE(review): not referenced in this body — presumably kept for interface parity
        device: torch.device,
        cache_manager: MemoryManager,
        vocab_size: int,
        max_total_len: int,
    ) -> "InferBatch":
        """Build a fresh batch from raw request dicts.

        Each request dict is expected to carry ``request_id``, ``input_id``
        (token list) and ``sampling_param`` keys, as read below.
        """
        input_lengths = []
        all_input_ids = []
        requests_idx_mapping = {}

        out_token_id_counts = []
        sampling_param_list = []

        nopad_total_token_num = 0
        nopad_max_len_in_batch = 0
        nopad_b_loc = torch.empty((len(requests), max_total_len + 12), dtype=torch.long, device="cuda")
        # to avoid memory leak , we pre-allocate 12 more space for each batch.
        nopad_b_start_loc = torch.zeros(len(requests), dtype=torch.int32, device="cuda")
        for i, r in enumerate(requests):
            # request id -> idx in list mapping
            requests_idx_mapping[r["request_id"]] = i
            tokenized_input = r["input_id"]
            input_length = len(tokenized_input)
            input_lengths.append(input_length)
            all_input_ids.append(tokenized_input)
            out_token_id_counts.append(collections.defaultdict(int))
            # postprocessor: inject the vocab size so top_k=-1 expands correctly
            sampling_param = r["sampling_param"]
            sampling_param["vocab_size"] = vocab_size
            sampling_param_list.append(InferSamplingParams(**sampling_param))
            nopad_total_token_num += input_length
            nopad_max_len_in_batch = max(nopad_max_len_in_batch, input_length)
        nopad_b_seq_len = torch.tensor(input_lengths, dtype=torch.int32, device="cuda")
        # Start offsets are the exclusive prefix sum of the sequence lengths.
        nopad_b_start_loc[1:] = torch.cumsum(nopad_b_seq_len, dim=0, dtype=torch.int32)[0:-1]
        if len(requests) > 1:
            input_ids = np.concatenate(all_input_ids, dtype=np.int64)
        else:
            input_ids = all_input_ids[0]
        # Create tensors on device
        input_ids = torch.tensor(input_ids, dtype=torch.int64, device=device)
        return cls(
            batch_id=batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            input_lengths=input_lengths,
            all_input_ids=all_input_ids,
            nopad_total_token_num=nopad_total_token_num,
            nopad_max_len_in_batch=nopad_max_len_in_batch,
            nopad_b_loc=nopad_b_loc,
            nopad_b_start_loc=nopad_b_start_loc,
            nopad_b_seq_len=nopad_b_seq_len,
            out_token_id_counts=out_token_id_counts,
            sampling_param_list=sampling_param_list,
            cache_manager=cache_manager,
            max_total_len=max_total_len,
        )

    @torch.no_grad()
    def free_self(self) -> None:
        """Release every KV-cache slot held by this batch back to the cache manager.

        For each request the occupied slots are the last ``seq_len - 1``
        entries of its (right-aligned) ``nopad_b_loc`` row.
        """
        remove_index = []
        for idx in range(len(self)):
            remove_index.append(
                self.nopad_b_loc[
                    idx,
                    (self.nopad_max_len_in_batch - 1)
                    - (self.nopad_b_seq_len[idx] - 1) : (self.nopad_max_len_in_batch - 1),
                ]
            )
        remove_index = torch.cat(remove_index, dim=-1)
        self.cache_manager.free(remove_index)

    @torch.no_grad()
    def filter(self, request_ids: List[int]) -> "InferBatch":
        """
        Filter finished batch and return a new InferBatch with left ones.

        KV-cache slots of the dropped requests are freed; the surviving
        rows of ``nopad_b_loc`` are re-aligned to the new (possibly
        smaller) ``nopad_max_len_in_batch``.
        """
        if len(request_ids) == 0:
            raise ValueError("Batch must have at least one request")
        # Nothing to drop: return self unchanged (no copy is made).
        if len(request_ids) == len(self):
            return self
        requests_idx_mapping = {}
        indices = []
        requests = []
        all_input_ids = []
        input_lengths = []
        nopad_total_token_num = 0
        nopad_max_len_in_batch = 0
        nopad_b_loc = torch.empty((len(request_ids), self.max_total_len + 12), dtype=torch.long, device="cuda")
        nopad_b_start_loc = torch.zeros(len(request_ids), dtype=torch.int32, device="cuda")
        nopad_b_seq_len = torch.zeros(len(request_ids), dtype=torch.int32, device="cuda")
        left_idx = []
        for i, request_id in enumerate(request_ids):
            idx = self.requests_idx_mapping[request_id]
            left_idx.append(idx)
        left_idx_set = set(left_idx)
        # Free the cache slots of every request that is NOT kept.
        remove_index = []
        for idx in range(len(self)):
            if idx not in left_idx_set:
                remove_index.append(
                    self.nopad_b_loc[
                        idx,
                        (self.nopad_max_len_in_batch - 1)
                        - (self.nopad_b_seq_len[idx] - 1) : (self.nopad_max_len_in_batch - 1),
                    ]
                )
        remove_index = torch.cat(remove_index, dim=-1)
        self.cache_manager.free(remove_index)

        nopad_max_len_in_batch = 0
        for i, request_id in enumerate(request_ids):
            idx = self.requests_idx_mapping[request_id]
            indices.append(idx)
        nopad_b_seq_len[:] = self.nopad_b_seq_len[indices]
        nopad_max_len_in_batch = torch.max(nopad_b_seq_len).item()
        nopad_b_start_loc[1:] = torch.cumsum(nopad_b_seq_len, dim=0, dtype=torch.int32)[0:-1]
        nopad_total_token_num = torch.sum(nopad_b_seq_len).item()
        # Copy the surviving rows, shifting them so they stay right-aligned
        # against the new, smaller max length.
        nopad_b_loc[:, 0 : (nopad_max_len_in_batch - 1)] = self.nopad_b_loc[
            indices,
            (self.nopad_max_len_in_batch - 1) - (nopad_max_len_in_batch - 1) : (self.nopad_max_len_in_batch - 1),
        ]
        for i, request_id in enumerate(request_ids):
            idx = self.requests_idx_mapping[request_id]
            requests_idx_mapping[request_id] = i
            requests.append(self.requests[idx])
            all_input_ids.append(self.all_input_ids[idx])
            input_lengths.append(self.input_lengths[idx])
        input_ids = self.input_ids[indices]

        return InferBatch(
            batch_id=self.batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            input_lengths=input_lengths,
            all_input_ids=all_input_ids,
            nopad_total_token_num=nopad_total_token_num,
            nopad_max_len_in_batch=nopad_max_len_in_batch,
            nopad_b_loc=nopad_b_loc,
            nopad_b_start_loc=nopad_b_start_loc,
            nopad_b_seq_len=nopad_b_seq_len,
            out_token_id_counts=[self.out_token_id_counts[_i] for _i in indices],
            sampling_param_list=[self.sampling_param_list[_i] for _i in indices],
            cache_manager=self.cache_manager,
            max_total_len=self.max_total_len,
        )

    @classmethod
    @torch.no_grad()
    def merge(cls, batch1, batch2) -> "InferBatch":
        """
        Return merged new InferBatch

        ``batch1``'s rows come first; ``batch2``'s request-id mapping and
        start offsets are shifted accordingly.
        """
        requests = batch1.requests + batch2.requests
        requests_idx_mapping = {}
        new_batch_size = len(batch1) + len(batch2)

        input_ids = batch1.input_ids.new_empty(new_batch_size)
        all_input_ids = []
        input_lengths = []
        out_token_id_counts = []
        sampling_param_list = []

        cumulative_batch_size = 0
        nopad_total_token_num = batch1.nopad_total_token_num + batch2.nopad_total_token_num
        nopad_max_len_in_batch = max(batch1.nopad_max_len_in_batch, batch2.nopad_max_len_in_batch)

        max_total_len = max(batch1.max_total_len, batch2.max_total_len)
        # NOTE(review): the new table is sized with batch1.max_total_len even
        # though max_total_len above is the max of both batches — looks
        # suspicious if batch2.max_total_len is larger; confirm upstream
        # guarantees equal sizes.
        nopad_b_loc = torch.empty((new_batch_size, batch1.max_total_len + 12), dtype=torch.long, device="cuda")
        nopad_b_start_loc = torch.zeros(new_batch_size, dtype=torch.int32, device="cuda")
        nopad_b_seq_len = torch.zeros(new_batch_size, dtype=torch.int32, device="cuda")
        nopad_start_loc_len_temp = 0
        batches = [batch1, batch2]
        for i, batch in enumerate(batches):
            if i == 0:
                requests_idx_mapping = batch.requests_idx_mapping
            else:
                # Shift row indices of the second batch past the first one.
                for k, v in batch.requests_idx_mapping.items():
                    requests_idx_mapping[k] = v + cumulative_batch_size
            start_index = cumulative_batch_size
            end_index = cumulative_batch_size + len(batch)
            input_ids[start_index:end_index] = batch.input_ids
            nopad_b_seq_len[start_index:end_index] = batch.nopad_b_seq_len
            nopad_b_start_loc[start_index:end_index] = batch.nopad_b_start_loc + nopad_start_loc_len_temp
            nopad_start_loc_len_temp = nopad_b_start_loc[end_index - 1] + nopad_b_seq_len[end_index - 1]
            # Re-align each batch's slot rows against the merged max length.
            nopad_b_loc[
                start_index:end_index,
                nopad_max_len_in_batch - batch.nopad_max_len_in_batch : nopad_max_len_in_batch - 1,
            ] = batch.nopad_b_loc[:, : batch.nopad_max_len_in_batch - 1]
            all_input_ids.extend(batch.all_input_ids)
            input_lengths.extend(batch.input_lengths)
            out_token_id_counts.extend(batch.out_token_id_counts)
            sampling_param_list.extend(batch.sampling_param_list)
            # Update
            cumulative_batch_size += len(batch)
        # Last column: one fresh slot index per request for the next token.
        nopad_b_loc[:, nopad_max_len_in_batch - 1] = (
            nopad_total_token_num - new_batch_size + torch.arange(0, new_batch_size, dtype=torch.int32, device="cuda")
        )
        return InferBatch(
            batch_id=batches[0].batch_id,
            requests=requests,
            requests_idx_mapping=requests_idx_mapping,
            input_ids=input_ids,
            input_lengths=input_lengths,
            all_input_ids=all_input_ids,
            nopad_total_token_num=nopad_total_token_num,
            nopad_max_len_in_batch=nopad_max_len_in_batch,
            nopad_b_loc=nopad_b_loc,
            nopad_b_start_loc=nopad_b_start_loc,
            nopad_b_seq_len=nopad_b_seq_len,
            out_token_id_counts=out_token_id_counts,
            sampling_param_list=sampling_param_list,
            cache_manager=batches[0].cache_manager,
            max_total_len=max_total_len,
        )

    def __len__(self):
        # Batch size == number of requests.
        return len(self.requests)

    def get_post_sample_tensors(
        self,
    ) -> Tuple[
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        torch.Tensor,
        int,
    ]:
        """Flatten per-request sampling settings and generated-token counts
        into CUDA tensors for the post-processing (penalty/sampling) kernels.

        ``p_seq_len`` is seeded with 0 so its cumsum yields, per request, the
        start offset into the flattened ``p_token_ids``/``p_token_counts``.
        """
        presence_penalties: List[float] = []
        frequency_penalties: List[float] = []
        temperatures: List[float] = []
        top_ps: List[float] = []
        top_ks: List[int] = []
        p_token_ids: List[int] = []
        p_token_counts: List[int] = []
        p_seq_len: List[int] = [
            0,
        ]
        p_max_len_in_batch: int = 0
        for i, id_to_count in enumerate(self.out_token_id_counts):
            sample_param = self.sampling_param_list[i]
            presence_penalties.append(sample_param.presence_penalty)
            frequency_penalties.append(sample_param.frequency_penalty)
            temperatures.append(sample_param.temperature)
            top_ps.append(sample_param.top_p)
            top_ks.append(sample_param.top_k)
            for token_id, count in id_to_count.items():
                p_token_ids.append(token_id)
                p_token_counts.append(count)
            p_seq_len.append(len(id_to_count))
            p_max_len_in_batch = max(p_max_len_in_batch, len(id_to_count))
        presence_penalties = torch.tensor(presence_penalties, dtype=torch.float, device="cuda")
        frequency_penalties = torch.tensor(frequency_penalties, dtype=torch.float, device="cuda")
        temperatures = torch.tensor(temperatures, dtype=torch.float, device="cuda")
        top_ps = torch.tensor(top_ps, dtype=torch.float, device="cuda")
        top_ks = torch.tensor(top_ks, dtype=torch.int32, device="cuda")
        p_token_ids = torch.tensor(p_token_ids, dtype=torch.int32, device="cuda")
        p_token_counts = torch.tensor(p_token_counts, dtype=torch.int32, device="cuda")
        p_seq_len = torch.tensor(p_seq_len, dtype=torch.int32, device="cuda")
        p_cumsum_seq_len = torch.cumsum(p_seq_len, dim=0, dtype=torch.int32)
        return (
            presence_penalties,
            frequency_penalties,
            temperatures,
            top_ps,
            top_ks,
            p_token_ids,
            p_token_counts,
            p_cumsum_seq_len,
            p_max_len_in_batch,
        )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/io_struct.py
colossalai/legacy/inference/dynamic_batching/io_struct.py
# Adapted from https://github.com/ModelTC/lightllm
from typing import Dict, List, Tuple

from .sampling_params import SamplingParams


class Req:
    """A single generation request and its decoding state."""

    def __init__(self, request_id, prompt_ids, sample_params: SamplingParams, prompts: str = ""):
        self.request_id = request_id
        self.prompt_ids = prompt_ids
        self.input_len = len(prompt_ids)
        self.max_output_len = sample_params.max_new_tokens
        self.sample_params = sample_params
        self.output_ids = []
        self.output_metadata_list = []
        self.has_generate_finished = False
        self.aborted = False
        self.prompts = prompts

    def to_rpc_obj(self):
        """Serialize the fields the inference worker needs."""
        return {
            "request_id": self.request_id,
            "input_id": self.prompt_ids,
            "output_len": self.max_output_len,
            "sampling_param": self.sample_params.to_dict(),
        }

    def stop_sequences_matched(self):
        """Return True when the tail of ``output_ids`` equals any configured stop sequence."""
        # should we add stop sequences to the sample params?
        if self.sample_params.stop_sequences is not None:
            for stop_token_ids in self.sample_params.stop_sequences:
                stop_len = len(stop_token_ids)
                if (
                    stop_len > 0
                    and len(self.output_ids) >= stop_len
                    and all(self.output_ids[-(stop_len - i)] == stop_token_ids[i] for i in range(stop_len))
                ):
                    return True
        return False

    def __repr__(self):
        # Fixed: the previous repr ended with a dangling comma and never
        # closed its parenthesis.
        return f"Req(request_id={self.request_id}, prompt_ids={self.prompt_ids})"


class Batch:
    """A scheduled group of requests decoded together."""

    def __init__(self, batch_id, reqs: List[Req]):
        self.batch_id = batch_id
        self.reqs = reqs
        self.id_to_reqs = {req.request_id: req for req in reqs}

    def input_tokens(self):
        """Total number of prompt tokens across the batch."""
        return sum(req.input_len for req in self.reqs)

    def calcu_max_tokens(self):
        """Worst-case token budget: every request uses its full output allowance."""
        return sum(req.input_len + req.max_output_len for req in self.reqs)

    def calcu_used_tokens(self):
        """Tokens currently occupied: prompts plus tokens generated so far."""
        return sum(req.input_len + len(req.output_ids) for req in self.reqs)

    def mark_finished_req(self, eos_id, engine_max_output_len):
        """Flag requests that are done (stop sequence, length limits, EOS, abort).

        Returns:
            bool: True if at least one request was newly marked finished.
        """
        has_new_finish = False
        for req in self.reqs:
            if req.stop_sequences_matched():
                req.has_generate_finished = True
                has_new_finish = True
            if len(req.output_ids) >= engine_max_output_len:
                req.has_generate_finished = True
                has_new_finish = True
            # Guard against an empty output list before peeking at the last
            # token (the original code raised IndexError in that case).
            if req.output_ids and req.output_ids[-1] == eos_id and not req.sample_params.ignore_eos:
                req.has_generate_finished = True
                has_new_finish = True
            if len(req.output_ids) >= req.max_output_len or req.aborted:
                req.has_generate_finished = True
                has_new_finish = True
        return has_new_finish

    def filter_finished(self) -> List[Req]:
        """
        Filter finished requests from the batch, the finished ones will be removed from 'reqs'.
        """
        # TODO: the logic of return should be defined here.
        unfinished_req = []
        finished_req = []
        for req in self.reqs:
            if not req.has_generate_finished:
                unfinished_req.append(req)
            else:
                finished_req.append(req)
        self.reqs = unfinished_req
        self.id_to_reqs = {req.request_id: req for req in self.reqs}
        return finished_req

    def is_clear(self):
        """True when no request remains in the batch."""
        return len(self.reqs) == 0

    def merge(self, mini_batch):
        """Absorb the requests of ``mini_batch`` into this batch in place."""
        for _req in mini_batch.reqs:
            self.reqs.append(_req)
        self.id_to_reqs = {req.request_id: req for req in self.reqs}
        return

    def __repr__(self):
        # Fixed: close the parenthesis and drop the trailing comma.
        return f"Batch(batch_id={self.batch_id}, reqs={self.reqs})"

    def __len__(self):
        return len(self.reqs)


class BatchTokenIdOut:
    """Raw per-step output: one (id, token, metadata, finished, aborted) tuple per request."""

    def __init__(self):
        # [req_id, new_token_id, gen_metadata, finished_state, abort_state]
        self.reqs_infs: List[Tuple[str, int, Dict, bool, bool]] = []


class BatchStrOut:
    """Detokenized per-step output: one (id, text, metadata, finished, aborted) tuple per request."""

    def __init__(self):
        # [req_id, token_str, gen_metadata, finished_state, abort_state]
        self.reqs_infs: List[Tuple[str, str, Dict, bool, bool]] = []


class AbortReq:
    """Control message asking the engine to abort one request."""

    def __init__(self, req_id):
        self.req_id = req_id


class RequestOutput:
    """The output data of a request to the LLM.

    Args:
        request_id: The unique ID of the request.
        prompt: The prompt string of the request.
        prompt_token_ids: The token IDs of the prompt.
        outputs: The output sequences of the request.
    """

    def __init__(
        self,
        request_id: str,
        prompt: str,
        prompt_token_ids: List[int],
        outputs,
    ) -> None:
        self.request_id = request_id
        self.prompt = prompt
        self.prompt_token_ids = prompt_token_ids
        self.outputs = outputs

    def __repr__(self) -> str:
        # Fixed: close the parenthesis opened by "RequestOutput(".
        return (
            f"RequestOutput(request_id={self.request_id}, "
            f"prompt={self.prompt!r}, "
            f"prompt_token_ids={self.prompt_token_ids}, "
            f"outputs={self.outputs})"
        )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/stats.py
colossalai/legacy/inference/dynamic_batching/stats.py
# Adapted from https://github.com/ModelTC/lightllm import time class Stats: def __init__(self, log_status, log_stats_interval) -> None: self.log_stats = log_status self.log_stats_interval = log_stats_interval self.last_log_time = time.time() self.all_tokens = 0 self.output_tokens = 0 self.prompt_tokens = 0 return def count_prompt_tokens(self, run_batch): if self.log_stats: tokens = run_batch.input_tokens() self.prompt_tokens += tokens self.all_tokens += tokens return def count_output_tokens(self, run_batch): if self.log_stats: tokens = len(run_batch.reqs) self.output_tokens += tokens self.all_tokens += tokens return def print_stats(self): if not self.log_stats: return now = time.time() if now - self.last_log_time > self.log_stats_interval: print( f"Avg tokens(prompt+generate) throughput: {self.all_tokens/(now-self.last_log_time):8.3f} tokens/s\n" f"Avg prompt tokens throughput: {self.prompt_tokens/(now-self.last_log_time):8.3f} tokens/s\n" f"Avg generate tokens throughput: {self.output_tokens/(now-self.last_log_time):8.3f} tokens/s" ) self.all_tokens = 0 self.output_tokens = 0 self.prompt_tokens = 0 self.last_log_time = now return
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/sampling_params.py
colossalai/legacy/inference/dynamic_batching/sampling_params.py
# Adapted from https://github.com/ModelTC/lightllm
"""Sampling parameters for text generation."""
from typing import List, Optional, Union

_SAMPLING_EPS = 1e-5


class SamplingParams:
    """Per-request decoding configuration.

    When ``do_sample`` is False, or the temperature is (near) zero, the
    knobs are normalized to greedy decoding: temperature 1.0, top_k 1.
    """

    def __init__(
        self,
        do_sample: bool = False,
        presence_penalty: float = 0.0,
        frequency_penalty: float = 0.0,
        temperature: float = 1.0,
        top_p: float = 1.0,
        top_k: int = -1,  # -1 is for all
        ignore_eos: bool = False,
        max_new_tokens: int = 256,
        stop_sequences: Optional[Union[str, List[str]]] = None,  # conditions to stop generation
    ) -> None:
        self.do_sample = do_sample
        self.presence_penalty = presence_penalty
        self.frequency_penalty = frequency_penalty
        self.temperature = temperature
        self.top_p = top_p
        self.top_k = top_k
        self.ignore_eos = ignore_eos
        self.max_new_tokens = max_new_tokens
        self.stop_sequences = stop_sequences
        # Greedy decoding requested: neutralize all sampling knobs.
        if not self.do_sample:
            self.temperature = 1.0
            self.top_p = 1.0
            self.top_k = 1
        # A near-zero temperature would be numerically unstable; treat it
        # as greedy search instead.
        if 0.0 <= self.temperature < _SAMPLING_EPS:
            self.temperature = 1.0
            self.top_k = 1
        return

    def verify(self):
        """Validate parameter ranges, raising ValueError on the first violation."""
        if self.presence_penalty < 0.0:
            raise ValueError(f"presence_penalty must >= 0.0, got {self.presence_penalty}")
        if self.frequency_penalty < 0.0:
            raise ValueError(f"frequency_penalty must >= 0.0, got {self.frequency_penalty}")
        if self.temperature <= 0.0:
            raise ValueError(f"temperature must > 0.0, got {self.temperature}")
        if self.top_p <= 0.0 or self.top_p > 1.0:
            raise ValueError(f"top_p must in (0.0, 1.0], got {self.top_p}")
        if self.top_k < -1 or self.top_k == 0:
            raise ValueError(f"top_k must be -1 (disable), or at least 1, got {self.top_k}.")
        if self.max_new_tokens < 1:
            raise ValueError(f"max_new_tokens must be at least 1 , got {self.max_new_tokens}.")
        return

    def stop_sentences_to_token_ids(self, tokenizer):
        """Tokenize the stop strings in place, dropping each leading BOS id."""
        if self.stop_sequences is None:
            self.stop_sequences = []
            return
        if isinstance(self.stop_sequences, str):
            self.stop_sequences = [self.stop_sequences]
        encoded_sequences = []
        for stop_str in self.stop_sequences:
            stop_str_ids = tokenizer.encode(stop_str)
            if stop_str_ids is not None and len(stop_str_ids) >= 1:
                # remove bos_token_id
                stop_str_ids = stop_str_ids[1:]
                if len(stop_str_ids) > 0:
                    encoded_sequences.append(stop_str_ids)
        self.stop_sequences = encoded_sequences
        return

    def to_dict(self):
        """Serialize the fields consumed by the inference worker."""
        return {
            "do_sample": self.do_sample,
            "presence_penalty": self.presence_penalty,
            "frequency_penalty": self.frequency_penalty,
            "temperature": self.temperature,
            "top_p": self.top_p,
            "top_k": self.top_k,
            # "ignore_eos" is intentionally not serialized here.
        }
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/dynamic_batching/req_queue.py
colossalai/legacy/inference/dynamic_batching/req_queue.py
# Adapted from https://github.com/ModelTC/lightllm
import uuid
from typing import List

import numpy as np

from .io_struct import Batch, Req


class ReqQueue:
    """FIFO queue of pending requests with token-budget admission control.

    A request is admitted into a new batch only if, in the worst case
    (every running request generating up to its output budget), the total
    KV-cache demand stays within ``max_total_tokens`` and the running
    request count within ``running_max_req_size``.
    """

    def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size, waiting_req_list=None) -> None:
        self.max_total_tokens = max_total_tokens
        assert batch_max_tokens is not None
        self.batch_max_tokens = batch_max_tokens
        self.running_max_req_size = running_max_req_size
        # Bug fix: the default used to be a mutable `[]`, which Python
        # evaluates once, so every ReqQueue created without an explicit list
        # silently shared (and mutated) the same waiting list.
        self.waiting_req_list: List[Req] = [] if waiting_req_list is None else waiting_req_list

    def append(self, req):
        """Enqueue a request at the back of the waiting list."""
        self.waiting_req_list.append(req)
        return

    def _init_cache_list(self, current_batch: Batch):
        """Seed the (used_tokens, remaining_output_budget) list from the running batch."""
        if current_batch is not None:
            self.cache_len_list = [
                (req.input_len + len(req.output_ids), req.max_output_len - len(req.output_ids) - 1)
                for req in current_batch.reqs
            ]
        else:
            self.cache_len_list = []

    # @calculate_time(show=True, min_cost_ms=0.1)
    def _can_add_new_req(self, req):
        """Tentatively add `req` and check the worst-case token demand.

        Note: this mutates ``self.cache_len_list`` even when it returns
        False — callers rely on re-initializing via ``_init_cache_list``.
        """
        self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1))  # hard to analysis
        self.cache_len_list.sort(key=lambda x: -x[1])

        left_out_len_array = np.array([e[1] for e in self.cache_len_list])
        # assert left_out_len_array.min() >= 0
        has_run_len_array = np.array([e[0] for e in self.cache_len_list])
        cum_run_len_array = np.cumsum(has_run_len_array)
        size_array = np.arange(1, len(self.cache_len_list) + 1, 1)

        need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()
        # NOTE: change here < to <=
        return need_max_token_num <= self.max_total_tokens and len(self.cache_len_list) <= self.running_max_req_size

    def generate_new_batch(self, current_batch: Batch = None):
        """Pop as many waiting requests as fit the budget into a new Batch.

        Returns None when the running batch is already full or nothing fits.
        """
        if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:
            return None
        self._init_cache_list(current_batch)
        can_run_list = []
        new_batch_total_tokens = 0
        aborted_count = 0
        for req in self.waiting_req_list:
            # NOTE(review): the admission check runs (and pollutes
            # cache_len_list) even for aborted requests — kept as-is to
            # preserve the original scheduling behavior.
            flag = self._can_add_new_req(req)
            if req.aborted:
                aborted_count += 1
                continue
            if flag and new_batch_total_tokens + req.input_len <= self.batch_max_tokens:
                can_run_list.append(req)
                new_batch_total_tokens += req.input_len
            else:
                break

        if len(can_run_list) != 0:
            new_batch = Batch(uuid.uuid4().hex, can_run_list)
            # Drop the admitted (and interleaved aborted) requests from the front.
            self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count :]
            return new_batch
        else:
            return None

    def __len__(self):
        return self.waiting_req_list.__len__()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/serving/torch_serve/Colossal_Inference_Handler.py
colossalai/legacy/inference/serving/torch_serve/Colossal_Inference_Handler.py
import logging
import os
import zipfile
from abc import ABC

import torch
import transformers
from transformers import AutoTokenizer, BloomForCausalLM, BloomTokenizerFast, LlamaForCausalLM
from ts.torch_handler.base_handler import BaseHandler

import colossalai
from colossalai.inference.tensor_parallel.engine import TPInferEngine
from colossalai.shardformer import ShardConfig
from colossalai.testing import free_port

logger = logging.getLogger(__name__)
logger.info("Transformers version %s", transformers.__version__)
logger.info("ColossalAI version %s", colossalai.__version__)


class ColossalInferenceHandler(BaseHandler, ABC):
    """
    Transformers handler class for testing
    """

    def __init__(self):
        super(ColossalInferenceHandler, self).__init__()
        self.infer_engine = None
        self.max_batch_size = None
        self.max_input_len = None
        self.max_output_len = None
        self.tokenizer = None
        self.initialized = False

    def initialize(self, ctx):
        """Expected behaviour: the sharded Bloom/Llama model is loaded.

        Args:
            ctx (context): It is a JSON Object containing information
            pertaining to the model artefacts parameters.
        """
        # Bug fix: the original guard was `if ctx is not None or ...`, which
        # logged the "not appropriately passed in" error precisely when a
        # valid context WAS provided (and stayed silent for ctx=None).
        if ctx is None or not hasattr(ctx, "model_yaml_config"):
            logger.error("Context ctx and model-config are not appropriately passed in.")

        self.manifest = ctx.manifest
        gpu_id = ctx.system_properties.get("gpu_id", -1)
        model_dir = ctx.system_properties.get("model_dir")

        # Inference configs are collected together in model yaml config for handler use
        inference_config = ctx.model_yaml_config["handler"]
        self.inference_config = inference_config
        logger.info(self.inference_config)

        self.tp_size = self.inference_config.get("tp_size", 1)
        self.max_batch_size = self.inference_config.get("max_batch_size", 4)
        self.max_input_len = self.inference_config.get("max_input_len", 1024)
        self.max_output_len = self.inference_config.get("max_output_len", 128)

        self.device = torch.device("cuda:" + str(gpu_id) if torch.cuda.is_available() and gpu_id >= 0 else "cpu")
        logger.info(f"Device set to {self.device}")
        logger.info(f"torch.cuda.device_count() {torch.cuda.device_count()}")

        # Unpacking from model_dir
        model_dir_path = os.path.join(model_dir, "model")
        with zipfile.ZipFile(model_dir + "/model.zip", "r") as zip_ref:
            zip_ref.extractall(model_dir_path)
        logger.info(f"Loading {self.inference_config['model_type']} pretrain model and tokenizer")
        if self.inference_config["model_type"] == "bloom":
            self.model = BloomForCausalLM.from_pretrained(
                model_dir_path,
            )
            self.tokenizer = BloomTokenizerFast.from_pretrained(model_dir_path, return_tensors="pt")
        elif self.inference_config["model_type"] == "llama":
            self.model = LlamaForCausalLM.from_pretrained(
                model_dir_path,
            )
            self.tokenizer = AutoTokenizer.from_pretrained(model_dir_path, return_tensors="pt")
        else:
            logger.warning(f"Model type {self.inference_config['model_type']} not supported yet.")

        logger.info("Transformer model from path %s loaded successfully", model_dir)

        # NOTE world_size, rank, host, port here are used to launch colossalai dist environment
        # This world_size is different from the world size of TorchServe
        world_size = int(os.getenv("WORLD_SIZE", self.tp_size))
        assert world_size == 1, "Colossal-Inference with tensor parallel is not supported on TorchServe for now"
        rank = int(os.getenv("RANK", gpu_id))
        local_rank = int(os.getenv("LOCAL_RANK", gpu_id))
        host = os.getenv("MASTER_ADDR", "localhost")
        # NOTE(review): free_port() is evaluated eagerly even when
        # MASTER_PORT is set; harmless but wasteful — kept for parity.
        port = os.getenv("MASTER_PORT", free_port())  # use a random free port

        logger.info(
            f" world_size {world_size}" f" local_rank {local_rank}" f" rank {rank}" f" host {host}" f" port {port}"
        )

        torch.cuda.set_device(self.device)
        self.model.half()
        self.model.cuda()
        self.model.eval()

        colossalai.launch(rank=rank, world_size=world_size, host=host, port=port, backend="nccl")
        logger.info("Initializing TPInferEngine ...")
        shard_config = ShardConfig(
            enable_tensor_parallelism=True if self.tp_size > 1 else False, extra_kwargs={"inference_only": True}
        )
        self.infer_engine = TPInferEngine(
            self.model, shard_config, self.max_batch_size, self.max_input_len, self.max_output_len
        )
        logger.info("TPInferEngine initialized successfully")

        self.model = self.infer_engine.model
        self.initialized = True

    def preprocess(self, requests):
        """Basic text preprocessing, based on the user's choice of application mode.

        Args:
            requests: The Input data in the form of text is passed on to the preprocess function.

        Returns:
            list : The preprocess function returns a list of Tensor for the size of the word tokens.
        """
        logger.info("Pre-processing requests")
        input_ids_batch = None
        attention_mask_batch = None

        # The unused enumerate index was dropped.
        for data in requests:
            input_text = data.get("data")
            if input_text is None:
                input_text = data.get("body")
            if isinstance(input_text, (bytes, bytearray)):
                input_text = input_text.decode("utf-8")

            logger.info("Received text: '%s'", input_text)
            inputs = self.tokenizer.encode_plus(
                input_text,
                max_length=self.max_input_len,
                padding=True,
                add_special_tokens=True,
                return_tensors="pt",
                truncation=True,
            )

            input_ids = inputs["input_ids"].to(self.device)
            attention_mask = inputs["attention_mask"].to(self.device)
            # making a batch out of the received requests
            # attention masks are passed for cases where input tokens are padded.
            if input_ids.shape is not None:
                if input_ids_batch is None:
                    input_ids_batch = input_ids
                    attention_mask_batch = attention_mask
                else:
                    input_ids_batch = torch.cat((input_ids_batch, input_ids), 0)
                    attention_mask_batch = torch.cat((attention_mask_batch, attention_mask), 0)
        return (input_ids_batch, attention_mask_batch)

    def inference(self, input_batch):
        """Predict the class (or classes) of the received text using the
        serialized transformers checkpoint.

        Args:
            input_batch (list): List of Text Tensors from the pre-process function is passed here

        Returns:
            list : It returns a list of the predicted value for the input text
        """
        input_ids_batch, attention_mask_batch = input_batch
        inferences = []

        do_sample = self.inference_config.get("do_sample", True)
        top_p = self.inference_config.get("top_p", 0.95 if do_sample else 1.0)
        top_k = self.inference_config.get("top_k", 60 if do_sample else 50)
        input_ids_batch = input_ids_batch.to(self.device)
        outputs = self.infer_engine.generate(
            dict(input_ids=input_ids_batch, attention_mask=attention_mask_batch),
            do_sample=do_sample,
            top_p=top_p,
            top_k=top_k,
        )

        # Iterate the outputs directly instead of re-indexing by position.
        for output in outputs:
            inferences.append(self.tokenizer.decode(output, skip_special_tokens=True))

        # For testing only
        logger.info(
            f"Generated text: {inferences}",
        )
        return inferences

    def postprocess(self, inference_output):
        """Post Process Function converts the predicted response into Torchserve readable format.

        Args:
            inference_output (list): It contains the predicted response of the input text.

        Returns:
            (list): Returns a list of the Predictions and Explanations.
        """
        return inference_output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/serving/ray_serve/Colossal_Inference_rayserve.py
colossalai/legacy/inference/serving/ray_serve/Colossal_Inference_rayserve.py
import logging import os from typing import Any, List, Union import ray import ray.util.collective as collective import starlette import torch from pydantic import BaseModel from ray import serve from ray.serve import Application from transformers import AutoModelForCausalLM, AutoTokenizer import colossalai from colossalai.inference.tensor_parallel.engine import TPInferEngine from colossalai.shardformer import ShardConfig from colossalai.testing import free_port ray_serve_logger = logging.getLogger("ray.serve") class GenConfigArgs(BaseModel): """Config for generation""" path: str tp_size: int = 2 max_batch_size: int = 4 max_input_len: int = 128 max_output_len: int = 32 def log_cuda_info(scope_name: str): ray_serve_logger.info(f" {scope_name}: ray.get_gpu_ids(): {ray.get_gpu_ids()}") ray_serve_logger.info( f" {scope_name}: CUDA_VISIBLE_DEVICES: {os.getenv('CUDA_VISIBLE_DEVICES', 'NO DEVICES FOUND!')}" ) if torch.cuda.is_available(): ray_serve_logger.info( f" {scope_name}: cuda current_device: {torch.cuda.current_device()}, cuda device count: {torch.cuda.device_count()}" ) else: ray_serve_logger.info(f" {scope_name}: cuda is not available!") @ray.remote(num_gpus=1) class Worker: def __init__(self, model_path: str, tp_size: int, max_batch_size: int, max_input_len: int, max_output_len: int): log_cuda_info("Worker.init") self.tp_size = tp_size self.model_path = model_path self.max_batch_size = max_batch_size self.max_input_len = max_input_len self.max_output_len = max_output_len def setup(self, world_size, rank, port): # initialize a ray collective group, otherwise colossalai distributed env won't be built successfully collective.init_collective_group(world_size, rank, "nccl", "default") # initialize and set distributed environment colossalai.launch(rank=rank, world_size=world_size, host="localhost", port=port, backend="nccl") ray_serve_logger.info(f"Worker with rank {rank} (world size {world_size}) setting up..") log_cuda_info("Worker.setup") # Load model 
self.tokenizer = AutoTokenizer.from_pretrained(self.model_path) if self.tokenizer.pad_token is None: self.tokenizer.pad_token = self.tokenizer.eos_token self.model = AutoModelForCausalLM.from_pretrained( self.model_path, pad_token_id=self.tokenizer.pad_token_id, torch_dtype=torch.float16 ) shard_config = ShardConfig( enable_tensor_parallelism=True if world_size > 1 else False, extra_kwargs={"inference_only": True} ) self.infer_engine = TPInferEngine( self.model, shard_config, self.max_batch_size, self.max_input_len, self.max_output_len ) self.generate_kwargs = dict(max_new_tokens=self.max_output_len, do_sample=False) return True def generate(self, text: Union[str, List[str]]) -> str: input_tokens = self.tokenizer.batch_encode_plus(text, return_tensors="pt", padding=True) ray_serve_logger.info(f"text: {text},\ninput_tokens: {input_tokens}") model_output = self.infer_engine.generate(input_tokens, **self.generate_kwargs) ray_serve_logger.info(f"model_output.shape: {model_output.shape}") text_output = [] for i in range(len(model_output)): text_output.append(self.tokenizer.decode(model_output[i])) ray_serve_logger.info(f"output: {text_output}") return text_output @serve.deployment( ray_actor_options={"num_cpus": 1, "num_gpus": 0}, max_concurrent_queries=5, autoscaling_config={ "target_num_ongoing_requests_per_replica": 1, "min_replicas": 1, "initial_replicas": 1, "max_replicas": 1, }, ) class Driver: def __init__(self, config: GenConfigArgs): log_cuda_info("Driver:init") model_path = config.path tp_size = config.tp_size self.num_workers = tp_size self.workers = [] init_rets = [] # Just grab a free port on localhost # NOTE workers in this communication group listen to the same port available_port = free_port() for i in range(self.num_workers): worker_name = "worker_idx_{}".format(i) w = Worker.options(name=worker_name).remote( model_path, self.num_workers, config.max_batch_size, config.max_input_len, config.max_output_len ) self.workers.append(w) 
init_rets.append(w.setup.remote(self.num_workers, i, available_port)) _options = { "group_name": "default_driver", "world_size": self.num_workers, "ranks": [i for i in range(self.num_workers)], "backend": "nccl", } collective.create_collective_group(self.workers, **_options) _ = ray.get(init_rets) # set batch wait delay in seconds and maximum number of sequences in a batch @serve.batch(batch_wait_timeout_s=0.8, max_batch_size=4) async def batch_generate(self, requests: List[str]): ray_serve_logger.info(f"Driver.batch_generate: requests length: {len(requests)}\n requests: {requests}") results = ray.get([w.generate.remote(requests) for w in self.workers]) text_res = results[0] # get any one of the copies return text_res async def __call__(self, request: starlette.requests.Request) -> Any: return await self.batch_generate(request.query_params["text"]) def app(args: GenConfigArgs) -> Application: print(args) if args.path is None or not os.path.exists(args.path): raise ValueError("Model path not provided or invalid path!") return Driver.options(name="Colossal-Inference-Driver").bind(config=args)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/serving/ray_serve/send_requests.py
colossalai/legacy/inference/serving/ray_serve/send_requests.py
import ray import requests @ray.remote def send_query(text): resp = requests.get("http://localhost:8000/?text={}".format(text)) return resp.text test_sentences = [ "Introduce some landmarks in Beijing", "What is the weather today", "Coding requires practice and patience", "Rainy days inspire cozy reading", "Laughter is contagious and heartwarming", "Hiking mountains builds strength and resilience", "Family bonds grow stronger with time", "Science unlocks mysteries of the universe", "Music soothes the soul and ignites passion", "Artistic expression knows no boundaries", ] results = ray.get([send_query.remote(text) for text in test_sentences]) print("Result returned:") for res in results: print(res)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/serving/ray_serve/send_request.py
colossalai/legacy/inference/serving/ray_serve/send_request.py
import ray import requests @ray.remote def send_query(text): resp = requests.get("http://localhost:8000/?text={}".format(text)) return resp.text test_sentence = "Introduce some landmarks in Beijing" result = ray.get(send_query.remote(test_sentence)) print("Result returned:") print(result)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/hybridengine/__init__.py
colossalai/legacy/inference/hybridengine/__init__.py
from .engine import CaiInferEngine __all__ = ["CaiInferEngine"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/hybridengine/engine.py
colossalai/legacy/inference/hybridengine/engine.py
import torch import torch.distributed as dist import torch.nn as nn from transformers.tokenization_utils_base import BatchEncoding from colossalai.cluster import ProcessGroupMesh from colossalai.pipeline.schedule.generate import GenerateSchedule from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer import ShardConfig, ShardFormer from colossalai.shardformer.policies.base_policy import Policy from ..pipeline.microbatch_manager import MicroBatchManager from ..tensor_parallel.kvcache_manager import MemoryManager PP_AXIS, TP_AXIS = 0, 1 _supported_models = [ "LlamaForCausalLM", ] class CaiInferEngine: """ CaiInferEngine is a class that handles the pipeline parallel inference. Args: tp_size (int): the size of tensor parallelism. pp_size (int): the size of pipeline parallelism. model (`nn.Module`): the model not in pipeline style, and will be modified with `ShardFormer`. model_policy (`colossalai.shardformer.policies.base_policy.Policy`): the policy to shardformer model. micro_batch_size (int): the micro batch size. micro_batch_buffer_size (int): the buffer size for micro batch. Normally, it should be the same as the number of pipeline stages. max_batch_size (int): the maximum batch size. max_input_len (int): the maximum input length. max_output_len (int): the maximum output length. 
Example: ```python from colossalai.inference import InferEngine from colossalai.inference.pipeline.policies import LlamaModelInferPolicy import colossalai from transformers import LlamaForCausalLM, LlamaTokenizer colossalai.launch_from_torch() model = LlamaForCausalLM.from_pretrained("your_path_to_model") tokenizer = LlamaTokenizer.from_pretrained("/home/lczyh/share/models/llama-7b-hf") # assume the model is inferred with 2 pipeline stages inferengine = CaiInferEngine(pp_size=2, model=model, model_policy=LlamaModelInferPolicy()) input = ["Introduce a landmark in China ","Introduce a landmark in China "] data = tokenizer(input, return_tensors='pt') output = inferengine.inference([data.to('cuda').data]) ``` """ def __init__( self, tp_size: int = 1, pp_size: int = 1, dtype: str = "fp16", model: nn.Module = None, model_policy: Policy = None, micro_batch_size: int = 1, micro_batch_buffer_size: int = None, max_batch_size: int = 4, max_input_len: int = 32, max_output_len: int = 32, verbose: bool = False, # TODO: implement early_stopping, and various generation options early_stopping: bool = False, do_sample: bool = False, num_beams: int = 1, ) -> None: assert model.__class__.__name__ in _supported_models, f"Model {model.__class__.__name__} is not supported." assert ( tp_size * pp_size == dist.get_world_size() ), f"TP size({tp_size}) * PP size({pp_size}) should be equal to the global world size ({dist.get_world_size()})" assert model and model_policy, "Model with model_policy should be provided." assert dtype in ["fp16", "fp32", "bf16"], "dtype should be one of 'fp16', 'fp32', 'bf16'" assert max_batch_size <= 64, "Max batch size exceeds the constraint" assert max_input_len + max_output_len <= 4096, "Max length exceeds the constraint" # TODO: support only tensor parallel inference assert pp_size > 1, "Not support only tensor parallel inference." 
self.pp_size = pp_size self.tp_size = tp_size if dtype == "fp16": self.dtype = torch.float16 model.half() elif dtype == "bf16": self.dtype = torch.bfloat16 model.to(torch.bfloat16) else: self.dtype = torch.float32 # Init pg mesh pg_mesh = ProcessGroupMesh(pp_size, tp_size) stage_manager = None if pp_size > 1: stage_manager = PipelineStageManager(pg_mesh, PP_AXIS, True) self.cache_manager_list = [ self._init_manager(model, max_batch_size, max_input_len, max_output_len) for _ in range(micro_batch_buffer_size or pp_size) ] self.mb_manager = MicroBatchManager( stage_manager.stage, micro_batch_size, micro_batch_buffer_size or pp_size, max_input_len, max_output_len, self.cache_manager_list, ) self.verbose = verbose self.schedule = GenerateSchedule(stage_manager, self.mb_manager, verbose) self.model = self._shardformer(model, model_policy, stage_manager, pg_mesh.get_group_along_axis(TP_AXIS)) def inference(self, input_list): """ Args: input_list (list): a list of input data, each element is a `BatchEncoding` or `dict`. Returns: out (list): a list of output data, each element is a list of token. timestamp (float): the time cost of the inference, only return when verbose is `True`. """ assert isinstance( input_list, (BatchEncoding, dict) ), f"Only accept BatchEncoding or dict as input, but got {input_list.__class__.__name__}." 
if isinstance(input_list, BatchEncoding): input_list = input_list.data out, timestamp = self.schedule.generate_step(self.model, iter([input_list])) if self.verbose: return out, timestamp else: return out def _shardformer(self, model, model_policy, stage_manager, tp_group): shardconfig = ShardConfig( tensor_parallel_process_group=tp_group, pipeline_stage_manager=stage_manager, enable_tensor_parallelism=False, enable_fused_normalization=False, enable_all_optimization=False, enable_flash_attention=False, enable_jit_fused=False, enable_sequence_parallelism=False, ) shardformer = ShardFormer(shard_config=shardconfig) shard_model, _ = shardformer.optimize(model, model_policy) return shard_model.cuda() def _init_manager(self, model, max_batch_size: int, max_input_len: int, max_output_len: int) -> None: max_total_token_num = max_batch_size * (max_input_len + max_output_len) head_dim = model.config.hidden_size // model.config.num_attention_heads head_num = model.config.num_attention_heads num_hidden_layers = ( model.config.num_hidden_layers if hasattr(model.config, "num_hidden_layers") else model.config.num_layers ) layer_num = num_hidden_layers // self.pp_size cache_manager = MemoryManager(max_total_token_num, self.dtype, head_num, head_dim, layer_num) return cache_manager
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/hybridengine/modeling/llama.py
colossalai/legacy/inference/hybridengine/modeling/llama.py
# This code is adapted from huggingface transformers: https://github.com/huggingface/transformers/blob/v4.34.1/src/transformers/models/llama/modeling_llama.py import math from typing import List, Optional, Tuple import torch from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaModel from transformers.utils import logging from colossalai.inference.tensor_parallel.batch_infer_state import BatchInferState from colossalai.kernel.triton import llama_context_attn_fwd, token_attention_fwd from colossalai.kernel.triton.token_attention_kernel import Llama2TokenAttentionForwards from colossalai.pipeline.stage_manager import PipelineStageManager from ._utils import copy_kv_to_mem_cache try: from lightllm.models.llama2.triton_kernel.context_flashattention_nopad import ( context_attention_fwd as lightllm_llama2_context_attention_fwd, ) from lightllm.models.llama.triton_kernel.context_flashattention_nopad import ( context_attention_fwd as lightllm_context_attention_fwd, ) from lightllm.models.llama.triton_kernel.rotary_emb import rotary_emb_fwd as llama_rotary_embedding_fwd HAS_LIGHTLLM_KERNEL = True except: print("please install lightllm from source to run inference: https://github.com/ModelTC/lightllm") HAS_LIGHTLLM_KERNEL = False try: from flash_attn import flash_attn_with_kvcache HAS_FLASH_KERNEL = True except: HAS_FLASH_KERNEL = False print("please install flash attentiom from https://github.com/Dao-AILab/flash-attention") def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. 
cos = cos.squeeze(1).squeeze(0) # [seq_len, dim] sin = sin.squeeze(1).squeeze(0) # [seq_len, dim] cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim] q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed def llama_triton_context_attention( query_states, key_states, value_states, attn_output, infer_state, num_key_value_groups=1 ): if num_key_value_groups == 1: if HAS_LIGHTLLM_KERNEL is False: llama_context_attn_fwd( query_states, key_states, value_states, attn_output, infer_state.start_loc, infer_state.seq_len, # infer_state.cache_manager.past_key_values_length, infer_state.max_len_in_batch, ) else: lightllm_context_attention_fwd( query_states, key_states, value_states, attn_output, infer_state.start_loc, infer_state.seq_len, # infer_state.cache_manager.past_key_values_length, infer_state.max_len_in_batch, ) else: assert HAS_LIGHTLLM_KERNEL is True, "You have to install lightllm kernels to run llama2 model" lightllm_llama2_context_attention_fwd( query_states, key_states, value_states, attn_output, infer_state.start_loc, infer_state.seq_len, # infer_state.cache_manager.past_key_values_length, infer_state.max_len_in_batch, ) def llama_triton_token_attention(query_states, attn_output, infer_state, num_key_value_groups=1): assert HAS_LIGHTLLM_KERNEL is True, "You have to install lightllm kernel to run token attention for llama models" if num_key_value_groups == 1: token_attention_fwd( query_states, infer_state.cache_manager.key_buffer[infer_state.decode_layer_id], infer_state.cache_manager.value_buffer[infer_state.decode_layer_id], attn_output, infer_state.block_loc, infer_state.start_loc, infer_state.seq_len, # infer_state.cache_manager.past_key_values_length, infer_state.max_len_in_batch, ) else: Llama2TokenAttentionForwards.token_attn( query_states, infer_state.cache_manager.key_buffer[infer_state.decode_layer_id], 
infer_state.cache_manager.value_buffer[infer_state.decode_layer_id], attn_output, infer_state.block_loc, infer_state.start_loc, infer_state.seq_len, # infer_state.cache_manager.past_key_values_length, infer_state.max_len_in_batch, infer_state.other_kv_index, ) class LlamaInferenceForwards: """ This class holds forwards for llama inference. We intend to replace the forward methods for LlamaModel, LlamaDecoderLayer, and LlamaAttention for LlamaForCausalLM. """ @staticmethod def llama_causal_lm_forward( self: LlamaForCausalLM, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, infer_state: BatchInferState = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, ): r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
""" logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False # If is first stage and after warmup, go throught lm_head first if stage_manager.is_first_stage() and hidden_states is not None: lm_logits = self.lm_head(hidden_states) return {"logits": lm_logits} # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = LlamaInferenceForwards.llama_model_forward( self.model, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, infer_state=infer_state, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, ) return outputs @staticmethod def llama_model_forward( self: LlamaModel, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, infer_state: BatchInferState = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict use_cache = use_cache if use_cache is not None else self.config.use_cache # retrieve input_ids and 
inputs_embeds if stage_manager is None or stage_manager.is_first_stage(): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds else: assert stage_manager is not None assert hidden_states is not None, f"hidden_state should not be none in stage {stage_manager.stage}" input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape device = hidden_states.device if infer_state.is_context_stage: past_key_values_length = 0 else: past_key_values_length = infer_state.max_len_in_batch - 1 # NOTE: differentiate with prefill stage # block_loc require different value-assigning method for two different stage if use_cache and seq_length != 1: # NOTE assume prefill stage # allocate memory block infer_state.is_context_stage = True # set prefill stage, notify attention layer infer_state.context_mem_index = infer_state.cache_manager.alloc(infer_state.total_token_num) infer_state.init_block_loc( infer_state.block_loc, infer_state.seq_len, seq_length, infer_state.context_mem_index ) else: infer_state.is_context_stage = False alloc_mem = infer_state.cache_manager.alloc_contiguous(batch_size) if alloc_mem is not None: infer_state.decode_is_contiguous = True infer_state.decode_mem_index = alloc_mem[0] infer_state.decode_mem_start = alloc_mem[1] infer_state.decode_mem_end = alloc_mem[2] infer_state.block_loc[:, infer_state.max_len_in_batch - 1] = infer_state.decode_mem_index else: infer_state.decode_is_contiguous = False alloc_mem = 
infer_state.cache_manager.alloc(batch_size) infer_state.decode_mem_index = alloc_mem infer_state.block_loc[:, infer_state.max_len_in_batch - 1] = infer_state.decode_mem_index if position_ids is None: position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.repeat(batch_size, 1) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() if infer_state.is_context_stage: infer_state.position_cos = torch.index_select(self._cos_cached, 0, position_ids.view(-1)).view( position_ids.view(-1).shape[0], -1 ) infer_state.position_sin = torch.index_select(self._sin_cached, 0, position_ids.view(-1)).view( position_ids.view(-1).shape[0], -1 ) else: seq_len = infer_state.seq_len infer_state.position_cos = torch.index_select(self._cos_cached, 0, seq_len - 1).view(seq_len.shape[0], -1) infer_state.position_sin = torch.index_select(self._sin_cached, 0, seq_len - 1).view(seq_len.shape[0], -1) infer_state.other_kv_index = infer_state.block_loc[0, infer_state.max_len_in_batch - 1].item() # embed positions if attention_mask is None: attention_mask = torch.ones( (batch_size, infer_state.max_len_in_batch), dtype=torch.bool, device=hidden_states.device ) attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length ) # decoder layers infer_state.decode_layer_id = 0 start_idx, end_idx = stage_index[0], stage_index[1] if past_key_values is None: past_key_values = tuple([None] * (end_idx - start_idx + 1)) for idx, past_key_value in zip(range(start_idx, end_idx), past_key_values): decoder_layer = self.layers[idx] # NOTE: modify here for passing args to decoder layer layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, 
infer_state=infer_state, ) infer_state.decode_layer_id += 1 hidden_states = layer_outputs[0] if stage_manager.is_last_stage() or stage_manager.num_stages == 1: hidden_states = self.norm(hidden_states) # update indices # infer_state.block_loc[:, infer_state.max_len_in_batch-1] = infer_state.total_token_num + torch.arange(0, batch_size, dtype=torch.int32, device="cuda") infer_state.start_loc += torch.arange(0, batch_size, dtype=torch.int32, device="cuda") infer_state.seq_len += 1 infer_state.max_len_in_batch += 1 # if not return_dict: # return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) # return BaseModelOutputWithPast( # last_hidden_state=hidden_states, # past_key_values=next_cache, # hidden_states=all_hidden_states, # attentions=all_self_attns, # ) return {"hidden_states": hidden_states} @staticmethod def llama_decoder_layer_forward( self: LlamaDecoderLayer, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, infer_state: Optional[BatchInferState] = None, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, infer_state=infer_state, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if 
use_cache: outputs += (present_key_value,) return outputs @staticmethod def llama_flash_attn_kvcache_forward( self: LlamaAttention, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, infer_state: Optional[BatchInferState] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: assert use_cache is True, "use_cache should be set to True using this llama attention" bsz, q_len, _ = hidden_states.size() # NOTE might think about better way to handle transposed k and v # key_states [bs, seq_len, num_heads, head_dim/embed_size_per_head] # key_states_transposed [bs, num_heads, seq_len, head_dim/embed_size_per_head] query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim) key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_key_value_heads, self.head_dim) # NOTE might want to revise # need some way to record the length of past key values cache # since we won't return past_key_value_cache right now cos, sin = infer_state.position_cos, infer_state.position_sin llama_rotary_embedding_fwd(query_states.view(-1, self.num_heads, self.head_dim), cos, sin) llama_rotary_embedding_fwd(key_states.view(-1, self.num_key_value_heads, self.head_dim), cos, sin) query_states = query_states.reshape(-1, self.num_heads, self.head_dim) key_states = key_states.reshape(-1, self.num_key_value_heads, self.head_dim) value_states = value_states.reshape(-1, self.num_key_value_heads, self.head_dim) if infer_state.is_context_stage: # first token generation # copy key and value calculated in current step to memory manager copy_kv_to_mem_cache( infer_state.decode_layer_id, key_states, value_states, infer_state.context_mem_index, 
infer_state.cache_manager, ) attn_output = torch.empty_like(query_states) llama_triton_context_attention( query_states, key_states, value_states, attn_output, infer_state, num_key_value_groups=self.num_key_value_groups, ) else: if infer_state.decode_is_contiguous: # if decode is contiguous, then we copy to key cache and value cache in cache manager directly cache_k = infer_state.cache_manager.key_buffer[infer_state.decode_layer_id][ infer_state.decode_mem_start : infer_state.decode_mem_end, :, : ] cache_v = infer_state.cache_manager.value_buffer[infer_state.decode_layer_id][ infer_state.decode_mem_start : infer_state.decode_mem_end, :, : ] cache_k.copy_(key_states) cache_v.copy_(value_states) else: # if decode is not contiguous, use triton kernel to copy key and value cache # k, v shape: [batch_size, num_heads, head_dim/embed_size_per_head copy_kv_to_mem_cache( infer_state.decode_layer_id, key_states, value_states, infer_state.decode_mem_index, infer_state.cache_manager, ) if HAS_LIGHTLLM_KERNEL: attn_output = torch.empty_like(query_states) llama_triton_token_attention( query_states, attn_output, infer_state, num_key_value_groups=self.num_key_value_groups ) else: self.num_heads // self.num_key_value_heads cache_k = infer_state.cache_manager.key_buffer[infer_state.decode_layer_id] cache_v = infer_state.cache_manager.value_buffer[infer_state.decode_layer_id] query_states = query_states.view(bsz, -1, self.num_heads, self.head_dim) copy_cache_k = cache_k.view(bsz, -1, self.num_key_value_heads, self.head_dim) copy_cache_v = cache_v.view(bsz, -1, self.num_key_value_heads, self.head_dim) attn_output = flash_attn_with_kvcache( q=query_states, k_cache=copy_cache_k, v_cache=copy_cache_v, softmax_scale=1 / math.sqrt(self.head_dim), causal=True, ) attn_output = attn_output.view(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) # return past_key_value as None return attn_output, None, None
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/hybridengine/modeling/__init__.py
colossalai/legacy/inference/hybridengine/modeling/__init__.py
from .llama import LlamaInferenceForwards __all__ = ["LlamaInferenceForwards"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/inference/hybridengine/modeling/_utils.py
colossalai/legacy/inference/hybridengine/modeling/_utils.py
""" Utils for model inference """ import os import torch from colossalai.kernel.triton.copy_kv_cache_dest import copy_kv_cache_to_dest def copy_kv_to_mem_cache(layer_id, key_buffer, value_buffer, context_mem_index, mem_manager): """ This function copies the key and value cache to the memory cache Args: layer_id : id of current layer key_buffer : key cache value_buffer : value cache context_mem_index : index of memory cache in kv cache manager mem_manager : cache manager """ copy_kv_cache_to_dest(key_buffer, context_mem_index, mem_manager.key_buffer[layer_id]) copy_kv_cache_to_dest(value_buffer, context_mem_index, mem_manager.value_buffer[layer_id]) def init_to_get_rotary(self, base=10000, use_elem=False): """ This function initializes the rotary positional embedding, it is compatible for all models and is called in ShardFormer Args: self : Model that holds the rotary positional embedding base : calculation arg use_elem : activated when using chatglm-based models """ self.config.head_dim_ = self.config.hidden_size // self.config.num_attention_heads if not hasattr(self.config, "rope_scaling"): rope_scaling_factor = 1.0 else: rope_scaling_factor = self.config.rope_scaling.factor if self.config.rope_scaling is not None else 1.0 if hasattr(self.config, "max_sequence_length"): max_seq_len = self.config.max_sequence_length elif hasattr(self.config, "max_position_embeddings"): max_seq_len = self.config.max_position_embeddings * rope_scaling_factor else: max_seq_len = 2048 * rope_scaling_factor base = float(base) # NTK ref: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ ntk_alpha = os.environ.get("INFER_NTK_ALPHA", None) if ntk_alpha is not None: ntk_alpha = float(ntk_alpha) assert ntk_alpha >= 1, "NTK alpha must be greater than or equal to 1" if ntk_alpha > 1: print(f"Note: NTK enabled, alpha set to {ntk_alpha}") max_seq_len *= ntk_alpha base = base * (ntk_alpha ** (self.head_dim_ / (self.head_dim_ - 2))) # Base 
change formula n_elem = self.config.head_dim_ if use_elem: n_elem //= 2 inv_freq = 1.0 / (base ** (torch.arange(0, n_elem, 2, device="cpu", dtype=torch.float32) / n_elem)) t = torch.arange(max_seq_len + 1024 * 64, device="cpu", dtype=torch.float32) / rope_scaling_factor freqs = torch.outer(t, inv_freq) self._cos_cached = torch.cos(freqs).to(torch.float16).cuda() self._sin_cached = torch.sin(freqs).to(torch.float16).cuda()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false