Dataset columns (type and value range):

Column        Type    Values / range
repo          string  lengths 7 to 90
file_url      string  lengths 81 to 315
file_path     string  lengths 4 to 228
content       string  lengths 0 to 32.8k
language      string  1 value
license       string  7 values
commit_sha    string  length 40 (fixed)
retrieved_at  date    2026-01-04 14:38:15 to 2026-01-05 02:33:18
truncated     bool    2 classes
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/_subclasses/flop_tensor.py
colossalai/_analyzer/_subclasses/flop_tensor.py
# adopted from https://github.com/facebookresearch/fvcore/blob/main/fvcore/nn/jit_handles.py # ideas from https://pastebin.com/AkvAyJBw # and https://dev-discuss.pytorch.org/t/the-ideal-pytorch-flop-counter-with-torch-dispatch/505 import operator from collections import defaultdict from contextlib import contextmanager from enum import Enum, auto from functools import partial, reduce from numbers import Number from typing import Any, Callable, List, Union import torch from packaging import version from torch.utils._pytree import tree_map from .meta_tensor import MetaTensor aten = torch.ops.aten class Phase(Enum): FWD = auto() BWD = auto() def normalize_tuple(x): if not isinstance(x, tuple): return (x,) return x def _format_flops(flop): K = 1e3 M = 1e6 B = 1e9 T = 1e12 if flop < K: return f"{flop:.2f}" elif flop < M: return f"{flop / K:.2f}K" elif flop < B: return f"{flop / M:.2f}M" elif flop < T: return f"{flop / B:.2f}B" else: return f"{flop / T:.2f}T" def flop_count(module: Union[torch.nn.Module, Callable] = None, *args, verbose: bool = False, **kwargs) -> Number: """ Count the number of floating point operations in a model. Ideas from https://pastebin.com/AkvAyJBw. Args: module (torch.nn.Module): A PyTorch model. *args: Input arguments to the model. verbose (bool): If True, print the number of flops for each module. **kwargs: Input keyword arguments to the model. Returns: Number: The total number of floating point operations (FWD + BWD). """ maybe_inplace = ( getattr(module, "inplace", False) or kwargs.get("inplace", False) or getattr(module, "__name__", None) in ("add_", "mul_", "div_", "sub_") ) class DummyModule(torch.nn.Module): def __init__(self, func): super().__init__() self.func = func self.__name__ = func.__name__ def forward(self, *args, **kwargs): return self.func(*args, **kwargs) total_flop_count = {Phase.FWD: 0, Phase.BWD: 0} flop_counts = defaultdict(lambda: defaultdict(int)) parents = ["Global"] module = module if isinstance(module, torch.nn.Module) else DummyModule(module) class FlopTensor(MetaTensor): _tensor: torch.Tensor def __repr__(self): name = "FlopParameter" if getattr(self, "_is_param", False) else "FlopTensor" if self.grad_fn: return f"{name}(..., size={tuple(self.shape)}, device='{self.device}', dtype={self.dtype}, grad_fn={self.grad_fn})" return f"{name}(..., size={tuple(self.shape)}, device='{self.device}', dtype={self.dtype})" @classmethod def __torch_dispatch__(cls, func, types, args=(), kwargs=None): # no_dispatch is only needed if you use enable_python_mode. # It prevents infinite recursion. 
rs = super().__torch_dispatch__(func, types, args, kwargs) outs = normalize_tuple(rs) if func in flop_mapping: nonlocal flop_counts, total_flop_count flop_count = flop_mapping[func](args, outs) for par in parents: flop_counts[par][func.__name__] += flop_count total_flop_count[cur_phase] += flop_count def wrap(x): if isinstance(x, MetaTensor): x = FlopTensor(x) return x rs = tree_map(wrap, rs) return rs def is_autogradable(x): return isinstance(x, torch.Tensor) and x.is_floating_point() def create_backwards_push(name): class PushState(torch.autograd.Function): @staticmethod def forward(ctx, *args): args = tree_map(lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args) if len(args) == 1: return args[0] return args @staticmethod def backward(ctx, *grad_outs): nonlocal parents parents.append(name) return grad_outs return PushState.apply def create_backwards_pop(name): class PopState(torch.autograd.Function): @staticmethod def forward(ctx, *args): args = tree_map(lambda x: x.clone() if isinstance(x, torch.Tensor) else x, args) if len(args) == 1: return args[0] return args @staticmethod def backward(ctx, *grad_outs): nonlocal parents assert parents[-1] == name parents.pop() return grad_outs return PopState.apply def enter_module(name): def f(module, inputs): nonlocal parents parents.append(name) inputs = normalize_tuple(inputs) out = create_backwards_pop(name)(*inputs) return out return f def exit_module(name): def f(module, inputs, outputs): nonlocal parents assert parents[-1] == name parents.pop() outputs = normalize_tuple(outputs) return create_backwards_push(name)(*outputs) return f @contextmanager def instrument_module(mod): registered = [] for name, module in dict(mod.named_children()).items(): registered.append(module.register_forward_pre_hook(enter_module(name))) registered.append(module.register_forward_hook(exit_module(name))) yield for handle in registered: handle.remove() def display_flops(): for mod in flop_counts.keys(): print(f"Module: ", mod) for k, v in flop_counts[mod].items(): print("\t", k, _format_flops(v)) print() def detach_variables(r): if isinstance(r, torch.Tensor): requires_grad = r.requires_grad r = r.detach() r.requires_grad = requires_grad return r def wrap(r): if isinstance(r, torch.Tensor): data_ptr_fn = getattr(r, "_tensor", r).data_ptr r = FlopTensor(detach_variables(r)) if maybe_inplace: r = r + 0 r._tensor.data_ptr = data_ptr_fn return r with instrument_module(module): cur_phase = Phase.FWD rst = module(*tree_map(wrap, args), **tree_map(wrap, kwargs)) rst = tuple(r for r in normalize_tuple(rst) if is_autogradable(r) and r.requires_grad) cur_phase = Phase.BWD if rst: grad = [torch.zeros_like(t) for t in rst] torch.autograd.backward( rst, grad, ) if verbose: display_flops() return total_flop_count[Phase.FWD], total_flop_count[Phase.BWD] def matmul_flop_jit(inputs: List[Any], outputs: List[Any]) -> Number: """ Count flops for matmul. """ # Inputs should be a list of length 2. # Inputs contains the shapes of two matrices. 
input_shapes = [v.shape for v in inputs] assert len(input_shapes) == 2, input_shapes # There are three cases: 1) gemm, 2) gemv, 3) dot if all(len(shape) == 2 for shape in input_shapes): # gemm assert input_shapes[0][-1] == input_shapes[1][-2], input_shapes elif all(len(shape) == 1 for shape in input_shapes): # dot assert input_shapes[0][0] == input_shapes[1][0], input_shapes # expand shape input_shapes[0] = torch.Size([1, input_shapes[0][0]]) input_shapes[1] = torch.Size([input_shapes[1][0], 1]) else: # gemv if len(input_shapes[0]) == 1: assert input_shapes[0][0] == input_shapes[1][-2], input_shapes input_shapes.reverse() else: assert input_shapes[1][0] == input_shapes[0][-1], input_shapes # expand the shape of the vector to [batch size, 1] input_shapes[-1] = torch.Size([input_shapes[-1][-1], 1]) flops = reduce(operator.mul, input_shapes[0]) * input_shapes[-1][-1] return flops def addmm_flop_jit(inputs: List[Any], outputs: List[Any]) -> Number: """ Count flops for fully connected layers. """ # Count flop for nn.Linear # inputs is a list of length 3. input_shapes = [v.shape for v in inputs[1:3]] # input_shapes[0]: [batch size, input feature dimension] # input_shapes[1]: [input feature dimension, output feature dimension] assert len(input_shapes[0]) == 2, input_shapes[0] assert len(input_shapes[1]) == 2, input_shapes[1] batch_size, input_dim = input_shapes[0] output_dim = input_shapes[1][1] flops = batch_size * input_dim * output_dim return flops def linear_flop_jit(inputs: List[Any], outputs: List[Any]) -> Number: """ Count flops for the aten::linear operator. """ # Inputs is a list of length 3; unlike aten::addmm, it is the first # two elements that are relevant. input_shapes = [v.shape for v in inputs[0:2]] # input_shapes[0]: [dim0, dim1, ..., input_feature_dim] # input_shapes[1]: [output_feature_dim, input_feature_dim] assert input_shapes[0][-1] == input_shapes[1][-1] flops = reduce(operator.mul, input_shapes[0]) * input_shapes[1][0] return flops def bmm_flop_jit(inputs: List[Any], outputs: List[Any]) -> Number: """ Count flops for the bmm operation. """ # Inputs should be a list of length 2. # Inputs contains the shapes of two tensor. assert len(inputs) == 2, len(inputs) input_shapes = [v.shape for v in inputs] n, c, t = input_shapes[0] d = input_shapes[-1][-1] flops = n * c * t * d return flops def conv_flop_count( x_shape: List[int], w_shape: List[int], out_shape: List[int], transposed: bool = False, ) -> Number: """ Count flops for convolution. Note only multiplication is counted. Computation for addition and bias is ignored. Flops for a transposed convolution are calculated as flops = (x_shape[2:] * prod(w_shape) * batch_size). Args: x_shape (list(int)): The input shape before convolution. w_shape (list(int)): The filter shape. out_shape (list(int)): The output shape after convolution. transposed (bool): is the convolution transposed Returns: int: the number of flops """ batch_size = x_shape[0] conv_shape = (x_shape if transposed else out_shape)[2:] flops = batch_size * reduce(operator.mul, w_shape) * reduce(operator.mul, conv_shape) return flops def conv_flop_jit(inputs: List[Any], outputs: List[Any]): """ Count flops for convolution. 
""" x, w = inputs[:2] x_shape, w_shape, out_shape = (x.shape, w.shape, outputs[0].shape) transposed = inputs[6] return conv_flop_count(x_shape, w_shape, out_shape, transposed=transposed) def transpose_shape(shape): return [shape[1], shape[0]] + list(shape[2:]) def conv_backward_flop_jit(inputs: List[Any], outputs: List[Any]): grad_out_shape, x_shape, w_shape = [i.shape for i in inputs[:3]] output_mask = inputs[-1] fwd_transposed = inputs[7] flop_count = 0 if output_mask[0]: grad_input_shape = outputs[0].shape flop_count += conv_flop_count(grad_out_shape, w_shape, grad_input_shape, not fwd_transposed) if output_mask[1]: grad_weight_shape = outputs[1].shape flop_count += conv_flop_count(transpose_shape(x_shape), grad_out_shape, grad_weight_shape, fwd_transposed) return flop_count def norm_flop_counter(affine_arg_index: int, input_arg_index: int) -> Callable: """ Args: affine_arg_index: index of the affine argument in inputs """ def norm_flop_jit(inputs: List[Any], outputs: List[Any]) -> Number: """ Count flops for norm layers. """ # Inputs[0] contains the shape of the input. input_shape = inputs[input_arg_index].shape has_affine = ( inputs[affine_arg_index].shape is not None if hasattr(inputs[affine_arg_index], "shape") else inputs[affine_arg_index] ) assert 2 <= len(input_shape) <= 5, input_shape # 5 is just a rough estimate flop = reduce(operator.mul, input_shape) * (5 if has_affine else 4) return flop return norm_flop_jit def batchnorm_flop_jit(inputs: List[Any], outputs: List[Any], training: bool = None) -> Number: if training is None: training = inputs[-3] assert isinstance(training, bool), "Signature of aten::batch_norm has changed!" if training: return norm_flop_counter(1, 0)(inputs, outputs) # pyre-ignore has_affine = inputs[1].shape is not None input_shape = reduce(operator.mul, inputs[0].shape) return input_shape * (2 if has_affine else 1) def ewise_flop_counter(input_scale: float = 1, output_scale: float = 0) -> Callable: """ Count flops by input_tensor.numel() * input_scale + output_tensor.numel() * output_scale Args: input_scale: scale of the input tensor (first argument) output_scale: scale of the output tensor (first element in outputs) """ def ewise_flop(inputs: List[Any], outputs: List[Any]) -> Number: ret = 0 if input_scale != 0: shape = inputs[0].shape ret += input_scale * reduce(operator.mul, shape) if shape else 0 if output_scale != 0: shape = outputs[0].shape ret += output_scale * reduce(operator.mul, shape) if shape else 0 return ret return ewise_flop def zero_flop_jit(*args): """ Count flops for zero flop layers. 
""" return 0 if version.parse(torch.__version__) >= version.parse("1.12.0"): flop_mapping = { # gemm aten.mm.default: matmul_flop_jit, aten.matmul.default: matmul_flop_jit, aten.addmm.default: addmm_flop_jit, aten.bmm.default: bmm_flop_jit, # convolution aten.convolution.default: conv_flop_jit, aten._convolution.default: conv_flop_jit, aten.convolution_backward.default: conv_backward_flop_jit, # normalization aten.native_batch_norm.default: batchnorm_flop_jit, aten.native_batch_norm_backward.default: batchnorm_flop_jit, aten.cudnn_batch_norm.default: batchnorm_flop_jit, aten.cudnn_batch_norm_backward.default: partial(batchnorm_flop_jit, training=True), aten.native_layer_norm.default: norm_flop_counter(2, 0), aten.native_layer_norm_backward.default: norm_flop_counter(2, 0), # pooling aten.avg_pool1d.default: ewise_flop_counter(1, 0), aten.avg_pool2d.default: ewise_flop_counter(1, 0), aten.avg_pool2d_backward.default: ewise_flop_counter(0, 1), aten.avg_pool3d.default: ewise_flop_counter(1, 0), aten.avg_pool3d_backward.default: ewise_flop_counter(0, 1), aten.max_pool1d.default: ewise_flop_counter(1, 0), aten.max_pool2d.default: ewise_flop_counter(1, 0), aten.max_pool3d.default: ewise_flop_counter(1, 0), aten.max_pool1d_with_indices.default: ewise_flop_counter(1, 0), aten.max_pool2d_with_indices.default: ewise_flop_counter(1, 0), aten.max_pool2d_with_indices_backward.default: ewise_flop_counter(0, 1), aten.max_pool3d_with_indices.default: ewise_flop_counter(1, 0), aten.max_pool3d_with_indices_backward.default: ewise_flop_counter(0, 1), aten._adaptive_avg_pool2d.default: ewise_flop_counter(1, 0), aten._adaptive_avg_pool2d_backward.default: ewise_flop_counter(0, 1), aten._adaptive_avg_pool3d.default: ewise_flop_counter(1, 0), aten._adaptive_avg_pool3d_backward.default: ewise_flop_counter(0, 1), aten.embedding_dense_backward.default: ewise_flop_counter(0, 1), aten.embedding.default: ewise_flop_counter(1, 0), } ewise_flop_aten = [ # basic op aten.add.Tensor, aten.add_.Tensor, aten.div.Tensor, aten.div_.Tensor, aten.div.Scalar, aten.div_.Scalar, aten.mul.Tensor, aten.mul.Scalar, aten.mul_.Tensor, aten.neg.default, aten.pow.Tensor_Scalar, aten.rsub.Scalar, aten.sum.default, aten.sum.dim_IntList, aten.mean.dim, # activation op aten.hardswish.default, aten.hardswish_.default, aten.hardswish_backward.default, aten.hardtanh.default, aten.hardtanh_.default, aten.hardtanh_backward.default, aten.hardsigmoid_backward.default, aten.hardsigmoid.default, aten.gelu.default, aten.gelu_backward.default, aten.silu.default, aten.silu_.default, aten.silu_backward.default, aten.sigmoid.default, aten.sigmoid_backward.default, aten._softmax.default, aten._softmax_backward_data.default, aten.relu_.default, aten.relu.default, aten.tanh.default, aten.tanh_backward.default, aten.threshold_backward.default, # dropout aten.native_dropout.default, aten.native_dropout_backward.default, # distribution aten.bernoulli_.float, # where aten.where.self, ] for op in ewise_flop_aten: flop_mapping[op] = ewise_flop_counter(1, 0) # fix-me: this will be removed in future zero_flop_aten = [ aten.as_strided.default, aten.as_strided_.default, aten.cat.default, aten.clone.default, aten.copy_.default, aten.detach.default, aten.expand.default, aten.empty_like.default, aten.new_empty.default, aten.new_empty_strided.default, aten.ones_like.default, aten._reshape_alias.default, aten.select.int, aten.select_backward.default, aten.squeeze.dim, aten.slice.Tensor, aten.slice_backward.default, aten.split.Tensor, aten.permute.default, aten.t.default, 
aten.transpose.int, aten._to_copy.default, aten.unsqueeze.default, aten.unbind.int, aten._unsafe_view.default, aten.view.default, aten.zero_.default, aten.zeros_like.default, ] for op in zero_flop_aten: flop_mapping[op] = zero_flop_jit else: flop_mapping = {} elementwise_flop_aten = {} zero_flop_aten = {}
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
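A minimal usage sketch for flop_count from the file above. The toy model, the shapes, and the assumption that plain CPU tensors are accepted as sample inputs are illustrative and not taken from the source.

import torch
from colossalai._analyzer._subclasses.flop_tensor import flop_count

# Hypothetical toy model; flop_count wraps the sample inputs in FlopTensor
# internally and returns (forward FLOPs, backward FLOPs).
model = torch.nn.Sequential(torch.nn.Linear(64, 128), torch.nn.ReLU())
x = torch.randn(8, 64, requires_grad=True)
fwd_flop, bwd_flop = flop_count(model, x, verbose=True)
print(fwd_flop, bwd_flop)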
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/node_util.py
colossalai/_analyzer/fx/node_util.py
from dataclasses import dataclass, field from typing import Dict, List, Optional, Tuple, Union import torch from torch.autograd.profiler_util import _format_memory from torch.fx import Node from colossalai._analyzer.envs import MeshConfig def intersect(a, b): return {k: a[k] for k in a if k in b} def subtract(a, b): return {k: a[k] for k in a if k not in b} def union(a, b): return {**a, **b} def compute_size_in_bytes(elem: Union[torch.Tensor, Dict, List, Tuple, int]) -> int: """Compute the size of a tensor or a collection of tensors in bytes. Args: elem (torch.Tensor | Dict | List | Tuple | int): Arbitrary nested ``torch.Tensor`` data structure. Returns: int: The size of the tensor or the collection of tensors in bytes. """ nbytes = 0 if isinstance(elem, torch.Tensor): if elem.is_quantized: nbytes += elem.numel() * torch._empty_affine_quantized([], dtype=elem.dtype).element_size() else: nbytes += elem.numel() * torch.tensor([], dtype=elem.dtype).element_size() elif isinstance(elem, dict): value_list = [v for _, v in elem.items()] nbytes += compute_size_in_bytes(value_list) elif isinstance(elem, tuple) or isinstance(elem, list) or isinstance(elem, set): for e in elem: nbytes += compute_size_in_bytes(e) return nbytes @dataclass class MetaInfo: r""" The base class to store all profiling and static graph analysis information needed for auto-parallel system in Colossal-AI. ============================================================================ ------------------------------- | FX.Node | <----- [input/param] are ---> |[input/param] [grad_inp]| [grad_inp] contributes to the placeholders (might be | | \__________ | | profiled peak memory in backward saved for backward. | | \ | | pass. [grad_param] is calculated | | \ | | separately. | [interm] -------> [grad_int]| <----- | | \_________ | | [grad_interm] marks the peak | / \ \ | | memory in backward pass. [x] is not counted ---> | [x] [interm] --> [grad_int]| <----- in [interm] because | | \_____ | | it is not saved for | | \ | | backward. | [output] \ | | <----- [output] is potentially ------------------------------- [input] for the next node. ============================================================================ Accumulate Size = ALL_PREVIOUS_CTX U {Interm Size + Output Size} Output Size = ([output] in global_ctx and not is_alias) Temp Size = ([output] not in global_ctx and not is_alias) Backward Size = ([grad_inp]) Usage: >>> for node in graph.nodes: >>> n_info = MetaInfo(node) # will create a new MetaInfo instance and store in node.meta['info'] >>> # if not exist, otherwise return the existing one >>> n_info.to_recompute = ... # set the to_recompute attribute Remarks: This feature is experimental and all the entries are subject to change. 
""" # reference node: Node # directory mod_dir: str = "" # ctx[data_ptr] = Tensor # mark the storage for ctx.save_for_backward global_ctx: Dict[str, torch.Tensor] = field(default_factory=lambda: {}) # globally shared curr_ctx: Dict[str, torch.Tensor] = field(default_factory=lambda: {}) # global_ctx till this node # should be updated after each graph manipulation # ============================== Update ==================================== # parameter and buffer within ``Node`` parameters: Dict[str, torch.nn.Parameter] = field(default_factory=lambda: {}) buffers: Dict[str, torch.Tensor] = field(default_factory=lambda: {}) inputs: Tuple[torch.Tensor] = () outputs: Tuple[torch.Tensor] = () is_alias: Tuple[bool] = () # whether the output is an alias of input # compute cost fwd_flop: Optional[int] = 0 bwd_flop: Optional[int] = 0 # communication cost (should be the size in bytes of communication) fwd_comm: Optional[int] = 0 bwd_comm: Optional[int] = 0 # should keep the same whenever manipulated # ============================= Invariant ================================== activation_checkpoint: Tuple[torch.Tensor] = () # (region_0, region_1, ...) support nested codegen to_offload: Optional[bool] = False sharding_spec: str = "RR" def __new__(cls, node: Node, **kwargs): orig_init = cls.__init__ # if initialized, return the existing one # should disable the __init__ function if node.meta.get("info", None) is not None: def _dummy(self, *args, **kwargs): if getattr(self, "_is_init", False): self._is_init = True orig_init(self, *args, **kwargs) cls.__init__ = orig_init cls.__init__ = _dummy return node.meta["info"] return super().__new__(cls) def __post_init__(self): self.node.meta["info"] = self @property def fwd_time(self, tflops: float = MeshConfig.TFLOPS, bandwidth: float = MeshConfig.BANDWIDTH): return self.fwd_flop / tflops + self.fwd_comm / bandwidth @property def bwd_time(self, tflops: float = MeshConfig.TFLOPS, bandwidth: float = MeshConfig.BANDWIDTH): return self.bwd_flop / tflops + self.bwd_comm / bandwidth @property def param_size(self): return compute_size_in_bytes(self.parameters) @property def buffer_size(self): return compute_size_in_bytes(self.buffers) @property def output_size(self): """Used in CheckpointSolver""" output_ctx = { o.data_ptr(): o for o, is_alias in zip(self.outputs, self.is_alias) if not is_alias and isinstance(o, torch.Tensor) and not isinstance(o, torch.nn.Parameter) } return compute_size_in_bytes(intersect(self.global_ctx, output_ctx)) @property def accumulate_size(self): """Used in CheckpointSolver""" output_ctx = { o.data_ptr(): o for o, is_alias in zip(self.outputs, self.is_alias) if not is_alias and isinstance(o, torch.Tensor) and not isinstance(o, torch.nn.Parameter) } return compute_size_in_bytes(union(self.curr_ctx, intersect(self.global_ctx, output_ctx))) @property def temp_size(self): """Used in CheckpointSolver""" output_ctx = { o.data_ptr(): o for o, is_alias in zip(self.outputs, self.is_alias) if not is_alias and isinstance(o, torch.Tensor) and not isinstance(o, torch.nn.Parameter) } return compute_size_in_bytes(subtract(output_ctx, self.global_ctx)) @property def backward_size(self): """Used in CheckpointSolver""" return compute_size_in_bytes(self.inputs) def __repr__(self): s = f"Node {self.node.name}" if self.parameters: s += f"\n\thas parameter of size {_format_memory(self.param_size)}" if self.buffers: s += f"\n\thas buffer of size {_format_memory(self.buffer_size)}" if self.output_size: s += f"\n\thas output activation of size 
{_format_memory(self.output_size)}" # if self.total_size: # s += f'\n\thas total activation of size {_format_memory(self.total_size)}' if self.temp_size: s += f"\n\thas temp activation of size {_format_memory(self.temp_size)}" if self.backward_size: s += f"\n\thas backward activation of size {_format_memory(self.backward_size)}" s += ( f"\n\tfwd_flop = {self.fwd_flop}" f"\n\tbwd_flop = {self.bwd_flop}" f"\n\tfwd_comm = {self.fwd_comm}" f"\n\tbwd_comm = {self.bwd_comm}" f"\n\tto_recompute = {self.to_recompute}" f"\n\tto_offload = {self.to_offload}" f"\n\tsharding_spec = {self.sharding_spec}" ) return s
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
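A short sketch of how compute_size_in_bytes from node_util.py above tallies nested structures (numel times element size per tensor); the shapes and dtype are illustrative.

import torch
from colossalai._analyzer.fx.node_util import compute_size_in_bytes

t = torch.empty(4, 256, dtype=torch.float16)  # 4 * 256 * 2 bytes = 2048 bytes
nested = {"a": t, "b": [t, t]}                # each occurrence of a tensor is counted
print(compute_size_in_bytes(t))               # 2048
print(compute_size_in_bytes(nested))          # 6144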
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/codegen.py
colossalai/_analyzer/fx/codegen.py
from typing import Any, Dict, List, Tuple import torch try: from torch.fx.graph import CodeGen except: pass from torch.fx.graph import ( PythonCode, _custom_builtins, _format_target, _is_from_torch, _Namespace, _origin_type_map, _register_custom_builtin, inplace_methods, magic_methods, ) from torch.fx.node import Argument, Node, _get_qualified_name, _type_repr, map_arg import colossalai from colossalai.fx._compatibility import compatibility _register_custom_builtin("colossalai", "import colossalai", colossalai) def _gen_ckpt_fn_def(label, free_vars: List[str]) -> str: """ Generate the checkpoint function definition """ return f"def checkpoint_{label}({', '.join(['self'] + free_vars)}):" def _gen_ckpt_output(output_vars: List[str]) -> str: """ Generate the return statement for checkpoint region """ return f"return {', '.join(output_vars)}" def _gen_ckpt_usage(label, input_vars, output_vars, use_reentrant=True): """ Generate the checkpoint function call code text """ outputs = ", ".join(output_vars) inputs = ", ".join(input_vars) return f"{outputs} = torch.utils.checkpoint.checkpoint(self.checkpoint_{label}, {inputs}, use_reentrant={use_reentrant})" def _end_of_ckpt(node: Node, ckpt_level: int) -> bool: """ Check if the node could end the ckpt region at `ckpt_level` """ if len(node.meta["info"].activation_checkpoint) > ckpt_level: return node.meta["info"].activation_checkpoint[ckpt_level] is not None return True def _find_input_and_output_nodes(nodes: List[Node]): """ Find the input and output node names which are not found in the given list of nodes. """ input_nodes = [] output_nodes = [] # if a node has an input node which is not in the node list # we treat that input node as the input of the checkpoint function for node in nodes: for input_node in node._input_nodes.keys(): node_repr = repr(input_node) if input_node not in nodes and node_repr not in input_nodes: input_nodes.append(node_repr) # if a node has a user node which is not in the node list # we treat that user node as the node receiving the current node output for node in nodes: for output_node in node.users.keys(): node_repr = repr(node) if output_node not in nodes and node_repr not in output_nodes: output_nodes.append(node_repr) return input_nodes, output_nodes def _find_nested_ckpt_regions(node_list: List[Node], ckpt_level: int = 0): """ Find the nested checkpoint regions given a list of consecutive nodes. The outputs will be list of tuples, each tuple is in the form of (start_index, end_index). """ ckpt_regions = [] start = -1 end = -1 current_region = None for idx, node in enumerate(node_list): if len(node.meta["info"].activation_checkpoint) > ckpt_level: act_ckpt_label = node.meta["info"].activation_checkpoint[ckpt_level] # this activation checkpoint label is not set yet # meaning this is the first node of the activation ckpt region if current_region is None: current_region = act_ckpt_label start = idx # if activation checkpoint has changed # we restart the tracking # e.g. 
node ckpt states = [ckpt1, ckpt2, ckpt2, ckpt2] if act_ckpt_label != current_region: assert start != -1 ckpt_regions.append((start, idx - 1)) current_region = act_ckpt_label start = idx end = -1 elif current_region is not None and _end_of_ckpt(node, ckpt_level): # used to check the case below # node ckpt states = [ckpt, ckpt, non-ckpt] end = idx - 1 assert start != -1 and end != -1 ckpt_regions.append((start, end)) start = end = -1 current_region = None else: pass if current_region is not None: end = len(node_list) - 1 ckpt_regions.append((start, end)) return ckpt_regions def emit_ckpt_func( body, ckpt_func, node_list: List[Node], emit_node_func, delete_unused_value_func, ckpt_level=0, in_ckpt=False ): """Emit ckpt function in nested way Args: body: forward code - in recursive calls, this part will be checkpoint functions code ckpt_func: checkpoint functions code - in recursive calls, this part will be a buffer node_list (List[Node]): list of torch.fx.Node emit_node_func: function to emit a node delete_unused_value_func: function to delete unused value level (int, optional): checkpoint level. Defaults to 0. in_ckpt (bool, optional): indicates wether the func is in recursive call. Defaults to False. """ inputs, outputs = _find_input_and_output_nodes(node_list) # label given by each layer, e.g. if you are currently at level (0, 1, 1) # the label will be '0_1_1' label = "_".join([str(idx) for idx in node_list[0].meta["info"].activation_checkpoint[: ckpt_level + 1]]) ckpt_fn_def = _gen_ckpt_fn_def(label, inputs) ckpt_func.append(f"{ckpt_fn_def}\n") # if there is more level to fetch if ckpt_level + 1 < max(map(lambda node: len(node.meta["info"].activation_checkpoint), node_list)): ckpt_regions = _find_nested_ckpt_regions(node_list, ckpt_level + 1) start_idx = [item[0] for item in ckpt_regions] end_idx = [item[1] for item in ckpt_regions] # use ckpt_func_buffer to store nested checkpoint functions ckpt_func_buffer = [] node_idx = 0 while 1: if node_idx >= len(node_list): break if node_idx in start_idx: ckpt_node_list = node_list[node_idx : end_idx[start_idx.index(node_idx)] + 1] emit_ckpt_func( ckpt_func, ckpt_func_buffer, ckpt_node_list, emit_node_func, delete_unused_value_func, ckpt_level + 1, True, ) node_idx += len(ckpt_node_list) else: node = node_list[node_idx] emit_node_func(node, ckpt_func) ckpt_func[-1] = " " + ckpt_func[-1] delete_unused_value_func(node, ckpt_func) node_idx += 1 ckpt_func.append(" " + _gen_ckpt_output(outputs) + "\n\n") ckpt_func += ckpt_func_buffer # last level else: for node in node_list: emit_node_func(node, ckpt_func) ckpt_func[-1] = " " + ckpt_func[-1] delete_unused_value_func(node, ckpt_func) ckpt_func.append(" " + _gen_ckpt_output(outputs) + "\n\n") usage = _gen_ckpt_usage(label, inputs, outputs, False) + "\n" if in_ckpt: usage = " " + usage body.append(usage) def emit_code_with_activation_checkpoint(body, ckpt_func, nodes, emit_node_func, delete_unused_value_func): """Emit code with nested activation checkpoint When we detect some of the annotation is a , we will use this function to emit the activation checkpoint codes. 
Args: body: forward code ckpt_func: checkpoint functions code nodes: graph.nodes emit_node_func: function to emit node delete_unused_value_func: function to remove the unused value """ ckpt_regions = _find_nested_ckpt_regions(nodes, 0) start_idx = [item[0] for item in ckpt_regions] end_idx = [item[1] for item in ckpt_regions] node_list = list(nodes) node_idx = 0 while 1: # break if we finish the processing all the nodes if node_idx >= len(node_list): break # process ckpt_regions if node_idx in start_idx: ckpt_node_list = node_list[node_idx : end_idx[start_idx.index(node_idx)] + 1] emit_ckpt_func(body, ckpt_func, ckpt_node_list, emit_node_func, delete_unused_value_func) node_idx += len(ckpt_node_list) # process node in forward function else: node = node_list[node_idx] emit_node_func(node, body) delete_unused_value_func(node, body) node_idx += 1 @compatibility(is_backward_compatible=True) class ActivationCheckpointCodeGen(CodeGen): def _gen_python_code(self, nodes, root_module: str, namespace: _Namespace, verbose=None) -> PythonCode: free_vars: List[str] = [] body: List[str] = [] globals_: Dict[str, Any] = {} wrapped_fns: Dict[str, None] = {} # Wrap string in list to pass by reference maybe_return_annotation: List[str] = [""] def add_global(name_hint: str, obj: Any): """Add an obj to be tracked as a global. We call this for names that reference objects external to the Graph, like functions or types. Returns: the global name that should be used to reference 'obj' in generated source. """ if _is_from_torch(obj) and obj != torch.device: # to support registering torch.device # HACK: workaround for how torch custom ops are registered. We # can't import them like normal modules so they must retain their # fully qualified name. return _get_qualified_name(obj) # normalize the name hint to get a proper identifier global_name = namespace.create_name(name_hint, obj) if global_name in globals_: assert globals_[global_name] is obj return global_name globals_[global_name] = obj return global_name # Pre-fill the globals table with registered builtins. for name, (_, obj) in _custom_builtins.items(): add_global(name, obj) def type_repr(o: Any): if o == (): # Empty tuple is used for empty tuple type annotation Tuple[()] return "()" typename = _type_repr(o) if hasattr(o, "__origin__"): # This is a generic type, e.g. typing.List[torch.Tensor] origin_type = _origin_type_map.get(o.__origin__, o.__origin__) origin_typename = add_global(_type_repr(origin_type), origin_type) if hasattr(o, "__args__"): # Assign global names for each of the inner type variables. args = [type_repr(arg) for arg in o.__args__] if len(args) == 0: # Bare type, such as `typing.Tuple` with no subscript # This code-path used in Python < 3.9 return origin_typename return f'{origin_typename}[{",".join(args)}]' else: # Bare type, such as `typing.Tuple` with no subscript # This code-path used in Python 3.9+ return origin_typename # Common case: this is a regular module name like 'foo.bar.baz' return add_global(typename, o) def _format_args(args: Tuple[Argument, ...], kwargs: Dict[str, Argument]) -> str: def _get_repr(arg): # Handle NamedTuples (if it has `_fields`) via add_global. 
if isinstance(arg, tuple) and hasattr(arg, "_fields"): qualified_name = _get_qualified_name(type(arg)) global_name = add_global(qualified_name, type(arg)) return f"{global_name}{repr(tuple(arg))}" return repr(arg) args_s = ", ".join(_get_repr(a) for a in args) kwargs_s = ", ".join(f"{k} = {_get_repr(v)}" for k, v in kwargs.items()) if args_s and kwargs_s: return f"{args_s}, {kwargs_s}" return args_s or kwargs_s # Run through reverse nodes and record the first instance of a use # of a given node. This represents the *last* use of the node in the # execution order of the program, which we will use to free unused # values node_to_last_use: Dict[Node, Node] = {} user_to_last_uses: Dict[Node, List[Node]] = {} def register_last_uses(n: Node, user: Node): if n not in node_to_last_use: node_to_last_use[n] = user user_to_last_uses.setdefault(user, []).append(n) for node in reversed(nodes): map_arg(node.args, lambda n: register_last_uses(n, node)) map_arg(node.kwargs, lambda n: register_last_uses(n, node)) # NOTE: we add a variable to distinguish body and ckpt_func def delete_unused_values(user: Node, body): """ Delete values after their last use. This ensures that values that are not used in the remainder of the code are freed and the memory usage of the code is optimal. """ if user.op == "placeholder": return if user.op == "output": body.append("\n") return nodes_to_delete = user_to_last_uses.get(user, []) if len(nodes_to_delete): to_delete_str = " = ".join([repr(n) for n in nodes_to_delete] + ["None"]) body.append(f"; {to_delete_str}\n") else: body.append("\n") # NOTE: we add a variable to distinguish body and ckpt_func def emit_node(node: Node, body): maybe_type_annotation = "" if node.type is None else f" : {type_repr(node.type)}" if node.op == "placeholder": assert isinstance(node.target, str) maybe_default_arg = "" if not node.args else f" = {repr(node.args[0])}" free_vars.append(f"{node.target}{maybe_type_annotation}{maybe_default_arg}") raw_name = node.target.replace("*", "") if raw_name != repr(node): body.append(f"{repr(node)} = {raw_name}\n") return elif node.op == "call_method": assert isinstance(node.target, str) body.append( f"{repr(node)}{maybe_type_annotation} = {_format_target(repr(node.args[0]), node.target)}" f"({_format_args(node.args[1:], node.kwargs)})" ) return elif node.op == "call_function": assert callable(node.target) # pretty print operators if node.target.__module__ == "_operator" and node.target.__name__ in magic_methods: assert isinstance(node.args, tuple) body.append( f"{repr(node)}{maybe_type_annotation} = " f"{magic_methods[node.target.__name__].format(*(repr(a) for a in node.args))}" ) return # pretty print inplace operators; required for jit.script to work properly # not currently supported in normal FX graphs, but generated by torchdynamo if node.target.__module__ == "_operator" and node.target.__name__ in inplace_methods: body.append( f"{inplace_methods[node.target.__name__].format(*(repr(a) for a in node.args))}; " f"{repr(node)}{maybe_type_annotation} = {repr(node.args[0])}" ) return qualified_name = _get_qualified_name(node.target) global_name = add_global(qualified_name, node.target) # special case for getattr: node.args could be 2-argument or 3-argument # 2-argument: attribute access; 3-argument: fall through to attrib function call with default value if ( global_name == "getattr" and isinstance(node.args, tuple) and isinstance(node.args[1], str) and node.args[1].isidentifier() and len(node.args) == 2 ): body.append( f"{repr(node)}{maybe_type_annotation} = 
{_format_target(repr(node.args[0]), node.args[1])}" ) return body.append( f"{repr(node)}{maybe_type_annotation} = {global_name}({_format_args(node.args, node.kwargs)})" ) if node.meta.get("is_wrapped", False): wrapped_fns.setdefault(global_name) return elif node.op == "call_module": assert isinstance(node.target, str) body.append( f"{repr(node)}{maybe_type_annotation} = " f"{_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})" ) return elif node.op == "get_attr": assert isinstance(node.target, str) body.append(f"{repr(node)}{maybe_type_annotation} = {_format_target(root_module, node.target)}") return elif node.op == "output": if node.type is not None: maybe_return_annotation[0] = f" -> {type_repr(node.type)}" body.append(self.generate_output(node.args[0])) return raise NotImplementedError(f"node: {node.op} {node.target}") # Modified for activation checkpointing ckpt_func = [] emit_code_with_activation_checkpoint(body, ckpt_func, nodes, emit_node, delete_unused_values) if len(body) == 0: # If the Graph has no non-placeholder nodes, no lines for the body # have been emitted. To continue to have valid Python code, emit a # single pass statement body.append("pass\n") if len(wrapped_fns) > 0: wrap_name = add_global("wrap", torch.fx.wrap) wrap_stmts = "\n".join([f'{wrap_name}("{name}")' for name in wrapped_fns]) else: wrap_stmts = "" if self._body_transformer: body = self._body_transformer(body) for name, value in self.additional_globals(): add_global(name, value) prologue = self.gen_fn_def(free_vars, maybe_return_annotation[0]) prologue = "".join(ckpt_func) + prologue prologue = prologue code = "".join(body) code = "\n".join(" " + line for line in code.split("\n")) fn_code = f""" {wrap_stmts} {prologue} {code}""" return PythonCode(fn_code, globals_, {})
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
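To make the checkpoint codegen above concrete, here is a sketch of the source text its private helpers emit for one checkpoint region; the label and variable names are hypothetical.

from colossalai._analyzer.fx.codegen import _gen_ckpt_fn_def, _gen_ckpt_output, _gen_ckpt_usage

print(_gen_ckpt_fn_def("0", ["x"]))
# -> def checkpoint_0(self, x):
print(_gen_ckpt_output(["linear"]))
# -> return linear
print(_gen_ckpt_usage("0", ["x"], ["linear"], use_reentrant=False))
# -> linear = torch.utils.checkpoint.checkpoint(self.checkpoint_0, x, use_reentrant=False)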
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/symbolic_profile.py
colossalai/_analyzer/fx/symbolic_profile.py
from torch.fx import GraphModule

from .passes import ShapeProp, graph_profile_pass, shape_prop_pass
from .passes.graph_profile import FlopProfiler


def register_flop_count_impl(func):
    def wrapper(impl):
        FlopProfiler._custom_flop_count_impl[func] = impl
        return impl

    return wrapper


def register_shape_impl(func):
    def wrapper(impl):
        ShapeProp._custom_dispatch_func[func] = impl
        return impl

    return wrapper


def symbolic_profile(module: GraphModule, *args, verbose=False) -> GraphModule:
    """Symbolically profile a model with sample inputs.

    Args:
        module (GraphModule): The module to be profiled
        args (Tuple): The sample inputs
        verbose (bool): Whether to print the profiling result

    Returns:
        GraphModule: The profiled module
    """
    module = shape_prop_pass(module, *args)
    module = graph_profile_pass(module, *args, verbose=verbose)
    return module
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
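A minimal usage sketch for symbolic_profile; the toy module is hypothetical and it is assumed that plain tensors are acceptable both as meta_args and as sample inputs.

import torch

from colossalai._analyzer.fx import symbolic_profile, symbolic_trace


class Tiny(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = torch.nn.Linear(16, 32)

    def forward(self, x):
        return self.fc(x).relu()


gm = symbolic_trace(Tiny(), meta_args={"x": torch.randn(4, 16)})
gm = symbolic_profile(gm, torch.randn(4, 16), verbose=True)
# After profiling, each node carries a MetaInfo record with fwd_flop / bwd_flop
# and activation sizes under node.meta["info"].
for node in gm.graph.nodes:
    info = node.meta.get("info")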
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/__init__.py
colossalai/_analyzer/fx/__init__.py
from .node_util import MetaInfo
from .symbolic_profile import symbolic_profile
from .tracer.symbolic_trace import symbolic_trace
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/graph_module.py
colossalai/_analyzer/fx/graph_module.py
import linecache import os import sys import traceback import warnings from pathlib import Path from typing import Any, Dict, Optional, Union import torch import torch.fx import torch.nn as nn from torch.fx.graph import PythonCode try: from torch.fx.graph import _PyTreeCodeGen SUPPORT_PT_CODEGEN = True except ImportError: SUPPORT_PT_CODEGEN = False from torch.fx.graph_module import _exec_with_source, _forward_from_src from torch.nn.modules.module import _addindent # This is a copy of torch.fx.graph_module._WrappedCall. # It should be removed when we stop supporting torch < 1.12.0. class _WrappedCall: def __init__(self, cls, cls_call): self.cls = cls self.cls_call = cls_call # Previously, if an error occurred when valid # symbolically-traced code was run with an invalid input, the # user would see the source of the error as coming from # `File "<eval_with_key_N">`, where N is some number. We use # this function to generate a more informative error message. We # return the traceback itself, a message explaining that the # error occurred in a traced Module's generated forward # function, and five lines of context surrounding the faulty # line @staticmethod def _generate_error_message(frame_summary: traceback.FrameSummary) -> str: # auxiliary variables (for readability) err_lineno = frame_summary.lineno assert err_lineno is not None line = frame_summary.line assert line is not None err_line_len = len(line) all_src_lines = linecache.getlines(frame_summary.filename) # constituent substrings of the error message tb_repr = traceback.format_exc() custom_msg = ( "Call using an FX-traced Module, " f"line {err_lineno} of the traced Module's " "generated forward function:" ) before_err = "".join(all_src_lines[err_lineno - 2 : err_lineno]) marker = "~" * err_line_len + "~~~ <--- HERE" err_and_after_err = "\n".join(all_src_lines[err_lineno : err_lineno + 2]) # joined message return "\n".join([tb_repr, custom_msg, before_err, marker, err_and_after_err]) def __call__(self, obj, *args, **kwargs): try: if self.cls_call is not None: return self.cls_call(obj, *args, **kwargs) else: return super(self.cls, obj).__call__(*args, **kwargs) # type: ignore[misc] except Exception as e: assert e.__traceback__ topmost_framesummary: traceback.FrameSummary = traceback.StackSummary.extract( traceback.walk_tb(e.__traceback__) )[ -1 ] # type: ignore[arg-type] if "eval_with_key" in topmost_framesummary.filename: print(_WrappedCall._generate_error_message(topmost_framesummary), file=sys.stderr) raise e.with_traceback(None) else: raise e class ColoGraphModule(torch.fx.GraphModule): """ ColoGraphGraphModule is an nn.Module generated from an fx.Graph. ColoGraphmodule has a ``graph`` attribute, as well as ``code`` and ``forward`` attributes generated from that ``graph``. The difference between ``ColoGraphModule`` and ``torch.fx.GraphModule`` is that ``ColoGraphModule`` has a ``bind()`` function to bind customized functions (i.e. activation checkpoint) to ``code`` of ``nn.Module``. If you want to use specific features in Colossal-AI that are not supported by ``torch.fx.GraphModule``, you can use ``ColoGraphModule`` instead. ``colossalai.fx.symbolic_trace()`` will return a ``ColoGraphModule`` as default. .. warning:: When ``graph`` is reassigned, ``code`` and ``forward`` will be automatically regenerated. However, if you edit the contents of the ``graph`` without reassigning the ``graph`` attribute itself, you must call ``recompile()`` to update the generated code. 
""" def __init__( self, root: Union[torch.nn.Module, Dict[str, Any]], graph: torch.fx.Graph, class_name: str = "GraphModule" ): super().__init__(root, graph, class_name) def bind(self, ckpt_def, globals): """Bind function needed for correctly execute ``GraphModule.forward()`` We need to bind checkpoint functions to ``ColoGraphModule`` so that we could correctly execute ``GraphModule.forward()`` Args: ckpt_def (List[str]): definition before the forward function globals (Dict[str, Any]): global variables """ ckpt_code = "\n".join(ckpt_def) globals_copy = globals.copy() _exec_with_source(ckpt_code, globals_copy) func_list = [func for func in globals_copy.keys() if "checkpoint" in func or "pack" in func] for func in func_list: tmp_func = globals_copy[func] setattr(self, func, tmp_func.__get__(self, self.__class__)) del globals_copy[func] def recompile(self) -> PythonCode: """ Recompile this GraphModule from its ``graph`` attribute. This should be called after editing the contained ``graph``, otherwise the generated code of this ``GraphModule`` will be out of date. """ if SUPPORT_PT_CODEGEN and isinstance(self._graph._codegen, _PyTreeCodeGen): self._in_spec = self._graph._codegen.pytree_info.in_spec self._out_spec = self._graph._codegen.pytree_info.out_spec python_code = self._graph.python_code(root_module="self") self._code = python_code.src # To split ckpt functions code and forward code _code_list = self._code.split("\n") _fwd_def = [item for item in _code_list if "def forward" in item][0] _fwd_idx = _code_list.index(_fwd_def) ckpt_def = _code_list[:_fwd_idx] self._code = "\n".join(_code_list[_fwd_idx:]) self.bind(ckpt_def, python_code.globals) cls = type(self) cls.forward = _forward_from_src(self._code, python_code.globals) # Determine whether this class explicitly defines a __call__ implementation # to wrap. If it does, save it in order to have wrapped_call invoke it. # If it does not, wrapped_call can use a dynamic call to super() instead. # In most cases, super().__call__ should be torch.nn.Module.__call__. # We do not want to hold a reference to Module.__call__ here; doing so will # bypass patching of torch.nn.Module.__call__ done while symbolic tracing. 
cls_call = cls.__call__ if "__call__" in vars(cls) else None if "_wrapped_call" not in vars(cls): cls._wrapped_call = _WrappedCall(cls, cls_call) # type: ignore[attr-defined] def call_wrapped(self, *args, **kwargs): return self._wrapped_call(self, *args, **kwargs) cls.__call__ = call_wrapped # reset self._code to original src, otherwise to_folder will be wrong self._code = python_code.src return python_code def to_folder(self, folder: Union[str, os.PathLike], module_name: str = "FxModule"): """Dumps out module to ``folder`` with ``module_name`` so that it can be imported with ``from <folder> import <module_name>`` Args: folder (Union[str, os.PathLike]): The folder to write the code out to module_name (str): Top-level name to use for the ``Module`` while writing out the code """ folder = Path(folder) Path(folder).mkdir(exist_ok=True) torch.save(self.state_dict(), folder / "state_dict.pt") tab = " " * 4 # we add import colossalai here model_str = f""" import torch from torch.nn import * import colossalai class {module_name}(torch.nn.Module): def __init__(self): super().__init__() """ def _gen_model_repr(module_name: str, module: torch.nn.Module) -> Optional[str]: safe_reprs = [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d] if type(module) in safe_reprs: return f"{module.__repr__()}" else: return None blobified_modules = [] for module_name, module in self.named_children(): module_str = _gen_model_repr(module_name, module) if module_str is None: module_file = folder / f"{module_name}.pt" torch.save(module, module_file) blobified_modules.append(module_name) module_repr = module.__repr__().replace("\r", " ").replace("\n", " ") module_str = f"torch.load(r'{module_file}') # {module_repr}" model_str += f"{tab*2}self.{module_name} = {module_str}\n" for buffer_name, buffer in self._buffers.items(): if buffer is None: continue model_str += f"{tab*2}self.register_buffer('{buffer_name}', torch.empty({list(buffer.shape)}, dtype={buffer.dtype}))\n" for param_name, param in self._parameters.items(): if param is None: continue model_str += f"{tab*2}self.{param_name} = torch.nn.Parameter(torch.empty({list(param.shape)}, dtype={param.dtype}))\n" model_str += f"{tab*2}self.load_state_dict(torch.load(r'{folder}/state_dict.pt'))\n" model_str += f"{_addindent(self.code, 4)}\n" module_file = folder / "module.py" module_file.write_text(model_str) init_file = folder / "__init__.py" init_file.write_text("from .module import *") if len(blobified_modules) > 0: warnings.warn( "Was not able to save the following children modules as reprs -" f"saved as pickled files instead: {blobified_modules}" )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
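A brief sketch of the recompile/bind flow described above, assuming gm is a ColoGraphModule obtained earlier from symbolic_trace with trace_act_ckpt=True; the graph-editing step is a placeholder.

# gm: ColoGraphModule produced by symbolic_trace(..., trace_act_ckpt=True)
# ... edit gm.graph here (insert or erase nodes) ...
gm.recompile()   # regenerates gm.code; checkpoint_* definitions are split out
                 # of the forward source and re-attached as methods via bind()
print(gm.code)   # the forward() source; the checkpoint helpers live as bound methods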
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/tracer/tracer.py
colossalai/_analyzer/fx/tracer/tracer.py
import functools import inspect from contextlib import contextmanager from typing import Any, Callable, Dict, Iterable, Optional, Set, Tuple, Type, Union import torch import torch.nn as nn from torch.fx import Graph, Node, Proxy, Tracer from torch.utils._pytree import tree_map from colossalai._analyzer._subclasses import _TensorPropertyMethod, _TorchFactoryMethod from ..node_util import MetaInfo from .proxy import ColoProxy Target = Union[Callable[..., Any], str] def _truncate_suffix(s: str): import re # FIXME: don't know why but torch.fx always gets a suffix like '_1' in the name return re.sub(r"_\d+$", "", s) def register_tracer_impl(func: Callable[..., Any], name: Optional[str] = "_custom_impl"): def wrapper(impl): assert hasattr(ColoTracer, name), f"Cannot register {func.__name__} in ColoTracer.{name}" getattr(ColoTracer, name)[func] = impl return impl return wrapper def register_leaf_module_impl(module: nn.Module): def wrapper(impl): ColoTracer._custom_leaf_module_impl[module] = impl return impl return wrapper def register_leaf_module(module: nn.Module): ColoTracer._custom_leaf_module.add(module) def register_non_leaf_module(module: nn.Module): ColoTracer._custom_non_leaf_module.add(module) class ColoTracer(Tracer): _custom_leaf_module: Set[Type[nn.Module]] = set() _custom_leaf_module_impl: Dict[Type[nn.Module], Callable[..., Any]] = {} _custom_non_leaf_module: Set[Type[nn.Module]] = set() _custom_impl: Dict[Callable[..., Any], Callable[..., Any]] = {} _bias_addition_impl: Dict[Callable[..., Any], Callable[..., Any]] = {} _bias_addition_module = [ torch.nn.Linear, torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d, ] def __init__(self, trace_act_ckpt: bool = False, bias_addition_split: bool = False, *args, **kwargs): super().__init__(*args, **kwargs) self.disable_module_getattr = False self.proxy_buffer_attributes = True # whether the tracer will record the usage of torch.utils.checkpoint self.trace_act_ckpt = trace_act_ckpt self.ckpt_regions = [] self.ckpt_idx = 0 self.mod_dir = "" # whether the tracer should split the bias_add ops into two ops self.bias_addition_split = bias_addition_split def is_leaf_module(self, m: nn.Module, module_qualified_name: str) -> bool: # if bias-addiction split is enabled, and module has bias, then it is not a leaf module # we will enter the module and split the bias-addition ops if self.bias_addition_split and type(m) in self._bias_addition_module and m.bias is not None: return False # user can specify which modules are leaf modules and which are not return type(m) not in self._custom_non_leaf_module and ( type(m) in self._custom_leaf_module or super().is_leaf_module(m, module_qualified_name) ) def call_module( self, m: torch.nn.Module, forward: Callable[..., Any], args: Tuple[Any, ...], kwargs: Dict[str, Any] ) -> Any: curr_dir = self.mod_dir self.mod_dir = "self." 
+ self.path_of_module(m) rst = super().call_module(m, forward, args, kwargs) self.mod_dir = curr_dir return rst def proxy(self, node: Node) -> "ColoProxy": return ColoProxy(node, self) def create_proxy( self, kind: str, target: Target, args: Tuple[Any, ...], kwargs: Dict[str, Any], name: Optional[str] = None, type_expr: Optional[Any] = None, proxy_factory_fn: Callable[[Node], "Proxy"] = None, ): proxy: ColoProxy = super().create_proxy(kind, target, args, kwargs, name, type_expr, proxy_factory_fn) unwrap_fn = lambda p: p.meta_data if isinstance(p, ColoProxy) else p if kind == "placeholder": proxy.meta_data = ( self.meta_args[target] if target in self.meta_args else self.concrete_args.get(_truncate_suffix(target), None) ) elif kind == "get_attr": self.disable_module_getattr = True try: attr_itr = self.root atoms = target.split(".") for atom in atoms: attr_itr = getattr(attr_itr, atom) proxy.meta_data = attr_itr finally: self.disable_module_getattr = False elif kind == "call_function": proxy.meta_data = target(*tree_map(unwrap_fn, args), **tree_map(unwrap_fn, kwargs)) elif kind == "call_method": self.disable_module_getattr = True try: if target == "__call__": proxy.meta_data = unwrap_fn(args[0])(*tree_map(unwrap_fn, args[1:]), **tree_map(unwrap_fn, kwargs)) else: if target not in _TensorPropertyMethod: proxy._meta_data = getattr(unwrap_fn(args[0]), target)( *tree_map(unwrap_fn, args[1:]), **tree_map(unwrap_fn, kwargs) ) finally: self.disable_module_getattr = False elif kind == "call_module": mod = self.root.get_submodule(target) self.disable_module_getattr = True try: args = tree_map(unwrap_fn, args) kwargs = tree_map(unwrap_fn, kwargs) if type(mod) in self._custom_leaf_module: target = self._custom_leaf_module_impl[type(mod)] proxy.meta_data = target(mod, *args, **kwargs) else: proxy.meta_data = mod.forward(*args, **kwargs) finally: self.disable_module_getattr = False return proxy def create_node(self, *args, **kwargs) -> Node: node = super().create_node(*args, **kwargs) n_info = MetaInfo(node, mod_dir=self.mod_dir, activation_checkpoint=tuple(self.ckpt_regions)) return node def trace( self, root: torch.nn.Module, concrete_args: Optional[Dict[str, torch.Tensor]] = None, meta_args: Optional[Dict[str, torch.Tensor]] = None, ) -> Graph: if meta_args is None: meta_args = {} if concrete_args is None: concrete_args = {} # check concrete and meta args have valid names sig = inspect.signature(root.forward) sig_names = set(sig.parameters.keys()) meta_arg_names = set(meta_args.keys()) concrete_arg_names = set(concrete_args.keys()) non_concrete_arg_names = sig_names - concrete_arg_names # update concrete args with default values for k, v in sig.parameters.items(): if k in sig_names - meta_arg_names and k not in concrete_args and v.default is not inspect.Parameter.empty: concrete_args[k] = v.default def _check_arg_name_valid(names: Iterable[str]): for name in names: if name not in sig_names: raise ValueError(f"Argument {name} is not in the signature of {root.__class__.__name__}.forward") _check_arg_name_valid(meta_arg_names) _check_arg_name_valid(concrete_arg_names) self.concrete_args = concrete_args self.meta_args = meta_args with self._torch_factory_override(), self._tracer_override(), torch.no_grad(): self.mod_dir = "self" self.graph = super().trace(root, concrete_args=concrete_args) self.mod_dir = "" self.graph.lint() for node in self.graph.nodes: if node.op == "placeholder": # Removing default values for inputs as the forward pass will fail with them. 
if node.target in non_concrete_arg_names: node.args = () # Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor]. # It cannot infer on the attributes and methods the input should have, and fails. node.type = torch.Tensor # It is a concrete arg so it is not used and should be removed. else: if hasattr(torch.fx._symbolic_trace, "_assert_is_none"): # Newer versions of torch.fx emit an assert statement # for concrete arguments; delete those before we delete # the concrete arg. to_delete = [] for user in node.users: if user.target == torch.fx._symbolic_trace._assert_is_none: to_delete.append(user) for user in to_delete: self.graph.erase_node(user) self.graph.erase_node(node) # TODO: solves GraphModule creation. # Without this, return type annotation "Tuple" is causing code execution failure. if node.op == "output": node.type = None return self.graph @contextmanager def _tracer_override(self): # override the tracer to support custom modules and checkpointing if self.trace_act_ckpt: orig_ckpt_func_apply = torch.utils.checkpoint.CheckpointFunction.apply orig_ckpt_func_without_reentrant = torch.utils.checkpoint._checkpoint_without_reentrant_generator def checkpoint(run_function, preserve_rng_state=False, *args): self.ckpt_regions.append(self.ckpt_idx) out = run_function(*args) self.ckpt_idx = self.ckpt_regions.pop(-1) + 1 return out # override the checkpoint function torch.utils.checkpoint.CheckpointFunction.apply = checkpoint torch.utils.checkpoint._checkpoint_without_reentrant = checkpoint # override the custom functions ColoProxy._func_dispatch.update({k: v for k, v in self._custom_impl.items()}) # override the bias addition functions if self.bias_addition_split: ColoProxy._func_dispatch.update({k: v for k, v in self._bias_addition_impl.items()}) yield if self.trace_act_ckpt: # recover the checkpoint function upon exit torch.utils.checkpoint.CheckpointFunction.apply = orig_ckpt_func_apply torch.utils.checkpoint._checkpoint_reentrant = orig_ckpt_func_without_reentrant ColoProxy._func_dispatch = {} @contextmanager def _torch_factory_override(self): # override the torch factory functions to create a proxy when the method # is called during ``symbolic_trace()``. def wrap_factory_method(target): @functools.wraps(target) def wrapper(*args, **kwargs): is_proxy = any(isinstance(p, ColoProxy) for p in args) | any( isinstance(p, ColoProxy) for p in kwargs.values() ) if is_proxy: # if the arg is a proxy, then need to record this function called on this proxy # e.g. torch.ones(size) where size is an input proxy self.disable_module_getattr = True try: proxy = self.create_proxy("call_function", target, args, kwargs) finally: self.disable_module_getattr = False return proxy else: return target(*args, **kwargs) return wrapper, target overrides = { target: wrap_factory_method(getattr(torch, target)) for target in _TorchFactoryMethod if callable(getattr(torch, target)) } for name, (wrapper, orig) in overrides.items(): setattr(torch, name, wrapper) yield # recover the torch factory functions upon exit for name, (wrapper, orig) in overrides.items(): setattr(torch, name, orig) def _post_check(self, non_concrete_arg_names: Set[str]): # This is necessary because concrete args are added as input to the traced module since # https://github.com/pytorch/pytorch/pull/55888. for node in self.graph.nodes: if node.op == "placeholder": # Removing default values for inputs as the forward pass will fail with them. 
if node.target in non_concrete_arg_names: node.args = () # Without this, torch.jit.script fails because the inputs type is Optional[torch.Tensor]. # It cannot infer on the attributes and methods the input should have, and fails. node.type = torch.Tensor # It is a concrete arg so it is not used and should be removed. else: if hasattr(torch.fx._symbolic_trace, "_assert_is_none"): # Newer versions of torch.fx emit an assert statement # for concrete arguments; delete those before we delete # the concrete arg. to_delete = [] for user in node.users: if user.target == torch.fx._symbolic_trace._assert_is_none: to_delete.append(user) for user in to_delete: self.graph.erase_node(user) self.graph.erase_node(node) if node.op == "output": node.type = None self.graph.lint() def getattr(self, attr, attr_val, parameter_proxy_cache): return self._module_getattr(attr, attr_val, parameter_proxy_cache) def _module_getattr(self, attr, attr_val, parameter_proxy_cache): if getattr(self, "disable_module_getattr", False): return attr_val def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache): for n, p in collection_to_search: if attr_val is p: if n not in parameter_proxy_cache: kwargs = {} if "proxy_factory_fn" in inspect.signature(self.create_proxy).parameters: kwargs["proxy_factory_fn"] = ( None if not self.param_shapes_constant else lambda node: ColoProxy(self, node, n, attr_val) ) val_proxy = self.create_proxy("get_attr", n, (), {}, **kwargs) # type: ignore[arg-type] parameter_proxy_cache[n] = val_proxy return parameter_proxy_cache[n] return None if self.proxy_buffer_attributes and isinstance(attr_val, torch.Tensor): maybe_buffer_proxy = maybe_get_proxy_for_attr(attr_val, self.root.named_buffers(), parameter_proxy_cache) if maybe_buffer_proxy is not None: return maybe_buffer_proxy if isinstance(attr_val, torch.nn.Parameter): maybe_parameter_proxy = maybe_get_proxy_for_attr( attr_val, self.root.named_parameters(), parameter_proxy_cache ) if maybe_parameter_proxy is not None: return maybe_parameter_proxy return attr_val
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
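The tracer above is normally driven through ``symbolic_trace()`` (next record), but it can also be used directly. A minimal sketch, assuming the module path ``colossalai._analyzer.fx.tracer.tracer`` and a toy CPU model; ``meta_args`` values are pre-wrapped in ``MetaTensor``, mirroring what ``symbolic_trace()`` does internally:

```python
# Minimal sketch: driving ColoTracer directly (symbolic_trace() is the
# higher-level entry point and also handles device placement / codegen).
# The import path and the toy model below are assumptions for illustration.
import torch
from torch.utils._pytree import tree_map

from colossalai._analyzer._subclasses import MetaTensor
from colossalai._analyzer.fx.tracer.tracer import ColoTracer

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())

# meta_args are wrapped in MetaTensor so tracing never touches real data
meta_args = tree_map(
    lambda t: MetaTensor(t) if isinstance(t, torch.Tensor) else t,
    {"input": torch.randn(2, 4)},
)

tracer = ColoTracer(trace_act_ckpt=False, bias_addition_split=False)
graph = tracer.trace(model, meta_args=meta_args)
print(graph)  # an fx.Graph whose nodes carry MetaInfo (mod_dir, ckpt regions)
```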
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/tracer/symbolic_trace.py
colossalai/_analyzer/fx/tracer/symbolic_trace.py
from typing import Any, Callable, Dict, Optional, Union import torch from torch.fx import Tracer from torch.utils._pytree import tree_map from colossalai._analyzer._subclasses import MetaTensor try: from ..codegen import ActivationCheckpointCodeGen SUPPORT_ACTIVATION = True except: SUPPORT_ACTIVATION = False from ..graph_module import ColoGraphModule from .tracer import ColoTracer def _default_device(): return torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") def _current_device(module: torch.nn.Module): try: return next(module.parameters()).device except: return _default_device() def symbolic_trace( root: Union[torch.nn.Module, Callable[..., Any]], concrete_args: Optional[Dict[str, Any]] = None, meta_args: Optional[Dict[str, Any]] = None, trace_act_ckpt: bool = False, bias_addition_split: bool = False, ) -> ColoGraphModule: """ Traces a ``torch.nn.Module`` or a function and returns a ``GraphModule`` with ``Node``s and ``MetaInfo`` attached to the ``Node``s. Can be used to trace the usage of ``torch.utils.checkpoint`` and the path of module (https://github.com/pytorch/examples/blob/main/fx/module_tracer.py). This tracer is able to trace basic control flow and for loops. It will split the bias addition into two parts if ``bias_addition_split`` is set to be ``True``. (See ./bias_addition.py for more details). Examples: 1. Tracing a ``torch.nn.Module`` with control flow. .. code-block:: python class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(2, 2) def forward(self, x): if x.size(0) > 1: x = x.sum(dim=0) return self.linear(x) traced = symbolic_trace(MyModule(), meta_args={'x': torch.randn(1, 2, 2)}) # traced code like: # def forward(self, x): # linear_1 = self.linear(x) # return linear_1 traced = symbolic_trace(MyModule(), meta_args={'x': torch.randn(2, 2, 2)}) # traced code like: # def forward(self, x): # sum = x.sum(dim=0); x = None # linear = self.linear(sum); sum = None # return linear 2. Tracing a ``torch.nn.Module`` with ``torch.utils.checkpoint``. .. code-block:: python class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(2, 2) def forward(self, x): def custom_forward(x): return self.linear(x) return torch.utils.checkpoint.checkpoint(custom_forward, x) traced = symbolic_trace(MyModule(), meta_args={'x': torch.randn(1, 2, 2)}, trace_act_ckpt=True) # traced code like: # def checkpoint_0(self, x): # linear = self.linear(x); x = None # return linear # # def forward(self, x): # linear = torch.utils.checkpoint.checkpoint(checkpoint_0, x); x = None # return linear 3. Tracing a ``torch.nn.Module`` with ``bias_addition_split``. .. code-block:: python class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(2, 2, bias=True) def forward(self, x): return self.linear(x) traced = symbolic_trace(MyModule(), meta_args={'x': torch.randn(1, 2, 2)}, bias_addition_split=True) # traced code like: # def forward(self, x): # linear_bias = self.linear.bias # linear_weight = self.linear.weight # linear = torch._C._nn.linear(x, linear_weight); x = linear_weight = None # add = linear + linear_bias; linear = linear_bias = None # return add Args: root (Union[torch.nn.Module, Callable[..., Any]]): The ``torch.nn.Module`` or function to be traced. concrete_args (Optional[Dict[str, Any]], optional): Concrete arguments to be passed to the ``root``. Defaults to {}. 
meta_args (Optional[Dict[str, Any]], optional): Meta arguments to be passed to the ``root``. Mostly used for tracing control flow. Defaults to {}. trace_act_ckpt (bool, optional): Whether to trace the usage of ``torch.utils.checkpoint``. Defaults to False. bias_addition_split (bool, optional): Whether to split the bias addition into two parts. Defaults to False. Returns: ColoGraphModule: A traced ``GraphModule`` that is ready for activation checkpoint ``CodeGen``. Remarks: This part of ``symbolic_trace()`` is maintained by Colossal-AI team. If you encountered any unexpected error during tracing, feel free to raise an issue on Colossal-AI GitHub repo. We welcome any feedback and contributions to enhance the extensibility of Colossal-AI. """ if meta_args: device, orig_device = _default_device(), _current_device(root) wrap_fn = lambda elem: MetaTensor(elem, device=device) if isinstance(elem, torch.Tensor) else elem graph = ColoTracer(trace_act_ckpt=trace_act_ckpt, bias_addition_split=bias_addition_split).trace( root.to(device), concrete_args=concrete_args, meta_args=tree_map(wrap_fn, meta_args) ) if trace_act_ckpt and SUPPORT_ACTIVATION: graph.set_codegen(ActivationCheckpointCodeGen()) root.to(orig_device) else: graph = Tracer().trace(root, concrete_args=concrete_args) name = root.__class__.__name__ if isinstance(root, torch.nn.Module) else root.__name__ return ColoGraphModule(root, graph, name)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
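A short usage sketch for ``symbolic_trace()`` on a small module. The direct module import is assumed from the file path above (a package-level re-export may also exist but is not relied on), and the model is illustrative:

```python
# Hedged usage sketch; the direct module import is assumed from the file path.
import torch
from colossalai._analyzer.fx.tracer.symbolic_trace import symbolic_trace

class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(8, 8)

    def forward(self, x):
        return torch.relu(self.linear(x))

gm = symbolic_trace(TinyNet(), meta_args={"x": torch.randn(2, 8)})
print(gm.code)  # regenerated forward(); nodes carry MetaInfo for later passes
```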
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/tracer/__init__.py
colossalai/_analyzer/fx/tracer/__init__.py
from .bias_addition import *
from .custom_leaf_module import *
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/tracer/bias_addition.py
colossalai/_analyzer/fx/tracer/bias_addition.py
""" If FX.Graph is traced for auto-parallel module, some extra node will be added during graph construction to deal with the compatibility between bias-addition and all-reduce. """ import torch import torch.nn.functional as F from torch.nn.modules.utils import _pair, _single, _triple from .tracer import register_tracer_impl __all__ = [] @register_tracer_impl(F.linear, name="_bias_addition_impl") def linear_impl(input, weight, bias=None): if bias is None: return F.linear(input, weight) else: return F.linear(input, weight) + bias @register_tracer_impl(F.conv1d, name="_bias_addition_impl") def conv1d_impl(input, weight, bias=None, stride=_single(1), padding=_single(0), dilation=_single(1), groups=1): if bias is None: return F.conv1d(input, weight, stride=stride, padding=padding, dilation=dilation, groups=groups) else: return F.conv1d(input, weight, stride=stride, padding=padding, dilation=dilation, groups=groups) + bias.reshape( (-1, 1) ) @register_tracer_impl(F.conv2d, name="_bias_addition_impl") def conv2d_impl(input, weight, bias=None, stride=_pair(1), padding=_pair(0), dilation=_pair(1), groups=1): if bias is None: return F.conv2d(input, weight, stride=stride, padding=padding, dilation=dilation, groups=groups) else: return F.conv2d(input, weight, stride=stride, padding=padding, dilation=dilation, groups=groups) + bias.reshape( (-1, 1, 1) ) @register_tracer_impl(F.conv3d, name="_bias_addition_impl") def conv3d_impl(input, weight, bias=None, stride=_triple(1), padding=_triple(0), dilation=_triple(1), groups=1): if bias is None: return F.conv3d(input, weight, stride=stride, padding=padding, dilation=dilation, groups=groups) else: return F.conv3d(input, weight, stride=stride, padding=padding, dilation=dilation, groups=groups) + bias.reshape( (-1, 1, 1, 1) ) @register_tracer_impl(F.conv_transpose1d, name="_bias_addition_impl") def conv_transpose1d_impl( input, weight, bias=None, stride=_single(1), padding=_single(0), output_padding=_single(0), groups=1, dilation=_single(1), ): if bias is None: return F.conv_transpose1d( input, weight, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation, ) else: return F.conv_transpose1d( input, weight, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation, ) + bias.reshape((-1, 1)) @register_tracer_impl(F.conv_transpose2d, name="_bias_addition_impl") def conv_transpose2d_impl( input, weight, bias=None, stride=_pair(1), padding=_pair(0), output_padding=_pair(0), groups=1, dilation=_pair(1) ): if bias is None: return F.conv_transpose2d( input, weight, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation, ) else: return F.conv_transpose2d( input, weight, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation, ) + bias.reshape((-1, 1, 1)) @register_tracer_impl(F.conv_transpose3d, name="_bias_addition_impl") def conv_transpose3d_impl( input, weight, bias=None, stride=_triple(1), padding=_triple(0), output_padding=_triple(0), groups=1, dilation=_triple(1), ): if bias is None: return F.conv_transpose3d( input, weight, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation, ) else: return F.conv_transpose3d( input, weight, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation, ) + bias.reshape((-1, 1, 1, 1)) @register_tracer_impl(torch.addmm, name="_bias_addition_impl") @register_tracer_impl(torch.Tensor.addmm, 
name="_bias_addition_impl") def addmm_impl(input, mat1, mat2, beta=1, alpha=1): if alpha != 1 and beta != 1: return F.linear(mat1, mat2.transpose(0, 1)) * alpha + input * beta elif alpha != 1: return F.linear(mat1, mat2.transpose(0, 1)) * alpha + input elif beta != 1: return F.linear(mat1, mat2.transpose(0, 1)) + input * beta else: return F.linear(mat1, mat2.transpose(0, 1)) + input @register_tracer_impl(torch.addbmm, name="_bias_addition_impl") @register_tracer_impl(torch.Tensor.addbmm, name="_bias_addition_impl") def addbmm_impl(input, batch1, batch2, beta=1, alpha=1): if alpha != 1 and beta != 1: return torch.bmm(batch1, batch2.transpose(1, 2)) * alpha + input * beta elif alpha != 1: return torch.bmm(batch1, batch2.transpose(1, 2)) * alpha + input elif beta != 1: return torch.bmm(batch1, batch2.transpose(1, 2)) + input * beta else: return torch.bmm(batch1, batch2.transpose(1, 2)) + input
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
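The ``addmm``/``addbmm`` overrides above rely on the identity ``addmm(input, mat1, mat2, beta, alpha) = alpha * (mat1 @ mat2) + beta * input``, with the matmul expressed through ``F.linear`` so the bias addition appears as a separate traced node. A quick numeric check of that equivalence in plain PyTorch (no tracer involved):

```python
# Numeric check of the decomposition used by addmm_impl above (plain PyTorch).
import torch
import torch.nn.functional as F

inp, mat1, mat2 = torch.randn(3, 5), torch.randn(3, 4), torch.randn(4, 5)

ref = torch.addmm(inp, mat1, mat2, beta=2.0, alpha=3.0)
# F.linear(mat1, mat2.T) == mat1 @ mat2, so the bias add becomes a separate op
split = F.linear(mat1, mat2.transpose(0, 1)) * 3.0 + inp * 2.0

assert torch.allclose(ref, split, atol=1e-5)
```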
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/tracer/custom_leaf_module.py
colossalai/_analyzer/fx/tracer/custom_leaf_module.py
import torch

from .tracer import register_leaf_module, register_leaf_module_impl

try:
    import apex

    register_leaf_module(apex.normalization.FusedLayerNorm)
    register_leaf_module(apex.normalization.FusedRMSNorm)
    register_leaf_module(apex.normalization.MixedFusedLayerNorm)
    register_leaf_module(apex.normalization.MixedFusedRMSNorm)

    @register_leaf_module_impl(apex.normalization.FusedLayerNorm)
    @register_leaf_module_impl(apex.normalization.FusedRMSNorm)
    @register_leaf_module_impl(apex.normalization.MixedFusedLayerNorm)
    @register_leaf_module_impl(apex.normalization.MixedFusedRMSNorm)
    def torch_nn_normalize(self, input: torch.Tensor):
        # check shape
        if isinstance(self, torch.nn.BatchNorm1d):
            assert input.dim() in [2, 3]
        elif isinstance(self, torch.nn.BatchNorm2d):
            assert input.dim() == 4
        elif isinstance(self, torch.nn.BatchNorm3d):
            assert input.dim() == 5

        # normalization maintains the same shape as the input
        return input.clone()

except (ImportError, AttributeError):
    pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
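Following the apex pattern above, a hedged sketch of registering one's own shape-preserving module as a traced leaf: ``register_leaf_module`` keeps the tracer from tracing into the module body, and ``register_leaf_module_impl`` supplies the stand-in used for meta-data propagation. ``MyFusedNorm`` and its meta impl are made up for illustration; the import path mirrors the relative import in this file:

```python
# Illustrative only: MyFusedNorm is a made-up, shape-preserving module.
import torch
from colossalai._analyzer.fx.tracer.tracer import register_leaf_module, register_leaf_module_impl

class MyFusedNorm(torch.nn.Module):
    def forward(self, x):
        return x / (x.norm(dim=-1, keepdim=True) + 1e-6)

# keep the tracer from tracing into the module body
register_leaf_module(MyFusedNorm)

@register_leaf_module_impl(MyFusedNorm)
def my_fused_norm_meta(self, input: torch.Tensor):
    # output has the same shape as the input, so a clone suffices for meta propagation
    return input.clone()
```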
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/tracer/proxy.py
colossalai/_analyzer/fx/tracer/proxy.py
import operator from typing import Any, Callable, Dict, Optional, Union import torch from torch.fx import Node, Proxy from torch.utils._pytree import tree_map from colossalai._analyzer._subclasses import MetaTensor Target = Union[Callable[..., Any], str] class ColoProxy(Proxy): _func_dispatch: Dict[Target, Callable[..., Any]] = {} def __init__(self, *args, data=None, **kwargs): super().__init__(*args, **kwargs) self._meta_data = data @property def meta_data(self): return self._meta_data @meta_data.setter def meta_data(self, args): wrap_fn = lambda x: MetaTensor(x) if isinstance(x, torch.Tensor) else x self._meta_data = tree_map(wrap_fn, args) @classmethod def __torch_function__(cls, orig_method, types, args=(), kwargs=None): kwargs = {} if kwargs is None else kwargs if orig_method in cls._func_dispatch: impl = cls._func_dispatch.pop(orig_method) # avoid recursion proxy = impl(*args, **kwargs) cls._func_dispatch[orig_method] = impl return proxy else: proxy = cls.from_torch_proxy(super().__torch_function__(orig_method, types, args, kwargs)) unwrap_fn = lambda p: p.meta_data if isinstance(p, ColoProxy) else p if proxy.meta_data is None: proxy.meta_data = orig_method(*tree_map(unwrap_fn, args), **tree_map(unwrap_fn, kwargs)) return proxy @classmethod def from_torch_proxy(cls, proxy: Proxy): return cls(proxy.node, proxy.tracer) def __repr__(self): return f"ColoProxy({self.node.name}, meta_data={self.meta_data})" def __len__(self): return len(self.meta_data) def __int__(self): return int(self.meta_data) def __index__(self): try: return int(self.meta_data) except: return torch.zeros(self.meta_data.shape, dtype=torch.bool).numpy().__index__() def __float__(self): return float(self.meta_data) def __bool__(self): return self.meta_data def __getattr__(self, k): return ColoAttribute(self, k, getattr(self._meta_data, k, None)) def __setitem__(self, key, value): proxy = self.tracer.create_proxy("call_function", operator.setitem, (self, key, value), {}) proxy.meta_data = self._meta_data return proxy def __contains__(self, key): if self.node.op == "placeholder": # this is used to handle like # if x in kwargs # we don't handle this case for now return False return super().__contains__(key) def __isinstancecheck__(self, type): return isinstance(self.meta_data, type) class ColoAttribute(ColoProxy): def __init__(self, root, attr: str, data=None): self.root = root self.attr = attr self.tracer = root.tracer self._meta_data = data self._node: Optional[Node] = None @property def node(self): # the node for attributes is added lazily, since most will just be method calls # which do not rely on the getitem call if self._node is None: self._node = self.tracer.create_proxy("call_function", getattr, (self.root, self.attr), {}).node return self._node def __call__(self, *args, **kwargs): return self.tracer.create_proxy("call_method", self.attr, (self.root,) + args, kwargs) def __repr__(self): return f"ColoAttribute({self.node.name}, attr={self.attr})"
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
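``ColoProxy.__torch_function__`` avoids infinite recursion by popping the registered impl before calling it and re-inserting it afterwards, so any torch call the impl makes is no longer intercepted by that same impl. A self-contained toy that reproduces just that guard with a plain ``Tensor`` subclass; nothing here is ColossalAI API, only the pop/restore idea comes from the code above:

```python
# Toy recursion guard in the style of ColoProxy.__torch_function__.
import torch
import torch.nn.functional as F

class TracedTensor(torch.Tensor):
    _func_dispatch = {}

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        kwargs = kwargs or {}
        if func in cls._func_dispatch:
            impl = cls._func_dispatch.pop(func)   # unregister so the impl's own calls pass through
            try:
                return impl(*args, **kwargs)
            finally:
                cls._func_dispatch[func] = impl   # restore for the next call
        return super().__torch_function__(func, types, args, kwargs)

def linear_split(input, weight, bias=None):
    out = F.linear(input, weight)                 # would re-enter this impl without the pop
    return out if bias is None else out + bias

TracedTensor._func_dispatch[F.linear] = linear_split

x = torch.randn(2, 4).as_subclass(TracedTensor)
print(F.linear(x, torch.randn(3, 4), torch.randn(3)).shape)  # torch.Size([2, 3])
```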
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/passes/shape_prop.py
colossalai/_analyzer/fx/passes/shape_prop.py
"""``torch.fx.ShapeProp``, but with ``MetaTensor``""" from typing import Any, Callable, Dict, Optional, Tuple, Union import torch import torch.fx from torch.autograd.graph import saved_tensors_hooks from torch.utils._pytree import tree_map from colossalai._analyzer._subclasses import MetaTensor, MetaTensorMode from colossalai._analyzer.fx.node_util import MetaInfo from colossalai.fx._compatibility import compatibility Target = Union[Callable[..., Any], str] class sim_env(saved_tensors_hooks): """ A simulation of memory allocation and deallocation in the forward pass using ``saved_tensor_hooks``. Attributes: ctx (Dict[int, torch.Tensor]): A dictionary that maps the data pointer of a tensor to the tensor itself. This is used to track the memory allocation and deallocation. param_ctx (Dict[int, torch.Tensor]): A dictionary that maps the data pointer of all model parameters to the parameter itself. This avoids overestimating the memory usage of the intermediate activations. """ def __init__(self, module: Optional[torch.nn.Module] = None): super().__init__(self.pack_hook, self.unpack_hook) self.ctx = {} self.param_ctx = {param.data_ptr(): param for param in module.parameters()} self.buffer_ctx = {buffer.data_ptr(): buffer for buffer in module.buffers()} if module else {} def pack_hook(self, tensor: torch.Tensor): if tensor.data_ptr() not in self.param_ctx and tensor.data_ptr() not in self.buffer_ctx: self.ctx[tensor.data_ptr()] = tensor return tensor def unpack_hook(self, tensor): return tensor def _normalize_tuple(x): if not isinstance(x, tuple): return (x,) return x def _current_device(module): try: return next(module.parameters()).device except StopIteration: return torch.device("cpu") @compatibility(is_backward_compatible=False) class ShapeProp(torch.fx.Interpreter): """ Execute an FX graph Node-by-Node and record the meta data of the result into the corresponding node. Usage: >>> model = MyModule() >>> x = torch.rand(10, 10) >>> gm = colossalai.fx.symbolic_trace(model, meta_args = {'x': x}) >>> interp = ShapeProp(gm) >>> interp.propagate(x) Args: module (GraphModule): The module to be executed Hints: If you want to add a new shape propagation rule, you can do so by adding a new method to this class with the ``@register_shape_impl`` decorator. The method should take (*args, **kwargs) instance as its input and generate output. For example, if you want to add a shape propagation rule for ``torch.nn.functional.linear``, you can do so by adding a new method to this class with the ``@register_shape_impl`` decorator (Since the ``MetaTensorMode`` is compatible with ``torch.nn.functional.linear``, in practice you don't have to do as follows): >>> @register_shape_impl(torch.nn.functional.linear) >>> def linear_shape_impl(*args, **kwargs): >>> # do something here >>> return torch.empty(output_shape, device=output_device) """ _custom_dispatch_func = {} _mode = MetaTensorMode() def __init__(self, module: torch.fx.GraphModule, garbage_collect_values: bool = True): super().__init__(module, garbage_collect_values) self.global_hook = sim_env(module=self.module) def run_node(self, n: torch.fx.Node) -> Any: """ Run a specific node ``n`` and return the result. Attach ( ``inputs``, ``outputs``, ``parameters``, ``buffers`` ) to ``n``. 
Args: n (Node): The ``Node`` to execute Returns: Any: The result of executing ``n`` """ args, kwargs = self.fetch_args_kwargs_from_env(n) with self.global_hook: r = getattr(self, n.op)(n.target, args, kwargs) def unwrap_fn(elem): def _convert_meta(t: torch.Tensor): if t.device == "meta": return t else: return t.to("meta") if isinstance(elem, MetaTensor): if getattr(self, "_is_param", False): return torch.nn.Parameter(_convert_meta(elem._tensor)) return _convert_meta(elem._tensor) elif isinstance(elem, torch.Tensor): if isinstance(elem, torch.nn.Parameter): return torch.nn.Parameter(_convert_meta(elem)) return _convert_meta(elem) else: return elem is_pure_tensor = lambda elem: isinstance(elem, MetaTensor) and not isinstance(elem, torch.nn.Parameter) n_info = MetaInfo(n) n_info.outputs = _normalize_tuple(r) if n.op == "call_module": submod = self.fetch_attr(n.target) n_info.parameters.update({k: MetaTensor(v) for k, v in submod.named_parameters()}) n_info.buffers.update({k: MetaTensor(v) for k, v in submod.named_buffers()}) else: n_info.parameters.update( { k.name: MetaTensor(v) for k, v in zip(n.args, args) if isinstance(k, torch.fx.Node) and isinstance(v, torch.nn.Parameter) } ) n_info.parameters.update({k: MetaTensor(v) for k, v in kwargs.items() if isinstance(v, torch.nn.Parameter)}) n_info.inputs = tuple(v for v in args if is_pure_tensor(v)) + tuple( v for v in kwargs.values() if is_pure_tensor(v) ) # align with SPMD if isinstance(r, (tuple, list)): n._meta_data = tree_map(unwrap_fn, _normalize_tuple(r)) else: n._meta_data = unwrap_fn(r) n_info.global_ctx = self.global_hook.ctx n_info.curr_ctx = self.global_hook.ctx.copy() crit = lambda x: x.data_ptr() in self.global_hook.ctx if isinstance(x, torch.Tensor) else False n_info.is_alias = _normalize_tuple(tree_map(crit, n_info.outputs)) return r def call_function(self, target: "Target", args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any: """ Execute a ``call_function`` node and return the result. If the target of ``Node`` is registered with ``@register_shape_impl``, the registered function will be used to execute the node. This is common if we insert some customized kernels. Args: target (Target): The call target for this node. See `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return Any: The value returned by the function invocation """ convert_to_param = False if target in (torch.transpose, torch.reshape) and isinstance(args[0], torch.nn.parameter.Parameter): convert_to_param = True if target in self._custom_dispatch_func: res = self._custom_dispatch_func[target](*args, **kwargs) else: res = super().call_function(target, args, kwargs) if convert_to_param: return torch.nn.Parameter(res) else: return res def call_method(self, target: "Target", args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Any: """ Execute a ``call_method`` node and return the result. Args: target (Target): The call target for this node. 
See `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return Any: The value returned by the method invocation """ # args[0] is the `self` object for this method call self_obj, *args_tail = args target_method = getattr(self_obj.__class__, target) convert_to_parameter = False if target_method in (torch.Tensor.view, torch.Tensor.transpose) and isinstance( args[0], torch.nn.parameter.Parameter ): convert_to_parameter = True # Execute the method and return the result assert isinstance(target, str) res = getattr(self_obj, target)(*args_tail, **kwargs) if convert_to_parameter: return torch.nn.Parameter(res) else: return res def propagate(self, *args, device=None): """ Run `module` via interpretation and return the result and record the shape of each node. Args: *args (Tensor): The sample input. Returns: Any: The value returned from executing the Module """ # wrap_fn = lambda elem: MetaTensor(elem, device=device) def wrap_fn(elem, device=device): if isinstance(elem, torch.Tensor): return MetaTensor(elem, device=device) else: return elem with self._mode: return super().run(*tree_map(wrap_fn, args)) def shape_prop_pass(module: torch.fx.GraphModule, *args) -> torch.fx.GraphModule: """ Run ``module`` via interpretation and return the result and record the shape of each ``Node``. Args: module (GraphModule): The GraphModule to profile *args (Any): The sample input Returns: GraphModule: The same GraphModule with shape information """ ShapeProp(module).propagate(*args, device=_current_device(module)) return module
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
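A hedged sketch of the intended flow: trace with ``meta_args``, run ``shape_prop_pass``, then read the recorded outputs from each node's ``MetaInfo``. Attribute and import names follow the code above; the toy model is an assumption:

```python
# Hedged sketch; import paths mirror the file locations in this repository.
import torch
from colossalai._analyzer.fx.node_util import MetaInfo
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.symbolic_trace import symbolic_trace

model = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.GELU())
x = torch.randn(4, 16)

gm = symbolic_trace(model, meta_args={"input": x})
shape_prop_pass(gm, x)

for node in gm.graph.nodes:
    info = MetaInfo(node)
    shapes = [tuple(t.shape) for t in info.outputs if isinstance(t, torch.Tensor)]
    print(node.op, node.name, shapes)
```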
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/passes/__init__.py
colossalai/_analyzer/fx/passes/__init__.py
from .graph_profile import graph_profile_pass
from .shape_prop import ShapeProp, shape_prop_pass, sim_env
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_analyzer/fx/passes/graph_profile.py
colossalai/_analyzer/fx/passes/graph_profile.py
from typing import Any, Dict, Iterator, List, Optional, Tuple import torch import torch.fx from torch.autograd.profiler_util import _format_memory from torch.fx import GraphModule from torch.fx.node import Argument, Node, Target from colossalai._analyzer._subclasses import flop_count from colossalai._analyzer.fx.node_util import MetaInfo def _format_flops(flops: float) -> str: """Returns a formatted FLOP size string""" if flops > 1e12: return f"{flops / 1e12:.2f} TFLOPs" elif flops > 1e9: return f"{flops / 1e9:.2f} GFLOPs" elif flops > 1e6: return f"{flops / 1e6:.2f} MFLOPs" elif flops > 1e3: return f"{flops / 1e3:.2f} kFLOPs" return f"{flops} FLOPs" def _denormalize_tuple(t: Tuple[int, ...]) -> Tuple[int, ...]: return t[0] if len(t) == 1 else t def _normalize_tuple(x): if not isinstance(x, tuple): return (x,) return x def _current_device(module): return next(module.parameters()).device class GraphProfiler(torch.fx.Interpreter): """ Fetch shape argument from ``ShapeProp`` without re-executing the ``GraphModule`` from scratch. """ _profileable = [ "call_function", "call_module", "call_method", ] def __init__(self, module: GraphModule, garbage_collect_values: bool = True): super().__init__(module, garbage_collect_values) def run(self, *args, initial_env: Optional[Dict[Node, Any]] = None, enable_io_processing: bool = True) -> Any: """ Run `module` via interpretation and return the result. Args: *args: The arguments to the Module to run, in positional order initial_env (Optional[Dict[Node, Any]]): An optional starting environment for execution. This is a dict mapping `Node` to any value. This can be used, for example, to pre-populate results for certain `Nodes` so as to do only partial evaluation within the interpreter. enable_io_processing (bool): If true, we process the inputs and outputs with graph's process_inputs and process_outputs function first before using them. Returns: Any: The value returned from executing the Module """ self.env = initial_env if initial_env else {} # Positional function args are consumed left-to-right by # `placeholder` nodes. Use an iterator to keep track of # position and extract those values. if enable_io_processing: args = self.module.graph.process_inputs(*args) self.args_iter: Iterator[Any] = iter(args) for node in self.module.graph.nodes: self.run_node(node) # No need to store. if self.garbage_collect_values: for to_delete in self.user_to_last_uses.get(node, []): del self.env[to_delete] if node.op == "output": output_val = self.env[node] return self.module.graph.process_outputs(output_val) if enable_io_processing else output_val def fetch_initial_env(self, device=None) -> Dict[Node, Any]: """ Fetch ``initial_env`` for execution. This is because ``ShapeProp`` has already attached outputs of each ``Node`` to its ``MetaInfo``. Args: device (torch.device): The device to place the execution, default to ``None`` Returns: Dict[Node, Any]: The initial environment for execution """ initial_env = {} for n in self.module.graph.nodes: initial_env[n] = _denormalize_tuple(MetaInfo(n).outputs) return initial_env def propagate(self, *args, device=None): """ Run `module` via interpretation and profile the execution of each ``Node``. 
Args: *args (Tensor): The sample input, not used device (torch.device): The device to place the execution, default to ``None`` Returns: Any: The value returned from executing the Module """ initial_env = self.fetch_initial_env(device) return self.run(initial_env=initial_env) def summary(self) -> str: """ Summarizes the profiled statistics of the `GraphModule` in tabular format. Note that this API requires the ``tabulate`` module to be installed. Returns: str: The summary of the profiled statistics """ # https://github.com/pytorch/pytorch/blob/master/torch/fx/graph.py try: from tabulate import tabulate except ImportError: print( "`summary` relies on the library `tabulate`, " "which could not be found on this machine. Run `pip " "install tabulate` to install the library." ) # Build up a list of summary information for each node node_summaries: List[List[Any]] = [] last_n_info = None for node in self.module.graph.nodes: node: Node n_info = MetaInfo(node) last_n_info = last_n_info or n_info node_summaries.append( [ node.op, str(node), _format_memory(n_info.accumulate_size), _format_memory(n_info.accumulate_size - last_n_info.accumulate_size), _format_memory(n_info.output_size), _format_memory(n_info.temp_size), _format_memory(n_info.param_size), _format_memory(n_info.backward_size), _format_flops(n_info.fwd_flop), _format_flops(n_info.bwd_flop), ] ) last_n_info = n_info # Use the ``tabulate`` library to create a well-formatted table # presenting our summary information headers: List[str] = [ "Op type", "Op", "Accumulate size", "Incremental size", "Output size", "Temp size", "Param size", "Backward size", "Fwd FLOPs", "Bwd FLOPs", ] return tabulate(node_summaries, headers=headers, stralign="right") class CommunicationProfiler(GraphProfiler): """ TODO(lyl): Add this for all comm nodes """ def __init__(self, module: GraphModule, garbage_collect_values: bool = True): raise NotImplementedError() class FlopProfiler(GraphProfiler): """ Execute an FX graph Node-by-Node and record the meta data of the result into the corresponding node. Usage: >>> model = MyModule() >>> x = torch.rand(10, 10) >>> gm = colossalai.fx.symbolic_trace(model, meta_args = {'x': x}}) >>> shape_interp = ShapeProp(gm) # must do this first >>> shape_interp.propagate(x) >>> profiler = FlopProfiler(gm) >>> profiler.propagate(x) Args: module (GraphModule): The module to be executed Hints: If you want to add a new flop count rule, you can first check the existing files in ``../_subclasses/flop_tensor.py``. If your flop count rules are incompatible with the existing ones, you can do so by adding a new method to this class with the ``@register_flop_count_impl`` decorator. The method should take (*args, **kwargs) instance as its input and generate flop count for both forward and backward as its output. For example, if you want to add a flop count rule for ``my_fn``, which is a hand-written operand not detected by PyTorch, you can do so by adding a new method to this class with the ``@register_flop_count_impl`` decorator: >>> @register_flop_count_impl(my_fn) >>> def my_fn_flop_count_impl(*args, **kwargs): >>> return 0, 0 """ _custom_flop_count_impl = {} def run_node(self, n: torch.fx.Node) -> Any: """ Run a specific node ``n`` and profile its execution time and memory usage. Calls into call_function, call_method, and call_module only. Args: n (Node): The Node to profile Returns: Any: The output of the node Raises: RuntimeError: If the node is not profileable. 
""" args, kwargs = self.fetch_args_kwargs_from_env(n) n_info = MetaInfo(n) if n.op in self._profileable: try: ( n_info.fwd_flop, n_info.bwd_flop, ) = getattr( self, n.op )(n.target, args, kwargs) except Exception as e: raise RuntimeError( f"Error {str(e)} occurred when profiling node {n}, node.target = {n.target}. " f"Please refer to function's docstring to register the relevant profile_impl for this node!" ) from e # retain the autograd graph for param in self.module.parameters(): param.grad = None return _denormalize_tuple(n_info.outputs) def call_function(self, target: "Target", args: Tuple[Argument, ...], kwargs: Dict[str, Any]) -> Any: """ Execute a ``call_function`` node and return the profiling result. Dispatch to ``_custom_flop_count_impl`` if ``call_function`` should be profiled in a user-defined behavior. Args: target (Target): The call target for this node. See `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return flop_count (Tuple[int]): (fwd_flop, bwd_flop) """ assert not isinstance(target, str) # Dispatch the impl for profiling, default will be ``flop_count`` if target in self._custom_flop_count_impl: return self._custom_flop_count_impl[target](*args, **kwargs) else: return flop_count(target, *args, **kwargs) def call_method(self, target: "Target", args: Tuple[Argument, ...], kwargs: Dict[str, Any]) -> Any: """ Execute a ``call_method`` node and return the profiling result. Args: target (Target): The call target for this node. See `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return flop_count (Tuple[int]): (fwd_flop, bwd_flop) """ # Execute the method and return the result assert isinstance(target, str) return flop_count(getattr(torch.Tensor, target), *args, **kwargs) def call_module(self, target: "Target", args: Tuple[Argument, ...], kwargs: Dict[str, Any]) -> Any: """ Execute a ``call_module`` node and return the profiling result. Args: target (Target): The call target for this node. See `Node <https://pytorch.org/docs/master/fx.html#torch.fx.Node>`__ for details on semantics args (Tuple): Tuple of positional args for this invocation kwargs (Dict): Dict of keyword arguments for this invocation Return flop_count (Tuple[int]): (fwd_flop, bwd_flop) """ # Retrieve executed args and kwargs values from the environment # Execute the method and return the result assert isinstance(target, str) submod = self.fetch_attr(target) return flop_count(submod, *args, **kwargs) def graph_profile_pass(module: GraphModule, *args, verbose=False) -> GraphModule: """ Run ``module`` via interpretation and profile the execution of each ``Node``. Args: module (GraphModule): The GraphModule to profile *args (Any): The sample input, not used verbose (bool): Whether to print the profiling summary Returns: GraphModule: The same GraphModule with profiling information """ for profiler_cls in ( FlopProfiler, # CommunicationProfiler, # TODO: add communication profiling ): profiler = profiler_cls(module) profiler.propagate(*args, device=_current_device(module)) if verbose: print(profiler.summary()) return module
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
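Putting the passes together, a hedged end-to-end sketch: trace, propagate shapes (which must run first, since the profiler reuses the recorded outputs), then profile FLOPs. ``verbose=True`` prints the tabulate-based summary, so the ``tabulate`` package must be installed; the model and shapes are illustrative:

```python
# Hedged pipeline sketch; shapes and the toy model are illustrative.
import torch
from colossalai._analyzer.fx.passes.graph_profile import graph_profile_pass
from colossalai._analyzer.fx.passes.shape_prop import shape_prop_pass
from colossalai._analyzer.fx.tracer.symbolic_trace import symbolic_trace

model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU(), torch.nn.Linear(64, 10))
x = torch.randn(8, 64)

gm = symbolic_trace(model, meta_args={"input": x})
shape_prop_pass(gm, x)                    # must run first: the profiler reuses its outputs
graph_profile_pass(gm, x, verbose=True)   # per-node sizes and fwd/bwd FLOPs
```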
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/device/device_mesh.py
colossalai/device/device_mesh.py
"""This code is adapted from Alpa https://github.com/alpa-projects/alpa/ with some changes. """ import operator from dataclasses import dataclass from functools import reduce from typing import Dict, List, Union import torch import torch.distributed as dist from torch.distributed import ProcessGroup @dataclass class ProcessGroupContainer: process_group: ProcessGroup ranks: List[int] # modified from alpa LogicalDeviceMesh(https://github.com/alpa-projects/alpa/blob/main/alpa/shard_parallel/auto_sharding.py) class DeviceMesh: """A logical view of a physical cluster. For example, we could view a physical cluster with 16 devices as a device mesh with shape (2, 2, 4) or (4, 4). Arguments: physical_mesh_id (torch.Tensor): physical view of the devices in global rank. logical_mesh_id (torch.Tensor): logical view of the devices in global rank. mesh_shape (torch.Size, optional): shape of logical view. mesh_alpha (List[float], optional): coefficients used for computing communication cost (default: None) mesh_beta (List[float], optional): coefficients used for computing communication cost (default: None) init_process_group (bool, optional): initialize logical process group during initializing the DeviceMesh instance if the init_process_group set to True. Otherwise, users need to call create_process_groups_for_logical_mesh manually to init logical process group. (default: False) device (str): the device for the process groups used by the DeviceMesh instance. (default: 'cuda') """ _DIST_BACKEND = {"cuda": "nccl", "cpu": "gloo", "npu": "hccl"} def __init__( self, physical_mesh_id: torch.Tensor, mesh_shape: torch.Size = None, logical_mesh_id: torch.Tensor = None, mesh_alpha: List[float] = None, mesh_beta: List[float] = None, init_process_group: bool = False, device: str = "cuda", ): # ============================ # Physical & Logical Mesh IDs # ============================ self._physical_mesh_id = physical_mesh_id assert physical_mesh_id.dim() == 1, "physical_mesh_id should be a 1D tensor." # logical mesh ids can be obtained via two ways # 1. provide physical mesh id and provide mesh shape # 2. directly supply the logical mesh id assert mesh_shape is None or logical_mesh_id is None, ( "Only one of mesh_shape and logical_mesh_id can be specified." "Logical mesh IDs are obtained from either mesh_shape + physical_mesh_id or directly from the user-supplied logical_mesh_id" ) if logical_mesh_id is None: self._mesh_shape = mesh_shape self._logical_mesh_id = self._physical_mesh_id.reshape(self._mesh_shape) else: self._logical_mesh_id = logical_mesh_id self._mesh_shape = self._logical_mesh_id.shape # ensure two things: # 1. logical and physical mesh IDs should contain the same elements # 2. there is no duplicate IDs in each mesh, e.g. [2, 2] is not allowed assert torch.equal( torch.unique(self._physical_mesh_id), torch.unique(self.logical_mesh_id) ), "physical and logical mesh IDs should contain the same elements, please check if you have consistent physical_mesh_id and logical_mesh_id." assert ( torch.unique(self._physical_mesh_id).numel() == self._physical_mesh_id.numel() ), "Found duplicate IDs in the physical_mesh_id and this is not allowed, please check your physical_mesh_id again." assert ( torch.unique(self.logical_mesh_id).numel() == self.logical_mesh_id.numel() ), "Found duplicate IDs in the logical_mesh_id and this is not allowed, please check your logical_mesh_id again." 
# =============================================== # coefficient for alpha-beta communication model # alpha is latency and beta is bandwidth # =============================================== # if the values are not provided, we assume they are 1 for simplicity if mesh_alpha is None: mesh_alpha = [1] * len(self._mesh_shape) if mesh_beta is None: mesh_beta = [1] * len(self._mesh_shape) self.mesh_alpha = tuple(mesh_alpha) self.mesh_beta = tuple(mesh_beta) # ensure the alpha and beta have the same shape assert len(self.mesh_alpha) == len( self.mesh_beta ), "mesh_alpha and mesh_beta should have the same length, please check your mesh_alpha and mesh_beta again." # ========================= # Device for Process Group # ========================= self._device = device self._dist_backend = self._DIST_BACKEND[device] # ========================= # Process Group Management # ========================= # the _global_to_local_rank_mapping is structured as follows # { # <global-rank>: [ <local-rank-on-axis-0>, <local-rank-on-axis-1>, <local-rank-on-axis-2>, ...] # } self._global_to_local_rank_mapping = dict() self._init_global_to_logical_rank_mapping( mapping=self._global_to_local_rank_mapping, tensor=self.logical_mesh_id ) # create process group self._process_group_dict = {} self._ranks_in_the_process_group = {} self._global_rank_of_current_process = None self._is_initialized = False # attribute used to indicate whether this object # is created using DeviceMesh.from_process_group # this attribute can be used to do some check in methods # such get_process_group as no global rank information # is known if created with from_process_group self._is_init_from_process_group = False # initialize process group if specified self._init_ranks_in_the_same_group() self._init_process_group = init_process_group if init_process_group: self.init_logical_process_group() @property def shape(self) -> torch.Size: """ Return the shape of the logical mesh. """ return self._mesh_shape @property def num_devices(self) -> int: """ Return the number of devices contained in the device mesh. """ return reduce(operator.mul, self._physical_mesh_id.shape, 1) @property def logical_mesh_id(self) -> torch.Tensor: """ Return the logical mesh id. """ return self._logical_mesh_id @property def is_initialized(self) -> bool: """ Return whether the process group is initialized. """ return self._is_initialized @staticmethod def from_process_group(process_group: Union[ProcessGroup, List[ProcessGroup]]) -> "DeviceMesh": """ Create a DeviceMesh instance from the current process group. Please note that the DeviceMesh object created with this method will not have information about the physical mesh id, and thus will not be able to query for other ranks and perform alpha-beta communication. Args: process_group (Union[ProcessGroup, List[ProcessGroup]]): the process group or a list of process groups for the device mesh. If the input is a ProcessGroup object, a 1D DeviceMesh object will be created. If the input is a list of ProcessGroup objects, the ProcessGroup at the ith index will correspond to the process group in the ith axis of the device mesh. Returns: DeviceMesh: the device mesh instance. """ def _get_device_by_backend(process_group): """ Get the device type given a process group's backend. 
""" backend = dist.get_backend(process_group) for _device, _backend in DeviceMesh._DIST_BACKEND.items(): if _backend == backend: return _device return None if isinstance(process_group, ProcessGroup): process_group = [process_group] # get mesh shape mesh_shape = [dist.get_world_size(pg) for pg in process_group] # get device device_list = [_get_device_by_backend(pg) for pg in process_group] # make sure all devices are the same assert all( [device == device_list[0] for device in device_list] ), "All devices should be the same, please check your input process groups are created with the same distributed backend." # create a fake physical mesh id # as we only get the process group associated with the current process, # we cannot get the global ranks for all processes in the mesh # therefore, we only use this fake physical mesh id to create the device mesh # and will remove this fake physical mesh id later fake_physical_mesh_id = torch.arange(reduce(operator.mul, mesh_shape, 1)) # create the device mesh device_mesh = DeviceMesh(physical_mesh_id=fake_physical_mesh_id, mesh_shape=mesh_shape, device=device_list[0]) # hack the device attribute device_mesh._physical_mesh_id = None device_mesh._logical_mesh_id = None device_mesh._global_rank_of_current_process = dist.get_rank() device_mesh._is_initialized = False device_mesh._process_group_dict = { device_mesh._global_rank_of_current_process: {axis: pg for axis, pg in enumerate(process_group)} } return device_mesh def get_process_group(self, axis: int, global_rank: int = None) -> ProcessGroup: """ Return the process group on the specified axis. Args: axis (int): the axis of the process group. global_rank (int, optional): the global rank of the process group. If not specified, the current process is used. (default: None) """ if global_rank is None: global_rank = self._global_rank_of_current_process elif self._is_init_from_process_group: raise RuntimeError( "The logical device mesh is create with DeviceMesh.from_process_group, this method is not supported for this creation method as no global rank information is known." ) return self._process_group_dict[global_rank][axis] def get_process_group_for_all_axes(self, global_rank: int = None) -> Dict[int, ProcessGroup]: """ Return the process groups for all axes. Args: global_rank (int, optional): the global rank of the process """ if global_rank is None: global_rank = self._global_rank_of_current_process elif self._is_init_from_process_group: raise RuntimeError( "The logical device mesh is create with DeviceMesh.from_process_group, this method is not supported for this creation method as no global rank information is known." ) return self._process_group_dict[global_rank] def get_ranks_in_process_group(self, axis: int, global_rank: int = None) -> List[int]: """ Return the ranks in the process group on the specified axis. Args: axis (int): the axis of the process group. global_rank (int, optional): the global rank of the process """ if global_rank is None: global_rank = self._global_rank_of_current_process elif self._is_init_from_process_group: raise RuntimeError( "The logical device mesh is create with DeviceMesh.from_process_group, this method is not supported for this creation method as no global rank information is known." 
) return self._ranks_in_the_process_group[global_rank][axis] def __deepcopy__(self, memo) -> "DeviceMesh": cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): if k != "_process_group_dict": setattr(result, k, __import__("copy").deepcopy(v, memo)) else: # process group cannot be copied # thus, we share them directly setattr(result, k, v) return result def _init_global_to_logical_rank_mapping( self, mapping: Dict, tensor: torch.Tensor, index_list: List[int] = [] ) -> Dict[int, List[int]]: """ Build a global rank to local rank mapping for each process group in different axis in the logical device mesh. Args: mapping (Dict): a dictionary that maps the global rank to the local rank in the logical device mesh. tensor (torch.Tensor): the tensor that contains the logical mesh ids. index_list (List[int]) Returns: mapping (Dict): a dictionary that maps the global rank to the local rank in the logical device mesh. The value is a list of integers and each integer represents the local rank in the indexed axis. """ for index, inner_tensor in enumerate(tensor): # index means the local rank in the current axis # inner_tensor refers to the processes with the same local rank if inner_tensor.dim() == 0: # if the inner_tensor already reaches the last axis, # we append its local_rank in the last axis to the index_list # and assign to the mapping # the value of the mapping is the the local rank at the indexed axis of the device mesh mapping[int(inner_tensor)] = index_list + [index] else: # we recursively go into the function until we reach the last axis # meanwhile, we should add the local rank in the current axis in the index_list self._init_global_to_logical_rank_mapping(mapping, inner_tensor, index_list + [index]) def init_logical_process_group(self): """ This method is used to initialize the logical process groups which will be used in communications among logical device mesh. Note: if init_process_group set to False, you have to call this method manually. Otherwise, the communication related function, such as ShapeConsistencyManager.apply will raise errors. """ # sanity check assert ( dist.is_initialized ), "The torch.distributed should be initialized before calling init_logical_process_group" assert ( not self._is_initialized ), "The logical process group has been initialized, do not call init_logical_process_group twice" # update the global rank of the current process self._global_rank_of_current_process = dist.get_rank() duplicate_check_list = [] # flatten the global ranks to 1D list global_rank_flatten_list = self._physical_mesh_id.view(-1).tolist() for global_rank in global_rank_flatten_list: # find the other ranks which are in the same process group as global_rank ranks_in_same_group_by_axis = self._collate_global_ranks_in_same_process_group(global_rank) for axis, ranks_in_same_group in ranks_in_same_group_by_axis.items(): # skip duplicated process group creation if ranks_in_same_group in duplicate_check_list: continue # create the process group pg_handler = dist.new_group(ranks=ranks_in_same_group, backend=self._dist_backend) # keep this process group in the process_groups_dict for rank in ranks_in_same_group: if rank not in self._process_group_dict: self._process_group_dict[rank] = dict() self._process_group_dict[rank][axis] = pg_handler # update the init flag # we only allow init for once self._is_initialized = True def _init_ranks_in_the_same_group(self): """ This method is used to initialize the ranks_in_the_same_group dictionary. 
""" # flatten the global ranks to 1D list global_rank_flatten_list = self._physical_mesh_id.view(-1).tolist() for global_rank in global_rank_flatten_list: # find the other ranks which are in the same process group as global_rank ranks_in_same_group_by_axis = self._collate_global_ranks_in_same_process_group(global_rank) for axis, ranks_in_same_group in ranks_in_same_group_by_axis.items(): # create dict for each rank if global_rank not in self._process_group_dict: self._ranks_in_the_process_group[global_rank] = dict() # keep this process group in the process_groups_dict self._ranks_in_the_process_group[global_rank][axis] = ranks_in_same_group def global_rank_to_local_rank(self, rank: int, axis: int = None) -> Union[List[int], int]: """ Return the local rank of the given global rank in the logical device mesh. Args: rank (int): the global rank in the logical device mesh. axis (int): the axis of the logical device mesh. """ if self._is_init_from_process_group: raise RuntimeError( "The logical device mesh is create with DeviceMesh.from_process_group, this method is not supported for this creation method as no global rank information is known." ) local_ranks = self._global_to_local_rank_mapping[rank] if axis: return local_ranks[axis] else: return local_ranks def _collate_global_ranks_in_same_process_group(self, global_rank): """ Give a global rank and return all global ranks involved in its associated process group in each axis. Example: ```python physical_mesh_id = torch.arange(0, 16) mesh_shape = (4, 4) # logical mesh will look like # [[0, 1, 2, 3], # [4, 5, 6, 7], # [8, 9, 10,11], # [12,13,14,15]] device_mesh = DeviceMesh(physical_mesh_id, mesh_shape) print(device_mesh.collate_global_ranks_in_same_process_group(0)) # key is axis name # value is a list of global ranks in same axis with rank 0 # output will look like # { 0: [0, 4, 8, 12], 1: [0, 1, 2, 3] # } """ # We have init the global rank to local rank by calling _init_global_to_logical_rank_mapping # for self._global_to_local_rank_mapping # the key is the global rank # the value is the list of local ranks corresponding to the global rank with respect of different axes # we can see the list of local ranks as the process coordinates for simplicity # the key and value are all unique, therefore, # we can also to use the coordinates to find the global rank # ========================================================================= # Step 1 # find all the process_coordinates for processes in the same process group # as the given global rank # ========================================================================= # each processes_in_the_same_process_group = {} for dim in range(self.logical_mesh_id.dim()): # iterate over the dimension size so that we can include all processes # in the same process group in the given axis # the _local_rank refers to the local rank of the current process for _local_rank in range(self.logical_mesh_id.shape[dim]): # if this dimension is not initialized yet, # initialize it with an empty array if dim not in processes_in_the_same_process_group: processes_in_the_same_process_group[dim] = [] # get the local rank corresponding to the global rank process_coordinates = self._global_to_local_rank_mapping[global_rank].copy() # replace the local rank in the given dimension with the # local rank of the current process iterated process_coordinates[dim] = _local_rank processes_in_the_same_process_group[dim].append(process_coordinates) # ================================================================= # Step 2 # Use local rank 
combination to find its corresponding global rank # ================================================================= # the key of the dict is the axis # the value is the list of global ranks which are in the same process group as the given global rank global_pg_ranks = {} for dim, coordinates_of_all_processes in processes_in_the_same_process_group.items(): global_pg_ranks[dim] = [] for process_coordinates in coordinates_of_all_processes: # find the global rank by local rank combination for _global_rank, _process_coordinates in self._global_to_local_rank_mapping.items(): if process_coordinates == _process_coordinates: global_pg_ranks[dim].append(_global_rank) return global_pg_ranks def flatten(self): """ Flatten the logical mesh into an effective 1d logical mesh, """ if self._is_init_from_process_group: raise RuntimeError( "The logical device mesh is create with DeviceMesh.from_process_group, this method is not supported for this creation method as no global rank information is known." ) flatten_mesh_shape_size = len(self._mesh_shape) flatten_mesh_shape = [self.num_devices] return DeviceMesh( self._physical_mesh_id, tuple(flatten_mesh_shape), mesh_alpha=[max(self.mesh_alpha)] * (flatten_mesh_shape_size - 1), mesh_beta=[max(self.mesh_beta)] * (flatten_mesh_shape_size - 1), init_process_group=self._init_process_group, ) def all_gather_cost(self, num_bytes, mesh_dim): num_devices = self.logical_mesh_id.shape[mesh_dim] return self.mesh_alpha[mesh_dim] + self.mesh_beta[mesh_dim] * (num_devices - 1) / num_devices * num_bytes + 0.1 def all_reduce_cost(self, num_bytes, mesh_dim): num_devices = self.logical_mesh_id.shape[mesh_dim] return ( self.mesh_alpha[mesh_dim] + self.mesh_beta[mesh_dim] * 2 * (num_devices - 1) / num_devices * num_bytes + 0.01 ) def reduce_scatter_cost(self, num_bytes, mesh_dim): num_devices = self.logical_mesh_id.shape[mesh_dim] return ( self.mesh_alpha[mesh_dim] + self.mesh_beta[mesh_dim] * (num_devices - 1) / num_devices * num_bytes + 0.001 ) def all_to_all_cost(self, num_bytes, mesh_dim): num_devices = self.logical_mesh_id.shape[mesh_dim] penalty_factor = num_devices / 2.0 return ( self.mesh_alpha[mesh_dim] + self.mesh_beta[mesh_dim] * (num_devices - 1) / num_devices / num_devices * num_bytes * penalty_factor + 0.001 )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
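A hedged sketch of building a logical mesh without creating process groups (``init_process_group`` defaults to ``False``, so ``torch.distributed`` need not be initialized) and querying the alpha-beta cost model, where the all-reduce cost follows ``alpha + 2 * (n - 1) / n * beta * bytes`` plus a small constant, as in the code above. The alpha/beta numbers below are arbitrary placeholders:

```python
# Hedged sketch; mesh_alpha / mesh_beta values are arbitrary placeholders.
import torch
from colossalai.device.device_mesh import DeviceMesh

physical_mesh_id = torch.arange(8)               # 8 devices, global ranks 0..7
mesh = DeviceMesh(
    physical_mesh_id,
    mesh_shape=(2, 4),
    mesh_alpha=[1e-5, 1e-5],                     # per-axis latency terms
    mesh_beta=[1e-11, 5e-11],                    # per-axis inverse-bandwidth terms
)

print(mesh.shape)        # (2, 4) logical view
print(mesh.num_devices)  # 8
# relative cost of all-reducing 1 MB along mesh axis 1 under the model above
print(mesh.all_reduce_cost(num_bytes=1 << 20, mesh_dim=1))
```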
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/device/__init__.py
colossalai/device/__init__.py
from .alpha_beta_profiler import AlphaBetaProfiler
from .calc_pipeline_strategy import alpa_dp

__all__ = ["AlphaBetaProfiler", "alpa_dp"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/device/alpha_beta_profiler.py
colossalai/device/alpha_beta_profiler.py
import math import time from typing import Dict, List, Tuple import torch import torch.distributed as dist from colossalai.logging import get_dist_logger GB = int((1 << 30)) BYTE = 4 FRAMEWORK_LATENCY = 0 class AlphaBetaProfiler: """ Profile alpha and beta value for a given device list. Usage: # Note: the environment of execution is supposed to be # multi-process with multi-gpu in mpi style. >>> physical_devices = [0, 1, 4, 5] >>> ab_profiler = AlphaBetaProfiler(physical_devices) >>> ab_dict = profiler.alpha_beta_dict >>> print(ab_dict) {(0, 1): (1.9641406834125518e-05, 4.74049549614719e-12), (0, 4): (1.9506998360157013e-05, 6.97421973297474e-11), (0, 5): (2.293858677148819e-05, 7.129930361393644e-11), (1, 4): (1.9010603427886962e-05, 7.077968863788975e-11), (1, 5): (1.9807778298854827e-05, 6.928845708992215e-11), (4, 5): (1.8681809306144713e-05, 4.7522367291330524e-12), (1, 0): (1.9641406834125518e-05, 4.74049549614719e-12), (4, 0): (1.9506998360157013e-05, 6.97421973297474e-11), (5, 0): (2.293858677148819e-05, 7.129930361393644e-11), (4, 1): (1.9010603427886962e-05, 7.077968863788975e-11), (5, 1): (1.9807778298854827e-05, 6.928845708992215e-11), (5, 4): (1.8681809306144713e-05, 4.7522367291330524e-12)} """ def __init__( self, physical_devices: List[int], alpha_beta_dict: Dict[Tuple[int, int], Tuple[float, float]] = None, ctype: str = "a", warmup: int = 5, repeat: int = 25, latency_iters: int = 5, homogeneous_tolerance: float = 0.1, ): """ Args: physical_devices: A list of device id, each element inside it is the global rank of that device. alpha_beta_dict: A dict which maps a process group to alpha-beta value pairs. ctype: 'a' for all-reduce, 'b' for broadcast. warmup: Number of warmup iterations. repeat: Number of iterations to measure. latency_iters: Number of iterations to measure latency. 
""" self.physical_devices = physical_devices self.ctype = ctype self.world_size = len(physical_devices) self.warmup = warmup self.repeat = repeat self.latency_iters = latency_iters self.homogeneous_tolerance = homogeneous_tolerance self.process_group_dict = None self._init_profiling() if alpha_beta_dict is None: self.alpha_beta_dict = self.profile_ab() else: self.alpha_beta_dict = alpha_beta_dict def _init_profiling(self): # Create process group list based on its global rank process_group_list = [] for f_index in range(self.world_size - 1): for b_index in range(f_index + 1, self.world_size): process_group_list.append((self.physical_devices[f_index], self.physical_devices[b_index])) # Create process group dict which maps process group to its handler process_group_dict = {} for process_group in process_group_list: pg_handler = dist.new_group(process_group) process_group_dict[process_group] = pg_handler self.process_group_dict = process_group_dict def _profile(self, process_group, pg_handler, nbytes): logger = get_dist_logger() rank = dist.get_rank() src_device_num = process_group[0] world_size = len(process_group) device = torch.cuda.current_device() buf = torch.randn(nbytes // 4).to(device) torch.cuda.synchronize() # warmup for _ in range(self.warmup): if self.ctype == "a": dist.all_reduce(buf, op=dist.ReduceOp.SUM, group=pg_handler) elif self.ctype == "b": dist.broadcast(buf, src=src_device_num, group=pg_handler) torch.cuda.synchronize() dist.barrier(group=pg_handler) begin = time.perf_counter() for _ in range(self.repeat): if self.ctype == "a": dist.all_reduce(buf, op=dist.ReduceOp.SUM, group=pg_handler) elif self.ctype == "b": dist.broadcast(buf, src=src_device_num, group=pg_handler) torch.cuda.synchronize() end = time.perf_counter() dist.barrier(group=pg_handler) if rank == src_device_num: avg_time_s = (end - begin) / self.repeat - FRAMEWORK_LATENCY alg_band = nbytes / avg_time_s if self.ctype == "a": # convert the bandwidth of all-reduce algorithm to the bandwidth of the hardware. bus_band = 2 * (world_size - 1) / world_size * alg_band bus_band = alg_band elif self.ctype == "b": bus_band = alg_band logger.info( f"GPU:{rank}, Bytes: {nbytes} B,Time: {round(avg_time_s * 1e6,2)} us, Bus bandwidth: {round(bus_band / GB,2)} GB/s" ) return (avg_time_s, alg_band) else: # Just a placeholder return (None, None) def profile_latency(self, process_group, pg_handler): """ This function is used to profile the latency of the given process group with a series of bytes. Args: process_group: A tuple of global rank of the process group. pg_handler: The handler of the process group. Returns: latency: None if the latency is not measured, otherwise the median of the latency_list. """ latency_list = [] for i in range(self.latency_iters): nbytes = int(BYTE << i) (t, _) = self._profile(process_group, pg_handler, nbytes) latency_list.append(t) if latency_list[0] is None: latency = None else: median_index = math.floor(self.latency_iters / 2) latency = latency_list[median_index] return latency def profile_bandwidth(self, process_group, pg_handler, maxbytes=(1 * GB)): """ This function is used to profile the bandwidth of the given process group. Args: process_group: A tuple of global rank of the process group. pg_handler: The handler of the process group. """ (_, bandwidth) = self._profile(process_group, pg_handler, maxbytes) return bandwidth def profile_ab(self): """ This method is used to profiling the alpha and beta value for a given device list. 
Returns: alpha_beta_dict: A dict which maps process group to its alpha and beta value. """ alpha_beta_dict: Dict[Tuple[int], Tuple[float]] = {} rank = dist.get_rank() dist.new_group(self.physical_devices) def get_max_nbytes(process_group: Tuple[int], pg_handler: dist.ProcessGroup): assert rank in process_group device = torch.cuda.current_device() rank_max_nbytes = torch.cuda.mem_get_info(device)[0] rank_max_nbytes = torch.tensor(rank_max_nbytes, device=device) dist.all_reduce(rank_max_nbytes, op=dist.ReduceOp.MIN, group=pg_handler) max_nbytes = min(int(1 * GB), int(GB << int(math.log2(rank_max_nbytes.item() / GB)))) return max_nbytes for process_group, pg_handler in self.process_group_dict.items(): if rank not in process_group: max_nbytes = None alpha = None bandwidth = None else: max_nbytes = get_max_nbytes(process_group, pg_handler) alpha = self.profile_latency(process_group, pg_handler) bandwidth = self.profile_bandwidth(process_group, pg_handler, maxbytes=max_nbytes) if bandwidth is None: beta = None else: beta = 1 / bandwidth broadcast_list = [alpha, beta] dist.broadcast_object_list(broadcast_list, src=process_group[0]) alpha_beta_dict[process_group] = tuple(broadcast_list) # add symmetry pair to the alpha_beta_dict symmetry_ab_dict = {} for process_group, alpha_beta_pair in alpha_beta_dict.items(): symmetry_process_group = (process_group[1], process_group[0]) symmetry_ab_dict[symmetry_process_group] = alpha_beta_pair alpha_beta_dict.update(symmetry_ab_dict) return alpha_beta_dict def search_best_logical_mesh(self): """ This method is used to search the best logical mesh for the given device list. The best logical mesh is searched in following steps: 1. detect homogeneous device groups, we assume that the devices in the alpha_beta_dict are homogeneous if the beta value is close enough. 2. Find the best homogeneous device group contains all the physical devices. The best homogeneous device group means the lowest beta value in the groups which contains all the physical devices. And the reason we require the group contains all the physical devices is that the devices not in the group will decrease the bandwidth of the group. 3. If the best homogeneous device group is found, we will construct the largest ring for each device based on the best homogeneous device group, and the best logical mesh will be the union of all the rings. Otherwise, the best logical mesh will be the balanced logical mesh, such as shape (2, 2) for 4 devices. Returns: best_logical_mesh: The best logical mesh for the given device list. Usage: >>> physical_devices = [0, 1, 2, 3] >>> ab_profiler = AlphaBetaProfiler(physical_devices) >>> best_logical_mesh = profiler.search_best_logical_mesh() >>> print(best_logical_mesh) [[0, 1], [2, 3]] """ def _power_of_two(integer): return integer & (integer - 1) == 0 def _detect_homogeneous_device(alpha_beta_dict): """ This function is used to detect whether the devices in the alpha_beta_dict are homogeneous. Note: we assume that the devices in the alpha_beta_dict are homogeneous if the beta value of the devices are in range of [(1 - self.homogeneous_tolerance), (1 + self.homogeneous_tolerance)] * base_beta. 
""" homogeneous_device_dict: Dict[float, List[Tuple[int]]] = {} for process_group, (_, beta) in alpha_beta_dict.items(): if homogeneous_device_dict is None: homogeneous_device_dict[beta] = [] homogeneous_device_dict[beta].append(process_group) match_beta = None for beta_value in homogeneous_device_dict.keys(): if beta <= beta_value * (1 + self.homogeneous_tolerance) and beta >= beta_value * ( 1 - self.homogeneous_tolerance ): match_beta = beta_value break if match_beta is not None: homogeneous_device_dict[match_beta].append(process_group) else: homogeneous_device_dict[beta] = [] homogeneous_device_dict[beta].append(process_group) return homogeneous_device_dict def _check_contain_all_devices(homogeneous_group: List[Tuple[int]]): """ This function is used to check whether the homogeneous_group contains all physical devices. """ flatten_mesh = [] for process_group in homogeneous_group: flatten_mesh.extend(process_group) non_duplicated_flatten_mesh = set(flatten_mesh) return len(non_duplicated_flatten_mesh) == len(self.physical_devices) def _construct_largest_ring(homogeneous_group: List[Tuple[int]]): """ This function is used to construct the largest ring in the homogeneous_group for each rank. """ # Construct the ring ring = [] ranks_in_ring = [] for rank in self.physical_devices: if rank in ranks_in_ring: continue stable_status = False ring_for_rank = [] ring_for_rank.append(rank) check_rank_list = [rank] rank_to_check_list = [] while not stable_status: stable_status = True check_rank_list.extend(rank_to_check_list) rank_to_check_list = [] for i in range(len(check_rank_list)): check_rank = check_rank_list.pop() for process_group in homogeneous_group: if check_rank in process_group: rank_to_append = ( process_group[0] if process_group[1] == check_rank else process_group[1] ) if rank_to_append not in ring_for_rank: stable_status = False rank_to_check_list.append(rank_to_append) ring_for_rank.append(rank_to_append) ring.append(ring_for_rank) ranks_in_ring.extend(ring_for_rank) return ring assert _power_of_two(self.world_size) power_of_two = int(math.log2(self.world_size)) median = power_of_two // 2 balanced_logical_mesh_shape = (2**median, 2 ** (power_of_two - median)) row_size, column_size = balanced_logical_mesh_shape[0], balanced_logical_mesh_shape[1] balanced_logical_mesh = [] for row_index in range(row_size): balanced_logical_mesh.append([]) for column_index in range(column_size): balanced_logical_mesh[row_index].append(self.physical_devices[row_index * column_size + column_index]) homogeneous_device_dict = _detect_homogeneous_device(self.alpha_beta_dict) beta_list = [b for b in homogeneous_device_dict.keys()] beta_list.sort() beta_list.reverse() homogeneous_types = len(beta_list) best_logical_mesh = None if homogeneous_types >= 2: for _ in range(homogeneous_types - 1): lowest_beta = beta_list.pop() best_homogeneous_group = homogeneous_device_dict[lowest_beta] # if the best homogeneous group contains all physical devices, # we will build the logical device mesh based on it. Otherwise, # we will check next level homogeneous group. if _check_contain_all_devices(best_homogeneous_group): # We choose the largest ring for each rank to maximum the best bus utilization. best_logical_mesh = _construct_largest_ring(best_homogeneous_group) break if homogeneous_types == 1 or best_logical_mesh is None: # in this case, we use balanced logical mesh as the best # logical mesh. 
best_logical_mesh = balanced_logical_mesh return best_logical_mesh def extract_alpha_beta_for_device_mesh(self): """ Extract the mesh_alpha list and mesh_beta list based on the best logical mesh, which will be used to initialize the device mesh. Usage: >>> physical_devices = [0, 1, 2, 3] >>> ab_profiler = AlphaBetaProfiler(physical_devices) >>> mesh_alpha, mesh_beta = profiler.extract_alpha_beta_for_device_mesh() >>> print(mesh_alpha) [2.5917552411556242e-05, 0.00010312341153621673] >>> print(mesh_beta) [5.875573704655635e-11, 4.7361584445959614e-12] """ best_logical_mesh = self.search_best_logical_mesh() first_axis = [row[0] for row in best_logical_mesh] second_axis = best_logical_mesh[0] # init process group for both axes first_axis_process_group = dist.new_group(first_axis) second_axis_process_group = dist.new_group(second_axis) # extract alpha and beta for both axes def _extract_alpha_beta(pg, pg_handler): latency = self.profile_latency(pg, pg_handler) bandwidth = self.profile_bandwidth(pg, pg_handler) broadcast_object = [latency, bandwidth] dist.broadcast_object_list(broadcast_object, src=pg[0]) return broadcast_object first_latency, first_bandwidth = _extract_alpha_beta(first_axis, first_axis_process_group) second_latency, second_bandwidth = _extract_alpha_beta(second_axis, second_axis_process_group) mesh_alpha = [first_latency, second_latency] # The beta values have been enlarged by 1e10 times temporarily because the computation cost # is still estimated in the unit of TFLOPs instead of time. We will remove this factor in future. mesh_beta = [1e10 / first_bandwidth, 1e10 / second_bandwidth] return mesh_alpha, mesh_beta
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
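A hedged usage sketch of AlphaBetaProfiler, consistent with the docstrings above: it assumes a multi-process launch with one GPU per process (e.g. `torchrun --nproc_per_node=4`) and an already-initialized default process group; the device list is illustrative.

```py
# Hedged sketch: profile pairwise alpha/beta, then derive a logical mesh.
# Assumes torch.distributed is already initialized with 4 ranks, one GPU each.
from colossalai.device.alpha_beta_profiler import AlphaBetaProfiler

physical_devices = [0, 1, 2, 3]                      # global ranks to profile
profiler = AlphaBetaProfiler(physical_devices)       # runs profile_ab() during __init__
print(profiler.alpha_beta_dict)                      # {(0, 1): (alpha, beta), ...}
best_mesh = profiler.search_best_logical_mesh()      # e.g. [[0, 1], [2, 3]]
mesh_alpha, mesh_beta = profiler.extract_alpha_beta_for_device_mesh()
```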
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/device/calc_pipeline_strategy.py
colossalai/device/calc_pipeline_strategy.py
from math import pow import numpy as np def get_submesh_choices(num_hosts, num_devices_per_host, mode="new"): submesh_choices = [] i = 1 p = -1 while i <= num_devices_per_host: i *= 2 p += 1 assert pow(2, p) == num_devices_per_host, ( "Only supports the cases where num_devices_per_host is power of two, " f"while now num_devices_per_host = {num_devices_per_host}" ) if mode == "alpa": for i in range(p + 1): submesh_choices.append((1, pow(2, i))) for i in range(2, num_hosts + 1): submesh_choices.append((i, num_devices_per_host)) elif mode == "new": for i in range(p // 2 + 1): for j in range(i, p - i + 1): submesh_choices.append((pow(2, i), pow(2, j))) return submesh_choices def alpa_dp_impl( num_layers, num_devices, num_microbatches, submesh_choices, compute_cost, max_stage_cost, best_configs ): """Implementation of Alpa DP for pipeline strategy Paper reference: https://www.usenix.org/system/files/osdi22-zheng-lianmin.pdf Arguments: num_layers: K num_devices: N*M num_microbatches: B submesh_choices: List[(n_i,m_i)] compute_cost: t_intra """ # For f, layer ID start from 0 # f[#pipeline stages, layer id that is currently being considered, number of devices used] f = np.full((num_layers + 1, num_layers + 1, num_devices + 1), np.inf, dtype=np.float32) f_stage_max = np.full((num_layers + 1, num_layers + 1, num_devices + 1), 0.0, dtype=np.float32) f_argmin = np.full((num_layers + 1, num_layers + 1, num_devices + 1, 3), -1, dtype=np.int32) f[0, num_layers, 0] = 0 for s in range(1, num_layers + 1): for k in range(num_layers - 1, -1, -1): for d in range(1, num_devices + 1): for m, submesh in enumerate(submesh_choices): n_submesh_devices = np.prod(np.array(submesh)) if n_submesh_devices <= d: # TODO: [luzgh]: Why alpa needs max_n_succ_stages? Delete. # if s - 1 <= max_n_succ_stages[i, k - 1, m, n_config]: # ... for i in range(num_layers, k, -1): stage_cost = compute_cost[k, i, m] new_cost = f[s - 1, k, d - n_submesh_devices] + stage_cost if stage_cost <= max_stage_cost and new_cost < f[s, k, d]: f[s, k, d] = new_cost f_stage_max[s, k, d] = max(stage_cost, f_stage_max[s - 1, i, d - n_submesh_devices]) f_argmin[s, k, d] = (i, m, best_configs[k, i, m]) best_s = -1 best_total_cost = np.inf for s in range(1, num_layers + 1): if f[s, 0, num_devices] < best_total_cost: best_s = s best_total_cost = f[s, 0, num_devices] if np.isinf(best_total_cost): return np.inf, None total_cost = f[best_s, 0, num_devices] + (num_microbatches - 1) * f_stage_max[best_s, 0, num_devices] current_s = best_s current_layer = 0 current_devices = num_devices res = [] while current_s > 0 and current_layer < num_layers and current_devices > 0: next_start_layer, submesh_choice, autosharding_choice = f_argmin[current_s, current_layer, current_devices] assert next_start_layer != -1 and current_devices != -1 res.append(((current_layer, next_start_layer), submesh_choice, autosharding_choice)) current_s -= 1 current_layer = next_start_layer current_devices -= np.prod(np.array(submesh_choices[submesh_choice])) assert current_s == 0 and current_layer == num_layers and current_devices == 0 return total_cost, res def alpa_dp( num_layers, num_devices, num_microbatches, submesh_choices, num_autosharding_configs, compute_cost, gap=1e-6 ): """Alpa auto stage dynamic programming. 
Code reference: https://github.com/alpa-projects/alpa/blob/main/alpa/pipeline_parallel/stage_construction.py Arguments: submesh_choices: List[(int,int)] num_autosharding_configs: Max number of t_intra(start_layer, end_layer, LogicalMesh) compute_cost: np.array(num_layers,num_layers,num_submesh_choices,num_autosharding_configs) """ assert np.shape(compute_cost) == ( num_layers, num_layers, len(submesh_choices), num_autosharding_configs, ), "Cost shape wrong." all_possible_stage_costs = np.sort(np.unique(compute_cost)) best_cost = np.inf best_solution = None last_max_stage_cost = 0.0 # TODO: [luzgh]: Why alpa needs the num_autosharding_configs dimension in compute_cost? # In dp_impl it seems the argmin n_config will be chosen. Just amin here. best_configs = np.argmin(compute_cost, axis=3) best_compute_cost = np.amin(compute_cost, axis=3) assert len(all_possible_stage_costs), "no solution in auto stage construction." for max_stage_cost in all_possible_stage_costs: if max_stage_cost * num_microbatches >= best_cost: break if max_stage_cost - last_max_stage_cost < gap: continue cost, solution = alpa_dp_impl( num_layers, num_devices, num_microbatches, submesh_choices, best_compute_cost, max_stage_cost, best_configs ) if cost < best_cost: best_cost = cost best_solution = solution last_max_stage_cost = max_stage_cost return best_cost, best_solution
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
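A hedged sketch of enumerating submesh choices for the Alpa-style stage construction above; the host and device counts are assumptions, and the trailing comment spells out the cost-tensor shape that `alpa_dp` asserts before running the dynamic program.

```py
# Hedged sketch: enumerate submesh choices and note the cost tensor shape alpa_dp expects.
from colossalai.device.calc_pipeline_strategy import get_submesh_choices

num_hosts, num_devices_per_host = 2, 8               # illustrative assumption
submesh_choices = get_submesh_choices(num_hosts, num_devices_per_host, mode="alpa")
print(submesh_choices)   # [(1, 1.0), (1, 2.0), (1, 4.0), (1, 8.0), (2, 8)]
# alpa_dp(num_layers, num_devices, num_microbatches, submesh_choices,
#         num_autosharding_configs, compute_cost) asserts compute_cost.shape ==
# (num_layers, num_layers, len(submesh_choices), num_autosharding_configs).
```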
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/quantization/bnb.py
colossalai/quantization/bnb.py
# adapted from Hugging Face accelerate/utils/bnb.py accelerate/utils/modeling.py import importlib.metadata import logging import torch import torch.nn as nn from packaging.version import Version from .bnb_config import BnbQuantizationConfig try: import bitsandbytes as bnb try: # in case lower version of bitsandbytes does not have __version__ attribute BNB_VERSION = Version(bnb.__version__) except AttributeError: BNB_VERSION = Version(importlib.metadata.version("bitsandbytes")) IS_4BIT_BNB_AVAILABLE = BNB_VERSION >= Version("0.39.0") IS_8BIT_BNB_AVAILABLE = BNB_VERSION >= Version("0.37.2") except ImportError: pass logger = logging.getLogger(__name__) def quantize_model( model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, ): """ This function will quantize the input loaded model with the associated config passed in `bnb_quantization_config`. We will quantize the model and put the model on the GPU. Args: model (`torch.nn.Module`): Input model. The model already loaded bnb_quantization_config (`BnbQuantizationConfig`): The bitsandbytes quantization parameters Returns: `torch.nn.Module`: The quantized model """ load_in_4bit = bnb_quantization_config.load_in_4bit load_in_8bit = bnb_quantization_config.load_in_8bit if load_in_8bit and not IS_8BIT_BNB_AVAILABLE: raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_4bit and not IS_4BIT_BNB_AVAILABLE: raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." ) # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: bnb_quantization_config.skip_modules = get_keys_to_not_convert(model) modules_to_not_convert = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fp32_modules is None: bnb_quantization_config.keep_in_fp32_modules = [] keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules # compatibility with peft model.is_loaded_in_4bit = load_in_4bit model.is_loaded_in_8bit = load_in_8bit # assert model_device is cuda model_device = next(model.parameters()).device model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert) # convert param to the right dtype dtype = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules): param.to(torch.float32) if param.dtype != torch.float32: name = name.replace(".weight", "").replace(".bias", "") param = getattr(model, name, None) if param is not None: param.to(torch.float32) elif torch.is_floating_point(param): param.to(dtype) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device()) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device()) logger.info( f"The model device type is {model_device.type}. However, cuda is needed for quantization." "We move the model to cuda." ) else: raise RuntimeError("No GPU found. 
A GPU is needed for quantization.") return model def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None): """ A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit` modules from the `bitsandbytes`library. The function will be run recursively and replace `torch.nn.Linear` modules. Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[str]`): Names of the modules to not quantize convert. In practice we keep the `lm_head` in full precision for numerical stability reasons. current_key_name (`List[str]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert. """ if modules_to_not_convert is None: modules_to_not_convert = [] model, has_been_replaced = _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert, current_key_name ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ): """ Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. """ # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily has_been_replaced = False for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) proceed = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." 
in current_key_name_str) ) or key == current_key_name_str: proceed = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_8bit: bnb_module = bnb.nn.Linear8bitLt( module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, ) elif bnb_quantization_config.load_in_4bit: bnb_module = bnb.nn.Linear4bit( module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False") bnb_module.weight.data = module.weight.data bnb_module.weight.skip_zero_check = True if module.bias is not None: bnb_module.bias.data = module.bias.data bnb_module.bias.skip_zero_check = True bnb_module.requires_grad_(False) setattr(model, name, bnb_module) has_been_replaced = True if len(list(module.children())) > 0: _, _has_been_replaced = _replace_with_bnb_layers( module, bnb_quantization_config, modules_to_not_convert, current_key_name ) has_been_replaced = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced def get_keys_to_not_convert(model): r""" An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in int8. Parameters: model (`torch.nn.Module`): Input model """ # Create a copy of the model # with init_empty_weights(): # tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model = model tied_params = find_tied_parameters(tied_model) # For compatibility with Accelerate < 0.18 if isinstance(tied_params, dict): tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) else: tied_keys = sum(tied_params, []) has_tied_params = len(tied_keys) > 0 # Check if it is a base model is_base_model = False if hasattr(model, "base_model_prefix"): is_base_model = not hasattr(model, model.base_model_prefix) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head list_modules = list(model.named_children()) list_last_module = [list_modules[-1][0]] # add last module together with tied weights intersection = set(list_last_module) - set(tied_keys) list_untouched = list(set(tied_keys)) + list(intersection) # remove ".weight" from the keys names_to_remove = [".weight", ".bias"] filtered_module_names = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: name = name.replace(name_to_remove, "") filtered_module_names.append(name) return filtered_module_names def find_tied_parameters(model: nn.Module, **kwargs): """ Find the tied parameters in a given model. <Tip warning={true}> The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore them. </Tip> Args: model (`torch.nn.Module`): The model to inspect. Returns: List[List[str]]: A list of lists of parameter names being all tied together. 
Example: ```py >>> from collections import OrderedDict >>> import torch.nn as nn >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))])) >>> model.linear2.weight = model.linear1.weight >>> find_tied_parameters(model) [['linear1.weight', 'linear2.weight']] ``` """ # Initialize result and named_parameters before recursing. named_parameters = kwargs.get("named_parameters", None) prefix = kwargs.get("prefix", "") result = kwargs.get("result", {}) if named_parameters is None: named_parameters = {n: p for n, p in model.named_parameters()} else: # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters` # of the submodule it belongs to. So while recursing we track the names that are not in the initial # `named_parameters`. for name, parameter in model.named_parameters(): full_name = name if prefix == "" else f"{prefix}.{name}" if full_name not in named_parameters: # When we find one, it has to be one of the existing parameters. for new_name, new_param in named_parameters.items(): if new_param is parameter: if new_name not in result: result[new_name] = [] result[new_name].append(full_name) # Once we have treated direct parameters, we move to the child modules. for name, child in model.named_children(): child_name = name if prefix == "" else f"{prefix}.{name}" find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result) return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in result.items()]) class FindTiedParametersResult(list): """ This is a subclass of a list to handle backward compatibility for Transformers. Do not rely on the fact this is not a list or on the `values` method as in the future this will be removed. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def values(self): return sum([x[1:] for x in self], [])
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
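A hedged usage sketch of `quantize_model`; the toy model and 8-bit settings are assumptions, and running it requires `bitsandbytes` plus a CUDA device because the quantized model is moved onto the GPU.

```py
# Hedged sketch: 8-bit quantization of a toy model (requires bitsandbytes and a GPU).
import torch
import torch.nn as nn
from colossalai.quantization import BnbQuantizationConfig, quantize_model

model = nn.Sequential(nn.Linear(1024, 4096), nn.GELU(), nn.Linear(4096, 1024))
config = BnbQuantizationConfig(load_in_8bit=True, torch_dtype=torch.float16)
model = quantize_model(model, config)   # eligible nn.Linear layers become bnb.nn.Linear8bitLt
```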
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/quantization/utils.py
colossalai/quantization/utils.py
import torch import torch.distributed as dist from packaging import version from torch import Tensor from torch.distributed.fsdp._common_utils import _no_dispatch_record_stream from torch.distributed.utils import _p_assert def _all_gather_flat_param( self, padded_unsharded_flat_param: Tensor, ) -> Tensor: """ All-gather the handle's flat parameter to the destination ``padded_unsharded_flat_param``. Then switch to use the all-gathered tensor. """ _p_assert( hasattr(self, "process_group") and hasattr(self, "world_size"), "Expects a process group and world size to have been set via `shard()`", ) sharded_flat_param = self.flat_param.data expected_numel = sharded_flat_param.numel() * self.world_size _p_assert( padded_unsharded_flat_param.numel() == expected_numel, f"Expects {expected_numel} numel but got {padded_unsharded_flat_param.numel()}", ) pg = self._fake_process_group if self._use_fake_all_gather else self.process_group # HACK this should be handled by C10D if sharded_flat_param.is_cpu: # type: ignore[attr-defined] tensor_list = list(torch.chunk(padded_unsharded_flat_param, dist.get_world_size(pg))) work = dist.all_gather(tensor_list, sharded_flat_param, group=pg) else: if self._comm_hook is None: dist.all_gather_into_tensor( padded_unsharded_flat_param, sharded_flat_param, pg, ) else: self._comm_hook(None, padded_unsharded_flat_param, sharded_flat_param, pg) if self._offload_params: # In case of offloading, `flat_param.data` (i.e. sharded param) is # created on the pre-unshard stream. We need to hand it over to the # unshard stream for all-gather _no_dispatch_record_stream( sharded_flat_param, self._device_handle.current_stream(), # unshard_stream ) return padded_unsharded_flat_param def register_params_comm_hook(self, state: object, hook: callable): """Register a communication hook for FlatParamHandle. This is an enhancement that provides a flexible hook to users where they can specify how FSDP unshards parameters across multiple workers. .. warning :: FSDP communication hook should be registered before running an initial forward pass and only once. Args: state (object): Passed to the hook to maintain any state information during the training process. hook (Callable): Callable, which has one of the following signatures: 1) ``hook: Callable[torch.Tensor] -> None``: This function takes in a Python tensor, which represents the full, flattened, unsharded gradient with respect to all variables corresponding to the model this FSDP unit is wrapping (that are not wrapped by other FSDP sub-units). It then performs all necessary processing and returns ``None``; 2) ``hook: Callable[torch.Tensor, torch.Tensor] -> None``: This function takes in two Python tensors, the first one represents the full, flattened, unsharded gradient with respect to all variables corresponding to the model this FSDP unit is wrapping (that are not wrapped by other FSDP sub-units). The latter represents a pre-sized tensor to store a chunk of a sharded gradient after reduction. In both cases, callable performs all necessary processing and returns ``None``. Callables with signature 1 are expected to handle gradient communication for a `NO_SHARD` case. Callables with signature 2 are expected to handle gradient communication for sharded cases. 
""" if not self.check_is_root(): raise AssertionError("register_comm_hook can only be called on a root instance.") # if fsdp_state.sharding_strategy in HYBRID_SHARDING_STRATEGIES: # raise AssertionError( # f"Communication hook is not supported for hybrid strategies: {fsdp_state.sharding_strategy}" # ) if self._handle._comm_hook is not None: raise AssertionError("A communication hook is already registered") if not callable(hook): raise ValueError(f"The communication hook must be callable but got {hook}") self._handle._comm_hook = hook self._handle._comm_hook_state = state def patch_fsdp_params_comm_hook(): if version.parse(torch.__version__) >= version.parse("2.2.0"): from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp._flat_param import FlatParamHandle FlatParamHandle._comm_hook = None FlatParamHandle._comm_hook_state = None FlatParamHandle._all_gather_flat_param = _all_gather_flat_param FSDP.register_params_comm_hook = register_params_comm_hook else: raise RuntimeError("This fsdp_params_comm_hook patch is not supported while torch version under 2.2.0.")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
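A hedged sketch of wiring the patch above to the FP8 parameter-unshard hook defined in colossalai.quantization.fp8; it assumes torch >= 2.2, an initialized process group, and an existing model (`model` is hypothetical here).

```py
# Hedged sketch: patch FSDP (torch >= 2.2) and register the FP8 parameter-unshard hook.
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from colossalai.quantization.utils import patch_fsdp_params_comm_hook
from colossalai.quantization.fp8 import fp8_compress_fsdp_params_comm_hook

patch_fsdp_params_comm_hook()            # adds FSDP.register_params_comm_hook
fsdp_model = FSDP(model)                 # `model` is assumed to exist
fsdp_model.register_params_comm_hook(None, fp8_compress_fsdp_params_comm_hook)
```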
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/quantization/fp8.py
colossalai/quantization/fp8.py
import os from typing import Any, Optional, Tuple import numpy as np import torch import torch.distributed as dist import torch.nn.functional as F from packaging.version import Version from torch.distributed import ReduceOp from .fp8_config import dynamic_kernel SUPPORT_TORCH_COMPILE = Version(torch.__version__) >= Version("2.4.0") SCALE_BYTES = 4 try: cuda_arch = int("".join(str(i) for i in torch.cuda.get_device_capability())) except: cuda_arch = 0 class Handle: def __init__(self, handles=[], remain_ops=None) -> None: self.handles = handles self.remain_ops = remain_ops def wait(self): for handle in self.handles: handle.wait() if self.remain_ops: self.remain_ops() def process_group_is_intranode(pg): if pg is None: from torch.distributed.distributed_c10d import _get_default_group pg = _get_default_group() local_world_size = None for var in ["LOCAL_WORLD_SIZE", "OMPI_COMM_WORLD_LOCAL_SIZE", "SLURM_TASKS_PER_NODE"]: if var in os.environ: local_world_size = int(os.environ["LOCAL_WORLD_SIZE"]) if local_world_size is None: local_world_size = torch.cuda.device_count() group_ranks = dist.get_process_group_ranks(pg) group_ranks_node_ids = [rank // local_world_size for rank in group_ranks] return min(group_ranks_node_ids) == max(group_ranks_node_ids) def cast_to_fp8( inp: torch.Tensor, fp8_format="e4m3", per_channel_scale=False, out=None ) -> Tuple[torch.Tensor, torch.Tensor]: r""" casting torch Tensor into specified fp8 tensor with per-channel scaling or per-tensor scaling. Args: inp: input torch Tensor, should be in torch.FloatTensor, torch.HalfTensor, torch.BFloat16Tensor. scale: scaling factor for fp8 casting. If it is None, then it is computed automatically. Per-channel scaling is applied if input tensor is 2 dimension, otherwise, per-tensor scaling is applied. fp8_format: e4m3 or e5m2 Returns: Tuples: A tuple (fp8_tensor, scale) """ if inp.dtype not in [torch.float32, torch.float16, torch.bfloat16]: raise TypeError("Only float16, bfloat16, and float32 are allowed.") fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 fp8_max = torch.finfo(fp8_type).max if inp.numel() == 0: return inp.to(fp8_type), torch.tensor([1.0], device=inp.device) else: if per_channel_scale: per_channel_max = inp.abs().max(dim=-1).values.float() per_channel_max = torch.where(per_channel_max > 0, per_channel_max, 1.0) scale = fp8_max / per_channel_max[:, None] scale_inv = per_channel_max / fp8_max else: per_tensor_max = inp.abs().max().float() per_tensor_max = torch.where(per_tensor_max > 0, per_tensor_max, 1.0) scale = fp8_max / per_tensor_max scale_inv = 1.0 / scale if out is not None: ret = torch.mul(scale, inp.float(), out=out) else: ret = (scale * inp.float()).to(fp8_type) return ret, torch.unsqueeze(scale_inv, dim=0) def cast_from_fp8( inp: torch.Tensor, scale_inv: torch.Tensor, ret_type: torch.dtype, per_channel_scale=False, out=None ) -> torch.Tensor: r""" Args: inp: should be a fp8 torch tensor in one of the types: [torch.float8_e4m3fn, torch.float8_e5m2]. scale: scaling factor returned by cast_to_fp8 function. ret_type: the datatype of the returned tensor. 
Returns: torch.Tensor """ if inp.dtype not in [torch.float8_e4m3fn, torch.float8_e5m2]: raise TypeError("Only float8_e4m3fn and float8_e5m2 are allowed.") if per_channel_scale: if out is not None: return torch.mul(scale_inv[:, None], inp.float(), out=out) else: ret = scale_inv[:, None] * inp.float() else: if out is not None: return torch.mul(scale_inv, inp.float(), out=out) else: ret = scale_inv * inp.float() return ret.to(ret_type) def _all_reduce_fp8( tensor: torch.Tensor, fp8_format="e4m3", op=ReduceOp.SUM, group=None, async_op: bool = False ) -> Optional[Handle]: r""" This is an in-place operation for compressed all_reduce using fp8. It works like dist.all_reduce but during communication the data is cast to fp8 format. Args: tensor: torch.Tensor in fp32, fp16, bf16 datatype. fp8_format: e4m3 or e5m2 op: ReduceOp.SUM or ReduceOp.AVG Returns: None """ world_size = dist.get_world_size(group=group) input_type = tensor.dtype input_shape = tensor.shape input_device = tensor.device input_size = tensor.numel() flat_padded_x = tensor.flatten() assert op in [ReduceOp.SUM, ReduceOp.AVG], "op can only be ReduceOp.SUM or ReduceOp.AVG" if flat_padded_x.size(0) % world_size != 0: pad_size = world_size - flat_padded_x.size(0) % world_size flat_padded_x = F.pad(flat_padded_x, (0, pad_size)) fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 ret, scale = cast_to_fp8(flat_padded_x, fp8_format=fp8_format) inp = ret.view(torch.uint8) input_chunks = list(torch.chunk(inp, world_size, dim=0)) output_chunks = list(torch.chunk(torch.empty_like(inp), world_size, dim=0)) dist.all_to_all(output_chunks, input_chunks, group=group) scale_list = [torch.ones(1, dtype=scale.dtype, device=input_device) for _ in range(world_size)] dist.all_gather(scale_list, scale, group=group) summed_out = torch.zeros_like(output_chunks[0]).to(input_type) for scale, out in zip(scale_list, output_chunks): out = out.view(fp8_type) summed_out += cast_from_fp8(out, scale, input_type) if op == ReduceOp.AVG: summed_out.div_(world_size) summed_out_fp8, scale = cast_to_fp8(summed_out, fp8_format=fp8_format) gather_scale_handle = dist.all_gather(scale_list, scale, group=group, async_op=async_op) tensor_list = [torch.empty_like(summed_out_fp8.view(torch.uint8)) for _ in range(world_size)] gather_tensor_handle = dist.all_gather( tensor_list, summed_out_fp8.view(torch.uint8), group=group, async_op=async_op ) def cat_op(): for i in range(world_size): tensor_list[i] = tensor_list[i].view(fp8_type).to(input_type) * scale_list[i] out = torch.cat(tensor_list, dim=0) tensor.copy_(out[:input_size].view(input_shape).to(input_type)) if async_op: return Handle([gather_scale_handle, gather_tensor_handle], cat_op) else: cat_op() def all_reduce_fp8( tensor: torch.Tensor, fp8_format="e4m3", op=ReduceOp.SUM, group=None, async_op: bool = False ) -> Optional[Handle]: # fall back to default op due to performance issue return dist.all_reduce(tensor, op=op, group=group, async_op=async_op) @torch.compile(mode="max-autotune-no-cudagraphs", dynamic=False, disable=cuda_arch < 89) def _all_to_all_single_fp8( output, input, output_split_sizes=None, input_split_sizes=None, fp8_format="e5m2", group=None, async_op=False ) -> Optional[Handle]: r""" This is an in-place operation for compressed all_reduce using fp8. It works like dist.all_to_all_single but during communication the data is cast to fp8 format. Args: tensor: torch.Tensor in fp32, fp16, bf16 datatype. 
fp8_format: e4m3 or e5m2 Returns: None """ world_size = dist.get_world_size(group=group) input_type = input.dtype input_shape = input.shape input_device = input.device input = input.flatten() fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 ret, scale = cast_to_fp8(input, fp8_format=fp8_format) inp = ret.view(torch.uint8) if input_split_sizes is not None: input_split_sizes = [input_split_sizes[i] * np.prod(input_shape[1:]) for i in range(world_size)] input_chunks = list(torch.split(inp, input_split_sizes)) else: input_chunks = list(torch.chunk(inp, world_size, dim=0)) if output_split_sizes is not None: output_chunks = [ torch.empty((output_split_sizes[i] * np.prod(input_shape[1:]),), device=input_device, dtype=inp.dtype) for i in range(world_size) ] else: if dist.get_rank() == world_size - 1: output_chunks = [torch.empty_like(input_chunks[-1]) for _ in range(world_size)] else: output_chunks = [torch.empty_like(input_chunks[0]) for _ in range(world_size)] chunk_handle = dist.all_to_all(output_chunks, input_chunks, group=group, async_op=async_op) scale_list = [torch.ones(1, dtype=scale.dtype, device=input_device) for _ in range(world_size)] scale_hanle = dist.all_gather(scale_list, scale, group=group, async_op=async_op) def cast_op(): cast_output_chunk = [ cast_from_fp8(out.view(fp8_type), scale, input_type) for scale, out in zip(scale_list, output_chunks) ] tensor_out = torch.cat(cast_output_chunk, dim=0) outputs_shape = list(input_shape) if output_split_sizes is not None: outputs_shape[0] = sum(output_split_sizes) else: outputs_shape = input_shape output.data = tensor_out.view(outputs_shape).to(input_type) if async_op: return Handle([chunk_handle, scale_hanle], cast_op) else: cast_op() def all_to_all_single_fp8( output, input, output_split_sizes=None, input_split_sizes=None, fp8_format="e5m2", group=None, async_op=False ) -> Optional[Handle]: r""" This is wrapper for _all_to_all_single_fp8. """ if process_group_is_intranode(group): return dist.all_to_all_single( output, input, output_split_sizes=output_split_sizes, input_split_sizes=input_split_sizes, group=group, async_op=async_op, ) else: return _all_to_all_single_fp8( output, input, fp8_format=fp8_format, output_split_sizes=output_split_sizes, input_split_sizes=input_split_sizes, group=group, async_op=async_op, ) def cast_to_fp8_pipeline(inp: Any) -> None: """ Cast the hidden_states tensor of inp object to fp8 format before p2p communication in pipeline. The activations tensor is indexed by 'hidden_states' in the inp dict. After FP8 casting, the resulting tensor is saved as float16 or bfloat16 format but the size becomes halved. Metadata such as fp8_scale is saved into inp dict for communication. """ if inp is None: return # In pipeline parallelism, when inp is torch.Tensor, it only contains one element, thus can be omitted. if type(inp) == torch.Tensor: return assert "hidden_states" in inp, "required by pipeline parallelism." 
assert ( inp["hidden_states"].size(-1) % 2 == 0 ), "tensor size(-1) must be divisible by 2 to view Float8_e4m3fn as BFloat16 or Float16" inp_tensor = inp["hidden_states"] inp_dtype = inp_tensor.dtype min_val, max_val = inp_tensor.aminmax() amax = torch.maximum(min_val.abs(), max_val.abs()) finfo = torch.finfo(torch.float8_e4m3fn) if amax > finfo.max: fp8_type = torch.float8_e5m2 fp8_view_type = torch.float16 else: fp8_type = torch.float8_e4m3fn fp8_view_type = torch.bfloat16 finfo = torch.finfo(fp8_type) scale = torch.tensor(1.0).to(inp_tensor.device) if amax == 0.0 else finfo.max / amax.float() q_tensor = inp_tensor.data.float() * scale # Todo: Currently we use fp8_view_type <float16, bfloat16> to indicate which fp8 format is used. This is a temporary workaround due to 'Only support tensor for fast send'. # inp_tensor needs to be a float datatype to avoid error during gradient placement. inp_tensor.data = q_tensor.to(fp8_type).view(fp8_view_type) inp["fp8_scale"] = scale.float().reciprocal() inp["dtype"] = torch.zeros_like(scale).to(inp_dtype) def cast_from_fp8_pipeline(inp: Any, del_metadata=True) -> None: """ Cast the FP8 encoded hidden_states tensor back to original dtype after p2p communication in pipeline. del_metadata = False is useful when this function is called before p2p communication. """ if inp is None: return if type(inp) == torch.Tensor: return assert "hidden_states" in inp, "required by pipeline parallelism." inp_tensor = inp["hidden_states"] scale = inp["fp8_scale"] fp8_view_type = inp_tensor.dtype if fp8_view_type == torch.float16: fp8_type = torch.float8_e5m2 elif fp8_view_type == torch.bfloat16: fp8_type = torch.float8_e4m3fn else: raise TypeError("Only float16, bfloat16 are implemented.") inp_tensor.data = inp_tensor.data.view(fp8_type).to(inp["dtype"]) * scale if del_metadata: del inp["fp8_scale"] del inp["dtype"] def _reduce_scatter_fp8( output: torch.Tensor, input_list, group, fp8_format="e5m2", async_op: bool = False ) -> Optional[Handle]: r""" This is an in-place operation for compressed reduce_scatter using fp8. It works like dist.reduce_scatter but during communication the data is cast to fp8 format. Args: tensor: torch.Tensor in fp32, fp16, bf16 datatype. 
fp8_format: e4m3 or e5m2 Returns: None """ input_type = output.dtype fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 scale_list = [] cast_input_list = [] output_chunks = [] output_scale_list = [] for input in input_list: ret, scale = cast_to_fp8(input, fp8_format=fp8_format) scale_list.append(scale) ret = ret.view(torch.uint8) cast_input_list.append(ret) output_chunks.append(torch.empty_like(ret)) output_scale_list.append(torch.empty_like(scale)) chunk_handle = dist.all_to_all(output_chunks, cast_input_list, group=group, async_op=async_op) scale_handle = dist.all_to_all(output_scale_list, scale_list, group=group, async_op=async_op) def cast_op(): summed_out = torch.zeros_like(output_chunks[0]).to(input_type) for scale, out in zip(output_scale_list, output_chunks): out = out.view(fp8_type) summed_out += cast_from_fp8(out, scale, input_type) output.data = summed_out if async_op: return Handle([chunk_handle, scale_handle], cast_op) else: cast_op() def reduce_scatter_fp8( output: torch.Tensor, input_list, group, fp8_format="e5m2", async_op: bool = False ) -> Optional[Handle]: # fall back to default op due to performance issue return dist.reduce_scatter(output, input_list, group=group, async_op=async_op) def fp8_compress_ddp_grad_comm_hook_async( process_group: dist.ProcessGroup, bucket: dist.GradBucket, fp8_format: str = "e5m2", ) -> torch.futures.Future[torch.Tensor]: """ Compress by casting ``GradBucket`` to FP8 floating-point format divided by process group size. This DDP communication hook implements a simple gradient compression approach that casts ``GradBucket`` tensor to FP8 floating-point format (``torch.float8_e5m2`` or ``torch.bfloat16_e4m3``), and then divides it by the process group size. Once compressed gradient tensors are allreduced, the chained callback ``decompress`` casts it back to the input data type (such as ``float32``). 
Example:: >>> ddp_model.register_comm_hook(process_group, fp8_compress_ddp_grad_comm_hook_async) """ group_to_use = process_group if process_group is not None else dist.group.WORLD input_tensor = bucket.buffer() world_size = dist.get_world_size() input_type = input_tensor.dtype input_device = input_tensor.device flat_padded_x = input_tensor.flatten() if flat_padded_x.size(0) % world_size != 0: pad_size = world_size - flat_padded_x.size(0) % world_size flat_padded_x = F.pad(flat_padded_x, (0, pad_size)) fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 ret, scale = cast_to_fp8(flat_padded_x, fp8_format=fp8_format) inp = ret.view(torch.uint8) output_chunks_single = torch.empty_like(inp) split_sizes = [inp.numel() // world_size for _ in range(world_size)] fut0 = dist.all_to_all_single( output_chunks_single, inp, output_split_sizes=split_sizes, input_split_sizes=split_sizes, group=group_to_use, async_op=True, ).get_future() scale_list = [torch.ones(1, dtype=scale.dtype, device=input_device) for _ in range(world_size)] fut1 = dist.all_gather_into_tensor( torch.cat(scale_list, dim=0), scale, group=group_to_use, async_op=True ).get_future() all_to_all_fut = torch.futures.collect_all([fut0, fut1]) def sum_and_allgather(fut): output_chunks_single = fut.value()[0].wait()[0] scale_list_single = fut.value()[1].wait()[0] output_chunks = list(torch.chunk(output_chunks_single, world_size, dim=0)) scale_list = scale_list_single.chunk(world_size, dim=0) summed_out = torch.zeros_like(output_chunks[0]).to(input_type) for scale, out in zip(scale_list, output_chunks): out = out.view(fp8_type) summed_out += cast_from_fp8(out, scale, input_type) summed_out.div_(world_size) summed_out_fp8, scale = cast_to_fp8(summed_out, fp8_format=fp8_format) tensor_list_single = torch.empty(summed_out_fp8.size(0) * world_size, device=input_device, dtype=torch.uint8) fut2 = dist.all_gather_into_tensor( tensor_list_single, summed_out_fp8.view(torch.uint8), group=group_to_use, async_op=True ).get_future() scale_list = [torch.ones(1, dtype=scale.dtype, device=input_device) for _ in range(world_size)] fut3 = dist.all_gather_into_tensor( torch.cat(scale_list, dim=0), scale, group=group_to_use, async_op=True ).get_future() fut_combined2 = torch.futures.collect_all([fut2, fut3]) return fut_combined2 def decompress(fut): tensor_list_single = fut.value().wait()[0].value()[0] scale_list_single = fut.value().wait()[1].value()[0] tensor_list = list(torch.chunk(tensor_list_single, world_size, dim=0)) scale_list = scale_list_single.chunk(world_size, dim=0) for i in range(world_size): tensor_list[i] = tensor_list[i].view(fp8_type).to(input_type) * scale_list[i] out = torch.cat(tensor_list, dim=0) input_tensor_size = input_tensor.numel() input_shape = input_tensor.shape out = out[:input_tensor_size] input_tensor.copy_(out.view(input_shape).to(input_type)) return input_tensor return all_to_all_fut.then(sum_and_allgather).then(decompress) def fp8_compress_ddp_grad_comm_hook_sync( process_group: dist.ProcessGroup, bucket: dist.GradBucket, fp8_format="e5m2", ) -> torch.futures.Future[torch.Tensor]: """ Return a future that wraps the input, after the input is allreduced. However, the allreduce commnunication is synchronized. This breaks the overlapping between allreduce communication and backward compuation. This hook should **only** be used for debugging purposes, instead of the normal gradient synchronization. For asynchronized implementation, use fp8_compress_ddp_grad_comm_hook_async instead. 
Example:: >>> # xdoctest: +SKIP >>> ddp_model.register_comm_hook(None, fp8_compress_ddp_grad_comm_hook_sync) """ buffer = bucket.buffer() all_reduce_fp8(buffer, fp8_format=fp8_format) fut: torch.futures.Future[torch.Tensor] = torch.futures.Future() fut.set_result(bucket.buffer()) return fut def fp8_compress_fsdp_grad_comm_hook( state: object, unsharded_gradient_flattened: torch.Tensor, sharded_gradient: torch.Tensor, group=None, fp8_format="e5m2", ) -> None: """ This communication hook implements a simple gradient compression approach that casts unsharded_gradient_flattened tensor to FP8 floating-point format (``torch.float8_e5m2`` or ``torch.bfloat16_e4m3``), and then perform scatter_allreduce logic by using all_to_all and all_gather among the process group. Example:: >>> fsdp_model.register_comm_hook(None, fp8_compress_fsdp_grad_comm_hook) """ grad = unsharded_gradient_flattened fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 input_type = grad.dtype input_device = grad.device world_size = dist.get_world_size(group=group) grad_fp8, scale = cast_to_fp8(grad, fp8_format=fp8_format) uint8_buffer = torch.empty_like(grad_fp8).view(torch.uint8) dist.all_to_all_single(uint8_buffer, grad_fp8.view(torch.uint8), group=group) scale_list = [torch.ones(1, dtype=scale.dtype, device=input_device) for _ in range(world_size)] dist.all_gather(scale_list, scale, group=group) buffer_list = list(torch.chunk(uint8_buffer.view(fp8_type), world_size, dim=0)) sharded_gradient.zero_() for tensor, scale in zip(buffer_list, scale_list): sharded_gradient += cast_from_fp8(tensor, scale, input_type) def fp8_compress_fsdp_params_comm_hook( state: object, padded_unsharded_flat_param: torch.Tensor, sharded_flat_param: torch.Tensor, group=None, fp8_format="e5m2", ) -> None: """ This hook is pending the official support for parameters communication hook in FSDP, e.g. register_params_comm_hook. 
Example:: >>> fsdp_model.register_params_comm_hook(None, fp8_compress_fsdp_params_comm_hook) """ fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 fp8_max = torch.finfo(fp8_type).max inp = sharded_flat_param out = padded_unsharded_flat_param per_tensor_max = inp.abs().max().float() per_tensor_max = torch.where(per_tensor_max > 0, per_tensor_max, 1.0) dist.all_reduce(per_tensor_max, op=torch.distributed.ReduceOp.MAX, group=group) scale = fp8_max / per_tensor_max fp8_sharded_flat_param = (scale * inp.float()).to(fp8_type).view(torch.uint8) fp8_out = torch.empty(out.shape, dtype=torch.uint8, device=out.device) dist.all_gather_into_tensor( fp8_out, fp8_sharded_flat_param, group=group, ) padded_unsharded_flat_param.copy_((fp8_out.view(fp8_type).float() / scale).to(out.dtype)) def split_chunk_by_channel( chunk: torch.Tensor, channel_size: int, num_channels: int, rank: int = 0, world_size: int = 1 ): offset = chunk.numel() * rank end = offset + chunk.numel() break_points = [x for x in range(0, channel_size * num_channels + 1, channel_size) if offset <= x <= end] if len(break_points) == 0 or break_points[0] > offset: break_points.insert(0, offset) if break_points[-1] < end: break_points.append(end) sizes = [b - a for a, b in zip(break_points[:-1], break_points[1:])] return chunk.split(sizes) @torch.compile(mode="max-autotune-no-cudagraphs", dynamic=False, disable=cuda_arch < 89) def _all_to_all_fp8(output_list, input_list, group=None, fp8_format="e5m2", async_op=False): world_size = dist.get_world_size(group) input_type = input_list[0].dtype fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 scale_list = [] tensor_list = [] for i in range(world_size): input_tensor = input_list[i] ret, scale = cast_to_fp8(input_tensor, fp8_format=fp8_format) scale_list.append(scale) ret = ret.view(torch.uint8) tensor_list.append(ret) output_scale_list = [torch.empty_like(x) for x in scale_list] output_tensor_list = [torch.empty_like(x) for x in tensor_list] tensor_hanle = dist.all_to_all(output_tensor_list, tensor_list, group=group, async_op=async_op) scale_handle = dist.all_to_all(output_scale_list, scale_list, group=group, async_op=async_op) def cast_op(): for i in range(world_size): scale = output_scale_list[i] tensor = output_tensor_list[i] tensor = tensor.view(fp8_type) output_list[i].copy_(cast_from_fp8(tensor, scale, input_type)) if async_op: return Handle([tensor_hanle, scale_handle], cast_op) else: cast_op() def all_to_all_fp8(output_list, input_list, group=None, fp8_format="e5m2", async_op=False): if process_group_is_intranode(group): return dist.all_to_all(output_list, input_list, group=group, async_op=async_op) else: return _all_to_all_fp8(output_list, input_list, group=group, fp8_format=fp8_format, async_op=async_op) @torch.compile(mode="max-autotune-no-cudagraphs", dynamic=False, disable=cuda_arch < 89) def _all_gather_fp8(output_list, input_, group=None, fp8_format="e5m2", async_op: bool = False) -> Optional[Handle]: world_size = dist.get_world_size(group) input_type = input_.dtype ret, scale = cast_to_fp8(input_, fp8_format=fp8_format) fp8_type = ret.dtype input_ = ret.view(torch.uint8) tensor_list = [torch.empty_like(input_) for _ in range(world_size)] scale_list = [torch.ones(1, dtype=scale.dtype, device=input_.device) for _ in range(world_size)] chunk_handle = dist.all_gather(tensor_list, input_, group=group, async_op=async_op) scale_hanle = dist.all_gather(scale_list, scale, group=group, async_op=async_op) def cast_op(): for i in 
range(world_size): output = tensor_list[i].view(fp8_type) scale = scale_list[i] output_list[i].copy_(cast_from_fp8(output, scale, input_type)) if async_op: return Handle([chunk_handle, scale_hanle], cast_op) else: cast_op() def all_gather_fp8(output_list, input_, group=None, fp8_format="e5m2", async_op: bool = False) -> Optional[Handle]: if process_group_is_intranode(group): return dist.all_gather(output_list, input_, group=group, async_op=async_op) else: return _all_gather_fp8(output_list, input_, group=group, fp8_format=fp8_format, async_op=async_op) @torch.compile(mode="max-autotune-no-cudagraphs", dynamic=False, disable=cuda_arch < 89) def all_gather_fp8_lagacy( output_list, input_, group=None, fp8_format="e5m2", async_op: bool = False ) -> Optional[Handle]: world_size = dist.get_world_size(group) shape = input_.shape input_type = input_.dtype fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 combined_buffer = torch.empty(world_size * (SCALE_BYTES + input_.numel()), dtype=torch.uint8, device=input_.device) combined_buffers = list(combined_buffer.chunk(world_size, dim=0)) cur_buffer = combined_buffers[dist.get_rank(group)] ret = cur_buffer[SCALE_BYTES:].view(fp8_type) ret, scale = cast_to_fp8(input_.view(-1), fp8_format=fp8_format, out=ret) cur_buffer[:SCALE_BYTES].view(torch.float)[0] = scale # cur_buffer[:SCALE_BYTES] = scale.unsqueeze(0).view(torch.uint8) dist.all_gather(combined_buffers, cur_buffer, group=group, async_op=async_op) for out, buf in zip(output_list, combined_buffers): scale = buf[:SCALE_BYTES].clone().view(scale.dtype) output = buf[SCALE_BYTES:].view(fp8_type) cast_from_fp8(output.view(shape), scale, input_type, out=out) # output = combined_buffer.view(world_size, -1)[:, SCALE_BYTES:].view(fp8_type) # scales = combined_buffer.view(world_size, -1)[:, :SCALE_BYTES].view(torch.float) # output = output.float() * scales # for i, out in enumerate(output_list): # out.copy_(output[i].view(shape)) @torch.compile(mode="max-autotune-no-cudagraphs", dynamic=False, disable=cuda_arch < 89) def all_gather_fp8_ring(output_list, input_, group=None, fp8_format="e5m2", async_op: bool = False) -> Optional[Handle]: world_size = dist.get_world_size(group) rank = dist.get_rank(group) send_rank = (rank + 1) % world_size recv_rank = (rank - 1) % world_size shape = input_.shape input_type = input_.dtype fp8_type = torch.float8_e4m3fn if fp8_format == "e4m3" else torch.float8_e5m2 combined_buffer = torch.empty(world_size * (SCALE_BYTES + input_.numel()), dtype=torch.uint8, device=input_.device) combined_buffers = list(combined_buffer.chunk(world_size, dim=0)) cur_buffer = combined_buffers[dist.get_rank(group)] ret = cur_buffer[SCALE_BYTES:].view(fp8_type) ret, scale = cast_to_fp8(input_.view(-1), fp8_format=fp8_format, out=ret) # cur_buffer[:SCALE_BYTES] = scale.unsqueeze(0).view(torch.uint8) cur_buffer[:SCALE_BYTES].view(torch.float)[0] = scale def send_recv(idx): send_idx = (rank - idx) % world_size recv_idx = (rank - idx - 1) % world_size ops = dist.batch_isend_irecv( [ dist.P2POp(dist.isend, combined_buffers[send_idx], send_rank, group=group), dist.P2POp(dist.irecv, combined_buffers[recv_idx], recv_rank, group=group), ] ) return ops def cast(idx): cast_idx = (rank - idx - 1) % world_size scale = combined_buffers[cast_idx][:SCALE_BYTES].clone().view(torch.float) output = combined_buffers[cast_idx][SCALE_BYTES:].view(fp8_type) cast_from_fp8(output.view(shape), scale, input_type, out=output_list[cast_idx]) # warmup ops = send_recv(0) output_list[rank].copy_(input_) 
for op in ops: op.wait() ops = [] # 1p-1c for i in range(1, world_size - 1): new_ops = send_recv(i) for op in ops: op.wait() cast(i - 1) ops = new_ops # cooldown for op in ops: op.wait() cast(world_size - 2) class _LinearFp8(torch.autograd.Function): @staticmethod def forward( ctx: Any, x: torch.Tensor, w: torch.Tensor, bias: Optional[torch.Tensor], ) -> Any: assert ( x.dtype in (torch.bfloat16, torch.float16) and x.dtype == w.dtype ), "Only float16 and bfloat16 are allowed." if bias is not None: assert bias.dtype == x.dtype, "Bias should have the same dtype as input." # ensure x and w are row-major x = x.contiguous() w = w.contiguous() ctx.x_shape = x.shape ctx.has_bias = bias is not None ctx.out_dtype = x.dtype x = x.reshape(-1, x.shape[-1]) x_fp8, inv_scale_x = cast_to_fp8(x, fp8_format="e4m3") w_fp8, inv_scale_w = cast_to_fp8(w, fp8_format="e4m3") ctx.x_fp8 = x_fp8 ctx.w_fp8_t = w_fp8.t() ctx.inv_scale_x = inv_scale_x ctx.inv_scale_w = inv_scale_w out = torch._scaled_mm( x_fp8, ctx.w_fp8_t, bias=bias, out_dtype=ctx.out_dtype, scale_a=inv_scale_x, scale_b=inv_scale_w, use_fast_accum=True, )[0] return out.reshape(*ctx.x_shape[:-1], w.shape[0]) @staticmethod def backward(ctx: Any, out_grad) -> Any: out_grad = out_grad.reshape(-1, out_grad.shape[-1]) out_grad_fp8, out_grad_scale = cast_to_fp8(out_grad, fp8_format="e5m2") x_grad = torch._scaled_mm( out_grad_fp8, ctx.w_fp8_t.contiguous().t(), out_dtype=ctx.out_dtype, scale_a=out_grad_scale, scale_b=ctx.inv_scale_w, use_fast_accum=True, )[0] w_grad = torch._scaled_mm( out_grad_fp8.t().contiguous(), ctx.x_fp8.t().contiguous().t(), out_dtype=ctx.out_dtype, scale_a=out_grad_scale, scale_b=ctx.inv_scale_x, use_fast_accum=True, )[0] bias_grad = None if ctx.has_bias: bias_grad = out_grad.sum(0) return x_grad.reshape(ctx.x_shape), w_grad, bias_grad @torch.compile(mode="max-autotune-no-cudagraphs", disable=not SUPPORT_TORCH_COMPILE, dynamic=dynamic_kernel) def _linear_fp8(input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor: return _LinearFp8.apply(input, weight, bias) def linear_fp8(input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor: if input.shape[-1] % 16 != 0 or np.prod(input.shape[:-1]) % 16 != 0: return F.linear(input, weight, bias) out = _linear_fp8(input, weight, bias) return out
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
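Below is a minimal usage sketch (not part of the repository) showing how `linear_fp8` from `colossalai.quantization.fp8` can stand in for `F.linear`. The explicit compute-capability guard is an assumption added for portability: the FP8 path relies on `torch._scaled_mm`, which needs an sm89+ GPU, while `linear_fp8` itself only falls back to `F.linear` when the input shapes are not multiples of 16.

import torch
import torch.nn.functional as F
from colossalai.quantization.fp8 import linear_fp8

# Shapes chosen so both the flattened leading dims and the feature dim are multiples of 16;
# otherwise linear_fp8 silently dispatches to F.linear.
x = torch.randn(32, 64, device="cuda", dtype=torch.bfloat16)
w = torch.randn(128, 64, device="cuda", dtype=torch.bfloat16)  # (out_features, in_features)
b = torch.randn(128, device="cuda", dtype=torch.bfloat16)

if torch.cuda.get_device_capability() >= (8, 9):
    out = linear_fp8(x, w, b)   # FP8 matmul via torch._scaled_mm
else:
    out = F.linear(x, w, b)     # plain fallback on GPUs without FP8 support
print(out.shape)                # torch.Size([32, 128])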
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/quantization/__init__.py
colossalai/quantization/__init__.py
from .bnb import quantize_model from .bnb_config import BnbQuantizationConfig __all__ = [ "BnbQuantizationConfig", "quantize_model", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/quantization/fp8_config.py
colossalai/quantization/fp8_config.py
dynamic_kernel: bool = False
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/quantization/bnb_config.py
colossalai/quantization/bnb_config.py
# adapted from Hugging Face accelerate/utils/dataclasses.py import warnings from dataclasses import dataclass, field from typing import List import torch @dataclass class BnbQuantizationConfig: """ A plugin to enable BitsAndBytes 4bit and 8bit quantization """ load_in_8bit: bool = field(default=False, metadata={"help": "enable 8bit quantization."}) llm_int8_threshold: float = field( default=6.0, metadata={"help": "value of the outlier threshold. only relevant when load_in_8bit=True"} ) load_in_4bit: bool = field(default=False, metadata={"help": "enable 4bit quantization."}) bnb_4bit_quant_type: str = field( default="fp4", metadata={ "help": "set the quantization data type in the `bnb.nn.Linear4Bit` layers. Options are {'fp4','nf4'}." }, ) bnb_4bit_use_double_quant: bool = field( default=False, metadata={ "help": "enable nested quantization where the quantization constants from the first quantization are quantized again." }, ) bnb_4bit_compute_dtype: str = field( default="fp16", metadata={ "help": "This sets the computational type which might be different than the input type. For example, inputs might be " "fp32, but computation can be set to bf16 for speedups. Options are {'fp32','fp16','bf16'}." }, ) torch_dtype: torch.dtype = field( default=None, metadata={ "help": "this sets the dtype of the remaining non quantized layers. `bitsandbytes` library suggests to set the value " "to `torch.float16` for 8 bit model and use the same dtype as the compute dtype for 4 bit model " }, ) skip_modules: List[str] = field( default=None, metadata={ "help": "an explicit list of the modules that we don't quantize. The dtype of these modules will be `torch_dtype`." }, ) keep_in_fp32_modules: List[str] = field( default=None, metadata={"help": "an explicit list of the modules that we don't quantize. 
We keep them in `torch.float32`."}, ) def __post_init__(self): if isinstance(self.bnb_4bit_compute_dtype, str): if self.bnb_4bit_compute_dtype == "fp32": self.bnb_4bit_compute_dtype = torch.float32 elif self.bnb_4bit_compute_dtype == "fp16": self.bnb_4bit_compute_dtype = torch.float16 elif self.bnb_4bit_compute_dtype == "bf16": self.bnb_4bit_compute_dtype = torch.bfloat16 else: raise ValueError( f"bnb_4bit_compute_dtype must be in ['fp32','fp16','bf16'] but found {self.bnb_4bit_compute_dtype}" ) elif not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype") if self.skip_modules is not None and not isinstance(self.skip_modules, list): raise ValueError("skip_modules must be a list of strings") if self.keep_in_fp32_modules is not None and not isinstance(self.keep_in_fp32_modules, list): raise ValueError("keep_in_fp_32_modules must be a list of strings") if self.load_in_4bit: self.target_dtype = "int4" if self.load_in_8bit: self.target_dtype = torch.int8 if self.load_in_4bit and self.llm_int8_threshold != 6.0: warnings.warn("llm_int8_threshold can only be used for model loaded in 8bit") if isinstance(self.torch_dtype, str): if self.torch_dtype == "fp32": self.torch_dtype = torch.float32 elif self.torch_dtype == "fp16": self.torch_dtype = torch.float16 elif self.torch_dtype == "bf16": self.torch_dtype = torch.bfloat16 else: raise ValueError(f"torch_dtype must be in ['fp32','fp16','bf16'] but found {self.torch_dtype}") if self.load_in_8bit and self.torch_dtype is None: self.torch_dtype = torch.float16 if self.load_in_4bit and self.torch_dtype is None: self.torch_dtype = self.bnb_4bit_compute_dtype if not isinstance(self.torch_dtype, torch.dtype): raise ValueError("torch_dtype must be a torch.dtype")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
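A small construction sketch (not from the repository) for `BnbQuantizationConfig`: it shows how `__post_init__` maps the string dtype shorthands to torch dtypes and fills `torch_dtype` for a 4-bit setup. The field values are illustrative only.

import torch
from colossalai.quantization import BnbQuantizationConfig

cfg = BnbQuantizationConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",          # bitsandbytes' normal-float 4-bit format
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype="bf16",      # string is converted to torch.bfloat16
)
assert cfg.bnb_4bit_compute_dtype is torch.bfloat16
assert cfg.torch_dtype is torch.bfloat16   # copied from the compute dtype for 4-bit models
assert cfg.target_dtype == "int4"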
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/quantization/fp8_hook.py
colossalai/quantization/fp8_hook.py
import torch.nn.functional as F from colossalai.quantization.fp8 import linear_fp8 from colossalai.tensor.param_op_hook import ColoParamOpHook class FP8Hook(ColoParamOpHook): def pre_forward(self, params) -> None: pass def post_forward(self, params) -> None: pass def pre_backward(self, params) -> None: pass def post_backward(self, params) -> None: pass def rewrite_op(self, func): if func is F.linear: return linear_fp8 return func
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
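The hook above only swaps the op, so here is a quick sketch (not from the repository) of what `rewrite_op` does in isolation. How the hook is actually registered on parameters goes through the `ColoParamOpHook` machinery, which lives outside this file.

import torch.nn.functional as F
from colossalai.quantization.fp8 import linear_fp8
from colossalai.quantization.fp8_hook import FP8Hook

hook = FP8Hook()
assert hook.rewrite_op(F.linear) is linear_fp8   # F.linear is rerouted to the FP8 kernel
assert hook.rewrite_op(F.relu) is F.relu         # every other op passes through unchanged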
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/wrapper.py
colossalai/zero/wrapper.py
from copy import copy from typing import Dict, Optional import torch import torch.nn as nn from .gemini import GeminiDDP def zero_model_wrapper( model: nn.Module, zero_stage: int = 1, gemini_config: Optional[Dict] = None, verbose: bool = False ): """This wrapper function is used to wrap your training model for ZeRO DDP. Example: >>> with ColoInitContext(): >>> my_model = Bert() >>> my_optim = SGD(my_model.parameters(), lr = 1e-3) >>> zero_model = zero_model_wrapper(my_model, zero_stage=1) >>> zero_optim = zero_optim_wrapper(zero_model, my_optim) Args: model (nn.Module): The model used in ZeRO DDP. zero_stage (int, optional): The stage of ZeRO DDP. You can find more information in ZeRO's paper. https://arxiv.org/abs/1910.02054 gemini_config (dict, optional): The configuration dictionary of `GeminiDDP`. `GeminiDDP` is enabled when the stage is set to 3. You can set the arguments of `GeminiDDP` in the gemini_config. Here is an example where we set the device of the model, the placement policy of Gemini, and the size of hidden dimension to help Gemini find out a unified chunk size. Example: >>> config_dict = dict(device=torch.cuda.current_device(), hidden_dim=1024, placement_policy='auto') >>> model = zero_model_wrapper(model, zero_stage=3, gemini_config=config_dict) """ assert zero_stage in [1, 2, 3], "The stage of ZeRO should be 1, 2 or 3" if gemini_config is None: gemini_config = dict() if zero_stage in [1, 2]: wrapped_model = model else: wrapped_model = GeminiDDP(model, **gemini_config, verbose=verbose) setattr(wrapped_model, "_colo_zero_stage", zero_stage) return wrapped_model def zero_optim_wrapper( model: nn.Module, optimizer: torch.optim.Optimizer, initial_scale: float = 2**16, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, min_scale: float = 1, max_scale: float = 2**32, max_norm: float = 0.0, norm_type: float = 2.0, optim_config: Optional[Dict] = None, verbose: bool = False, ): """This wrapper function is used to wrap your training optimizer for ZeRO DDP. Args: model (nn.Module): Your model wrapped by `zero_model_wrapper` optimizer (torch.optim.Optimizer): Your initialized optimizer initial_scale (float, optional): initial_scale used by DynamicGradScaler. min_scale (float, optional): min_scale used by DynamicGradScaler. growth_factor (float, optional): growth_factor used by DynamicGradScaler. backoff_factor (float, optional): backoff_factor used by DynamicGradScaler. growth_interval (float, optional): growth_interval used by DynamicGradScaler. hysteresis (float, optional): hysteresis used by DynamicGradScaler. max_scale (int, optional): max_scale used by DynamicGradScaler. max_norm (float, optional): max_norm used for `clip_grad_norm`. You should notice that you shall not do clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm. norm_type (float, optional): norm_type used for `clip_grad_norm`. optim_config (dict, optional): The configuration used for the ZeRO optimizer. Example: >>> zero2_config = dict(reduce_bucket_size=12 * 1024 * 1024, overlap_communication=True) >>> optim = zero_optim_wrapper(model, optim, optim_config=zero2_config) verbose (bool, optional): Whether to print the verbose info. 
""" assert hasattr(model, "_colo_zero_stage"), "You should use `zero_ddp_wrapper` first" zero_stage = getattr(model, "_colo_zero_stage") assert norm_type == 2.0, "Current ZeRO optimizers only support 'norm_type=2'" if optim_config is None: config_dict = dict() else: config_dict = copy(optim_config) config_dict["initial_scale"] = initial_scale config_dict["growth_factor"] = growth_factor config_dict["backoff_factor"] = backoff_factor config_dict["growth_interval"] = growth_interval config_dict["hysteresis"] = hysteresis config_dict["min_scale"] = min_scale config_dict["max_scale"] = max_scale if zero_stage in [1, 2]: from colossalai.zero.low_level import LowLevelZeroOptimizer config_dict["partition_grad"] = zero_stage == 2 config_dict["clip_grad_norm"] = max_norm return LowLevelZeroOptimizer(optimizer, **config_dict, verbose=verbose) else: from colossalai.zero.gemini.gemini_optimizer import GeminiOptimizer config_dict["clipping_norm"] = max_norm return GeminiOptimizer(optimizer, model, **config_dict, verbose=verbose)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
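A hedged end-to-end sketch of the two wrappers, assuming the default process group has already been initialized (for example via `colossalai.launch_from_torch` or `torch.distributed.init_process_group`) and a CUDA device is available; the model, sizes, and optimizer config below are placeholders.

import torch
import torch.nn as nn
from colossalai.zero import zero_model_wrapper, zero_optim_wrapper

model = nn.Linear(1024, 1024).cuda().half()
optim = torch.optim.Adam(model.parameters(), lr=1e-3)

# Stage 2 keeps the module unchanged but tags it with the ZeRO stage.
zero_model = zero_model_wrapper(model, zero_stage=2)
zero_optim = zero_optim_wrapper(
    zero_model,
    optim,
    max_norm=1.0,   # gradient clipping is delegated to the ZeRO optimizer
    optim_config=dict(reduce_bucket_size=12 * 1024 * 1024, overlap_communication=True),
)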
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/__init__.py
colossalai/zero/__init__.py
from .gemini import GeminiAdamOptimizer, GeminiDDP, GeminiOptimizer, get_static_torch_model from .low_level import LowLevelZeroOptimizer from .wrapper import zero_model_wrapper, zero_optim_wrapper __all__ = [ "GeminiDDP", "GeminiOptimizer", "GeminiAdamOptimizer", "zero_model_wrapper", "zero_optim_wrapper", "LowLevelZeroOptimizer", "get_static_torch_model", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/low_level_optim.py
colossalai/zero/low_level/low_level_optim.py
# this code is inspired by the DeepSpeed library and implemented with our own design from scratch import copy from contextlib import contextmanager, nullcontext from functools import partial from typing import Dict, Iterator, List, Optional, Tuple, Union from weakref import proxy import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor, inf from torch.distributed import ProcessGroup from torch.optim import Optimizer from colossalai.accelerator import get_accelerator from colossalai.amp.naive_amp.mixed_precision_mixin import ( BF16MixedPrecisionMixin, FP16MixedPrecisionMixin, MixedPrecisionMixin, ) from colossalai.checkpoint_io.utils import calculate_tensor_size from colossalai.interface import OptimizerWrapper from colossalai.logging import get_dist_logger from colossalai.quantization.fp8 import all_gather_fp8, all_reduce_fp8, reduce_scatter_fp8 from colossalai.tensor.moe_tensor.api import is_moe_tensor from ._utils import ( all_gather_into_flat_tensor_nd, calculate_global_norm_from_list, get_nd_rank, get_nd_world_size, has_inf_or_nan, release_param_grad, sync_tensor, ) from .bookkeeping import BucketStore, GradientStore, TensorBucket from .zero_hook import set_all_gather_handle, wait_all_gather_handle class LowLevelZeroFP16MixedPrecisionMixin(FP16MixedPrecisionMixin): def __init__( self, num_working_param_groups: int, pg_to_grad_store: Dict[ProcessGroup, GradientStore], initial_scale: float = 2**16, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, ) -> None: super().__init__( initial_scale, min_scale, growth_factor, backoff_factor, growth_interval, hysteresis, max_scale, ) self.num_working_param_groups = num_working_param_groups self.pg_to_grad_store = pg_to_grad_store def check_local_overflow(self) -> bool: for store in self.pg_to_grad_store.values(): for group_id in range(self.num_working_param_groups): for avg_grad in store.get_working_grads_by_group_id(group_id): if avg_grad is not None and has_inf_or_nan(avg_grad): return True return False class LowLevelZeroOptimizer(OptimizerWrapper): """Optimizer used for ZeRO-1 and ZeRO-2.""" def __init__( self, optimizer: Optimizer, pg_to_param_list: Optional[Dict[Union[ProcessGroup, Tuple[ProcessGroup, ...]], List[nn.Parameter]]] = None, initial_scale: int = 2**16, # grad scaler config min_scale: int = 1, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, hysteresis: int = 2, max_scale: int = 2**24, clip_grad_norm: float = 0.0, # grad clipping verbose: bool = False, reduce_bucket_size: int = 1024 * 1024, # communication communication_dtype: Optional[torch.dtype] = None, overlap_communication: bool = False, partition_grad: bool = False, # stage 2 flag cpu_offload: bool = False, # cpu offload dp_process_group: Optional[ProcessGroup] = None, extra_dp_group: Optional[ProcessGroup] = None, forced_dtype: Optional[torch.dtype] = None, master_weights: bool = True, # master weights overlap_allgather: bool = False, fp8_communication: bool = False, backward_context=None, ): super(LowLevelZeroOptimizer, self).__init__(optim=optimizer) self._dtype = self.optim.param_groups[0]["params"][0].dtype self._logger = get_dist_logger() self._verbose = verbose if (dp_process_group is not None) and (pg_to_param_list is not None): raise ValueError("dp_process_group and pg_to_param_list should not be provided at the same time.") if pg_to_param_list is None and extra_dp_group is not None and 
dp_process_group is None: raise ValueError("dp_process_group should be provided when extra_dp_group is provided.") if pg_to_param_list is None and extra_dp_group is not None and fp8_communication: raise ValueError( "fp8_communication is not supported when pg_to_param_list is None and extra_dp_group is provided." ) if pg_to_param_list is None: unique_dp_group = dist.group.WORLD if dp_process_group is None else dp_process_group if extra_dp_group is not None: unique_dp_group = (extra_dp_group, unique_dp_group) pg_to_param_list = {unique_dp_group: []} for group in self.optim.param_groups: pg_to_param_list[unique_dp_group].extend(group["params"]) self.pg_to_param_list = pg_to_param_list param_to_pg = {} for grp, param_list in pg_to_param_list.items(): for p in param_list: assert isinstance(p, nn.Parameter), f"got {type(p)}" param_to_pg[p] = grp self.param_to_pg = param_to_pg # stage 2 self._partition_grads = partition_grad self._cpu_offload = cpu_offload # grad accumulation self.require_grad_sync = True # working and master params for mixed precision training self._working_param_groups = dict() self._master_param_groups_of_current_rank = dict() # communication params self._overlap_communication = overlap_communication self._overlap_allgather = overlap_allgather self._reduce_bucket_size = reduce_bucket_size self._communication_dtype = communication_dtype self._fp8_communication = fp8_communication self._backward_context = backward_context # gradient clipping self._clip_grad_norm = clip_grad_norm # master weights copy self._master_weights = master_weights if forced_dtype: for group in self.optim.param_groups: group_params = group["params"] for param in group_params: param.data = param.data.to(forced_dtype) self._dtype = forced_dtype # check argument conflict self._sanity_checks() # ParameterStore will manage the tensor buffers used for zero # it will not manage the tensors used by mixed precision training # record the padding size of each param self._padding_map = dict() # padded working param is all-gather buffer and it shares the same memory with working param self._working_param_to_padded_working_param = dict() # mapping working param and master param self.master_to_working_param = dict() self.working_to_master_param = dict() # NOTE need to gurantee the order of process group is the same accross all ranks # process_group <---> xxx_store # process_group <---> [param1 param2 ...] 
# each process group have its own stores # param belonging to one process_group will use corresponding store self.pg_to_grad_store = { pg: GradientStore(pg, partition_grad=self._partition_grads) for pg in self.pg_to_param_list } # param id to grad store, have to use id(param) as key since it is used in stores self.pid_to_grad_store = {id(param): self.pg_to_grad_store[param_to_pg[param]] for param in param_to_pg} self.pg_to_bucket_store = {pg: BucketStore(pg, reduce_bucket_size) for pg in self.pg_to_param_list} # param id to bucket store, have to use id(param) as key since it is used in stores self.pid_to_bucket_store = {id(param): self.pg_to_bucket_store[param_to_pg[param]] for param in param_to_pg} # iterate over the param group in the optimizer # partition these param groups for data parallel training # and add buffers to parameter store for future access for group_id, param_group in enumerate(self.optim.param_groups): group_params = list() for param in param_group["params"]: if param.requires_grad: group_params.append(param) # add the working params to working_param_groups for bookkeeping self._working_param_groups[group_id] = group_params master_param_current_rank = self._create_master_param_current_rank(group_params) self._master_param_groups_of_current_rank[group_id] = master_param_current_rank # need to replace the params in the `params` field in the optimizer # so that when the optimizer calls step(), it only updates the tensors # managed by this data parallel rank param_group["params"] = master_param_current_rank # reduction hook is only used if overlapping communication # or stage 2 is used # if it is stage 1 without overlapping, no hook will be attached self.grad_handles = [] if self._overlap_communication or self._partition_grads: self._attach_reduction_hook() # initialize mixed precision mixin self.mixed_precision_mixin: Optional[MixedPrecisionMixin] = None if self._dtype is torch.float16: self.mixed_precision_mixin = LowLevelZeroFP16MixedPrecisionMixin( self.num_param_groups, self.pg_to_grad_store, initial_scale=initial_scale, min_scale=min_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, ) elif self._dtype is torch.bfloat16: self.mixed_precision_mixin = BF16MixedPrecisionMixin() self._current_grad_norm: Optional[float] = None def __del__(self): for hook in self.grad_handles: hook.remove() @property def dtype(self): return self._dtype @property def num_param_groups(self): return len(self._working_param_groups) def _sanity_checks(self): assert get_accelerator().name in ["cuda", "npu"], "device is required" for param_group in self.optim.param_groups: group_params = param_group["params"] for param in group_params: if not hasattr(param, "skip_zero_check") or param.skip_zero_check is False: assert ( param.dtype == self._dtype ), f"Parameters are expected to have the same dtype `{self._dtype}`, but got `{param.dtype}`" def _create_master_param_current_rank(self, param_list): # split each param evenly by world size params_current_rank = [] device = "cpu" if self._cpu_offload else get_accelerator().get_current_device() for param in param_list: padding_size = ( self.pid_to_bucket_store[id(param)].world_size - param.numel() % self.pid_to_bucket_store[id(param)].world_size ) % self.pid_to_bucket_store[id(param)].world_size self.record_param_padding_size(param, padding_size) with torch.no_grad(): if padding_size > 0: padding_param = torch.nn.functional.pad(param.data.view(-1), [0, 
padding_size]) # # reset working params' ptr when no master weights # if self._master_weights == False: param.data = padding_param[: param.numel()].view(param.shape) else: padding_param = param.data.view(-1) self._working_param_to_padded_working_param[param] = padding_param splited_params = padding_param.split( padding_param.numel() // self.pid_to_bucket_store[id(param)].world_size ) splited_params = splited_params[self.pid_to_bucket_store[id(param)].local_rank] # use fp32 when master_weights is True if self._master_weights is True: splited_param_current_rank = splited_params.detach().clone().float().to(device) else: splited_param_current_rank = splited_params params_current_rank.append(splited_param_current_rank) self.link_master_and_working_param(splited_param_current_rank, param) return params_current_rank ########################### # Backward Reduction Hook # ########################### def _attach_reduction_hook(self): # we iterate over the working params # on each param, we register a hook to its AccumulateGrad object self_weakref = proxy(self) def _grad_handler(param, group_id): # if run with no_sync context, would not sync grad when backward if self_weakref.require_grad_sync: self_weakref._add_to_bucket(param, group_id) for group_id in range(self.num_param_groups): param_group = self._working_param_groups[group_id] for param in param_group: if param.requires_grad: self.grad_handles.append( param.register_post_accumulate_grad_hook(partial(_grad_handler, group_id=group_id)) ) ####################### # Reduction Functions # ####################### def _run_reduction(self): for bucket_store in self.pg_to_bucket_store.values(): if bucket_store.num_elements_in_bucket() <= 0: continue bucket_store.build_grad_in_bucket() flat_grads = bucket_store.get_flatten_grad() flat_grads /= bucket_store.world_size # ready to add other tensors to bucket bucket_store.reset_num_elements_in_bucket() if self._overlap_communication: stream = bucket_store.comm_stream # in case of the memory being reused in the default stream flat_grads.record_stream(stream) # waiting for ops in the default stream finishing stream.wait_stream(get_accelerator().current_stream()) else: stream = get_accelerator().current_stream() with get_accelerator().stream(stream): group_id = bucket_store.current_group_id grad_dtype = flat_grads.dtype if self._communication_dtype is not None: flat_grads = flat_grads.to(self._communication_dtype) if not self._partition_grads: for i, sz in enumerate(bucket_store.sizes): grp = bucket_store.torch_pg if len(bucket_store.sizes) == 1 else bucket_store.torch_pg[i] if self._fp8_communication: all_reduce_fp8(flat_grads, group=grp) else: dist.all_reduce(flat_grads, group=grp) if flat_grads.dtype != grad_dtype: flat_grads = flat_grads.to(grad_dtype) flat_grads_per_rank = flat_grads.split(flat_grads.numel() // bucket_store.world_size) grad_in_bucket = bucket_store.get_grad() self._update_unpartitoned_grad(bucket_store, grad_in_bucket.values(), flat_grads_per_rank, group_id) else: cur_flat_grads = flat_grads for i, sz in enumerate(bucket_store.sizes): grp = bucket_store.torch_pg if len(bucket_store.sizes) == 1 else bucket_store.torch_pg[i] flat_grads_list = list(cur_flat_grads.split(len(cur_flat_grads) // sz)) received_grad = torch.empty_like(flat_grads_list[0]) if self._fp8_communication: reduce_scatter_fp8( received_grad, flat_grads_list, group=grp, ) else: dist.reduce_scatter_tensor(received_grad, cur_flat_grads, group=grp) cur_flat_grads = received_grad if received_grad.dtype != grad_dtype: 
received_grad = received_grad.to(grad_dtype) grad_in_bucket_current_rank = bucket_store.get_grad()[bucket_store.local_rank] self._update_partitoned_grad(bucket_store, grad_in_bucket_current_rank, received_grad, group_id, 1) bucket_store.reset() def _update_unpartitoned_grad( self, bucket_store: BucketStore, origin_grad_list: List, flat_grad_list: List, group_id: int ) -> None: for rank, grad_list in enumerate(origin_grad_list): sync_tensor(flat_grad_list[rank], grad_list) for grad in grad_list: param_id = bucket_store.get_param_id_of_grad(grad) self._add_grad(grad, bucket_store.world_size, group_id, param_id, rank) def _update_partitoned_grad( self, bucket_store: BucketStore, origin_grad_list: List, flat_grad: torch.Tensor, group_id: int, partition_num: int, ) -> None: sync_tensor(flat_grad, origin_grad_list) for grad in origin_grad_list: param_id = bucket_store.get_param_id_of_grad(grad) self._add_grad(grad, partition_num, group_id, param_id) def _add_grad( self, grad: torch.Tensor, partition_num: int, group_id: int, param_id: int, rank: int = 0, ) -> None: if ( len(self.pid_to_grad_store[param_id].get_partitioned_gradients_by_param_id(group_id, param_id)) < partition_num ): self.pid_to_grad_store[param_id].append_gradients_by_param_id(grad, group_id, param_id) else: self.pid_to_grad_store[param_id].add_gradients_by_param_id(grad, rank, group_id, param_id) def _add_to_bucket(self, param, group_id): param_size = param.numel() # check if the bucket is full # if full, will reduce the grads already in the bucket # or got a grad of param from another group # after reduction, the bucket will be empty if ( self.pid_to_bucket_store[id(param)].num_elements_in_bucket() + param_size > self._reduce_bucket_size or group_id != self.pid_to_bucket_store[id(param)].current_group_id ): self._run_reduction() padding_size = self.get_param_padding_size(param) self.pid_to_bucket_store[id(param)].add_param_grad(group_id, param, padding_size) ################################ # torch.optim.Optimizer methods ################################ def backward(self, loss, inputs=None, retain_graph=False): assert not ( self._partition_grads and not self.require_grad_sync ), "ZeRO2(partition_grads) and no_sync are not compatible" if self.mixed_precision_mixin is not None: loss = self.mixed_precision_mixin.pre_backward(loss) ctx = nullcontext() if self._backward_context is None else self._backward_context() with ctx: loss.backward(inputs=inputs, retain_graph=retain_graph) if not self.require_grad_sync: return self._reduce_grad(self._partition_grads) # clear reduced grads if self._overlap_communication: get_accelerator().synchronize() def backward_by_grad(self, tensor, grad, inputs: Tensor = None, retain_graph: bool = False): assert not ( self._partition_grads and not self.require_grad_sync ), "ZeRO2(partition_grads) and gradient accumulation(no_sync) are not compatible" if self.mixed_precision_mixin is not None: grad = self.mixed_precision_mixin.pre_backward_by_grad(tensor, grad) torch.autograd.backward( tensor, grad, inputs=inputs, retain_graph=retain_graph, ) if not self.require_grad_sync: return self._reduce_grad(self._partition_grads) # clear reduced grads if self._overlap_communication: get_accelerator().synchronize() def zero_bucket_stores(self): for bucket_store in self.pg_to_bucket_store.values(): bucket_store.reset_all() def zero_grad_stores(self): for grad_store in self.pg_to_grad_store.values(): grad_store.reset_all_gradients() def zero_grad(self, set_to_none=True): """ Set parameter gradients to zero. 
If set_to_none = True, gradient will be set to None to save memory. :param set_to_none: Whether set the gradient to None. Default value is True. :type set_to_none: bool """ if self.mixed_precision_mixin is not None: self.mixed_precision_mixin.pre_zero_grad() for _, param_group in self._working_param_groups.items(): for param in param_group: if set_to_none: param.grad = None else: if param.grad is not None: param.grad.detach() param.grad.zero_() self.zero_grad_stores() self.zero_bucket_stores() #################### # Update Parameter # #################### def step(self, closure=None): assert closure is None, "closure is not supported by step()" if not self.require_grad_sync: return if self.mixed_precision_mixin is not None and self.mixed_precision_mixin.should_skip_step(): if self._verbose: self._logger.info(f"Found overflow. Skip step") self.zero_grad() return # record all grads for unscale and clip grad_partition_groups = [] norm_groups = [] # sometimes not all params are 'really' working # for instance, when layer drop, the dropped layer has no grad # and should not be updated real_working_params = dict() real_master_params = dict() for group_id in range(self.num_param_groups): master_params = self._master_param_groups_of_current_rank[group_id] working_params = self._working_param_groups[group_id] real_working_params[group_id] = [] real_master_params[group_id] = [] working_grads = [] for working_param, master_param in zip(working_params, master_params): # if a working param requires grad and has no grad # it is not 'really' working, e.g. the droped layer # else the splited grad should be attached to the splited param grad_store = self.pid_to_grad_store[id(working_param)] grads = grad_store.get_partitioned_gradients_by_param_id(group_id, id(working_param)) grad_index = 0 if self._partition_grads else grad_store.local_rank if len(grads) > 0: real_working_params[group_id].append(working_param) grad = grads[grad_index] # no need to copy fp32 grad if master_weights is False if self._master_weights: grad = grad.to(master_param.dtype).to(master_param.device) master_param.grad = grad grad_partition_groups.append(grad) real_master_params[group_id].append(master_param) # compute norm norm_group = 0 for grad_store in self.pg_to_grad_store.values(): working_grads = grad_store.get_working_grads_by_group_id(group_id) norm_group += self._compute_grad_norm(dp_pg=grad_store.torch_pg, gradients=working_grads) norm_groups.append(norm_group) # update the params in the optimizer self.optim.param_groups[group_id]["params"] = real_master_params[group_id] # unscale and clip grads global_norm = calculate_global_norm_from_list(norm_list=norm_groups) self._current_grad_norm = global_norm self._unscale_and_clip_grads(grad_partition_groups, global_norm) # update the parameters self.optim.step() # release the grad grad_partition_groups = [] for group_id in range(self.num_param_groups): release_param_grad(self._master_param_groups_of_current_rank[group_id]) self.pg_to_tensor_bucket = { pg: TensorBucket(self.pg_to_bucket_store[pg].reduce_bucket_size) for pg in self.pg_to_param_list } # update working partition updated by the current rank device = get_accelerator().get_current_device() for group_id in range(self.num_param_groups): master_working_param = self.optim.param_groups[group_id]["params"] for idx, master_param in enumerate(master_working_param): working_param = real_working_params[group_id][idx] param_to_gather = master_param.to(device).to(self._dtype) pg = self.param_to_pg[working_param] padded_working_param = 
self._working_param_to_padded_working_param[working_param] if self._overlap_allgather: # handle = dist.all_gather_into_tensor(padded_working_param, param_to_gather, pg, async_op=True) handle = all_gather_into_flat_tensor_nd(padded_working_param, param_to_gather, pg, async_op=True) set_all_gather_handle(working_param, handle) else: if param_to_gather.numel() > self.pg_to_tensor_bucket[pg].max_size: if self._fp8_communication: # TODO: fit fp8 communication all_gather_fp8( list(padded_working_param.chunk(dist.get_world_size(pg))), param_to_gather, pg, fp8_format="e4m3", ) else: # dist.all_gather_into_tensor(padded_working_param, param_to_gather, pg) all_gather_into_flat_tensor_nd(padded_working_param, param_to_gather, pg) continue try: self.pg_to_tensor_bucket[pg].add_to_bucket(param_to_gather, write_back_tensor=working_param) except RuntimeError: self.pg_to_tensor_bucket[pg].all_gather(pg, fp8_communication=self._fp8_communication) self.pg_to_tensor_bucket[pg].add_to_bucket(param_to_gather, write_back_tensor=working_param) self.optim.param_groups[group_id]["params"] = self._master_param_groups_of_current_rank[group_id] if not self._overlap_allgather: for pg, tensor_bucket in self.pg_to_tensor_bucket.items(): if not tensor_bucket.is_empty(): tensor_bucket.all_gather(pg, fp8_communication=self._fp8_communication) def _compute_grad_norm( self, dp_pg: Union[ProcessGroup, Tuple[ProcessGroup, ...]], gradients: List[Tensor], norm_type: int = 2 ) -> float: r""" Compute and return the gradient norm for gradient clipping. Args: gradients (List[Tensor]): The gradients to compute norm norm_type (int, optional): type of the used p-norm, Can be ``'inf'`` for infinity norm. Defaults to 2. Returns: float: The total norm of given gradients """ if len(gradients) == 0: return 0.0 norm_type = float(norm_type) if norm_type == inf: total_norm = max(grad.data.abs().max() for grad in gradients) total_norm_cuda = torch.tensor( [float(total_norm)], device=get_accelerator().get_current_device(), dtype=torch.float, ) if isinstance(dp_pg, tuple): for grp in dp_pg: dist.all_reduce(total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=grp) else: dist.all_reduce(total_norm_cuda, op=torch.distributed.ReduceOp.MAX, group=dp_pg) total_norm = total_norm_cuda.item() else: total_norm_exponentiated = 0.0 for grad in gradients: grad_norm_exponentiated = grad.data.double().norm(norm_type) ** norm_type total_norm_exponentiated += grad_norm_exponentiated # Sum across all model parallel GPUs. 
total_norm_exponentiated_cuda = torch.tensor( [float(total_norm_exponentiated)], device=get_accelerator().get_current_device(), dtype=torch.float, ) if isinstance(dp_pg, tuple): for grp in dp_pg: dist.all_reduce( total_norm_exponentiated_cuda, op=torch.distributed.ReduceOp.SUM, group=grp, ) else: torch.distributed.all_reduce( total_norm_exponentiated_cuda, op=torch.distributed.ReduceOp.SUM, group=dp_pg, ) total_norm = total_norm_exponentiated_cuda.item() ** (1.0 / norm_type) return total_norm ############################# # Mixed Precision Utilities # ############################# def _unscale_and_clip_grads(self, grad_groups_flat, total_norm): # compute combined scale factor for this group div_scale = 1.0 if self.mixed_precision_mixin is not None: div_scale = self.mixed_precision_mixin.get_grad_div_scale() if self._clip_grad_norm > 0.0: # norm is in fact norm*scale clip = ((total_norm / div_scale) + 1e-6) / self._clip_grad_norm if clip > 1: div_scale = clip * div_scale for grad in grad_groups_flat: grad.data.mul_(1.0 / div_scale) ############################ # Gradient Synchronization # ############################ # this method is used to sync gradient manually def _sync_grad(self): for group_id in range(self.num_param_groups): param_group = self._working_param_groups[group_id] for param in param_group: if is_moe_tensor(param) and param.requires_grad and param.grad is None: # TODO better of of doing this # assign zero grad to unrouted expert to avoid hang during grad reduction param.grad = torch.zeros_like(param) if param.requires_grad and param.grad is not None: self._add_to_bucket(param, group_id) self._run_reduction() def _reduce_grad(self, partition_grad): # if not overlapping communication (no reduction hook is attached) when zero1 # we need to manually reduce these gradients if not partition_grad and not self._overlap_communication: self._sync_grad() else: self._run_reduction() # this context comes from pytorch DDP @contextmanager def no_sync(self): old_require_grad_sync = self.require_grad_sync self.require_grad_sync = False try: yield finally: self.require_grad_sync = old_require_grad_sync ############## # State Dict # ############## def _pack_state(self, state: Dict) -> Dict: # comes from pytorch optimizer.state_dict() param_mappings = {} start_index = 0 def pack_group(group): nonlocal start_index packed = {k: v for k, v in group.items() if k != "params"} param_mappings.update( {id(p): i for i, p in enumerate(group["params"], start_index) if id(p) not in param_mappings} ) packed["params"] = [param_mappings[id(p)] for p in group["params"]]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
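Continuing the wrapper sketch above, a hedged outline of one training iteration with `LowLevelZeroOptimizer` (`zero_model` and `zero_optim` come from the previous sketch; the random batch is a stand-in for real data).

import torch

for _ in range(3):
    x = torch.randn(8, 1024, device="cuda", dtype=torch.float16)
    loss = zero_model(x).float().mean()
    zero_optim.backward(loss)   # bucketed all-reduce / reduce-scatter of gradients
    zero_optim.step()           # update local master shards, then all-gather working params
    zero_optim.zero_grad()      # also resets the gradient and bucket stores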
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/zero_hook.py
colossalai/zero/low_level/zero_hook.py
from typing import List from torch._tensor import Tensor from colossalai.tensor.param_op_hook import ColoParamOpHook _ALL_GATHER_HANDLE = "_all_gather_handle" def wait_all_gather_handle(p): if hasattr(p, _ALL_GATHER_HANDLE): handle = getattr(p, _ALL_GATHER_HANDLE) handle.wait() delattr(p, _ALL_GATHER_HANDLE) def set_all_gather_handle(p, handle): setattr(p, _ALL_GATHER_HANDLE, handle) class ZeroOpHook(ColoParamOpHook): def pre_forward(self, params: List[Tensor]) -> None: for p in params: wait_all_gather_handle(p) def post_forward(self, params: List[Tensor]) -> None: pass def pre_backward(self, params: List[Tensor]) -> None: pass def post_backward(self, params: List[Tensor]) -> None: pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
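A tiny illustration (not from the repository) of the attribute protocol this hook relies on: an async all-gather handle is parked on the parameter and waited on before the parameter is first used in forward. `DummyHandle` stands in for the real communication handle.

import torch
from colossalai.zero.low_level.zero_hook import set_all_gather_handle, wait_all_gather_handle

class DummyHandle:
    def wait(self):
        print("all-gather finished")

p = torch.nn.Parameter(torch.zeros(4))
set_all_gather_handle(p, DummyHandle())
wait_all_gather_handle(p)                    # calls wait() and removes the attribute
assert not hasattr(p, "_all_gather_handle")  # a second wait would be a no-op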
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/__init__.py
colossalai/zero/low_level/__init__.py
from .low_level_optim import LowLevelZeroOptimizer __all__ = ["LowLevelZeroOptimizer"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/_utils.py
colossalai/zero/low_level/_utils.py
import math from typing import Optional, Tuple, Union import numpy as np import torch import torch.distributed as dist from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors def flatten(input_): return _flatten_dense_tensors(input_) def unflatten(flat, tensors): return _unflatten_dense_tensors(flat, tensors) def count_numel(tensor_list): res = 0 for tensor in tensor_list: res += tensor.numel() return res def calculate_padding(numel, unit_size): remainder = numel % unit_size return unit_size - remainder if remainder else remainder def shuffle_by_round_robin(tensor_list, num_partitions): partitions = dict() for tensor_idx, tensor in enumerate(tensor_list): partition_to_go = tensor_idx % num_partitions if partition_to_go not in partitions: partitions[partition_to_go] = [] partitions[partition_to_go].append(dict(tensor=tensor, index=tensor_idx)) partitions_count = len(partitions) new_tensor_list = [] tensor_index_mapping = dict() for partition_id in range(partitions_count): partition_tensors = partitions[partition_id] for item in partition_tensors: tensor_index_mapping[item["index"]] = len(new_tensor_list) new_tensor_list.append(item["tensor"]) return new_tensor_list, tensor_index_mapping # create a flat tensor aligned at the alignment boundary def flatten_dense_tensors_with_padding(tensor_list, unit_size): num_elements = count_numel(tensor_list) padding = calculate_padding(num_elements, unit_size=unit_size) if padding > 0: pad_tensor = torch.zeros(padding, device=tensor_list[0].device, dtype=tensor_list[0].dtype) padded_tensor_list = tensor_list + [pad_tensor] else: padded_tensor_list = tensor_list return flatten(padded_tensor_list) def is_nccl_aligned(tensor): return tensor.data_ptr() % 4 == 0 def get_grad_accumulate_object(tensor): """ Return the AccumulateGrad of the input tensor """ # grad_fn reference: # https://discuss.pytorch.org/t/in-the-grad-fn-i-find-a-next-functions-but-i-dont-understand-the-meaning-of-the-attribute/24463 # expand_as reference: https://pytorch.org/docs/stable/generated/torch.Tensor.expand.html#torch.Tensor.expand # # `next_functions` will return the backward graph where # the first element is the AccumulateGrad of the leaf nodes. # we want to get the AccumulateGrad of the input tensor instead of the leaf # node in the whole computation graph. # Therefore, we call expand_as to create a dummy graph # where tensor_tmp and tensor indeed point to the same object. # You can check this by print(tensor.data_ptr() == tensor_tmp.data_ptr()) tensor_tmp = tensor.expand_as(tensor) grad_acc_obj = tensor_tmp.grad_fn.next_functions[0][0] return grad_acc_obj def split_by_dtype(tensor_list): """ Splits a list of PyTorch tensors into sublists based on their data type. :param tensor_list: A list of PyTorch tensors. :type tensor_list: list[torch.Tensor] :return: A list of sublists, where each sublist contains tensors of a specific data type. 
:rtype: list[list[torch.Tensor]] """ dtypes = ["torch.cuda.HalfTensor", "torch.cuda.FloatTensor", "torch.cuda.DoubleTensor", "torch.cuda.BFloat16Tensor"] buckets = [] for _, dtype in enumerate(dtypes): bucket = [t for t in tensor_list if t.type() == dtype] if bucket: buckets.append(bucket) return buckets def reduce_tensor_dp_group( tensor: torch.Tensor, dtype: Optional[torch.dtype] = None, dst_local_rank: Optional[int] = None, dst_global_rank: Optional[int] = None, group: Optional[dist.ProcessGroup] = None, ): """ Reduce the tensor in the data parallel process group :param tensor: A tensor object to reduce/all-reduce :param dtype: The data type used in communication :param dst_rank: The source rank for reduce. If dst_rank is None, :param parallel_mode: Communication parallel mode all-reduce will be used instead of reduce. Default is None. :type tensor: torch.Tensor :type dtype: torch.dtype, optional :type dst_rank: int, optional :type pg: ProcessGroup, optional """ # use the original dtype if dtype is None: dtype = tensor.dtype # cast the data to specified dtype for reduce/all-reduce if tensor.dtype != dtype: tensor_to_reduce = tensor.to(dtype) else: tensor_to_reduce = tensor world_size = dist.get_world_size(group=group) tensor_to_reduce.div_(world_size) # if rank is None, all reduce will be used # else, reduce is used use_all_reduce = dst_local_rank is None if use_all_reduce: dist.all_reduce(tensor_to_reduce, group=group) else: dist.reduce(tensor=tensor_to_reduce, dst=dst_global_rank, group=group) # recover the original dtype if tensor.dtype != dtype and tensor is not tensor_to_reduce: local_rank = dist.get_rank(group=group) if use_all_reduce or dst_local_rank == local_rank: tensor.copy_(tensor_to_reduce) return tensor def has_inf_or_nan(tensor): try: # if tensor is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as tensor # (which is true for some recent version of pytorch). tensor_sum = float(tensor.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # tensor_sum = float(tensor.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if tensor_sum == float("inf") or tensor_sum == -float("inf") or tensor_sum != tensor_sum: return True return False def release_param_grad(tensor_list): for tensor in tensor_list: tensor.grad = None def calculate_global_norm_from_list(norm_list): """Compute total from a list of norms""" total_norm = 0.0 for norm in norm_list: total_norm += norm**2.0 return math.sqrt(total_norm) def sync_tensor(flat_tensor, tensor_list): """ Synchronize the flattened tensor and unflattened tensor list. When a list of tensor are flattened with `torch._utils._unflatten_dense_tensors`, a new tensor is created. Thus, the flat tensor and original tensor list do not share the same memory space. This function will update the tensor list so that they point to the same value. 
:param flat_tensor: A flat tensor obtained by calling `torch._utils._unflatten_dense_tensors` on a tensor list :param tensor_list: A list of tensors corresponding to the flattened tensor :type flat_tensor: torch.Tensor :type tensor_list: List[torch.Tensor] """ updated_params = unflatten(flat_tensor, tensor_list) # update the tensor data for p, q in zip(tensor_list, updated_params): p.data = q.data def all_gather_into_flat_tensor_nd( output_tensor: torch.Tensor, input_tensor: torch.Tensor, group: Union[dist.ProcessGroup, Tuple[dist.ProcessGroup, ...]], async_op: bool = False, ): if isinstance(group, dist.ProcessGroup): group = (group,) sizes = [dist.get_world_size(pg) for pg in group] ranks = [dist.get_rank(pg) for pg in group] for i, pg in list(enumerate(group))[::-1]: if i == 0: out = output_tensor else: prev_sizes = sizes[:i] prev_ranks = ranks[:i] chunks = output_tensor.chunk(np.prod(prev_sizes)) out = chunks[np.ravel_multi_index(prev_ranks, prev_sizes)] handle = dist.all_gather_into_tensor(out, input_tensor, group=pg, async_op=async_op) input_tensor = out return handle def get_nd_world_size(group) -> int: if isinstance(group, tuple): return int(np.prod([dist.get_world_size(pg) for pg in group])) else: return dist.get_world_size(group) def get_nd_rank(group) -> int: if isinstance(group, tuple): return np.ravel_multi_index( tuple(dist.get_rank(group=pg) for pg in group), [dist.get_world_size(pg) for pg in group] ) else: return dist.get_rank(group)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
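A CPU-only sketch (not from the repository) of the flatten/`sync_tensor` round trip: after a flat buffer is updated (for example by a reduction), `sync_tensor` re-points the original tensors at slices of that buffer.

import torch
from colossalai.zero.low_level._utils import flatten, sync_tensor

a, b = torch.zeros(3), torch.zeros(2)
flat = flatten([a, b])      # one contiguous buffer copied from a and b
flat += 1.0                 # pretend this was the result of an all-reduce
sync_tensor(flat, [a, b])   # a and b now point at the updated buffer
print(a, b)                 # tensor([1., 1., 1.]) tensor([1., 1.])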
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/bookkeeping/bucket_store.py
colossalai/zero/low_level/bookkeeping/bucket_store.py
from typing import Dict import torch from torch import Tensor from torch._utils import _flatten_dense_tensors from torch.distributed import ProcessGroup from colossalai.accelerator.api import get_accelerator from .base_store import BaseStore class BucketStore(BaseStore): def __init__( self, torch_pg: ProcessGroup, reduce_bucket_size: int, ): super().__init__(torch_pg) self.reduce_bucket_size = reduce_bucket_size self.reset_all() self.comm_stream = get_accelerator().Stream() def reset_all(self) -> None: # init self.current_group_id = 0 self._num_elements_in_bucket = 0 # mapping gradient slices and parameter self.grad_to_param_mapping = dict() self._grad_in_bucket = dict() self._param_list = [] self._padding_size = [] for rank in range(self._world_size): self._grad_in_bucket[rank] = [] # offset_list records number of tensors in the bucket before each reduction self.offset_list = [0] def num_elements_in_bucket(self) -> int: """Return the total number of elements in bucket Returns: int: the total number of elements in bucket """ return self._num_elements_in_bucket def reset_num_elements_in_bucket(self): """Set the number of elements in bucket to zero.""" self._num_elements_in_bucket = 0 def add_param_grad(self, group_id: int, param: Tensor, padding_size: int): """Add a param to bucket and record the padding size of a param for gradient padding Args: group_id (int): The index of a parameter group param (Tensor): The parameter padding_size (int): The padding size of the parameter """ self._param_list.append(param) self._padding_size.append(padding_size) self._num_elements_in_bucket += param.numel() + padding_size self.current_group_id = group_id # number of tensors in current bucket self.offset_list[-1] += 1 def build_grad_in_bucket(self): """Organize parameters' gradient(padding and split), follows the parameters' splitting method Data structure of self._grad_in_bucket: { rank0: [grad0_rank0, grad1_rank0, ...] rank1: [grad0_rank1, grad1_rank1, ...] } """ for param, padding_size in zip(self._param_list, self._padding_size): grad = param.grad.detach().flatten() if padding_size > 0: with torch.no_grad(): grad = torch.nn.functional.pad(grad.view(-1), [0, padding_size]) grad_list = grad.split(grad.numel() // self._world_size) for rank in range(self._world_size): grad_current_rank = grad_list[rank].detach() self.grad_to_param_mapping[id(grad_current_rank)] = id(param) self._grad_in_bucket[rank].append(grad_current_rank) param.grad = None self.offset_list.append(0) def get_grad(self) -> Dict: """Return the dictionary of gradients slices, of which the keys are ranks Returns: Dict: The dictionary of gradients slices """ return self._grad_in_bucket def get_flatten_grad(self) -> Tensor: """Return the flattened gradients slices in the bucket, the data organization of the flattened tensor: [grad0_rank0, grad1_rank0, ..., grad_0_rank1, grad1_rank1, ....] 
Returns: Tensor: the flattened gradients slices in the bucket """ flat_grad = [] for grad_list in self._grad_in_bucket.values(): flat_grad.extend(grad_list) flat_grad = _flatten_dense_tensors(flat_grad) return flat_grad def get_param_id_of_grad(self, grad: Tensor) -> int: """Return the id of a parameter which the gradient slice belongs to Args: grad (Tensor): the gradient slice Returns: int: the id of a parameter which the gradient slice belongs to """ return self.grad_to_param_mapping[id(grad)] def reset(self): """Reset the bucket storage after reduction, only release the tensors have been reduced""" cur_offset = self.offset_list.pop(0) self._param_list = self._param_list[cur_offset:] self._padding_size = self._padding_size[cur_offset:] for _ in range(cur_offset): del self.grad_to_param_mapping[next(iter(self.grad_to_param_mapping))] for rank in range(self._world_size): self._grad_in_bucket[rank] = self._grad_in_bucket[rank][cur_offset:]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
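`BucketStore` itself needs a process group, so the sketch below (not from the repository) only reproduces the padding-and-splitting arithmetic that `build_grad_in_bucket` applies so that every rank receives an equally sized gradient slice.

import torch
import torch.nn.functional as F

world_size = 4
grad = torch.arange(10, dtype=torch.float32)             # 10 elements, not divisible by 4
padding = (world_size - grad.numel() % world_size) % world_size
padded = F.pad(grad, [0, padding])                       # padded to 12 elements
slices = padded.split(padded.numel() // world_size)      # one slice per rank
print([s.numel() for s in slices])                       # [3, 3, 3, 3]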
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/bookkeeping/base_store.py
colossalai/zero/low_level/bookkeeping/base_store.py
from typing import Tuple, Union import numpy as np import torch.distributed as dist from torch.distributed import ProcessGroup class BaseStore: def __init__(self, torch_pg: Union[ProcessGroup, Tuple[ProcessGroup, ...]]): if isinstance(torch_pg, tuple): self.sizes = [dist.get_world_size(group=pg) for pg in torch_pg] self._world_size = int(np.prod(self.sizes)) self._local_rank = np.ravel_multi_index(tuple(dist.get_rank(group=pg) for pg in torch_pg), self.sizes) else: self._world_size = dist.get_world_size(group=torch_pg) self._local_rank = dist.get_rank(group=torch_pg) self.sizes = [self._world_size] self.torch_pg = torch_pg @property def world_size(self): return self._world_size @property def local_rank(self): return self._local_rank
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
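A small numpy-only illustration (not from the repository) of how `BaseStore` folds a tuple of process groups into one flat rank and world size with `np.ravel_multi_index`; the group sizes here are made up.

import numpy as np

sizes = [2, 4]                                # e.g. extra-dp world size 2, dp world size 4
ranks = (1, 3)                                # this rank's position in each group
print(np.ravel_multi_index(ranks, sizes))     # 7 == 1 * 4 + 3, the flat local rank
print(int(np.prod(sizes)))                    # 8, the combined world size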
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/bookkeeping/gradient_store.py
colossalai/zero/low_level/bookkeeping/gradient_store.py
from typing import List, Optional from torch import Tensor from .base_store import BaseStore class GradientStore(BaseStore): def __init__(self, *args, partition_grad: bool = False): super().__init__(*args) """ self._grads_of_params mapping the parameter and its gradient slices data structure: { group_id:{ param_id: [grad_rank0, grad_rank1, ...] } } """ self._grads_of_params = dict() # stage 2 self._working_index = 0 if partition_grad else self._local_rank # for zero2, it's `param_id: [grad_local_rank]` self.grad_to_param_mapping = dict() def get_partitioned_gradients_by_param_id(self, group_id: int, param_id: int) -> List: """Return list of gradient slices of a specific parameter Args: group_id (int): The index of a parameter group param_id (int): The id of a parameter Returns: List: the list of gradient slices of a parameter. """ if group_id in self._grads_of_params: if param_id in self._grads_of_params[group_id]: return self._grads_of_params[group_id][param_id] # the param has no grad, for instance, in layer drop return [] def append_gradients_by_param_id(self, grad: Tensor, group_id: int, param_id: int): """Append a gradient slice to the parameter's gradient slice list Args: grad (Tensor): The gradient slice to append to list group_id (int): The index of a parameter group param_id (int): The id of a parameter """ if group_id not in self._grads_of_params: self._grads_of_params[group_id] = dict() if param_id not in self._grads_of_params[group_id]: self._grads_of_params[group_id][param_id] = [grad] else: self._grads_of_params[group_id][param_id].append(grad) self.grad_to_param_mapping[id(grad)] = param_id def add_gradients_by_param_id(self, grad: Tensor, grad_idx: int, group_id: int, param_id: int): """Add a gradient slice on an existing slice of the parameter's gradient Used when no_sync is not activated. Args: grad (Tensor): The split gradient to append to list grad_idx (int): The index of the existing slice group_id (int): The index of a parameter group param_id (int): The id of a parameter """ self._grads_of_params[group_id][param_id][grad_idx].add_(grad) def get_working_grads_by_group_id(self, group_id: int) -> List: """Return list of working gradient slices in the group Args: group_id (int): The index of a parameter group Returns: List: the list working gradient slices in the group """ grad_list = [] # When using LoRa and the user sets multiple param_groups, it is possible that some param_groups have no parameters with gradients. if group_id not in self._grads_of_params.keys(): return grad_list for param_grads in self._grads_of_params[group_id].values(): grad_list.append(param_grads[self._working_index]) return grad_list def get_working_grad_by_param_id(self, param_id) -> Optional[Tensor]: """ Return the working gradient for the specified parameter. Args: param_id (int): The index of the parameter. Returns: Tensor: The the working gradient slices for the specified param_id. 
""" for group in self._grads_of_params.values(): if param_id in group.keys(): return group[param_id][self._working_index] return None def reset_grads_by_group_id(self, group_id: int): self._grads_of_params[group_id] = dict() def reset_all_gradients(self): self._grads_of_params = dict() self.grad_to_param_mapping = dict() def get_param_id_for_grad(self, grad: Tensor) -> Optional[int]: """Return the id of a parameter which the gradient slice belongs to Args: grad (Tensor): the gradient slice Returns: int: the id of a parameter which the gradient slice belongs to """ return self.grad_to_param_mapping.get(id(grad), None)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
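A hedged sketch of the `GradientStore` bookkeeping API; the single-process gloo group exists only to satisfy the `BaseStore` constructor, and the address/port are arbitrary.

import torch
import torch.distributed as dist
from colossalai.zero.low_level.bookkeeping import GradientStore

if not dist.is_initialized():
    dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1)

store = GradientStore(dist.group.WORLD, partition_grad=False)
p = torch.nn.Parameter(torch.zeros(4))
grad_slice = torch.ones(4)
store.append_gradients_by_param_id(grad_slice, group_id=0, param_id=id(p))
print(store.get_partitioned_gradients_by_param_id(0, id(p)))   # [tensor([1., 1., 1., 1.])]
print(store.get_param_id_for_grad(grad_slice) == id(p))        # True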
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/bookkeeping/tensor_bucket.py
colossalai/zero/low_level/bookkeeping/tensor_bucket.py
from typing import Optional

import numpy as np
import torch
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors

from colossalai.quantization.fp8 import all_gather_fp8
from colossalai.zero.low_level._utils import all_gather_into_flat_tensor_nd


class TensorBucket:
    def __init__(self, size):
        self._max_size = size
        self._current_size = 0
        self._bucket = []
        self._write_back_pairs = {}

    @property
    def max_size(self):
        return self._max_size

    @property
    def current_size(self):
        return self._current_size

    def is_full_or_oversized(self):
        return self._current_size >= self._max_size

    def is_empty(self):
        return len(self._bucket) == 0

    def add_to_bucket(self, tensor, allow_oversize=False, write_back_tensor: Optional[torch.Tensor] = None):
        tensor_size = tensor.numel()

        if not allow_oversize and self.will_exceed_max_size(tensor_size):
            msg = f"The param bucket max size {self._max_size} is exceeded " + f"by tensor (size {tensor_size})"
            raise RuntimeError(msg)

        self._bucket.append(tensor)
        self._current_size += tensor_size
        write_back_tensor = write_back_tensor if write_back_tensor is not None else tensor
        self._write_back_pairs[tensor] = write_back_tensor

    def will_exceed_max_size(self, tensor_size):
        expected_size = self._current_size + tensor_size
        return expected_size > self._max_size

    def get_bucket(self):
        return self._bucket

    def empty(self):
        self._bucket = []
        self._current_size = 0
        self._write_back_pairs = {}

    def flatten(self):
        return _flatten_dense_tensors(self._bucket)

    def unflatten(self, flat_tensor):
        return _unflatten_dense_tensors(flat_tensor, self._bucket)

    def unflatten_and_copy(self, flat_tensor):
        unflattened_tensor_list = self.unflatten(flat_tensor)
        for old, new in zip(self._bucket, unflattened_tensor_list):
            old.copy_(new)

    def all_gather(self, group=None, fp8_communication: bool = False):
        flat = self.flatten()
        if isinstance(group, tuple):
            world_size = np.prod([dist.get_world_size(pg) for pg in group])
        else:
            world_size = dist.get_world_size(group)
        buffer = torch.empty(flat.numel() * world_size, device=flat.device, dtype=flat.dtype)
        if fp8_communication:
            # TODO: fit fp8
            all_gather_fp8(list(buffer.chunk(dist.get_world_size(group))), flat, group=group, fp8_format="e4m3")
        else:
            # dist.all_gather_into_tensor(buffer, flat, group=group)
            all_gather_into_flat_tensor_nd(buffer, flat, group=group)
        unflat_buffers = [self.unflatten(buffer) for buffer in buffer.chunk(world_size)]
        # transpose the list of lists
        unflat_buffers = list(map(list, zip(*unflat_buffers)))
        for unflat_shards, tensor in zip(unflat_buffers, self._bucket):
            write_back_tensor = self._write_back_pairs[tensor]
            rec_tensor = _flatten_dense_tensors(unflat_shards)[: write_back_tensor.numel()]
            if write_back_tensor.is_contiguous():
                rec_tensor = rec_tensor.view_as(write_back_tensor)
            else:
                rec_tensor = rec_tensor.reshape_as(write_back_tensor)
            write_back_tensor.data.copy_(rec_tensor)

        self.empty()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
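A hedged usage sketch of TensorBucket's bucketing/flatten path shown above; the all_gather path needs an initialized process group and is omitted here. The import path follows the bookkeeping package __init__ in the next record.

import torch
from colossalai.zero.low_level.bookkeeping import TensorBucket

bucket = TensorBucket(size=1024)                 # capacity measured in number of elements
t = torch.randn(256)
if not bucket.will_exceed_max_size(t.numel()):
    bucket.add_to_bucket(t)                      # write-back target defaults to the tensor itself
flat = bucket.flatten()                          # one contiguous buffer over all bucketed tensors
bucket.unflatten_and_copy(flat * 2.0)            # writes the (modified) values back into t
bucket.empty()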
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/low_level/bookkeeping/__init__.py
colossalai/zero/low_level/bookkeeping/__init__.py
from .bucket_store import BucketStore
from .gradient_store import GradientStore
from .tensor_bucket import TensorBucket

__all__ = ["GradientStore", "BucketStore", "TensorBucket"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/placement_policy.py
colossalai/zero/gemini/placement_policy.py
import functools import warnings from abc import ABC, abstractmethod from time import time from typing import Dict, List, Optional, Tuple, Type import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.zero.gemini.chunk import Chunk from .chunk import Chunk, ChunkManager from .memory_tracer import ChunkMemStatsCollector class PlacementPolicy(ABC): need_mem_stats: bool = False def __init__( self, chunk_manager: ChunkManager, mem_stats_collector: Optional[ChunkMemStatsCollector] = None, max_prefetch: int = 0, **kwargs, ) -> None: self.chunk_manager = chunk_manager self.mem_stats_collector: Optional[ChunkMemStatsCollector] = mem_stats_collector self.max_prefetch = max_prefetch @abstractmethod def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]: raise NotImplementedError @abstractmethod def setup_grads_device( self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device] ) -> None: raise NotImplementedError def get_prefetch_chunks( self, is_warmup, compute_list: tuple, compute_idx: int, async_works: Dict[Chunk, dist.Work] ) -> List[Chunk]: return [] # no prefetch by default class StaticPlacementPolicy(PlacementPolicy): def __init__( self, chunk_manager: ChunkManager, mem_stats_collector: Optional[ChunkMemStatsCollector] = None, max_prefetch: int = 0, shard_param_frac: float = 1.0, offload_optim_frac: float = 0.0, offload_param_frac: float = 0.0, **kwargs, ) -> None: super().__init__(chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch) if offload_param_frac > 0.0 and (shard_param_frac != 1.0 or offload_optim_frac != 1.0): warnings.warn("offload_param_frac is ignored when shard_param_frac != 1.0 or offload_optim_frac != 1.0") offload_param_frac = 0.0 self.shard_param_frac = shard_param_frac self.offload_optim_frac = offload_optim_frac self.offload_param_frac = offload_param_frac # these should be initialized in setup_grads_device self.keep_gathered_chunk_mem = 0.0 self.keep_cuda_chunk_mem = 0.0 def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]: can_shard_chunk_mem = sum(chunk.chunk_mem for chunk in can_evict_chunks) can_offload_chunk_mem = can_shard_chunk_mem for chunk in can_evict_chunks: if can_shard_chunk_mem <= self.keep_gathered_chunk_mem: break self.chunk_manager.release_chunk(chunk) # real saved mem is chunk_mem - shard_mem, for simplicity we use chunk_mem can_shard_chunk_mem -= chunk.chunk_mem for chunk in can_evict_chunks: if can_offload_chunk_mem <= self.keep_cuda_chunk_mem: break self.chunk_manager.move_chunk(chunk, torch.device("cpu")) # real saved mem is shard_mem, for simplicity we use chunk_mem can_offload_chunk_mem -= chunk.chunk_mem return 0, 0.0 def setup_grads_device( self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device] ) -> None: total_chunk_mem = sum(self.chunk_manager.get_chunk(p).chunk_mem for p in params) offload_optim_chunk_mem = total_chunk_mem * self.offload_optim_frac offloaded_optim_chunk_mem = 0 chunks = set(self.chunk_manager.get_chunk(p) for p in params) for chunk in chunks: params = chunk.get_tensors() # init offload optim settings # keep gathered chunks are in CUDA if chunk.keep_gathered or offloaded_optim_chunk_mem >= offload_optim_chunk_mem: device = get_accelerator().get_current_device() else: device = torch.device("cpu") # real offloaded mem is chunk.shard_mem, for simplicity we use chunk mem here offloaded_optim_chunk_mem += chunk.chunk_mem for p in 
params: grads_device_map[p] = device self.keep_gathered_chunk_mem = total_chunk_mem * (1 - self.shard_param_frac) self.keep_cuda_chunk_mem = total_chunk_mem * (1 - self.offload_param_frac) def get_prefetch_chunks( self, is_warmup: bool, compute_list: tuple, compute_idx: int, async_works: Dict[Chunk, dist.Work] ) -> List[Chunk]: if is_warmup: # no prefetch during warmup since we need compute_list return [] can_prefetch = self.max_prefetch - len(async_works) prefetch = [] for i in range(compute_idx + 1, len(compute_list)): for chunk in compute_list[i]: if len(prefetch) >= can_prefetch: break if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks: prefetch.append(chunk) else: continue break return prefetch class AutoPlacementPolicy(PlacementPolicy): need_mem_stats: bool = True def __init__( self, chunk_manager: ChunkManager, mem_stats_collector: Optional[ChunkMemStatsCollector] = None, max_prefetch: int = 0, warmup_non_model_data_ratio: float = 0.8, steady_cuda_cap_ratio: float = 0.9, **kwargs, ) -> None: super().__init__(chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch) # model data will use 1-_warmup_non_model_data_ratio CUDA memory in warmup phase # you can set them by AutoPlacementPolicy.set_warmup_non_model_data_ratio() # and AutoPlacementPolicy.set_steady_cuda_cap_ratio() self._warmup_non_model_data_ratio = warmup_non_model_data_ratio self._steady_cuda_cap_ratio = steady_cuda_cap_ratio self.__avail_cuda_model_data_for_prefetch = None def evict_tensors( self, can_evict_chunks: List[Chunk], cuda_demand: int = 0, warmup: bool = True, compute_list: Optional[List[Tuple[Chunk, ...]]] = None, compute_idx: int = 0, **kwargs, ) -> Tuple[int, float]: """ Evict tensors from CUDA device. Args: can_evict_chunks (List[StatefulTensor]): the list of tensors that can be evicted. cuda_demand (int, optional): the volume of data needed on cuda device. Defaults to 0. warmup (bool, optional): a flag indicates whether in the phase of warmup. Defaults to True. compute_list (List[StatefulTensor], optional): TODO. Defaults to []. compute_idx (int, optional): the idx of computing device. Defaults to 0. Raises: RuntimeError: Returns: int: the volume of memory that is evicted """ from colossalai.legacy.utils.memory import colo_device_memory_capacity start = time() cuda_capacity = colo_device_memory_capacity(get_accelerator().get_current_device()) used_cuda_model_data = self.chunk_manager.total_mem["cuda"] if warmup: # We designate a part of CUDA memory for model data in warmup iterations. max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio else: # max non-model-data cuda memory consumption of this sampling moment and the next sampling moment. 
max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage("cuda") cuda_capacity *= self._steady_cuda_cap_ratio total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data freed_cuda_model_data = 0 if avail_cuda_model_data < cuda_demand: # Move cuda_demand - avail_cuda_model_data volume of tensors # to_free_cuda_model_data = cuda_demand - avail_cuda_model_data to_free_cuda_model_data = cuda_demand - avail_cuda_model_data to_free_chunks = can_evict_chunks if not warmup: to_free_chunks = self._sort_can_evict_chunks(tuple(to_free_chunks), compute_idx, tuple(compute_list)) # print(self._sort_can_evict_chunks.cache_info()) for chunk in to_free_chunks: if freed_cuda_model_data >= to_free_cuda_model_data: break self.chunk_manager.release_chunk(chunk) self.chunk_manager.move_chunk(chunk, torch.device("cpu")) freed_cuda_model_data += chunk.chunk_mem if freed_cuda_model_data < to_free_cuda_model_data: raise RuntimeError( f"Adjust layout failed! No enough CUDA memory! " f"Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}" ) self.__avail_cuda_model_data_for_prefetch = avail_cuda_model_data + freed_cuda_model_data return freed_cuda_model_data, time() - start @staticmethod @functools.lru_cache(maxsize=None) def _sort_can_evict_chunks(can_evict_chunks: tuple, compute_idx: int, compute_list: tuple) -> list: next_compute_idx = {chunk: len(compute_list) for chunk in can_evict_chunks} for i in range(len(compute_list) - 1, compute_idx, -1): for chunk in compute_list[i]: if chunk in next_compute_idx: next_compute_idx[chunk] = i next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True) return [t for (t, idx) in next_compute_idx] def setup_grads_device( self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device] ) -> None: for p in params: chunk = self.chunk_manager.get_chunk(p) # init offload optim settings # keep gathered chunks are in CUDA if chunk.keep_gathered: grads_device_map[p] = get_accelerator().get_current_device() else: grads_device_map[p] = torch.device("cpu") def get_prefetch_chunks( self, is_warmup: bool, compute_list: tuple, compute_idx: int, async_works: Dict[Chunk, dist.Work] ) -> List[Chunk]: if is_warmup: # no prefetch during warmup since we need compute_list return [] avail_cuda_model_data = self.__avail_cuda_model_data_for_prefetch self.__avail_cuda_model_data_for_prefetch = None # incase of double use prefetch_chunk_memory = 0 can_prefetch = self.max_prefetch - len(async_works) prefetch = [] for i in range(compute_idx + 1, len(compute_list)): for chunk in compute_list[i]: if len(prefetch) >= can_prefetch or prefetch_chunk_memory + chunk.chunk_mem > avail_cuda_model_data: break if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks: prefetch_chunk_memory += chunk.chunk_mem prefetch.append(chunk) else: continue break return prefetch class PlacementPolicyFactory: policies: Dict[str, Type[PlacementPolicy]] = { "auto": AutoPlacementPolicy, "static": StaticPlacementPolicy, } @staticmethod def create(policy_name: str) -> Type[PlacementPolicy]: if policy_name not in PlacementPolicyFactory.policies: raise TypeError(f"Unknown tensor placement policy {policy_name}") return PlacementPolicyFactory.policies[policy_name] @staticmethod def get_policy_names(): return tuple(PlacementPolicyFactory.policies.keys())
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
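The core scheduling idea in AutoPlacementPolicy._sort_can_evict_chunks above is "evict the chunk whose next use lies furthest in the future (or never comes) first". A standalone toy restatement, using strings in place of Chunk objects:

def sort_by_next_use(can_evict, compute_idx, compute_list):
    # chunks never used again default to len(compute_list), i.e. "furthest away"
    next_use = {c: len(compute_list) for c in can_evict}
    for i in range(len(compute_list) - 1, compute_idx, -1):
        for c in compute_list[i]:
            if c in next_use:
                next_use[c] = i
    # largest next-use index first: those chunks are the safest to evict
    return [c for c, _ in sorted(next_use.items(), key=lambda kv: kv[1], reverse=True)]

compute_list = [("a",), ("b",), ("a", "c")]
print(sort_by_next_use(["a", "b", "c"], compute_idx=0, compute_list=compute_list))
# -> ['a', 'c', 'b']  ('b' is needed soonest, at step 1, so it is evicted last)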
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/gemini_hook.py
colossalai/zero/gemini/gemini_hook.py
from contextlib import contextmanager
from enum import Enum
from functools import partial
from typing import List

import torch

from colossalai.accelerator import get_accelerator
from colossalai.tensor.param_op_hook import ColoParamOpHook
from colossalai.utils import is_ddp_ignored
from colossalai.zero.gemini import TensorState
from colossalai.zero.gemini.gemini_mgr import GeminiManager


class TrainingPhase(Enum):
    FORWARD = 0
    BACKWARD = 1


class GeminiZeROHook(ColoParamOpHook):
    def __init__(self, gemini_manager: GeminiManager) -> None:
        super().__init__()
        self._gemini_manager = gemini_manager
        self._chunk_manager = gemini_manager.chunk_manager
        self._training_phase = TrainingPhase.FORWARD

    def pre_op(self, params):
        # map params to chunks
        params = [p for p in params if not is_ddp_ignored(p)]
        all_chunks = self._chunk_manager.get_chunks(params)

        # wait for prefetched chunks and filter out those that were not prefetched
        chunks_fetch_sync = self._gemini_manager.wait_chunks(all_chunks)

        # transfer state
        for p in params:
            self._chunk_manager.trans_tensor_state(p, TensorState.COMPUTE)
        self._gemini_manager.sample_overall_data()

        # evict chunks, aware of asynchronously fetched ones
        self._gemini_manager.adjust_layout(
            all_chunks, record_anyway=self._gemini_manager.placement_policy.max_prefetch > 0
        )

        # fetch the rest synchronously
        for chunk in chunks_fetch_sync:
            self._chunk_manager.access_chunk(chunk)

        # get possible chunks to prefetch
        chunks_fetch_async = self._gemini_manager.placement_policy.get_prefetch_chunks(
            is_warmup=self._gemini_manager.is_warmup(),
            compute_list=self._gemini_manager.compute_list,
            compute_idx=self._gemini_manager.compute_idx,
            async_works=self._gemini_manager.async_works,
        )

        # prefetch
        if self._gemini_manager.chunk_manager._prefetch_stream is not None:
            # When prefetch happens for the first time there is no dist.Work to sync on,
            # so the optimizer may not have finished its computation on the default stream yet,
            # and we might prefetch outdated chunks here.
            #
            # Other than that, self._gemini_manager.wait_chunks has already synced with the
            # default stream by calling dist.Work.wait(), and this line makes no difference.
            self._gemini_manager.chunk_manager._prefetch_stream.wait_stream(get_accelerator().current_stream())

        with get_accelerator().stream(self._gemini_manager.chunk_manager._prefetch_stream):
            for chunk in chunks_fetch_async:
                maybe_work = self._chunk_manager.access_chunk(chunk, async_access=True)
                if maybe_work is not None:
                    self._gemini_manager.add_work(chunk, maybe_work)

        # record cuda model data of the current OP, including memory for prefetched chunks
        self._gemini_manager.record_model_data_volume()

    def post_op(self, params):
        params = [p for p in params if not is_ddp_ignored(p)]
        for p in params:
            tensor_state = (
                TensorState.HOLD
                if self._training_phase == TrainingPhase.FORWARD or not p.requires_grad
                else TensorState.HOLD_AFTER_BWD
            )
            self._chunk_manager.trans_tensor_state(p, tensor_state)

    def pre_forward(self, params: List[torch.Tensor]) -> None:
        self.pre_op(params)

    def post_forward(self, params: List[torch.Tensor]) -> None:
        self.post_op(params)

    def pre_backward(self, params: List[torch.Tensor]) -> None:
        self.pre_op(params)

    def post_backward(self, params: List[torch.Tensor]) -> None:
        self.post_op(params)

    @contextmanager
    def switch_training_phase(self, training_phase: TrainingPhase = TrainingPhase.BACKWARD):
        old_training_phase = self._training_phase
        try:
            self._training_phase = training_phase
            yield
        finally:
            self._training_phase = old_training_phase

    switch_to_backward = switch_training_phase
    switch_to_forward = partial(switch_to_backward, training_phase=TrainingPhase.FORWARD)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/gemini_ddp.py
colossalai/zero/gemini/gemini_ddp.py
import itertools from collections import OrderedDict from contextlib import nullcontext from functools import partial from typing import Dict, Iterable, Iterator, List, Optional, Set, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from torch.distributed.distributed_c10d import _get_default_group from colossalai.accelerator import get_accelerator from colossalai.checkpoint_io.utils import StateDictSharder, gather_distributed_param from colossalai.interface import ModelWrapper from colossalai.lazy import LazyTensor from colossalai.logging import get_dist_logger from colossalai.quantization.fp8_hook import FP8Hook from colossalai.tensor.colo_parameter import ColoParameter from colossalai.tensor.d_tensor import ( distribute_tensor, distribute_tensor_with_customization, get_device_mesh, get_global_shape, get_sharding_spec, init_as_dtensor, init_tensor_as_customization_distributed, is_customized_distributed_tensor, is_distributed_tensor, ) from colossalai.tensor.padded_tensor import ( init_as_padded_tensor, is_padded_tensor, to_padded_tensor, to_unpadded_tensor, ) from colossalai.tensor.param_op_hook import ColoParamOpHookManager from colossalai.utils import _cast_float, free_storage, get_non_persistent_buffers_set, is_ddp_ignored from .chunk import Chunk, ChunkManager, TensorState, init_chunk_manager from .gemini_hook import GeminiZeROHook from .gemini_mgr import GeminiManager from .memory_tracer import MemStats, OrderedParamGenerator from .utils import get_temp_total_chunk_on_cuda try: from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX, _IncompatibleKeys except ImportError: _EXTRA_STATE_KEY_SUFFIX = "_extra_state" __all__ = [ "GeminiDDP", ] class GeminiDDP(ModelWrapper): """ZeRO DDP. Warning: Nested GeminiDDP is not supported now. It is designed to be used with ChunkManager and GeminiManager. For more details, see the API reference of ``ChunkManager`` and ``GeminiManager``. Args: module (torch.nn.Module): Module to apply ZeRO-DP. gemini_manager (GeminiManager): Manages the chunk manager and heterogeneous memory space. For more details, see the API reference of ``GeminiManager``. pin_memory (bool): Chunks on CPU Memory use pin-memory. force_outputs_fp32 (bool): If set to True, outputs will be fp32. Otherwise, outputs will be fp16. Defaults to False. strict_ddp_mode (bool): If set to True, there is no tensor sharding, each tensor is replicated. Defaults to False. Users can set it to True, when they clearly know that they only need DDP. scatter_after_inference (bool): If set to True, the model will be scattered after inference. This will save memory but slow down the consecutive inference. mixed_precision (torch.dtype): If set to torch.float16, the model will be trained in fp16. Otherwise, the model will be trained in bf16. Defaults to torch.float16. 
""" def __init__( self, module: torch.nn.Module, chunk_config_dict: Optional[dict] = None, chunk_init_device: torch.device = torch.device("cpu"), placement_policy: str = "static", enable_gradient_accumulation: bool = False, max_prefetch: int = 0, shard_param_frac: float = 1.0, # only for static placement offload_optim_frac: float = 0.0, # only for static placement offload_param_frac: float = 0.0, # only for static placement warmup_non_model_data_ratio: float = 0.8, # only for auto placement steady_cuda_cap_ratio: float = 0.9, # only for auto placement search_range_m: int = 32, # chunk search options hidden_dim: Optional[int] = None, # chunk search options min_chunk_size_m: float = 32, # chunk search options pin_memory: bool = False, force_outputs_fp32: bool = False, strict_ddp_mode: bool = False, scatter_after_inference: bool = True, mixed_precision: torch.dtype = torch.float16, zero_group: Optional[ProcessGroup] = None, memstats: Optional[MemStats] = None, # genimi memory stats master_weights: bool = True, extra_dp_group: Optional[ProcessGroup] = None, verbose: bool = False, enable_async_reduce: bool = True, fp8_communication: bool = False, use_fp8: bool = False, ) -> None: assert mixed_precision in (torch.float16, torch.bfloat16) reuse_fp16_chunk = master_weights if not enable_gradient_accumulation else False self.enable_gradient_accumulation = enable_gradient_accumulation if chunk_config_dict is not None: self.chunk_manager = ChunkManager( chunk_config_dict, chunk_init_device, reuse_fp16_chunk=reuse_fp16_chunk, max_prefetch=max_prefetch ) else: # some ugly hotfix for the compatibility with Lightning if search_range_m is None: search_range_m = 32 self.chunk_manager = init_chunk_manager( model=module, init_device=chunk_init_device, hidden_dim=hidden_dim, search_range_m=search_range_m, min_chunk_size_m=min_chunk_size_m, strict_ddp_flag=strict_ddp_mode, process_group=zero_group, reuse_fp16_chunk=reuse_fp16_chunk, verbose=verbose, max_prefetch=max_prefetch, ) if fp8_communication: self.chunk_manager.fp8_communication = True self.gemini_manager = GeminiManager( placement_policy, self.chunk_manager, memstats, shard_param_frac=shard_param_frac, offload_optim_frac=offload_optim_frac, offload_param_frac=offload_param_frac, warmup_non_model_data_ratio=warmup_non_model_data_ratio, steady_cuda_cap_ratio=steady_cuda_cap_ratio, max_prefetch=max_prefetch, ) self.force_outputs_fp32 = force_outputs_fp32 self.param_op_hook = GeminiZeROHook(self.gemini_manager) self.hooks = [self.param_op_hook] if use_fp8: self.hooks.append(FP8Hook()) self.fp32_params: List[torch.Tensor] = list() self.fp16_params: List[ColoParameter] = list() self.grads_device: Dict[torch.Tensor, torch.device] = dict() self.param2name: Dict[nn.Parameter, str] = dict() self.name2param: Dict[str, nn.Parameter] = dict() self.scatter_after_inference = scatter_after_inference self.mixed_precision = mixed_precision self.zero_group = zero_group or _get_default_group() self.extra_dp_group = extra_dp_group self.master_weights = master_weights self.enable_async_reduce = enable_async_reduce if enable_async_reduce: self.async_reduce_stream = get_accelerator().Stream() else: self.async_reduce_stream = None self._logger = get_dist_logger() if self.gemini_manager._premade_memstats_: # build chunk in param runtime visited order. param_order = self.gemini_manager.memstats()._param_runtime_order else: # build chunk in param initialized order. # Note: in this way, it can not get filter unused params during runtime. 
param_order = OrderedParamGenerator() for p in module.parameters(): param_order.append(p) for name, param in module.named_parameters(): self.param2name[param] = name for m_name, m_var in module.named_modules(): for p_name, p_var in m_var.named_parameters(recurse=False): param_name = m_name + "." + p_name if m_name else p_name self.name2param[param_name] = p_var self._init_chunks( param_order=param_order, strict_ddp_mode=strict_ddp_mode, cpu_offload=not (self.gemini_manager.policy_name == "static" and offload_param_frac == 0), pin_memory=pin_memory, ) super().__init__(module) self._non_persistent_buffers_set = get_non_persistent_buffers_set(module) self._cast_buffers() # register grad hook for p in module.parameters(): if is_ddp_ignored(p): continue if p.requires_grad: assert not hasattr(p, "_grad_handle") p._grad_handle = p.register_hook( partial( GeminiDDP.grad_handle, chunk_manager=self.chunk_manager, param2name=self.param2name, grads_device=self.grads_device, master_weights=self.master_weights, enable_gradient_accumulation=self.enable_gradient_accumulation, p=p, async_reduce_stream=self.async_reduce_stream, ) ) def remove_hooks(self): for p in self.module.parameters(): if is_ddp_ignored(p): continue if p.requires_grad: assert hasattr(p, "_grad_handle") p._grad_handle.remove() delattr(p, "_grad_handle") def __del__(self): self.remove_hooks() def parameters(self, recurse: bool = True): return self.module.parameters(recurse) def named_parameters(self, prefix: str = "", recurse: bool = True): return self.module.named_parameters(prefix, recurse) def named_buffers(self, prefix: str = "", recurse: bool = True): return self.module.named_buffers(prefix, recurse) def named_children(self): return self.module.named_children() def named_modules( self, memo: Optional[Set[torch.nn.Module]] = None, prefix: str = "", remove_duplicate: bool = True ): return self.module.named_modules(memo, prefix, remove_duplicate) @staticmethod def set_params_to_ignore(params_to_ignore: Iterable[torch.Tensor]) -> None: """Sets parameters to be ignored by DDP. This method must be called before initializing ColoDDP. Example: >>> params_to_ignore = [] >>> for p in module.parameters(): >>> if should_ignore(p): >>> params_to_ignore.append(p) >>> ColoDDP.set_params_to_ignore(params_to_ignore) >>> module = ColoDDP(module) Args: params_to_ignore (Iterable[torch.Tensor]): A list of parameters to be ignored. 
""" for p in params_to_ignore: p._ddp_to_ignore = True def _post_forward(self): """This function is only triggered for inference.""" access_list = list(self.chunk_manager.accessed_chunks) # we need to scatter all accessed chunks and move them to their original places for chunk in access_list: if chunk.keep_gathered: self.chunk_manager.fake_release_chunk(chunk) else: assert chunk.can_release self.chunk_manager.release_chunk(chunk) first_param = next(iter(chunk.tensors_info)) self.chunk_manager.move_chunk(chunk, self.grads_device[first_param]) assert self.chunk_manager.accessed_mem == 0 def forward(self, *args, **kwargs): # check whether we are in a inference mode grad_flag = torch.is_grad_enabled() if not grad_flag: assert ( not self.gemini_manager.need_warmup or not self.gemini_manager.is_warmup() ), "You should run a completed iteration as your warmup iter" args, kwargs = _cast_float(args, self.mixed_precision), _cast_float(kwargs, self.mixed_precision) self.module.zero_grad(set_to_none=True) if not grad_flag: outputs = self._inference_forward(*args, **kwargs) else: self.gemini_manager.pre_iter(*args) with ColoParamOpHookManager.use_hooks(*self.hooks): outputs = self.module(*args, **kwargs) if self.force_outputs_fp32: return _cast_float(outputs, torch.float) return outputs def _inference_forward(self, *args, **kwargs): """This function is only triggered for inference.""" fwd_ctx = ColoParamOpHookManager.use_hooks(*self.hooks) if not self.scatter_after_inference: # gather all chunks for chunk in self.chunk_manager.get_chunks(self.fp16_params): self.chunk_manager.access_chunk(chunk) fwd_ctx = nullcontext() with fwd_ctx: outputs = self.module(*args, **kwargs) if self.scatter_after_inference: # scatter chunks self._post_forward() # reset all recorded attributes self.gemini_manager.reset_attributes() return outputs def _setup_grads_ptr(self): for p in self.module.parameters(): if is_ddp_ignored(p): continue p.grad = None def _pre_backward(self): # set a visit label for all parameters # the label is used to check whether the parameter is correctly reduced for param in self.param2name: if not is_ddp_ignored(param): setattr(param, "_gemini_reduced", False) def _post_backward(self): if self.enable_async_reduce: self.async_reduce_stream.synchronize() if self.chunk_manager.accessed_mem != 0: error_params = ["Reduction failed at followed parameters:"] for param in self.param2name: if not is_ddp_ignored(param) and not getattr(param, "_gemini_reduced"): error_params.append(self.param2name[param]) error_str = "\n\t".join(error_params) raise RuntimeError( "ZERO DDP error: the synchronization of gradients doesn't exit properly.", "The most possible reason is that the model is not compatible with GeminiDDP.\n", f"{error_str}", ) self._setup_grads_ptr() if self.enable_gradient_accumulation and not self.chunk_manager.accumulating_grads: self.chunk_manager.accumulating_grads = True # Turn on the state of gradient accumulation. 
self._logger.debug( f"comp cuda demand time: {self.gemini_manager._comp_cuda_demand_time}, layout time: {self.gemini_manager._layout_time}, evict time: {self.gemini_manager._evict_time}, CPU->CUDA vol: {self.gemini_manager._h2d_volume}B, CUDA->CPU vol: {self.gemini_manager._d2h_volume}" ) self.gemini_manager.post_iter() def backward(self, loss: torch.Tensor): self._pre_backward() with self.param_op_hook.switch_to_backward(), ColoParamOpHookManager.use_hooks(*self.hooks): loss.backward() self._post_backward() def backward_by_grad(self, tensor, grad, inputs: torch.Tensor = None, retain_graph: bool = False): raise RuntimeError("Gemini is not compatible with pipeline. backward_by_grad shoudn't be called in Gemini.") @staticmethod def grad_handle( grad, chunk_manager: ChunkManager, param2name: Dict, grads_device: Dict, master_weights: bool, enable_gradient_accumulation: bool, p: nn.Parameter, async_reduce_stream=None, ): async_reduce_scatter = async_reduce_stream is not None setattr(p, "_gemini_reduced", True) empty_grad = torch.empty_like(grad) free_storage(empty_grad) with torch._C.DisableTorchFunction(): chunk = chunk_manager.get_chunk(p) if chunk.tensors_info[p].state != TensorState.HOLD_AFTER_BWD: raise RuntimeError( f"Parameter `{param2name[p]}` failed at the gradient reduction. " "Some unsupported torch function is operated upon this parameter." ) grad_chunk = chunk if not chunk_manager.reuse_fp16_chunk: if not chunk_manager.accumulating_grads: grad_chunk = chunk_manager.init_grad_chunk(chunk) else: assert chunk.grad_chunk is not None if chunk.grad_chunk not in chunk_manager.accessed_chunks: grad_chunk = chunk_manager.rearrange_accumulated_grad_chunk(chunk) else: grad_chunk = chunk.grad_chunk chunk.grad_chunk.l2_norm = None # hold -> compute -> hold after bwd grad_chunk.tensor_trans_state(p, TensorState.COMPUTE) grad_chunk.tensor_trans_state(p, TensorState.HOLD_AFTER_BWD) # fp16 param chunk: hold after bwd -> ready for reduce -> hold chunk.tensor_trans_state(p, TensorState.READY_FOR_REDUCE) chunk.tensor_trans_state(p, TensorState.HOLD) grad_chunk.tensor_trans_state(p, TensorState.READY_FOR_REDUCE) if not chunk_manager.accumulating_grads: grad_chunk.copy_tensor_to_chunk_slice(p, grad, update_ptr=chunk_manager.reuse_fp16_chunk) else: grad_chunk.add_tensor_to_chunk_slice(p, grad) if async_reduce_stream is not None: async_reduce_stream.wait_stream(get_accelerator().current_stream()) with get_accelerator().stream(async_reduce_stream): reduced = chunk_manager.reduce_chunk(grad_chunk, async_op=async_reduce_scatter) if reduced: grad_chunk.wait_async_reduce() if not chunk_manager.reuse_fp16_chunk: if chunk.keep_gathered: chunk_manager.fake_release_chunk(chunk) else: chunk_manager.release_chunk(chunk) if grad_chunk.is_gathered: grad_chunk.cuda_global_chunk.div_(chunk.pg_size) if chunk.extra_dp_group is not None: grad_chunk.cuda_global_chunk.div_(chunk.extra_dp_size) else: grad_chunk.cuda_shard.div_(chunk.pg_size) if chunk.extra_dp_group is not None: grad_chunk.cuda_shard.div_(chunk.extra_dp_size) # check overflow elements chunk_manager.overflow_counter += grad_chunk.has_inf_or_nan # record l2 norm for gradient clipping. 
flag is bound to fp16 chunk if chunk.l2_norm_flag: grad_chunk.set_l2_norm() chunk_manager.move_chunk( grad_chunk, grads_device[p], force_copy=True, async_move=async_reduce_scatter ) if not (master_weights) or (enable_gradient_accumulation): chunk_manager.move_chunk( chunk, grads_device[p], force_copy=True, async_move=async_reduce_scatter ) return empty_grad def zero_grad(self, set_to_none: bool = False) -> None: self.module.zero_grad(set_to_none=True) def set_chunk_grad_device(self, chunk: Chunk, device: torch.device) -> None: for tensor in chunk.get_tensors(): self.grads_device[tensor] = device def state_dict(self, destination=None, prefix="", keep_vars=False, only_rank_0: bool = True): """Returns a dictionary containing a whole state of the module. Both parameters and persistent buffers (e.g. running averages) are included. Keys are corresponding parameter and buffer names. Parameters and buffers set to ``None`` are not included. Warning: The non strict state dict would ignore the parameters if the tensors of the parameters are shared with other parameters which have been included in the dictionary. When you need to load the state dict, you should set the argument `strict` to False. Returns: dict: a dictionary containing a whole state of the module """ if destination is None: destination = OrderedDict() destination._metadata = OrderedDict() destination._metadata[prefix[:-1]] = local_metadata = dict(version=self._version) self._save_to_state_dict(destination, prefix, keep_vars, only_rank_0) for hook in self._state_dict_hooks.values(): hook_result = hook(self, destination, prefix, local_metadata) if hook_result is not None: destination = hook_result return destination def _get_chunk_to_save_data(self, chunk: Chunk, only_rank_0: bool) -> Dict: """ get gathered chunk content. Args: chunk (Chunk): a chunk only_rank_0 (bool): whether to only save data on rank 0 Returns: Dict: a dict whose key is param name and value is param with correct payload """ # save parameters chunk_to_save_data = dict() temp_chunk = get_temp_total_chunk_on_cuda(chunk, self.mixed_precision) for tensor, tensor_info in chunk.tensors_info.items(): record_tensor = torch.empty([0]) record_flag = (not only_rank_0) | (dist.get_rank(chunk.torch_pg) == 0) if record_flag: record_tensor = temp_chunk[tensor_info.offset : tensor_info.end].view(tensor.shape).to(tensor.device) if is_distributed_tensor(tensor): global_shape = get_global_shape(tensor) device_mesh = get_device_mesh(tensor) shard_spec = get_sharding_spec(tensor) record_tensor = init_as_dtensor( record_tensor, device_mesh=device_mesh, sharding_spec=shard_spec, global_shape=global_shape ) elif is_customized_distributed_tensor(tensor): init_tensor_as_customization_distributed( record_tensor, shard_fn=tensor.shard_fn, gather_fn=tensor.gather_fn ) record_tensor = gather_distributed_param(record_tensor, keep_vars=False).cpu() if is_padded_tensor(tensor): record_tensor = init_as_padded_tensor( record_tensor, tensor._current_length, tensor._origin_length, tensor._padding_dim ) record_tensor = to_unpadded_tensor(record_tensor) assert tensor not in chunk_to_save_data chunk_to_save_data[tensor] = record_tensor del temp_chunk return chunk_to_save_data def _get_param_to_save_data(self, param_list: List[torch.nn.Parameter], only_rank_0: bool) -> Dict: """ get param content from chunks. 
Args: param_list (_type_): a list of torch.nn.Parameters only_rank_0 (_type_): _description_ Returns: Dict: a dict whose key is param name and value is param with correct payload """ # save parameters param_to_save_data = dict() chunk_list = self.chunk_manager.get_chunks(param_list) for chunk in chunk_list: param_to_save_data.update(self._get_chunk_to_save_data(chunk, only_rank_0)) return param_to_save_data def _save_to_state_dict(self, destination, prefix, keep_vars, only_rank_0=True): r"""Saves module state to `destination` dictionary, containing a state of the module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.state_dict`. In rare cases, subclasses can achieve class-specific behavior by overriding this method with custom logic. Args: destination (dict): a dict where state will be stored prefix (str): the prefix for parameters and buffers used in this module """ assert keep_vars is False, "`state_dict` with parameter, `keep_vars=True`, is not supported now." # get copies of fp32 parameters in CPU # as memory of fp16_params may be reused by grad, it's not reliable, we should use fp32_params and convert to fp16 params = self.fp32_params if self.chunk_manager.reuse_fp16_chunk else self.fp16_params param_to_save_data = self._get_param_to_save_data(params, only_rank_0) # get the mapping between copies and fp16 parameters p_mapping = dict() if self.chunk_manager.reuse_fp16_chunk: for p, fp32_p in zip(self.fp16_params, self.fp32_params): name = self.param2name[p] assert fp32_p in param_to_save_data, "Parameter '{}' is neglected in the chunk list".format(name) record_parameter = param_to_save_data[fp32_p] p_mapping[p] = record_parameter else: p_mapping = param_to_save_data for name, param in self.name2param.items(): if param is not None: if is_ddp_ignored(param): # deal with ddp ignored parameters destination[prefix + name] = param if keep_vars else param.detach() else: if is_padded_tensor(p_mapping[param]): p_mapping[param] = to_unpadded_tensor(p_mapping[param]) destination[prefix + name] = p_mapping[param] del p_mapping del param_to_save_data # save all buffers for name, buf in self.named_buffers(): if buf is not None and name not in self._non_persistent_buffers_set: destination[prefix + name] = buf if keep_vars else buf.detach() # save extra states extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if ( getattr(self.__class__, "get_extra_state", torch.nn.Module.get_extra_state) is not torch.nn.Module.get_extra_state ): destination[extra_state_key] = self.get_extra_state() def load_state_dict(self, state_dict: "OrderedDict[str, torch.Tensor]", strict: bool = True): r"""Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. If :attr:`strict` is ``True``, then the keys of :attr:`state_dict` must exactly match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Args: state_dict (dict): a dict containing parameters and persistent buffers. strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. 
Default: ``True`` Returns: ``NamedTuple`` with ``missing_keys`` and ``unexpected_keys`` fields: * **missing_keys** is a list of str containing the missing keys * **unexpected_keys** is a list of str containing the unexpected keys Note: If a parameter or buffer is registered as ``None`` and its corresponding key exists in :attr:`state_dict`, :meth:`load_state_dict` will raise a ``RuntimeError``. """ missing_keys: List[str] = [] unexpected_keys: List[str] = [] error_msgs: List[str] = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: # mypy isn't aware that "_metadata" exists in state_dict state_dict._metadata = metadata # type: ignore[attr-defined] prefix = "" local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) self._load_from_state_dict(state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) if strict: if len(unexpected_keys) > 0: error_msgs.insert( 0, "Unexpected key(s) in state_dict: {}. ".format( ", ".join('"{}"'.format(k) for k in unexpected_keys) ), ) if len(missing_keys) > 0: error_msgs.insert( 0, "Missing key(s) in state_dict: {}. ".format(", ".join('"{}"'.format(k) for k in missing_keys)) ) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(self.__class__.__name__, "\n\t".join(error_msgs)) ) return _IncompatibleKeys(missing_keys, unexpected_keys) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): r"""Copies parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this module in input :attr:`state_dict` is provided as :attr:`local_metadata`. For state dicts without metadata, :attr:`local_metadata` is empty. Subclasses can achieve class-specific backward compatible loading using the version number at `local_metadata.get("version", None)`. .. note:: :attr:`state_dict` is not the same object as the input :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So it can be modified. Args: state_dict (dict): a dict containing parameters and persistent buffers. prefix (str): the prefix for parameters and buffers used in this module local_metadata (dict): a dict containing the metadata for this module. 
See strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` with :attr:`prefix` match the names of parameters and buffers in this module missing_keys (list of str): if ``strict=True``, add missing keys to this list unexpected_keys (list of str): if ``strict=True``, add unexpected keys to this list error_msgs (list of str): error messages should be added to this list, and will be reported together in :meth:`~torch.nn.Module.load_state_dict` """ for hook in self._load_state_dict_pre_hooks.values(): hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) persistent_buffers = {k: v for k, v in self.named_buffers() if k not in self._non_persistent_buffers_set} local_name_params = itertools.chain(self.named_parameters(), persistent_buffers.items()) local_state = {k: v for k, v in local_name_params if v is not None} def load( param_name, dest_tensor, copy_func, source_device_mesh=None, source_sharding_spec=None, shard_fn=None, gather_fn=None, ): state_key = prefix + param_name if state_key in state_dict: input_param = state_dict[state_key] global_shape = dest_tensor.shape if source_device_mesh is not None and source_sharding_spec is not None: global_shape = get_global_shape(dest_tensor) if is_padded_tensor(dest_tensor): padding_dim = dest_tensor._padding_dim input_param = to_padded_tensor(input_param, global_shape[padding_dim], padding_dim) if source_device_mesh is not None and source_sharding_spec is not None: input_param = distribute_tensor(input_param, source_device_mesh, source_sharding_spec) elif shard_fn is not None and gather_fn is not None: input_param = distribute_tensor_with_customization( input_param, shard_fn=shard_fn, gather_fn=gather_fn )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
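A hedged sketch of wrapping a plain module with GeminiDDP using the constructor arguments documented above (static placement, full parameter sharding). Distributed initialization (e.g. via colossalai's launcher) is assumed to have been done elsewhere, and the concrete sizes are illustrative only.

import torch
import torch.nn as nn
from colossalai.zero.gemini import GeminiDDP

model = nn.Sequential(nn.Linear(32, 32), nn.GELU(), nn.Linear(32, 2))
gemini_model = GeminiDDP(
    model,
    placement_policy="static",
    shard_param_frac=1.0,            # shard all parameters across the ZeRO group
    offload_optim_frac=0.0,
    pin_memory=True,
    mixed_precision=torch.float16,
)
out = gemini_model(torch.randn(4, 32))   # inputs are cast to the mixed-precision dtype
gemini_model.backward(out.mean())        # use the wrapper's backward, not loss.backward()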
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/utils.py
colossalai/zero/gemini/utils.py
from collections import OrderedDict from copy import copy from typing import Optional, Set import torch import torch.distributed as dist import torch.nn as nn from colossalai.accelerator import get_accelerator from .chunk import Chunk def get_temp_total_chunk_on_cuda(chunk: Chunk, dtype: torch.dtype): if chunk.is_gathered: return chunk.cuda_global_chunk if chunk.cuda_shard is not None: shard_temp = chunk.cuda_shard else: shard_temp = chunk.cpu_shard.to(get_accelerator().get_current_device()) shard_temp = shard_temp.to(dtype) total_temp = torch.zeros(chunk.chunk_size, dtype=dtype, device=get_accelerator().get_current_device()) gather_list = list(torch.chunk(input=total_temp, chunks=chunk.pg_size, dim=0)) dist.all_gather(tensor_list=gather_list, tensor=shard_temp, group=chunk.torch_pg) return total_temp def _get_dfs_module_list(module: nn.Module, memo: Optional[Set[nn.Module]] = None, prefix: str = ""): """Get a dfs module list of the given module. Its order is same as the order of creations of modules.""" if memo is None: memo = set() if module not in memo: for name, submodule in module._modules.items(): if submodule is None: continue submodule_prefix = prefix + ("." if prefix else "") + name for m in _get_dfs_module_list(submodule, memo, submodule_prefix): yield m memo.add(module) yield prefix, module def _get_shallow_copy_model(model: nn.Module): """Get a shallow copy of the given model. Each submodule is different from the original submodule. But the new submodule and the old submodule share all attributes. """ old_to_new = dict() for name, module in _get_dfs_module_list(model): new_module = copy(module) new_module._modules = OrderedDict() for subname, submodule in module._modules.items(): if submodule is None: continue setattr(new_module, subname, old_to_new[submodule]) old_to_new[module] = new_module return old_to_new[model] def get_static_torch_model( zero_ddp_model, device=torch.device("cpu"), dtype=torch.float32, only_rank_0=True ) -> torch.nn.Module: """Get a static torch.nn.Module model from the given GeminiDDP module. You should notice that the original GeminiDDP model is not modified. Thus, you can use the original model in further training. But you should not use the returned torch model to train, this can cause unexpected errors. Args: zero_ddp_model (GeminiDDP): a zero ddp model device (torch.device): the device of the final torch model dtype (torch.dtype): the dtype of the final torch model only_rank_0 (bool): if True, only rank0 has the converted torch model Returns: torch.nn.Module: a static torch model used for saving checkpoints or numeric checks """ from colossalai.zero.gemini.gemini_ddp import GeminiDDP assert isinstance(zero_ddp_model, GeminiDDP) state_dict = zero_ddp_model.state_dict(only_rank_0=only_rank_0) colo_model = zero_ddp_model.module torch_model = _get_shallow_copy_model(colo_model) if not only_rank_0 or dist.get_rank() == 0: for (name, colo_module), (_, torch_module) in zip( _get_dfs_module_list(colo_model), _get_dfs_module_list(torch_model) ): # clean the parameter list of the new torch module torch_module._parameters = OrderedDict() for sufix_param_name, param in colo_module.named_parameters(recurse=False): # get the full name of the parameter full_param_name = name + ("." 
if name else "") + sufix_param_name assert ( full_param_name in state_dict ), f"Can not find parameter `{full_param_name}` in the GeminiDDP module" state_param = state_dict[full_param_name] torch_param = torch.nn.Parameter(state_param.data.to(device=device, dtype=dtype)) setattr(torch_module, sufix_param_name, torch_param) dist.barrier() return torch_model
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/gemini_mgr.py
colossalai/zero/gemini/gemini_mgr.py
import functools from time import time from typing import Dict, Iterable, List, Optional, Tuple import torch import torch.distributed as dist from .chunk import Chunk, ChunkManager from .memory_tracer import ChunkMemStatsCollector, MemStats from .placement_policy import PlacementPolicy, PlacementPolicyFactory class GeminiManager: """ Stateful Tensor Manager, inspired from PatrickStar PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management https://arxiv.org/abs/2108.05818 Args: placement_policy (str): Which device to place *held* tensors. It can be 'static' and 'auto'. If it's 'auto', they are moving dynamically based on CPU and CUDA memory usage. It will utilize heterogeneous memory space evenly and well. Note that 'auto' policy can only work well when no other processes use CUDA during your training. chunk_manager (ChunkManager): A ``ChunkManager`` instance. memstats (MemStats, optional): a mem stats collected by a runtime mem tracer. if None then GeminiManager will collect it during a warmup iteration. """ def __init__( self, placement_policy: str, chunk_manager: ChunkManager, memstats: Optional[MemStats] = None, **placement_kwargs, ) -> None: assert placement_policy in PlacementPolicyFactory.get_policy_names() self.policy_name = placement_policy policy_cls = PlacementPolicyFactory.create(placement_policy) self._chunk_manager = chunk_manager self._premade_memstats_ = memstats is not None self._memstats = memstats self._mem_stats_collector = ( ChunkMemStatsCollector(chunk_manager, self._memstats) if policy_cls.need_mem_stats else None ) self._placement_policy = policy_cls( chunk_manager=chunk_manager, mem_stats_collector=self._mem_stats_collector, **placement_kwargs ) self._compute_list: List[Tuple[Chunk, ...]] = [] self._compute_idx: int = -1 self._async_works: Dict[Chunk, dist.Work] = {} self._h2d_volume = 0 self._d2h_volume = 0 self._layout_time = 0 self._evict_time = 0 self._warmup = True self._comp_cuda_demand_time = 0 def reset_attributes(self): self._compute_idx = -1 self._h2d_volume = 0 self._d2h_volume = 0 self._layout_time = 0 self._evict_time = 0 self._comp_cuda_demand_time = 0 @property def need_warmup(self) -> bool: return self.policy_name in ("auto", "const") def is_warmup(self): return self._warmup def memstats(self): """memstats get the memory statistics during training. The stats could be collected by a runtime memory tracer, or collected by the GeminiManager. Note, for the latter, you can not access the memstats before warmup iteration finishes. """ if self._premade_memstats_: return self._memstats else: assert not self._warmup, "Gemini Manager has memstats after warm up! Now is during warmup." return self._mem_stats_collector._memstats def pre_iter(self, *args): if self._mem_stats_collector and self._warmup: self._mem_stats_collector.start_collection() def post_iter(self): """This function must be called when each iteration finishes""" if self._mem_stats_collector and self._warmup: self._mem_stats_collector.finish_collection() self._warmup = False self.reset_attributes() def adjust_layout(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None: """Adjust the layout of stateful tensors according to the information provided by mem_stats_collector, which should belongs to a Sharded Model. 
""" # find stateful tensor in state COMPUTE start = time() self._record_warmup_chunks_order(chunks, record_anyway=record_anyway) cuda_demand, can_evict_chunks = self._get_layout_info(self._compute_idx, self._warmup, chunks) # don't evict chunks that are asynchronously fetched can_evict_chunks = [chunk for chunk in can_evict_chunks if chunk not in self._async_works] self._layout_time += time() - start vol, evict_time = self._placement_policy.evict_tensors( can_evict_chunks=can_evict_chunks, cuda_demand=cuda_demand, warmup=self._warmup, compute_list=self._compute_list, compute_idx=self._compute_idx, ) self._d2h_volume += vol self._evict_time += evict_time # move COMPUTE tensors to CUDA self._h2d_volume += cuda_demand def wait_chunks(self, chunks: Iterable[Chunk]) -> Tuple[Chunk]: non_prefetched_chunks = [] for chunk in chunks: if chunk in self._async_works: self._async_works[chunk].wait() del self._async_works[chunk] else: non_prefetched_chunks.append(chunk) return tuple(non_prefetched_chunks) def add_work(self, chunk: Chunk, work: dist.Work): assert work is not None assert chunk not in self._async_works self._async_works[chunk] = work @functools.lru_cache(maxsize=None) def _get_layout_info(self, compute_idx: int, warmup: bool, chunks: Tuple[Chunk, ...]): start = time() cuda_demand = 0 for chunk in chunks: if chunk.device_type == "cuda" or chunk.device_type == "npu": if chunk.is_gathered: pass else: cuda_demand += chunk.chunk_mem - chunk.shard_mem elif chunk.device_type == "cpu": cuda_demand += chunk.chunk_mem else: raise RuntimeError self._comp_cuda_demand_time += time() - start can_evict_chunks = self._chunk_manager.get_cuda_movable_chunks() return cuda_demand, can_evict_chunks def _record_warmup_chunks_order(self, chunks: Tuple[Chunk, ...], record_anyway: bool = False) -> None: self._compute_idx += 1 if self._warmup and (self._placement_policy.need_mem_stats or record_anyway): self._compute_list.append(chunks) def sample_overall_data(self): if self._mem_stats_collector: self._mem_stats_collector.sample_overall_data() def record_model_data_volume(self): if self._mem_stats_collector: self._mem_stats_collector.record_model_data_volume() @property def chunk_manager(self): return self._chunk_manager @property def cuda_margin_mem(self) -> Optional[float]: if self._mem_stats_collector: return self._mem_stats_collector.cuda_margin_mem return None @property def placement_policy(self) -> PlacementPolicy: return self._placement_policy @property def compute_list(self) -> List[Tuple[Chunk, ...]]: return self._compute_list @property def compute_idx(self) -> int: return self._compute_idx @property def async_works(self) -> Dict[Chunk, dist.Work]: return self._async_works @property def is_cuda_margin_mem_avail(self) -> bool: return self._placement_policy.need_mem_stats def setup_grads_device( self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device] ) -> None: self._placement_policy.setup_grads_device(params, grads_device_map)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/__init__.py
colossalai/zero/gemini/__init__.py
from .chunk import ChunkManager, TensorInfo, TensorState, search_chunk_configuration
from .gemini_ddp import GeminiDDP
from .gemini_mgr import GeminiManager
from .gemini_optimizer import GeminiAdamOptimizer, GeminiOptimizer
from .utils import get_static_torch_model

__all__ = [
    "GeminiManager",
    "TensorInfo",
    "TensorState",
    "ChunkManager",
    "search_chunk_configuration",
    "GeminiDDP",
    "get_static_torch_model",
    "GeminiAdamOptimizer",
    "GeminiOptimizer",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/gemini_optimizer.py
colossalai/zero/gemini/gemini_optimizer.py
# this code is inspired by the DeepSpeed library and implemented with our own design from scratch import copy import math from typing import Any, Dict, Iterator, Optional, OrderedDict, Set, Tuple, Union import torch import torch.distributed as dist from packaging.version import Version from torch.distributed import ProcessGroup from torch.nn import Parameter from torch.optim import Optimizer from colossalai.accelerator import get_accelerator from colossalai.amp.naive_amp.mixed_precision_mixin import BF16MixedPrecisionMixin, FP16MixedPrecisionMixin from colossalai.checkpoint_io.utils import StateDictSharder, gather_distributed_param from colossalai.interface import OptimizerWrapper from colossalai.logging import get_dist_logger from colossalai.nn.optimizer import CPUAdam, FusedAdam, HybridAdam from colossalai.tensor.d_tensor import ( distribute_tensor, distribute_tensor_with_customization, get_device_mesh, get_global_shape, get_sharding_spec, init_as_dtensor, init_tensor_as_customization_distributed, is_customized_distributed_tensor, is_distributed_tensor, ) from colossalai.tensor.padded_tensor import ( init_as_padded_tensor, is_padded_tensor, to_padded_tensor, to_unpadded_tensor, ) from colossalai.utils import disposable, is_ddp_ignored from .chunk import Chunk, ChunkManager from .gemini_ddp import GeminiDDP __all__ = ["GeminiOptimizer", "GeminiAdamOptimizer"] _AVAIL_OPTIM_LIST = {FusedAdam, CPUAdam, HybridAdam} class GeminiFP16MixedPrecisionMixin(FP16MixedPrecisionMixin): def __init__( self, module: GeminiDDP, initial_scale: float = 2**16, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, ) -> None: super().__init__( initial_scale, min_scale, growth_factor, backoff_factor, growth_interval, hysteresis, max_scale ) self.module = module def check_local_overflow(self) -> bool: return self.module.chunk_manager.overflow_counter.item() > 0 def pre_zero_grad(self) -> None: self.module.chunk_manager.overflow_counter.zero_() class GeminiOptimizer(OptimizerWrapper): """A wrapper for optimizer. ``GeminiDDP`` and ``GeminiOptimizer`` implement Zero Redundancy Optimizer (ZeRO state-3). Note: You must use ``GeminiDDP`` with ``GeminiOptimizer``. Note: Make sure you set ``placement_policy`` of ``GeminiManager`` to `"auto"`, if you set ``gpu_margin_mem_ratio > 0``. Args: optim (Optimizer): An Optimizer instance. module (GeminiDDP): A ``GeminiDDP`` instance. gpu_margin_mem_ratio (float, optional): The ratio of GPU remaining memory (after the first forward-backward) which will be used when using hybrid CPU optimizer. This argument is meaningless when `placement_policy` of `GeminiManager` is not "auto". Defaults to 0.0. initial_scale (float, optional): Initial scale used by DynamicGradScaler. Defaults to 2**32. min_scale (float, optional): Min scale used by DynamicGradScaler. Defaults to 1. growth_factor (float, optional): Growth_factor used by DynamicGradScaler. Defaults to 2. backoff_factor (float, optional): Backoff_factor used by DynamicGradScaler. Defaults to 0.5. growth_interval (float, optional): Growth_interval used by DynamicGradScaler. Defaults to 1000. hysteresis (float, optional): Hysteresis used by DynamicGradScaler. Defaults to 2. max_scale (int, optional): Max_scale used by DynamicGradScaler. Defaults to 2**32. max_norm (float, optional): The norm value used to clip gradient. Defaults to 0.0. norm_type (float, optional): The type of norm used for gradient clipping. 
Currently, only L2-norm (norm_type=2.0) is supported in GeminiOptimizer. Defaults to 2.0. verbose (bool, optional): Whether to print verbose information, including grad overflow info. Defaults to False. """ def __init__( self, optim: Optimizer, module: GeminiDDP, gpu_margin_mem_ratio: float = 0.0, initial_scale: float = 2**32, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, max_norm: float = 0.0, norm_type: float = 2.0, tp_group: ProcessGroup = None, params_info=None, verbose: bool = False, **defaults: Any, ): super().__init__(optim) assert isinstance(module, GeminiDDP) assert type(optim) in _AVAIL_OPTIM_LIST, ( "You should use an optimizer in the available list:\n" f"{_AVAIL_OPTIM_LIST}" ) self.module = module self.gemini_manager = module.gemini_manager self.chunk_manager: ChunkManager = self.gemini_manager.chunk_manager self.param_to_range: Dict[Parameter, Tuple[int, int]] = dict() self.param_to_chunk16: Dict[Parameter, Chunk] = dict() self.chunk16_set: Set[Chunk] = set() self.clipping_flag = max_norm > 0.0 self.max_norm = max_norm self.tp_group = tp_group self.params_info = params_info self.tp_size = dist.get_world_size(tp_group) if tp_group is not None else 1 self.tp_rank = dist.get_rank(tp_group) if tp_group is not None else 0 self.verbose = verbose self.param_groups_backup = list() self.logger = get_dist_logger() # Mapping from integer id to real/fake param tensor, used for checkpointing. self.id_to_real_params: Dict[int, Parameter] = dict() self.id_to_fake_params: Dict[int, Parameter] = dict() if self.clipping_flag: assert norm_type == 2.0, "GeminiOptimizer only supports L2 norm now" ddp_param_list = [] for name, param in module.named_parameters(): if is_ddp_ignored(param): if param.requires_grad: self.logger.warning( f"Parameter `{name}` is ignored by DDP but requires gradient! 
" "You should handle its optimizer update by yourself!", ranks=[0], ) else: ddp_param_list.append(param) for p in ddp_param_list: chunk_16 = self.chunk_manager.get_chunk(p) if chunk_16 not in self.chunk16_set: chunk_16.l2_norm_flag = self.clipping_flag self.chunk16_set.add(chunk_16) self.__init__optimizer() if module.mixed_precision is torch.float16: self.mix_precision_mixin = GeminiFP16MixedPrecisionMixin( module, initial_scale=initial_scale, min_scale=min_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, ) elif module.mixed_precision is torch.bfloat16: self.mix_precision_mixin = BF16MixedPrecisionMixin() else: raise RuntimeError(f"Unsupported mixed precision type: {module.mixed_precision}") self._logger = get_dist_logger() self.gpu_margin_mem_ratio: float = float(gpu_margin_mem_ratio) assert 0.0 <= self.gpu_margin_mem_ratio <= 1.0, f"gpu_margin_mem_ratio must >=0.0 and <=1.0" # Only move fp32 shards from CPU to GPU when user allows and inner optimizer is valid # Inner optimizer must support optimizing hybrid (CPU and CUDA) tensors, # and it must set `num_fp32_shards_per_param` correctly self._should_move_fp32_params_h2d: bool = ( self.gemini_manager.is_cuda_margin_mem_avail and self.gpu_margin_mem_ratio > 0.0 and getattr(optim, "num_fp32_shards_per_param", 0) >= 2 ) if self.gpu_margin_mem_ratio > 0.0 and not self.gemini_manager.is_cuda_margin_mem_avail: self._logger.warning(f'gpu_margin_mem_ratio is meaningless when placement_policy is not "auto"', ranks=[0]) self._register_states = disposable(self._register_states_) self._current_grad_norm: Optional[float] = None def _set_grad_ptr(self): for group in self.param_groups: for fake_param in group["params"]: chunk16 = self.param_to_chunk16[fake_param] begin, end = self.param_to_range[fake_param] grad_chunk16 = chunk16 if self.module.chunk_manager.reuse_fp16_chunk else chunk16.grad_chunk fake_param.data = grad_chunk16.payload[begin:end] fake_param.grad = fake_param.data to_update_chunk = chunk16.paired_chunk if self.module.master_weights else chunk16 fake_param.data = to_update_chunk.payload[begin:end] def _update_fp16_params(self): none_tensor = torch.empty([0]) for group in self.param_groups: for fake_param in group["params"]: assert fake_param.grad is None fake_param.data = none_tensor.to(fake_param.device) for chunk16 in self.chunk16_set: chunk16.optim_update() def _clear_global_norm(self) -> None: for c16 in self.chunk16_set: grad_chunk = c16 if self.module.chunk_manager.reuse_fp16_chunk else c16.grad_chunk grad_chunk.l2_norm = None def _calc_global_norm(self) -> float: norm_sqr: float = 0.0 group_to_norm = dict() for c16 in self.chunk16_set: grad_chunk = c16 if self.module.chunk_manager.reuse_fp16_chunk else c16.grad_chunk assert grad_chunk.l2_norm is not None if grad_chunk.is_gathered: norm_sqr += grad_chunk.l2_norm else: # this chunk is sharded, use communication to collect total norm if grad_chunk.torch_pg not in group_to_norm: group_to_norm[grad_chunk.torch_pg] = 0.0 group_to_norm[grad_chunk.torch_pg] += grad_chunk.l2_norm grad_chunk.l2_norm = None # clear l2 norm comm_buffer = torch.zeros(1, dtype=torch.float, device=get_accelerator().get_current_device()) for group, part_norm in group_to_norm.items(): comm_buffer.fill_(part_norm) dist.all_reduce(comm_buffer, group=group) norm_sqr += comm_buffer.item() global_norm = math.sqrt(norm_sqr) return global_norm def _get_combined_scale(self): div_scale = 
self.mix_precision_mixin.get_grad_div_scale() if self.clipping_flag: total_norm = self._calc_global_norm() self._current_grad_norm = total_norm clip = ((total_norm / div_scale) + 1e-6) / self.max_norm if clip > 1: div_scale = clip * div_scale return -1 if div_scale == 1.0 else div_scale def zero_grad(self, *args, **kwargs): self.mix_precision_mixin.pre_zero_grad() return self.optim.zero_grad(set_to_none=True) def step(self, *args, **kwargs): if self.module.master_weights: self._maybe_move_fp32_params() self._set_grad_ptr() if self.mix_precision_mixin.should_skip_step(): if self.verbose: self._logger.info(f"Found overflow. Skip step") self._clear_global_norm() # clear recorded norm self.zero_grad() # reset all gradients if self.module.chunk_manager.reuse_fp16_chunk: self._update_fp16_params() return # get combined scale. combined scale = loss scale * clipping norm # so that gradient = gradient / combined scale combined_scale = self._get_combined_scale() ret = self.optim.step(div_scale=combined_scale, *args, **kwargs) self._register_states() self.zero_grad() if self.module.master_weights: self._update_fp16_params() self.module.chunk_manager.accumulating_grads = False return ret def clip_grad_norm(self, model: torch.nn.Module, max_norm: float, norm_type: float = 2.0): raise NotImplementedError def backward(self, loss: torch.Tensor): loss = self.mix_precision_mixin.pre_backward(loss) self.module.backward(loss) def backward_by_grad( self, tensor: torch.Tensor, grad: torch.Tensor, inputs: torch.Tensor = None, retain_graph: bool = False ): # This function is called except the last stage of pipeline parallel # It receives the scaled grad from the previous rank # No need to scale the grad again # Need to unscale when optimizing grad = self.mix_precision_mixin.pre_backward_by_grad(grad, inputs=inputs, retain_graph=retain_graph) self.module.backward_by_grad(tensor, grad) def _maybe_move_fp32_params(self): if self._should_move_fp32_params_h2d: self._should_move_fp32_params_h2d = False available_cuda_margin_mem = self.gemini_manager.cuda_margin_mem * self.gpu_margin_mem_ratio fp32_params_available_cuda_margin_mem = available_cuda_margin_mem / self.optim.num_fp32_shards_per_param fp32_params_used_cuda_margin_mem = 0 for group in self.param_groups: for fake_param in group["params"]: chunk16 = self.param_to_chunk16[fake_param] chunk32 = chunk16.paired_chunk if chunk32.device_type == "cuda" or chunk32.device_type == "npu": continue if fp32_params_used_cuda_margin_mem + chunk32.payload_mem < fp32_params_available_cuda_margin_mem: self.chunk_manager.move_chunk(chunk32, get_accelerator().get_current_device()) # stores grad now self.chunk_manager.move_chunk(chunk16, get_accelerator().get_current_device()) self.module.set_chunk_grad_device(chunk16, get_accelerator().get_current_device()) fp32_params_used_cuda_margin_mem += chunk32.payload_mem for group in self.param_groups: for fake_param in group["params"]: chunk16 = self.param_to_chunk16[fake_param] chunk32 = chunk16.paired_chunk if chunk32.device_type == "cuda" or chunk32.device_type == "npu": state = self.optim.state[fake_param] for k, v in state.items(): if isinstance(v, torch.Tensor): state[k] = v.to(get_accelerator().get_current_device()) def _register_states_(self): for group in self.optim.param_groups: for p in group["params"]: state = self.optim.state[p] for val in state.values(): if isinstance(val, torch.Tensor): self.chunk_manager.add_extern_static_tensor(val) def __init__optimizer(self): def get_range_pair(local_chunk: Chunk, local_param: 
Parameter): param_info = local_chunk.tensors_info[local_param] if local_chunk.keep_gathered: return param_info.offset, param_info.end begin = max(0, param_info.offset - local_chunk.shard_begin) end = min(local_chunk.shard_size, param_info.end - local_chunk.shard_begin) return begin, end param_id = -1 for group in self.optim.param_groups: fake_params_list = list() group_backup = {k: v for k, v in group.items() if k != "params"} group_ids = [] for param in group["params"]: # Record the mapping of id to current param. param_id += 1 self.id_to_real_params[param_id] = param group_ids.append(param_id) # If current param is controlled by current process, add it to fake_param. if is_ddp_ignored(param): continue chunk16 = self.chunk_manager.get_chunk(param) range_pair = get_range_pair(chunk16, param) if range_pair[0] >= range_pair[1]: continue grad_device = self.module.grads_device[param] fake_param = torch.nn.Parameter(torch.empty([0], device=grad_device)) self.param_to_chunk16[fake_param] = chunk16 self.param_to_range[fake_param] = range_pair self.id_to_fake_params[param_id] = fake_param fake_params_list.append(fake_param) # Update self.optim.param_groups as well as backup group. group["params"] = fake_params_list group_backup["params"] = group_ids self.param_groups_backup.append(group_backup) def get_offsets(self, param_id: int) -> tuple: """ Args: param_id(int): The id of parameter. Returns: chunk_offset(int): Offset of parameter inside the chunk. shard_offset(int): Offset of its optimizer state shard relative to the whole optimizer state. shard_size(int): Length of parameter shard owned by current process. """ if param_id not in self.id_to_fake_params: return -1, -1, -1 fake_param = self.id_to_fake_params[param_id] chunk = self.param_to_chunk16[fake_param] param = self.id_to_real_params[param_id] param_info = chunk.tensors_info[param] begin_in_chunk, end_in_chunk = self.param_to_range[fake_param] chunk_offset = begin_in_chunk if chunk.keep_gathered: shard_offset = 0 else: shard_offset = begin_in_chunk + chunk.shard_begin - param_info.offset shard_size = end_in_chunk - begin_in_chunk assert chunk_offset >= 0 and shard_offset >= 0 return chunk_offset, shard_offset, shard_size def collect_states(self, param_id: int, only_rank_0: bool = True) -> dict: """ Args: param_id (int): id of the parameter whose state is to be gathered at master rank. only_rank_0(bool): if True, states will be collected only on master rank, otherwise collected on every rank. Returns: collected_states(dict): the gathered optimizer state of parameter with given id if this method is called by master rank, otherwise an empty dict. This method can work only when called by all processes simultaneously. """ # Get param & chunk & process group. param = self.id_to_real_params[param_id] fake_param = self.id_to_fake_params.get(param_id, None) chunk = self.chunk_manager.get_chunk(param) zero_group = chunk.torch_pg rank = dist.get_rank(zero_group) master_rank = 0 collected_states = {} # Fetch names of states through all_gather. local_state_names = None if fake_param is not None: local_state_names = list(self.optim.state[fake_param].keys()) gathered_state_names = [None for _ in range(dist.get_world_size(zero_group))] dist.barrier() dist.all_gather_object(gathered_state_names, local_state_names, zero_group) state_names = None for names in gathered_state_names: if names is not None: # Assume different devices share the same set of state names if they have. 
state_names = copy.deepcopy(names) break # Directly return if this parameter doesn't have optimizer states. # e.g. parameter freezed/layer dropped if state_names is None: return collected_states # Boolean variable is_collector indicates that whether the current rank # needs to gather the whole optimizer states. # Only master rank is collector when only_rank_0 is True. # Every rank is collector when only_rank_0 is False. is_collector = (rank == master_rank) or (not only_rank_0) # get tensor parallelism information is_dtensor = is_distributed_tensor(param) is_customized_distributed = is_customized_distributed_tensor(param) shard_spec = get_sharding_spec(param) if is_dtensor else None device_mesh = get_device_mesh(param) if is_dtensor else None global_shape = self.params_info["id2shape"][param_id] # If the chunk is kept gathered, # the parameters are treated the same as that of those in strict DDP during training. # So states can be directly fetched from current device. if chunk.keep_gathered: assert param_id in self.id_to_fake_params if is_collector: states = self.optim.state[fake_param] for state_name in state_names: if state_name == "step": # To keep aligned with pytorch, state 'step' is stored as a pytorch tensor with type float32. collected_states[state_name] = torch.tensor( states["step"], dtype=torch.float32, requires_grad=False ).cpu() else: state_tensor = states[state_name].detach().clone().to(torch.float32).cpu() if is_dtensor: global_shape = get_global_shape(param) state_tensor = torch.reshape(state_tensor, param.shape).to(param.device) state_tensor = init_as_dtensor( state_tensor, device_mesh=device_mesh, sharding_spec=shard_spec, global_shape=global_shape, ) elif is_customized_distributed: state_tensor = torch.reshape(state_tensor, param.shape).to(param.device) init_tensor_as_customization_distributed( state_tensor, shard_fn=param.shard_fn, gather_fn=param.gather_fn ) state_tensor = gather_distributed_param(state_tensor, keep_vars=False).cpu() state_tensor = state_tensor.reshape(global_shape) if is_padded_tensor(param): state_tensor = init_as_padded_tensor( state_tensor, param._current_length, param._origin_length, param._padding_dim ) state_tensor = to_unpadded_tensor(state_tensor) collected_states[state_name] = state_tensor return collected_states # Check whether the param with given id is managed by current process. own_param = param_id in self.id_to_fake_params # Collector gets prepared for state collecting. if is_collector: for state_name in state_names: if state_name == "step": # To keep aligned with pytorch, state 'step' is stored as a pytorch tensor with type float32. collected_states[state_name] = torch.tensor(0.0, dtype=torch.float32, requires_grad=False).cpu() else: collected_states[state_name] = torch.zeros( param.numel(), dtype=torch.float32, requires_grad=False ).cpu() # Materials for gathering, including compacted state tensors, and the offset of shard inside each state. compacted_states = self.pack_optimizer_states_to_tensor(param_id, state_names) if own_param else None _, shard_offset, shard_size = self.get_offsets(param_id) # Collectors gather state shards through all_gathering. 
gathered_state_shards = [None for _ in range(dist.get_world_size(zero_group))] dist.barrier() dist.all_gather_object(gathered_state_shards, [compacted_states, shard_offset, shard_size], group=zero_group) if is_collector: for state_shard in gathered_state_shards: compacted_states = state_shard[0] shard_offset = state_shard[1] shard_size = state_shard[2] if compacted_states is None: continue self.load_from_compacted_states( compacted_states, collected_states, state_names, shard_offset, shard_size ) # Reshape tensors if is_collector: for state_name, state_tensor in collected_states.items(): if state_tensor.numel() == param.numel(): collected_states[state_name] = torch.reshape(state_tensor, param.shape) if is_dtensor: global_shape = get_global_shape(param) state_tensor = state_tensor.to(param.device) state_tensor = init_as_dtensor( state_tensor, sharding_spec=shard_spec, device_mesh=device_mesh, global_shape=global_shape ) elif is_customized_distributed: state_tensor = state_tensor.to(param.device) init_tensor_as_customization_distributed( state_tensor, shard_fn=param.shard_fn, gather_fn=param.gather_fn ) state_tensor = gather_distributed_param(state_tensor, keep_vars=False).cpu() if is_padded_tensor(param): state_tensor = init_as_padded_tensor( state_tensor, param._current_length, param._origin_length, param._padding_dim ) state_tensor = to_unpadded_tensor(state_tensor) return collected_states def pack_optimizer_states_to_tensor( self, param_id: int, state_names: list, device: torch.device = get_accelerator().get_current_device(), dtype: torch.dtype = torch.float32, ) -> torch.Tensor: """ With param id given, pack its optimizer states into a compact tensor and return. """ if param_id not in self.id_to_fake_params: return None fake_param = self.id_to_fake_params[param_id] param_range = self.param_to_range[fake_param] states = self.optim.state[fake_param] shard_size = param_range[1] - param_range[0] compacted_size = 0 for name in state_names: if name == "step": compacted_size += 1 else: compacted_size += shard_size compacted_states = torch.zeros(compacted_size, dtype=dtype, device=device, requires_grad=False) next_state_offset = 0 for state_name, state_tensor in states.items(): # State 'step' needs special operation. if state_name == "step": if isinstance(state_tensor, torch.Tensor): compacted_states[next_state_offset] = state_tensor[0].item() else: assert isinstance(state_tensor, int) compacted_states[next_state_offset] = state_tensor next_state_offset += 1 else: assert state_tensor.numel() == shard_size compacted_states[next_state_offset : next_state_offset + shard_size].copy_(state_tensor) next_state_offset += shard_size return compacted_states def load_from_compacted_states( self, compacted_states: torch.Tensor, collected_states: dict, state_names: list, shard_start: int, shard_size: int, ): """ Given a tensor carrying compacted optimizer states, update these states to collected_states. """ shard_end = shard_start + shard_size next_state_offset = 0 for state_name in state_names: if state_name == "step": collected_states["step"].data = torch.tensor( compacted_states[next_state_offset].item(), dtype=torch.float32, requires_grad=False ).cpu() next_state_offset += 1 else: target_segment = collected_states[state_name][shard_start:shard_end] target_segment.copy_(compacted_states[next_state_offset : next_state_offset + shard_size]) next_state_offset += shard_size def get_param_groups_for_saving(self) -> list: """ Return the param_groups in Pytorch format when saving to checkpoint. 
""" param_groups = [ {**group, "params": group_info["params"]} for group, group_info in zip(self.optim.param_groups, self.param_groups_backup) ] # To be compatible with pytorch checkpointing, # store extra hyperparameters used by pytorch Adam optimizer. torch_special_hyperparameters = { "amsgrad": False, "maximize": False, "foreach": None, "capturable": False, "differentiable": False, "fused": False, } for group in param_groups: for k, v in torch_special_hyperparameters.items(): if k not in group: group[k] = v return param_groups def state_dict(self, only_rank_0: bool = True) -> dict: """ Args: only_rank_0 (bool): a boolean value indicating whether the state_dict is collected only on rank 0, default to True. Returns: The complete state of the optimizer as a :class:`dict`. It contains two entries: * state - a dict holding current optimization state. Its content differs between optimizer classes. * param_groups - a list containing all parameter groups where each parameter group is a dict. Warning: This method will gather and return the whole optimizer state_dict, so it should be called only when memory resources are abundant. """ state_dict = {} state_dict["param_groups"] = self.get_param_groups_for_saving() # Collect optimizer states. state_dict["state"] = dict() for param_id in self.id_to_real_params.keys(): dist.barrier() state_dict["state"][param_id] = self.collect_states(param_id=param_id, only_rank_0=only_rank_0) return state_dict def load_param_groups(self, saved_param_groups: list): """ Load saved_param_groups into self.param_groups and self.param_groups_backup """ self.param_groups_backup = copy.deepcopy(saved_param_groups) # discard the older param_groups self.optim.param_groups = [] for group in saved_param_groups: fake_params_list = list() updated_group = {k: v for k, v in group.items() if k != "params"} for param_id in group["params"]: if param_id not in self.id_to_fake_params: continue fake_param = self.id_to_fake_params[param_id] fake_params_list.append(fake_param) updated_group["params"] = fake_params_list self.optim.param_groups.append(updated_group) def load_single_param_states(self, param_id: int, saved_states: dict): """ Load saved optimizer states into parameter with given id. """ def cast(param, state_range, value, global_shape, origin_shape, key=None): """ Make a copy of the needed segment of value and cast it to device of param. """ assert isinstance(value, torch.Tensor) ret_val = value if key == "step": assert value.numel() == 1 ret_val = int(value.item()) else: state_start, state_end = state_range ret_val = torch.zeros( state_end - state_start, dtype=torch.float32, device=param.device, requires_grad=False ) if is_dtensor:
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
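The optimizer file above folds fp16 loss unscaling and gradient clipping into a single divisor (see `_get_combined_scale`: gradients are later divided by the returned value inside `optim.step(div_scale=...)`). Below is a minimal standalone sketch of that arithmetic; the helper name `combined_scale` and the sample numbers are ours for illustration, not part of ColossalAI.

def combined_scale(div_scale: float, total_norm: float, max_norm: float) -> float:
    # Mirrors GeminiOptimizer._get_combined_scale: gradients are divided by one factor
    # that both undoes the fp16 loss scale (div_scale) and enforces clipping (max_norm).
    # total_norm is the global L2 norm of the *scaled* gradients; -1 means "no division".
    clip = ((total_norm / div_scale) + 1e-6) / max_norm
    if clip > 1:
        div_scale = clip * div_scale
    return -1 if div_scale == 1.0 else div_scale


# Loss scale 1024, unscaled gradient norm 8.0, clip threshold 1.0:
# the divisor becomes ~8192, so grad / divisor ends up with norm ~1.
print(combined_scale(div_scale=1024.0, total_norm=1024.0 * 8.0, max_norm=1.0))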
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/chunk/chunk.py
colossalai/zero/gemini/chunk/chunk.py
from dataclasses import dataclass from enum import Enum from typing import Dict, List, Optional import torch import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.accelerator import get_accelerator from colossalai.quantization.fp8 import all_gather_fp8 class TensorState(Enum): FREE = 0 COMPUTE = 1 HOLD = 2 HOLD_AFTER_BWD = 3 READY_FOR_REDUCE = 4 STATE_TRANS = ( (TensorState.FREE, TensorState.HOLD), (TensorState.FREE, TensorState.COMPUTE), (TensorState.HOLD, TensorState.FREE), (TensorState.HOLD, TensorState.COMPUTE), (TensorState.COMPUTE, TensorState.HOLD), (TensorState.COMPUTE, TensorState.HOLD_AFTER_BWD), (TensorState.HOLD_AFTER_BWD, TensorState.COMPUTE), (TensorState.HOLD_AFTER_BWD, TensorState.READY_FOR_REDUCE), (TensorState.READY_FOR_REDUCE, TensorState.HOLD), ) @dataclass class TensorInfo: state: TensorState offset: int end: int class ChunkFullError(Exception): pass def is_storage_empty(tensor: torch.Tensor) -> bool: return tensor.storage().size() == 0 def free_storage(tensor: torch.Tensor) -> None: if not is_storage_empty(tensor): tensor.storage().resize_(0) def alloc_storage(tensor: torch.Tensor) -> None: if is_storage_empty(tensor): tensor.storage().resize_(tensor.numel()) class Chunk: _total_number = 0 def __init__( self, chunk_size: int, zero_group: ProcessGroup, dtype: torch.dtype, init_device: Optional[torch.device] = None, cpu_shard_init: bool = False, keep_gathered: bool = False, pin_memory: bool = False, extra_dp_group: ProcessGroup = None, ) -> None: """ Chunk: A container owning a piece of contiguous memory space for tensors Here we use all-gather operation to gather the whole chunk. Currently, Chunk is exclusively used for DDP and ZeRO DDP and it doesn't support unused parameters. It is designed to make the full use of communication and PCIE bandwidth. Args: chunk_size (int): the number of elements in the chunk zero_group (ProcessGroup): the process group of this chunk dtype (torch.dtype): the data type of the chunk init_device (torch.device): optional, During the chunk construction process, where the tensor is stored. The default value is None, which is the current GPU cpu_shard_init (bool): a flag indicates the local chunk shard is resident on CPU. keep_gathered (bool): optional, if True, this chunk is always gathered in CUDA memory pin_memory (bool): optional, if True, this chunk always has a shard copied in pinned CPU memory """ self.count_id = Chunk._total_number Chunk._total_number += 1 self.chunk_size = chunk_size self.utilized_size = 0 self.torch_pg = zero_group self.pg_size = dist.get_world_size(self.torch_pg) self.pg_rank = dist.get_rank(self.torch_pg) self.extra_dp_group = extra_dp_group self.extra_dp_size = dist.get_world_size(self.extra_dp_group) if self.extra_dp_group is not None else 1 # the chunk size should be divisible by the dp degree if not keep_gathered: assert chunk_size % self.pg_size == 0 self.shard_size = chunk_size // self.pg_size self.shard_begin = self.shard_size * self.pg_rank self.shard_end = self.shard_begin + self.shard_size self.valid_end = self.shard_size self.dtype = dtype device = init_device or get_accelerator().get_current_device() # chunk_temp is a global chunk, which only exists during building the chunks. 
self.chunk_temp = torch.zeros(chunk_size, dtype=dtype, device=device) # keep all zero self.cuda_global_chunk = None # we force cuda_global_chunk located in CUDA # cuda local chunk, which is sharded on GPUs self.cuda_shard = None # cpu local chunk, which is sharded on CPUs self.cpu_shard = None # is the chunks gathers, which means chunks are duplicated on each process, # and we should use the cuda_global_chunk. self.is_gathered = True # configure the init device of the shard # no-offload default: fp16, fp32 -> CUDA # offload default: fp16, fp32 -> CPU self.shard_device = torch.device("cpu") if cpu_shard_init else get_accelerator().get_current_device() self.chunk_mem = self.chunk_size * self.chunk_temp.element_size() self.shard_mem = self.chunk_mem // self.pg_size # each tensor is associated with a TensorInfo to track its meta info # (state, offset, end) self.tensors_info: Dict[torch.Tensor, TensorInfo] = {} # the total number of tensors in the chunk self.num_tensors = 0 # Record the number of tensors in different states self.tensor_state_cnter: Dict[TensorState, int] = dict() for state in TensorState: self.tensor_state_cnter[state] = 0 # If a chunk is kept gathered, # they are treated the same as that of the parameters in DDP during training. self.keep_gathered = keep_gathered if self.keep_gathered: pin_memory = False # since this chunk is gathered, it doesn't need to pin # if pin_memory is True, we allocate a piece of CPU pin-memory # for it all the time self.pin_memory = pin_memory # we introduce the paired chunk here # it refers to another chunk having the same parameters # but with different dtype(such as fp16_chunk.paired_chunk -> fp32_chunk self.paired_chunk = None # if this chunk is synchronized with the optimizer, the flag is True self.optim_sync_flag = True # if the cpu_shard has been visited during the training step, the flag is True self.cpu_vis_flag = False # whether to record l2 norm for the gradient clipping calculation self.l2_norm_flag = False self.l2_norm = None self.grad_chunk = None # the async all-reduce/reduce-scatter work of this grad chunk (None means sync) self.grad_reduce_work = None self.fp8_communication = False @property def memory_usage(self) -> Dict[str, int]: cuda_memory = 0 cpu_memory = 0 if self.chunk_temp is not None: # this chunk is not closed if self.chunk_temp.device.type == "cuda" or self.chunk_temp.device.type == "npu": cuda_memory += self.chunk_mem else: cpu_memory += self.chunk_mem else: if self.is_gathered: cuda_memory += self.chunk_mem if self.cuda_shard is not None: cuda_memory += self.shard_mem if self.cpu_shard is not None: cpu_memory += self.shard_mem return dict(cuda=cuda_memory, cpu=cpu_memory) @property def device_type(self) -> str: if self.chunk_temp is not None: return self.chunk_temp.device.type elif self.is_gathered or self.cuda_shard is not None: return get_accelerator().name else: return "cpu" @property def payload(self) -> torch.Tensor: # sanity check assert self.chunk_temp is None if self.is_gathered: return self.cuda_global_chunk elif self.cuda_shard is not None: return self.cuda_shard else: return self.cpu_shard @property def payload_mem(self) -> int: # sanity check assert self.chunk_temp is None if self.is_gathered: return self.chunk_mem else: return self.shard_mem @property def can_move(self) -> bool: return not self.is_gathered @property def can_release(self) -> bool: if self.keep_gathered: return False else: return ( self.tensor_state_cnter[TensorState.HOLD] + self.tensor_state_cnter[TensorState.HOLD_AFTER_BWD] == 
self.num_tensors ) @property def can_reduce(self): return self.tensor_state_cnter[TensorState.READY_FOR_REDUCE] == self.num_tensors @property def has_inf_or_nan(self) -> bool: """Check if the chunk has inf or nan values on CUDA.""" if self.is_gathered: valid_tensor = self.cuda_global_chunk[: self.utilized_size] else: assert self.cuda_shard is not None # only check on CUDA valid_tensor = self.cuda_shard[: self.valid_end] return torch.isinf(valid_tensor).any() | torch.isnan(valid_tensor).any() def set_l2_norm(self) -> None: """Record l2 norm of this chunks on CUDA.""" assert self.l2_norm is None, "you are calculating the l2 norm twice" if self.is_gathered: valid_tensor = self.cuda_global_chunk[: self.utilized_size] else: assert self.cuda_shard is not None # calculate on CUDA valid_tensor = self.cuda_shard[: self.valid_end] chunk_l2_norm = valid_tensor.data.float().norm(2) self.l2_norm = chunk_l2_norm.item() ** 2 def append_tensor(self, tensor: torch.Tensor): """Add a tensor to the chunk. Args: tensor (torch.Tensor): a tensor to be added to the chunk """ # sanity check assert self.chunk_temp is not None assert tensor.dtype == self.dtype new_utilized_size = self.utilized_size + tensor.numel() # raise exception when the chunk size is exceeded if new_utilized_size > self.chunk_size: raise ChunkFullError self.chunk_temp[self.utilized_size : new_utilized_size].copy_(tensor.data.flatten()) assert type(self.chunk_temp) == torch.Tensor, "copy_tensor_to_chunk_slice must use a torch tensor" tensor.data = self.chunk_temp[self.utilized_size : new_utilized_size].view(tensor.shape) # record all the information about the tensor self.num_tensors += 1 tensor_state = TensorState.HOLD self.tensors_info[tensor] = TensorInfo(tensor_state, self.utilized_size, new_utilized_size) self.tensor_state_cnter[tensor_state] += 1 self.utilized_size = new_utilized_size def close_chunk(self): """Close the chunk. Any tensor can't be appended to a closed chunk later.""" # sanity check assert self.chunk_temp is not None # calculate the valid end for each shard if self.utilized_size <= self.shard_begin: self.valid_end = 0 elif self.utilized_size < self.shard_end: self.valid_end = self.utilized_size - self.shard_begin if self.chunk_temp.device.type == "cpu": self.cuda_global_chunk = self.chunk_temp.to(get_accelerator().get_current_device()) self.__update_tensors_ptr() else: self.cuda_global_chunk = self.chunk_temp self.chunk_temp = None self.__scatter() # gathered chunk never have shard attribute if self.keep_gathered: return if self.pin_memory or self.shard_device.type == "cpu": self.cpu_shard = torch.empty(self.shard_size, dtype=self.dtype, pin_memory=self.pin_memory) self.cpu_shard.copy_(self.cuda_shard) self.cpu_vis_flag = True # cpu_shard has been visited if self.shard_device.type == "cpu": self.cuda_shard = None def shard_move(self, device: torch.device, force_copy: bool = False, non_blocking=False): """Move the shard tensor in the chunk. 
Args: device: the device to which the shard will move force_copy: if True, copy function is called mandatorily non_blocking: if True, the operation is non-blocking, the caller is responsible for synchronization """ # sanity check assert not self.is_gathered # when the current chunk is not synchronized with the optimizer # just use another way for the movement if not self.optim_sync_flag: assert device.type == "cuda" or device.type == "npu", "each chunk should first be moved to CUDA" self.__paired_shard_move(non_blocking=non_blocking) self.optim_sync_flag = True return if device.type == "cuda" or device.type == "npu": assert device == get_accelerator().get_current_device(), "can't move chunk to another device" if self.cuda_shard: return self.cuda_shard = self.cpu_shard.to(get_accelerator().get_current_device(), non_blocking=non_blocking) if not self.pin_memory: self.cpu_shard = None elif device.type == "cpu": if self.cuda_shard is None: return if self.pin_memory: if force_copy or not self.cpu_vis_flag: self.cpu_shard.copy_(self.cuda_shard, non_blocking=non_blocking) # if cpu_shard has been visited # copy operation is not need else: self.cpu_shard = self.cuda_shard.to("cpu", non_blocking=non_blocking) self.cpu_vis_flag = True self.cuda_shard = None else: raise NotImplementedError def access_chunk(self, async_access: bool = False) -> Optional[dist.Work]: """Make the chunk usable for the parameters inside it. It's an operation done in CUDA.""" # sanity check assert self.chunk_temp is None maybe_work = None if not self.is_gathered: maybe_work = self.__gather(async_op=async_access) self.__update_tensors_ptr() return maybe_work def release_chunk(self): """Release the usable chunk. It's an operation done in CUDA.""" # sanity check assert self.chunk_temp is None if self.is_gathered: self.__scatter() def reduce(self, async_op: bool = False): """Reduce scatter all the gradients. It's an operation done in CUDA.""" # sanity check assert self.is_gathered assert self.grad_reduce_work is None if self.pg_size == 1: # tricky code here # just move cuda_global_chunk to cuda_shard # the communication is not necessary self.__scatter() if self.extra_dp_group is not None: self.grad_reduce_work = dist.all_reduce(self.cuda_shard, group=self.extra_dp_group, async_op=async_op) elif self.keep_gathered: # we use all-reduce here self.grad_reduce_work = dist.all_reduce(self.cuda_global_chunk, group=self.torch_pg, async_op=async_op) if self.extra_dp_group is not None: # cannot guranatee the order of multiple all-reduce self.wait_async_reduce() self.grad_reduce_work = dist.all_reduce( self.cuda_global_chunk, group=self.extra_dp_group, async_op=async_op ) else: self.cuda_shard = torch.empty( self.shard_size, dtype=self.dtype, device=get_accelerator().get_current_device() ) assert self.cuda_global_chunk.is_contiguous() self.grad_reduce_work = dist.reduce_scatter_tensor( self.cuda_shard, self.cuda_global_chunk, group=self.torch_pg, async_op=async_op ) if self.extra_dp_group is not None: self.wait_async_reduce() self.grad_reduce_work = dist.all_reduce(self.cuda_shard, group=self.extra_dp_group, async_op=async_op) free_storage(self.cuda_global_chunk) self.is_gathered = False self.__update_tensors_state(TensorState.HOLD) def wait_async_reduce(self) -> None: if self.grad_reduce_work is not None: self.grad_reduce_work.wait() self.grad_reduce_work = None def tensor_trans_state(self, tensor: torch.Tensor, tensor_state: TensorState) -> None: """ Make a transition of the tensor into the next state. 
Args: tensor (torch.Tensor): a torch Tensor object. tensor_state (TensorState): the target state for transition. """ # As the gradient hook can be triggered either before or after post-backward # tensor's state can be compute -> hold_after_bwd -> ready_for_reduce # or compute -> ready_for_reduce -> hold_after_bwd # the second one is invalid, we just ignore ready_for_reduce -> hold_after_bwd # this function only apply valid state transformation # invalid calls will be ignored and nothing changes if (self.tensors_info[tensor].state, tensor_state) not in STATE_TRANS: return self.__update_one_tensor_info(self.tensors_info[tensor], tensor_state) def copy_tensor_to_chunk_slice( self, tensor: torch.Tensor, data_slice: torch.Tensor, update_ptr: bool = True ) -> None: """ Copy data slice to the memory space indexed by the input tensor in the chunk. Args: tensor (torch.Tensor): the tensor used to retrieve meta information data_slice (torch.Tensor): the tensor to be copied to the chunk """ # sanity check assert self.is_gathered tensor_info = self.tensors_info[tensor] self.cuda_global_chunk[tensor_info.offset : tensor_info.end].copy_(data_slice.data.flatten()) if update_ptr: tensor.data = self.cuda_global_chunk[tensor_info.offset : tensor_info.end].view(tensor.shape) def add_tensor_to_chunk_slice(self, tensor: torch.Tensor, data_slice: torch.Tensor) -> None: """ Add data slice to the memory space indexed by the input tensor in the chunk. Only used when accumulating gradient chunks. Args: tensor (torch.Tensor): the tensor used to retrieve meta information data_slice (torch.Tensor): the tensor to be added to the chunk """ # sanity check assert self.is_gathered tensor_info = self.tensors_info[tensor] self.cuda_global_chunk[tensor_info.offset : tensor_info.end].add_(data_slice.data.flatten()) def get_valid_length(self) -> int: """Get the valid length of the chunk's payload.""" if self.keep_gathered: return self.utilized_size else: return self.valid_end def init_pair(self, friend_chunk: "Chunk") -> None: """Initialize the paired chunk.""" if self.paired_chunk is None and friend_chunk.paired_chunk is None: self.paired_chunk = friend_chunk friend_chunk.paired_chunk = self else: assert self.paired_chunk is friend_chunk assert friend_chunk.paired_chunk is self def optim_update(self) -> None: """Update the fp16 chunks via their fp32 chunks. 
It's used by the optimizer.""" # sanity check assert self.paired_chunk is not None friend_chunk = self.paired_chunk if self.is_gathered is True: assert friend_chunk.is_gathered is True self.cuda_global_chunk.copy_(friend_chunk.cuda_global_chunk) self.optim_sync_flag = True elif friend_chunk.device_type in ("cuda", "npu") and self.device_type in ("cuda", "npu"): self.cuda_shard.copy_(friend_chunk.cuda_shard) self.optim_sync_flag = True self.cpu_vis_flag = False else: # optim_sync_flag is set to False # see shard_move function for more details assert friend_chunk.device_type == "cpu" assert self.device_type == "cpu" self.optim_sync_flag = False self.cpu_vis_flag = False def get_tensors(self) -> List[torch.Tensor]: return list(self.tensors_info.keys()) def __gather(self, async_op: bool = False) -> Optional[dist.Work]: if not self.is_gathered: # sanity check assert self.cuda_shard is not None alloc_storage(self.cuda_global_chunk) assert self.cuda_global_chunk.is_contiguous() if self.fp8_communication: work = all_gather_fp8( list(self.cuda_global_chunk.chunk(self.pg_size)), self.cuda_shard, self.torch_pg, fp8_format="e4m3", async_op=async_op, ) else: work = dist.all_gather_into_tensor( self.cuda_global_chunk, self.cuda_shard, self.torch_pg, async_op=async_op ) self.cuda_shard = None self.is_gathered = True return work return None def __scatter(self): if self.keep_gathered: return if self.is_gathered: # sanity check assert self.cuda_shard is None self.cuda_shard = torch.empty(self.shard_size, dtype=self.dtype, device=self.cuda_global_chunk.device) self.cuda_shard.copy_(self.cuda_global_chunk[self.shard_begin : self.shard_end]) free_storage(self.cuda_global_chunk) self.is_gathered = False def __paired_shard_move(self, non_blocking=False): assert self.paired_chunk is not None, "chunks should be paired before training" optim_chunk = self.paired_chunk assert self.chunk_size == optim_chunk.chunk_size # only be called when optimizer state is in CPU memory # the grad and param should be in the same device assert self.cuda_shard is None temp = optim_chunk.cpu_shard.to(get_accelerator().get_current_device(), non_blocking=non_blocking) # avoid to transform FP32 in CPU self.cuda_shard = temp.to(self.dtype) if not self.pin_memory: self.cpu_shard = None def __update_tensors_ptr(self) -> None: # sanity check assert self.is_gathered assert type(self.cuda_global_chunk) == torch.Tensor for tensor, tensor_info in self.tensors_info.items(): tensor.data = self.cuda_global_chunk[tensor_info.offset : tensor_info.end].view(tensor.shape) def __update_one_tensor_info(self, tensor_info: TensorInfo, next_state: TensorState): self.tensor_state_cnter[tensor_info.state] -= 1 tensor_info.state = next_state self.tensor_state_cnter[tensor_info.state] += 1 def __update_tensors_state(self, next_state: TensorState, prev_state: Optional[TensorState] = None): for tensor_info in self.tensors_info.values(): if prev_state is None or tensor_info.state == prev_state: self.__update_one_tensor_info(tensor_info, next_state) def __hash__(self) -> int: return hash(id(self)) def __eq__(self, __o: object) -> bool: return self is __o def __repr__(self, detailed: bool = True): output = [ "Chunk Information:\n", "\tchunk size: {}, chunk dtype: {}, process group size: {}\n".format( self.chunk_size, self.dtype, self.pg_size ), "\t# of tensors: {}, utilized size: {}, utilized percentage: {:.2f}\n".format( self.num_tensors, self.utilized_size, self.utilized_size / self.chunk_size ), ] def print_tensor(tensor, prefix=""): output.append( "{}shape: {}, 
dtype: {}, device: {}\n".format(prefix, tensor.shape, tensor.dtype, tensor.device) ) if self.chunk_temp is not None: output.append("\tchunk temp:\n") print_tensor(tensor=self.chunk_temp, prefix="\t\t") if self.cuda_global_chunk is not None and self.cuda_global_chunk.storage().size() > 0: output.append("\tchunk total:\n") print_tensor(tensor=self.cuda_global_chunk, prefix="\t\t") if self.cuda_shard is not None: output.append("\tcuda shard:\n") print_tensor(tensor=self.cuda_shard, prefix="\t\t") if self.cpu_shard is not None: output.append("\tcpu shard:\n") print_tensor(tensor=self.cpu_shard, prefix="\t\t") memory_info = self.memory_usage output.append("\tmemory usage: cuda {}, cpu {}\n".format(memory_info["cuda"], memory_info["cpu"])) if detailed: output.append("\ttensor state monitor:\n") for st in TensorState: output.append("\t\t# of {}: {}\n".format(st, self.tensor_state_cnter[st])) return "".join(output) def init_grad_chunk(self) -> "Chunk": """Init grad chunk. This should be called in grad handler. Returns: Chunk: Grad chunk """ if self.grad_chunk is None: # grad chunk is not initialized grad_chunk = Chunk( chunk_size=self.chunk_size, zero_group=self.torch_pg, dtype=self.dtype, keep_gathered=self.keep_gathered, pin_memory=self.pin_memory, extra_dp_group=self.extra_dp_group, ) grad_chunk.num_tensors = self.num_tensors grad_chunk.utilized_size = self.utilized_size grad_chunk.tensor_state_cnter[TensorState.HOLD] = self.num_tensors for tensor, state in self.tensors_info.items(): grad_chunk.tensors_info[tensor] = TensorInfo(TensorState.HOLD, state.offset, state.end) grad_chunk.valid_end = self.valid_end if grad_chunk.chunk_temp.device.type == "cpu": grad_chunk.cuda_global_chunk = grad_chunk.chunk_temp.to(get_accelerator().get_current_device()) else: grad_chunk.cuda_global_chunk = grad_chunk.chunk_temp grad_chunk.chunk_temp = None if grad_chunk.pin_memory: grad_chunk.cpu_shard = torch.empty( grad_chunk.shard_size, dtype=grad_chunk.dtype, pin_memory=grad_chunk.pin_memory ) self.grad_chunk = grad_chunk else: # grad chunk is initialized, just reallocate cuda global chunk self.grad_chunk.cuda_shard = None self.grad_chunk.is_gathered = True self.grad_chunk.l2_norm = None alloc_storage(self.grad_chunk.cuda_global_chunk) return self.grad_chunk
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
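Chunk release in the file above relies on `free_storage`/`alloc_storage`, which resize a tensor's underlying storage instead of dropping the tensor object. A standalone CPU illustration of that trick in plain PyTorch (not ColossalAI code):

import torch

# Resizing a tensor's storage to 0 releases its memory while keeping the tensor object
# (shape/dtype metadata) alive; resizing it back to numel() re-allocates the buffer.
# This mirrors how a gathered chunk drops cuda_global_chunk after reduce()/__scatter()
# and re-allocates it before the next all-gather; shown on CPU for simplicity.
t = torch.zeros(1024)
print(t.storage().size())        # 1024: backed by real memory
t.storage().resize_(0)           # release the payload ("is_storage_empty" becomes True)
print(t.storage().size())        # 0
t.storage().resize_(t.numel())   # re-allocate before the next all-gather
print(t.storage().size())        # 1024 again (contents are uninitialized)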
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/chunk/utils.py
colossalai/zero/gemini/chunk/utils.py
from time import time
from typing import Optional

import torch
import torch.distributed as dist
import torch.nn as nn

from .manager import ChunkManager
from .search_utils import search_chunk_configuration


def safe_div(a, b):
    if a == 0:
        return 0
    return a / b


def init_chunk_manager(
    model: nn.Module,
    init_device: Optional[torch.device] = None,
    hidden_dim: Optional[int] = None,
    reuse_fp16_chunk: bool = True,
    verbose: bool = False,
    max_prefetch: int = 0,
    **kwargs,
) -> ChunkManager:
    if hidden_dim:
        search_interval = hidden_dim
    else:
        search_interval = 1024  # defaults to 1024
    kwargs["search_interval"] = search_interval

    dist.barrier()
    begin = time()

    config_dict, total_size, wasted_size = search_chunk_configuration(model, **kwargs)

    dist.barrier()
    end = time()
    span_s = end - begin
    mega_unit = 1024**2
    total_size /= mega_unit
    wasted_size /= mega_unit

    if verbose and dist.get_rank() == 0:
        print(
            "searching chunk configuration is completed in {:.2f} s.\n".format(span_s),
            "used number: {:.2f} * 2^20, wasted number: {:.2f} * 2^20\n".format(total_size, wasted_size),
            "total wasted percentage is {:.2f}%".format(100 * safe_div(wasted_size, total_size + wasted_size)),
            sep="",
            flush=True,
        )
    dist.barrier()

    chunk_manager = ChunkManager(config_dict, init_device, reuse_fp16_chunk=reuse_fp16_chunk, max_prefetch=max_prefetch)

    return chunk_manager
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
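`init_chunk_manager` above is a thin wrapper around `search_chunk_configuration` that adds timing, logging, and `ChunkManager` construction. A hedged sketch of driving the search directly follows; it assumes colossalai is installed and a single-process gloo group, and the toy model plus the `search_range_m`/`search_interval` values are illustrative only.

import os

import torch.distributed as dist
import torch.nn as nn

from colossalai.zero.gemini.chunk import search_chunk_configuration

# Single-process process group so dist.get_world_size() works inside the search.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

model = nn.Sequential(nn.Linear(256, 256), nn.Linear(256, 256))
config_dict, total_size, wasted_size = search_chunk_configuration(
    model,
    search_range_m=64,     # search window, in multiples of 2**20 elements
    search_interval=256,   # normally the model's hidden size (see init_chunk_manager)
)
# config_dict maps dp world size -> chunk init kwargs; groups smaller than
# min_chunk_size_m are kept gathered instead of being sharded.
print(config_dict, total_size, wasted_size)
dist.destroy_process_group()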
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/chunk/search_utils.py
colossalai/zero/gemini/chunk/search_utils.py
import math from typing import Dict, List, Optional, Tuple import numpy as np import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from colossalai.tensor import ColoParameter from colossalai.utils import is_ddp_ignored from colossalai.zero.gemini.memory_tracer import MemStats, OrderedParamGenerator def _filter_exlarge_params(model: nn.Module, size_dict: Dict[int, List[int]]) -> None: """_filter_exlarge_params Filter those parameters whose size is too large (more than 3x standard deviations) from others. Args: model (nn.Module): the model. size_dict (Dict[int, List[int]]): the size dict of parameters. """ agg_size_list = [] for key in size_dict: agg_size_list.extend(size_dict[key]) if len(agg_size_list) == 0: return params_size_arr = np.array(agg_size_list) std = np.std(params_size_arr) mean = np.mean(params_size_arr) upper_limit = mean + 3 * std for key in size_dict: org_list = size_dict[key] size_dict[key] = list(filter(lambda x: x <= upper_limit, org_list)) def _get_unused_byte(size_list: List[int], chunk_size: int) -> int: """_get_unused_byte Get unused byte for a certain chunk size. Args: size_list (List[int]): the size list of parameters. chunk_size (int): the chunk size. Returns: int: the unused byte. """ acc = 0 left = 0 for s in size_list: if s > left: acc += left left = chunk_size left -= s return left + acc def _tensor_numel(local_param: ColoParameter) -> int: """_tensor_numel Get the number of elements of a tensor. Args: local_param (ColoParameter): The local parameter. strict_ddp_flag (bool): whether to enable the strict ddp mode. Returns: int: the number of elements. """ # TODO(ver217): support dtensor here return local_param.numel() def classify_params_by_dp_degree( param_order: OrderedParamGenerator, process_group: ProcessGroup ) -> Dict[int, List[ColoParameter]]: """classify_params_by_dp_degree Classify the parameters by their dp degree Args: param_order (OrderedParamGenerator): the order of param be vised strict_ddp_flag (bool, optional): whether to enable the strict ddp mode. Defaults to False. Returns: Dict[int, List[ColoParameter]]: a dict contains the classification results. The keys are dp_degrees and the values are parameters. """ params_dict: Dict[int, List[ColoParameter]] = dict() for param in param_order.generate(): # assert isinstance(param, ColoParameter), "please init model in the ColoInitContext" if is_ddp_ignored(param): continue param_key = dist.get_world_size(process_group) if param_key not in params_dict: params_dict[param_key] = [] params_dict[param_key].append(param) return params_dict def search_chunk_configuration( model: nn.Module, search_range_m: float, search_interval: int, # hidden size is the best value for the interval min_chunk_size_m: float = 32, filter_exlarge_params: bool = True, strict_ddp_flag: bool = False, process_group: Optional[ProcessGroup] = None, memstas: Optional[MemStats] = None, ) -> Tuple[Dict, int, int]: """search_chunk_configuration Search the chunk configuration for a model. Args: model (nn.Module): torch module search_range_m (float): searching range divided by 2^20. search_interval (int): searching interval. min_chunk_size_m (float, optional): the minimum size of a distributed chunk, divided by 2^20.. filter_exlarge_params (bool, optional): filter extreme large parameters. Defaults to True. strict_ddp_flag (bool, optional): whether to enable the strict ddp mode. all parameters keep replicated in this mode. 
Returns: Tuple[Dict, int]: chunk config (a dict of dp_degree -> chunk init args) and its memory chunk waste in byte. """ if memstas is not None: param_order = memstas.param_order() else: # build the param visited order right now param_order = OrderedParamGenerator() for p in model.parameters(): param_order.append(p) search_range = round(search_range_m * 1024**2) min_chunk_size = round(min_chunk_size_m * 1024**2) assert search_range >= 0 params_dict = classify_params_by_dp_degree(param_order, process_group) size_lcm = np.lcm.reduce(list(params_dict.keys())) config_dict: Dict[int, Dict] = dict() total_param_size = 0 size_dict: Dict[int, List[int]] = dict() for dp_degree in params_dict: params_list = params_dict[dp_degree] size_list = [_tensor_numel(p) for p in params_list] group_acc_size = sum(size_list) total_param_size += group_acc_size # let small parameters keep gathered in CUDA all the time if group_acc_size < min_chunk_size: config_dict[dp_degree] = dict(chunk_size=group_acc_size, keep_gathered=True) else: size_dict[dp_degree] = size_list if filter_exlarge_params: _filter_exlarge_params(model, size_dict) max_size = min_chunk_size for key in size_dict: max_size = max(max_size, max(size_dict[key])) start_size = int(math.ceil(max_size / search_interval) * search_interval) min_chunk_waste = float("+inf") best_chunk_size = start_size for chunk_size in range(start_size, start_size + search_range + 1, search_interval): temp_waste = 0 for key in size_dict: temp_waste += _get_unused_byte(size_dict[key], chunk_size) if temp_waste < min_chunk_waste: min_chunk_waste = temp_waste best_chunk_size = chunk_size # the chunk size needs to be divided by each groups sizes best_chunk_size = best_chunk_size + (-best_chunk_size % size_lcm) for dp_degree in params_dict: if dp_degree in config_dict: continue config_dict[dp_degree] = dict(chunk_size=best_chunk_size, keep_gathered=False) return config_dict, total_param_size, min_chunk_waste
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
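The waste model behind the chunk-size search above is greedy first-fit packing in registration order (`_get_unused_byte`): a tensor that does not fit opens a new chunk, and the remainder of the previous chunk is counted as waste. The sketch below restates it as a standalone function (a copy for illustration, not an import) with two worked cases.

from typing import List


def unused_elements(size_list: List[int], chunk_size: int) -> int:
    acc = 0   # elements wasted in chunks that were closed early
    left = 0  # free elements remaining in the currently open chunk
    for s in size_list:
        if s > left:      # tensor does not fit: abandon the remainder, open a new chunk
            acc += left
            left = chunk_size
        left -= s
    return left + acc     # the tail of the last chunk is wasted too


print(unused_elements([600, 600, 600], chunk_size=1024))  # 3 chunks opened -> 1272 wasted
print(unused_elements([600, 600, 600], chunk_size=1800))  # 1 chunk fits all -> 0 wasted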
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/chunk/__init__.py
colossalai/zero/gemini/chunk/__init__.py
from .chunk import Chunk, ChunkFullError, TensorInfo, TensorState
from .manager import ChunkManager
from .search_utils import classify_params_by_dp_degree, search_chunk_configuration
from .utils import init_chunk_manager

__all__ = ["Chunk", "ChunkManager", "classify_params_by_dp_degree", "search_chunk_configuration", "init_chunk_manager"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
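The package `__init__` above defines the public surface of the chunk toolkit; a typical import, assuming colossalai is installed:

from colossalai.zero.gemini.chunk import ChunkManager, init_chunk_manager, search_chunk_configuration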
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/chunk/manager.py
colossalai/zero/gemini/chunk/manager.py
from collections import deque from typing import Deque, Dict, Iterable, List, Optional, Set, Tuple import torch import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.accelerator import get_accelerator from colossalai.utils import free_storage from .chunk import Chunk, ChunkFullError, TensorState class ChunkManager: """ A manager class to manipulate the tensors in chunks. Args: chunk_configuration (Dict[int, Dict]): the configuration dictionary of this chunk manager. init_device (torch.device): optional, the device on which the chunk is initialized. The default is None. """ def __init__( self, chunk_configuration, init_device: Optional[torch.device] = None, reuse_fp16_chunk: bool = True, max_prefetch: int = 0, fp8_communication: bool = False, ) -> None: self.device = init_device or get_accelerator().get_current_device() self.dp_degree_chunk_size_dict: Dict[int, int] = dict() self.kwargs_config = chunk_configuration for k, v in self.kwargs_config.items(): self.dp_degree_chunk_size_dict[k] = v.pop("chunk_size") v["init_device"] = self.device self.chunk_groups: Dict[str, Deque[Chunk]] = dict() self.tensor_chunk_map: Dict[torch.Tensor, Chunk] = dict() self.accessed_chunks: Set[Chunk] = set() self.accessed_mem: int = 0 self.total_mem: Dict[str, int] = {"cpu": 0, "cuda": 0} self.reuse_fp16_chunk = reuse_fp16_chunk # Whether model is accumulating gradients, self.accumulating_grads = False self.overflow_counter = torch.tensor([0], dtype=torch.int, device=get_accelerator().get_current_device()) self._prefetch_stream = get_accelerator().Stream() if max_prefetch else None self.fp8_communication = fp8_communication def register_tensor( self, tensor: torch.Tensor, group_type: str, config_key: int, zero_group: ProcessGroup, extra_dp_group: ProcessGroup = None, cpu_offload: bool = False, pin_memory: bool = False, ) -> None: """ Register a tensor to the chunk manager. Then, the tensor should be accessed by `get_chunks`. Args: tensor: the tensor appended to the chunk group_type: the data type of the group. 
config_key: the key of the group's name, the size of the dp world cpu_offload: if True, the chunk will be closed on CPU pin_memory: whether the chunk is pinned in the cpu memory """ assert tensor not in self.tensor_chunk_map assert isinstance(tensor, torch.Tensor), "Please feed Tensor to this ChunkManager" assert config_key in self.dp_degree_chunk_size_dict chunk_size = self.dp_degree_chunk_size_dict[config_key] chunk_kwargs = self.kwargs_config[config_key] group_name = "{}_{}".format(group_type, config_key) chunk_group = self.__get_chunk_group(group_name) try: # append the tensor to the last chunk chunk_group[-1].append_tensor(tensor) except (IndexError, ChunkFullError): # the except statement will be triggered when there is no chunk or # the last chunk in the chunk group is full # this will create a new chunk and allocate this chunk to its corresponding process if chunk_group: # the chunk group is not empty # close the last chunk self.__close_one_chunk(chunk_group[-1]) if tensor.numel() > chunk_size: chunk_size = tensor.numel() dp_size = dist.get_world_size(zero_group) chunk_size = chunk_size + (-chunk_size % dp_size) chunk = Chunk( chunk_size=chunk_size, zero_group=zero_group, dtype=tensor.dtype, cpu_shard_init=cpu_offload, pin_memory=pin_memory, extra_dp_group=extra_dp_group, **chunk_kwargs, ) if self.fp8_communication: chunk.fp8_communication = True chunk_group.append(chunk) chunk.append_tensor(tensor) self.__add_memory_usage(chunk.memory_usage) self.tensor_chunk_map[tensor] = chunk_group[-1] def close_all_groups(self): """Close all the chunks of all groups.""" for group_name in self.chunk_groups: self.__close_one_chunk(self.chunk_groups[group_name][-1]) def access_chunk(self, chunk: Chunk, async_access: bool = False) -> Optional[dist.Work]: """Make the chunk can be used for calculation.""" if chunk in self.accessed_chunks: return None self.__sub_memory_usage(chunk.memory_usage) if chunk.device_type == "cpu": chunk.shard_move(get_accelerator().get_current_device(), non_blocking=async_access) maybe_work = self.__add_accessed_chunk(chunk, async_access=async_access) self.__add_memory_usage(chunk.memory_usage) return maybe_work def release_chunk(self, chunk: Chunk) -> None: """Scatter the chunk in CUDA.""" if chunk not in self.accessed_chunks: return if chunk.can_release: self.__sub_memory_usage(chunk.memory_usage) self.__sub_accessed_chunk(chunk) self.__add_memory_usage(chunk.memory_usage) def move_chunk(self, chunk: Chunk, device: torch.device, force_copy: bool = False, async_move=False) -> None: """Move the shard of the chunk to the target device.""" if not chunk.can_move or chunk.device_type == device.type: return self.__sub_memory_usage(chunk.memory_usage) chunk.shard_move(device, force_copy, non_blocking=async_move) self.__add_memory_usage(chunk.memory_usage) def trans_tensor_state(self, tensor: torch.Tensor, state: TensorState) -> None: """Transit tensor state according to pre-defined state machine.""" chunk = self.tensor_chunk_map[tensor] chunk.tensor_trans_state(tensor, state) def reduce_chunk(self, chunk: Chunk, async_op: bool = False) -> bool: """Reduce or all reduce the chunk.""" if not chunk.can_reduce: return False self.__sub_memory_usage(chunk.memory_usage) chunk.reduce(async_op=async_op) self.__sub_accessed_chunk(chunk) self.__add_memory_usage(chunk.memory_usage) return True def fake_release_chunk(self, chunk: Chunk) -> None: """Release gathered chunk in a fake mode. This function is used for keep-gathered chunk in the inference mode. 
""" assert chunk.keep_gathered assert chunk.tensor_state_cnter[TensorState.HOLD] == chunk.num_tensors self.__sub_accessed_chunk(chunk) def copy_tensor_to_chunk_slice(self, tensor: torch.Tensor, data: torch.Tensor) -> None: """ Copy data to the chunk. Args: tensor (torch.Tensor): the tensor used to retrieve meta information data (torch.Tensor): the tensor to be copied to the chunk """ chunk = self.tensor_chunk_map[tensor] chunk.copy_tensor_to_chunk_slice(tensor, data) def get_chunk(self, tensor: torch.Tensor) -> Chunk: """ Return the chunk owning the tensor. Args: tensor (torch.Tensor): a torch tensor object """ return self.tensor_chunk_map[tensor] def get_cuda_movable_chunks(self) -> List[Chunk]: """ Get all chunks that can be moved. """ chunk_list = [] for chunk in self.accessed_chunks: if chunk.can_release: chunk_list.append(chunk) chunk_list.sort(key=lambda x: x.count_id) return chunk_list def get_chunks(self, tensors: Iterable[torch.Tensor]) -> Tuple[Chunk, ...]: """ Get all chunks owning the input tensors. Args: tensors (Iterable[torch.Tensor]): the tensors used to look for chunks """ chunks = [] for tensor in tensors: chunk = self.get_chunk(tensor) if chunk not in chunks: chunks.append(chunk) return tuple(chunks) def add_extern_static_tensor(self, tensor: torch.Tensor) -> None: """Add extern static tensor to chunk manager. Those tensors won't be managed by chunk manager, but we want to monitor memory usage of them. They are "static", which means their shape, dtype, device never change. Thus, their memory usage never changes. Args: tensor (torch.Tensor): An extern static tensor. E.g. optimizer state. """ assert tensor not in self.tensor_chunk_map device_type = tensor.device.type if device_type == "npu": device_type = "cuda" self.total_mem[device_type] += tensor.numel() * tensor.element_size() def __repr__(self) -> str: msg = [ "Chunk Manager Information:\n", "Total memory: " + ", ".join([f"{k}={v}B" for k, v in self.total_mem.items()]) + "\n", ] for group_name, group in self.chunk_groups.items(): msg.append(f"Group {group_name}:\n") for i, chunk in enumerate(group): msg.append(f"[{i}] {chunk}\n") return "".join(msg) def __get_chunk_group(self, group_name: str) -> Deque[Chunk]: """Register a chunk group.""" if group_name not in self.chunk_groups: self.chunk_groups[group_name] = deque() return self.chunk_groups[group_name] def __close_one_chunk(self, chunk: Chunk): self.__sub_memory_usage(chunk.memory_usage) chunk.close_chunk() self.__add_memory_usage(chunk.memory_usage) def __sub_memory_usage(self, usage: Dict[str, int]): for k, v in usage.items(): self.total_mem[k] -= v def __add_memory_usage(self, usage: Dict[str, int]): for k, v in usage.items(): self.total_mem[k] += v def __add_accessed_chunk(self, chunk: Chunk, async_access: bool = False) -> Optional[dist.Work]: maybe_work = chunk.access_chunk(async_access=async_access) self.accessed_chunks.add(chunk) self.accessed_mem += chunk.chunk_mem return maybe_work def __sub_accessed_chunk(self, chunk: Chunk): chunk.release_chunk() self.accessed_chunks.remove(chunk) self.accessed_mem -= chunk.chunk_mem def init_grad_chunk(self, chunk: Chunk) -> Chunk: if chunk.grad_chunk is not None: self.__sub_memory_usage(chunk.grad_chunk.memory_usage) grad_chunk = chunk.init_grad_chunk() self.__add_memory_usage(grad_chunk.memory_usage) if grad_chunk not in self.accessed_chunks: self.accessed_chunks.add(grad_chunk) self.accessed_mem += grad_chunk.chunk_mem return grad_chunk def rearrange_accumulated_grad_chunk(self, chunk: Chunk) -> Chunk: """Rearrange 
gradients accumulated in chunk.grad_chunk, and get prepared for gradient reduction.""" assert chunk.grad_chunk is not None # Make a backup for gradient accumulated before. # Here backup gradients should be multiplied, since it will be divided after gradient reduction. if chunk.grad_chunk.is_gathered: accumulated_grad = chunk.grad_chunk.cuda_global_chunk.clone().detach().mul_(chunk.pg_size) accumulated_grad_gathered = True else: if chunk.grad_chunk.cuda_shard is not None: accumulated_grad = chunk.grad_chunk.cuda_shard.clone().detach().mul_(chunk.pg_size) else: accumulated_grad = ( chunk.grad_chunk.cpu_shard.to(get_accelerator().get_current_device()) .clone() .detach() .mul_(chunk.pg_size) ) accumulated_grad_gathered = False # Reset grad_chunk, and chunk.grad_chunk will be accessed. grad_chunk = self.init_grad_chunk(chunk) grad_chunk.cuda_global_chunk.zero_() # Add backup gradients to grad_chunk. if accumulated_grad_gathered: grad_chunk.cuda_global_chunk.add_(accumulated_grad) else: grad_chunk.cuda_global_chunk[grad_chunk.shard_begin : grad_chunk.shard_end].add_(accumulated_grad) # Release accumulated_grad free_storage(accumulated_grad) return grad_chunk
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
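The ChunkManager file above only shows the implementation, so here is a minimal, hedged usage sketch. It assumes a CUDA device, a single-process process group initialized purely for illustration, and an arbitrary chunk configuration; none of the values are recommendations.

import torch
import torch.distributed as dist

from colossalai.zero.gemini.chunk import ChunkManager

# Single-process group purely for illustration; real training would already have one.
dist.init_process_group(backend="nccl", init_method="tcp://127.0.0.1:29500", rank=0, world_size=1)

# One chunk group keyed by the dp degree; every entry must provide "chunk_size".
manager = ChunkManager({1: {"chunk_size": 1024}})

params = [torch.nn.Parameter(torch.randn(256, device="cuda")) for _ in range(4)]
for p in params:
    manager.register_tensor(p, group_type="param", config_key=1, zero_group=dist.group.WORLD)
manager.close_all_groups()  # close the last, possibly partial, chunk of every group

chunk = manager.get_chunk(params[0])
manager.access_chunk(chunk)   # gather the chunk so its tensors can be computed on
manager.release_chunk(chunk)  # scatter it again once the computation is done
print(manager.total_mem)      # per-device memory tracked by the manager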
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/chunk_memstats_collector.py
colossalai/zero/gemini/memory_tracer/chunk_memstats_collector.py
from typing import Optional

from colossalai.accelerator import get_accelerator
from colossalai.zero.gemini.chunk import ChunkManager

from .memory_stats import MemStats
from .memstats_collector import MemStatsCollector


class ChunkMemStatsCollector(MemStatsCollector):
    def __init__(self, chunk_manager: ChunkManager, memstats: Optional[MemStats] = None) -> None:
        """
        Memory statistic collector for chunks.

        Args:
            chunk_manager (ChunkManager): the chunk manager.
            memstats (Optional[MemStats], optional): memory statistics collected by the runtime memory tracer (RMT).
                Defaults to None.
        """
        super().__init__(memstats)
        self._chunk_manager = chunk_manager

    # override
    def record_model_data_volume(self) -> None:
        """
        Record the model data volume on CUDA and CPU.
        """
        if self._start_flag and not self.use_outside_memstats:
            cuda_mem = self._chunk_manager.total_mem["cuda"]
            self._memstats.record_max_cuda_model_data(cuda_mem)

    @property
    def cuda_margin_mem(self) -> float:
        from colossalai.legacy.utils.memory import colo_device_memory_capacity

        return colo_device_memory_capacity(get_accelerator().get_current_device()) - self._memstats.max_overall_cuda
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
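A small, hedged sketch of how this collector is driven; `chunk_manager` stands for a previously built ChunkManager (see the chunk manager file above) and a CUDA device is assumed for the margin computation.

from colossalai.zero.gemini.memory_tracer import ChunkMemStatsCollector

collector = ChunkMemStatsCollector(chunk_manager)  # chunk_manager: an existing ChunkManager

collector.start_collection()
collector.record_model_data_volume()  # reads chunk_manager.total_mem["cuda"]
collector.sample_overall_data()       # samples overall CUDA usage and derives non-model data
collector.finish_collection()

print(collector.cuda_margin_mem)      # device capacity minus the peak overall CUDA usage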
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/memory_monitor.py
colossalai/zero/gemini/memory_tracer/memory_monitor.py
import json from abc import abstractmethod from concurrent.futures import ThreadPoolExecutor from time import sleep, time import torch from colossalai.accelerator import get_accelerator class MemoryMonitor: """Base class for all types of memory monitor. All monitors should have a list called `time_stamps` and a list called `mem_stats`. """ def __init__(self): self.time_stamps = [] self.mem_stats = [] def __len__(self): return len(self.mem_stats) @abstractmethod def start(self): pass @abstractmethod def finish(self): pass def state_dict(self): return { "time_stamps": self.time_stamps, "mem_stats": self.mem_stats, } def save(self, filename): with open(filename, "w") as f: json.dump(self.state_dict(), f) def clear(self): self.mem_stats.clear() self.time_stamps.clear() class AsyncMemoryMonitor(MemoryMonitor): """ An Async Memory Monitor running during computing. Sampling memory usage of the current GPU at interval of `1/(10**power)` sec. The idea comes from Runtime Memory Tracer of PatrickStar `PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management`_ Usage:: async_mem_monitor = AsyncMemoryMonitor() input = torch.randn(2, 20).cuda() OP1 = torch.nn.Linear(20, 30).cuda() OP2 = torch.nn.Linear(30, 40).cuda() async_mem_monitor.start() output = OP1(input) async_mem_monitor.finish() async_mem_monitor.start() output = OP2(output) async_mem_monitor.finish() async_mem_monitor.save('log.pkl') Args: power (int, optional): the power of time interval. Defaults to 10. .. _PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management: https://arxiv.org/abs/2108.05818 """ def __init__(self, power: int = 10): super().__init__() self.keep_measuring = False current_device = get_accelerator().get_current_device() def _set_cuda_device(): torch.cuda.set_device(current_device) self.executor = ThreadPoolExecutor(max_workers=1, initializer=_set_cuda_device) self.monitor_thread = None self.interval = 1 / (10**power) def set_interval(self, power: int): self.clear() self.interval = 1 / (10**power) def is_measuring(self): return self.keep_measuring def start(self): self.keep_measuring = True self.monitor_thread = self.executor.submit(self._measure_usage) def finish(self): if self.keep_measuring is False: return 0 self.keep_measuring = False max_usage = self.monitor_thread.result() self.monitor_thread = None self.time_stamps.append(time()) self.mem_stats.append(max_usage) return max_usage def _measure_usage(self): from colossalai.legacy.utils import colo_device_memory_used max_usage = 0 while self.keep_measuring: max_usage = max( max_usage, colo_device_memory_used(get_accelerator().get_current_device()), ) sleep(self.interval) return max_usage class SyncCudaMemoryMonitor(MemoryMonitor): """ A synchronized cuda memory monitor. It only record the maximum allocated cuda memory from start point to finish point. """ def __init__(self, power: int = 10): super().__init__() def start(self): torch.cuda.synchronize() torch.cuda.reset_peak_memory_stats() def finish(self) -> int: """ return max gpu memory used since latest `start()`. Returns: int: max GPU memory """ torch.cuda.synchronize() self.time_stamps.append(time()) max_usage = torch.cuda.max_memory_allocated() self.mem_stats.append(max_usage) return max_usage
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
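The asynchronous monitor's docstring above already shows its usage, so this short sketch covers the synchronous variant instead; it assumes a CUDA device and an arbitrary workload.

import torch

from colossalai.zero.gemini.memory_tracer import SyncCudaMemoryMonitor

monitor = SyncCudaMemoryMonitor()
x = torch.randn(1024, 1024, device="cuda")

monitor.start()                # synchronize and reset the peak-allocation counter
y = x @ x                      # the region being measured
peak_bytes = monitor.finish()  # synchronize and read torch.cuda.max_memory_allocated()

print(peak_bytes, monitor.state_dict())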
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/memstats_collector.py
colossalai/zero/gemini/memory_tracer/memstats_collector.py
import time from typing import Optional from .memory_monitor import SyncCudaMemoryMonitor from .memory_stats import MemStats class MemStatsCollector: """ A Memory statistic collector. It works in two phases. Phase 1. Collection Phase: collect memory usage statistics of CPU and GPU. The first iteration of DNN training. Phase 2. Runtime Phase: use the read-only collected stats The rest iterations of DNN training. It has a Sampling counter which is reset after DNN training iteration. """ def __init__(self, memstats: Optional[MemStats] = None) -> None: self._mem_monitor = SyncCudaMemoryMonitor() self._sampling_time = [] self._start_flag = False self._step_idx = 0 self._step_total = 0 if memstats is not None: self.use_outside_memstats = True self._memstats = memstats else: self.use_outside_memstats = False self._memstats = MemStats() def next_period_non_model_data_usage(self, device_type: str) -> int: """Maximum non model data memory usage during the next Op run Args: device_type (str): device type, can be 'cpu' or 'cuda'. Returns: int: max non model data memory usage of current sampling period """ assert not self._start_flag, "Cannot get mem stats info during collection phase." assert self._step_total > 0, "Cannot get mem stats info before collection phase." assert len(self._memstats.non_model_data_list(device_type)) > self._step_idx, ( f"{len(self._memstats.non_model_data_list(device_type))} should be > than step idx {self._step_idx}, " f"step total {self._step_total}" ) next_non_model_data = self._memstats.non_model_data_list(device_type)[self._step_idx] self._step_idx = (self._step_idx + 1) % self._step_total return next_non_model_data @property def sampling_time(self): return [t - self._sampling_time[0] for t in self._sampling_time] def start_collection(self): self._start_flag = True self._mem_monitor.start() def finish_collection(self): self.sample_overall_data() # self._step_total = len(self._sampling_time) self._step_total = len(self._memstats.non_model_data_list("cuda")) self._start_flag = False print(f"finish_collection {self._step_total}") # deprecated def record_model_data_volume(self) -> None: """ Sampling model data statistics. """ if self._start_flag and not self.use_outside_memstats: from colossalai.legacy.zero.gemini import StatefulTensor # The following code work for ZeroInitContext, which is deprecated in v0.1.12 cuda_mem = StatefulTensor.GST_MGR.total_mem["cuda"] self._memstats.record_max_cuda_model_data(cuda_mem) def sample_overall_data(self) -> None: """ Sampling overall and non model data cuda memory statistics. """ if self._start_flag and not self.use_outside_memstats: cuda_overall = self._mem_monitor.finish() self._memstats.record_max_cuda_overall_data(cuda_overall) self._memstats.calc_max_cuda_non_model_data() self._mem_monitor.start() if self._start_flag: self._sampling_time.append(time.time()) def clear(self) -> None: self._memstats.clear() self._start_flag = False self._step_idx = 0 self._step_total = 0
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
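A hedged sketch of the two-phase protocol described above: one collection iteration, then read-only queries. The tiny model, the CUDA device, and the direct call into the internal MemStats object (normally done by a Gemini hook or a subclass such as ChunkMemStatsCollector) are all illustrative assumptions.

import torch

from colossalai.zero.gemini.memory_tracer import MemStatsCollector

model = torch.nn.Linear(128, 128).cuda()
collector = MemStatsCollector()

# Phase 1: collection during the first training iteration.
collector.start_collection()
out = model(torch.randn(32, 128, device="cuda")).sum()
# Normally a hook records model data; done by hand here only to keep the sketch short.
collector._memstats.record_max_cuda_model_data(sum(p.numel() * p.element_size() for p in model.parameters()))
collector.sample_overall_data()
out.backward()
collector.finish_collection()

# Phase 2: read-only queries in later iterations.
print(collector.next_period_non_model_data_usage("cuda"))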
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/utils.py
colossalai/zero/gemini/memory_tracer/utils.py
from typing import Optional, Tuple

import torch


def colo_model_optimizer_usage(optim) -> Tuple[int, int]:
    """Trace the optimizer memory usage.

    Args:
        optim (ShardedOptimV2): an instance of ShardedOptimV2

    Returns:
        Tuple[int, int]: cuda/cpu memory usage in Byte
    """
    if optim is None:
        return 0, 0
    assert hasattr(optim, "get_memory_usage"), f"{type(optim)} has no attr get_memory_usage()"
    return optim.get_memory_usage()


def colo_model_mem_usage(model: torch.nn.Module) -> Tuple[int, int]:
    """
    Trace the model memory usage.

    Args:
        model (torch.nn.Module): a torch model

    Returns:
        Tuple[int, int]: cuda memory usage in Byte, cpu memory usage in Byte
    """
    if model is None:
        return 0, 0

    def _get_tensor_mem_use(t: Optional[torch.Tensor]):
        if t is None:
            return 0, 0
        assert isinstance(t, torch.Tensor)
        _cpu_mem_usage, _cuda_mem_usage = 0, 0
        if t.device.type == "cpu":
            _cpu_mem_usage += t.numel() * t.element_size()
        elif t.device.type == "cuda":
            _cuda_mem_usage += t.numel() * t.element_size()
        return _cuda_mem_usage, _cpu_mem_usage

    cuda_mem_usage = 0
    cpu_mem_usage = 0
    for param in model.parameters():
        if hasattr(param, "colo_attr"):
            t_cuda, t_cpu = param.colo_attr.get_memory_usage()
            cuda_mem_usage += t_cuda
            cpu_mem_usage += t_cpu
        else:
            t_cuda, t_cpu = _get_tensor_mem_use(param.data)
            cuda_mem_usage += t_cuda
            cpu_mem_usage += t_cpu

            t_cuda, t_cpu = _get_tensor_mem_use(param.grad)
            cuda_mem_usage += t_cuda
            cpu_mem_usage += t_cpu

    return cuda_mem_usage, cpu_mem_usage
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
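A quick, hedged sketch of the model-side helper; the small CPU-resident model is only an illustration and the byte counts depend on the parameter dtype.

import torch

from colossalai.zero.gemini.memory_tracer.utils import colo_model_mem_usage

model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU(), torch.nn.Linear(64, 8))
cuda_bytes, cpu_bytes = colo_model_mem_usage(model)
print(f"cuda={cuda_bytes}B cpu={cpu_bytes}B")  # parameter (and gradient) memory per device type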
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/static_memstats_collector.py
colossalai/zero/gemini/memory_tracer/static_memstats_collector.py
from typing import Optional import torch import torch.nn as nn from torch.fx import symbolic_trace from colossalai.fx.passes.meta_info_prop import MetaInfoProp from colossalai.fx.profiler import calculate_fwd_out, calculate_fwd_tmp, is_compatible_with_meta from colossalai.zero.gemini.chunk import ChunkManager if is_compatible_with_meta(): from colossalai.fx.profiler import MetaTensor from .chunk_memstats_collector import ChunkMemStatsCollector class ModuleInfos: def __init__( self, module: torch.nn.Module, module_name: str, module_full_name: str, parent_module: torch.nn.Module ): self.module = module self.module_name = module_name self.module_full_name = module_full_name self.parent_module = parent_module class StaticMemStatsCollector(ChunkMemStatsCollector): """ A Static Memory statistic collector. """ def __init__(self, module: nn.Module, chunk_manager: ChunkManager) -> None: super().__init__(chunk_manager) self.module = module self.module_info_list = [] def init_mem_stats(self, *inputs): self.register_opnodes_recursively(self.module) self.refactor_module() self.module = self.module.cpu() self.module.train() data = [MetaTensor(torch.rand(inp.shape, device="meta"), fake_device="cpu") for inp in inputs] gm = symbolic_trace(self.module) interp = MetaInfoProp(gm) interp.propagate(*data) total_mem = 0 for inp in inputs: total_mem += inp.numel() * inp.element_size() last_node = None module_name_list = [mInfo.module_full_name for mInfo in self.module_info_list] for node in gm.graph.nodes: total_mem = total_mem + calculate_fwd_tmp(node) + calculate_fwd_out(node) if node.op == "call_module": if node.name.endswith("_0") and node.name[:-2] in module_name_list: self._non_model_data_cuda_list.append(total_mem) last_node = node self._non_model_data_cuda_list.append(total_mem) self._non_model_data_cuda_list = self._non_model_data_cuda_list[1:] cur_module_mem_fwd = 0 cur_module_mem_bwd = 0 grad_module_out = last_node.meta["fwd_mem_out"] for node in gm.graph.nodes.__reversed__(): cur_module_mem_fwd = cur_module_mem_fwd + calculate_fwd_tmp(node) + calculate_fwd_out(node) cur_module_mem_bwd = cur_module_mem_bwd + node.meta["bwd_mem_tmp"] + node.meta["bwd_mem_out"] if node.op == "call_module": if node.name.endswith("_0") and node.name[:-2] in module_name_list: self._non_model_data_cuda_list.append(total_mem + grad_module_out + cur_module_mem_bwd) total_mem = total_mem - cur_module_mem_fwd cur_module_mem_fwd = 0 cur_module_mem_bwd = 0 grad_module_out = node.meta["bwd_mem_out"] self._step_total = len(self._non_model_data_cuda_list) self.recover_module() def refactor_module(self): for modInfo in self.module_info_list: temp_node = nn.Sequential(nn.ReLU(), modInfo.module) modInfo.parent_module.__setattr__(modInfo.module_name, temp_node) def recover_module(self): for modInfo in self.module_info_list: modInfo.parent_module.__setattr__(modInfo.module_name, modInfo.module) def register_opnodes_recursively( self, module: torch.nn.Module, name: str = "", full_name: str = "", parent_module: Optional[torch.nn.Module] = None, ): assert isinstance(module, torch.nn.Module) for child_name, child in module.named_children(): self.register_opnodes_recursively(child, child_name, full_name + "_" + child_name, module) # Early return on modules with no parameters. if len(list(module.parameters(recurse=False))) == 0: return self.module_info_list.append(ModuleInfos(module, name, full_name[1:], parent_module))
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/param_runtime_order.py
colossalai/zero/gemini/memory_tracer/param_runtime_order.py
from abc import ABC

import torch


class ParamGenerator(ABC):
    def append(self, param: torch.nn.Parameter):
        pass

    def generate(self):
        pass

    def clear(self):
        pass


class OrderedParamGenerator(ParamGenerator):
    """OrderedParamGenerator

    Records the order in which parameters are visited during runtime.
    """

    def __init__(self) -> None:
        self.param_visited_order = []

    def append(self, param: torch.nn.Parameter):
        self.param_visited_order.append(param)

    def generate(self):
        visited_set = set()
        for p in self.param_visited_order:
            if p not in visited_set:
                yield p
                visited_set.add(p)
        del visited_set

    def is_empty(self):
        return len(self.param_visited_order) == 0

    def clear(self):
        self.param_visited_order = []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
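A tiny sketch of the generator's deduplication behaviour: repeated visits of the same parameter are recorded but yielded only once, in first-visit order.

import torch

from colossalai.zero.gemini.memory_tracer import OrderedParamGenerator

p1 = torch.nn.Parameter(torch.zeros(2))
p2 = torch.nn.Parameter(torch.ones(2))

gen = OrderedParamGenerator()
for p in (p1, p2, p1):  # p1 is visited twice at runtime
    gen.append(p)

assert [id(p) for p in gen.generate()] == [id(p1), id(p2)]
assert not gen.is_empty()
gen.clear()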
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/__init__.py
colossalai/zero/gemini/memory_tracer/__init__.py
from .param_runtime_order import OrderedParamGenerator  # isort:skip
from .memory_stats import MemStats  # isort:skip
from .memory_monitor import AsyncMemoryMonitor, SyncCudaMemoryMonitor  # isort:skip
from .memstats_collector import MemStatsCollector  # isort:skip
from .chunk_memstats_collector import ChunkMemStatsCollector  # isort:skip

__all__ = [
    "AsyncMemoryMonitor",
    "SyncCudaMemoryMonitor",
    "MemStatsCollector",
    "ChunkMemStatsCollector",
    "MemStats",
    "OrderedParamGenerator",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/runtime_mem_tracer.py
colossalai/zero/gemini/memory_tracer/runtime_mem_tracer.py
import torch.nn from colossalai.tensor.param_op_hook import ColoParamOpHookManager from colossalai.utils import _cast_float from .memory_stats import MemStats __all__ = ["RuntimeMemTracer"] class RuntimeMemTracer: """RuntimeMemTracer for the module training using ColoParameter. Trace non-model memory usage during fwd+bwd process. It is obtained by using a tensor with the same shape as the training process as the inputs and running an single fwd+bwd to trace the statistics. NOTE() 1. The premise to use this tracer is that the target DNN execute the same operations at each iterations, 2. Module buffers are viewed as non-model data. """ def __init__(self, module: torch.nn.Module, dtype: torch.dtype = torch.half): super().__init__() from colossalai.legacy.zero.gemini.ophooks.runtime_mem_tracer_hook import ( GradMemStats, GradMemTracerHook, ParamMemTracerHook, ) self.module = module self.dtype = dtype self._gradstat = GradMemStats() self._memstats = MemStats() self.param_op_hook = ParamMemTracerHook(self._memstats, self._gradstat) self.grad_hook = GradMemTracerHook(self._gradstat) self.cpu_param_data_dict = {} for p in module.parameters(): p.data = p.data.to(dtype) self._cast_buffers_to_cuda_dtype() def parameters_in_runtime_order(self): return self._memstats._param_runtime_order.generate() def memstats(self): return self._memstats def __call__(self, *args, **kwargs): return self.forward(*args, **kwargs) def _backup_params(self): """ The function is called before forward. Backup model params on cpu. """ for p in self.module.parameters(): self.cpu_param_data_dict[p] = torch.empty(p.data.shape, dtype=self.dtype, device="cpu") self.cpu_param_data_dict[p].copy_(p.data) def _restore_params(self): """ This function is called after backward. Restore model params. """ for p in self.module.parameters(): p.data = torch.empty(p.data.shape, dtype=self.dtype, device="cpu", requires_grad=p.data.requires_grad) p.data.copy_(self.cpu_param_data_dict[p]) self.cpu_param_data_dict.clear() def _pre_forward(self): self._clear_cuda_mem_info() self._backup_params() self.grad_hook.register_grad_hook(self.module) self.param_op_hook.mem_monitor.start() def forward(self, *args, **kwargs): args, kwargs = _cast_float(args, self.dtype), _cast_float(kwargs, self.dtype) self.module.zero_grad(set_to_none=True) self._pre_forward() with ColoParamOpHookManager.use_hooks(self.param_op_hook): outputs = self.module(*args, **kwargs) return outputs def backward(self, loss): with self.param_op_hook.switch_to_backward(), ColoParamOpHookManager.use_hooks(self.param_op_hook): loss.backward() self._post_backward() def _post_backward(self): cuda_volume = self.param_op_hook.mem_monitor.finish() self._memstats.record_max_cuda_overall_data(cuda_volume) # calc the last Op non model data self._memstats.calc_max_cuda_non_model_data() self.grad_hook.remove_grad_hook() self._restore_params() def _clear_cuda_mem_info(self): self._memstats.clear() self._gradstat.clear() def _cast_buffers_to_cuda_dtype(self): for buffer in self.module.buffers(): buffer.data = buffer.cuda() if torch.is_floating_point(buffer): buffer.data = buffer.data.to(self.dtype)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
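A hedged sketch of the tracer's call sequence. The class docstring above expects the module's parameters to be ColoParameters created by Colossal-AI's initialization context; the plain nn.Sequential below only stands in for such a module to show the forward/backward API, and a CUDA device is assumed.

import torch

from colossalai.zero.gemini.memory_tracer.runtime_mem_tracer import RuntimeMemTracer

model = torch.nn.Sequential(torch.nn.Linear(256, 256), torch.nn.GELU(), torch.nn.Linear(256, 256)).cuda()
tracer = RuntimeMemTracer(model, dtype=torch.half)

loss = tracer(torch.randn(8, 256, device="cuda")).sum()  # forward under the tracing hooks
tracer.backward(loss)                                     # backward; stats are finalized and params restored

memstats = tracer.memstats()
print(memstats.max_overall_cuda, memstats.non_model_data_list("cuda"))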
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/zero/gemini/memory_tracer/memory_stats.py
colossalai/zero/gemini/memory_tracer/memory_stats.py
from typing import List, Optional import torch from .param_runtime_order import OrderedParamGenerator class MemStats(object): def __init__(self) -> None: """ Store the non model data statistics used for Gemini and GeminiOptimizer. """ # (preop_step, List[param]) self._step_param_dict = dict() # (param, List[preop_step]) self._param_step_dict = dict() # (preop_step, non_model_data) non model data used during preop_step ~ (preop_step+1) self._step_nmd_dict = dict() self._param_runtime_order = OrderedParamGenerator() self._preop_step = 0 self._prev_overall_cuda = -1 self._max_overall_cuda = 0 self._prev_md_cuda = -1 # old version self._model_data_cuda_list = [] self._model_data_cpu_list = [] self._overall_cuda_list = [] self._overall_cpu_list = [] self._non_model_data_cuda_list = [] self._non_model_data_cpu_list = [] def calc_max_cuda_non_model_data(self): if self._prev_overall_cuda != -1 and self._prev_md_cuda != -1: max_cuda_non_model_data = self._prev_overall_cuda - self._prev_md_cuda self._step_nmd_dict[self._preop_step - 1] = max_cuda_non_model_data # compatibility of the old version. self._non_model_data_cuda_list.append(max_cuda_non_model_data) def record_max_cuda_model_data(self, val): self._prev_md_cuda = val def record_max_cuda_overall_data(self, val): self._prev_overall_cuda = val self._max_overall_cuda = max(self._max_overall_cuda, val) @property def max_overall_cuda(self): return self._max_overall_cuda def increase_preop_step(self, param_list: List[torch.nn.Parameter]): """ the time step is increased. param list is used between current and the next time step. Args: param_list (List[torch.nn.Parameter]): a list of torch parameters. """ for p in param_list: if p not in self._param_step_dict: self._param_step_dict[p] = [self._preop_step] else: self._param_step_dict[p].append(self._preop_step) self._param_runtime_order.append(p) self._step_param_dict[self._preop_step] = param_list self._preop_step += 1 def param_used_step(self, param: torch.nn.Parameter) -> Optional[List[int]]: """param_used_step get the timestep list using the param Args: param (torch.nn.Parameter): a torch param Returns: Optional[List[int]]: a list of int indicates the time step of preop hook. """ if param not in self._param_step_dict: return None else: return self._param_step_dict[param] def param_order(self): if self._param_runtime_order.is_empty(): raise RuntimeError else: return self._param_runtime_order def non_model_data_list(self, device_type: str) -> List[int]: if device_type == "cuda": return self._non_model_data_cuda_list elif device_type == "cpu": return self._non_model_data_cpu_list else: raise TypeError def max_non_model_data(self, device_type: str) -> float: if device_type == "cuda": return max(self._non_model_data_cuda_list) elif device_type == "cpu": return max(self._non_model_data_cpu_list) else: raise TypeError def clear(self): self._model_data_cuda_list = [] self._overall_cuda_list = [] self._model_data_cpu_list = [] self._overall_cpu_list = [] self._non_model_data_cpu_list = [] self._non_model_data_cuda_list = [] self._param_runtime_order.clear() self._step_param_dict.clear() self._param_step_dict.clear() self._step_nmd_dict.clear() self._preop_step = 0 self._prev_overall_cuda = -1 self._prev_md_cuda = -1
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
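A small sketch of how MemStats accumulates information; the two parameters and the byte counts stand in for what Gemini's pre-op hooks would record.

import torch

from colossalai.zero.gemini.memory_tracer import MemStats

p1, p2 = torch.nn.Parameter(torch.zeros(4)), torch.nn.Parameter(torch.ones(4))
stats = MemStats()

stats.increase_preop_step([p1, p2])  # step 0 touches p1 and p2
stats.increase_preop_step([p2])      # step 1 touches p2 again

stats.record_max_cuda_model_data(1_000)
stats.record_max_cuda_overall_data(2_500)
stats.calc_max_cuda_non_model_data()  # 2_500 - 1_000 is attributed to the last step

print(stats.param_used_step(p2))          # [0, 1]
print(stats.non_model_data_list("cuda"))  # [1500]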
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/autochunk/estimate_memory.py
colossalai/autochunk/estimate_memory.py
from typing import Dict, List import torch from torch.fx.node import Node from .utils import NodeMgr, get_node_shape, is_non_memory_node class EstimateMemory(object): """ Estimate memory with chunk """ def __init__(self) -> None: pass def _get_node_size(self, x: Node) -> float: """ return node size in MB """ x = x.meta["tensor_meta"] if not hasattr(x, "numel"): out = sum([i.numel * torch.tensor([], dtype=i.dtype).element_size() for i in x]) else: out = x.numel * torch.tensor([], dtype=x.dtype).element_size() out = float(out) / 1024**2 return out def _add_active_node(self, n: Node, active_nodes: Dict, chunk_ratio: float) -> None: """ add an active node and its shape to active node dict """ if get_node_shape(n) is None: return if n.op == "placeholder": return if n not in active_nodes: node_size = self._get_node_size(n) * chunk_ratio active_nodes[n] = node_size def _build_delete_node_dict(self, node_mgr: NodeMgr) -> Dict: """ build delete node dict, means node should be deleted at what time """ delete_node_dict = {} for idx, node in enumerate(node_mgr.get_node_list()): # skip non shape node if get_node_shape(node) is None: continue # dont remove free nodes elif node.op == "placeholder": delete_node_dict[node] = len(node_mgr.get_node_list()) # node no user elif len(node.users) == 0: delete_node_dict[node] = idx # log max use else: node_user_idx = [node_mgr.find_node_idx(i) for i in node.users.keys()] delete_node_dict[node] = max(node_user_idx) return delete_node_dict def _remove_deactive_node( self, user_idx: int, user: Node, active_nodes: List, delete_node_dict: List, kept_nodes: List = None ) -> None: """ remove deactivate nodes from active nodes """ if kept_nodes is None: kept_nodes = [] if user.op in ("output",): return for node in list(active_nodes.keys()): # dont delete kept nodes if node in kept_nodes: continue # should be deleted if delete_node_dict[node] <= user_idx: active_nodes.pop(node) def _get_tmp_memory(self, node, not_contiguous_list, delete=False): mem = 0 not_contiguous_ops = ["permute"] if node.op == "call_function" and any(n in node.name for n in ["matmul", "reshape"]): for n in node.args: if n in not_contiguous_list: # matmul won't change origin tensor, but create a tmp copy mem += self._get_node_size(n) elif node.op == "call_module": for n in node.args: if n in not_contiguous_list: # module will just make origin tensor to contiguous if delete: not_contiguous_list.remove(n) elif node.op == "call_method" and any(i in node.name for i in not_contiguous_ops): if node not in not_contiguous_list: not_contiguous_list.append(node) return mem def _get_chunk_ratio(self, node, chunk_node_dim, chunk_size): if node not in chunk_node_dim: return 1.0 node_shape = get_node_shape(node) chunk_dim = chunk_node_dim[node]["chunk_dim"] if chunk_dim is None: return 1.0 else: return chunk_size / float(node_shape[chunk_dim]) def _print_compute_op_mem_log(self, log, nodes, title=None): if title: print(title) for idx, (l, n) in enumerate(zip(log, nodes)): if n.op in ["placeholder", "get_attr", "output"]: continue if any(i in n.name for i in ["getitem", "getattr"]): continue print("%s:%.2f \t" % (n.name, l), end="") if (idx + 1) % 3 == 0: print("") print("\n") def _add_active_nodes_from_list(self, active_nodes: List, nodes: List) -> List: """ add active nodes from nodes """ for n in nodes: self._add_active_node(n, active_nodes, 1) def _get_memory_from_active_nodes(self, active_nodes: Dict) -> float: """ sum all memory of active nodes """ out = [i for i in active_nodes.values()] out = sum(out) return 
out def estimate_chunk_inference_mem(self, node_list: List, chunk_infos: Dict = None, print_mem: bool = False): """ Estimate inference memory with chunk Args: node_list (List): _description_ chunk_infos (Dict): Chunk information. Defaults to None. print_mem (bool): Wether to print peak memory of every node. Defaults to False. Returns: act_memory_peak_log (List): peak memory of every node act_memory_after_node_log (List): memory after executing every node active_node_list_log (List): active nodes of every node. active nodes refer to nodes generated but not deleted. """ act_memory = 0.0 act_memory_peak_log = [] act_memory_after_node_log = [] active_nodes = {} active_nodes_log = [] not_contiguous_list = [] node_mgr = NodeMgr(node_list) delete_node_dict = self._build_delete_node_dict(node_mgr) use_chunk = True if chunk_infos is not None else False chunk_within = False chunk_region_idx = None chunk_ratio = 1 # use it to estimate chunk mem chunk_inputs_all = [] if use_chunk: chunk_regions = [i["region"] for i in chunk_infos] chunk_starts = [i[0] for i in chunk_regions] chunk_ends = [i[1] for i in chunk_regions] chunk_inputs = [i["inputs"] for i in chunk_infos] chunk_inputs_non_chunk = [i["inputs_non_chunk"] for i in chunk_infos] chunk_inputs_all = [j for i in chunk_inputs for j in i] + [j for i in chunk_inputs_non_chunk for j in i] chunk_outputs = [i["outputs"] for i in chunk_infos] chunk_node_dim = [i["node_chunk_dim"] for i in chunk_infos] chunk_sizes = [i["chunk_size"] if "chunk_size" in i else 1 for i in chunk_infos] for idx, node in enumerate(node_mgr.get_node_list()): # if node in chunk start nodes, change chunk ratio and add chunk_tensor if use_chunk and idx in chunk_starts: chunk_within = True chunk_region_idx = chunk_starts.index(idx) self._add_active_nodes_from_list(active_nodes, chunk_outputs[chunk_region_idx]) # determine chunk ratio for current node if chunk_within: chunk_ratio = self._get_chunk_ratio( node, chunk_node_dim[chunk_region_idx], chunk_sizes[chunk_region_idx] ) # add current node as active node self._add_active_node(node, active_nodes, chunk_ratio) act_memory = self._get_memory_from_active_nodes(active_nodes) # if node is placeholder, just add the size of the node if node.op == "placeholder": act_memory_peak_log.append(act_memory) # skip output elif node.op == "output": continue # no change for non compute node elif is_non_memory_node(node): act_memory_peak_log.append(act_memory) # node is a compute op, calculate tmp else: # forward memory # TODO: contiguous_memory still not accurate for matmul, view, reshape and transpose tmp_memory = self._get_tmp_memory(node, not_contiguous_list, delete=True) * chunk_ratio # record max act memory act_memory_peak_log.append(act_memory + tmp_memory) # remove_deactive_node self._remove_deactive_node(idx, node, active_nodes, delete_node_dict, kept_nodes=chunk_inputs_all) # if node in chunk end nodes, restore chunk settings if use_chunk and idx in chunk_ends: self._remove_deactive_node(idx, node, active_nodes, delete_node_dict) # dont provide kept nodes now chunk_within = False chunk_ratio = 1 chunk_region_idx = None act_memory = self._get_memory_from_active_nodes(active_nodes) act_memory_after_node_log.append(act_memory) active_nodes_log.append(active_nodes.copy()) if print_mem: print("with chunk" if use_chunk else "without chunk") self._print_compute_op_mem_log(act_memory_peak_log, node_mgr.get_node_list(), "peak") # param_memory = parameter_size(gm) # all_memory = act_memory + param_memory return act_memory_peak_log, 
act_memory_after_node_log, active_nodes_log
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
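A hedged sketch of the estimator on an unchunked graph. It follows the tracing recipe used elsewhere in this repo (torch.fx symbolic_trace plus MetaInfoProp over MetaTensor inputs); whether every node ends up carrying the required "tensor_meta" depends on the model being traceable this way.

import torch
from torch.fx import symbolic_trace

from colossalai.autochunk.estimate_memory import EstimateMemory
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.fx.profiler import MetaTensor

model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.GELU(), torch.nn.Linear(64, 64))
gm = symbolic_trace(model)
MetaInfoProp(gm).propagate(MetaTensor(torch.rand(4, 64, device="meta"), fake_device="cpu"))

peak_log, after_log, active_log = EstimateMemory().estimate_chunk_inference_mem(list(gm.graph.nodes))
print(f"peak activation memory without chunking: {max(peak_log):.2f} MB")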
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/autochunk/search_chunk.py
colossalai/autochunk/search_chunk.py
import copy from typing import Dict, List, Tuple from torch.fx.node import Node from .estimate_memory import EstimateMemory from .reorder_graph import ReorderGraph from .select_chunk import SelectChunk from .trace_flow import TraceFlow from .trace_indice import TraceIndice from .utils import NodeMgr, get_logger, is_non_compute_node, is_non_compute_node_except_placeholder class SearchChunk(object): """ This is the core class for AutoChunk. It defines the framework of the strategy of AutoChunk. Chunks will be selected one by one until search stops. The chunk search is as follows: 1. find the peak memory node 2. find the max chunk region according to the peak memory node 3. find all possible chunk regions in the max chunk region 4. find the best chunk region for current status 5. goto 1 Attributes: gm: graph model print_mem (bool): print estimated memory trace_index: trace the flow of every dim of every node to find all free dims trace_flow: determine the region chunk strategy reorder_graph: reorder nodes to improve chunk efficiency estimate_memory: estimate memory with chunk select_chunk: select the best chunk region Args: gm: graph model max_memory (int): max memory in MB print_mem (bool): print estimated memory """ def __init__(self, gm, max_memory=None, print_mem=False, print_progress=False) -> None: self.print_mem = print_mem self.max_memory = max_memory self.print_progress = print_progress self.node_mgr = NodeMgr(list(gm.graph.nodes)) self.trace_indice = TraceIndice(self.node_mgr) self.estimate_memory = EstimateMemory() self._init_trace() self.trace_flow = TraceFlow(self.trace_indice, self.node_mgr) self.reorder_graph = ReorderGraph(self.trace_indice, self.node_mgr) self.select_chunk = SelectChunk( self.trace_indice, self.estimate_memory, self.reorder_graph, self.node_mgr, max_memory=max_memory, ) def _init_trace(self) -> None: """ find the max trace range for every node reduce the computation complexity of trace_indice """ # find all max ranges active_nodes = self.estimate_memory.estimate_chunk_inference_mem(self.node_mgr.get_node_list())[2] # set trace range and do the trace if self.print_progress: get_logger().info("AutoChunk start tracing indice") self.trace_indice.set_active_nodes(active_nodes) self.trace_indice.trace_indice() def _find_peak_region(self, mem_peak: List) -> int: """ find peak node, along with its neighbor nodes exceeds max mem """ max_value = max(mem_peak) max_idx = mem_peak.index(max_value) peak_region = [max_idx, max_idx] if self.max_memory is None: return peak_region # to left count = 0 for i in range(max_idx - 1, -1, -1): if mem_peak[i] > self.max_memory: peak_region[0] = i else: count += 1 if count >= 3: break # to right count = 0 for i in range(max_idx + 1, len(mem_peak) - 1): if mem_peak[i] > self.max_memory: peak_region[1] = i count = 0 else: count += 1 if count >= 3: break return peak_region def _search_max_chunk_region(self, active_node: List, peak_region: int, chunk_regions: List = None) -> Tuple: """ Search max chunk region according to peak memory node Chunk region starts extending from the peak node, stops where free var num is min Args: active_node (List): active node status for every node peak_node_idx (int): peak memory node idx chunk_regions (List): chunk region infos Returns: chunk_region_start (int) chunk_region_end (int) """ # check if peak node already in chunk info if chunk_regions is not None: for i in chunk_regions: if ( i["region"][0] < peak_region[0] <= i["region"][1] or i["region"][0] < peak_region[1] <= i["region"][1] ): return None 
active_node_num = [len(i) for i in active_node] window_size = 100 # search min for start min_num = 1e4 for i in range(peak_region[0], max(peak_region[0] - window_size, -1), -1): if active_node_num[i] < min_num: min_num = active_node_num[i] chunk_region_start = i # search min for end min_num = 1e4 for i in range(peak_region[1], min(peak_region[1] + window_size, len(active_node_num))): if active_node_num[i] < min_num: min_num = active_node_num[i] chunk_region_end = i # avoid chunk regions overlap if chunk_regions is not None: for i in chunk_regions: region = i["region"] if chunk_region_start >= region[0] and chunk_region_end <= region[1]: return None elif region[0] <= chunk_region_start <= region[1] and chunk_region_end > region[1]: chunk_region_start = region[1] + 1 elif region[0] <= chunk_region_end <= region[1] and chunk_region_start < region[0]: chunk_region_end = region[0] - 1 return chunk_region_start, chunk_region_end def _find_chunk_info(self, input_trace, output_trace, start_idx, end_idx) -> List: """ Find chunk info for a region. We are given the region start and region end, and need to find out all chunk info for it. We first loop every dim of start node and end node, to see if we can find dim pair, which is linked in a flow and not computed. If found, we then search flow in the whole region to find out all chunk infos. Args: input_trace (List): node's input trace in region output_trace (List): node's output trace in region start_idx (int): region start node index end_idx (int): region end node index Returns: chunk_infos: possible regions found """ start_traces = input_trace[start_idx] if len(start_traces) > 1: # TODO need to be removed return [] end_trace = output_trace[end_idx] end_node = self.node_mgr.get_node_by_idx(end_idx) chunk_infos = [] for end_dim, _ in enumerate(end_trace["indice"]): for start_node, start_trace in start_traces.items(): for start_dim, _ in enumerate(start_trace["indice"]): if not self.trace_flow.check_region_start_end( start_node, start_dim, start_idx, end_node, end_dim, end_idx ): continue # flow search chunk_info = self.trace_flow.flow_search(start_idx, start_dim, end_idx, end_dim) if chunk_info is None: continue chunk_infos.append(chunk_info) return chunk_infos def _search_possible_chunk_regions(self, max_chunk_region: Tuple, peak_region: Node) -> List: """ Search every possible region within the max chunk region. 
Args: max_chunk_region (Tuple) peak_node (Node): peak memory node Returns: possible_chunk_region (List) """ possible_chunk_region = [] output_trace = copy.deepcopy(self.trace_indice.indice_trace_list) input_trace = [] # trace of a node's input nodes for _, n in enumerate(self.node_mgr.get_node_list()): cur_trace = {} for arg in n.args: if type(arg) == type(n) and not is_non_compute_node_except_placeholder(arg): cur_trace[arg] = self.trace_indice._find_trace_from_node(arg) input_trace.append(cur_trace) for start_idx in range(max_chunk_region[0], peak_region[0] + 1): for end_idx in range(peak_region[1], max_chunk_region[1] + 1): # skip non compute nodes if is_non_compute_node(self.node_mgr.get_node_by_idx(start_idx)) or is_non_compute_node( self.node_mgr.get_node_by_idx(end_idx) ): continue # select free dim chunk_info = self._find_chunk_info(input_trace, output_trace, start_idx, end_idx) if len(chunk_info) > 0: possible_chunk_region.extend(chunk_info) return possible_chunk_region def _step_search( self, mem_peak: List[float], active_node: List[List[Node]], chunk_infos: List[Dict], ) -> Dict: """ Find one chunk region The chunk search is as follows: 1. find the peak memory node 2. find the max chunk region according to the peak memory node 3. find all possible chunk regions in the max chunk region 4. find the best chunk region for current status Args: mem_peak (List): peak memory for every node active_node (List[List[Node]]): active node for every node chunk_infos (List[Dict]): all chunk info Returns: best_chunk_region (Dict) """ peak_region = self._find_peak_region(mem_peak) max_chunk_region = self._search_max_chunk_region(active_node, peak_region, chunk_infos) if max_chunk_region == None: return None possible_chunk_regions = self._search_possible_chunk_regions(max_chunk_region, peak_region) best_chunk_region = self.select_chunk._select_best_chunk_region(possible_chunk_regions, chunk_infos, mem_peak) best_chunk_region = self.reorder_graph.reorder_all(best_chunk_region) return best_chunk_region def search_region(self) -> Dict: """ Search all chunk regions: 1. Estimate current memory 2. Find best chunk for current memory 3. goto 1 Returns: chunk_infos (Dict) """ if self.print_progress: get_logger().info("AutoChunk start searching chunk regions") chunk_infos = [] init_mem_peak, _, active_node = self.estimate_memory.estimate_chunk_inference_mem(self.node_mgr.get_node_list()) mem_peak = init_mem_peak while True: chunk_info = self._step_search(mem_peak, active_node, chunk_infos) if chunk_info is None: break chunk_infos.append(chunk_info) mem_peak, _, active_node = self.estimate_memory.estimate_chunk_inference_mem( self.node_mgr.get_node_list(), chunk_infos ) if self.print_progress: get_logger().info( "AutoChunk find chunk region %d = (%d, %d)" % (len(chunk_infos), chunk_info["region"][0], chunk_info["region"][1]) ) if self.print_mem: self.print_mem = False self.estimate_memory.estimate_chunk_inference_mem( self.node_mgr.get_node_list(), chunk_infos, print_mem=True ) return chunk_infos
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
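A hedged sketch of driving the chunk-region search end to end; `gm` stands for an FX graph module traced and meta-annotated as in the estimator sketch above, and the 1024 MB budget is purely illustrative.

from colossalai.autochunk.search_chunk import SearchChunk

# `gm` is assumed to be a meta-annotated torch.fx.GraphModule (see the sketch above).
search = SearchChunk(gm, max_memory=1024, print_mem=False, print_progress=True)
chunk_infos = search.search_region()
for info in chunk_infos:
    print("chunk region:", info["region"], "output chunk dims:", info["outputs_dim"])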
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/autochunk/trace_flow.py
colossalai/autochunk/trace_flow.py
from typing import Dict, List, Tuple from torch.fx.node import Node from .trace_indice import TraceIndice from .utils import ( NodeMgr, find_chunk_all_input_nodes, find_chunk_compute_input_and_output_nodes, find_tensor_shape_node, flat_list, get_node_name, get_node_shape, is_non_compute_node, ) class TraceFlow(object): def __init__(self, trace_indice: TraceIndice, node_mgr: NodeMgr) -> None: self.trace_indice = trace_indice self.node_mgr = node_mgr def check_index_source(self, start_dim, start_node, start_idx, end_dim, end_node): """ Check 2 given index: one index should be source of the other Args: start_idx(int): start node chunk dim start_node(node): start node end_idx(int): end node chunk dim end_node(node): end node Returns: bool: True if check pass """ # we use start_node_idx instead of real chunk index start_node_idx = self.node_mgr.find_node_idx(start_node) end_node_trace = self.trace_indice._find_trace_from_node(end_node) end_node_trace_source = end_node_trace["source"][end_dim] sorted_source = sorted(end_node_trace_source.items(), key=lambda d: d[0], reverse=True) for node_idx, node_dim in sorted_source: if node_idx == start_node_idx and start_dim in node_dim: return True # it means we meet a node outside the loop, and the node is not input node if node_idx < start_node_idx: return False return False def check_index_compute(self, start_idx, end_dim, end_node, end_idx): """ Check 2 given index: check they haven't been computed in the source trace. Args: start_idx(int): start node chunk dim start_node(node): start node end_idx(int): end node chunk dim end_node(node): end node Returns: bool: True if check pass """ end_node_trace = self.trace_indice._find_trace_from_node(end_node) end_node_compute = end_node_trace["compute"][end_dim] if any(start_idx <= i <= end_idx for i in end_node_compute): return False return True def _assign_single_node_flow( self, arg_node: Node, start_idx: int, end_idx: int, cur_node: Node, cur_node_dim: int, cur_node_compute: Dict, cur_node_source: Dict, cur_node_fix_dim: List, all_node_info: Dict, next_node_list: List, ) -> bool: """ Given the current node and one of its arg node, this function finds out arg node's chunk dim and fix dim Args: arg_node (Node): input node start_idx (int): chunk region start end_idx (int): chunk region end cur_node_dim (int): current node chunk dim cur_node_compute (Dict): current node compute dict cur_node_source (Dict): current node source dict cur_node_fix_dim (List): current node fix dim all_node_info (Dict): all node chunk info in the chunk region next_node_list (List) Returns: bool: True if this node can be added to the flow, vice versa. 
""" arg_idx = self.node_mgr.find_node_idx(arg_node) # arg in chunk range or be inputs if not (start_idx <= arg_idx < end_idx): return True # get fix dim arg_fix_dim = [] if cur_node_dim is not None: for i in cur_node_fix_dim: fix_dim_source = cur_node_source[i] if arg_idx in fix_dim_source: arg_fix_dim.append(fix_dim_source[arg_idx][0]) if arg_node in all_node_info: arg_fix_dim = list(set(all_node_info[arg_node]["fix_dim"] + arg_fix_dim)) # find arg dim if cur_node_dim is not None: # dim is computed if arg_idx in cur_node_compute[cur_node_dim]: return False if arg_idx not in cur_node_source[cur_node_dim]: arg_dim = None else: arg_dim = cur_node_source[cur_node_dim][arg_idx][0] # chunk dim cannot be in fix dims if arg_dim in arg_fix_dim: return False # chunk dim should be None if shape size is 1 if get_node_shape(arg_node)[arg_dim] == 1: arg_dim = None # chunk shape should equal cur node elif get_node_shape(arg_node)[arg_dim] != 1: if cur_node_dim is not None and get_node_shape(cur_node)[cur_node_dim] != 1: if get_node_shape(arg_node)[arg_dim] != get_node_shape(cur_node)[cur_node_dim]: return False else: arg_dim = None # add arg rest dim as fix dim arg_fix_dim = list(range(len(get_node_shape(arg_node)))) if arg_dim is not None: arg_fix_dim.remove(arg_dim) # if already in node_info, arg dim must be same if arg_node in all_node_info: if all_node_info[arg_node]["chunk_dim"] != arg_dim: return False all_node_info[arg_node]["fix_dim"] = arg_fix_dim # else add it to list else: all_node_info[arg_node] = {"chunk_dim": arg_dim, "fix_dim": arg_fix_dim} next_node_list.append(arg_node) return True def _get_all_node_info(self, end_dim, start_idx, end_idx): cur_node_list = [self.node_mgr.get_node_by_idx(end_idx)] # start from the last node all_node_info = {cur_node_list[0]: {"chunk_dim": end_dim, "fix_dim": []}} while len(cur_node_list) > 0: next_node_list = [] for cur_node in cur_node_list: # get cur node info cur_node_chunk_dim = all_node_info[cur_node]["chunk_dim"] cur_node_fix_dim = all_node_info[cur_node]["fix_dim"] if cur_node_chunk_dim is not None: cur_node_compute = self.trace_indice._find_compute_trace_from_node(cur_node) cur_node_source = self.trace_indice._find_source_trace_from_node(cur_node) else: cur_node_compute = cur_node_source = None # get all valid args arg_list = [] for arg in cur_node.all_input_nodes: if type(arg) != type(cur_node): continue if is_non_compute_node(arg): continue if get_node_shape(arg) is None: continue arg_list.append(arg) flow_flag = self._assign_single_node_flow( arg, start_idx, end_idx, cur_node, cur_node_chunk_dim, cur_node_compute, cur_node_source, cur_node_fix_dim, all_node_info, next_node_list, ) if flow_flag == False: return None cur_node_list = next_node_list return all_node_info def _get_input_nodes_dim(self, inputs: List[Node], start_idx: int, end_idx: int, all_node_info: Dict) -> Tuple: """ Get chunk dim for every input node for their every entry, remove unchunked nodes Args: inputs (List[Node]): input nodes all_node_info (Dict): describe all node's chunk dim and fix dim start_idx (int): chunk start idx end_idx (int): chunk end idx Returns: inputs (List(Node)): new inputs inputs_dim (List): chunk dim for inputs """ inputs_dim = [] remove_inputs = [] for input_node in inputs: input_dict = {} input_node_idx = self.node_mgr.find_node_idx(input_node) for user in input_node.users.keys(): # skip non compute if is_non_compute_node(user): continue # untraced node, mostly non compute if user not in all_node_info: continue user_idx = 
self.node_mgr.find_node_idx(user) if start_idx <= user_idx <= end_idx: chunk_dim = all_node_info[user]["chunk_dim"] if chunk_dim is not None: user_source = self.trace_indice._find_source_trace_from_node(user)[chunk_dim] if input_node_idx in user_source: if get_node_shape(input_node)[user_source[input_node_idx][0]] == 1: input_dict[user_idx] = [None] else: input_dict[user_idx] = user_source[input_node_idx] else: return None, None if len(input_dict) == 0: remove_inputs.append(input_node) else: inputs_dim.append(input_dict) # remove unchunked inputs for i in remove_inputs: if i in inputs: inputs.remove(i) return inputs, inputs_dim def _get_prepose_nodes(self, all_node_info: Dict, start_idx: int, end_idx: int, chunk_info) -> List[Node]: """ get all useless nodes in chunk region and prepose them Args: all_node_info (Dict): describe all node's chunk dim and fix dim start_idx (int): chunk start idx end_idx (int): chunk end idx Returns: List[Node]: all nodes to be preposed """ # get all possible prepose nodes maybe_prepose_nodes = [] for node, node_info in all_node_info.items(): if node_info["chunk_dim"] is None: maybe_prepose_nodes.append(node) for node in self.node_mgr.get_node_slice_by_idx(start_idx, end_idx): if node not in all_node_info and node not in chunk_info["outputs"]: maybe_prepose_nodes.append(node) maybe_prepose_nodes.sort( key=lambda x: self.node_mgr.find_node_idx(x), reverse=True, ) # from last node to first node prepose_nodes = [] # set every node as root, search its args, if all legal, turn root and args as prepose nodes while len(maybe_prepose_nodes) > 0: tmp_cur_prepose_nodes = [maybe_prepose_nodes[0]] tmp_cur_related_prepose_nodes = [] prepose_flag = True # loop cur node's all arg until out of chunk while len(tmp_cur_prepose_nodes) > 0: if prepose_flag == False: break tmp_next_prepose_nodes = [] tmp_cur_related_prepose_nodes.extend(tmp_cur_prepose_nodes) for cur_prepose_node in tmp_cur_prepose_nodes: if prepose_flag == False: break for cur_prepose_node_arg in cur_prepose_node.all_input_nodes: if type(cur_prepose_node_arg) != type(cur_prepose_node): continue # out of loop if not (start_idx <= self.node_mgr.find_node_idx(cur_prepose_node_arg) < end_idx): continue # compute op in loop elif cur_prepose_node_arg in all_node_info: if all_node_info[cur_prepose_node_arg]["chunk_dim"] is None: tmp_next_prepose_nodes.append(cur_prepose_node_arg) else: prepose_flag = False break # non compute op else: tmp_next_prepose_nodes.append(cur_prepose_node_arg) tmp_cur_prepose_nodes = tmp_next_prepose_nodes if prepose_flag == False: maybe_prepose_nodes.remove(maybe_prepose_nodes[0]) continue else: for n in tmp_cur_related_prepose_nodes: if n not in prepose_nodes: prepose_nodes.append(n) if n in maybe_prepose_nodes: maybe_prepose_nodes.remove(n) # sort by index prepose_nodes.sort(key=lambda x: self.node_mgr.find_node_idx(x)) chunk_info["args"]["prepose_nodes"] = prepose_nodes def _get_non_chunk_inputs(self, chunk_info, start_idx, end_idx): # we need to log input nodes to avoid deleting them in the loop chunk_node_list = self.node_mgr.get_node_slice_by_idx(start_idx, end_idx + 1) # also need to get some prepose node's arg out of non_chunk_inputs for n in chunk_info["args"]["prepose_nodes"]: chunk_node_list.remove(n) non_chunk_inputs = find_chunk_all_input_nodes(chunk_node_list) for i in non_chunk_inputs: if i not in chunk_info["inputs"]: chunk_info["inputs_non_chunk"].append(i) return chunk_info def flow_search(self, start_idx, start_dim, end_idx, end_dim): inputs, outputs = 
find_chunk_compute_input_and_output_nodes( self.node_mgr.get_node_slice_by_idx(start_idx, end_idx + 1) ) # get every node's chunk dim and fix dim all_node_info = self._get_all_node_info(end_dim, start_idx, end_idx) if all_node_info is None: return None chunk_info = { "region": (start_idx, end_idx), "inputs": [], "inputs_non_chunk": [], "inputs_dim": [], "outputs": [self.node_mgr.get_node_by_idx(end_idx)], "outputs_non_tensor": {}, "outputs_dim": [end_dim], "node_chunk_dim": all_node_info, "args": {}, } # find chunk info for other outputs if len(find_tensor_shape_node(outputs)) > 1: chunk_info = self._get_other_output_info(outputs, start_idx, start_dim, end_idx, end_dim, chunk_info) if chunk_info is None: return None # get input nodes' chunk dim inputs, inputs_dim = self._get_input_nodes_dim(inputs, start_idx, end_idx, all_node_info) if inputs is None: return None chunk_info["inputs"] = inputs chunk_info["inputs_dim"] = inputs_dim # move useless nodes ahead of loop self._get_prepose_nodes(all_node_info, start_idx, end_idx, chunk_info) # find non chunk inputs chunk_info = self._get_non_chunk_inputs(chunk_info, start_idx, end_idx) # reassign reshape size, some size may have changed due to chunk chunk_info = self._reassign_reshape_size(chunk_info) return chunk_info def _get_other_output_info( self, outputs: List[Node], start_idx: int, start_dim: int, end_idx: int, end_dim: int, chunk_info: Dict ): start_node = self.node_mgr.get_node_by_idx(start_idx) # loop all outputs for output in outputs: output_legal = False output_idx = self.node_mgr.find_node_idx(output) # skip the origin output if output_idx == end_idx: continue # skip non tensor if get_node_shape(output) is None: # log shape tensor if len(output.meta["fwd_out"]) > 0 and isinstance(output.meta["fwd_out"][0], int): chunk_info["outputs_non_tensor"][output] = str(output.meta["fwd_out"]) continue # loop every dim of outputs, try to find a legal one for output_dim in range(len(get_node_shape(output))): if not self.check_region_start_end(start_node, start_dim, start_idx, output, output_dim, output_idx): continue new_all_node_info = self._get_all_node_info(output_dim, start_idx, output_idx) if new_all_node_info is None: continue # check node info legal if self._update_chunk_info(chunk_info, new_all_node_info, output, output_dim) == True: output_legal = True break # not legal if output_legal == False: return None return chunk_info def _update_chunk_info(self, chunk_info: Dict, new_all_node_info: Dict, output: Node, output_dim: int) -> bool: """ check if there is conflict between new node info and old chunk info. 
If not, update old chunk info """ # check if conflict overlap_flag = False for k, v in new_all_node_info.items(): if k in chunk_info["node_chunk_dim"]: overlap_flag = True if chunk_info["node_chunk_dim"][k]["chunk_dim"] != v["chunk_dim"]: return False # if no overlap, we just consider them as prepose nodes, instead of new output if overlap_flag == False: return True # update chunk info for k, v in new_all_node_info.items(): if k in chunk_info["node_chunk_dim"]: chunk_info["node_chunk_dim"][k]["fix_dim"] = list( set(chunk_info["node_chunk_dim"][k]["fix_dim"] + v["fix_dim"]) ) else: chunk_info["node_chunk_dim"][k] = v chunk_info["outputs"].append(output) chunk_info["outputs_dim"].append(output_dim) return True def _reassign_reshape_size(self, chunk_info): """ Some shape args in reshape may have changed due to chunk reassign those changed shape """ chunk_region = chunk_info["region"] reshape_size = {} chunk_shape = get_node_shape(chunk_info["outputs"][0])[chunk_info["outputs_dim"][0]] for node in self.node_mgr.get_node_slice_by_idx(chunk_region[0], chunk_region[1] + 1): if any(i == get_node_name(node) for i in ["reshape", "view"]): if node in chunk_info["args"]["prepose_nodes"]: continue if node.args[0] in chunk_info["inputs_non_chunk"]: continue reshape_args = flat_list(node.args[1:]) if ( len(reshape_args) == 1 and get_node_shape(reshape_args[0]) is None and len(reshape_args[0].meta["fwd_out"]) > 1 ): continue chunk_dim = chunk_info["node_chunk_dim"][node]["chunk_dim"] new_shape = "" for reshape_arg_dim, reshape_arg in enumerate(reshape_args): if reshape_arg_dim == chunk_dim: new_shape += "min(chunk_size, %d - chunk_idx), " % chunk_shape else: if isinstance(reshape_arg, int): new_shape += "%s, " % str(reshape_arg) else: new_shape += "%s, " % reshape_arg.name new_shape = new_shape[:-2] origin_shape = str(reshape_args)[1:-1] reshape_size[node.name] = [origin_shape, new_shape] chunk_info["reshape_size"] = reshape_size return chunk_info def check_region_start_end( self, start_node: Node, start_dim: int, start_idx: int, end_node: Node, end_dim: int, end_idx: int ) -> bool: """ check if region start and end is legal """ # dim cannot be None if get_node_shape(end_node) is None or get_node_shape(start_node) is None: return False # dim size cannot be 1 if get_node_shape(end_node)[end_dim] == 1 or get_node_shape(start_node)[start_dim] == 1: return False # must have users if len(end_node.users) == 0: return False # check index source align if not self.check_index_source(start_dim, start_node, start_idx, end_dim, end_node): return False # check index compute if not self.check_index_compute(start_idx, end_dim, end_node, end_idx): return False return True
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/autochunk/autochunk_codegen.py
colossalai/autochunk/autochunk_codegen.py
from typing import Any, Callable, Dict, Iterable, List, Tuple import torch import colossalai from colossalai.fx._compatibility import is_compatible_with_meta from colossalai.fx.codegen.activation_checkpoint_codegen import CODEGEN_AVAILABLE AUTOCHUNK_AVAILABLE = CODEGEN_AVAILABLE and is_compatible_with_meta() if AUTOCHUNK_AVAILABLE: from torch.fx.graph import ( CodeGen, PythonCode, _custom_builtins, _CustomBuiltin, _format_target, _is_from_torch, _Namespace, _origin_type_map, inplace_methods, magic_methods, ) from torch.fx.node import Argument, Node, _get_qualified_name, _type_repr, map_arg from .search_chunk import SearchChunk from .utils import delete_free_var_from_last_use, get_logger, get_node_name, get_node_shape def _gen_chunk_slice_dim(chunk_dim: int, chunk_indice_name: str, shape: List) -> str: """ Generate chunk slice string, eg. [:, :, chunk_idx_name:chunk_idx_name + chunk_size, :] Args: chunk_dim (int) chunk_indice_name (str): chunk indice name shape (List): node shape Returns: new_shape (str): return slice """ new_shape = "[" for idx, _ in enumerate(shape): if idx == chunk_dim: new_shape += "%s:%s + chunk_size" % (chunk_indice_name, chunk_indice_name) else: new_shape += ":" new_shape += ", " new_shape = new_shape[:-2] + "]" return new_shape def _gen_loop_start(chunk_input: List[Node], chunk_output: List[Node], chunk_output_dim: int, chunk_size=2) -> str: """ Generate chunk loop start eg. chunk_result = torch.empty([100, 100], dtype=input_node.dtype, device=input_node.device) chunk_size = 32 for chunk_idx in range(0, 100, 32): ...... Args: chunk_input (List[Node]): chunk input node chunk_output (Node): chunk output node chunk_output_dim (int): chunk output node chunk dim chunk_size (int): chunk size. Defaults to 2. Returns: context (str): generated str """ input_node = chunk_input[0] context = "" for i in range(len(chunk_output)): shape_str = str(list(get_node_shape(chunk_output[i]))) if get_node_name(chunk_output[i]) in ["split", "unbind"]: tensor_str = "torch.empty(%s, dtype=%s.dtype, device=%s.device), " % ( shape_str, input_node.name, input_node.name, ) tensor_str = tensor_str * len(chunk_output[i].meta["tensor_meta"]) tensor_str = "[" + tensor_str[:-2] + "]" context += "%s = %s; " % (chunk_output[i].name, tensor_str) else: context += "%s = torch.empty(%s, dtype=%s.dtype, device=%s.device); " % ( chunk_output[i].name, shape_str, input_node.name, input_node.name, ) out_shape = get_node_shape(chunk_output[0]) chunk_shape = out_shape[chunk_output_dim[0]] context += "chunk_size = %d\nfor chunk_idx in range(0, %d, chunk_size):\n" % (chunk_size, chunk_shape) return context def _gen_loop_end( chunk_inputs: List[Node], chunk_non_compute_inputs: List[Node], node_list: List[Node], chunk_outputs_idx: int, chunk_outputs_non_tensor: List[Node], search_chunk: SearchChunk, ) -> str: """ Generate chunk loop end eg. 
chunk_result[chunk_idx:chunk_idx + chunk_size] = output_node output_node = chunk_result; xx = None; xx = None Args: chunk_inputs (List[Node]): chunk input node chunk_non_compute_inputs (List[Node]): input node without chunk chunk_outputs (Node): chunk output node chunk_outputs_dim (int): chunk output node chunk dim node_list (List) Returns: context (str): generated str """ context = "chunk_size = None" # determine if its the last use for chunk input for chunk_input in chunk_inputs + chunk_non_compute_inputs: if all([search_chunk.node_mgr.find_node_idx(user) <= chunk_outputs_idx for user in chunk_input.users.keys()]): context += "; %s = None" % chunk_input.name for chunk_output_non_tensor, chunk_output_non_tensor_val in chunk_outputs_non_tensor.items(): context += "; %s = %s" % (chunk_output_non_tensor.name, chunk_output_non_tensor_val) context += "\n" return context def _replace_name(context: str, name_from: str, name_to: str) -> str: """ replace node name """ patterns = [(" ", " "), (" ", "."), (" ", ","), ("(", ")"), ("(", ","), (" ", ")"), (" ", ""), ("", " ")] for p in patterns: source = p[0] + name_from + p[1] target = p[0] + name_to + p[1] if source in context: context = context.replace(source, target) break return context def _replace_reshape_size(context: str, node_name: str, reshape_size_dict: Dict) -> str: """ replace reshape size, some may have changed due to chunk """ if node_name not in reshape_size_dict: return context context = context.replace(reshape_size_dict[node_name][0], reshape_size_dict[node_name][1]) return context def _replace_new_tensor_like_shape( search_chunk: SearchChunk, chunk_infos: List[Dict], region_idx: int, node_idx: int, node: Node, body: List[str], ) -> List[str]: """ add chunk slice for new tensor op such as ones like """ if get_node_name(node) in ["ones_like", "zeros_like", "empty_like"]: meta_node = search_chunk.node_mgr.get_node_by_idx(node_idx) chunk_dim = chunk_infos[region_idx]["node_chunk_dim"][meta_node]["chunk_dim"] if get_node_shape(meta_node)[chunk_dim] != 1: source_node = meta_node.args[0].args[0] if ( source_node not in chunk_infos[region_idx]["node_chunk_dim"] or chunk_infos[region_idx]["node_chunk_dim"][source_node]["chunk_dim"] is None ): chunk_slice = _gen_chunk_slice_dim(chunk_dim, "chunk_idx", get_node_shape(node)) body[-1] = _replace_name(body[-1], node.args[0].name, node.args[0].name + chunk_slice) return body def _replace_new_tensor_shape( search_chunk: SearchChunk, chunk_infos: List[Dict], region_idx: int, node_idx: int, node: Node, body: List[str], ) -> List[str]: """ add chunk slice for new tensor op such as ones """ if get_node_name(node) in ["ones", "zeros", "empty"]: meta_node = search_chunk.node_mgr.get_node_by_idx(node_idx) chunk_dim = chunk_infos[region_idx]["node_chunk_dim"][meta_node]["chunk_dim"] if chunk_dim is None: return if get_node_shape(meta_node)[chunk_dim] == 1: return origin_shape = str(node.args) new_shape = list(node.args) new_shape[chunk_dim] = "min(chunk_size, %d - chunk_idx)" % get_node_shape(meta_node)[chunk_dim] new_shape = str(new_shape) new_shape = new_shape.replace("'", "") body[-1] = _replace_name(body[-1], origin_shape[1:-1], new_shape[1:-1]) return body def _add_node_slice( chunk_nodes: List[Node], region_idx: int, chunk_nodes_dim: Dict, node_idx: int, body: List[str], node: Node, ) -> List[str]: """ add chunk slice for input nodes """ for chunk_node_idx, chunk_node in enumerate(chunk_nodes[region_idx]): # inputs node if isinstance(chunk_nodes_dim[region_idx][chunk_node_idx], dict): for idx, dim in 
chunk_nodes_dim[region_idx][chunk_node_idx].items(): if idx == node_idx: chunk_slice = _gen_chunk_slice_dim(dim[0], "chunk_idx", get_node_shape(chunk_node)) body[-1] = _replace_name(body[-1], chunk_node.name, chunk_node.name + chunk_slice) # outputs node else: if chunk_node.name == node.name or (chunk_node.name in [i.name for i in node.all_input_nodes]): chunk_slice = _gen_chunk_slice_dim( chunk_nodes_dim[region_idx][chunk_node_idx], "chunk_idx", get_node_shape(chunk_node) ) if get_node_name(chunk_node) in ["split", "unbind"]: split_chunk_slice = "" for i in range(len(chunk_node.meta["tensor_meta"])): split_chunk_slice += "%s[%d]%s, " % (chunk_node.name, i, chunk_slice) split_chunk_slice = split_chunk_slice[:-2] body[-1] = _replace_name(body[-1], chunk_node.name, split_chunk_slice) else: body[-1] = _replace_name(body[-1], chunk_node.name, chunk_node.name + chunk_slice) return body def emit_code_with_chunk( body: List[str], nodes: Iterable[Node], emit_node_func: Callable, delete_unused_value_func: Callable, search_chunk: SearchChunk, chunk_infos: List, eval_mem: bool = False, ): """ Emit code with chunk according to chunk_infos. It will generate a for loop in chunk regions, and replace inputs and outputs of regions with chunked variables. Args: body: forward code nodes: graph.nodes emit_node_func: function to emit node delete_unused_value_func: function to remove the unused value search_chunk: the class to search all chunks chunk_infos: store all information about all chunks. """ node_list = list(nodes) # chunk region chunk_starts = [i["region"][0] for i in chunk_infos] chunk_ends = [i["region"][1] for i in chunk_infos] # chunk inputs chunk_inputs = [i["inputs"] for i in chunk_infos] # input with chunk chunk_inputs_non_chunk = [i["inputs_non_chunk"] for i in chunk_infos] # input without chunk chunk_inputs_dim = [i["inputs_dim"] for i in chunk_infos] # input chunk dim chunk_inputs_names = [j.name for i in chunk_inputs for j in i] + [j.name for i in chunk_inputs_non_chunk for j in i] # chunk outputs chunk_outputs = [i["outputs"] for i in chunk_infos] chunk_outputs_non_tensor = [i["outputs_non_tensor"] for i in chunk_infos] chunk_outputs_dim = [i["outputs_dim"] for i in chunk_infos] node_list = search_chunk.reorder_graph.reorder_node_list(node_list) node_idx = 0 region_idx = 0 within_chunk_region = False if eval_mem: body.append("init_memory = torch.cuda.memory_allocated() / 1024**2\n") while node_idx < len(node_list): node = node_list[node_idx] # if is chunk start, generate for loop start if node_idx in chunk_starts: within_chunk_region = True region_idx = chunk_starts.index(node_idx) body.append( _gen_loop_start( chunk_inputs[region_idx], chunk_outputs[region_idx], chunk_outputs_dim[region_idx], chunk_infos[region_idx]["chunk_size"], ) ) if within_chunk_region: emit_node_func(node, body) # replace input var with chunk var body = _add_node_slice(chunk_inputs, region_idx, chunk_inputs_dim, node_idx, body, node) # replace output var with chunk var body = _add_node_slice(chunk_outputs, region_idx, chunk_outputs_dim, node_idx, body, node) # new tensor like body = _replace_new_tensor_like_shape(search_chunk, chunk_infos, region_idx, node_idx, node, body) # new tensor body = _replace_new_tensor_shape(search_chunk, chunk_infos, region_idx, node_idx, node, body) # reassign reshape size body[-1] = _replace_reshape_size(body[-1], node.name, chunk_infos[region_idx]["reshape_size"]) body[-1] = " " + body[-1] delete_unused_value_func(node, body, chunk_inputs_names) if eval_mem: body.append( " if chunk_idx 
== 0:\n print('%s', torch.cuda.max_memory_allocated() / 1024**2 - init_memory); torch.cuda.reset_peak_memory_stats()\n" % (node.name) ) else: emit_node_func(node, body) if node_idx not in chunk_inputs: delete_unused_value_func(node, body, chunk_inputs_names) if eval_mem: body.append( "print('%s', torch.cuda.max_memory_allocated() / 1024**2 - init_memory); torch.cuda.reset_peak_memory_stats()\n" % (node.name) ) # generate chunk region end if node_idx in chunk_ends: body.append( _gen_loop_end( chunk_inputs[region_idx], chunk_inputs_non_chunk[region_idx], node_list, chunk_ends[region_idx], chunk_outputs_non_tensor[region_idx], search_chunk, ) ) within_chunk_region = False node_idx += 1 if AUTOCHUNK_AVAILABLE: class AutoChunkCodeGen(CodeGen): def __init__( self, meta_graph, max_memory: int = None, print_mem: bool = False, print_progress: bool = False, eval_mem: bool = False, ) -> None: super().__init__() self.eval_mem = eval_mem # find the chunk regions self.search_chunk = SearchChunk(meta_graph, max_memory, print_mem, print_progress) self.chunk_infos = self.search_chunk.search_region() if print_progress: get_logger().info("AutoChunk start codegen") def _gen_python_code(self, nodes, root_module: str, namespace: _Namespace, verbose=None) -> PythonCode: free_vars: List[str] = [] body: List[str] = [] globals_: Dict[str, Any] = {} wrapped_fns: Dict[str, None] = {} # Wrap string in list to pass by reference maybe_return_annotation: List[str] = [""] def add_global(name_hint: str, obj: Any): """Add an obj to be tracked as a global. We call this for names that reference objects external to the Graph, like functions or types. Returns: the global name that should be used to reference 'obj' in generated source. """ if _is_from_torch(obj) and obj != torch.device: # to support registering torch.device # HACK: workaround for how torch custom ops are registered. We # can't import them like normal modules so they must retain their # fully qualified name. return _get_qualified_name(obj) # normalize the name hint to get a proper identifier global_name = namespace.create_name(name_hint, obj) if global_name in globals_: assert globals_[global_name] is obj return global_name globals_[global_name] = obj return global_name # set _custom_builtins here so that we needn't import colossalai in forward _custom_builtins["colossalai"] = _CustomBuiltin("import colossalai", colossalai) # Pre-fill the globals table with registered builtins. for name, (_, obj) in _custom_builtins.items(): add_global(name, obj) def type_repr(o: Any): if o == (): # Empty tuple is used for empty tuple type annotation Tuple[()] return "()" typename = _type_repr(o) if hasattr(o, "__origin__"): # This is a generic type, e.g. typing.List[torch.Tensor] origin_type = _origin_type_map.get(o.__origin__, o.__origin__) origin_typename = add_global(_type_repr(origin_type), origin_type) if hasattr(o, "__args__"): # Assign global names for each of the inner type variables. 
args = [type_repr(arg) for arg in o.__args__] if len(args) == 0: # Bare type, such as `typing.Tuple` with no subscript # This code-path used in Python < 3.9 return origin_typename return f'{origin_typename}[{",".join(args)}]' else: # Bare type, such as `typing.Tuple` with no subscript # This code-path used in Python 3.9+ return origin_typename # Common case: this is a regular module name like 'foo.bar.baz' return add_global(typename, o) def _format_args(args: Tuple[Argument, ...], kwargs: Dict[str, Argument]) -> str: def _get_repr(arg): # Handle NamedTuples (if it has `_fields`) via add_global. if isinstance(arg, tuple) and hasattr(arg, "_fields"): qualified_name = _get_qualified_name(type(arg)) global_name = add_global(qualified_name, type(arg)) return f"{global_name}{repr(tuple(arg))}" return repr(arg) args_s = ", ".join(_get_repr(a) for a in args) kwargs_s = ", ".join(f"{k} = {_get_repr(v)}" for k, v in kwargs.items()) if args_s and kwargs_s: return f"{args_s}, {kwargs_s}" return args_s or kwargs_s # Run through reverse nodes and record the first instance of a use # of a given node. This represents the *last* use of the node in the # execution order of the program, which we will use to free unused # values node_to_last_use: Dict[Node, Node] = {} user_to_last_uses: Dict[Node, List[Node]] = {} def register_last_uses(n: Node, user: Node): if n not in node_to_last_use: node_to_last_use[n] = user user_to_last_uses.setdefault(user, []).append(n) for node in reversed(nodes): map_arg(node.args, lambda n: register_last_uses(n, node)) map_arg(node.kwargs, lambda n: register_last_uses(n, node)) delete_free_var_from_last_use(user_to_last_uses) # NOTE: we add a variable to distinguish body and ckpt_func def delete_unused_values(user: Node, body, to_keep=[]): """ Delete values after their last use. This ensures that values that are not used in the remainder of the code are freed and the memory usage of the code is optimal. 
""" if user.op == "placeholder": return if user.op == "output": body.append("\n") return nodes_to_delete = user_to_last_uses.get(user, []) nodes_to_delete = [i for i in nodes_to_delete if i.name not in to_keep] if len(nodes_to_delete): to_delete_str = " = ".join([repr(n) for n in nodes_to_delete] + ["None"]) body.append(f"; {to_delete_str}\n") else: body.append("\n") # NOTE: we add a variable to distinguish body and ckpt_func def emit_node(node: Node, body): maybe_type_annotation = "" if node.type is None else f" : {type_repr(node.type)}" if node.op == "placeholder": assert isinstance(node.target, str) maybe_default_arg = "" if not node.args else f" = {repr(node.args[0])}" free_vars.append(f"{node.target}{maybe_type_annotation}{maybe_default_arg}") raw_name = node.target.replace("*", "") if raw_name != repr(node): body.append(f"{repr(node)} = {raw_name}\n") return elif node.op == "call_method": assert isinstance(node.target, str) body.append( f"{repr(node)}{maybe_type_annotation} = {_format_target(repr(node.args[0]), node.target)}" f"({_format_args(node.args[1:], node.kwargs)})" ) return elif node.op == "call_function": assert callable(node.target) # pretty print operators if node.target.__module__ == "_operator" and node.target.__name__ in magic_methods: assert isinstance(node.args, tuple) body.append( f"{repr(node)}{maybe_type_annotation} = " f"{magic_methods[node.target.__name__].format(*(repr(a) for a in node.args))}" ) return # pretty print inplace operators; required for jit.script to work properly # not currently supported in normal FX graphs, but generated by torchdynamo if node.target.__module__ == "_operator" and node.target.__name__ in inplace_methods: body.append( f"{inplace_methods[node.target.__name__].format(*(repr(a) for a in node.args))}; " f"{repr(node)}{maybe_type_annotation} = {repr(node.args[0])}" ) return qualified_name = _get_qualified_name(node.target) global_name = add_global(qualified_name, node.target) # special case for getattr: node.args could be 2-argument or 3-argument # 2-argument: attribute access; 3-argument: fall through to attrib function call with default value if ( global_name == "getattr" and isinstance(node.args, tuple) and isinstance(node.args[1], str) and node.args[1].isidentifier() and len(node.args) == 2 ): body.append( f"{repr(node)}{maybe_type_annotation} = {_format_target(repr(node.args[0]), node.args[1])}" ) return body.append( f"{repr(node)}{maybe_type_annotation} = {global_name}({_format_args(node.args, node.kwargs)})" ) if node.meta.get("is_wrapped", False): wrapped_fns.setdefault(global_name) return elif node.op == "call_module": assert isinstance(node.target, str) body.append( f"{repr(node)}{maybe_type_annotation} = " f"{_format_target(root_module, node.target)}({_format_args(node.args, node.kwargs)})" ) return elif node.op == "get_attr": assert isinstance(node.target, str) body.append(f"{repr(node)}{maybe_type_annotation} = {_format_target(root_module, node.target)}") return elif node.op == "output": if node.type is not None: maybe_return_annotation[0] = f" -> {type_repr(node.type)}" body.append(self.generate_output(node.args[0])) return raise NotImplementedError(f"node: {node.op} {node.target}") # Modified for activation checkpointing ckpt_func = [] # if any node has a list of labels for activation_checkpoint, we # will use nested type of activation checkpoint codegen emit_code_with_chunk( body, nodes, emit_node, delete_unused_values, self.search_chunk, self.chunk_infos, self.eval_mem ) if len(body) == 0: # If the Graph has no 
non-placeholder nodes, no lines for the body # have been emitted. To continue to have valid Python code, emit a # single pass statement body.append("pass\n") if len(wrapped_fns) > 0: wrap_name = add_global("wrap", torch.fx.wrap) wrap_stmts = "\n".join([f'{wrap_name}("{name}")' for name in wrapped_fns]) else: wrap_stmts = "" if self._body_transformer: body = self._body_transformer(body) for name, value in self.additional_globals(): add_global(name, value) # as we need colossalai.utils.checkpoint, we need to import colossalai # in forward function prologue = self.gen_fn_def(free_vars, maybe_return_annotation[0]) prologue = "".join(ckpt_func) + prologue prologue = prologue code = "".join(body) code = "\n".join(" " + line for line in code.split("\n")) fn_code = f""" {wrap_stmts} {prologue} {code}""" # print(fn_code) return PythonCode(fn_code, globals_)
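Illustrative note (not part of the archived file): a hand-written approximation of the loop skeleton that _gen_loop_start, _gen_chunk_slice_dim and _gen_loop_end emit for a single chunk region. The tensor names and the matmul body are hypothetical; only the empty-output / chunk_idx loop / slice-assignment / "chunk_size = None" pattern is taken from the code above.

# Sketch, assuming a single chunked output along dim 0 and a hypothetical body.
import torch

def chunked_forward(x: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
    # _gen_loop_start: pre-allocate the chunked output with the input's dtype/device
    linear_1 = torch.empty([x.size(0), weight.size(0)], dtype=x.dtype, device=x.device)
    chunk_size = 2
    for chunk_idx in range(0, x.size(0), chunk_size):
        # body nodes are re-emitted with chunk slices spliced into inputs and outputs
        linear_1[chunk_idx:chunk_idx + chunk_size] = x[chunk_idx:chunk_idx + chunk_size] @ weight.t()
    # _gen_loop_end: release loop-local variables after the region
    chunk_size = None
    return linear_1

out = chunked_forward(torch.randn(8, 4), torch.randn(3, 4))
print(out.shape)  # torch.Size([8, 3])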
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/autochunk/utils.py
colossalai/autochunk/utils.py
from typing import Any, Dict, List, Union from torch.fx.node import Node from colossalai.logging import get_dist_logger NON_COMPUTE_OP = ["placeholder", "get_attr", "output"] NON_COMPUTE_NAME = ["getattr", "eq", "_assert_is_none", "_assert", "finfo", "size"] logger = get_dist_logger() class NodeMgr(object): def __init__(self, nodes_list: List[Node]) -> None: self._node_list = nodes_list self._node_dict = {} self._set_node_dict() def _set_node_dict(self) -> None: """ create a dict {node_name: node_idx} """ self._node_dict.clear() for idx, node in enumerate(self._node_list): self._node_dict[node.name] = idx def find_node_idx(self, node: Node) -> int: """ find node's index """ return self._node_dict[node.name] def find_node_idx_by_name(self, node_name: str) -> int: """ find node's index """ return self._node_dict[node_name] def get_node_by_idx(self, idx: int) -> Node: """ get a node by index """ return self._node_list[idx] def get_node_slice_by_idx(self, start: int, end: int) -> List[Node]: """ get a slice of node by index """ return self._node_list[start:end] def get_node_list(self) -> List: """ get full node list """ return self._node_list def update_node_list(self, node_list: List) -> None: """ update node list, reset node dict """ self._node_list = node_list self._set_node_dict() def get_logger() -> Any: return logger def flat_list(inputs: Any) -> List: """ flat a list by recursion """ if not (isinstance(inputs, list) or isinstance(inputs, set) or isinstance(inputs, tuple)): return [inputs] res = [] for i in inputs: if isinstance(i, list) or isinstance(i, set) or isinstance(i, tuple): res.extend(flat_list(i)) elif isinstance(i, dict): res.extend(flat_list(list(i.keys()))) else: res.append(i) return res def find_first_tensor_arg(node: Node) -> Node: """ Find the first input tensor arg for a node """ for arg in node.args: if type(arg) == type(node): return arg raise RuntimeError() def is_non_compute_node(node: Node) -> bool: if any(i == node.op for i in NON_COMPUTE_OP) or any(i == get_node_name(node) for i in NON_COMPUTE_NAME): return True if "getitem" in node.name: if get_node_shape(node) is not None: return False node_args = flat_list(node.args[1:]) for node_arg in node_args: if any(i == str(node_arg) for i in ["None", "Ellipsis"]): return False if "slice" in str(node_arg): return False return True return False def get_node_shape(node: Node) -> Any: """ return node data shape """ if get_node_name(node) in ["split", "unbind"]: return node.meta["tensor_meta"][0].shape if hasattr(node.meta["tensor_meta"], "shape"): return node.meta["tensor_meta"].shape return None def is_non_memory_node(node: Node) -> bool: if "getitem" in node.name: return True if "output" in node.op: return True return is_non_compute_node(node) def is_non_compute_node_except_placeholder(node: Node) -> bool: if "placeholder" in node.op: return False return is_non_compute_node(node) def is_non_compute_node_except_placeholder_output(node: Node) -> bool: if "output" in node.op: return False return is_non_compute_node_except_placeholder(node) def delete_free_var_from_last_use(user_to_last_uses: Dict) -> None: for key, value in user_to_last_uses.items(): for n in value: if n.op == "placeholder": user_to_last_uses[key].remove(n) def find_chunk_all_input_nodes(nodes: List[Node]) -> List: """ Find non-compute input and output node names. 
input nodes are nodes used in the list output nodes are nodes will use nodes in the list """ input_nodes = [] for node in nodes: for input_node in node._input_nodes.keys(): if input_node not in nodes and input_node not in input_nodes: input_nodes.append(input_node) return input_nodes def find_chunk_compute_input_and_output_nodes(nodes: List[Node]) -> Union[List, List]: """ Find non-compute input and output node names. input nodes are nodes used in the list output nodes are nodes will use nodes in the list """ input_nodes = [] output_nodes = [] # if a node has an input node which is not in the node list # we treat that input node as the input of the checkpoint function for node in nodes: for input_node in node._input_nodes.keys(): if ( input_node not in nodes and input_node not in input_nodes and not is_non_compute_node_except_placeholder(input_node) ): input_nodes.append(input_node) # if a node has a user node which is not in the node list # we treat that user node as the node receiving the current node output for node in nodes: for output_node in node.users.keys(): if ( output_node not in nodes and node not in output_nodes and not is_non_compute_node_except_placeholder_output(output_node) ): output_nodes.append(node) return input_nodes, output_nodes def get_module_node_name(node: Node) -> str: """ get module class name """ node_targets = node.target.split(".") module = node.graph.owning_module for i in node_targets: module = getattr(module, i) module_name = str(module.__class__).split(".")[-1][:-2] module_name = module_name.lower() return module_name def get_node_name(node: Node) -> str: """ get node name """ node_name = node.name if "_" in node_name: for i in range(len(node_name) - 1, -1, -1): if node_name[i] == "_": node_name = node_name[:i] break elif node_name[i] in ["1", "2", "3", "4", "5", "6", "7", "8", "9", "0"]: continue else: break return node_name def find_tensor_node(node_list: List[Node]) -> List[Node]: """ find tensor nodes from a node list """ out = [] for node in node_list: if get_node_shape(node) is not None: out.append(node) return out def find_tensor_shape_node(node_list: List[Node]) -> List[Node]: """ find tensor and shape nodes from a node list """ out = [] for node in node_list: if get_node_shape(node) is not None: out.append(node) elif ( len(node.meta["fwd_out"]) > 0 and isinstance(node.meta["fwd_out"], list) and isinstance(node.meta["fwd_out"][0], int) ): out.append(node) return out
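Illustrative note (not part of the archived file): get_node_name above strips the "_<N>" suffix that torch.fx appends to repeated op names. The standalone re-implementation below operates on plain strings so it can be run without fx Nodes; it mirrors the digit-and-underscore walk in the original.

# Re-derivation of the suffix stripping in get_node_name, for illustration only.
def base_op_name(node_name: str) -> str:
    if "_" in node_name:
        for i in range(len(node_name) - 1, -1, -1):
            if node_name[i] == "_":
                return node_name[:i]      # drop the trailing "_<digits>"
            if node_name[i].isdigit():
                continue                  # keep walking back over digits
            break                         # non-digit before an underscore: keep name
    return node_name

assert base_op_name("view_12") == "view"
assert base_op_name("add_1") == "add"
assert base_op_name("layer_norm") == "layer_norm"  # no trailing digits: unchanged
print("ok")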
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/autochunk/reorder_graph.py
colossalai/autochunk/reorder_graph.py
from .trace_indice import TraceIndice from .utils import NodeMgr class ReorderGraph(object): """ Reorder node list and indice trace list """ def __init__(self, trace_indice: TraceIndice, node_mgr: NodeMgr) -> None: self.trace_indice = trace_indice self.node_mgr = node_mgr self.all_reorder_map = {i: i for i in range(len(self.node_mgr.get_node_list()))} def _get_reorder_map(self, chunk_info): reorder_map = {i: i for i in range(len(self.node_mgr.get_node_list()))} chunk_region_start = chunk_info["region"][0] chunk_region_end = chunk_info["region"][1] chunk_prepose_nodes = chunk_info["args"]["prepose_nodes"] chunk_prepose_nodes_idx = [self.node_mgr.find_node_idx(i) for i in chunk_prepose_nodes] # put prepose nodes ahead for idx, n in enumerate(chunk_prepose_nodes): n_idx = chunk_prepose_nodes_idx[idx] reorder_map[n_idx] = chunk_region_start + idx # put other nodes after prepose nodes for n in self.node_mgr.get_node_slice_by_idx(chunk_region_start, chunk_region_end + 1): if n in chunk_prepose_nodes: continue n_idx = self.node_mgr.find_node_idx(n) pos = sum([n_idx < i for i in chunk_prepose_nodes_idx]) reorder_map[n_idx] = n_idx + pos return reorder_map def _reorder_chunk_info(self, chunk_info, reorder_map): # update chunk info chunk_info["region"] = ( chunk_info["region"][0] + len(chunk_info["args"]["prepose_nodes"]), chunk_info["region"][1], ) new_inputs_dim = [] for _, input_dim in enumerate(chunk_info["inputs_dim"]): new_input_dim = {} for k, v in input_dim.items(): new_input_dim[reorder_map[k]] = v new_inputs_dim.append(new_input_dim) chunk_info["inputs_dim"] = new_inputs_dim return chunk_info def _update_all_reorder_map(self, reorder_map): for origin_idx, map_idx in self.all_reorder_map.items(): self.all_reorder_map[origin_idx] = reorder_map[map_idx] def _reorder_self_node_list(self, reorder_map): new_node_list = [None for _ in range(len(self.node_mgr.get_node_list()))] for old_idx, new_idx in reorder_map.items(): new_node_list[new_idx] = self.node_mgr.get_node_by_idx(old_idx) self.node_mgr.update_node_list(new_node_list) def _reorder_idx_trace(self, reorder_map): # reorder list new_idx_trace_list = [None for _ in range(len(self.trace_indice.indice_trace_list))] for old_idx, new_idx in reorder_map.items(): new_idx_trace_list[new_idx] = self.trace_indice.indice_trace_list[old_idx] self.trace_indice.indice_trace_list = new_idx_trace_list # update compute for idx_trace in self.trace_indice.indice_trace_list: compute = idx_trace["compute"] for dim_compute in compute: for idx, i in enumerate(dim_compute): dim_compute[idx] = reorder_map[i] # update source for idx_trace in self.trace_indice.indice_trace_list: source = idx_trace["source"] for dim_idx, dim_source in enumerate(source): new_dim_source = {} for k, v in dim_source.items(): new_dim_source[reorder_map[k]] = v source[dim_idx] = new_dim_source def reorder_all(self, chunk_info): if chunk_info is None: return chunk_info if len(chunk_info["args"]["prepose_nodes"]) == 0: return chunk_info reorder_map = self._get_reorder_map(chunk_info) self._update_all_reorder_map(reorder_map) self._reorder_idx_trace(reorder_map) self._reorder_self_node_list(reorder_map) chunk_info = self._reorder_chunk_info(chunk_info, reorder_map) return chunk_info def reorder_node_list(self, node_list): new_node_list = [None for _ in range(len(node_list))] for old_idx, new_idx in self.all_reorder_map.items(): new_node_list[new_idx] = node_list[old_idx] return new_node_list def tmp_reorder(self, node_list, chunk_info): if len(chunk_info["args"]["prepose_nodes"]) == 0: return 
node_list, chunk_info reorder_map = self._get_reorder_map(chunk_info) # new tmp node list new_node_list = [None for _ in range(len(node_list))] for old_idx, new_idx in reorder_map.items(): new_node_list[new_idx] = node_list[old_idx] chunk_info = self._reorder_chunk_info(chunk_info, reorder_map) return new_node_list, chunk_info
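Illustrative note (not part of the archived file): reorder_node_list above applies a reorder map of old index -> new index to a node list. A minimal sketch with a hypothetical map that hoists one "prepose" node to the front of a region spanning indices 2..4:

# Strings stand in for torch.fx Nodes; the map values are hypothetical.
node_list = ["a", "b", "c", "d", "prepose"]
reorder_map = {0: 0, 1: 1, 2: 3, 3: 4, 4: 2}
new_node_list = [None] * len(node_list)
for old_idx, new_idx in reorder_map.items():
    new_node_list[new_idx] = node_list[old_idx]
print(new_node_list)  # ['a', 'b', 'prepose', 'c', 'd']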
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/autochunk/trace_indice.py
colossalai/autochunk/trace_indice.py
import copy from typing import Dict, List from torch.fx.node import Node from .utils import NodeMgr, find_first_tensor_arg, flat_list, get_module_node_name, get_node_name, get_node_shape class TraceIndice(object): """ Trace all indice information for every node. Indice is a logical concept. Equal dims can been treated as one indice. eg. dim(x1) = [a, b, c] dim(x2) = [d, e, f] and we have x3 = x1 * x2. then a=d, b=e, c=f, due to the broadcast property, dim(x1)=dim(x2)=dim(x3)=[a, b, c] This class will record every node's dims' indice, compute and source. Attributes: node_list (List) indice_trace_list (List): [{"indice": [...], "compute": [...], "source": [...]}, {...}] indice_view_list (Dict): not used for now indice_count (int): record indice number Args: node_list (List) """ def __init__(self, node_mgr: NodeMgr) -> None: self.node_mgr = node_mgr self.indice_trace_list = self._init_indice_trace_list() self.indice_view_list = {} self.indice_count = -1 self.active_node_list = [] def _init_indice_trace_list(self) -> List: indice_trace_list = [] for n in self.node_mgr.get_node_list(): if get_node_shape(n) != None: cur_trace = { "indice": [None for _ in range(len(get_node_shape(n)))], "compute": [[] for _ in range(len(get_node_shape(n)))], "source": [{} for _ in range(len(get_node_shape(n)))], } else: cur_trace = {"indice": [], "compute": [], "source": []} indice_trace_list.append(cur_trace) return indice_trace_list def set_active_nodes(self, active_node_list: List) -> None: self.active_node_list = active_node_list def _add_indice(self) -> int: """ Update the count and return it. To record the idx number. Returns: indice_count: int """ self.indice_count += 1 return self.indice_count def _del_dim(self, idx: int, dim_idx: int) -> None: """ delete a dim for indice, compute and source """ self.indice_trace_list[idx]["indice"].pop(dim_idx) self.indice_trace_list[idx]["compute"].pop(dim_idx) self.indice_trace_list[idx]["source"].pop(dim_idx) def _add_dim(self, node_idx: int, dim_idx: int) -> None: """ add a dim for indice, compute and source """ # need to remap if dim_idx < 0, e.g. 
-1 if dim_idx < 0: dim_idx = list(range(len(self.indice_trace_list[node_idx]["indice"]) + 1))[dim_idx] self.indice_trace_list[node_idx]["indice"].insert(dim_idx, self._add_indice()) self.indice_trace_list[node_idx]["compute"].insert(dim_idx, []) self.indice_trace_list[node_idx]["source"].insert(dim_idx, {}) def _add_source( self, node_from: Node, node_from_dim: int, node_to: Node, node_to_dim: int, init=False, ) -> None: node_from_dim = self._transform_indice(node_from, node_from_dim) node_from_trace_source = self._find_source_trace_from_node(node_from) node_to_dim = self._transform_indice(node_to, node_to_dim) node_to_trace_source = self._find_source_trace_from_node(node_to) node_from_idx = self.node_mgr.find_node_idx(node_from) if init: node_to_trace_source[node_to_dim] = {} # add dim to cur new source if node_from_idx not in node_to_trace_source[node_to_dim]: node_to_trace_source[node_to_dim][node_from_idx] = [node_from_dim] else: if node_from_dim not in node_to_trace_source[node_to_dim][node_from_idx]: node_to_trace_source[node_to_dim][node_from_idx].append(node_from_dim) # update inputs source for node_idx, node_dim in node_from_trace_source[node_from_dim].items(): if node_idx not in node_to_trace_source[node_to_dim]: node_to_trace_source[node_to_dim][node_idx] = copy.deepcopy(node_dim) else: for d in node_dim: if d not in node_to_trace_source[node_to_dim][node_idx]: node_to_trace_source[node_to_dim][node_idx].append(d) def _transform_indice(self, node: Node, node_dim: int) -> int: node_idx = self._find_indice_trace_from_node(node) dims = list(range(len(node_idx))) return dims[node_dim] def _inherit_indice( self, node_from: Node, node_from_dim: int, node_to: Node, node_to_dim: int, init: bool = True, ) -> None: """ node_to's node_to_dim inherit node_from's node_from_dim by indice, compute and source """ node_from_dim = self._transform_indice(node_from, node_from_dim) node_to_dim = self._transform_indice(node_to, node_to_dim) node_from_trace = self._find_trace_from_node(node_from) node_to_trace = self._find_trace_from_node(node_to) if init: node_to_trace["indice"][node_to_dim] = node_from_trace["indice"][node_from_dim] node_to_trace["compute"][node_to_dim] = copy.deepcopy(node_from_trace["compute"][node_from_dim]) else: for j in node_from_trace["compute"][node_from_dim]: if j not in node_to_trace["compute"][node_to_dim]: node_to_trace["compute"][node_to_dim].append(j) self._add_source(node_from, node_from_dim, node_to, node_to_dim, init) def _inherit_all_indice(self, node_from: Node, node_to: Node) -> None: """ inherit all dims with init """ # find indice just for assert length node_from_indice = self._find_indice_trace_from_node(node_from) node_to_indice = self._find_indice_trace_from_node(node_to) assert len(node_from_indice) == len(node_to_indice) for i in range(len(node_from_indice)): self._inherit_indice(node_from, i, node_to, i, init=True) def _inherit_more_indice_from_node_with_exclude(self, node_from: Node, node_to: Node, exclude: List = None) -> None: """ inherit indice from node without init """ if exclude == None: exclude = [] else: exclude = [self._transform_indice(node_to, i) for i in exclude] node_from_compute = self._find_compute_trace_from_node(node_from) node_to_compute = self._find_compute_trace_from_node(node_to) # assert len(node_from_compute) == len(node_to_compute) for i in range(-1, -min(len(node_from_compute), len(node_to_compute)) - 1, -1): if self._transform_indice(node_to, i) in exclude: continue self._inherit_indice(node_from, i, node_to, i, init=False) def 
_mark_computation(self, node: Node, idx: int, dim: int) -> None: """ Mark some dims of node as computed. Args: node (node) idx (int): node index dim (list or int): dims to be marked as computed """ if isinstance(dim, int): dim = [dim] dims = list(range(len(get_node_shape(node)))) for d in dim: cur_dim = dims[d] if idx not in self.indice_trace_list[idx]["compute"][cur_dim]: self.indice_trace_list[idx]["compute"][cur_dim].append(idx) def _find_trace_from_node(self, node: Node) -> Dict: """ Find node idx and compute trace by the node. Args: node (node) Returns: idx (list): idx of the node compute (list): computed idx of the node. """ node_idx = self.node_mgr.find_node_idx(node) node_dict = self.indice_trace_list[node_idx] return node_dict def _find_source_trace_from_node(self, node: Node) -> List: """ Find node source trace by the node. Args: node (node) Returns: idx (list): idx of the node compute (list): computed idx of the node. """ node_idx = self.node_mgr.find_node_idx(node) node_dict = self.indice_trace_list[node_idx] return node_dict["source"] def _find_indice_trace_from_node(self, node) -> List: """ Find node idx trace by the node. Args: node (node) Returns: idx (list): idx of the node """ node_idx = self.node_mgr.find_node_idx(node) return self.indice_trace_list[node_idx]["indice"] def _find_compute_trace_from_node(self, node: Node) -> List: """ Find node compute trace by the node. Args: node (node) Returns: compute (list): computed idx of the node. """ node_idx = self.node_mgr.find_node_idx(node) return self.indice_trace_list[node_idx]["compute"] def _assign_indice_as_input(self, node: Node, node_idx: int, input_node=None) -> None: """ Assign node's trace as its input node. Args: node (node) node_idx (int) """ if input_node == None: input_node = find_first_tensor_arg(node) self._inherit_all_indice(input_node, node) def _assign_all_indice(self, node: Node, node_idx: int) -> None: """ Add new indice for all node's dims. Args: node (node) node_idx (int) """ shape = node.meta["tensor_meta"].shape if shape is None: return new_trace = [] for _ in shape: new_trace.append(self._add_indice()) self.indice_trace_list[node_idx]["indice"] = new_trace def _assign_transpose_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for transpose op. 1. swap input's dim according to transpose args 2. inherit input's computation Args: node (node) node_idx (int) """ input_node = node.args[0] tranpose_dim = node.args[1:] self._assign_indice_as_input(node, node_idx, input_node) self._inherit_indice(input_node, tranpose_dim[1], node, tranpose_dim[0]) self._inherit_indice(input_node, tranpose_dim[0], node, tranpose_dim[1]) def _assign_permute_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for permute op. 1. swap input's dim according to permute args 2. inherit input's computation Args: node (node) node_idx (int) """ permute_dim = flat_list(node.args[1:]) input_node = node.args[0] self._assign_indice_as_input(node, node_idx, input_node) for idx, d in enumerate(permute_dim): self._inherit_indice(input_node, d, node, idx) def _assign_linear_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for linear op. 1. copy trace from input node and change last indice according to weight 2. mark equal for input node last indice, weight first dim and bias dim. 3. inherit input's computation, mark computation for last dim. 
Args: node (node) node_idx (int) """ self._assign_indice_as_input(node, node_idx) if len(node.args) >= 2: weight = node.args[1] self._inherit_indice(weight, 1, node, -1) else: self._del_dim(node_idx, -1) self._add_dim(node_idx, -1) self._mark_computation(node, node_idx, [-1]) def _assign_addmm_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for addmm op. Args: node (node) node_idx (int) """ bias, input_node, weight = node.args assert len(get_node_shape(bias)) == 1 and len(get_node_shape(weight)) == 2 self._assign_indice_as_input(node, node_idx, input_node) self._inherit_indice(weight, 1, node, -1) self._inherit_more_indice_from_node_with_exclude(bias, node) self._mark_computation(node, node_idx, [-1]) def _assign_baddbmm_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for baddbmm(batch add and batch matmul) op. add, matmul_left, matmul_right = args out = add + (matmul_left x matmul_right) Args: node (node) node_idx (int) """ add, matmul_left, matmul_right = node.args assert get_node_shape(add) == get_node_shape(node) assert len(get_node_shape(matmul_left)) == len(get_node_shape(matmul_right)) self._assign_indice_as_input(node, node_idx, matmul_left) # matmul self._inherit_indice(matmul_right, -1, node, -1) self._inherit_more_indice_from_node_with_exclude(matmul_right, node, [-2, -1]) self._mark_computation(node, node_idx, [-1]) # add self._inherit_more_indice_from_node_with_exclude(add, node) def _assign_matmul_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for matmul op. 1. copy trace from matmul_left and change last indice according to matmul_right. (assert they have same length) 2. mark equal for input matmul_left -1 indice and matmul_right -2 dim. 3. inherit matmul_left and matmul_right computation, mark computation for last dim. Args: node (node) node_idx (int) """ matmul_left, matmul_right = node.args assert len(get_node_shape(matmul_left)) == len(get_node_shape(matmul_right)) self._assign_indice_as_input(node, node_idx, matmul_left) self._inherit_indice(matmul_right, -1, node, -1) self._inherit_more_indice_from_node_with_exclude(matmul_right, node, [-1, -2]) self._mark_computation(node, node_idx, [-1]) def _assign_conv2d_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for conv2d op. Args: node (node) node_idx (int) """ # get conv module node_targets = node.target.split(".") conv_module = node.graph.owning_module for i in node_targets: conv_module = getattr(conv_module, i) assert conv_module.dilation == (1, 1), "dilation for conv2d not implemented" # get conv input assert len(node.args) == 1 input_node = node.args[0] assert len(get_node_shape(input_node)) == 4 # assign index self._assign_indice_as_input(node, node_idx, input_node) self._del_dim(node_idx, 1) self._add_dim(node_idx, 1) self._mark_computation(node, node_idx, [1, 2, 3]) def _assign_interpolate_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for interpolate op. Args: node (node) node_idx (int) """ # get conv input assert node.kwargs["size"] is None assert len(get_node_shape(node)) == 4 # assign index self._assign_indice_as_input(node, node_idx) self._mark_computation(node, node_idx, [-1, -2]) def _assign_layernorm_indice(self, node, idx): """ Assign indice for layernorm op. 1. assign indice as input node 2. inherit computation and mark last 2 dims as computed. 
Args: node (node) node_idx (int) """ self._assign_indice_as_input(node, idx) self._mark_computation(node, idx, [-1]) def _assign_groupnorm_indice(self, node, idx): """ Assign indice for groupnorm op. Args: node (node) node_idx (int) """ assert len(get_node_shape(node)) == 4 self._assign_indice_as_input(node, idx) self._mark_computation(node, idx, [-1, -2, -3]) def _assign_elementwise_indice(self, node, idx): """ Assign indice for element-wise op (eg. relu sigmoid add mul). 1. assign indice as input node 2. inherit computation from all input nodes. Args: node (node) node_idx (int) """ self._assign_indice_as_input(node, idx) nodes_in = [] for node_in in node.args: if type(node_in) == type(node): nodes_in.append(node_in) self._inherit_more_indice_from_node_with_exclude(node_in, node) def _assign_no_change_indice(self, node, idx): self._assign_indice_as_input(node, idx) for node_in in node.args: if type(node_in) == type(node): self._inherit_more_indice_from_node_with_exclude(node_in, node) def _assign_einsum_indice(self, node, idx): """ Assign indice for einsum op. Args: node (node) node_idx (int) """ patterns = node.args[0] input_nodes = node.args[1:] patterns = patterns.replace(" ", "") left, right = patterns.split("->") left = left.split(",") if "..." in right: replace_list = "!@#$%^&*" target_len = len(get_node_shape(node)) add_len = target_len - len(right) + 3 replace_str = replace_list[:add_len] right = right.replace("...", replace_str) for ll in range(len(left)): left[ll] = left[ll].replace("...", replace_str) all_index = [] for i in left: for c in i: all_index.append(c) all_index = set(all_index) for right_idx, right_indice in enumerate(right): for left_idx, left_str in enumerate(left): if right_indice in left_str: source_idx = left_str.index(right_indice) self._inherit_indice(input_nodes[left_idx], source_idx, node, right_idx) def _assign_softmax_indice(self, node, idx): """ Assign indice for softmax op. 1. assign indice as input node 2. inherit computation and mark softmax dim as computed. Args: node (node) node_idx (int) """ self._assign_indice_as_input(node, idx) self._mark_computation(node, idx, [node.kwargs["dim"]]) def _assign_split_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for split op. Args: node (node) node_idx (int) """ self._assign_indice_as_input(node, node_idx) dim_idx = node.kwargs["dim"] self._del_dim(node_idx, dim_idx) self._add_dim(node_idx, dim_idx) def _assign_unsqueeze_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for unsqueeze op. 1. assign new indice for unsqueeze dim Args: node (node) node_idx (int) """ self._del_dim(node_idx, -1) self._assign_indice_as_input(node, node_idx) dim_idx = node.args[1] # unsqueeze(-1) = unsqueeze(shape_num + 1) if dim_idx < 0: dim_idx = list(range(len(get_node_shape(node))))[dim_idx] self._add_dim(node_idx, dim_idx) def _assign_cat_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for cat op. Args: node (node) node_idx (int) """ nodes_in = flat_list(node.args[0]) self._assign_indice_as_input(node, node_idx, input_node=nodes_in[0]) for n in nodes_in[1:]: self._inherit_more_indice_from_node_with_exclude(n, node) cat_dim = node.kwargs["dim"] self._del_dim(node_idx, cat_dim) self._add_dim(node_idx, cat_dim) def _assign_sum_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for sum op. 
Args: node (node) node_idx (int) """ nodes_in = flat_list(node.args[0]) self._add_dim(node_idx, 0) self._assign_indice_as_input(node, node_idx, input_node=nodes_in[0]) for n in nodes_in[1:]: self._inherit_more_indice_from_node_with_exclude(n, node) cat_dim = node.kwargs["dim"] self._del_dim(node_idx, cat_dim) def _assign_flatten_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for flatten op. Args: node (node) node_idx (int) """ nodes_in = node.args[0] nodes_in_shape = get_node_shape(nodes_in) flatten_start_dim = node.args[1] flatten_dim_num = len(nodes_in_shape) - flatten_start_dim - 1 assert flatten_dim_num > 0 for _ in range(flatten_dim_num): self._add_dim(node_idx, 0) self._assign_indice_as_input(node, node_idx, nodes_in) for _ in range(flatten_dim_num + 1): self._del_dim(node_idx, -1) self._add_dim(node_idx, -1) def _assign_expand_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for expand op. Args: node (node) node_idx (int) """ expand_shape = node.args[1:] node_in_shape = get_node_shape(node.args[0]) assert len(expand_shape) == len(node_in_shape) self._assign_indice_as_input(node, node_idx) for i in range(len(node_in_shape)): if expand_shape[i] == node_in_shape[i] or expand_shape[i] == -1: continue elif expand_shape[i] > node_in_shape[i]: self._del_dim(node_idx, i) self._add_dim(node_idx, i) else: raise RuntimeError() def _assign_unbind_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for unbind op. Args: node (node) node_idx (int) """ unbind_dim = node.args[1] self._add_dim(node_idx, unbind_dim) self._assign_indice_as_input(node, node_idx) self._del_dim(node_idx, unbind_dim) def _assign_embedding_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for embedding op. Args: node (node) node_idx (int) """ self._del_dim(node_idx, -1) self._assign_indice_as_input(node, node_idx) self._add_dim(node_idx, -1) def _assign_getitem_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for getitem. 
getitem can act like slice sometimes Args: node (node) node_idx (int) """ node_args = flat_list(node.args[1:]) # deal with split if get_node_name(node.args[0]) == "split": self._assign_indice_as_input(node, node_idx) self._del_dim(node_idx, node.args[0].kwargs["dim"]) self._add_dim(node_idx, node.args[0].kwargs["dim"]) return # skip non tensor if get_node_shape(node) is None: return # find if slice flag = False for node_arg in node_args: node_arg_str = str(node_arg) if any(i == node_arg_str for i in ["None", "Ellipsis"]): flag = True break if "slice" in node_arg_str: flag = True break if flag == False: return # node args should be like [Ellipsis, slice(start, step, end), None] node_shape = get_node_shape(node) origin_idx_count = 0 new_idx_count = 0 new_dim_num = sum([1 if str(i) == "None" else 0 for i in node_args]) for _ in range(new_dim_num): self._del_dim(node_idx, 0) delete_dim_num = sum([1 if str(i) == "0" else 0 for i in node_args]) for _ in range(delete_dim_num): self._add_dim(node_idx, 0) self._assign_indice_as_input(node, node_idx) for _, node_arg in enumerate(node_args): node_arg_str = str(node_arg) # Ellipsis means [..., ] if "Ellipsis" == node_arg_str: shape_gap = len(node_shape) - len(node_args) + 1 origin_idx_count += shape_gap new_idx_count += shape_gap # slice(None, None, None) means all indexes elif "slice" in node_arg_str: if "slice(None, None, None)" != node_arg_str: self._del_dim(node_idx, new_idx_count) self._add_dim(node_idx, new_idx_count) origin_idx_count += 1 new_idx_count += 1 # None means a new dim elif "None" == node_arg_str: self._add_dim(node_idx, new_idx_count) new_idx_count += 1 elif "0" == node_arg_str: self._del_dim(node_idx, new_idx_count) origin_idx_count += 1 else: raise NotImplementedError() def _assign_view_reshape_indice(self, node: Node, node_idx: int) -> None: """ Assign indice for view and reshape op. 1. get origin shape and target shape by meta info. 2. compute the real value of -1 in target shape. 3. determine changed dim, and assign indice for generated dim. 4. log changed dim and generated dim for restore 5. inherit computation. 6. look into view list to see whether the view is associated with other, if so assign equal dim according to previous view. 
Args: node (node) node_idx (int) """ # get data, turn into number origin_node = node.args[0] origin_shape = origin_node.meta["tensor_meta"].shape target_shape = [] unflated_args = flat_list(node.args) for i in range(1, len(unflated_args)): if isinstance(unflated_args[i], int): target_shape.append(unflated_args[i]) else: target_shape.extend(unflated_args[i].meta["fwd_out"]) # compute the value of -1 if -1 in target_shape: origin_product = 1 for i in origin_shape: origin_product *= i target_product = -1 for i in target_shape: target_product *= i shape_idx = target_shape.index(-1) target_shape[shape_idx] = origin_product // target_product # find same dim dim_to_same_dim = [] dim_from_same_dim = [] for i in range(len(origin_shape)): if origin_shape[i] == target_shape[i]: dim_to_same_dim.append(i) dim_from_same_dim.append(i) else: break for i in range(-1, -len(origin_shape), -1): if origin_shape[i] == target_shape[i]: dim_to_same_dim.append(len(target_shape) + i) dim_from_same_dim.append(len(origin_shape) + i) else: break dim_from = list(set(range(len(origin_shape))) - set(dim_from_same_dim)) dim_to = list(set(range(len(target_shape))) - set(dim_to_same_dim)) assert len(dim_from) == 1 or len(dim_to) == 1 or len(dim_from) == len(dim_to) dim_diff = len(dim_from) - len(dim_to) if dim_diff > 0: # dim merge for i in range(dim_diff): self._add_dim(node_idx, -1) elif dim_diff < 0: # dim expand for i in range(-dim_diff): self._del_dim(node_idx, -1) # get new indice origin_trace = self._find_indice_trace_from_node(origin_node) self._assign_indice_as_input(node, node_idx, origin_node) dim_from.reverse() for i in dim_from: self._del_dim(node_idx, i) for i in dim_to: self._add_dim(node_idx, i) dim_from.reverse() # inherit indice from current node if len(dim_from) != 0 and len(dim_to) != 0: if dim_diff == 1: if origin_shape[dim_from[0]] == 1: self._inherit_indice(origin_node, dim_from[1], node, dim_to[0], init=False) elif origin_shape[dim_from[1]] == 1: self._inherit_indice(origin_node, dim_from[0], node, dim_to[0], init=False) elif dim_diff == -1: if target_shape[dim_to[0]] == 1: self._inherit_indice(origin_node, dim_from[0], node, dim_to[1], init=False) elif target_shape[dim_to[1]] == 1: self._inherit_indice(origin_node, dim_from[0], node, dim_to[0], init=False) # log view, not used now view_dict = { "idx_from": [origin_trace[i] for i in dim_from], "dim_from": dim_from, "idx_to": [self.indice_trace_list[node_idx]["indice"][i] for i in dim_to], "dim_to": dim_to, } self.indice_view_list[node] = view_dict def _clear_trace(self, node_idx: int) -> None: """ clear too far trace to speed up computation """ trace_barrier = max(node_idx - 100, 0) active_nodes = self.active_node_list[trace_barrier] active_nodes = [self.node_mgr.find_node_idx(i) for i in active_nodes.keys()] trace = self.indice_trace_list[node_idx] # clear compute for dim_compute in trace["compute"]: for i in range(len(dim_compute) - 1, -1, -1): if dim_compute[i] < trace_barrier and dim_compute[i] not in active_nodes: dim_compute.pop(i) continue # clear source for dim_source in trace["source"]: for k in list(dim_source.keys()): if k < trace_barrier and k not in active_nodes: dim_source.pop(k) def trace_indice(self) -> None: for idx, node in enumerate(self.node_mgr.get_node_list()): node_name = get_node_name(node) if node.op == "placeholder": self._assign_all_indice(node, idx) elif node.op == "call_method": if "transpose" == node_name: self._assign_transpose_indice(node, idx) elif "permute" == node_name: self._assign_permute_indice(node, idx) elif 
"view" == node_name or "reshape" == node_name: self._assign_view_reshape_indice(node, idx) elif "unsqueeze" == node_name: self._assign_unsqueeze_indice(node, idx) elif "split" == node_name: self._assign_split_indice(node, idx) elif any(i == node_name for i in ["to", "contiguous", "clone", "type", "float"]): self._assign_no_change_indice(node, idx) elif "new_ones" == node_name: self._assign_all_indice(node, idx) elif "flatten" == node_name: self._assign_flatten_indice(node, idx) elif "expand" == node_name: self._assign_expand_indice(node, idx) elif "unbind" == node_name: self._assign_unbind_indice(node, idx) elif "softmax" == node_name: self._assign_softmax_indice(node, idx) elif any(i == node_name for i in ["size"]): continue else: raise NotImplementedError(node_name, "method not implemented yet!") elif node.op == "call_function": if "linear" == node_name: self._assign_linear_indice(node, idx) elif "cat" == node_name: self._assign_cat_indice(node, idx) elif any(n == node_name for n in ["matmul", "bmm"]): self._assign_matmul_indice(node, idx) elif "softmax" == node_name: self._assign_softmax_indice(node, idx) elif any( n == node_name for n in [ "mul", "add", "sigmoid", "relu", "sub", "truediv", "pow", "dropout", "where",
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/autochunk/select_chunk.py
colossalai/autochunk/select_chunk.py
from .estimate_memory import EstimateMemory from .reorder_graph import ReorderGraph from .trace_indice import TraceIndice from .utils import NodeMgr, is_non_compute_node class SelectChunk(object): def __init__( self, trace_indice: TraceIndice, estimate_memory: EstimateMemory, reorder_graph: ReorderGraph, node_mgr: NodeMgr, max_memory=None, ): self.trace_indice = trace_indice self.estimate_memory = estimate_memory self.reorder_graph = reorder_graph self.node_mgr = node_mgr if max_memory is not None: self.stratge = "fit_memory" self.max_memory = max_memory # MB else: self.stratge = "min_memory" def _select_best_chunk_region(self, possible_chunk_regions, chunk_infos, mem_peak): if self.stratge == "min_memory": best_region = self._select_min_memory_chunk_region(possible_chunk_regions, chunk_infos) elif self.stratge == "fit_memory": best_region = self._select_fit_memory_chunk_region(possible_chunk_regions, chunk_infos, mem_peak) else: raise RuntimeError() return best_region def _select_fit_memory_chunk_region(self, possible_chunk_regions, chunk_infos, mem_peak): # stop chunk if max memory satisfy memory limit if max(mem_peak) < self.max_memory: return None # remove illegal regions illegal_regions = [] for i in possible_chunk_regions: if not self._is_legal_region(i, chunk_infos): illegal_regions.append(i) for i in illegal_regions: if i in possible_chunk_regions: possible_chunk_regions.remove(i) if len(possible_chunk_regions) == 0: return None # get mem for chunk region regions_dict = [] for region in possible_chunk_regions: cur_region = region.copy() cur_node_list, cur_region = self.reorder_graph.tmp_reorder(self.node_mgr.get_node_list(), cur_region) cur_chunk_infos = chunk_infos + [cur_region] cur_mem = self.estimate_memory.estimate_chunk_inference_mem(cur_node_list, cur_chunk_infos)[0] cur_chunk_region_peak = cur_mem[cur_region["region"][0] : cur_region["region"][1] + 1] cur_chunk_region_max_peak = max(cur_chunk_region_peak) if cur_chunk_region_max_peak < self.max_memory: regions_dict.append( { "chunk_info": region, "chunk_max_mem": cur_chunk_region_max_peak, "chunk_len": self._get_compute_node_num(region["region"][0], region["region"][1]), "reorder_chunk_info": cur_region, "reorder_node_list": cur_node_list, } ) # no region found if len(regions_dict) == 0: raise RuntimeError("Search failed. 
Try a larger memory threshold.") # select the min chunk len chunk_len = [i["chunk_len"] for i in regions_dict] best_region_idx = chunk_len.index(min(chunk_len)) best_region = regions_dict[best_region_idx] # get max chunk size best_region = self._get_fit_chunk_size(best_region, chunk_infos) return best_region def _get_fit_chunk_size(self, chunk_region_dict, chunk_infos): chunk_size = 1 reorder_chunk_info = chunk_region_dict["reorder_chunk_info"] reorder_chunk_info["chunk_size"] = chunk_size cur_chunk_max_mem = 0 # search a region while cur_chunk_max_mem < self.max_memory: chunk_size *= 2 reorder_chunk_info["chunk_size"] = chunk_size cur_chunk_infos = chunk_infos + [reorder_chunk_info] cur_mem_peak = self.estimate_memory.estimate_chunk_inference_mem( chunk_region_dict["reorder_node_list"], cur_chunk_infos )[0] cur_chunk_max_mem = max(cur_mem_peak[reorder_chunk_info["region"][0] : reorder_chunk_info["region"][1] + 1]) # search exact size chunk_info = chunk_region_dict["chunk_info"] chunk_info["chunk_size"] = self._chunk_size_binary_search( chunk_size // 2, chunk_size, chunk_region_dict, chunk_infos ) return chunk_info def _chunk_size_binary_search(self, left, right, chunk_region_dict, chunk_infos): if left >= 16: gap = 4 else: gap = 1 chunk_info = chunk_region_dict["reorder_chunk_info"] while right >= left + gap: mid = int((left + right) / 2 + 0.5) chunk_info["chunk_size"] = mid cur_chunk_infos = chunk_infos + [chunk_info] cur_mem_peak = self.estimate_memory.estimate_chunk_inference_mem( chunk_region_dict["reorder_node_list"], cur_chunk_infos )[0] cur_chunk_max_mem = max(cur_mem_peak[chunk_info["region"][0] : chunk_info["region"][1] + 1]) if cur_chunk_max_mem >= self.max_memory: right = mid - gap else: left = mid + gap return left def _get_compute_node_num(self, start, end): count = 0 for i in self.node_mgr.get_node_slice_by_idx(start, end + 1): if not is_non_compute_node(i): count += 1 return count def _select_min_memory_chunk_region(self, possible_chunk_regions, chunk_infos): # remove illegal regions illegal_regions = [] for i in possible_chunk_regions: if not self._is_legal_region(i, chunk_infos): illegal_regions.append(i) for i in illegal_regions: if i in possible_chunk_regions: possible_chunk_regions.remove(i) if len(possible_chunk_regions) == 0: return None # get max possible chunk region max_possible_chunk_region = ( min([i["region"][0] for i in possible_chunk_regions]), max([i["region"][1] for i in possible_chunk_regions]), ) # get mem for chunk region regions_dict_list = [] for region in possible_chunk_regions: cur_region = region.copy() cur_node_list, cur_region = self.reorder_graph.tmp_reorder(self.node_mgr.get_node_list(), cur_region) cur_chunk_infos = chunk_infos + [cur_region] cur_mem_peak = self.estimate_memory.estimate_chunk_inference_mem(cur_node_list, cur_chunk_infos)[0] cur_chunk_region_peak = cur_mem_peak[max_possible_chunk_region[0] : max_possible_chunk_region[1] + 1] cur_chunk_region_max_peak = max(cur_chunk_region_peak) regions_dict_list.append( { "chunk_info": region, "chunk_max_mem": cur_chunk_region_max_peak, "chunk_len": self._get_compute_node_num(region["region"][0], region["region"][1]), "reorder_chunk_info": cur_region, "reorder_node_list": cur_node_list, } ) # select the min mem chunk_max_mem = [i["chunk_max_mem"] for i in regions_dict_list] best_region_idx = chunk_max_mem.index(min(chunk_max_mem)) best_region = regions_dict_list[best_region_idx]["chunk_info"] if best_region is not None: best_region["chunk_size"] = 1 return best_region def _is_legal_region(self, 
cur_chunk_info, chunk_infos): (chunk_region_start, chunk_region_end) = cur_chunk_info["region"] if cur_chunk_info in chunk_infos: return False if chunk_region_end < chunk_region_start: return False for i in chunk_infos: region = i["region"] if not ( (chunk_region_start > region[1] and chunk_region_end > region[1]) or (chunk_region_start < region[0] and chunk_region_end < region[0]) ): return False return True
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
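A minimal sketch of the chunk-size search strategy used by _get_fit_chunk_size and _chunk_size_binary_search above: double the chunk size until the simulated peak memory exceeds the budget, then binary-search between the last two probes. The peak_mem function is a hypothetical, monotone stand-in for EstimateMemory.estimate_chunk_inference_mem:

def peak_mem(chunk_size: int) -> float:
    # hypothetical memory model: larger chunks -> higher peak memory (MB)
    return 100.0 + 2.5 * chunk_size


def find_max_chunk_size(max_memory: float) -> int:
    # exponential probe: double until the budget is exceeded
    chunk_size = 1
    while peak_mem(chunk_size) < max_memory:
        chunk_size *= 2
    left, right = chunk_size // 2, chunk_size
    # binary search with a coarse step for large sizes, mirroring _chunk_size_binary_search
    gap = 4 if left >= 16 else 1
    while right >= left + gap:
        mid = (left + right + 1) // 2
        if peak_mem(mid) >= max_memory:
            right = mid - gap
        else:
            left = mid + gap
    return left


print(find_max_chunk_size(max_memory=400.0))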
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/interface/pretrained.py
colossalai/interface/pretrained.py
from typing import Optional

from torch.nn import Module

__all__ = [
    "get_pretrained_path",
    "set_pretrained_path",
]


def get_pretrained_path(model: Module) -> Optional[str]:
    return getattr(model, "_pretrained", None)


def set_pretrained_path(model: Module, path: str) -> None:
    setattr(model, "_pretrained", path)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
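A minimal usage sketch for the two helpers defined in colossalai/interface/pretrained.py above, assuming colossalai and its dependencies are installed; the checkpoint path is hypothetical:

import torch.nn as nn

from colossalai.interface.pretrained import get_pretrained_path, set_pretrained_path

model = nn.Linear(4, 4)
assert get_pretrained_path(model) is None          # no path recorded yet
set_pretrained_path(model, "/path/to/pytorch_model.bin")   # hypothetical checkpoint path
print(get_pretrained_path(model))                  # -> "/path/to/pytorch_model.bin"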
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/interface/model.py
colossalai/interface/model.py
import re from typing import Dict, Set import torch import torch.nn as nn from peft import PeftModel, PeftType def extract_lora_layers(model: PeftModel, names: Set[str], adapter_name: str = "default"): config = model.peft_config[adapter_name] if config.peft_type != PeftType.LORA: raise ValueError(f"Adapter {adapter_name} is not a LORA adapter.") # to_return = lora_state_dict(model, bias=model.peft_config.bias) # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP bias = config.bias if bias == "none": to_return = {k for k in names if "lora_" in k} elif bias == "all": to_return = {k for k in names if "lora_" in k or "bias" in k} elif bias == "lora_only": to_return = set() for k in names: if "lora_" in k: to_return.add(k) bias_name = k.split("lora_")[0] + "bias" if bias_name in names: to_return.add(bias_name) else: raise NotImplementedError to_return = {k for k in to_return if (("lora_" in k and adapter_name in k) or ("bias" in k))} if config.use_dora: # Here we take care of a refactor of DoRA which changed lora_magnitude_vector from a ParameterDict to a # ModuleDict with a DoraLayer instance. The old parameter is now the "weight" attribute of that layer. Since # we want the state_dict format not to change, we remove the "weight" part. new_dora_suffix = f"lora_magnitude_vector.{adapter_name}.weight" def renamed_dora_weights(k): if k.endswith(new_dora_suffix): k = k[:-7] # remove ".weight" return k to_return = {renamed_dora_weights(k) for k in to_return} to_return = {re.sub(f"lora_\S\.{adapter_name}\.(weight|bias)", "base_layer", k) for k in to_return} return to_return class PeftUnwrapMixin: def __init__(self, peft_model: PeftModel): self.base_model = peft_model.get_base_model() # peft does not affect buffers self.lora_layers = extract_lora_layers(peft_model, set(n for n, p in self.base_model.named_parameters())) potential_lora_weights = set() for n in self.lora_layers: potential_lora_weights.add(f"{n}.weight") potential_lora_weights.add(f"{n}.bias") self.lora_param_to_origin_param = {n: n.replace("base_layer.", "") for n in potential_lora_weights} self.origin_param_to_lora_param = {v: k for k, v in self.lora_param_to_origin_param.items()} def named_parameters(self): for n, p in self.base_model.named_parameters(): if n in self.lora_param_to_origin_param: n = self.lora_param_to_origin_param[n] yield n, p def named_buffers(self): return self.base_model.named_buffers() @property def _modules(self): return self.base_model._modules @property def _non_persistent_buffers_set(self): return self.base_model._non_persistent_buffers_set def patch_state_dict(self, state_dict: Dict[str, torch.Tensor]): new_state_dict = {} for k, v in state_dict.items(): if k in self.origin_param_to_lora_param: k = self.origin_param_to_lora_param[k] new_state_dict[k] = v return new_state_dict def state_dict(self): state_dict = {} for k, v in self.base_model.state_dict().items(): if k in self.lora_param_to_origin_param: k = self.lora_param_to_origin_param[k] state_dict[k] = v return state_dict def load_state_dict(self, state_dict, strict: bool = True, assign: bool = False): state_dict = self.patch_state_dict(state_dict) self.base_model.load_state_dict(state_dict, strict=strict, assign=assign) def __hash__(self): return hash(self.base_model) class ModelWrapper(nn.Module): """ A wrapper class to define the common interface used by booster. Args: module (nn.Module): The model to be wrapped. 
""" def __init__(self, module: nn.Module) -> None: super().__init__() self.module = module def unwrap(self, unwrap_peft: bool = True): """ Unwrap the model to return the original model for checkpoint saving/loading. """ if isinstance(self.module, ModelWrapper): model = self.module.unwrap() else: model = self.module if unwrap_peft and isinstance(model, PeftModel): model = PeftUnwrapMixin(model) return model def forward(self, *args, **kwargs): return self.module(*args, **kwargs) class AMPModelMixin: """This mixin class defines the interface for AMP training.""" def update_master_params(self): """ Update the master parameters for AMP training. """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
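A minimal sketch of how ModelWrapper from colossalai/interface/model.py above is typically used, assuming colossalai (and its peft dependency) is installed:

import torch
import torch.nn as nn

from colossalai.interface import ModelWrapper

wrapped = ModelWrapper(nn.Linear(8, 8))
out = wrapped(torch.randn(2, 8))     # forward calls are delegated to the inner module
original = wrapped.unwrap()          # plain nn.Linear again, e.g. for checkpoint saving/loading
print(type(original).__name__)       # -> "Linear"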
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/interface/optimizer.py
colossalai/interface/optimizer.py
from typing import Dict, Optional, Union import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch.optim import Optimizer class OptimizerWrapper: """ A standard interface for optimizers wrapped by the Booster. Args: optim (Optimizer): The optimizer to be wrapped. """ def __init__(self, optim: Optimizer): self.optim = optim @property def parameters(self): params = [] for group in self.param_groups: params += group["params"] return params @property def param_groups(self): return self.optim.param_groups @property def defaults(self): return self.optim.defaults def add_param_group(self, *args, **kwargs): return self.optim.add_param_group(*args, **kwargs) def step(self, *args, **kwargs): """ Performs a single optimization step. """ return self.optim.step(*args, **kwargs) def zero_grad(self, *args, **kwargs): """ Clears the gradients of all optimized `torch.Tensor`. """ self.optim.zero_grad(*args, **kwargs) def backward(self, loss: Tensor, inputs=None, retain_graph=False, **kwargs): """ Performs a backward pass on the loss. """ loss.backward(inputs=inputs, retain_graph=retain_graph, **kwargs) def backward_by_grad(self, tensor: Tensor, grad: Tensor, inputs: Tensor = None, retain_graph: bool = False): """ Performs a backward pass for dx or dw, for dx, we only calculate dx = w*dy here for dw, we only calculate dw = x*dy here Args: tensor (Tensor): y or loss of current chunk; grad_tensors (Tensor): dy of current chunk; input_obj (Tensor): for dx, input_obj is x of current chunk; for dw, input_obj is w of current chunk; retain_graph (bool): default to be True, we retain graph in backward_b """ torch.autograd.backward( tensors=tensor, grad_tensors=grad, inputs=inputs, retain_graph=retain_graph, ) def state_dict(self): """ Returns the optimizer state. """ return self.optim.state_dict() def load_state_dict(self, *args, **kwargs): """ Loads the optimizer state. """ self.optim.load_state_dict(*args, **kwargs) def clip_grad_by_value(self, clip_value: float, *args, **kwargs) -> None: """ Clips gradient of an iterable of parameters at specified min and max values. Args: clip_value (float or int): maximum allowed value of the gradients. Gradients are clipped in the range Note: In PyTorch Torch 2.0 and above, you can pass in foreach=True as kwargs to clip_grad_value_ to use the faster implementation. Please refer to the PyTorch documentation for more details. """ nn.utils.clip_grad_value_(self.parameters, clip_value, *args, **kwargs) def clip_grad_by_norm( self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0, error_if_nonfinite: bool = False, *args, **kwargs, ) -> Tensor: """ Clips gradient norm of an iterable of parameters. Args: max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. error_if_nonfinite (bool): if True, an error is raised if the total norm is non-finite. Default: False Note: In PyTorch Torch 2.0 and above, you can pass in foreach=True as kwargs to clip_grad_norm_ to use the faster implementation. Please refer to the PyTorch documentation for more details. """ norm = nn.utils.clip_grad_norm_(self.parameters, max_norm, norm_type, error_if_nonfinite, *args, **kwargs) return norm def scale_loss(self, loss: Tensor): """ Scales the loss for mixed precision training. Note: Only available for optimizers with mixed precision training. Args: loss (Tensor): The loss to be scaled. 
""" raise NotImplementedError( "The method scale_loss is only available for optimizers with mixed precision training" ) def unscale_grad(self): """ Unscale the gradients for mixed precision training. Note: Only available for optimizers with mixed precision training. """ raise NotImplementedError( "The method unscale_grad is only available for optimizers with mixed precision training" ) def unwrap(self): """ Unwrap the optimizer for checkpoint saving/loading. """ return self.optim def get_grad_norm(self, norm_type: Union[float, int] = 2.0, **kwargs) -> Optional[float]: """ Returns the gradient norm of an iterable of parameters. This method should be called after optimizer.step(). Args: norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Optional[float]: Total norm of the gradients (viewed as a single vector). If there are no valid gradients, returns None. """ raise NotImplementedError("The method get_grad_norm is not implemented yet.") class DistributedOptim(Optimizer): def setup_distributed( self, tp_group: Optional[dist.ProcessGroup] = None, dp_group: Optional[dist.ProcessGroup] = None, shard_to_working_param: Optional[Dict] = {}, padding_map: Optional[Dict] = None, is_zero: Optional[bool] = False, ): """Assign process groups for TP and ZeRO 2. Arguments: tp_group (dist.ProcessGroup): Tensor Parallel process group dp_group (dist.ProcessGroup): ZeRO stage 2 process group shard_to_working_param (Dict): ZeRO stage 2 feeds the optimizer a sharded param view to match grad shape. This maps from id(view) to model params used in forward & backward. padding_map (Dict): Per-param padding from ZeRO stage 2 is_zero (bool): Whether to use ZeRO stage 2. """ raise NotImplementedError("setup_distributed for TP/DP isn't supported by this optimizer yet!")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
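A minimal sketch of the OptimizerWrapper interface from colossalai/interface/optimizer.py above; the tiny model and random data are hypothetical:

import torch
import torch.nn as nn

from colossalai.interface import OptimizerWrapper

model = nn.Linear(4, 2)
optim = OptimizerWrapper(torch.optim.SGD(model.parameters(), lr=0.1))

loss = model(torch.randn(8, 4)).sum()
optim.backward(loss)                    # plain loss.backward() under the hood
optim.clip_grad_by_norm(max_norm=1.0)   # delegates to nn.utils.clip_grad_norm_
optim.step()
optim.zero_grad()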
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/interface/__init__.py
colossalai/interface/__init__.py
from .model import AMPModelMixin, ModelWrapper
from .optimizer import OptimizerWrapper

__all__ = ["OptimizerWrapper", "ModelWrapper", "AMPModelMixin"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/logging/logger.py
colossalai/logging/logger.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect import logging from pathlib import Path from typing import List, Union import torch.distributed as dist class DistributedLogger: """This is a distributed event logger class essentially based on :class:`logging`. Args: name (str): The name of the logger. Note: The parallel_mode used in ``info``, ``warning``, ``debug`` and ``error`` should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. """ __instances = dict() @staticmethod def get_instance(name: str): """Get the unique single logger instance based on name. Args: name (str): The name of the logger. Returns: DistributedLogger: A DistributedLogger object """ if name in DistributedLogger.__instances: return DistributedLogger.__instances[name] else: logger = DistributedLogger(name=name) return logger def __init__(self, name): if name in DistributedLogger.__instances: raise Exception( "Logger with the same name has been created, you should use colossalai.logging.get_dist_logger" ) else: handler = None formatter = logging.Formatter("colossalai - %(name)s - %(levelname)s: %(message)s") try: from rich.logging import RichHandler handler = RichHandler(show_path=False, markup=True, rich_tracebacks=True) handler.setFormatter(formatter) except ImportError: handler = logging.StreamHandler() handler.setFormatter(formatter) self._name = name self._logger = logging.getLogger(name) self._logger.setLevel(logging.INFO) if handler is not None: self._logger.addHandler(handler) self._logger.propagate = False DistributedLogger.__instances[name] = self @property def rank(self): return dist.get_rank() if dist.is_initialized() else 0 @staticmethod def __get_call_info(): stack = inspect.stack() # stack[1] gives previous function ('info' in our case) # stack[2] gives before previous function and so on fn = stack[2][1] ln = stack[2][2] func = stack[2][3] return fn, ln, func @staticmethod def _check_valid_logging_level(level: str): assert level in ["INFO", "DEBUG", "WARNING", "ERROR"], "found invalid logging level" def set_level(self, level: str) -> None: """Set the logging level Args: level (str): Can only be INFO, DEBUG, WARNING and ERROR. """ self._check_valid_logging_level(level) self._logger.setLevel(getattr(logging, level)) def log_to_file(self, path: Union[str, Path], mode: str = "a", level: str = "INFO", suffix: str = None) -> None: """Save the logs to file Args: path (A string or pathlib.Path object): The file to save the log. mode (str): The mode to write log into the file. level (str): Can only be INFO, DEBUG, WARNING and ERROR. suffix (str): The suffix string of log's name. 
""" assert isinstance(path, (str, Path)), f"expected argument path to be type str or Path, but got {type(path)}" self._check_valid_logging_level(level) if isinstance(path, str): path = Path(path) # create log directory path.mkdir(parents=True, exist_ok=True) if suffix is not None: log_file_name = f"rank_{self.rank}_{suffix}.log" else: log_file_name = f"rank_{self.rank}.log" path = path.joinpath(log_file_name) # add file handler file_handler = logging.FileHandler(path, mode) file_handler.setLevel(getattr(logging, level)) formatter = logging.Formatter("colossalai - %(name)s - %(levelname)s: %(message)s") file_handler.setFormatter(formatter) self._logger.addHandler(file_handler) def _log(self, level, message: str, ranks: List[int] = None) -> None: if ranks is None: getattr(self._logger, level)(message) else: if self.rank in ranks: getattr(self._logger, level)(message) def info(self, message: str, ranks: List[int] = None) -> None: """Log an info message. Args: message (str): The message to be logged. ranks (List[int]): List of parallel ranks. """ message_prefix = "{}:{} {}".format(*self.__get_call_info()) self._log("info", message_prefix, ranks) self._log("info", message, ranks) def warning(self, message: str, ranks: List[int] = None) -> None: """Log a warning message. Args: message (str): The message to be logged. ranks (List[int]): List of parallel ranks. """ message_prefix = "{}:{} {}".format(*self.__get_call_info()) self._log("warning", message_prefix, ranks) self._log("warning", message, ranks) def debug(self, message: str, ranks: List[int] = None) -> None: """Log a debug message. Args: message (str): The message to be logged. ranks (List[int]): List of parallel ranks. """ message_prefix = "{}:{} {}".format(*self.__get_call_info()) self._log("debug", message_prefix, ranks) self._log("debug", message, ranks) def error(self, message: str, ranks: List[int] = None) -> None: """Log an error message. Args: message (str): The message to be logged. ranks (List[int]): List of parallel ranks. """ message_prefix = "{}:{} {}".format(*self.__get_call_info()) self._log("error", message_prefix, ranks) self._log("error", message, ranks)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
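A minimal sketch of the DistributedLogger defined in colossalai/logging/logger.py above, obtained through the get_dist_logger factory; the log directory and suffix are hypothetical:

from colossalai.logging import get_dist_logger

logger = get_dist_logger("demo")
logger.set_level("DEBUG")
logger.log_to_file("./logs", suffix="train")   # writes ./logs/rank_0_train.log on rank 0
logger.info("training started", ranks=[0])     # only rank 0 emits this message
logger.warning("this is logged on every rank")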
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/logging/__init__.py
colossalai/logging/__init__.py
import logging from typing import List, Optional from .logger import DistributedLogger __all__ = ["get_dist_logger", "DistributedLogger", "disable_existing_loggers"] def get_dist_logger(name: str = "colossalai") -> DistributedLogger: """Get logger instance based on name. The DistributedLogger will create singleton instances, which means that only one logger instance is created per name. Args: name (str): name of the logger, name must be unique Returns: :class:`colossalai.logging.DistributedLogger`: A distributed logger singleton instance. """ return DistributedLogger.get_instance(name=name) def disable_existing_loggers(include: Optional[List[str]] = None, exclude: List[str] = ["colossalai"]) -> None: """Set the level of existing loggers to `WARNING`. By default, it will "disable" all existing loggers except the logger named "colossalai". Args: include (Optional[List[str]], optional): Loggers whose name in this list will be disabled. If set to `None`, `exclude` argument will be used. Defaults to None. exclude (List[str], optional): Loggers whose name not in this list will be disabled. This argument will be used only when `include` is None. Defaults to ['colossalai']. """ if include is None: filter_func = lambda name: name not in exclude else: filter_func = lambda name: name in include for log_name in logging.Logger.manager.loggerDict.keys(): if filter_func(log_name): logging.getLogger(log_name).setLevel(logging.WARNING)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
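A short sketch of disable_existing_loggers from colossalai/logging/__init__.py above: lower every existing third-party logger to WARNING while keeping the colossalai logger untouched. The "urllib3" logger name is only an example:

import logging

from colossalai.logging import disable_existing_loggers, get_dist_logger

logging.basicConfig(level=logging.INFO)
noisy = logging.getLogger("urllib3")                # example third-party logger
noisy.info("visible before the call")
disable_existing_loggers(exclude=["colossalai"])    # every other existing logger drops to WARNING
noisy.info("now filtered out")
get_dist_logger().info("colossalai messages still show", ranks=[0])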
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/lazy/lazy_init.py
colossalai/lazy/lazy_init.py
from types import MethodType from typing import Callable, Optional, Union import torch import torch.nn as nn from packaging import version from torch import Tensor from torch.nn import Parameter from torch.utils._pytree import tree_map from colossalai.logging import get_dist_logger from .construction import ConstructorManager from .pretrained import PretrainedManager import colossalai._analyzer._subclasses._meta_registration # noqa # reference: https://pytorch.org/cppdocs/notes/tensor_creation.html _NORMAL_FACTORY = [ "arange", "full", "empty", "linspace", "logspace", "ones", "rand", "randn", "randint", "randperm", "zeros", "tensor", ] # factory function that does not support meta tensor backend _NO_META_FACTORY = [ "eye", ] _EARLY_MATERIALIZED_OPS = ["__getitem__", "split"] # If your intent is to change the metadata of a Tensor (such as sizes / strides / storage / storage_offset) # without autograd tracking the change, remove the .data / .detach() call and wrap the change in a `with torch.no_grad():` block. # These ops cannot be unwrapped using .data _CHANGE_META_OPS = ["_cudnn_rnn_flatten_weight", "requires_grad_", "__get__", "__set__", "numel", "size", "dim"] # These ops is not related to tensor value and should not be rerun _NO_RERUN_OPS = ["__get__", "numel", "size", "dim"] _LEGACY_TENSOR_CONSTRUCTOR = { "FloatTensor": torch.float, "DoubleTensor": torch.double, "HalfTensor": torch.half, "BFloat16Tensor": torch.bfloat16, "ByteTensor": torch.uint8, "CharTensor": torch.int8, "ShortTensor": torch.short, "IntTensor": torch.int, "LongTensor": torch.long, "BoolTensor": torch.bool, } # These ops have at least one lazy tensor argument and maybe a scalar argument # scalar value should be converted to meta tensor # this is a hack for torch 2.0 _EXPAND_SCALAR_OPS = [ "where", "clamp", "clamp_min", "clamp_max", "clamp_", "clamp_min_", "clamp_max_", ] _old_tensor_factory = torch.tensor _EMPTY_DATA = torch.empty(0) class _MyTensor(Tensor): """This class is only for correctness verification.""" _pre_op_fn: Callable[["LazyTensor"], None] = lambda *args: None default_device: Optional[torch.device] = None def __new__(cls, func, *args, concrete_data=None, **kwargs) -> "_MyTensor": cls._pre_op_fn() if concrete_data is not None: # uniform api as LazyTensor data = concrete_data else: kwargs["device"] = cls.default_device data = func(*args, **kwargs) return Tensor._make_subclass(cls, data, require_grad=data.requires_grad) @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): cls._pre_op_fn() return super().__torch_function__(func, types, args, kwargs) def _data_tolist(tensor: torch.Tensor) -> list: """tolist() method is not allowed for a subclass of tensor. Tensor.data returns a Tensor.""" return tensor.data.tolist() def _convert_cls(tensor: "LazyTensor", target: torch.Tensor, requires_grad=None) -> torch.Tensor: """Convert a lazy tensor's class to target's class, with target's data. The reason why we change the class of a lazy tensor in-place is that this can easily handle shared modules/parameters, which is common in huggingface models. If we create a new tensor and update the module by ``setattr(module, name, param)``, the shared parameters will not be updated. And we have to track all shared parameters and update them manually. 
Args: tensor (LazyTensor): the LazyTensor to be converted target (torch.Tensor): target tensor Returns: torch.Tensor: the converted tensor """ requires_grad = target.requires_grad if requires_grad is None else requires_grad cls_to_become = Parameter if isinstance(tensor, Parameter) else torch.Tensor tensor.__class__ = cls_to_become if cls_to_become is Parameter: # to fit UninitializedParameter delattr(tensor, "_is_param") tensor.data = target tensor.requires_grad = requires_grad # subclass of torch.Tensor does not have tolist() method # overwrite this method after materialization or distribution tensor.tolist = MethodType(_data_tolist, tensor) return tensor class LazyTensor(torch.Tensor): """A naive implementation of LazyTensor (https://arxiv.org/pdf/2102.13267.pdf). Usage: 1. Use ``LazyTensor`` instead of ``torch.Tensor``. >>> x = LazyTensor(torch.zeros, 2, 3) >>> x += 1 >>> y = x * x >>> y = y.cuda().half() >>> y[0, 0] = 0 >>> y = y.materialize() # materialize the tensor >>> print(y) tensor([[0., 1., 1.], [1., 1., 1.]], device='cuda:0', dtype=torch.float16) Warnings: 1. Cases that ``LazyTensor`` can't deal with. >>> x = LazyTensor(torch.ones, 2, 3) >>> x[0, 0] = -x[0, 0] # this will cause infinite recursion >>> y = x.clone() >>> x.add_(1) # modifying origin tensor after cloning leads to wrong materialization >>> z = x.tolist() >>> x.zeros_() # modifying origin tensor after cloning tolist is not allowed >>> nn.utils.weight_norm(self.conv, name="weight", dim=2) # applying weight norm on a lazy tensor is not allowed 2. Cases that ``LazyTensor`` becomes eager (early materialization). >>> b = a[:, 2:] # get a slice of a lazy tensor triggers early materialization >>> chunks = a.split(3) # this also triggers early materialization >>> x.data = torch.rand(2, 3) # directly setting data of a lazy tensor triggers early materialization """ _repr = True _meta_data: Optional[torch.Tensor] = None # shape, dtype, device _pre_op_fn: Callable[["LazyTensor"], None] = lambda *args: None default_device: Optional[torch.device] = None _device: torch.device # fake device of mate tensor @staticmethod def __new__(cls, func, *args, meta_data=None, concrete_data=None, **kwargs): # tips for torch 2.0: # torch 2.0 disables torch dispatch for subclass of tensor # MetaTensor is cannot be used # Now lazy tensor contains device injection and meta tensor if concrete_data is not None: # some ops don't support meta backend and should have concrete data elem = concrete_data else: if meta_data is None: with ConstructorManager.disable(): # to disable create lazy tensor in inner ops, this is a hack for torch 2.0 meta_data = func(*args, **{**kwargs, "device": "meta"}) elem = meta_data # As a meta tensor cannot be modified __class__ to torch.Tensor, we should use an empty real tensor here r = torch.Tensor._make_subclass(cls, _EMPTY_DATA, require_grad=elem.requires_grad) r._meta_data = meta_data return r def __init__(self, func, *args, meta_data=None, concrete_data=None, **kwargs): self._device = torch.device(kwargs.get("device", None) or "cpu") if func.__name__ in _NORMAL_FACTORY: kwargs = {**kwargs, "device": LazyTensor.default_device} self._factory_method = (func, args, kwargs) # (func, args, kwargs) self._op_buffer = [] # (func, args, kwargs, replace) self._materialized_data: Optional[torch.Tensor] = concrete_data # materialized data @property def device(self) -> torch.device: return self._materialized_data.device if self._materialized_data is not None else self._device def __repr__(self): return f"LazyTensor(..., 
size={tuple(self.shape)}, device='{self.device}', dtype={self.dtype})" def materialize(self) -> torch.Tensor: """Materialize the ``LazyTensor`` to ``torch.Tensor`` by modifying __class__ (inplace). Returns: torch.Tensor: The materialized tensor (self). """ requires_grad = self.requires_grad target = self._materialize_data() self.clean() return _convert_cls(self, target, requires_grad=requires_grad) def clean(self) -> None: """Clean all stored operations, meta data and materialized data, which prevents memory leaking. This should be called after all tensors are materialized.""" delattr(self, "_factory_method") delattr(self, "_op_buffer") delattr(self, "_materialized_data") delattr(self, "_meta_data") @staticmethod def _replace_with_materialized(x): if isinstance(x, LazyTensor): return x._materialize_data() return x def _materialize_data(self) -> torch.Tensor: # self._materialized_data should be generated after the first call of this function if self._materialized_data is None: # apply factory method func, args, kwargs = self._factory_method # apply cached sequence self._pre_op_fn() init_val = func( *tree_map(self._replace_with_materialized, args), **tree_map(self._replace_with_materialized, kwargs) ) self._materialized_data = self._rerun_ops(init_val) return self._materialized_data def _rerun_ops(self, target=None) -> torch.Tensor: """Do lazy execution by rerunning all (stored) related operations. Args: target (torc.Tensor, optional): Intial value of the target tensor (self). Defaults to None. """ def replace(x): if x is self: return target elif isinstance(x, LazyTensor): return x._materialize_data() return x packed = None for func, args, kwargs in self._op_buffer: if func == torch.Tensor.requires_grad_: packed = func, args, kwargs # requires grad should be set at last else: self._pre_op_fn() o = func(*tree_map(replace, args), **tree_map(replace, kwargs)) target = o if isinstance(o, torch.Tensor) else target # if func returns non-Tensor, discard the value # super-dainiu: set requires_grad after all inplace-ops are done if packed is not None: func, args, kwargs = packed func(*tree_map(replace, args), **tree_map(replace, kwargs)) return target # cache everything with __torch_function__ @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} if func.__name__ in _EARLY_MATERIALIZED_OPS: # These OPs cannot be lazy and related tensors should be early materialized tree_map(cls._replace_with_materialized, args) tree_map(cls._replace_with_materialized, kwargs) is_inplace: bool = ( func.__name__.endswith("_") and not (func.__name__.endswith("__")) or func.__name__ in ("__setitem__", "__set__") ) is_change_meta_op: bool = func.__name__ in _CHANGE_META_OPS if isinstance(func, torch._C.ScriptMethod): # FIXME(ver217): torch script functions are not verified target = None def unwrap(x): if isinstance(x, LazyTensor): return x._meta_data return x target: LazyTensor = args[0].clone() target._op_buffer.append((func, args, kwargs)) target._meta_data = getattr(target._meta_data, func.name)( *tree_map(unwrap, args[1:]), **tree_map(unwrap, kwargs) ) return target else: meta_to_lazy = {} def unwrap(x): if isinstance(x, LazyTensor): if x._materialized_data is not None: # for early materialized tensor, use its materialized data directly return x._materialized_data if is_change_meta_op else x._materialized_data.data t = x if is_inplace else x.clone() if func.__name__ not in _NO_RERUN_OPS: t._op_buffer.append((func, args, kwargs)) meta = x._meta_data if 
is_change_meta_op else x._meta_data.data meta_to_lazy[meta] = t return meta elif ( version.parse(torch.__version__) >= version.parse("2.0.0") and func.__name__ in _EXPAND_SCALAR_OPS and not isinstance(x, torch.Tensor) ): return _old_tensor_factory(x, device="meta") return x def wrap(y, i=None): if isinstance(y, torch.Tensor): if y.is_meta: if y in meta_to_lazy: # inplace op, just return origin lazy tensor return meta_to_lazy[y] else: # out of place op, create new lazy tensor fn = lambda *a, **kw: func(*a, **kw) if i is None else func(*a, **kw)[i] fn.__name__ = func.__name__ lazy_y = LazyTensor(fn, *args, meta_data=y, **kwargs) return lazy_y else: # for early materialized tensor return LazyTensor(lambda: None, concrete_data=y) return y cls._pre_op_fn() with ConstructorManager.disable(): # to disable create lazy tensor in inner ops, this is a hack for torch 2.0 o = func(*tree_map(unwrap, args), **tree_map(unwrap, kwargs)) if isinstance(o, (tuple, list)): return type(o)(wrap(y, i=i) for i, y in enumerate(o)) return wrap(o) def to(self, *args, **kwargs) -> torch.Tensor: if self._materialized_data is not None: return LazyTensor(lambda: None, concrete_data=self._materialized_data.to(*args, **kwargs)) device = None def replace(x): nonlocal device if isinstance(x, (str, int, torch.device)) and not isinstance(x, bool): device = x return torch.device("meta") return x meta_data = self._meta_data.to(*tree_map(replace, args), **tree_map(replace, kwargs)) if meta_data is self._meta_data and device == self.device: return self def factory_fn(t: torch.Tensor, **kw): return t.to(*args, **kwargs) return LazyTensor(factory_fn, self, meta_data=meta_data, device=device) def cpu(self, memory_format: torch.memory_format = torch.preserve_format): return self.to(device=torch.device("cpu"), memory_format=memory_format) def cuda(self, device=None, non_blocking=False, memory_format: torch.memory_format = torch.preserve_format): device = torch.device(device or "cuda") return self.to(device=device, non_blocking=non_blocking, memory_format=memory_format) def clone(self) -> "LazyTensor": def factory_fn(t: torch.Tensor, **kw): # if self is materialized, return self return t.clone() target = LazyTensor(factory_fn, self, meta_data=self._meta_data) return target def detach(self) -> Tensor: return self def __deepcopy__(self, memo): if not self.is_leaf: raise RuntimeError( "Only Tensors created explicitly by the user " "(graph leaves) support the deepcopy protocol at the moment" ) if id(self) in memo: return memo[id(self)] def factory_fn(t: torch.Tensor, **kw): # if self is materialized, return self return _copy_tensor(t, t.requires_grad) if self._materialized_data is not None: # self is early materialized copied = _copy_tensor(self._materialized_data, self.requires_grad) target = LazyTensor(lambda: None, concrete_data=copied) else: target = LazyTensor(factory_fn, self, meta_data=self._meta_data) if isinstance(self, Parameter): # hack isinstance check of parameter target._is_param = True memo[id(self)] = target return target @property def data(self): return self @data.setter def data(self, other: "LazyTensor"): """This is sightly different from oringinal `data` setter. 
E.g.: >>> a = torch.randn(3, 3) # a is a Tensor >>> b = torch.rand(2, 2) >>> a.data = b >>> b.add_(1) # this will affect a >>> x = torch.randn(3, 3) # x is a LazyTensor >>> y = torch.rand(2, 2) # y is a LazyTensor >>> x.data = y >>> y.add_(1) # this will not affect x """ if other is self: return def replace(x): if x is other: return self return x for func, args, kwargs in [other._factory_method, *other._op_buffer]: self._op_buffer.append((func, tree_map(replace, args), tree_map(replace, kwargs))) def tolist(self) -> list: # Though self.__class__ is modified to torch.Tensor, in C++ side, it is still a subclass of torch.Tensor # And subclass of torch.Tensor does not have tolist() method t = self._materialize_data() return t.tolist() def __hash__(self): return id(self) def __rpow__(self, other): dtype = torch.result_type(self, other) return torch.tensor(other, dtype=dtype, device=self.device) ** self class LazyInitContext: """Context manager for lazy initialization. Enables initializing the model without allocating real memory. Args: tensor_cls (Union[_MyTensor, LazyTensor], optional): This is only for test. Defaults to LazyTensor. default_device (Optional[Union[torch.device, str, int]], optional): Defalt device for initialization. If it's cuda, initilization will be accelerated, but cuda memory will be allocated. By default, it's cpu. Defaults to None. """ _replaced: bool = False def __init__( self, tensor_cls: Union[_MyTensor, LazyTensor] = LazyTensor, default_device: Optional[Union[torch.device, str, int]] = None, ): assert tensor_cls is LazyTensor or tensor_cls is _MyTensor self.tensor_cls = tensor_cls self.old_default_device = LazyTensor.default_device self.default_device = default_device def __enter__(self): if LazyInitContext._replaced: raise RuntimeError(f"LazyInitContext is not reentrant") LazyInitContext._replaced = True self.old_default_device = self.tensor_cls.default_device self.tensor_cls.default_device = self.default_device def wrap_factory_method(target): # factory functions (eg. torch.empty()) def wrapper(*args, **kwargs): return self.tensor_cls(target, *args, **kwargs) return wrapper, target def wrap_factory_like_method(orig_target, target): # factory_like functions (eg. torch.empty_like()) def wrapper(*args, **kwargs): orig_t = args[0] device = kwargs.pop("device", orig_t.device) dtype = kwargs.pop("dtype", orig_t.dtype) return self.tensor_cls(orig_target, *orig_t.shape, *args[1:], device=device, dtype=dtype, **kwargs) return wrapper, target def wrap_legacy_constructor(target, dtype): # legacy constructor (e.g. 
torch.LongTensor()) def wrapper(*args, **kwargs): if len(args) == 1 and isinstance(args[0], torch.Tensor): # (Tensor other) return args[0] elif len(args) == 1: # (object data, *, torch.device device) kwargs = {**kwargs, "dtype": dtype} replaced, orig = self.overrides["tensor"] return replaced(*args, **kwargs) elif _is_int_tuple(args): # (tuple of ints size, *, torch.device device) kwargs = {**kwargs, "dtype": dtype} replaced, orig = self.overrides["empty"] return replaced(*args, **kwargs) else: raise TypeError( f"new() received an invalid combination of arguments - got {tuple(type(x) for x in args)}, but expected one of:\n * (Tensor other)\n * (tuple of ints size, *, torch.device device)\n * (object data, *, torch.device device)" ) return wrapper, target def wrap_no_meta_factory(target): # factory functions which don't support meta tensor backend def wrapper(*args, **kwargs): tensor = target(*args, **kwargs) return self.tensor_cls(lambda: None, concrete_data=tensor) return wrapper, target overrides = { target: wrap_factory_method(getattr(torch, target)) for target in _NORMAL_FACTORY if callable(getattr(torch, target, None)) } overrides.update( { target + "_like": wrap_factory_like_method(getattr(torch, target), getattr(torch, target + "_like")) for target in _NORMAL_FACTORY if callable(getattr(torch, target + "_like", None)) } ) overrides.update( { target: wrap_legacy_constructor(getattr(torch, target), dtype) for target, dtype in _LEGACY_TENSOR_CONSTRUCTOR.items() if callable(getattr(torch, target, None)) } ) overrides.update( { target: wrap_no_meta_factory(getattr(torch, target)) for target in _NO_META_FACTORY if callable(getattr(torch, target, None)) } ) ConstructorManager.apply(overrides) PretrainedManager.inject() def __exit__(self, exc_type, exc_val, exc_tb): self.tensor_cls.default_device = self.old_default_device LazyInitContext._replaced = False ConstructorManager.clear() PretrainedManager.recover() @staticmethod def materialize(module: nn.Module, verbose: bool = False) -> nn.Module: """Initialize all ``Parameter`` from ``LazyTensor``. This function will modify the module in-place. Args: module (nn.Module): Target ``nn.Module`` verbose (bool): Whether to print lazy initialization rate. Defaults to False. 
""" def apply_fn(name: str, p: LazyTensor): p.materialize() return _apply_to_lazy_module(module, apply_fn, verbose) def _apply_to_lazy_module( module: nn.Module, apply_fn: Callable[[str, torch.Tensor], None], verbose: bool = False ) -> nn.Module: if verbose: # verbose info param_cnt = 0 param_lazy_cnt = 0 buf_cnt = 0 buf_lazy_cnt = 0 total_numel = 0 non_lazy_numel = 0 for name, p in module.named_parameters(): if verbose: param_cnt += 1 total_numel += p.numel() if getattr(p, "_materialized_data", False) is None: # if no _materialized_data attr, the tensor is not lazy param_lazy_cnt += 1 else: non_lazy_numel += p.numel() if isinstance(p, LazyTensor): apply_fn(name, p) for name, buf in module.named_buffers(): if verbose: buf_cnt += 1 total_numel += buf.numel() if getattr(buf, "_materialized_data", False) is None: # if no _materialized_data attr, the tensor is not lazy buf_lazy_cnt += 1 else: non_lazy_numel += buf.numel() if isinstance(buf, LazyTensor): apply_fn(name, buf) if verbose: non_lazy_numel_ratio = non_lazy_numel / total_numel * 100 if non_lazy_numel != 0 else 0 logger = get_dist_logger() logger.info(f"Param lazy rate: {param_lazy_cnt}/{param_cnt}", ranks=[0]) logger.info(f"Buffer lazy rate: {buf_lazy_cnt}/{buf_cnt}", ranks=[0]) logger.info( f"Non lazy numel: {non_lazy_numel} ({non_lazy_numel/1024**2:.3f} M), ratio: {non_lazy_numel_ratio}%", ranks=[0], ) return module def _is_int_tuple(args) -> bool: if not isinstance(args, tuple): return False for x in args: if not isinstance(x, int): return False return True def _copy_tensor(tensor: Tensor, requires_grad: bool) -> Tensor: copied = tensor.data.clone() copied.requires_grad = requires_grad return copied
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
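A minimal sketch of the LazyInitContext workflow implemented in colossalai/lazy/lazy_init.py above: build the model under the context so its parameters become LazyTensors, then materialize them afterwards:

import torch.nn as nn

from colossalai.lazy import LazyInitContext

with LazyInitContext(default_device="cpu"):
    # factory calls such as torch.empty/torch.randn are intercepted,
    # so these parameters are LazyTensors and no real storage is allocated yet
    model = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU(), nn.Linear(1024, 10))

model = LazyInitContext.materialize(model, verbose=True)   # turn every LazyTensor parameter into a real tensor
print(sum(p.numel() for p in model.parameters()))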
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/lazy/pretrained.py
colossalai/lazy/pretrained.py
import copy import os from typing import Callable, Optional, Union import torch from torch.nn import Module from colossalai.interface import pretrained as pretrained_interface class PretrainedManager: old_from_pretrained: Optional[Callable] = None @staticmethod def inject() -> None: try: from transformers.modeling_utils import PreTrainedModel except ImportError: return # recover bound method to plain function PretrainedManager.old_from_pretrained = PreTrainedModel.from_pretrained.__func__ PreTrainedModel.from_pretrained = new_from_pretrained @staticmethod def recover() -> None: try: from transformers.modeling_utils import PreTrainedModel except ImportError: return # convert plain function to class method PreTrainedModel.from_pretrained = classmethod(PretrainedManager.old_from_pretrained) PretrainedManager.old_from_pretrained = None @classmethod def new_from_pretrained( cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs ) -> Module: from transformers import GenerationConfig from transformers.configuration_utils import PretrainedConfig from transformers.modeling_utils import ( ContextManagers, _add_variant, cached_file, download_url, has_file, is_offline_mode, is_remote_url, no_init_weights, ) from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, is_safetensors_available, logging, ) logger = logging.get_logger(__name__) config = kwargs.pop("config", None) cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) _ = kwargs.pop("mirror", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) kwargs.pop("_fast_init", True) torch_dtype = kwargs.pop("torch_dtype", None) subfolder = kwargs.pop("subfolder", "") commit_hash = kwargs.pop("_commit_hash", None) variant = kwargs.pop("variant", None) kwargs.pop("state_dict", None) kwargs.pop("from_tf", False) kwargs.pop("from_flax", False) kwargs.pop("output_loading_info", False) kwargs.pop("trust_remote_code", None) kwargs.pop("low_cpu_mem_usage", None) kwargs.pop("device_map", None) kwargs.pop("max_memory", None) kwargs.pop("offload_folder", None) kwargs.pop("offload_state_dict", False) kwargs.pop("load_in_8bit", False) kwargs.pop("load_in_4bit", False) kwargs.pop("quantization_config", None) kwargs.pop("adapter_kwargs", {}) kwargs.pop("adapter_name", "default") kwargs.pop("use_flash_attention_2", False) use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False) if len(kwargs) > 0: logger.warning(f"Below kwargs may be ignored: {list(kwargs.keys())}") from_pt = True user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True # Load config if we don't provide a configuration if not isinstance(config, PretrainedConfig): config_path = config if config is not None else pretrained_model_name_or_path config, model_kwargs = cls.config_class.from_pretrained( config_path, cache_dir=cache_dir, return_unused_kwargs=True, force_download=force_download, proxies=proxies, local_files_only=local_files_only, 
use_auth_token=use_auth_token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) else: config = copy.deepcopy(config) kwarg_attn_imp = kwargs.pop("attn_implementation", None) if kwarg_attn_imp is not None and config._attn_implementation != kwarg_attn_imp: config._attn_implementation = kwarg_attn_imp model_kwargs = kwargs if commit_hash is None: commit_hash = getattr(config, "_commit_hash", None) # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # index of the files. if pretrained_model_name_or_path is not None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) is_local = os.path.isdir(pretrained_model_name_or_path) if is_local: if use_safetensors is not False and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)) ): # Load from a safetensors checkpoint archive_file = os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant) ) elif use_safetensors is not False and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)) ): # Load from a sharded safetensors checkpoint archive_file = os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant) ) elif os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)) ): # Load from a PyTorch checkpoint archive_file = os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant) ) elif os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)) ): # Load from a sharded PyTorch checkpoint archive_file = os.path.join( pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant) ) else: raise EnvironmentError( f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory" f" {pretrained_model_name_or_path}." ) elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): archive_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: # set correct filename if use_safetensors is not False: filename = _add_variant(SAFE_WEIGHTS_NAME, variant) else: filename = _add_variant(WEIGHTS_NAME, variant) try: # Load from URL or cache if already cached cached_file_kwargs = { "cache_dir": cache_dir, "force_download": force_download, "proxies": proxies, "local_files_only": local_files_only, "use_auth_token": use_auth_token, "user_agent": user_agent, "revision": revision, "subfolder": subfolder, "_raise_exceptions_for_missing_entries": False, "_commit_hash": commit_hash, } resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None # result when internet is up, the repo and revision exist, but the file does not. if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant): # Maybe the checkpoint is sharded, we try to grab the index name in this case. 
resolved_archive_file = cached_file( pretrained_model_name_or_path, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant), **cached_file_kwargs, ) if resolved_archive_file is not None: pass elif use_safetensors: raise EnvironmentError( f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} and thus cannot be loaded with `safetensors`. Please make sure that the model has been saved with `safe_serialization=True` or do not set `use_safetensors=True`." ) else: # This repo has no safetensors file of any kind, we switch to PyTorch. filename = _add_variant(WEIGHTS_NAME, variant) resolved_archive_file = cached_file( pretrained_model_name_or_path, filename, **cached_file_kwargs ) if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant): # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( pretrained_model_name_or_path, _add_variant(WEIGHTS_INDEX_NAME, variant), **cached_file_kwargs, ) if resolved_archive_file is not None: pass if resolved_archive_file is None: # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error # message. has_file_kwargs = { "revision": revision, "proxies": proxies, "use_auth_token": use_auth_token, } if variant is not None and has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant" f" {variant}. Use `variant=None` to load this model from those weights." ) else: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {_add_variant(WEIGHTS_NAME, variant)}" ) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted # to the original exception. raise except Exception: # For any other exception, we throw a generic error. raise EnvironmentError( f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" " from 'https://huggingface.co/models', make sure you don't have a local directory with the" f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" f" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)}." ) if is_local: logger.info(f"loading weights file {archive_file}") resolved_archive_file = archive_file else: logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") else: resolved_archive_file = None if from_pt: # set dtype to instantiate the model under: # 1. If torch_dtype is not None, we use that dtype dtype_orig = None if torch_dtype is not None: if not isinstance(torch_dtype, torch.dtype): raise ValueError(f"`torch_dtype` can be either `torch.dtype` or `None`, but received {torch_dtype}") dtype_orig = cls._set_default_torch_dtype(torch_dtype) config.name_or_path = pretrained_model_name_or_path # Instantiate model. 
init_contexts = [no_init_weights()] with ContextManagers(init_contexts): model = cls(config, *model_args, **model_kwargs) if from_pt: # restore default dtype if dtype_orig is not None: torch.set_default_dtype(dtype_orig) # make sure token embedding weights are still tied if needed model.tie_weights() # Set model in evaluation mode to deactivate DropOut modules by default model.eval() # If it is a model with generation capabilities, attempt to load the generation config if model.can_generate(): try: model.generation_config = GenerationConfig.from_pretrained( pretrained_model_name_or_path, cache_dir=cache_dir, force_download=force_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, subfolder=subfolder, _from_auto=from_auto_class, _from_pipeline=from_pipeline, **kwargs, ) except (OSError, TypeError): logger.info("Generation config file not found, using a generation config created from the model config.") # set pretrained path if resolved_archive_file: pretrained_interface.set_pretrained_path(model, resolved_archive_file) return model
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
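The patched new_from_pretrained above only resolves the checkpoint file and records its path via set_pretrained_path; it does not load the weights. A hedged sketch of how this combines with LazyInitContext, assuming transformers is installed and "gpt2" is reachable from the Hugging Face hub or a local cache:

from transformers import AutoModelForCausalLM

from colossalai.interface.pretrained import get_pretrained_path
from colossalai.lazy import LazyInitContext

with LazyInitContext():
    # PretrainedManager.inject() is active here, so from_pretrained skips weight loading
    model = AutoModelForCausalLM.from_pretrained("gpt2")

print(get_pretrained_path(model))    # resolved checkpoint path, to be loaded later (e.g. by a booster)
model = LazyInitContext.materialize(model)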
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/lazy/construction.py
colossalai/lazy/construction.py
from contextlib import contextmanager from typing import Callable, Dict, Tuple import torch __all__ = [ "_LEGACY_TENSOR_CONSTRUCTOR", "_NO_META_FACTORY", "_NORMAL_FACTORY", "ConstructorManager", ] # reference: https://pytorch.org/cppdocs/notes/tensor_creation.html _NORMAL_FACTORY = [ "arange", "full", "empty", "linspace", "logspace", "ones", "rand", "randn", "randint", "randperm", "zeros", "tensor", ] # factory function that does not support meta tensor backend _NO_META_FACTORY = [ "eye", ] _LEGACY_TENSOR_CONSTRUCTOR = { "FloatTensor": torch.float, "DoubleTensor": torch.double, "HalfTensor": torch.half, "BFloat16Tensor": torch.bfloat16, "ByteTensor": torch.uint8, "CharTensor": torch.int8, "ShortTensor": torch.short, "IntTensor": torch.int, "LongTensor": torch.long, "BoolTensor": torch.bool, } class ConstructorManager: # function name: (new, old) overwrites: Dict[str, Tuple[Callable, Callable]] = {} changed: bool = False @staticmethod def apply(overwrites: Dict[Callable, Callable]): ConstructorManager.overwrites.clear() ConstructorManager.overwrites.update(overwrites) ConstructorManager.redo() @staticmethod def undo(): assert ConstructorManager.changed, "No constructor change to undo" for name, (new, old) in ConstructorManager.overwrites.items(): setattr(torch, name, old) ConstructorManager.changed = False @staticmethod def redo(): assert not ConstructorManager.changed, "Constructor already changed" for name, (new, old) in ConstructorManager.overwrites.items(): setattr(torch, name, new) ConstructorManager.changed = True @staticmethod @contextmanager def disable(): enabled = ConstructorManager.changed if enabled: ConstructorManager.undo() yield if enabled: ConstructorManager.redo() @staticmethod def clear(): if ConstructorManager.changed: ConstructorManager.undo() ConstructorManager.overwrites.clear()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
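A small sketch of ConstructorManager from colossalai/lazy/construction.py above: temporarily replace a torch factory function, suspend the replacement inside disable(), and restore everything with clear(). The traced_empty wrapper is hypothetical:

import torch

from colossalai.lazy.construction import ConstructorManager

orig_empty = torch.empty


def traced_empty(*args, **kwargs):
    print("intercepted torch.empty with shape", args)
    return orig_empty(*args, **kwargs)    # call the saved original to avoid recursion


ConstructorManager.apply({"empty": (traced_empty, orig_empty)})   # name -> (new, old)
x = torch.empty(2, 2)              # goes through traced_empty
with ConstructorManager.disable():
    y = torch.empty(2, 2)          # original factory while the context is active
ConstructorManager.clear()         # undo the patch and forget the overrides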
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/lazy/__init__.py
colossalai/lazy/__init__.py
from .lazy_init import LazyInitContext, LazyTensor

__all__ = [
    "LazyInitContext",
    "LazyTensor",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/__init__.py
colossalai/kernel/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/kernel_loader.py
colossalai/kernel/kernel_loader.py
import warnings from typing import List from .extensions import ( CpuAdamArmExtension, CpuAdamX86Extension, FlashAttentionDaoCudaExtension, FlashAttentionNpuExtension, FlashAttentionSdpaCudaExtension, FusedOptimizerCudaExtension, InferenceOpsCudaExtension, LayerNormCudaExtension, MoeCudaExtension, ScaledMaskedSoftmaxCudaExtension, ScaledUpperTriangleMaskedSoftmaxCudaExtension, ) from .extensions.base_extension import _Extension __all__ = [ "KernelLoader", "CPUAdamLoader", "LayerNormLoader", "MoeLoader", "FusedOptimizerLoader", "InferenceOpsLoader", "ScaledMaskedSoftmaxLoader", "ScaledUpperTriangleMaskedSoftmaxLoader", ] class KernelLoader: """ An abstract class which offers encapsulation to the kernel loading process. Usage: kernel_loader = KernelLoader() kernel = kernel_loader.load() """ REGISTRY: List[_Extension] = [] @classmethod def register_extension(cls, extension: _Extension): """ This classmethod is an extension point which allows users to register their customized kernel implementations to the loader. Args: extension (_Extension): the extension to be registered. """ cls.REGISTRY.append(extension) def load(self, ext_name: str = None): """ Load the kernel according to the current machine. Args: ext_name (str): the name of the extension to be loaded. If not specified, the loader will try to look for an kernel available on the current machine. """ exts = [ext_cls() for ext_cls in self.__class__.REGISTRY] # look for exts which can be built/loaded on the current machine if ext_name: usable_exts = list(filter(lambda ext: ext.name == ext_name, exts)) else: usable_exts = [] for ext in exts: if ext.is_available(): # make sure the machine is compatible during kernel loading ext.assert_compatible() usable_exts.append(ext) assert len(usable_exts) != 0, f"No usable kernel found for {self.__class__.__name__} on the current machine." if len(usable_exts) > 1: # if more than one usable kernel is found, we will try to load the kernel with the highest priority usable_exts = sorted(usable_exts, key=lambda ext: ext.priority, reverse=True) warnings.warn( f"More than one kernel is available, loading the kernel with the highest priority - {usable_exts[0].__class__.__name__}" ) return usable_exts[0].load() class CPUAdamLoader(KernelLoader): REGISTRY = [CpuAdamX86Extension, CpuAdamArmExtension] class LayerNormLoader(KernelLoader): REGISTRY = [LayerNormCudaExtension] class MoeLoader(KernelLoader): REGISTRY = [MoeCudaExtension] class FusedOptimizerLoader(KernelLoader): REGISTRY = [FusedOptimizerCudaExtension] class InferenceOpsLoader(KernelLoader): REGISTRY = [InferenceOpsCudaExtension] class ScaledMaskedSoftmaxLoader(KernelLoader): REGISTRY = [ScaledMaskedSoftmaxCudaExtension] class ScaledUpperTriangleMaskedSoftmaxLoader(KernelLoader): REGISTRY = [ScaledUpperTriangleMaskedSoftmaxCudaExtension] class FlashAttentionLoader(KernelLoader): REGISTRY = [ FlashAttentionNpuExtension, FlashAttentionDaoCudaExtension, FlashAttentionSdpaCudaExtension, ] class FlashAttentionDaoLoader(KernelLoader): REGISTRY = [FlashAttentionDaoCudaExtension] class FlashAttentionWithCustomMaskLoader(KernelLoader): REGISTRY = [FlashAttentionNpuExtension, FlashAttentionSdpaCudaExtension] class FlashAttentionForFloatAndCustomMaskLoader(KernelLoader): REGISTRY = [FlashAttentionSdpaCudaExtension]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
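A minimal usage sketch (not part of the repository) of the loader classes defined above; it assumes ColossalAI is installed and that at least one registered extension can be built or loaded on the current machine.

from colossalai.kernel.kernel_loader import CPUAdamLoader, FlashAttentionLoader

# Picks the highest-priority extension that is available on this machine
# (x86 or ARM build of the CPU Adam kernel).
cpu_adam_kernel = CPUAdamLoader().load()

# FlashAttention falls back across the NPU, Dao CUDA and SDPA implementations
# registered in its REGISTRY, in priority order.
flash_attn_kernel = FlashAttentionLoader().load()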
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/jit/option.py
colossalai/kernel/jit/option.py
import torch from colossalai.accelerator import get_accelerator from .bias_dropout_add import bias_dropout_add_fused_train from .bias_gelu import bias_gelu_impl JIT_OPTIONS_SET = False def set_jit_fusion_options(): """Set PyTorch JIT layer fusion options.""" # LSG: the latest pytorch and CUDA versions may not support # the following jit settings global JIT_OPTIONS_SET if JIT_OPTIONS_SET == False: # flags required to enable jit fusion kernels TORCH_MAJOR = int(torch.__version__.split(".")[0]) TORCH_MINOR = int(torch.__version__.split(".")[1]) if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR >= 10): # nvfuser torch._C._jit_set_profiling_executor(True) torch._C._jit_set_profiling_mode(True) torch._C._jit_override_can_fuse_on_cpu(False) torch._C._jit_override_can_fuse_on_gpu(False) torch._C._jit_set_texpr_fuser_enabled(False) torch._C._jit_set_nvfuser_enabled(True) torch._C._debug_set_autodiff_subgraph_inlining(False) else: # legacy pytorch fuser torch._C._jit_set_profiling_mode(False) torch._C._jit_set_profiling_executor(False) torch._C._jit_override_can_fuse_on_cpu(True) torch._C._jit_override_can_fuse_on_gpu(True) JIT_OPTIONS_SET = True def warmup_jit_fusion( batch_size: int, hidden_size: int, seq_length: int = 512, vocab_size: int = 32768, dtype: torch.dtype = torch.float32, ): """Compile JIT functions before the main training steps""" from colossalai.legacy.nn.layer.colossalai_layer import Embedding, Linear embed = Embedding(vocab_size, hidden_size).to(get_accelerator().get_current_device()) linear_1 = Linear(hidden_size, hidden_size * 4, skip_bias_add=True).to(get_accelerator().get_current_device()) linear_2 = Linear(hidden_size * 4, hidden_size, skip_bias_add=True).to(get_accelerator().get_current_device()) x = torch.randint( vocab_size, (batch_size, seq_length), dtype=torch.long, device=get_accelerator().get_current_device() ) x = embed(x) y, y_bias = linear_1(x) z, z_bias = linear_2(y) # Warmup JIT fusions with the input grad_enable state of both forward # prop and recomputation for bias_grad, input_grad in zip([True, True], [False, True]): for _ in range(10): bias = torch.rand_like(y_bias, dtype=dtype, device=get_accelerator().get_current_device()) input_ = torch.rand_like(y, dtype=dtype, device=get_accelerator().get_current_device()) bias.requires_grad, input_.requires_grad = bias_grad, input_grad bias_gelu_impl(input_, bias) # Warmup fused bias+dropout+add dropout_rate = 0.1 # Warmup JIT fusions with the input grad_enable state of both forward # prop and recomputation for input_grad, bias_grad, residual_grad in zip([False, True], [True, True], [True, True]): for _ in range(10): input_ = torch.rand_like(z, dtype=dtype, device=get_accelerator().get_current_device()) residual = torch.rand_like(x, dtype=dtype, device=get_accelerator().get_current_device()) bias = torch.rand_like(z_bias, dtype=dtype, device=get_accelerator().get_current_device()) input_.requires_grad = input_grad bias.requires_grad = bias_grad residual.requires_grad = residual_grad bias_dropout_add_fused_train(input_, bias, residual, dropout_rate) torch.cuda.empty_cache()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
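A short sketch of how the two helpers above are typically combined before training; it assumes a CUDA device and an initialized ColossalAI environment, since warmup_jit_fusion constructs legacy ColossalAI layers internally, and the sizes below are illustrative only.

import torch

from colossalai.kernel.jit import set_jit_fusion_options
from colossalai.kernel.jit.option import warmup_jit_fusion

# Select nvfuser or the legacy fuser depending on the installed torch version.
set_jit_fusion_options()
# Run the fused bias-gelu / bias-dropout-add kernels a few times so that JIT
# compilation does not happen inside the first training step.
warmup_jit_fusion(batch_size=8, hidden_size=1024, seq_length=512, dtype=torch.float16)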
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/jit/bias_dropout_add.py
colossalai/kernel/jit/bias_dropout_add.py
import torch


def bias_dropout_add(x, bias, residual, prob, training):
    # type: (Tensor, Tensor, Tensor, float, bool) -> Tensor
    out = torch.nn.functional.dropout(x + bias, p=prob, training=training)
    out = residual + out
    return out


@torch.jit.script
def bias_dropout_add_fused_train(
    x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
) -> torch.Tensor:
    return bias_dropout_add(x, bias, residual, prob, True)


@torch.jit.script
def bias_dropout_add_fused_inference(
    x: torch.Tensor, bias: torch.Tensor, residual: torch.Tensor, prob: float
) -> torch.Tensor:
    return bias_dropout_add(x, bias, residual, prob, False)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
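A small self-contained sketch exercising the fused train/inference variants above; the shapes are arbitrary and chosen only for illustration.

import torch

from colossalai.kernel.jit.bias_dropout_add import (
    bias_dropout_add_fused_inference,
    bias_dropout_add_fused_train,
)

x = torch.randn(4, 16, 1024)
bias = torch.randn(1024)             # broadcasts over the leading dimensions
residual = torch.randn(4, 16, 1024)

out_train = bias_dropout_add_fused_train(x, bias, residual, 0.1)      # dropout active
out_infer = bias_dropout_add_fused_inference(x, bias, residual, 0.1)  # dropout disabled
assert out_train.shape == out_infer.shape == residual.shape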
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/jit/bias_gelu.py
colossalai/kernel/jit/bias_gelu.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.

import torch

###### BIAS GELU FUSION/ NO AUTOGRAD ################
# 1/sqrt(2*pi)-> 0.3989423
# 1/sqrt(2)   -> 0.70710678
# sqrt(2/pi)  -> 0.79788456


# this function is tanh approximation of gelu
# actual gelu is:
# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
@torch.jit.script
def bias_gelu(bias, y):
    x = bias + y
    return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))


# gradient of tanh approximation of gelu
# gradient of actual gelu is:
# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
@torch.jit.script
def bias_gelu_back(g, bias, y):
    x = bias + y
    tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
    # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
    ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
    return ff * g


class GeLUFunction(torch.autograd.Function):
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, bias):
        ctx.save_for_backward(input, bias)
        return bias_gelu(bias, input)

    @staticmethod
    def backward(ctx, grad_output):
        input, bias = ctx.saved_tensors
        tmp = bias_gelu_back(grad_output, bias, input)
        return tmp, tmp


bias_gelu_impl = GeLUFunction.apply
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
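A quick numerical check, included here only as an illustrative sketch: the fused bias + tanh-approximated GELU above should closely match F.gelu(x + bias, approximate="tanh") on PyTorch 1.12 and later.

import torch
import torch.nn.functional as F

from colossalai.kernel.jit.bias_gelu import bias_gelu_impl

x = torch.randn(8, 1024)
bias = torch.randn(1024)

fused = bias_gelu_impl(x, bias)
reference = F.gelu(x + bias, approximate="tanh")
print(torch.allclose(fused, reference, atol=1e-5))  # expected: True, up to fp32 rounding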
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/jit/__init__.py
colossalai/kernel/jit/__init__.py
from .bias_dropout_add import bias_dropout_add_fused_inference, bias_dropout_add_fused_train
from .bias_gelu import bias_gelu_impl
from .option import set_jit_fusion_options

__all__ = [
    "bias_dropout_add_fused_train",
    "bias_dropout_add_fused_inference",
    "bias_gelu_impl",
    "set_jit_fusion_options",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/fused_rotary_embedding.py
colossalai/kernel/triton/fused_rotary_embedding.py
import torch import triton import triton.language as tl @triton.jit def fused_rotary_emb( q, k, cos_cache, sin_cache, cumsum_lengths, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_dim_stride, q_total_tokens, Q_HEAD_NUM: tl.constexpr, K_HEAD_NUM: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_HEAD: tl.constexpr, BLOCK_SIZE: tl.constexpr, N_ELEMENTS: tl.constexpr, ): block_head_index = tl.program_id(0) block_group_index = tl.program_id(1) group_token_index = tl.program_id(2) idx = block_group_index * BLOCK_SIZE + group_token_index # original seq_idx and pos cumsum_lens = tl.load(cumsum_lengths + tl.arange(0, N_ELEMENTS)) ori_seq_idx = idx - tl.max(tl.where(cumsum_lens <= idx, cumsum_lens, 0)) cos = tl.load( cos_cache + ori_seq_idx * cos_token_stride + tl.arange(0, HEAD_DIM // 2) * cos_dim_stride ) # [1,HEAD_DIM//2] sin = tl.load(sin_cache + ori_seq_idx * cos_token_stride + tl.arange(0, HEAD_DIM // 2) * cos_dim_stride) cur_head_range = block_head_index * BLOCK_HEAD + tl.arange(0, BLOCK_HEAD) dim_range0 = tl.arange(0, HEAD_DIM // 2) dim_range1 = tl.arange(HEAD_DIM // 2, HEAD_DIM) off_q0 = ( idx * q_token_stride + cur_head_range[None, :, None] * q_head_stride + dim_range0[None, None, :] * head_dim_stride ) off_q1 = ( idx * q_token_stride + cur_head_range[None, :, None] * q_head_stride + dim_range1[None, None, :] * head_dim_stride ) off_k0 = ( idx * k_token_stride + cur_head_range[None, :, None] * k_head_stride + dim_range0[None, None, :] * head_dim_stride ) off_k1 = ( idx * q_token_stride + cur_head_range[None, :, None] * k_head_stride + dim_range1[None, None, :] * head_dim_stride ) q_0 = tl.load( q + off_q0, mask=((cur_head_range[None, :, None] < Q_HEAD_NUM) & (idx < q_total_tokens)), other=0.0, ) q_1 = tl.load( q + off_q1, mask=((cur_head_range[None, :, None] < Q_HEAD_NUM) & (idx < q_total_tokens)), other=0.0, ) k_0 = tl.load( k + off_k0, mask=((cur_head_range[None, :, None] < K_HEAD_NUM) & (idx < q_total_tokens)), other=0.0, ) k_1 = tl.load( k + off_k1, mask=((cur_head_range[None, :, None] < K_HEAD_NUM) & (idx < q_total_tokens)), other=0.0, ) out_q0 = q_0 * cos - q_1 * sin out_q1 = k_0 * sin + k_1 * cos out_k0 = q_0 * cos - q_1 * sin out_k1 = k_0 * sin + k_1 * cos # concat tl.store( q + off_q0, out_q0, mask=((cur_head_range[None, :, None] < Q_HEAD_NUM) & (idx < q_total_tokens)), ) tl.store( q + off_q1, out_q1, mask=((cur_head_range[None, :, None] < Q_HEAD_NUM) & (idx < q_total_tokens)), ) tl.store( k + off_k0, out_k0, mask=((cur_head_range[None, :, None] < K_HEAD_NUM) & (idx < q_total_tokens)), ) tl.store( k + off_k1, out_k1, mask=((cur_head_range[None, :, None] < K_HEAD_NUM) & (idx < q_total_tokens)), ) def fused_rotary_embedding( q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, lengths, ): """ Args: q: query tensor, [total_tokens, head_num, head_dim] k: key tensor, [total_tokens, head_num, head_dim] cos: cosine for rotary embedding, [max_position_len, head_dim] sin: sine for rotary embedding, [max_position_len, head_dim] lengths [num_seqs] """ q_total_tokens, q_head_num, head_dim = q.shape assert q.size(0) == k.size(0) BLOCK_HEAD = 4 BLOCK_SIZE = 8 cumsum_lens = torch.cumsum(lengths, dim=0) grid = (triton.cdiv(q_head_num, BLOCK_HEAD), triton.cdiv(q_total_tokens, BLOCK_SIZE), BLOCK_SIZE) if head_dim >= 128: num_warps = 8 else: num_warps = 4 q_token_stride = q.stride(0) q_head_stride = q.stride(1) head_dim_stride = q.stride(2) k_token_stride = k.stride(0) k_head_stride = k.stride(1) k_head_num = q.shape[1] 
cos_token_stride = cos.stride(0) cos_dim_stride = cos.stride(1) fused_rotary_emb[grid]( q, k, cos, sin, cumsum_lens, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_dim_stride, q_total_tokens, Q_HEAD_NUM=q_head_num, K_HEAD_NUM=k_head_num, HEAD_DIM=head_dim, BLOCK_HEAD=BLOCK_HEAD, BLOCK_SIZE=BLOCK_SIZE, N_ELEMENTS=triton.next_power_of_2(q_total_tokens), num_warps=num_warps, )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/rotary_cache_copy.py
colossalai/kernel/triton/rotary_cache_copy.py
import torch import triton import triton.language as tl @triton.jit def prefill_cache_kernel( cos_cache, sin_cache, cumsum_lengths, cos_output, sin_output, cache_stride, hidden_stride, total_length, HIDDEN_DIM: tl.constexpr, N_ELEMENTS: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): idx0 = tl.program_id(axis=0) idx1 = tl.program_id(axis=1) idx = idx0 * BLOCK_SIZE + idx1 # original seq_idx and pos cumsum_lens = tl.load(cumsum_lengths + tl.arange(0, N_ELEMENTS)) ori_seq_idx = idx - tl.max(tl.where(cumsum_lens <= idx, cumsum_lens, 0)) cos_cache_part = tl.load( cos_cache + ori_seq_idx * cache_stride + tl.arange(0, HIDDEN_DIM) * hidden_stride, mask=idx < total_length ) sin_cache_part = tl.load( sin_cache + ori_seq_idx * cache_stride + tl.arange(0, HIDDEN_DIM) * hidden_stride, mask=idx < total_length ) tl.store( cos_output + idx * cache_stride + tl.arange(0, HIDDEN_DIM) * hidden_stride, cos_cache_part, mask=idx < total_length, ) tl.store( sin_output + idx * cache_stride + tl.arange(0, HIDDEN_DIM) * hidden_stride, sin_cache_part, mask=idx < total_length, ) @triton.jit def decoding_cache_kernel( cos_cache, sin_cache, lengths, cos_output, sin_output, cache_stride, hidden_stride, HIDDEN_DIM: tl.constexpr, NUM_SEQS: tl.constexpr, BLOCK_SIZE: tl.constexpr, ): idx = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) ori_seq_idx = tl.load(lengths + idx, mask=(idx < NUM_SEQS), other=None) # [BLOCK_SIZE,] cos_cache_part = tl.load( cos_cache + ori_seq_idx[:, None] * cache_stride + tl.arange(0, HIDDEN_DIM)[None, :] * hidden_stride, mask=idx[:, None] < NUM_SEQS, ) sin_cache_part = tl.load( sin_cache + ori_seq_idx[:, None] * cache_stride + tl.arange(0, HIDDEN_DIM)[None, :] * hidden_stride, mask=idx[:, None] < NUM_SEQS, ) tl.store( cos_output + (idx[:, None] * cache_stride + tl.arange(0, HIDDEN_DIM)[None, :] * hidden_stride), cos_cache_part, mask=idx[:, None] < NUM_SEQS, ) tl.store( sin_output + (idx[:, None] * cache_stride + tl.arange(0, HIDDEN_DIM)[None, :] * hidden_stride), sin_cache_part, mask=idx[:, None] < NUM_SEQS, ) def get_xine_cache(lengths: torch.Tensor, cos_cache: torch.Tensor, sin_cache: torch.Tensor, is_prompts: bool = False): """ Transform cos/sin cache into no pad sequence, with two different modes. Args: lengths: shape(num_seqs,), stores lenghth of each sequence. cache: shape(max_rotary_position(e.g.2048), head_dim), cos/sin cache constrcuted in model. is_prompts: bool, mark if in prefill mode. For prefill mode: cos/sin cache for each sequence is equal to its length. For decoding mode: cos/sin cache is only needed for the last token. 
""" assert cos_cache.shape[1] == sin_cache.shape[1] _, hidden_dim = cos_cache.shape num_seqs = lengths.numel() if hidden_dim >= 256: num_warps = 16 elif hidden_dim >= 128: num_warps = 8 else: num_warps = 4 cache_stride = cos_cache.stride(0) hidden_stride = cos_cache.stride(1) if is_prompts: BLOCK_SIZE = 16 total_length = lengths.sum().item() cumsum_lens = torch.cumsum(lengths, dim=0) cos_output = torch.empty((total_length, hidden_dim), dtype=cos_cache.dtype, device=cos_cache.device) sin_output = torch.empty((total_length, hidden_dim), dtype=sin_cache.dtype, device=sin_cache.device) grid = (triton.cdiv(total_length, BLOCK_SIZE), BLOCK_SIZE) prefill_cache_kernel[grid]( cos_cache, sin_cache, cumsum_lens, cos_output, sin_output, cache_stride, hidden_stride, total_length, HIDDEN_DIM=hidden_dim, N_ELEMENTS=triton.next_power_of_2(num_seqs), BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps, ) else: BLOCK_SIZE = 4 nlengths = torch.as_tensor(lengths) - 1 cos_output = torch.empty((num_seqs, hidden_dim), dtype=cos_cache.dtype, device=cos_cache.device) sin_output = torch.empty((num_seqs, hidden_dim), dtype=sin_cache.dtype, device=sin_cache.device) grid = (triton.cdiv(num_seqs, BLOCK_SIZE),) decoding_cache_kernel[grid]( cos_cache, sin_cache, nlengths, cos_output, sin_output, cache_stride, hidden_stride, HIDDEN_DIM=hidden_dim, NUM_SEQS=num_seqs, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps, ) return cos_output, sin_output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
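A sketch of gathering per-token cos/sin values with the helper above, covering both prefill and decoding modes; it assumes a CUDA device with Triton installed, and the cache sizes are illustrative.

import torch

from colossalai.kernel.triton.rotary_cache_copy import get_xine_cache

head_dim, max_pos = 64, 2048
cos_cache = torch.randn(max_pos, head_dim, device="cuda", dtype=torch.float16)
sin_cache = torch.randn(max_pos, head_dim, device="cuda", dtype=torch.float16)
lengths = torch.tensor([12, 20], device="cuda")

# Prefill: one cos/sin row per token of every sequence -> [sum(lengths), head_dim]
cos_prefill, sin_prefill = get_xine_cache(lengths, cos_cache, sin_cache, is_prompts=True)

# Decoding: only the last position of each sequence -> [num_seqs, head_dim]
cos_decode, sin_decode = get_xine_cache(lengths, cos_cache, sin_cache, is_prompts=False)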
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/llama_act_combine_kernel.py
colossalai/kernel/triton/llama_act_combine_kernel.py
from functools import reduce from typing import Any, Tuple import torch from torch import Tensor from torch.cuda.amp import custom_bwd, custom_fwd try: import triton import triton.language as tl HAS_TRITON = True except ImportError: HAS_TRITON = False print("please install triton from https://github.com/openai/triton") if HAS_TRITON: PRECISION_MAP = { "fp32": (0, torch.float32), "fp16": (1, torch.float16), "bf16": (2, torch.bfloat16), } @triton.jit def _llama_act_combine_forward( X_GATE1, X_GATE2, X_UP, Y, stride, # how much to increase the pointer when moving by 1 row N, # number of columns in X BLOCK_SIZE: tl.constexpr, ): # Map the program id to the row of X and Y it should compute. row = tl.program_id(0) X_GATE1 += row * stride X_GATE2 += row * stride X_UP += row * stride Y += row * stride # do activation and combine, and store in y for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) mask = cols < N x_gate1 = tl.load(X_GATE1 + cols, mask=mask, other=0.0) x_gate2 = tl.load(X_GATE2 + cols, mask=mask, other=0.0) x_up = tl.load(X_UP + cols, mask=mask, other=0.0) x_gate2_sigmoid = tl.sigmoid(x_gate2.to(tl.float32)).to(x_gate2.dtype) y = x_gate1 * x_gate2 * x_gate2_sigmoid * x_up # Write output tl.store(Y + cols, y, mask=mask) @triton.jit def _llama_act_combine_backward( X_GATE1, X_GATE2, X_UP, X_GATE1_GRAD, X_GATE2_GRAD, X_UP_GRAD, Y_GRAD, stride, # how much to increase the pointer when moving by 1 row N, # number of columns in X BLOCK_SIZE: tl.constexpr, ): # Map the program id to the row of X and Y it should compute. row = tl.program_id(0) X_GATE1 += row * stride X_GATE2 += row * stride X_UP += row * stride X_GATE1_GRAD += row * stride X_GATE2_GRAD += row * stride X_UP_GRAD += row * stride Y_GRAD += row * stride # do activation and combine, and store in y for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) mask = cols < N x_gate1 = tl.load(X_GATE1 + cols, mask=mask, other=0.0) x_gate2 = tl.load(X_GATE2 + cols, mask=mask, other=0.0) x_up = tl.load(X_UP + cols, mask=mask, other=0.0) y_grad = tl.load(Y_GRAD + cols, mask=mask, other=0.0) # forward: y = x_gate1 * x_gate2 * tl.sigmoid(x_gate2) * x_up x_gate2_sigmoid = tl.sigmoid(x_gate2.to(tl.float32)).to(x_gate2.dtype) x_gate2_act = y_grad * x_gate2 * x_gate2_sigmoid x_up_grad = x_gate2_act * x_gate1 x_gate1_grad = x_gate2_act * x_up # grad(x*sigmoid(x)) = sigmoid(x) + x * sigmoid(x) * [1 − sigmoid(x)] # = sigmoid(x) * {1 + x * [(1 − sigmoid(x)]} x_gate2_grad = (y_grad * x_gate1 * x_up) * x_gate2_sigmoid * (1 + x_gate2 * (1 - x_gate2_sigmoid)) # Write output tl.store(X_GATE1_GRAD + cols, x_gate1_grad, mask=mask) tl.store(X_GATE2_GRAD + cols, x_gate2_grad, mask=mask) tl.store(X_UP_GRAD + cols, x_up_grad, mask=mask) class LlamaActCombine(torch.autograd.Function): """ act(x_gate) * x_up Args: x_gate (torch.Tensor): (b, l, 2d) x_gate x_up (torch.Tensor): (b, l, d) x_up activation (str): only support swiglu precision (str): fp32, fp16, bf16 """ @staticmethod @custom_fwd def forward(ctx: Any, x_gate: torch.Tensor, x_up: torch.Tensor, activation: str = "swiglu") -> torch.Tensor: """ act(x_gate) * x_up Args: x_gate (torch.Tensor): (b, l, 2d) x gate x_up (torch.Tensor): (b, l, d) x up activation (str): only support swiglu """ assert activation == "swiglu", "Only swiglu is supported" # split x gate assert x_gate.shape[-1] % 2 == 0, "axis size must be divisible by 2" x_gate1, x_gate2 = torch.split(x_gate, x_gate.shape[-1] // 2, -1) x_gate1 = x_gate1.contiguous() x_gate2 = x_gate2.contiguous() if not 
x_up.is_contiguous(): x_up = x_up.contiguous() # assert shape assert x_gate1.shape == x_gate2.shape == x_up.shape # add ctx for backward if x_gate.requires_grad: ctx.save_for_backward(x_gate1, x_gate2, x_up) # allocate output y = torch.empty_like(x_up) M, N = reduce(lambda x, y: x * y, x_up.shape[:-1]), x_up.shape[-1] # Less than 64KB per feature: enqueue fused kernel MAX_FUSED_SIZE = 65536 // x_gate.element_size() BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) if N > BLOCK_SIZE: raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps num_warps = min(max(BLOCK_SIZE // 256, 1), 8) # restore setting ctx.M, ctx.N, ctx.BLOCK_SIZE, ctx.num_warps = M, N, BLOCK_SIZE, num_warps # enqueue kernel _llama_act_combine_forward[(M,)]( x_gate1, x_gate2, x_up, y, x_up.stride(-2), N, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps ) return y @staticmethod @custom_bwd def backward(ctx: Any, *grad_outputs: Tensor) -> Tuple[Tensor, Tensor, None, None]: # restore from ctx (x_gate1, x_gate2, x_up) = ctx.saved_tensors M, N, BLOCK_SIZE, num_warps = ctx.M, ctx.N, ctx.BLOCK_SIZE, ctx.num_warps # init grad y_grad = grad_outputs[0] x_gate1_grad, x_gate2_grad, x_up_grad = ( torch.empty_like(x_gate1), torch.empty_like(x_gate2), torch.empty_like(x_up), ) # enqueue kernel _llama_act_combine_backward[(M,)]( x_gate1, x_gate2, x_up, x_gate1_grad, x_gate2_grad, x_up_grad, y_grad, x_up.stride(-2), N, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps, ) x_gate_grad = torch.cat([x_gate1_grad, x_gate2_grad], dim=-1) return x_gate_grad, x_up_grad, None, None
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
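A minimal sketch of the fused SwiGLU combine above; it assumes a CUDA device with Triton installed, and x_gate packs the two gate projections along the last dimension as the autograd function expects.

import torch

from colossalai.kernel.triton.llama_act_combine_kernel import LlamaActCombine

b, l, d = 2, 16, 1024
x_gate = torch.randn(b, l, 2 * d, device="cuda", dtype=torch.float16)
x_up = torch.randn(b, l, d, device="cuda", dtype=torch.float16)

# Equivalent to x_gate1 * silu(x_gate2) * x_up, computed in a single fused kernel.
y = LlamaActCombine.apply(x_gate, x_up)
assert y.shape == (b, l, d)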
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/qkv_matmul_kernel.py
colossalai/kernel/triton/qkv_matmul_kernel.py
try: import triton import triton.language as tl HAS_TRITON = True except ImportError: HAS_TRITON = False print("please install triton from https://github.com/openai/triton") if HAS_TRITON: """ this kernel function is modified from https://triton-lang.org/main/getting-started/tutorials/03-matrix-multiplication.html """ @triton.jit def qkv_gemm_4d_kernel( a_ptr, b_ptr, c_ptr, M, N, K, stride_ab, stride_ah, stride_am, stride_ak, stride_bb, stride_bh, stride_bk, stride_bn, stride_cb, stride_ch, stride_cm, stride_cn, scale, # Meta-parameters BLOCK_SIZE_M: tl.constexpr = 64, BLOCK_SIZE_N: tl.constexpr = 32, BLOCK_SIZE_K: tl.constexpr = 32, GROUP_SIZE_M: tl.constexpr = 8, ): r"""A kernel function which is used to do batch-matmul for Q*K^T or score_matrix * V for attention layer, where score_matrix is softmax(Q*V^T/sqrt(hidden_size)) Args: a_ptr(torch.Tensor): pointer to input tensor array (bs, M, h, K) or (bs, h, M, K) b_ptr(torch.Tensor): pointer to input tensor array (bs, N, h, K) or (bs, h, N, K) c_ptr(torch.Tensor): pointer to output tensor array (bs, M, h, N) or (bs, h, M, N) stride_ab(tl.constexpr): stride for bs-dimention for tensor array A stride_ah(tl.constexpr): stride for h-dimention for tensor array A stride_am(tl.constexpr): stride for m-dimention for tensor array A stride_ak(tl.constexpr): stride for k-dimention for tensor array A stride_bb(tl.constexpr): stride for bs-dimention for tensor array B stride_bh(tl.constexpr): stride for h-dimention for tensor array B stride_bk(tl.constexpr): stride for k-dimention for tensor array B stride_bn(tl.constexpr): stride for n-dimention for tensor array B stride_cb(tl.constexpr): stride for bs-dimention for tensor array output stride_ch(tl.constexpr): stride for h-dimention for tensor array output stride_cm(tl.constexpr): stride for m-dimention for tensor array output stride_cn(tl.constexpr): stride for n-dimention for tensor array output BLOCK_SIZE_M : tiling size for M-dimension of tensor Array a BLOCK_SIZE_N : tiling size for N-dimension of tensor Array b BLOCK_SIZE_K : tiling size for K-dimension of a and b GROUP_SIZE_M : group size for reducing cache miss, more details: """ num_pid_m = tl.cdiv(M, BLOCK_SIZE_M) num_pid_n = tl.cdiv(N, BLOCK_SIZE_N) batch = tl.program_id(axis=0) head = tl.program_id(axis=1) pid = tl.program_id(axis=2) # the following is from tutorial: https://triton-lang.org/main/getting-started/tutorials/03-matrix-multiplication.html num_pid_in_group = GROUP_SIZE_M * num_pid_n group_id = pid // num_pid_in_group first_pid_m = group_id * GROUP_SIZE_M group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE_M) pid_m = first_pid_m + (pid % group_size_m) pid_n = (pid % num_pid_in_group) // group_size_m offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_bn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) offs_k = tl.arange(0, BLOCK_SIZE_K) a_ptrs = ( a_ptr + batch * stride_ab + head * stride_ah + (offs_am[:, None] * stride_am + offs_k[None, :] * stride_ak) ) b_ptrs = ( b_ptr + batch * stride_bb + head * stride_bh + (offs_k[:, None] * stride_bk + offs_bn[None, :] * stride_bn) ) accumulator = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) for k in range(0, K, BLOCK_SIZE_K): a_mask = (offs_am[:, None] < M) & (offs_k[None, :] + k < K) b_mask = (offs_k[:, None] + k < K) & (offs_bn[None, :] < N) a = tl.load(a_ptrs, mask=a_mask, other=0.0) b = tl.load(b_ptrs, mask=b_mask, other=0.0) accumulator += tl.dot(a, b) a_ptrs += BLOCK_SIZE_K * stride_ak b_ptrs += BLOCK_SIZE_K * stride_bk accumulator = 
accumulator.to(c_ptr.dtype.element_ty) if scale > 0: accumulator = accumulator * scale.to(c_ptr.dtype.element_ty) offs_accumu_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) offs_accumu_n = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) c_ptrs = ( c_ptr + batch * stride_cb + head * stride_ch + stride_cm * offs_accumu_m[:, None] + stride_cn * offs_accumu_n[None, :] ) accumulator_mask = (offs_accumu_m[:, None] < M) & (offs_accumu_n[None, :] < N) tl.store(c_ptrs, accumulator, mask=accumulator_mask)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
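The file above only defines the raw kernel, so the host-side launch below is a hypothetical sketch: it computes scale * (Q @ K^T) for (bs, h, seq, dim) tensors by passing the key strides in transposed order, and assumes a CUDA device with Triton installed.

import torch
import triton

from colossalai.kernel.triton.qkv_matmul_kernel import qkv_gemm_4d_kernel

bs, h, M, K, N = 2, 8, 128, 64, 128
q = torch.randn(bs, h, M, K, device="cuda", dtype=torch.float16)
k = torch.randn(bs, h, N, K, device="cuda", dtype=torch.float16)
scores = torch.empty(bs, h, M, N, device="cuda", dtype=torch.float16)

BLOCK_M, BLOCK_N, BLOCK_K, GROUP_M = 64, 32, 32, 8
grid = (bs, h, triton.cdiv(M, BLOCK_M) * triton.cdiv(N, BLOCK_N))
qkv_gemm_4d_kernel[grid](
    q, k, scores, M, N, K,
    q.stride(0), q.stride(1), q.stride(2), q.stride(3),
    k.stride(0), k.stride(1), k.stride(3), k.stride(2),  # read k as (K, N), i.e. K^T
    scores.stride(0), scores.stride(1), scores.stride(2), scores.stride(3),
    1.0 / (K ** 0.5),
    BLOCK_SIZE_M=BLOCK_M, BLOCK_SIZE_N=BLOCK_N, BLOCK_SIZE_K=BLOCK_K, GROUP_SIZE_M=GROUP_M,
)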
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/softmax.py
colossalai/kernel/triton/softmax.py
import torch try: import triton import triton.language as tl HAS_TRITON = True except ImportError: HAS_TRITON = False print("please install triton from https://github.com/openai/triton") if HAS_TRITON: """ softmax kernel is modified based on https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/02-fused-softmax.py """ @triton.jit def softmax_kernel(output_ptr, input_ptr, row_stride, n_cols, mask_ptr, BLOCK_SIZE: tl.constexpr): r"""the kernel function for implementing softmax operator Args: output_ptr: the output after finishing softmax operation, (N, hidden_dim) input_ptr: the tensor of input, shape should be (N, hidden_dim) n_cols(tl.constexpr): the number of cols of input BLOCK_SIZE(tl.constexpr): the block_size of your hidden_dim dimension, typically BLOCK_SIZE >= hidden_dim """ row_idx = tl.program_id(0) row_start_ptr = input_ptr + row_idx * row_stride col_offsets = tl.arange(0, BLOCK_SIZE) input_ptrs = row_start_ptr + col_offsets row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float("inf")).to(tl.float32) row_minus_max = row - tl.max(row, axis=0) if mask_ptr is not None: # load mask into SRAM mask_ptrs = (mask_ptr + (row_indx * row_stride)) + col_offsets mask = tl.load(mask_ptrs, mask=col_offsets < n_cols, other=0).to(tl.float32) # update row_minus_max = row_minus_max + mask numerator = tl.exp(row_minus_max) denominator = tl.sum(numerator, axis=0) softmax_output = numerator / denominator output_row_start_ptr = output_ptr + row_idx * row_stride output_ptrs = output_row_start_ptr + col_offsets # Write back output to DRAM tl.store(output_ptrs, softmax_output, mask=col_offsets < n_cols) def softmax(input: torch.Tensor, mask: torch.Tensor = None, dim=-1) -> torch.Tensor: if mask is not None: assert input[-1] == mask[-1], "the last dimentions should be the same for input and mask" assert dim == -1 or dim == len(input.shape) - 1, "currently softmax layer only support last dimention" hidden_dim = input.shape[-1] output = torch.empty_like(input) input = input.view(-1, hidden_dim) if mask is not None: mask = mask.view(-1, hidden_dim) assert input.shape[0] == mask.shape[0], "the fist dimention of mask and input should be the same" num_rows, num_cols = input.shape block_size = max(triton.next_power_of_2(num_cols), 2) num_warps = 16 if block_size >= 4096: num_warps = 16 elif block_size >= 2048: num_warps = 8 else: num_warps = 4 if num_rows <= 350000: grid = (num_rows,) softmax_kernel[grid]( output, input, input.stride(0), num_cols, mask, BLOCK_SIZE=block_size, num_warps=num_warps ) else: grid = lambda meta: () grid = lambda meta: (triton.cdiv(num_rows, meta["BLOCK_M"]),) if block_size >= 4096: pass elif block_size >= 2048: pass softmax_kernel[grid]( output_ptr=output, input_ptr=input, row_stride=input.stride(0), n_rows=num_rows, n_cols=num_cols, mask_ptr=mask, # currently manually setting up size BLOCK_M=32, BLOCK_SIZE=block_size, ) return output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
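An illustrative call to the Triton softmax wrapper above; it assumes a CUDA device with Triton installed and should agree with torch.softmax along the last dimension.

import torch

from colossalai.kernel.triton.softmax import softmax

x = torch.randn(32, 1024, device="cuda", dtype=torch.float16)
y = softmax(x)                                    # Triton kernel path (no mask)
y_ref = torch.softmax(x.float(), dim=-1).half()   # PyTorch reference
print(torch.allclose(y, y_ref, atol=1e-3))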
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/kvcache_copy.py
colossalai/kernel/triton/kvcache_copy.py
import torch import triton import triton.language as tl # Triton 2.1.0 # supports two types of cache layouts # 1. [num_blocks, num_kv_heads, block_size, head_dim] # 2. [num_blocks, num_kv_heads, head_dim // x, block_size, x] @triton.jit def _copy_to_kcache_seqlen_n_kernel( K, # K or V KCache, # [num_blocks, num_kv_heads, head_dim // x, block_size, x] BLOCK_TABLES, seq_lengths, stride_kt, stride_kh, stride_kd, stride_kcb, stride_kch, stride_kcsplit_x, stride_kcs, stride_kcx, stride_bts, stride_btb, block_size, n_tokens, HEAD_DIM: tl.constexpr, KCACHE_X: tl.constexpr, ): # `n_tokens` is used to specify the number of tokens to copy for each sequence # When n_tokens > 1, tokens from different sequences are packed into the first dimension of the grid, # `seq_lengths` must be the lengths of sequences counting the number of tokens to copy # E.g. if n_tokens = 5, seq_lengths = [12, 15], then the already-copied position ids are [0-6, 0-9] # for the two sequences, respectively. And the position ids to be copied are [7-11, 9-14]. # When n_tokens = 1, consider token idx as the sequence idx, since it's only used during regular decoding stage cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // n_tokens # `cur_token_shift` is only valid and functional when `n_tokens` > 1 cur_token_shift = cur_token_idx - (n_tokens * (cur_seq_idx + 1)) cur_kv_head_idx = tl.program_id(1) split_x_idx = tl.program_id(2) past_kv_seq_len = tl.load(seq_lengths + cur_seq_idx) + cur_token_shift last_bt_block_idx = past_kv_seq_len // block_size block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb) offset_last_block = past_kv_seq_len % block_size offsets_dmodel = split_x_idx * KCACHE_X + tl.arange(0, KCACHE_X) offsets_k = cur_token_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel * stride_kd k = tl.load(K + offsets_k) offsets_kcache = ( block_id * stride_kcb + cur_kv_head_idx * stride_kch + split_x_idx * stride_kcsplit_x + offset_last_block * stride_kcs + tl.arange(0, KCACHE_X) ) tl.store(KCache + offsets_kcache, k) return # Triton 2.1.0 @triton.jit def _copy_to_kvcache_seqlen1_kernel( K, V, KCache, VCache, BLOCK_TABLES, context_lengths, stride_kt, stride_kh, stride_kd, stride_vt, stride_vh, stride_vd, stride_kcb, stride_kch, stride_kcsplit_x, stride_kcs, stride_kcd, stride_vcb, stride_vch, stride_vcs, stride_vcd, stride_bts, stride_btb, block_size, HEAD_DIM: tl.constexpr, KCACHE_X: tl.constexpr, ): cur_seq_idx = tl.program_id(0) cur_kv_head_idx = tl.program_id(1) past_kv_seq_len = tl.load(context_lengths + cur_seq_idx) - 1 last_bt_block_idx = past_kv_seq_len // block_size block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts block_id = tl.load(block_table_ptr + last_bt_block_idx * stride_btb) offsets_in_last_block = past_kv_seq_len % block_size range_x = tl.arange(0, KCACHE_X) offsets_dmodel_x_partition = tl.arange(0, KCACHE_X) for split_x in tl.static_range(HEAD_DIM // KCACHE_X): offsets_dmodel_x_partition = tl.arange(split_x * KCACHE_X, (split_x + 1) * KCACHE_X) offsets_k = cur_seq_idx * stride_kt + cur_kv_head_idx * stride_kh + offsets_dmodel_x_partition * stride_kd k = tl.load(K + offsets_k) offsets_v = cur_seq_idx * stride_vt + cur_kv_head_idx * stride_vh + offsets_dmodel_x_partition * stride_vd v = tl.load(V + offsets_v) offsets_kcache = ( block_id * stride_kcb + cur_kv_head_idx * stride_kch + split_x * stride_kcsplit_x + offsets_in_last_block * stride_kcs + range_x ) tl.store(KCache + offsets_kcache, k) offsets_vcache = ( 
block_id * stride_vcb + cur_kv_head_idx * stride_vch + offsets_in_last_block * stride_vcs + offsets_dmodel_x_partition * stride_vcd ) tl.store(VCache + offsets_vcache, v) return def copy_k_to_blocked_cache( k: torch.Tensor, k_cache: torch.Tensor, kv_lengths: torch.Tensor, block_tables: torch.Tensor, n: int = 1, use_new_kcache_layout: bool = False, ): """ Copy keys or values to the blocked key/value cache during decoding stage. Args: k (torch.Tensor): [bsz, 1, num_kv_heads, head_dim]/[bsz, num_kv_heads, head_dim] - Keys or values during decoding with seq len 1. [bsz * n, num_kv_heads, head_dim] - Keys or values with seq len n k_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] - Blocked key or value cache. new KCache Layout [num_blocks, num_kv_heads, head_dim // x, block_size, x] kv_lengths (torch.Tensor): [bsz] - Past key/value sequence lengths plus current sequence length for each sequence. block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] - Block tables for each sequence. n (int): Number of tokens to copy for each sequence. Default to 1. use_new_kcache_layout (bool): Whether to use the new layout for kcache. Default to False. """ assert k.dtype == k_cache.dtype, "Expected consistent dtype for tensor and cache." if k.dim() == 4: k = k.reshape(-1, k.size(-2), k.size(-1)) k_shape = k.shape bsz, num_kv_heads, head_dim = k_shape # NOTE when n > 1, the shape of k is [bsz * n, num_kv_heads, head_dim] if n > 1: assert bsz % n == 0, "Each sequence should have the same number of tokens to be copied" bsz = bsz // n assert kv_lengths.shape[0] == block_tables.shape[0] == bsz, ( f"Got incompatible batch size (number of seqs):\n" f" Past kv sequence lengths bsz {kv_lengths.shape[0]}; " f" block tables bsz {block_tables.shape[0]}, input k batch size {bsz}" ) k_cache_shape = k_cache.shape # Modify if the shape of kv cahce is changed. block_size = k_cache_shape[-2] x = head_dim stride_kcsplit_x, stride_kcs, stride_kcd = 0, k_cache.stride(2), k_cache.stride(3) if use_new_kcache_layout: # when using kcache layout [num_blocks, num_kv_heads, head_dim // x, block_size, x] assert ( len(k_cache_shape) == 5 and k_cache_shape[1] == k_shape[1] and k_cache_shape[2] * k_cache_shape[4] == k_shape[2] ), f"Incompatible k_cache shape {k_cache_shape} with k shape {k_shape}" x = k_cache.size(-1) stride_kcsplit_x, stride_kcs, stride_kcd = k_cache.stride()[2:] num_warps = 8 if head_dim > 128 else 4 grid = (bsz * n, num_kv_heads, head_dim // x) _copy_to_kcache_seqlen_n_kernel[grid]( k, k_cache, block_tables, kv_lengths, k.stride(0), k.stride(1), k.stride(2), k_cache.stride(0), k_cache.stride(1), stride_kcsplit_x, stride_kcs, stride_kcd, block_tables.stride(0), block_tables.stride(1), block_size, n_tokens=n, HEAD_DIM=head_dim, KCACHE_X=x, num_warps=num_warps, ) def copy_kv_to_blocked_cache( k: torch.Tensor, v: torch.Tensor, k_cache: torch.Tensor, v_cache: torch.Tensor, kv_lengths: torch.Tensor, block_tables: torch.Tensor, use_new_kcache_layout: bool = False, ): """ Copy keys or values to the blocked key/value cache during decoding stage. Args: k (torch.Tensor): [bsz, 1, num_kv_heads, head_dim]/[bsz, num_kv_heads, head_dim] - Keys during decoding with seq len 1. v (torch.Tensor): [bsz, 1, num_kv_heads, head_dim]/[bsz, num_kv_heads, head_dim] - Values during decoding with seq len 1. k_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] - Blocked key cache. v_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] - Blocked value cache. 
kv_lengths (torch.Tensor): [bsz] - Past key/value sequence lengths plus current sequence length for each sequence. block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] - Block tables for each sequence. use_new_kcache_layout (bool): Whether to use the new layout for kcache. Default to False. """ k_cache_shape = k_cache.shape v_cache_shape = v_cache.shape if use_new_kcache_layout: assert ( len(k_cache_shape) == 5 and k_cache_shape[1] == v_cache_shape[1] and k_cache_shape[2] * k_cache_shape[4] == v_cache_shape[3] ), f"Invalid KCache shape {k_cache_shape} and VCache shape {v_cache_shape}" else: assert k.size(-1) == k_cache_shape[-1], "Incompatible head dim" assert ( k_cache_shape == v_cache_shape ), f"Incompatible KCache shape {k_cache_shape} and VCache shape {v_cache_shape}" assert v.size(-1) == v_cache_shape[-1], "Incompatible head dim" k = k.squeeze(1) if k.dim() == 4 else k assert k.dim() == 3, f"Incompatible k dim {k.dim()}" v = v.squeeze(1) if v.dim() == 4 else v assert v.dim() == 3, f"Incompatible v dim {v.dim()}" bsz, num_kv_heads, head_dim = k.shape assert kv_lengths.shape[0] == block_tables.shape[0] == bsz, ( f"Got incompatible batch size (number of seqs):\n" f" Past kv sequence lengths bsz {kv_lengths.shape[0]}; " f" block tables bsz {block_tables.shape[0]}, input k batch size {bsz}" ) # Modify if the shape of kv cahce is changed. block_size = k_cache.size(-2) x = head_dim stride_kcsplit_x, stride_kcs, stride_kcd = 0, k_cache.stride(2), k_cache.stride(3) if use_new_kcache_layout: x = k_cache.size(-1) stride_kcsplit_x, stride_kcs, stride_kcd = k_cache.stride()[2:] num_warps = 8 if head_dim > 128 else 4 grid = (bsz, num_kv_heads) _copy_to_kvcache_seqlen1_kernel[grid]( k, v, k_cache, v_cache, block_tables, kv_lengths, k.stride(0), k.stride(1), k.stride(2), v.stride(0), v.stride(1), v.stride(2), k_cache.stride(0), k_cache.stride(1), stride_kcsplit_x, stride_kcs, stride_kcd, v_cache.stride(0), v_cache.stride(1), v_cache.stride(2), v_cache.stride(3), block_tables.stride(0), block_tables.stride(1), block_size, HEAD_DIM=head_dim, KCACHE_X=x, num_warps=num_warps, )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
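A sketch of copying the newest token's key/value into a blocked (paged) cache during decoding, using the default cache layout; it assumes a CUDA device with Triton installed, and the block-table contents are made up for illustration.

import torch

from colossalai.kernel.triton.kvcache_copy import copy_kv_to_blocked_cache

bsz, num_kv_heads, head_dim, block_size, num_blocks = 2, 4, 64, 16, 8
k = torch.randn(bsz, 1, num_kv_heads, head_dim, device="cuda", dtype=torch.float16)
v = torch.randn(bsz, 1, num_kv_heads, head_dim, device="cuda", dtype=torch.float16)
k_cache = torch.zeros(num_blocks, num_kv_heads, block_size, head_dim, device="cuda", dtype=torch.float16)
v_cache = torch.zeros_like(k_cache)

kv_lengths = torch.tensor([5, 9], device="cuda", dtype=torch.int32)            # lengths including the new token
block_tables = torch.tensor([[0, 1], [2, 3]], device="cuda", dtype=torch.int32)

copy_kv_to_blocked_cache(k, v, k_cache, v_cache, kv_lengths, block_tables)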
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/no_pad_rotary_embedding.py
colossalai/kernel/triton/no_pad_rotary_embedding.py
import warnings from typing import Optional import torch import triton import triton.language as tl """ # Base autotune if needed @triton.autotune( configs=[ triton.Config({'BLOCK_HEAD':4,"BLOCK_TOKENS":4,},num_warps=4), triton.Config({'BLOCK_HEAD':4,"BLOCK_TOKENS":8,},num_warps=8), triton.Config({'BLOCK_HEAD':8,"BLOCK_TOKENS":8,},num_warps=8), triton.Config({'BLOCK_HEAD':4,"BLOCK_TOKENS":4,},num_warps=16), triton.Config({'BLOCK_HEAD':4,"BLOCK_TOKENS":4,},num_warps=32), triton.Config({'BLOCK_HEAD':16,"BLOCK_TOKENS":16,},num_warps=4), triton.Config({'BLOCK_HEAD':8,"BLOCK_TOKENS":16,},num_warps=8), ], key=['HEAD_DIM','q_total_tokens','Q_HEAD_NUM'] ) """ @triton.jit def rotary_embedding_kernel( q, k, cos, sin, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_stride, q_total_tokens, Q_HEAD_NUM: tl.constexpr, KV_GROUP_NUM: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_TOKENS: tl.constexpr, # token range length ): cur_head_idx = tl.program_id(0) cur_token_block_idx = tl.program_id(1) tokens_range = cur_token_block_idx * BLOCK_TOKENS + tl.arange(0, BLOCK_TOKENS) dim_range0 = tl.arange(0, HEAD_DIM // 2) dim_range1 = tl.arange(HEAD_DIM // 2, HEAD_DIM) off_cos_sin = tokens_range[:, None] * cos_token_stride + dim_range0[None, :] * cos_stride loaded_cos = tl.load(cos + off_cos_sin, mask=(tokens_range[:, None] < q_total_tokens), other=0.0) loaded_sin = tl.load(sin + off_cos_sin, mask=(tokens_range[:, None] < q_total_tokens), other=0.0) off_q0 = ( tokens_range[:, None, None] * q_token_stride + cur_head_idx * q_head_stride + dim_range0[None, None, :] * head_dim_stride ) off_q1 = ( tokens_range[:, None, None] * q_token_stride + cur_head_idx * q_head_stride + dim_range1[None, None, :] * head_dim_stride ) loaded_q0 = tl.load( q + off_q0, mask=((cur_head_idx < Q_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), other=0.0, ) loaded_q1 = tl.load( q + off_q1, mask=((cur_head_idx < Q_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), other=0.0, ) out_q0 = loaded_q0 * loaded_cos[:, None, :] - loaded_q1 * loaded_sin[:, None, :] out_q1 = loaded_q0 * loaded_sin[:, None, :] + loaded_q1 * loaded_cos[:, None, :] tl.store( q + off_q0, out_q0, mask=((cur_head_idx < Q_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), ) tl.store( q + off_q1, out_q1, mask=((cur_head_idx < Q_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), ) handle_kv = cur_head_idx % KV_GROUP_NUM == 0 if handle_kv: k_head_idx = cur_head_idx // KV_GROUP_NUM off_k0 = ( tokens_range[:, None, None] * k_token_stride + k_head_idx * k_head_stride + dim_range0[None, None, :] * head_dim_stride ) off_k1 = ( tokens_range[:, None, None] * k_token_stride + k_head_idx * k_head_stride + dim_range1[None, None, :] * head_dim_stride ) loaded_k0 = tl.load( k + off_k0, mask=(tokens_range[:, None, None] < q_total_tokens), other=0.0, ) loaded_k1 = tl.load( k + off_k1, mask=(tokens_range[:, None, None] < q_total_tokens), other=0.0, ) out_k0 = loaded_k0 * loaded_cos[:, None, :] - loaded_k1 * loaded_sin[:, None, :] out_k1 = loaded_k0 * loaded_sin[:, None, :] + loaded_k1 * loaded_cos[:, None, :] tl.store( k + off_k0, out_k0, mask=(tokens_range[:, None, None] < q_total_tokens), ) tl.store( k + off_k1, out_k1, mask=(tokens_range[:, None, None] < q_total_tokens), ) @triton.jit def fused_rotary_embedding_kernel( q, k, cos, sin, kv_cache, BLOCK_TABLES, context_lengths, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_stride, cacheb_stride, 
cacheh_stride, cachebs_stride, cached_stride, bts_stride, btb_stride, block_size, q_total_tokens, Q_HEAD_NUM: tl.constexpr, K_HEAD_NUM: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_HEAD: tl.constexpr, BLOCK_TOKENS: tl.constexpr, ): block_head_index = tl.program_id(0) block_token_index = tl.program_id(1) tokens_range = block_token_index * BLOCK_TOKENS + tl.arange(0, BLOCK_TOKENS) head_range = block_head_index * BLOCK_HEAD + tl.arange(0, BLOCK_HEAD) dim_range0 = tl.arange(0, HEAD_DIM // 2) dim_range1 = tl.arange(HEAD_DIM // 2, HEAD_DIM) off_q0 = ( tokens_range[:, None, None] * q_token_stride + head_range[None, :, None] * q_head_stride + dim_range0[None, None, :] * head_dim_stride ) off_q1 = ( tokens_range[:, None, None] * q_token_stride + head_range[None, :, None] * q_head_stride + dim_range1[None, None, :] * head_dim_stride ) off_k0 = ( tokens_range[:, None, None] * k_token_stride + head_range[None, :, None] * k_head_stride + dim_range0[None, None, :] * head_dim_stride ) off_k1 = ( tokens_range[:, None, None] * k_token_stride + head_range[None, :, None] * k_head_stride + dim_range1[None, None, :] * head_dim_stride ) loaded_q0 = tl.load( q + off_q0, mask=((head_range[None, :, None] < Q_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), other=0.0, ) loaded_q1 = tl.load( q + off_q1, mask=((head_range[None, :, None] < Q_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), other=0.0, ) loaded_k0 = tl.load( k + off_k0, mask=((head_range[None, :, None] < K_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), other=0.0, ) loaded_k1 = tl.load( k + off_k1, mask=((head_range[None, :, None] < K_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), other=0.0, ) off_cos_sin = tokens_range[:, None] * cos_token_stride + dim_range0[None, :] * cos_stride loaded_cos = tl.load(cos + off_cos_sin, mask=(tokens_range[:, None] < q_total_tokens), other=0.0) loaded_sin = tl.load(sin + off_cos_sin, mask=(tokens_range[:, None] < q_total_tokens), other=0.0) out_q0 = loaded_q0 * loaded_cos[:, None, :] - loaded_q1 * loaded_sin[:, None, :] out_q1 = loaded_q0 * loaded_sin[:, None, :] + loaded_q1 * loaded_cos[:, None, :] out_k0 = loaded_k0 * loaded_cos[:, None, :] - loaded_k1 * loaded_sin[:, None, :] out_k1 = loaded_k0 * loaded_sin[:, None, :] + loaded_k1 * loaded_cos[:, None, :] # total_tokens, head_num, head_dim past_kv_seq_len = tl.load(context_lengths + tokens_range, mask=(tokens_range < q_total_tokens)) - 1 last_block_idx = past_kv_seq_len // block_size block_table_ptr = BLOCK_TABLES + tokens_range * bts_stride block_ids = tl.load(block_table_ptr + last_block_idx * btb_stride, mask=(tokens_range < q_total_tokens)) offsets_in_last_block = (past_kv_seq_len % block_size) * cachebs_stride kv_range0 = ( block_ids[:, None, None, None] * cacheb_stride + head_range[None, :, None, None] * cacheh_stride + offsets_in_last_block[:, None, None, None] + dim_range0[None, None, None, :] * cached_stride ) kv_range1 = ( block_ids[:, None, None, None] * cacheb_stride + head_range[None, :, None, None] * cacheh_stride + offsets_in_last_block[:, None, None, None] + dim_range1[None, None, None, :] * cached_stride ) tl.store( kv_cache + kv_range0, out_k0[:, :, None, :], ) tl.store( kv_cache + kv_range1, out_k1[:, :, None, :], ) # concat tl.store( q + off_q0, out_q0, mask=((head_range[None, :, None] < Q_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), ) tl.store( q + off_q1, out_q1, mask=((head_range[None, :, None] < Q_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), ) tl.store( k + 
off_k0, out_k0, mask=((head_range[None, :, None] < K_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), ) tl.store( k + off_k1, out_k1, mask=((head_range[None, :, None] < K_HEAD_NUM) & (tokens_range[:, None, None] < q_total_tokens)), ) @triton.jit def fused_rotary_embedding_kernel_v2( q, k, cos, sin, kv_cache, BLOCK_TABLES, context_lengths, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_stride, cacheb_stride, cacheh_stride, cachebs_stride, cached_stride, bts_stride, btb_stride, block_size, q_total_tokens, Q_HEAD_NUM: tl.constexpr, HEAD_DIM: tl.constexpr, ): block_head_index = tl.program_id(0) if block_head_index >= Q_HEAD_NUM: return block_token_index = tl.program_id(1) dim_range0 = tl.arange(0, HEAD_DIM // 2) dim_range1 = tl.arange(HEAD_DIM // 2, HEAD_DIM) off_q0 = block_token_index * q_token_stride + block_head_index * q_head_stride + dim_range0 * head_dim_stride off_q1 = block_token_index * q_token_stride + block_head_index * q_head_stride + dim_range1 * head_dim_stride off_k0 = block_token_index * k_token_stride + block_head_index * k_head_stride + dim_range0 * head_dim_stride off_k1 = block_token_index * k_token_stride + block_head_index * k_head_stride + dim_range1 * head_dim_stride loaded_q0 = tl.load( q + off_q0, ) loaded_q1 = tl.load( q + off_q1, ) loaded_k0 = tl.load( k + off_k0, ) loaded_k1 = tl.load( k + off_k1, ) off_cos_sin = block_token_index * cos_token_stride + dim_range0 * cos_stride loaded_cos = tl.load(cos + off_cos_sin, mask=(block_token_index < q_total_tokens), other=0.0) loaded_sin = tl.load(sin + off_cos_sin, mask=(block_token_index < q_total_tokens), other=0.0) out_q0 = loaded_q0 * loaded_cos - loaded_q1 * loaded_sin out_q1 = loaded_q0 * loaded_sin + loaded_q1 * loaded_cos out_k0 = loaded_k0 * loaded_cos - loaded_k1 * loaded_sin out_k1 = loaded_k0 * loaded_sin + loaded_k1 * loaded_cos # total_tokens, head_num, head_dim past_kv_seq_len = tl.load(context_lengths + block_token_index) - 1 last_block_idx = past_kv_seq_len // block_size block_table_ptr = BLOCK_TABLES + block_token_index * bts_stride block_ids = tl.load(block_table_ptr + last_block_idx * btb_stride, mask=(block_token_index < q_total_tokens)) offsets_in_last_block = (past_kv_seq_len % block_size) * cachebs_stride kv_range0 = ( block_ids * cacheb_stride + block_head_index * cacheh_stride + offsets_in_last_block + dim_range0 * cached_stride ) kv_range1 = ( block_ids * cacheb_stride + block_head_index * cacheh_stride + offsets_in_last_block + dim_range1 * cached_stride ) tl.store( kv_cache + kv_range0, out_k0, ) tl.store( kv_cache + kv_range1, out_k1, ) # concat tl.store( q + off_q0, out_q0, ) tl.store( q + off_q1, out_q1, ) @triton.jit def decoding_fused_rotary_embedding_kernel( q, k, v, cos, sin, k_cache, v_cache, BLOCK_TABLES, context_lengths, x, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_stride, kcb_stride, kch_stride, kcsplit_x_stride, kcs_stride, kcd_stride, vcb_stride, vch_stride, vcs_stride, vcd_stride, bts_stride, btb_stride, block_size, KV_GROUP_NUM: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_head_idx = tl.program_id(0) cur_token_idx = tl.program_id(1) dim_range = tl.arange(0, HEAD_DIM) dim_range0 = tl.arange(0, HEAD_DIM // 2) dim_range1 = tl.arange(HEAD_DIM // 2, HEAD_DIM) off_q = cur_token_idx * q_token_stride + cur_head_idx * q_head_stride off_q0 = off_q + dim_range0 * head_dim_stride off_q1 = off_q + dim_range1 * head_dim_stride loaded_q0 = tl.load(q + off_q0) loaded_q1 = 
tl.load(q + off_q1) off_cos_sin = cur_token_idx * cos_token_stride + dim_range0 * cos_stride loaded_cos = tl.load(cos + off_cos_sin) loaded_sin = tl.load(sin + off_cos_sin) out_q0 = loaded_q0 * loaded_cos - loaded_q1 * loaded_sin out_q1 = loaded_q0 * loaded_sin + loaded_q1 * loaded_cos tl.store(q + off_q0, out_q0) tl.store(q + off_q1, out_q1) handle_kv = cur_head_idx % KV_GROUP_NUM == 0 if handle_kv: cur_k_head_idx = cur_head_idx // KV_GROUP_NUM off_kv = cur_token_idx * k_token_stride + cur_k_head_idx * k_head_stride off_k0 = off_kv + dim_range0 * head_dim_stride off_k1 = off_kv + dim_range1 * head_dim_stride loaded_k0 = tl.load(k + off_k0) loaded_k1 = tl.load(k + off_k1) out_k0 = loaded_k0 * loaded_cos - loaded_k1 * loaded_sin out_k1 = loaded_k0 * loaded_sin + loaded_k1 * loaded_cos # NOTE The precondition here is that it's only for unpadded inputs during decoding stage, # and so that we could directly use the token index as the sequence index past_kv_seq_len = tl.load(context_lengths + cur_token_idx) - 1 last_block_idx = past_kv_seq_len // block_size block_ids = tl.load(BLOCK_TABLES + cur_token_idx * bts_stride + last_block_idx * btb_stride) offsets_in_last_block = past_kv_seq_len % block_size offsets_cache_base = block_ids * kcb_stride + cur_k_head_idx * kch_stride k_range0 = ( offsets_cache_base + offsets_in_last_block * kcs_stride + (dim_range0 // x) * kcsplit_x_stride + (dim_range0 % x) * kcd_stride ) k_range1 = ( offsets_cache_base + offsets_in_last_block * kcs_stride + (dim_range1 // x) * kcsplit_x_stride + (dim_range1 % x) * kcd_stride ) tl.store(k_cache + k_range0, out_k0) tl.store(k_cache + k_range1, out_k1) off_v = off_kv + dim_range * head_dim_stride loaded_v = tl.load(v + off_v) v_range = ( block_ids * vcb_stride + cur_k_head_idx * vch_stride + offsets_in_last_block * vcs_stride + dim_range * vcd_stride ) tl.store(v_cache + v_range, loaded_v) def rotary_embedding( q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, k_cache: Optional[torch.Tensor] = None, block_tables: Optional[torch.Tensor] = None, kv_lengths: Optional[torch.Tensor] = None, ): """ Args: q: query tensor, [total_tokens, head_num, head_dim] k: key tensor, [total_tokens, kv_head_num, head_dim] cos: cosine for rotary embedding, [max_position_len, head_dim] sin: sine for rotary embedding, [max_position_len, head_dim] k_cache (torch.Tensor): Blocked key cache. [num_blocks, num_kv_heads, block_size, head_dim] kv_lengths, Past key/value sequence lengths plus current sequence length for each sequence. [bsz] block_tables: Block tables for each sequence. 
[bsz, max_blocks_per_sequence] """ q_total_tokens, q_head_num, head_dim = q.shape assert q.size(0) == k.size(0) BLOCK_TOKENS = 4 if head_dim >= 512: num_warps = 16 elif head_dim >= 256: num_warps = 8 else: num_warps = 4 k_head_num = k.size(1) q_token_stride, q_head_stride, head_dim_stride = q.stride() k_token_stride, k_head_stride, _ = k.stride() cos_token_stride, cos_stride = cos.stride() assert q_head_num % k_head_num == 0 kv_group_num = q_head_num // k_head_num if k_cache == None: grid = lambda META: ( q_head_num, triton.cdiv(q_total_tokens, META["BLOCK_TOKENS"]), ) rotary_embedding_kernel[grid]( q, k, cos, sin, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_stride, q_total_tokens, Q_HEAD_NUM=q_head_num, KV_GROUP_NUM=kv_group_num, HEAD_DIM=head_dim, BLOCK_TOKENS=BLOCK_TOKENS, num_warps=num_warps, ) else: warnings.warn("Fused rotary embedding Triton kernel will be deprecated as the new kcache layout is supported") grid = (triton.next_power_of_2(q_head_num), q_total_tokens) fused_rotary_embedding_kernel_v2[grid]( q, k, cos, sin, k_cache, block_tables, kv_lengths, q_token_stride, q_head_stride, k_token_stride, k_head_stride, head_dim_stride, cos_token_stride, cos_stride, k_cache.stride(0), k_cache.stride(1), k_cache.stride(2), k_cache.stride(3), block_tables.stride(0), block_tables.stride(1), k_cache.size(-2), q_total_tokens, Q_HEAD_NUM=q_head_num, HEAD_DIM=head_dim, num_warps=num_warps, ) return def decoding_fused_rotary_embedding( q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, k_cache: Optional[torch.Tensor] = None, v_cache: Optional[torch.Tensor] = None, block_tables: Optional[torch.Tensor] = None, kv_lengths: Optional[torch.Tensor] = None, use_new_kcache_layout: bool = False, ): """ Args: q: query tensor, [total_tokens, head_num, head_dim] k: key tensor, [total_tokens, kv_head_num, head_dim] v: value tensor, [total tokens, kv_head_num, head_dim] cos: cosine for rotary embedding, [max_position_len, head_dim] sin: sine for rotary embedding, [max_position_len, head_dim] k_cache (torch.Tensor): Blocked key cache. [num_blocks, kv_head_num, block_size, head_dim] v_cache (torch.Tensor): Blocked value cache. [num_blocks, kv_head_num, block_size, head_dim] kv_lengths, Past key/value sequence lengths plus current sequence length for each sequence. [bsz] block_tables: Block tables for each sequence. 
[bsz, max_blocks_per_sequence] """ q_total_tokens, q_head_num, head_dim = q.shape assert q.size(0) == k.size(0) == v.size(0) if head_dim >= 512: num_warps = 16 elif head_dim >= 256: num_warps = 8 else: num_warps = 4 k_head_num = k.size(1) kv_group_num = q_head_num // k_head_num # For KCache and VCache with the same layout x = head_dim kcsplit_x_stride, kcs_stride, kcd_stride = 0, k_cache.stride(2), k_cache.stride(3) # For KCache layout [num_blocks, num_kv_heads, head_dim//x, block_size, x] if use_new_kcache_layout: assert ( k_cache.dim() == 5 and k_cache.shape[1] == v_cache.shape[1] and k_cache.shape[2] * k_cache.shape[4] == v_cache.shape[3] ), f"Invalid KCache shape {k_cache.shape} and VCache shape {v_cache.shape}" x = k_cache.size(-1) kcsplit_x_stride, kcs_stride, kcd_stride = k_cache.stride()[-3:] grid = (q_head_num, q_total_tokens) decoding_fused_rotary_embedding_kernel[grid]( q, k, v, cos, sin, k_cache, v_cache, block_tables, kv_lengths, x, q.stride(0), q.stride(1), k.stride(0), k.stride(1), q.stride(2), cos.stride(0), cos.stride(1), k_cache.stride(0), k_cache.stride(1), kcsplit_x_stride, kcs_stride, kcd_stride, v_cache.stride(0), v_cache.stride(1), v_cache.stride(2), v_cache.stride(3), block_tables.stride(0), block_tables.stride(1), k_cache.size(-2), KV_GROUP_NUM=kv_group_num, HEAD_DIM=head_dim, num_warps=num_warps, ) return
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
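A sketch of the in-place rotary embedding for unpadded q/k defined above; it assumes a CUDA device with Triton installed. With k_cache left as None only q and k are rotated, while the fused cache-writing path is the deprecated variant mentioned in the warning.

import torch

from colossalai.kernel.triton.no_pad_rotary_embedding import rotary_embedding

total_tokens, q_heads, kv_heads, head_dim = 32, 8, 2, 64
q = torch.randn(total_tokens, q_heads, head_dim, device="cuda", dtype=torch.float16)
k = torch.randn(total_tokens, kv_heads, head_dim, device="cuda", dtype=torch.float16)
cos = torch.randn(2048, head_dim, device="cuda", dtype=torch.float16)
sin = torch.randn(2048, head_dim, device="cuda", dtype=torch.float16)

rotary_embedding(q, k, cos, sin)   # rotates q and k in place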
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/rms_layernorm.py
colossalai/kernel/triton/rms_layernorm.py
try: import triton import triton.language as tl HAS_TRITON = True except ImportError: HAS_TRITON = False print("please install triton from https://github.com/openai/triton") if HAS_TRITON: # CREDITS: These functions are adapted from the Triton tutorial # https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html @triton.jit def _rmsnorm_kernel( X, # pointer to the input Y, # pointer to the output W, # pointer to the weights stride, # how much to increase the pointer when moving by 1 row N, # number of columns in X eps, # epsilon to avoid division by zero BLOCK_SIZE: tl.constexpr, ): # This triton kernel implements Root Mean Square Layer Norm (RMSNorm). # Map the program id to the row of X and Y it should compute. row = tl.program_id(0) Y += row * stride X += row * stride # Compute variance _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) x = tl.where(cols < N, x, 0.0) _var += x * x var = tl.sum(_var, axis=0) / N rstd = 1 / tl.sqrt(var + eps) # Normalize and apply linear transformation for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) mask = cols < N w = tl.load(W + cols, mask=mask) x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32) x_hat = x * rstd y = x_hat * w # Write output tl.store(Y + cols, y.to(tl.float16), mask=mask) @triton.jit def _rmsnorm_with_residual_kernel( X, # pointer to the input Y, # pointer to the output R, # pointer to the residual W, # pointer to the weights stride, # how much to increase the pointer when moving by 1 row N, # number of columns in X eps, # epsilon to avoid division by zero BLOCK_SIZE: tl.constexpr, ): # This triton kernel implements Root Mean Square Layer Norm (RMSNorm). # Map the program id to the row of X and Y it should compute. 
row = tl.program_id(0) Y += row * stride X += row * stride R += row * stride # Compute variance _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) x = tl.where(cols < N, x, 0.0) r = tl.load(R + cols, mask=cols < N, other=0.0).to(tl.float32) r = tl.where(cols < N, r, 0.0) x = x + r _var += x * x mask = cols < N tl.store(X + cols, x.to(tl.float16), mask=mask) var = tl.sum(_var, axis=0) / N rstd = 1 / tl.sqrt(var + eps) # Normalize and apply linear transformation for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) mask = cols < N w = tl.load(W + cols, mask=mask) x = tl.load(X + cols, mask=mask, other=0.0).to(tl.float32) x_hat = x * rstd y = x_hat * w # Write output tl.store(Y + cols, y.to(tl.float16), mask=mask) def rms_layernorm(x, weight, eps, norm_output=None, residual=None): # allocate output y = ( x * 0 if norm_output is None else norm_output ) # to make the operation non-functional, store y as the intermediate activation M, N = x.shape # Less than 64KB per feature: enqueue fused kernel MAX_FUSED_SIZE = 65536 // x.element_size() BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) if N > MAX_FUSED_SIZE: raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps num_warps = min(max(triton.next_power_of_2(N) // 256, 8), 32) # enqueue kernel if residual is None: _rmsnorm_kernel[(M,)](x, y, weight, x.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps) else: _rmsnorm_with_residual_kernel[(M,)]( x, y, residual, weight, x.stride(0), N, eps, BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps ) return y, x
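Note: what the fused kernels above compute can be expressed as a short fp32 reference in plain PyTorch, which is handy when validating the Triton path. This is a sketch, not the library API: the real rms_layernorm writes fp16 outputs, and the residual variant stores x + residual back into x in place.

import torch

def rms_layernorm_ref(x, weight, eps, residual=None):
    # Mirrors _rmsnorm_kernel / _rmsnorm_with_residual_kernel, but in fp32 throughout.
    if residual is not None:
        x = x + residual  # the fused kernel also writes this sum back into X
    var = x.float().pow(2).mean(dim=-1, keepdim=True)
    y = x.float() * torch.rsqrt(var + eps) * weight.float()
    return y, x

x = torch.randn(4, 128)
weight = torch.ones(128)
y, x_out = rms_layernorm_ref(x, weight, eps=1e-6)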
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/context_attn_unpad.py
colossalai/kernel/triton/context_attn_unpad.py
# Applying the FlashAttention V2 as described in: # "FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning" # by Tri Dao, 2023 # https://github.com/Dao-AILab/flash-attention # # Inspired and modified from Triton Tutorial - Fused Attention # https://triton-lang.org/main/getting-started/tutorials/06-fused-attention.html import torch import triton import triton.language as tl # Triton 2.1.0 @triton.jit def _fwd_context_paged_attention_kernel( Q, K, V, O, KCache, VCache, BLOCK_TABLES, # [num_seqs, max_blocks_per_sequence] batch_size, stride_qt, stride_qh, stride_qd, stride_kt, stride_kh, stride_kd, stride_vt, stride_vh, stride_vd, stride_ot, stride_oh, stride_od, stride_cacheb, stride_cacheh, stride_cachebs, stride_cached, stride_bts, stride_btb, context_lengths, sm_scale, KV_GROUPS: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, ): cur_seq_idx = tl.program_id(0) if cur_seq_idx >= batch_size: return cur_head_idx = tl.program_id(1) block_start_m = tl.program_id(2) # Br, max_input_len // Block_M cur_kv_head_idx = cur_head_idx // KV_GROUPS # NOTE It requires BLOCK_M, BLOCK_N, and BLOCK_SIZE to be the same tl.static_assert(BLOCK_M == BLOCK_N) tl.static_assert(BLOCK_N == BLOCK_SIZE) # get the current sequence length from provided context lengths tensor cur_seq_len = tl.load(context_lengths + cur_seq_idx) # NOTE when talking to fused QKV and a nopadding context attention, # we assume that the input Q/K/V is contiguous, and thus here `prev_seq_len_sum` # could be considered as the start index of the current sequence. # FIXME might want to explore better way to get the summation of prev seq lengths. # `tl.sum(tensor[:end])` is invalid as tensor slice is not supported in triton. prev_seq_len_sum = 0 for i in range(0, cur_seq_idx): prev_seq_len_sum += tl.load(context_lengths + i) offset_q = prev_seq_len_sum * stride_qt + cur_head_idx * stride_qh offset_kv = prev_seq_len_sum * stride_kt + cur_kv_head_idx * stride_kh Q_block_ptr = tl.make_block_ptr( base=Q + offset_q, shape=(cur_seq_len, HEAD_DIM), strides=(stride_qt, stride_qd), offsets=(block_start_m * BLOCK_M, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0), ) K_block_ptr = tl.make_block_ptr( base=K + offset_kv, shape=(HEAD_DIM, cur_seq_len), strides=(stride_kd, stride_kt), offsets=(0, 0), block_shape=(HEAD_DIM, BLOCK_N), order=(0, 1), ) V_block_ptr = tl.make_block_ptr( base=V + offset_kv, shape=(cur_seq_len, HEAD_DIM), strides=(stride_vt, stride_vd), offsets=(0, 0), block_shape=(BLOCK_N, HEAD_DIM), order=(1, 0), ) O_block_ptr = tl.make_block_ptr( base=O + offset_q, shape=(cur_seq_len, HEAD_DIM), strides=(stride_ot, stride_od), offsets=(block_start_m * BLOCK_M, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0), ) # block table for the current sequence block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts # block indexes on block table (i.e. 0, 1, 2, ..., max_blocks_per_seq) # Consider `block_start_m` as the logical block idx in the current block table, # as we have BLOCK_M the same size as the block size. 
cur_block_table_idx = block_start_m cur_block_id = tl.load(block_table_ptr + cur_block_table_idx * stride_btb) offset_kvcache = cur_block_id * stride_cacheb + cur_kv_head_idx * stride_cacheh offsets_m = block_start_m * BLOCK_M + tl.arange(0, BLOCK_M) offsets_n = tl.arange(0, BLOCK_N) m_i = tl.full([BLOCK_M], float("-inf"), dtype=tl.float32) l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32) if block_start_m * BLOCK_M >= cur_seq_len: return Q_i = tl.load(Q_block_ptr, boundary_check=(1, 0)) for block_start_n in range(0, (block_start_m + 1) * BLOCK_M, BLOCK_N): block_start_n = tl.multiple_of(block_start_n, BLOCK_N) k = tl.load(K_block_ptr, boundary_check=(0, 1)) S_ij = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) S_ij += tl.dot(Q_i, k) S_ij *= sm_scale S_ij += tl.where(offsets_m[:, None] >= (block_start_n + offsets_n[None, :]), 0, float("-inf")) m_ij = tl.max(S_ij, 1) # rowmax(Sij) m_ij = tl.maximum(m_i, m_ij) # m_ij S_ij -= m_ij[:, None] p_ij_hat = tl.exp(S_ij) scale = tl.exp(m_i - m_ij) l_ij = scale * l_i + tl.sum(p_ij_hat, 1) acc = acc * scale[:, None] v = tl.load(V_block_ptr, boundary_check=(1, 0)) p_ij_hat = p_ij_hat.to(v.type.element_ty) acc += tl.dot(p_ij_hat, v) l_i = l_ij m_i = m_ij K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) acc = acc / l_i[:, None] tl.store(O_block_ptr, acc.to(O.type.element_ty), boundary_check=(1, 0)) if cur_head_idx % KV_GROUPS == 0: # Copy k to corresponding cache block offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_kt = block_start_m * BLOCK_M + tl.arange(0, BLOCK_M) offsets_k = K + offset_kv + offsets_dmodel[None, :] * stride_kd + offsets_kt[:, None] * stride_kt k = tl.load(offsets_k, mask=offsets_kt[:, None] < cur_seq_len, other=0.0) offsets_kcachebs = tl.arange(0, BLOCK_SIZE) offsets_kcache = ( KCache + offset_kvcache + offsets_dmodel[None, :] * stride_cached + offsets_kcachebs[:, None] * stride_cachebs ) tl.store(offsets_kcache, k, mask=offsets_kcachebs[:, None] < cur_seq_len - block_start_m * BLOCK_SIZE) # Copy v to corresponding cache block offsets_vd = offsets_dmodel offsets_vt = block_start_m * BLOCK_N + tl.arange(0, BLOCK_N) offsets_v = V + offset_kv + offsets_vt[None, :] * stride_vt + offsets_vd[:, None] * stride_vd v = tl.load(offsets_v, mask=offsets_vt[None, :] < cur_seq_len, other=0.0) offsets_vcachebs = offsets_kcachebs # same block size range, just to notify here offsets_vcache = ( VCache + offset_kvcache + offsets_vcachebs[None, :] * stride_cachebs + offsets_dmodel[:, None] * stride_cached ) tl.store(offsets_vcache, v, mask=offsets_vcachebs[None, :] < cur_seq_len - block_start_m * BLOCK_SIZE) return # Triton 2.1.0 # TODO(yuanheng-zhao): This is a temporary dispatch to use the new layout for kcache # merge `_fwd_context_paged_attention_kernel_v2` with `_fwd_context_paged_attention_kernel` later # as the kcache layout has been supported in the whole triton flow. 
@triton.jit def _fwd_context_paged_attention_kernel_v2( Q, K, V, O, KCache, # [num_blocks, num_kv_heads, head_dim // x, block_size, x] VCache, # [num_blocks, num_kv_heads, block_size, head_dim] BLOCK_TABLES, # [num_seqs, max_blocks_per_sequence] batch_size, stride_qt, stride_qh, stride_qd, stride_kt, stride_kh, stride_kd, stride_vt, stride_vh, stride_vd, stride_ot, stride_oh, stride_od, stride_cacheb, # v cache stride(0) - num_blocks stride_cacheh, # v cache stride(1) - num_kv_heads stride_cachebs, # v cache stride(2) - block_size stride_cached, # v cache stride(3) - head_dim stride_bts, stride_btb, context_lengths, sm_scale, KV_GROUPS: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, KCACHE_X: tl.constexpr, # k stride on the second last dimension BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, ): cur_seq_idx = tl.program_id(0) if cur_seq_idx >= batch_size: return cur_head_idx = tl.program_id(1) block_start_m = tl.program_id(2) # Br, max_input_len // Block_M cur_kv_head_idx = cur_head_idx // KV_GROUPS # NOTE It requires BLOCK_M, BLOCK_N, and BLOCK_SIZE to be the same tl.static_assert(BLOCK_M == BLOCK_N) tl.static_assert(BLOCK_N == BLOCK_SIZE) # get the current sequence length from provided context lengths tensor cur_seq_len = tl.load(context_lengths + cur_seq_idx) # NOTE when talking to fused QKV and a nopadding context attention, # we assume that the input Q/K/V is contiguous, and thus here `prev_seq_len_sum` # could be considered as the start index of the current sequence. # FIXME might want to explore better way to get the summation of prev seq lengths. # `tl.sum(tensor[:end])` is invalid as tensor slice is not supported in triton. prev_seq_len_sum = 0 for i in range(0, cur_seq_idx): prev_seq_len_sum += tl.load(context_lengths + i) offset_q = prev_seq_len_sum * stride_qt + cur_head_idx * stride_qh offset_kv = prev_seq_len_sum * stride_kt + cur_kv_head_idx * stride_kh Q_block_ptr = tl.make_block_ptr( base=Q + offset_q, shape=(cur_seq_len, HEAD_DIM), strides=(stride_qt, stride_qd), offsets=(block_start_m * BLOCK_M, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0), ) K_block_ptr = tl.make_block_ptr( base=K + offset_kv, shape=(HEAD_DIM, cur_seq_len), strides=(stride_kd, stride_kt), offsets=(0, 0), block_shape=(HEAD_DIM, BLOCK_N), order=(0, 1), ) V_block_ptr = tl.make_block_ptr( base=V + offset_kv, shape=(cur_seq_len, HEAD_DIM), strides=(stride_vt, stride_vd), offsets=(0, 0), block_shape=(BLOCK_N, HEAD_DIM), order=(1, 0), ) O_block_ptr = tl.make_block_ptr( base=O + offset_q, shape=(cur_seq_len, HEAD_DIM), strides=(stride_ot, stride_od), offsets=(block_start_m * BLOCK_M, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0), ) # block table for the current sequence block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts # block indexes on block table (i.e. 0, 1, 2, ..., max_blocks_per_seq) # Consider `block_start_m` as the logical block idx in the current block table, # as we have BLOCK_M the same size as the block size. 
cur_block_table_idx = block_start_m cur_block_id = tl.load(block_table_ptr + cur_block_table_idx * stride_btb) offset_kvcache = cur_block_id * stride_cacheb + cur_kv_head_idx * stride_cacheh offsets_m = block_start_m * BLOCK_M + tl.arange(0, BLOCK_M) offsets_n = tl.arange(0, BLOCK_N) m_i = tl.full([BLOCK_M], float("-inf"), dtype=tl.float32) l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32) if block_start_m * BLOCK_M >= cur_seq_len: return Q_i = tl.load(Q_block_ptr, boundary_check=(1, 0)) for block_start_n in range(0, (block_start_m + 1) * BLOCK_M, BLOCK_N): block_start_n = tl.multiple_of(block_start_n, BLOCK_N) k = tl.load(K_block_ptr, boundary_check=(0, 1)) S_ij = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) S_ij += tl.dot(Q_i, k) S_ij *= sm_scale S_ij += tl.where(offsets_m[:, None] >= (block_start_n + offsets_n[None, :]), 0, float("-inf")) m_ij = tl.max(S_ij, 1) # rowmax(Sij) m_ij = tl.maximum(m_i, m_ij) # m_ij S_ij -= m_ij[:, None] p_ij_hat = tl.exp(S_ij) scale = tl.exp(m_i - m_ij) l_ij = scale * l_i + tl.sum(p_ij_hat, 1) acc = acc * scale[:, None] v = tl.load(V_block_ptr, boundary_check=(1, 0)) p_ij_hat = p_ij_hat.to(v.type.element_ty) acc += tl.dot(p_ij_hat, v) l_i = l_ij m_i = m_ij K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) acc = acc / l_i[:, None] tl.store(O_block_ptr, acc.to(O.type.element_ty), boundary_check=(1, 0)) if cur_head_idx % KV_GROUPS == 0: # Copy k to corresponding cache block block_range = tl.arange(0, BLOCK_SIZE) X_range = tl.arange(0, KCACHE_X) # unroll the loop aggressively for split_x in tl.static_range(HEAD_DIM // KCACHE_X): offsets_dmodel_x_partition = tl.arange(split_x * KCACHE_X, (split_x + 1) * KCACHE_X) offsets_k = K + offset_kv + offsets_dmodel_x_partition[None, :] * stride_kd + offsets_m[:, None] * stride_kt k = tl.load(offsets_k, mask=offsets_m[:, None] < cur_seq_len, other=0.0) # HACK: KCache must be contiguous in order to apply the following offsets calculation offsets_kcache = ( KCache + offset_kvcache + split_x * BLOCK_SIZE * KCACHE_X + block_range[:, None] * KCACHE_X + X_range[None, :] ) tl.store(offsets_kcache, k, mask=block_range[:, None] < cur_seq_len - block_start_m * BLOCK_SIZE) # Copy v to corresponding cache block offsets_vd = tl.arange(0, HEAD_DIM) # offsets_dmodel offsets_vt = block_start_m * BLOCK_N + offsets_n offsets_v = V + offset_kv + offsets_vt[None, :] * stride_vt + offsets_vd[:, None] * stride_vd v = tl.load(offsets_v, mask=offsets_vt[None, :] < cur_seq_len, other=0.0) offsets_vcache = ( VCache + offset_kvcache + block_range[None, :] * stride_cachebs + offsets_vd[:, None] * stride_cached ) tl.store(offsets_vcache, v, mask=block_range[None, :] < cur_seq_len - block_start_m * BLOCK_SIZE) return # Triton 2.1.0 @triton.jit def _alibi_fwd_context_paged_attention_kernel( Q, K, V, O, KCache, VCache, BLOCK_TABLES, # [num_seqs, max_blocks_per_sequence] batch_size, alibi_slopes, stride_qt, stride_qh, stride_qd, stride_kt, stride_kh, stride_kd, stride_vt, stride_vh, stride_vd, stride_ot, stride_oh, stride_od, stride_cacheb, stride_cacheh, stride_cachebs, stride_cached, stride_bts, stride_btb, context_lengths, sm_scale, KV_GROUPS: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, ): cur_seq_idx = tl.program_id(0) if cur_seq_idx >= batch_size: return cur_head_idx = tl.program_id(1) block_start_m = tl.program_id(2) # Br, max_input_len // Block_M cur_kv_head_idx = cur_head_idx // 
KV_GROUPS global_block_start_offest = block_start_m * BLOCK_M # NOTE It requires BLOCK_M, BLOCK_N, and BLOCK_SIZE to be the same tl.static_assert(BLOCK_M == BLOCK_N) tl.static_assert(BLOCK_N == BLOCK_SIZE) # get the current sequence length from provided context lengths tensor cur_seq_len = tl.load(context_lengths + cur_seq_idx) # NOTE when talking to fused QKV and a nopadding context attention, # we assume that the input Q/K/V is contiguous, and thus here `prev_seq_len_sum` # could be considered as the start index of the current sequence. # FIXME might want to explore better way to get the summation of prev seq lengths. # `tl.sum(tensor[:end])` is invalid as tensor slice is not supported in triton. prev_seq_len_sum = 0 for i in range(0, cur_seq_idx): prev_seq_len_sum += tl.load(context_lengths + i) offset_q = prev_seq_len_sum * stride_qt + cur_head_idx * stride_qh offset_kv = prev_seq_len_sum * stride_kt + cur_kv_head_idx * stride_kh Q_block_ptr = tl.make_block_ptr( base=Q + offset_q, shape=(cur_seq_len, HEAD_DIM), strides=(stride_qt, stride_qd), offsets=(global_block_start_offest, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0), ) K_block_ptr = tl.make_block_ptr( base=K + offset_kv, shape=(HEAD_DIM, cur_seq_len), strides=(stride_kd, stride_kt), offsets=(0, 0), block_shape=(HEAD_DIM, BLOCK_N), order=(0, 1), ) V_block_ptr = tl.make_block_ptr( base=V + offset_kv, shape=(cur_seq_len, HEAD_DIM), strides=(stride_vt, stride_vd), offsets=(0, 0), block_shape=(BLOCK_N, HEAD_DIM), order=(1, 0), ) O_block_ptr = tl.make_block_ptr( base=O + offset_q, shape=(cur_seq_len, HEAD_DIM), strides=(stride_ot, stride_od), offsets=(global_block_start_offest, 0), block_shape=(BLOCK_M, HEAD_DIM), order=(1, 0), ) # block table for the current sequence block_table_ptr = BLOCK_TABLES + cur_seq_idx * stride_bts # block indexes on block table (i.e. 0, 1, 2, ..., max_blocks_per_seq) # Consider `block_start_m` as the logical block idx in the current block table, # as we have BLOCK_M the same size as the block size. 
cur_block_table_idx = block_start_m cur_block_id = tl.load(block_table_ptr + cur_block_table_idx * stride_btb) offset_kvcache = cur_block_id * stride_cacheb + cur_kv_head_idx * stride_cacheh offsets_m = global_block_start_offest + tl.arange(0, BLOCK_M) offsets_n = tl.arange(0, BLOCK_N) m_i = tl.full([BLOCK_M], float("-inf"), dtype=tl.float32) l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, HEAD_DIM], dtype=tl.float32) # load alibi_slope alibi_slope = tl.load(alibi_slopes + cur_head_idx) m_alibi_offset = tl.arange(0, BLOCK_M)[:, None] + global_block_start_offest n_alibi_offset = tl.arange(0, BLOCK_N)[None, :] if global_block_start_offest >= cur_seq_len: return Q_i = tl.load(Q_block_ptr, boundary_check=(1, 0)) for block_start_n in range(0, (block_start_m + 1) * BLOCK_M, BLOCK_N): block_start_n = tl.multiple_of(block_start_n, BLOCK_N) k = tl.load(K_block_ptr, boundary_check=(0, 1)) S_ij = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) S_ij += tl.dot(Q_i, k) S_ij *= sm_scale S_ij += tl.where(offsets_m[:, None] >= (block_start_n + offsets_n[None, :]), 0, float("-inf")) alibi = (n_alibi_offset + block_start_n - m_alibi_offset) * alibi_slope alibi = tl.where((alibi <= 0) & (m_alibi_offset < cur_seq_len), alibi, float("-inf")) S_ij += alibi m_ij = tl.max(S_ij, 1) # rowmax(Sij) m_ij = tl.maximum(m_i, m_ij) # m_ij S_ij -= m_ij[:, None] p_ij_hat = tl.exp(S_ij) scale = tl.exp(m_i - m_ij) l_ij = scale * l_i + tl.sum(p_ij_hat, 1) acc = acc * scale[:, None] v = tl.load(V_block_ptr, boundary_check=(1, 0)) p_ij_hat = p_ij_hat.to(v.type.element_ty) acc += tl.dot(p_ij_hat, v) l_i = l_ij m_i = m_ij K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N)) V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0)) acc = acc / l_i[:, None] tl.store(O_block_ptr, acc.to(O.type.element_ty), boundary_check=(1, 0)) if cur_head_idx % KV_GROUPS == 0: # Copy k to corresponding cache block offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_kt = global_block_start_offest + tl.arange(0, BLOCK_M) offsets_k = K + offset_kv + offsets_dmodel[None, :] * stride_kd + offsets_kt[:, None] * stride_kt k = tl.load(offsets_k, mask=offsets_kt[:, None] < cur_seq_len, other=0.0) offsets_kcachebs = tl.arange(0, BLOCK_SIZE) offsets_kcache = ( KCache + offset_kvcache + offsets_dmodel[None, :] * stride_cached + offsets_kcachebs[:, None] * stride_cachebs ) tl.store(offsets_kcache, k, mask=offsets_kcachebs[:, None] < cur_seq_len - block_start_m * BLOCK_SIZE) # Copy v to corresponding cache block offsets_vd = offsets_dmodel offsets_vt = block_start_m * BLOCK_N + tl.arange(0, BLOCK_N) offsets_v = V + offset_kv + offsets_vt[None, :] * stride_vt + offsets_vd[:, None] * stride_vd v = tl.load(offsets_v, mask=offsets_vt[None, :] < cur_seq_len, other=0.0) offsets_vcachebs = offsets_kcachebs # same block size range, just to notify here offsets_vcache = ( VCache + offset_kvcache + offsets_vcachebs[None, :] * stride_cachebs + offsets_dmodel[:, None] * stride_cached ) tl.store(offsets_vcache, v, mask=offsets_vcachebs[None, :] < cur_seq_len - block_start_m * BLOCK_SIZE) return def context_attention_unpadded( q: torch.Tensor, # [num_tokens, num_heads, head_dim] k: torch.Tensor, # [num_tokens, num_kv_heads, head_dim] v: torch.Tensor, # [num_tokens, num_kv_heads, head_dim] k_cache: torch.Tensor, # [num_blocks, num_kv_heads, block_size, head_dim] v_cache: torch.Tensor, # [num_blocks, num_kv_heads, block_size, head_dim] context_lengths: torch.Tensor, # [num_seqs] block_tables: torch.Tensor, # [num_seqs, max_blocks_per_sequence], block_size: int, output: 
torch.Tensor = None, # [num_tokens, num_heads, head_dim] alibi_slopes: torch.Tensor = None, # [num_heads] max_seq_len: int = None, sm_scale: int = None, # NOTE(yuanheng-zhao): the following flag is used to determine whether to use the new layout for kcache # [num_blocks, num_kv_heads, head_dim // x, block_size, x] - must be contiguous use_new_kcache_layout: bool = False, ): Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1] assert Lq == Lk == Lv assert Lk in {32, 64, 128, 256} assert q.shape[0] == k.shape[0] == v.shape[0] k_cache_shape = k_cache.shape v_cache_shape = v_cache.shape if use_new_kcache_layout: assert ( len(k_cache_shape) == 5 and k_cache_shape[1] == v_cache_shape[1] and k_cache_shape[2] * k_cache_shape[4] == v_cache_shape[3] ), f"Invalid KCache shape {k_cache_shape} and VCache shape {v_cache_shape}" else: assert k_cache_shape == v_cache_shape, f"Invalid KCache shape {k_cache_shape} and VCache shape {v_cache_shape}" assert context_lengths.shape[0] == block_tables.shape[0] num_tokens, num_heads, head_dim = q.shape num_kv_heads = k.shape[-2] assert num_kv_heads > 0 and num_heads % num_kv_heads == 0 num_kv_group = num_heads // num_kv_heads num_seqs, max_blocks_per_seq = block_tables.shape max_seq_len = context_lengths.max().item() if max_seq_len is None else max_seq_len sm_scale = 1.0 / (Lq**0.5) if sm_scale is None else sm_scale output = ( torch.empty((num_tokens, num_heads * head_dim), dtype=q.dtype, device=q.device) if output is None else output ) # NOTE For now, BLOCK_M and BLOCK_N are supposed to be equivalent with # the size of physical cache block (i.e. `block_size`) assert block_size in {16, 32, 64, 128} BLOCK_M = BLOCK_N = block_size # NOTE use `triton.next_power_of_2` here to utilize the cache mechanism of triton # To optimize, revise batching/scheduling to batch 2^n sequences in a batch (preferred) grid = (triton.next_power_of_2(num_seqs), num_heads, triton.cdiv(max_seq_len, BLOCK_M)) if use_new_kcache_layout: # TODO(yuanheng-zhao): Since the alibi kernel is pretty similar to the original one, # the code (alibi kernel) will be refactored later to avoid code duplication, when # the whole triton flow with new k cache layout has been supported and tested. 
assert ( alibi_slopes is None ), "Alibi Slopes will be supported with new kcache layout later when the whole triton flow is ready" x = k_cache_shape[4] # Intuition: 16 // dtype_size _fwd_context_paged_attention_kernel_v2[grid]( q, k, v, output, k_cache, v_cache, block_tables, num_seqs, q.stride(0), q.stride(1), q.stride(2), k.stride(0), k.stride(1), k.stride(2), v.stride(0), v.stride(1), v.stride(2), output.stride(0), head_dim, 1, v_cache.stride(0), v_cache.stride(1), v_cache.stride(2), v_cache.stride(3), block_tables.stride(0), block_tables.stride(1), context_lengths, sm_scale, KV_GROUPS=num_kv_group, BLOCK_SIZE=block_size, HEAD_DIM=Lk, KCACHE_X=x, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, ) return output if alibi_slopes is not None: _alibi_fwd_context_paged_attention_kernel[grid]( q, k, v, output, k_cache, v_cache, block_tables, num_seqs, alibi_slopes, q.stride(0), q.stride(1), q.stride(2), k.stride(0), k.stride(1), k.stride(2), v.stride(0), v.stride(1), v.stride(2), output.stride(0), head_dim, 1, k_cache.stride(0), k_cache.stride(1), k_cache.stride(2), k_cache.stride(3), block_tables.stride(0), block_tables.stride(1), context_lengths, sm_scale, num_kv_group, block_size, HEAD_DIM=Lk, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, ) else: _fwd_context_paged_attention_kernel[grid]( q, k, v, output, k_cache, v_cache, block_tables, num_seqs, q.stride(0), q.stride(1), q.stride(2), k.stride(0), k.stride(1), k.stride(2), v.stride(0), v.stride(1), v.stride(2), output.stride(0), head_dim, 1, k_cache.stride(0), k_cache.stride(1), k_cache.stride(2), k_cache.stride(3), block_tables.stride(0), block_tables.stride(1), context_lengths, sm_scale, num_kv_group, block_size, HEAD_DIM=Lk, BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, ) return output
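Note: stripped of the paged-cache writes, grouped KV heads, and ALiBi handling, context_attention_unpadded computes ordinary causal attention independently for each packed sequence, using context_lengths to locate each sequence's start offset. The plain-PyTorch reference below is a validation sketch only; it assumes num_heads == num_kv_heads and does not populate k_cache/v_cache.

import torch

def unpadded_causal_attention_ref(q, k, v, context_lengths, sm_scale):
    # q/k/v: [num_tokens, num_heads, head_dim], sequences packed along dim 0.
    out = torch.empty_like(q)
    start = 0
    for seq_len in context_lengths.tolist():
        qs = q[start : start + seq_len].transpose(0, 1)  # [H, L, D]
        ks = k[start : start + seq_len].transpose(0, 1)
        vs = v[start : start + seq_len].transpose(0, 1)
        scores = torch.matmul(qs, ks.transpose(-1, -2)) * sm_scale
        causal = torch.triu(torch.ones(seq_len, seq_len, dtype=torch.bool, device=q.device), diagonal=1)
        scores = scores.masked_fill(causal, float("-inf"))
        out[start : start + seq_len] = torch.matmul(torch.softmax(scores, dim=-1), vs).transpose(0, 1)
        start += seq_len
    return out

q, k, v = (torch.randn(6, 2, 16) for _ in range(3))
o = unpadded_causal_attention_ref(q, k, v, torch.tensor([4, 2]), sm_scale=16 ** -0.5)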
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/__init__.py
colossalai/kernel/triton/__init__.py
try: import triton HAS_TRITON = True except ImportError: HAS_TRITON = False print("Triton is not installed. Please install Triton to use Triton kernels.") # There may exist import error even if we have triton installed. if HAS_TRITON: from .context_attn_unpad import context_attention_unpadded from .flash_decoding import flash_decoding_attention from .fused_rotary_embedding import fused_rotary_embedding from .kvcache_copy import copy_k_to_blocked_cache, copy_kv_to_blocked_cache from .no_pad_rotary_embedding import decoding_fused_rotary_embedding, rotary_embedding from .rms_layernorm import rms_layernorm from .rotary_cache_copy import get_xine_cache from .softmax import softmax __all__ = [ "context_attention_unpadded", "flash_decoding_attention", "copy_k_to_blocked_cache", "copy_kv_to_blocked_cache", "softmax", "rms_layernorm", "rotary_embedding", "fused_rotary_embedding", "get_xine_cache", "decoding_fused_rotary_embedding", ]
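Note: since the whole subpackage is guarded by HAS_TRITON, downstream code typically mirrors the same guard. A minimal sketch (the fallback flag name is an illustrative assumption, not part of this module):

try:
    from colossalai.kernel.triton import flash_decoding_attention, rms_layernorm
    HAS_TRITON_KERNELS = True
except ImportError:
    HAS_TRITON_KERNELS = False  # fall back to plain PyTorch implementations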
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/kernel/triton/flash_decoding.py
colossalai/kernel/triton/flash_decoding.py
# Applying Flash-Decoding as descibed in # https://pytorch.org/blog/flash-decoding/ # by Tri Dao, 2023 import torch import triton import triton.language as tl # Triton 2.1.0 @triton.jit def _flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_size, head_dim] VCache, # [num_blocks, num_kv_heads, block_size, head_dim], # or [num_blocks, num_kv_heads, head_dim//x, block_size, x], depends on strides provided block_tables, # [batch_size, max_blocks_per_sequence] mid_o, # [batch_size * q_len, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size * q_len, head_num, kv_split_num] kv_seq_len, # [batch_size] q_len, batch_size, kv_group_num, x, sm_scale, stride_qt, stride_qh, stride_qd, stride_kcb, stride_kch, stride_kcsplit_x, stride_kcs, stride_kcd, stride_vcb, stride_vch, stride_vcs, stride_vcd, stride_bts, stride_btb, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_mid_o_lset, stride_mid_o_lseh, stride_mid_o_lseb, BLOCK_KV: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_head_idx = tl.program_id(1) block_start_kv = tl.program_id(2) # for splitting k/v # NOTE It requires BLOCK_KV and BLOCK_SIZE to be the same # TODO might want to replace with BLOCK_KV % BLOCK_SIZE == 0 (optimize BLOCK_KV as multiple of BLOCK_SIZE) # and then support calculating multiple kv cache blocks on an instance tl.static_assert(BLOCK_KV == BLOCK_SIZE) # get the current (kv) sequence length # cur_token_off is used as a "mask" here for spec-dec during verification process cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off if block_start_kv * BLOCK_KV >= cur_kv_seq_len: return offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_block = tl.arange(0, BLOCK_SIZE) # block table for the current sequence block_table_ptr = block_tables + cur_seq_idx * stride_bts # cur_bt_start_idx = block_start_kv * (BLOCK_KV // BLOCK_SIZE) # cur_block_id = tl.load(block_table_ptr + cur_bt_start_idx * stride_btb) cur_block_id = tl.load(block_table_ptr + block_start_kv * stride_btb) cur_occupied_size = tl.where( (block_start_kv + 1) * BLOCK_SIZE <= cur_kv_seq_len, BLOCK_SIZE, cur_kv_seq_len - block_start_kv * BLOCK_SIZE ) tl.device_assert(cur_occupied_size >= 0) offsets_q = cur_token_idx * stride_qt + cur_head_idx * stride_qh + offsets_dmodel * stride_qd q = tl.load(Q + offsets_q) cur_kv_head_idx = cur_head_idx // kv_group_num offset_kvcache = cur_block_id * stride_kcb + cur_kv_head_idx * stride_kch offsets_k = ( offset_kvcache + (offsets_dmodel[None, :] // x) * stride_kcsplit_x + (offsets_dmodel[None, :] % x) * stride_kcd + offsets_block[:, None] * stride_kcs ) k_cur_block = tl.load(KCache + offsets_k) V_block_ptr = tl.make_block_ptr( base=VCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_vcs, stride_vcd), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) v_cur_block = tl.load(V_block_ptr) acc = tl.zeros([HEAD_DIM], dtype=tl.float32) # use block size of the paged/blocked kv cache S_ij = tl.zeros([BLOCK_SIZE], dtype=tl.float32) # NOTE a trick to come across triton's requirement that values in both first and second input shapes must be >= 16, # Multiplying two tensors with shapes [1, d] * [d, block_size] will fail. 
# Refer to https://github.com/openai/triton/discussions/895 S_ij += tl.sum(q[None, :] * k_cur_block, 1) S_ij *= sm_scale S_ij += tl.where(block_start_kv * BLOCK_KV + offsets_block < cur_kv_seq_len, 0, float("-inf")) m = tl.max(S_ij, 0) S_ij -= m p_ij_hat = tl.exp(S_ij) l_i = tl.sum(p_ij_hat, 0) p_ij_hat = p_ij_hat.to(v_cur_block.type.element_ty) acc += tl.sum(v_cur_block * p_ij_hat[:, None], 0) acc = acc / l_i offsets_mid_o = ( cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + block_start_kv * stride_mid_ob + offsets_dmodel * stride_mid_od ) tl.store(mid_o + offsets_mid_o, acc) offsets_mid_o_lse = ( cur_token_idx * stride_mid_o_lset + cur_head_idx * stride_mid_o_lseh + block_start_kv * stride_mid_o_lseb ) # logsumexp l_i^(j) = m^(j) + log(l_i^(j)) tl.store(mid_o_lse + offsets_mid_o_lse, m + tl.log(l_i)) # Triton 2.1.0 @triton.jit def _alibi_flash_decoding_fwd_kernel( Q, # [batch_size * q_len, head_num, head_dim] KCache, # [num_blocks, num_kv_heads, block_size, head_dim] VCache, # [num_blocks, num_kv_heads, block_size, head_dim] block_tables, # [batch_size, max_blocks_per_sequence] mid_o, # [batch_size * q_len, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size * q_len, head_num, kv_split_num] kv_seq_len, # [batch_size] q_len, batch_size, alibi_slopes, stride_qt, stride_qh, stride_qd, stride_cacheb, stride_cacheh, stride_cachebs, stride_cached, stride_bts, stride_btb, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_mid_o_lset, stride_mid_o_lseh, stride_mid_o_lseb, sm_scale, KV_GROUPS: tl.constexpr, BLOCK_KV: tl.constexpr, BLOCK_SIZE: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_head_idx = tl.program_id(1) block_start_kv = tl.program_id(2) # for splitting k/v # NOTE It requires BLOCK_KV and BLOCK_SIZE to be the same # TODO might want to replace with BLOCK_KV % BLOCK_SIZE == 0 (optimize BLOCK_KV as multiple of BLOCK_SIZE) # and then support calculating multiple kv cache blocks on an instance tl.static_assert(BLOCK_KV == BLOCK_SIZE) # get the current (kv) sequence length # cur_token_off is used as a "mask" here for spec-dec during verification process cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off if block_start_kv * BLOCK_KV >= cur_kv_seq_len: return offsets_dmodel = tl.arange(0, HEAD_DIM) offsets_q = cur_token_idx * stride_qt + cur_head_idx * stride_qh + offsets_dmodel * stride_qd q = tl.load(Q + offsets_q) # block table for the current sequence block_table_ptr = block_tables + cur_seq_idx * stride_bts # cur_bt_start_idx = block_start_kv * (BLOCK_KV // BLOCK_SIZE) # cur_block_id = tl.load(block_table_ptr + cur_bt_start_idx * stride_btb) cur_block_id = tl.load(block_table_ptr + block_start_kv * stride_btb) cur_occupied_size = tl.where( (block_start_kv + 1) * BLOCK_SIZE <= cur_kv_seq_len, BLOCK_SIZE, cur_kv_seq_len - block_start_kv * BLOCK_SIZE ) tl.device_assert(cur_occupied_size >= 0) cur_kv_head_idx = cur_head_idx // KV_GROUPS offset_kvcache = cur_block_id * stride_cacheb + cur_kv_head_idx * stride_cacheh K_block_ptr = tl.make_block_ptr( base=KCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_cachebs, stride_cached), offsets=(0, 0), block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) V_block_ptr = tl.make_block_ptr( base=VCache + offset_kvcache, shape=(cur_occupied_size, HEAD_DIM), strides=(stride_cachebs, stride_cached), offsets=(0, 0), 
block_shape=(BLOCK_SIZE, HEAD_DIM), order=(0, 1), ) k_cur_block = tl.load(K_block_ptr) v_cur_block = tl.load(V_block_ptr) acc = tl.zeros([HEAD_DIM], dtype=tl.float32) # use block size of the paged/blocked kv cache S_ij = tl.zeros([BLOCK_SIZE], dtype=tl.float32) alibi_slope = tl.load(alibi_slopes + cur_head_idx) position_k_offset = block_start_kv * BLOCK_KV + tl.arange(0, BLOCK_SIZE) # NOTE a trick to come across triton's requirement that values in both first and second input shapes must be >= 16, # Multiplying two tensors with shapes [1, d] * [d, block_size] will fail. # Refer to https://github.com/openai/triton/discussions/895 S_ij += tl.sum(q[None, :] * k_cur_block, 1) S_ij *= sm_scale S_ij -= alibi_slope * (cur_kv_seq_len - 1 - position_k_offset) S_ij = tl.where(cur_kv_seq_len > position_k_offset, S_ij, float("-inf")) m = tl.max(S_ij, 0) S_ij -= m p_ij_hat = tl.exp(S_ij) l_i = tl.sum(p_ij_hat, 0) p_ij_hat = p_ij_hat.to(v_cur_block.type.element_ty) acc += tl.sum(v_cur_block * p_ij_hat[:, None], 0) acc = acc / l_i offsets_mid_o = ( cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + block_start_kv * stride_mid_ob + offsets_dmodel * stride_mid_od ) tl.store(mid_o + offsets_mid_o, acc) offsets_mid_o_lse = ( cur_token_idx * stride_mid_o_lset + cur_head_idx * stride_mid_o_lseh + block_start_kv * stride_mid_o_lseb ) # logsumexp l_i^(j) = m^(j) + log(l_i^(j)) tl.store(mid_o_lse + offsets_mid_o_lse, m + tl.log(l_i)) # Triton 2.1.0 @triton.jit def _flash_decoding_fwd_reduce_kernel( mid_o, # [batch_size, head_num, kv_split_num, head_dim] mid_o_lse, # [batch_size, head_num, kv_split_num] O, # [batch_size, num_heads, head_dim] or [batch_size, 1, num_heads, head_dim] kv_seq_len, q_len, batch_size, stride_mid_ot, stride_mid_oh, stride_mid_ob, stride_mid_od, stride_o_lset, stride_o_lseh, stride_o_lseb, stride_ot, stride_oh, stride_od, BLOCK_KV: tl.constexpr, HEAD_DIM: tl.constexpr, ): cur_token_idx = tl.program_id(0) cur_seq_idx = cur_token_idx // q_len if cur_seq_idx >= batch_size: return cur_head_idx = tl.program_id(1) # cur_token_off is used as a "mask" here for spec-dec during verification process cur_token_off = (cur_token_idx % q_len) - q_len + 1 cur_kv_seq_len = tl.load(kv_seq_len + cur_seq_idx) + cur_token_off offsets_dmodel = tl.arange(0, HEAD_DIM) # NOTE currently the block size BLOCK_KV splitting kv is relatively small as we have # BLOCK_KV == BLOCK_SIZE for now. We might want to decrease the number of blocks of kv splitted. 
kv_split_num = (cur_kv_seq_len + BLOCK_KV - 1) // BLOCK_KV m_i = float("-inf") # max logic l_i = 0.0 # sum exp acc = tl.zeros([HEAD_DIM], dtype=tl.float32) offsets_mid_o = cur_token_idx * stride_mid_ot + cur_head_idx * stride_mid_oh + offsets_dmodel offset_mid_lse = cur_token_idx * stride_o_lset + cur_head_idx * stride_o_lseh for block_i in range(0, kv_split_num, 1): mid_o_block = tl.load(mid_o + offsets_mid_o + block_i * stride_mid_ob) lse = tl.load(mid_o_lse + offset_mid_lse + block_i * stride_o_lseb) m_ij = tl.maximum(m_i, lse) scale = tl.exp(m_i - m_ij) acc = acc * scale lse -= m_ij exp_logic = tl.exp(lse) acc += exp_logic * mid_o_block l_i = scale * l_i + exp_logic m_i = m_ij acc = acc / l_i offsets_O = cur_token_idx * stride_ot + cur_head_idx * stride_oh + offsets_dmodel tl.store(O + offsets_O, acc.to(O.type.element_ty)) return # Decoding Stage # Used with blocked KV Cache (PagedAttention) def flash_decoding_attention( q: torch.Tensor, k_cache: torch.Tensor, v_cache: torch.Tensor, kv_seq_len: torch.Tensor, block_tables: torch.Tensor, block_size: int, max_seq_len_in_batch: int = None, output: torch.Tensor = None, mid_output: torch.Tensor = None, mid_output_lse: torch.Tensor = None, alibi_slopes: torch.Tensor = None, sm_scale: int = None, kv_group_num: int = 1, q_len: int = 1, # NOTE alibi flash decoding does not support q_len > 1 at this moment. use_new_kcache_layout: bool = False, ): """ Flash decoding implemented with a blocked KV Cache (PagedAttention) during decoding stage. Args: q (torch.Tensor): [bsz * q_len, num_heads, head_dim] q_len > 1 only for verification process in speculative-decoding. k_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] v_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_dim] kv_seq_len (torch.Tensor): [batch_size] records the (kv) sequence lengths incorporating past kv sequence lengths. block_tables (torch.Tensor): [batch_size, max_blocks_per_sequence] max_seq_len_in_batch (int): Maximum sequence length in the batch. output (torch.Tensor): [bsz, num_heads * head_dim] mid_output (torch.Tensor): [max_bsz * q_len, num_heads, kv_max_split_num, head_dim] Intermediate output tensor. `max_bsz` should be greater than or equal to `bsz`. q_len > 1 only for verification process in speculative-decoding. mid_output_lse (torch.Tensor): [max_bsz * q_len, num_heads, kv_max_split_num] Log-sum-exp of intermediate output. `max_bsz` should be greater than or equal to `bsz`. q_len > 1 only for verification process in speculative-decoding. alibi_slopes (torch.Tensor): [num_heads] alibi slopes used for alibi flash decoding. block_size (int): Size of each block in the blocked key/value cache. num_kv_group (int, optional): Number of key/value groups. Defaults to 1. q_length (int): Query length. Use for speculative decoding when `q_length` > 1 (i.e. the last n tokens). Defaults to 1. use_new_kcache_layout (bool): Whether to use the new kcache layout. Defaults to False. 
Returns: Output tensor with shape [bsz * q_len, num_heads * head_dim] """ q = q.squeeze() if q.dim() == 4 else q assert q.dim() == 3, f"Incompatible q dim: {q.dim()}" n_tokens, num_heads, head_dim = q.shape assert n_tokens % q_len == 0, "Invalid q_len" bsz = n_tokens // q_len assert head_dim in {32, 64, 128, 256} assert kv_seq_len.shape[0] == block_tables.shape[0] == bsz, ( f"Got incompatible batch size (number of seqs):\n" f" KV seq lengths bsz {kv_seq_len.size(0)}, Block tables bsz {block_tables.size(0)}, " f"batch size {bsz}" ) assert k_cache.size(-2) == v_cache.size(-2) == block_size, ( f"Got incompatible block size on kv caches:\n" f" assigned block_size {block_size}, k_cache block_size {k_cache.size(-2)}, " f"v_cache block_size {v_cache.size(-2)}" ) # NOTE BLOCK_KV could be considered as block splitting the sequence on k/v # For now, BLOCK_KV is supposed to be equivalent with the size of physical cache block (i.e.`block_size`) assert block_size in {16, 32, 64, 128} BLOCK_KV = block_size sm_scale = 1.0 / (head_dim**0.5) if sm_scale is None else sm_scale max_seq_len_in_batch = kv_seq_len.max().item() if max_seq_len_in_batch is None else max_seq_len_in_batch # For compatibility (TODO revise modeling in future) kv_max_split_num = (max_seq_len_in_batch + BLOCK_KV - 1) // BLOCK_KV if mid_output is None: mid_output = torch.empty( (bsz * q_len, num_heads, kv_max_split_num, head_dim), dtype=torch.float32, device=q.device ) if mid_output_lse is None: mid_output_lse = torch.empty((bsz * q_len, num_heads, kv_max_split_num), dtype=torch.float32, device=q.device) if output is None: # A hack to prevent `view` operation in modeling output = torch.empty((bsz * q_len, num_heads * head_dim), dtype=q.dtype, device=q.device) assert ( mid_output.size(2) == mid_output_lse.size(2) >= kv_max_split_num ), "Incompatible kv split number of intermediate output tensors" assert ( mid_output.size(0) == mid_output_lse.size(0) >= output.size(0) == n_tokens ), f"Incompatible first dimension of output tensors" # NOTE use `triton.next_power_of_2` here to utilize the cache mechanism of triton # To optimize, revise batching/scheduling to batch 2^n sequences in a batch (preferred) grid = lambda META: ( triton.next_power_of_2(bsz * q_len), num_heads, triton.cdiv(triton.next_power_of_2(max_seq_len_in_batch), META["BLOCK_KV"]), ) if alibi_slopes is not None: # TODO(yuanheng-zhao): Since the alibi kernel is pretty similar to the original one, # the code (alibi kernel) will be refactored later to avoid code duplication, when # the whole triton flow with new k cache layout has been supported and tested. 
assert ( not use_new_kcache_layout ), "Alibi Slopes will be supported with new kcache layout later when the whole triton flow is ready" _alibi_flash_decoding_fwd_kernel[grid]( q, k_cache, v_cache, block_tables, mid_output, mid_output_lse, kv_seq_len, q_len, bsz, alibi_slopes, q.stride(0), q.stride(1), q.stride(2), k_cache.stride(0), k_cache.stride(1), k_cache.stride(2), k_cache.stride(3), block_tables.stride(0), block_tables.stride(1), mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), sm_scale, KV_GROUPS=kv_group_num, BLOCK_KV=block_size, BLOCK_SIZE=block_size, HEAD_DIM=head_dim, ) else: # For KCache and VCache with the same layout x = head_dim kcsplit_x_stride, kcs_stride, kcd_stride = 0, k_cache.stride(2), k_cache.stride(3) # For KCache layout [num_blocks, num_kv_heads, head_dim//x, block_size, x] if use_new_kcache_layout: assert ( k_cache.dim() == 5 and k_cache.shape[1] == v_cache.shape[1] and k_cache.shape[2] * k_cache.shape[4] == v_cache.shape[3] ), f"Invalid KCache shape {k_cache.shape} and VCache shape {v_cache.shape}" x = k_cache.size(-1) kcsplit_x_stride, kcs_stride, kcd_stride = k_cache.stride()[-3:] _flash_decoding_fwd_kernel[grid]( q, k_cache, v_cache, block_tables, mid_output, mid_output_lse, kv_seq_len, q_len, bsz, kv_group_num, x, sm_scale, q.stride(0), q.stride(1), q.stride(2), k_cache.stride(0), k_cache.stride(1), kcsplit_x_stride, kcs_stride, kcd_stride, v_cache.stride(0), v_cache.stride(1), v_cache.stride(2), v_cache.stride(3), block_tables.stride(0), block_tables.stride(1), mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), BLOCK_KV=block_size, BLOCK_SIZE=block_size, HEAD_DIM=head_dim, ) grid = (triton.next_power_of_2(bsz * q_len), num_heads) _flash_decoding_fwd_reduce_kernel[grid]( mid_output, mid_output_lse, output, kv_seq_len, q_len, bsz, mid_output.stride(0), mid_output.stride(1), mid_output.stride(2), mid_output.stride(3), mid_output_lse.stride(0), mid_output_lse.stride(1), mid_output_lse.stride(2), output.stride(0), head_dim, 1, BLOCK_KV=block_size, HEAD_DIM=head_dim, ) return output
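Note: the split-KV partial results written by _flash_decoding_fwd_kernel are merged by _flash_decoding_fwd_reduce_kernel using a running log-sum-exp. For a single (token, head) pair, the same reduction can be written in plain PyTorch as a sketch for validation:

import torch

def combine_split_kv_ref(mid_o, mid_o_lse):
    # mid_o: [kv_split_num, head_dim] partial outputs per KV split.
    # mid_o_lse: [kv_split_num] log-sum-exp of each split's attention scores.
    m = mid_o_lse.max()
    w = torch.exp(mid_o_lse - m)  # relative weight of each split
    return (w[:, None] * mid_o).sum(dim=0) / w.sum()

mid_o = torch.randn(3, 64)
mid_o_lse = torch.randn(3)
o = combine_split_kv_ref(mid_o, mid_o_lse)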
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/_C/__init__.py
colossalai/_C/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/accelerator.py
colossalai/booster/accelerator.py
import torch import torch.nn as nn __all__ = ["Accelerator"] _supported_devices = [ "cpu", "cuda", # To be supported # 'xpu', # 'npu', # 'tpu', ] class Accelerator: """ Accelerator is an abstraction for the hardware device that is used to run the model. Args: device (str): The device to be used. Currently only 'cpu' and 'cuda' are supported. """ def __init__(self, device: str): self.device = device assert ( self.device in _supported_devices ), f"Device {self.device} is not supported yet, supported devices include {_supported_devices}" def bind(self): """ Set the default device for the current process. """ if self.device == "cpu": pass elif self.device == "cuda": # TODO(FrankLeeeee): use global environment to check if it is a dist job # if is_distributed: # local_rank = EnvTable().get_local_rank() # torch.cuda.set_device(torch.device(f'cuda:{local_rank}')) torch.cuda.set_device(torch.device("cuda")) else: raise ValueError(f"Device {self.device} is not supported yet") def configure_model(self, model: nn.Module) -> nn.Module: """ Move the model to the device. Args: model (nn.Module): The model to be moved. """ model = model.to(torch.device(self.device)) return model
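Note: a minimal usage sketch of Accelerator on its own (the import path follows this file's location; in practice it is driven by Booster rather than used directly):

import torch.nn as nn

from colossalai.booster.accelerator import Accelerator

model = nn.Linear(16, 16)
accelerator = Accelerator("cpu")  # or "cuda" on a GPU machine
accelerator.bind()                # no-op for "cpu"; sets the default CUDA device otherwise
model = accelerator.configure_model(model)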
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/booster.py
colossalai/booster/booster.py
from contextlib import contextmanager from typing import Any, Callable, Dict, Iterator, List, Optional, Union import torch import torch.nn as nn from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils.data import DataLoader from colossalai.logging import get_dist_logger SUPPORT_PEFT = False try: import peft SUPPORT_PEFT = True except ImportError: pass import colossalai.interface.pretrained as pretrained_utils from colossalai.checkpoint_io import GeneralCheckpointIO from colossalai.interface import ModelWrapper, OptimizerWrapper from colossalai.quantization import BnbQuantizationConfig from .accelerator import Accelerator from .mixed_precision import MixedPrecision, mixed_precision_factory from .plugin import Plugin from .plugin.pp_plugin_base import PipelinePluginBase __all__ = ["Booster"] class Booster: """ Booster is a high-level API for training neural networks. It provides a unified interface for training with different precision, accelerator, and plugin. ```python # Following is pseudocode colossalai.launch(...) plugin = GeminiPlugin(...) booster = Booster(precision='fp16', plugin=plugin) model = GPT2() optimizer = HybridAdam(model.parameters()) dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8) lr_scheduler = LinearWarmupScheduler() criterion = GPTLMLoss() model, optimizer, criterion, dataloader, lr_scheduler = booster.boost(model, optimizer, criterion, dataloader, lr_scheduler) for epoch in range(max_epochs): for input_ids, attention_mask in dataloader: outputs = model(input_ids.cuda(), attention_mask.cuda()) loss = criterion(outputs.logits, input_ids) booster.backward(loss, optimizer) optimizer.step() lr_scheduler.step() optimizer.zero_grad() ``` Args: device (str or torch.device): The device to run the training. Default: None. If plugin is not used or plugin doesn't control the device, this argument will be set as training device ('cuda' will be used if argument is None). mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None. If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'. 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex. plugin (Plugin): The plugin to run the training. Default: None. """ def __init__( self, device: Optional[str] = None, mixed_precision: Optional[Union[MixedPrecision, str]] = None, plugin: Optional[Plugin] = None, ) -> None: if plugin is not None: assert isinstance( plugin, Plugin ), f"Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}." 
self.plugin = plugin self.logger = get_dist_logger() # set accelerator if self.plugin and self.plugin.control_device(): self.accelerator = None if device is not None: self.logger.warning( "The plugin will control the accelerator," "so the device argument will be ignored.", ranks=[0] ) else: device = device or "cuda" self.accelerator = Accelerator(device) # set precision if self.plugin and self.plugin.control_precision(): if mixed_precision is not None: self.logger.warning( "The plugin will control the precision," "so the mixed_precision argument will be ignored.", ranks=[0], ) self.mixed_precision = None elif mixed_precision is None: self.mixed_precision = None else: # validate and set precision if isinstance(mixed_precision, str): # the user will take the default arguments for amp training self.mixed_precision = mixed_precision_factory(mixed_precision) elif isinstance(mixed_precision, MixedPrecision): # the user can customize the arguments by passing the precision object self.mixed_precision = mixed_precision else: raise ValueError( f"Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}." ) if self.plugin is not None and self.plugin.control_checkpoint_io(): self.checkpoint_io = self.plugin.get_checkpoint_io() else: self.checkpoint_io = GeneralCheckpointIO() def boost( self, model: nn.Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, dataloader: Optional[DataLoader] = None, lr_scheduler: Optional[LRScheduler] = None, ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]: """ Wrap and inject features to the passed in model, optimizer, criterion, lr_scheduler, and dataloader. Args: model (nn.Module): Convert model into a wrapped model for distributive training. The model might be decorated or partitioned by plugin's strategy after execution of this method. optimizer (Optimizer, optional): Convert optimizer into a wrapped optimizer for distributive training. The optimizer's param groups or states might be decorated or partitioned by plugin's strategy after execution of this method. Defaults to None. criterion (Callable, optional): The function that calculates loss. Defaults to None. dataloader (DataLoader, optional): The prepared dataloader for training. Defaults to None. lr_scheduler (LRScheduler, optional): The learning scheduler for training. Defaults to None. Returns: List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]: The list of boosted input arguments. 
""" # TODO(FrankLeeeee): consider multi-model and multi-optimizer case # TODO(FrankLeeeee): consider multi-dataloader case pretrained_path = pretrained_utils.get_pretrained_path(model) # transform model for mixed precision if self.plugin: model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure( model, optimizer, criterion, dataloader, lr_scheduler ) if self.plugin and not self.plugin.control_device(): # transform model for accelerator model = self.accelerator.configure_model(model) if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()): # transform model for mixed precision # when mixed_precision is specified and the plugin is not given or does not control the precision model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion) if pretrained_path: self.load_model(model, pretrained_path) # clear pretrained path attr orig_model = model.unwrap() if isinstance(model, ModelWrapper) else model pretrained_utils.set_pretrained_path(orig_model, None) return model, optimizer, criterion, dataloader, lr_scheduler def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None: """Execution of backward during training step. Args: loss (torch.Tensor): The loss for backpropagation. optimizer (Optimizer): The optimizer to be updated. """ # TODO(frank lee): implement this method with plugin optimizer.backward(loss) def execute_pipeline( self, data_iter: Iterator, model: nn.Module, criterion: Callable[[Any, Any], torch.Tensor], optimizer: Optional[Optimizer] = None, return_loss: bool = True, return_outputs: bool = False, ) -> Dict[str, Any]: """ Execute forward & backward when utilizing pipeline parallel. Return loss or Huggingface style model outputs if needed. Warning: This function is tailored for the scenario of pipeline parallel. As a result, please don't do the forward/backward pass in the conventional way (model(input)/loss.backward()) when doing pipeline parallel training with booster, which will cause unexpected errors. Args: data_iter(Iterator): The iterator for getting the next batch of data. Usually there are two ways to obtain this argument: 1. wrap the dataloader to iterator through: iter(dataloader) 2. get the next batch from dataloader, and wrap this batch to iterator: iter([batch]) model (nn.Module): The model to execute forward/backward, it should be a model wrapped by a plugin that supports pipeline. criterion: (Callable[[Any, Any], torch.Tensor]): Criterion to be used. It should take two arguments: model outputs and inputs, and returns loss tensor. 'lambda y, x: loss_fn(y)' can turn a normal loss function into a valid two-argument criterion here. optimizer (Optimizer, optional): The optimizer for execution of backward. Can be None when only doing forward (i.e. evaluation). Defaults to None. return_loss (bool, optional): Whether to return loss in the dict returned by this method. Defaults to True. return_output (bool, optional): Whether to return Huggingface style model outputs in the dict returned by this method. Defaults to False. Returns: Dict[str, Any]: Output dict in the form of {'loss': ..., 'outputs': ...}. ret_dict['loss'] is the loss of forward if return_loss is set to True, else None. ret_dict['outputs'] is the Huggingface style model outputs during forward if return_output is set to True, else None. """ assert isinstance( self.plugin, PipelinePluginBase ), f"The plugin {self.plugin.__class__.__name__} does not support pipeline." 
return self.plugin.execute_pipeline(data_iter, model, criterion, optimizer, return_loss, return_outputs) def no_sync(self, model: nn.Module = None, optimizer: OptimizerWrapper = None) -> contextmanager: """Context manager to disable gradient synchronization across DP process groups. Support torch DDP and Low Level ZeRO-1 for now. Args: model (nn.Module): The model to be disabled gradient synchronization, for DDP optimizer (OptimizerWrapper): The optimizer to be disabled gradient synchronization, for ZeRO1-1 Returns: contextmanager: Context to disable gradient synchronization. """ assert ( self.plugin is not None ), f"no_sync is only enabled when a plugin is provided and the plugin supports no_sync." assert self.plugin.support_no_sync(), f"The plugin {self.plugin.__class__.__name__} does not support no_sync." return self.plugin.no_sync(model, optimizer) def enable_lora( self, model: nn.Module, pretrained_dir: Optional[str] = None, lora_config: "peft.LoraConfig" = None, bnb_quantization_config: Optional[BnbQuantizationConfig] = None, quantize=False, ) -> nn.Module: """ Wrap the passed in model with LoRA modules for training. If pretrained directory is provided, lora configs and weights are loaded from that directory. Lora in ColossalAI is implemented using Huggingface peft library, so the arguments for Lora configuration are same as those of peft. Args: model (nn.Module): The model to be appended with LoRA modules. pretrained_dir(str, optional): The path to the pretrained directory, can be a local directory or model_id of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. When set to None, create new lora configs and weights for the model using the passed in lora_config. Defaults to None. lora_config: (peft.LoraConfig, optional): Passed in LoraConfig for peft. Defaults to None. """ if not SUPPORT_PEFT: raise ImportError("Please install Huggingface Peft library to enable lora features in ColossalAI!") assert self.plugin is not None, f"Lora can only be enabled when a plugin is provided." assert self.plugin.support_lora(), f"The plugin {self.plugin.__class__.__name__} does not support lora." if pretrained_dir is None: assert ( lora_config is not None ), "Please provide configuration for Lora when pretrained directory path isn't passed in." assert isinstance( lora_config, peft.LoraConfig ), "The passed in configuration should be an instance of peft.LoraConfig." if lora_config is None: assert ( pretrained_dir is not None ), "Please provide pretrained directory path if not passing in lora configuration." if quantize is True: if bnb_quantization_config is not None: self.logger.warning( "User defined BnbQuantizationConfig is not fully tested in ColossalAI. Use it at your own risk.", ranks=[0], ) else: bnb_quantization_config = BnbQuantizationConfig( load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ) return self.plugin.enable_lora(model, pretrained_dir, lora_config, bnb_quantization_config) def load_model( self, model: Union[nn.Module, ModelWrapper], checkpoint: str, strict: bool = True, low_cpu_mem_mode: bool = True, num_threads: int = 1, ) -> None: """Load model from checkpoint. Args: model (nn.Module or ModelWrapper): A model boosted by Booster. checkpoint (str): Path to the checkpoint. It must be a local path. It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path. 
strict (bool, optional): whether to strictly enforce that the keys in :attr:`state_dict` match the keys returned by this module's :meth:`~torch.nn.Module.state_dict` function. Defaults to True. low_cpu_mem_mode (bool): whether to load the model in low cpu memory mode. If false, it will use RAM cache to accelerate loading. Default: True. num_threads (int): number of threads to use when loading the model. Only useful when disabling low cpu mem mode. Default: 1. """ self.checkpoint_io.load_model( model, checkpoint, strict, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads ) def save_model( self, model: Union[nn.Module, ModelWrapper], checkpoint: str, shard: bool = False, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_safetensors: bool = False, use_async: bool = False, ) -> None: """Save model to checkpoint. Args: model (nn.Module or ModelWrapper): A model boosted by Booster. checkpoint (str): Path to the checkpoint. It must be a local path. It is a file path if ``shard=False``. Otherwise, it is a directory path. shard (bool, optional): Whether to save checkpoint a sharded way. If true, the checkpoint will be a folder with the same format as Huggingface transformers checkpoint. Otherwise, it will be a single file. Defaults to False. gather_dtensor (bool, optional): whether to gather the distributed tensor to the first device. Default: True. prefix (str, optional): A prefix added to parameter and buffer names to compose the keys in state_dict. Defaults to None. size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024. use_safetensors (bool, optional): whether to use safe tensors. Default: False. If set to True, the checkpoint will be saved. use_async (bool, optional): whether to save the state_dict of model asynchronously. Default: False. """ self.checkpoint_io.save_model( model, checkpoint=checkpoint, shard=shard, gather_dtensor=gather_dtensor, prefix=prefix, size_per_shard=size_per_shard, use_safetensors=use_safetensors, use_async=use_async, ) def load_optimizer( self, optimizer: Optimizer, checkpoint: str, low_cpu_mem_mode: bool = True, num_threads: int = 1, ) -> None: """Load optimizer from checkpoint. Args: optimizer (Optimizer): An optimizer boosted by Booster. checkpoint (str): Path to the checkpoint. It must be a local path. It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path. low_cpu_mem_mode (bool): whether to load the model in low cpu memory mode. If false, it will use RAM cache to accelerate loading. Default: True. num_threads (int): number of threads to use when loading the model. Only useful when disabling low cpu mem mode. Default: 1. """ self.checkpoint_io.load_optimizer( optimizer, checkpoint, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads ) def save_optimizer( self, optimizer: Optimizer, checkpoint: str, shard: bool = False, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_async: bool = False, ) -> None: """ Save optimizer to checkpoint. Args: optimizer (Optimizer): An optimizer boosted by Booster. checkpoint (str): Path to the checkpoint. It must be a local path. It is a file path if ``shard=False``. Otherwise, it is a directory path. shard (bool, optional): Whether to save checkpoint a sharded way. If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False. 
gather_dtensor (bool): whether to gather the distributed tensor to the first device. Default: True. prefix (str, optional): A prefix added to parameter and buffer names to compose the keys in state_dict. Defaults to None. size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024. """ self.checkpoint_io.save_optimizer( optimizer, checkpoint, shard, gather_dtensor, prefix, size_per_shard, use_async=use_async ) def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str) -> None: """Save lr scheduler to checkpoint. Args: lr_scheduler (LRScheduler): A lr scheduler boosted by Booster. checkpoint (str): Path to the checkpoint. It must be a local file path. """ self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint) def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str) -> None: """Load lr scheduler from checkpoint. Args: lr_scheduler (LRScheduler): A lr scheduler boosted by Booster. checkpoint (str): Path to the checkpoint. It must be a local file path. """ self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint) def save_lora_as_pretrained( self, model: Union[nn.Module, ModelWrapper], checkpoint: str, use_safetensors: bool = False ) -> None: """ Save the lora adapters and adapter configuration file to a pretrained checkpoint directory. Args: model (Union[nn.Module, ModelWrapper]): A model boosted by Booster. checkpoint (str): Path to the checkpoint directory. It must be a local path. use_safetensors (bool, optional): Whether to use safe tensors when saving. Defaults to False. """ if not SUPPORT_PEFT: raise ImportError("Please install Huggingface Peft library to enable lora features in ColossalAI!") assert self.plugin is not None, f"Lora can only be enabled when a plugin is provided." assert self.plugin.support_lora(), f"The plugin {self.plugin.__class__.__name__} does not support lora." self.checkpoint_io.save_lora_as_pretrained(model, checkpoint, use_safetensors)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
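The `execute_pipeline`, `backward`, and checkpoint methods documented above are easiest to see end to end in a short training-step sketch. The sketch below is illustrative only: the `HybridParallelPlugin` settings, checkpoint paths, and the `...` placeholders are assumptions, and the model is assumed to be a Hugging Face-style model whose outputs expose a `.loss` attribute.

```python
# Minimal pipeline training-step sketch against the Booster API shown above.
# Plugin layout, paths and the `...` placeholders are assumptions for illustration.
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import HybridParallelPlugin

colossalai.launch_from_torch()
plugin = HybridParallelPlugin(tp_size=1, pp_size=2, num_microbatches=4)  # assumed parallel layout
booster = Booster(plugin=plugin)

model, optimizer, dataloader, lr_scheduler = ...  # user-defined objects
criterion = lambda outputs, inputs: outputs.loss  # two-argument criterion, as the docstring requires

model, optimizer, criterion, dataloader, lr_scheduler = booster.boost(
    model, optimizer, criterion, dataloader, lr_scheduler
)

# Pipeline parallelism: feed an iterator, not a batch, and let the plugin drive fwd/bwd.
data_iter = iter(dataloader)
out = booster.execute_pipeline(data_iter, model, criterion, optimizer, return_loss=True)
loss = out["loss"]  # only meaningful on the last pipeline stage
optimizer.step()
optimizer.zero_grad()

# Checkpointing goes through the same booster object.
booster.save_model(model, "./ckpt/model", shard=True, size_per_shard=1024)
booster.save_optimizer(optimizer, "./ckpt/optim", shard=True)
```

Note that with pipeline parallelism the backward pass happens inside `execute_pipeline`, so the conventional `loss.backward()` must not be called, exactly as the warning in the docstring states.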
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/__init__.py
colossalai/booster/__init__.py
from .accelerator import Accelerator
from .booster import Booster
from .plugin import Plugin
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
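The `__init__.py` above keeps the public surface of the package to three names, so user code only needs imports along these lines (the concrete plugin name below is just an example):

```python
# Typical imports enabled by the re-exports above.
from colossalai.booster import Accelerator, Booster, Plugin
from colossalai.booster.plugin import TorchDDPPlugin  # plugins live in the subpackage
```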
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/low_level_zero_plugin.py
colossalai/booster/plugin/low_level_zero_plugin.py
import enum import os from contextlib import nullcontext from functools import partial from pathlib import Path from types import MethodType from typing import Callable, Dict, Iterator, List, Optional, Tuple import torch import torch.distributed import torch.distributed as dist import torch.nn as nn from torch.distributed.distributed_c10d import _get_default_group from torch.nn import Parameter from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils._pytree import tree_map from torch.utils.data import DataLoader from colossalai.accelerator import get_accelerator from colossalai.checkpoint_io import CheckpointIndexFile, CheckpointIO from colossalai.checkpoint_io.utils import ( create_pinned_state_dict, get_optimizer_base_filenames, get_shard_filename, load_param_groups_into_optimizer, load_state_dict, load_state_dict_shards, load_states_into_optimizer, save_param_groups, save_state_dict, sharded_optimizer_loading_epilogue, ) from colossalai.cluster import ProcessGroupMesh from colossalai.interface import AMPModelMixin, ModelWrapper, OptimizerWrapper from colossalai.interface.optimizer import DistributedOptim from colossalai.logging import get_dist_logger from colossalai.nn.optimizer import DistGaloreAwamW, cast_to_distributed from colossalai.quantization import BnbQuantizationConfig, quantize_model from colossalai.quantization.fp8_hook import FP8Hook from colossalai.tensor.colo_parameter import ColoParameter from colossalai.tensor.param_op_hook import ColoParamOpHookManager from colossalai.zero import LowLevelZeroOptimizer from colossalai.zero.low_level.zero_hook import ZeroOpHook, wait_all_gather_handle from .dp_plugin_base import DPPluginBase from .torch_ddp_plugin import TorchDDPCheckpointIO __all__ = ["LowLevelZeroPlugin"] def _convert_floating_point(x, dtype: torch.dtype = torch.float16): if isinstance(x, torch.Tensor) and torch.is_floating_point(x): return x.to(dtype) return x SUPPORTED_PRECISION = ["fp16", "bf16", "fp32"] class OptimizerParamCheckState(enum.Enum): ORIGIN_PARAM_FINDED = 0 ORIGIN_PARAM_NOT_FIND = -1 LORA_PARM_EXISTED = -2 class LowLevelZeroModel(ModelWrapper, AMPModelMixin): def __init__( self, module: nn.Module, precision: str, overlap_allgather: bool = False, cast_inputs: bool = True, use_fp8: bool = False, ) -> None: super().__init__(module) self.dtype = None if precision == "fp16": self.dtype = torch.float16 elif precision == "bf16": self.dtype = torch.bfloat16 if self.dtype is not None: module = module.to(self.dtype) module = module.to(get_accelerator().get_current_device()) self.module = module self.convert_fn = None self.use_fp8 = use_fp8 if self.dtype is not None and cast_inputs: self.convert_fn = partial(_convert_floating_point, dtype=self.dtype) self.overlap_allgather = overlap_allgather self.op_hooks = [] if overlap_allgather: self.op_hooks.append(ZeroOpHook()) if use_fp8: self.op_hooks.append(FP8Hook()) if overlap_allgather or use_fp8: for p in module.parameters(): if p.requires_grad and type(p) is not ColoParameter: p.__class__ = ColoParameter p.__init__(p, requires_grad=True) def forward(self, *args, **kwargs): if self.convert_fn is not None: args = tree_map(self.convert_fn, args) kwargs = tree_map(self.convert_fn, kwargs) with self._hook_context(): return super().forward(*args, **kwargs) def _force_wait_all_gather(self): for p in self.module.parameters(): wait_all_gather_handle(p) def _hook_context(self): return ColoParamOpHookManager.use_hooks(*self.op_hooks) if len(self.op_hooks) > 0 else 
nullcontext() class LowLevelZeroCheckpointIO(TorchDDPCheckpointIO): def save_unsharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, gather_dtensor: bool = False, use_async: bool = False ): """Save optimizer to checkpoint but only on master process. Args: optimizer (OptimizerWrapper): Optimizer to save state_dict checkpoint (str): Path to save checkpoint gather_dtensor (bool): Whether to gather_dtensor, not used """ assert isinstance(optimizer, LowLevelZeroOptimizer), "Please boost the optimizer before saving!" # the `state_dict` in LowLevelZeroOptimizer has communication # if only the master rank collect state_dict and save, # the communication on each rank would not match if use_async and self.coordinator.is_master(): if id(optimizer) not in self.pinned_state_dicts: self.pinned_state_dicts[id(optimizer)] = {} pinned_state_dicts = self.pinned_state_dicts[id(optimizer)] else: pinned_state_dicts = None state_dict = optimizer.state_dict(pinned_state_dicts, only_on_master=True) if self.coordinator.is_master(): if use_async: from colossalai.utils.safetensors import save_nested f_writer = save_nested(checkpoint, state_dict) self.async_writers.append(f_writer) else: save_state_dict(state_dict, checkpoint, use_safetensors=False) def load_unsharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, low_cpu_mem_mode: bool = True, num_threads: int = 1 ): use_async = checkpoint.endswith(".safetensors") if use_async: from colossalai.utils.safetensors import load_flat checkpoint = load_flat(checkpoint) else: checkpoint = load_state_dict(checkpoint) if not low_cpu_mem_mode: checkpoint = create_pinned_state_dict(checkpoint, empty=False, num_threads=num_threads) optimizer.load_state_dict(checkpoint) def save_sharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, gather_dtensor: bool = False, prefix: str = None, size_per_shard: int = 1024, use_async: bool = False, ): """ Save sharded Zero-optimizer checkpoint under the given checkpointing path. The following files will be created under the path: - An index file (pytorch_optim.bin.index.json) containing a map between optimizer states and file names - A group file (pytorch_optim_group.bin) recording information of param_groups - Multiple files (pytorch_optim-000XX.bin) that store state tensors of optimizer in a sharding way Args: optimizer (OptimizerWrapper): Optimizer to save sharded state_dict checkpoint (str): Path to save optimizer state_dict gather_dtensor (bool): Whether to gather_dtensor, not used prefix (str): Perfix of file to save size_per_shard (int): Max file size of each file that store state tensors """ assert isinstance(optimizer, LowLevelZeroOptimizer), "Please boost the optimizer before saving!" if os.path.isfile(checkpoint): self.logger.error(f"Provided path ({checkpoint}) should be a directory, not a file", ranks=[0]) return Path(checkpoint).mkdir(parents=True, exist_ok=True) # state_dict only provide only 'param_groups' state_dict = optimizer.optim.state_dict() # state shard would be handled by the low-level zero optimizer if use_async and self.coordinator.is_master(): if id(optimizer) not in self.pinned_state_dicts: self.pinned_state_dicts[id(optimizer)] = {} pinned_state_dicts = self.pinned_state_dicts[id(optimizer)] else: pinned_state_dicts = None sharded_state = optimizer.state_dict_shard( max_shard_size=size_per_shard, pinned_state_dicts=pinned_state_dicts, only_on_master=True ) # Preparing file paths and index file. 
states_name, save_index_file, param_group_file = get_optimizer_base_filenames(prefix, use_safetensors=use_async) index_file = CheckpointIndexFile(checkpoint) index_file.append_meta_data("param_groups", param_group_file) # Store the information of param groups to param_group_file. if self.coordinator.is_master(): group_file_path = os.path.join(checkpoint, param_group_file) save_param_groups(state_dict, group_file_path) # Save shards of optimizer states. total_size = 0 for idx, shard_pair in enumerate(sharded_state): shard, current_size = shard_pair shard_file = get_shard_filename(states_name, idx) total_size = total_size + current_size for param_id in shard.keys(): index_file.append_weight_map(str(param_id), shard_file) checkpoint_file_path = os.path.join(checkpoint, shard_file) if self.coordinator.is_master(): if use_async: from colossalai.utils.safetensors import save_nested f_writer = save_nested(checkpoint_file_path, shard) self.async_writers.append(f_writer) else: save_state_dict(shard, checkpoint_file_path, use_safetensors=False) # Wrap up index file. index_file.append_meta_data("total_size", total_size) if self.coordinator.is_master(): index_file.write_index_file(save_index_file) self.logger.info( f"The optimizer is going to be split to checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}.", ranks=[0], ) def load_sharded_optimizer( self, optimizer: OptimizerWrapper, index_file_path: str, prefix: str, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """Load sharded optimizer with the given path to index file. Args: optimizer (OptimizerWrapper): Optimizer to load state_dict index_file_path (str): Path to the index file prefix (str): Not used. """ assert isinstance(optimizer, LowLevelZeroOptimizer), "Please boost the optimizer before Loading!" optimizer = optimizer.unwrap() # Read checkpoint index file. ckpt_index_file = CheckpointIndexFile.from_file(index_file_path) # Load param_groups param_group_path = ckpt_index_file.get_param_group_filename() if param_group_path is None: raise RuntimeError( f"Invalid index file path {index_file_path} for an optimizer. \ Lacking param group file under current directory." ) id_map = load_param_groups_into_optimizer(optimizer, param_group_path) checkpoint_files, _ = ckpt_index_file.get_checkpoint_filenames() for state_dict in load_state_dict_shards(checkpoint_files, True, False, low_cpu_mem_mode): # shard state dict for param_idx, state in state_dict.items(): for k, v in state.items(): if isinstance(v, torch.Tensor) and k != "step": padding_size = ( self.coordinator.world_size - v.numel() % self.coordinator.world_size ) % self.coordinator.world_size with torch.no_grad(): v = v.flatten() if padding_size > 0: v = torch.nn.functional.pad(v, [0, padding_size]) v_list = v.split(v.numel() // self.coordinator.world_size) state_dict[param_idx][k] = v_list[self.coordinator.rank].detach() if low_cpu_mem_mode: state_dict[param_idx][k] = state_dict[param_idx][k].clone() if not low_cpu_mem_mode: state_dict = create_pinned_state_dict(state_dict, empty=False, num_threads=num_threads) load_states_into_optimizer(optimizer, state_dict, id_map) sharded_optimizer_loading_epilogue(optimizer) def load_unsharded_model( self, model: ModelWrapper, checkpoint: str, strict: bool = True, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): assert isinstance(model, LowLevelZeroModel), "Please boost the model before loading!" 
model._force_wait_all_gather() super().load_unsharded_model( model, checkpoint, strict, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads ) model.update_master_params() def load_sharded_model( self, model: ModelWrapper, checkpoint_index_file: Path, strict: bool = False, use_safetensors: bool = False, load_sub_module: bool = True, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): assert isinstance(model, LowLevelZeroModel), "Please boost the model before loading!" model._force_wait_all_gather() super().load_sharded_model( model, checkpoint_index_file, strict, use_safetensors, load_sub_module, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads, ) model.update_master_params() def save_unsharded_model( self, model: ModelWrapper, checkpoint: str, gather_dtensor: bool, use_safetensors: bool, use_async: bool = False ): assert isinstance(model, LowLevelZeroModel), "Please boost the model before loading!" model._force_wait_all_gather() return super().save_unsharded_model(model, checkpoint, gather_dtensor, use_safetensors, use_async=use_async) def save_sharded_model( self, model: ModelWrapper, checkpoint_path: str, gather_dtensor: bool = True, prefix: Optional[str] = None, max_shard_size: int = 1024, use_safetensors: bool = False, use_async: bool = False, ): assert isinstance(model, LowLevelZeroModel), "Please boost the model before loading!" model._force_wait_all_gather() return super().save_sharded_model( model, checkpoint_path, gather_dtensor, prefix, max_shard_size, use_safetensors, use_async=use_async ) def save_lora_as_pretrained(self, model, checkpoint, use_safetensors, state_dict: Optional[dict] = None): assert isinstance(model, LowLevelZeroModel), "Please boost the model before saving!" model._force_wait_all_gather() super().save_lora_as_pretrained(model, checkpoint, use_safetensors, state_dict=state_dict) class LowLevelZeroPlugin(DPPluginBase): """ Plugin for low level zero. ```python from colossalai.booster import Booster from colossalai.booster.plugin import LowLevelZeroPlugin model, train_dataset, optimizer, criterion = ... plugin = LowLevelZeroPlugin() train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8) booster = Booster(plugin=plugin) model, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion) ``` Args: stage (int, optional): ZeRO stage. Defaults to 1. precision (str, optional): precision. Support 'fp16', 'bf16' and 'fp32'. Defaults to 'fp16'. initial_scale (float, optional): Initial scale used by DynamicGradScaler. Defaults to 2**32. min_scale (float, optional): Min scale used by DynamicGradScaler. Defaults to 1. growth_factor (float, optional): growth_factor used by DynamicGradScaler. Defaults to 2. backoff_factor (float, optional): backoff_factor used by DynamicGradScaler. Defaults to 0.5. growth_interval (float, optional): growth_interval used by DynamicGradScaler. Defaults to 1000. hysteresis (float, optional): hysteresis used by DynamicGradScaler. Defaults to 2. max_scale (int, optional): max_scale used by DynamicGradScaler. Defaults to 2**32. max_norm (float, optional): max_norm used for `clip_grad_norm`. You should notice that you shall not do clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm. norm_type (float, optional): norm_type used for `clip_grad_norm`. reduce_bucket_size_in_m (int, optional): grad reduce bucket size in M. Defaults to 12. communication_dtype (torch.dtype, optional): communication dtype. 
If not specified, the dtype of param will be used. Defaults to None. overlap_communication (bool, optional): whether to overlap communication and computation. Defaults to True. cpu_offload (bool, optional): whether to offload grad, master weight and optimizer state to cpu. Defaults to False. verbose (bool, optional): verbose mode. Debug info including grad overflow will be printed. Defaults to False. use_fp8 (bool, optional): Whether to enable fp8 mixed precision training. Defaults to False. fp8_communication (bool, optional): Whether to enable fp8 communication. Defaults to False. extra_dp_size (int, optional): The number of extra data parallel groups. Defaults to 1. """ def __init__( self, stage: int = 1, precision: str = "fp16", initial_scale: float = 2**32, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, max_norm: float = 0.0, norm_type: float = 2.0, reduce_bucket_size_in_m: int = 12, communication_dtype: Optional[torch.dtype] = None, overlap_communication: bool = True, overlap_allgather: bool = False, cpu_offload: bool = False, master_weights: bool = True, verbose: bool = False, cast_inputs: bool = True, fp8_communication: bool = False, use_fp8: bool = False, extra_dp_size: int = 1, ) -> None: super().__init__() assert stage in (1, 2), f"LowLevelZeroPlugin only supports stage 1/2 training" assert precision in SUPPORTED_PRECISION, f"LowLevelZeroPlugin only supports {SUPPORTED_PRECISION} training" assert norm_type == 2.0, f"LowLevelZeroPlugin only supports norm_type=2.0 now" if extra_dp_size > 1: assert dist.get_world_size() % extra_dp_size == 0, "extra_dp_size should be a factor of world_size" inner_dp_size = dist.get_world_size() // extra_dp_size self.pg_mesh = ProcessGroupMesh(extra_dp_size, inner_dp_size) self.stage = stage self.precision = precision self.zero_optim_kwargs = dict( initial_scale=initial_scale, min_scale=min_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, clip_grad_norm=max_norm, reduce_bucket_size=reduce_bucket_size_in_m * 1024 * 1024, communication_dtype=communication_dtype, overlap_communication=overlap_communication, partition_grad=(stage == 2), cpu_offload=cpu_offload, master_weights=master_weights, overlap_allgather=overlap_allgather, fp8_communication=fp8_communication, ) if extra_dp_size > 1: self.zero_optim_kwargs["extra_dp_group"] = self.pg_mesh.get_group_along_axis(0) self.zero_optim_kwargs["dp_process_group"] = self.pg_mesh.get_group_along_axis(1) self.lora_enabled = False self.verbose = verbose self.logger = get_dist_logger() self.cast_inputs = cast_inputs self.use_fp8 = use_fp8 # set class name with stage, for better error message setattr(self.__class__, "__name__", f"LowLevelZeroPlugin_ZeRO-{stage}") def support_no_sync(self) -> bool: return self.stage == 1 def support_lora(self) -> bool: return False def control_precision(self) -> bool: return True def supported_precisions(self) -> List[str]: return SUPPORTED_PRECISION def control_device(self) -> bool: return True def supported_devices(self) -> List[str]: return ["cuda", "npu"] def support_lora(self) -> bool: return True def enable_lora( self, model: nn.Module, pretrained_dir: Optional[str] = None, lora_config: Optional[Dict] = None, bnb_quantization_config: Optional[BnbQuantizationConfig] = None, ) -> nn.Module: from peft import PeftModel, get_peft_model assert not isinstance(model, LowLevelZeroModel), 
"Lora should be enabled before boosting the model." self.lora_enabled = True self.logger.warning("You have enabled LoRa training. Please check the hyperparameters such as lr", ranks=[0]) if bnb_quantization_config is not None: model = quantize_model(model, bnb_quantization_config) if pretrained_dir is None: peft_model = get_peft_model(model, lora_config) else: peft_model = PeftModel.from_pretrained(model, pretrained_dir, is_trainable=True) return peft_model def get_param_group_id(self, optimizer: Optimizer, origin_param: Parameter): origin_param_id = id(origin_param) for group_id, param_group in enumerate(optimizer.param_groups): for p in param_group["params"]: if id(p) == origin_param_id: return group_id return -1 def get_param_group_id(self, optimizer: Optimizer, origin_param: Parameter, lora_param: Parameter): origin_param_id = id(origin_param) lora_param_id = id(lora_param) target_group_id = None for group_id, param_group in enumerate(optimizer.param_groups): for p in param_group["params"]: if id(p) == lora_param_id: # check if the lora parameter exists. return target_group_id, OptimizerParamCheckState.LORA_PARM_EXISTED if id(p) == origin_param_id: target_group_id = group_id if target_group_id is not None: return target_group_id, OptimizerParamCheckState.ORIGIN_PARAM_FINDED else: return target_group_id, OptimizerParamCheckState.ORIGIN_PARAM_NOT_FIND def add_lora_params_to_optimizer(self, model, optimizer): """add lora parameters to optimizer""" name2param = {} for name, param in model.named_parameters(): name2param[name] = param for name, param in name2param.items(): if "lora_A" in name or "lora_B" in name: origin_key = name.replace("lora_A.", "") origin_key = origin_key.replace("lora_B.", "") origin_key = origin_key.replace(f"{model.active_adapter}", "base_layer") origin_param = name2param[origin_key] group_id, check_state = self.get_param_group_id(optimizer, origin_param, param) if check_state == OptimizerParamCheckState.ORIGIN_PARAM_NOT_FIND: self.logger.warning( f"Origin parameter {origin_key} related to {name} doesn't exist in optimizer param_groups.", ranks=[0], ) elif ( check_state == OptimizerParamCheckState.ORIGIN_PARAM_FINDED and group_id is not None and group_id >= 0 ): optimizer.param_groups[group_id]["params"].append(param) def configure( self, model: nn.Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, dataloader: Optional[DataLoader] = None, lr_scheduler: Optional[LRScheduler] = None, ) -> Tuple[nn.Module, OptimizerWrapper, Callable, DataLoader, LRScheduler]: if self.lora_enabled: from peft import PeftModel assert isinstance( model, PeftModel ), "The model should have been wrapped as a PeftModel when self.lora_enabled is True" if optimizer is not None: self.add_lora_params_to_optimizer(model, optimizer) if not isinstance(model, ModelWrapper): model = LowLevelZeroModel( model, self.precision, overlap_allgather=self.zero_optim_kwargs["overlap_allgather"], cast_inputs=self.cast_inputs, use_fp8=self.use_fp8, ) # TODO: Support Galore + ZeRO zero_stage = self.stage zero_optim_kwargs = {**self.zero_optim_kwargs} dp_size = dist.get_world_size() # Replace with the distributed implementation if exists optimizer = cast_to_distributed(optimizer) if isinstance(optimizer, DistGaloreAwamW) and zero_stage > 0 and dp_size > 0: self.logger.warning( "Galore is only supported for Tensor Parallel and vanilla Data Parallel yet. 
Disabling ZeRO.", ranks=[0], ) zero_optim_kwargs["partition_grad"] = False zero_stage = 0 if optimizer is not None and not isinstance(optimizer, OptimizerWrapper): optimizer: LowLevelZeroOptimizer = LowLevelZeroOptimizer( optimizer, **zero_optim_kwargs, verbose=self.verbose, backward_context=model._hook_context ) # inject update_master_params model.update_master_params = MethodType(optimizer.update_master_params, model) # Setup optimizers that require global states optim = optimizer.optim is_zero = dp_size > 1 and zero_stage > 0 dp_group = _get_default_group() # Use the whole world if isinstance(optim, DistributedOptim): shard_to_param = optimizer.get_master_to_working_map() padding_map = optimizer.get_param_padding_map() optim.setup_distributed(None, dp_group, shard_to_param, padding_map, is_zero) return model, optimizer, criterion, dataloader, lr_scheduler def control_checkpoint_io(self) -> bool: return True def get_checkpoint_io(self) -> CheckpointIO: return LowLevelZeroCheckpointIO() def no_sync(self, model: nn.Module, optimizer: OptimizerWrapper) -> Iterator[None]: assert isinstance(optimizer, LowLevelZeroOptimizer) return optimizer.no_sync()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
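Because `support_no_sync()` in `LowLevelZeroPlugin` only returns True for stage 1, a gradient-accumulation loop has to gate `booster.no_sync` on that stage. The sketch below shows one way to wire it up; the stage, precision, accumulation length, and `...` placeholders are assumptions, not part of the source above.

```python
# Gradient-accumulation sketch for ZeRO stage 1; stage, precision and accum_steps are assumptions.
from contextlib import nullcontext

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import LowLevelZeroPlugin

colossalai.launch_from_torch()
plugin = LowLevelZeroPlugin(stage=1, precision="bf16")  # no_sync is not supported for stage 2
booster = Booster(plugin=plugin)

model, optimizer, criterion, dataloader = ...  # user-defined objects
model, optimizer, criterion, dataloader, _ = booster.boost(model, optimizer, criterion, dataloader)

accum_steps = 4  # assumed accumulation length
for step, (inputs, labels) in enumerate(dataloader):
    is_sync_step = (step + 1) % accum_steps == 0
    ctx = nullcontext() if is_sync_step else booster.no_sync(model, optimizer)
    with ctx:
        loss = criterion(model(inputs), labels) / accum_steps
        booster.backward(loss, optimizer)
    if is_sync_step:
        optimizer.step()
        optimizer.zero_grad()
```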
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/torch_fsdp_plugin.py
colossalai/booster/plugin/torch_fsdp_plugin.py
import os from pathlib import Path from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple import torch import torch.nn as nn from packaging import version from torch.distributed import ProcessGroup if version.parse(torch.__version__) >= version.parse("1.12.0"): from torch.distributed.fsdp import FullStateDictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import StateDictType from torch.distributed.fsdp.fully_sharded_data_parallel import ( BackwardPrefetch, CPUOffload, FullStateDictConfig, MixedPrecision, ShardingStrategy, ) else: raise RuntimeError("FSDP is not supported while torch version under 1.12.0.") from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils.data import DataLoader from colossalai.checkpoint_io import CheckpointIndexFile, CheckpointIO, GeneralCheckpointIO, utils from colossalai.checkpoint_io.utils import async_save_state_dict_shards, create_pinned_state_dict from colossalai.cluster import DistCoordinator from colossalai.interface import ModelWrapper, OptimizerWrapper from colossalai.logging import get_dist_logger from colossalai.utils.safetensors import load_flat from .dp_plugin_base import DPPluginBase __all__ = ["TorchFSDPPlugin"] class TorchFSDPCheckpointIO(GeneralCheckpointIO): def __init__(self) -> None: super().__init__() self.coordinator = DistCoordinator() self.logger = get_dist_logger() def load_unsharded_model( self, model: ModelWrapper, checkpoint: str, strict: bool, low_cpu_mem_mode: bool = True, num_threads: int = 1 ): assert isinstance(model, TorchFSDPModel), "Please boost the model before loading!" model = model.unwrap() checkpoint = utils.load_state_dict(checkpoint) model.load_state_dict(checkpoint) def load_unsharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: Path, low_cpu_mem_mode: bool = True, num_threads: int = 1 ): assert isinstance(optimizer, FSDPOptimizerWrapper), "Please boost the optimizer before loading!" if checkpoint.endswith(".safetensors"): checkpoint = load_flat(checkpoint, seperator=".") else: checkpoint = utils.load_state_dict(checkpoint) fsdp_model = optimizer.unwrap_model() full_optimizer_state = FSDP.full_optim_state_dict(fsdp_model, optim=optimizer, rank0_only=False) start_index = 0 id2name = {} def get_index_mapping(group: Dict[str, Any]) -> Dict[str, Any]: nonlocal start_index start_num = len(id2name) id2name.update({i: p for i, p in enumerate(group["params"], start_index) if i not in id2name}) end_num = len(id2name) start_index += end_num - start_num for g in full_optimizer_state["param_groups"]: get_index_mapping(g) new_state = {} for key, value in checkpoint["state"].items(): new_state[id2name[int(key)]] = value checkpoint["state"] = new_state for g in checkpoint["param_groups"]: new_group = [] for param_id in g["params"]: new_group.append(id2name[param_id]) g["params"] = new_group sharded_osd = FSDP.scatter_full_optim_state_dict(checkpoint, fsdp_model) optimizer.load_state_dict(sharded_osd) def save_unsharded_model( self, model: ModelWrapper, checkpoint: str, gather_dtensor: bool, use_safetensors: bool, use_async: bool = False ): """ Save model to checkpoint but only on master process. """ assert isinstance(model, TorchFSDPModel), "Please boost the model before saving!" 
model = model.unwrap() cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True) with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg): full_model_state = model.state_dict() if self.coordinator.is_master(): if use_async: from colossalai.utils.safetensors import save if hash(model) not in self.pinned_state_dicts: self.pinned_state_dicts[hash(model)] = create_pinned_state_dict(full_model_state) for k, v in full_model_state.items(): self.pinned_state_dicts[hash(model)][k].copy_(v) full_model_state[k] = self.pinned_state_dicts[hash(model)][k] writer = save(checkpoint, full_model_state) self.async_writers.append(writer) else: utils.save_state_dict( full_model_state, checkpoint_file_path=checkpoint, use_safetensors=use_safetensors ) def save_unsharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, gather_dtensor: bool, use_async: bool = False ): """ Save optimizer to checkpoint but only on master process. """ assert isinstance(optimizer, FSDPOptimizerWrapper), "Please boost the optimizer before saving!" fsdp_model = optimizer.unwrap_model() full_optimizer_state = FSDP.full_optim_state_dict(fsdp_model, optim=optimizer, rank0_only=True) if self.coordinator.is_master(): # Save order indices instead of Tensors name2id: Dict[str, int] = {} start_index = 0 def pack_group(group: Dict[str, Any]) -> Dict[str, Any]: nonlocal start_index packed = {k: v for k, v in group.items() if k != "params"} name2id.update({p: i for i, p in enumerate(group["params"], start_index) if p not in name2id}) packed["params"] = [name2id[p] for p in group["params"]] start_index += len(packed["params"]) return packed param_groups = [pack_group(g) for g in full_optimizer_state["param_groups"]] full_optimizer_state["param_groups"] = param_groups new_state = {} for key, value in full_optimizer_state["state"].items(): new_state[name2id[key]] = value full_optimizer_state["state"] = new_state if use_async: from colossalai.utils.safetensors import _flatten_optim_state_dict, save flatten_state_dict, metadata = _flatten_optim_state_dict(full_optimizer_state, seperator=".") if id(optimizer) not in self.pinned_state_dicts: self.pinned_state_dicts[id(optimizer)] = create_pinned_state_dict(flatten_state_dict) for k, v in flatten_state_dict.items(): self.pinned_state_dicts[id(optimizer)][k].copy_(v) flatten_state_dict[k] = self.pinned_state_dicts[id(optimizer)][k] writer = save(checkpoint, state_dict=flatten_state_dict, metadata=metadata) self.async_writers.append(writer) else: utils.save_state_dict(full_optimizer_state, checkpoint_file_path=checkpoint, use_safetensors=False) def save_sharded_model( self, model: ModelWrapper, checkpoint_path: str, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_safetensors: bool = False, use_async: bool = False, ): """ Save model to checkpoint but only on master process. """ assert isinstance(model, TorchFSDPModel), "Please boost the model before saving!" 
if os.path.isfile(checkpoint_path): self.logger.error(f"Provided path ({checkpoint_path}) should be a directory, not a file") return Path(checkpoint_path).mkdir(parents=True, exist_ok=True) with FSDP.state_dict_type( model.unwrap(), StateDictType.FULL_STATE_DICT, FullStateDictConfig(offload_to_cpu=True, rank0_only=True) ): state_dict = model.unwrap().state_dict() if use_async and self.coordinator.is_master(): if hash(model) not in self.pinned_state_dicts: self.pinned_state_dicts[hash(model)] = {} pinned_state_dicts = self.pinned_state_dicts[hash(model)] else: pinned_state_dicts = None state_dict_shard = utils.shard_model_checkpoint( state_dict, max_shard_size=size_per_shard, pinned_state_dicts=pinned_state_dicts ) weights_name, save_index_file = utils.get_model_base_filenames(prefix, use_safetensors) index_file = CheckpointIndexFile(checkpoint_path) # In general cases, is_master is set to True to get the right behavior. if use_async: total_size, writers = async_save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint_path, index_file=index_file, base_filename=weights_name, is_master=self.coordinator.is_master(), ) self.async_writers.extend(writers) else: total_size = utils.save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint_path, index_file=index_file, base_filename=weights_name, is_master=self.coordinator.is_master(), use_safetensors=use_safetensors, ) # only save the index file on the master rank if self.coordinator.is_master(): index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) utils.save_config_file(model.unwrap(), checkpoint_path) self.logger.info( f"The model is split into checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) def load_sharded_model( self, model: nn.Module, checkpoint_index_file: Path, strict: bool = False, use_safetensors: bool = False, load_sub_module: bool = True, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load model to checkpoint but only on master process. """ assert isinstance(model, TorchFSDPModel), "Please boost the model before loading!" use_safetensors = False if "safetensors" in checkpoint_index_file.name: use_safetensors = True if use_safetensors and not utils.is_safetensors_available(): raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.") # read checkpoint index file ckpt_index_file = CheckpointIndexFile.from_file(checkpoint_index_file) checkpoint_files, _ = ckpt_index_file.get_checkpoint_filenames() fsdp_state_dict = {} for state_dict in utils.load_state_dict_shards(checkpoint_files, False, use_safetensors): fsdp_state_dict.update(state_dict) with FSDP.state_dict_type(model.unwrap(), StateDictType.FULL_STATE_DICT): model.unwrap().load_state_dict(fsdp_state_dict, strict=False) def save_sharded_optimizer( self, optimizer: Optimizer, checkpoint: str, gather_dtensor: bool, prefix: str, size_per_shard: int, use_async: bool = False, ): """ Save optimizer to checkpoint but only on master process. """ assert isinstance(optimizer, FSDPOptimizerWrapper), "Please boost the optimizer before saving!" 
if os.path.isfile(checkpoint): self.logger.error(f"Provided path ({checkpoint}) should be a directory, not a file") return Path(checkpoint).mkdir(parents=True, exist_ok=True) with FSDP.state_dict_type( optimizer.unwrap_model().unwrap(), StateDictType.FULL_STATE_DICT, FullStateDictConfig(offload_to_cpu=True, rank0_only=True), ): fsdp_optim_state = FSDP.full_optim_state_dict( optimizer.unwrap_model().unwrap(), optim=optimizer, rank0_only=True ) if self.coordinator.is_master(): # Save order indices instead of Tensors name2id: Dict[str, int] = {} start_index = 0 def pack_group(group: Dict[str, Any]) -> Dict[str, Any]: nonlocal start_index packed = {k: v for k, v in group.items() if k != "params"} name2id.update({p: i for i, p in enumerate(group["params"], start_index) if p not in name2id}) packed["params"] = [name2id[p] for p in group["params"]] start_index += len(packed["params"]) return packed param_groups = [pack_group(g) for g in fsdp_optim_state["param_groups"]] fsdp_optim_state["param_groups"] = param_groups new_state = {} for key, value in fsdp_optim_state["state"].items(): new_state[name2id[key]] = value fsdp_optim_state["state"] = new_state # Preparing file paths and index file. states_name, save_index_file, param_group_file = utils.get_optimizer_base_filenames( prefix, use_safetensors=use_async ) index_file = CheckpointIndexFile(checkpoint) index_file.append_meta_data("param_groups", param_group_file) group_file_path = os.path.join(checkpoint, param_group_file) utils.save_param_groups(fsdp_optim_state, group_file_path) if use_async: if id(optimizer) not in self.pinned_state_dicts: self.pinned_state_dicts[id(optimizer)] = {} pinned_state_dicts = self.pinned_state_dicts[id(optimizer)] else: pinned_state_dicts = None sharded_state = utils.shard_optimizer_checkpoint( fsdp_optim_state, max_shard_size=size_per_shard, pinned_state_dicts=pinned_state_dicts ) # Save shards of optimizer states. # In general cases, is_master is set to True to get the right behavior. if use_async: total_size, writers = async_save_state_dict_shards( sharded_state_dict=sharded_state, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=self.coordinator.is_master(), state_preprocess=True, ) self.async_writers.extend(writers) else: total_size = utils.save_state_dict_shards( sharded_state_dict=sharded_state, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=self.coordinator.is_master(), use_safetensors=False, ) index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) self.logger.info( f"The optimizer is going to be split to checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) def load_sharded_optimizer( self, optimizer: Optimizer, index_file_path: str, size_per_shard: int, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load optimizer to checkpoint but only on master process. """ assert isinstance(optimizer, FSDPOptimizerWrapper), "Please boost the optimizer before saving!" ckpt_index_file = CheckpointIndexFile.from_file(index_file_path) # Load param_groups param_group_path = ckpt_index_file.get_param_group_filename() if param_group_path is None: raise RuntimeError( f"Invalid index file path {index_file_path} for an optimizer. " "Looking param group file under current directory." 
) saved_param_groups = torch.load(param_group_path) # Load param fsdp_optim_state = {} checkpoint_files, _ = ckpt_index_file.get_checkpoint_filenames() for state_dict_shard in utils.load_state_dict_shards(checkpoint_files, True, False): fsdp_optim_state.update(state_dict_shard) fsdp_optim_dict = dict(state=fsdp_optim_state, param_groups=saved_param_groups) fsdp_model = optimizer.unwrap_model() full_optimizer_state = FSDP.full_optim_state_dict(fsdp_model.unwrap(), optim=optimizer, rank0_only=False) start_index = 0 id2name = {} def get_index_mapping(group: Dict[str, Any]) -> Dict[str, Any]: nonlocal start_index start_num = len(id2name) id2name.update({i: p for i, p in enumerate(group["params"], start_index) if i not in id2name}) end_num = len(id2name) start_index += end_num - start_num for g in full_optimizer_state["param_groups"]: get_index_mapping(g) new_state = {} for key, value in fsdp_optim_dict["state"].items(): new_state[id2name[int(key)]] = value fsdp_optim_dict["state"] = new_state for g in fsdp_optim_dict["param_groups"]: new_group = [] for param_id in g["params"]: new_group.append(id2name[param_id]) g["params"] = new_group with FSDP.state_dict_type(optimizer.unwrap_model().unwrap(), StateDictType.FULL_STATE_DICT): fsdp_state = FSDP.optim_state_dict_to_load( model=optimizer.unwrap_model().unwrap(), optim=optimizer, optim_state_dict=fsdp_optim_dict ) optimizer.load_state_dict(fsdp_state) def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str): """ Save model to checkpoint but only on master process. """ if self.coordinator.is_master(): super().save_lr_scheduler(lr_scheduler, checkpoint) class TorchFSDPModel(ModelWrapper): def __init__(self, module: nn.Module, *args, **kwargs) -> None: super().__init__(module) self.module = FSDP(module, *args, **kwargs) class FSDPOptimizerWrapper(OptimizerWrapper): def __init__(self, optimizer: Optimizer, model: nn.Module): self.model = model super().__init__(optimizer) def unwrap_model(self) -> nn.Module: return self.model class TorchFSDPPlugin(DPPluginBase): """ Plugin for PyTorch FSDP. ```python from colossalai.booster import Booster from colossalai.booster.plugin import TorchFSDPPlugin model, train_dataset, optimizer, criterion = ... plugin = TorchFSDPPlugin() train_dataloader = plugin.prepare_train_dataloader(train_dataset, batch_size=8) booster = Booster(plugin=plugin) model, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion) ``` Args: See https://pytorch.org/docs/stable/fsdp.html for details. 
""" if version.parse(torch.__version__) >= version.parse("1.12.0"): def __init__( self, process_group: Optional[ProcessGroup] = None, sharding_strategy: Optional[ShardingStrategy] = None, cpu_offload: Optional[CPUOffload] = None, auto_wrap_policy: Optional[Callable] = None, backward_prefetch: Optional[BackwardPrefetch] = None, mixed_precision: Optional[MixedPrecision] = None, ignored_modules: Optional[Iterable[torch.nn.Module]] = None, param_init_fn: Optional[Callable[[nn.Module], None]] = None, sync_module_states: bool = False, fp8_communication: bool = False, ): super().__init__() self.fsdp_kwargs = dict( process_group=process_group, sharding_strategy=sharding_strategy, cpu_offload=cpu_offload, auto_wrap_policy=auto_wrap_policy, backward_prefetch=backward_prefetch, mixed_precision=mixed_precision, ignored_modules=ignored_modules, param_init_fn=param_init_fn, sync_module_states=sync_module_states, ) self.fp8_communication = fp8_communication self.logger = get_dist_logger() else: raise RuntimeError("FSDP is not supported while torch version under 1.12.0.") def support_no_sync(self) -> bool: return False def support_lora(self) -> bool: return False def no_sync(self, model: nn.Module, optimizer: OptimizerWrapper) -> Iterator[None]: raise NotImplementedError("Torch fsdp no_sync func not supported yet.") def control_precision(self) -> bool: return True def supported_precisions(self) -> List[str]: return ["fp16", "bf16"] def control_device(self) -> bool: return True def supported_devices(self) -> List[str]: return ["cuda"] def configure( self, model: nn.Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, dataloader: Optional[DataLoader] = None, lr_scheduler: Optional[LRScheduler] = None, ) -> Tuple[nn.Module, OptimizerWrapper, Callable, DataLoader, LRScheduler]: # wrap the model with PyTorch FSDP fsdp_model = TorchFSDPModel(model, device_id=torch.cuda.current_device(), **self.fsdp_kwargs) if self.fp8_communication: from colossalai.quantization.utils import patch_fsdp_params_comm_hook patch_fsdp_params_comm_hook() from colossalai.quantization.fp8 import fp8_compress_fsdp_params_comm_hook fsdp_model.module.register_params_comm_hook(None, fp8_compress_fsdp_params_comm_hook) from colossalai.quantization.fp8 import fp8_compress_fsdp_grad_comm_hook fsdp_model.module.register_comm_hook(None, fp8_compress_fsdp_grad_comm_hook) if optimizer is not None: if len(optimizer.param_groups) > 1: self.logger.warning( "TorchFSDPPlugin does not support optimizer that use multi param groups. The results may not be as expected if used." ) optimizer.__init__(fsdp_model.parameters(), **optimizer.defaults) if not isinstance(optimizer, FSDPOptimizerWrapper): optimizer = FSDPOptimizerWrapper(optimizer, fsdp_model) return fsdp_model, optimizer, criterion, dataloader, lr_scheduler def control_checkpoint_io(self) -> bool: return True def get_checkpoint_io(self) -> CheckpointIO: return TorchFSDPCheckpointIO() def enable_lora( self, model: nn.Module, pretrained_dir: Optional[str] = None, lora_config: Optional[Dict] = None ) -> nn.Module: raise NotImplementedError
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
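The FSDP checkpoint I/O above distinguishes unsharded saves (a single file gathered to CPU and written by the master rank) from sharded saves (a directory with shard files plus an index file). A short, assumed configuration of `TorchFSDPPlugin` makes the two paths concrete; the `MixedPrecision` policy and checkpoint paths below are illustrative assumptions.

```python
# TorchFSDPPlugin sketch; the bf16 MixedPrecision policy and checkpoint paths are assumptions.
import torch
import colossalai
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision

from colossalai.booster import Booster
from colossalai.booster.plugin import TorchFSDPPlugin

colossalai.launch_from_torch()
bf16 = MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.bfloat16, buffer_dtype=torch.bfloat16)
plugin = TorchFSDPPlugin(mixed_precision=bf16)
booster = Booster(plugin=plugin)

model, optimizer, criterion, dataloader = ...  # user-defined objects
model, optimizer, criterion, dataloader, _ = booster.boost(model, optimizer, criterion, dataloader)

# Unsharded: one file, written by the master rank only.
booster.save_model(model, "./fsdp_model.pt", shard=False)
# Sharded: a directory with shard files plus an index json, as implemented above.
booster.save_optimizer(optimizer, "./fsdp_optim", shard=True, size_per_shard=1024)
```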
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/torch_ddp_plugin.py
colossalai/booster/plugin/torch_ddp_plugin.py
from typing import Callable, Dict, Iterator, List, Optional, Tuple, Union import torch import torch.nn as nn from peft import PeftModel from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils._pytree import tree_map from torch.utils.data import DataLoader from colossalai.checkpoint_io import CheckpointIO, GeneralCheckpointIO from colossalai.cluster import DistCoordinator from colossalai.interface import ModelWrapper, OptimizerWrapper from colossalai.interface.model import PeftUnwrapMixin from colossalai.logging import get_dist_logger from colossalai.quantization import BnbQuantizationConfig, quantize_model from colossalai.utils import get_current_device from .dp_plugin_base import DPPluginBase __all__ = ["TorchDDPPlugin"] class TorchDDPCheckpointIO(GeneralCheckpointIO): def __init__(self) -> None: super().__init__() self.coordinator = DistCoordinator() self.logger = get_dist_logger() def load_unsharded_model( self, model: ModelWrapper, checkpoint: str, strict: bool = True, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load model from checkpoint. """ assert isinstance(model, ModelWrapper), "Please boost the model before loading!" super().load_unsharded_model( model.unwrap(), checkpoint, strict=strict, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads ) def save_unsharded_model( self, model: ModelWrapper, checkpoint: str, gather_dtensor: bool, use_safetensors: bool, use_async: bool = False ): """ Save model to checkpoint but only on master process. """ assert isinstance(model, ModelWrapper), "Please boost the model before saving!" if self.coordinator.is_master(): super().save_unsharded_model( model.unwrap(), checkpoint, gather_dtensor, use_safetensors, use_async=use_async ) def load_unsharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, low_cpu_mem_mode: bool = True, num_threads: int = 1 ): """ Load optimizer from checkpoint. """ assert isinstance(optimizer, OptimizerWrapper), "Please boost the optimizer before loading!" super().load_unsharded_optimizer( optimizer, checkpoint, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads ) def save_unsharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, gather_dtensor: bool, use_async: bool = False ): """ Save optimizer to checkpoint but only on master process. """ assert isinstance(optimizer, OptimizerWrapper), "Please boost the optimizer before saving!" if self.coordinator.is_master(): super().save_unsharded_optimizer(optimizer, checkpoint, gather_dtensor, use_async=use_async) def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str): """ Save model to checkpoint but only on master process. """ if self.coordinator.is_master(): super().save_lr_scheduler(lr_scheduler, checkpoint) def save_sharded_model( self, model: ModelWrapper, checkpoint_path: str, gather_dtensor: bool = True, prefix: Optional[str] = None, max_shard_size: int = 1024, use_safetensors: bool = False, use_async: bool = False, ): """ Save model to checkpoint but only on master process. """ assert isinstance(model, ModelWrapper), "Please boost the model before saving!" 
if self.coordinator.is_master(): super().save_sharded_model( model.unwrap(), checkpoint_path, gather_dtensor, prefix, max_shard_size, use_safetensors, use_async=use_async, ) def load_sharded_model( self, model: ModelWrapper, checkpoint_index_file: str, strict: bool = False, use_safetensors: bool = False, load_sub_module: bool = True, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load model from sharded checkpoint. """ assert isinstance(model, ModelWrapper), "Please boost the model before loading!" super().load_sharded_model( model.unwrap(), checkpoint_index_file, strict, use_safetensors, load_sub_module, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads, ) def save_sharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_async: bool = False, ): """ Save optimizer to sharded checkpoint but only on master process. """ assert isinstance(optimizer, OptimizerWrapper), "Please boost the optimizer before saving!" if self.coordinator.is_master(): super().save_sharded_optimizer( optimizer.unwrap(), checkpoint, gather_dtensor, prefix, size_per_shard, use_async=use_async ) def load_sharded_optimizer( self, optimizer: Optimizer, index_file_path: str, prefix: Optional[str] = None, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load optimizer from sharded checkpoint. """ assert isinstance(optimizer, OptimizerWrapper), "Please boost the optimizer before loading!" super().load_sharded_optimizer( optimizer.unwrap(), index_file_path, prefix, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads ) def save_lora_as_pretrained( self, model: Union[nn.Module, ModelWrapper], checkpoint: str, use_safetensors: bool = False, state_dict: Optional[dict] = None, ) -> None: """ Save the lora adapters and adapter configuration file to checkpoint directory. """ from peft import PeftModel assert isinstance(model, ModelWrapper), "Please boost the model before saving!" peft_model = model.unwrap(unwrap_peft=False) assert isinstance( peft_model, PeftModel ), "The model doesn't have lora adapters, please enable lora before saving." if state_dict is None: state_dict = tree_map(lambda x: x.data.cpu() if torch.is_tensor(x) else x, peft_model.state_dict()) if self.coordinator.is_master(): return peft_model.save_pretrained( checkpoint, safe_serialization=use_safetensors, state_dict=state_dict, ) class TorchDDPModel(ModelWrapper): def __init__(self, module: nn.Module, *args, **kwargs) -> None: super().__init__(module) self.module = DDP(module, *args, **kwargs) def unwrap(self, unwrap_peft: bool = True) -> nn.Module: model = self.module.module if unwrap_peft and isinstance(model, PeftModel): model = PeftUnwrapMixin(model) return model class TorchDDPPlugin(DPPluginBase): """ Plugin for PyTorch DDP. ```python from colossalai.booster import Booster from colossalai.booster.plugin import TorchDDPPlugin model, train_dataset, optimizer, criterion = ... plugin = TorchDDPPlugin() train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8) booster = Booster(plugin=plugin) model, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion) ``` Args: broadcast_buffers (bool, optional): Whether to broadcast buffers in the beginning of training. Defaults to True. bucket_cap_mb (int, optional): The bucket size in MB. Defaults to 25. find_unused_parameters (bool, optional): Whether to find unused parameters. Defaults to False. 
check_reduction (bool, optional): Whether to check reduction. Defaults to False. gradient_as_bucket_view (bool, optional): Whether to use gradient as bucket view. Defaults to False. static_graph (bool, optional): Whether to use static graph. Defaults to False. fp8_communication (bool, optional): Whether to enable fp8 communication. Defaults to False. """ def __init__( self, broadcast_buffers: bool = True, bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, fp8_communication: bool = False, ) -> None: super().__init__() self.ddp_kwargs = dict( broadcast_buffers=broadcast_buffers, bucket_cap_mb=bucket_cap_mb, find_unused_parameters=find_unused_parameters, check_reduction=check_reduction, gradient_as_bucket_view=gradient_as_bucket_view, static_graph=static_graph, ) self.fp8_communication = fp8_communication def support_no_sync(self) -> bool: return True def support_lora(self) -> bool: return True def control_precision(self) -> bool: return False def supported_precisions(self) -> List[str]: return ["fp16", "fp16_apex", "bf16", "fp8"] def control_device(self) -> bool: return True def supported_devices(self) -> List[str]: return ["cuda", "npu"] def configure( self, model: nn.Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, dataloader: Optional[DataLoader] = None, lr_scheduler: Optional[LRScheduler] = None, ) -> Tuple[nn.Module, OptimizerWrapper, Callable, DataLoader, LRScheduler]: # cast model to cuda model = model.to(get_current_device()) # convert model to sync bn model = nn.SyncBatchNorm.convert_sync_batchnorm(model, None) # wrap the model with PyTorch DDP model = TorchDDPModel(model, **self.ddp_kwargs) if optimizer is not None and not isinstance(optimizer, OptimizerWrapper): optimizer = OptimizerWrapper(optimizer) if self.fp8_communication: from colossalai.quantization.fp8 import fp8_compress_ddp_grad_comm_hook_async model.module.register_comm_hook(None, fp8_compress_ddp_grad_comm_hook_async) return model, optimizer, criterion, dataloader, lr_scheduler def control_checkpoint_io(self) -> bool: return True def get_checkpoint_io(self) -> CheckpointIO: return TorchDDPCheckpointIO() def no_sync(self, model: nn.Module, optimizer: OptimizerWrapper) -> Iterator[None]: assert isinstance(model, TorchDDPModel), "Model is not boosted by TorchDDPPlugin." return model.module.no_sync() def enable_lora( self, model: nn.Module, pretrained_dir: Optional[str] = None, lora_config: Optional[Dict] = None, bnb_quantization_config: Optional[BnbQuantizationConfig] = None, ) -> nn.Module: from peft import PeftModel, get_peft_model if bnb_quantization_config is not None: model = quantize_model(model, bnb_quantization_config) assert not isinstance(model, TorchDDPModel), "Lora should be enabled before boosting the model." if pretrained_dir is None: return get_peft_model(model, lora_config) else: return PeftModel.from_pretrained(model, pretrained_dir, is_trainable=True)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
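`TorchDDPPlugin` is one of the plugins that reports `support_lora() == True`, and `enable_lora` must run before boosting, as the assertion in its source enforces. The sketch below assumes a Hugging Face causal-LM; the `LoraConfig` fields, the `find_unused_parameters` flag, and the output path are illustrative assumptions.

```python
# LoRA fine-tuning sketch with TorchDDPPlugin; LoraConfig fields and paths are assumptions.
import colossalai
from peft import LoraConfig

from colossalai.booster import Booster
from colossalai.booster.plugin import TorchDDPPlugin

colossalai.launch_from_torch()
plugin = TorchDDPPlugin(find_unused_parameters=True)  # assumed flag, since most base weights are frozen
booster = Booster(plugin=plugin)

model = ...  # e.g. a transformers causal-LM
lora_config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")
model = booster.enable_lora(model, lora_config=lora_config)  # must precede boost()

optimizer, criterion, dataloader = ...  # user-defined objects
model, optimizer, criterion, dataloader, _ = booster.boost(model, optimizer, criterion, dataloader)

# ... training loop ...
booster.save_lora_as_pretrained(model, "./lora_adapter", use_safetensors=True)
```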