Dataset schema:
  repo          string (7–90 chars)
  file_url      string (81–315 chars)
  file_path     string (4–228 chars)
  content       string (0–32.8k chars)
  language      string (1 class)
  license       string (7 classes)
  commit_sha    string (40 chars)
  retrieved_at  date (2026-01-04 14:38:15 – 2026-01-05 02:33:18)
  truncated     bool (2 classes)
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_model/sharded_model_v2.py
colossalai/legacy/zero/sharded_model/sharded_model_v2.py
# this code is inspired by the DeepSpeed library and implemented with our own design from scratch import functools import itertools from collections import OrderedDict from typing import Any, Iterator, Optional, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from torch.nn.parameter import Parameter from colossalai.accelerator import get_accelerator from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.utils.memory import colo_device_memory_capacity from colossalai.legacy.zero.gemini.ophooks import register_ophooks_recursively from colossalai.legacy.zero.gemini.paramhooks import BaseParamHookMgr from colossalai.legacy.zero.gemini.stateful_tensor import TensorState from colossalai.legacy.zero.gemini.stateful_tensor_mgr import StatefulTensorMgr from colossalai.legacy.zero.gemini.tensor_placement_policy import TensorPlacementPolicy, TensorPlacementPolicyFactory from colossalai.legacy.zero.gemini.tensor_utils import colo_model_data_move_to_cpu from colossalai.legacy.zero.shard_utils import BaseShardStrategy from colossalai.legacy.zero.sharded_model.reduce_scatter import ReduceScatterBucketer from colossalai.logging import get_dist_logger from colossalai.utils import disposable from colossalai.zero.gemini.memory_tracer import MemStatsCollector from ._utils import ( cast_float_arguments, cast_tensor_to_bf16, cast_tensor_to_fp16, cast_tensor_to_fp32, chunk_and_pad, free_storage, get_gradient_predivide_factor, ) from .zero_hook import ZeroHook try: from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX except ImportError: _EXTRA_STATE_KEY_SUFFIX = "_extra_state" class ShardedModelV2(nn.Module): """ A wrapper that shards a PyTorch module's parameters across the memory of multiple GPUs. Only `1/#nproc` of the parameters and gradients are stored in local CUDA memory, so the forward and backward passes can be executed within a limited CUDA memory budget. Note: You must use ``ShardedModelV2`` with ``ShardedOptimizerV2``. Note: If you enable ``reuse_fp16_shard``, make sure you don't use gradient accumulation and that your optimizer can work with fp16 gradients and fp32 parameters. Args: module (nn.Module): A sharded module, which must be initialized by `ZeroInitContext`. shard_strategy (BaseShardStrategy): A shard strategy to manage shard behavior. process_group (Optional[ProcessGroup], optional): Data parallel process group. Defaults to None. reduce_scatter_process_group (Optional[ProcessGroup], optional): Reduce-scatter process group. Generally, it should be `None`, meaning it's the same as `process_group`. Defaults to None. reduce_scatter_bucket_size_mb (int, optional): Reduce-scatter bucket size in *MB*. Defaults to 25. fp32_reduce_scatter (bool, optional): If set to `True`, gradients are forced to FP32 before reduce-scatter. Defaults to False. tensor_placement_policy (str): Which device to place *held* tensors. It can be 'cpu', 'cuda' or 'auto'. If it's 'cpu', parameters, gradients and optimizer states will be offloaded to CPU, which means minimal CUDA memory will be used. If it's 'cuda', they won't be offloaded, which means maximal CUDA memory will be used. If it's 'auto', they are moved dynamically based on CPU and CUDA memory usage, utilizing heterogeneous memory space evenly. Note that the 'auto' policy can only work well when no other processes use CUDA during your training. Defaults to 'cuda'. 
gradient_predivide_factor (Optional[float], optional): Gradient is divided by this value before reduce-scatter. Defaults to 1.0. reuse_fp16_shard (bool, optional): Whether to reuse fp16 shard for param and grad. Enabling this can reduce GPU memory usage, but you have to make sure you disable it when using gradient accumulation. In this mode, grad will be fp16. Make sure your optimizer supports mixed precision (fp32 param and fp16 grad). We find that PyTorch's optimizers don't support mixed precision, so we recommend you enable this only when using our CPUAdam with CPU offload. Defaults to False. bf16 (bool, optional): Whether to use bfloat16 for param and grad. Defaults to False. """ def __init__( self, module: nn.Module, shard_strategy: BaseShardStrategy, process_group: Optional[ProcessGroup] = None, reduce_scatter_process_group: Optional[ProcessGroup] = None, reduce_scatter_bucket_size_mb: int = 25, fp32_reduce_scatter: bool = False, tensor_placement_policy: str = "cuda", gradient_predivide_factor: Optional[float] = 1.0, reuse_fp16_shard: bool = False, bf16: bool = False, *args, **kwargs, ): assert not isinstance(module, ShardedModelV2), "Nested ShardedModelV2 is not supported." super().__init__() self.logger = get_dist_logger() self.bf16 = bf16 # We force users to use ZeroInitContext for submodule in module.modules(): sharded_cnt = 0 unshard_cnt = 0 for param in submodule.parameters(recurse=False): assert hasattr(param, "colo_attr"), "You must use ZeroInitContext to init your module first." if param.colo_attr.param_is_sharded: sharded_cnt += 1 else: unshard_cnt += 1 assert (not sharded_cnt) or (not unshard_cnt), "an nn.Module cannot have both sharded and unsharded params" submodule.param_is_sharded = sharded_cnt > 0 self.sharded_params = [] self.unshard_params = [] for param in module.parameters(): if param.colo_attr.param_is_sharded: self.sharded_params.append(param) else: self.unshard_params.append(param) self.module = module self.process_group = process_group or gpc.get_group(ParallelMode.DATA) self.reduce_scatter_process_group = reduce_scatter_process_group or self.process_group self.world_size = dist.get_world_size(self.process_group) self.rank = dist.get_rank(self.process_group) self.shard_strategy = shard_strategy self._use_memory_tracer = tensor_placement_policy == "auto" if self._use_memory_tracer: self._memstats_collector = MemStatsCollector() self._start_collect_memstats = disposable(self._memstats_collector.start_collection) self._finish_collect_memstats = disposable(self._memstats_collector.finish_collection) else: self._memstats_collector = None self._tensor_placement_policy: TensorPlacementPolicy = TensorPlacementPolicyFactory.create( tensor_placement_policy )(mem_stats_collector=self._memstats_collector) if "warmup_non_model_data_ratio" in kwargs: if tensor_placement_policy != "auto": self.logger.warning("setting warmup_non_model_data_ratio has no effect unless auto placement is used") else: ratio = kwargs["warmup_non_model_data_ratio"] self._tensor_placement_policy._warmup_non_model_data_ratio = ratio self.logger.info(f"setting warmup_non_model_data_ratio as {ratio} for auto placement") self._stateful_tensor_mgr = StatefulTensorMgr(self._tensor_placement_policy) param_tensor_list = [p.colo_attr.sharded_data_tensor for p in module.parameters() if hasattr(p, "colo_attr")] self._stateful_tensor_mgr.register_stateful_tensor_list(param_tensor_list) # Register hooks self._ophook_list = [ ZeroHook(self.shard_strategy, self._memstats_collector, self._stateful_tensor_mgr, 
self.process_group) ] register_ophooks_recursively(self.module, self._ophook_list) self.param_hook_mgr = BaseParamHookMgr(list(self.module.parameters())) self.param_hook_mgr.register_backward_hooks(self._grad_post_backward_hook) self.fp32_reduce_scatter = fp32_reduce_scatter self._cpu_offload: bool = tensor_placement_policy != "cuda" for param in module.parameters(): # Init `offload_grad` param.colo_attr.offload_grad = self._cpu_offload # We find that if gradient_predivide_factor != 1.0, there may be precision problems, # so we use 1.0 as the default gradient_predivide_factor # However, if you set gradient_predivide_factor to None, we will set # gradient_predivide_factor to a value >= 1.0 automatically self.gradient_predivide_factor: float = ( gradient_predivide_factor if gradient_predivide_factor is not None else get_gradient_predivide_factor(self.world_size) ) self.gradient_postdivide_factor: float = self.world_size / self.gradient_predivide_factor self.comm_stream: torch.cuda.Stream = torch.cuda.Stream() self.reducer = ReduceScatterBucketer(reduce_scatter_bucket_size_mb) self._require_backward_grad_sync: bool = True self._cuda_margin_space = 0 self.reuse_fp16_shard = reuse_fp16_shard # record whether gradients have inf or nan self.overflow_counter = 0 def adjust_stateful_tensor_layout(self) -> None: self._stateful_tensor_mgr.adjust_layout() @property def use_memory_tracer(self): return self._use_memory_tracer @property def cuda_margin_space(self): return self._cuda_margin_space @property def cpu_offload(self): return self._cpu_offload def dump_memory_stats(self, filename: Optional[str] = "dump_mem_stats.log") -> None: """ Dump memory tracer collected information to a file. try: # forward: model(inputs) # backward: optimizer.backward() except Exception as e: model.dump_memory_stats() exit(0) """ if self._use_memory_tracer: self.logger.error(f"dump memory tracer collected information to {filename}", ranks=[0]) if gpc.get_global_rank() == 0: with open(filename, "w+") as f: f.write( f"cuda reserved {torch.cuda.memory_reserved(get_accelerator().get_current_device()) / 1e9} GB\n" ) f.write( f"cuda max allocated {torch.cuda.max_memory_allocated(get_accelerator().get_current_device()) / 1e9} GB\n" ) f.write("CUDA model data (GB)\n") f.write("\n") f.write("CUDA non model data (GB)\n") f.write(str(self._memstats_collector._memstats.non_model_data_list("cuda"))) f.write("CPU non model data (GB)\n") f.write(str(self._memstats_collector._memstats.non_model_data_list("cpu"))) f.write("\n") def _pre_forward_operations(self, *args): # the operation will affect the memory tracer behavior in ZeroHook if self._memstats_collector: self._start_collect_memstats() for p in self.module.parameters(): if hasattr(p, "colo_attr"): p.colo_attr.sharded_data_tensor.trans_state(TensorState.HOLD) self._stateful_tensor_mgr.start_iter() def _post_forward_operations(self): for p in self.module.parameters(): if hasattr(p, "colo_attr"): p.colo_attr.sharded_data_tensor.trans_state(TensorState.HOLD) def forward(self, *args: Any, **kwargs: Any) -> torch.Tensor: self._pre_forward_operations(*args) cast_fn = cast_tensor_to_bf16 if self.bf16 else cast_tensor_to_fp16 args, kwargs = cast_float_arguments(cast_fn, *args, **kwargs) outputs = self.module(*args, **kwargs) self._post_forward_operations() return outputs def backward(self, loss): loss.backward() self._post_backward_operations() for ophook in self._ophook_list: ophook.post_iter() def backward_by_grad(self, tensor, grad): torch.autograd.backward(tensors=tensor, 
grad_tensors=grad) self._post_backward_operations() for ophook in self._ophook_list: ophook.post_iter() def _update_memstats(self): if self._memstats_collector: self._finish_collect_memstats() # cuda margin space = cuda mem capacity - max fwd/bwd cuda mem used. # the way to calculate margin space is based on the assumption that # model data is fixed in cuda during training. # cuda margin space can be used to store OS (optimizer states). self._cuda_margin_space = ( colo_device_memory_capacity(get_accelerator().get_current_device()) - self._memstats_collector._memstats.max_overall_cuda ) @torch.no_grad() def _post_backward_operations(self) -> None: """ This method performs the operations required after backward: 1. update the memory tracer. 2. flush gradients in buckets, reducing partial gradients in each process. 3. shard tensors not dealt with in the zero hook. 4. move each sharded param's grad payload to param.grad. """ # 1. update the memory tracer. self._update_memstats() # 2. flush gradients in buckets, reducing partial gradients in each process. if self._require_backward_grad_sync: # Flush any unreduced buckets in the post_backward stream. with torch.cuda.stream(self.comm_stream): self.reducer.flush() torch.cuda.current_stream().wait_stream(self.comm_stream) self.reducer.free() # 3. shard tensors not dealt with in the zero hook tensor_list = [] for p in self.sharded_params: if not p.colo_attr.param_is_sharded: tensor_list.append(p.colo_attr.sharded_data_tensor) p.colo_attr.sharded_data_tensor.trans_state(TensorState.HOLD_AFTER_BWD) p.colo_attr.set_data_none() self.shard_strategy.shard(tensor_list, self.process_group) # 4. set all parameters' grad to None for p in self.module.parameters(): if not p.requires_grad: continue # Leave the gradient accumulation state (_require_backward_grad_sync) as-is if not synchronizing this pass. # NOTE: a no-sync pass skips, and a sync pass performs, gradient all-reduce within the process group. # If _require_backward_grad_sync is False, # p.grad remains the accumulated unsharded gradient from prior no-sync passes. # We also allow interleaving no-sync passes with sync passes, if desired. if not self._require_backward_grad_sync: continue p.grad = None @torch.no_grad() def _grad_post_backward_hook(self, param: Parameter, grad: torch.Tensor) -> Optional[torch.Tensor]: """ At the start of :func:`_grad_post_backward_hook`, ``param.grad`` contains the full gradient for the local batch. The reduce-scatter op will save a single shard of the summed gradient across all GPUs to param.colo_attr.grad. This shard will align with the current GPU rank. For example:: before reduce_scatter: param.grad (GPU #0): [1, 2, 3, 4] param.grad (GPU #1): [5, 6, 7, 8] after reduce_scatter: param.grad (GPU #0): [6, 8] # 1+5, 2+6 param.grad (GPU #1): [10, 12] # 3+7, 4+8 The local GPU's ``optim.step`` is responsible for updating a single shard of params, also corresponding to the current GPU's rank. This alignment is created by `param.colo_attr.grad`, which ensures that the local optimizer only sees the relevant parameter shard. 
""" if grad is None: return assert not grad.requires_grad, "ShardedModel only works with gradients that don't require gradients" if not self._require_backward_grad_sync: return # used to cheat Pytorch, since we can't return None empty_grad = torch.empty_like(grad) free_storage(empty_grad) # As torch didn't allow modifying grad in hook, we make a copy grad = grad.clone() if param.colo_attr.is_replicated: self._reduce_scatter_handler(param, grad) else: self._save_grad(param, grad) return empty_grad def _reduce_scatter_handler(self, param: Parameter, grad: torch.Tensor) -> None: self.comm_stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(self.comm_stream): if self.fp32_reduce_scatter: grad.data = grad.data.to(param.dtype) if self.gradient_predivide_factor > 1.0: # Average grad by world_size for consistency with PyTorch DDP. grad.data.div_(self.gradient_predivide_factor) if self.world_size > 1: grad_chunks = chunk_and_pad(grad, self.reduce_scatter_process_group.size()) self.reducer.reduce_scatter_async( grad_chunks, group=self.reduce_scatter_process_group, callback_fn=functools.partial(self._reduce_scatter_callback, param), ) else: self._reduce_scatter_callback(param, grad) torch.cuda.current_stream().wait_stream(self.comm_stream) def _reduce_scatter_callback(self, param: Parameter, reduced_grad: torch.Tensor) -> None: assert isinstance( reduced_grad, torch.Tensor ), f"_reduce_scatter_callback accept reduced_grad as {type(reduced_grad)}" reduced_grad.data = reduced_grad.data.contiguous().view(-1) if self.gradient_postdivide_factor > 1: # Average grad by world_size for consistency with PyTorch DDP. reduced_grad.data.div_(self.gradient_postdivide_factor) self._save_grad(param, reduced_grad) # FIXME(ver217): refactor the below line when impl eviction policy def _save_grad(self, param: Parameter, grad: torch.Tensor): # record whether we have overflow self.overflow_counter += torch.isinf(grad).any().item() self.overflow_counter += torch.isnan(grad).any().item() # move gradient to cpu if param.colo_attr.offload_grad: colo_model_data_move_to_cpu(grad) if self.reuse_fp16_shard: # make parameters point to gradient assert ( param.colo_attr.saved_grad.is_null() ), "Gradient accumulation is not supported when reuse_fp16_shard=True" param.colo_attr.grad_payload_reset(grad.data) # release the memory of param # we set a false None for parameter's payload # so we can get parameter's device and dtype later in optimizer param.colo_attr.data_payload_reset(torch.empty(0, device=grad.device, dtype=grad.dtype)) if param.colo_attr.is_replicated: param.colo_attr.sharded_data_tensor.is_sharded = True else: fp32_grad = cast_tensor_to_fp32(grad) if param.colo_attr.saved_grad.is_null(): param.colo_attr.grad_payload_reset(fp32_grad) else: param.colo_attr.grad_payload.add_(fp32_grad.view_as(param.colo_attr.grad_payload)) # keep saved_grad in HOLD state param.colo_attr.saved_grad.trans_state(TensorState.HOLD) def parameters(self, recurse: bool = True) -> Iterator[Parameter]: return self.module.parameters(recurse=recurse) def named_parameters(self, prefix: str = "", recurse: bool = True) -> Iterator[Tuple[str, Parameter]]: return self.module.named_parameters(prefix, recurse) def state_dict(self, destination=None, prefix="", keep_vars=False) -> "OrderedDict[str, torch.Tensor]": return self._colo_state_dict( destination, prefix, keep_vars, shard_strategy=self.shard_strategy, state_dict_func=nn.Module.state_dict, module_to_load=self.module, sharded_params=self.sharded_params, 
process_group=self.process_group, ) def load_state_dict(self, state_dict: "OrderedDict[str, torch.Tensor]", strict: bool = True) -> None: for name, p in self.named_parameters(): if name in state_dict: p.colo_attr.data_payload_reset( state_dict[name].to(dtype=p.colo_attr.data_payload.dtype, device=p.colo_attr.data_payload.device) ) # Force re-shard p.colo_attr.sharded_data_tensor.is_sharded = False self.shard_strategy.shard([p.colo_attr.sharded_data_tensor]) elif strict: raise RuntimeError(f"Missing key in state_dict: {name}") def _colo_state_dict( self, destination=None, prefix="", keep_vars=False, shard_strategy: Optional[BaseShardStrategy] = None, state_dict_func=None, module_to_load=None, sharded_params=[], process_group=None, ) -> "OrderedDict[str, torch.Tensor]": if len(sharded_params) == 0: for param in self.parameters(): if param.colo_attr.param_is_sharded: sharded_params.append(param) if shard_strategy is not None: shard_strategy.gather([p.colo_attr.sharded_data_tensor for p in sharded_params], process_group) for p in sharded_params: p.data = p.colo_attr.data_payload module_to_load = module_to_load or self gathered_state_dict = state_dict_func(module_to_load, destination, prefix, keep_vars) gathered_state_dict = {k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in gathered_state_dict.items()} if shard_strategy is not None: shard_strategy.shard([p.colo_attr.sharded_data_tensor for p in sharded_params], process_group) for p in sharded_params: p.colo_attr.set_data_none() return gathered_state_dict def _colo_load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs, shard_strategy=None ): r"""Copies parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this module in input :attr:`state_dict` is provided as :attr:`local_metadata`. For state dicts without metadata, :attr:`local_metadata` is empty. Subclasses can achieve class-specific backward compatible loading using the version number at `local_metadata.get("version", None)`. .. note:: :attr:`state_dict` is not the same object as the input :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So it can be modified. Args: state_dict (dict): a dict containing parameters and persistent buffers. prefix (str): the prefix for parameters and buffers used in this module local_metadata (dict): a dict containing the metadata for this module. See strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` with :attr:`prefix` match the names of parameters and buffers in this module missing_keys (list of str): if ``strict=True``, add missing keys to this list unexpected_keys (list of str): if ``strict=True``, add unexpected keys to this list error_msgs (list of str): error messages should be added to this list, and will be reported together in :meth:`~torch.nn.Module.load_state_dict` shard_strategy (Optional[BaseShardStrategy], optional): A shard strategy to manage shard behavior. Defaults to None. 
""" for hook in self._load_state_dict_pre_hooks.values(): hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items()) local_state = {k: v for k, v in local_name_params if v is not None} for name, param in local_state.items(): key = prefix + name if key in state_dict: input_param = state_dict[key] if hasattr(param, "colo_attr"): param.colo_attr.data_payload_reset( input_param.to( dtype=param.colo_attr.data_payload.dtype, device=param.colo_attr.data_payload.device ) ) if shard_strategy is not None: # Force re-shard param.colo_attr.sharded_data_tensor.is_sharded = False shard_strategy.shard([param.colo_attr.sharded_data_tensor]) else: # This is used to avoid copying uninitialized parameters into # non-lazy modules, since they dont have the hook to do the checks # in such case, it will error when accessing the .shape attribute. is_param_lazy = torch.nn.parameter.is_lazy(param) # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+ if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1: input_param = input_param[0] if not is_param_lazy and input_param.shape != param.shape: # local shape should match the one in checkpoint error_msgs.append( "size mismatch for {}: copying a param with shape {} from checkpoint, " "the shape in current model is {}.".format(key, input_param.shape, param.shape) ) continue try: with torch.no_grad(): param.copy_(input_param) except Exception as ex: error_msgs.append( 'While copying the parameter named "{}", ' "whose dimensions in the model are {} and " "whose dimensions in the checkpoint are {}, " "an exception occurred : {}.".format(key, param.size(), input_param.size(), ex.args) ) elif strict: missing_keys.append(key) extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if getattr(self.__class__, "set_extra_state", nn.Module.set_extra_state) is not nn.Module.set_extra_state: if extra_state_key in state_dict: self.set_extra_state(state_dict[extra_state_key]) elif strict: missing_keys.append(extra_state_key) elif strict and (extra_state_key in state_dict): unexpected_keys.append(extra_state_key) if strict: for key in state_dict.keys(): if key.startswith(prefix) and key != extra_state_key: input_name = key[len(prefix) :] input_name = input_name.split(".", 1)[0] # get the name of param/buffer/child if input_name not in self._modules and input_name not in local_state: unexpected_keys.append(key) def __getitem__(self, idx: int): assert isinstance(self.module, nn.ModuleList) return self.module[idx] def __len__(self): assert isinstance(self.module, nn.ModuleList) return len(self.module) def __iter__(self): assert isinstance(self.module, nn.ModuleList) return iter(self.module)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
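To make the wiring above concrete, here is a minimal usage sketch of ShardedModelV2. The `ZeroInitContext` import path and constructor arguments, and the `TensorShardStrategy` class, are assumptions not shown in this file; treat the exact calls as illustrative, not authoritative.

# Hypothetical usage sketch; ZeroInitContext's arguments and TensorShardStrategy
# are assumptions, not taken from this file.
import torch
import torch.nn as nn

from colossalai.legacy.zero.shard_utils import TensorShardStrategy  # assumed concrete strategy
from colossalai.legacy.zero.sharded_model import ShardedModelV2

shard_strategy = TensorShardStrategy()
# The class docstring requires the module to be built under ZeroInitContext
# so that every parameter carries a `colo_attr`.
with ZeroInitContext(target_device=torch.device("cuda"), shard_strategy=shard_strategy, shard_param=True):
    module = nn.Sequential(nn.Linear(1024, 1024), nn.ReLU(), nn.Linear(1024, 10))

model = ShardedModelV2(module, shard_strategy, tensor_placement_policy="auto")
out = model(torch.randn(8, 1024, device="cuda"))  # float inputs are cast to fp16 internally
loss = out.float().sum()
model.backward(loss)  # flushes reduce-scatter buckets, then re-shards params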
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_model/reduce_scatter.py
colossalai/legacy/zero/sharded_model/reduce_scatter.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the BSD license found in the # LICENSE file in the root directory of this source tree. import functools import os from typing import Callable, Dict, List, Optional, Tuple import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ProcessGroup # TODO: Remove the toggle-enable_nccl_base_collectives when github open issue #801 is resolved. if os.getenv("ENABLE_NCCL_BASE_COLLECTIVES", "1") == "0": enable_nccl_base_collectives = False else: enable_nccl_base_collectives = True class Bucket: def __init__(self, shard_size: int, dtype: torch.dtype, device: torch.device, group: ProcessGroup): self.buffer = torch.zeros((group.size(), shard_size), dtype=dtype, device=device) self.group = group self.offset = 0 self.callbacks: List[Callable] = [] self.output_shard = torch.zeros_like(self.buffer[0]) def flush(self) -> None: """Flush content of the bucket.""" if self.offset == 0: assert len(self.callbacks) == 0 return # reduce-scatter bucket if hasattr(dist, "_reduce_scatter_base") and enable_nccl_base_collectives: dist._reduce_scatter_base( self.output_shard[: self.offset], self.buffer[:, : self.offset].contiguous(), group=self.group ) else: dist.reduce_scatter( self.output_shard[: self.offset], list(self.buffer[:, : self.offset].unbind(0)), group=self.group ) # execute post-reduction callbacks for callback_fn in self.callbacks: callback_fn() # reuse input bucket but allocate a fresh output shard self.buffer[:, : self.offset].zero_() self.offset = 0 self.callbacks.clear() self.output_shard = torch.zeros_like(self.buffer[0]) def alloc(self) -> None: """Setup the buffers if they are not allocated. Using ``setup`` and ``teardown``, we can ensure that the bucket buffers are only allocated during the backward pass, hence saving more memory to other parts of the training process, such as the forward pass for activation memory. """ for tensor in [self.buffer, self.output_shard]: if tensor.storage().size() == 0: tensor.storage().resize_(tensor.size().numel()) def free(self) -> None: """Tear down the bucket by freeing the memory""" assert self.offset == 0 and self.callbacks == [], "Incorrect call of teardown" for tensor in [self.buffer, self.output_shard]: tensor.storage().resize_(0) def append(self, tensor_list: List[Tensor], callback_fn: Callable): # copy data from input_list into bucket tensor_size = tensor_list[0].numel() stacked_input = torch.stack(tensor_list).view(self.group.size(), tensor_size) offset = self.offset self.buffer[:, offset : offset + tensor_size].copy_(stacked_input) self.offset += tensor_size # callback will be given the reduced result if callback_fn is not None: result_view = self.output_shard[offset : offset + tensor_size].view_as(tensor_list[0]) self.callbacks.append(functools.partial(callback_fn, result_view)) class ReduceScatterBucketer: """ Helper for bucketing multiple reduce-scatter operations on small tensors into larger reduce-scatter ops to improve communication efficiency. 
Usage:: bucketer = ReduceScatterBucketer() bucketer.reduce_scatter_async( small_tensors, callback_fn=lambda result: print("small") ) bucketer.reduce_scatter_async( big_tensors, callback_fn=lambda result: print("big") ) bucketer.reduce_scatter_async( more_small_tensors, callback_fn=lambda result: print("small2") ) bucketer.flush() # callbacks only guaranteed to be called after flush() # Example output (note that it is out of order, due to bucketing): # big # small # small2 Args: bucket_size_mb (int, Optional): bucket size for communicating. Buckets are sub-divided based on world_size. Values <= 0 disable bucketing. """ def __init__(self, bucket_size_mb: int = 25): self.bucket_size_mb = bucket_size_mb self.buckets: Dict[Tuple[torch.dtype, torch.device, ProcessGroup], Bucket] = {} @torch.no_grad() def reduce_scatter_async( self, input_list: List[Tensor], group: ProcessGroup, callback_fn: Optional[Callable] = None, ) -> None: """ Reduce-scatter a list of tensors asynchronously, so smaller reductions can be bucketed together. The given callback (``callback_fn``) will be called with the reduced result at some later time. Call ``flush()`` to force all queued ops and callbacks to be executed. Note that large inputs will be reduced immediately, and this function may also flush the relevant bucket to make room for ``input_list``. Args: input_list (List[Tensor]): list of tensors to reduce-scatter. List should contain ``group.size()`` tensors and each tensor should have identical shape, dtype and device. group (ProcessGroup): process group for reduction callback_fn (Callable, Optional): callback function to call after the reduction executes. Function will be called with a single argument corresponding to the reduced result. """ world_size = group.size() assert ( len(input_list) == world_size ), f"reduce_scatter received {len(input_list)} inputs, expected group.size() ({world_size})" first_input = input_list[0] first_input_size = first_input.numel() bucket_shard_size = self._get_shard_size(first_input.element_size(), world_size) if first_input_size > bucket_shard_size: # TODO: investigate how to avoid using torch.cat (because it seems to be slow for CPU tensors) # input is too big to fit in the bucket, reduce-scatter directly output = torch.zeros_like(input_list[0]) if hasattr(dist, "_reduce_scatter_base") and enable_nccl_base_collectives: input_flattened = torch.cat(input_list) dist._reduce_scatter_base(output, input_flattened, group=group) else: # fallback dist.reduce_scatter(output, input_list, group=group) if callback_fn is not None: callback_fn(output) return bucket = self._get_bucket(first_input, group) if first_input_size > bucket.buffer.size(1) - bucket.offset: # not enough space remaining in bucket, flush it now bucket.flush() bucket.append(input_list, callback_fn) @torch.no_grad() def flush(self) -> None: """Reduce-scatter any partial buckets.""" for bucket in self.buckets.values(): bucket.flush() @torch.no_grad() def free(self) -> None: """Free buffers from all buckets.""" for bucket in self.buckets.values(): bucket.free() @functools.lru_cache() def _get_shard_size(self, element_size: int, num_shards: int) -> int: if self.bucket_size_mb <= 0: # Values <= 0 disable bucketing. 
return 0 MB = 1024 * 1024 bucket_size = self.bucket_size_mb * MB / element_size return int(bucket_size // num_shards) def _get_bucket(self, tensor: Tensor, group: ProcessGroup) -> Bucket: key = (tensor.dtype, tensor.device, group) if key not in self.buckets: # buckets are divided into world_size pieces, bucket.data shaped (world_size, shard_size) world_size = group.size() shard_size = self._get_shard_size(tensor.element_size(), world_size) self.buckets[key] = Bucket(shard_size, tensor.dtype, tensor.device, group) self.buckets[key].alloc() return self.buckets[key]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
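A short sketch of how the bucketer is driven, assuming torch.distributed has already been initialized with an NCCL process group (not runnable standalone):

import torch
import torch.distributed as dist

from colossalai.legacy.zero.sharded_model.reduce_scatter import ReduceScatterBucketer

def on_reduced(shard: torch.Tensor) -> None:
    # Invoked with this rank's shard of the summed result, no later than flush().
    print("reduced shard numel:", shard.numel())

group = dist.group.WORLD
world_size = dist.get_world_size(group)
bucketer = ReduceScatterBucketer(bucket_size_mb=25)
# One tensor per rank, identical shape/dtype/device, as reduce_scatter_async requires.
inputs = list(torch.randn(world_size * 1024, device="cuda").chunk(world_size))
bucketer.reduce_scatter_async(inputs, group=group, callback_fn=on_reduced)
bucketer.flush()  # force any queued ops and callbacks to run
bucketer.free()   # release bucket buffers until the next backward pass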
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_model/utils.py
colossalai/legacy/zero/sharded_model/utils.py
import copy import torch from colossalai.legacy.zero.sharded_model import ShardedModelV2 def col_model_deepcopy(sharded_model: ShardedModelV2, other_model: torch.nn.Module): """ Copy the parameters of the ShardedModelV2 to other_model. Note that other_model must have the same structure as sharded_model. """ for zero_param, param in zip(sharded_model.parameters(), other_model.parameters()): assert hasattr(zero_param, "colo_attr") shard_flag = zero_param.colo_attr.sharded_data_tensor.is_sharded if shard_flag: sharded_model.shard_strategy.gather([zero_param.colo_attr.sharded_data_tensor]) param.data = copy.deepcopy(zero_param.colo_attr.data_payload) if shard_flag: sharded_model.shard_strategy.shard([zero_param.colo_attr.sharded_data_tensor])
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
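For example, col_model_deepcopy can materialize a full (unsharded) copy of a trained sharded model for evaluation; `build_model()` below is a hypothetical factory that returns a module with the same structure as the wrapped one:

from colossalai.legacy.zero.sharded_model.utils import col_model_deepcopy

eval_model = build_model()  # hypothetical: same architecture as the wrapped module
col_model_deepcopy(sharded_model, eval_model)  # gather each shard, deep-copy, re-shard
eval_model.eval()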
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_model/zero_hook.py
colossalai/legacy/zero/sharded_model/zero_hook.py
from typing import Optional import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.legacy.registry import OPHOOKS from colossalai.legacy.zero.gemini.ophooks import BaseOpHook from colossalai.legacy.zero.gemini.stateful_tensor import TensorState from colossalai.legacy.zero.gemini.stateful_tensor_mgr import StatefulTensorMgr from colossalai.legacy.zero.shard_utils import BaseShardStrategy from colossalai.logging import get_dist_logger from colossalai.zero.gemini.memory_tracer import MemStatsCollector @OPHOOKS.register_module class ZeroHook(BaseOpHook): """ A hook to process sharded param for ZeRO method. Warning: this class has been deprecated after version 0.1.12 """ def __init__( self, shard_strategy: BaseShardStrategy, memstarts_collector: Optional[MemStatsCollector] = None, stateful_tensor_mgr: Optional[StatefulTensorMgr] = None, process_group: Optional[dist.ProcessGroup] = None, ): super().__init__() self.logger = get_dist_logger("ZeROHook") self.shard_strategy = shard_strategy self.process_group = process_group # NOTE(jiaruifang) Now the computing device of FWD and BWD is always on GPU self.computing_device = get_accelerator().get_current_device() self._memstarts_collector = memstarts_collector self._stateful_tensor_mgr = stateful_tensor_mgr def gather_parameters(self, module: torch.nn.Module): # gather sharded parameters if module.param_is_sharded: tensor_list = [] for param in module.parameters(recurse=False): assert hasattr(param, "colo_attr") tensor_list.append(param.colo_attr.sharded_data_tensor) self.shard_strategy.gather(tensor_list, self.process_group) def shard_parameters(self, module: torch.nn.Module): # shard gathered parameters if module.param_is_sharded: tensor_list = [] for param in module.parameters(recurse=False): assert hasattr(param, "colo_attr") tensor_list.append(param.colo_attr.sharded_data_tensor) self.shard_strategy.shard(tensor_list, self.process_group) def adjust_module_data(self, module: torch.nn.Module): # record overall data statistics if self._memstarts_collector: self._memstarts_collector.sample_overall_data() for param in module.parameters(recurse=False): param.colo_attr.sharded_data_tensor.trans_state(TensorState.COMPUTE) # adjust stateful tensor to get enough CUDA memory self._stateful_tensor_mgr.adjust_layout() # record model data statistics if self._memstarts_collector: self._memstarts_collector.record_model_data_volume() def pre_fwd_exec(self, module: torch.nn.Module, *args): self.adjust_module_data(module) self.gather_parameters(module) for param in module.parameters(recurse=False): param.data = param.colo_attr.data_payload assert param.data.device.type == "cuda", f"PRE FWD param.data must be on CUDA" def post_fwd_exec(self, module: torch.nn.Module, *args): # change tensor state to HOLD_AFTER_FWD for param in module.parameters(recurse=False): param.colo_attr.sharded_data_tensor.trans_state(TensorState.HOLD_AFTER_FWD) self.shard_parameters(module) # remove torch payload for param in module.parameters(recurse=False): param.colo_attr.set_data_none() def pre_bwd_exec(self, module: torch.nn.Module, input, output): self.adjust_module_data(module) self.gather_parameters(module) for param in module.parameters(recurse=False): param.data = param.colo_attr.data_payload assert param.data.device.type == "cuda", f"PRE BWD param.data must be on CUDA" def post_bwd_exec(self, module: torch.nn.Module, input): # change tensor state to HOLD_AFTER_BWD for param in module.parameters(recurse=False): 
param.colo_attr.sharded_data_tensor.trans_state(TensorState.HOLD_AFTER_BWD) self.shard_parameters(module) # remove torch payload for param in module.parameters(recurse=False): param.colo_attr.set_data_none() def pre_iter(self): pass def post_iter(self): if self._stateful_tensor_mgr: self.logger.debug( f"CPU-GPU data moving this iteration {self._stateful_tensor_mgr.cpu_gpu_move_volume/1e9} GB, get layout info time: {self._stateful_tensor_mgr._layout_time}, evict cpu time: {self._stateful_tensor_mgr._evict_time}", ranks=[0], ) self._stateful_tensor_mgr.finish_iter()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
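ShardedModelV2 installs this hook itself; the sketch below shows the equivalent manual wiring, assuming `module`, `shard_strategy`, and `stateful_tensor_mgr` were built as in sharded_model_v2.py above:

from colossalai.legacy.zero.gemini.ophooks import register_ophooks_recursively
from colossalai.legacy.zero.sharded_model.zero_hook import ZeroHook

hook = ZeroHook(shard_strategy, memstarts_collector=None, stateful_tensor_mgr=stateful_tensor_mgr, process_group=None)
register_ophooks_recursively(module, [hook])
# From now on, every submodule's forward gathers its shards (pre_fwd_exec) and
# re-shards + frees the torch payload on exit (post_fwd_exec); backward mirrors
# this via pre_bwd_exec/post_bwd_exec.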
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_model/__init__.py
colossalai/legacy/zero/sharded_model/__init__.py
from .sharded_model_v2 import ShardedModelV2 __all__ = ["ShardedModelV2"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_model/_utils.py
colossalai/legacy/zero/sharded_model/_utils.py
from typing import Any, Callable, List, Tuple, Union import torch import torch.nn.functional as F from colossalai.legacy.zero.gemini.stateful_tensor import StatefulTensor def get_gradient_predivide_factor(world_size: int) -> float: factor: int = 1 while world_size % factor == 0 and world_size / factor > factor: factor *= 2 return float(factor) def free_storage(data: torch.Tensor) -> None: """Free underlying storage of a Tensor.""" if data.storage().size() > 0: # Since we're modifying the Tensor's Storage directly, make sure the Tensor # is the sole occupant of the Storage. assert data.storage_offset() == 0 data.storage().resize_(0) @torch.no_grad() def alloc_storage(data: torch.Tensor, size: torch.Size) -> None: """Allocate storage for a tensor.""" if data.storage().size() == size.numel(): # no need to reallocate return assert data.storage().size() == 0 data.storage().resize_(size.numel()) def cast_tensor_to_fp16(tensor: torch.Tensor) -> torch.Tensor: if isinstance(tensor, StatefulTensor): tensor = tensor.payload if torch.is_floating_point(tensor) and tensor.dtype is torch.float32: return tensor.half() return tensor def cast_tensor_to_fp32(tensor: Union[torch.Tensor, StatefulTensor]) -> torch.Tensor: if isinstance(tensor, StatefulTensor): tensor = tensor.payload if torch.is_floating_point(tensor) and tensor.dtype in (torch.float16, torch.bfloat16): return tensor.float() return tensor def cast_tensor_to_bf16(tensor: torch.Tensor) -> torch.Tensor: if isinstance(tensor, StatefulTensor): tensor = tensor.payload if torch.is_floating_point(tensor) and tensor.dtype is torch.float32: return tensor.bfloat16() return tensor def apply_to_tensors(x: Any, fn: Callable): if torch.is_tensor(x): return fn(x) elif isinstance(x, list): return [apply_to_tensors(t, fn) for t in x] elif isinstance(x, tuple): return tuple(apply_to_tensors(t, fn) for t in x) elif isinstance(x, dict): return {key: apply_to_tensors(val, fn) for key, val in x.items()} else: return x def cast_float_arguments(fn: Callable, *args: Any, **kwargs: Any) -> Tuple[Any, Any]: return apply_to_tensors(args, fn), apply_to_tensors(kwargs, fn) def chunk_and_pad(tensor: torch.Tensor, num_chunks: int) -> List[torch.Tensor]: """Chunk a given Tensor into num_chunks parts and add any necessary padding.""" chunks = list(torch.flatten(tensor).chunk(num_chunks)) # torch.chunk may return fewer than num_chunks chunks, pad accordingly. num_pad_for_partial_chunk = chunks[0].numel() - chunks[-1].numel() if num_pad_for_partial_chunk > 0: chunks[-1] = F.pad(chunks[-1], [0, num_pad_for_partial_chunk]) if len(chunks) < num_chunks: chunks.extend([torch.zeros_like(chunks[0]) for _ in range(num_chunks - len(chunks))]) return chunks
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
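Two of these helpers can be exercised directly on CPU; the snippet below is runnable as-is:

import torch

from colossalai.legacy.zero.sharded_model._utils import chunk_and_pad, get_gradient_predivide_factor

t = torch.arange(10, dtype=torch.float32)
chunks = chunk_and_pad(t, 4)  # torch.chunk yields sizes [3, 3, 3, 1]; the last is zero-padded
assert all(c.numel() == 3 for c in chunks)
assert chunks[-1].tolist() == [9.0, 0.0, 0.0]

# For world_size 64: pre-divide grads by 8 and post-divide by 64 / 8 = 8,
# which reduces dynamic-range pressure compared to dividing by 64 once.
assert get_gradient_predivide_factor(64) == 8.0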
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/stateful_tensor_mgr.py
colossalai/legacy/zero/gemini/stateful_tensor_mgr.py
import functools import types from time import time from typing import List from colossalai.accelerator import get_accelerator from .stateful_tensor import StatefulTensor, TensorState from .tensor_placement_policy import TensorPlacementPolicy from .tensor_utils import colo_model_data_tensor_move_inline class StatefulTensorMgr(object): """ Stateful Tensor Manager, inspired by PatrickStar PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management https://arxiv.org/abs/2108.05818 """ def __init__(self, tensor_placement_policy: TensorPlacementPolicy) -> None: self._tensor_placement_policy: TensorPlacementPolicy = tensor_placement_policy self._stateful_tensor_list: List[StatefulTensor] = [] self._compute_list: List[StatefulTensor] = [] self._compute_idx: int = -1 self._cpu_gpu_move_volume = 0 self._layout_time = 0 self._evict_time = 0 self._warmup = True def register_stateful_tensor_list(self, tensor_list: List[StatefulTensor]) -> None: assert self._stateful_tensor_list == [], "Can't register stateful tensors for manager twice" self._stateful_tensor_list = tensor_list for t in self._stateful_tensor_list: assert isinstance(t, StatefulTensor) t.trans_state = types.MethodType(functools.partial(self._trans_state, t.trans_state), t) def start_iter(self): pass def finish_iter(self): """This function must be called when each iteration finishes""" self._warmup = False self._compute_idx = -1 self._cpu_gpu_move_volume = 0 self._layout_time = 0 self._evict_time = 0 def adjust_layout(self) -> None: """Adjust the layout of stateful tensors according to the information provided by mem_stats_collector, which should belong to a Sharded Model. """ # find stateful tensor in state COMPUTE cuda_demand = StatefulTensor.GST_MGR.state_mem["cpu"][TensorState.COMPUTE] start = time() move_to_cuda_tensor_list, hold_cuda_tensor_list = self._get_layout_info(self._compute_idx, self._warmup) self._layout_time += time() - start vol, evict_time = self._tensor_placement_policy.evict_tensors( hold_cuda_tensor_list, cuda_demand=cuda_demand, warmup=self._warmup, compute_list=self._compute_list, compute_idx=self._compute_idx, ) self._cpu_gpu_move_volume += vol self._evict_time += evict_time # move COMPUTE tensors to CUDA self._cpu_gpu_move_volume += cuda_demand for t in move_to_cuda_tensor_list: colo_model_data_tensor_move_inline(t, get_accelerator().get_current_device()) @property def cpu_gpu_move_volume(self): return self._cpu_gpu_move_volume def _trans_state(self, trans_state_func, stateful_tensor, state): trans_state_func(state) if state == TensorState.COMPUTE: self._compute_idx += 1 if self._warmup: self._compute_list.append(stateful_tensor) @functools.lru_cache(maxsize=None) def _get_layout_info(self, compute_idx: int, warmup: bool): move_to_cuda_tensor_list = [] hold_cuda_tensor_list = [] for tensor in self._stateful_tensor_list: if tensor.state == TensorState.FREE: continue if tensor.device.type == "cuda": if tensor.state in [TensorState.HOLD, TensorState.HOLD_AFTER_BWD, TensorState.HOLD_AFTER_FWD]: hold_cuda_tensor_list.append(tensor) elif tensor.device.type == "cpu": if tensor.state == TensorState.COMPUTE: move_to_cuda_tensor_list.append(tensor) else: raise RuntimeError return move_to_cuda_tensor_list, hold_cuda_tensor_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
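A sketch of driving the manager directly (ShardedModelV2 normally does this); `stateful_tensors` is an assumed pre-built list of StatefulTensor objects, and a CUDA device is needed once tensors enter the COMPUTE state:

from colossalai.legacy.zero.gemini.stateful_tensor_mgr import StatefulTensorMgr
from colossalai.legacy.zero.gemini.tensor_placement_policy import TensorPlacementPolicyFactory

policy = TensorPlacementPolicyFactory.create("auto")(mem_stats_collector=None)
mgr = StatefulTensorMgr(policy)
mgr.register_stateful_tensor_list(stateful_tensors)  # may only be called once
mgr.start_iter()
# Before each op, tensors entering COMPUTE bump _compute_idx via the patched
# trans_state; adjust_layout() then evicts HOLD tensors from CUDA and moves
# COMPUTE tensors onto it.
mgr.adjust_layout()
mgr.finish_iter()  # must be called when the iteration ends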
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/gemini_context.py
colossalai/legacy/zero/gemini/gemini_context.py
from enum import EnumMeta class GeminiMemoryManager(object): def __init__(self, states_cls: EnumMeta): super().__init__() self.states_cls = states_cls self._cnter = 0 # the counter of instances self.total_mem = dict() self.state_mem = dict() self.state_mem["cpu"] = dict() self.state_mem["cuda"] = dict() self.reset() @property def total_number(self): return self._cnter def reset(self): self._cnter = 0 # the counter of instances self.total_mem["cpu"] = 0 # memory occupation of instances in cpu self.total_mem["cuda"] = 0 # memory occupation of instances in cuda # memory conditions for all states for state in self.states_cls: self.state_mem["cpu"][state] = 0 self.state_mem["cuda"][state] = 0 def register_new_instance(self): self._cnter += 1 def delete_instance(self): self._cnter -= 1 def print_info(self): print( f"Total number: {self.total_number}", f"Total CPU memory occupation: {self.total_mem['cpu']}", f"Total CUDA memory occupation: {self.total_mem['cuda']}\n", sep="\n", ) for state in self.states_cls: print( f"{state}: CPU memory occupation: {self.state_mem['cpu'][state]}", f"{state}: CUDA memory occupation: {self.state_mem['cuda'][state]}\n", sep="\n", )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
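The snippet below, runnable on CPU, mirrors the bookkeeping that StatefulTensor's private state-transition helpers perform against this manager:

from colossalai.legacy.zero.gemini.gemini_context import GeminiMemoryManager
from colossalai.legacy.zero.gemini.stateful_tensor import TensorState

mgr = GeminiMemoryManager(TensorState)
mgr.register_new_instance()
mgr.total_mem["cpu"] += 4096                    # e.g. a 1024-element fp32 payload
mgr.state_mem["cpu"][TensorState.HOLD] += 4096  # held on CPU
mgr.print_info()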
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/tensor_utils.py
colossalai/legacy/zero/gemini/tensor_utils.py
from typing import Tuple, Union import torch from .stateful_tensor import StatefulTensor def is_storage_empty(tensor: torch.Tensor) -> bool: return tensor.storage().size() == 0 def free_storage(tensor: torch.Tensor) -> None: if not is_storage_empty(tensor): tensor.storage().resize_(0) def alloc_storage(tensor: torch.Tensor) -> None: if is_storage_empty(tensor): tensor.storage().resize_(tensor.numel()) def colo_tensor_mem_usage(tensor: Union[torch.Tensor, StatefulTensor]) -> Tuple[int, int]: if isinstance(tensor, StatefulTensor): t = tensor.payload elif isinstance(tensor, torch.Tensor): t = tensor else: return 0, 0 cuda_use, cpu_use = 0, 0 mem_use = t.storage().size() * t.element_size() if t.device.type == "cuda": cuda_use += mem_use elif t.device.type == "cpu": cpu_use += mem_use return cuda_use, cpu_use def colo_model_data_tensor_move( src_t: Union[StatefulTensor, torch.Tensor], tgt_t: Union[StatefulTensor, torch.Tensor] ) -> None: """ A colossal API for model data tensor move. The src and target tensors could be resident on both CPU and GPU. NOTE: The source tensor's payload will be removed by this function. The function will record the communication volume between CPU and GPU. Args: src_t (Union[StatefulTensor, torch.Tensor]): source tensor tgt_t (Union[StatefulTensor, torch.Tensor]): target tensor """ if isinstance(src_t, StatefulTensor): src_t_payload = src_t.payload else: src_t_payload = src_t.data src_dev = src_t_payload.device if isinstance(tgt_t, StatefulTensor): tgt_t_payload = tgt_t.payload else: tgt_t_payload = tgt_t.data tgt_t_payload.copy_(src_t_payload) # remove payload of src_t if isinstance(src_t, StatefulTensor): src_t.set_null() else: src_t.data = torch.empty(0, device=src_dev, dtype=src_t_payload.dtype) def colo_model_data_tensor_move_inline( t: Union[StatefulTensor, torch.Tensor], target_device: Union[torch.device, int] ) -> None: """ Move a tensor to the target_device. Args: t (Union[StatefulTensor, torch.Tensor]): the tensor to be moved target_device: a target device; if the type is int, it is the index of the CUDA device. """ if not isinstance(target_device, torch.device): target_device = torch.device(f"cuda:{target_device}") if isinstance(t, torch.Tensor): t.data = t.data.to(target_device) elif isinstance(t, StatefulTensor): t.move_to(target_device) else: raise TypeError(f"colo_model_data_tensor_move_inline does not accept type {type(t)}") def colo_model_data_move_to_cpu(t: Union[StatefulTensor, torch.Tensor]) -> None: """colo_model_data_move_to_cpu moves a model data tensor from GPU to CPU. Args: t (Union[StatefulTensor, torch.Tensor]): the tensor to be moved """ # TODO() optimize the tensor moving with non-blocking if isinstance(t, torch.Tensor): t.data = t.data.cpu() elif isinstance(t, StatefulTensor): t.move_to(torch.device("cpu")) else: raise TypeError(f"colo_model_data_move_to_cpu does not accept type {type(t)}") def colo_model_tensor_clone(t: Union[StatefulTensor, torch.Tensor], target_device: torch.device) -> torch.Tensor: """ Clone a model data tensor Args: t (Union[StatefulTensor, torch.Tensor]): a model data tensor target_device (torch.device): the target device Returns: torch.Tensor: a cloned torch tensor """ # TODO() rename this function colo_model_data_tensor_move_inline(t, target_device) t_payload = t.payload if isinstance(t, StatefulTensor) else t return t_payload
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
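The storage-level helpers are runnable on CPU:

import torch

from colossalai.legacy.zero.gemini.tensor_utils import (
    alloc_storage,
    colo_tensor_mem_usage,
    free_storage,
    is_storage_empty,
)

t = torch.randn(1024)
cuda_use, cpu_use = colo_tensor_mem_usage(t)
assert cpu_use == 1024 * 4        # fp32 payload: numel * element_size

free_storage(t)                   # shrink the underlying storage to zero bytes
assert is_storage_empty(t)        # tensor metadata survives, the data does not
alloc_storage(t)                  # reallocate; contents are undefined
assert not is_storage_empty(t)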
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/tensor_placement_policy.py
colossalai/legacy/zero/gemini/tensor_placement_policy.py
import functools from abc import ABC, abstractmethod from time import time from typing import List, Optional, Tuple, Type import torch from colossalai.accelerator import get_accelerator from colossalai.legacy.utils.memory import colo_device_memory_capacity from colossalai.zero.gemini.memory_tracer import MemStatsCollector from .stateful_tensor import StatefulTensor from .tensor_utils import colo_model_data_tensor_move_inline class TensorPlacementPolicy(ABC): def __init__(self, device: Optional[torch.device], mem_stats_collector: Optional[MemStatsCollector] = None) -> None: self.device: Optional[torch.device] = device self.mem_stats_collector: Optional[MemStatsCollector] = mem_stats_collector @abstractmethod def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> Tuple[int, float]: raise NotImplementedError class CPUTensorPlacementPolicy(TensorPlacementPolicy): def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None: super().__init__(torch.device("cpu"), mem_stats_collector=mem_stats_collector) def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> Tuple[int, float]: volume = 0 for t in hold_cuda_tensor_list: colo_model_data_tensor_move_inline(t, self.device) volume += t.payload.numel() * t.payload.element_size() return volume, 0 class CUDATensorPlacementPolicy(TensorPlacementPolicy): def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None: assert torch.cuda.is_available(), "Cannot use CUDATensorPlacementPolicy when CUDA is not available" super().__init__(get_accelerator().get_current_device(), mem_stats_collector=mem_stats_collector) def evict_tensors(self, hold_cuda_tensor_list: List[StatefulTensor], **kwargs) -> Tuple[int, float]: return 0, 0 class AutoTensorPlacementPolicy(TensorPlacementPolicy): def __init__(self, mem_stats_collector: Optional[MemStatsCollector] = None) -> None: super().__init__(None, mem_stats_collector=mem_stats_collector) # model data will use 1-self._warmup_non_model_data_ratio CUDA memory in warmup phase # TODO(ver217): make these args configurable self._warmup_non_model_data_ratio: float = 0.8 self._steady_cuda_cap_ratio: float = 0.9 def evict_tensors( self, hold_cuda_tensor_list: List[StatefulTensor], cuda_demand: int = 0, warmup: bool = True, compute_list: List[StatefulTensor] = [], compute_idx: int = 0, **kwargs, ) -> Tuple[int, float]: """ Evict tensors from CUDA device. Args: hold_cuda_tensor_list (List[StatefulTensor]): the list of tensors in a HOLD-like state cuda_demand (int, optional): the volume of data needed on cuda device. Defaults to 0. warmup (bool, optional): a flag indicating whether we are in the warmup phase. Defaults to True. compute_list (List[StatefulTensor], optional): TODO. Defaults to []. compute_idx (int, optional): the index of the current compute step in compute_list. Defaults to 0. Raises: RuntimeError: if not enough CUDA memory can be freed to meet the demand. Returns: Tuple[int, float]: the volume of memory that is evicted and the time spent on eviction. """ start = time() cuda_capacity = colo_device_memory_capacity(get_accelerator().get_current_device()) used_cuda_model_data = StatefulTensor.GST_MGR.total_mem["cuda"] if warmup: # We designate a part of CUDA memory for model data in warmup iterations. max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio else: # max non-model-data cuda memory consumption of this sampling moment and the next sampling moment. 
max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage("cuda") cuda_capacity *= self._steady_cuda_cap_ratio total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data freed_cuda_model_data = 0 end = time() if avail_cuda_model_data < cuda_demand: # Move cuda_demand - avail_cuda_model_data volume of tensors to CPU to_free_cuda_model_data = cuda_demand - avail_cuda_model_data to_free_tensor_list = hold_cuda_tensor_list if not warmup: to_free_tensor_list = self._sort_hold_cuda_tensors( tuple(hold_cuda_tensor_list), compute_idx, tuple(compute_list) ) # print(self._sort_hold_cuda_tensors.cache_info()) end = time() for t in to_free_tensor_list: if freed_cuda_model_data >= to_free_cuda_model_data: break freed_cuda_model_data += t.payload_size colo_model_data_tensor_move_inline(t, torch.device("cpu")) if freed_cuda_model_data < to_free_cuda_model_data: raise RuntimeError( f"Adjust layout failed! Not enough CUDA memory! Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}" ) return freed_cuda_model_data, end - start @staticmethod @functools.lru_cache(maxsize=None) def _sort_hold_cuda_tensors(hold_cuda_tensors: tuple, compute_idx: int, compute_list: tuple) -> list: next_compute_idx = {t: len(compute_list) for t in hold_cuda_tensors} for i in range(len(compute_list) - 1, compute_idx, -1): if compute_list[i] in next_compute_idx: next_compute_idx[compute_list[i]] = i next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True) return [t for (t, idx) in next_compute_idx] class TensorPlacementPolicyFactory: @staticmethod def create(policy_name: str) -> Type[TensorPlacementPolicy]: if policy_name == "cpu": return CPUTensorPlacementPolicy elif policy_name == "cuda": return CUDATensorPlacementPolicy elif policy_name == "auto": return AutoTensorPlacementPolicy else: raise TypeError(f"Unknown tensor placement policy {policy_name}")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
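To make the warmup budget concrete: during warmup the auto policy reserves `_warmup_non_model_data_ratio` of CUDA capacity for non-model data, leaving the rest for model data. The 16 GiB figure below is illustrative, not measured:

from colossalai.legacy.zero.gemini.tensor_placement_policy import TensorPlacementPolicyFactory

policy = TensorPlacementPolicyFactory.create("auto")(mem_stats_collector=None)

cuda_capacity = 16 * 1024**3  # illustrative 16 GiB device
model_data_budget = cuda_capacity * (1 - policy._warmup_non_model_data_ratio)
print(model_data_budget / 1024**3, "GiB for model data during warmup")  # 3.2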
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/stateful_tensor.py
colossalai/legacy/zero/gemini/stateful_tensor.py
from enum import Enum from typing import Optional, Union import torch from .gemini_context import GeminiMemoryManager def sizeof_tensor(tensor: torch.Tensor): return tensor.numel() * tensor.element_size() class TensorState(Enum): FREE = 0 HOLD = 1 HOLD_AFTER_FWD = 2 HOLD_AFTER_BWD = 3 COMPUTE = 4 class StatefulTensor(object): """A structure that stores a torch Tensor together with a labeled state. Inspired by the paper: PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management https://arxiv.org/abs/2108.05818 """ # Global Stateful Tensor Manager GST_MGR = GeminiMemoryManager(TensorState) def __init__(self, maybe_tensor: Optional[torch.Tensor], state: Optional[TensorState] = TensorState.HOLD) -> None: self._state = state self._payload = None self._payload_size = 0 # byte size of current payload StatefulTensor.GST_MGR.register_new_instance() if self._state == TensorState.FREE: # when the state is free, payload should be None assert maybe_tensor is None, f"payload has to be None if state is {self._state}" else: # otherwise, payload should not be None assert maybe_tensor is not None, f"payload can't be None if state is {self._state}" self._payload = maybe_tensor self._payload_size = sizeof_tensor(maybe_tensor) self.__trans_state_update(TensorState.FREE, state) def data_ptr(self): if self._payload is None: return 0 # if a tensor has no storage, 0 should be returned return self._payload.data_ptr() def set_null(self) -> None: # notice that a free stateful tensor does not need to become null again if self.state != TensorState.FREE: self.__trans_state_update(self.state, TensorState.FREE) self.__release() def is_null(self) -> bool: if self.state == TensorState.FREE: # check sanity here assert self.payload is None return True return False def trans_state(self, state: TensorState) -> None: if self.state == TensorState.FREE: # free stateful tensor can't change state assert state == TensorState.FREE, "Free stateful tensor can't change to other states" return self.__trans_state_update(self.state, state) if state == TensorState.FREE: self.__release() else: self._state = state def move_to(self, device: Union[torch.device, int]): assert self.state is not TensorState.FREE, "Can't move free stateful tensor" if not isinstance(device, torch.device): to_device = torch.device("cuda", device) else: to_device = device from_device_type = self.device.type if from_device_type == to_device.type: # from device == to device return # update manager's information self.__trans_device_update(from_device_type, to_device.type) self.payload.data = self.payload.data.to(to_device) def payload_copy(self, tensor) -> None: self._payload.view(-1).copy_(tensor.view(-1)) def payload_reset(self, tensor) -> None: assert tensor is not None, "Can't reset None for stateful tensors, please use set_null() instead" if self.payload is not None: # release old payload self.__trans_state_update(self.state, TensorState.FREE) else: # otherwise, set the state to HOLD for new payload self._state = TensorState.HOLD del self._payload self._payload = tensor self._payload_size = sizeof_tensor(tensor) # record new payload self.__trans_state_update(TensorState.FREE, self.state) def payload_relay(self, rhs): # relay the payload of rhs to current stateful tensor # can't support null relay right now assert not rhs.is_null() # now this function only support stateful tensor that has zero-length payload # because it doesn't require memory manager updating # you can extend this function by yourself assert self.payload_size == 0 self._payload = rhs.payload 
self._payload_size = rhs.payload_size self._state = TensorState.HOLD self.__trans_state_update(rhs.state, TensorState.HOLD) rhs.__release() @property def payload(self) -> Optional[torch.Tensor]: return self._payload @property def payload_size(self) -> int: return self._payload_size @property def state(self) -> TensorState: return self._state @property def device(self) -> torch.device: return self._payload.device @property def dtype(self) -> torch.dtype: return self._payload.dtype @property def shape(self): return self._payload.shape def to(self, device: torch.device): raise RuntimeError("Use move_to(...) instead of calling .to() on StatefulTensor") def to_(self, device: torch.device): raise RuntimeError("Use move_to(...) instead of calling .to_() on StatefulTensor") def __release(self): # release current payload # shouldn't be visible to users self._state = TensorState.FREE self._payload = None self._payload_size = 0 def __trans_state_update(self, from_state: TensorState, to_state: TensorState): """Update global manager when changing the state of a tensor""" manager = StatefulTensor.GST_MGR size = self.payload_size device_type = self.device.type if from_state != TensorState.FREE: manager.state_mem[device_type][from_state] -= size else: # when from_state is FREE, the tensor is new to manager # we should add its memory manager.total_mem[device_type] += size if to_state != TensorState.FREE: manager.state_mem[device_type][to_state] += size else: # when to_state is FREE, the tensor will be deleted soon # we should sub its memory manager.total_mem[device_type] -= size def __trans_device_update(self, from_type: str, to_type: str): """Update global manager when changing the device of a tensor""" manager = StatefulTensor.GST_MGR size = self.payload_size state = self.state # update aggregated information manager.total_mem[from_type] -= size manager.total_mem[to_type] += size # update the information of each state manager.state_mem[from_type][state] -= size manager.state_mem[to_type][state] += size def __del__(self): self.set_null() StatefulTensor.GST_MGR.delete_instance() del self
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
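The state machine above is easiest to see end to end with a tiny payload: every transition funnels through __trans_state_update, which moves byte counts between the per-device buckets of the global GeminiMemoryManager. A minimal CPU sketch, assuming the legacy module above is importable and that the manager initializes its total_mem/state_mem counters at construction (both fields are the ones the code above manipulates):

import torch

from colossalai.legacy.zero.gemini.stateful_tensor import StatefulTensor, TensorState

payload = torch.zeros(4, 4)                     # 16 floats -> 64 bytes at fp32
t = StatefulTensor(payload)                     # registered with GST_MGR in the HOLD state
t.trans_state(TensorState.COMPUTE)              # an operator is about to use the payload
t.trans_state(TensorState.HOLD_AFTER_FWD)       # released again after the forward pass
print(StatefulTensor.GST_MGR.total_mem["cpu"])  # 64: bytes are tracked per device type
t.set_null()                                    # back to FREE: payload dropped, bytes deducted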
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/__init__.py
colossalai/legacy/zero/gemini/__init__.py
from .colo_init_context import ColoInitContext, post_process_colo_init_ctx from .ophooks import BaseOpHook, register_ophooks_recursively from .stateful_tensor import StatefulTensor from .stateful_tensor_mgr import StatefulTensorMgr from .tensor_placement_policy import AutoTensorPlacementPolicy, CPUTensorPlacementPolicy, CUDATensorPlacementPolicy __all__ = [ "StatefulTensorMgr", "StatefulTensor", "CPUTensorPlacementPolicy", "CUDATensorPlacementPolicy", "AutoTensorPlacementPolicy", "register_ophooks_recursively", "BaseOpHook", "ColoInitContext", "post_process_colo_init_ctx", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/colo_init_context.py
colossalai/legacy/zero/gemini/colo_init_context.py
from typing import Any, Iterator, Optional, Tuple, Union import torch from torch import nn from colossalai.legacy.tensor import ProcessGroup from colossalai.tensor import ColoParameter, ColoTensor from colossalai.utils.model.utils import InsertPostInitMethodToModuleSubClasses # find named_params includes replica def _named_params_with_replica( module: nn.Module, prefix: str = "", recurse: bool = True, ) -> Iterator[Tuple[str, Union[nn.Parameter, ColoTensor]]]: modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)] for mod_prefix, mod in modules: for name, val in mod._parameters.items(): if val is None: continue name = mod_prefix + ("." if mod_prefix else "") + name yield name, val def _convert_to_coloparam( param: torch.nn.Parameter, device: torch.device, dtype=torch.float, default_pg: Optional[ProcessGroup] = None, default_dist_spec: Optional[Any] = None, ) -> ColoParameter: if type(param) is ColoParameter: return param # detaching tensor is necessary for optimizers. requires_grad = param.requires_grad # param is the global tensor. if param.device.type == "meta": colo_param = ColoParameter(param, requires_grad=requires_grad) else: colo_param = ColoParameter(param.to(device=device, dtype=dtype), requires_grad=requires_grad) # if default_shard_plan exists, shard the param during initialization. # This can reduce the model size after initialization. # NOTE() embedding usually can not be correctly sharded. So I use except to handle # the param that can not be sharded by the default plan if default_pg is not None: colo_param.set_process_group(default_pg) if default_dist_spec is not None: try: colo_param.set_dist_spec(default_dist_spec) except: pass return colo_param def ColoModulize(module): """ Replacing the parameters() and named_parameters() with our customized ones """ module._colo_visited = True class ColoInitContext(InsertPostInitMethodToModuleSubClasses): def __init__( self, device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.float, default_pg: Optional[ProcessGroup] = None, default_dist_spec=None, ): """ Args: device (torch.device): the device where parameters initialized are resident. Defaults to torch.device('cpu'). dtype (torch.dtype): the dtype of parameters initialized. Defaults to torch.float. default_pg (ProcessGroup): the default process group for all initialized parameters. default_dist_spec: the default distributed specifications. """ super().__init__() self._device = device self._dtype = dtype self._register_colo_modules() self._default_pg = default_pg self._default_dist_spec = default_dist_spec def _register_colo_modules(self): from colossalai.legacy.nn.parallel.layers import ColoEmbedding, ColoLinear, register_colo_module register_colo_module(torch.nn.Linear, ColoLinear()) register_colo_module(torch.nn.Embedding, ColoEmbedding()) def _pre_context_exec(self): pass def _post_init_method(self, module: torch.nn.Module, *args, **kwargs): """ The function to call at the end of the constructor of each module. FIXME(fjr) The module may be passed to this function multiple times? 
""" name_list = [] for name, param in _named_params_with_replica(module): if type(param) is ColoParameter: continue split = name.rfind(".") if split >= 0: # param in submodule module_name = name[:split] param_name = name[split + 1 :] else: module_name = "" # param in current module param_name = name name_list.append((module_name, param_name)) replaced_tensors = dict() # record mapping between (torch.Tensor, ColoTensor) to distinguish the same reference for module_name, param_name in name_list: submodule = module.get_submodule(module_name) param = submodule.get_parameter(param_name) if param in replaced_tensors: colo_param = replaced_tensors[param] else: colo_param = _convert_to_coloparam( param, self._device, self._dtype, self._default_pg, self._default_dist_spec ) replaced_tensors[param] = colo_param delattr(submodule, param_name) setattr(submodule, param_name, colo_param) colo_param.shared_param_modules.append(submodule) param_number = 0 meta_param_number = 0 buffer_number = 0 meta_buffer_number = 0 for param in module.parameters(): param_number += 1 meta_param_number += param.device.type == "meta" for buffer in module.buffers(): buffer_number += 1 meta_buffer_number += buffer.device.type == "meta" if meta_param_number > 0 and meta_param_number != param_number: raise ValueError("Meta parameters and valued parameters can not be in the same model") if meta_buffer_number > 0 and meta_buffer_number != buffer_number: raise ValueError("Meta buffers and valued buffers can not be in the same model") if meta_buffer_number == 0: for buffer in module.buffers(): buffer.data = buffer.data.to(device=self._device) def post_process_colo_init_ctx( model: torch.nn.Module, device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.float, default_pg: Optional[ProcessGroup] = None, default_dist_spec=None, ): """post_process_colo_init_ctx This function is called after `ColoInitContext`. Args: model (torch.nn.module): the model device (torch.device, optional): device type of the model params. Defaults to torch.device('cpu'). dtype (torch.dtype, optional): dtype of the model params. Defaults to torch.float. default_pg (Optional[ProcessGroup], optional): default process group. Defaults to None. Indicates a DP-only process group. default_dist_spec (Any, optional): default dist spec of params. Defaults to None. Raises: RuntimeError: raise error if """ torch_params = [] for n, p in model.named_parameters(): if not isinstance(p, ColoParameter): # print(f"{n} is not a ColoParameter. We are going to converting it to ColoParameter") torch_params.append((n, p)) for n, param in torch_params: name_list = n.split(".") module = model for i in range(len(name_list) - 1): module = module._modules[name_list[i]] delattr(module, name_list[-1]) setattr(module, name_list[-1], _convert_to_coloparam(param, device, dtype, default_pg, default_dist_spec)) del torch_params for n, p in model.named_parameters(): if not isinstance(p, ColoTensor): raise RuntimeError
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
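Because ColoInitContext hooks module construction through InsertPostInitMethodToModuleSubClasses, the intended usage is as a context manager wrapped around model creation. A hedged sketch (assuming an environment where the legacy tensor layers register cleanly; the toy model is made up):

import torch

from colossalai.legacy.zero.gemini.colo_init_context import ColoInitContext, post_process_colo_init_ctx

# parameters created inside the context are converted to ColoParameter
# by _post_init_method as each submodule finishes its constructor
with ColoInitContext(device=torch.device("cpu"), dtype=torch.float):
    model = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU(), torch.nn.Linear(16, 4))

# parameters attached to the model after the context has exited can still be
# converted with the post-processing helper
post_process_colo_init_ctx(model, device=torch.device("cpu"))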
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/ophooks/runtime_mem_tracer_hook.py
colossalai/legacy/zero/gemini/ophooks/runtime_mem_tracer_hook.py
from contextlib import contextmanager from enum import Enum from functools import partial from typing import List import torch from colossalai.legacy.zero.gemini.tensor_utils import alloc_storage, free_storage from colossalai.tensor.param_op_hook import ColoParamOpHook from colossalai.zero.gemini.memory_tracer import MemStats, SyncCudaMemoryMonitor class TrainingPhase(Enum): FORWARD = 0 BACKWARD = 1 class GradMemStats: def __init__(self) -> None: self.unreleased_grad_flag = {} self.unreleased_grad_volume = 0 def clear(self): self.unreleased_grad_flag.clear() self.unreleased_grad_volume = 0 class GradMemTracerHook: def __init__(self, grad_stats: GradMemStats): self.grad_hook_list = [] self._grad_stats = grad_stats def grad_handle(self, p, grad): assert self._grad_stats.unreleased_grad_flag[p] free_storage(grad) self._grad_stats.unreleased_grad_volume -= grad.numel() * grad.element_size() self._grad_stats.unreleased_grad_flag[p] = False def register_grad_hook(self, module: torch.nn.Module): for p in module.parameters(): if p.requires_grad: self.grad_hook_list.append(p.register_hook(partial(self.grad_handle, p))) self._grad_stats.unreleased_grad_flag[p] = False def remove_grad_hook(self): for hook in self.grad_hook_list: hook.remove() class ParamMemTracerHook(ColoParamOpHook): def __init__(self, memstats: MemStats, gradstats: GradMemStats) -> None: super().__init__() self._training_phase = TrainingPhase.FORWARD self._memstats = memstats self._grad_stats = gradstats self.mem_monitor = SyncCudaMemoryMonitor() def _free_cuda_params(self, params): for p in params: if p.data.device.type == "cpu": raise NotImplementedError("Only free cuda memory") free_storage(p.data) def _allocate_params_on_cuda(self, params: List[torch.nn.Parameter]): """ move params to cuda Args: params (List[torch.nn.Parameter]): target params Raises: NotImplementedError: raise error when param has cpu grad """ for p in params: cur_dev = p.data.device.type if cur_dev == "cpu": if p.grad is not None and p.grad.device.type == "cpu": raise NotImplementedError("Only run in forward propagation") p.data = torch.empty( p.data.shape, device="cuda", dtype=p.data.dtype, requires_grad=p.data.requires_grad ) elif cur_dev == "cuda": alloc_storage(p.data) def record_model_data_volume(self, params): """ get cuda model data used by params """ data_volume = self._grad_stats.unreleased_grad_volume for p in params: cur_model_data_volume = p.data.numel() * p.data.element_size() data_volume += cur_model_data_volume if self._training_phase == TrainingPhase.BACKWARD and p.requires_grad: # add param.grad, actually param.grad is None in this time data_volume += cur_model_data_volume if not self._grad_stats.unreleased_grad_flag[p]: self._grad_stats.unreleased_grad_volume += cur_model_data_volume self._grad_stats.unreleased_grad_flag[p] = True # record max non model data used for this Op self._memstats.record_max_cuda_model_data(data_volume) def pre_op(self, params): max_cuda_used_pre_op = self.mem_monitor.finish() # record max cuda overall data for prev OP. self._memstats.record_max_cuda_overall_data(max_cuda_used_pre_op) # record max cuda non model data for prev OP. 
self._memstats.calc_max_cuda_non_model_data() self._allocate_params_on_cuda(params) # record max cuda model data for current OP self.record_model_data_volume(params) self.mem_monitor.start() self._memstats.increase_preop_step(params) def post_op(self, params): self._free_cuda_params(params) def pre_forward(self, params: List[torch.Tensor]) -> None: self.pre_op(params) def post_forward(self, params: List[torch.Tensor]) -> None: self.post_op(params) def pre_backward(self, params: List[torch.Tensor]) -> None: self.pre_op(params) def post_backward(self, params: List[torch.Tensor]) -> None: self.post_op(params) @contextmanager def switch_training_phase(self, training_phase: TrainingPhase = TrainingPhase.BACKWARD): old_training_phase = self._training_phase try: self._training_phase = training_phase yield finally: self._training_phase = old_training_phase switch_to_backward = switch_training_phase switch_to_forward = partial(switch_to_backward, training_phase=TrainingPhase.FORWARD)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
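The gradient bookkeeping in GradMemTracerHook rests on a plain PyTorch mechanism: a per-parameter register_hook callback that fires when that parameter's gradient is produced. A simplified CPU-only analog of the accounting (TinyTracker is a made-up name; the real hook additionally frees the gradient's storage via free_storage and clears per-parameter flags):

from functools import partial

import torch

class TinyTracker:
    def __init__(self):
        self.unreleased_grad_volume = 0  # bytes of gradients produced so far
        self.handles = []

    def _on_grad(self, p, grad):
        # fires once per parameter when autograd produces its gradient
        self.unreleased_grad_volume += grad.numel() * grad.element_size()

    def attach(self, module):
        for p in module.parameters():
            if p.requires_grad:
                self.handles.append(p.register_hook(partial(self._on_grad, p)))

    def detach(self):
        for h in self.handles:
            h.remove()

model = torch.nn.Linear(8, 8)
tracker = TinyTracker()
tracker.attach(model)
model(torch.randn(2, 8)).sum().backward()
print(tracker.unreleased_grad_volume)  # (64 + 8) grad elements * 4 bytes = 288
tracker.detach()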
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/ophooks/_shard_param_ophook.py
colossalai/legacy/zero/gemini/ophooks/_shard_param_ophook.py
import torch

from colossalai.legacy.registry import OPHOOKS

from . import BaseOpHook


@OPHOOKS.register_module
class ShardParamHook(BaseOpHook):
    """
    A hook that gathers sharded parameters before the FWD and BWD passes of each
    operator and re-shards them afterwards.
    """

    def __init__(self):
        super().__init__()
        self._niter = 0  # iteration counter exposed through niter()

    def niter(self):
        return self._niter

    def pre_fwd_exec(self, module: torch.nn.Module, *args):
        for param in module.parameters():
            assert hasattr(param, "ca_attr")
            param.ca_attr.gather()
            param.data = param.ca_attr.payload()

    def post_fwd_exec(self, module: torch.nn.Module, *args):
        for param in module.parameters():
            assert hasattr(param, "ca_attr")
            param.ca_attr.shard()
            param.data = param.ca_attr.payload()

    def pre_bwd_exec(self, module: torch.nn.Module, input, output):
        for param in module.parameters():
            assert hasattr(param, "ca_attr")
            param.ca_attr.gather()
            param.data = param.ca_attr.payload()

    def post_bwd_exec(self, module: torch.nn.Module, input):
        for param in module.parameters():
            assert hasattr(param, "ca_attr")
            param.ca_attr.shard()
            param.data = param.ca_attr.payload()

    def pre_iter(self):
        pass

    def post_iter(self):
        pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
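ShardParamHook relies on every parameter carrying a ca_attr object that exposes gather(), shard() and payload(). A purely illustrative stub of that protocol (DummyShardedAttr is hypothetical; a real shard attribute would all-gather and re-shard storage across ranks rather than slice a local view):

import torch

class DummyShardedAttr:
    def __init__(self, full_tensor: torch.Tensor):
        self._full = full_tensor
        self._sharded = True

    def gather(self):
        self._sharded = False  # a real implementation would all-gather shards here

    def shard(self):
        self._sharded = True  # a real implementation would keep only the local shard

    def payload(self) -> torch.Tensor:
        # full tensor when gathered, a local half when sharded (illustrative)
        return self._full if not self._sharded else self._full.view(-1)[: self._full.numel() // 2]

p = torch.nn.Parameter(torch.randn(4, 4))
p.ca_attr = DummyShardedAttr(p.data.clone())

p.ca_attr.gather()
p.data = p.ca_attr.payload()  # what pre_fwd_exec / pre_bwd_exec do per parameter
p.ca_attr.shard()
p.data = p.ca_attr.payload()  # what post_fwd_exec / post_bwd_exec do per parameter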
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/ophooks/utils.py
colossalai/legacy/zero/gemini/ophooks/utils.py
# this code is inspired by the DeepSpeed library and implemented with our own design from scratch from abc import ABC, abstractmethod from typing import Callable, List, Optional import torch class BaseOpHook(ABC): """This class allows users to add customized operations before and after the execution of a PyTorch submodule""" def __init__(self): pass @abstractmethod def pre_fwd_exec(self, module: torch.nn.Module, *args): pass @abstractmethod def post_fwd_exec(self, module: torch.nn.Module, *args): pass @abstractmethod def pre_bwd_exec(self, module: torch.nn.Module, input, output): pass @abstractmethod def post_bwd_exec(self, module: torch.nn.Module, input): pass @abstractmethod def post_iter(self): pass # apply torch.autograd.Function that calls a backward_function to tensors in output def _apply_to_tensors_only(module, functional, backward_function, outputs): if type(outputs) is tuple: touched_outputs = [] for output in outputs: touched_output = _apply_to_tensors_only(module, functional, backward_function, output) touched_outputs.append(touched_output) return tuple(touched_outputs) elif type(outputs) is torch.Tensor: return functional.apply(module, backward_function, outputs) else: return outputs class PreBackwardFunction(torch.autograd.Function): @staticmethod def forward(ctx, module, pre_backward_function, outputs): ctx.module = module ctx.pre_backward_function = pre_backward_function module.applied_pre_backward = False outputs = outputs.detach() return outputs @staticmethod def backward(ctx, *args): ctx.pre_backward_function(ctx.module) return (None, None) + args class PostBackwardFunction(torch.autograd.Function): @staticmethod def forward(ctx, module, pre_backward_function, output): ctx.module = module output = output.detach() ctx.pre_backward_function = pre_backward_function return output @staticmethod def backward(ctx, *args): """ Args: activation_grad of the next layer. Returns: grad of the input activation. """ ctx.pre_backward_function(ctx.module) return (None, None) + args def register_ophooks_recursively( module: torch.nn.Module, ophook_list: List[BaseOpHook], name: str = "", filter_fn: Optional[Callable] = None ): r"""Recursively register pre/post hooks for all submodules in the module in FWD and BWD.""" assert isinstance(module, torch.nn.Module) assert isinstance(ophook_list, (list, tuple)) assert len(ophook_list) > 0, "expected at least 1 hook in the argument ophook_list but found 0" for hook in ophook_list: assert isinstance(hook, BaseOpHook) # Add hooks for submodules for child_name, child in module.named_children(): register_ophooks_recursively(child, ophook_list, name + child_name, filter_fn) # Early return on modules with no parameters. 
if len(list(module.parameters(recurse=False))) == 0: return # return from filtered module if filter_fn is not None and filter_fn(module): return def _pre_forward_module_hook(submodule, *args): for hook in ophook_list: assert isinstance(submodule, torch.nn.Module) hook.pre_fwd_exec(submodule, *args) def _post_forward_module_hook(submodule, *args): for hook in ophook_list: assert isinstance(submodule, torch.nn.Module) hook.post_fwd_exec(submodule, *args) def _pre_backward_module_hook(submodule, inputs, output): def _run_before_backward_function(submodule): for hook in ophook_list: assert isinstance(submodule, torch.nn.Module) hook.pre_bwd_exec(submodule, inputs, output) return _apply_to_tensors_only(submodule, PreBackwardFunction, _run_before_backward_function, output) def _post_backward_module_hook(submodule, inputs): def _run_after_backward_function(submodule): for hook in ophook_list: assert isinstance(submodule, torch.nn.Module) hook.post_bwd_exec(submodule, inputs) return _apply_to_tensors_only(submodule, PostBackwardFunction, _run_after_backward_function, inputs) module.register_forward_pre_hook(_pre_forward_module_hook) module.register_forward_hook(_post_forward_module_hook) module.register_forward_hook(_pre_backward_module_hook) module.register_forward_pre_hook(_post_backward_module_hook)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
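register_ophooks_recursively wires the pre/post callbacks into the forward pass through ordinary module hooks and into the backward pass through the two autograd functions spliced onto each submodule's outputs and inputs. A minimal hook that just traces execution order, runnable on a single CPU process assuming the package above is importable:

import torch

from colossalai.legacy.zero.gemini.ophooks import BaseOpHook, register_ophooks_recursively

class LoggingOpHook(BaseOpHook):
    # print stands in for a real logger in this sketch
    def pre_fwd_exec(self, module, *args):
        print(f"fwd  -> {module.__class__.__name__}")

    def post_fwd_exec(self, module, *args):
        print(f"fwd  <- {module.__class__.__name__}")

    def pre_bwd_exec(self, module, input, output):
        print(f"bwd  -> {module.__class__.__name__}")

    def post_bwd_exec(self, module, input):
        print(f"bwd  <- {module.__class__.__name__}")

    def post_iter(self):
        pass

model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 1))
register_ophooks_recursively(model, [LoggingOpHook()])
model(torch.randn(2, 4)).sum().backward()  # prints fwd pairs, then bwd pairs in reverse order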
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/ophooks/__init__.py
colossalai/legacy/zero/gemini/ophooks/__init__.py
from .utils import BaseOpHook, register_ophooks_recursively __all__ = ["BaseOpHook", "register_ophooks_recursively"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/ophooks/_shard_grad_ophook.py
colossalai/legacy/zero/gemini/ophooks/_shard_grad_ophook.py
import torch

from colossalai.legacy.registry import OPHOOKS

from . import BaseOpHook


@OPHOOKS.register_module
class ShardGradMemTracerHook(BaseOpHook):
    """
    A hook that sets up sharded gradients before the BWD pass of each operator.
    """

    def __init__(self):
        super().__init__()

    def pre_fwd_exec(self, module: torch.nn.Module, *args):
        pass

    def post_fwd_exec(self, module: torch.nn.Module, *args):
        pass

    def pre_bwd_exec(self, module: torch.nn.Module, input, output):
        for param in module.parameters():
            assert hasattr(param, "_sharded_grad")
            param._sharded_grad.setup()

    def post_bwd_exec(self, module: torch.nn.Module, input):
        pass

    def post_iter(self):
        pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/paramhooks/_param_hookmgr.py
colossalai/legacy/zero/gemini/paramhooks/_param_hookmgr.py
import functools
from typing import Callable, List

import torch


class BaseParamHookMgr(object):
    def __init__(self, param_list: List[torch.nn.Parameter]) -> None:
        r"""
        Registers backward hooks on every parameter in the given list.
        """
        self._param_list = param_list
        self._hook_list = []

    def register_backward_hooks(self, hook_call: Callable) -> None:
        r"""
        The hook_call will be called every time a gradient with respect to a param
        in self._param_list is computed.
        The hook should have the following signature:
        ```
        hook(param, grad) -> Tensor or None
        ```
        """
        if not torch.is_grad_enabled():
            return  # don't register grad hooks if grad isn't enabled
        for p in self._param_list:
            if p.requires_grad and not hasattr(p, "_base_param_hook"):
                handle = p.register_hook(functools.partial(hook_call, p))
                p._base_param_hook = handle

    def remove_hooks(self) -> None:
        """
        Remove hooks from model parameters.
        """
        for p in self._param_list:
            if p.requires_grad and hasattr(p, "_base_param_hook"):
                p._base_param_hook.remove()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
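A usage sketch for BaseParamHookMgr, following the hook(param, grad) -> Tensor or None signature documented above (CPU-only, values illustrative):

import torch

from colossalai.legacy.zero.gemini.paramhooks import BaseParamHookMgr

def scale_grad(param, grad):
    # returning a tensor replaces the gradient that autograd stores
    return grad * 0.5

model = torch.nn.Linear(4, 4)
mgr = BaseParamHookMgr(list(model.parameters()))
mgr.register_backward_hooks(scale_grad)

model(torch.randn(1, 4)).sum().backward()
print(model.bias.grad)  # gradients arrive pre-scaled by 0.5
mgr.remove_hooks()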
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/gemini/paramhooks/__init__.py
colossalai/legacy/zero/gemini/paramhooks/__init__.py
from ._param_hookmgr import BaseParamHookMgr __all__ = ["BaseParamHookMgr"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/builder/__init__.py
colossalai/legacy/builder/__init__.py
from .builder import build_from_config, build_from_registry, build_gradient_handler __all__ = ["build_gradient_handler", "build_from_config", "build_from_registry"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/builder/builder.py
colossalai/legacy/builder/builder.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import inspect from colossalai.legacy.registry import * def build_from_config(module, config: dict): """Returns an object of :class:`module` constructed from `config`. Args: module: A python or user-defined class config: A python dict containing information used in the construction of the return object Returns: An ``object`` of interest Raises: AssertionError: Raises an AssertionError if `module` is not a class """ assert inspect.isclass(module), "module must be a class" return module(**config) def build_from_registry(config, registry: Registry): r"""Returns an object constructed from `config`, the type of the object is specified by `registry`. Note: the `config` is used to construct the return object such as `LAYERS`, `OPTIMIZERS` and other support types in `registry`. The `config` should contain all required parameters of corresponding object. The details of support types in `registry` and the `mod_type` in `config` could be found in `registry <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/registry/__init__.py>`_. Args: config (dict or :class:`colossalai.context.colossalai.context.Config`): information used in the construction of the return object. registry (:class:`Registry`): A registry specifying the type of the return object Returns: A Python object specified by `registry`. Raises: Exception: Raises an Exception if an error occurred when building from registry. """ config_ = config.copy() # keep the original config untouched assert isinstance(registry, Registry), f"Expected type Registry but got {type(registry)}" mod_type = config_.pop("type") assert registry.has(mod_type), f"{mod_type} is not found in registry {registry.name}" try: obj = registry.get_module(mod_type)(**config_) except Exception as e: print(f"An error occurred when building {mod_type} from registry {registry.name}", flush=True) raise e return obj def build_gradient_handler(config, model, optimizer): """Returns a gradient handler object of :class:`BaseGradientHandler` constructed from `config`, `model` and `optimizer`. Args: config (dict or :class:`colossalai.context.Config`): A python dict or a :class:`colossalai.context.Config` object containing information used in the construction of the ``GRADIENT_HANDLER``. model (:class:`nn.Module`): A model containing parameters for the gradient handler optimizer (:class:`torch.optim.Optimizer`): An optimizer object containing parameters for the gradient handler Returns: An object of :class:`colossalai.legacy.engine.BaseGradientHandler` """ config_ = config.copy() config_["model"] = model config_["optimizer"] = optimizer return build_from_registry(config_, GRADIENT_HANDLER)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
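The registry pattern above turns a plain config dict into an object: "type" selects the registered class and the remaining keys become constructor arguments. A hedged sketch (LAYERS_DEMO and TinyMLP are invented for this example; the decorator form mirrors the @OPHOOKS.register_module usage seen earlier):

import torch

from colossalai.legacy.builder import build_from_registry
from colossalai.legacy.registry import Registry

LAYERS_DEMO = Registry("layers_demo")  # hypothetical registry for this sketch

@LAYERS_DEMO.register_module
class TinyMLP(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.proj = torch.nn.Linear(dim, dim)

config = {"type": "TinyMLP", "dim": 8}  # "type" is popped, the rest feed __init__
layer = build_from_registry(config, LAYERS_DEMO)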
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/communication/p2p_v2.py
colossalai/legacy/communication/p2p_v2.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import io import pickle from typing import Any, List, Tuple, Union import torch import torch.distributed as dist from torch.distributed import ProcessGroupNCCL from torch.distributed import distributed_c10d as c10d from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc TensorShape = Union[torch.Size, List[int], Tuple[int]] _pg_manager = {} _unpickler = pickle.Unpickler def init_process_group(): """initialise process group by dist.new_group in the adjacent stages Args: None Returns: None """ world_size = gpc.get_world_size(ParallelMode.PIPELINE) for i in range(world_size - 1): _pg_manager[(i, i + 1)] = dist.new_group([i, i + 1]) def _acquire_pair_group_handle(first_rank: int, second_rank: int) -> ProcessGroupNCCL: """get the group handle of two given ranks Args: first_rank (int): first rank in the pair second_rank (int): second rank in the pair Returns: :class:`ProcessGroupNCCL`: the handle of the group consisting of the given two ranks """ if len(_pg_manager) == 0: init_process_group() if first_rank > second_rank: first_rank, second_rank = second_rank, first_rank pair_key = (first_rank, second_rank) return _pg_manager[pair_key] def _cuda_safe_tensor_to_object(tensor: torch.Tensor, tensor_size: torch.Size) -> object: """transform tensor to object with unpickle. Info of the device in bytes stream will be modified into current device before unpickling Args: tensor (:class:`torch.tensor`): tensor to be unpickled tensor_size (:class:`torch.Size`): Size of the real info in bytes Returns: Any: object after unpickled """ buf = tensor.numpy().tobytes()[:tensor_size] if b"cuda" in buf: buf_array = bytearray(buf) device_index = torch.cuda.current_device() buf_array[buf_array.find(b"cuda") + 5] = 48 + device_index buf = bytes(buf_array) io_bytes = io.BytesIO(buf) byte_pickler = _unpickler(io_bytes) unpickle = byte_pickler.load() return unpickle def _broadcast_object_list(object_list: List[Any], src: int, dst: int, device=None): """This is a modified version of the broadcast_object_list in torch.distribution The only difference is that object will be move to correct device after unpickled. If local_rank = src, then object list will be sent to rank src. Otherwise, object list will be updated with data sent from rank src. Args: object_list (List[Any]): list of object to broadcast src (int): source rank to broadcast dst (int): dst rank to broadcast device (:class:`torch.device`): device to do broadcast. current device in default """ group = _acquire_pair_group_handle(src, dst) if c10d._rank_not_in_group(group): c10d._warn_not_in_group("broadcast_object_list") return local_rank = gpc.get_local_rank(ParallelMode.PIPELINE) # Serialize object_list elements to tensors on src rank. 
if local_rank == src: tensor_list, size_list = zip(*[c10d._object_to_tensor(obj) for obj in object_list]) object_sizes_tensor = torch.cat(size_list) else: object_sizes_tensor = torch.empty(len(object_list), dtype=torch.long) is_nccl_backend = c10d._check_for_nccl_backend(group) current_device = None if device is not None: if is_nccl_backend and device.type != "cuda": raise ValueError("device type must be cuda for nccl backend") current_device = device else: current_device = torch.device("cpu") if is_nccl_backend: current_device = torch.device("cuda", torch.cuda.current_device()) if is_nccl_backend: object_sizes_tensor = object_sizes_tensor.to(current_device) # Broadcast object sizes c10d.broadcast(object_sizes_tensor, src=src, group=group, async_op=False) # Concatenate and broadcast serialized object tensors if local_rank == src: object_tensor = torch.cat(tensor_list) else: object_tensor = torch.empty( # type: ignore[call-overload] torch.sum(object_sizes_tensor).item(), # type: ignore[arg-type] dtype=torch.uint8, ) if is_nccl_backend: object_tensor = object_tensor.to(current_device) c10d.broadcast(object_tensor, src=src, group=group, async_op=False) # Deserialize objects using their stored sizes. offset = 0 if local_rank != src: for i, obj_size in enumerate(object_sizes_tensor): obj_view = object_tensor[offset : offset + obj_size] obj_view = obj_view.type(torch.uint8) if obj_view.device != torch.device("cpu"): obj_view = obj_view.cpu() offset += obj_size # unpickle unpickle_object = _cuda_safe_tensor_to_object(obj_view, obj_size) # unconsistence in device if ( isinstance(unpickle_object, torch.Tensor) and unpickle_object.device.index != torch.cuda.current_device() ): unpickle_object = unpickle_object.cuda() object_list[i] = unpickle_object def _send_object(object: Any, dst: int) -> None: """send anything to dst rank Args: object (Any): object needed to be sent dst (int): rank of the destination Returns: None """ local_rank = gpc.get_local_rank(ParallelMode.PIPELINE) # handler = _acquire_pair_group_handle(local_rank, dst) # transform to list if not if isinstance(object, torch.Tensor): object = [object] # broadcast length first # TODO : more elegant ? P.S. reduce a _broadcast_object_list _broadcast_object_list([len(object)], local_rank, dst) # then broadcast safely _broadcast_object_list(object, local_rank, dst) def _recv_object(src: int) -> Any: """recv anything from src Args: src (int): source rank of data. local rank will receive data from src rank. Returns: Any: Object received from src. """ local_rank = gpc.get_local_rank(ParallelMode.PIPELINE) # handler = _acquire_pair_group_handle(local_rank, src) # recv length first length = [0] _broadcast_object_list(length, src, local_rank) # then create recv buff from length[0] and broadcast object = [None] * length[0] _broadcast_object_list(object, src, local_rank) if length[0] == 1: object = object[0] return object def recv_forward(prev_rank: int = None) -> Any: """Copy the forward output from the previous stage in pipeline as the input tensor of this stage. Args: input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor to be received. prev_rank (int, optional): The rank of the source of the tensor. Returns: Any: The input tensor or input tensor list. 
""" if gpc.is_pipeline_first_stage(): input_tensor = None else: if prev_rank is None: prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) input_tensor = _recv_object(prev_rank) return input_tensor def recv_backward(next_rank: int = None) -> Any: """Copy the gradient tensor from the next stage in pipeline as the input gradient of this stage. Args: output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor to be received. next_rank (int, optional): The rank of the source of the tensor. Returns: Any: The input gradient tensor or gradient tensor list. """ if gpc.is_pipeline_last_stage(): output_tensor_grad = None else: if next_rank is None: next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) output_tensor_grad = _recv_object(next_rank) return output_tensor_grad def send_forward(output_object: Any, next_rank: int = None) -> None: """Sends the input tensor to the next stage in pipeline. Args: output_object Any: Object to be sent. next_rank (int, optional): The rank of the recipient of the tensor. """ if not gpc.is_pipeline_last_stage(): if next_rank is None: next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) _send_object(output_object, next_rank) def send_backward(input_object: Any, prev_rank: int = None) -> None: """Sends the gradient tensor to the previous stage in pipeline. Args: input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent prev_rank (int, optional): The rank of the recipient of the tensor """ if not gpc.is_pipeline_first_stage(): if prev_rank is None: prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) _send_object(input_object, prev_rank)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
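The byte-level device rewrite performed by _cuda_safe_tensor_to_object can be demonstrated without a GPU: a pickled stream stores the device as a literal byte string such as b"cuda:0", so patching the single ASCII digit after "cuda:" retargets the object before unpickling. A self-contained illustration of the same offset arithmetic:

import pickle

buf = bytearray(pickle.dumps("cuda:0"))
idx = buf.find(b"cuda")
buf[idx + 5] = 48 + 1            # 48 is ASCII "0": retarget "cuda:0" -> "cuda:1"
print(pickle.loads(bytes(buf)))  # cuda:1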
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/communication/utils.py
colossalai/legacy/communication/utils.py
from typing import List, Tuple, Union import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc TensorShape = Union[torch.Size, List[int], Tuple[int]] def send_meta_helper(obj, next_rank, tensor_kwargs): send_shape = torch.tensor(obj.size(), **tensor_kwargs) send_ndims = torch.tensor(len(obj.size()), **tensor_kwargs) dist.send(send_ndims, next_rank) dist.send(send_shape, next_rank) def send_obj_meta(obj, need_meta=True, next_rank=None) -> bool: """Sends obj meta information before sending a specific obj. Since the recipient must know the shape of the obj in p2p communications, meta information of the obj should be sent before communications. This function synchronizes with :func:`recv_obj_meta`. Args: obj (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): obj to be sent. need_meta (bool, optional): If False, meta information won't be sent. next_rank (int): The rank of the next member in pipeline parallel group. Returns: bool: False """ if need_meta: if next_rank is None: next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) tensor_kwargs = {"dtype": torch.long, "device": get_accelerator().get_current_device()} if isinstance(obj, torch.Tensor): send_obj_nums = torch.tensor(1, **tensor_kwargs) dist.send(send_obj_nums, next_rank) send_meta_helper(obj, next_rank, tensor_kwargs) else: send_obj_nums = torch.tensor(len(obj), **tensor_kwargs) dist.send(send_obj_nums, next_rank) for tensor_to_send in obj: send_meta_helper(tensor_to_send, next_rank, tensor_kwargs) return False def recv_meta_helper(prev_rank, tensor_kwargs): recv_ndims = torch.empty((), **tensor_kwargs) dist.recv(recv_ndims, prev_rank) recv_shape = torch.empty(recv_ndims, **tensor_kwargs) dist.recv(recv_shape, prev_rank) return recv_shape def recv_obj_meta(obj_shape, prev_rank=None) -> torch.Size: """Receives obj meta information before receiving a specific obj. Since the recipient must know the shape of the obj in p2p communications, meta information of the obj should be received before communications. This function synchronizes with :func:`send_obj_meta`. Args: obj_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the obj to be received. prev_rank (int): The rank of the source of the obj. Returns: Union[:class:`torch.Size`, List[:class:`torch.Size`]]: The shape of the obj to be received. """ if obj_shape is None: if prev_rank is None: prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) tensor_kwargs = {"dtype": torch.long, "device": get_accelerator().get_current_device()} recv_obj_nums = torch.empty((), **tensor_kwargs) dist.recv(recv_obj_nums, prev_rank) if recv_obj_nums.item() == 1: recv_shape = recv_meta_helper(prev_rank, tensor_kwargs) obj_shape = torch.Size(recv_shape) else: obj_shape = [] for i in range(recv_obj_nums.item()): recv_shape = recv_meta_helper(prev_rank, tensor_kwargs) obj_shape.append(torch.Size(recv_shape)) return obj_shape def split_tensor_into_1d_equal_chunks(tensor: torch.Tensor, new_buffer=False) -> torch.Tensor: """Break a tensor into equal 1D chunks. Args: tensor (:class:`torch.Tensor`): Tensor to be split before communication. new_buffer (bool, optional): Whether to use a new buffer to store sliced tensor. 
Returns: :class:`torch.Tensor`: The split tensor """ partition_size = torch.numel(tensor) // gpc.get_world_size(ParallelMode.PARALLEL_1D) start_index = partition_size * gpc.get_local_rank(ParallelMode.PARALLEL_1D) end_index = start_index + partition_size if new_buffer: data = torch.empty(partition_size, dtype=tensor.dtype, device=torch.cuda.current_device(), requires_grad=False) data.copy_(tensor.view(-1)[start_index:end_index]) else: data = tensor.view(-1)[start_index:end_index] return data def gather_split_1d_tensor(tensor: torch.Tensor) -> torch.Tensor: """Opposite of above function, gather values from model parallel ranks. Args: tensor (:class:`torch.Tensor`): Tensor to be gathered after communication. Returns: :class:`torch.Tensor`: The gathered tensor. """ world_size = gpc.get_world_size(ParallelMode.PARALLEL_1D) numel = torch.numel(tensor) numel_gathered = world_size * numel gathered = torch.empty(numel_gathered, dtype=tensor.dtype, device=torch.cuda.current_device(), requires_grad=False) chunks = [gathered[i * numel : (i + 1) * numel] for i in range(world_size)] dist.all_gather(chunks, tensor, group=gpc.get_group(ParallelMode.PARALLEL_1D)) return gathered
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
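The split/gather pair above is easiest to check on one process by simulating the ranks: each rank keeps an equal 1-D slice, and concatenating all slices reconstructs the flattened tensor. A CPU analog (world size simulated locally; the real functions read rank and world size from gpc and communicate via dist.all_gather):

import torch

world_size = 4
t = torch.arange(8.0)          # 8 elements -> partition_size of 2 per "rank"
partition = t.numel() // world_size
chunks = [t.view(-1)[r * partition : (r + 1) * partition] for r in range(world_size)]
gathered = torch.cat(chunks)   # what all-gathering the local chunks yields
assert torch.equal(gathered, t.view(-1))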
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/communication/__init__.py
colossalai/legacy/communication/__init__.py
from .collective import all_gather, all_reduce, broadcast, reduce, reduce_scatter from .p2p import ( recv_backward, recv_forward, send_backward, send_backward_recv_backward, send_backward_recv_forward, send_forward, send_forward_backward_recv_forward_backward, send_forward_recv_backward, send_forward_recv_forward, ) from .ring import ring_forward from .utils import recv_obj_meta, send_obj_meta __all__ = [ "all_gather", "reduce_scatter", "all_reduce", "broadcast", "reduce", "send_forward", "send_forward_recv_forward", "send_forward_backward_recv_forward_backward", "send_backward", "send_backward_recv_backward", "send_backward_recv_forward", "send_forward_recv_backward", "recv_backward", "recv_forward", "ring_forward", "send_obj_meta", "recv_obj_meta", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/communication/ring.py
colossalai/legacy/communication/ring.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch

from colossalai.accelerator import get_accelerator
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc


def ring_forward(tensor_send_next: torch.Tensor, parallel_mode: ParallelMode) -> torch.Tensor:
    """Sends a tensor to the next member and receives a tensor from the previous member.
    This function returns the tensor received from the previous member.

    Args:
        tensor_send_next (:class:`torch.Tensor`): Tensor sent to the next member
        parallel_mode (ParallelMode): Parallel group mode used in this communication

    Returns:
        :class:`torch.Tensor`: The tensor received from the previous member.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    buffer_shape = tensor_send_next.size()

    ops = []
    current_rank = gpc.get_global_rank()

    tensor_recv_prev = torch.empty(
        buffer_shape, requires_grad=True, device=get_accelerator().get_current_device(), dtype=tensor_send_next.dtype
    )

    # send to next rank
    send_next_op = torch.distributed.P2POp(
        torch.distributed.isend, tensor_send_next, gpc.get_next_global_rank(parallel_mode)
    )
    ops.append(send_next_op)

    # receive from prev rank
    recv_prev_op = torch.distributed.P2POp(
        torch.distributed.irecv, tensor_recv_prev, gpc.get_prev_global_rank(parallel_mode)
    )
    ops.append(recv_prev_op)

    if current_rank % 2 == 0:
        ops = ops[::-1]

    reqs = torch.distributed.batch_isend_irecv(ops)
    for req in reqs:
        req.wait()

    # To protect against race condition when using batch_isend_irecv().
    get_accelerator().synchronize()

    return tensor_recv_prev
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
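The ops = ops[::-1] line above is the usual ring deadlock-avoidance trick: if every rank posted its send first, neighbors could block on each other, so even ranks post their receive first instead. The ordering logic in isolation (purely illustrative):

def op_order(rank: int):
    ops = ["send_next", "recv_prev"]
    # even ranks flip the order so each send meets a peer that is already receiving
    return ops[::-1] if rank % 2 == 0 else ops

for rank in range(4):
    print(rank, op_order(rank))
# 0 ['recv_prev', 'send_next']
# 1 ['send_next', 'recv_prev']
# 2 ['recv_prev', 'send_next']
# 3 ['send_next', 'recv_prev']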
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/communication/p2p.py
colossalai/legacy/communication/p2p.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import operator from functools import reduce from typing import List, Tuple, Union import torch import torch.distributed as dist from colossalai.accelerator import get_accelerator from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from .utils import gather_split_1d_tensor, split_tensor_into_1d_equal_chunks TensorShape = Union[torch.Size, List[int], Tuple[int]] def _get_tensor_shape(tensor_shape: TensorShape, chunk_tensor: bool = False) -> Tuple[TensorShape, bool]: """get the exact tensor shape when communicating and return whether the tensor is a chunk Args: tensor_shape (:class:`torch.Size`): shape of tensor chunk_tensor (bool, optional): whether to chunk tensor, defaults to False Returns: Tuple[Union[:class:`torch.Size`, List[int], Tuple[int]], bool]: exact tensor shape, whether to chunk tensor """ if chunk_tensor: tensor_chunk_shape = reduce(operator.mul, tensor_shape, 1) tensor_parallel_world_size = gpc.get_world_size(ParallelMode.TENSOR) if tensor_chunk_shape % tensor_parallel_world_size == 0: tensor_chunk_shape = tensor_chunk_shape // tensor_parallel_world_size else: tensor_chunk_shape = tensor_shape chunk_tensor = False else: tensor_chunk_shape = tensor_shape return tensor_chunk_shape, chunk_tensor def create_recv_buffer_with_shapes(recv_shapes, dtype, scatter_gather_tensors): if isinstance(recv_shapes, torch.Size): recv_chunk_shape, recv_split = _get_tensor_shape(recv_shapes, scatter_gather_tensors) buffer_recv = torch.empty( recv_chunk_shape, requires_grad=True, device=get_accelerator().get_current_device(), dtype=dtype ) return buffer_recv, recv_split buffer_recv = [] for recv_shape in recv_shapes: recv_chunk_shape, recv_split = _get_tensor_shape(recv_shape, scatter_gather_tensors) tensor_recv = torch.empty( recv_chunk_shape, requires_grad=True, device=get_accelerator().get_current_device(), dtype=dtype ) buffer_recv.append(tensor_recv) return buffer_recv, recv_split def process_object_to_send(object_send, scatter_gather_tensors): if isinstance(object_send, torch.Tensor): send_split = _get_tensor_shape(object_send.shape, scatter_gather_tensors)[1] if send_split: object_send = split_tensor_into_1d_equal_chunks(object_send) return object_send object_send_list = [] for tensor_send in object_send: send_split = _get_tensor_shape(tensor_send.shape, scatter_gather_tensors)[1] if send_split: object_send_list.append(split_tensor_into_1d_equal_chunks(tensor_send)) else: object_send_list.append(tensor_send) object_send = tuple(object_send_list) return object_send def filling_ops_queue(obj, comm_op, comm_rank, ops_queue): if isinstance(obj, torch.Tensor): op_to_add = dist.P2POp(comm_op, obj, comm_rank) ops_queue.append(op_to_add) else: for tensor_to_comm in obj: op_to_add = dist.P2POp(comm_op, tensor_to_comm, comm_rank) ops_queue.append(op_to_add) def _communicate( object_send_next: Union[torch.Tensor, List[torch.Tensor]] = None, object_send_prev: Union[torch.Tensor, List[torch.Tensor]] = None, recv_prev: bool = False, recv_next: bool = False, recv_prev_shape: Union[torch.Size, List[torch.Size]] = None, recv_next_shape: Union[torch.Size, List[torch.Size]] = None, prev_rank: int = None, next_rank: int = None, dtype: torch.dtype = None, scatter_gather_tensors: bool = False, ) -> Tuple[Union[torch.Tensor, List[torch.Tensor]]]: """ Adapted from megatron.p2p_communication. Communicate tensors between stages. 
Used as helper method in other communication methods that are used in pipeline schedule. Takes the following arguments: object_send_next (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor to send to next rank (no tensor sent if set to None). object_send_prev (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor to send to prev rank (no tensor sent if set to None). recv_prev (bool): boolean for whether tensor should be received from previous rank. recv_next (bool): boolean for whether tensor should be received from next rank. recv_prev_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape of the tensor to be received from the previous stage, defaults to None. recv_next_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape of the tensor to be received from the next stage, defaults to None. prev_rank (int): the rank of the previous pipeline stage, defaults to None, next_rank (int): the rank of the next pipeline stage, defaults to None, dtype (torch.dtype): data type of intermediate buffers, defaults to None scatter_gather_tensors (bool): whether to scatter and gather tensor between pipeline stages, defaults to False Returns: Tuple[Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]]: returns tensor_recv_prev, tensor_recv_next """ # Create placeholder tensors for receive in forward and backward directions # if needed. tensor_recv_prev = None tensor_recv_next = None if recv_prev: assert recv_prev_shape is not None tensor_recv_prev, recv_prev_split = create_recv_buffer_with_shapes( recv_prev_shape, dtype, scatter_gather_tensors ) if recv_next: assert recv_next_shape is not None tensor_recv_next, recv_next_split = create_recv_buffer_with_shapes( recv_next_shape, dtype, scatter_gather_tensors ) if object_send_prev is not None or recv_prev: if prev_rank is None: prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) if object_send_next is not None or recv_next: if next_rank is None: next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) if object_send_prev is not None: object_send_prev = process_object_to_send(object_send_prev, scatter_gather_tensors) if object_send_next is not None: object_send_next = process_object_to_send(object_send_next, scatter_gather_tensors) ops = [] if object_send_prev is not None: filling_ops_queue(object_send_prev, dist.isend, prev_rank, ops) if tensor_recv_prev is not None: filling_ops_queue(tensor_recv_prev, dist.irecv, prev_rank, ops) if tensor_recv_next is not None: filling_ops_queue(tensor_recv_next, dist.irecv, next_rank, ops) if object_send_next is not None: filling_ops_queue(object_send_next, dist.isend, next_rank, ops) if len(ops) > 0: reqs = dist.batch_isend_irecv(ops) for req in reqs: req.wait() # To protect against race condition when using batch_isend_irecv(). 
get_accelerator().synchronize() if recv_prev and recv_prev_split: if isinstance(tensor_recv_prev, torch.Tensor): tensor_recv_prev = gather_split_1d_tensor(tensor_recv_prev).view(recv_prev_shape).requires_grad_() else: for index in range(len(tensor_recv_prev)): tensor_recv_prev[index] = ( gather_split_1d_tensor(tensor_recv_prev[index]).view(recv_prev_shape[index]).requires_grad_() ) if recv_next and recv_next_split: if isinstance(tensor_recv_next, torch.Tensor): tensor_recv_next = gather_split_1d_tensor(tensor_recv_next).view(recv_next_shape).requires_grad_() else: for index in range(len(tensor_recv_next)): tensor_recv_next[index] = ( gather_split_1d_tensor(tensor_recv_next[index]).view(recv_next_shape[index]).requires_grad_() ) return tensor_recv_prev, tensor_recv_next def recv_forward( input_tensor_shape, prev_rank=None, dtype=torch.float, scatter_gather_tensors=False ) -> Union[torch.Tensor, List[torch.Tensor]]: """Copy the forward output from the previous stage in pipeline as the input tensor of this stage. Args: input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor to be received. prev_rank (int, optional): The rank of the source of the tensor. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input tensor or input tensor list. """ if gpc.is_pipeline_first_stage(): input_tensor = None else: input_tensor, _ = _communicate( recv_prev=True, recv_prev_shape=input_tensor_shape, prev_rank=prev_rank, dtype=dtype, scatter_gather_tensors=scatter_gather_tensors, ) return input_tensor def recv_backward( output_grad_shape, next_rank=None, dtype=torch.float, scatter_gather_tensors=False ) -> Union[torch.Tensor, List[torch.Tensor]]: """Copy the gradient tensor from the next stage in pipeline as the input gradient of this stage. Args: output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor to be received. next_rank (int, optional): The rank of the source of the tensor. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor or gradient tensor list. """ if gpc.is_pipeline_last_stage(): output_tensor_grad = None else: _, output_tensor_grad = _communicate( recv_next=True, recv_next_shape=output_grad_shape, next_rank=next_rank, dtype=dtype, scatter_gather_tensors=scatter_gather_tensors, ) return output_tensor_grad def send_forward(output_tensor, next_rank=None, scatter_gather_tensors=False) -> None: """Sends the input tensor to the next stage in pipeline. Args: output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. next_rank (int, optional): The rank of the recipient of the tensor. """ if not gpc.is_pipeline_last_stage(): _communicate(object_send_next=output_tensor, next_rank=next_rank, scatter_gather_tensors=scatter_gather_tensors) def send_backward(input_tensor_grad, prev_rank=None, scatter_gather_tensors=False) -> None: """Sends the gradient tensor to the previous stage in pipeline. 
Args: input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent prev_rank (int, optional): The rank of the recipient of the tensor """ if not gpc.is_pipeline_first_stage(): _communicate( object_send_prev=input_tensor_grad, prev_rank=prev_rank, scatter_gather_tensors=scatter_gather_tensors ) def send_forward_recv_backward( output_tensor, output_grad_shape, recv_next=True, next_rank=None, dtype=torch.float, scatter_gather_tensors=False ) -> Union[torch.Tensor, List[torch.Tensor]]: """Batched communication operation. Sends the input tensor to the next stage in pipeline, while receives the gradient tensor from the next stage in pipeline as the input gradient tensor of this stage. Args: output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor to be received. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor. """ if gpc.is_pipeline_last_stage(): output_tensor_grad = None else: _, output_tensor_grad = _communicate( object_send_next=output_tensor, recv_next=recv_next, recv_next_shape=output_grad_shape, next_rank=next_rank, dtype=dtype, scatter_gather_tensors=scatter_gather_tensors, ) return output_tensor_grad def send_backward_recv_forward( input_tensor_grad, input_tensor_shape, recv_prev=True, prev_rank=None, dtype=torch.float, scatter_gather_tensors=False, ) -> Union[torch.Tensor, List[torch.Tensor]]: """Batched communication operation. Sends the gradient tensor to the previous stage in pipeline, while receives the output tensor from the previous stage in pipeline as the input of this stage. Args: input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor to be received. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input tensor. """ if gpc.is_pipeline_first_stage(): input_tensor = None else: input_tensor, _ = _communicate( object_send_prev=input_tensor_grad, recv_prev=recv_prev, recv_prev_shape=input_tensor_shape, prev_rank=prev_rank, dtype=dtype, scatter_gather_tensors=scatter_gather_tensors, ) return input_tensor def send_forward_recv_forward( output_tensor, input_tensor_shape, recv_prev=True, prev_rank=None, next_rank=None, dtype=torch.float, scatter_gather_tensors=False, ) -> Union[torch.Tensor, List[torch.Tensor]]: """Batched communication operation. Sends the input tensor to the next stage in pipeline, while receives the output tensor from the previous stage in pipeline as the input of this stage. Args: output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor to be received. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input tensor. """ input_tensor, _ = _communicate( object_send_next=output_tensor, recv_prev=recv_prev, recv_prev_shape=input_tensor_shape, prev_rank=prev_rank, next_rank=next_rank, dtype=dtype, scatter_gather_tensors=scatter_gather_tensors, ) return input_tensor def send_backward_recv_backward( input_tensor_grad, output_grad_shape, recv_next=True, prev_rank=None, next_rank=None, dtype=torch.float, scatter_gather_tensors=False, ) -> Union[torch.Tensor, List[torch.Tensor]]: """Batched communication operation. 
Sends the gradient tensor to the previous stage in pipeline, while receives the gradient tensor from the next member in pipeline as the input of this stage. Args: input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor to be received. Returns: Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor. """ _, output_tensor_grad = _communicate( object_send_prev=input_tensor_grad, recv_next=recv_next, recv_next_shape=output_grad_shape, prev_rank=prev_rank, next_rank=next_rank, dtype=dtype, scatter_gather_tensors=scatter_gather_tensors, ) return output_tensor_grad def send_forward_backward_recv_forward_backward( output_tensor, input_tensor_grad, input_tensor_shape, output_grad_shape, recv_prev=True, recv_next=True, prev_rank=None, next_rank=None, dtype=torch.float, scatter_gather_tensors=False, ) -> Tuple[Union[torch.Tensor, List[torch.Tensor]]]: """Batched communication operation. Sends the input tensor to the next stage in pipeline and the gradient tensor to the previous stage, while receives the input gradient tensor from the next stage and the input tensor from the previous stage. Args: output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor sent to the next. input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor sent to the previous. input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor received from the previous. output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor received from the next. Returns: Tuple(Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]], Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): (the input tensor, the input gradient tensor) """ input_tensor, output_tensor_grad = _communicate( object_send_next=output_tensor, object_send_prev=input_tensor_grad, recv_prev=recv_prev, recv_next=recv_next, recv_prev_shape=input_tensor_shape, recv_next_shape=output_grad_shape, prev_rank=prev_rank, next_rank=next_rank, dtype=dtype, scatter_gather_tensors=scatter_gather_tensors, ) return input_tensor, output_tensor_grad
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
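The batched p2p helpers above compose naturally into a pipeline schedule. Below is a minimal, hedged sketch of one step on an intermediate stage; it assumes a launched legacy pipeline-parallel context, and that recv_forward and send_backward are exported from the same communication package. Shapes and the dtype are purely illustrative.

# Illustrative only: one pipeline step on an intermediate stage, assuming
# colossalai.legacy has been initialized with a pipeline-parallel config and
# that recv_forward/send_backward are exported next to the helpers above.
import torch

from colossalai.legacy.communication import recv_forward, send_backward, send_forward_recv_backward


def intermediate_stage_step(stage_module, input_shape, output_grad_shape):
    # Receive activations from the previous stage and track gradients locally.
    input_tensor = recv_forward(input_shape, dtype=torch.float)
    input_tensor.requires_grad_()

    output_tensor = stage_module(input_tensor)

    # One batched call: ship activations to the next stage and receive the
    # gradient of our output from that same stage.
    output_tensor_grad = send_forward_recv_backward(output_tensor, output_grad_shape)

    # Backprop through this stage, then pass the input gradient upstream.
    torch.autograd.backward(output_tensor, grad_tensors=output_tensor_grad)
    send_backward(input_tensor.grad)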
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/communication/collective.py
colossalai/legacy/communication/collective.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch
import torch.distributed as dist
from torch import Tensor
from torch.distributed import ReduceOp

from colossalai.legacy.context import ParallelMode
from colossalai.legacy.core import global_context as gpc

_all_gather_func = dist._all_gather_base if "all_gather_into_tensor" not in dir(dist) else dist.all_gather_into_tensor
_reduce_scatter_func = (
    dist._reduce_scatter_base if "reduce_scatter_tensor" not in dir(dist) else dist.reduce_scatter_tensor
)


def all_gather(tensor: Tensor, dim: int, parallel_mode: ParallelMode, async_op: bool = False) -> Tensor:
    r"""Gathers all tensors from the parallel group and concatenates them in a specific dimension.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.

    Args:
        tensor (:class:`torch.Tensor`): Tensor to be gathered.
        dim (int): The dimension to concatenate along.
        parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): Parallel group mode used in this communication.
        async_op (bool, optional): Whether operations are asynchronous.

    Returns:
        Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-gather only,
        if async_op is set to False.
        A tuple of the output of all-gather and an async work handle, if async_op is set to True.
    """
    depth = gpc.get_world_size(parallel_mode)
    if depth == 1:
        out = tensor
        work = None
    else:
        tensor_in = tensor.contiguous() if dim == 0 else tensor.transpose(0, dim).contiguous()
        out_shape = (tensor_in.shape[0] * depth,) + tensor_in.shape[1:]
        tensor_out = torch.empty(out_shape, dtype=tensor.dtype, device=tensor.device)
        group = gpc.get_cpu_group(parallel_mode) if tensor.device.type == "cpu" else gpc.get_group(parallel_mode)
        work = _all_gather_func(tensor_out, tensor_in, group=group, async_op=async_op)
        out = tensor_out if dim == 0 else tensor_out.transpose(0, dim)
    if async_op:
        return out, work
    else:
        return out


def reduce_scatter(
    tensor: Tensor, dim: int, parallel_mode: ParallelMode, op: ReduceOp = ReduceOp.SUM, async_op: bool = False
) -> Tensor:
    r"""Reduces all tensors then scatters the result in a specific dimension to all members in the parallel group.

    Note:
        The parallel_mode should be included in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.

    Args:
        tensor (:class:`torch.Tensor`): Tensor to be reduce-scattered.
        dim (int): The dimension to scatter along.
        parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): Parallel group mode used in this communication.
        op (torch.distributed.ReduceOp, optional): The type of reduce operation,
            should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR].
            More details about ReduceOp please refer to
            `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_.
        async_op (bool, optional): Whether operations are asynchronous.

    Returns:
        Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce-scatter only,
        if async_op is set to False.
        A tuple of the output of reduce-scatter and an async work handle, if async_op is set to True.
""" depth = gpc.get_world_size(parallel_mode) if depth == 1: out = tensor work = None else: tensor_in = tensor.contiguous() if dim == 0 else tensor.transpose(0, dim).contiguous() out_shape = (tensor_in.shape[0] // depth,) + tensor_in.shape[1:] tensor_out = torch.empty(out_shape, dtype=tensor.dtype, device=tensor.device) group = gpc.get_cpu_group(parallel_mode) if tensor.device.type == "cpu" else gpc.get_group(parallel_mode) work = _reduce_scatter_func(tensor_out, tensor_in, op=op, group=group, async_op=async_op) out = tensor_out if dim == 0 else tensor_out.transpose(0, dim) if async_op: return out, work else: return out def all_reduce( tensor: Tensor, parallel_mode: ParallelMode, op: ReduceOp = ReduceOp.SUM, async_op: bool = False ) -> Tensor: r"""Reduces the tensor data across whole parallel group in such a way that all get the final result. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. Args: tensor (:class:`torch.Tensor`): Tensor to be all-reduced. parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): Parallel group mode used in this communication. op (torch.distributed.ReduceOp, optional): The type of reduce operation, should be included in [SUM, AVG, PRODUCT, MIN, MAX, BAND, BOR, BXOR]. More details about ReduceOp please refer to `ReduceOp <https://pytorch.org/docs/stable/distributed.html#torch.distributed.ReduceOp>`_. async_op (bool, optional): Whether operations are asynchronous. Returns: Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of all-gather only, if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True. """ depth = gpc.get_world_size(parallel_mode) if depth == 1: out = tensor work = None else: out = tensor.contiguous() group = gpc.get_cpu_group(parallel_mode) if tensor.device.type == "cpu" else gpc.get_group(parallel_mode) work = dist.all_reduce(out, op=op, group=group, async_op=async_op) if async_op: return out, work else: return out def broadcast(tensor: Tensor, src: int, parallel_mode: ParallelMode, async_op: bool = False): r"""Broadcast tensors to whole parallel group. Tensor must have the same number of elements in all processes participating in the collective. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. Args: tensor (:class:`torch.Tensor`): Tensor to be broadcast. src (int): Source rank. parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): Parallel group mode used in this communication. async_op (bool, optional): Whether operations are asynchronous. Returns: Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The tensor need to be broadcast only, if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True. 
""" depth = gpc.get_world_size(parallel_mode) if depth == 1: out = tensor work = None else: out = tensor.contiguous() group = gpc.get_cpu_group(parallel_mode) if tensor.device.type == "cpu" else gpc.get_group(parallel_mode) work = dist.broadcast(out, src=src, group=group, async_op=async_op) if async_op: return out, work else: return out def reduce(tensor: Tensor, dst: int, parallel_mode: ParallelMode, op: ReduceOp = ReduceOp.SUM, async_op: bool = False): r"""Reduce tensors across whole parallel group. Only the process with rank ``dst`` is going to receive the final result. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. Args: tensor (:class:`torch.Tensor`): Tensor to be reduced. dst (int): Destination rank. parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): Parallel group mode used in this communication. async_op (bool, optional): Whether operations are asynchronous. Returns: Union[tuple(:class:`torch.Tensor`, work handle), :class:`torch.Tensor`]: The result of reduce only, if async_op is set to False. A tuple of output of all-gather and Async work handle, if async_op is set to True. """ depth = gpc.get_world_size(parallel_mode) if depth == 1: out = tensor work = None else: out = tensor.contiguous() group = gpc.get_cpu_group(parallel_mode) if tensor.device.type == "cpu" else gpc.get_group(parallel_mode) work = dist.reduce(out, dst=dst, op=op, group=group, async_op=async_op) if async_op: return out, work else: return out def scatter_object_list(scatter_object_output_list, scatter_object_input_list, src=0, group=None) -> None: r"""Modified from `torch.distributed.scatter_object_list <https://pytorch.org/docs/stable/_modules/torch/distributed/distributed_c10d.html#scatter_object_list>` to fix issues """ if dist.distributed_c10d._rank_not_in_group(group): return if not isinstance(scatter_object_output_list, list) or len(scatter_object_output_list) < 1: raise RuntimeError("Expected argument scatter_object_output_list to be a list of size at least 1.") # set tensor device to cuda if backend is nccl device = torch.cuda.current_device() if dist.get_backend(group) == "nccl" else torch.device("cpu") my_rank = dist.get_rank() # use global rank if my_rank == src: tensor_list, tensor_sizes = zip( *[dist.distributed_c10d._object_to_tensor(obj) for obj in scatter_object_input_list] ) tensor_list = list(map(lambda x: x.to(device), tensor_list)) tensor_sizes = list(map(lambda x: x.to(device), tensor_sizes)) # Src rank broadcasts the maximum tensor size. This is because all ranks are # expected to call into scatter() with equal-sized tensors. 
    if my_rank == src:
        max_tensor_size = max(tensor_sizes)
        for tensor in tensor_list:
            tensor.resize_(max_tensor_size)
    else:
        max_tensor_size = torch.tensor([0], dtype=torch.long).to(device)
    dist.broadcast(max_tensor_size, src=src, group=group)

    # Scatter actual serialized objects
    output_tensor = torch.empty(max_tensor_size.item(), dtype=torch.uint8).to(device)
    dist.scatter(
        output_tensor,
        scatter_list=None if my_rank != src else tensor_list,
        src=src,
        group=group,
    )

    # Scatter per-object sizes to trim tensors when deserializing back to object
    obj_tensor_size = torch.tensor([0], dtype=torch.long).to(device)
    dist.scatter(
        obj_tensor_size,
        scatter_list=None if my_rank != src else tensor_sizes,
        src=src,
        group=group,
    )

    output_tensor, obj_tensor_size = output_tensor.cpu(), obj_tensor_size.cpu()

    # Deserialize back to object
    scatter_object_output_list[0] = dist.distributed_c10d._tensor_to_object(output_tensor, obj_tensor_size)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
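A minimal round-trip sketch of the collectives above, assuming a launched legacy context in which ParallelMode.DATA spans more than one rank and that these helpers are exported from colossalai.legacy.communication:

import torch

from colossalai.legacy.communication import all_gather, all_reduce, reduce_scatter
from colossalai.legacy.context import ParallelMode


def round_trip(shard: torch.Tensor) -> torch.Tensor:
    # Concatenate every rank's shard along dim 0: [n, ...] -> [n * world, ...].
    full = all_gather(shard, dim=0, parallel_mode=ParallelMode.DATA)

    # Sum across ranks and hand each rank an equal slice back: -> [n, ...].
    reduced_shard = reduce_scatter(full, dim=0, parallel_mode=ParallelMode.DATA)

    # Every rank ends up with the same summed tensor.
    return all_reduce(reduced_shard, parallel_mode=ParallelMode.DATA)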
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/_trainer.py
colossalai/legacy/trainer/_trainer.py
from typing import Any, List, Union

import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

from colossalai.legacy.engine import Engine
from colossalai.legacy.trainer.hooks import BaseHook
from colossalai.legacy.utils import is_dp_rank_0, is_no_pp_or_last_stage, is_tp_rank_0
from colossalai.logging import DistributedLogger
from colossalai.utils import MultiTimer


class Trainer:
    r"""A class intended to make users' training and evaluation easy to deploy,
    so that they do not have to write their own training loops. It is similar to
    ``ignite.engine`` and ``keras.engine``, but is called `Trainer`.

    Args:
        engine (:class:`Engine`): Engine responsible for the process function.
        timer (:class:`MultiTimer`, optional): Timer used to monitor the whole training.
        logger (:class:`colossalai.logging.DistributedLogger`, optional): Logger used to record the whole training log.

    Examples:
        >>> # define model, criterion, optimizer, lr_scheduler, train_dataloader for your training
        >>> model = ...
        >>> criterion = ...
        >>> optimizer = ...
        >>> train_dataloader = ...
        >>> # Initialize your engine, train_dataloader, test_dataloader, lr_scheduler
        >>> engine, train_dataloader, _, _ = colossalai.initialize(model, optimizer, criterion)
        >>> # Begin the training process
        >>> timer = ...
        >>> logger = ...
        >>> trainer = Trainer(engine=engine, logger=logger, timer=timer)
        >>> # add hooks you would like to use here.
        >>> hook_list = []
        >>> trainer.fit(
        >>>     train_dataloader=train_dataloader,
        >>>     epochs=gpc.config.NUM_EPOCHS,
        >>>     test_interval=1,
        >>>     hooks=hook_list,
        >>>     display_progress=True,
        >>>     return_output_label=False
        >>> )

    More examples and details can be found in
    `Training with engine and trainer <https://www.colossalai.org/docs/basics/engine_trainer>`_
    and `ColossalAI-Examples <https://github.com/hpcaitech/ColossalAI-Examples/tree/main>`_.
    """

    def __init__(
        self,
        engine: Engine,
        timer: MultiTimer = None,
        logger: DistributedLogger = None,
    ):
        # training-related params
        self._engine = engine
        self._max_epochs = 0
        self._cur_epoch = 0
        self._max_steps = 0
        self._cur_step = 0
        self._steps_per_epoch = 0

        # misc params
        self._logger = logger
        self._verbose = logger is not None

        # hooks can store states in this dict, and could be consumed by other hooks
        self.states = dict()

        # build hooks
        self.hooks = list()

        # multi-timer for time benchmarking
        self._timer = timer

    @property
    def cur_epoch(self):
        """Returns the index of the current epoch."""
        return self._cur_epoch

    @cur_epoch.setter
    def cur_epoch(self, epoch: int):
        """Set how many epochs have been processed."""
        # allow setter for training resumption
        self._cur_epoch = epoch

    @property
    def cur_step(self):
        """Returns how many iteration steps have been processed."""
        return self._cur_step

    @property
    def max_epochs(self):
        return self._max_epochs

    @property
    def max_steps(self):
        return self._max_steps

    @property
    def steps_per_epoch(self):
        return self._steps_per_epoch

    @property
    def engine(self):
        return self._engine

    def _set_current_step(self, epoch: int):
        """Sets current step number.

        Args:
            epoch (int): Epoch number from which the current step is derived.
        """
        self._cur_step = epoch * self._steps_per_epoch

    def _call_timer(self, action: str, item: str, *args, **kwargs) -> None:
        """Call timer function with a given timer name.

        Args:
            action (str): Function to be called on timer.
            item (str): Name of the timer.
            args (list): args used for action function.
            kwargs (dict): kwargs used for action function.
""" if self._timer is not None: getattr(self._timer, action)(item, *args, **kwargs) def _reset_states(self) -> None: """Clear trainer states""" self.states = dict() def _call_hooks(self, func, output=None): """Calls specific hooks in the current time point. Args: func (str): A string represents the time point. output (Any, optional): Output of the model after running an iteration or None in any other time points. """ # Only after iter hook will receive output for hook in self.hooks: if output is None: getattr(hook, func)(self) else: getattr(hook, func)(self, *output) @staticmethod def _should_display_progress(display_progress: bool): """Only display progress on DP rank 0, TP rank 0 and PP last rank""" return display_progress and is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage() def _train_epoch( self, train_dataloader: DataLoader, epoch: int = None, display_progress: bool = False, return_output_label: bool = True, ): # set training state self._engine.train() data_iter = iter(train_dataloader) progress = range(self._steps_per_epoch) if display_progress: if epoch is None: progress = tqdm(progress, desc="[Train]") else: progress = tqdm(progress, desc=f"[Epoch {epoch} / Train]") self._call_hooks("before_train_epoch") self._call_timer(action="start", item="Train-epoch") for i in progress: self._call_hooks("before_train_iter") self._call_timer(action="start", item="Train-step") # run 1 training step self.engine.zero_grad() logits, label, loss = self.engine.execute_schedule( data_iter, forward_only=False, return_loss=True, return_output_label=return_output_label, ) self.engine.step() self._call_timer(action="stop", item="Train-step", keep_in_history=True) self._call_hooks("after_train_iter", output=(logits, label, loss)) self._cur_step += 1 if display_progress: if "step_metrics" in self.states: progress.set_postfix(**self.states["step_metrics"]) # stop when max iter is reached if self._exceed_max_step(): break self._call_timer(action="stop", item="Train-epoch", keep_in_history=True) self._call_hooks("after_train_epoch") self._call_timer(action="reset", item="Train-epoch") def _eval( self, test_dataloader: DataLoader, epoch: int = None, display_progress: bool = False, return_output_label: bool = True, ): # switch engine status self._engine.eval() data_iter = iter(test_dataloader) num_steps = len(test_dataloader) self._call_hooks("before_test") # prepare progress bar progress = range(num_steps) if display_progress: desc = "Evaluation" if epoch is not None: desc = "[Epoch %d / Test]" % epoch progress = tqdm(progress, desc=desc) self._call_hooks("before_test_epoch") self._call_timer(action="start", item="Test-epoch") with torch.no_grad(): for _ in progress: self._call_hooks("before_test_iter") self._call_timer(action="start", item="Test-step") logits, label, loss = self.engine.execute_schedule( data_iter, forward_only=True, return_loss=True, return_output_label=return_output_label, ) self._call_timer(action="stop", item="Test-step", keep_in_history=True) self._call_hooks("after_test_iter", output=(logits, label, loss)) if display_progress: if "step_metrics" in self.states: progress.set_postfix(**self.states["step_metrics"]) self._call_timer(action="stop", item="Test-epoch", keep_in_history=True) self._call_hooks("after_test_epoch") self._call_hooks("after_test") self._call_timer(action="reset", item="Test-step") self._call_timer(action="reset", item="Test-epoch") def _exceed_max_step(self): return self._max_steps is not None and self._cur_step >= self._max_steps def fit( self, 
        train_dataloader: DataLoader,
        epochs: int,
        max_steps: int = None,
        test_dataloader: DataLoader = None,
        test_interval: int = 1,
        hooks: List[BaseHook] = None,
        display_progress: bool = False,
        return_output_label: bool = True,
    ):
        r"""Trains the model to fit training data.

        Args:
            train_dataloader (:class:`torch.utils.data.DataLoader`): DataLoader for training.
            epochs (int): Maximum number of epochs.
            max_steps (int, optional): Maximum number of running iterations.
            test_dataloader (:class:`torch.utils.data.DataLoader`, optional): DataLoader for validation.
            test_interval (int, optional): Interval of validation.
            hooks (list[BaseHook], optional): A list of hooks used in training.
            display_progress (bool, optional): If True, a progress bar will be displayed.
            return_output_label (bool, optional): If True, the output of model and the label will be returned.
        """
        # set epochs and steps, consider gradient accumulation
        self._steps_per_epoch = len(train_dataloader)
        self._max_steps = max_steps
        self._max_epochs = epochs

        # check if testing is required
        should_test = False
        if test_dataloader is not None:
            should_test = True

        display_progress = self._should_display_progress(display_progress)

        # reset hooks
        self._reset_states()
        if hooks is not None:
            assert isinstance(hooks, list), f"expected argument hooks to be a list, but got {type(hooks)}"
            for hook in hooks:
                assert isinstance(hook, BaseHook), f"expected the hook to be of type BaseHook, but got {type(hook)}"
        else:
            hooks = []
        self.hooks = hooks
        self.hooks.sort(key=lambda hook: hook.priority)
        if self._verbose:
            for hook in self.hooks:
                self._logger.info(
                    f"Using {hook.__class__.__name__} for training, priority = {hook.priority}",
                    ranks=[0],
                )
            self._logger.info("Lower value means higher priority for calling hook function", ranks=[0])
        self._call_hooks("after_hook_is_attached")

        self._engine.train()
        self._call_hooks("before_train")

        # recover step value if resuming training
        last_epoch = self._cur_epoch
        if self.cur_epoch != 0:
            self._set_current_step(last_epoch)

        for epoch in range(last_epoch, epochs):
            # train for one epoch
            self._train_epoch(
                train_dataloader=train_dataloader,
                epoch=epoch,
                display_progress=display_progress,
                return_output_label=return_output_label,
            )

            # start eval
            if should_test and epoch % test_interval == 0:
                self._eval(
                    test_dataloader=test_dataloader,
                    display_progress=display_progress,
                    epoch=epoch,
                    return_output_label=return_output_label,
                )

            self._cur_epoch += 1

            # check for termination
            if self._exceed_max_step():
                self._logger.info(
                    f"Max number of steps {max_steps} has been reached, training is stopped automatically",
                    ranks=[0],
                )
                break
        self._call_hooks("after_train")
        self._call_timer("reset", "Train-epoch")

    def evaluate(
        self,
        test_dataloader: DataLoader,
        hooks: List[BaseHook] = None,
        display_progress: bool = False,
        return_output_label: bool = True,
    ):
        """Evaluates the model with testing data.

        Args:
            test_dataloader (:class:`torch.utils.data.DataLoader`, optional): Dataloader for testing.
            hooks (list, optional): A list of hooks used in evaluation. Defaults to None.
            display_progress (bool, optional): If True, the evaluation progress will be printed. Defaults to False.
            return_output_label (bool, optional): If True, the output of model and the label
                will be returned. Defaults to True.
""" # set display display_progress = self._should_display_progress(display_progress) # reset hooks self._reset_states() if hooks is not None: assert isinstance(hooks, list), f"expected argument hooks be to list, but got {type(hooks)}" else: hooks = [] self.hooks = hooks self.hooks.sort(key=lambda hook: hook.priority) if self._verbose: for hook in self.hooks: self._logger.info( f"Using {hook.__class__.__name__} for training, priority = {hook.priority}", ranks=[0], ) self._logger.info("Lower value means higher priority for calling hook function", ranks=[0]) self._call_hooks("after_hook_is_attached") # eval self._eval( test_dataloader=test_dataloader, display_progress=display_progress, return_output_label=return_output_label, ) def predict(self, data: Union[Any, List[Any]]): """Uses trained model to make a prediction for a tensor or a tensor list. Args: data (Union[:class:`torch.tensor`, List[:class:`torch.tensor`]]): Data as the input. Returns: :class:`torch.tensor`: The output of model as the prediction """ # predict without labels self._engine.eval() # prepare a list of (data, label) to make it iterable # for compatibility with schedule simple_dataloader = [(data, None)] data_iter = iter(simple_dataloader) output, _, _ = self.engine.execute_schedule(data_iter, forward_only=True, return_loss=False) return output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
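Beyond the fit() example in the class docstring, evaluate() and predict() can be driven the same way. A hedged sketch, assuming engine, logger and test_dataloader come from the usual colossalai.initialize() setup and that the dataloader yields (data, label) pairs:

from colossalai.legacy.trainer import Trainer
from colossalai.legacy.trainer.hooks import LossHook

# `engine`, `logger` and `test_dataloader` are assumed to exist already.
trainer = Trainer(engine=engine, logger=logger)

# Runs one pass over the test set with the registered hooks.
trainer.evaluate(test_dataloader=test_dataloader, hooks=[LossHook()], display_progress=True)

# predict() wraps a raw batch as (data, None) and runs a forward-only schedule.
batch, _ = next(iter(test_dataloader))
logits = trainer.predict(batch)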
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/__init__.py
colossalai/legacy/trainer/__init__.py
from ._trainer import Trainer

__all__ = ["Trainer"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/hooks/_log_hook.py
colossalai/legacy/trainer/hooks/_log_hook.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import os import os.path as osp from typing import List from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.registry import HOOKS from colossalai.legacy.trainer.hooks._metric_hook import ThroughputMetric from colossalai.legacy.utils import is_dp_rank_0, is_no_pp_or_last_stage, is_tp_rank_0, report_memory_usage from colossalai.logging import DistributedLogger from colossalai.utils import MultiTimer from ._base_hook import BaseHook from ._commons_ import _format_number class LogByEpochHook(BaseHook): """Hook to log by epoch. Args: logger (:class:`colossalai.logging.DistributedLogger`): Logger for recording the log information. interval (int, optional): Interval of printing log information, defaults to 1. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front, defaults to 1. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. """ def __init__(self, logger, interval: int = 1, priority: int = 1): super().__init__(priority) self.logger = logger self._interval = interval def _is_epoch_to_log(self, trainer): return trainer.cur_epoch % self._interval == 0 @HOOKS.register_module class LogMetricByStepHook(BaseHook): """Hook to log metric by step. Args: priority (int, optional): Priority in the printing, hooks with small priority will be printed in front, defaults to 10. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. """ def __init__(self, priority: int = 10): super().__init__(priority) def after_train_iter(self, trainer, *args): trainer.states["step_metrics"] = dict() for metric_name, metric_calculator in trainer.states["metrics"]["train"].items(): if isinstance(metric_calculator, ThroughputMetric): trainer.states["step_metrics"][metric_name.lower()] = metric_calculator.get_last_step_info() else: trainer.states["step_metrics"][metric_name.lower()] = metric_calculator.get_last_step_value() def after_test_iter(self, trainer, *args): trainer.states["step_metrics"] = dict() for metric_name, metric_calculator in trainer.states["metrics"]["test"].items(): if isinstance(metric_calculator, ThroughputMetric): trainer.states["step_metrics"][metric_name.lower()] = metric_calculator.get_last_step_info() else: trainer.states["step_metrics"][metric_name.lower()] = metric_calculator.get_last_step_value() @HOOKS.register_module class LogMetricByEpochHook(LogByEpochHook): """Specialized hook to record the metric to log. Args: logger (:class:`colossalai.logging.DistributedLogger`): Logger for recording the log information. interval (int, optional): Interval of printing log information, defaults to 1. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front, defaults to 10. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. 
""" def __init__(self, logger, interval: int = 1, priority: int = 10) -> None: super().__init__(logger, interval, priority) self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage() def _get_str(self, trainer, mode): msg = [] for metric_name, metric_calculator in trainer.states["metrics"][mode].items(): msg.append(f"{metric_name} = {_format_number(metric_calculator.get_accumulated_value())}") msg = " | ".join(msg) return msg def after_train_epoch(self, trainer): if self._is_epoch_to_log(trainer): msg = self._get_str(trainer=trainer, mode="train") if self._is_rank_to_log: self.logger.info(f"[Epoch {trainer.cur_epoch} / Train]: {msg}") # f'Training - Epoch {trainer.cur_epoch} - {self.__class__.__name__}: {msg}') def after_test_epoch(self, trainer): if self._is_epoch_to_log(trainer): msg = self._get_str(trainer=trainer, mode="test") if self._is_rank_to_log: self.logger.info(f"[Epoch {trainer.cur_epoch} / Test]: {msg}") # f'Testing - Epoch {trainer.cur_epoch} - {self.__class__.__name__}: {msg}') @HOOKS.register_module class TensorboardHook(BaseHook): """Specialized hook to record the metric to Tensorboard. Args: log_dir (str): Directory of log. ranks (list): Ranks of processors. parallel_mode (:class:`colossalai.legacy.context.parallel_mode.ParallelMode`, optional): Parallel mode used in trainer, defaults to colossalai.legacy.context.parallel_mode.ParallelMode.GLOBAL. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front, defaults to 10. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. """ def __init__( self, log_dir: str, ranks: List = None, parallel_mode: ParallelMode = ParallelMode.GLOBAL, priority: int = 10, ) -> None: super().__init__(priority=priority) from torch.utils.tensorboard import SummaryWriter # create log dir if not gpc.is_initialized(ParallelMode.GLOBAL) or gpc.get_global_rank() == 0: os.makedirs(log_dir, exist_ok=True) # determine the ranks to generate tensorboard logs self._is_valid_rank_to_log = False if not gpc.is_initialized(parallel_mode): self._is_valid_rank_to_log = True else: local_rank = gpc.get_local_rank(parallel_mode) if ranks is None or local_rank in ranks: self._is_valid_rank_to_log = True # check for if ( gpc.is_initialized(ParallelMode.PIPELINE) and not gpc.is_last_rank(ParallelMode.PIPELINE) and self._is_valid_rank_to_log ): raise ValueError("Tensorboard hook can only log on the last rank of pipeline process group") if self._is_valid_rank_to_log: # create workspace on only one rank if gpc.is_initialized(parallel_mode): rank = gpc.get_local_rank(parallel_mode) else: rank = 0 # create workspace log_dir = osp.join(log_dir, f"{parallel_mode}_rank_{rank}") os.makedirs(log_dir, exist_ok=True) self.writer = SummaryWriter(log_dir=log_dir, filename_suffix=f"_rank_{rank}") def _log_by_iter(self, trainer, mode: str): for metric_name, metric_calculator in trainer.states["metrics"][mode].items(): if metric_calculator.epoch_only: continue val = metric_calculator.get_last_step_value() if self._is_valid_rank_to_log: self.writer.add_scalar(f"{metric_name}/{mode}", val, trainer.cur_step) def _log_by_epoch(self, trainer, mode: str): for metric_name, metric_calculator in trainer.states["metrics"][mode].items(): if metric_calculator.epoch_only: val = metric_calculator.get_accumulated_value() if self._is_valid_rank_to_log: self.writer.add_scalar(f"{metric_name}/{mode}", val, trainer.cur_step) def after_test_iter(self, trainer, *args): 
self._log_by_iter(trainer, mode="test") def after_test_epoch(self, trainer): self._log_by_epoch(trainer, mode="test") def after_train_iter(self, trainer, *args): self._log_by_iter(trainer, mode="train") def after_train_epoch(self, trainer): self._log_by_epoch(trainer, mode="train") @HOOKS.register_module class LogTimingByEpochHook(LogByEpochHook): """Specialized hook to write timing record to log. Args: timer (:class:`colossalai.utils.MultiTimer`): Timer for the hook. logger (:class:`colossalai.logging.DistributedLogger`): Logger for recording the log information. interval (int, optional): Interval of printing log information, defaults to 1. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front defaults to 10. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. log_eval (bool, optional): Whether writes in evaluation, defaults to True. ignore_num_train_steps (int, optional): Number of training steps to ignore, defaults to 0. """ def __init__( self, timer: MultiTimer, logger: DistributedLogger, interval: int = 1, priority: int = 10, log_eval: bool = True, ignore_num_train_steps: int = 0, ) -> None: super().__init__(logger=logger, interval=interval, priority=priority) self._timer = timer self._log_eval = log_eval self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() and is_no_pp_or_last_stage() # extra handling to avoid the unstable readings of the first # few training steps to affect the history mean time self._ignore_num_train_steps = ignore_num_train_steps self._is_train_step_history_trimmed = False def _get_message(self, mode): msg = [] for timer_name, timer in self._timer: if timer_name.startswith(mode): last_elapsed_time = timer.get_elapsed_time() if timer.has_history: if timer_name == "Train-step" and not self._is_train_step_history_trimmed: timer._history = timer._history[self._ignore_num_train_steps :] self._is_train_step_history_trimmed = True history_mean = timer.get_history_mean() timer.get_history_sum() msg.append( f"{timer_name}: last = {_format_number(last_elapsed_time)} s, mean = {_format_number(history_mean)} s" ) else: msg.append(f"{timer_name}: last = {_format_number(last_elapsed_time)} s") msg = " | ".join(msg) return msg def after_train_epoch(self, trainer): """Writes log after finishing a training epoch.""" if self._is_epoch_to_log(trainer) and self._is_rank_to_log: msg = self._get_message("Train") self.logger.info(f"[Epoch {trainer.cur_epoch} / Train]: {msg} | #steps/epoch = {trainer.steps_per_epoch}") def after_test_epoch(self, trainer): """Writes log after finishing a testing epoch.""" if self._is_epoch_to_log(trainer) and self._is_rank_to_log and self._log_eval: msg = self._get_message("Test") self.logger.info(f"[Epoch {trainer.cur_epoch} / Test]: {msg}") @HOOKS.register_module class LogMemoryByEpochHook(LogByEpochHook): """Specialized Hook to write memory usage record to log. Args: logger (:class:`colossalai.logging.DistributedLogger`): Logger for recording the log information. interval (int, optional): Interval of printing log information, defaults to 1. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front defaults to 1. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. log_eval (bool, optional): Whether writes in evaluation, defaults to True. 
""" def __init__( self, logger: DistributedLogger, interval: int = 1, priority: int = 10, log_eval: bool = True, report_cpu: bool = False, # no reference ) -> None: super().__init__(logger=logger, interval=interval, priority=priority) self._log_eval = log_eval self._is_rank_to_log = is_dp_rank_0() and is_tp_rank_0() def before_train(self, trainer): """Resets before training.""" if self._is_epoch_to_log(trainer) and self._is_rank_to_log: report_memory_usage("Before-train", self.logger) def after_train_epoch(self, trainer): """Writes log after finishing a training epoch.""" if self._is_epoch_to_log(trainer) and self._is_rank_to_log: report_memory_usage(f"[Epoch {trainer.cur_epoch} / Train]", self.logger) def after_test(self, trainer): """Reports after testing.""" if self._is_epoch_to_log(trainer) and self._is_rank_to_log and self._log_eval: report_memory_usage(f"[Epoch {trainer.cur_epoch} / Test]", self.logger)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
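A hedged sketch of how the logging hooks above are typically combined into a hook list for Trainer.fit(); logger and timer are assumed to be the DistributedLogger and MultiTimer created during setup, and the log directory is illustrative:

from colossalai.legacy.trainer.hooks import (
    LogMetricByEpochHook,
    LogMetricByStepHook,
    LogTimingByEpochHook,
    TensorboardHook,
)

hook_list = [
    LogMetricByStepHook(),                # feeds per-step metrics into the tqdm postfix
    LogMetricByEpochHook(logger=logger),  # one metric summary line per epoch
    LogTimingByEpochHook(timer=timer, logger=logger, ignore_num_train_steps=5),
    TensorboardHook(log_dir="./tb_logs", ranks=[0]),
]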
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/hooks/_commons_.py
colossalai/legacy/trainer/hooks/_commons_.py
import torch


def _format_number(val, prec=5):
    if isinstance(val, float):
        return f"{val:.{prec}g}"
    elif torch.is_tensor(val) and torch.is_floating_point(val):
        return f"{val.item():.{prec}g}"
    return val
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
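A quick behavior check for _format_number: floats and floating-point tensors are rendered with five significant digits, while other values pass through unchanged (importing the private helper here purely for demonstration):

import torch

from colossalai.legacy.trainer.hooks._commons_ import _format_number

assert _format_number(3.14159265) == "3.1416"
assert _format_number(torch.tensor(0.123456789)) == "0.12346"
assert _format_number(42) == 42  # non-float values are returned as-is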
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/hooks/_checkpoint_hook.py
colossalai/legacy/trainer/hooks/_checkpoint_hook.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch from colossalai.legacy.registry import HOOKS from colossalai.legacy.trainer.hooks import BaseHook from colossalai.legacy.utils.checkpointing import save_checkpoint from colossalai.logging import get_dist_logger from ._lr_scheduler_hook import LRSchedulerHook @HOOKS.register_module class SaveCheckpointHook(BaseHook): """Saves the model by interval in training process. Args: interval (int, optional): Number of epochs between saving the checkpoint, defaults to 1. if save_by_iter is True, this arg refers to the number of iters between saving. checkpoint_dir (str, optional): File name to save the checkpoint, defaults to None. model (torch.nn.Module, Optional): The model to save, defaults to None. When not passing, 'trainer.engine.model' will be used. We encourage you to pass the model in it to avoid some unexpected bugs, especially when using **DDP**. save_by_iter (bool, optional): Whether saving the checkpoint by iter, default to False. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front defaults to 10. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. """ def __init__( self, interval: int = 1, checkpoint_dir: str = None, model: torch.nn.Module = None, save_by_iter: bool = False, priority: int = 10, ): super().__init__(priority=priority) self.interval = interval self.checkpoint_dir = checkpoint_dir self.model = model self.save_by_iter = save_by_iter self.logger = get_dist_logger() # get lr scheduler from the LRSchedulerHook before train self._lr_scheduler = None def after_hook_is_attached(self, trainer): # get lr scheduler if exists for hook in trainer.hooks: if isinstance(hook, LRSchedulerHook): self._lr_scheduler = hook.lr_scheduler break self.model = self.model if self.model is not None else trainer.engine.model def after_train_iter(self, trainer, output, label, loss): """Saves the model after a training iter.""" # save by interval if self.save_by_iter and trainer.cur_step % self.interval == 0: save_checkpoint( self.checkpoint_dir, trainer.cur_epoch, self.model, trainer.engine.optimizer, self._lr_scheduler ) self.logger.info( f"checkpoint for iteration {trainer.cur_step} is saved to {self.checkpoint_dir}", ranks=[0] ) else: pass def after_train_epoch(self, trainer): """Saves the model after a training epoch.""" # save by interval if trainer.cur_epoch % self.interval == 0: save_checkpoint( self.checkpoint_dir, trainer.cur_epoch, self.model, trainer.engine.optimizer, self._lr_scheduler ) self.logger.info(f"checkpoint for epoch {trainer.cur_epoch} is saved to {self.checkpoint_dir}", ranks=[0])
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
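A hedged usage sketch for SaveCheckpointHook, saving every five epochs and passing the unwrapped model explicitly as the docstring recommends; `model` is assumed to exist:

from colossalai.legacy.trainer.hooks import SaveCheckpointHook

ckpt_hook = SaveCheckpointHook(
    interval=5,
    checkpoint_dir="./checkpoints/model.pt",
    model=model,          # the bare model, not the DDP wrapper
    save_by_iter=False,   # interval is counted in epochs here
)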
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/hooks/_lr_scheduler_hook.py
colossalai/legacy/trainer/hooks/_lr_scheduler_hook.py
from torch import Tensor from colossalai.legacy.registry import HOOKS from ._metric_hook import LearningRateMetric, MetricHook @HOOKS.register_module class LRSchedulerHook(MetricHook): r"""Build LR scheduler for trainer. Args: lr_scheduler (:class:`colossalai.nn.lr_scheduler`): The specific LR scheduler in range of ``colossalai.nn.lr_scheduler``, more details about ``lr_scheduler`` could be found in `lr_scheduler <https://github.com/hpcaitech/ColossalAI/tree/main/colossalai/nn/lr_scheduler>`_. by_epoch (bool): If `True`, the LR will be scheduled every epoch. Else, the LR will be scheduled every batch. store_lr_in_state (bool, optional): If `True`, store the learning rate in each state, defaults to `True`. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front defaults to 1. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. """ def __init__( self, lr_scheduler, by_epoch: bool, store_lr_in_state: bool = True, priority: int = 1, ): super().__init__(priority=priority) self.by_epoch = by_epoch self.lr_scheduler = lr_scheduler self.store_lr_in_state = store_lr_in_state def after_hook_is_attached(self, trainer): self._check_metric_states_initialization(trainer) trainer.states["metrics"]["train"]["LR"] = LearningRateMetric( epoch_only=self.by_epoch, initial_lr=self.lr_scheduler.get_last_lr()[0] ) def after_train_epoch(self, trainer): if self.by_epoch: self.lr_scheduler.step() trainer.states["metrics"]["train"]["LR"].update(self.lr_scheduler.get_last_lr()[0]) def after_train_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor): if not self.by_epoch: self.lr_scheduler.step() trainer.states["metrics"]["train"]["LR"].update(self.lr_scheduler.get_last_lr()[0])
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
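A hedged sketch wiring a scheduler through LRSchedulerHook; CosineAnnealingLR and its total_steps argument are assumed from colossalai.nn.lr_scheduler as referenced in the docstring, and optimizer, num_epochs and steps_per_epoch are assumed to exist. With by_epoch=False the scheduler steps once per batch:

from colossalai.nn.lr_scheduler import CosineAnnealingLR  # assumed available

from colossalai.legacy.trainer.hooks import LRSchedulerHook

lr_scheduler = CosineAnnealingLR(optimizer, total_steps=num_epochs * steps_per_epoch)
lr_hook = LRSchedulerHook(lr_scheduler=lr_scheduler, by_epoch=False)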
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/hooks/__init__.py
colossalai/legacy/trainer/hooks/__init__.py
from ._base_hook import BaseHook
from ._checkpoint_hook import SaveCheckpointHook
from ._log_hook import (
    LogMemoryByEpochHook,
    LogMetricByEpochHook,
    LogMetricByStepHook,
    LogTimingByEpochHook,
    TensorboardHook,
)
from ._lr_scheduler_hook import LRSchedulerHook
from ._metric_hook import AccuracyHook, LossHook, MetricHook, ThroughputHook

__all__ = [
    "BaseHook",
    "MetricHook",
    "LossHook",
    "AccuracyHook",
    "LogMetricByEpochHook",
    "TensorboardHook",
    "LogTimingByEpochHook",
    "LogMemoryByEpochHook",
    "LRSchedulerHook",
    "ThroughputHook",
    "LogMetricByStepHook",
    "SaveCheckpointHook",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/hooks/_metric_hook.py
colossalai/legacy/trainer/hooks/_metric_hook.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from abc import ABC, abstractmethod
from typing import Callable

import torch
import torch.distributed as dist

from colossalai.accelerator import get_accelerator
from colossalai.legacy.communication import all_reduce
from colossalai.legacy.context import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.registry import HOOKS
from colossalai.legacy.utils import is_no_pp_or_last_stage

from ._base_hook import BaseHook
from ._commons_ import _format_number


class Metric(ABC):
    """A basic class of metric collectors. It collects a specific
    metric during training or evaluation and would always be used with
    :class:`MetricHook` to help it update its states and show the
    metric. So please use the corresponding hook class to make the metric
    collector work.

    Args:
        epoch_only (bool): Whether the metric is only read for the full epoch.
    """

    def __init__(self, epoch_only: bool):
        # is the metric only read for the full epoch
        self._epoch_only = epoch_only

    @property
    def epoch_only(self):
        """Returns :attr:`epoch_only`."""
        return self._epoch_only

    @abstractmethod
    def reset(self) -> None:
        """Resets the metric to its initial state.
        By default, this is called at the start of each epoch.
        """

    @abstractmethod
    def update(self, *args, **kwargs) -> None:
        """Updates the metric's state using the passed batch output.
        By default, this is called once for each batch.
        """

    @abstractmethod
    def get_last_step_value(self) -> float:
        """Returns the metric value in the last iteration."""

    @abstractmethod
    def get_accumulated_value(self):
        """Computes the metric based on its accumulated state.
        By default, this is called at the end of each epoch.

        :return: the actual quantity of interest
        :rtype: Any
        """

    @staticmethod
    @abstractmethod
    def is_better(a, b) -> bool:
        """Compares a and b, and returns whether a is better than b

        :return: The result of comparison
        :rtype: bool
        """


class LossMetric(Metric):
    """A metric collector for loss.

    Args:
        epoch_only (bool): Whether the metric is only read for the full epoch.
    """

    def __init__(self, epoch_only):
        super().__init__(epoch_only=epoch_only)
        self.last_step_loss = torch.zeros(1, device=get_accelerator().get_current_device())
        self.accum_loss = torch.zeros(1, device=get_accelerator().get_current_device())
        self.count = 0

    def reset(self) -> None:
        """Sets :attr:`last_step_loss` and :attr:`accum_loss` to zero."""
        self.last_step_loss.zero_()
        self.accum_loss.zero_()
        self.count = 0

    def update(self, loss) -> None:
        """Updates :attr:`last_step_loss` and :attr:`accum_loss` with current loss.
        It expects the output to contain the loss.

        Args:
            loss (:class:`torch.tensor`): Current loss of the output.
        """
        # expect output to be logits, label and loss
        loss_ = loss.detach()
        self.last_step_loss.copy_(loss_)
        self.accum_loss.add_(loss_)
        self.count += 1

    def get_accumulated_value(self):
        """Returns accumulated loss."""
        if gpc.is_initialized(ParallelMode.DATA):
            dist.all_reduce(self.accum_loss, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.DATA))
            self.accum_loss.div_(gpc.get_world_size(ParallelMode.DATA))

        self.accum_loss.div_(self.count)
        return self.accum_loss.item()

    def get_last_step_value(self) -> float:
        """Returns :attr:`last_step_loss`."""
        return self.last_step_loss.cpu().item()

    @staticmethod
    def is_better(a, b):
        return a < b


class LearningRateMetric(Metric):
    """A metric collector for learning rate.

    Args:
        epoch_only (bool): Whether the metric is only read for the full epoch.
        initial_lr (float, optional): Initial learning rate, defaults to 0.0.
""" def __init__(self, epoch_only: bool, initial_lr: float = 0.0): super().__init__(epoch_only=epoch_only) self.lr = initial_lr def reset(self) -> None: pass def update(self, lr) -> None: self.lr = lr def get_last_step_value(self) -> float: return self.lr def get_accumulated_value(self): return self.lr @staticmethod def is_better(a, b) -> bool: pass class AccuracyMetric(Metric): """A metric collector for accuracy. It only works for classification tasks. Args: epoch_only (bool): Whether the metric only read for the full epoch. accuracy_func (:class:`typing.Callable`): Accuracy function for the classification task. """ def __init__(self, epoch_only: bool, accuracy_func: Callable): super().__init__(epoch_only=epoch_only) self.acc = accuracy_func self.last_step_sum = torch.zeros(1, device=get_accelerator().get_current_device()) self.last_step_correct = torch.zeros(1, device=get_accelerator().get_current_device()) self.accumulated_sum = torch.zeros(1, device=get_accelerator().get_current_device()) self.accumulated_correct = torch.zeros(1, device=get_accelerator().get_current_device()) def reset(self) -> None: self.last_step_sum.zero_() self.last_step_correct.zero_() self.accumulated_sum.zero_() self.accumulated_correct.zero_() def update(self, logits, targets, batch_size) -> None: """Updates last step accuracy and accumulated accuracy with current logits and labels. It expects the output has logits and labels. Args: logits (:class:`torch.tensor`): The logits output of the model. targets (:class:`torch.tensor`): Real labels of the dataset. batch_size (int): Batch size of the task. """ if isinstance(logits, (list, tuple)): logits = logits[0] if isinstance(targets, (list, tuple)): targets = targets[0] # update correct = self.acc(logits, targets) self.last_step_sum.fill_(batch_size) self.last_step_correct.fill_(correct) self.accumulated_sum += self.last_step_sum self.accumulated_correct += self.last_step_correct def get_last_step_value(self) -> float: self.last_step_sum = all_reduce(self.last_step_sum, ParallelMode.DATA) self.last_step_correct = all_reduce(self.last_step_correct, ParallelMode.DATA) return _format_number((self.last_step_correct / self.last_step_sum).cpu().item()) def get_accumulated_value(self): self.accumulated_sum = all_reduce(self.accumulated_sum, ParallelMode.DATA) self.accumulated_correct = all_reduce(self.accumulated_correct, ParallelMode.DATA) return (self.accumulated_correct / self.accumulated_sum).item() @staticmethod def is_better(a, b) -> bool: return a > b class MetricHook(BaseHook): """Specialized hook classes for :class:`Metric`. Some help metric collectors initialize, reset and update their states. Others are used to display and record the metric. Args: priority (int): Priority in the printing, hooks with small priority will be printed in front defaults to 1. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. """ def __init__( self, priority: int, ): super().__init__(priority) self._is_stage_to_compute = is_no_pp_or_last_stage() def _check_metric_states_initialization(self, trainer): if "metrics" not in trainer.states: self.init_runner_states(trainer, "metrics", dict(train={}, test={})) @HOOKS.register_module class LossHook(MetricHook): """Specialized hook class for :class:`Loss`. Args: priority (int, optional): Priority in the printing, hooks with small priority will be printed in front defaults to 0. 
If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. """ def __init__(self, priority: int = 0): super().__init__(priority) def after_hook_is_attached(self, trainer): self._check_metric_states_initialization(trainer) if self._is_stage_to_compute: self.train_loss = LossMetric(epoch_only=False) self.test_loss = LossMetric(epoch_only=True) # register the metric calculator trainer.states["metrics"]["train"]["Loss"] = self.train_loss trainer.states["metrics"]["test"]["Loss"] = self.test_loss def before_train_epoch(self, trainer): if self._is_stage_to_compute: self.train_loss.reset() def after_train_iter(self, trainer, logits, label, loss): if self._is_stage_to_compute: self.train_loss.update(loss) def before_test_epoch(self, trainer): if self._is_stage_to_compute: self.test_loss.reset() def after_test_iter(self, trainer, logits, label, loss): if self._is_stage_to_compute: self.test_loss.update(loss) @HOOKS.register_module class AccuracyHook(MetricHook): """Specialized hook class for :class:`Accuracy`. Args: accuracy_func (:class:`typing.Callable`): Accuracy function for the classification task. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front defaults to 0. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. """ def __init__(self, accuracy_func: Callable, priority: int = 0): super().__init__(priority) self.accuracy_func = accuracy_func def after_hook_is_attached(self, trainer): self._check_metric_states_initialization(trainer) if self._is_stage_to_compute: self.metric = AccuracyMetric(epoch_only=True, accuracy_func=self.accuracy_func) # register the metric trainer.states["metrics"]["test"]["Accuracy"] = self.metric def before_test(self, trainer): if self._is_stage_to_compute: self.metric.reset() def after_test_iter(self, trainer, logits, targets, *args): if self._is_stage_to_compute: batch_size = trainer.engine.schedule.batch_size self.metric.update(logits, targets, batch_size) class ThroughputMetric(Metric): """Metric for :class:`Throughput`. Args: epoch_only (bool): Whether the metric only read for the full epoch. 
""" def __init__(self, epoch_only: bool, ignored_steps: int = 0, tflop_per_step: int = 0, use_local: bool = False): super().__init__(epoch_only=epoch_only) self.ignored_steps = ignored_steps self.cur_steps = 0 self.accumulated_num_samples = torch.zeros(1, device=get_accelerator().get_current_device()) self.accumulated_used_time = torch.zeros(1, device=get_accelerator().get_current_device()) self.last_step_num_samples = torch.zeros(1, device=get_accelerator().get_current_device()) self.last_step_used_time = torch.zeros(1, device=get_accelerator().get_current_device()) self._tflop_per_step = tflop_per_step self._use_local = use_local def reset(self) -> None: # self.cur_steps = 0 self.accumulated_num_samples.zero_() self.accumulated_used_time.zero_() self.last_step_num_samples.zero_() self.last_step_used_time.zero_() def update(self, num_samples, time) -> None: self.cur_steps += 1 self.last_step_num_samples.fill_(num_samples) self.last_step_used_time.fill_(time) if self.cur_steps >= self.ignored_steps: self.accumulated_num_samples += self.last_step_num_samples self.accumulated_used_time += self.last_step_used_time def get_last_step_value(self) -> float: if self._use_local: self.last_step_num_samples *= gpc.get_world_size(ParallelMode.DATA) else: self.last_step_used_time = all_reduce(self.last_step_used_time, ParallelMode.DATA) / gpc.get_world_size( ParallelMode.DATA ) self.last_step_num_samples = all_reduce(self.last_step_num_samples, ParallelMode.DATA) sample_per_sec = _format_number(self.last_step_num_samples / (self.last_step_used_time + 1e-12).item()) return sample_per_sec def get_last_step_info(self) -> str: if self._use_local: self.last_step_num_samples *= gpc.get_world_size(ParallelMode.DATA) else: self.last_step_used_time = all_reduce(self.last_step_used_time, ParallelMode.DATA) / gpc.get_world_size( ParallelMode.DATA ) self.last_step_num_samples = all_reduce(self.last_step_num_samples, ParallelMode.DATA) sample_per_sec = _format_number(self.last_step_num_samples / (self.last_step_used_time + 1e-12).item()) if self._tflop_per_step > 0: tflops = _format_number(self._tflop_per_step / (self.last_step_used_time.item() + 1e-12)) return f"{sample_per_sec} sample_per_sec, {tflops} Tflops" else: return f"{sample_per_sec} sample_per_sec" def get_accumulated_value(self) -> float: self.accumulated_used_time = all_reduce(self.accumulated_used_time, ParallelMode.DATA) / gpc.get_world_size( ParallelMode.DATA ) self.accumulated_num_samples = all_reduce(self.accumulated_num_samples, ParallelMode.DATA) return (self.accumulated_num_samples / (self.accumulated_used_time + 1e-12)).item() @staticmethod def is_better(a, b) -> bool: pass @HOOKS.register_module class ThroughputHook(MetricHook): """Specialized hook class for :class:`Throughput`. Hook to measure execution throughput (samples/sec). Args: ignored_steps (int, optional): the number of initial training steps to ignore. priority (int, optional): Priority in the printing, hooks with small priority will be printed in front defaults to 10. If different hooks share same priority, the order of printing would depend on the hooks order in the hook list. tflop_per_step(int, optional): tera floating point operations per step. use_local (bool, optional): Whether to use local time for throughput calculation. 
""" def __init__(self, ignored_steps: int = 0, priority: int = 10, tflop_per_step: int = 0, use_local=False): super().__init__(priority) self.ignored_steps = ignored_steps self._tflop_per_step = tflop_per_step self._use_local = use_local def after_hook_is_attached(self, trainer): self._check_metric_states_initialization(trainer) if self._is_stage_to_compute: self.metric = ThroughputMetric( epoch_only=True, ignored_steps=self.ignored_steps, tflop_per_step=self._tflop_per_step, use_local=self._use_local, ) # register the metric trainer.states["metrics"]["train"]["Throughput"] = self.metric trainer.states["metrics"]["test"]["Throughput"] = self.metric def before_train_epoch(self, trainer): if self._is_stage_to_compute: self.metric.reset() def after_train_iter(self, trainer, *args): if self._is_stage_to_compute: self.metric.update( trainer.engine.schedule.batch_size, trainer._timer.get_timer("Train-step").get_elapsed_time() ) def before_test(self, trainer): if self._is_stage_to_compute: self.metric.reset() def after_test_iter(self, trainer, *args): if self._is_stage_to_compute: self.metric.update( trainer.engine.schedule.batch_size, trainer._timer.get_timer("Test-step").get_elapsed_time() )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
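An illustrative Metric subclass (not part of the library) that follows the reset/update/get_* contract defined above, tracking gradient norms:

from colossalai.legacy.trainer.hooks._metric_hook import Metric


class GradNormMetric(Metric):
    def __init__(self, epoch_only: bool = False):
        super().__init__(epoch_only=epoch_only)
        self.last = 0.0
        self.total = 0.0
        self.count = 0

    def reset(self) -> None:
        self.last, self.total, self.count = 0.0, 0.0, 0

    def update(self, grad_norm: float) -> None:
        self.last = grad_norm
        self.total += grad_norm
        self.count += 1

    def get_last_step_value(self) -> float:
        return self.last

    def get_accumulated_value(self):
        return self.total / max(self.count, 1)

    @staticmethod
    def is_better(a, b) -> bool:
        return a < b  # smaller gradient norms treated as "better" here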
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/trainer/hooks/_base_hook.py
colossalai/legacy/trainer/hooks/_base_hook.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC from torch import Tensor class BaseHook(ABC): """This class allows users to add desired actions in specific time points during training or evaluation. :param priority: Priority in the printing, hooks with small priority will be printed in front :type priority: int """ def __init__(self, priority: int) -> None: self.priority = priority def after_hook_is_attached(self, trainer): """Actions after hooks are attached to trainer.""" def before_train(self, trainer): """Actions before training.""" def after_train(self, trainer): """Actions after training.""" def before_train_iter(self, trainer): """Actions before running a training iteration.""" def after_train_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor): """Actions after running a training iteration. Args: trainer (:class:`Trainer`): Trainer which is using this hook. output (:class:`torch.Tensor`): Output of the model. label (:class:`torch.Tensor`): Labels of the input data. loss (:class:`torch.Tensor`): Loss between the output and input data. """ def before_train_epoch(self, trainer): """Actions before starting a training epoch.""" def after_train_epoch(self, trainer): """Actions after finishing a training epoch.""" def before_test(self, trainer): """Actions before evaluation.""" def after_test(self, trainer): """Actions after evaluation.""" def before_test_epoch(self, trainer): """Actions before starting a testing epoch.""" def after_test_epoch(self, trainer): """Actions after finishing a testing epoch.""" def before_test_iter(self, trainer): """Actions before running a testing iteration.""" def after_test_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor): """Actions after running a testing iteration. Args: trainer (:class:`Trainer`): Trainer which is using this hook output (:class:`torch.Tensor`): Output of the model label (:class:`torch.Tensor`): Labels of the input data loss (:class:`torch.Tensor`): Loss between the output and input data """ def init_runner_states(self, trainer, key, val): """Initializes trainer's state. Args: trainer (:class:`Trainer`): Trainer which is using this hook key: Key of state to be reset val: Value of state to be reset """ if key not in trainer.states: trainer.states[key] = val
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
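A minimal sketch of a custom hook built on this interface; the hook and its behavior are illustrative, not part of the library (``BaseHook`` is imported from the module path shown above):

from torch import Tensor

from colossalai.legacy.trainer.hooks._base_hook import BaseHook


class LastLossHook(BaseHook):
    """Illustrative hook that stores the most recent training loss on the trainer."""

    def __init__(self, priority: int = 10) -> None:
        super().__init__(priority)

    def after_hook_is_attached(self, trainer):
        # init_runner_states only sets the key if it is not present yet
        self.init_runner_states(trainer, "last_loss", None)

    def after_train_iter(self, trainer, output: Tensor, label: Tensor, loss: Tensor):
        trainer.states["last_loss"] = loss.item()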
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/amp_type.py
colossalai/legacy/amp/amp_type.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from enum import Enum class AMP_TYPE(Enum): APEX = "apex" TORCH = "torch" NAIVE = "naive"
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
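The enum is typically referenced from the ``fp16`` section of a ColossalAI config file; a minimal sketch (the surrounding training setup is assumed, not shown):

from colossalai.legacy.amp import AMP_TYPE

# inside a config.py consumed by the legacy initialization path
fp16 = dict(mode=AMP_TYPE.TORCH)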
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/__init__.py
colossalai/legacy/amp/__init__.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.nn as nn
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer

from colossalai.context import Config

from .amp_type import AMP_TYPE
from .apex_amp import convert_to_apex_amp
from .naive_amp import convert_to_naive_amp
from .torch_amp import convert_to_torch_amp

__all__ = ["convert_to_amp", "convert_to_naive_amp", "convert_to_apex_amp", "convert_to_torch_amp", "AMP_TYPE"]


def convert_to_amp(model: nn.Module, optimizer: Optimizer, criterion: _Loss, mode: AMP_TYPE, amp_config: Config = None):
    """A helper function to wrap training components with AMP modules.

    Args:
        model (:class:`torch.nn.Module`): your model object.
        optimizer (:class:`torch.optim.Optimizer`): your optimizer object.
        criterion (:class:`torch.nn.modules.loss._Loss`): your loss function object.
        mode (:class:`colossalai.legacy.amp.AMP_TYPE`): amp mode.
        amp_config (Union[:class:`colossalai.context.Config`, dict]): configuration for different amp modes.

    Returns:
        A tuple (model, optimizer, criterion).

    Note:
        ``amp_config`` varies with the mode you choose. You should check the corresponding amp mode
        for more details about ``amp_config``.
        For ``apex_amp``, please check
        `apex_amp config <https://nvidia.github.io/apex/amp.html?highlight=apex%20amp>`_.
        For ``naive_amp``, please check
        `naive_amp config <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/amp/naive_amp/_fp16_optimizer.py#L42>`_.
        For ``torch_amp``, please check
        `torch_amp config <https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py#L97>`_.
    """
    assert isinstance(mode, AMP_TYPE), f"expected the argument mode to be AMP_TYPE, but got {type(mode)}"

    if amp_config is None:
        amp_config = Config()

    if mode == AMP_TYPE.TORCH:
        model, optimizer, criterion = convert_to_torch_amp(model, optimizer, criterion, amp_config)
    elif mode == AMP_TYPE.APEX:
        model, optimizer = convert_to_apex_amp(model, optimizer, amp_config)
    elif mode == AMP_TYPE.NAIVE:
        model, optimizer = convert_to_naive_amp(model, optimizer, amp_config)

    return model, optimizer, criterion
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
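A minimal sketch of calling the helper directly on toy components; normally the legacy initialization path calls this for you based on the ``fp16`` config. Assumes a CUDA device; the model and shapes are illustrative:

import torch
import torch.nn as nn

from colossalai.legacy.amp import AMP_TYPE, convert_to_amp

model = nn.Linear(16, 4).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

# amp_config is optional; an empty Config() is used when it is None
model, optimizer, criterion = convert_to_amp(model, optimizer, criterion, mode=AMP_TYPE.TORCH)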
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/torch_amp/torch_amp.py
colossalai/legacy/amp/torch_amp/torch_amp.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.nn as nn
from torch import Tensor
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer

from colossalai.accelerator import get_accelerator
from colossalai.interface import OptimizerWrapper
from colossalai.legacy.utils import clip_grad_norm_fp32

from ._grad_scaler import GradScaler

autocast = get_accelerator().autocast


class TorchAMPOptimizer(OptimizerWrapper):
    """A wrapper class that integrates PyTorch AMP with an optimizer.

    Args:
        optim (torch.optim.Optimizer): A normal optimizer like Adam or SGD.
        init_scale (float, optional, default=2.**16): Initial scale factor.
        growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during
            :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations.
        backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during
            :meth:`update` if inf/NaN gradients occur in an iteration.
        growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients
            that must occur for the scale to be multiplied by ``growth_factor``.
        enabled (bool, optional, default=True): If ``False``, disables gradient scaling. :meth:`step` simply
            invokes the underlying ``optimizer.step()``, and other methods become no-ops.
    """

    def __init__(self, optim: Optimizer, *args, **kwargs):
        super().__init__(optim)
        self.scaler = GradScaler(*args, **kwargs)

    def backward(self, loss: Tensor):
        """Backward with the torch amp gradient scaler.

        Args:
            loss (torch.Tensor): Loss computed by a loss function
        """
        self.scaler.scale(loss).backward()

    def step(self):
        """Update the parameters of the model."""
        self.scaler.step(self.optim)
        self.scaler.update()

    def clip_grad_norm(self, model: nn.Module, max_norm: float):
        """Apply gradient clipping to the model parameters.

        Args:
            model (torch.nn.Module): Your model object
            max_norm (float): Max norm value for gradient clipping
        """
        if max_norm > 0.0:
            self.scaler.unscale_(self.optim)
            clip_grad_norm_fp32(model.parameters(), max_norm)


class TorchAMPModel(nn.Module):
    """A wrapper class for a model object that executes forward with values automatically cast to fp16.

    Args:
        model (:class:`torch.nn.Module`): a torch model instance
    """

    def __init__(self, model: nn.Module) -> None:
        super().__init__()
        self.model = model

    @autocast()
    def forward(self, *args, **kwargs):
        """
        Execute forward under the torch amp context
        """
        return self.model(*args, **kwargs)


class TorchAMPLoss(nn.Module):
    """A wrapper class for a criterion object that computes the loss in a mixed-precision context.

    Args:
        loss (torch.nn.modules.loss._Loss): A loss function object
    """

    def __init__(self, loss: _Loss):
        super().__init__()
        self.loss = loss

    @autocast()
    def forward(self, *args, **kwargs):
        """
        Execute forward under the torch amp context
        """
        return self.loss(*args, **kwargs)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
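The three wrappers can also be composed by hand; a hedged sketch of a single training step (requires CUDA; shapes and data are illustrative):

import torch
import torch.nn as nn

from colossalai.legacy.amp.torch_amp import TorchAMPLoss, TorchAMPModel, TorchAMPOptimizer

model = TorchAMPModel(nn.Linear(16, 4).cuda())
optimizer = TorchAMPOptimizer(torch.optim.SGD(model.parameters(), lr=0.1))
criterion = TorchAMPLoss(nn.CrossEntropyLoss())

data = torch.randn(8, 16, device="cuda")
label = torch.randint(0, 4, (8,), device="cuda")

optimizer.zero_grad()
loss = criterion(model(data), label)           # forward runs under autocast
optimizer.backward(loss)                       # scales the loss before backward
optimizer.clip_grad_norm(model, max_norm=1.0)  # unscale_ first, then clip
optimizer.step()                               # scaler.step + scaler.update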
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/torch_amp/__init__.py
colossalai/legacy/amp/torch_amp/__init__.py
from typing import Optional import torch.nn as nn from torch.nn.modules.loss import _Loss from torch.optim import Optimizer from colossalai.context import Config from .torch_amp import TorchAMPLoss, TorchAMPModel, TorchAMPOptimizer def convert_to_torch_amp( model: nn.Module, optimizer: Optimizer, criterion: Optional[_Loss] = None, amp_config: Optional[Config] = None ): """A helper function to wrap training components with Pytorch AMP modules Args: model (:class:`torch.nn.Module`): your model object. optimizer (:class:`torch.optim.Optimizer`): your optimizer object criterion (:class:`torch.nn.modules.loss._Loss`, optional): your loss function object amp_config (:class:`colossalai.context.Config` or dict, optional): configuration for Pytorch AMP. The ``amp_config`` should include parameters below: :: init_scale (float, optional, default=2.**16) growth_factor (float, optional, default=2.0) backoff_factor (float, optional, default=0.5) growth_interval (int, optional, default=2000) enabled (bool, optional, default=True) Returns: A tuple (model, optimizer, criterion) """ model = TorchAMPModel(model) if amp_config is None: amp_config = dict() optimizer = TorchAMPOptimizer(optimizer, **amp_config) if criterion: criterion = TorchAMPLoss(criterion) return model, optimizer, criterion __all__ = ["convert_to_torch_amp", "TorchAMPModel", "TorchAMPLoss", "TorchAMPOptimizer"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
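Equivalently, the config-driven path; the keys mirror ``torch.cuda.amp.GradScaler``'s constructor and each is optional (``model``, ``optimizer`` and ``criterion`` as in the previous sketch):

from colossalai.legacy.amp.torch_amp import convert_to_torch_amp

amp_config = dict(init_scale=2.0**14, growth_interval=1000)
model, optimizer, criterion = convert_to_torch_amp(model, optimizer, criterion, amp_config=amp_config)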
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/torch_amp/_grad_scaler.py
colossalai/legacy/amp/torch_amp/_grad_scaler.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- # modified from https://github.com/pytorch/pytorch/blob/master/torch/cuda/amp/grad_scaler.py # to support tensor parallel import warnings from collections import abc, defaultdict from enum import Enum from typing import Any, Dict, List, Optional, Tuple import torch import torch.distributed as dist from packaging import version from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc class _MultiDeviceReplicator(object): """ Lazily serves copies of a tensor to requested devices. Copies are cached per-device. """ def __init__(self, master_tensor: torch.Tensor) -> None: assert master_tensor.is_cuda or master_tensor.device.type == "xla" self.master = master_tensor self._per_device_tensors: Dict[torch.device, torch.Tensor] = {} def get(self, device) -> torch.Tensor: retval = self._per_device_tensors.get(device, None) if retval is None: retval = self.master.to(device=device, non_blocking=True, copy=True) self._per_device_tensors[device] = retval return retval # Defines default_factory for GradScaler's _per_optimizer_states defaultdict, # as well as associated "enum" values. Prefers defining these at top level because # - Lambdas can't be pickled, so we don't want to supply a lambda as the factory. # - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler # causes a circular reference, which we'd rather avoid. class OptState(Enum): READY = 0 UNSCALED = 1 STEPPED = 2 def _refresh_per_optimizer_state(): return {"stage": OptState.READY, "found_inf_per_device": {}} class GradScaler(object): _scale: Optional[torch.Tensor] _grows_tracker: Optional[torch.Tensor] _per_optimizer_states: Dict[int, Dict[str, Any]] """ An instance ``scaler`` of :class:`GradScaler` helps perform the steps of gradient scaling conveniently. * ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor. * ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``. * ``scaler.update()`` updates ``scaler``'s scale factor. Example: # Creates a GradScaler once at the beginning of training. scaler = GradScaler() for epoch in epochs: for input, target in data: optimizer.zero_grad() output = model(input) loss = loss_fn(output, target) # Scales loss. Calls backward() on scaled loss to create scaled gradients. scaler.scale(loss).backward() # scaler.step() first unscales gradients of the optimizer's params. # If gradients don't contain infs/NaNs, optimizer.step() is then called, # otherwise, optimizer.step() is skipped. scaler.step(optimizer) # Updates the scale for next iteration. scaler.update() See the :ref:`Automatic Mixed Precision examples<amp-examples>` for usage (along with autocasting) in more complex cases like gradient clipping, gradient accumulation, gradient penalty, and multiple losses/optimizers. ``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow, a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used without incurring inf or NaN gradient values. ``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every ``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`). 
* If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``. * If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual. If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by ``growth_factor``. The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these iterations. After that, step skipping should occur rarely (once every few hundred or thousand iterations). Args: init_scale (float, optional, default=2.**16): Initial scale factor. growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations. backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during :meth:`update` if inf/NaN gradients occur in an iteration. growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients that must occur for the scale to be multiplied by ``growth_factor``. enabled (bool, optional, default=True): If ``False``, disables gradient scaling. :meth:`step` simply invokes the underlying ``optimizer.step()``, and other methods become no-ops. """ def __init__(self, init_scale=2.0**16, growth_factor=2.0, backoff_factor=0.5, growth_interval=2000, enabled=True): if enabled and not torch.cuda.is_available(): warnings.warn("torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling.") self._enabled = False else: self._enabled = enabled # check version torch_version = version.parse(torch.__version__) assert torch_version.major == 1 if torch_version.minor > 8: self._higher_than_torch18 = True else: self._higher_than_torch18 = False if self._enabled: assert growth_factor > 1.0, "The growth factor must be > 1.0." assert backoff_factor < 1.0, "The backoff factor must be < 1.0." self._init_scale = init_scale # self._scale will be lazily initialized during the first call to scale() self._scale = None self._growth_factor = growth_factor self._backoff_factor = backoff_factor self._growth_interval = growth_interval self._init_growth_tracker = 0 # self._growth_tracker will be lazily initialized during the first call to scale() self._growth_tracker = None self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) def _check_scale_growth_tracker(self, funcname) -> Tuple[torch.Tensor, torch.Tensor]: fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration." assert self._scale is not None, "Attempted {} but _scale is None. ".format(funcname) + fix assert self._growth_tracker is not None, "Attempted {} but _growth_tracker is None. ".format(funcname) + fix return (self._scale, self._growth_tracker) def _lazy_init_scale_growth_tracker(self, dev): assert self._growth_tracker is None, "_growth_tracker initialized before _scale" self._scale = torch.full((1,), self._init_scale, dtype=torch.float32, device=dev) self._growth_tracker = torch.full((1,), self._init_growth_tracker, dtype=torch.int32, device=dev) def scale(self, outputs): """ Multiplies ('scales') a tensor or list of tensors by the scale factor. Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned unmodified. 
Args: outputs (Tensor or iterable of Tensors): Outputs to scale. """ if not self._enabled: return outputs # Short-circuit for the common case. if isinstance(outputs, torch.Tensor): assert outputs.is_cuda or outputs.device.type == "xla" if self._scale is None: self._lazy_init_scale_growth_tracker(outputs.device) assert self._scale is not None return outputs * self._scale.to(device=outputs.device, non_blocking=True) # Invoke the more complex machinery only if we're treating multiple outputs. # holds a reference that can be overwritten by apply_scale stash: List[_MultiDeviceReplicator] = [] def apply_scale(val): if isinstance(val, torch.Tensor): assert val.is_cuda or val.device.type == "xla" if len(stash) == 0: if self._scale is None: self._lazy_init_scale_growth_tracker(val.device) assert self._scale is not None stash.append(_MultiDeviceReplicator(self._scale)) return val * stash[0].get(val.device) elif isinstance(val, abc.Iterable): iterable = map(apply_scale, val) if isinstance(val, list) or isinstance(val, tuple): return type(val)(iterable) else: return iterable else: raise ValueError("outputs must be a Tensor or an iterable of Tensors") return apply_scale(outputs) def _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16): per_device_inv_scale = _MultiDeviceReplicator(inv_scale) per_device_found_inf = _MultiDeviceReplicator(found_inf) # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype. # There could be hundreds of grads, so we'd like to iterate through them just once. # However, we don't know their devices or dtypes in advance. # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict # Google says mypy struggles with defaultdicts type annotations. per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list)) # type: ignore[var-annotated] with torch.no_grad(): for group in optimizer.param_groups: for param in group["params"]: if param.grad is None: continue if (not allow_fp16) and param.grad.dtype == torch.float16: raise ValueError("Attempting to unscale FP16 gradients.") if param.grad.is_sparse: # is_coalesced() == False means the sparse grad has values with duplicate indices. # coalesce() deduplicates indices and adds all values that have the same index. # For scaled fp16 values, there's a good chance coalescing will cause overflow, # so we should check the coalesced _values(). if param.grad.dtype is torch.float16: param.grad = param.grad.coalesce() to_unscale = param.grad._values() else: to_unscale = param.grad # TODO: is there a way to split by device and dtype without appending in the inner loop? 
per_device_and_dtype_grads[to_unscale.device][to_unscale.dtype].append(to_unscale) for device, per_dtype_grads in per_device_and_dtype_grads.items(): for grads in per_dtype_grads.values(): torch._amp_foreach_non_finite_check_and_unscale_( grads, per_device_found_inf.get(device), per_device_inv_scale.get(device) ) # For tensor parallel parameters it should be all-reduced over tensor parallel process group if gpc.is_initialized(ParallelMode.MODEL) and gpc.get_world_size(ParallelMode.MODEL) > 1: vals = [val for val in per_device_found_inf._per_device_tensors.values()] coalesced = _flatten_dense_tensors(vals) dist.all_reduce(coalesced, op=dist.ReduceOp.MAX, group=gpc.get_group(ParallelMode.MODEL)) for buf, synced in zip(vals, _unflatten_dense_tensors(coalesced, vals)): buf.copy_(synced) return per_device_found_inf._per_device_tensors def unscale_(self, optimizer): """ Divides ("unscales") the optimizer's gradient tensors by the scale factor. :meth:`unscale_` is optional, serving cases where you need to :ref:`modify or inspect gradients<working-with-unscaled-gradients>` between the backward pass(es) and :meth:`step`. If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`. Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients:: ... scaler.scale(loss).backward() scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) scaler.step(optimizer) scaler.update() Args: optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. .. note:: :meth:`unscale_` does not incur a CPU-GPU sync. .. warning:: :meth:`unscale_` should only be called once per optimizer per :meth:`step` call, and only after all gradients for that optimizer's assigned parameters have been accumulated. Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError. .. warning:: :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute. """ if not self._enabled: return self._check_scale_growth_tracker("unscale_") optimizer_state = self._per_optimizer_states[id(optimizer)] if optimizer_state["stage"] is OptState.UNSCALED: raise RuntimeError("unscale_() has already been called on this optimizer since the last update().") elif optimizer_state["stage"] is OptState.STEPPED: raise RuntimeError("unscale_() is being called after step().") # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. assert self._scale is not None inv_scale = self._scale.double().reciprocal().float() found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device) optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, False) optimizer_state["stage"] = OptState.UNSCALED def _maybe_opt_step(self, optimizer, optimizer_state, *args, **kwargs): retval = None if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()): retval = optimizer.step(*args, **kwargs) return retval def step(self, optimizer, *args, **kwargs): """ :meth:`step` carries out the following two operations: 1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer`` earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs. 2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params. 
``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``. Returns the return value of ``optimizer.step(*args, **kwargs)``. Args: optimizer (torch.optim.Optimizer): Optimizer that applies the gradients. args: Any arguments. kwargs: Any keyword arguments. .. warning:: Closure use is not currently supported. """ if not self._enabled: return optimizer.step(*args, **kwargs) if "closure" in kwargs: raise RuntimeError("Closure use is not currently supported if GradScaler is enabled.") self._check_scale_growth_tracker("step") optimizer_state = self._per_optimizer_states[id(optimizer)] if optimizer_state["stage"] is OptState.STEPPED: raise RuntimeError("step() has already been called since the last update().") retval = None if hasattr(optimizer, "_step_supports_amp_scaling") and optimizer._step_supports_amp_scaling: # This optimizer has customized scale-handling logic, so we can call optimizer.step() directly. # The contract with custom optimizers is that their step() should accept an additional, # optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information: # it can query its own state, invoke unscale_ on itself, etc retval = optimizer.step(*args, **dict(kwargs, grad_scaler=self)) optimizer_state["stage"] = OptState.STEPPED return retval if optimizer_state["stage"] is OptState.READY: self.unscale_(optimizer) assert len(optimizer_state["found_inf_per_device"]) > 0, "No inf checks were recorded for this optimizer." retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs) optimizer_state["stage"] = OptState.STEPPED return retval def update(self, new_scale=None): """ Updates the scale factor. If any optimizer steps were skipped the scale is multiplied by ``backoff_factor`` to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively, the scale is multiplied by ``growth_factor`` to increase it. Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not used directly, it's used to fill GradScaler's internal scale tensor. So if ``new_scale`` was a tensor, later in-place changes to that tensor will not further affect the scale GradScaler uses internally.) Args: new_scale (float or :class:`torch.cuda.FloatTensor`, optional, default=None): New scale factor. .. warning:: :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has been invoked for all optimizers used this iteration. """ if not self._enabled: return _scale, _growth_tracker = self._check_scale_growth_tracker("update") if new_scale is not None: # Accept a new user-defined scale. if isinstance(new_scale, float): self._scale.fill_(new_scale) # type: ignore[union-attr] else: reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor with requires_grad=False." # type: ignore[attr-defined] assert isinstance(new_scale, torch.cuda.FloatTensor), reason assert new_scale.numel() == 1, reason assert new_scale.requires_grad is False, reason self._scale.copy_(new_scale) # type: ignore[union-attr] else: # Consume shared inf/nan data collected from optimizers to update the scale. # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous. found_infs = [ found_inf.to(device=_scale.device, non_blocking=True) for state in self._per_optimizer_states.values() for found_inf in state["found_inf_per_device"].values() ] assert len(found_infs) > 0, "No inf checks were recorded prior to update." 
found_inf_combined = found_infs[0] if len(found_infs) > 1: for i in range(1, len(found_infs)): found_inf_combined += found_infs[i] if self._higher_than_torch18: torch._amp_update_scale_( _scale, _growth_tracker, found_inf_combined, self._growth_factor, self._backoff_factor, self._growth_interval, ) else: self._scale = torch._amp_update_scale( _growth_tracker, _scale, found_inf_combined, self._growth_factor, self._backoff_factor, self._growth_interval, ) # To prepare for next iteration, clear the data collected from optimizers this iteration. self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) def _get_scale_async(self): return self._scale def get_scale(self): """ Returns a Python float containing the current scale, or 1.0 if scaling is disabled. .. warning:: :meth:`get_scale` incurs a CPU-GPU sync. """ if self._enabled: return self._init_scale if self._scale is None else self._get_scale_async().item() else: return 1.0 def get_growth_factor(self): r""" Returns a Python float containing the scale growth factor. """ return self._growth_factor def set_growth_factor(self, new_factor): r""" Args: new_scale (float): Value to use as the new scale growth factor. """ self._growth_factor = new_factor def get_backoff_factor(self): r""" Returns a Python float containing the scale backoff factor. """ return self._backoff_factor def set_backoff_factor(self, new_factor): r""" Args: new_scale (float): Value to use as the new scale backoff factor. """ self._backoff_factor = new_factor def get_growth_interval(self): r""" Returns a Python int containing the growth interval. """ return self._growth_interval def set_growth_interval(self, new_interval): r""" Args: new_interval (int): Value to use as the new growth interval. """ self._growth_interval = new_interval def _get_growth_tracker(self): if self._enabled: return self._init_growth_tracker if self._growth_tracker is None else self._growth_tracker.item() else: return 0 def is_enabled(self): r""" Returns a bool indicating whether this instance is enabled. """ return self._enabled def state_dict(self): r""" Returns the state of the scaler as a :class:`dict`. It contains five entries: * ``"scale"`` - a Python float containing the current scale * ``"growth_factor"`` - a Python float containing the current growth factor * ``"backoff_factor"`` - a Python float containing the current backoff factor * ``"growth_interval"`` - a Python int containing the current growth interval * ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps. If this instance is not enabled, returns an empty dict. .. note:: If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict` should be called after :meth:`update`. """ return ( { "scale": self.get_scale(), "growth_factor": self._growth_factor, "backoff_factor": self._backoff_factor, "growth_interval": self._growth_interval, "_growth_tracker": self._get_growth_tracker(), } if self._enabled else {} ) def load_state_dict(self, state_dict): r""" Loads the scaler state. If this instance is disabled, :meth:`load_state_dict` is a no-op. Args: state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`. """ if not self._enabled: return if len(state_dict) == 0: raise RuntimeError( "The source state dict is empty, possibly because it was saved " "from a disabled instance of GradScaler." 
) self._init_scale = state_dict["scale"] if self._scale is not None: self._scale.fill_(state_dict["scale"]) self._growth_factor = state_dict["growth_factor"] self._backoff_factor = state_dict["backoff_factor"] self._growth_interval = state_dict["growth_interval"] self._init_growth_tracker = state_dict["_growth_tracker"] if self._growth_tracker is not None: self._growth_tracker.fill_(state_dict["_growth_tracker"]) def __getstate__(self): state = self.__dict__.copy() if self._enabled: assert len(self._per_optimizer_states) == 0, ( "A GradScaler instance may only be pickled at the beginning " "of an iteration, or at the end after scaler.update()." ) # Pickling _scale and _growth_tracker Tensors directly triggers # "warnings.warn("pickle support for Storage will be removed in 1.5..." # so instead, we set the unpickled instance up to reinitialize them lazily. state["_init_scale"] = self.get_scale() state["_init_growth_tracker"] = self._get_growth_tracker() state["_scale"] = None state["_growth_tracker"] = None return state def __setstate__(self, state): self.__dict__.update(state) def _check_inf_per_device(self, optimizer): _scale, _ = self._check_scale_growth_tracker("_check_inf_per_device") dummy_inv_scale = torch.full((1,), 1.0, dtype=torch.float32, device=_scale.device) found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=_scale.device) self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] = self._unscale_grads_( optimizer, dummy_inv_scale, found_inf, True ) return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] def _found_inf_per_device(self, optimizer): return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
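Because the scaler carries state (current scale and growth tracker), it should be checkpointed alongside the model and optimizer; a minimal sketch using the ``state_dict``/``load_state_dict`` pair defined above (module path as in this repository; the scaler is only enabled when CUDA is available):

from colossalai.legacy.amp.torch_amp._grad_scaler import GradScaler

scaler = GradScaler(init_scale=2.0**16)

# ... run iterations of scale(loss).backward(), step(optimizer), update() ...

# state_dict() should be called after update() at the end of an iteration
checkpoint = {"scaler": scaler.state_dict()}

restored = GradScaler()
restored.load_state_dict(checkpoint["scaler"])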
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/apex_amp/__init__.py
colossalai/legacy/amp/apex_amp/__init__.py
import torch.nn as nn
from torch.optim import Optimizer

from .apex_amp import ApexAMPOptimizer


def convert_to_apex_amp(model: nn.Module, optimizer: Optimizer, amp_config):
    r"""A helper function to wrap training components with Apex AMP modules.

    Args:
        model (:class:`torch.nn.Module`): your model object.
        optimizer (:class:`torch.optim.Optimizer`): your optimizer object.
        amp_config (Union[:class:`colossalai.context.Config`, dict]): configuration for initializing apex_amp.

    Returns:
        Tuple: A tuple (model, optimizer).

    The ``amp_config`` should include parameters below:
    ::

        enabled (bool, optional, default=True)
        opt_level (str, optional, default="O1")
        cast_model_type (``torch.dtype``, optional, default=None)
        patch_torch_functions (bool, optional, default=None)
        keep_batchnorm_fp32 (bool or str, optional, default=None)
        master_weights (bool, optional, default=None)
        loss_scale (float or str, optional, default=None)
        cast_model_outputs (torch.dtype, optional, default=None)
        num_losses (int, optional, default=1)
        verbosity (int, default=1)
        min_loss_scale (float, default=None)
        max_loss_scale (float, default=2.**24)

    For more details about ``amp_config``, refer to `amp_config <https://nvidia.github.io/apex/amp.html?highlight=apex%20amp>`_.
    """
    import apex.amp as apex_amp

    model, optimizer = apex_amp.initialize(model, optimizer, **amp_config)
    optimizer = ApexAMPOptimizer(optimizer)

    return model, optimizer


__all__ = ["convert_to_apex_amp", "ApexAMPOptimizer"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
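A minimal sketch of the Apex path; requires NVIDIA Apex to be installed, and the ``opt_level``/``loss_scale`` choices here are illustrative:

import torch
import torch.nn as nn

from colossalai.legacy.amp.apex_amp import convert_to_apex_amp

model = nn.Linear(16, 4).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# "O2" keeps FP32 master weights; "dynamic" enables dynamic loss scaling
amp_config = dict(opt_level="O2", loss_scale="dynamic")
model, optimizer = convert_to_apex_amp(model, optimizer, amp_config)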
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/apex_amp/apex_amp.py
colossalai/legacy/amp/apex_amp/apex_amp.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch.nn as nn

try:
    import apex.amp as apex_amp
except ImportError:
    pass

from torch import Tensor

from colossalai.interface import OptimizerWrapper
from colossalai.legacy.utils import clip_grad_norm_fp32


class ApexAMPOptimizer(OptimizerWrapper):
    """A wrapper class for an APEX optimizer that implements apex-specific backward and clip_grad_norm methods."""

    def backward(self, loss: Tensor):
        """Backward pass to get all gradients.

        Args:
            loss (torch.Tensor): Loss computed by a loss function
        """
        with apex_amp.scale_loss(loss, self.optim) as scaled_loss:
            scaled_loss.backward()

    def clip_grad_norm(self, model: nn.Module, max_norm: float):
        """Clip gradients by norm.

        Args:
            model (torch.nn.Module): Your model object
            max_norm (float): The max norm value for gradient clipping
        """
        if max_norm > 0:
            clip_grad_norm_fp32(apex_amp.master_params(self.optim), max_norm)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
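Continuing the previous sketch, one training step through the wrapper (data shapes are illustrative):

import torch
import torch.nn as nn

data = torch.randn(8, 16, device="cuda")
label = torch.randint(0, 4, (8,), device="cuda")
criterion = nn.CrossEntropyLoss()

optimizer.zero_grad()
loss = criterion(model(data), label)
optimizer.backward(loss)                       # apex_amp.scale_loss under the hood
optimizer.clip_grad_norm(model, max_norm=1.0)  # clips the apex master params
optimizer.step()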
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/naive_amp/naive_amp.py
colossalai/legacy/amp/naive_amp/naive_amp.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Any import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from torch.distributed import ReduceOp from torch.optim import Optimizer from colossalai.interface import OptimizerWrapper from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc from ._fp16_optimizer import FP16Optimizer class NaiveAMPOptimizer(OptimizerWrapper): """A wrapper class for optimizer to cast all parameters to fp16 Args: optim (torch.optim.Optimizer): A normal optimizer like Adam or SGD. grad_scaler (BaseGradScaler): grad scaler for gradient chose in ``constant_grad_scaler`` or ``dynamic_grad_scaler``. clip_grad_norm (float, optional): clip gradients with this global L2 norm. Default 0. verbose (bool, optional): if set to `True`, will print debug info. Default False. Note: clipping is ignored if ``clip_grad_norm`` equals 0. """ def __init__(self, optim: Optimizer, *args, **kwargs): optim = FP16Optimizer(optim, *args, **kwargs) super().__init__(optim) def backward(self, loss: Tensor): self.optim.backward(loss) def step(self): return self.optim.step() def clip_grad_norm(self, model: nn.Module, max_norm: float): if self.optim.max_norm == max_norm: return raise RuntimeError( "NaiveAMP optimizer has clipped gradients during optimizer.step(). " "If you have supplied clip_grad_norm in the amp_config, " "executing the method clip_grad_norm is not allowed." ) class NaiveAMPModel(nn.Module): r"""A wrapper class for model to cast the model into fp16 and automatically cast the input and output Args: model (torch.nn.Module): torch.nn.Module to be wrapped. output_to_fp32 (bool, optional): Whether cast output of this module into fp32. (Default: True) parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): Parallel group mode used in this module. (Default: ``ParallelMode.DATA``) sync_buffer (bool, optional): whether to synchronize buffer. (Default: True) Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. """ def __init__( self, model: nn.Module, output_to_fp32: bool = True, parallel_mode: ParallelMode = ParallelMode.DATA, sync_buffer: bool = True, ): super().__init__() self.model = model.half() self._output_to_fp32 = output_to_fp32 self._sync_buf = sync_buffer if gpc.is_initialized(parallel_mode) and gpc.get_world_size(parallel_mode) > 1: self._process_group = gpc.get_group(parallel_mode) self._world_size = gpc.get_world_size(parallel_mode) else: self._process_group = None self._world_size = 1 self._sync_buf = False self._first_eval_run = False @property def sync_buffer(self): return self._sync_buf @sync_buffer.setter def sync_buffer(self, state: bool): self._sync_buf = state def _convert_to_fp16(self, input_: Any): if isinstance(input_, Tensor) and input_.dtype == torch.float32: input_ = input_.half() return input_ def _convert_to_fp32(self, input_: Any): if isinstance(input_, Tensor) and input_.dtype == torch.float16: input_ = input_.float() return input_ def _reduce_module_buffer(self): """ All-reduce the buffers (e.g. 
running stats of batch normalization) across data parallel ranks so that all the ranks will produce consistent results when given the same input """ buf_list = [] # find valid buffers for buf in self.model.buffers(): if buf is not None: buf_list.append(buf) # reduce buffers across data parallel ranks if buf_list: coalesced_buf = _flatten_dense_tensors(buf_list) coalesced_buf.div_(self._world_size) dist.all_reduce(coalesced_buf, op=ReduceOp.SUM, group=self._process_group) unflattened_buf_list = _unflatten_dense_tensors(coalesced_buf, buf_list) for old, new in zip(buf_list, unflattened_buf_list): old.copy_(new) def eval(self): self.model.eval() # we only sync buffer in the first eval iteration # so that future eval iterations can be done without communication self._first_eval_run = True def forward(self, *args, **kwargs): # reduce buffers after forward will lead to error # as we cannot change the variables needed for gradient computation after forward # so we sync buffer before forward if (self.training or self._first_eval_run) and self._sync_buf: with torch.no_grad(): self._reduce_module_buffer() if self._first_eval_run: self._first_eval_run = False if args: args = [self._convert_to_fp16(arg) for arg in args] if kwargs: for k, v in kwargs.items(): kwargs[k] = self._convert_to_fp16(v) out = self.model(*args, **kwargs) if self._output_to_fp32: if isinstance(out, Tensor): out = self._convert_to_fp32(out) elif isinstance(out, (tuple, list)): out = [self._convert_to_fp32(val) for val in out] elif isinstance(out, dict): out = {key: self._convert_to_fp32(val) for key, val in out.items()} return out
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
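A minimal sketch of the model wrapper on its own, run in a single process so no process group is formed (requires CUDA; shapes are illustrative):

import torch
import torch.nn as nn

from colossalai.legacy.amp.naive_amp.naive_amp import NaiveAMPModel

model = NaiveAMPModel(nn.Linear(16, 4).cuda(), output_to_fp32=True)

x = torch.randn(8, 16, device="cuda")  # fp32 input is cast to fp16 internally
y = model(x)                           # forward runs in fp16
assert y.dtype == torch.float32        # output cast back for loss computation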
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/naive_amp/__init__.py
colossalai/legacy/amp/naive_amp/__init__.py
import inspect

import torch.nn as nn
from torch.optim import Optimizer

from colossalai.amp.naive_amp.grad_scaler import ConstantGradScaler, DynamicGradScaler
from colossalai.legacy.utils import is_no_pp_or_last_stage

from ._fp16_optimizer import FP16Optimizer
from .naive_amp import NaiveAMPModel, NaiveAMPOptimizer


def convert_to_naive_amp(model: nn.Module, optimizer: Optimizer, amp_config):
    """A helper function to wrap training components with naive AMP modules.
    In this mode, we forcibly cast the model weights and inputs to FP16, and cast the model outputs to FP32
    to calculate loss, which is equivalent to Apex O3.

    Args:
        model (:class:`torch.nn.Module`): your model object
        optimizer (:class:`torch.optim.Optimizer`): your optimizer object
        amp_config (:class:`colossalai.context.Config` or dict): configuration for naive mode amp.

    Returns:
        Tuple: A tuple (model, optimizer)

    The ``amp_config`` should contain parameters below::

        verbose (bool, optional): if set to `True`, will print debug info (Default: False).
        clip_grad_norm (float, optional): clip gradients with this global L2 norm (Default 0).
                                          Note that clipping is ignored if ``clip_grad_norm`` equals 0.
        dynamic_grad_scale (bool): whether to use dynamic grad scaler.
    """
    if isinstance(model, nn.ModuleList):
        # interleaved pipeline
        module_list = []
        for chunk, m in enumerate(model):
            output_to_fp32 = is_no_pp_or_last_stage() and chunk == len(model) - 1
            module_list.append(NaiveAMPModel(m, output_to_fp32=output_to_fp32))
        model = nn.ModuleList(module_list)
    else:
        output_to_fp32 = is_no_pp_or_last_stage()
        model = NaiveAMPModel(model, output_to_fp32=output_to_fp32)

    use_dynamic_grad_scaler = amp_config.pop("dynamic_grad_scale", True)
    if use_dynamic_grad_scaler:
        scaler_class = DynamicGradScaler
    else:
        scaler_class = ConstantGradScaler

    sig = inspect.signature(scaler_class.__init__)
    kwargs = dict()
    for param in sig.parameters.values():
        if param.name in amp_config:
            kwargs[param.name] = amp_config.pop(param.name)
    grad_scaler = scaler_class(**kwargs)
    optimizer = NaiveAMPOptimizer(optimizer, grad_scaler, **amp_config)
    return model, optimizer


__all__ = ["convert_to_naive_amp", "NaiveAMPOptimizer", "FP16Optimizer"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
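And the config-driven path; a hedged sketch assuming ``model`` and ``optimizer`` are plain torch objects built beforehand. Keys accepted by the chosen grad scaler's constructor are popped from ``amp_config``, and the remainder is forwarded to ``NaiveAMPOptimizer``:

from colossalai.legacy.amp.naive_amp import convert_to_naive_amp

amp_config = dict(
    dynamic_grad_scale=True,  # pick DynamicGradScaler over ConstantGradScaler
    initial_scale=2.0**16,    # matched against the scaler constructor and popped
    clip_grad_norm=1.0,       # remaining keys go to NaiveAMPOptimizer
)
# note: amp_config is consumed destructively via pop()
model, optimizer = convert_to_naive_amp(model, optimizer, amp_config)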
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/naive_amp/_utils.py
colossalai/legacy/amp/naive_amp/_utils.py
from typing import List

from torch import Tensor


def has_inf_or_nan(tensor):
    """Check if a tensor has inf or nan values.

    Args:
        tensor (:class:`torch.Tensor`): a torch tensor object

    Returns:
        bool: Whether the tensor has inf or nan. True for yes and False for no.
    """
    try:
        # if tensor is half, the .float() incurs an additional deep copy, but it's necessary if
        # PyTorch's .sum() creates a one-element tensor of the same type as tensor
        # (which is true for some recent versions of PyTorch).
        tensor_sum = float(tensor.float().sum())
        # More efficient version that can be used if .sum() returns a Python scalar
        # tensor_sum = float(tensor.sum())
    except RuntimeError as instance:
        # We want to check if `instance` is actually an overflow exception.
        # RuntimeError could come from a different error.
        # If so, we still want the exception to propagate.
        if "value cannot be converted" not in instance.args[0]:
            raise
        return True
    else:
        if tensor_sum == float("inf") or tensor_sum == -float("inf") or tensor_sum != tensor_sum:
            return True
        return False


def zero_gard_by_list(tensor_list: List[Tensor], set_to_none: bool = True) -> None:
    """Clear the gradients of a list of tensors.

    Note: copied from torch.optim.optimizer.
    """
    for param in tensor_list:
        if param.grad is not None:
            if set_to_none:
                param.grad = None
            else:
                if param.grad.grad_fn is not None:
                    param.grad.detach_()
                else:
                    param.grad.requires_grad_(False)
                param.grad.zero_()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
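A quick check of the overflow helper (runs on CPU):

import torch

from colossalai.legacy.amp.naive_amp._utils import has_inf_or_nan

assert has_inf_or_nan(torch.tensor([1.0, float("inf")]))
assert has_inf_or_nan(torch.tensor([1.0, float("nan")]))
assert not has_inf_or_nan(torch.tensor([1.0, 2.0]))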
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/amp/naive_amp/_fp16_optimizer.py
colossalai/legacy/amp/naive_amp/_fp16_optimizer.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch import torch.distributed as dist from torch.distributed import ProcessGroup from torch.optim import Optimizer from colossalai.amp.naive_amp.grad_scaler import BaseGradScaler from colossalai.kernel.kernel_loader import FusedOptimizerLoader from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.utils import clip_grad_norm_fp32, copy_tensor_parallel_attributes from colossalai.logging import get_dist_logger from colossalai.utils import multi_tensor_applier from ._utils import has_inf_or_nan, zero_gard_by_list try: from colossalai._C import fused_optim except: fused_optim = None __all__ = ["FP16Optimizer"] def load_fused_optim(): global fused_optim if fused_optim is None: fused_optim = FusedOptimizerLoader().load() def _multi_tensor_copy_this_to_that(this, that, overflow_buf=None): """ adapted from Megatron-LM (https://github.com/NVIDIA/Megatron-LM) Use multi-tensor-applier to copy values from one list to another. We don't have a blfoat16 implementation so for now if the overflow_buf is not provided, we default back to simple loop copy to be compatible with bfloat16. """ if overflow_buf: overflow_buf.fill_(0) # Scaling with factor `1.0` is equivalent to copy. global fused_optim load_fused_optim() multi_tensor_applier(fused_optim.multi_tensor_scale, overflow_buf, [this, that], 1.0) else: for this_, that_ in zip(this, that): that_.copy_(this_) class FP16Optimizer(Optimizer): """Float16 optimizer for fp16 and bf16 data types. Args: optimizer (torch.optim.Optimizer): base optimizer such as Adam or SGD grad_scaler (BaseGradScaler): grad scaler for gradient chose in ``constant_grad_scaler`` or ``dynamic_grad_scaler``. clip_grad_norm (float, optional): clip gradients with this global L2 norm. Default 0. Note that clipping is ignored if clip_grad == 0 verbose (bool, optional): if set to `True`, will print debug info. Default False. """ def __init__( self, optimizer: Optimizer, grad_scaler: BaseGradScaler, verbose: bool = False, clip_grad_norm=0, dp_process_group: ProcessGroup = None, mp_process_group: ProcessGroup = None, ): # have a defaults for compatibility with pytorch optim self._optimizer = optimizer self._defaults = optimizer.defaults # fp16-related params assert isinstance(grad_scaler, BaseGradScaler) self._grad_scaler = grad_scaler self._found_overflow = torch.cuda.FloatTensor([0.0]) self._dummy_overflow_buf = torch.cuda.IntTensor([0]) # misc params self._clip_grad_max_norm = clip_grad_norm # get process group def _get_process_group(parallel_mode): if gpc.is_initialized(parallel_mode) and gpc.get_world_size(parallel_mode): return gpc.get_group(parallel_mode) else: return None if dp_process_group is None: dp_process_group = _get_process_group(ParallelMode.DATA) if mp_process_group is None: mp_process_group = _get_process_group(ParallelMode.MODEL) self._dp_process_group = dp_process_group self._mp_process_group = mp_process_group # we maintain three groups of parameters # so that the model can have a mixture # of fp16 and fp32 params # fp16_param_groups: the fp16 params of the model # fp32_master_param_groups: the fp32 params cast from the fp16 param of the model # fp32_param_groups: the fp32 params of the model # NOTE: # 1. fp16_param_groups and fp32_master_param_groups have one-to-one correspondence # 2. 
fp32_param_groups and fp16_param_groups are exclusive of each other self._fp16_param_groups = [] self._fp32_master_param_groups = [] self._fp32_param_groups = [] # For all the groups in the original optimizer: for param_group in self._optimizer.param_groups: fp16_params = [] fp32_master_params = [] fp32_params = [] # For all the parameters in this group: for i, param in enumerate(param_group["params"]): if param.requires_grad: # float16 params: if param.type() in ["torch.cuda.HalfTensor"]: fp16_params.append(param) # Create a fp32 copy fp32_param = param.detach().clone().float() # Copy tensor model parallel attributes. copy_tensor_parallel_attributes(param, fp32_param) # Replace the optimizer params with the new fp32 copy. param_group["params"][i] = fp32_param fp32_master_params.append(fp32_param) # Reset existing state dict key to the new main param. if param in self._optimizer.state: self._optimizer.state[fp32_param] = self._optimizer.state.pop(param) # fp32 params. elif param.type() == "torch.cuda.FloatTensor": fp32_params.append(param) else: raise TypeError( "Expected parameter of type torch.cuda.FloatTensor " f"or torch.cuda.HalfTensor, but got {param.type()}" ) self._fp16_param_groups.append(fp16_params) self._fp32_master_param_groups.append(fp32_master_params) self._fp32_param_groups.append(fp32_params) # Leverage state_dict() and load_state_dict() to # recast preexisting per-param state tensors self._optimizer.load_state_dict(self._optimizer.state_dict()) # log config self._logger = get_dist_logger() if verbose: self._logger.info( f"\n========= FP16 Optimizer Config =========\n" f"Optimizer: {optimizer.__class__.__name__}\n" f"clip_grad_norm = {clip_grad_norm}\n" f"grad_scaler = {self._grad_scaler.__class__.__name__}" f"==========================================", ranks=[0], ) @property def max_norm(self): """Returns the maximum norm of gradient clipping.""" return self._clip_grad_max_norm @property def grad_scaler(self): """Returns the gradient scaler. Returns: :class:`BaseGradScaler`: gradient scaler. """ return self._grad_scaler @property def loss_scale(self): """Returns the loss scale. Returns: int: loss scale. """ return self._grad_scaler.scale @property def optimizer(self): """Returns the optimizer. Returns: :class:`torch.optim.Optimizer`: the optimizer object wrapped. """ return self._optimizer @property def defaults(self): """Returns the default arguments of optimizer. Returns: dict: optimizer arguments saved in defaults of the optimizer wrapped. """ return self._defaults def _check_overflow(self): # clear previous overflow record self._found_overflow.fill_(0.0) # check for overflow for group in self._optimizer.param_groups: for p in group["params"]: if p.grad is not None and has_inf_or_nan(p.grad): self._found_overflow.fill_(1.0) break # all-reduce across dp group if self._dp_process_group: dist.all_reduce(self._found_overflow, op=dist.ReduceOp.MAX, group=self._dp_process_group) # all-reduce over model parallel group if self._mp_process_group: dist.all_reduce(self._found_overflow, op=dist.ReduceOp.MAX, group=self._mp_process_group) return self._found_overflow.item() > 0 def zero_grad(self, set_to_none=True): """Set gradient to zero. Args: set_to_none (bool): Whether set the gradient to None. 
""" # set_to_none = True can save some memory space for param_group in self._optimizer.param_groups: zero_gard_by_list(param_group["params"], set_to_none=set_to_none) def _get_fp32_param_groups_to_update(self): return self._fp32_master_param_groups + self._fp32_param_groups def _unscale_grads(self): for group in self._get_fp32_param_groups_to_update(): for p in group: if p.grad is not None: p.grad.data.div_(self.loss_scale) def _assign_grad_to_fp32_master_param(self): # This only needs to be done for the float16 group. for fp16_param_group, fp32_master_param_group in zip(self._fp16_param_groups, self._fp32_master_param_groups): for fp16_param, fp32_param in zip(fp16_param_group, fp32_master_param_group): if fp16_param.grad is not None: fp32_param.grad = fp16_param.grad.float() # clear unneeded grad on fp16 param fp16_param.grad = None def _update_fp16_param_from_fp32_param(self): fp16_param_data = [] fp32_master_param_data = [] for fp16_group, fp32_group in zip(self._fp16_param_groups, self._fp32_master_param_groups): for fp16_param, fp32_param in zip(fp16_group, fp32_group): fp16_param_data.append(fp16_param.data) fp32_master_param_data.append(fp32_param.data) _multi_tensor_copy_this_to_that( this=fp32_master_param_data, that=fp16_param_data, overflow_buf=self._dummy_overflow_buf ) def step(self): """Update the model parameters.""" # Copy gradients from model params to main params. self._assign_grad_to_fp32_master_param() self._unscale_grads() overflow = self._check_overflow() self._grad_scaler.update(overflow) if overflow: self.zero_grad() # Clip the main gradients. grad_norm = None if self._clip_grad_max_norm > 0.0: grad_norm = self.clip_grad_norm(self._clip_grad_max_norm) if not overflow: # Step the optimizer. self._optimizer.step() # Update params from main params. self._update_fp16_param_from_fp32_param() # Successful update. return True, grad_norm else: return False, None def backward(self, loss): """Execute backward pass. Args: loss (:class:`torch.Tensor`): the loss value. """ scaled_loss = loss * self.grad_scaler.scale scaled_loss.backward() def state_dict(self): """Returns the states of the fp16 optimizer as a dict object.""" state_dict = {} state_dict["optimizer"] = self._optimizer.state_dict() if self.grad_scaler: state_dict["grad_scaler"] = self.grad_scaler.state_dict() state_dict["fp32_master_param_groups"] = self._fp32_master_param_groups return state_dict def load_state_dict(self, state_dict): """Load the states of the fp16 optimizer from a dict object. Args: state_dict (dict): the states of the fp16 optimizer """ # Optimizer. self._optimizer.load_state_dict(state_dict["optimizer"]) # Grad scaler. if "grad_scaler" in state_dict: self.grad_scaler.load_state_dict(state_dict["grad_scaler"]) # Copy data for the main params. if "fp32_master_param_groups" in state_dict: for current_group, ckpt_group in zip( self._fp32_master_param_groups, state_dict["fp32_master_param_groups"] ): for current_param, ckpt_param in zip(current_group, ckpt_group): current_param.data.copy_(ckpt_param.data) def clip_grad_norm(self, clip_grad): """Clip gradients by norm. 
Args: clip_grad (float): the max norm for clipping """ params = [] for param_group in self._optimizer.param_groups: for param in param_group["params"]: params.append(param) return clip_grad_norm_fp32(params, clip_grad) # Promote state so it can be retrieved or set via # "optimizer_instance.state" def _get_state(self): return self._optimizer.state def _set_state(self, value): self._optimizer.state = value state = property(_get_state, _set_state) # Promote param_groups so it can be retrieved or set via # "optimizer_instance.param_groups" # (for example, to adjust the learning rate) def _get_param_groups(self): return self._optimizer.param_groups def _set_param_groups(self, value): self._optimizer.param_groups = value param_groups = property(_get_param_groups, _set_param_groups)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
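``step()`` returns ``(success, grad_norm)``, so a caller can tell an overflow-skipped step from a real update; a hedged sketch with illustrative components (requires CUDA, and assumes ``DynamicGradScaler``'s constructor defaults are acceptable):

import torch
import torch.nn as nn

from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler
from colossalai.legacy.amp.naive_amp import FP16Optimizer

model = nn.Linear(16, 4).half().cuda()  # fp16 params form the fp16 group
optimizer = FP16Optimizer(
    torch.optim.Adam(model.parameters(), lr=1e-3),
    grad_scaler=DynamicGradScaler(),
    clip_grad_norm=1.0,
)

x = torch.randn(8, 16, device="cuda", dtype=torch.float16)
loss = model(x).float().sum()

optimizer.zero_grad()
optimizer.backward(loss)               # multiplies the loss by the current scale
success, grad_norm = optimizer.step()  # success is False when an overflow skipped the update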
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/common.py
colossalai/legacy/utils/common.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from collections import defaultdict from contextlib import contextmanager from typing import Dict, List, Optional, Union import torch import torch.distributed as dist from torch import inf from torch.nn.parameter import Parameter from colossalai.legacy.constants import IS_TENSOR_PARALLEL, NUM_PARTITIONS, TENSOR_PARALLEL_ATTRIBUTES from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.tensor import ProcessGroup from colossalai.tensor import ColoParameter from colossalai.utils.multi_tensor_apply import multi_tensor_applier try: from colossalai._C import fused_optim except: fused_optim = None def print_rank_0(msg: str, logger=None): """Print messages and save logs(optional). This is executed only if you are the rank-0 gpu. Args: msg (str): A string message to output. logger (:class:`colossalai.logging.DistributedLogger`, optional): The logger to record the message, defaults to None. """ if gpc.get_global_rank() == 0: if logger is None: print(msg, flush=True) else: logger.info(msg) def sync_model_param(model, parallel_mode): r"""Make sure data parameters are consistent during Data Parallel Mode. Args: model (:class:`torch.nn.Module`): A pyTorch model on whose parameters you check the consistency. parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): Parallel mode to be checked. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ if gpc.is_initialized(parallel_mode) and gpc.get_world_size(parallel_mode) > 1: for param in model.parameters(): ranks = gpc.get_ranks_in_group(parallel_mode) dist.broadcast(param, src=ranks[0], group=gpc.get_group(parallel_mode)) def is_dp_rank_0(): return not gpc.is_initialized(ParallelMode.DATA) or gpc.is_first_rank(ParallelMode.DATA) def is_tp_rank_0(): return not gpc.is_initialized(ParallelMode.TENSOR) or gpc.is_first_rank(ParallelMode.TENSOR) def is_no_pp_or_last_stage(): return not gpc.is_initialized(ParallelMode.PIPELINE) or gpc.is_last_rank(ParallelMode.PIPELINE) def is_using_ddp(): return gpc.is_initialized(ParallelMode.DATA) and gpc.get_world_size(ParallelMode.DATA) > 1 def is_using_pp(): return gpc.is_initialized(ParallelMode.PIPELINE) and gpc.get_world_size(ParallelMode.PIPELINE) > 1 def is_using_sequence(): return gpc.is_initialized(ParallelMode.SEQUENCE) and gpc.get_world_size(ParallelMode.SEQUENCE) > 1 class model_branch_context(object): def __enter__(self): self.env_status = env.save() def __exit__(self, *exc_info): env.load(**self.env_status) def is_model_parallel_parameter(p): return hasattr(p, IS_TENSOR_PARALLEL) and getattr(p, IS_TENSOR_PARALLEL) def _calc_l2_norm(grads): # we should not global fused_optim if fused_optim is None: from colossalai.kernel.kernel_loader import FusedOptimizerLoader fused_optim = FusedOptimizerLoader().load() norm = 0.0 if len(grads) > 0: dummy_overflow_buf = torch.cuda.IntTensor([0]) norm, _ = multi_tensor_applier( fused_optim.multi_tensor_l2norm, dummy_overflow_buf, [grads], False # no per-parameter norm ) return norm def _calc_lp(grads, norm_type): norm = 0.0 for grad in grads: grad_norm = torch.norm(grad, norm_type) norm += grad_norm**norm_type return norm def _move_norm_to_cuda(norm: Union[float, torch.Tensor]) -> 
Union[float, torch.Tensor]: if torch.is_tensor(norm) and norm.device.type != "cuda": norm = norm.to(torch.cuda.current_device()) return norm def _get_tensor_norm(norm: Union[float, torch.Tensor], move_to_cuda) -> torch.Tensor: if isinstance(norm, float): norm = torch.Tensor([norm]) if move_to_cuda: norm = norm.to(torch.cuda.current_device()) return norm # ======== Gradient Clipping ========= def _compute_local_lp(params: List[ColoParameter], norm_type: float) -> float: if len(params) == 0: return 0.0 grads = [p.grad for p in params] use_cuda_kernel = grads[0].device.type == "cuda" if norm_type == inf: local_lp = max([g.abs().max() for g in grads]) elif norm_type == 2.0 and use_cuda_kernel: local_lp = _calc_l2_norm(grads) ** norm_type else: local_lp = _calc_lp(grads, norm_type) if isinstance(local_lp, torch.Tensor): return local_lp.item() return local_lp def _compute_buckets_lp(params: List[ColoParameter], norm_type: float) -> float: if len(params) == 0: return 0.0 buckets: Dict[Optional[ProcessGroup], List[ColoParameter]] = defaultdict(list) for p in params: if p.is_replicate(): buckets[None].append(p) else: buckets[p.get_process_group().tp_process_group()].append(p) total_lp = 0.0 for group, bucket in buckets.items(): local_lp = _compute_local_lp(bucket, norm_type) if group is not None: local_lp_tensor = torch.tensor([local_lp], device=torch.cuda.current_device()) if norm_type == inf: dist.all_reduce(local_lp_tensor, op=dist.ReduceOp.MAX, group=group) else: dist.all_reduce(local_lp_tensor, group=group) local_lp = local_lp_tensor.item() if norm_type == inf: total_lp = max(total_lp, local_lp) else: total_lp += local_lp return total_lp def _compute_pp_grad_lp(total_lp: float, norm_type: float) -> float: if gpc.is_initialized(ParallelMode.PIPELINE) and gpc.get_world_size(ParallelMode.PIPELINE) > 1: total_lp_tensor = torch.tensor([total_lp], device=torch.cuda.current_device()) if norm_type == inf: dist.all_reduce(total_lp_tensor, op=dist.ReduceOp.MAX, group=gpc.get_group(ParallelMode.PIPELINE)) else: dist.all_reduce(total_lp_tensor, group=gpc.get_group(ParallelMode.PIPELINE)) total_lp = total_lp_tensor.item() return total_lp def _compute_grad_lp(parameters, norm_type: float = 2.0) -> float: if isinstance(parameters, torch.Tensor): parameters = [parameters] grad_dtype = None cpu_grad_params: List[ColoParameter] = [] cuda_grad_params: List[ColoParameter] = [] for p in parameters: if p.grad is None: continue assert isinstance(p, ColoParameter) if grad_dtype is None: grad_dtype = p.grad.dtype assert p.grad.dtype == grad_dtype, f"Expected all grads are {grad_dtype}, got {p.grad.dtype}" if p.grad.device.type == "cuda": cuda_grad_params.append(p) else: cpu_grad_params.append(p) norm_type = float(norm_type) cpu_lp = _compute_buckets_lp(cpu_grad_params, norm_type) cuda_lp = _compute_buckets_lp(cuda_grad_params, norm_type) if norm_type == inf: total_lp = max(cpu_lp, cuda_lp) else: total_lp = cpu_lp + cuda_lp return _compute_pp_grad_lp(total_lp, norm_type) def compute_grad_norm(parameters, norm_type: float = 2.0) -> float: norm_type = float(norm_type) total_norm = _compute_grad_lp(parameters, norm_type) if norm_type != inf: total_norm = total_norm ** (1 / norm_type) return total_norm def _clip_grad_norm(parameters, max_norm: float, total_norm: float) -> None: clip_coef = max_norm / (total_norm + 1e-6) if clip_coef < 1.0: cuda_grads: List[torch.Tensor] = [] cpu_grads: List[torch.Tensor] = [] if isinstance(parameters, torch.Tensor): parameters = [parameters] for p in parameters: if p.grad is None: continue 
if p.grad.device.type == "cuda": cuda_grads.append(p.grad.detach()) else: cpu_grads.append(p.grad.detach()) if len(cuda_grads) > 0: dummy_overflow_buf = torch.cuda.IntTensor([0]) multi_tensor_applier( fused_optim.multi_tensor_scale, dummy_overflow_buf, [cuda_grads, cuda_grads], clip_coef ) for g in cpu_grads: g.mul_(clip_coef) def clip_grad_norm(parameters, max_norm: float, norm_type: float = 2.0) -> float: total_norm = compute_grad_norm(parameters, norm_type) _clip_grad_norm(parameters, max_norm, total_norm) return total_norm def clip_grad_norm_fp32(parameters, max_norm, norm_type=2): """Clips gradient norm of an iterable of parameters whose gradients are in fp32. This is adapted from :func:`torch.nn.utils.clip_grad.clip_grad_norm_` and added functionality to handle model parallel parameters. Note: the gradients are modified in place. Args: parameters (Iterable[:class:`torch.tensor`] or :class:`torch.tensor`): An iterable of Tensors or a single Tensor that will have gradients normalized. max_norm (Union[float, int]): Max norm of the gradients. norm_type (Union[float, int, 'inf']): Type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: float: Total norm of the parameters. """ if isinstance(parameters, torch.Tensor): parameters = [parameters] # Filter parameters based on: # - grad should not be none # - parameter should not be shared # - should not be a replica due to tensor model parallelism params: List[Parameter] = [] has_zero_shared_param: bool = False for param in parameters: if param.grad is not None: # Make sure the grads are in fp32 assert ( param.grad.dtype == torch.float ), f"expected gradient to be dtype torch.float, but got {param.grad.type()}" if hasattr(param, "colo_attr") and param.colo_attr.sharded_data_tensor.is_sharded: has_zero_shared_param = True params.append(param) if len(params) == 0: enable_cuda_kernels = False else: enable_cuda_kernels = params[0].grad.device.type == "cuda" # Norm parameters. max_norm = float(max_norm) norm_type = float(norm_type) # Parameters can be on CPU or CUDA # If parameters are on CPU, disable CUDA kernels # Calculate norm. if norm_type == inf: total_norm = max(p.grad.data.abs().max() for p in params) total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)]) # Take max across all model-parallel GPUs. 
if gpc.is_initialized(ParallelMode.MODEL) and gpc.get_world_size(ParallelMode.MODEL) > 1: dist.all_reduce( total_norm_cuda, op=dist.ReduceOp.MAX, group=gpc.get_group(ParallelMode.MODEL), async_op=False ) if has_zero_shared_param: dist.all_reduce( total_norm_cuda, op=dist.ReduceOp.MAX, group=gpc.get_group(ParallelMode.DATA), async_op=False ) total_norm = total_norm_cuda[0].item() else: tensor_parallel_grads = [] no_tensor_parallel_grads = [] zero_sharded_grads = [] for p in params: if is_model_parallel_parameter(p): reductor = (gpc.get_world_size(ParallelMode.TENSOR) / getattr(p, NUM_PARTITIONS)) ** (1 / norm_type) tensor_parallel_grads.append(p.grad.data / reductor) elif hasattr(p, "colo_attr") and p.colo_attr.sharded_data_tensor.is_sharded: zero_sharded_grads.append(p.grad.data) else: no_tensor_parallel_grads.append(p.grad.data) if norm_type == 2.0 and enable_cuda_kernels: tensor_parallel_norm = _calc_l2_norm(tensor_parallel_grads) ** norm_type no_tensor_parallel_norm = _calc_l2_norm(no_tensor_parallel_grads) ** norm_type zero_sharded_norm = _calc_l2_norm(zero_sharded_grads) ** norm_type else: tensor_parallel_norm = _calc_lp(tensor_parallel_grads, norm_type) no_tensor_parallel_norm = _calc_lp(no_tensor_parallel_grads, norm_type) zero_sharded_norm = _calc_lp(zero_sharded_grads, norm_type) # If norm is type of float, then we convert them into torch.Tensor. tensor_parallel_norm = _get_tensor_norm(tensor_parallel_norm, enable_cuda_kernels) no_tensor_parallel_norm = _get_tensor_norm(no_tensor_parallel_norm, enable_cuda_kernels) zero_sharded_norm = _get_tensor_norm(zero_sharded_norm, enable_cuda_kernels) # If grads are on CPU, the norms is also on CPU. Cast them to CUDA tensors if not enable_cuda_kernels: tensor_parallel_norm = _move_norm_to_cuda(tensor_parallel_norm) no_tensor_parallel_norm = _move_norm_to_cuda(no_tensor_parallel_norm) zero_sharded_norm = _move_norm_to_cuda(zero_sharded_norm) # Sum across all model-parallel GPUs. if gpc.is_initialized(ParallelMode.TENSOR) and len(tensor_parallel_grads) > 0: dist.all_reduce(tensor_parallel_norm, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.TENSOR)) # Sum across all zero sharded GPUs if len(zero_sharded_grads) > 0: dist.all_reduce(zero_sharded_norm, group=gpc.get_group(ParallelMode.DATA)) no_tensor_parallel_norm += zero_sharded_norm total_norm = tensor_parallel_norm + no_tensor_parallel_norm if gpc.is_initialized(ParallelMode.PIPELINE) and gpc.get_world_size(ParallelMode.PIPELINE) > 1: dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.PIPELINE)) total_norm = total_norm ** (1.0 / norm_type) if torch.is_tensor(total_norm): total_norm = total_norm.item() # Scale. 
clip_coeff = max_norm / (total_norm + 1.0e-6) if clip_coeff < 1.0: if enable_cuda_kernels: grads = [p.grad.detach() for p in params] dummy_overflow_buf = torch.cuda.IntTensor([0]) multi_tensor_applier(fused_optim.multi_tensor_scale, dummy_overflow_buf, [grads, grads], clip_coeff) else: for p in params: p.grad.detach().mul_(clip_coeff) return total_norm def count_zeros_fp32(parameters): if isinstance(parameters, torch.Tensor): parameters = [parameters] # Filter parameters based on: # - grad should not be none # - parameter should not be shared # - should not be a replica due to tensor model parallelism total_num_zeros = 0.0 for param in parameters: grad_not_none = param.grad is not None is_not_tp_duplicate = param_is_not_tensor_parallel_duplicate(param) if grad_not_none and is_not_tp_duplicate: grad = param.grad.detach() num_zeros = grad.numel() - torch.count_nonzero(grad) total_num_zeros = num_zeros + total_num_zeros total_num_zeros = torch.IntTensor([int(total_num_zeros)]).cuda() # Sum across all model-parallel GPUs. ops = [] ops.append( dist.all_reduce(total_num_zeros, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.TENSOR), async_op=True) ) if gpc.is_initialized(ParallelMode.PIPELINE): ops.append( dist.all_reduce( total_num_zeros, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.PIPELINE), async_op=True ) ) for req in ops: req.wait() total_num_zeros = total_num_zeros.item() return total_num_zeros def copy_tensor_parallel_attributes(src_tensor, dst_tensor): for attr in TENSOR_PARALLEL_ATTRIBUTES: if hasattr(src_tensor, attr): val = getattr(src_tensor, attr) setattr(dst_tensor, attr, val) def param_is_not_tensor_parallel_duplicate(param): return (hasattr(param, IS_TENSOR_PARALLEL) and getattr(param, IS_TENSOR_PARALLEL)) or ( gpc.get_local_rank(ParallelMode.TENSOR) == 0 ) @contextmanager def switch_virtual_pipeline_parallel_rank(rank): prev_rank = gpc.virtual_pipeline_parallel_rank try: gpc.set_virtual_pipeline_parallel_rank(rank) yield finally: gpc.set_virtual_pipeline_parallel_rank(prev_rank)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
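The clipping helpers in the file above split the work into two phases: compute a global gradient norm, then scale every gradient by max_norm / total_norm. A minimal single-process sketch of that pattern in plain PyTorch (no tensor/pipeline-parallel reductions, no fused CUDA kernel; clip_grads_sketch is a hypothetical name, not part of the library):

import torch

def clip_grads_sketch(parameters, max_norm: float, norm_type: float = 2.0) -> float:
    # phase 1: compute the total gradient norm over all parameters
    grads = [p.grad for p in parameters if p.grad is not None]
    if not grads:
        return 0.0
    if norm_type == float("inf"):
        total_norm = max(g.abs().max().item() for g in grads)
    else:
        # sum of |g|^p over every gradient, then the p-th root
        total_norm = sum(g.abs().pow(norm_type).sum().item() for g in grads) ** (1.0 / norm_type)
    # phase 2: scale in place only when the norm exceeds the budget
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1.0:
        for g in grads:
            g.detach().mul_(clip_coef)
    return total_norm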
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/checkpointing.py
colossalai/legacy/utils/checkpointing.py
from collections import OrderedDict from itertools import chain import torch import torch.distributed as dist from colossalai.legacy.constants import IS_TENSOR_PARALLEL from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc try: from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX except ImportError: _EXTRA_STATE_KEY_SUFFIX = "_extra_state" from .common import is_using_pp __all__ = ["save_checkpoint", "load_checkpoint"] def broadcast_state_dict(state_dict, parallel_mode): state_dict = [state_dict.copy() if isinstance(state_dict, dict) else state_dict] src_rank = gpc.get_ranks_in_group(parallel_mode)[0] dist.broadcast_object_list(state_dict, src=src_rank, group=gpc.get_cpu_group(parallel_mode)) return state_dict[0] def partition_tensor_parallel_state_dict( state_dict: OrderedDict, parallel_mode: ParallelMode, dims: dict = dict(), partition_states: dict = dict() ): src_rank = gpc.get_ranks_in_group(parallel_mode)[0] depth = gpc.get_world_size(parallel_mode) group = gpc.get_cpu_group(parallel_mode) is_rank0 = gpc.get_local_rank(parallel_mode) == 0 partition_info = [None] if is_rank0: partition_info_dict = OrderedDict() for key, param in state_dict.items(): dim = dims[key] is_partitioned = partition_states[key] shape = list(param.shape) if is_partitioned: shape[dim] = shape[dim] // depth partition_info_dict[key] = (is_partitioned, param.dtype, shape, dim) partition_info[0] = partition_info_dict dist.broadcast_object_list(partition_info, src_rank, group=group) partitioned_state = OrderedDict() for key, (is_partitioned, dtype, shape, dim) in partition_info[0].items(): if is_partitioned: output = torch.empty(shape, dtype=dtype) if is_rank0: scatter_list = [t.contiguous() for t in state_dict[key].chunk(depth, dim)] else: scatter_list = None dist.scatter(output, scatter_list, src_rank, group=group) else: if is_rank0: output = state_dict[key] else: output = torch.empty(shape, dtype=dtype) dist.broadcast(output, src_rank, group=group) partitioned_state[key] = output return partitioned_state def gather_tensor_parallel_state_dict( state_dict: OrderedDict, parallel_mode: ParallelMode, dims: dict = dict(), partition_states: dict = dict(), keep_vars: bool = False, ): dst_rank = gpc.get_ranks_in_group(parallel_mode)[0] depth = gpc.get_world_size(parallel_mode) for key in list(state_dict.keys()): param = state_dict.pop(key) param = param if keep_vars else param.detach() dim = dims.get(key, 0) do_partition = partition_states.get(key, True) if do_partition: temp = param.transpose(0, dim).contiguous() gather_list = None if gpc.get_local_rank(parallel_mode) == 0: shape = list(param.shape) shape[0], shape[dim] = shape[dim], shape[0] shape[0] *= depth param = torch.empty(shape, dtype=param.dtype, device=param.device) gather_list = list(torch.chunk(param, depth, dim=0)) dist.gather(temp, gather_list, dst=dst_rank, group=gpc.get_cpu_group(parallel_mode)) param = torch.transpose(param, 0, dim) # update params in state_dict only on local rank 0 if gpc.get_local_rank(parallel_mode) == 0: state_dict[key] = param return state_dict def _send_state_dict(state_dict, dst, parallel_mode): state_tensor, state_size = dist.distributed_c10d._object_to_tensor(state_dict) dist.send(state_size, dst, group=gpc.get_cpu_group(parallel_mode)) dist.send(state_tensor, dst, group=gpc.get_cpu_group(parallel_mode)) def _recv_state_dict(src, parallel_mode): state_size = torch.tensor([0], dtype=torch.long) dist.recv(state_size, src, 
group=gpc.get_cpu_group(parallel_mode)) state_tensor = torch.empty(state_size.item(), dtype=torch.uint8) dist.recv(state_tensor, src, group=gpc.get_cpu_group(parallel_mode)) state_dict = dist.distributed_c10d._tensor_to_object(state_tensor, state_size) return state_dict def partition_pipeline_parallel_state_dict(model, state_dict): pipeline_state = OrderedDict() if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # receive all states from prev stage if not gpc.is_first_rank(ParallelMode.PIPELINE): state_dict = _recv_state_dict(gpc.get_prev_global_rank(ParallelMode.PIPELINE), ParallelMode.PIPELINE) # move states to output for name, _ in model.named_parameters(recurse=True): if name in state_dict: pipeline_state[name] = state_dict.pop(name) for name, _ in model.named_buffers(recurse=True): if name in state_dict: pipeline_state[name] = state_dict.pop(name) for name, _ in model.named_modules(): extra_state_key = name + "." + _EXTRA_STATE_KEY_SUFFIX if extra_state_key in state_dict: pipeline_state[extra_state_key] = state_dict.pop(extra_state_key) # send rest states to next stage if not gpc.is_last_rank(ParallelMode.PIPELINE): _send_state_dict(state_dict, gpc.get_next_global_rank(ParallelMode.PIPELINE), ParallelMode.PIPELINE) return pipeline_state def gather_pipeline_parallel_state_dict(state_dict): gathered_states = ( [None for _ in range(gpc.get_world_size(ParallelMode.PIPELINE))] if gpc.get_local_rank(ParallelMode.PIPELINE) == 0 else None ) dist.gather_object( state_dict, gathered_states, dst=gpc.get_ranks_in_group(ParallelMode.PIPELINE)[0], group=gpc.get_cpu_group(ParallelMode.PIPELINE), ) state_dict = ( OrderedDict(chain.from_iterable(state.items() for state in gathered_states)) if gpc.get_local_rank(ParallelMode.PIPELINE) == 0 else OrderedDict() ) return state_dict def save_checkpoint( file, epoch: int, model: torch.nn.Module, optimizer: torch.optim.Optimizer = None, lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None, **kwargs, ): """Stores the checkpoint to disk. Saves all the training components' parameters or buffers, such as model, optimizer, lr_scheduler etc. into a checkpoint dictionary. Args: file: a file-like object (has to implement write and flush) or a string or os.PathLike object containing a file name. epoch (int): Epoch number (indicates how many epochs have you trained this model). model (:class:`torch.nn.Module`): Model to be saved. optimizer (Union[:class:`torch.optim.Optimizer`, :class:`colossalai.nn.optimizer`]): Optimizer to be saved. lr_scheduler (Union[:class:`torch.optim.lr_scheduler`, :class:`colossalai.nn.lr_scheduler`], optional): lr_scheduler to be saved, defaults to None. 
pickle_module: module used for pickling metadata and objects pickle_protocol: can be specified to override the default protocol """ # ckpt container checkpoint = {"epoch": epoch} model_state = model.state_dict() if is_using_pp() and gpc.get_local_rank(ParallelMode.TENSOR) == 0: model_state = gather_pipeline_parallel_state_dict(model_state) if gpc.get_global_rank() == 0: checkpoint["model"] = model_state # if optimizer is not None: # checkpoint['optimizer'] = optimizer.state_dict() # if lr_scheduler is not None: # checkpoint['lr_scheduler'] = lr_scheduler.state_dict() torch.save(checkpoint, file, **kwargs) def broadcast_model(model: torch.nn.Module): src_rank = gpc.get_ranks_in_group(ParallelMode.TENSOR)[0] for p in model.parameters(): if not getattr(p, IS_TENSOR_PARALLEL, False) and p.storage().size() > 0: group = ( gpc.get_group(ParallelMode.TENSOR) if p.device.type == "cuda" else gpc.get_cpu_group(ParallelMode.TENSOR) ) dist.broadcast(p, src_rank, group=group) def load_checkpoint( file, model: torch.nn.Module, optimizer: torch.optim.Optimizer = None, lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None, strict: bool = True, ): """Loads training states from a checkpoint file. Args: file: a file-like object (has to implement read(), readline(), tell(), and seek()), or a string or os.PathLike object containing a file name. model (:class:`torch.nn.Module`): Model to load saved weights and buffers. optimizer (Union[:class:`torch.optim.Optimizer`, :class:`colossalai.nn.optimizer`]): Optimizer to recuperate. lr_scheduler (:class:`torch.optim.lr_scheduler._LRScheduler`, optional): lr_scheduler to recuperate, defaults to None. strict (bool, optional): Whether to strictly enforce that the keys in :attr:`state_dict` of the checkpoint match the names of parameters and buffers in model, defaults to True. Returns: int: The saved epoch number. Raises: RuntimeError: Raise error if the model/optimizer cannot successfully be recuperated """ state_dict = ( torch.load(file, map_location=torch.device("cpu")) if gpc.get_local_rank(ParallelMode.MODEL) == 0 else None ) # model states model_state = state_dict.pop("model") if state_dict is not None else dict() # pipeline if is_using_pp(): model_state = partition_pipeline_parallel_state_dict(model, model_state) try: model.load_state_dict(model_state, strict=strict) broadcast_model(model) except RuntimeError as e: error_msgs = str(e) if error_msgs.startswith("Error(s) in loading state_dict for "): error_msgs = error_msgs.split("\n\t")[1:] dst_rank = gpc.get_ranks_in_group(ParallelMode.MODEL)[0] all_error_msgs = [None for _ in range(gpc.get_world_size(ParallelMode.MODEL))] dist.gather_object(error_msgs, all_error_msgs, dst=dst_rank, group=gpc.get_cpu_group(ParallelMode.MODEL)) if gpc.get_global_rank() == 0: all_error_msgs = list(chain.from_iterable(all_error_msgs)) raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( model.__class__.__name__, "\n\t".join(all_error_msgs) ) ) else: raise e # broadcast the rest states state_dict = broadcast_state_dict(state_dict, ParallelMode.MODEL) # # optimizer states # if optimizer is not None and 'optimizer' in state_dict: # optimizer.load_state_dict(state_dict['optimizer']) # # lr scheduler states # if lr_scheduler is not None and 'lr_scheduler' in state_dict: # lr_scheduler.load_state_dict(state_dict['lr_scheduler']) # last epoch last_epoch = state_dict.pop("epoch", -1) return last_epoch
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
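partition_tensor_parallel_state_dict above follows a broadcast-metadata-then-scatter pattern: rank 0 announces shapes and dtypes, then chunks each partitioned tensor along its parallel dimension and scatters one shard to every rank. A sketch of just the scatter step, assuming every rank already knows the shard shape and dtype (scatter_shard is a hypothetical helper; the real function derives this metadata from the broadcast partition_info):

import torch
import torch.distributed as dist

def scatter_shard(full_param, shard_shape, dtype, dim, src, group):
    # every rank receives one chunk of the rank-`src` tensor along `dim`
    world_size = dist.get_world_size(group)
    output = torch.empty(shard_shape, dtype=dtype)
    scatter_list = None
    if dist.get_rank() == src:
        # contiguous() matters here: dist.scatter ships raw storage
        scatter_list = [t.contiguous() for t in full_param.chunk(world_size, dim)]
    dist.scatter(output, scatter_list, src=src, group=group)
    return output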
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/memory.py
colossalai/legacy/utils/memory.py
import gc from collections import namedtuple import psutil import torch import torch.distributed as dist from packaging import version from colossalai.accelerator import get_accelerator from colossalai.legacy.core import global_context as gpc from colossalai.logging import get_dist_logger _GLOBAL_CUDA_MEM_FRACTION = 1.0 _GLOBAL_CPU_MEM_CAPACITY = -1 def _bytes_to_MB(val, decimal=2): """A byte-to-Megabyte converter, default using binary notation. :param val: X bytes to convert :return: X' MB """ return round(val / (1024 * 1024), decimal) # copy from PatrickStar def _get_cpu_memory_info(): ps_mem_info = namedtuple("ps_mem_info", ["total", "free", "cached", "buffers", "used"]) try: # psutil reads the memory info from /proc/memory_info, # which results in returning the host memory instead of # that of container. # Here we try to read the container memory with method in: # https://stackoverflow.com/a/46213331/5163915 mems = {} with open("/sys/fs/cgroup/memory/memory.meminfo", "rb") as f: for line in f: fields = line.split() mems[fields[0]] = int(fields[1]) * 1024 total = mems[b"MemTotal:"] free = mems[b"MemFree:"] cached = mems[b"Cached:"] buffers = mems[b"Buffers:"] used = total - free - cached - buffers if used < 0: used = total - free mem_info = ps_mem_info(total=total, free=free, cached=cached, buffers=buffers, used=used) except FileNotFoundError: mems = psutil.virtual_memory() mem_info = ps_mem_info( total=mems.total, free=mems.free, cached=mems.cached, buffers=mems.buffers, used=mems.used, ) return mem_info def report_memory_usage(message, logger=None, report_cpu=False): """Calculate and print RAM usage (in GB) Args: message (str): A prefix message to add in the log. logger (:class:`colossalai.logging.DistributedLogger`): The logger used to record memory information. report_cpu (bool, optional): Whether to report CPU memory. Raises: EnvironmentError: Raise error if no distributed environment has been initialized. """ if not dist.is_initialized(): raise EnvironmentError("No distributed environment is initialized") gpu_allocated = _bytes_to_MB(torch.cuda.memory_allocated()) gpu_max_allocated = _bytes_to_MB(torch.cuda.max_memory_allocated()) gpu_cached = _bytes_to_MB(torch.cuda.memory_reserved()) gpu_max_cached = _bytes_to_MB(torch.cuda.max_memory_reserved()) full_log = ( f"{message}: GPU: allocated {gpu_allocated} MB, max allocated {gpu_max_allocated} MB, " + f"cached: {gpu_cached} MB, max cached: {gpu_max_cached} MB" ) if report_cpu: # python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports gc.collect() vm_stats = psutil.virtual_memory() vm_used = _bytes_to_MB(vm_stats.total - vm_stats.available) full_log += f", CPU Virtual Memory: used = {vm_used} MB, percent = {vm_stats.percent}%" if logger is None: logger = get_dist_logger() logger.info(full_log) # get the peak memory to report correct data, so reset the counter for the next call if hasattr(torch.cuda, "reset_peak_memory_stats"): # pytorch 1.4+ torch.cuda.reset_peak_memory_stats() def colo_device_memory_capacity(device: torch.device) -> int: """ Get the capacity of the memory of the device Args: device (torch.device): a device Returns: int: size in byte """ assert isinstance(device, torch.device) if device.type == "cpu": # In the context of 1-CPU-N-GPU, the memory capacity of the current process is 1/N overall CPU memory. 
return colo_get_cpu_memory_capacity() / gpc.num_processes_on_current_node if device.type == "cuda": return ( torch.cuda.get_device_properties(get_accelerator().get_current_device()).total_memory * _GLOBAL_CUDA_MEM_FRACTION ) def colo_device_memory_used(device: torch.device) -> int: """ Get the device memory on device belonging to the current process. Args: device (torch.device): a device Returns: int: memory size in bytes """ if device.type == "cpu": mem_info = _get_cpu_memory_info() # In the context of 1-CPU-N-GPU, the memory usage of the current process is 1/N CPU memory used. # Each process consumes the same amount of memory. ret = mem_info.used / gpc.num_processes_on_current_node return ret elif device.type == "cuda": ret: int = torch.cuda.memory_allocated(device) # get the peak memory to report correct data, so reset the counter for the next call if hasattr(torch.cuda, "reset_peak_memory_stats"): # pytorch 1.4+ torch.cuda.reset_peak_memory_stats(device) return ret def colo_set_process_memory_fraction(ratio: float) -> None: """colo_set_process_memory_fraction set how much cuda memory used on the gpu belonging to the current process. Args: ratio (float): a ratio between 0. ~ 1. """ if version.parse(torch.__version__) < version.parse("1.8"): logger = get_dist_logger("colo_set_process_memory_fraction") logger.warning("colo_set_process_memory_fraction failed because torch version is less than 1.8") return global _GLOBAL_CUDA_MEM_FRACTION _GLOBAL_CUDA_MEM_FRACTION = ratio torch.cuda.set_per_process_memory_fraction(_GLOBAL_CUDA_MEM_FRACTION, get_accelerator().get_current_device()) def colo_set_cpu_memory_capacity(size: int) -> None: global _GLOBAL_CPU_MEM_CAPACITY mem_info = _get_cpu_memory_info() total_size = mem_info.total if size <= total_size: _GLOBAL_CPU_MEM_CAPACITY = size else: _GLOBAL_CPU_MEM_CAPACITY = total_size def colo_get_cpu_memory_capacity() -> int: """ Get the cpu memory capacity. We may not use all of it. Returns: int: _description_ """ global _GLOBAL_CPU_MEM_CAPACITY if _GLOBAL_CPU_MEM_CAPACITY == -1: mem_info = _get_cpu_memory_info() return mem_info.total else: return _GLOBAL_CPU_MEM_CAPACITY
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
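Usage sketch for the capacity knobs defined above, assuming colossalai.legacy.utils is importable and a distributed context has been initialized:

from colossalai.legacy.utils import (
    colo_get_cpu_memory_capacity,
    colo_set_cpu_memory_capacity,
    colo_set_process_memory_fraction,
)

# cap this process at 40% of the CUDA device (warns and no-ops on torch < 1.8)
colo_set_process_memory_fraction(0.4)
# reserve a 32 GB CPU budget; requests above physical RAM are clipped to the total
colo_set_cpu_memory_capacity(32 * 1024**3)
print(colo_get_cpu_memory_capacity())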
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/__init__.py
colossalai/legacy/utils/__init__.py
from .checkpointing import load_checkpoint, save_checkpoint
from .common import (
    clip_grad_norm_fp32,
    copy_tensor_parallel_attributes,
    count_zeros_fp32,
    is_dp_rank_0,
    is_model_parallel_parameter,
    is_no_pp_or_last_stage,
    is_tp_rank_0,
    is_using_ddp,
    is_using_pp,
    is_using_sequence,
    param_is_not_tensor_parallel_duplicate,
    print_rank_0,
    switch_virtual_pipeline_parallel_rank,
    sync_model_param,
)
from .data_sampler import DataParallelSampler, get_dataloader
from .memory import (
    colo_device_memory_capacity,
    colo_device_memory_used,
    colo_get_cpu_memory_capacity,
    colo_set_cpu_memory_capacity,
    colo_set_process_memory_fraction,
    report_memory_usage,
)

__all__ = [
    "DataParallelSampler",
    "get_dataloader",
    "save_checkpoint",
    "load_checkpoint",
    "colo_device_memory_capacity",
    "colo_device_memory_used",
    "colo_get_cpu_memory_capacity",
    "colo_set_cpu_memory_capacity",
    "colo_set_process_memory_fraction",
    "report_memory_usage",
    "clip_grad_norm_fp32",
    "copy_tensor_parallel_attributes",
    "count_zeros_fp32",
    "is_dp_rank_0",
    "is_model_parallel_parameter",
    "is_no_pp_or_last_stage",
    "is_tp_rank_0",
    "is_using_ddp",
    "is_using_pp",
    "is_using_sequence",
    "param_is_not_tensor_parallel_duplicate",
    "print_rank_0",
    "switch_virtual_pipeline_parallel_rank",
    "sync_model_param",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/activation_checkpoint.py
colossalai/legacy/utils/activation_checkpoint.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import weakref import torch from torch.utils.checkpoint import check_backward_validity, detach_variable from colossalai.accelerator import get_accelerator from colossalai.legacy.context.random import get_current_mode, get_states, set_mode, set_seed_states, sync_states def copy_to_device(obj, device): if torch.is_tensor(obj): # Notice: # When in no_grad context, requires_gard is False after movement ret = obj.to(device).detach() ret.requires_grad = obj.requires_grad return ret elif isinstance(obj, list): return [copy_to_device(i, device) for i in obj] elif isinstance(obj, tuple): return tuple([copy_to_device(v, device) for v in obj]) elif isinstance(obj, dict): return {k: copy_to_device(v, device) for k, v in obj.items()} else: return obj class CheckpointFunction(torch.autograd.Function): @staticmethod def forward(ctx, run_function, activation_offload=False, *args): check_backward_validity(args) ctx.run_function = run_function ctx.activation_offload = activation_offload ctx.device = get_accelerator().get_current_device() # preserve rng states ctx.fwd_cpu_rng_state = torch.get_rng_state() sync_states() ctx.fwd_seed_states = get_states(copy=True) ctx.fwd_current_mode = get_current_mode() if hasattr(torch, "is_autocast_enabled"): ctx.had_autocast_in_fwd = torch.is_autocast_enabled() else: ctx.had_autocast_in_fwd = False if activation_offload: inputs_cuda = copy_to_device(args, ctx.device) else: inputs_cuda = args with torch.no_grad(): outputs = run_function(*inputs_cuda) # Save non-tensor inputs in ctx, keep a placeholder None for tensors # to be filled out during the backward. ctx.inputs = [] ctx.tensor_indices = [] tensor_inputs = [] for i, arg in enumerate(args): if torch.is_tensor(arg): if activation_offload: tensor_inputs.append(copy_to_device(arg, "cpu")) else: tensor_inputs.append(arg) ctx.tensor_indices.append(i) ctx.inputs.append(None) else: ctx.inputs.append(arg) if activation_offload: ctx.tensor_inputs = tensor_inputs else: ctx.save_for_backward(*tensor_inputs) return outputs @staticmethod def backward(ctx, *args): if not torch.autograd._is_checkpoint_valid(): raise RuntimeError( "Checkpointing is not compatible with .grad() or when an `inputs` parameter is " "passed to .backward(). Please use .backward() and do not pass its `inputs` argument." ) # Copy the list to avoid modifying original list. inputs = list(ctx.inputs) tensor_indices = ctx.tensor_indices if ctx.activation_offload: tensors = ctx.tensor_inputs else: tensors = ctx.saved_tensors # store the current states bwd_cpu_rng_state = torch.get_rng_state() sync_states() bwd_seed_states = get_states(copy=True) bwd_current_mode = get_current_mode() # set the states to what it used to be torch.set_rng_state(ctx.fwd_cpu_rng_state) for parallel_mode, state in ctx.fwd_seed_states.items(): set_seed_states(parallel_mode, state) set_mode(ctx.fwd_current_mode) if ctx.activation_offload: tensors = copy_to_device(tensors, ctx.device) # Fill in inputs with appropriate saved tensors. 
        for i, idx in enumerate(tensor_indices):
            inputs[idx] = tensors[i]
        detached_inputs = detach_variable(tuple(inputs))
        if ctx.had_autocast_in_fwd:
            with torch.enable_grad(), get_accelerator().autocast()():
                outputs = ctx.run_function(*detached_inputs)
        else:
            with torch.enable_grad():
                outputs = ctx.run_function(*detached_inputs)

        if isinstance(outputs, torch.Tensor):
            outputs = (outputs,)

        # recover the rng states
        torch.set_rng_state(bwd_cpu_rng_state)
        for parallel_mode, state in bwd_seed_states.items():
            set_seed_states(parallel_mode, state)
        set_mode(bwd_current_mode)

        # run backward() with only the tensors that require grad
        outputs_with_grad = []
        args_with_grad = []
        for i in range(len(outputs)):
            if torch.is_tensor(outputs[i]) and outputs[i].requires_grad:
                outputs_with_grad.append(outputs[i])
                args_with_grad.append(args[i])
        if len(outputs_with_grad) == 0:
            raise RuntimeError("none of the outputs has requires_grad=True, so this checkpoint() is not necessary")
        torch.autograd.backward(outputs_with_grad, args_with_grad)
        grads = tuple(inp.grad if isinstance(inp, torch.Tensor) else None for inp in detached_inputs)

        return (None, None) + grads


def checkpoint(function, activation_offload, *args, use_reentrant: bool = True):
    """Checkpoint the computation while preserving the rng states, modified from PyTorch torch.utils.checkpoint.

    Args:
        function: Describes the forward pass function. It should know how to handle the input tuples.
        activation_offload: Whether to offload activations to CPU.
        args (list): Tuple containing the parameters of the function.
        use_reentrant: Whether to use the reentrant implementation. With use_reentrant=False, there is
            more flexibility for the user to define their checkpoint function.

    Returns:
        Output of running function with provided args.
    """
    if use_reentrant:
        return CheckpointFunction.apply(function, activation_offload, *args)
    else:
        return _checkpoint_without_reentrant(
            function,
            activation_offload,
            *args,
        )


def _checkpoint_without_reentrant(function, activation_offload=False, *args):
    # store rng states
    fwd_cpu_state = torch.get_rng_state()
    sync_states()
    fwd_seed_states = get_states(copy=True)
    fwd_current_mode = get_current_mode()

    # check if autocast is in use
    if hasattr(torch, "is_autocast_enabled"):
        has_autocast_in_fwd = torch.is_autocast_enabled()
    else:
        has_autocast_in_fwd = False

    # use a WeakKeyDictionary to store all the activations the first time unpack is called
    storage: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
    weak_holder_list = []

    # class for weakref.ref
    class Holder:
        pass

    # return a Holder object for the later unpack process
    def pack(x):
        res = Holder()
        weak_holder_list.append(weakref.ref(res))
        return res

    # unpack hook
    def unpack(x):
        unpack_counter = 0

        # re-compute all the activations inside the function when unpack is first called
        if len(storage) == 0:

            def inner_pack(inner):
                nonlocal unpack_counter
                unpack_counter += 1
                # If the holder went out of scope, the SavedVariable is dead and so
                # the value will never be read from the storage. Skip filling it.
                if weak_holder_list[unpack_counter - 1]() is None:
                    return
                # Use detach here to ensure we don't keep the temporary autograd
                # graph created during the second forward
                storage[weak_holder_list[unpack_counter - 1]()] = inner.detach()
                return

            def inner_unpack(packed):
                raise RuntimeError("You are calling backwards on a tensor that is never exposed. Please open an issue.")

            # restore rng states
            torch.set_rng_state(fwd_cpu_state)
            for parallel_mode, state in fwd_seed_states.items():
                set_seed_states(parallel_mode, state)
            set_mode(fwd_current_mode)

            # reload args onto the device if needed; a new tuple must be built here,
            # since reassigning the loop variable (`arg = arg.to(...)`) would have no effect
            if activation_offload:
                fwd_args = tuple(a.to(device=device) if torch.is_tensor(a) else a for a in args)
            else:
                fwd_args = args

            # rerun the forward; inner_pack stores all the activations in storage
            if has_autocast_in_fwd:
                with torch.enable_grad(), get_accelerator().autocast()(), torch.autograd.graph.saved_tensors_hooks(
                    inner_pack, inner_unpack
                ):
                    _unused = function(*fwd_args)
            else:
                with torch.enable_grad(), torch.autograd.graph.saved_tensors_hooks(inner_pack, inner_unpack):
                    _unused = function(*fwd_args)

        if x not in storage:
            raise RuntimeError(
                "Attempt to retrieve a tensor saved by autograd multiple times without checkpoint"
                " recomputation being triggered in between, this is not currently supported. Please"
                " open an issue with details on your use case so that we can prioritize adding this."
            )

        return storage[x]

    # get the device if we need to offload activations (read by `unpack` via closure)
    if activation_offload:
        device = get_accelerator().get_current_device()

    # run function with pack and unpack as saved_tensors_hooks
    with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
        output = function(*args)

    # offload activations if needed; note `.to()` returns a new tensor, so as written
    # this rebinding does not actually move the caller's tensors (known limitation)
    if activation_offload:
        for arg in args:
            if torch.is_tensor(arg):
                arg = arg.to(device="cpu")

    return output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
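_checkpoint_without_reentrant above is built on torch.autograd.graph.saved_tensors_hooks (PyTorch 1.10+). A self-contained sketch of that hook mechanism, trimmed down to a detached stash instead of full recomputation:

import torch

x = torch.randn(4, requires_grad=True)

def pack(t):
    # called whenever autograd saves a tensor for backward;
    # the real code returns a weak Holder and recomputes on demand
    return t.detach().clone()

def unpack(stashed):
    # called the first time backward needs the saved tensor
    return stashed

with torch.autograd.graph.saved_tensors_hooks(pack, unpack):
    y = (x * x).sum()

y.backward()
print(x.grad)  # 2 * x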
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/profiler/profiler.py
colossalai/legacy/utils/profiler/profiler.py
import gzip import json import os import tempfile from typing import Any, Callable, Iterable, List, Optional from torch.autograd import ProfilerActivity from torch.profiler import profile as torch_profile from torch.profiler.profiler import ProfilerAction from colossalai.legacy.engine import Engine from colossalai.legacy.utils.profiler.extention import ProfilerExtension from colossalai.legacy.utils.profiler.stateful_tensor_mem_extention import StatefulTensorMemoryProfilerExtention from colossalai.logging import get_dist_logger class profile(torch_profile): """Profiler context manager. Args: activities (iterable): list of activity groups (CPU, CUDA) to use in profiling, supported values: ``torch.profiler.ProfilerActivity.CPU``, ``torch.profiler.ProfilerActivity.CUDA``. Default value: ProfilerActivity.CPU and (when available) ProfilerActivity.CUDA. schedule (callable): callable that takes step (int) as a single parameter and returns ``ProfilerAction`` value that specifies the profiler action to perform at each step. on_trace_ready (callable): callable that is called at each step when ``schedule`` returns ``ProfilerAction.RECORD_AND_SAVE`` during the profiling. engine (Optional[Engine], optional): An ``Engine`` instance. Defaults to None. record_shapes (bool): save information about operator's input shapes. profile_memory (bool): track tensor memory allocation/deallocation. with_stack (bool): record source information (file and line number) for the ops. with_flops (bool): use formula to estimate the FLOPs (floating point operations) of specific operators (matrix multiplication and 2D convolution). with_modules (bool): record module hierarchy (including function names) corresponding to the callstack of the op. e.g. If module A's forward call's module B's forward which contains an aten::add op, then aten::add's module hierarchy is A.B Note that this support exist, at the moment, only for TorchScript models and not eager mode models. profile_stateful_tensor_memory (bool): track stateful tensor memory usage. ``engine`` must not be None if you enable this. .. note:: Use :func:`~torch.profiler.schedule` to generate the callable schedule. Non-default schedules are useful when profiling long training jobs and allow the user to obtain multiple traces at the different iterations of the training process. The default schedule simply records all the events continuously for the duration of the context manager. .. note:: Use :func:`~torch.profiler.tensorboard_trace_handler` to generate result files for TensorBoard: ``on_trace_ready=torch.profiler.tensorboard_trace_handler(dir_name)`` After profiling, result files can be found in the specified directory. Use the command: ``tensorboard --logdir dir_name`` to see the results in TensorBoard. For more information, see `PyTorch Profiler TensorBoard Plugin <https://github.com/pytorch/kineto/tree/master/tb_plugin>`__ .. note:: Enabling shape and stack tracing results in additional overhead. When record_shapes=True is specified, profiler will temporarily hold references to the tensors; that may further prevent certain optimizations that depend on the reference count and introduce extra tensor copies. Examples: .. code-block:: python with torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA, ] ) as p: code_to_profile() print(p.key_averages().table( sort_by="self_cuda_time_total", row_limit=-1)) Using the profiler's ``schedule``, ``on_trace_ready`` and ``step`` functions: .. 
code-block:: python # Non-default profiler schedule allows user to turn profiler on and off # on different iterations of the training loop; # trace_handler is called every time a new trace becomes available def trace_handler(prof): print(prof.key_averages().table( sort_by="self_cuda_time_total", row_limit=-1)) # prof.export_chrome_trace("/tmp/test_trace_" + str(prof.step_num) + ".json") with torch.profiler.profile( activities=[ torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA, ], # In this example with wait=1, warmup=1, active=2, # profiler will skip the first step/iteration, # start warming up on the second, record # the third and the forth iterations, # after which the trace will become available # and on_trace_ready (when set) is called; # the cycle repeats starting with the next step schedule=torch.profiler.schedule( wait=1, warmup=1, active=2), on_trace_ready=trace_handler # on_trace_ready=torch.profiler.tensorboard_trace_handler('./log') # used when outputting for tensorboard ) as p: for iter in range(N): code_iteration_to_profile(iter) # send a signal to the profiler that the next iteration has started p.step() """ def __init__( self, *, activities: Optional[Iterable[ProfilerActivity]] = None, schedule: Optional[Callable[[int], ProfilerAction]] = None, on_trace_ready: Optional[Callable[..., Any]] = None, engine: Optional[Engine] = None, record_shapes: bool = False, profile_memory: bool = False, with_stack: bool = False, with_flops: bool = False, with_modules: bool = False, profile_stateful_tensor_memory: bool = False, ) -> None: super().__init__( activities=activities, schedule=schedule, on_trace_ready=on_trace_ready, record_shapes=record_shapes, profile_memory=profile_memory, with_stack=with_stack, with_flops=with_flops, with_modules=with_modules, ) self._logger = get_dist_logger() self.extentions: List[ProfilerExtension] = [] if profile_stateful_tensor_memory: if engine is None: self._logger.warning('Ignore "profile_model_data" since engine is None', ranks=[0]) else: self.extentions.append(StatefulTensorMemoryProfilerExtention(engine)) def prepare_trace(self) -> None: if hasattr(super(), "prepare_trace"): super().prepare_trace() elif hasattr(super(), "_start_warmup"): super()._start_warmup() for ext in self.extentions: ext.prepare_trace() def _start_warmup(self): self.prepare_trace() def start_trace(self): if hasattr(super(), "_start_trace"): super()._start_trace() elif hasattr(super(), "start_trace"): super().start_trace() for ext in self.extentions: ext.start_trace() def _start_trace(self): self.start_trace() def stop_trace(self): if hasattr(super(), "_stop_trace"): super()._stop_trace() elif hasattr(super(), "stop_trace"): super().stop_trace() for ext in self.extentions: ext.stop_trace() def _stop_trace(self): self.stop_trace() def export_chrome_trace(self, path: str): """ Exports the collected trace in Chrome JSON format. """ assert self.profiler fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False) fp.close() retvalue = self.profiler.export_chrome_trace(fp.name) with open(fp.name) as fin: trace = json.load(fin) for ext in self.extentions: trace = ext.extend_chrome_trace(trace) open_func = gzip.open if path.endswith(".gz") else open with open_func(path, "wt") as fout: json.dump(trace, fout) os.remove(fp.name) return retvalue
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
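The export_chrome_trace override above round-trips the trace through a temporary file so extensions can splice their events into the JSON before it is written out (gzipped when the target ends in .gz). The core of that pattern, with rewrite_trace as a hypothetical standalone helper:

import gzip
import json
import os
import tempfile

def rewrite_trace(raw_export, extensions, path):
    # dump the raw trace, let each extension mutate the JSON, then rewrite it
    fp = tempfile.NamedTemporaryFile("w+t", suffix=".json", delete=False)
    fp.close()
    raw_export(fp.name)  # e.g. the wrapped profiler's export_chrome_trace
    with open(fp.name) as fin:
        trace = json.load(fin)
    for ext in extensions:
        trace = ext.extend_chrome_trace(trace)
    open_func = gzip.open if path.endswith(".gz") else open
    with open_func(path, "wt") as fout:
        json.dump(trace, fout)
    os.remove(fp.name)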
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/profiler/extention.py
colossalai/legacy/utils/profiler/extention.py
from abc import ABC, abstractmethod


class ProfilerExtension(ABC):
    @abstractmethod
    def prepare_trace(self):
        pass

    @abstractmethod
    def start_trace(self):
        pass

    @abstractmethod
    def stop_trace(self):
        pass

    @abstractmethod
    def extend_chrome_trace(self, trace: dict) -> dict:
        pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
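A minimal concrete extension against the interface above (hypothetical, assuming ProfilerExtension is imported from the module): it records a single Chrome-trace instant event at stop and appends it when the trace is extended.

import os
import threading
import time

class StopMarkerExtension(ProfilerExtension):
    def prepare_trace(self):
        pass

    def start_trace(self):
        self.events = []

    def stop_trace(self):
        self.events.append({
            "ph": "i", "s": "t", "name": "traceStop",
            "pid": os.getpid(), "tid": threading.get_ident(),
            "ts": int(time.time() * 1e6), "args": {},
        })

    def extend_chrome_trace(self, trace: dict) -> dict:
        trace.setdefault("traceEvents", []).extend(self.events)
        return trace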
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/profiler/stateful_tensor_mem_extention.py
colossalai/legacy/utils/profiler/stateful_tensor_mem_extention.py
import os import threading import time from enum import Enum from typing import List import torch from colossalai.gemini.ophooks import BaseOpHook from colossalai.gemini.stateful_tensor import StatefulTensor from colossalai.legacy.engine import Engine from colossalai.legacy.utils.profiler.extention import ProfilerExtension class DeviceType(Enum): CPU = 0 CUDA = 1 def get_timestamp_us(): return int(time.time() * 1e6) def generic_instant_event(name, pid, tid, timestamp, args): return {"ph": "i", "s": "t", "name": name, "pid": pid, "tid": tid, "ts": timestamp, "args": args} class StatefulTensorMemoryEvent: EVENT_NAME = "[statefulTensorMemory]" def __init__(self, timestamp: int, device_type: DeviceType, bytes_: int) -> None: self.pid = os.getpid() self.tid = threading.get_ident() self.timestamp = timestamp self.device_type = device_type self.device_id = torch.cuda.current_device() if device_type == DeviceType.CUDA else -1 self.bytes = bytes_ def state_dict(self): return generic_instant_event( StatefulTensorMemoryEvent.EVENT_NAME, self.pid, self.tid, self.timestamp, {"Device Type": self.device_type.value, "Device Id": self.device_id, "Bytes": self.bytes}, ) class StatefulTensorMemoryTracer: def __init__(self) -> None: self.events: List[StatefulTensorMemoryEvent] = [] self._tracing = False def sample(self): cuda_mem = StatefulTensor.GST_MGR.total_mem["cuda"] cpu_mem = StatefulTensor.GST_MGR.total_mem["cpu"] timestamp = get_timestamp_us() if self._tracing: self.events.append(StatefulTensorMemoryEvent(timestamp, DeviceType.CUDA, cuda_mem)) self.events.append(StatefulTensorMemoryEvent(timestamp, DeviceType.CPU, cpu_mem)) def start_trace(self): self.events.clear() self._tracing = True def stop_trace(self): self._tracing = False def state_dict(self): return [event.state_dict() for event in self.events] class StatefulTensorMemoryTracerHook(BaseOpHook): def __init__(self, tracer: StatefulTensorMemoryTracer): super().__init__() self.tracer = tracer self._enable = False def pre_fwd_exec(self, module: torch.nn.Module, *args): if self._enable: self.tracer.sample() def post_fwd_exec(self, module: torch.nn.Module, *args): if self._enable: self.tracer.sample() def pre_bwd_exec(self, module: torch.nn.Module, input_, output): if self._enable: self.tracer.sample() def post_bwd_exec(self, module: torch.nn.Module, input_): if self._enable: self.tracer.sample() def post_iter(self): if self._enable: self.tracer.sample() def enable(self): self._enable = True def disable(self): self._enable = False class StatefulTensorMemoryProfilerExtention(ProfilerExtension): def __init__(self, engine: Engine) -> None: self.engine = engine self.tracer = StatefulTensorMemoryTracer() self.hook = StatefulTensorMemoryTracerHook(self.tracer) self.hook_registered = False def prepare_trace(self): self.hook.enable() if not self.hook_registered: self.engine.add_hook(self.hook) self.hook_registered = True def start_trace(self): self.prepare_trace() self.tracer.start_trace() def stop_trace(self): self.tracer.stop_trace() self.hook.disable() if self.hook_registered: self.engine.remove_hook(self.hook) # remove_hook is not implemented now # FIXME(ver217): uncomment below line when remove_hook is implemented # self.hook_registered = False def extend_chrome_trace(self, trace: dict) -> dict: trace["traceEvents"].extend(self.tracer.state_dict()) return trace
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
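For reference, one sample from the tracer above serializes to a Chrome-trace instant event shaped like this ("ph": "i" marks an instant event, "s": "t" scopes it to the thread; the values below are illustrative, not measured):

import json

event = {
    "ph": "i", "s": "t", "name": "[statefulTensorMemory]",
    "pid": 12345, "tid": 67890, "ts": 1700000000000000,
    "args": {"Device Type": 1, "Device Id": 0, "Bytes": 1 << 20},
}
print(json.dumps(event))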
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/profiler/__init__.py
colossalai/legacy/utils/profiler/__init__.py
from .legacy import *
from .profiler import profile
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/profiler/legacy/comm_profiler.py
colossalai/legacy/utils/profiler/legacy/comm_profiler.py
import inspect from functools import partial from pathlib import Path from typing import List, Optional import torch import torch.distributed as dist from torch.autograd.profiler import profile from torch.distributed import ReduceOp from colossalai.accelerator import get_accelerator from .prof_utils import BaseProfiler, _format_bandwidth, _format_memory, _format_time def _get_code_location(depth: int): ret = [] length = min(len(inspect.stack()), depth + 1) for i in range(3, length): upper_frame = inspect.stack()[i] function_name = inspect.stack()[i - 1].function ret.append(upper_frame.filename) ret.append("(") ret.append(str(upper_frame.lineno)) ret.append("): ") ret.append(function_name) if i != length - 1: ret.append("\n") return "".join(ret) torch_all_reduce = dist.all_reduce torch_all_gather = dist.all_gather torch_reduce_scatter = dist.reduce_scatter torch_broadcast = dist.broadcast torch_reduce = dist.reduce class CommEvent(object): """Communication Event. Used for communication time and communication volume recording. """ def __init__(self, count: int = 0, comm_vol: float = 0.0, cuda_time: int = 0): self.self_count = count self.self_comm_vol = comm_vol self.self_cuda_time = cuda_time def add(self, rhs): self.self_count += rhs.self_count self.self_comm_vol += rhs.self_comm_vol self.self_cuda_time += rhs.self_cuda_time class CommProfiler(BaseProfiler): """Communication profiler. Records all communication events.""" def __init__(self, depth: int = 0, total_count: int = 0, total_comm_vol: float = 0, total_cuda_time: int = 0): super().__init__(profiler_name="Collective_Communication", priority=0) self.depth = 3 + depth self.total_count = total_count self.total_comm_vol = total_comm_vol self.total_cuda_time = total_cuda_time self.ops_record = dict() self.profiler = None self.pending_op = None self.pending_metadata = None self.warn_flag = False def reset(self): self.total_count = 0 self.total_comm_vol = 0 self.total_cuda_time = 0 self.ops_record = dict() self.profiler = None self.pending_op = None self.pending_metadata = None self.warn_flag = False def enable(self): dist.all_reduce = partial(all_reduce, profiler=self) dist.all_gather = partial(all_gather, profiler=self) dist.reduce_scatter = partial(reduce_scatter, profiler=self) dist.broadcast = partial(broadcast, profiler=self) dist.reduce = partial(reduce, profiler=self) def disable(self): dist.all_reduce = torch_all_reduce dist.all_gather = torch_all_gather dist.reduce_scatter = torch_reduce_scatter dist.broadcast = torch_broadcast dist.reduce = torch_reduce def to_tensorboard(self, writer): writer.add_text(tag="Collective Communication", text_string=self.result_str("\n\n")) def to_file(self, filename: Path): with open(filename, "w") as f: f.write(self.result_str()) def show(self): print(self.result_str()) def result_str(self, sep: str = "\n"): res = [] def append(s: str = None): if s is not None: res.append(s) res.append(sep) if self.warn_flag: append( "Warning: there exists multiple communication operations in the same time. As a result, " "the profiling result is not accurate." ) if self.total_cuda_time == 0: return "No collective communication has been called yet!" 
append("Collective communication profiling result:") append("total cuda time: {}".format(_format_time(self.total_cuda_time))) append("average bandwidth: {}".format(_format_bandwidth(self.total_comm_vol, self.total_cuda_time))) append("total number of calls: {}".format(self.total_count)) append("All events:") separation = "-" * 74 row_format = "{:^10}" + "{:^12}" * 2 + "{:^16}" + "{:^12}" * 2 append(separation) append(row_format.format("Location", "GPU time", "Percentage", "Comm volume", "Bandwidth", "Num of calls")) append(separation) show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].self_cuda_time) for location, event in show_list: append(location) append( row_format.format( "", _format_time(event.self_cuda_time), "{:.1f}%".format(event.self_cuda_time / self.total_cuda_time * 100.0), _format_memory(event.self_comm_vol), _format_bandwidth(event.self_comm_vol, event.self_cuda_time), event.self_count, ) ) append() return "".join(res) @property def has_aync_op(self): return self.pending_op is not None def activate_profiler(self, kn: str, vol: float): self.pending_metadata = (kn, _get_code_location(self.depth), vol) self.profiler = profile(enabled=True, use_cuda=True, use_cpu=True, use_kineto=True) self.profiler.__enter__() def close_profiler(self, group=None): assert self.profiler is not None, "There is no running dist op" kernel_name, code_location, vol = self.pending_metadata self.profiler.__exit__(None, None, None) if self.profiler.enabled and dist.get_world_size(group) > 1: assert_flag = 0 current_comm_event = None events = self.profiler.function_events for event in events: if kernel_name in event.name: assert assert_flag == 0, "Multiple dist ops has been called " current_comm_event = CommEvent(1, vol, event.self_cuda_time_total) assert_flag += 1 assert current_comm_event is not None, "dist op has not been found" buffer = torch.tensor([current_comm_event.self_cuda_time], device=get_accelerator().get_current_device()) torch_all_reduce(buffer, op=ReduceOp.MIN, group=group) current_comm_event.self_cuda_time = buffer.item() self.total_count += current_comm_event.self_count self.total_comm_vol += current_comm_event.self_comm_vol self.total_cuda_time += current_comm_event.self_cuda_time if code_location in self.ops_record: self.ops_record[code_location].add(current_comm_event) else: self.ops_record[code_location] = current_comm_event self.profiler = None self.pending_op = None self.pending_metadata = None def wait_async_op(self): if self.pending_op is not None: op = self.pending_op op.wait() self.close_profiler() class CommHandler(object): """Communication handler. 
    A dummy handler to wait for async operations."""

    def __init__(self, profiler: CommProfiler):
        super().__init__()
        self.prof = profiler

    def wait(self):
        self.prof.wait_async_op()


def async_check(profiler: CommProfiler):
    if profiler.pending_op is not None:
        profiler.warn_flag = True
        profiler.wait_async_op()


def all_reduce(
    tensor: torch.Tensor, op: ReduceOp = ReduceOp.SUM, group=None, async_op: bool = False, profiler: CommProfiler = None
) -> Optional[CommHandler]:
    async_check(profiler)

    comm_size = dist.get_world_size(group)
    correction = 2 * (comm_size - 1) / comm_size
    comm_vol = correction * tensor.element_size() * tensor.numel()
    profiler.activate_profiler("ncclKernel_AllReduce_", comm_vol)
    profiler.pending_op = torch_all_reduce(tensor, op, group, async_op)

    if async_op:
        return CommHandler(profiler)

    profiler.close_profiler(group)


def reduce_scatter(
    output: torch.Tensor,
    input_list: List[torch.Tensor],
    op: ReduceOp = ReduceOp.SUM,
    group=None,
    async_op: bool = False,
    profiler: CommProfiler = None,
) -> Optional[CommHandler]:
    async_check(profiler)

    comm_size = dist.get_world_size(group)
    correction = (comm_size - 1) / comm_size
    comm_vol = 0
    for tensor in input_list:
        comm_vol += tensor.element_size() * tensor.numel()
    comm_vol *= correction
    profiler.activate_profiler("ncclKernel_ReduceScatter_", comm_vol)
    profiler.pending_op = torch_reduce_scatter(output, input_list, op, group, async_op)

    if async_op:
        return CommHandler(profiler)

    profiler.close_profiler(group)


def all_gather(
    tensor_list: List[torch.Tensor],
    tensor: torch.Tensor,
    group=None,
    async_op: bool = False,
    profiler: CommProfiler = None,
) -> Optional[CommHandler]:
    async_check(profiler)

    comm_size = dist.get_world_size(group)
    correction = (comm_size - 1) / comm_size
    comm_vol = 0
    for ten in tensor_list:
        comm_vol += ten.element_size() * ten.numel()
    comm_vol *= correction
    profiler.activate_profiler("ncclKernel_AllGather_", comm_vol)
    profiler.pending_op = torch_all_gather(tensor_list, tensor, group, async_op)

    if async_op:
        return CommHandler(profiler)

    profiler.close_profiler(group)


def broadcast(
    tensor: torch.Tensor, src: int, group=None, async_op: bool = False, profiler: CommProfiler = None
) -> Optional[CommHandler]:
    async_check(profiler)

    comm_vol = 1.0 * tensor.element_size() * tensor.numel()
    profiler.activate_profiler("ncclKernel_Broadcast_", comm_vol)
    profiler.pending_op = torch_broadcast(tensor, src, group, async_op)

    if async_op:
        return CommHandler(profiler)

    profiler.close_profiler(group)


def reduce(
    tensor: torch.Tensor,
    dst: int,
    op: ReduceOp = ReduceOp.SUM,
    group=None,
    async_op: bool = False,
    profiler: CommProfiler = None,
) -> Optional[CommHandler]:
    async_check(profiler)

    comm_vol = 1.0 * tensor.element_size() * tensor.numel()
    profiler.activate_profiler("ncclKernel_Reduce_", comm_vol)
    profiler.pending_op = torch_reduce(tensor, dst, op, group, async_op)

    if async_op:
        return CommHandler(profiler)

    profiler.close_profiler(group)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
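The profiler above scales raw tensor bytes by a ring-collective correction before computing bandwidth: an all-reduce moves roughly 2*(n-1)/n of the tensor per rank, while all-gather and reduce-scatter move (n-1)/n. A quick check of the all-reduce arithmetic:

def all_reduce_volume(numel: int, element_size: int, world_size: int) -> float:
    # bytes on the wire per rank in a ring all-reduce
    correction = 2 * (world_size - 1) / world_size
    return correction * element_size * numel

# a 1M-element fp32 tensor across 4 ranks: 4e6 B * 1.5 = 6e6 B (~5.72 MiB)
print(all_reduce_volume(1_000_000, 4, 4) / 1024**2)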
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/profiler/legacy/prof_utils.py
colossalai/legacy/utils/profiler/legacy/prof_utils.py
from abc import ABC, abstractmethod from pathlib import Path from typing import List, Union from colossalai.legacy.core import global_context as gpc # copied from high version pytorch to support low version def _format_time(time_us): """Defines how to format time in FunctionEvent""" US_IN_SECOND = 1000.0 * 1000.0 US_IN_MS = 1000.0 if time_us >= US_IN_SECOND: return "{:.3f}s".format(time_us / US_IN_SECOND) if time_us >= US_IN_MS: return "{:.3f}ms".format(time_us / US_IN_MS) return "{:.3f}us".format(time_us) # copied from high version pytorch to support low version def _format_memory(nbytes): """Returns a formatted memory size string""" KB = 1024 MB = 1024 * KB GB = 1024 * MB if abs(nbytes) >= GB: return "{:.2f} GB".format(nbytes * 1.0 / GB) elif abs(nbytes) >= MB: return "{:.2f} MB".format(nbytes * 1.0 / MB) elif abs(nbytes) >= KB: return "{:.2f} KB".format(nbytes * 1.0 / KB) else: return str(nbytes) + " B" def _format_bandwidth(volume: float or int, time_us: int): sec_div_mb = (1000.0 / 1024.0) ** 2 mb_per_sec = volume / time_us * sec_div_mb if mb_per_sec >= 1024.0: return "{:.3f} GB/s".format(mb_per_sec / 1024.0) else: return "{:.3f} MB/s".format(mb_per_sec) class BaseProfiler(ABC): def __init__(self, profiler_name: str, priority: int): self.name = profiler_name self.priority = priority @abstractmethod def enable(self): pass @abstractmethod def disable(self): pass @abstractmethod def to_tensorboard(self, writer): pass @abstractmethod def to_file(self, filename: Path): pass @abstractmethod def show(self): pass class ProfilerContext(object): """Profiler context manager Usage:: world_size = 4 inputs = torch.randn(10, 10, dtype=torch.float32, device=get_current_device()) outputs = torch.empty(world_size, 10, 10, dtype=torch.float32, device=get_current_device()) outputs_list = list(torch.chunk(outputs, chunks=world_size, dim=0)) cc_prof = CommProfiler() with ProfilerContext([cc_prof]) as prof: op = dist.all_reduce(inputs, async_op=True) dist.all_gather(outputs_list, inputs) op.wait() dist.reduce_scatter(inputs, outputs_list) dist.broadcast(inputs, 0) dist.reduce(inputs, 0) prof.show() """ def __init__(self, profilers: List[BaseProfiler] = None, enable: bool = True): self.enable = enable self.profilers = sorted(profilers, key=lambda prof: prof.priority) def __enter__(self): if self.enable: for prof in self.profilers: prof.enable() return self def __exit__(self, exc_type, exc_val, exc_tb): if self.enable: for prof in self.profilers: prof.disable() def to_tensorboard(self, writer): from torch.utils.tensorboard import SummaryWriter assert isinstance( writer, SummaryWriter ), f"torch.utils.tensorboard.SummaryWriter is required, but found {type(writer)}." for prof in self.profilers: prof.to_tensorboard(writer) def to_file(self, log_dir: Union[str, Path]): if isinstance(log_dir, str): log_dir = Path(log_dir) if not log_dir.exists(): log_dir.mkdir(parents=True, exist_ok=True) for prof in self.profilers: log_file = log_dir.joinpath(f"{prof.name}_rank_{gpc.get_global_rank()}.log") prof.to_file(log_file) def show(self): for prof in self.profilers: prof.show()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
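_format_bandwidth above converts bytes-per-microsecond into MB/s through the combined factor (1000/1024)**2 (microseconds to seconds, bytes to binary MB). A quick sanity check of that arithmetic:

vol_bytes, time_us = 6_000_000, 1_000  # 6e6 bytes moved in 1 ms
sec_div_mb = (1000.0 / 1024.0) ** 2
print(vol_bytes / time_us * sec_div_mb)  # ~5722 MB/s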
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/profiler/legacy/pcie_profiler.py
colossalai/legacy/utils/profiler/legacy/pcie_profiler.py
from pathlib import Path
from typing import List

from torch.autograd.profiler import profile

from .prof_utils import BaseProfiler, _format_bandwidth, _format_memory, _format_time


def _get_size(dtype: str):
    if dtype == "fp16":
        return 2
    elif dtype == "fp32":
        return 4
    else:
        raise NotImplementedError


def _get_numel(my_list: List[int]) -> int:
    from functools import reduce
    from operator import mul

    return reduce(mul, my_list)


def _reduce_location(locations: List[str]) -> str:
    ret = []
    for lo in locations:
        ret.append(lo)
        ret.append("\n")
    ret = ret[:-1]
    return "".join(ret)


class PcieEvent(object):
    """Pcie Event."""

    def __init__(self, count: int = 0, pcie_vol: int = 0, cuda_time: int = 0):
        self.count = count
        self.pcie_vol = pcie_vol
        self.cuda_time = cuda_time

    def add(self, rhs):
        self.count += rhs.count
        self.pcie_vol += rhs.pcie_vol
        self.cuda_time += rhs.cuda_time


class PcieProfiler(BaseProfiler):
    """Pcie profiler. Records all data transmission between CPU and GPU.

    TODO: Merge pcie profiler into communication profiler
    """

    def __init__(self, dtype: str = "fp32", depth: int = 1):
        super().__init__(profiler_name="Pcie", priority=10)
        self.depth = depth
        self.data_size = _get_size(dtype)
        self.h2d_count = 0
        self.h2d_time = 0
        self.d2h_count = 0
        self.d2h_time = 0

        self.ops_record = dict()
        self.profiler = None

    def reset(self):
        self.h2d_count = 0
        self.h2d_time = 0
        self.d2h_count = 0
        self.d2h_time = 0

        self.ops_record = dict()
        self.profiler = None

    def enable(self):
        self.profiler = profile(
            enabled=True, use_cuda=True, use_cpu=True, use_kineto=True, record_shapes=True, with_stack=True
        )
        self.profiler.__enter__()

    def disable(self):
        self.profiler.__exit__(None, None, None)

        if self.profiler.enabled:
            events = self.profiler.function_events
            for event in events:
                if event.name == "aten::copy_":
                    t_shape = event.input_shapes[0]
                    if len(t_shape) == 0 or event.cuda_time_total == 0 or len(event.stack) == 0:
                        continue
                    current_comm_event = PcieEvent(1, self.data_size * _get_numel(t_shape), event.cuda_time_total)
                    code_location = _reduce_location(event.stack[: self.depth])
                    if code_location in self.ops_record:
                        self.ops_record[code_location].add(current_comm_event)
                    else:
                        self.ops_record[code_location] = current_comm_event
                elif "Memcpy HtoD" in event.name:
                    self.h2d_count += 1
                    self.h2d_time += event.cuda_time_total
                elif "Memcpy DtoH" in event.name:
                    self.d2h_count += 1
                    self.d2h_time += event.cuda_time_total

        self.profiler = None

    def to_tensorboard(self, writer):
        writer.add_text(tag="Data Transmission", text_string=self.result_str("\n\n"))

    def to_file(self, filename: Path):
        with open(filename, "w") as f:
            f.write(self.result_str())

    def show(self):
        print(self.result_str())

    def result_str(self, sep: str = "\n"):
        res = []

        def append(s: str = None):
            if s is not None:
                res.append(s)
            res.append(sep)

        append("Pcie profiling result:")
        append("time of data transmission (CPU -> GPU): {}".format(_format_time(self.h2d_time)))
        append("number of transmission (CPU -> GPU): {}".format(self.h2d_count))
        append("time of data transmission (GPU -> CPU): {}".format(_format_time(self.d2h_time)))
        append("number of transmission (GPU -> CPU): {}".format(self.d2h_count))

        append("Possible data transmission events in PCIE:")

        separation = "-" * 62
        row_format = "{:^10}" + "{:^12}" + "{:^16}" + "{:^12}" * 2

        append(separation)
        append(row_format.format("Location", "GPU time", "Trans volume", "Bandwidth", "Num of calls"))
        append(separation)

        show_list = sorted(self.ops_record.items(), key=lambda kv: -kv[1].cuda_time)
        for location, event in show_list:
            append(location)
            append(
                row_format.format(
                    "",
                    _format_time(event.cuda_time),
                    _format_memory(event.pcie_vol),
                    _format_bandwidth(event.pcie_vol, event.cuda_time),
                    event.count,
                )
            )
            append()

        return "".join(res)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
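The profiler above is meant to be driven through the `ProfilerContext` from `prof_utils`; a minimal usage sketch (assuming a CUDA device is available and, for `to_file`, an initialized `colossalai.legacy` runtime) could look like:

import torch

from colossalai.legacy.utils.profiler.legacy import PcieProfiler, ProfilerContext

pcie_prof = PcieProfiler(dtype="fp32")
with ProfilerContext([pcie_prof]) as prof:
    # host-to-device and device-to-host copies are exactly what PcieProfiler records
    x = torch.randn(1024, 1024).cuda()
    y = x.cpu()
prof.show()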
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/profiler/legacy/__init__.py
colossalai/legacy/utils/profiler/legacy/__init__.py
from .comm_profiler import CommProfiler
from .mem_profiler import MemProfiler
from .pcie_profiler import PcieProfiler
from .prof_utils import BaseProfiler, ProfilerContext

__all__ = ["BaseProfiler", "CommProfiler", "PcieProfiler", "MemProfiler", "ProfilerContext"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/data_sampler/base_sampler.py
colossalai/legacy/utils/data_sampler/base_sampler.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from abc import ABC, abstractmethod


class BaseSampler(ABC):
    def __init__(self, dataset, batch_size):
        self.dataset = dataset
        self.batch_size = batch_size

    @abstractmethod
    def __len__(self):
        pass

    @abstractmethod
    def __iter__(self):
        pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
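`BaseSampler` only fixes the constructor signature and the two abstract hooks, so a concrete sampler just supplies `__len__` and `__iter__`. A hypothetical minimal subclass (not part of the library) that yields consecutive index batches and drops the tail:

from colossalai.legacy.utils.data_sampler import BaseSampler


class SequentialBatchSampler(BaseSampler):
    def __len__(self):
        # number of full batches in the dataset
        return len(self.dataset) // self.batch_size

    def __iter__(self):
        for start in range(0, len(self) * self.batch_size, self.batch_size):
            yield list(range(start, start + self.batch_size))


sampler = SequentialBatchSampler(dataset=list(range(10)), batch_size=4)
print(list(sampler))  # [[0, 1, 2, 3], [4, 5, 6, 7]]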
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/data_sampler/__init__.py
colossalai/legacy/utils/data_sampler/__init__.py
from .base_sampler import BaseSampler
from .data_parallel_sampler import DataParallelSampler, get_dataloader

__all__ = ["BaseSampler", "DataParallelSampler", "get_dataloader"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/data_sampler/data_parallel_sampler.py
colossalai/legacy/utils/data_sampler/data_parallel_sampler.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# adapted from torch.utils.data.DistributedSampler

import math
import random
from typing import Iterator, TypeVar

import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, Sampler

from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc

T_co = TypeVar("T_co", covariant=True)


class DataParallelSampler(Sampler):
    """A data sampler for distributed data parallelism.

    Args:
        dataset (:class:`torch.utils.data.Dataset`): The Dataset for sampling.
        shuffle (bool, optional): Whether to shuffle data, defaults to False.
        seed (int, optional): The random seed used for sampling, defaults to 0.
        drop_last (bool, optional): Set to True to drop the last incomplete batch, if the dataset size
            is not divisible by the batch size. If False and the size of dataset is not divisible by
            the batch size, then the last batch will be smaller, defaults to False.
    """

    def __init__(self, dataset: Dataset, shuffle: bool = False, seed: int = 0, drop_last: bool = False) -> None:
        self.dataset = dataset
        self.num_replicas = gpc.get_world_size(ParallelMode.DATA)
        self.rank = gpc.get_local_rank(ParallelMode.DATA)
        self.epoch = 0
        self.drop_last = drop_last
        # If the dataset length is evenly divisible by # of replicas, then there
        # is no need to drop any data, since the dataset will be split equally.
        # type: ignore[arg-type]
        if self.drop_last and len(self.dataset) % self.num_replicas != 0:
            # Split to nearest available length that is evenly divisible.
            # This is to ensure each rank receives the same amount of data when
            # using this Sampler.
            self.num_samples = math.ceil(
                # `type:ignore` is required because Dataset cannot provide a default __len__
                # see NOTE in pytorch/torch/utils/data/sampler.py
                (len(self.dataset) - self.num_replicas) / self.num_replicas  # type: ignore[arg-type]
            )
        else:
            self.num_samples = math.ceil(len(self.dataset) / self.num_replicas)  # type: ignore[arg-type]
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle
        self.seed = seed

    def __iter__(self) -> Iterator[T_co]:
        if self.shuffle:
            # deterministically shuffle based on epoch and seed
            g = torch.Generator()
            g.manual_seed(self.seed + self.epoch)
            # type: ignore[arg-type]
            indices = torch.randperm(len(self.dataset), generator=g).tolist()

            # update for next epoch so that there is no need to call
            # set_epoch manually
            self.epoch += 1
        else:
            indices = list(range(len(self.dataset)))  # type: ignore[arg-type]

        if not self.drop_last:
            # add extra samples to make it evenly divisible
            padding_size = self.total_size - len(indices)
            if padding_size <= len(indices):
                indices += indices[:padding_size]
            else:
                indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
        else:
            # remove tail of data to make it evenly divisible.
            indices = indices[: self.total_size]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank : self.total_size : self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self) -> int:
        return self.num_samples

    def set_epoch(self, epoch: int) -> None:
        r"""Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
        use a different random ordering for each epoch. Otherwise, the next iteration of this
        sampler will yield the same ordering.

        Args:
            epoch (int): Epoch number.
        """
        self.epoch = epoch


def get_dataloader(
    dataset, shuffle=False, seed=1024, add_sampler=True, drop_last=False, pin_memory=False, num_workers=0, **kwargs
):
    r"""Set up a deterministic dataloader (it also configures worker seeding, the sampler and shuffling).

    Note:
        When pipeline parallel is enabled, shuffle cannot be True as it will result in mismatch between input data
        on the 1st stage and label on the last stage.

    Args:
        dataset (:class:`torch.utils.data.Dataset`): The dataset to be loaded.
        shuffle (bool, optional): Whether to shuffle the dataset. Defaults to False.
        seed (int, optional): Random worker seed for sampling, defaults to 1024.
        add_sampler: Whether to add ``DataParallelSampler`` to the dataset. Defaults to True.
        drop_last (bool, optional): Set to True to drop the last incomplete batch, if the dataset size
            is not divisible by the batch size. If False and the size of dataset is not divisible by
            the batch size, then the last batch will be smaller, defaults to False.
        pin_memory (bool, optional): Whether to pin memory address in CPU memory. Defaults to False.
        num_workers (int, optional): Number of worker threads for this dataloader. Defaults to 0.
        kwargs (dict): optional parameters for ``torch.utils.data.DataLoader``, more details could be found in
            `DataLoader <https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader>`_.

    Returns:
        :class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing.
    """
    _kwargs = kwargs.copy()

    if add_sampler and gpc.is_initialized(ParallelMode.DATA) and gpc.get_world_size(ParallelMode.DATA) > 1:
        sampler = DataParallelSampler(dataset, shuffle=shuffle)
    else:
        sampler = None

    # Deterministic dataloader
    def seed_worker(worker_id):
        worker_seed = seed
        np.random.seed(worker_seed)
        torch.manual_seed(worker_seed)
        random.seed(worker_seed)

    if sampler is None:
        return DataLoader(
            dataset,
            worker_init_fn=seed_worker,
            shuffle=shuffle,
            drop_last=drop_last,
            pin_memory=pin_memory,
            num_workers=num_workers,
            **_kwargs,
        )
    else:
        return DataLoader(
            dataset,
            sampler=sampler,
            worker_init_fn=seed_worker,
            drop_last=drop_last,
            pin_memory=pin_memory,
            num_workers=num_workers,
            **_kwargs,
        )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
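In a training script, `get_dataloader` is typically called once per process after the distributed environment is up. A sketch (assuming the DATA parallel group has already been initialized through the legacy launcher, and `train_set` is any map-style dataset; both are hypothetical names here):

from colossalai.legacy.utils.data_sampler import get_dataloader

# each data-parallel rank draws a disjoint shard of train_set;
# shuffle=True is routed to DataParallelSampler rather than to DataLoader
train_loader = get_dataloader(
    train_set,
    shuffle=True,
    seed=42,
    pin_memory=True,
    num_workers=2,
    batch_size=32,  # forwarded to torch.utils.data.DataLoader via **kwargs
)

for batch in train_loader:
    ...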
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/checkpoint/module_checkpoint.py
colossalai/legacy/utils/checkpoint/module_checkpoint.py
from typing import Dict, Optional

import torch
import torch.distributed as dist

from colossalai.interface import OptimizerWrapper
from colossalai.tensor import ColoTensor

from .utils import gather_tensor, scatter_tensor


def save_checkpoint(
    path: str,
    epoch: int,
    model: torch.nn.Module,
    optimizer: Optional[OptimizerWrapper] = None,
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
    *args,
    **kwargs,
):
    """save_checkpoint
    save a model, whose parameters are `ColoTensor`s.

    Args:
        path (str): directory to save the checkpoint files.
        epoch (int): the epoch number.
        model (torch.nn.Module): a torch module initialized by ColoInitContext
        optimizer (OptimizerWrapper, optional): optimizer. Defaults to None.
        lr_scheduler (torch.optim.lr_scheduler._LRScheduler, optional): lr scheduler. Defaults to None.
    """
    rank = dist.get_rank()
    model_state = model.state_dict()

    # save the dist context about the tensors in a new dict, while still maintaining the original dict
    for k, v in model_state.items():
        if isinstance(v, ColoTensor):
            gather_tensor(v)  # gather sharded tensors to rank0
            # don't recover tensors in rank0, since the dict is only a copy of model

    if rank == 0:
        # sanity check
        for k, v in model_state.items():
            if isinstance(v, ColoTensor):
                assert v.save_ready
                assert v.is_replicate()
                delattr(v, "save_ready")
        # model saving
        save_state = {"epoch": epoch, "model": model_state}
        torch.save(save_state, path + "/epoch_{}_model.pth".format(epoch), *args, **kwargs)

    # delete old dicts
    del model_state
    # synchronize all the processes
    dist.barrier()

    if optimizer is not None:
        mapping = dict()
        optim_state = optimizer.state_dict()
        for k, v in optim_state["state"].items():
            for n, t in v.items():
                if isinstance(t, ColoTensor):
                    mapping[(k, n)] = t.dist_spec
                    gather_tensor(t)

        if rank == 0:
            save_state = {"epoch": epoch, "optim": optim_state}
            torch.save(save_state, path + "/epoch_{}_optim.pth".format(epoch), *args, **kwargs)
            # recover colo tensors in rank0
            for k, v in optimizer.state_dict()["state"].items():
                for n, t in v.items():
                    if isinstance(t, ColoTensor):
                        assert hasattr(t, "save_ready")
                        t.set_dist_spec(mapping[(k, n)])
                        delattr(t, "save_ready")

        del optim_state
        del mapping
        dist.barrier()


def load_checkpoint(
    path: str,
    epoch: int,
    model: torch.nn.Module,
    optimizer: Optional[OptimizerWrapper] = None,
    lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,
    torch_load_kwargs: Optional[Dict] = None,
    load_state_dict_kwargs: Optional[Dict] = None,
):
    """load_checkpoint
    load a model, whose parameters are `ColoTensor`s.

    Args:
        path (str): directory holding the checkpoint files.
        epoch (int): the epoch number.
        model (torch.nn.Module): a torch module initialized by ColoInitContext
        optimizer (OptimizerWrapper, optional): optimizer. Defaults to None.
        lr_scheduler (torch.optim.lr_scheduler._LRScheduler, optional): lr scheduler. Defaults to None.
        torch_load_kwargs (dict, optional): The kwargs of torch.load inside the function
        load_state_dict_kwargs (dict, optional): The kwargs of load_state_dict inside the function
    """
    # initialize the default parameters
    if not torch_load_kwargs:
        torch_load_kwargs = dict()
    if not load_state_dict_kwargs:
        load_state_dict_kwargs = dict()

    rank = dist.get_rank()
    mapping = dict()
    for n, p in model.named_parameters():
        if isinstance(p, ColoTensor):
            mapping[n] = p.dist_spec
            gather_tensor(p)

    if rank == 0:
        load_state = torch.load(path + "/epoch_{}_model.pth".format(epoch), **torch_load_kwargs)
        model.load_state_dict(load_state["model"], **load_state_dict_kwargs)
    dist.barrier()

    # scatter loaded parameters
    for n, p in model.named_parameters():
        if isinstance(p, ColoTensor):
            scatter_tensor(p, mapping[n])
            if rank == 0:
                assert hasattr(p, "save_ready")
                delattr(p, "save_ready")
    del mapping

    if optimizer is not None:
        mapping = dict()
        for k, v in optimizer.state_dict()["state"].items():
            for n, t in v.items():
                if isinstance(t, ColoTensor):
                    mapping[(k, n)] = t.dist_spec
                    gather_tensor(t)

        if rank == 0:
            colo_checkpoint = torch.load(path + "/epoch_{}_optim.pth".format(epoch), **torch_load_kwargs)
            optimizer.load_state_dict(colo_checkpoint["optim"], **load_state_dict_kwargs)
        dist.barrier()

        for k, v in optimizer.state_dict()["state"].items():
            for n, t in v.items():
                if isinstance(t, ColoTensor):
                    scatter_tensor(t, mapping[(k, n)])

        del mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
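Both functions are collective: every rank must enter them, since they perform spec transforms and `dist.barrier()` calls internally, while only rank 0 touches the filesystem. A usage sketch (assuming `model` was built under `ColoInitContext` and the `./ckpt` directory exists; `model` and `optim` are hypothetical names):

from colossalai.legacy.utils.checkpoint import load_checkpoint, save_checkpoint

save_checkpoint("./ckpt", epoch=3, model=model, optimizer=optim)  # writes epoch_3_model.pth / epoch_3_optim.pth
# ... later, under the same world size and parallel layout ...
load_checkpoint("./ckpt", epoch=3, model=model, optimizer=optim)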
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/checkpoint/utils.py
colossalai/legacy/utils/checkpoint/utils.py
import torch
import torch.distributed as dist

from colossalai.legacy.tensor import ColoTensorSpec
from colossalai.legacy.tensor.distspec import DistPlacementPattern, _DistSpec
from colossalai.tensor import ColoTensor


def robust_broadcast(tensor):
    with torch.no_grad():
        is_cpu_ten = tensor.device.type == "cpu"
        if is_cpu_ten:
            b_data = tensor.cuda()
        else:
            b_data = tensor

        dist.broadcast(b_data, 0)

        if is_cpu_ten:
            tensor.copy_(b_data)


def gather_tensor(colo_tensor: ColoTensor) -> None:
    """Make colo_tensor replicated on rank 0"""
    if not colo_tensor.is_replicate():
        pg = colo_tensor.get_process_group()
        # for the group which contains rank 0
        if pg.dp_local_rank() == 0:
            old_dist_spec = colo_tensor.dist_spec
            colo_tensor.to_replicate_()
            if dist.get_rank() != 0:
                colo_tensor.set_dist_spec(old_dist_spec)

        # synchronize all processes for unexpected problems
        dist.barrier()

    if dist.get_rank() == 0:
        setattr(colo_tensor, "save_ready", True)  # set saving signature


def scatter_tensor(colo_tensor: ColoTensor, dist_spec: _DistSpec) -> None:
    """Reversal operation of `gather_tensor`."""
    if dist_spec.placement == DistPlacementPattern.REPLICATE:
        robust_broadcast(colo_tensor.data)
    else:
        global_size = colo_tensor.size_global()

        if dist.get_rank() == 0:
            entire_data = colo_tensor.data
        else:
            entire_data = torch.empty(global_size, device=colo_tensor.device)
        robust_broadcast(entire_data)

        if dist.get_rank() == 0:
            colo_tensor.set_dist_spec(dist_spec)
        else:
            rep_tensor = ColoTensor(
                entire_data, ColoTensorSpec(pg=colo_tensor.get_process_group(), compute_attr=colo_tensor.compute_spec)
            )
            rep_tensor.set_dist_spec(dist_spec)
            with torch.no_grad():
                colo_tensor.data.copy_(rep_tensor.data)

    # synchronize all processes for unexpected problems
    dist.barrier()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/utils/checkpoint/__init__.py
colossalai/legacy/utils/checkpoint/__init__.py
from .module_checkpoint import load_checkpoint, save_checkpoint

__all__ = ["save_checkpoint", "load_checkpoint"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/tensor/op_wrapper.py
colossalai/legacy/tensor/op_wrapper.py
import functools
from typing import Callable, Dict

# Custom sharded ops
_COLOSSAL_OPS: Dict[str, Callable] = {}


def _register_colo_op(op, func):
    global _COLOSSAL_OPS
    _COLOSSAL_OPS[op] = func


def colo_op_impl(func):
    """
    Provides a way for users to write their own custom operator. This can be used to
    override existing ColoTensor operators or write a new one not supported by ColoTensor.
    If the operator in question is covered by ``__torch_function__`` dispatch and has a
    ColoTensor as any of its parameters, the function provided will be invoked for that
    operator.

    Example:
        >>> @colo_op_impl(torch.nn.functional.linear)
        >>> def my_custom_linear(types, args, kwargs, process_group):
        >>>     ....
        >>>
        >>> input = torch.rand(10, 32)
        >>> weight = ColoTensor(torch.rand(32, 16))
        >>> bias = ColoTensor(torch.rand(16))
        >>> # This will call `my_custom_linear` instead of the default.
        >>> torch.nn.functional.linear(input, weight, bias)

    The types, args and kwargs parameters are the same parameters that are passed
    to ``__torch_function__`` dispatch API
    (https://pytorch.org/docs/stable/notes/extending.html#extending-torch).

    Args:
        func(Callable): Torch function for which we want to provide a sharded
            implementation (ex: torch.nn.functional.linear)
    """

    def decorator_sharded_func(wrapped_func):
        _register_colo_op(func, wrapped_func)

        @functools.wraps(wrapped_func)
        def wrapper(*args, **kwargs):
            return wrapped_func(*args, **kwargs)

        return wrapper

    return decorator_sharded_func
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/tensor/tensor_spec.py
colossalai/legacy/tensor/tensor_spec.py
from dataclasses import dataclass, field
from typing import Optional

from colossalai.legacy.tensor.distspec import DistPlacementPattern, _DistSpec
from colossalai.legacy.tensor.process_group import ProcessGroup

from .compute_spec import ComputeSpec


@dataclass
class ColoTensorSpec:
    """ColoTensorSpec

    A data class for specifications of the `ColoTensor`.
    It contains attributes of `ProcessGroup`, `_DistSpec`, `ComputeSpec`.
    The latter two attributes are optional; if not set, they default to a
    replicated `_DistSpec` and `None`, respectively.
    """

    pg: ProcessGroup
    dist_attr: Optional[_DistSpec] = field(default_factory=lambda: _DistSpec(DistPlacementPattern.REPLICATE))
    compute_attr: Optional[ComputeSpec] = None
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
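Putting the pieces together, a spec for a tensor sharded along its last dimension across a 4-way TP group might be built like this (a sketch; the distributed setup behind `ProcessGroup` is assumed):

from colossalai.legacy.tensor import ColoTensorSpec, ComputePattern, ComputeSpec, ProcessGroup, ShardSpec

pg = ProcessGroup(tp_degree=4)  # 4-way tensor parallelism over all visible ranks
spec = ColoTensorSpec(
    pg=pg,
    dist_attr=ShardSpec(dims=[-1], num_partitions=[4]),  # split the last dim into 4 shards
    compute_attr=ComputeSpec(ComputePattern.TP1D),
)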
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/tensor/dist_spec_mgr.py
colossalai/legacy/tensor/dist_spec_mgr.py
from contextlib import contextmanager

import torch
import torch.distributed as dist
from numpy import prod

from colossalai.legacy.tensor.distspec import DistPlacementPattern, _DistSpec
from colossalai.legacy.tensor.process_group import ProcessGroup


# TODO(jiaruifang) circle import, move the divide to colossalai.commons.
# colossalai.legacy.tensor shall not import any submodule from colossal.nn
def divide(numerator, denominator):
    """Only allow exact division.

    Args:
        numerator (int): Numerator of the division.
        denominator (int): Denominator of the division.

    Returns:
        int: the result of exact division.
    """
    assert denominator != 0, "denominator can not be zero"
    assert numerator % denominator == 0, "{} is not divisible by {}".format(numerator, denominator)
    return numerator // denominator


class TransformDistSpec(torch.autograd.Function):
    @staticmethod
    def forward(ctx, tensor, old_dist_spec, dist_spec, pg, forward_trans_func, backward_trans_func):
        ctx.old_dist_spec = old_dist_spec
        ctx.dist_spec = dist_spec
        ctx.backward_trans_func = backward_trans_func
        ctx.pg = pg
        return forward_trans_func(tensor, old_dist_spec, dist_spec, pg)

    @staticmethod
    def backward(ctx, grad_outputs):
        return (
            ctx.backward_trans_func(grad_outputs, ctx.dist_spec, ctx.old_dist_spec, ctx.pg),
            None,
            None,
            None,
            None,
            None,
        )


class DistSpecManager:
    _use_autograd_function: bool = True

    @staticmethod
    def _sanity_check(old_dist_spec: _DistSpec, dist_spec: _DistSpec) -> None:
        pass

    @staticmethod
    def _shard_as(
        tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup
    ) -> torch.Tensor:
        """_shard_as: shard the tensor w.r.t a distributed specification.
        Assuming the tensor passed in is a global (replicated) tensor.

        Args:
            tensor (torch.Tensor): a global (replicated) tensor before shard
            dist_spec (_DistSpec): the distributed spec. to be sharded as.
            pg (ProcessGroup): the process group of the corresponding colotensor

        Returns:
            torch.Tensor: a torch tensor after sharded.
        """
        assert (
            old_dist_spec.placement.value == "r"
        ), "The old_dist_spec of DistSpecManager._shard_as must be REPLICATE!"
        DistSpecManager._sanity_check(old_dist_spec, dist_spec)

        chunk = tensor
        idx = pg.tp_local_rank()
        num_parts = prod(dist_spec.num_partitions)
        for i, dim in enumerate(dist_spec.dims):
            num_parts //= dist_spec.num_partitions[i]

            chunk_size = divide(tensor.size(dim), dist_spec.num_partitions[i])
            chunk = chunk.narrow(dim, idx // num_parts * chunk_size, chunk_size)
            idx %= num_parts
        return chunk.clone().detach().contiguous()

    @staticmethod
    def _gather(tensor: torch.Tensor, old_dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
        """_gather gathers sharded tensors into a replicated one.

        Args:
            tensor (torch.Tensor): a sharded torch tensor
            old_dist_spec (_DistSpec): the distributed spec. of the tensor.

        Returns:
            torch.Tensor: a replicated tensor.
        """
        assert old_dist_spec.placement.value == "s", "The old_dist_spec of DistSpecManager._gather must be SHARD!"
        is_cpu_tensor = False
        if tensor.device.type == "cpu":
            # pytorch versions lower than 1.11 do not support gathering a cpu tensor.
            # Therefore, we transfer the tensor to GPU before the gather.
            saved_dev = tensor.device
            tensor.data = tensor.data.cuda()
            is_cpu_tensor = True

        buffer = [torch.empty_like(tensor) for _ in range(pg.tp_world_size())]
        assert tensor.device.type == "cuda"
        dist.all_gather(buffer, tensor, group=pg.tp_process_group())
        for i in range(len(old_dist_spec.dims) - 1, -1, -1):
            new_buffer = []
            dim = old_dist_spec.dims[i]
            num_parts = old_dist_spec.num_partitions[i]
            for start in range(0, len(buffer), num_parts):
                new_buffer.append(torch.cat(buffer[start : start + num_parts], dim))
            buffer = new_buffer
        assert len(buffer) == 1

        if is_cpu_tensor:
            buffer[0].data = buffer[0].data.to(saved_dev)
        return buffer[0]

    @staticmethod
    def _all_to_all(
        tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup
    ) -> torch.Tensor:
        world_size = pg.tp_world_size()
        if world_size == 1:
            return tensor

        assert tensor.device.type == "cuda", (
            "Currently, only CUDA Tensor with NCCL backend is supported for the requested AlltoAll "
            f"collective function, however, we got {tensor.device.type} device"
        )

        gather_dim = old_dist_spec.dims[0]
        scatter_dim = dist_spec.dims[0]
        shapes = list(tensor.shape)
        scattered_dim_size = shapes[scatter_dim] // world_size
        gathered_dim_size = shapes[gather_dim] * world_size
        shapes[scatter_dim] = scattered_dim_size

        scatter_list = [t.contiguous() for t in torch.tensor_split(tensor, world_size, scatter_dim)]
        gather_list = [torch.empty(*shapes, dtype=tensor.dtype, device=tensor.device) for _ in range(world_size)]
        dist.all_to_all(gather_list, scatter_list, group=pg.tp_process_group())

        output_ = torch.cat(gather_list, dim=gather_dim).contiguous()
        assert output_.shape[scatter_dim] == scattered_dim_size and output_.shape[gather_dim] == gathered_dim_size
        return output_

    @staticmethod
    def _r2r(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
        DistSpecManager._sanity_check(old_dist_spec, dist_spec)
        return tensor

    @staticmethod
    def _r2s(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
        DistSpecManager._sanity_check(old_dist_spec, dist_spec)
        return DistSpecManager._shard_as(tensor, old_dist_spec, dist_spec, pg)

    @staticmethod
    def _s2r(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
        DistSpecManager._sanity_check(old_dist_spec, dist_spec)
        return DistSpecManager._gather(tensor, old_dist_spec, pg)

    @staticmethod
    def _s2s(tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup) -> torch.Tensor:
        DistSpecManager._sanity_check(old_dist_spec, dist_spec)
        if old_dist_spec == dist_spec:
            return tensor
        if len(old_dist_spec.dims) == 1 and len(dist_spec.dims) == 1:
            # use all-to-all to save memory
            return DistSpecManager._all_to_all(tensor, old_dist_spec, dist_spec, pg)
        tensor = DistSpecManager._gather(tensor, old_dist_spec, pg)
        return DistSpecManager._shard_as(tensor, old_dist_spec, dist_spec, pg)

    @staticmethod
    def handle_trans_spec(
        tensor: torch.Tensor, old_dist_spec: _DistSpec, dist_spec: _DistSpec, pg: ProcessGroup
    ) -> torch.Tensor:
        assert isinstance(old_dist_spec, _DistSpec), f"{type(old_dist_spec)} should be _DistSpec"
        assert isinstance(dist_spec, _DistSpec), f"{type(dist_spec)} should be _DistSpec"

        trans_func_key = (old_dist_spec.placement, dist_spec.placement)
        trans_funcs = {
            (DistPlacementPattern.REPLICATE, DistPlacementPattern.REPLICATE): DistSpecManager._r2r,
            (DistPlacementPattern.REPLICATE, DistPlacementPattern.SHARD): DistSpecManager._r2s,
            (DistPlacementPattern.SHARD, DistPlacementPattern.REPLICATE): DistSpecManager._s2r,
            (DistPlacementPattern.SHARD, DistPlacementPattern.SHARD): DistSpecManager._s2s,
        }

        forward_trans_handle = trans_funcs[trans_func_key]
        if not DistSpecManager._use_autograd_function:
            return forward_trans_handle(tensor, old_dist_spec, dist_spec, pg)

        backward_trans_handle = trans_funcs[(dist_spec.placement, old_dist_spec.placement)]
        return TransformDistSpec.apply(
            tensor, old_dist_spec, dist_spec, pg, forward_trans_handle, backward_trans_handle
        )

    @staticmethod
    @contextmanager
    def no_grad():
        try:
            DistSpecManager._use_autograd_function = False
            yield
        finally:
            DistSpecManager._use_autograd_function = True
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
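The chunk-selection loop in `_shard_as` treats the TP-local rank as a mixed-radix number over `num_partitions`, consuming one digit per sharded dimension. The arithmetic can be checked on a single process with a small distribution-free sketch (`shard_for_rank` is a hypothetical helper, no process group involved):

import torch
from numpy import prod


def shard_for_rank(tensor, dims, num_partitions, idx):
    # mirrors the narrow() loop of DistSpecManager._shard_as for TP-local rank `idx`
    chunk = tensor
    num_parts = prod(num_partitions)
    for i, dim in enumerate(dims):
        num_parts //= num_partitions[i]
        chunk_size = tensor.size(dim) // num_partitions[i]
        chunk = chunk.narrow(dim, idx // num_parts * chunk_size, chunk_size)
        idx %= num_parts
    return chunk


x = torch.arange(16).reshape(4, 4)
# shard both dims into 2 parts each -> 4 ranks, each holding a 2x2 tile
print(shard_for_rank(x, dims=(0, 1), num_partitions=(2, 2), idx=3))
# tensor([[10, 11],
#         [14, 15]])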
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/tensor/const.py
colossalai/legacy/tensor/const.py
from enum import Enum


class TensorType(Enum):
    MODEL = 0
    NONMODEL = 1  # mainly activations
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/tensor/compute_spec.py
colossalai/legacy/tensor/compute_spec.py
from enum import Enum


class ComputePattern(Enum):
    TP1D = 0
    TP2D = 1
    TP2P5D = 2
    TP3D = 3


class ComputeSpec(object):
    """ComputeSpec

    The specification for the computation pattern.

    Args:
        compute_pattern (ComputePattern): an Enum instance for compute pattern.
    """

    def __init__(self, compute_pattern: ComputePattern) -> None:
        assert isinstance(compute_pattern, ComputePattern)
        self.compute_pattern = compute_pattern
        # Make sure output tensors are replicate
        self.output_replicate = True

    def __repr__(self):
        return f"ComputeSpec(pattern={self.compute_pattern}, replicate_output={self.output_replicate})"

    def set_output_replicate(self, flag: bool = True):
        self.output_replicate = flag
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
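Construction is trivial; the only mutable knob is the replicate-output flag:

from colossalai.legacy.tensor import ComputePattern, ComputeSpec

spec = ComputeSpec(ComputePattern.TP1D)
spec.set_output_replicate(False)  # keep layer outputs sharded instead of gathering them
print(spec)  # ComputeSpec(pattern=ComputePattern.TP1D, replicate_output=False)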
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/tensor/__init__.py
colossalai/legacy/tensor/__init__.py
from . import distspec
from .compute_spec import ComputePattern, ComputeSpec
from .dist_spec_mgr import DistSpecManager
from .distspec import ReplicaSpec, ShardSpec
from .process_group import ProcessGroup
from .tensor_spec import ColoTensorSpec

__all__ = [
    "ComputePattern",
    "ComputeSpec",
    "distspec",
    "DistSpecManager",
    "ProcessGroup",
    "ColoTensorSpec",
    "ShardSpec",
    "ReplicaSpec",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/tensor/process_group.py
colossalai/legacy/tensor/process_group.py
from typing import List, Optional

import torch

from colossalai.context.singleton_meta import SingletonMeta
from colossalai.logging import get_dist_logger


class PyTorchProcessGroupDict(metaclass=SingletonMeta):
    def __init__(self):
        # distributed settings
        # use this dict to record all Pytorch ProcessGroups
        self.dict = {}
        # set a distributed logger
        self.logger = get_dist_logger("ProcessGroup")

    def log_pg_init(self, rank_list: List[int], backend: str):
        str_list = ["Pytorch ProcessGroup Init:"]
        str_list.append(f"backend: {backend}")
        str_list.append(f"ranks: {rank_list}")
        self.logger.info("\n\t".join(str_list), ranks=[0])

    def get(self, rank_list: List[int], backend: str = "nccl"):
        """Reuse Pytorch ProcessGroup when such a group is initialized"""
        # we need to convert the passed list to a tuple
        # since List is unhashable
        processgroup_key = (backend, tuple(rank_list))
        if processgroup_key not in self.dict:
            self.log_pg_init(rank_list=rank_list, backend=backend)
            self.dict[processgroup_key] = torch.distributed.new_group(ranks=rank_list, backend=backend)
        return self.dict[processgroup_key]


PYTORCHPGDICT_ = None


class ProcessGroup:
    """ProcessGroup
    Process Group indicates how processes are organized in groups for parallel execution using Tensor Parallelism and Data Parallelism.

    NOTE, the ProcessGroup must be used after `torch.distributed` has been initialized.

    Args:
        rank: the global rank of the current process.
        ranks: List[int], a list of rank ids belonging to this process group.
        backend: str, the backend of the process group.
        tp_degree: Optional[int], tensor parallelism degree. How many processes are inside a tp process group. Default None means 1.
        dp_degree: Optional[int], data parallelism degree. How many processes are inside a dp process group. Default None means len(ranks).
    """

    def __init__(
        self,
        rank: Optional[int] = None,
        ranks: Optional[List[int]] = None,
        tp_degree: Optional[int] = None,
        dp_degree: Optional[int] = None,
    ) -> None:
        if not torch.distributed.is_initialized():
            self.is_init = False
            return

        global PYTORCHPGDICT_
        if PYTORCHPGDICT_ is None:
            PYTORCHPGDICT_ = PyTorchProcessGroupDict()

        assert torch.distributed.is_initialized(), "ProcessGroup must be used after torch.distributed is initialized"
        self._rank = torch.distributed.get_rank()
        if rank is not None:
            assert self._rank == rank  # make sure that the global rank is correct

        if ranks is None:
            self._rank_list = list(range(torch.distributed.get_world_size()))
        else:
            self._rank_list = ranks
            self._rank_list.sort()  # ensure that the list is in order

        self._world_size = len(self._rank_list)

        if dp_degree is None and tp_degree is None:
            self._dp_degree = self._world_size
            self._tp_degree = 1
        elif dp_degree and not tp_degree:
            self._dp_degree = dp_degree
            assert (
                self._world_size % self._dp_degree == 0
            ), f"The world size {self._world_size} should be divisible by the DP degree {dp_degree} when the TP degree is None"
            self._tp_degree = self._world_size // dp_degree
        elif not dp_degree and tp_degree:
            self._tp_degree = tp_degree
            assert (
                self._world_size % self._tp_degree == 0
            ), f"The world size {self._world_size} should be divisible by the TP degree {tp_degree} when the DP degree is None"
            self._dp_degree = self._world_size // tp_degree
        else:
            self._dp_degree = dp_degree
            self._tp_degree = tp_degree
            assert self._dp_degree * self._tp_degree == self._world_size, (
                f"the world size {self._world_size} should equal the product of DP degree {self._dp_degree} "
                f"and TP degree {self._tp_degree}"
            )

        self._tp_rank_list = None
        self._dp_rank_list = None

        for i in range(self._dp_degree):
            i_tp_list = [self._rank_list[i * self._tp_degree + j] for j in range(self._tp_degree)]
            PYTORCHPGDICT_.get(i_tp_list, "nccl")
            if self._rank in i_tp_list:
                self._tp_rank_list = i_tp_list

        for j in range(self._tp_degree):
            j_dp_list = [self._rank_list[i * self._tp_degree + j] for i in range(self._dp_degree)]
            PYTORCHPGDICT_.get(j_dp_list, "nccl")
            if self._rank in j_dp_list:
                self._dp_rank_list = j_dp_list

        self._has_cpu_groups = False
        self.is_init = True

    def set_cpu_groups(self):
        """set_cpu_groups
        Initialize Pytorch process groups for cpu communications.
        """
        if self.has_cpu_groups:
            return

        for i in range(self._dp_degree):
            i_tp_list = [self._rank_list[i * self._tp_degree + j] for j in range(self._tp_degree)]
            PYTORCHPGDICT_.get(i_tp_list, "gloo")

        for j in range(self._tp_degree):
            j_dp_list = [self._rank_list[i * self._tp_degree + j] for i in range(self._dp_degree)]
            PYTORCHPGDICT_.get(j_dp_list, "gloo")

        self._has_cpu_groups = True

    @property
    def has_cpu_groups(self) -> bool:
        """has_cpu_groups
        Whether the cpu groups have been initialized.

        Returns:
            bool: cpu process groups have been initialized or not.
        """
        return self._has_cpu_groups

    def __repr__(self):
        if self.is_init:
            ranks_str = f"ProcessGroup(ranks={self._rank_list},\n"
            personal_str = f"             rank={self._rank}, dp={self._dp_degree}, tp={self._tp_degree})"
            return ranks_str + personal_str
        else:
            return "ProcessGroup not initialized"

    def __eq__(self, obj: "ProcessGroup") -> bool:
        if not isinstance(obj, ProcessGroup):
            return False
        if self._rank != obj._rank:
            return False
        if self._rank_list != obj._rank_list:
            return False
        if self._tp_rank_list != obj._tp_rank_list:
            return False
        if self._dp_rank_list != obj._dp_rank_list:
            return False
        if self._tp_degree != obj._tp_degree:
            return False
        if self._dp_degree != obj._dp_degree:
            return False
        return True

    def rank(self) -> int:
        """rank
        The current rank in the global process group.

        Returns:
            int: the rank number
        """
        return self._rank

    def ranks_in_group(self) -> List[int]:
        """ranks_in_group
        a list of rank numbers in the global process group.

        Returns:
            List[int]: a list of rank numbers.
        """
        return self._rank_list

    def world_size(self) -> int:
        """world_size
        The world size of the global process group.

        Returns:
            int: world size
        """
        return self._world_size

    def tp_rank_list(self) -> List[int]:
        """tp_rank_list
        the rank list in the TP process group containing the current rank.

        Returns:
            List[int]: the list of rank numbers.
        """
        return self._tp_rank_list

    def dp_rank_list(self) -> List[int]:
        """dp_rank_list
        the rank list in the DP process group containing the current rank.

        Returns:
            List[int]: the list of rank numbers.
        """
        return self._dp_rank_list

    def tp_local_rank(self) -> int:
        """tp_local_rank
        The local rank number in the current TP process group.

        Returns:
            int: tp rank number.
        """
        return self._rank % self._tp_degree

    def dp_local_rank(self) -> int:
        """dp_local_rank
        The local rank number in the current DP process group.

        Returns:
            int: dp rank number.
        """
        return self._rank // self._tp_degree

    def dp_world_size(self) -> int:
        """dp_world_size
        The world size of the current DP process group.

        Returns:
            int: dp world size
        """
        return len(self._dp_rank_list)

    def tp_world_size(self) -> int:
        """tp_world_size
        The world size of the current TP process group.

        Returns:
            int: tp world size
        """
        return len(self._tp_rank_list)

    def dp_process_group(self):
        """dp_process_group
        the pytorch DP process group containing the current rank.

        Returns:
            `torch._C._distributed_c10d.ProcessGroup`: the pytorch DP process group.
        """
        return PYTORCHPGDICT_.get(self._dp_rank_list, "nccl")

    def tp_process_group(self):
        """tp_process_group
        the pytorch TP process group containing the current rank.

        Returns:
            `torch._C._distributed_c10d.ProcessGroup`: the pytorch TP process group.
        """
        return PYTORCHPGDICT_.get(self._tp_rank_list, "nccl")

    def cpu_dp_process_group(self):
        """cpu_dp_process_group
        the pytorch CPU DP process group containing the current rank.

        Asserts if the cpu process groups have not been initialized.

        Returns:
            `torch._C._distributed_c10d.ProcessGroup`: the pytorch DP process group.
        """
        assert self._has_cpu_groups
        return PYTORCHPGDICT_.get(self._dp_rank_list, "gloo")

    def cpu_tp_process_group(self):
        """cpu_tp_process_group
        the pytorch CPU TP process group containing the current rank.

        Asserts if the cpu process groups have not been initialized.

        Returns:
            `torch._C._distributed_c10d.ProcessGroup`: the pytorch TP process group.
        """
        assert self._has_cpu_groups
        return PYTORCHPGDICT_.get(self._tp_rank_list, "gloo")

    def get_ranks_in_dp(self) -> List[int]:
        """get_ranks_in_dp
        ranks in the current dp process group.

        Returns:
            List[int]: a list of rank numbers.
        """
        return self._dp_rank_list

    def get_ranks_in_tp(self):
        """get_ranks_in_tp
        ranks in the current tp process group.

        Returns:
            List[int]: a list of rank numbers.
        """
        return self._tp_rank_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
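The constructor lays ranks out row-major: consecutive ranks form a TP group, and ranks sharing a TP-local index form a DP group. The layout for `world_size=8, tp_degree=2` can be reproduced in plain Python (a sketch of the two loops in `__init__`, no `torch.distributed` needed):

world_size, tp_degree = 8, 2
dp_degree = world_size // tp_degree
ranks = list(range(world_size))

tp_groups = [[ranks[i * tp_degree + j] for j in range(tp_degree)] for i in range(dp_degree)]
dp_groups = [[ranks[i * tp_degree + j] for i in range(dp_degree)] for j in range(tp_degree)]

print(tp_groups)  # [[0, 1], [2, 3], [4, 5], [6, 7]]
print(dp_groups)  # [[0, 2, 4, 6], [1, 3, 5, 7]]
# consistent with the accessors: tp_local_rank() == rank % tp_degree, dp_local_rank() == rank // tp_degree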
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/tensor/distspec.py
colossalai/legacy/tensor/distspec.py
from enum import Enum
from typing import List

__all__ = ["ReplicaSpec", "ShardSpec"]


class DistPlacementPattern(Enum):
    REPLICATE = "r"
    SHARD = "s"


class _DistSpec:
    """_DistSpec

    A class describing a distributed specification. The DistSpec only applies to the tensor
    parallel process groups, because the dist spec of the data parallel process group can be
    deduced automatically. This is an internal data structure.
    The API for users should be `ShardSpec` and `ReplicaSpec`.

    Args:
        dist_placement_pattern (DistPlacementPattern): the pattern describing how tensors are distributed among processes.
            The dist_placement_pattern is picked from a limited set, now including two patterns: replicate and shard.
        process_group (Optional[ProcessGroup], optional): the process group contains processes. Defaults to None.
    """

    def __init__(self, dist_placement_pattern: DistPlacementPattern, **meta_info):
        self.placement = dist_placement_pattern
        for k, v in meta_info.items():
            setattr(self, k, v)

    def __eq__(self, other: "_DistSpec") -> bool:
        if dir(self) != dir(other):
            return False
        for attr in dir(self):
            if not attr.startswith("__") and getattr(self, attr) != getattr(other, attr):
                return False
        return True

    def __repr__(self) -> str:
        attr_list = []
        for attr in dir(self):
            if not attr.startswith("__"):
                attr_list.append(f"{attr}={str(getattr(self, attr))}")
        attr_str = ", ".join(attr_list)
        return "DistSpec(" + attr_str + ")"


def ReplicaSpec() -> _DistSpec:
    """ReplicaSpec

    A distributed specification representing a tensor replicated among the tensor parallel process group.

    Returns:
        _DistSpec: a replicated dist spec instance.
    """
    return _DistSpec(DistPlacementPattern.REPLICATE)


def ShardSpec(dims: List[int], num_partitions: List[int]) -> _DistSpec:
    """ShardSpec

    A distributed specification representing a tensor sharded among the tensor parallel process group.

    Note:
        Currently, only sharding on one dimension is valid. In other words, dims should be of size 1.

    Args:
        dims (List[int]): a list of dimensions
        num_partitions (List[int]): a list of partition numbers of each dimension.

    Returns:
        _DistSpec: a shard dist spec instance.
    """
    assert isinstance(dims, list) and isinstance(num_partitions, list)
    assert len(dims) == len(num_partitions)
    return _DistSpec(DistPlacementPattern.SHARD, dims=tuple(dims), num_partitions=tuple(num_partitions))
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
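Concretely, the two public constructors produce specs like these (single-dimension sharding, per the note above):

from colossalai.legacy.tensor import ReplicaSpec, ShardSpec

replica = ReplicaSpec()
shard = ShardSpec(dims=[0], num_partitions=[4])  # split dim 0 across 4 TP ranks

print(replica == ReplicaSpec())  # True: _DistSpec compares attribute by attribute
print(shard)  # DistSpec(dims=(0,), num_partitions=(4,), placement=DistPlacementPattern.SHARD)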
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/__init__.py
colossalai/legacy/nn/__init__.py
from .layer import *
from .loss import *
from .metric import *
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/loss/loss_2p5d.py
colossalai/legacy/nn/loss/loss_2p5d.py
import torch
import torch.distributed as dist
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.functional import cross_entropy
from torch.nn.modules.loss import _Loss

from colossalai.accelerator import get_accelerator
from colossalai.legacy.context import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.nn.layer.parallel_2p5d import reduce_by_batch_2p5d, split_batch_2p5d
from colossalai.legacy.nn.layer.parallel_2p5d._utils import assert_tesseract_initialization
from colossalai.legacy.registry import LOSSES


@LOSSES.register_module
class CrossEntropyLoss2p5D(_Loss):
    r"""Cross entropy loss for 2.5D parallelism

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.

    The ``args`` and ``kwargs`` should include parameters below:
    ::

        weight (Tensor, optional)
        size_average (bool, optional)
        ignore_index (int, optional)
        reduce (bool, optional)
        label_smoothing (float, optional)

    More details about ``args``, ``kwargs`` and ``torch.nn.functional.cross_entropy`` could be found in
    `Cross_entropy <https://pytorch.org/docs/stable/generated/torch.nn.functional.cross_entropy.html#torch.nn.functional.cross_entropy>`_.
    """

    def __init__(self, reduction=True, *args, **kwargs):
        super().__init__()
        assert_tesseract_initialization()
        self.reduction_mean = reduction
        self.loss_args = args
        self.loss_kwargs = kwargs

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        targets = split_batch_2p5d(targets)
        loss = cross_entropy(logits, targets, reduction="none", *self.loss_args, **self.loss_kwargs)
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_2p5d(loss, True)
        return loss


class _VocabParallelCrossEntropy2p5D(torch.autograd.Function):
    ### Modified based on megatron.mpu.cross_entropy ###

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, logits, targets):
        # logits: [b/dq, h/q]
        # loss: [b/dq]
        # targets: [b/dq, h/q]
        logits_max = torch.max(logits, dim=-1)[0]
        torch.distributed.all_reduce(
            logits_max, op=torch.distributed.ReduceOp.MAX, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW)
        )
        # Subtract the maximum value.
        logits = logits - logits_max.unsqueeze(dim=-1)

        vocab_size = logits.size(-1)
        rank = gpc.get_local_rank(ParallelMode.PARALLEL_2P5D_ROW)
        vocab_start = rank * vocab_size
        vocab_end = (rank + 1) * vocab_size - 1

        target_mask = (targets < vocab_start) | (targets > vocab_end)
        masked_target = targets.clone() - vocab_start
        masked_target[target_mask] = 0
        arange_1d = torch.arange(
            start=0,
            end=logits.size()[0],
        )
        predicted_logits = logits[arange_1d, masked_target]
        predicted_logits[target_mask] = 0.0
        dist.all_reduce(predicted_logits, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))

        exp_logits = torch.exp(logits)
        sum_exp_logits = exp_logits.sum(dim=1)
        dist.all_reduce(sum_exp_logits, group=gpc.get_group(ParallelMode.PARALLEL_2P5D_ROW))

        loss = torch.log(sum_exp_logits) - predicted_logits

        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target)

        return loss

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        grad_input = softmax

        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)

        # Add the gradient from matching classes.
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=get_accelerator().get_current_device())
        grad_2d[arange_1d, masked_target] -= 1.0 - target_mask.view(-1).float()

        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(output_grad.unsqueeze(dim=-1))

        return grad_input, None


@LOSSES.register_module
class VocabParallelCrossEntropyLoss2p5D(_Loss):
    """Vocab parallel cross entropy loss for 2.5D parallelism

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.
    """

    def __init__(self, reduction=True):
        super().__init__()
        self.reduction_mean = reduction

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        targets = split_batch_2p5d(targets)
        loss = _VocabParallelCrossEntropy2p5D.apply(logits, targets)
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_2p5d(loss, True)
        return loss
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/loss/loss_3d.py
colossalai/legacy/nn/loss/loss_3d.py
import torch
import torch.distributed as dist
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.functional import cross_entropy
from torch.nn.modules.loss import _Loss

from colossalai.accelerator import get_accelerator
from colossalai.legacy.constants import INPUT_GROUP_3D, OUTPUT_GROUP_3D, WEIGHT_GROUP_3D
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.nn.layer.parallel_3d import reduce_by_batch_3d, split_tensor_3d
from colossalai.legacy.nn.layer.parallel_3d._utils import get_parallel_mode_from_env
from colossalai.legacy.registry import LOSSES


@LOSSES.register_module
class CrossEntropyLoss3D(_Loss):
    r"""Cross entropy loss for 3D parallelism.

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.

    The ``args`` and ``kwargs`` should include parameters below:
    ::

        weight (Tensor, optional)
        size_average (bool, optional)
        ignore_index (int, optional)
        reduce (bool, optional)
        label_smoothing (float, optional)

    More details about ``args``, ``kwargs`` and ``torch.nn.functional.cross_entropy`` could be found in
    `Cross_entropy <https://pytorch.org/docs/stable/generated/torch.nn.functional.cross_entropy.html#torch.nn.functional.cross_entropy>`_.
    """

    def __init__(self, reduction=True, *args, **kwargs):
        super().__init__()
        self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
        self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
        self.reduction_mean = reduction
        self.loss_args = args
        self.loss_kwargs = kwargs

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        targets = split_tensor_3d(targets, 0, self.weight_parallel_mode)
        targets = split_tensor_3d(targets, 0, self.input_parallel_mode)
        loss = cross_entropy(logits, targets, reduction="none", *self.loss_args, **self.loss_kwargs)
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_3d(loss, self.input_parallel_mode, self.weight_parallel_mode, True)
        return loss


class _VocabParallelCrossEntropy3D(torch.autograd.Function):
    # Adapted from megatron.mpu.cross_entropy
    # loss[i] = -logits[i][targets] + log(sum(exp(logits[i])))

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, logits, targets, output_parallel_mode):
        # logits: [b/q^2, c/q]
        # labels: [b/q^2]
        # loss: [b/q^2]
        logits_max = torch.max(logits, dim=-1)[0]
        dist.all_reduce(logits_max, op=torch.distributed.ReduceOp.MAX, group=gpc.get_group(output_parallel_mode))
        # Subtract the maximum value.
        logits = logits - logits_max.unsqueeze(dim=-1)

        vocab_size_per_partition = logits.size()[-1]
        rank = gpc.get_local_rank(output_parallel_mode)
        vocab_start = rank * vocab_size_per_partition
        vocab_end = (rank + 1) * vocab_size_per_partition - 1

        # loss[i] = 0 if targets[i] < vocab_start or targets[i] > vocab_end
        target_mask = (targets < vocab_start) | (targets > vocab_end)
        masked_target = targets.clone() - vocab_start
        masked_target[target_mask] = 0
        arange_1d = torch.arange(start=0, end=logits.size()[0], device=get_accelerator().get_current_device())
        predicted_logits = logits[arange_1d, masked_target]
        predicted_logits = predicted_logits.clone().contiguous().view_as(targets)
        predicted_logits[target_mask] = 0.0
        dist.all_reduce(predicted_logits, group=gpc.get_group(output_parallel_mode))

        # Loss = log(sum(exp(logits))) - predicted-logit.
        exp_logits = torch.exp(logits)
        sum_exp_logits = exp_logits.sum(dim=-1)
        dist.all_reduce(sum_exp_logits, group=gpc.get_group(output_parallel_mode))
        loss = torch.log(sum_exp_logits) - predicted_logits

        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target)

        return loss

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        input_grad = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = input_grad.view(-1, partition_vocab_size)

        # Add the gradient from matching classes.
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=get_accelerator().get_current_device())
        grad_2d[arange_1d, masked_target] -= 1.0 - target_mask.view(-1).float()
        input_grad.mul_(output_grad.unsqueeze(dim=-1))

        return input_grad, None, None, None


@LOSSES.register_module
class VocabParallelCrossEntropyLoss3D(_Loss):
    """Vocab parallel cross entropy loss for 3D parallelism.

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.
    """

    def __init__(self, reduction=True):
        super().__init__()
        self.input_parallel_mode = get_parallel_mode_from_env(INPUT_GROUP_3D)
        self.weight_parallel_mode = get_parallel_mode_from_env(WEIGHT_GROUP_3D)
        self.output_parallel_mode = get_parallel_mode_from_env(OUTPUT_GROUP_3D)
        self.reduction_mean = reduction

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        targets = split_tensor_3d(targets, 0, self.weight_parallel_mode)
        targets = split_tensor_3d(targets, 0, self.input_parallel_mode)
        loss = _VocabParallelCrossEntropy3D.apply(logits, targets, self.output_parallel_mode)
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_3d(loss, self.input_parallel_mode, self.weight_parallel_mode, True)
        return loss
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/loss/loss_2d.py
colossalai/legacy/nn/loss/loss_2d.py
import torch
import torch.distributed as dist
from torch.cuda.amp import custom_bwd, custom_fwd
from torch.nn.functional import cross_entropy
from torch.nn.modules.loss import _Loss

from colossalai.accelerator import get_accelerator
from colossalai.legacy.context import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.nn.layer.parallel_2d import reduce_by_batch_2d, split_batch_2d
from colossalai.legacy.nn.layer.parallel_2d._utils import assert_summa_initialization
from colossalai.legacy.registry import LOSSES


@LOSSES.register_module
class CrossEntropyLoss2D(_Loss):
    r"""Cross entropy loss for 2D parallelism

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.

    The ``args`` and ``kwargs`` should include parameters below:
    ::

        weight (Tensor, optional)
        size_average (bool, optional)
        ignore_index (int, optional)
        reduce (bool, optional)
        label_smoothing (float, optional)

    More details about ``args``, ``kwargs`` and ``torch.nn.functional.cross_entropy`` could be found in
    `Cross_entropy <https://pytorch.org/docs/stable/generated/torch.nn.functional.cross_entropy.html#torch.nn.functional.cross_entropy>`_.
    """

    def __init__(self, reduction=True, *args, **kwargs):
        super().__init__()
        assert_summa_initialization()
        self.reduction_mean = reduction
        self.loss_args = args
        self.loss_kwargs = kwargs

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.

        Returns:
            float: the loss between logits and targets.
        """
        targets = split_batch_2d(targets)
        loss = cross_entropy(logits, targets, reduction="none", *self.loss_args, **self.loss_kwargs)
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_2d(loss, True)
        return loss


class _VocabParallelCrossEntropy2D(torch.autograd.Function):
    ### Modified based on megatron.mpu.cross_entropy ###

    @staticmethod
    @custom_fwd(cast_inputs=torch.float32)
    def forward(ctx, logits, targets):
        # logits: [b/q, h/q]
        # labels: [b/q]
        # loss: [b/q]
        # vocab_parallel_logits: [b/q, s, v/q]
        # target: [b/q, s]
        logits_max = torch.max(logits, dim=-1)[0]
        torch.distributed.all_reduce(
            logits_max, op=torch.distributed.ReduceOp.MAX, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW)
        )
        # Subtract the maximum value.
        # vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1))
        logits = logits - logits_max.unsqueeze(dim=-1)

        vocab_size = logits.size(-1)
        rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW)
        vocab_start = rank * vocab_size
        vocab_end = (rank + 1) * vocab_size - 1

        target_mask = (targets < vocab_start) | (targets > vocab_end)
        masked_target = targets.clone() - vocab_start
        masked_target[target_mask] = 0
        arange_1d = torch.arange(
            start=0,
            end=logits.size()[0],
        )
        predicted_logits = logits[arange_1d, masked_target]
        predicted_logits[target_mask] = 0.0
        dist.all_reduce(predicted_logits, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW))

        exp_logits = torch.exp(logits)
        sum_exp_logits = exp_logits.sum(dim=1)
        dist.all_reduce(sum_exp_logits, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW))

        loss = torch.log(sum_exp_logits) - predicted_logits

        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target)

        return loss

    @staticmethod
    @custom_bwd
    def backward(ctx, output_grad):
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target = ctx.saved_tensors

        # All the inputs have softmax as their gradient.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)

        # Add the gradient from matching classes.
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=get_accelerator().get_current_device())
        grad_2d[arange_1d, masked_target] -= 1.0 - target_mask.view(-1).float()

        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(output_grad.unsqueeze(dim=-1))

        return grad_input, None


@LOSSES.register_module
class VocabParallelCrossEntropyLoss2D(_Loss):
    """Vocab parallel cross entropy loss for 2D parallelism.

    Args:
        reduction (bool, optional): whether to average the loss, defaults to True.
    """

    def __init__(self, reduction=True):
        super().__init__()
        self.reduction_mean = reduction

    def forward(self, logits, targets):
        """Calculate loss between logits and targets.

        Args:
            logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits).
            targets (:class:`torch.tensor`): Ground truth class indices or class probabilities.
        """
        targets = split_batch_2d(targets)
        loss = _VocabParallelCrossEntropy2D.apply(
            logits,
            targets,
        )
        if self.reduction_mean:
            loss = loss.mean()
            loss = reduce_by_batch_2d(loss, True)
        return loss
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
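The vocab-parallel forward above is the stabilized log-sum-exp identity loss = log(sum(exp(logits))) - logits[target], split across ranks. A minimal single-process sketch (illustrative shapes, no process groups) checking that identity against torch.nn.functional.cross_entropy:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)           # [batch, vocab]
targets = torch.randint(0, 10, (4,))  # [batch]

logits_max = logits.max(dim=-1)[0]
shifted = logits - logits_max.unsqueeze(-1)    # the stability subtraction
predicted = shifted[torch.arange(4), targets]  # plays the role of predicted_logits
loss = torch.log(torch.exp(shifted).sum(dim=-1)) - predicted

reference = F.cross_entropy(logits, targets, reduction="none")
assert torch.allclose(loss, reference, atol=1e-5)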
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/loss/loss_1d.py
colossalai/legacy/nn/loss/loss_1d.py
import torch import torch.distributed as dist from torch.cuda.amp import custom_bwd, custom_fwd from torch.nn.modules.loss import _Loss from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.registry import LOSSES class _VocabParallelCrossEntropy1D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float32) def forward(ctx, vocab_parallel_logits, targets, process_group): if process_group is None: process_group = gpc.get_group(ParallelMode.PARALLEL_1D) # Maximum value along vocab dimension across all GPUs. logits_max = torch.max(vocab_parallel_logits, dim=-1)[0] torch.distributed.all_reduce(logits_max, op=torch.distributed.ReduceOp.MAX, group=process_group) # Subtract the maximum value. vocab_parallel_logits.sub_(logits_max.unsqueeze(dim=-1)) # Get the partition's vocab indices partition_vocab_size = vocab_parallel_logits.size()[-1] rank = dist.get_rank(process_group) vocab_start_index = partition_vocab_size * rank vocab_end_index = vocab_start_index + partition_vocab_size # Create a mask of valid vocab ids (1 means it needs to be masked). target_mask = (targets < vocab_start_index) | (targets >= vocab_end_index) masked_target = targets.clone() - vocab_start_index masked_target[target_mask] = 0 # Get predicted-logits = logits[target]. # For Simplicity, we convert logits to a 2-D tensor with size # [*, partition-vocab-size] and target to a 1-D tensor of size [*]. logits_2d = vocab_parallel_logits.view(-1, partition_vocab_size) masked_target_1d = masked_target.view(-1) arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device) predicted_logits_1d = logits_2d[arange_1d, masked_target_1d] predicted_logits_1d = predicted_logits_1d.clone().contiguous() predicted_logits = predicted_logits_1d.view_as(targets) predicted_logits[target_mask] = 0.0 # All reduce is needed to get the chunks from other GPUs. torch.distributed.all_reduce(predicted_logits, op=torch.distributed.ReduceOp.SUM, group=process_group) # Sum of exponential of logits along vocab dimension across all GPUs. exp_logits = torch.exp(vocab_parallel_logits) sum_exp_logits = exp_logits.sum(dim=-1) torch.distributed.all_reduce(sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=process_group) # Loss = log(sum(exp(logits))) - predicted-logit. loss = torch.log(sum_exp_logits) - predicted_logits # Store softmax, target-mask and masked-target for backward pass. exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1)) ctx.save_for_backward(exp_logits, target_mask, masked_target_1d) return loss @staticmethod @custom_bwd def backward(ctx, grad_output): # Retrieve tensors from the forward path. softmax, target_mask, masked_target_1d = ctx.saved_tensors # All the inputs have softmax as their gradient. grad_input = softmax # For simplicity, work with the 2D gradient. partition_vocab_size = softmax.size()[-1] grad_2d = grad_input.view(-1, partition_vocab_size) # Add the gradient from matching classes. arange_1d = torch.arange(start=0, end=grad_2d.size()[0], device=grad_2d.device) grad_2d[arange_1d, masked_target_1d] -= 1.0 - target_mask.view(-1).float() # Finally elementwise multiplication with the output gradients. grad_input.mul_(grad_output.unsqueeze(dim=-1)) return grad_input, None, None @LOSSES.register_module class VocabParallelCrossEntropyLoss1D(_Loss): """Vocab parallel cross entropy loss for 1D parallelism. Args: reduction (bool, optional): whether to average the loss, defaults to True. 
""" def __init__(self, reduction=True): super().__init__() self.reduction_mean = reduction def forward(self, logits, targets, process_group=None): """Calculate loss between logits and targets. Args: logits (:class:`torch.tensor`): Predicted unnormalized scores (often referred to as logits). targets (:class:`torch.tensor`): Ground truth class indices or class probabilities. """ loss = _VocabParallelCrossEntropy1D.apply(logits, targets, process_group) if self.reduction_mean: loss = loss.mean() return loss
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
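The less obvious part of the 1D kernel is the target-masking trick. A single-process sketch (all names illustrative) that simulates the ranks with slices and shows that summing the per-rank masked lookups reconstructs logits[target], which is exactly what the all_reduce achieves:

import torch

vocab, world_size, batch = 8, 2, 5
full_logits = torch.randn(batch, vocab)
targets = torch.randint(0, vocab, (batch,))

part = vocab // world_size
predicted = torch.zeros(batch)
for rank in range(world_size):                      # each iteration plays one rank
    shard = full_logits[:, rank * part : (rank + 1) * part]
    mask = (targets < rank * part) | (targets >= (rank + 1) * part)
    masked_target = targets.clone() - rank * part   # may fall outside [0, part)...
    masked_target[mask] = 0                         # ...so clamp masked rows to 0
    local = shard[torch.arange(batch), masked_target]
    local[mask] = 0.0                               # zero the out-of-partition rows
    predicted += local                              # stands in for the all_reduce

assert torch.equal(predicted, full_logits[torch.arange(batch), targets])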
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/loss/__init__.py
colossalai/legacy/nn/loss/__init__.py
from torch import nn from torch.nn.modules.loss import * from torch.nn.modules.loss import _Loss from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.nn.layer.utils import get_tensor_parallel_mode from .loss_1d import VocabParallelCrossEntropyLoss1D from .loss_2d import CrossEntropyLoss2D, VocabParallelCrossEntropyLoss2D from .loss_2p5d import CrossEntropyLoss2p5D, VocabParallelCrossEntropyLoss2p5D from .loss_3d import CrossEntropyLoss3D, VocabParallelCrossEntropyLoss3D _parallel_cross_entropy = { "2d": CrossEntropyLoss2D, "2.5d": CrossEntropyLoss2p5D, "3d": CrossEntropyLoss3D, } _vocab_parallel_cross_entropy = { "1d": VocabParallelCrossEntropyLoss1D, "2d": VocabParallelCrossEntropyLoss2D, "2.5d": VocabParallelCrossEntropyLoss2p5D, "3d": VocabParallelCrossEntropyLoss3D, } class CrossEntropyLoss(_Loss): def __init__(self, reduction: bool = True, *args, **kwargs): super().__init__() tensor_parallel = get_tensor_parallel_mode() if tensor_parallel is not None and env.vocab_parallel: self.loss = _vocab_parallel_cross_entropy[tensor_parallel](reduction=reduction, *args, **kwargs) elif tensor_parallel is None or tensor_parallel == "1d": reduction = "mean" if reduction else "none" self.loss = nn.CrossEntropyLoss(reduction=reduction, *args, **kwargs) else: self.loss = _parallel_cross_entropy[tensor_parallel](reduction=reduction, *args, **kwargs) def forward(self, *args): return self.loss(*args)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
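In the 1d/no-TP branch above, the boolean reduction flag is mapped onto PyTorch's string-valued API. A quick plain-PyTorch check of that mapping (no parallel context required):

import torch
import torch.nn as nn

logits, targets = torch.randn(3, 5), torch.tensor([0, 2, 4])
mean_loss = nn.CrossEntropyLoss(reduction="mean")(logits, targets)
none_loss = nn.CrossEntropyLoss(reduction="none")(logits, targets)
assert torch.allclose(mean_loss, none_loss.mean())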
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/_ops/__init__.py
colossalai/legacy/nn/_ops/__init__.py
from ._utils import *
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/_ops/_utils.py
colossalai/legacy/nn/_ops/_utils.py
from typing import List, Optional, Union

import torch
import torch.distributed as dist

from colossalai.legacy.global_variables import tensor_parallel_env as env
from colossalai.legacy.nn.layer.utils import divide
from colossalai.legacy.tensor import ColoTensorSpec, ProcessGroup
from colossalai.tensor import ColoTensor

GeneralTensor = Union[ColoTensor, torch.Tensor]
Number = Union[int, float]


def convert_to_colo_tensor(tensor: Optional[GeneralTensor], pg: ProcessGroup) -> Optional[ColoTensor]:
    if tensor is not None and not isinstance(tensor, ColoTensor):
        tensor = ColoTensor.from_torch_tensor(tensor, ColoTensorSpec(pg))
    return tensor


def set_parallel_input(input_parallel: bool):
    env.parallel_input_1d = input_parallel


def get_parallel_input():
    return env.parallel_input_1d


def vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank):
    index_f = rank * per_partition_vocab_size
    index_l = index_f + per_partition_vocab_size
    return index_f, index_l


def vocab_range_from_global_vocab_size(global_vocab_size, rank, world_size):
    per_partition_vocab_size = divide(global_vocab_size, world_size)
    return vocab_range_from_per_partition_vocab_size(per_partition_vocab_size, rank)


def _reduce(input_, pg: ProcessGroup):
    # skip if only one rank involved
    if pg.tp_world_size() == 1:
        return input_
    assert input_.device.type == "cuda"
    group = pg.tp_process_group()
    dist.all_reduce(input_, group=group)
    return input_


def _split(input_, pg: ProcessGroup, dim=-1):
    # skip if only one rank involved
    world_size = pg.tp_world_size()
    if world_size == 1:
        return input_

    # Split along the given (by default, last) dimension.
    dim_size = input_.size(dim)
    assert dim_size % world_size == 0, (
        f"The dimension to split ({dim_size}) is not a multiple of world size ({world_size}), "
        "cannot split tensor evenly"
    )

    tensor_list = torch.split(input_, dim_size // world_size, dim=dim)
    rank = pg.tp_local_rank()
    output = tensor_list[rank].contiguous()
    return output


def _gather(input_, pg: ProcessGroup, dim=-1):
    # skip if only one rank involved
    world_size = pg.tp_world_size()
    if world_size == 1:
        return input_

    # all gather
    rank = pg.tp_local_rank()
    tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
    tensor_list[rank] = input_
    assert input_.device.type == "cuda"
    group = pg.tp_process_group()
    torch.distributed.all_gather(tensor_list, input_, group=group)

    # concat
    output = torch.cat(tensor_list, dim=dim).contiguous()
    return output


class _ReduceGrad(torch.autograd.Function):
    """
    Pass the input to the model parallel region.

    Args:
        input_: input matrix.
        process_group: process group of the tensor parallel ranks.
    """

    @staticmethod
    def symbolic(graph, input_, process_group):
        return input_

    @staticmethod
    def forward(ctx, input_, process_group):
        ctx.mode = process_group
        return input_

    @staticmethod
    def backward(ctx, grad_output):
        return _reduce(grad_output, ctx.mode), None


class _ReduceInput(torch.autograd.Function):
    """
    All-reduce the input from the model parallel region.

    Args:
        input_: input matrix.
        process_group: process group of the tensor parallel ranks.
    """

    @staticmethod
    def symbolic(graph, input_, process_group):
        # note: a symbolic method mirrors forward's non-ctx inputs; the earlier
        # version called _reduce(input_) without its required process group
        return _reduce(input_, process_group)

    @staticmethod
    def forward(ctx, input_, process_group):
        return _reduce(input_, process_group)

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output, None


class _SplitForwardGatherBackward(torch.autograd.Function):
    """
    Split the input and keep only the chunk corresponding to the rank.

    Args:
        input_: input matrix.
        process_group: process group of the tensor parallel ranks.
        dim: dimension
    """

    @staticmethod
    def symbolic(graph, input_, process_group, dim):
        return _split(input_, process_group, dim)

    @staticmethod
    def forward(ctx, input_, process_group, dim):
        ctx.mode = process_group
        ctx.dim = dim
        return _split(input_, process_group, dim)

    @staticmethod
    def backward(ctx, grad_output):
        return _gather(grad_output, ctx.mode, ctx.dim), None, None


class _GatherForwardSplitBackward(torch.autograd.Function):
    """Gather the input from model parallel region and concatenate.

    Args:
        input_: input matrix.
        process_group: process group of the tensor parallel ranks.
        dim: dimension
    """

    @staticmethod
    def symbolic(graph, input_, process_group, dim):
        return _gather(input_, process_group, dim)

    @staticmethod
    def forward(ctx, input_, process_group, dim):
        ctx.mode = process_group
        ctx.dim = dim
        return _gather(input_, process_group, dim)

    @staticmethod
    def backward(ctx, grad_output):
        return _split(grad_output, ctx.mode, ctx.dim), None, None


def reduce_grad(input_, process_group):
    return _ReduceGrad.apply(input_, process_group)


def reduce_input(input_, process_group):
    return _ReduceInput.apply(input_, process_group)


def split_forward_gather_backward(input_, process_group, dim):
    return _SplitForwardGatherBackward.apply(input_, process_group, dim)


def gather_forward_split_backward(input_, process_group, dim):
    return _GatherForwardSplitBackward.apply(input_, process_group, dim)


def _all_to_all(x: torch.Tensor, pg: ProcessGroup, scatter_dim: int, gather_dim: int) -> torch.Tensor:
    world_size = pg.tp_world_size()
    if world_size == 1:
        return x

    # TODO: enabling mpi backend to support CPU all_to_all
    assert x.device.type == "cuda", "Currently, the collective function dual_all_to_all only supports nccl backend"

    shapes = list(x.size())
    shapes[scatter_dim] = shapes[scatter_dim] // world_size

    scatter_list = [each.contiguous() for each in torch.tensor_split(x, world_size, scatter_dim)]
    gather_list = [torch.empty(*shapes, dtype=x.dtype, device=x.device) for _ in range(world_size)]
    torch.distributed.all_to_all(gather_list, scatter_list, group=pg.tp_process_group())

    return torch.cat(gather_list, dim=gather_dim).contiguous()


class _DualAllToAll(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, pg, scatter_dim, gather_dim):
        ctx.scatter_dim = scatter_dim
        ctx.gather_dim = gather_dim
        ctx.pg = pg
        return _all_to_all(x, pg, scatter_dim, gather_dim)

    @staticmethod
    def backward(ctx, grad):
        return _all_to_all(grad, ctx.pg, ctx.gather_dim, ctx.scatter_dim), None, None, None


def dual_all_to_all(x, pg, scatter_dim: int, gather_dim: int):
    return _DualAllToAll.apply(x, pg, scatter_dim, gather_dim)


# table wise embedding shard
def _all_to_all_for_tablewise(
    x: torch.Tensor, pg: ProcessGroup, scatter_strides: List[int], gather_strides: List[int], forward=True
) -> torch.Tensor:
    world_size = pg.tp_world_size()
    rank = pg.tp_local_rank()
    if world_size == 1:
        return x
    assert x.device.type == "cuda", "Currently, the collective function dual_all_to_all only supports nccl backend"
    if forward:
        scatter_list = list(x.split(scatter_strides, 0))
        gather_list = [
            torch.empty(scatter_strides[rank], gather_strides[i], dtype=x.dtype, device=x.device)
            for i in range(world_size)
        ]
        torch.distributed.all_to_all(gather_list, scatter_list, group=pg.tp_process_group())
        return torch.cat(gather_list, 1).contiguous()
    else:
        # split on dim 1, lose contiguity
        scatter_list = [each.contiguous() for each in x.split(scatter_strides, 1)]
        gather_list = [
            torch.empty(gather_strides[i], scatter_strides[rank], dtype=x.dtype, device=x.device)
            for i in range(world_size)
        ]
        torch.distributed.all_to_all(gather_list, scatter_list, group=pg.tp_process_group())
        return torch.cat(gather_list, 0).contiguous()


class _DualAllToAllForTablewise(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, pg, scatter_strides, gather_strides):
        ctx.pg = pg
        ctx.scatter_strides = scatter_strides
        ctx.gather_strides = gather_strides
        return _all_to_all_for_tablewise(x, pg, scatter_strides, gather_strides, forward=True)

    @staticmethod
    def backward(ctx, grad):
        return (
            _all_to_all_for_tablewise(grad, ctx.pg, ctx.gather_strides, ctx.scatter_strides, forward=False),
            None,
            None,
            None,
        )


def dual_all_to_all_tablewise(x, pg, scatter_strides, gather_strides):
    return _DualAllToAllForTablewise.apply(x, pg, scatter_strides, gather_strides)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
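The split/gather pair above is an exact round trip along the chosen dimension, which is why each function can serve as the other's backward. A single-device sketch with a simulated world_size and no process group:

import torch

world_size, dim = 4, -1   # simulated; no process group involved
x = torch.randn(2, 8)

dim_size = x.size(dim)
assert dim_size % world_size == 0, "tensor must split evenly"
shards = torch.split(x, dim_size // world_size, dim=dim)         # what each rank keeps
restored = torch.cat([s.contiguous() for s in shards], dim=dim)  # what the all-gather rebuilds
assert torch.equal(restored, x)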
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/__init__.py
colossalai/legacy/nn/layer/__init__.py
from .colossalai_layer import * from .parallel_1d import * from .parallel_2d import * from .parallel_2p5d import * from .parallel_3d import * from .parallel_sequence import * from .utils import * from .vanilla import * from .wrapper import *
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/base_layer.py
colossalai/legacy/nn/layer/base_layer.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from contextlib import contextmanager import torch.nn as nn from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc class ParallelLayer(nn.Module): global_state_dict: bool = True def __init__(self): super().__init__() self.data_parallel_rank = ( 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA) ) self.data_parallel_size = ( 1 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_world_size(ParallelMode.DATA) ) self.tensor_parallel_rank = ( 0 if not gpc.is_initialized(ParallelMode.TENSOR) else gpc.get_local_rank(ParallelMode.TENSOR) ) self.tensor_parallel_size = ( 1 if not gpc.is_initialized(ParallelMode.TENSOR) else gpc.get_world_size(ParallelMode.TENSOR) ) self.pipeline_parallel_rank = ( 0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(ParallelMode.PIPELINE) ) self.pipeline_parallel_size = ( 1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(ParallelMode.PIPELINE) ) def _load_from_global_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): return super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def _save_to_global_state_dict(self, destination, prefix, keep_vars): return super()._save_to_state_dict(destination, prefix, keep_vars) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): if self.global_state_dict: if gpc.get_local_rank(ParallelMode.TENSOR) != 0: missing_keys.clear() unexpected_keys.clear() return self._load_from_global_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) return super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def _save_to_state_dict(self, destination, prefix, keep_vars): if self.global_state_dict: return self._save_to_global_state_dict(destination, prefix, keep_vars) return super()._save_to_state_dict(destination, prefix, keep_vars) @classmethod @contextmanager def use_local_state_dict(cls): try: cls.global_state_dict = False yield finally: cls.global_state_dict = True
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
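The use_local_state_dict classmethod above works by flipping a class-level attribute, so the toggle applies to every instance at once and is restored even if the body raises. A standalone sketch of that pattern (hypothetical Layer class):

from contextlib import contextmanager

class Layer:
    global_state_dict: bool = True

    @classmethod
    @contextmanager
    def use_local_state_dict(cls):
        try:
            cls.global_state_dict = False   # affects all instances
            yield
        finally:
            cls.global_state_dict = True    # restored even on error

a, b = Layer(), Layer()
with Layer.use_local_state_dict():
    assert not a.global_state_dict and not b.global_state_dict
assert a.global_state_dict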
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_sequence/_operation.py
colossalai/legacy/nn/layer/parallel_sequence/_operation.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import torch from torch import distributed as dist from torch.cuda.amp import custom_bwd, custom_fwd from colossalai.accelerator import get_accelerator from colossalai.legacy.communication import ring_forward from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.nn.layer.parallel_sequence._utils import _calc_current_device_range, _calc_incoming_device_range class RingQK(torch.autograd.Function): """ Calculate QK in a ring-exchange style """ @staticmethod @custom_fwd def forward(ctx, sub_q, sub_k, batch_size, num_attention_heads, sub_seq_length): # save tensor for backward ctx.save_for_backward(sub_q, sub_k) ctx.sub_seq_length = sub_seq_length # create local segment of attention score attention_score = torch.empty( batch_size * num_attention_heads, sub_seq_length, sub_seq_length * gpc.get_world_size(ParallelMode.SEQUENCE), dtype=sub_q.dtype, device=get_accelerator().get_current_device(), ) # compute local QK^T part_a = torch.matmul(sub_q, sub_k.transpose(2, 1)) local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE) local_world_size = gpc.get_world_size(ParallelMode.SEQUENCE) start_idx = local_rank * sub_seq_length end_idx = (local_rank + 1) * sub_seq_length attention_score[:, :, start_idx:end_idx] = part_a # compute QK^T in ring-all-reduce style for i in range(local_world_size - 1): sub_k = ring_forward(sub_k, ParallelMode.SEQUENCE) start_idx, end_idx = _calc_incoming_device_range(i, local_rank, local_world_size, sub_seq_length) part_a = torch.matmul(sub_q, sub_k.transpose(2, 1)) attention_score[:, :, start_idx:end_idx] = part_a return attention_score @staticmethod @custom_bwd def backward(ctx, grad_output): ( sub_q, sub_k, ) = ctx.saved_tensors local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE) local_world_size = gpc.get_world_size(ParallelMode.SEQUENCE) # calculate gradient of sub_k grad_k = torch.matmul(grad_output.transpose(2, 1), sub_q) dist.all_reduce(grad_k, group=gpc.get_group(ParallelMode.SEQUENCE)) grad_k = grad_k[:, local_rank * ctx.sub_seq_length : (local_rank + 1) * ctx.sub_seq_length] grad_k /= local_world_size # calculate gradient for sub_q grad_q = torch.zeros_like( sub_q, dtype=sub_q.dtype, device=get_accelerator().get_current_device(), ) # compute with local sub_k start_idx, end_idx = _calc_current_device_range(local_rank, ctx.sub_seq_length) grad_q += torch.matmul(grad_output[:, :, start_idx:end_idx], sub_k) # compute QK^T in ring-all-reduce style for i in range(local_world_size - 1): sub_k = ring_forward(sub_k, ParallelMode.SEQUENCE) start_idx, end_idx = _calc_incoming_device_range(i, local_rank, local_world_size, ctx.sub_seq_length) grad_q += torch.matmul(grad_output[:, :, start_idx:end_idx], sub_k) grad_q /= local_world_size return grad_q, grad_k, None, None, None class RingAV(torch.autograd.Function): """ Calculate AV in a ring-exchange style """ @staticmethod @custom_fwd def forward(ctx, attention_score, sub_v, batch_size, num_attention_heads, attention_head_size, sub_seq_length): local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE) local_world_size = gpc.get_world_size(ParallelMode.SEQUENCE) local_start_idx, local_end_idx = _calc_current_device_range(local_rank, sub_seq_length) sub_attention_result = torch.zeros( batch_size * num_attention_heads, sub_seq_length, attention_head_size, device=get_accelerator().get_current_device(), dtype=attention_score.dtype, ) # save tensors for backward 
        ctx.save_for_backward(attention_score, sub_v)
        ctx.sub_seq_length = sub_seq_length

        # compute the local AV block
        part_av = torch.matmul(attention_score[:, :, local_start_idx:local_end_idx], sub_v)
        sub_attention_result += part_av

        # compute AV in ring-exchange style
        for i in range(local_world_size - 1):
            sub_v = ring_forward(sub_v, ParallelMode.SEQUENCE)
            start_idx, end_idx = _calc_incoming_device_range(i, local_rank, local_world_size, sub_seq_length)

            # compute the partial AV for the incoming sub_v
            part_av = torch.matmul(attention_score[:, :, start_idx:end_idx], sub_v)
            sub_attention_result += part_av
        return sub_attention_result

    @staticmethod
    @custom_bwd
    def backward(ctx, grad_output):
        local_rank = gpc.get_local_rank(ParallelMode.SEQUENCE)
        local_world_size = gpc.get_world_size(ParallelMode.SEQUENCE)
        local_start_idx, local_end_idx = _calc_current_device_range(local_rank, ctx.sub_seq_length)
        attention_scores, sub_v = ctx.saved_tensors

        # calculate gradient of v
        grad_v = torch.matmul(attention_scores.transpose(2, 1), grad_output)
        dist.all_reduce(grad_v, group=gpc.get_group(ParallelMode.SEQUENCE))
        grad_v = grad_v[:, local_start_idx:local_end_idx]
        grad_v /= local_world_size

        # calculate gradient for attention score
        grad_attention_score = torch.zeros_like(
            attention_scores, dtype=grad_output.dtype, device=get_accelerator().get_current_device()
        )

        # compute with the local sub_v
        grad_attention_score[:, :, local_start_idx:local_end_idx] += torch.matmul(grad_output, sub_v.transpose(2, 1))

        # accumulate the remaining blocks in ring-exchange style
        for i in range(local_world_size - 1):
            sub_v = ring_forward(sub_v, ParallelMode.SEQUENCE)
            start_idx, end_idx = _calc_incoming_device_range(i, local_rank, local_world_size, ctx.sub_seq_length)

            # compute the gradient block for the incoming sub_v
            grad_attention_score[:, :, start_idx:end_idx] += torch.matmul(grad_output, sub_v.transpose(2, 1))

        return grad_attention_score, grad_v, None, None, None, None
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
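RingQK fills the full attention-score matrix one column block per ring step. A one-device sketch (world_size simulated with slices; shapes illustrative) showing the blockwise assembly equals the dense Q @ K^T:

import torch

world_size, bh, sub_seq, head = 2, 3, 4, 8   # bh = batch * num_heads
q = torch.randn(bh, world_size * sub_seq, head)
k = torch.randn(bh, world_size * sub_seq, head)

scores = torch.empty(bh, world_size * sub_seq, world_size * sub_seq)
for rank in range(world_size):                     # this rank's sub_q rows
    sub_q = q[:, rank * sub_seq : (rank + 1) * sub_seq]
    for src in range(world_size):                  # sub_k received via the ring
        sub_k = k[:, src * sub_seq : (src + 1) * sub_seq]
        scores[:, rank * sub_seq : (rank + 1) * sub_seq,
                  src * sub_seq : (src + 1) * sub_seq] = sub_q @ sub_k.transpose(2, 1)

assert torch.allclose(scores, q @ k.transpose(2, 1), atol=1e-5)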
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_sequence/__init__.py
colossalai/legacy/nn/layer/parallel_sequence/__init__.py
from ._operation import RingAV, RingQK from .layers import TransformerSelfAttentionRing __all__ = ["TransformerSelfAttentionRing", "RingAV", "RingQK"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_sequence/_utils.py
colossalai/legacy/nn/layer/parallel_sequence/_utils.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- def _calc_incoming_device_range(i, rank, world_size, sub_seq_length): device_of_incoming_k = (rank - i - 1) % world_size start_idx = sub_seq_length * device_of_incoming_k end_idx = sub_seq_length * (device_of_incoming_k + 1) return start_idx, end_idx def _calc_current_device_range(rank, sub_seq_length): start_idx = sub_seq_length * rank end_idx = sub_seq_length * (rank + 1) return start_idx, end_idx
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
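The index arithmetic above decides which rank's chunk arrives at each ring step: at step i, rank r receives the chunk originally owned by (r - i - 1) % world_size. Two concrete checks (the helper is copied from above so the snippet runs standalone):

def _calc_incoming_device_range(i, rank, world_size, sub_seq_length):
    device_of_incoming_k = (rank - i - 1) % world_size
    return sub_seq_length * device_of_incoming_k, sub_seq_length * (device_of_incoming_k + 1)

# rank 0, step 0 with 4 ranks: the incoming chunk comes from rank 3 -> slice [9, 12)
assert _calc_incoming_device_range(i=0, rank=0, world_size=4, sub_seq_length=3) == (9, 12)
# rank 1, step 2: (1 - 2 - 1) % 4 == 2 -> slice [6, 9)
assert _calc_incoming_device_range(i=2, rank=1, world_size=4, sub_seq_length=3) == (6, 9)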
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_sequence/layers.py
colossalai/legacy/nn/layer/parallel_sequence/layers.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import Parameter from colossalai.legacy.context import seed from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.nn.layer.parallel_sequence._operation import RingAV, RingQK from colossalai.legacy.registry import LAYERS from colossalai.nn.layer.scaled_softmax import AttnMaskType, FusedScaleMaskSoftmax @LAYERS.register_module class TransformerSelfAttentionRing(nn.Module): """Parallel self-attention layer abstract class. Self-attention layer takes input with size [b, s, h] and returns output of the same size. Args: hidden_size (int): hidden size. num_attention_heads (int): number of attention heads. attention_dropout (float): dropout probability for attention layer. attention_mask_func (:class:`typing.Callable`): Mask function to be applied. layer_number (int): number of layers. """ def __init__( self, hidden_size, num_attention_heads, attention_dropout, attention_mask_func, layer_number, apply_query_key_layer_scaling: bool = False, convert_fp16_to_fp32_in_softmax: bool = False, attn_mask_type=AttnMaskType.padding, masked_softmax_fusion=True, fp16=False, bf16=False, ): super().__init__() self.convert_fp16_to_fp32_in_softmax = convert_fp16_to_fp32_in_softmax self.apply_query_key_layer_scaling = apply_query_key_layer_scaling self.attention_mask_func = attention_mask_func self.layer_number = layer_number self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.attn_mask_type = attn_mask_type assert self.layer_number > 0 self.attention_dropout = attention_dropout if self.apply_query_key_layer_scaling: self.convert_fp16_to_fp32_in_softmax = True assert ( self.hidden_size % self.num_attention_heads == 0 ), "hidden size is not divisible by the number of attention heads" self.hidden_size_per_attention_head = self.hidden_size // num_attention_heads self.world_size = gpc.get_world_size(ParallelMode.SEQUENCE) # Strided linear layer. self.query_key_value = _Linear( hidden_size, 3 * self.hidden_size, ) self.coeff = None self.norm_factor = math.sqrt(self.hidden_size) if self.apply_query_key_layer_scaling: self.coeff = layer_number self.norm_factor *= self.coeff self.scale_mask_softmax = FusedScaleMaskSoftmax( fp16, bf16, self.attn_mask_type, masked_softmax_fusion, self.attention_mask_func, self.convert_fp16_to_fp32_in_softmax, self.coeff, ) self.attention_dropout = nn.Dropout(attention_dropout) # Output. 
self.dense = _Linear(hidden_size, hidden_size, bias=True, skip_bias_add=True) def forward(self, hidden_states, attention_mask): # hidden_states: [sub_seq_len, batch_size, hidden_size] # attention_mask: [batch_size, 1, sub_seq_len, seq_len] sub_seq_length, batch_size, hidden_size = hidden_states.size() # ===================== # Query, Key, and Value # ===================== # Attention heads shape change: # [sub_seq_len, batch_size, hidden_size] --> [sub_seq_len, batch_size, (3 * head_size * num_heads)] mixed_x_layer = self.query_key_value(hidden_states) # [sub_seq_len, batch_size, num_heads, 3 * head_size] --> 3 [sub_seq_len, batch_size, num_heads, head_size] new_tensor_shape = mixed_x_layer.size()[:-1] + ( self.num_attention_heads, 3 * self.hidden_size_per_attention_head, ) mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) # split into query, key and value last_dim = mixed_x_layer.dim() - 1 last_dim_value = mixed_x_layer.size(-1) assert last_dim_value % 3 == 0, ( "the last dimension is not a multiple of 3, " "cannot be divided into query, key and value" ) partition_size = last_dim_value // 3 (query_layer, key_layer, value_layer) = torch.split(mixed_x_layer, partition_size, dim=last_dim) # attention scores: [batch_size, num_heads, sub_seq_len, seq_len] output_size = ( query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0) * self.world_size, ) # [sub_seq_len, batch_size, num_heads, head_size] -> [sub_seq_len, batch_size * num_heads, head_size] query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) # [sub_seq_len, batch_size, num_heads, head_size] -> [sub_seq_len, batch_size * num_heads, head_size] key_layer = key_layer.view(key_layer.size(0), output_size[0] * output_size[1], -1) # attention_scores: [batch_size * num_heads, sub_seq_len, seq_len] attention_scores = RingQK.apply( query_layer.transpose(0, 1).contiguous(), # [batch_size * num_heads, sub_seq_len, head_size] key_layer.transpose(0, 1).contiguous(), # [batch_size * num_heads, sub_seq_len, head_size], batch_size, self.num_attention_heads, sub_seq_length, ) attention_scores /= self.norm_factor # change view to [batch_size, num_heads, sub_seq_len, seq_len] attention_scores = attention_scores.view(*output_size) # change shape to [batch_size, num_heads, sub_seq_len, seq_len] attention_probs = self.scale_mask_softmax(attention_scores, attention_mask) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
with seed(ParallelMode.TENSOR): attention_probs = self.attention_dropout(attention_probs) # context layer shape: [batch_size, num_heads, sub_seq_len, head_size] output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) # change view [sub_seq_len, batch_size * num_heads, head_size] value_layer = value_layer.contiguous().view(value_layer.size(0), output_size[0] * output_size[1], -1) # # change view [b * num_heads, sub_seq_len, seq_len] attention_probs = attention_probs.view( attention_probs.size(0) * attention_probs.size(1), attention_probs.size(2), attention_probs.size(3) ) # matmul: [batch_size * num_heads, sub_seq_len, head_size] context_layer = RingAV.apply( attention_probs, value_layer.transpose(0, 1).contiguous(), batch_size, self.num_attention_heads, self.hidden_size_per_attention_head, sub_seq_length, ) # change view [batch_size, num_heads, sub_seq_len, head_size] context_layer = context_layer.view(*output_size) # [batch_size, num_heads, sub_seq_len, head_size] -> [sub_seq_len, batch_size, num_heads, head_size] context_layer = context_layer.permute(2, 0, 1, 3).contiguous() # [sub_seq_len, batch_size, num_heads, head_size] -> [sub_seq_len, batch_size, hidden_size] new_context_layer_shape = context_layer.size()[:-2] + ( self.hidden_size_per_attention_head * self.num_attention_heads, ) context_layer = context_layer.view(*new_context_layer_shape) output, bias = self.dense(context_layer) return output, bias def __repr__(self): return ( f"TransformerSelfAttentionRing(apply_query_key_layer_scaling={self.apply_query_key_layer_scaling}, " f"layer_number={self.layer_number}, hidden_size:{self.hidden_size}, attention_dropout={self.attention_dropout}, " f"attn_mask_type={self.attn_mask_type}, num_attention_heads={self.num_attention_heads}, " f"hidden_size_per_attention_head={self.hidden_size_per_attention_head}, coeff={self.coeff}, norm_factor={self.norm_factor}, " f"convert_fp16_to_fp32_in_softmax={self.convert_fp16_to_fp32_in_softmax})" ) class _Linear(nn.Module): """Linear layer with column parallelism. The linear layer is defined as Y = XA + b. A is parallelized along its second dimension as A = [A_1, ..., A_p]. Arguments: input_size: first dimension of matrix A. output_size: second dimension of matrix A. bias: If true, add bias init_method: method to initialize weights. Note that bias is always set to zero. stride: For the strided linear layers. keep_master_weight_for_test: This was added for testing and should be set to False. It returns the master weights used for initialization. skip_bias_add: This was added to enable performance optimizations where bias can be fused with other elementwise operations. we skip adding bias but instead return it. """ def __init__(self, input_size, output_size, bias=True, skip_bias_add=False): super(_Linear, self).__init__() # Keep input parameters self.input_size = input_size self.output_size = output_size self.skip_bias_add = skip_bias_add self.weight = Parameter( torch.empty( self.output_size, self.input_size, ) ) nn.init.xavier_normal_(self.weight) if bias: self.bias = Parameter(torch.empty(self.output_size)) # Always initialize bias to zero. with torch.no_grad(): self.bias.zero_() else: self.register_parameter("bias", None) def forward(self, input_): # Matrix multiply. 
bias = self.bias if not self.skip_bias_add else None output = F.linear(input_, self.weight, bias) if self.skip_bias_add: return output, self.bias else: return output def __repr__(self): return ( f"Linear(in_features={self.input_size}, out_features={self.output_size}, " + f"bias={self.bias is not None}, skip_bias_add={self.skip_bias_add})" )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
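_Linear's skip_bias_add option returns the bias unapplied so a caller can fuse it into a later elementwise op rather than paying a separate add. A small sketch of the equivalence (illustrative tensors):

import torch
import torch.nn.functional as F

w, b, x = torch.randn(6, 4), torch.randn(6), torch.randn(2, 4)

out, bias = F.linear(x, w), b   # what forward returns when skip_bias_add=True
fused = out + bias              # bias applied later, where it could be fused
assert torch.allclose(fused, F.linear(x, w, b))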
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/utils/common.py
colossalai/legacy/nn/layer/utils/common.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import collections.abc from itertools import repeat import numpy as np import torch from torch import Tensor, nn from colossalai.legacy.constants import IS_TENSOR_PARALLEL, NUM_PARTITIONS from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.utils import checkpoint class CheckpointModule(nn.Module): def __init__(self, checkpoint: bool = True, offload: bool = False): super().__init__() self.checkpoint = checkpoint self._use_checkpoint = checkpoint self._offload = offload def _forward(self, *args, **kwargs): raise NotImplementedError("CheckpointModule should implement _forward method instead of origin forward") def forward(self, *args, **kwargs): if self._use_checkpoint: return checkpoint(self._forward, self._offload, *args, **kwargs) else: return self._forward(*args, **kwargs) def train(self, mode: bool = True): self._use_checkpoint = self.checkpoint return super().train(mode=mode) def eval(self): self._use_checkpoint = False return super().eval() def divide(numerator, denominator): """Only allow exact division. Args: numerator (int): Numerator of the division. denominator (int): Denominator of the division. Returns: int: the result of exact division. """ assert denominator != 0, "denominator can not be zero" assert numerator % denominator == 0, "{} is not divisible by {}".format(numerator, denominator) return numerator // denominator def swish(x: Tensor) -> Tensor: return x * torch.sigmoid(x) ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish} def set_tensor_parallel_attribute_by_size(param, size): setattr(param, IS_TENSOR_PARALLEL, True) setattr(param, NUM_PARTITIONS, size // np.prod(param.shape)) def set_tensor_parallel_attribute_by_partition(param, num_partitions): setattr(param, IS_TENSOR_PARALLEL, True) setattr(param, NUM_PARTITIONS, num_partitions) def get_tensor_parallel_mode(): return env.mode # From PyTorch internals def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse to_2tuple = _ntuple(2)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
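Two behaviors from common.py worth pinning down, assuming the package above is importable: divide() permits only exact division, and to_2tuple() expands a scalar into a pair while passing iterables through.

from colossalai.legacy.nn.layer.utils import divide, to_2tuple

assert divide(12, 4) == 3
assert to_2tuple(7) == (7, 7)
assert to_2tuple((3, 5)) == (3, 5)
# divide(10, 4) raises AssertionError: 10 is not divisible by 4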
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/utils/__init__.py
colossalai/legacy/nn/layer/utils/__init__.py
from .common import ( ACT2FN, CheckpointModule, _ntuple, divide, get_tensor_parallel_mode, set_tensor_parallel_attribute_by_partition, set_tensor_parallel_attribute_by_size, to_2tuple, ) __all__ = [ "CheckpointModule", "divide", "ACT2FN", "set_tensor_parallel_attribute_by_size", "set_tensor_parallel_attribute_by_partition", "get_tensor_parallel_mode", "_ntuple", "to_2tuple", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/vanilla/__init__.py
colossalai/legacy/nn/layer/vanilla/__init__.py
from .layers import ( DropPath, VanillaClassifier, VanillaLayerNorm, VanillaLinear, VanillaPatchEmbedding, WrappedDropout, WrappedDropPath, ) __all__ = [ "VanillaLayerNorm", "VanillaPatchEmbedding", "VanillaClassifier", "DropPath", "WrappedDropout", "WrappedDropPath", "VanillaLinear", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/vanilla/layers.py
colossalai/legacy/nn/layer/vanilla/layers.py
import math from typing import Callable import torch import torch.nn.functional as F from torch import Tensor from torch import nn as nn from torch.nn.parameter import Parameter from colossalai.accelerator import get_accelerator from colossalai.legacy.context import seed from colossalai.legacy.registry import LAYERS from colossalai.nn import init as init from ..utils import to_2tuple def drop_path(x, drop_prob: float = 0.0, training: bool = False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. Args: drop_prob (float, optional): probability of dropping path, defaults 0.0. training (bool, optional): whether in training progress, defaults False. """ if drop_prob == 0.0 or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) random_tensor.floor_() # binarize output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Adapted from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py Args: drop_prob (float, optional): probability of dropping path, defaults None. """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class WrappedDropout(nn.Module): r"""Same as torch.nn.Dropout. But it is wrapped with the context of seed manager. During training, randomly zeroes some elements of the input tensor with probability p using samples from a Bernoulli distribution. Each channel will be zeroed out independently on every forward call. Furthermore, the outputs are scaled by a factor of 1/(1-p) during training. This means that during evaluation the module simply computes an identity function. Args: p (float, optional): probability of an element to be zeroed, defaults 0.5. inplace (bool, optional): whether to do dropout in-place, default to be False. mode (:class:`colossalai.legacy.context.ParallelMode`): The chosen parallel mode. Note: The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ def __init__(self, p: float = 0.5, inplace: bool = False, mode=None): super().__init__() if p < 0 or p > 1: raise ValueError("dropout probability has to be between 0 and 1, " "but got {}".format(p)) self.p = p self.inplace = inplace if mode is None: self.func = self.nonefunc else: self.func = self.normalfunc self.mode = mode def nonefunc(self, inputs): return F.dropout(inputs, self.p, self.training, self.inplace) def normalfunc(self, inputs): with seed(self.mode): return F.dropout(inputs, self.p, self.training, self.inplace) def forward(self, inputs): return self.func(inputs) class WrappedDropPath(nn.Module): r"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Here, it is wrapped with the context of seed manager. Args: p (float, optional): probability of dropping path, defaults 0.0. mode (:class:`colossalai.legacy.context.ParallelMode`): The chosen parallel mode. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ def __init__(self, p: float = 0.0, mode=None): super().__init__() self.p = p self.mode = mode if self.mode is None: self.func = self.nonefunc else: self.func = self.normalfunc self.mode = mode def nonefunc(self, inputs): return drop_path(inputs, self.p, self.training) def normalfunc(self, inputs): with seed(self.mode): return drop_path(inputs, self.p, self.training) def forward(self, inputs): return self.func(inputs) @LAYERS.register_module class VanillaPatchEmbedding(nn.Module): r""" 2D Image to Patch Embedding Args: img_size (int): image size. patch_size (int): patch size. in_chans (int): number of channels of input image. embed_size (int): size of embedding. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. flatten (bool, optional): whether to flatten output tensor, defaults to True. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. position_embed_initializer (:class:`typing.Callable`, optional): The initializer of position embedding, defaults to zeros initializer. More details about initializer please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. 
""" def __init__( self, img_size: int, patch_size: int, in_chans: int, embed_size: int, flatten: bool = True, dtype: torch.dtype = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), position_embed_initializer: Callable = init.zeros_(), ): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.flatten = flatten self.weight = nn.Parameter( torch.empty( (embed_size, in_chans, *self.patch_size), device=get_accelerator().get_current_device(), dtype=dtype ) ) self.bias = nn.Parameter(torch.empty(embed_size, device=get_accelerator().get_current_device(), dtype=dtype)) self.cls_token = nn.Parameter( torch.zeros((1, 1, embed_size), device=get_accelerator().get_current_device(), dtype=dtype) ) self.pos_embed = nn.Parameter( torch.zeros( (1, self.num_patches + 1, embed_size), device=get_accelerator().get_current_device(), dtype=dtype ) ) self.reset_parameters(weight_initializer, bias_initializer, position_embed_initializer) def reset_parameters(self, weight_initializer, bias_initializer, position_embed_initializer): fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(self.weight) weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) bias_initializer(self.bias, fan_in=fan_in) position_embed_initializer(self.pos_embed) def forward(self, input_: Tensor) -> Tensor: B, C, H, W = input_.shape assert ( H == self.img_size[0] and W == self.img_size[1] ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." output = F.conv2d(input_, self.weight, self.bias, stride=self.patch_size) if self.flatten: output = output.flatten(2).transpose(1, 2) # BCHW -> BNC cls_token = self.cls_token.expand(output.shape[0], -1, -1) output = torch.cat((cls_token, output), dim=1) output = output + self.pos_embed return output @LAYERS.register_module class VanillaClassifier(nn.Module): r"""Dense linear classifier. Args: in_features (int): size of each input sample. num_classes (int): number of classes. weight (:class:`torch.nn.Parameter`, optional): weight of the classifier, defaults to None. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. flatten (bool, optional): whether to flatten output tensor, defaults to True. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about initializer please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. 
""" def __init__( self, in_features: int, num_classes: int, weight: nn.Parameter = None, bias: bool = True, dtype: torch.dtype = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() self.in_features = in_features self.num_classes = num_classes if weight is not None: self.weight = weight self.has_weight = False else: self.weight = nn.Parameter( torch.empty( self.num_classes, self.in_features, device=get_accelerator().get_current_device(), dtype=dtype ) ) self.has_weight = True if bias: self.bias = nn.Parameter( torch.zeros(self.num_classes, device=get_accelerator().get_current_device(), dtype=dtype) ) else: self.bias = None self.reset_parameters(weight_initializer, bias_initializer) def reset_parameters(self, weight_initializer, bias_initializer): fan_in, fan_out = self.in_features, self.num_classes if self.has_weight: weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) def forward(self, input_: Tensor) -> Tensor: return F.linear(input_, self.weight, self.bias) @LAYERS.register_module class VanillaLayerNorm(nn.Module): r""" Layer Normalization for colossalai Args: normalized_shape (int): input shape from an expected input of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]` If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps (float): a value added to the denominator for numerical stability, defaults to 1e-05. bias (bool, optional): Whether to add a bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. """ def __init__(self, normalized_shape: int, eps=1e-05, bias=True, dtype=None): super().__init__() self.normalized_shape = (normalized_shape,) self.variance_epsilon = eps factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} self.weight = nn.Parameter(torch.ones(normalized_shape, **factory_kwargs)) if bias: self.bias = nn.Parameter(torch.zeros(normalized_shape, **factory_kwargs)) else: self.bias = None def forward(self, x: Tensor) -> Tensor: return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.variance_epsilon) @LAYERS.register_module class VanillaLinear(nn.Module): """Linear layer. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. skip_bias_add: bool (optional, default to be false). weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. 
""" def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, skip_bias_add: bool = False, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), **kwargs, ) -> None: super().__init__() self.in_features = in_features self.out_features = out_features self.skip_bias_add = skip_bias_add factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} self.weight = Parameter(torch.empty(self.out_features, self.in_features, **factory_kwargs)) if bias: self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs)) else: self.bias = None weight_initializer(self.weight, fan_in=in_features, fan_out=out_features) if self.bias is not None: bias_initializer(self.bias, fan_in=in_features) def forward(self, input: Tensor) -> Tensor: if not self.skip_bias_add: return F.linear(input, self.weight, self.bias) else: return F.linear(input, self.weight), self.bias
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
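drop_path divides the surviving samples by keep_prob so the layer stays unbiased in expectation, E[drop_path(x)] = x. A quick Monte Carlo sketch of that property (illustrative sample count and tolerance):

import torch

torch.manual_seed(0)
x = torch.ones(10000, 1)
keep_prob = 0.7                                           # i.e. drop_prob = 0.3
mask = (keep_prob + torch.rand(x.shape[0], 1)).floor_()   # Bernoulli(keep_prob)
out = x.div(keep_prob) * mask                             # rescale the survivors
assert abs(out.mean().item() - 1.0) < 0.05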
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/wrapper/pipeline_wrapper.py
colossalai/legacy/nn/layer/wrapper/pipeline_wrapper.py
from typing import List, Tuple, Union import torch.distributed as dist import torch.nn as nn from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc class PipelineSharedModuleWrapper: def __init__(self, pipeline_ranks: Union[List[int], Tuple[int]]) -> None: assert len(pipeline_ranks) > 1, f"Expect len(pipeline_ranks) > 1, got {len(pipeline_ranks)}" self.pipeline_ranks = pipeline_ranks self.group = None self.ranks_in_group = None self._init_group() def _init_group(self): world_size = gpc.get_world_size(ParallelMode.GLOBAL) dp_size = gpc.get_world_size(ParallelMode.DATA) pp_size = gpc.get_world_size(ParallelMode.PIPELINE) rank = gpc.get_global_rank() num_dp_groups = world_size // dp_size num_pp_stages = num_dp_groups // pp_size for i in range(dp_size): for j in range(num_pp_stages): pipeline_ranks = list(range(i * num_dp_groups + j, (i + 1) * num_dp_groups, num_pp_stages)) sub_ranks = [pipeline_ranks[idx] for idx in self.pipeline_ranks] group = dist.new_group(sub_ranks) if rank in sub_ranks: self.group = group self.ranks_in_group = sub_ranks def register_module(self, module: nn.Module): assert ( self.ranks_in_group is not None ), f"Rank {gpc.get_local_rank(ParallelMode.PIPELINE)} is not in pipeline_ranks {self.pipeline_ranks}" src = self.ranks_in_group[self.pipeline_ranks[0]] for p in module.parameters(): setattr(p, "pipeline_shared_module_pg", self.group) dist.broadcast(p, src, group=self.group) def register_parameter(self, param: nn.Parameter): assert ( self.ranks_in_group is not None ), f"Rank {gpc.get_local_rank(ParallelMode.PIPELINE)} is not in pipeline_ranks {self.pipeline_ranks}" src = self.ranks_in_group[self.pipeline_ranks[0]] setattr(param, "pipeline_shared_module_pg", self.group) dist.broadcast(param, src, group=self.group)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
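The group bookkeeping in _init_group is easiest to see with concrete numbers. A standalone sketch (hypothetical sizes: 8 ranks, dp_size=2, pp_size=2) of the candidate groups it enumerates; a wrapper built with pipeline_ranks=[0, 1] would then tie the two stages of each group:

world_size, dp_size, pp_size = 8, 2, 2
num_dp_groups = world_size // dp_size        # ranks per data-parallel replica
num_pp_stages = num_dp_groups // pp_size     # stride between pipeline peers

for i in range(dp_size):
    for j in range(num_pp_stages):
        pipeline_ranks = list(range(i * num_dp_groups + j, (i + 1) * num_dp_groups, num_pp_stages))
        print(i, j, pipeline_ranks)   # e.g. (0, 0) -> [0, 2], (1, 1) -> [5, 7]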
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/wrapper/__init__.py
colossalai/legacy/nn/layer/wrapper/__init__.py
from .pipeline_wrapper import PipelineSharedModuleWrapper __all__ = ["PipelineSharedModuleWrapper"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_2d/_operation.py
colossalai/legacy/nn/layer/parallel_2d/_operation.py
from typing import Any, Optional, Tuple

import torch
import torch.distributed as dist
from torch import Tensor
from torch.cuda.amp import custom_bwd, custom_fwd

from colossalai.accelerator import get_accelerator
from colossalai.legacy.communication.collective import all_gather, all_reduce, reduce_scatter
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc


def matmul_2d(
    a,
    b,
    summa_dim,
    out_shape,
    row_rank=None,
    col_rank=None,
    row_parallel_mode=ParallelMode.PARALLEL_2D_ROW,
    col_parallel_mode=ParallelMode.PARALLEL_2D_COL,
):
    r"""Matrix multiplication for 2D parallelism.

    Args:
        a (:class:`torch.tensor`): matrix :math:`A`.
        b (:class:`torch.tensor`): matrix :math:`B`.
        summa_dim (int): dimension of SUMMA for 2D parallelism.
        out_shape (:class:`torch.size`): shape of output tensor.
        row_rank (int, optional): the rank of row, defaults to None.
        col_rank (int, optional): the rank of column, defaults to None.
        row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`, optional): row parallel mode,
            defaults to ParallelMode.PARALLEL_2D_ROW.
        col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`, optional): column parallel mode,
            defaults to ParallelMode.PARALLEL_2D_COL.

    Returns:
        :class:`torch.tensor`: :math:`C = AB`.

    Note:
        The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found
        in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_
    """
    if row_rank is None:
        row_rank = gpc.get_local_rank(col_parallel_mode)
    if col_rank is None:
        col_rank = gpc.get_local_rank(row_parallel_mode)

    data_parallel_rank = 0 if not gpc.is_initialized(ParallelMode.DATA) else gpc.get_local_rank(ParallelMode.DATA)
    pipeline_parallel_rank = (
        0 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_local_rank(ParallelMode.PIPELINE)
    )
    pipeline_parallel_size = (
        1 if not gpc.is_initialized(ParallelMode.PIPELINE) else gpc.get_world_size(ParallelMode.PIPELINE)
    )
    tensor_parallel_size = summa_dim**2
    # autograd.Function subclasses must be invoked through .apply; calling the
    # class directly is the legacy interface and fails on current PyTorch
    return Matmul_AB_2D.apply(
        a,
        b,
        summa_dim,
        out_shape,
        row_rank,
        col_rank,
        row_parallel_mode,
        col_parallel_mode,
        data_parallel_rank,
        pipeline_parallel_rank,
        pipeline_parallel_size,
        tensor_parallel_size,
    )


class _Classifier2D(torch.autograd.Function):
    @staticmethod
    @custom_fwd(cast_inputs=torch.float16)
    def forward(
        ctx: Any,
        A: Tensor,
        B: Tensor,
        bias: Optional[Tensor],
        summa_dim: int,
        out_shape: Tuple[int, ...],
        row_rank: int,
        col_rank: int,
        row_parallel_mode: ParallelMode,
        col_parallel_mode: ParallelMode,
        data_parallel_rank: int,
        pipeline_parallel_rank: int,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
    ) -> Tensor:
        A = A.clone().detach()
        A_shape = A.shape
        A = A.reshape((-1, A_shape[-1]))
        B_shape = B.shape
        B = B.reshape((-1, B_shape[-1]))
        B_temp = all_gather(B, -1, col_parallel_mode)
        if ctx:
            ctx.save_for_backward(A, B_temp)

        C = torch.matmul(A, B_temp.transpose(0, 1))

        C = all_reduce(C, row_parallel_mode)

        ctx.use_bias = bias is not None
        if bias is not None:
            C = C + bias

        out = C.reshape(out_shape)

        if ctx:
            ctx.summa_dim = summa_dim
            ctx.row_rank = row_rank
            ctx.col_rank = col_rank
            ctx.row_parallel_mode = row_parallel_mode
            ctx.col_parallel_mode = col_parallel_mode
            ctx.A_shape = A_shape
            ctx.B_shape = B_shape
            ctx.data_parallel_rank = data_parallel_rank
            ctx.pipeline_parallel_rank = pipeline_parallel_rank
            ctx.pipeline_parallel_size = pipeline_parallel_size
            ctx.tensor_parallel_size = tensor_parallel_size

        return out

    @staticmethod
    @custom_bwd
    def
backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: A, B = ctx.saved_tensors with torch.no_grad(): A_grad = torch.matmul(output_grad, B) A_grad = A_grad.reshape(ctx.A_shape) B_grad = torch.matmul(output_grad.reshape(-1, output_grad.shape[-1]).transpose(0, 1), A) B_grad = reduce_scatter(B_grad, -1, ctx.col_parallel_mode) B_grad = B_grad.reshape(ctx.B_shape) if ctx.use_bias: bias_grad = torch.sum(output_grad, dim=tuple(range(output_grad.ndim - 1))) bias_grad = all_reduce(bias_grad, ctx.col_parallel_mode) else: bias_grad = None return A_grad, B_grad, bias_grad, None, None, None, None, None, None, None, None, None, None def classifier_2d( A: Tensor, B: Tensor, bias: Optional[Tensor], summa_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: r"""2D parallel classifier. Args: A (:class:`torch.tensor`): matrix :math:`A`. B (:class:`torch.tensor`): matrix :math:`B`. bias (:class:`torch.tensor`, optional): matrix of bias. summa_dim (int): dimension of SUMMA fo 2D parallelism. out_shape (:class:`torch.size`): shape of output tensor. row_rank (int, optional): the rank of row, defaults to None. col_rank (int, optional): the rank of column, defaults to None. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _Classifier2D.apply( A, B, bias, summa_dim, out_shape, row_rank, col_rank, row_parallel_mode, col_parallel_mode, data_parallel_rank, pipeline_parallel_rank, pipeline_parallel_size, tensor_parallel_size, ) class Matmul_AB_2D(torch.autograd.Function): r"""Matrix multiplication for :math:`C = AB`. Args: A (:class:`torch.tensor`): matrix :math:`A`. B (:class:`torch.tensor`): matrix :math:`B`. summa_dim (int): dimension of SUMMA fo 2D parallelism. out_shape (:class:`torch.size`): shape of output tensor. row_rank (int, optional): the rank of row, defaults to None. col_rank (int, optional): the rank of column, defaults to None. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be concluded in ``ParallelMode``. 
More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, A: Tensor, B: Tensor, summa_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: # A: [b / q, s, h / q] -> [(b * s) / q, h / q] # B: [h / q, s / q] # C: [b / q, s, s / q] -> [(b * s) / q, s / q] assert A.shape[-1] == B.shape[-2], "Invalid shapes: A={}, B={} for AB.".format(A.shape, B.shape) if ctx: ctx.save_for_backward(A, B) A_shape = A.shape A = A.reshape((-1, A_shape[-1])) B_shape = B.shape B = B.reshape((-1, B_shape[-1])) C_shape = (A.shape[0], B.shape[-1]) C = torch.zeros(C_shape, dtype=A.dtype, device=get_accelerator().get_current_device()) # use circular buffer to store the communication tensor # 2 is enough for all cases A_list = [torch.empty_like(A) for _ in range(2)] B_list = [torch.empty_like(B) for _ in range(2)] row_group = gpc.get_group(row_parallel_mode) col_group = gpc.get_group(col_parallel_mode) src_a = ( summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) src_b = ( col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) opa = [None] * 2 opb = [None] * 2 A_list[0].copy_(A) B_list[0].copy_(B) opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True) opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True) cur = 0 for i in range(summa_dim): if i != summa_dim - 1: A_list[1 - cur].copy_(A) opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True) B_list[1 - cur].copy_(B) opb[1 - cur] = dist.broadcast(B_list[1 - cur], src=src_b + summa_dim, group=col_group, async_op=True) if opa[cur] is not None: opa[cur].wait() if opb[cur] is not None: opb[cur].wait() torch.addmm(C, A_list[cur], B_list[cur], out=C) cur = 1 - cur src_a += 1 src_b += summa_dim out = C.reshape(out_shape) if ctx: ctx.summa_dim = summa_dim ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode ctx.A_shape = A_shape ctx.B_shape = B_shape ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size return out @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: A, B = ctx.saved_tensors with torch.no_grad(): A_grad = Matmul_ABT_2D.apply( output_grad, B, ctx.summa_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) B_grad = Matmul_ATB_2D.apply( A, output_grad, ctx.summa_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None class Matmul_ABT_2D(torch.autograd.Function): r"""Matrix multiplication for :math:`C = AB^T` Args: A 
(:class:`torch.tensor`): matrix :math:`A`. B (:class:`torch.tensor`): matrix :math:`B`. summa_dim (int): dimension of SUMMA for 2D parallelism. out_shape (:class:`torch.size`): shape of output tensor. row_rank (int): the rank of the row. col_rank (int): the rank of the column. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank. pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be one of the modes defined in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. """ @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, A: Tensor, B: Tensor, summa_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: assert A.shape[-1] == B.shape[-1], "Invalid shapes: A={}, B={} for ABT.".format(A.shape, B.shape) if ctx: ctx.save_for_backward(A, B) A_shape = A.shape A = A.reshape((-1, A_shape[-1])) B_shape = B.shape B = B.reshape((-1, B_shape[-1])) C_shape = (A.shape[0], B.shape[0]) C = torch.empty(C_shape, dtype=A.dtype, device=get_accelerator().get_current_device()) # use circular buffer to store the communication tensor # 2 is enough for all cases B_list = [torch.empty_like(B) for _ in range(2)] C_list = [torch.empty_like(C) for _ in range(2)] row_group = gpc.get_group(row_parallel_mode) col_group = gpc.get_group(col_parallel_mode) src_b = ( col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) src_c = ( summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) opb = [None] * 2 opr = [None] * 2 B_list[0].copy_(B) opb[0] = dist.broadcast(B_list[0], src=src_b, group=col_group, async_op=True) cur = 0 for i in range(summa_dim): if i != summa_dim - 1: B_list[1 - cur].copy_(B) opb[1 - cur] = dist.broadcast(B_list[1 - cur], src=src_b + summa_dim, group=col_group, async_op=True) if opr[cur] is not None: opr[cur].wait() if i - 2 == col_rank: C.copy_(C_list[cur]) if opb[cur] is not None: opb[cur].wait() torch.matmul(A, B_list[cur].transpose(0, 1), out=C_list[cur]) opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=row_group, async_op=True) cur = 1 - cur src_b += summa_dim src_c += 1 for op in opr: op.wait() if summa_dim - 2 == col_rank: C.copy_(C_list[cur]) if summa_dim - 1 == col_rank: C.copy_(C_list[1 - cur]) out = C.reshape(out_shape) if ctx: ctx.summa_dim = summa_dim ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode ctx.A_shape = A_shape ctx.B_shape = B_shape ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size return out @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> 
Tuple[Tensor, ...]: A, B = ctx.saved_tensors with torch.no_grad(): A_grad = Matmul_AB_2D.apply( output_grad, B, ctx.summa_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) B_grad = Matmul_ATB_2D.apply( output_grad, A, ctx.summa_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None class Matmul_ATB_2D(torch.autograd.Function): r"""Matrix multiplication for :math:`C = A^TB`. Args: A (:class:`torch.tensor`): matrix :math:`A`. B (:class:`torch.tensor`): matrix :math:`B`. summa_dim (int): dimension of SUMMA fo 2D parallelism. out_shape (:class:`torch.size`): shape of output tensor. row_rank (int, optional): the rank of row, defaults to None. col_rank (int, optional): the rank of column, defaults to None. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_. """ @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, A: Tensor, B: Tensor, summa_dim: int, out_shape: Tuple[int, ...], row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: assert A.shape[-2] == B.shape[-2], "Invalid shapes: A={}, B={} for ATB.".format(A.shape, B.shape) if ctx: ctx.save_for_backward(A, B) A_shape = A.shape A = A.reshape((-1, A_shape[-1])) B_shape = B.shape B = B.reshape((-1, B_shape[-1])) C_shape = (A.shape[-1], B.shape[-1]) C = torch.empty(C_shape, dtype=A.dtype, device=get_accelerator().get_current_device()) # use circular buffer to store the communication tensor # 2 is enough for all cases A_list = [torch.empty_like(A) for _ in range(2)] C_list = [torch.empty_like(C) for _ in range(2)] row_group = gpc.get_group(row_parallel_mode) col_group = gpc.get_group(col_parallel_mode) src_a = ( summa_dim * row_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) src_c = ( col_rank + data_parallel_rank * pipeline_parallel_size * tensor_parallel_size + pipeline_parallel_rank * tensor_parallel_size ) opa = [None] * 2 opr = [None] * 2 A_list[0].copy_(A) opa[0] = dist.broadcast(A_list[0], src=src_a, group=row_group, async_op=True) cur = 0 for i in range(summa_dim): if i != summa_dim - 1: A_list[1 - cur].copy_(A) opa[1 - cur] = dist.broadcast(A_list[1 - cur], src=src_a + 1, group=row_group, async_op=True) if opr[cur] is not None: opr[cur].wait() if i - 2 == row_rank: C.copy_(C_list[cur]) if opa[cur] is not None: opa[cur].wait() torch.matmul(A_list[cur].transpose(0, 1), B, out=C_list[cur]) opr[cur] = dist.reduce(C_list[cur], dst=src_c, group=col_group, async_op=True) 
cur = 1 - cur src_a += 1 src_c += summa_dim for op in opr: op.wait() if summa_dim - 2 == row_rank: C.copy_(C_list[cur]) if summa_dim - 1 == row_rank: C.copy_(C_list[1 - cur]) out = C.reshape(out_shape) if ctx: ctx.summa_dim = summa_dim ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode ctx.A_shape = A_shape ctx.B_shape = B_shape ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size return out @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: A, B = ctx.saved_tensors with torch.no_grad(): A_grad = Matmul_ABT_2D.apply( B, output_grad, ctx.summa_dim, ctx.A_shape, ctx.row_rank, ctx.col_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) B_grad = Matmul_AB_2D.apply( A, output_grad, ctx.summa_dim, ctx.B_shape, ctx.row_rank, ctx.col_rank, ctx.row_parallel_mode, ctx.col_parallel_mode, ctx.data_parallel_rank, ctx.pipeline_parallel_rank, ctx.pipeline_parallel_size, ctx.tensor_parallel_size, ) return A_grad, B_grad, None, None, None, None, None, None, None, None, None, None class _Add_Bias_2D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward( ctx: Any, input_: Tensor, bias: Tensor, output_size_per_partition: int, row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, skip_bias_add: bool, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: bias_temp = all_gather(bias, -1, col_parallel_mode) ctx.row_rank = row_rank ctx.col_rank = col_rank ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode ctx.bias = skip_bias_add ctx.data_parallel_rank = data_parallel_rank ctx.pipeline_parallel_rank = pipeline_parallel_rank ctx.pipeline_parallel_size = pipeline_parallel_size ctx.tensor_parallel_size = tensor_parallel_size if skip_bias_add: return bias_temp else: output = input_ + bias_temp return output @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: col_parallel_mode = ctx.col_parallel_mode if ctx.bias: grad = reduce_scatter(output_grad, -1, col_parallel_mode) return None, grad, None, None, None, None, None, None, None, None, None, None else: reduce_dim = tuple(range(output_grad.ndim - 1)) reduce = torch.sum(output_grad, dim=reduce_dim) grad = reduce_scatter(reduce, -1, col_parallel_mode) return output_grad, grad, None, None, None, None, None, None, None, None, None, None def add_bias_2d( input_: Tensor, bias: Tensor, output_size_per_partition: int, row_rank: int, col_rank: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, skip_bias_add: bool, data_parallel_rank: int, pipeline_parallel_rank: int, pipeline_parallel_size: int, tensor_parallel_size: int, ) -> Tensor: r"""Matrix add bias: :math:`C = A + b`. Args: input_ (:class:`torch.tensor`): matrix :math:`A`. bias (:class:`torch.tensor`): matrix :math:`B`. output_size_per_partition (int): size of output per partition. row_rank (int, optional): the rank of row, defaults to None. col_rank (int, optional): the rank of column, defaults to None. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. 
col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion. data_parallel_rank (int): data parallel rank. pipeline_parallel_rank (int): pipeline parallel rank. pipeline_parallel_size (int): pipeline parallel size. tensor_parallel_size (int): tensor parallel size. Note: The parallel_mode should be one of the modes defined in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _Add_Bias_2D.apply( input_, bias, output_size_per_partition, row_rank, col_rank, row_parallel_mode, col_parallel_mode, skip_bias_add, data_parallel_rank, pipeline_parallel_rank, pipeline_parallel_size, tensor_parallel_size, ) class _Layernorm_2D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float32) def forward( ctx: Any, input_: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, ) -> Tensor: input_ = input_ - E_x # in here, input = x - E[x], Var_x = 1 / sqrt(Var[x] + eps) ctx.normalized_shape = hidden_size output = input_ * Var_x ctx.save_for_backward(output, Var_x) ctx.row_parallel_mode = row_parallel_mode ctx.col_parallel_mode = col_parallel_mode return output @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: row_parallel_mode = ctx.row_parallel_mode x, Var_x = ctx.saved_tensors # in here, Var_x = 1 / sqrt(Var[x] + eps), x = (x - E[x]) * Var_x output_grad_sum = torch.sum(output_grad, dim=-1, keepdim=True) torch.distributed.all_reduce(output_grad_sum, group=gpc.get_group(row_parallel_mode)) output_grad_sum /= ctx.normalized_shape output_grad_mul_x_sum = torch.sum(output_grad * x, dim=-1, keepdim=True) torch.distributed.all_reduce(output_grad_mul_x_sum, group=gpc.get_group(row_parallel_mode)) output_grad_mul_x_sum /= ctx.normalized_shape input_grad = output_grad.clone() input_grad -= x * output_grad_mul_x_sum input_grad -= output_grad_sum input_grad *= Var_x return input_grad, None, None, None, None, None def layernorm_2d( input_: Tensor, E_x: Tensor, Var_x: Tensor, hidden_size: int, row_parallel_mode: ParallelMode, col_parallel_mode: ParallelMode, ) -> Tensor: r"""Layernorm. Args: input_ (:class:`torch.tensor`): input matrix. E_x (:class:`torch.tensor`): mean. Var_x (:class:`torch.tensor`): variance. hidden_size (int): hidden size. row_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): row parallel mode. col_parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): column parallel mode. Note: The parallel_mode should be one of the modes defined in ``ParallelMode``. 
More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _Layernorm_2D.apply(input_, E_x, Var_x, hidden_size, row_parallel_mode, col_parallel_mode) class _AllGatherTensor2D(torch.autograd.Function): @staticmethod @custom_fwd(cast_inputs=torch.float16) def forward(ctx: Any, inputs: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor: ctx.dim = dim ctx.parallel_mode = parallel_mode outputs = all_gather(inputs, dim, parallel_mode) return outputs @staticmethod @custom_bwd def backward(ctx: Any, output_grad: Tensor) -> Tuple[Tensor, ...]: grad = reduce_scatter(output_grad, ctx.dim, ctx.parallel_mode) return grad.contiguous(), None, None def all_gather_tensor_2d(tensor: Tensor, dim: int, parallel_mode: ParallelMode) -> Tensor: r"""All gather the tensor of 2D parallelism. Args: tensor (:class:`torch.tensor`): Input tensor. dim (int): Dimension to gather. parallel_mode (:class:`colossalai.legacy.context.ParallelMode`): The parallel mode tensor used. Note: The parallel_mode should be concluded in ``ParallelMode``. More details about ``ParallelMode`` could be found in `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_ """ return _AllGatherTensor2D.apply(tensor, dim, parallel_mode) def split_batch_2d(input_: Tensor, dim: int = 0) -> Tensor: """Splits 2D tensor in specified dimension across cols. Args: input_ (:class:`torch.tensor`): Input tensor. dim (int): Specified dimension in which to split. Returns: :class:`torch.tensor`: The tensor has been split. """ dim_size = input_.size(dim) world_size = gpc.get_world_size(ParallelMode.PARALLEL_2D_COL) if world_size <= 1: return input_ assert dim_size % world_size == 0, f"The batch size ({dim_size}) is not a multiple of 2D size ({world_size})." return torch.chunk(input_, gpc.get_world_size(ParallelMode.PARALLEL_2D_COL), dim=dim)[ gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) ].contiguous() class _ReduceTensor2D(torch.autograd.Function): @staticmethod def forward(ctx, input_, parallel_mode): return all_reduce(input_, parallel_mode) @staticmethod def backward(ctx, output_grad): return output_grad, None def reduce_tensor_2d(input_: Tensor, parallel_mode: ParallelMode) -> Tensor: r"""All-reduce the input. Args:
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
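The `_operation.py` content above pipelines `summa_dim` broadcast rounds with double-buffered communication tensors. For intuition, here is a minimal single-process sketch of the SUMMA block schedule that `Matmul_AB_2D` distributes across a `summa_dim x summa_dim` rank grid; the function name `summa_ab_reference` and the explicit block loops are illustrative stand-ins for the asynchronous `dist.broadcast`/`addmm` pipeline, not repository code.

import torch


def summa_ab_reference(A: torch.Tensor, B: torch.Tensor, summa_dim: int) -> torch.Tensor:
    # Compute C = A @ B the way the 2D schedule does: partition both operands
    # into a (summa_dim x summa_dim) grid of blocks, then let step t consume the
    # A-blocks of grid column t and the B-blocks of grid row t, which is exactly
    # what the broadcasts supply to every rank at step t.
    m, k = A.shape
    k2, n = B.shape
    assert k == k2 and m % summa_dim == 0 and k % summa_dim == 0 and n % summa_dim == 0
    bm, bk, bn = m // summa_dim, k // summa_dim, n // summa_dim
    C = torch.zeros(m, n, dtype=A.dtype)
    for i in range(summa_dim):  # grid row holding this output block
        for j in range(summa_dim):  # grid column holding this output block
            for t in range(summa_dim):  # one accumulation per broadcast step
                a_blk = A[i * bm : (i + 1) * bm, t * bk : (t + 1) * bk]
                b_blk = B[t * bk : (t + 1) * bk, j * bn : (j + 1) * bn]
                C[i * bm : (i + 1) * bm, j * bn : (j + 1) * bn] += a_blk @ b_blk
    return C


A = torch.randn(8, 6)
B = torch.randn(6, 4)
assert torch.allclose(summa_ab_reference(A, B, summa_dim=2), A @ B, atol=1e-5)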
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_2d/__init__.py
colossalai/legacy/nn/layer/parallel_2d/__init__.py
from ._operation import reduce_by_batch_2d, split_batch_2d from .layers import ( Classifier2D, Embedding2D, LayerNorm2D, Linear2D, PatchEmbedding2D, VocabParallelClassifier2D, VocabParallelEmbedding2D, ) __all__ = [ "split_batch_2d", "reduce_by_batch_2d", "Linear2D", "LayerNorm2D", "Classifier2D", "PatchEmbedding2D", "Embedding2D", "VocabParallelEmbedding2D", "VocabParallelClassifier2D", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
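Among the symbols re-exported here, `split_batch_2d` simply keeps one contiguous chunk of the batch per rank of the 2D column group. Below is a hedged local sketch of that behavior, with an explicit `world_size`/`rank` pair standing in for the `gpc` lookups and `split_batch_reference` as an illustrative name rather than a repository function.

import torch


def split_batch_reference(x: torch.Tensor, world_size: int, rank: int, dim: int = 0) -> torch.Tensor:
    # Mirrors split_batch_2d: a no-op for a single rank, otherwise require an
    # even split and return this rank's contiguous chunk along `dim`.
    if world_size <= 1:
        return x
    assert x.size(dim) % world_size == 0, f"batch size {x.size(dim)} not divisible by {world_size}"
    return torch.chunk(x, world_size, dim=dim)[rank].contiguous()


x = torch.arange(8).reshape(4, 2)
print(split_batch_reference(x, world_size=2, rank=1))  # rows 2..3 of the batch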
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_2d/_utils.py
colossalai/legacy/nn/layer/parallel_2d/_utils.py
from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.global_variables import tensor_parallel_env as env def get_summa_dim_from_env() -> int: try: summa_dim = env.summa_dim assert summa_dim > 0, "SUMMA_DIM must be larger than zero" return summa_dim except KeyError: raise EnvironmentError( "SUMMA_DIM is not found in the current environment, " "please make sure that you have used the correct process group initializer" ) def assert_summa_initialization(): assert gpc.is_initialized(ParallelMode.PARALLEL_2D_COL) and gpc.is_initialized( ParallelMode.PARALLEL_2D_ROW ), "Both TWO_DIMENSION_COL and TWO_DIMENSION_ROW must be initialized by the process group initializer"
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
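`get_summa_dim_from_env` relies on the process group initializer having stored a positive `summa_dim`; the invariant behind it is that 2D tensor parallelism needs a square number of tensor-parallel ranks, with `summa_dim = sqrt(tensor_parallel_size)`. The helper below, `infer_summa_dim`, is a hypothetical illustration of that relation and is not part of the repository.

import math


def infer_summa_dim(tensor_parallel_size: int) -> int:
    # 2D (SUMMA) parallelism arranges ranks in a square grid, so the
    # tensor-parallel size must be a perfect square.
    dim = math.isqrt(tensor_parallel_size)
    if dim * dim != tensor_parallel_size:
        raise ValueError(f"2D parallelism needs a square tensor parallel size, got {tensor_parallel_size}")
    return dim


assert infer_summa_dim(4) == 2
assert infer_summa_dim(9) == 3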
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/parallel_2d/layers.py
colossalai/legacy/nn/layer/parallel_2d/layers.py
import math from collections import OrderedDict from typing import Callable import torch import torch.nn as nn import torch.nn.functional as F from torch import Tensor from torch.nn import Parameter from colossalai.accelerator import get_accelerator from colossalai.legacy.communication import broadcast from colossalai.legacy.context import ParallelMode, seed from colossalai.legacy.core import global_context as gpc from colossalai.legacy.global_variables import tensor_parallel_env as env from colossalai.legacy.registry import LAYERS from colossalai.legacy.utils.checkpointing import ( gather_tensor_parallel_state_dict, partition_tensor_parallel_state_dict, ) from colossalai.nn import init as init from ..base_layer import ParallelLayer from ..utils import divide, set_tensor_parallel_attribute_by_partition, to_2tuple from ._operation import ( Matmul_AB_2D, Matmul_ABT_2D, add_bias_2d, all_gather_tensor_2d, classifier_2d, layernorm_2d, reduce_scatter_tensor_2d, split_batch_2d, ) from ._utils import assert_summa_initialization, get_summa_dim_from_env @LAYERS.register_module class Linear2D(ParallelLayer): r"""Linear layer for 2D parallelism Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. skip_bias_add (bool, optional): If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion, defaults to False. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. 
""" def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, skip_bias_add: bool = False, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), ): super().__init__() self.in_features = in_features self.out_features = out_features self.skip_bias_add = skip_bias_add # parallel settings assert_summa_initialization() self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) self.summa_dim = get_summa_dim_from_env() # partitioning dimension self.input_size_per_partition = divide(self.in_features, self.summa_dim) self.hidden_size_per_partition = divide(self.out_features, self.summa_dim) # create weight, shape: [k/q, h/q] factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} self.weight = Parameter( torch.empty(self.input_size_per_partition, self.hidden_size_per_partition, **factory_kwargs) ) # create bias, shape: [h/q] if bias: self.bias = Parameter(torch.empty(divide(self.out_features, self.summa_dim**2), **factory_kwargs)) else: self.register_parameter("bias", None) # initialize parameters with seed(ParallelMode.TENSOR): self.reset_parameters(weight_initializer, bias_initializer) self._set_tensor_parallel_attributes() def _set_tensor_parallel_attributes(self): set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, self.summa_dim**2) def reset_parameters(self, weight_initializer, bias_initializer) -> None: fan_in, fan_out = self.in_features, self.out_features weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight.transpose(0, 1) # bias if self.bias is not None: bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # partition in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0: local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_ROW, dims={weight_key: -1, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) # partition in column groups local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_COL, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is not None: local_state[bias_key] = self.bias # gather in column groups local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_COL, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) # gather in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0: local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_ROW, dims={weight_key: -1, bias_key: 0}, 
partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: local_state[weight_key] = local_state[weight_key].transpose(0, 1) destination.update(local_state) def forward(self, x: Tensor) -> Tensor: # input: [m/q, n/q, k/q] # output: [m/q, n/q, h/q] out_shape = x.shape[:-1] + (self.hidden_size_per_partition,) output = Matmul_AB_2D.apply( x, self.weight, self.summa_dim, out_shape, self.row_rank, self.col_rank, ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) if self.bias is not None: if self.skip_bias_add: bias = add_bias_2d( None, self.bias, self.hidden_size_per_partition, self.row_rank, self.col_rank, ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL, True, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) return output, bias else: output = add_bias_2d( output, self.bias, self.hidden_size_per_partition, self.row_rank, self.col_rank, ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL, False, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) return output else: return output @LAYERS.register_module class LayerNorm2D(ParallelLayer): r"""Layer Normalization for 2D parallelism. Args: normalized_shape (int): input shape from an expected input of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]` If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps (float, optional): a value added to the denominator for numerical stability, defaults to 1e-05. bias (bool, optional): Whether to add a bias, defaults to ``True``. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. 
""" def __init__(self, normalized_shape: int, eps: float = 1e-05, bias=True, dtype=None): super().__init__() # layer norm config self.normalized_shape = normalized_shape self.variance_epsilon = eps # parallel setting assert_summa_initialization() self.row_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) self.col_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_ROW) self.summa_dim = get_summa_dim_from_env() # partitioning dimension self.partitioned_partition = divide(normalized_shape, self.summa_dim**2) # create parameters factory_kwargs = {"device": get_accelerator().get_current_device(), "dtype": dtype} self.weight = Parameter(torch.ones(self.partitioned_partition, **factory_kwargs)) if bias: self.bias = Parameter(torch.zeros(self.partitioned_partition, **factory_kwargs)) else: self.bias = None self._set_tensor_parallel_attributes() def _set_tensor_parallel_attributes(self): set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2) if self.bias is not None: set_tensor_parallel_attribute_by_partition(self.bias, self.summa_dim**2) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # partition in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0: local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_ROW, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) # partition in column groups local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_COL, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" local_state = OrderedDict({weight_key: self.weight}) if self.bias is not None: local_state[bias_key] = self.bias # gather in column groups local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_COL, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) # gather in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0: local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_ROW, dims={weight_key: 0, bias_key: 0}, partition_states={weight_key: True, bias_key: True}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, x: Tensor) -> Tensor: with torch.no_grad(): E_x = torch.sum(x, dim=-1, keepdim=True) # [b/q, s, 1] torch.distributed.all_reduce(E_x, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW)) E_x /= self.normalized_shape # Var_x in the block below is the sum of input^2 Var_x = torch.sum(x * x, dim=-1, keepdim=True) # [b/q, s, 1] torch.distributed.all_reduce(Var_x, group=gpc.get_group(ParallelMode.PARALLEL_2D_ROW)) Var_x /= self.normalized_shape Var_x = Var_x - E_x * E_x # variance of x [b/q, s, 1] # this time 1/sqrt(Var_x + epsilon) Var_x = 1.0 / torch.sqrt(Var_x + self.variance_epsilon) output = layernorm_2d( x, E_x, Var_x, self.normalized_shape, 
ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL ) scale = add_bias_2d( None, self.weight, self.partitioned_partition, self.row_rank, self.col_rank, ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL, True, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) if self.bias is not None: bias = add_bias_2d( None, self.bias, self.partitioned_partition, self.row_rank, self.col_rank, ParallelMode.PARALLEL_2D_ROW, ParallelMode.PARALLEL_2D_COL, True, self.data_parallel_rank, self.pipeline_parallel_rank, self.pipeline_parallel_size, self.tensor_parallel_size, ) output = torch.addcmul(bias, scale, output) else: output = torch.mul(scale, output) return output @LAYERS.register_module class PatchEmbedding2D(ParallelLayer): r"""2D Image to Patch Embedding. Args: img_size (int): image size. patch_size (int): patch size. in_chans (int): number of channels of input image. embed_size (int): size of embedding. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. flatten (bool, optional): whether to flatten output tensor, defaults to True. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. position_embed_initializer (:class:`typing.Callable`, optional): The initializer of position embedding, defaults to zeros initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, img_size: int, patch_size: int, in_chans: int, embed_size: int, flatten: bool = True, dtype: torch.dtype = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), position_embed_initializer: Callable = init.zeros_(), ): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) assert_summa_initialization() self.summa_dim = get_summa_dim_from_env() self.img_size = img_size self.patch_size = patch_size self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.flatten = flatten self.embed_size = embed_size self.embed_size_per_partition = embed_size // (self.summa_dim**2) with seed(ParallelMode.TENSOR): self.weight = Parameter( torch.empty( (self.embed_size_per_partition, in_chans, *self.patch_size), device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.bias = Parameter( torch.empty(self.embed_size_per_partition, device=get_accelerator().get_current_device(), dtype=dtype) ) self.cls_token = Parameter( torch.zeros( (1, 1, self.embed_size_per_partition), device=get_accelerator().get_current_device(), dtype=dtype ) ) self.pos_embed = Parameter( torch.zeros( (1, self.num_patches + 1, self.embed_size_per_partition), device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.reset_parameters(weight_initializer, bias_initializer, position_embed_initializer) self._set_tensor_parallel_attribute() def _set_tensor_parallel_attribute(self): set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2) set_tensor_parallel_attribute_by_partition(self.bias, self.summa_dim**2) set_tensor_parallel_attribute_by_partition(self.cls_token, self.summa_dim**2) set_tensor_parallel_attribute_by_partition(self.pos_embed, self.summa_dim**2) def 
reset_parameters(self, weight_initializer, bias_initializer, position_embed_initializer): with seed(ParallelMode.TENSOR): fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight) fan_out = self.embed_size weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) bias_initializer(self.bias, fan_in=fan_in) position_embed_initializer(self.pos_embed) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" bias_key = prefix + "bias" cls_token_key = prefix + "cls_token" pos_embed_key = prefix + "pos_embed" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # bias bias = state_dict.pop(bias_key, None) if bias is not None: local_state[bias_key] = bias # cls token cls_token = state_dict.pop(cls_token_key, None) if cls_token is not None: local_state[cls_token_key] = cls_token # pos embed pos_embed = state_dict.pop(pos_embed_key, None) if pos_embed is not None: local_state[pos_embed_key] = pos_embed # partition in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0: local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_ROW, dims={weight_key: 0, bias_key: 0, cls_token_key: -1, pos_embed_key: -1}, partition_states={weight_key: True, bias_key: True, cls_token_key: True, pos_embed_key: True}, ) # partition in column groups local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_COL, dims={weight_key: 0, bias_key: 0, cls_token_key: -1, pos_embed_key: -1}, partition_states={weight_key: True, bias_key: True, cls_token_key: True, pos_embed_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" bias_key = prefix + "bias" cls_token_key = prefix + "cls_token" pos_embed_key = prefix + "pos_embed" local_state = OrderedDict( {weight_key: self.weight, bias_key: self.bias, cls_token_key: self.cls_token, pos_embed_key: self.pos_embed} ) # gather in column groups local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_COL, dims={weight_key: 0, bias_key: 0, cls_token_key: -1, pos_embed_key: -1}, partition_states={weight_key: True, bias_key: True, cls_token_key: True, pos_embed_key: True}, keep_vars=keep_vars, ) # gather in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0: local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_ROW, dims={weight_key: 0, bias_key: 0, cls_token_key: -1, pos_embed_key: -1}, partition_states={weight_key: True, bias_key: True, cls_token_key: True, pos_embed_key: True}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: input_ = split_batch_2d(input_) B, C, H, W = input_.shape assert ( H == self.img_size[0] and W == self.img_size[1] ), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
weight = all_gather_tensor_2d(self.weight, 0, ParallelMode.PARALLEL_2D_COL) bias = all_gather_tensor_2d(self.bias, 0, ParallelMode.PARALLEL_2D_COL) output = F.conv2d(input_, weight, bias, stride=self.patch_size) if self.flatten: output = output.flatten(2).transpose(1, 2) # BCHW -> BNC cls_token = all_gather_tensor_2d(self.cls_token, -1, ParallelMode.PARALLEL_2D_COL) pos_embed = all_gather_tensor_2d(self.pos_embed, -1, ParallelMode.PARALLEL_2D_COL) cls_token = cls_token.expand(output.shape[0], -1, -1) output = torch.cat((cls_token, output), dim=1) output = output + pos_embed return output @LAYERS.register_module class Embedding2D(ParallelLayer): r"""Embedding for 2D parallelism. Args: num_embeddings (int): number of embeddings. embedding_dim (int): dimension of embedding. padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed “pad”, defaults to None. dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to normal initializer. The ``args`` and ``kwargs`` used in :class:`torch.nn.functional.embedding` should contain: :: max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2. scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False. sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False. More details about ``args`` and ``kwargs`` could be found in `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_. 
More details about initializer please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_ """ def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: int = None, dtype: torch.dtype = None, weight_initializer: Callable = init.normal_(), *args, **kwargs, ): super().__init__() assert_summa_initialization() self.summa_dim = get_summa_dim_from_env() self.num_embeddings = num_embeddings self.embed_dim = embedding_dim embed_dim_per_partition = divide(embedding_dim, self.summa_dim**2) self.padding_idx = padding_idx self.embed_args = args self.embed_kwargs = kwargs self.weight = Parameter( torch.empty( (num_embeddings, embed_dim_per_partition), device=get_accelerator().get_current_device(), dtype=dtype ) ) self.reset_parameters(weight_initializer) self._set_tensor_parallel_attributes() def _set_tensor_parallel_attributes(self): set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2) def reset_parameters(self, weight_initializer) -> None: with seed(ParallelMode.TENSOR): fan_in, fan_out = self.num_embeddings, self.embed_dim weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) self._fill_padding_idx_with_zero() def _fill_padding_idx_with_zero(self) -> None: if self.padding_idx is not None: with torch.no_grad(): self.weight[self.padding_idx].fill_(0) def _load_from_global_state_dict(self, state_dict, prefix, *args, **kwargs): local_state = OrderedDict() weight_key = prefix + "weight" if gpc.get_local_rank(ParallelMode.TENSOR) == 0: # weight weight = state_dict.pop(weight_key, None) if weight is not None: local_state[weight_key] = weight # partition in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0: local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_ROW, dims={weight_key: -1}, partition_states={weight_key: True}, ) # partition in column groups local_state = partition_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_COL, dims={weight_key: -1}, partition_states={weight_key: True}, ) super()._load_from_global_state_dict(local_state, prefix, *args, **kwargs) def _save_to_global_state_dict(self, destination, prefix, keep_vars): weight_key = prefix + "weight" local_state = OrderedDict({weight_key: self.weight}) # gather in column groups local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_COL, dims={weight_key: -1}, partition_states={weight_key: True}, keep_vars=keep_vars, ) # gather in row groups if gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) == 0: local_state = gather_tensor_parallel_state_dict( local_state, ParallelMode.PARALLEL_2D_ROW, dims={weight_key: -1}, partition_states={weight_key: True}, keep_vars=keep_vars, ) if gpc.get_local_rank(ParallelMode.TENSOR) == 0: destination.update(local_state) def forward(self, input_: Tensor) -> Tensor: input_ = split_batch_2d(input_) weight = all_gather_tensor_2d(self.weight, -1, ParallelMode.PARALLEL_2D_COL) output = F.embedding(input_, weight, self.padding_idx, *self.embed_args, **self.embed_kwargs) return output @LAYERS.register_module class VocabParallelEmbedding2D(ParallelLayer): r"""Embedding parallelized in the vocabulary dimension. Args: num_embeddings (int): number of embeddings. embedding_dim (int): dimension of embedding. padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient; therefore, the embedding vector at padding_idx is not updated during training, i.e. it remains as a fixed “pad”, defaults to None. 
dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None. weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to normal initializer. The ``args`` and ``kwargs`` used in :class:`torch.nn.functional.embedding` should contain: :: max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is renormalized to have norm max_norm. Note: this will modify weight in-place. norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2. scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse of frequency of the words in the mini-batch. Default False. sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False. More details about ``args`` and ``kwargs`` could be found in `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_. More details about the initializer can be found in `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, num_embeddings: int, embedding_dim: int, padding_idx: int = None, dtype: torch.dtype = None, weight_initializer: Callable = init.normal_(), *args, **kwargs, ): super().__init__() self.num_embeddings = num_embeddings self.embed_dim = embedding_dim self.padding_idx = padding_idx self.embed_args = args self.embed_kwargs = kwargs assert_summa_initialization() self.summa_dim = get_summa_dim_from_env() self.num_embeddings_per_partition = divide(self.num_embeddings, self.summa_dim) self.embed_dim_per_partition = divide(self.embed_dim, self.summa_dim) tensor_parallel_rank = gpc.get_local_rank(ParallelMode.PARALLEL_2D_COL) self.vocab_start_index = tensor_parallel_rank * self.num_embeddings_per_partition self.vocab_end_index = self.vocab_start_index + self.num_embeddings_per_partition self.weight = Parameter( torch.empty( (self.num_embeddings_per_partition, self.embed_dim_per_partition), device=get_accelerator().get_current_device(), dtype=dtype, ) ) self.reset_parameters(weight_initializer) self._set_tensor_parallel_attributes() env.vocab_parallel = True def _set_tensor_parallel_attributes(self): set_tensor_parallel_attribute_by_partition(self.weight, self.summa_dim**2) def reset_parameters(self, weight_initializer) -> None: with seed(ParallelMode.TENSOR): fan_in, fan_out = self.num_embeddings, self.embed_dim weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) self._fill_padding_idx_with_zero()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
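As the `Linear2D` constructor above shows, each of the `q * q` ranks (with `q = summa_dim`) holds a `[in_features/q, out_features/q]` weight shard and an `[out_features/q**2]` bias shard. A small sketch of that shard arithmetic follows; `linear2d_shard_shapes` is an illustrative helper, not a repository function.

def linear2d_shard_shapes(in_features: int, out_features: int, summa_dim: int):
    # Shapes follow the partitioning in Linear2D.__init__: the weight is split
    # once along each grid axis, the bias over all summa_dim**2 ranks.
    assert in_features % summa_dim == 0 and out_features % summa_dim**2 == 0
    weight_shape = (in_features // summa_dim, out_features // summa_dim)
    bias_shape = (out_features // summa_dim**2,)
    return weight_shape, bias_shape


# A 1024 -> 4096 projection on a 2x2 grid keeps a 512x2048 weight shard
# and a 1024-element bias shard per rank.
print(linear2d_shard_shapes(1024, 4096, summa_dim=2))  # ((512, 2048), (1024,))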
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/nn/layer/colossalai_layer/dropout.py
colossalai/legacy/nn/layer/colossalai_layer/dropout.py
import torch.nn as nn from colossalai.legacy.context import ParallelMode, seed from ..parallel_1d import * from ..utils import get_tensor_parallel_mode from ._utils import ColossalaiModule class Dropout(ColossalaiModule): """Dropout layer of colossalai. Args: p (float, optional): probability of an element to be zeroed, defaults to 0.5. inplace (bool, optional): whether to do dropout in-place, defaults to False. """ def __init__(self, p: float = 0.5, inplace: bool = False) -> None: tensor_parallel = get_tensor_parallel_mode() if tensor_parallel == "1d": drop = Dropout1D(p, inplace) else: drop = nn.Dropout(p, inplace) super().__init__(drop, tensor_parallel=tensor_parallel) def forward(self, *args): if self.tensor_parallel in [None, "1d"]: return super().forward(*args) else: with seed(ParallelMode.TENSOR): return super().forward(*args)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
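The `seed(ParallelMode.TENSOR)` context in `Dropout.forward` exists because, outside the 1D mode, ranks in a tensor-parallel group hold replicated activations and must therefore draw identical dropout masks. Below is a single-process sketch of why a shared seed makes inverted dropout agree across "ranks"; `masked_dropout` and the explicit generators are illustrative stand-ins for the per-mode seed context, not repository code.

import torch


def masked_dropout(x: torch.Tensor, p: float, gen: torch.Generator) -> torch.Tensor:
    # Inverted dropout driven by an explicit generator, so two callers that
    # share a seed produce the same mask.
    mask = (torch.rand(x.shape, generator=gen) >= p).to(x.dtype)
    return x * mask / (1.0 - p)


g1 = torch.Generator().manual_seed(42)
g2 = torch.Generator().manual_seed(42)
x = torch.ones(4, 4)
assert torch.equal(masked_dropout(x, 0.5, g1), masked_dropout(x, 0.5, g2))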