repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/dp_plugin_base.py
colossalai/booster/plugin/dp_plugin_base.py
import random import numpy as np import torch import torch.distributed as dist from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from .plugin_base import Plugin class DPPluginBase(Plugin): """This is a base class for all DP plugins. It sets up world size and rank, and provides data loader creation.""" def __init__(self) -> None: super().__init__() assert ( dist.is_initialized() ), "torch.distributed is not initialized, please use colossalai.launch to create the distributed environment" self.rank = dist.get_rank() self.world_size = dist.get_world_size() def prepare_dataloader( self, dataset, batch_size, shuffle=False, seed=1024, drop_last=False, pin_memory=False, num_workers=0, distributed_sampler_cls=None, **kwargs, ): r""" Prepare a dataloader for distributed training. The dataloader will be wrapped by `torch.utils.data.DataLoader` and `torch.utils.data.DistributedSampler`. Args: dataset (`torch.utils.data.Dataset`): The dataset to be loaded. shuffle (bool, optional): Whether to shuffle the dataset. Defaults to False. seed (int, optional): Random worker seed for sampling, defaults to 1024. add_sampler: Whether to add ``DistributedDataParallelSampler`` to the dataset. Defaults to True. drop_last (bool, optional): Set to True to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If False and the size of dataset is not divisible by the batch size, then the last batch will be smaller, defaults to False. pin_memory (bool, optional): Whether to pin memory address in CPU memory. Defaults to False. num_workers (int, optional): Number of worker threads for this dataloader. Defaults to 0. kwargs (dict): optional parameters for ``torch.utils.data.DataLoader``, more details could be found in `DataLoader <https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader>`_. Returns: :class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing. 
""" _kwargs = kwargs.copy() distributed_sampler_cls = distributed_sampler_cls or DistributedSampler sampler = distributed_sampler_cls(dataset, num_replicas=self.world_size, rank=self.rank, shuffle=shuffle) # Deterministic dataloader def seed_worker(worker_id): worker_seed = seed np.random.seed(worker_seed) torch.manual_seed(worker_seed) random.seed(worker_seed) return DataLoader( dataset, batch_size=batch_size, sampler=sampler, worker_init_fn=seed_worker, drop_last=drop_last, pin_memory=pin_memory, num_workers=num_workers, **_kwargs, )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/plugin_base.py
colossalai/booster/plugin/plugin_base.py
from abc import ABC, abstractmethod from typing import Callable, Dict, Iterator, List, Optional, Tuple import torch.nn as nn from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils.data import DataLoader, Dataset from colossalai.checkpoint_io import CheckpointIO from colossalai.interface import OptimizerWrapper __all__ = ["Plugin"] class Plugin(ABC): @abstractmethod def supported_devices(self) -> List[str]: pass @abstractmethod def supported_precisions(self) -> List[str]: pass @abstractmethod def control_precision(self) -> bool: pass @abstractmethod def control_device(self) -> bool: pass @abstractmethod def support_no_sync(self) -> bool: pass @abstractmethod def support_lora(self) -> bool: pass @abstractmethod def configure( self, model: nn.Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, dataloader: Optional[DataLoader] = None, lr_scheduler: Optional[LRScheduler] = None, ) -> Tuple[nn.Module, OptimizerWrapper, Callable, DataLoader, LRScheduler]: # implement this method pass @abstractmethod def control_checkpoint_io(self) -> bool: """ Whether the plugin controls the checkpoint io """ @abstractmethod def get_checkpoint_io(self) -> CheckpointIO: """ Get checkpoint io object for this plugin, only invoked when control_checkpoint_io is True. """ @abstractmethod def no_sync(self, model: nn.Module, optimizer: OptimizerWrapper) -> Iterator[None]: """ Context manager to disable gradient synchronization. """ @abstractmethod def enable_lora(self, model: nn.Module, pretrained_dir: str, lora_config: Dict) -> nn.Module: """ Add LoRA modules to the model passed in. Should only be called in booster.enable_lora(). """ @abstractmethod def prepare_dataloader( self, dataset: Dataset, batch_size: int, shuffle: bool = False, seed: int = 1024, drop_last: bool = False, pin_memory: bool = False, num_workers: int = 0, **kwargs, ): """Prepare a dataloader for distributed training. 
The dataloader will be wrapped by `torch.utils.data.DataLoader` """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/pp_plugin_base.py
colossalai/booster/plugin/pp_plugin_base.py
from abc import abstractmethod from typing import Any, Callable, Iterator, Optional import torch from colossalai.interface import ModelWrapper, OptimizerWrapper from .plugin_base import Plugin class PipelinePluginBase(Plugin): @abstractmethod def execute_pipeline( self, data_iter: Iterator, model: ModelWrapper, criterion: Callable[[Any, Any], torch.Tensor], optimizer: Optional[OptimizerWrapper] = None, return_loss: bool = True, return_outputs: bool = False, ) -> dict: pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/__init__.py
colossalai/booster/plugin/__init__.py
from .gemini_plugin import GeminiPlugin from .hybrid_parallel_plugin import HybridParallelPlugin from .low_level_zero_plugin import LowLevelZeroPlugin from .moe_hybrid_parallel_plugin import MoeHybridParallelPlugin from .plugin_base import Plugin from .torch_ddp_plugin import TorchDDPPlugin __all__ = [ "Plugin", "TorchDDPPlugin", "GeminiPlugin", "LowLevelZeroPlugin", "HybridParallelPlugin", "MoeHybridParallelPlugin", ] import torch from packaging import version if version.parse(torch.__version__) >= version.parse("1.12.0"): from .torch_fsdp_plugin import TorchFSDPPlugin __all__.append("TorchFSDPPlugin")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/gemini_plugin.py
colossalai/booster/plugin/gemini_plugin.py
import os import random from pathlib import Path from typing import Callable, Dict, Iterator, List, Optional, Tuple import numpy as np import torch import torch.distributed as dist import torch.nn as nn from torch.distributed.distributed_c10d import _get_default_group from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from colossalai.accelerator import get_accelerator from colossalai.checkpoint_io import CheckpointIndexFile, CheckpointIO, GeneralCheckpointIO from colossalai.checkpoint_io.utils import ( async_save_state_dict_shards, create_pinned_state_dict, get_model_base_filenames, get_optimizer_base_filenames, load_state_dict_shards, save_config_file, save_state_dict, save_state_dict_shards, ) from colossalai.cluster import DistCoordinator, ProcessGroupMesh from colossalai.interface import ModelWrapper, OptimizerWrapper from colossalai.logging import get_dist_logger from colossalai.shardformer import ShardConfig, ShardFormer from colossalai.zero import GeminiDDP, GeminiOptimizer from colossalai.zero.gemini.memory_tracer import MemStats from .dp_plugin_base import DPPluginBase __all__ = ["GeminiPlugin"] SUPPORTED_PRECISION = ["fp16", "bf16"] PRECISION_STR_TO_DTYPE = {"fp16": torch.half, "bf16": torch.bfloat16} ZERO_AXIS, DP_AXIS, TP_AXIS = 0, 1, 2 def get_param_info(optim: Optimizer): # Get a backup of necessary information of parameters for future use, which includes: # 1. A mapping from integer param_id to param32 shape. 
if optim is None: return {} param_info = {"id2shape": {}} start_index = 0 for group in optim.param_groups: for param_id, param in enumerate(group["params"], start_index): original_shape = param.shape if isinstance(param, torch.Tensor) else None param_info["id2shape"][param_id] = original_shape start_index += len(group["params"]) return param_info class GeminiCheckpointIO(GeneralCheckpointIO): def __init__(self) -> None: super().__init__() self.coordinator = DistCoordinator() self.logger = get_dist_logger() def save_unsharded_model( self, model: GeminiDDP, checkpoint: str, gather_dtensor: bool, use_safetensors: bool, use_async: bool = False, ): """ Save sharded model to checkpoint but only on master process. The model should be unwrapped in self.load_model via ModelWrapper.unwrap. As there is communication when getting state dict, model.state_dict() must be called on all processes. """ assert isinstance(model, GeminiDDP), "Please boost the model before saving!" state_dict = model.state_dict(only_rank_0=True) if self.coordinator.is_master(): if use_async: from colossalai.utils.safetensors import save if hash(model) not in self.pinned_state_dicts: self.pinned_state_dicts[hash(model)] = create_pinned_state_dict(state_dict) for k, v in state_dict.items(): self.pinned_state_dicts[hash(model)][k].copy_(v) state_dict[k] = self.pinned_state_dicts[hash(model)][k] writer = save(checkpoint, state_dict) self.async_writers.append(writer) else: save_state_dict(state_dict, checkpoint, use_safetensors) def load_unsharded_model( self, model: GeminiDDP, checkpoint: str, strict: bool = True, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load model from checkpoint with automatic unwrapping. The model should be unwrapped in self.load_model via ModelWrapper.unwrap. """ assert isinstance(model, GeminiDDP), "Please boost the model before loading!" 
super().load_unsharded_model( model, checkpoint, strict=strict, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads ) def save_unsharded_optimizer( self, optimizer: GeminiOptimizer, checkpoint: str, gather_dtensor: bool, use_async: bool = False ): """ Save unsharded optimizer state dict to checkpoint. After calling optimizer.state_dict(), the complete optimizer states will be collected on master rank. As there is communication when getting state dict, optimizer.state_dict() must be called on all processes. The saving process will only be executed by master rank. """ assert isinstance(optimizer, GeminiOptimizer), "Please boost the optimizer before saving!" state_dict = optimizer.state_dict() if self.coordinator.is_master(): if use_async: from colossalai.utils.safetensors import _flatten_optim_state_dict, save flatten_state_dict, metadata = _flatten_optim_state_dict(state_dict) if id(optimizer) not in self.pinned_state_dicts: self.pinned_state_dicts[id(optimizer)] = create_pinned_state_dict(flatten_state_dict) for k, v in flatten_state_dict.items(): self.pinned_state_dicts[id(optimizer)][k].copy_(v) flatten_state_dict[k] = self.pinned_state_dicts[id(optimizer)][k] writer = save(checkpoint, flatten_state_dict, metadata) self.async_writers.append(writer) else: save_state_dict(state_dict, checkpoint, use_safetensors=False) def load_unsharded_optimizer( self, optimizer: GeminiOptimizer, checkpoint: str, low_cpu_mem_mode: bool = True, num_threads: int = 1 ): """ Loading unsharded optimizer from checkpoint file. For each process, only loading optimizer states of parameters it controls. """ assert isinstance(optimizer, GeminiOptimizer), "Please boost the optimizer before loading!" 
super().load_unsharded_optimizer( optimizer, checkpoint, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads ) def save_sharded_model( self, model: GeminiDDP, checkpoint_path: str, gather_dtensor: bool = False, prefix: Optional[str] = None, max_shard_size: int = 1024, use_safetensors: bool = False, use_async: bool = False, ): """ Save sharded model. As there is communication when getting state dict, model.state_dict() must be called on all processes. """ assert isinstance(model, GeminiDDP), "Please boost the model before saving!" if os.path.isfile(checkpoint_path): self.logger.error(f"Provided path ({checkpoint_path}) should be a directory, not a file", ranks=[0]) return Path(checkpoint_path).mkdir(parents=True, exist_ok=True) if use_async and self.coordinator.is_master(): if hash(model) not in self.pinned_state_dicts: self.pinned_state_dicts[hash(model)] = {} pinned_state_dicts = self.pinned_state_dicts[hash(model)] else: pinned_state_dicts = None state_dict_shard = model.state_dict_shard( max_shard_size=max_shard_size, only_rank_0=True, pinned_state_dicts=pinned_state_dicts ) weights_name, save_index_file = get_model_base_filenames(prefix, use_safetensors) index_file = CheckpointIndexFile(checkpoint_path) # Save shards of optimizer states. 
is_master = self.coordinator.is_master() if use_async: total_size, writers = async_save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint_path, index_file=index_file, base_filename=weights_name, is_master=is_master, ) self.async_writers.extend(writers) else: total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint_path, index_file=index_file, base_filename=weights_name, is_master=is_master, use_safetensors=use_safetensors, ) # only save the index file on the master rank if self.coordinator.is_master(): index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) save_config_file(model.unwrap(), checkpoint_path) self.logger.info( f"The model is split into checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}.", ranks=[0], ) def load_sharded_model( self, model: GeminiDDP, checkpoint_index_file: Path, strict: bool = False, use_safetensors: bool = False, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load shard model, load model from multiple files. """ assert isinstance(model, GeminiDDP), "Please boost the model before loading!" return super().load_sharded_model( model, checkpoint_index_file, strict, use_safetensors, load_sub_module=False, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads, ) def save_sharded_optimizer( self, optimizer: GeminiOptimizer, checkpoint: Path, gather_dtensor: bool, prefix: str, size_per_shard: int, use_async: bool = False, ): """ Save sharded optimizer state dict to checkpoint folder. As there is communication when getting state dict, this must be called on all processes. """ assert isinstance(optimizer, GeminiOptimizer), "Please boost the optimizer before saving!" 
if os.path.isfile(checkpoint): self.logger.error(f"Provided path ({checkpoint}) should be a directory, not a file", ranks=[0]) return Path(checkpoint).mkdir(parents=True, exist_ok=True) # Preparing file paths and index file. states_name, save_index_file, param_group_file = get_optimizer_base_filenames(prefix, use_safetensors=use_async) index_file = CheckpointIndexFile(checkpoint) index_file.append_meta_data("param_groups", param_group_file) # Store the information of param groups to param_group_file. if self.coordinator.is_master(): group_file_path = os.path.join(checkpoint, param_group_file) param_groups = optimizer.get_param_groups_for_saving() torch.save(param_groups, group_file_path) # States are broken into shards within max_shard_size. if use_async and self.coordinator.is_master(): if id(optimizer) not in self.pinned_state_dicts: self.pinned_state_dicts[id(optimizer)] = {} pinned_state_dicts = self.pinned_state_dicts[id(optimizer)] else: pinned_state_dicts = None state_dict_shard = optimizer.state_shard( prefix=prefix, max_shard_size=size_per_shard, only_rank_0=True, pinned_state_dicts=pinned_state_dicts ) # Save shards of optimizer states. if use_async: total_size, writers = async_save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=self.coordinator.is_master(), state_preprocess=True, ) self.async_writers.extend(writers) else: total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=self.coordinator.is_master(), use_safetensors=False, ) # Wrap up index file. Only save it on master rank. if self.coordinator.is_master(): index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) self.logger.info( f"The optimizer is going to be split to checkpoint shards. 
" f"You can find where each parameters has been saved in the " f"index located at {save_index_file}.", ranks=[0], ) def load_sharded_optimizer( self, optimizer: GeminiOptimizer, checkpoint_index_file: Path, prefix: str, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Loading sharded optimizer from checkpoint folder, with index file given. For each process, only loading optimizer states of parameters it controls. """ assert isinstance(optimizer, GeminiOptimizer), "Please boost the optimizer before loading!" if not os.path.isfile(checkpoint_index_file): self.logger.error(f"Provided path ({checkpoint_index_file}) should be a file", ranks=[0]) assert isinstance(optimizer, GeminiOptimizer) # Read checkpoint index file. ckpt_index_file = CheckpointIndexFile.from_file(checkpoint_index_file) # Load param_groups. param_group_path = ckpt_index_file.get_param_group_filename() if param_group_path is None: raise RuntimeError( f"Invalid index file path {checkpoint_index_file} for an optimizer. \ Lacking param group file under current directory." ) saved_param_groups = torch.load(param_group_path) optimizer.load_param_groups(saved_param_groups) checkpoint_files, _ = ckpt_index_file.get_checkpoint_filenames() # Load optimizer states from shard files under checkpoint path. # For each file, only load the states managed by current process. for state_dict_shard in load_state_dict_shards( checkpoint_files, True, False, low_cpu_mem_mode=low_cpu_mem_mode ): if not low_cpu_mem_mode: state_dict_shard = create_pinned_state_dict(state_dict_shard, empty=False, num_threads=num_threads) optimizer.load_param_states(state_dict_shard) optimizer.optimizer_loading_epilogue() def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str): """ Save model to checkpoint but only on master process. """ if self.coordinator.is_master(): super().save_lr_scheduler(lr_scheduler, checkpoint) class GeminiPlugin(DPPluginBase): """ Plugin for Gemini. 
```python from colossalai.booster import Booster from colossalai.booster.plugin import GeminiPlugin model, train_dataset, optimizer, criterion = ... plugin = GeminiPlugin() train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8) booster = Booster(plugin=plugin) model, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion) ``` Args: chunk_config_dict (dict, optional): chunk configuration dictionary. chunk_init_device (torch.device, optional): device to initialize the chunk. placement_policy (str, optional): "static" and "auto". Defaults to "static". enable_gradient_accumulation (bool, optional): Whether to enable gradient accumulation. When set to True, gradient will be stored after doing backward pass. Defaults to False. shard_param_frac (float, optional): fraction of parameters to be sharded. Only for "static" placement. If `shard_param_frac` is 1.0, it's equal to zero-3. If `shard_param_frac` is 0.0, it's equal to zero-2. Defaults to 1.0. offload_optim_frac (float, optional): fraction of optimizer states to be offloaded. Only for "static" placement. If `shard_param_frac` is 1.0 and `offload_optim_frac` is 0.0, it's equal to old "cuda" placement. Defaults to 0.0. offload_param_frac (float, optional): fraction of parameters to be offloaded. Only for "static" placement. For efficiency, this argument is useful only when `shard_param_frac` is 1.0 and `offload_optim_frac` is 1.0. If `shard_param_frac` is 1.0, `offload_optim_frac` is 1.0 and `offload_param_frac` is 1.0, it's equal to old "cpu" placement. When using static placement, we recommend users to tune `shard_param_frac` first and then `offload_optim_frac`. Defaults to 0.0. warmup_non_model_data_ratio (float, optional): ratio of expected non-model data memory during warmup. Only for "auto" placement. Defaults to 0.8. steady_cuda_cap_ratio (float, optional): ratio of allowed cuda capacity for model data during steady state. Only for "auto" placement. 
Defaults to 0.9. precision (str, optional): precision. Support 'fp16' and 'bf16'. Defaults to 'fp16'. master_weights (bool, optional): Whether to keep fp32 master parameter weights in optimizer. Defaults to True. pin_memory (bool, optional): use pin memory on CPU. Defaults to False. force_outputs_fp32 (bool, optional): force outputs are fp32. Defaults to False. strict_ddp_mode (bool, optional): use strict ddp mode (only use dp without other parallelism). Defaults to False. search_range_m (int, optional): chunk size searching range divided by 2^20. Defaults to 32. hidden_dim (int, optional): the hidden dimension of DNN. Users can provide this argument to speed up searching. If users do not know this argument before training, it is ok. We will use a default value 1024. min_chunk_size_m (float, optional): the minimum chunk size divided by 2^20. If the aggregate size of parameters is still smaller than the minimum chunk size, all parameters will be compacted into one small chunk. memstats (MemStats, optional) the memory statistics collector by a runtime memory tracer. gpu_margin_mem_ratio (float, optional): The ratio of GPU remaining memory (after the first forward-backward) which will be used when using hybrid CPU optimizer. This argument is meaningless when `placement_policy` of `GeminiManager` is not "auto". Defaults to 0.0. initial_scale (float, optional): Initial scale used by DynamicGradScaler. Defaults to 2**16. min_scale (float, optional): Min scale used by DynamicGradScaler. Defaults to 1. growth_factor (float, optional): growth_factor used by DynamicGradScaler. Defaults to 2. backoff_factor (float, optional): backoff_factor used by DynamicGradScaler. Defaults to 0.5. growth_interval (float, optional): growth_interval used by DynamicGradScaler. Defaults to 1000. hysteresis (float, optional): hysteresis used by DynamicGradScaler. Defaults to 2. max_scale (int, optional): max_scale used by DynamicGradScaler. Defaults to 2**32. 
max_norm (float, optional): max_norm used for `clip_grad_norm`. You should notice that you shall not do clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm. norm_type (float, optional): norm_type used for `clip_grad_norm`. tp_size (int, optional): If 'tp_size' is set to be greater than 1, it means using tensor parallelism strategy, which is implemented in Shardformer, 'tp_size' determines the size of the tensor parallel process group. Default to 1. extra_dp_size (int, optional): If 'extra_dp_size' is set to be greater than 1, it means creating another group to run with a ddp-like strategy. Default to 1. enable_all_optimization (bool, optional): Whether to switch on all the optimizations supported by Shardformer. Currently all the optimization methods include fused normalization, flash attention and JIT. Defaults to False. enable_fused_normalization (bool, optional): Whether to switch on fused normalization in Shardformer. Defaults to False. enable_flash_attention (bool, optional): Whether to switch on flash attention in Shardformer. Defaults to False. enable_jit_fused (bool, optional): Whether to switch on JIT in Shardformer. Default to False. enable_sequence_parallelism (bool): Whether to turn on sequence parallelism in Shardformer. Defaults to False. use_fp8 (bool, optional): Whether to enable fp8 mixed precision training. Defaults to False. verbose (bool, optional): verbose mode. Debug info including chunk search result will be printed. Defaults to False. fp8_communication (bool, optional): Whether to enable fp8 communication. Defaults to False. 
""" def __init__( self, chunk_config_dict: Optional[dict] = None, chunk_init_device: Optional[torch.device] = None, placement_policy: str = "static", enable_gradient_accumulation: bool = False, max_prefetch: int = 0, shard_param_frac: float = 1.0, # only for static placement offload_optim_frac: float = 0.0, # only for static placement offload_param_frac: float = 0.0, # only for static placement warmup_non_model_data_ratio: float = 0.8, # only for auto placement steady_cuda_cap_ratio: float = 0.9, # only for auto placement precision: str = "fp16", master_weights: bool = True, pin_memory: bool = False, force_outputs_fp32: bool = False, strict_ddp_mode: bool = False, search_range_m: int = 32, hidden_dim: Optional[int] = None, min_chunk_size_m: float = 32, memstats: Optional[MemStats] = None, gpu_margin_mem_ratio: float = 0.0, initial_scale: float = 2**16, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, max_norm: float = 0.0, norm_type: float = 2.0, tp_size: int = 1, extra_dp_size: int = 1, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_sequence_parallelism: bool = False, enable_jit_fused: bool = False, enable_async_reduce: bool = True, use_fp8: bool = False, verbose: bool = False, fp8_communication: bool = False, ) -> None: super().__init__() assert precision in SUPPORTED_PRECISION, f"precision {precision} is not supported" if get_accelerator().name == "npu": assert placement_policy == "static", "NPU only supports static placement policy" self.logger = get_dist_logger() if enable_async_reduce and not pin_memory: self.logger.warning( f"enable_async_reduce sets pin_memory=True to achieve best performance, which is not implicitly set.", ranks=[0], ) pin_memory = True self.gemini_config = dict( chunk_config_dict=chunk_config_dict, chunk_init_device=(chunk_init_device or 
get_accelerator().get_current_device()), placement_policy=placement_policy, enable_gradient_accumulation=enable_gradient_accumulation, shard_param_frac=shard_param_frac, offload_optim_frac=offload_optim_frac, offload_param_frac=offload_param_frac, warmup_non_model_data_ratio=warmup_non_model_data_ratio, steady_cuda_cap_ratio=steady_cuda_cap_ratio, pin_memory=pin_memory, force_outputs_fp32=force_outputs_fp32, strict_ddp_mode=strict_ddp_mode, search_range_m=search_range_m, hidden_dim=hidden_dim, min_chunk_size_m=min_chunk_size_m, memstats=memstats, mixed_precision=PRECISION_STR_TO_DTYPE[precision], master_weights=master_weights, max_prefetch=max_prefetch, enable_async_reduce=enable_async_reduce, fp8_communication=fp8_communication, use_fp8=use_fp8, ) self.zero_optim_config = dict( gpu_margin_mem_ratio=gpu_margin_mem_ratio, ) self.optim_kwargs = dict( initial_scale=initial_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, min_scale=min_scale, max_scale=max_scale, max_norm=max_norm, norm_type=norm_type, ) self.enable_tensor_parallelism = tp_size > 1 self.enable_all_optimization = enable_all_optimization self.enable_fused_normalization = enable_fused_normalization self.enable_flash_attention = enable_flash_attention self.enable_sequence_parallelism = enable_sequence_parallelism if self.enable_tensor_parallelism else False self.enable_jit_fused = enable_jit_fused self.verbose = verbose self.tp_size = tp_size self.extra_dp_size = extra_dp_size world_size = dist.get_world_size() self.zero_size = world_size // (self.tp_size * self.extra_dp_size) assert ( world_size == (self.tp_size * self.extra_dp_size) * self.zero_size ), f"The global group size can't be evenly divided by the subgroup size." 
self.pg_mesh = ProcessGroupMesh(self.zero_size, self.extra_dp_size, self.tp_size) self.zero_group = ( self.pg_mesh.get_group_along_axis(ZERO_AXIS) if self.zero_size < world_size else _get_default_group() ) self.extra_dp_group = self.pg_mesh.get_group_along_axis(DP_AXIS) if self.extra_dp_size > 1 else None self.tp_group = self.pg_mesh.get_group_along_axis(TP_AXIS) if self.tp_size > 1 else None self.dp_size = self.zero_size * self.extra_dp_size self.shard_config = ShardConfig( tensor_parallel_process_group=self.tp_group, enable_tensor_parallelism=self.enable_tensor_parallelism, enable_all_optimization=self.enable_all_optimization, enable_fused_normalization=self.enable_fused_normalization, enable_flash_attention=self.enable_flash_attention, enable_jit_fused=self.enable_jit_fused, enable_sequence_parallelism=self.enable_sequence_parallelism, ) def __del__(self): """Destroy the process groups in ProcessGroupMesh""" self.pg_mesh.destroy_mesh_process_groups() def support_no_sync(self) -> bool: return False def support_lora(self) -> bool: return False def control_precision(self) -> bool: return True def supported_precisions(self) -> List[str]: return SUPPORTED_PRECISION def control_device(self) -> bool: return True def supported_devices(self) -> List[str]: return ["cuda", "npu"] def prepare_dataloader( self, dataset, batch_size, shuffle=False, seed=1024, drop_last=False, pin_memory=False, num_workers=0, distributed_sampler_cls=None, **kwargs, ): r""" Prepare a dataloader for distributed training. The dataloader will be wrapped by `torch.utils.data.DataLoader` and `torch.utils.data.DistributedSampler`. Args: dataset (`torch.utils.data.Dataset`): The dataset to be loaded. shuffle (bool, optional): Whether to shuffle the dataset. Defaults to False. seed (int, optional): Random worker seed for sampling, defaults to 1024. add_sampler: Whether to add ``DistributedDataParallelSampler`` to the dataset. Defaults to True. 
drop_last (bool, optional): Set to True to drop the last incomplete batch, if the dataset size is not divisible by the batch size. If False and the size of dataset is not divisible by the batch size, then the last batch will be smaller, defaults to False. pin_memory (bool, optional): Whether to pin memory address in CPU memory. Defaults to False. num_workers (int, optional): Number of worker threads for this dataloader. Defaults to 0. kwargs (dict): optional parameters for ``torch.utils.data.DataLoader``, more details could be found in `DataLoader <https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader>`_. Returns: :class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing. """ _kwargs = kwargs.copy() zero_world_size = self.pg_mesh.size(ZERO_AXIS) extra_dp_world_size = self.pg_mesh.size(DP_AXIS) zero_rank = self.pg_mesh.coordinate(ZERO_AXIS) extra_dp_rank = self.pg_mesh.coordinate(DP_AXIS) distributed_sampler_cls = distributed_sampler_cls or DistributedSampler sampler = distributed_sampler_cls( dataset, num_replicas=zero_world_size * extra_dp_world_size, rank=zero_rank * extra_dp_world_size + extra_dp_rank, shuffle=shuffle, ) # Deterministic dataloader def seed_worker(worker_id): worker_seed = seed np.random.seed(worker_seed) torch.manual_seed(worker_seed) random.seed(worker_seed) return DataLoader( dataset, batch_size=batch_size, sampler=sampler, worker_init_fn=seed_worker, drop_last=drop_last, pin_memory=pin_memory, num_workers=num_workers, **_kwargs, ) def configure( self, model: nn.Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, dataloader: Optional[DataLoader] = None, lr_scheduler: Optional[LRScheduler] = None, ) -> Tuple[nn.Module, OptimizerWrapper, Callable, DataLoader, LRScheduler]: params_info = get_param_info(optimizer) if not isinstance(model, ModelWrapper): # convert model to sync bn # FIXME(ver217): gemini does not support sync bn # In 
torch/nn/modules/_functions.py, line 22, ``mean, invstd = torch.batch_norm_stats(input, eps)`` will get fp32 mean and invstd even though the input is fp16. # This inconsistency of dtype will cause the error. # We have two possible solutions: # 1. keep batch norm always in fp32. This is hard for gemini, as it use chunks. # 2. patch sync bn or write a new on. This is relatively easy, but we need to test it. # model = nn.SyncBatchNorm.convert_sync_batchnorm(model, None) # wrap the model with Gemini if self.enable_tensor_parallelism: shardformer = ShardFormer(self.shard_config) model, _ = shardformer.optimize(model) model = GeminiDDP( model, **self.gemini_config, zero_group=self.zero_group, extra_dp_group=self.extra_dp_group, verbose=self.verbose, ) if optimizer is not None and not isinstance(optimizer, OptimizerWrapper): optimizer = GeminiOptimizer( optimizer, model, **self.zero_optim_config, **self.optim_kwargs, tp_group=self.tp_group, params_info=params_info, verbose=self.verbose, ) return model, optimizer, criterion, dataloader, lr_scheduler def control_checkpoint_io(self) -> bool: return True def get_checkpoint_io(self) -> CheckpointIO: return GeminiCheckpointIO() def no_sync(self, model: nn.Module, optimizer: OptimizerWrapper) -> Iterator[None]: raise NotImplementedError def enable_lora(
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
colossalai/booster/plugin/moe_hybrid_parallel_plugin.py
from collections import defaultdict from types import MethodType from typing import Callable, List, Optional, OrderedDict, Tuple import torch import torch.distributed as dist from torch.distributed import ProcessGroup from torch.nn import Module from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils.data import DataLoader from colossalai.booster.plugin.hybrid_parallel_plugin import ( PRECISION_TORCH_TYPE, SUPPORT_SP_MODE, HybridParallelAMPOptimizer, HybridParallelModule, HybridParallelNaiveOptimizer, HybridParallelPlugin, HybridParallelZeroOptimizer, get_param_info, ) from colossalai.checkpoint_io import MoECheckpointIO from colossalai.cluster.process_group_mesh import ProcessGroupMesh from colossalai.interface import ModelWrapper, OptimizerWrapper from colossalai.interface.optimizer import DistributedOptim from colossalai.logging import get_dist_logger from colossalai.nn.optimizer import cast_to_distributed from colossalai.pipeline.schedule.interleaved_pp import InterleavedSchedule from colossalai.pipeline.schedule.one_f_one_b import OneForwardOneBackwardSchedule from colossalai.pipeline.schedule.zero_bubble_pp import ZeroBubbleVPipeScheduler from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.policies.base_policy import Policy from colossalai.shardformer.shard.grad_ckpt_config import GradientCheckpointConfig from colossalai.shardformer.shard.shard_config import ShardConfig from colossalai.tensor.moe_tensor.api import is_moe_tensor class MoeHybridParallelZeroOptimizer(HybridParallelZeroOptimizer): def __init__( self, optimizer: Optimizer, model: Module, use_pipeline: bool, dp_process_group: Optional[ProcessGroup], # the dp pg for comm tp_process_group: Optional[ProcessGroup], # if using tp pp_process_group: Optional[ProcessGroup], # if using pp moe_dp_group: ProcessGroup, # moe dp pg for comm param_info: OrderedDict, initial_scale: int = 2**16, # grad scaler 
config min_scale: int = 1, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, hysteresis: int = 2, max_scale: int = 2**24, clip_grad_norm: float = 0.0, # grad clipping verbose: bool = False, reduce_bucket_size: int = 1024 * 1024, # communication communication_dtype: Optional[torch.dtype] = None, overlap_communication: bool = False, partition_grad: bool = False, # stage 2 flag cpu_offload: bool = False, # cpu offload forced_dtype: Optional[torch.dtype] = None, overlap_allgather: bool = False, ): if dp_process_group is moe_dp_group: pg_param_list = { dp_process_group: list(model.parameters()), } else: pg_param_list = { dp_process_group: list(filter(lambda p: not is_moe_tensor(p), model.parameters())), moe_dp_group: list(filter(is_moe_tensor, model.parameters())), } if len(pg_param_list[moe_dp_group]) == 0: raise ValueError("No parameters found in moe_dp_group, please consider using HybridParallelPlugin instead") super().__init__( model=model, optimizer=optimizer, use_pipeline=use_pipeline, param_info=param_info, initial_scale=initial_scale, min_scale=min_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, clip_grad_norm=clip_grad_norm, verbose=verbose, reduce_bucket_size=reduce_bucket_size, communication_dtype=communication_dtype, overlap_communication=overlap_communication, partition_grad=partition_grad, cpu_offload=cpu_offload, tp_process_group=tp_process_group, pp_process_group=pp_process_group, forced_dtype=forced_dtype, pg_to_param_list=pg_param_list, overlap_allgather=overlap_allgather, ) class MoeHybridParallelPlugin(HybridParallelPlugin): """ Plugin for MoE Hybrid Parallel Training, which is similar to HybridParallelPlugin Tensor parallel, pipeline parallel and data parallel(DDP/ZeRO) can be picked and combined in this plugin. 
The size of tp and pp should be passed in by user, then the size of dp is automatically calculated from dp_size = world_size / (tp_size * pp_size). ```python from colossalai.booster import Booster from colossalai.booster.plugin import MoeHybridParallelPlugin model, train_dataset, optimizer, criterion = ... plugin = MoeHybridParallelPlugin(tp_size=2, pp_size=2, ep_size=2) train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8) booster = Booster(plugin=plugin) model, optimizer, criterion, train_dataloader, _ = booster.boost(model, optimizer, criterion, train_dataloader) ``` Args: tp_size (int): The size of tensor parallelism. Tensor parallelism will not be used when tp_size is set to 1. pp_size (int): The number of pipeline stages in pipeline parallelism. Pipeline parallelism will not be used when pp_size is set to 1. ep_size (int): The size of expert parallelism sp_size (int): The size of sequence parallelism. precision (str, optional): Specifies the precision of parameters during training. Auto-mixied precision will be used when this argument is set to 'fp16' or 'bf16', otherwise model is trained with 'fp32'. Defaults to 'fp16'. zero_stage (int, optional): The stage of ZeRO for data parallelism. Can only be choosed from [0, 1, 2]. When set to 0, ZeRO will not be used. Defaults to 0. enable_all_optimization (bool, optional): Whether to switch on all the optimizations supported by Shardformer. Currently all the optimization methods include fused normalization, flash attention and JIT. Defaults to False. enable_fused_normalization (bool, optional): Whether to switch on fused normalization in Shardformer. Defaults to False. enable_flash_attention (bool, optional): Whether to switch on flash attention in Shardformer. Defaults to False. enable_jit_fused (bool, optional): Whether to switch on JIT in Shardformer. Default to False. enable_sequence_parallelism (bool): Whether to turn on sequence parallelism in Shardformer. Defaults to False. 
sequence_parallelism_mode (str): The Sequence parallelism mode. Can only be choosed from ["split_gather", "ring", "all_to_all"]. Defaults to "split_gather". parallel_output (bool): Whether to keep the output parallel when enabling tensor parallelism. Default to True. num_microbatches (int, optional): Number of microbatches when using pipeline parallelism. Defaults to None. microbatch_size (int, optional): Microbatch size when using pipeline parallelism. Either ``num_microbatches`` or ``microbatch_size`` should be provided if using pipeline. If ``num_microbatches`` is provided, this will be ignored. Defaults to None. initial_scale (float, optional): The initial loss scale of AMP. Defaults to 2**16. min_scale (float, optional): The minimum loss scale of AMP. Defaults to 1. growth_factor (float, optional): The multiplication factor for increasing loss scale when using AMP. Defaults to 2. backoff_factor (float, optional): The multiplication factor for decreasing loss scale when using AMP. Defaults to 0.5. growth_interval (int, optional): The number of steps to increase loss scale when no overflow occurs when using AMP. Defaults to 1000. hysteresis (int, optional): The number of overflows before decreasing loss scale when using AMP. Defaults to 2. max_scale (float, optional): The maximum loss scale of AMP. Defaults to 2**32. max_norm (float, optional): Maximum norm for gradient clipping. Defaults to 0. broadcast_buffers (bool, optional): Whether to broadcast buffers in the beginning of training when using DDP. Defaults to True. ddp_bucket_cap_mb (int, optional): The bucket size in MB when using DDP. Defaults to 25. find_unused_parameters (bool, optional): Whether to find unused parameters when using DDP. Defaults to False. check_reduction (bool, optional): Whether to check reduction when using DDP. Defaults to False. gradient_as_bucket_view (bool, optional): Whether to use gradient as bucket view when using DDP. Defaults to False. 
static_graph (bool, optional): Whether to use static graph when using DDP. Defaults to False. zero_bucket_size_in_m (int, optional): Gradient reduce bucket size in million elements when using ZeRO. Defaults to 12. cpu_offload (bool, optional): Whether to open cpu_offload when using ZeRO. Defaults to False. communication_dtype (torch.dtype, optional): Communication dtype when using ZeRO. If not specified, the dtype of param will be used. Defaults to None. overlap_communication (bool, optional): Whether to overlap communication and computation when using ZeRO. Defaults to True. custom_policy (Policy, optional): Custom policy for Shardformer. Defaults to None. pp_style (str, optional): The style for pipeline parallelism. Defaults to '1f1b'. num_model_chunks (int, optional): The number of model chunks for interleaved pipeline parallelism. Defaults to 1. gradient_checkpoint_config (GradientCheckpointConfig, optional): Configuration for gradient checkpointing. Defaults to None. enable_metadata_cache (bool, optional): Whether to enable metadata cache for pipeline parallelism. Defaults to True. make_vocab_size_divisible_by (int, optional): it's used when padding the vocabulary size, to make it choose an faster kenel. Default to 64. overlap_p2p (bool, optional): Whether to overlap the p2p communication in pipeline parallelism. use_fp8 (bool, optional): Whether to enable fp8 mixed precision training. Defaults to False. fp8_communication (bool, optional): Whether to enable fp8 communication. Defaults to False. 
""" def __init__( self, tp_size: int, pp_size: int, ep_size: int, sp_size: int = None, precision: str = "fp16", zero_stage: int = 0, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_jit_fused: bool = False, enable_sequence_parallelism: bool = False, sequence_parallelism_mode: str = None, parallel_output: bool = True, num_microbatches: Optional[int] = None, microbatch_size: Optional[int] = None, initial_scale: float = 2**16, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, max_norm: float = 0, broadcast_buffers: bool = True, ddp_bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, zero_bucket_size_in_m: int = 12, cpu_offload: bool = False, communication_dtype: Optional[torch.dtype] = None, overlap_communication: bool = False, custom_policy: Policy = None, pp_style: str = "1f1b", num_model_chunks: int = 1, scheduler_nodes: List = None, num_layers_per_stage: Optional[List[int]] = None, gradient_checkpoint_config: Optional[GradientCheckpointConfig] = None, enable_metadata_cache: bool = True, make_vocab_size_divisible_by: int = 64, moe_dp_outside: bool = True, overlap_p2p: bool = True, overlap_allgather: bool = False, fp8_communication: bool = False, use_fp8: bool = False, ) -> None: self.logger = get_dist_logger() if overlap_communication or zero_stage == 2: overlap_communication = False zero_stage = 1 self.logger.warning( f"overlap_communication and zero_stage are set to False and 1 because " f"ZeRO-2 or comm overlap cause program hang when some experts are not routed.", ranks=[0], ) assert ( dist.get_world_size() % (tp_size * pp_size) == 0 ), f"World size {dist.get_world_size()} is not divisible by tp_size {tp_size} * pp_size {pp_size}" if enable_sequence_parallelism: 
self.sequence_parallelism_mode = ( sequence_parallelism_mode if sequence_parallelism_mode is not None else "all_to_all" ) assert ( self.sequence_parallelism_mode in SUPPORT_SP_MODE ), f"Sequence parallelism mode {self.sequence_parallelism_mode} is not in the supported list {SUPPORT_SP_MODE}" if self.sequence_parallelism_mode in ["split_gather", "ring"]: assert ( tp_size > 1 ), f"Sequence parallelism mode {self.sequence_parallelism_mode} must be enabled when using tensor parallelism" if sp_size != 1: self.logger.warning( f"The sp_size will be the same as tp_size in sequence parallelism mode {self.sequence_parallelism_mode}," "will ignore the given sequence parallelism size.", ranks=[0], ) self.sp_size = 1 self.dp_size = dist.get_world_size() // (tp_size * pp_size) elif self.sequence_parallelism_mode in ["all_to_all"]: self.sp_size = 1 if sp_size is None else sp_size self.dp_size = dist.get_world_size() // (self.sp_size * pp_size * tp_size) else: self.dp_size = dist.get_world_size() // (tp_size * pp_size) assert ( sp_size == 1 or sp_size is None ), f"You should not set sp_size when sequence parallelism is not enabled." 
self.sp_size = 1 assert self.dp_size % ep_size == 0, f"dp_size should be divisible by ep_size, {self.dp_size=} {ep_size=}" self.moe_dp_size = self.dp_size // ep_size self.ep_size = ep_size self.tp_size = tp_size self.pp_size = pp_size self.precision = precision self.zero_stage = zero_stage self.cpu_offload = cpu_offload self.enable_all_optimization = enable_all_optimization self.enable_fused_normalization = enable_fused_normalization self.enable_flash_attention = enable_flash_attention self.enable_jit_fused = enable_jit_fused self.enable_sequence_parallelism = enable_sequence_parallelism if moe_dp_outside: self.moe_dp_axis, self.pp_axis, self.ep_axis, self.tp_axis, self.sp_axis = 0, 1, 2, 3, 4 self.pg_mesh = ProcessGroupMesh(self.moe_dp_size, self.pp_size, self.ep_size, self.tp_size, self.sp_size) else: self.pp_axis, self.moe_dp_axis, self.ep_axis, self.tp_axis, self.sp_axis = 0, 1, 2, 3, 4 self.pg_mesh = ProcessGroupMesh(self.pp_size, self.moe_dp_size, self.ep_size, self.tp_size, self.sp_size) self.stage_manager = None self.scheduler = None self.custom_policy = custom_policy assert zero_stage in (0, 1, 2) if self.pp_size > 1: assert pp_style in ["1f1b", "interleaved", "zbv"], "Unsupported pipeline parallelism style" assert ( pp_style in ["interleaved", "zbv"] or num_model_chunks == 1 ), "num_model_chunks must be 1 when using 1f1b" assert ( pp_style in ["1f1b", "interleaved"] or num_model_chunks == 2 ), "num_model_chunks must be 2 when using zero bubble pipeline" assert ( num_microbatches is not None or microbatch_size is not None ), "num_microbatches or microbatch_size must be specified when using pipeline parallelism" assert ( self.zero_stage <= 1 ), "To avoid prohibitive gradient synchronization costs, zero stage must be 0 or 1 when using pipeline parallelism" self.stage_manager = PipelineStageManager( self.pg_mesh, pipeline_axis=self.pp_axis, enable_interleave=(pp_style == "interleaved" or pp_style == "zbv"), num_model_chunks=num_model_chunks, 
num_layers_per_stage=num_layers_per_stage, use_zbv=(pp_style == "zbv"), ) if pp_style == "interleaved": assert num_model_chunks > 1, "number of model chunks must be > 1 when using interleaved" self.scheduler = InterleavedSchedule( stage_manager=self.stage_manager, num_model_chunks=num_model_chunks, num_microbatch=num_microbatches, microbatch_size=microbatch_size, enable_metadata_cache=enable_metadata_cache, overlap_p2p=overlap_p2p, ) elif pp_style == "1f1b": self.scheduler = OneForwardOneBackwardSchedule( stage_manager=self.stage_manager, num_microbatches=num_microbatches, microbatch_size=microbatch_size, enable_metadata_cache=enable_metadata_cache, ) elif pp_style == "zbv": assert num_model_chunks > 1, "number of model chunks must be > 1 when using ZerbubbleV" self.scheduler = ZeroBubbleVPipeScheduler( schedule=scheduler_nodes, stage_manager=self.stage_manager, num_model_chunks=num_model_chunks, num_microbatch=num_microbatches, overlap_p2p=overlap_p2p, ) else: raise NotImplementedError() self.tp_group = self.pg_mesh.get_group_along_axis(self.tp_axis) self.dp_group = self.pg_mesh.get_group_along_axis([self.moe_dp_axis, self.ep_axis]) self.pp_group = self.pg_mesh.get_group_along_axis(self.pp_axis) self.moe_dp_group = self.pg_mesh.get_group_along_axis(self.moe_dp_axis) self.ep_group = self.pg_mesh.get_group_along_axis(self.ep_axis) if self.enable_sequence_parallelism and self.sequence_parallelism_mode in ["split_gather", "ring"]: self.sp_group = self.pg_mesh.get_group_along_axis(self.tp_axis) else: self.sp_group = self.pg_mesh.get_group_along_axis(self.sp_axis) # sync gradients across DP * SP ranks if self.enable_sequence_parallelism and self.sequence_parallelism_mode == "all_to_all": self.mixed_dp_group = self.pg_mesh.create_group_along_axis([self.moe_dp_axis, self.ep_axis, self.sp_axis]) self.dp_size = dist.get_world_size(self.mixed_dp_group) else: self.mixed_dp_group = self.dp_group self.use_fp8 = use_fp8 self.shard_config = ShardConfig( 
tensor_parallel_process_group=self.tp_group, sequence_parallel_process_group=self.sp_group, ep_group=self.ep_group, moe_dp_group=self.moe_dp_group, pipeline_stage_manager=self.stage_manager, enable_tensor_parallelism=self.tp_size > 1, enable_all_optimization=self.enable_all_optimization, enable_fused_normalization=self.enable_fused_normalization, enable_flash_attention=self.enable_flash_attention, enable_jit_fused=self.enable_jit_fused, enable_sequence_parallelism=enable_sequence_parallelism, sequence_parallelism_mode=sequence_parallelism_mode, parallel_output=parallel_output, make_vocab_size_divisible_by=make_vocab_size_divisible_by, gradient_checkpoint_config=gradient_checkpoint_config, fp8_communication=fp8_communication, ) self.amp_config = dict( initial_scale=initial_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, min_scale=min_scale, max_scale=max_scale, ) self.ddp_config = dict( broadcast_buffers=broadcast_buffers, bucket_cap_mb=ddp_bucket_cap_mb, find_unused_parameters=find_unused_parameters, check_reduction=check_reduction, gradient_as_bucket_view=gradient_as_bucket_view, static_graph=static_graph, ) self.zero_config = dict( reduce_bucket_size=zero_bucket_size_in_m * 1024 * 1024, communication_dtype=communication_dtype, overlap_communication=overlap_communication, cpu_offload=cpu_offload, partition_grad=(self.zero_stage == 2), forced_dtype=PRECISION_TORCH_TYPE[precision], overlap_allgather=overlap_allgather, ) self.max_norm = max_norm def get_checkpoint_io(self) -> MoECheckpointIO: return MoECheckpointIO( self.mixed_dp_group, self.pp_group, self.tp_group, self.sp_group, self.ep_group, self.moe_dp_group, self.zero_stage, ) def configure( self, model: Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, dataloader: Optional[DataLoader] = None, lr_scheduler: Optional[LRScheduler] = None, ) -> Tuple[Module, OptimizerWrapper, Callable, DataLoader, 
LRScheduler]: param_info = get_param_info(optimizer) # TODO: Support Galore + ZeRO # Replace with distributed implementation if exists optimizer = cast_to_distributed(optimizer) if not isinstance(model, ModelWrapper): use_ddp = (self.dp_size > 1 and self.pp_size == 1 and self.zero_stage == 0) or ( self.dp_size == 1 and self.pp_size == 1 and self.enable_sequence_parallelism and self.sequence_parallelism_mode == "all_to_all" ) if use_ddp: self.logger.warning( f"Will have to check all params are used in pytorch DDP since not all experts are always activated", ranks=[0], ) self.ddp_config["find_unused_parameters"] = True if dist.get_process_group_ranks(self.mixed_dp_group) != dist.get_process_group_ranks(self.moe_dp_group): raise ValueError( f"if pytorch DDP is used, dp_group and moe_dp_group are expected to be the same since DDP can only reduce grad across a single group, but found dp_group {dist.get_process_group_ranks(dp_group)} and moe_dp_group {dist.get_process_group_ranks(self.moe_dp_group)}, you might want to modify your config to bypass DDP \nhint: check the above ddp condition to by pass this" ) model = HybridParallelModule( module=model, precision=self.precision, shard_config=self.shard_config, dp_group=self.mixed_dp_group, tp_group=self.tp_group, sp_group=self.sp_group, use_ddp=use_ddp, ddp_config=self.ddp_config, custom_policy=self.custom_policy, use_fp8=self.use_fp8, ) if optimizer is not None and not isinstance(optimizer, OptimizerWrapper): if self.zero_stage == 0: is_zero = False if self.precision in ["fp16", "bf16"]: optimizer = HybridParallelAMPOptimizer( optimizer, model, use_pipeline=self.enable_pipeline_parallelism or self.ep_size > 1, param_info=param_info, precision=self.precision, max_norm=self.max_norm, **self.amp_config, ) else: optimizer = HybridParallelNaiveOptimizer( optimizer, model, use_pipeline=self.enable_pipeline_parallelism or self.ep_size > 1, param_info=param_info, max_norm=self.max_norm, pp_process_group=self.pp_group, 
tp_process_group=self.tp_group, ) else: is_zero = True if self.dp_size <= 1: self.logger.warning( "Use Zero Optimizer when data parallel size is 1 may introduce unnecessary overhead. " "If you do not intend to use cpu_offload, please consider set zero_stage=0.", ranks=[0], ) assert self.precision != "fp32", "Please set precision to 'fp16' or 'bf16' when using ZeRO." optimizer = MoeHybridParallelZeroOptimizer( optimizer, model, use_pipeline=self.enable_pipeline_parallelism or self.ep_size > 1, param_info=param_info, dp_process_group=self.mixed_dp_group, tp_process_group=self.tp_group, pp_process_group=self.pp_group, moe_dp_group=self.moe_dp_group, verbose=True, clip_grad_norm=self.max_norm, **self.zero_config, **self.amp_config, ) # inject update_master_params model.update_master_params = MethodType(optimizer.update_master_params, model) # Setup optimizers that require global states optim = optimizer.optim if isinstance(optim, DistributedOptim): shard_to_param = optimizer.get_master_to_working_map() if is_zero else {} padding_map = optimizer.get_param_padding_map() if is_zero else defaultdict(int) optim.setup_distributed(self.tp_group, self.dp_group, shard_to_param, padding_map, is_zero) return model, optimizer, criterion, dataloader, lr_scheduler
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/plugin/hybrid_parallel_plugin.py
colossalai/booster/plugin/hybrid_parallel_plugin.py
import ctypes import random from collections import defaultdict from contextlib import contextmanager, nullcontext from copy import deepcopy from functools import partial from types import MethodType from typing import Any, Callable, Dict, Iterator, List, Optional, OrderedDict, Tuple, Union import numpy as np import torch import torch.distributed as dist from peft import PeftModel from torch import Tensor, inf from torch.distributed import ProcessGroup, get_world_size from torch.nn import Module, SyncBatchNorm from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils._pytree import tree_map from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from colossalai.accelerator import get_accelerator from colossalai.amp.naive_amp.mixed_precision_optimizer import MixedPrecisionOptimizer from colossalai.checkpoint_io import CheckpointIO, HybridParallelCheckpointIO from colossalai.cluster import ProcessGroupMesh from colossalai.interface import AMPModelMixin, ModelWrapper, OptimizerWrapper from colossalai.interface.model import PeftUnwrapMixin from colossalai.interface.optimizer import DistributedOptim from colossalai.logging import get_dist_logger from colossalai.nn.optimizer import DistGaloreAwamW, cast_to_distributed from colossalai.pipeline.schedule import InterleavedSchedule, OneForwardOneBackwardSchedule, ZeroBubbleVPipeScheduler from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.quantization import BnbQuantizationConfig, quantize_model from colossalai.quantization.fp8_hook import FP8Hook from colossalai.shardformer import GradientCheckpointConfig, ShardConfig, ShardFormer from colossalai.shardformer.layer.utils import SeqParallelUtils, is_share_sp_tp from colossalai.shardformer.policies.base_policy import Policy from colossalai.tensor.colo_parameter import ColoParameter from 
colossalai.tensor.d_tensor.api import is_distributed_tensor from colossalai.tensor.param_op_hook import ColoParamOpHookManager from colossalai.zero.low_level import LowLevelZeroOptimizer from colossalai.zero.low_level.zero_hook import ZeroOpHook, wait_all_gather_handle from .pp_plugin_base import PipelinePluginBase SUPPORT_SP_MODE = ["split_gather", "ring", "all_to_all", "ring_attn"] PRECISION_TORCH_TYPE = {"fp16": torch.float16, "fp32": torch.float32, "bf16": torch.bfloat16} def _convert_floating_point(x, dtype: torch.dtype = torch.float16): if isinstance(x, torch.Tensor) and torch.is_floating_point(x): return x.to(dtype) return x class HybridParallelModule(ModelWrapper, AMPModelMixin): def __init__( self, module: Module, precision: str, shard_config: ShardConfig, dp_group: ProcessGroup, tp_group: ProcessGroup, sp_group: ProcessGroup, use_ddp: bool, ddp_config: dict, custom_policy: Policy, overlap_allgather: bool = False, use_fp8: bool = False, ) -> None: self.stage_manager = shard_config.pipeline_stage_manager self.shard_config = shard_config self.dp_group = dp_group self.tp_group = tp_group self.sp_group = sp_group self.use_ddp = use_ddp self.require_grad_sync = True self.overlap_allgather = overlap_allgather self.use_fp8 = use_fp8 shardformer = ShardFormer(shard_config) if custom_policy is not None: assert isinstance(custom_policy, object) module, self.shared_params = shardformer.optimize(module, policy=custom_policy) # setting process groups for shared parameters self.shared_param_process_groups = [] for shared_param in self.shared_params: if len(shared_param) > 0: self.shared_param_process_groups.append( self.stage_manager.init_process_group_by_stages(list(shared_param.keys())) ) # setting mixed_precision self.mixed_precision = None if precision == "fp16": self.mixed_precision = torch.float16 elif precision == "bf16": self.mixed_precision = torch.bfloat16 if self.mixed_precision is not None: module = module.to(self.mixed_precision) module = 
module.to(get_accelerator().get_current_device()) # setting input type cast when using mixed precision self.convert_fn = None if self.mixed_precision is not None: self.convert_fn = partial(_convert_floating_point, dtype=self.mixed_precision) # setting ddp configs if use_ddp: # convert model to sync bn module = SyncBatchNorm.convert_sync_batchnorm(module, dp_group) # wrap the model with PyTorch DDP module = DDP(module, process_group=dp_group, **ddp_config) super().__init__(module) self.op_hooks = [] if use_fp8: self.op_hooks.append(FP8Hook()) if overlap_allgather: self.op_hooks.append(ZeroOpHook()) if use_fp8 or overlap_allgather: for p in module.parameters(): if p.requires_grad and type(p) is not ColoParameter: p.__class__ = ColoParameter p.__init__(p, requires_grad=True) def sync_shared_params(self): for shared_param, group in zip(self.shared_params, self.shared_param_process_groups): if self.stage_manager.stage in shared_param: param = shared_param[self.stage_manager.stage] dist.all_reduce(param.grad, group=group) dist.barrier() @contextmanager def no_sync(self): r""" A context manager to disable automatic gradient synchronization (all-reduce) and allow manual synchronization when 'no_sync' is active. Alternatively, synchronization will occur in the first forward-backward pass when exiting the context. """ # Store the current value of 'require_grad_sync' to restore it later. old_require_grad_sync = self.require_grad_sync # Disable automatic gradient synchronization. self.require_grad_sync = False try: if self.use_ddp: # If using data parallel processing (use_ddp), disable synchronization too. with self.module.no_sync(): yield else: yield finally: # Restore the original value of 'require_grad_sync'. self.require_grad_sync = old_require_grad_sync def sync_dp_grads(self): r""" Synchronize gradients across data parallelism (DP) if the DP group size is greater than 1. 
This function performs an all-reduce operation to combine gradients from different devices in the DP group. Args: None Returns: None """ # Check if the DP group size is 1, meaning no synchronization is needed. if self.dp_group.size() == 1: return # Iterate through the model's parameters and perform gradient synchronization. for p in self.module.parameters(): if p.grad is not None: # Perform all-reduce to combine gradients from different devices. dist.all_reduce(p.grad, group=self.dp_group) # Normalize the gradient by dividing it by the DP group size. p.grad.div_(self.dp_group.size()) def sync_sp_grads(self, grads: Optional[List[torch.Tensor]] = None): r""" Synchronize gradients that are partially derived within sequence parallelism if sequence parallelism is enabled. Gradients can be provided explicitly or extracted from the module. Args: grads (Optional[List[torch.Tensor]]): A list of gradient tensors to synchronize. If not provided, gradients will be extracted from the model. Returns: None """ if self.shard_config.enable_sequence_parallelism: if self.shard_config.sequence_parallelism_mode in ["all_to_all", "ring_attn"]: return if self.shard_config.sequence_parallelism_mode in ["split_gather", "ring"]: # If sequence parallelism is enabled and mode is split_gather or ring, gradients are synchronized # across the tensor parallelism group. group = self.tp_group else: raise ValueError(f"Unknown sequence parallelism mode: {self.shard_config.sequence_parallelism_mode}") if grads is not None: # Synchronize provided gradient tensors across the tensor parallelism group. SeqParallelUtils.allreduce_partial_data_grad(process_group=group, grads=grads) else: # Synchronize gradients from the model across the tensor parallelism group. 
SeqParallelUtils.allreduce_partial_data_grad(process_group=group, model=self.module) def forward(self, *args, **kwargs): if self.convert_fn is not None: args = tree_map(self.convert_fn, args) kwargs = tree_map(self.convert_fn, kwargs) with self._hook_context(): return super().forward(*args, **kwargs) def unwrap(self, unwrap_peft: bool = True): model = self.module if isinstance(model, DDP): model = model.module if unwrap_peft and isinstance(model, PeftModel): model = PeftUnwrapMixin(model) return model def _force_wait_all_gather(self): for p in self.module.parameters(): wait_all_gather_handle(p) def _hook_context(self): return ColoParamOpHookManager.use_hooks(*self.op_hooks) if len(self.op_hooks) > 0 else nullcontext() def get_param_info(optim: Optimizer): # Get a backup of necessary information of parameters for future use, which includes: # 1. A complete param_group, with params in the form of param_id # 2. A mapping from param address (obtained using id(param)) to integer param_id # 3. A mapping from integer param_id to param address. # 4. A mapping from param_address (obtained using id(param)) to the original shape of parameter before sharding. # When Zero is used, the params here are fp16/bf16 model params rather than fp32 master params in optimizer. 
if optim is None: return {} param_info = {"param_groups": [], "param2id": {}, "id2param": {}, "param2shape": {}} start_index = 0 for group in optim.param_groups: packed_group = {k: v for k, v in group.items() if k != "params"} packed_group["params"] = [] for param_id, param in enumerate(group["params"], start_index): original_shape = param.shape if isinstance(param, torch.Tensor) else None packed_group["params"].append(param_id) param_info["param2id"][id(param)] = param_id param_info["id2param"][param_id] = id(param) param_info["param2shape"][id(param)] = original_shape param_info["param_groups"].append(packed_group) start_index += len(group["params"]) return param_info def reinitialize_optimizer(optim: Optimizer, model: Module): model_params = set(model.parameters()) new_param_groups = [] for group in optim.param_groups: params = [p for p in group["params"] if p in model_params] new_param_groups.append({**group, "params": params}) optim.__setstate__({"param_groups": new_param_groups}) class HybridParallelNaiveOptimizer(OptimizerWrapper): def __init__( self, optim: Optimizer, model: HybridParallelModule, use_pipeline: bool, param_info: OrderedDict, max_norm: float = 0, tp_process_group: Optional[ProcessGroup] = None, # if using tp pp_process_group: Optional[ProcessGroup] = None, # if using pp ): self.param_info = param_info if use_pipeline: reinitialize_optimizer(optim, model) self.model = model self.stage_manager = model.stage_manager self.shared_params = model.shared_params self.max_norm = max_norm self.tp_pg = tp_process_group self.pp_pg = pp_process_group self.tp_size = get_world_size(self.tp_pg) if self.tp_pg is not None else 1 self.pp_size = get_world_size(self.pp_pg) if self.pp_pg is not None else 1 self._current_grad_norm: Optional[float] = None super().__init__(optim) def backward(self, loss: Tensor, inputs=None, retain_graph=False, **kwargs): r""" Backpropagate gradients through the model and optionally synchronize sequence parallelism gradients. 
This method performs backward pass for gradient computation. If sequence parallelism is enabled and gradient synchronization is required, it will synchronize gradients that are partially derived within sequence parallelism across tp parallelism groups. Args: loss (Tensor): The loss tensor to compute gradients with respect to. *args: Additional positional arguments to be passed to the superclass backward method. **kwargs: Additional keyword arguments to be passed to the superclass backward method. Returns: None """ # Call the superclass backward method to compute gradients. with self.model._hook_context(): super().backward(loss, inputs=inputs, retain_graph=retain_graph, **kwargs) if self.model.require_grad_sync: # If gradient synchronization is required, sync sequence parallelism gradients. self.model.sync_sp_grads() else: # If gradient synchronization is is not required, return. return def backward_by_grad(self, tensor: Tensor, grad: Tensor, inputs: Tensor = None, retain_graph: bool = False): """ Backpropagate gradients through the model using a precomputed gradient and optionally synchronize sequence parallelism gradients. This method performs a backward pass for gradient computation using a precomputed gradient tensor. If sequence parallelism is enabled and gradient synchronization is required, it will synchronize gradients that are partially derived within sequence parallelism across tp parallelism groups. Args: tensor (Tensor): The input tensor for which gradients are computed. grad (Tensor): The precomputed gradient tensor to compute gradients with respect to the input tensor. Returns: None """ # Call the superclass backward method to compute gradients. super().backward_by_grad(tensor, grad, inputs=inputs, retain_graph=retain_graph) if self.model.require_grad_sync: # If gradient synchronization is required, sync sequence parallelism gradients. self.model.sync_sp_grads() else: # If gradient synchronization is is not required, return. 
return def step(self, *args, **kwargs): r""" Perform an optimization step. Args: *args: Variable-length positional arguments to be passed to the optimizer's step function. **kwargs: Keyword arguments to be passed to the optimizer's step function. """ if self.max_norm > 0: # Compute the total gradient norm. param_gradient_pairs = [ (p, p.grad) for group in self.optim.param_groups for p in group["params"] if p.grad is not None ] total_norm = self._compute_grad_norm(param_gradient_pairs) self._current_grad_norm = total_norm # Clip the gradients to prevent exploding gradients. self._clip_grad_norm(total_norm) # Perform the optimization step using the underlying optimizer. self.optim.step(*args, **kwargs) def _compute_grad_norm(self, param_gradient_pairs: List[Tuple[Tensor]], norm_type: int = 2) -> int: r""" Compute and return the gradient norm for gradient clipping. Args: param_gradient_pairs (List[Tuple[Tensor]]): List of (parameter, gradient) pairs; gradients are used for norm calculation. norm_type (int, optional): Type of the norm used (e.g., 2 for L2 norm). Defaults to 2. Returns: float: The total norm of the given gradients. """ if len(param_gradient_pairs) == 0: return 0.0 norm_type = float(norm_type) # gradients used for norm calculation. gradients = [grad for param, grad in param_gradient_pairs] if norm_type == inf: total_norm = max(grad.data.abs().max() for grad in gradients) total_norm_cuda = torch.tensor( [float(total_norm)], device=get_accelerator().get_current_device(), dtype=torch.float32 ) if self.tp_size > 1: dist.all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX, group=self.tp_pg) if self.pp_size > 1: dist.all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX, group=self.pp_pg) total_norm = total_norm_cuda.item() else: # gradients used for norm calculation. gradients = [grad for param, grad in param_gradient_pairs] # grad_to_param_mapping is used to check which gradients are not distributed across devices of the 'tp_group'. 
grad_to_param_mapping = {id(grad): param for param, grad in param_gradient_pairs} total_norm_exponentiated = 0.0 for grad in gradients: grad_norm_exponentiated = grad.data.double().norm(norm_type) ** norm_type # If 'tp_size' is greater than 1 and the parameter for the gradient is not a distributed tensor, # it indicates that the parameter is not distributed across devices of the 'tp_group'. # Consequently, there is no need to perform an 'all_reduce' operation for 'grad_norm'. # However, we still perform the 'all_reduce' operation for the sake of good coding practices. # To ensure mathematical equivalence, we divide the 'grad_norm' by 'tp_size.' if self.tp_size > 1: param_for_grad = grad_to_param_mapping[id(grad)] if not is_distributed_tensor(param_for_grad): grad_norm_exponentiated /= self.tp_size # If 'pp_size' is greater than 1 and the gradient belongs to shared parameters, # it means that this parameter is used in two different pipeline stages. # To avoid redundant norm calculations, we divide the exponent of this norm by # the number of shared stages. 
if self.pp_size > 1: for shared_param in self.shared_params: if self.stage_manager.stage in shared_param: stage_shared_param = shared_param[self.stage_manager.stage] if grad is stage_shared_param.grad: grad_norm_exponentiated /= len(shared_param) total_norm_exponentiated += grad_norm_exponentiated total_norm_exponentiated_cuda = torch.tensor( [float(total_norm_exponentiated)], device=get_accelerator().get_current_device(), dtype=torch.float32 ) if self.tp_size > 1: # compute norm in tp process group dist.all_reduce(tensor=total_norm_exponentiated_cuda, op=dist.ReduceOp.SUM, group=self.tp_pg) if self.pp_size > 1: # compute norm in pp process group dist.all_reduce(tensor=total_norm_exponentiated_cuda, op=dist.ReduceOp.SUM, group=self.pp_pg) # compute the total_norm total_norm = total_norm_exponentiated_cuda.item() ** (1.0 / norm_type) return total_norm def _clip_grad_norm(self, total_norm: float) -> None: r""" Clips the gradients of the model's parameters to prevent exploding gradients. Args: total_norm (float): The computed total gradient norm. 
Returns: None """ clip_coef = torch.tensor(self.max_norm / (total_norm + 1e-6)) clip_coef_clamped = torch.clamp(clip_coef, max=1.0) for group in self.optim.param_groups: for p in group["params"]: if p.grad is None: continue p.grad.data.mul_(clip_coef_clamped) def update_master_params(self, model: Module): pass def get_working_to_master_map(self): return None def get_master_to_working_map(self): return None def get_grad_norm(self, norm_type=2, **kwargs): return self._current_grad_norm class HybridParallelAMPOptimizer(MixedPrecisionOptimizer): def __init__( self, optim: Optimizer, model: HybridParallelModule, use_pipeline: bool, param_info: OrderedDict, precision: str = "fp16", initial_scale: float = 2**16, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, max_norm: float = 0, tp_process_group: Optional[ProcessGroup] = None, # if using tp pp_process_group: Optional[ProcessGroup] = None, # if using pp ): self.model = model self.param_info = param_info self.stage_manager = model.stage_manager self.shared_params = model.shared_params self.tp_pg = tp_process_group self.pp_pg = pp_process_group self.tp_size = get_world_size(self.tp_pg) if self.tp_pg is not None else 1 self.pp_size = get_world_size(self.pp_pg) if self.pp_pg is not None else 1 if use_pipeline: reinitialize_optimizer(optim, model) super().__init__( optim, precision=precision, initial_scale=initial_scale, min_scale=min_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, max_norm=max_norm, ) def backward(self, loss: Tensor, inputs=None, retain_graph=False, **kwargs): r""" Backpropagate gradients through the model and optionally synchronize sequence parallelism gradients. This method performs backward pass for gradient computation. 
If sequence parallelism is enabled and gradient synchronization is required, it will synchronize gradients that are partially derived within sequence parallelism across tp parallelism groups. Args: loss (Tensor): The loss tensor to compute gradients with respect to. *args: Additional positional arguments to be passed to the superclass backward method. **kwargs: Additional keyword arguments to be passed to the superclass backward method. Returns: None """ # Call the superclass backward method to compute gradients. with self.model._hook_context(): super().backward(loss, inputs=inputs, retain_graph=retain_graph, **kwargs) if self.model.require_grad_sync: # If gradient synchronization is required, sync sequence parallelism gradients. self.model.sync_sp_grads() else: # If gradient synchronization is is not required, return. return def backward_by_grad(self, tensor: Tensor, grad: Tensor, inputs: Tensor = None, retain_graph: bool = False): """ Backpropagate gradients through the model using a precomputed gradient and optionally synchronize sequence parallelism gradients. This method performs a backward pass for gradient computation using a precomputed gradient tensor. If sequence parallelism is enabled and gradient synchronization is required, it will synchronize gradients that are partially derived within sequence parallelism across tp parallelism groups. Args: tensor (Tensor): The input tensor for which gradients are computed. grad (Tensor): The precomputed gradient tensor to compute gradients with respect to the input tensor. Returns: None """ # Call the superclass backward method to compute gradients. super().backward_by_grad(tensor, grad, inputs=inputs, retain_graph=retain_graph) if self.model.require_grad_sync: # If gradient synchronization is required, sync sequence parallelism gradients. self.model.sync_sp_grads() else: # If gradient synchronization is is not required, return. 
return def _compute_grad_norm(self, param_gradient_pairs: List[Tuple[Tensor]], norm_type: int = 2) -> int: r""" Compute and return the gradient norm for gradient clipping. Args: param_gradient_pairs (List[Tuple[Tensor]]): List of (parameter, gradient) pairs; gradients are used for norm calculation. norm_type (int, optional): Type of the norm used (e.g., 2 for L2 norm). Defaults to 2. Returns: float: The total norm of the given gradients. """ if len(param_gradient_pairs) == 0: return 0.0 norm_type = float(norm_type) if norm_type == inf: # The parent class calculates the norm of 'dp' gradients, # so we need to calculate the norm of 'tp' and 'pp' gradients. total_norm = super()._compute_grad_norm(param_gradient_pairs, norm_type) total_norm_cuda = torch.tensor( [float(total_norm)], device=get_accelerator().get_current_device(), dtype=torch.float32 ) if self.tp_size > 1: dist.all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX, group=self.tp_pg) if self.pp_size > 1: dist.all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX, group=self.pp_pg) total_norm = total_norm_cuda.item() else: # gradients used for norm calculation. gradients = [grad for param, grad in param_gradient_pairs] # grad_to_param_mapping is used to check which gradients are not distributed in tensor parallelism. grad_to_param_mapping = {id(grad): param for param, grad in param_gradient_pairs} total_norm_exponentiated = 0.0 for grad in gradients: grad_norm_exponentiated = grad.data.double().norm(norm_type) ** norm_type # If 'tp_size' is greater than 1 and the parameter for the gradient is not a distributed tensor, # it indicates that the parameter is not distributed across devices of the 'tp_group'. # Consequently, there is no need to perform an 'all_reduce' operation for 'grad_norm'. # However, we still perform the 'all_reduce' operation for the sake of good coding practices. # To ensure mathematical equivalence, we divide the 'grad_norm' by 'tp_size.' 
if self.tp_size > 1: param_for_grad = grad_to_param_mapping[id(grad)] if not is_distributed_tensor(param_for_grad): grad_norm_exponentiated /= self.tp_size # If 'pp_size' is greater than 1 and the gradient belongs to shared parameters, # it means that this parameter is used in two different pipeline stages. # To avoid redundant norm calculations, we divide the exponent of this norm by # the number of shared stages. if self.pp_size > 1: for shared_param in self.shared_params: if self.stage_manager.stage in shared_param: stage_working_shared_param = shared_param[self.stage_manager.stage] stage_master_shared_param = self.working_to_master_map[stage_working_shared_param] if grad is stage_master_shared_param.grad: grad_norm_exponentiated /= len(shared_param) total_norm_exponentiated += grad_norm_exponentiated total_norm_exponentiated_cuda = torch.tensor( [float(total_norm_exponentiated)], device=get_accelerator().get_current_device(), dtype=torch.float32 ) if self.tp_size > 1: # compute norm in tp process group dist.all_reduce(tensor=total_norm_exponentiated_cuda, op=dist.ReduceOp.SUM, group=self.tp_pg) if self.pp_size > 1: # compute norm in pp process group dist.all_reduce(tensor=total_norm_exponentiated_cuda, op=dist.ReduceOp.SUM, group=self.pp_pg) # compute the total_norm total_norm = total_norm_exponentiated_cuda.item() ** (1.0 / norm_type) return total_norm class HybridParallelZeroOptimizer(LowLevelZeroOptimizer): def __init__( self, optimizer: Optimizer, model: HybridParallelModule, use_pipeline: bool, param_info: OrderedDict, pg_to_param_list: Dict[ProcessGroup, List[torch.nn.Parameter]] = None, initial_scale: int = 2**16, # grad scaler config min_scale: int = 1, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, hysteresis: int = 2, max_scale: int = 2**24, clip_grad_norm: float = 0.0, # grad clipping verbose: bool = False, reduce_bucket_size: int = 1024 * 1024, # communication communication_dtype: Optional[torch.dtype] = None, 
overlap_communication: bool = True, partition_grad: bool = False, # stage 2 flag cpu_offload: bool = False, # cpu offload dp_process_group: Optional[ProcessGroup] = None, # the dp pg for comm tp_process_group: Optional[ProcessGroup] = None, # if using tp pp_process_group: Optional[ProcessGroup] = None, # if using pp forced_dtype: Optional[torch.dtype] = None, overlap_allgather: bool = False, fp8_communication: bool = False, ): self.model = model self.param_info = param_info self.stage_manager = model.stage_manager self.shared_params = model.shared_params self.tp_pg = tp_process_group self.pp_pg = pp_process_group if use_pipeline: reinitialize_optimizer(optimizer, model) super().__init__( optimizer=optimizer, initial_scale=initial_scale, min_scale=min_scale, pg_to_param_list=pg_to_param_list, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, clip_grad_norm=clip_grad_norm, verbose=verbose, reduce_bucket_size=reduce_bucket_size, communication_dtype=communication_dtype, overlap_communication=overlap_communication, partition_grad=partition_grad, cpu_offload=cpu_offload, dp_process_group=dp_process_group, forced_dtype=forced_dtype, overlap_allgather=overlap_allgather, fp8_communication=fp8_communication, backward_context=model._hook_context, ) def sync_dp_grads(self): r""" Synchronize gradients in the data parallelism dimension. This method wraps the existing `_sync_grad` method in order to explicitly synchronize gradients in the data parallelism dimension. It is necessary due to the introduction of new parallel dimensions, namely tp (tensor parallelism) and pp (pipeline parallelism). This ensures better code organization and readability. Args: None Returns: None """ # Call the superclass `_sync_grad` method to synchronize gradients. super()._sync_grad() def _sync_sp_grads(self): r"""
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/mixed_precision/fp16_naive.py
colossalai/booster/mixed_precision/fp16_naive.py
from .mixed_precision_base import MixedPrecision class FP16NaiveMixedPrecision(MixedPrecision): """ Precision for mixed precision training in FP16 using naive AMP. Args: log_num_zeros_in_grad(bool): return number of zeros in the gradients. initial_scale(int): initial scale of gradient scaler. growth_factor(int): the growth rate of loss scale. backoff_factor(float): the decrease rate of loss scale. hysteresis(int): delay shift in dynamic loss scaling. max_scale(int): maximum loss scale allowed. verbose(bool): if set to `True`, will print debug info. """ def __init__( self, log_num_zeros_in_grad: bool, initial_scale: int, growth_factor: int, backoff_factor: float, hysteresis: int, max_scale: int, verbose: bool = None, ) -> None: pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/mixed_precision/bf16.py
colossalai/booster/mixed_precision/bf16.py
from .mixed_precision_base import MixedPrecision class BF16MixedPrecision(MixedPrecision): pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/mixed_precision/fp16_apex.py
colossalai/booster/mixed_precision/fp16_apex.py
from typing import Any, Optional, Union import torch from .mixed_precision_base import MixedPrecision class FP16ApexMixedPrecision(MixedPrecision): """ Precision for mixed precision training in FP16 using apex AMP. Args: opt_level(str, optional, default="O1" ): Pure or mixed precision optimization level. Accepted values are “O0”, “O1”, “O2”, and “O3”, explained in detail above Apex AMP Documentation. cast_model_type (torch.dtype, optional, default=None): Casts your model’s parameters and buffers to the desired type. patch_torch_functions (bool, optional, default=None): Patch all Torch functions and Tensor methods to perform Tensor Core-friendly ops like GEMMs and convolutions in FP16, and any ops that benefit from FP32 precision in FP32. keep_batchnorm_fp32 (bool or str, optional, default=None): To enhance precision and enable cudnn batchnorm (which improves performance), it’s often beneficial to keep batchnorm weights in FP32 even if the rest of the model is FP16. master_weights (bool, optional, default=None): Maintain FP32 master weights to accompany any FP16 model weights. FP32 master weights are stepped by the optimizer to enhance precision and capture small gradients. loss_scale (float or str, optional, default=None): If loss_scale is a float value, use this value as the static (fixed) loss scale. If loss_scale is the string "dynamic", adaptively adjust the loss scale over time. Dynamic loss scale adjustments are performed by Amp automatically. cast_model_outputs (torch.dpython:type, optional, default=None): Option to ensure that the outputs of your model(s) are always cast to a particular type regardless of opt_level. num_losses(int, optional, default=1): Option to tell AMP in advance how many losses/backward passes you plan to use. When used in conjunction with the loss_id argument to `amp.scale_loss`, enables Amp to use a different loss scale per loss/backward pass, which can improve stability. 
If num_losses is left to 1, Amp will still support multiple losses/backward passes, but use a single global loss scale for all of them. verbosity(int, default=1): Set to 0 to suppress Amp-related output. min_loss_scale(float, default=None): Sets a floor for the loss scale values that can be chosen by dynamic loss scaling. The default value of None means that no floor is imposed. If dynamic loss scaling is not used, min_loss_scale is ignored. max_loss_scale(float, default=2.**24 ): Sets a ceiling for the loss scale values that can be chosen by dynamic loss scaling. If dynamic loss scaling is not used, max_loss_scale is ignored. """ def __init__( self, opt_level: Optional[str] = "O1", cast_model_type: torch.dtype = None, patch_torch_functions: bool = None, keep_batchnorm_fp32: Union[bool, str] = None, master_weights: bool = None, loss_scale: Union[float, str] = None, cast_model_outputs: Any = None, num_losses: Optional[int] = 1, verbosity: int = 1, min_loss_scale: float = None, max_loss_scale: float = 2.0**24, ) -> None: pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/mixed_precision/fp16_torch.py
colossalai/booster/mixed_precision/fp16_torch.py
from typing import Callable, Optional, Tuple, Union import torch import torch.nn as nn from torch import Tensor from torch.optim import Optimizer from colossalai.accelerator import get_accelerator from colossalai.interface import ModelWrapper, OptimizerWrapper from .mixed_precision_base import MixedPrecision __all__ = ["FP16_Torch_MixedPrecision", "TorchAMPOptimizer", "TorchAMPModule"] class TorchAMPOptimizer(OptimizerWrapper): """ Optimizer wrapper for mixed precision training in FP16 using PyTorch AMP. Args: optim (Optimizer): Optimizer to wrap. init_scale (float): Initial scale factor. Default: 2**16. growth_factor (float): Factor by which the scale is multiplied during :meth:`torch.cuda.amp.GradScaler.step` if gradients were found to be finite this iteration. Default: 2.0. backoff_factor (float): Factor by which the scale is multiplied during :meth:`torch.cuda.amp.GradScaler.step` if gradients were found to be infinite this iteration. Default: 0.5. growth_interval (int): Number of iterations between :meth:`torch.cuda.amp.GradScaler.step` calls that may cause the scale to increase. Default: 2000. 
""" def __init__( self, optim: Optimizer, init_scale: float = 2.0**16, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, ) -> None: super().__init__(optim) self.scaler = torch.cuda.amp.GradScaler( init_scale=init_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, ) def backward(self, loss: Tensor, inputs=None, retain_graph=False, **kwargs) -> None: scaled_loss = self.scale_loss(loss) scaled_loss.backward(inputs=inputs, retain_graph=retain_graph, **kwargs) def step(self, *args, **kwargs) -> Optional[float]: out = self.scaler.step(self.optim, *args, **kwargs) self.scaler.update() return out def scale_loss(self, loss: Tensor) -> Tensor: return self.scaler.scale(loss) def unscale_grad(self) -> None: self.scaler.unscale_(self.optim) def clip_grad_by_value(self, clip_value: float, *args, **kwargs) -> None: self.unscale_grad() super().clip_grad_by_value(clip_value, *args, **kwargs) def clip_grad_by_norm( self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0, error_if_nonfinite: bool = False, *args, **kwargs, ) -> None: self.unscale_grad() super().clip_grad_by_norm(max_norm, norm_type, error_if_nonfinite, *args, **kwargs) class TorchAMPModule(ModelWrapper): """ Module wrapper for mixed precision training in FP16 using PyTorch AMP. Args: module (nn.Module): Module to wrap. """ def __init__(self, module: nn.Module): super().__init__(module) def forward(self, *args, **kwargs): with get_accelerator().autocast(): return self.module(*args, **kwargs) class FP16TorchMixedPrecision(MixedPrecision): """ Precision for mixed precision training in FP16 using PyTorch AMP. Args: init_scale (float): Initial scale factor. Default: 2**16. growth_factor (float): Factor by which the scale is multiplied during :meth:`torch.cuda.amp.GradScaler.step` if gradients were found to be finite this iteration. Default: 2.0. 
backoff_factor (float): Factor by which the scale is multiplied during :meth:`torch.cuda.amp.GradScaler.step` if gradients were found to be infinite this iteration. Default: 0.5. growth_interval (int): Number of iterations between :meth:`torch.cuda.amp.GradScaler.step` calls that may cause the scale to increase. Default: 2000. """ def __init__( self, init_scale: float = 2.0**16, growth_factor: float = 2.0, backoff_factor: float = 0.5, growth_interval: int = 2000, ) -> None: super().__init__() self.torch_amp_kwargs = dict( init_scale=init_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, ) def configure( self, model: nn.Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, ) -> Tuple[nn.Module, OptimizerWrapper, Callable]: model = TorchAMPModule(model) if optimizer is not None: optimizer = TorchAMPOptimizer(optimizer, **self.torch_amp_kwargs) if criterion is not None: criterion = TorchAMPModule(criterion) return model, optimizer, criterion
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/mixed_precision/fp8.py
colossalai/booster/mixed_precision/fp8.py
from .mixed_precision_base import MixedPrecision class FP8MixedPrecision(MixedPrecision): pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/mixed_precision/__init__.py
colossalai/booster/mixed_precision/__init__.py
from .bf16 import BF16MixedPrecision from .fp8 import FP8MixedPrecision from .fp16_apex import FP16ApexMixedPrecision from .fp16_naive import FP16NaiveMixedPrecision from .fp16_torch import FP16TorchMixedPrecision from .mixed_precision_base import MixedPrecision __all__ = [ "MixedPrecision", "mixed_precision_factory", "FP16_Apex_MixedPrecision", "FP16_Torch_MixedPrecision", "FP32_MixedPrecision", "BF16_MixedPrecision", "FP8_MixedPrecision", "FP16NaiveMixedPrecision", ] _mixed_precision_mapping = { "fp16": FP16TorchMixedPrecision, "fp16_apex": FP16ApexMixedPrecision, "fp16_naive": FP16NaiveMixedPrecision, "bf16": BF16MixedPrecision, "fp8": FP8MixedPrecision, } def mixed_precision_factory(mixed_precision_type: str) -> MixedPrecision: """ Factory method to create mixed precision object Args: mixed_precision_type (str): mixed precision type, including None, 'fp16', 'fp16_apex', 'bf16', and 'fp8'. """ if mixed_precision_type in _mixed_precision_mapping: return _mixed_precision_mapping[mixed_precision_type]() else: raise ValueError( f"Mixed precision type {mixed_precision_type} is not supported, support types include {list(_mixed_precision_mapping.keys())}" )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/booster/mixed_precision/mixed_precision_base.py
colossalai/booster/mixed_precision/mixed_precision_base.py
from abc import ABC, abstractmethod from typing import Callable, Optional, Tuple import torch.nn as nn from torch.optim import Optimizer from colossalai.interface import OptimizerWrapper class MixedPrecision(ABC): """ An abstract class for mixed precision training. """ @abstractmethod def configure( self, model: nn.Module, optimizer: Optional[Optimizer] = None, criterion: Optional[Callable] = None, ) -> Tuple[nn.Module, OptimizerWrapper, Callable]: # TODO: implement this method pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/__init__.py
colossalai/amp/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/mixed_precision_optimizer.py
colossalai/amp/naive_amp/mixed_precision_optimizer.py
from typing import Dict, List, Optional, Tuple import torch from torch import Tensor, inf from torch.nn import Module, Parameter from torch.optim import Optimizer from colossalai.interface import OptimizerWrapper from .mixed_precision_mixin import BF16MixedPrecisionMixin, FP16MixedPrecisionMixin class NaiveFP16MixedPrecisionMixin(FP16MixedPrecisionMixin): def __init__( self, working_params: List[Parameter], initial_scale: float = 2**16, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, ) -> None: super().__init__( initial_scale, min_scale, growth_factor, backoff_factor, growth_interval, hysteresis, max_scale ) self.params = working_params def check_local_overflow(self) -> bool: for p in self.params: if p.grad is not None and not torch.isfinite(p.grad).all(): return True return False class MixedPrecisionOptimizer(OptimizerWrapper): def __init__( self, optim: Optimizer, precision: str = "fp16", initial_scale: float = 2**16, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, max_norm: float = 0.0, ): super().__init__(optim) if precision == "fp16": working_params = [] for group in self.optim.param_groups: for p in group["params"]: working_params.append(p) self.mixed_precision = NaiveFP16MixedPrecisionMixin( working_params, initial_scale=initial_scale, min_scale=min_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, ) elif precision == "bf16": self.mixed_precision = BF16MixedPrecisionMixin() else: raise ValueError(f"Unsupported precision: {precision}") self.max_norm = max_norm self.working_to_master_map: Dict[Parameter, Tensor] = {} self.master_to_working_map: Dict[Tensor, Parameter] = {} # create master weights for group in self.optim.param_groups: master_params = [] for p in 
group["params"]: if p.requires_grad: master_p = p if p.dtype != torch.float: master_p = p.detach().float() self.working_to_master_map[p] = master_p self.master_to_working_map[master_p] = p master_params.append(master_p) group["params"] = master_params self._current_grad_norm: Optional[float] = None def backward(self, loss: Tensor, inputs=None, retain_graph=False, **kwargs): loss = self.mixed_precision.pre_backward(loss) loss.backward(inputs=inputs, retain_graph=retain_graph, **kwargs) def backward_by_grad(self, tensor: Tensor, grad: Tensor, inputs: Tensor = None, retain_graph: bool = False): grad = self.mixed_precision.pre_backward_by_grad(tensor, grad) torch.autograd.backward( tensors=tensor, grad_tensors=grad, inputs=inputs, retain_graph=retain_graph, ) def zero_grad(self, *args, **kwargs): for p in self.working_to_master_map.keys(): p.grad = None self.mixed_precision.pre_zero_grad() return super().zero_grad(*args, **kwargs) def _unscale_and_clip_grads(self, total_norm: float) -> None: """ Unscale and clip gradients before performing the optimization step. Args: total_norm (float): The computed total gradient norm. Returns: None """ div_scale = 1.0 # If mixed-precision training is used, get the gradient division scale from the mixed-precision handler. 
if self.mixed_precision is not None: div_scale = self.mixed_precision.get_grad_div_scale() if self.max_norm > 0.0: # Calculate the scaling factor for gradient clipping # The gradient norm is scaled by 'div_scale' and then clipped to 'max_norm' clip = ((total_norm / div_scale) + 1e-6) / self.max_norm # If the clip factor exceeds 1, adjust 'div_scale' accordingly to ensure clipping if clip > 1: div_scale = clip * div_scale # Apply the scaling factor to gradients for group in self.param_groups: for p in group["params"]: if p.grad is None: continue p.grad.data.mul_(1.0 / div_scale) def _compute_grad_norm(self, param_gradient_pairs: List[Tuple[Tensor]], norm_type: int = 2) -> int: r""" Compute and return the gradient norm for gradient clipping. Args: param_gradient_pairs (List[Tuple[Tensor]]): List of (parameter, gradient) pairs; gradients are used for norm calculation. norm_type (int, optional): Type of the norm used (e.g., 2 for L2 norm). Defaults to 2. Returns: float: The total norm of the given gradients. """ if len(param_gradient_pairs) == 0: return 0.0 # gradients used for norm calculation. gradients = [grad for param, grad in param_gradient_pairs] if norm_type == inf: total_norm = max(grad.data.abs().max() for grad in gradients) else: total_norm_exponentiated = 0.0 for grad in gradients: total_norm_exponentiated += grad.data.double().norm(norm_type) ** norm_type total_norm = total_norm_exponentiated ** (1.0 / norm_type) return total_norm def step(self, *args, **kwargs): if self.mixed_precision.should_skip_step(): self.zero_grad() return # prepare grads for group in self.optim.param_groups: for p in group["params"]: working_param = self.master_to_working_map[p] if p is working_param: continue if working_param.grad is not None: p.grad = working_param.grad.data.float() working_param.grad = None # gradient unscale and clip. if self.max_norm <= 0: # no need to compute gradient norm. total_norm = 0.0 else: # compute the total norm. 
param_gradient_pairs = [ (self.master_to_working_map[p], p.grad) for group in self.param_groups for p in group["params"] if p.grad is not None ] total_norm = self._compute_grad_norm(param_gradient_pairs) self._current_grad_norm = total_norm self._unscale_and_clip_grads(total_norm) self.optim.step(*args, **kwargs) # update working params for group in self.optim.param_groups: for p in group["params"]: working_param = self.master_to_working_map[p] if p is working_param: continue working_param.data.copy_(p.data) def update_master_params(self, model: Module): # Update master params from working params with torch.no_grad(): for p in model.parameters(): if (p is None) or (p not in self.working_to_master_map): continue master_param = self.working_to_master_map[p] master_param.data.copy_(p.data) def get_working_to_master_map(self) -> Dict[int, torch.Tensor]: return {id(working_p): master_p for working_p, master_p in self.working_to_master_map.items()} def get_master_to_working_map(self) -> Dict[int, torch.Tensor]: return {id(master_p): working_p for master_p, working_p in self.master_to_working_map.items()} def get_grad_norm(self, norm_type=2, **kwargs): return self._current_grad_norm
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/__init__.py
colossalai/amp/naive_amp/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/grad_scaler/constant_grad_scaler.py
colossalai/amp/naive_amp/grad_scaler/constant_grad_scaler.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from .base_grad_scaler import BaseGradScaler __all__ = ["ConstantGradScaler"] class ConstantGradScaler(BaseGradScaler): """A gradient scaler which uses constant loss scale Args: initial_scale (float): the initial loss scale verbose (bool): whether to log messages """ def __init__(self, initial_scale: int, verbose: bool): super().__init__(initial_scale, verbose) self.log(f"Constant Gradient Scaler is initialized with scale {self.scale}", ranks=[0]) def update(self, overflow: bool) -> None: """Do nothing to keep the loss scale constant. Args: overflow (bool): whether overflow occurs """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/grad_scaler/base_grad_scaler.py
colossalai/amp/naive_amp/grad_scaler/base_grad_scaler.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from abc import ABC, abstractmethod from typing import Dict import torch from torch import Tensor from colossalai.accelerator import get_accelerator from colossalai.logging import get_dist_logger __all__ = ["BaseGradScaler"] class BaseGradScaler(ABC): """A base class for the gradient scaler. Args: initial_scale (float): the initial loss scale verbose (bool): whether to log messages """ def __init__(self, initial_scale: float, verbose: bool): assert initial_scale > 0 self._scale = torch.tensor([initial_scale], device=get_accelerator().get_current_device(), dtype=torch.float) self._verbose = verbose if self._verbose: self._logger = get_dist_logger() @property def scale(self) -> Tensor: """Returns the loss scale.""" return self._scale @property def inv_scale(self) -> Tensor: """Returns the inverse of the loss scale.""" return self._scale.double().reciprocal().float() def state_dict(self) -> Dict: """Returns the states of the gradient scaler as a dict object.""" state_dict = dict() state_dict["scale"] = self.scale return state_dict def load_state_dict(self, state_dict: Dict) -> None: """Load the states of the gradient scaler from a dict object. Args: state_dict (dict): the states of the gradient scaler """ self._scale = state_dict["scale"] @abstractmethod def update(self, overflow: bool) -> None: """Update the loss scale. Args: overflow (bool): whether overflow occurs """ def log(self, message, *args, **kwargs): """Log messages. Args: message (str): the message to log *args: positional arguments for :class:`colossalai.logging.DistributedLogger` **kwargs: key-word arguments for :class:`colossalai.logging.DistributedLogger` """ if self._verbose: self._logger.info(message, *args, **kwargs)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/grad_scaler/dynamic_grad_scaler.py
colossalai/amp/naive_amp/grad_scaler/dynamic_grad_scaler.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from typing import Optional import torch from colossalai.accelerator import get_accelerator from .base_grad_scaler import BaseGradScaler __all__ = ["DynamicGradScaler"] class DynamicGradScaler(BaseGradScaler): """A gradient scaler which uses dynamic loss scale Args: initial_scale (float): the initial loss scale, defaults to 2**16 growth_factor (float): the multiplication factor for increasing loss scale, defaults to 2 backoff_factor (float): the multiplication factor for decreasing loss scale, defaults to 0.5 growth_interval (int): the number of steps to increase loss scale when no overflow occurs, defaults to 1000 min_scale (float): the minimum loss scale, defaults to None max_scale (float): the maximum loss scale, defaults to None hysteresis (int): the number of overflows before decreasing loss scale, defaults to 2 verbose (bool): whether to log messages, defaults to False """ def __init__( self, initial_scale: float = 2**16, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, min_scale: Optional[float] = None, max_scale: Optional[float] = None, hysteresis: int = 2, verbose: bool = False, ): a = get_accelerator() a.device_count() super().__init__(initial_scale, verbose) if min_scale: self._min_scale = torch.tensor( [min_scale], device=get_accelerator().get_current_device(), dtype=torch.float ) else: self._min_scale = None if max_scale: self._max_scale = torch.tensor( [max_scale], device=get_accelerator().get_current_device(), dtype=torch.float ) else: self._max_scale = None self._growth_factor = growth_factor self._backoff_factor = backoff_factor self._growth_interval = growth_interval self._growth_step = 0 self._hysteresis = hysteresis self._hysteresis_step = 0 self._sanity_checks() def _sanity_checks(self) -> None: """Check if the arguments are correct.""" if self._min_scale: assert self._min_scale > 0, "The minimum gradient scale cannot be zero or negative" assert self._min_scale 
<= self._scale, "The minimum gradient scale cannot be greater than the current scale" if self._max_scale: assert self._max_scale > 0, "The maximum gradient scale cannot be zero or negative" assert self._max_scale >= self._scale, "The maximum gradient scale cannot be smaller than the current scale" assert self._growth_factor > 1, "The growth factor cannot be equal or smaller than 1" assert 0 < self._backoff_factor < 1, "The backoff factor must be between 0 and 1" assert self._hysteresis >= 0, "The hysteresis cannot be negative" def update(self, overflow: bool) -> None: """Update the loss scale. Args: overflow (bool): whether overflow occurs """ if overflow: self._hysteresis_step += 1 self._growth_step = 0 if self._hysteresis_step >= self._hysteresis: self._backoff_scale() self.log(f"Overflow occurs, the loss scale is adjusted to {self.scale.item()}", ranks=[0]) else: self._growth_step += 1 if self._growth_step == self._growth_interval: self._growth_step = 0 self._hysteresis_step = 0 self._grow_scale() self.log( f"No overflow for consecutive {self._growth_interval} steps, " f"the loss scale is adjusted to {self.scale.item()}", ranks=[0], ) def _backoff_scale(self) -> None: """Decrease the loss scale""" self._scale = self._scale * self._backoff_factor if self._min_scale: self._scale = torch.max(self._scale, self._min_scale) def _grow_scale(self) -> None: """Increase the loss scale""" self._scale = self._scale * self._growth_factor if self._max_scale: self._scale = torch.min(self._scale, self._max_scale) def state_dict(self): state_dict = dict() state_dict["scale"] = self._scale state_dict["growth_factor"] = self._growth_factor state_dict["backoff_factor"] = self._backoff_factor state_dict["hysteresis"] = self._hysteresis return state_dict def load_state_dict(self, state_dict): self._scale = state_dict["scale"].to(get_accelerator().get_current_device()) self._growth_factor = state_dict["growth_factor"] self._backoff_factor = state_dict["backoff_factor"] 
self._hysteresis = state_dict["hysteresis"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/grad_scaler/__init__.py
colossalai/amp/naive_amp/grad_scaler/__init__.py
from .base_grad_scaler import BaseGradScaler from .constant_grad_scaler import ConstantGradScaler from .dynamic_grad_scaler import DynamicGradScaler __all__ = ["BaseGradScaler", "ConstantGradScaler", "DynamicGradScaler"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/mixed_precision_mixin/fp16.py
colossalai/amp/naive_amp/mixed_precision_mixin/fp16.py
from abc import abstractmethod from enum import Enum import torch import torch.distributed as dist from torch import Tensor from colossalai.accelerator import get_accelerator from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler from .base import MixedPrecisionMixin class OptimState(Enum): SCALED = 0 UNSCALED = 1 class FP16MixedPrecisionMixin(MixedPrecisionMixin): dtype = torch.float16 def __init__( self, initial_scale: float = 2**16, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, ) -> None: super().__init__() self.grad_scaler = DynamicGradScaler( initial_scale=initial_scale, min_scale=min_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, ) self.optim_state = OptimState.UNSCALED self.found_overflow = torch.zeros(1, dtype=torch.float, device=get_accelerator().get_current_device()) @property def loss_scale(self) -> float: return self.grad_scaler.scale.item() @abstractmethod def check_local_overflow(self) -> bool: """Check whether there is overflow in the local process. This method should be implemented by subclasses. Returns: bool: Whether there is overflow in the local process. 
""" def check_overflow(self) -> bool: # clear previous overflow record self.found_overflow.fill_(0.0) if self.check_local_overflow(): self.found_overflow.fill_(1.0) dist.all_reduce(self.found_overflow, op=dist.ReduceOp.MAX) return self.found_overflow.item() > 0 def pre_backward(self, loss: Tensor) -> Tensor: loss = self.loss_scale * loss self.optim_state = OptimState.SCALED return loss def pre_backward_by_grad(self, tensor: Tensor, grad: Tensor) -> Tensor: self.optim_state = OptimState.SCALED return grad def should_skip_step(self) -> bool: found_inf = self.check_overflow() self.grad_scaler.update(found_inf) if found_inf: self.optim_state = OptimState.UNSCALED return found_inf def pre_zero_grad(self) -> None: pass def get_grad_div_scale(self) -> float: assert self.optim_state == OptimState.SCALED, "grads should be scaled before clipping" self.optim_state = OptimState.UNSCALED return self.loss_scale
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/mixed_precision_mixin/bf16.py
colossalai/amp/naive_amp/mixed_precision_mixin/bf16.py
import torch from torch import Tensor from .base import MixedPrecisionMixin class BF16MixedPrecisionMixin(MixedPrecisionMixin): dtype = torch.bfloat16 def pre_backward(self, loss: Tensor) -> Tensor: return loss def pre_backward_by_grad(self, tensor: Tensor, grad: Tensor) -> Tensor: return grad def should_skip_step(self) -> bool: return False def pre_zero_grad(self) -> None: pass def get_grad_div_scale(self) -> float: return 1.0
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/mixed_precision_mixin/__init__.py
colossalai/amp/naive_amp/mixed_precision_mixin/__init__.py
from .base import MixedPrecisionMixin from .bf16 import BF16MixedPrecisionMixin from .fp16 import FP16MixedPrecisionMixin __all__ = [ "MixedPrecisionMixin", "FP16MixedPrecisionMixin", "BF16MixedPrecisionMixin", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/amp/naive_amp/mixed_precision_mixin/base.py
colossalai/amp/naive_amp/mixed_precision_mixin/base.py
from abc import ABC, abstractmethod import torch from torch import Tensor class MixedPrecisionMixin(ABC): """A helper class for mixed precision training. This mixin is used in mixed precision optimizers. Attributes: dtype (torc.dtype): The expected dtype of the gradients. Examples: ```python class MyMixedPrecisionOptimizer(OptimizerWrapper): def __init__(self, optim: Optimizer): super().__init__(optim) self.mixed_precision = MixedPrecisionMixin() def backward(self, loss): loss = self.mixed_precision.pre_backward(loss) loss.backward() def backward_by_grad(self, tensor, grad): grad = self.mixed_precision.pre_backward_by_grad(tensor, grad) tensor.backward(grad) def step(self): if self.mixed_precision.should_skip_step(): self.zero_grad() return div_scale = self.mixed_precision.get_grad_div_scale() # maybe clip grad here # maybe scale grad here self.optim.step() def zero_grad(self): self.mixed_precision.pre_zero_grad() return self.optim.zero_grad() ``` """ dtype: torch.dtype @abstractmethod def pre_backward(self, loss: Tensor, *args, **kwargs) -> Tensor: """Called before backward. Args: loss (Tensor): Loss value. Returns: Tensor: Loss value (possibly scaled). """ @abstractmethod def pre_backward_by_grad(self, tensor: Tensor, grad: Tensor) -> Tensor: """Called before backward by grad. This is helpful for pipeline parallelism. Args: tensor (Tensor): Tensor to backward. grad (Tensor): Gradient of the tensor. Returns: Tensor: Gradient of the tensor (possibly scaled). """ @abstractmethod def should_skip_step(self) -> bool: """Called before step. Returns: bool: Whether to skip the step. """ @abstractmethod def pre_zero_grad(self) -> None: """Called before zero_grad.""" @abstractmethod def get_grad_div_scale(self) -> float: """Called before step or clip_grad. To keep computation efficiency, this method does not (maybe) unscale grads. Returns: float: A divisor for gradient clipping or step. """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/common.py
colossalai/utils/common.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import functools import os import random from contextlib import contextmanager from pathlib import Path from typing import Callable, Optional, Set import numpy as np import torch import torch.nn as nn from colossalai.accelerator import get_accelerator def get_current_device(): """ A wrapper function for accelerator's API for backward compatibility. """ return get_accelerator().get_current_device() def ensure_path_exists(filename: str): # ensure the path exists dirpath = os.path.dirname(filename) if not os.path.exists(dirpath): Path(dirpath).mkdir(parents=True, exist_ok=True) @contextmanager def conditional_context(context_manager, enable=True): if enable: with context_manager: yield else: yield def is_ddp_ignored(p): return getattr(p, "_ddp_to_ignore", False) def disposable(func: Callable) -> Callable: executed = False @functools.wraps(func) def wrapper(*args, **kwargs): nonlocal executed if not executed: executed = True return func(*args, **kwargs) return wrapper def free_storage(data: torch.Tensor) -> None: """Free underlying storage of a Tensor.""" if data.storage().size() > 0: # Since we're modifying the Tensor's Storage directly, make sure the Tensor # is the sole occupant of the Storage. 
assert data.storage_offset() == 0 data.storage().resize_(0) def _cast_float(args, dtype: torch.dtype): if isinstance(args, torch.Tensor) and torch.is_floating_point(args): args = args.to(dtype) elif isinstance(args, (list, tuple)): args = type(args)(_cast_float(t, dtype) for t in args) elif isinstance(args, dict): args = {k: _cast_float(v, dtype) for k, v in args.items()} return args def set_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def get_non_persistent_buffers_set( module, memo: Optional[Set[nn.Module]] = None, prefix: str = "", remove_duplicate: bool = True ): r""" Args: memo: a memo to store the set of modules already added to the result prefix: a prefix that will be added to the name of the module remove_duplicate: whether to remove the duplicated module instances in the result or not """ if memo is None: memo = set() self_non_persistent_set = set() if module not in memo: if remove_duplicate: memo.add(module) self_non_persistent_set = set( map(lambda key: prefix + ("." if prefix else "") + key, module._non_persistent_buffers_set) ) for name, sub_module in module._modules.items(): if sub_module is None: continue submodule_prefix = prefix + ("." if prefix else "") + name child_non_persistent_set = get_non_persistent_buffers_set( sub_module, memo, submodule_prefix, remove_duplicate ) self_non_persistent_set = set.union(self_non_persistent_set, child_non_persistent_set) return self_non_persistent_set
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/timer.py
colossalai/utils/timer.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import time from typing import Tuple from colossalai.accelerator import get_accelerator class Timer: """A timer object which helps to log the execution times, and provides different tools to assess the times.""" def __init__(self): self._started = False self._start_time = time.time() self._elapsed = 0 self._history = [] @property def has_history(self): return len(self._history) != 0 @property def current_time(self) -> float: get_accelerator().synchronize() return time.time() def start(self): """Firstly synchronize cuda, reset the clock and then start the timer.""" self._elapsed = 0 get_accelerator().synchronize() self._start_time = time.time() self._started = True def lap(self): """lap time and return elapsed time""" return self.current_time - self._start_time def stop(self, keep_in_history: bool = False): """Stop the timer and record the start-stop time interval. Args: keep_in_history (bool, optional): Whether does it record into history each start-stop interval, defaults to False. Returns: int: Start-stop interval. """ get_accelerator().synchronize() end_time = time.time() elapsed = end_time - self._start_time if keep_in_history: self._history.append(elapsed) self._elapsed = elapsed self._started = False return elapsed def get_history_mean(self): """Mean of all history start-stop time intervals. Returns: int: Mean of time intervals """ return sum(self._history) / len(self._history) def get_history_sum(self): """Add up all the start-stop time intervals. Returns: int: Sum of time intervals. """ return sum(self._history) def get_elapsed_time(self): """Return the last start-stop time interval. Returns: int: The last time interval. 
Note: Use it only when timer is not in progress """ assert not self._started, "Timer is still in progress" return self._elapsed def reset(self): """Clear up the timer and its history""" self._history = [] self._started = False self._elapsed = 0 class MultiTimer: """An object contains multiple timers. Args: on (bool, optional): Whether the timer is enabled. Default is True. """ def __init__(self, on: bool = True): self._on = on self._timers = dict() def start(self, name: str): """Start namely one of the timers. Args: name (str): Timer's key. """ if self._on: if name not in self._timers: self._timers[name] = Timer() return self._timers[name].start() def stop(self, name: str, keep_in_history: bool): """Stop namely one of the timers. Args: name (str): Timer's key. keep_in_history (bool): Whether does it record into history each start-stop interval. """ if self._on: return self._timers[name].stop(keep_in_history) else: return None def get_timer(self, name): """Get timer by its name (from multimer) Args: name (str): Timer's key. Returns: :class:`colossalai.utils.Timer`: Timer with the name you give correctly. """ return self._timers[name] def reset(self, name=None): """Reset timers. Args: name (str, optional): If name is designated, the named timer will be reset and others will not, defaults to None. """ if self._on: if name is not None: self._timers[name].reset() else: for timer in self._timers: timer.reset() def is_on(self): return self._on def set_status(self, mode: bool): self._on = mode def __iter__(self) -> Tuple[str, Timer]: for name, timer in self._timers.items(): yield name, timer
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/memory.py
colossalai/utils/memory.py
from collections import namedtuple

import psutil
import torch
import torch.distributed as dist

from colossalai.utils import get_current_device

_GLOBAL_CUDA_MEM_FRACTION = 1.0
_GLOBAL_CPU_MEM_CAPACITY = -1


# copy from PatrickStar
def _get_cpu_memory_info():
    """Collect memory statistics of the host (or container when inside cgroup v1).

    Returns:
        namedtuple: fields ``total``, ``free``, ``cached``, ``buffers``, ``used``, all in bytes.
    """
    ps_mem_info = namedtuple("ps_mem_info", ["total", "free", "cached", "buffers", "used"])
    try:
        # psutil reads the memory info from /proc/memory_info,
        # which results in returning the host memory instead of
        # that of container.
        # Here we try to read the container memory with method in:
        # https://stackoverflow.com/a/46213331/5163915
        mems = {}
        with open("/sys/fs/cgroup/memory/memory.meminfo", "rb") as f:
            for line in f:
                fields = line.split()
                mems[fields[0]] = int(fields[1]) * 1024
        total = mems[b"MemTotal:"]
        free = mems[b"MemFree:"]
        cached = mems[b"Cached:"]
        buffers = mems[b"Buffers:"]
        used = total - free - cached - buffers
        if used < 0:
            # guard against inconsistent cgroup counters
            used = total - free
        mem_info = ps_mem_info(total=total, free=free, cached=cached, buffers=buffers, used=used)
    except FileNotFoundError:
        # not running in a cgroup-v1 container: fall back to host-wide psutil numbers
        mems = psutil.virtual_memory()
        mem_info = ps_mem_info(
            total=mems.total,
            free=mems.free,
            cached=mems.cached,
            buffers=mems.buffers,
            used=mems.used,
        )
    return mem_info


def colo_device_memory_capacity(device: torch.device) -> int:
    """
    Get the capacity of the memory of the device

    Args:
        device (torch.device): a device

    Returns:
        int: size in byte

    Raises:
        ValueError: if the device type is neither ``cpu`` nor ``cuda``.
    """
    # TODO: add NPU support
    assert isinstance(device, torch.device)
    if device.type == "cpu":
        # In the context of 1-CPU-N-GPU, the memory capacity of the current process is 1/N overall CPU memory.
        return colo_get_cpu_memory_capacity() // dist.get_world_size()
    if device.type == "cuda":
        # BUG FIX: multiplying by the (float) memory fraction produced a float even
        # though callers expect an integer byte count; cast back to int.
        return int(torch.cuda.get_device_properties(get_current_device()).total_memory * _GLOBAL_CUDA_MEM_FRACTION)
    # Previously unsupported device types silently fell through and returned None,
    # deferring the crash to the caller; fail loudly here instead.
    raise ValueError(f"Unsupported device type: {device.type}")


def colo_get_cpu_memory_capacity() -> int:
    """
    Get the cpu memory capacity. We may not use all of it.

    Returns:
        int: CPU memory capacity in bytes. When the global capacity has not been
        configured (sentinel ``-1``), the measured total memory is returned.
    """
    global _GLOBAL_CPU_MEM_CAPACITY
    if _GLOBAL_CPU_MEM_CAPACITY == -1:
        mem_info = _get_cpu_memory_info()
        return mem_info.total
    else:
        return _GLOBAL_CPU_MEM_CAPACITY
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/__init__.py
colossalai/utils/__init__.py
from .common import ( _cast_float, conditional_context, disposable, ensure_path_exists, free_storage, get_current_device, get_non_persistent_buffers_set, is_ddp_ignored, set_seed, ) from .multi_tensor_apply import multi_tensor_applier from .tensor_detector import TensorDetector from .timer import MultiTimer, Timer __all__ = [ "conditional_context", "Timer", "MultiTimer", "multi_tensor_applier", "TensorDetector", "ensure_path_exists", "disposable", "_cast_float", "free_storage", "set_seed", "get_current_device", "is_ddp_ignored", "get_non_persistent_buffers_set", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/safetensors.py
colossalai/utils/safetensors.py
# a python safetensors serializer modified from https://github.com/huggingface/safetensors/blob/41bd1acf38ad28ac559522d40596c6c802f79453/safetensors/src/tensor.rs#L214
import json
import warnings
from dataclasses import asdict, dataclass
from typing import Dict, List, Optional, Tuple

import torch
from safetensors.torch import _TYPES, load_file, safe_open

try:
    from tensornvme.async_file_io import AsyncFileWriter
except Exception:
    warnings.warn(
        "Please install the latest tensornvme to use async save. pip install git+https://github.com/hpcaitech/TensorNVMe.git"
    )
# inverse map: torch dtype -> safetensors dtype string
_TYPES_INV = {v: k for k, v in _TYPES.items()}
import io

from torch.distributed.distributed_c10d import _pickler, _unpickler

# queue depth passed to AsyncFileWriter
ASYNC_WRITE_ENTRIES = 32


def _object_to_tensor(obj, device):
    """Pickle an arbitrary object into a 1-D uint8 tensor on ``device``."""
    f = io.BytesIO()
    _pickler(f).dump(obj)
    byte_storage = torch.ByteStorage._from_buffer(f.getvalue())  # type: ignore[attr-defined]
    # Do not replace `torch.ByteTensor` or `torch.LongTensor` with torch.tensor and specifying dtype.
    # Otherwise, it will cause 100X slowdown.
    # See: https://github.com/pytorch/pytorch/issues/65696
    byte_tensor = torch.ByteTensor(byte_storage).to(device)
    return byte_tensor


def _tensor_to_object(tensor, tensor_size):
    """Inverse of ``_object_to_tensor``: unpickle the first ``tensor_size`` bytes."""
    tensor = tensor.cpu()
    buf = tensor.numpy().tobytes()[:tensor_size]
    return _unpickler(io.BytesIO(buf)).load()


@dataclass
class TensorInfo:
    # one safetensors header entry: dtype string, shape, and (start, end) byte
    # offsets into the data section
    dtype: str
    shape: List[int]
    data_offsets: Tuple[int, int]


@dataclass
class PreparedData:
    # n: length of the JSON header in bytes; offset: total data-section size
    n: int
    header_bytes: bytes
    offset: int


def _cast_to_tensor(obj):
    """Return ``obj`` unchanged if it is a tensor, otherwise pickle it into one."""
    if isinstance(obj, torch.Tensor):
        return obj
    return _object_to_tensor(obj, "cpu")


def _cast_to_object(tensor: torch.Tensor):
    """Unpickle a tensor produced by ``_cast_to_tensor`` back into an object."""
    return _tensor_to_object(tensor, tensor.numel() * tensor.element_size())


def _flatten_optim_state_dict(state_dict: dict, seperator: str = ".") -> Tuple[dict, Optional[dict]]:
    """Flatten an optimizer state dict into ``{"state.<idx>.<key>": tensor}`` form.

    Non-tensor values are pickled into tensors and their keys are listed in the
    returned metadata so ``_unflatten_optim_state_dict`` can restore them.
    Returns ``(flat_dict, metadata_or_None)``.
    """
    flat_dict = {}
    non_tensor_keys = []
    if "state" in state_dict:
        # 3-level dict
        states = state_dict["state"]
    else:
        # 2-level dict, usually for optimizer state dict shard
        states = state_dict

    for idx, d in states.items():
        for k, v in d.items():
            if v is None:
                # None entries are dropped; absent keys simply reappear as absent
                continue
            nested_key = f"state{seperator}{idx}{seperator}{k}"
            if not isinstance(v, torch.Tensor):
                non_tensor_keys.append(nested_key)
            flat_dict[nested_key] = _cast_to_tensor(v)
    if "param_groups" in state_dict:
        flat_dict["param_groups"] = _cast_to_tensor(state_dict["param_groups"])
        non_tensor_keys.append("param_groups")
    if len(non_tensor_keys) > 0:
        metadata = {"non_tensor_keys": non_tensor_keys}
    else:
        metadata = None
    return flat_dict, metadata


def _unflatten_optim_state_dict(flat_dict: dict, metadata: Optional[dict] = None, seperator: str = "."):
    """Inverse of ``_flatten_optim_state_dict``.

    ``metadata["non_tensor_keys"]`` (a JSON-encoded list, as produced by
    ``prepare``) names the entries that must be unpickled back into objects.
    """
    state_dict = {}
    if metadata is not None and "non_tensor_keys" in metadata:
        non_tensor_keys = json.loads(metadata["non_tensor_keys"])
    else:
        non_tensor_keys = []
    flat_dict = {k: _cast_to_object(v) if k in non_tensor_keys else v for k, v in flat_dict.items()}
    if "param_groups" in flat_dict:
        # 3-level dict
        state_dict["param_groups"] = flat_dict.pop("param_groups")
        state_dict["state"] = {}
        states = state_dict["state"]
    else:
        # 2-level dict, usually for optimizer state dict shard
        states = state_dict

    for k, v in flat_dict.items():
        parts = k.split(seperator)
        assert len(parts) == 3 and parts[0] == "state"
        idx = int(parts[1])
        key = parts[2]
        if idx not in states:
            states[idx] = {}
        states[idx][key] = v
    return state_dict


def prepare(
    data: Dict[str, torch.Tensor], metadata: Optional[Dict[str, str]] = None
) -> Tuple[PreparedData, List[torch.Tensor], List[str]]:
    """Build the safetensors header for ``data``.

    Returns the prepared header info plus the tensors and their keys in header
    order, so callers can stream the data section in the same order.
    NOTE(review): this mutates the caller's ``metadata`` dict in place
    (values are replaced by their JSON encoding) — confirm callers do not reuse it.
    """
    if metadata is not None:
        assert isinstance(metadata, dict)
        for k, v in metadata.items():
            metadata[k] = json.dumps(v)
            assert isinstance(k, str)
            assert isinstance(metadata[k], str)

    tensors = []
    tensor_keys = []
    header = {}
    offset = 0

    header_metadata = {"format": "pt"}
    if metadata is not None:
        header_metadata.update(metadata)
    header["__metadata__"] = header_metadata

    for name, tensor in data.items():
        n = tensor.numel() * tensor.element_size()
        tensor_info = TensorInfo(
            dtype=_TYPES_INV[tensor.dtype], shape=list(tensor.shape), data_offsets=(offset, offset + n)
        )
        offset += n
        header[name] = asdict(tensor_info)
        tensors.append(tensor)
        tensor_keys.append(name)

    header_buf = json.dumps(header).encode("utf-8")

    # pad the header with spaces to an 8-byte boundary, as the format requires
    extra = (8 - len(header_buf) % 8) % 8
    header_buf += b" " * extra

    n = len(header_buf)

    return PreparedData(n=n, header_bytes=header_buf, offset=offset), tensors, tensor_keys


def save(path: str, state_dict: Dict[str, torch.Tensor], metadata: Optional[Dict[str, str]] = None) -> "AsyncFileWriter":
    """Asynchronously write ``state_dict`` to ``path`` in safetensors layout.

    Returns the AsyncFileWriter so the caller can await/flush completion
    (the previous ``-> None`` annotation was wrong).
    """
    prepared_data, tensors, _ = prepare(state_dict, metadata)
    n, header_bytes, _ = prepared_data.n, prepared_data.header_bytes, prepared_data.offset
    f_writer = AsyncFileWriter(path, n_entries=ASYNC_WRITE_ENTRIES, backend="pthread", n_tasks=2 + len(tensors))
    f_writer.write(n.to_bytes(8, byteorder="little"))
    f_writer.write(header_bytes)

    for tensor in tensors:
        # zero-copy: hand the raw tensor buffer to the async writer
        f_writer.write_raw(tensor, tensor.data_ptr(), tensor.numel() * tensor.element_size(), f_writer.offset)
    return f_writer


def save_nested(path: str, state_dict: Dict[str, torch.Tensor]) -> "AsyncFileWriter":
    """Flatten a nested optimizer state dict and save it via ``save``."""
    flatten_data, metadata = _flatten_optim_state_dict(state_dict)
    return save(path, flatten_data, metadata)


def move_and_save(
    path: str,
    state_dict: Dict[str, torch.Tensor],
    state_dict_pinned: Optional[Dict[str, torch.Tensor]] = None,
    metadata: Optional[Dict[str, str]] = None,
) -> "AsyncFileWriter":
    """Like ``save`` but stages tensors through (optionally pinned) host buffers.

    ``state_dict_pinned``, when given, must hold a pinned tensor per key of
    ``state_dict`` — presumably device-to-host copies go through it; confirm
    against AsyncFileWriter.write_tensor.
    """
    prepared_data, _, tensor_keys = prepare(state_dict, metadata)
    n, header_bytes, _ = prepared_data.n, prepared_data.header_bytes, prepared_data.offset
    f_writer = AsyncFileWriter(path, n_entries=ASYNC_WRITE_ENTRIES, backend="pthread", n_tasks=2 + len(tensor_keys))
    f_writer.write(n.to_bytes(8, byteorder="little"))
    f_writer.write(header_bytes)
    f_writer.register_h2d(len(tensor_keys))
    for name in tensor_keys:
        if state_dict_pinned:
            f_writer.write_tensor(state_dict[name], state_dict_pinned[name])
        else:
            f_writer.write_tensor(state_dict[name])
    return f_writer


def load_flat(checkpoint_path, seperator: str = "."):
    """Load a checkpoint written by ``save_nested`` and restore its nesting."""
    with safe_open(checkpoint_path, framework="pt") as f:
        metadata = f.metadata()
    state_dict_load = load_file(checkpoint_path)
    state_dict = _unflatten_optim_state_dict(state_dict_load, metadata, seperator)
    return state_dict
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/rank_recorder/rank_recorder.py
colossalai/utils/rank_recorder/rank_recorder.py
import atexit
import json
import os
import shutil
import time
from typing import Dict, List

import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import torch
import torch.distributed as dist

cmap = list(mcolors.TABLEAU_COLORS.values())

# NOTE: despite the ".log" suffix, this is used as a directory holding one JSON per rank
LOG_FOLDER = "record.log"
MAX_WAIT_TIME = 20


class Event:
    """A single named, timed interval recorded on one rank."""

    def __init__(self, start: int, end: int, name: str, rank: int) -> None:
        self.start = start
        self.end = end
        self.name = name
        self.rank = rank


class Recorder:
    """Records named time intervals per rank and renders them as a Gantt-style chart."""

    def __init__(self) -> None:
        self.rank_to_history: Dict[int, List[Event]] = {}
        self.base_time = time.time()
        self.temp_event = None

        # export / plotting configuration
        self.export_format = "png"
        self.export_name = "test"
        self.dpi = 500
        self.theme = "dark_background"
        self.figure_width = 30
        self.figure_height = 10
        self.legend_fontsize = 16
        self.device_fontsize = 20
        self.bar_height = 0.2

        if not os.path.exists(LOG_FOLDER):
            os.makedirs(LOG_FOLDER)

    def start(self, name: str, rank: int):
        """Open a new interval named ``name`` on ``rank``."""
        # TODO : add lock to prevent conflict
        torch.cuda.synchronize()
        start_time = time.time()
        self.temp_event = Event(start_time, None, name, rank)

    def end(self):
        """Close the interval opened by ``start`` and append it to the history."""
        assert self.temp_event is not None, "`start` before `end`"
        torch.cuda.synchronize()
        end_time = time.time()
        self.temp_event.end = end_time
        rank = self.temp_event.rank
        if rank not in self.rank_to_history:
            self.rank_to_history[rank] = []
        self.rank_to_history[rank].append(self.temp_event)
        self.temp_event = None

    def get_history(self):
        """Return the per-rank event history."""
        # BUG FIX: the attribute is `rank_to_history`; `self.history` was never
        # defined, so this method used to raise AttributeError.
        return self.rank_to_history

    def __call__(self, name: str, rank: str):
        # stash the pending interval's name/rank for use by the `with` protocol
        self.temp_name = name
        self.temp_rank = rank
        return self

    def __enter__(self):
        name = self.temp_name
        rank = self.temp_rank
        self.start(name, rank)

    def __exit__(self, *args):
        self.end()

    def dump_record(self):
        """Write this process's recorded events to ``LOG_FOLDER/<rank>.json``."""
        rank = dist.get_rank()
        rank_to_history = self.rank_to_history
        records = {"base_time": self.base_time, "content": {}}
        for record_rank in rank_to_history:
            history = rank_to_history[record_rank]
            recs = []
            for event in history:
                rec = {"start": event.start, "end": event.end, "name": event.name}
                recs.append(rec)
            records["content"][record_rank] = recs

        dump_name = f"{rank}.json"
        dump_path = os.path.join(LOG_FOLDER, dump_name)
        with open(dump_path, "w", encoding="utf-8") as f:
            json.dump(records, f, ensure_ascii=False)

    def merge_recode(self):
        """Wait for every rank's dump, merge them into one JSON, then remove the folder."""
        base_time = self.base_time
        world_size = dist.get_world_size()

        # wait for all ranks to finish writing, up to MAX_WAIT_TIME polls
        wait_time = 0
        while True:
            time.sleep(0.1)
            log_num = len(os.listdir(LOG_FOLDER))
            if log_num == world_size:
                break
            wait_time += 1
            if wait_time >= MAX_WAIT_TIME:
                break

        # merge
        logs_path = [os.path.join(LOG_FOLDER, file) for file in os.listdir(LOG_FOLDER)]
        recoders = {}
        for path in logs_path:
            with open(path, "r", encoding="utf-8") as f:
                recs = json.load(f)
            for record_rank in recs["content"]:
                history = recs["content"][record_rank]
                recoders[record_rank] = []
                for rec in history:
                    # re-base every timestamp on this process's base time
                    recoders[record_rank].append(
                        {"start": rec["start"] - base_time, "end": rec["end"] - base_time, "name": rec["name"]}
                    )

        shutil.rmtree(LOG_FOLDER)
        with open(self.export_name + ".json", "w", encoding="utf-8") as f:
            json.dump(recoders, f, ensure_ascii=False)

    def visualize_record(self):
        """Render the merged JSON as a horizontal bar chart, one row per rank."""
        with open(self.export_name + ".json", "r", encoding="utf-8") as f:
            records = json.load(f)
        records = dict(records)
        ranks = list(sorted(records.keys()))
        name_list = {}
        plots = {}
        plt.figure(dpi=self.dpi, figsize=[self.figure_width, self.figure_height])
        plt.style.use(self.theme)

        for rank in ranks:
            rank_records = records[rank]
            for rec in rank_records:
                s = rec["start"]
                e = rec["end"]
                name = rec["name"]
                if name not in name_list:
                    name_list[name] = len(name_list)
                bar = plt.barh(rank, width=e - s, height=self.bar_height, left=s, color=cmap[name_list[name]])
                if name not in plots:
                    plots[name] = bar

        plt.legend(list(plots.values()), list(plots.keys()), loc="upper left", fontsize=self.legend_fontsize)
        plt.yticks(ticks=ranks, labels=[f"Device:{rank}" for rank in ranks], fontsize=self.device_fontsize)
        plt.grid(axis="x")
        plt.savefig("{}.{}".format(self.export_name, self.export_format))

    def exit_worker(self):
        """atexit hook: dump this rank's events; one rank merges and renders."""
        if len(self.rank_to_history) == 0:
            return
        self.dump_record()
        # NOTE(review): the original comment said "rank 0" but the code elects
        # rank 1 to merge/render (using rank 0's base time as the standard) —
        # confirm which rank is intended before changing.
        rank = dist.get_rank()
        if rank == 1:
            # take the base time of rank 0 as standard
            self.merge_recode()
            self.visualize_record()


recorder = Recorder()
atexit.register(recorder.exit_worker)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/rank_recorder/__init__.py
colossalai/utils/rank_recorder/__init__.py
from colossalai.utils.rank_recorder.rank_recorder import recorder __all__ = ["recorder"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/model/utils.py
colossalai/utils/model/utils.py
# This code has been adapted from the DeepSpeed library. # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. import functools from typing import Optional import torch def substitute_init_recursively(cls, func, visited: set): for subcls in cls.__subclasses__(): substitute_init_recursively(subcls, func, visited) if subcls not in visited: func(subcls) visited.add(subcls) def call_to_str(base, *args, **kwargs): """Construct a string representation of a call. Args: base (str): name of the call args (tuple, optional): args to ``base`` kwargs (dict, optional): kwargs supplied to ``base`` Returns: str: A string representation of base(*args, **kwargs) """ name = f"{base}(" if args: name += ", ".join(repr(arg) for arg in args) if kwargs: name += ", " if kwargs: name += ", ".join(f"{key}={repr(arg)}" for key, arg in kwargs.items()) name += ")" return name class InsertPostInitMethodToModuleSubClasses(object): def __init__(self, default_dtype: Optional[torch.dtype] = None): self._old_default_dtype = None self._default_dtype = default_dtype def __enter__(self): r""" Enter the context scope. """ if self._default_dtype is not None: self._old_default_dtype = torch.get_default_dtype() torch.set_default_dtype(self._default_dtype) def preprocess_after(f): @functools.wraps(f) def wrapper(module: torch.nn.Module, *args, **kwargs): f(module, *args, **kwargs) self._post_init_method(module, *args, **kwargs) return wrapper def _enable_class(cls): cls._old_init = cls.__init__ cls.__init__ = preprocess_after(cls.__init__) # The function is called during init subclass. def _init_subclass(cls, **kwargs): cls.__init__ = preprocess_after(cls.__init__) # Replace .__init__() for all existing subclasses of torch.nn.Module # Execution self._post_init_method after the default init function. 
substitute_init_recursively(torch.nn.modules.module.Module, _enable_class, set()) # holding on to the current __init__subclass__ for exit torch.nn.modules.module.Module._old_init_subclass = torch.nn.modules.module.Module.__init_subclass__ # Replace .__init__() for future subclasses of torch.nn.Module torch.nn.modules.module.Module.__init_subclass__ = classmethod(_init_subclass) self._pre_context_exec() return self def __exit__(self, exc_type, exc_value, traceback): if self._default_dtype is not None: torch.set_default_dtype(self._old_default_dtype) def _disable_class(cls): if not hasattr(cls, "_old_init"): raise AttributeError( f"_old_init is not found in the {cls.__name__}, please make sure that you have imported {cls.__name__} before entering the context." ) cls.__init__ = cls._old_init # Replace .__init__() for all existing subclasses of torch.nn.Module substitute_init_recursively(torch.nn.modules.module.Module, _disable_class, set()) # Replace .__init__() for future subclasses of torch.nn.Module torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass self._post_context_exec() # Now that we cleaned up the metaclass injection, raise the exception. if exc_type is not None: return False # To be implemented by inheriting classes def _post_init_method(self, module, *args, **kwargs): pass def _pre_context_exec(self): pass def _post_context_exec(self): pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/model/__init__.py
colossalai/utils/model/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/multi_tensor_apply/__init__.py
colossalai/utils/multi_tensor_apply/__init__.py
from .multi_tensor_apply import MultiTensorApply

# module-level singleton applier; 2048 * 32 = 65536 elements per chunk
multi_tensor_applier = MultiTensorApply(2048 * 32)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/multi_tensor_apply/multi_tensor_apply.py
colossalai/utils/multi_tensor_apply/multi_tensor_apply.py
# modified from https://github.com/NVIDIA/apex/blob/master/apex/multi_tensor_apply/multi_tensor_apply.py
class MultiTensorApply(object):
    """
    Apply an operation to a list of tensors efficiently.

    Args:
        chunk_size (int): Size of a chunk.
    """

    # class-level flags shared by all instances (mirrors the apex API)
    available = False
    warned = False

    def __init__(self, chunk_size):
        # The upstream apex version imported its fused CUDA extension here and
        # recorded a possible ImportError. This port has no import that can fail,
        # so the previous try/except ImportError was an unreachable leftover and
        # has been removed.
        MultiTensorApply.available = True
        self.chunk_size = chunk_size

    def check_avail(self):
        """Raise RuntimeError if the applier is unusable (kept for apex API parity)."""
        if not MultiTensorApply.available:
            raise RuntimeError(
                "Attempted to call MultiTensorApply method, but MultiTensorApply "
                "is not available, possibly because Apex was installed without "
                "--cpp_ext --cuda_ext. Original import error message:",
                # getattr: `import_err` was only ever set on the (now removed)
                # failure path, so guard against AttributeError here.
                getattr(MultiTensorApply, "import_err", None),
            )

    def __call__(self, op, noop_flag_buffer, tensor_lists, *args):
        """Invoke ``op(chunk_size, noop_flag_buffer, tensor_lists, *args)``.

        Args:
            op: multi-tensor kernel taking ``(chunk_size, noop_flag, tensor_lists, ...)``.
            noop_flag_buffer: flag buffer the kernel checks to skip work.
            tensor_lists: lists of tensors the kernel operates over.

        Returns:
            Whatever ``op`` returns.
        """
        self.check_avail()

        return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/tensor_detector/tensor_detector.py
colossalai/utils/tensor_detector/tensor_detector.py
import gc
import inspect
from collections import defaultdict
from typing import Optional

import torch
import torch.nn as nn

LINE_WIDTH = 108
LINE = "-" * LINE_WIDTH + "\n"


class TensorDetector:
    def __init__(
        self, show_info: bool = True, log: Optional[str] = None, include_cpu: bool = False, module: Optional[nn.Module] = None
    ):
        """This class is a detector to detect tensor on different devices.

        Args:
            show_info (bool, optional): whether to print the info on screen, default True.
            log (str, optional): the file name to save the log. Defaults to None.
            include_cpu (bool, optional): whether to detect tensor on cpu, default False.
            module (Optional[:class:`nn.Module`]): when sending an ``nn.Module`` object,
                the detector can name the tensors detected better.
        """
        self.show_info = show_info
        self.log = log
        self.include_cpu = include_cpu
        # tensor_info maps id(tensor) -> [name, device, shape, requires_grad, dtype, mem]
        self.tensor_info = defaultdict(list)
        # tensors recorded by previous detect() calls (used to report removals)
        self.saved_tensor_info = defaultdict(list)
        # ids of tensors newly recorded/updated in the current detect() pass
        self.order = []
        # ids of every tensor seen in the current detect() pass
        self.detected = []
        # every distinct device a tensor was found on
        self.devices = []
        self.info = ""

        self.module = module
        if isinstance(module, nn.Module):
            # if module is an instance of nn.Module, we can name the parameter with its real name
            for name, param in module.named_parameters():
                self.tensor_info[id(param)].append(name)
                self.tensor_info[id(param)].append(param.device)
                self.tensor_info[id(param)].append(param.shape)
                self.tensor_info[id(param)].append(param.requires_grad)
                self.tensor_info[id(param)].append(param.dtype)
                self.tensor_info[id(param)].append(self.get_tensor_mem(param))

    def get_tensor_mem(self, tensor):
        # calculate the memory occupied by a tensor (grad storage included when present)
        memory_size = tensor.element_size() * tensor.storage().size()
        if (tensor.is_leaf or tensor.retains_grad) and tensor.grad is not None:
            grad_memory_size = tensor.grad.element_size() * tensor.grad.storage().size()
            memory_size += grad_memory_size
        return self.mem_format(memory_size)

    def mem_format(self, real_memory_size):
        # format the tensor memory into a reasonable magnitude
        if real_memory_size >= 2**30:
            return str(real_memory_size / (2**30)) + " GB"
        if real_memory_size >= 2**20:
            return str(real_memory_size / (2**20)) + " MB"
        if real_memory_size >= 2**10:
            return str(real_memory_size / (2**10)) + " KB"
        return str(real_memory_size) + " B"

    def collect_tensors_state(self):
        # walk every object the garbage collector tracks, recording live tensors
        for obj in gc.get_objects():
            if torch.is_tensor(obj):
                # skip cpu tensor when include_cpu is false and the tensor we have collected before
                if (not self.include_cpu) and obj.device == torch.device("cpu"):
                    continue
                self.detected.append(id(obj))
                # skip parameters we had added in __init__ when module is an instance of nn.Module for the first epoch
                if id(obj) not in self.tensor_info:
                    name = type(obj).__name__
                    # after backward, we want to update the records, to show you the change
                    if isinstance(self.module, nn.Module) and name == "Parameter":
                        if obj.grad is not None:
                            # with grad attached: match the parameter by grad identity
                            for par_name, param in self.module.named_parameters():
                                if param.requires_grad and param.grad.equal(obj.grad):
                                    name = par_name + " (with grad)"
                        else:
                            # with no grad attached
                            # there will be no new parameters created during running
                            # so it must be in saved_tensor_info
                            continue
                    # we can also marked common tensors as tensor(with grad)
                    if name == "Tensor" and (obj.is_leaf or obj.retains_grad):
                        if obj.grad is not None:
                            name = name + " (with grad)"
                    # in fact, common tensor have no grad
                    # unless you set retain_grad()
                    if id(obj) in self.saved_tensor_info.keys() and name == self.saved_tensor_info[id(obj)][0]:
                        continue

                    self.tensor_info[id(obj)].append(name)
                    self.tensor_info[id(obj)].append(obj.device)
                    self.tensor_info[id(obj)].append(obj.shape)
                    self.tensor_info[id(obj)].append(obj.requires_grad)
                    self.tensor_info[id(obj)].append(obj.dtype)
                    self.tensor_info[id(obj)].append(self.get_tensor_mem(obj))
                    # recorded the order we got the tensor
                    # by this we can guess the tensor easily
                    # it will record every tensor updated this turn
                    self.order.append(id(obj))
                # recorded all different devices
                if obj.device not in self.devices:
                    self.devices.append(obj.device)

    def print_tensors_state(self):
        # render a "+/-" diff table: "+" rows are new/updated tensors, "-" rows disappeared ones
        template_format = "{:3s}{:<30s}{:>10s}{:>20s}{:>10s}{:>20s}{:>15s}"
        self.info += LINE
        self.info += template_format.format(" ", "Tensor", "device", "shape", "grad", "dtype", "Mem")
        self.info += "\n"
        self.info += LINE

        # if a tensor updates this turn, and was recorded before
        # it should be updated in the saved_tensor_info as well
        outdated = [x for x in self.saved_tensor_info.keys() if x in self.order]
        minus = [x for x in self.saved_tensor_info.keys() if x not in self.detected]
        minus = outdated + minus
        if len(self.order) > 0:
            for tensor_id in self.order:
                self.info += template_format.format(
                    "+",
                    str(self.tensor_info[tensor_id][0]),
                    str(self.tensor_info[tensor_id][1]),
                    str(tuple(self.tensor_info[tensor_id][2])),
                    str(self.tensor_info[tensor_id][3]),
                    str(self.tensor_info[tensor_id][4]),
                    str(self.tensor_info[tensor_id][5]),
                )
                self.info += "\n"
        if len(self.order) > 0 and len(minus) > 0:
            self.info += "\n"
        if len(minus) > 0:
            for tensor_id in minus:
                self.info += template_format.format(
                    "-",
                    str(self.saved_tensor_info[tensor_id][0]),
                    str(self.saved_tensor_info[tensor_id][1]),
                    str(tuple(self.saved_tensor_info[tensor_id][2])),
                    str(self.saved_tensor_info[tensor_id][3]),
                    str(self.saved_tensor_info[tensor_id][4]),
                    str(self.saved_tensor_info[tensor_id][5]),
                )
                self.info += "\n"
                # deleted the updated tensor
                self.saved_tensor_info.pop(tensor_id)

        # trace where is the detect()
        # stack()[2]: frame of the caller of detect() (detect -> print_tensors_state)
        locate_info = inspect.stack()[2]
        locate_msg = '"' + locate_info.filename + '" line ' + str(locate_info.lineno)
        self.info += LINE
        self.info += f"Detect Location: {locate_msg}\n"
        for device in self.devices:
            if device == torch.device("cpu"):
                continue
            gpu_mem_alloc = self.mem_format(torch.cuda.memory_allocated(device))
            self.info += f"Total GPU Memory Allocated on {device} is {gpu_mem_alloc}\n"
        self.info += LINE
        self.info += "\n\n"
        if self.show_info:
            print(self.info)
        if self.log is not None:
            with open(self.log + ".log", "a") as f:
                f.write(self.info)

    def detect(self, include_cpu=False):
        """Scan live tensors, print/log the diff since the last call, and roll state forward."""
        self.include_cpu = include_cpu
        self.collect_tensors_state()
        self.print_tensors_state()
        # promote this turn's records to the saved baseline and reset per-turn state
        self.saved_tensor_info.update(self.tensor_info)
        self.tensor_info.clear()
        self.order = []
        self.detected = []
        self.info = ""

    def close(self):
        """Drop all recorded state and release the module reference."""
        self.saved_tensor_info.clear()
        self.module = None
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/utils/tensor_detector/__init__.py
colossalai/utils/tensor_detector/__init__.py
from .tensor_detector import TensorDetector
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/colo_parameter.py
colossalai/tensor/colo_parameter.py
from typing import Optional

import torch

from colossalai.tensor.colo_tensor import ColoTensor
from colossalai.tensor.param_op_hook import ColoParamOpHookManager

from .colo_tensor import _convert_output

# dunder funcs that should still trigger the param-op hooks
WHITE_LIST_FUNCS = {torch.Tensor.__getitem__}
# funcs explicitly excluded from hooking
NO_HOOK_FUNCS = {torch.Tensor.is_floating_point}


def is_no_hook_op(func) -> bool:
    """Return True if ``func`` must bypass the param-op hooks.

    All dunder methods are skipped unless whitelisted, plus anything in
    ``NO_HOOK_FUNCS``.
    """
    return (func.__name__.startswith("__") and func not in WHITE_LIST_FUNCS) or func in NO_HOOK_FUNCS


def filter_colo_parameters(*args, **kwargs):
    """Collect every ColoParameter found in ``args``/``kwargs`` (recursing into lists/tuples).

    Raises:
        RuntimeError: if a dict is encountered among the arguments.
    """
    param_list = []

    def get_colo_parameters(element) -> None:
        if isinstance(element, list) or isinstance(element, tuple):
            for e in element:
                get_colo_parameters(e)
        elif isinstance(element, dict):
            raise RuntimeError("Found Dict: ColoParameter can't deal with complicated arguments.")
        elif isinstance(element, ColoParameter):
            param_list.append(element)
        return

    for a in args:
        get_colo_parameters(a)
    for v in kwargs.values():
        get_colo_parameters(v)

    return param_list


def replace_args(args, kwargs, new_args):
    """Split the flat ``new_args`` back into positional args and keyword values.

    ``new_args`` must be laid out as ``(*args, *kwargs.values())`` — the order
    produced by ``pre_op`` in ``__torch_function__`` below.
    """
    args = new_args[: len(args)]
    for k, v in zip(kwargs.keys(), new_args[len(args) :]):
        kwargs[k] = v
    return tuple(args), kwargs


class ColoParameter(ColoTensor, torch.nn.Parameter):
    r"""A kind of ColoTensor to be considered as a module parameter."""

    def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad: bool = True) -> "ColoParameter":
        # mirror torch.nn.Parameter.__new__: an empty tensor stands in for missing data
        if data is None:
            data = torch.empty(0)
        return torch.Tensor._make_subclass(cls, data, requires_grad)

    @classmethod
    def __torch_function__(cls, func, types, args=..., kwargs=None):
        if kwargs is None:
            kwargs = {}
        if ColoParamOpHookManager.has_hook() and not is_no_hook_op(func):
            params = filter_colo_parameters(*args, **kwargs)
            if len(params) > 0:
                # run pre_op / rewrite_op / post_op outside the torch-function
                # machinery to avoid re-entering this handler
                with torch._C.DisableTorchFunction():
                    new_args = ColoParamOpHookManager.pre_op(params, *args, *kwargs.values())
                args, kwargs = replace_args(args, kwargs, new_args)
                with torch._C.DisableTorchFunction():
                    func = ColoParamOpHookManager.rewrite_op(func)
                ret = super().__torch_function__(func, types, args, kwargs)
                with torch._C.DisableTorchFunction():
                    ret = ColoParamOpHookManager.post_op(params, ret)
                return _convert_output(ret, func)
        return super().__torch_function__(func, types, args, kwargs)

    def __deepcopy__(self, memo):
        if id(self) in memo:
            return memo[id(self)]
        else:
            # clone raw data without triggering __torch_function__ hooks
            with torch._C.DisableTorchFunction():
                data = self.data.clone()
            tensor = ColoParameter(data, self.requires_grad)
            memo[id(self)] = tensor
            return tensor

    def __reduce_ex__(self, proto):
        # Adapted from torch._utils._rebuild_parameter
        # def _rebuild_colo_parameter(data, requires_grad, backward_hooks):
        #     colo_param = ColoParameter(data, requires_grad)
        #     colo_param._backward_hooks = backward_hooks
        #     return colo_param

        # return (
        #     _rebuild_colo_parameter,
        #     (self.data, self.requires_grad, OrderedDict())
        # )

        # TODO(jzy) we don't support object reflection now.
        # distspec cannot be pickled or rebuilt because it's tightly connected to runtime attribute `process_group`.
        raise NotImplementedError
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/shape_consistency.py
colossalai/tensor/shape_consistency.py
import math
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, List, Tuple

import numpy as np
import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, TrainCycleItem
from colossalai.context.singleton_meta import SingletonMeta
from colossalai.tensor.sharding_spec import ShardingSpec, ShardingSpecException
from colossalai.tensor.utils import all_gather_simulator, all_to_all_simulator, mix_gather_simulator, shard_simulator

from .comm_spec import *

__all__ = ["ShapeConsistencyManager", "ShapeConsistencyOptions", "set_shape_consistency_options"]


@dataclass
class ShapeConsistencyOptions:
    """
    ShapeConsistencyOptions is a dataclass which specifies the preferences for shape consistency.
    """

    # TODO: shape consistency option is not implemented yet


def to_global(distributed_tensor: torch.Tensor, sharding_spec: ShardingSpec) -> torch.Tensor:
    """Gather a distributed tensor back to its fully replicated (global) form.

    Builds an empty (fully replicated) target spec over the same device mesh /
    entire shape and lets the manager transform the tensor to it.
    """
    shape_consistency_manager = ShapeConsistencyManager()
    global_sharding_spec = ShardingSpec(sharding_spec.device_mesh, sharding_spec.entire_shape, {})
    with torch.no_grad():
        global_tensor = shape_consistency_manager.apply_for_autoparallel_runtime(
            distributed_tensor, sharding_spec, global_sharding_spec
        )
    return global_tensor


def set_shape_consistency_options(options: ShapeConsistencyOptions):
    """
    Configure the shape consistency manager via function call.
    """
    manager = ShapeConsistencyManager()
    manager.options = options


class ShapeConsistencyManager(metaclass=SingletonMeta):
    """Singleton that searches communication paths between sharding specs.

    It enumerates one-step transforms (all-gather / all-to-all / shard /
    mix-gather), accumulates their communication cost, and greedily finds a
    transform path from a source ShardingSpec to a target ShardingSpec.
    """

    def __init__(self):
        self._options = None
        self._forward_only = False
        self.total_communication_cost = 0
        self.total_transform_steps = 0
        # cache: (str(source_seq), str(target_seq)) -> (transform_path, comm_action_sequence)
        self.cached_spec_pairs_transform_path = {}

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, options_: ShapeConsistencyOptions):
        assert isinstance(options_, ShapeConsistencyOptions)
        self._options = options_

    @property
    def forward_only(self):
        return self._forward_only

    @forward_only.setter
    def forward_only(self, value):
        assert isinstance(value, bool)
        self._forward_only = value

    def get_all_all_gather_spec(
        self, source_spec: ShardingSpec, orig_cost_dict: Dict[str, float]
    ) -> Dict[ShardingSpec, float]:
        """
        Get all valid sharding specs from source_spec with single all-gather operation, and
        accumulate communication cost on origin cost which will finally be used in auto sharding solver.
        For the all-gather operation, we just care about the S dimension.

        Argument:
            source_spec(ShardingSpec): the ShardingSpec of the source_spec.
            orig_cost_dict(Dict[str, float]): the original communication cost before this operation.

        Return:
            valid_spec_dict(Dict[ShardingSpec, float]): all valid sharding specs from source_spec with
                single all-gather operation.

        Example:
            dim_partition_dict = {0: [0], 1: [1]}
            # DistSpec:
            #     shard_sequence: S0,S1,R
            #     device_mesh_shape: (4, 4)
            sharding_spec = ShardingSpec(device_mesh, entire_shape, dim_partition_dict)
            shape_consistency_manager = ShapeConsistencyManager()
            rst_dict = shape_consistency_manager.get_all_all_gather_spec(
                sharding_spec, {'forward': 0, 'backward': 0, 'total': 0})
            print(rst_dict)

        Output:
            {DistSpec: shard_sequence: R,S1,R device_mesh_shape: (4, 4): 0,
             DistSpec: shard_sequence: S0,R,R device_mesh_shape: (4, 4): 0}
        """
        valid_spec_dict = {}
        comm_pattern = CollectiveCommPattern.GATHER_FWD_SPLIT_BWD
        for target_pair in source_spec.dim_partition_dict.items():
            shard_list = all_gather_simulator(target_pair)
            index = target_pair[0]
            new_dim_partition_dict = deepcopy(source_spec.dim_partition_dict)

            # We won't add empty list into dim_partition_dict
            # The key will be popped if the related shard_list is empty
            if shard_list:
                new_dim_partition_dict[index] = shard_list
            else:
                new_dim_partition_dict.pop(index)

            # generate the CommSpec to record the action of source_sharding_spec->new_sharding_spec
            gather_dim = index
            logical_process_axis = target_pair[1][-1]
            comm_spec = CommSpec(
                comm_pattern,
                sharding_spec=source_spec,
                gather_dim=gather_dim,
                # shard_dim will be used during backward
                shard_dim=gather_dim,
                logical_process_axis=logical_process_axis,
                forward_only=self.forward_only,
            )

            # compute the communication cost with CommSpec
            cost_dict = comm_spec.get_comm_cost()

            # generate new sharding spec; invalid specs are simply skipped
            try:
                new_sharding_spec = ShardingSpec(
                    source_spec.device_mesh, source_spec.entire_shape, dim_partition_dict=new_dim_partition_dict
                )
                for phase, cost in cost_dict.items():
                    cost_dict[phase] = cost + orig_cost_dict[phase]
                valid_spec_dict[new_sharding_spec] = (comm_spec, cost_dict)
            except ShardingSpecException:
                pass
        return valid_spec_dict

    def get_all_all_to_all_spec(
        self, source_spec: ShardingSpec, orig_cost_dict: Dict[str, float]
    ) -> Dict[ShardingSpec, float]:
        """
        Get all valid sharding specs from source_spec with single all-to-all operation, and
        accumulate communication cost on origin cost which will finally be used in auto sharding solver.
        For the all-to-all operation, we just care about the pairs containing S dimension.

        Argument:
            source_spec(ShardingSpec): the ShardingSpec of the source_spec.
            orig_cost_dict(Dict[str, float]): the original communication cost before this operation.

        Return:
            valid_spec_dict(Dict[ShardingSpec, float]): all valid sharding specs from source_spec with
                single all-to-all operation.

        Example:
            dim_partition_dict = {0: [0], 1: [1]}
            # DistSpec:
            #     shard_sequence: S0,S1,R
            #     device_mesh_shape: (4, 4)
            sharding_spec = ShardingSpec(device_mesh, entire_shape, dim_partition_dict)
            shape_consistency_manager = ShapeConsistencyManager()
            rst_dict = shape_consistency_manager.get_all_all_to_all_spec(
                sharding_spec, {'forward': 0, 'backward': 0, 'total': 0})
            print(rst_dict)

        Output:
            {DistSpec: shard_sequence: S01,R,R device_mesh_shape: (4, 4): 0,
             DistSpec: shard_sequence: R,S1,S0 device_mesh_shape: (4, 4): 0,
             DistSpec: shard_sequence: S0,R,S1 device_mesh_shape: (4, 4): 0}
        """
        valid_spec_dict = {}
        comm_pattern = CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD
        tensor_dims = len(source_spec.entire_shape)
        for f_index in range(tensor_dims - 1):
            for b_index in range(f_index + 1, tensor_dims):
                # skip (R, R) cases
                if f_index not in source_spec.dim_partition_dict and b_index not in source_spec.dim_partition_dict:
                    continue
                else:
                    if f_index in source_spec.dim_partition_dict:
                        # skip (S01, R) -> (R, S01) is NOT allowed
                        if len(source_spec.dim_partition_dict[f_index]) >= 2:
                            continue
                        f_target_pair = (f_index, deepcopy(source_spec.dim_partition_dict[f_index]))
                    else:
                        f_target_pair = (f_index, [])
                    if b_index in source_spec.dim_partition_dict:
                        # skip (R, S01) -> (S01, R) is NOT allowed
                        if len(source_spec.dim_partition_dict[b_index]) >= 2:
                            continue
                        b_target_pair = (b_index, deepcopy(source_spec.dim_partition_dict[b_index]))
                    else:
                        b_target_pair = (b_index, [])

                # skip (S1, S0) -> S10
                if f_target_pair[1] and b_target_pair[1] and f_target_pair[1][0] >= b_target_pair[1][0]:
                    continue
                f_shard_list, b_shard_list = all_to_all_simulator(f_target_pair, b_target_pair)
                f_index = f_target_pair[0]
                b_index = b_target_pair[0]

                # generate the CommSpec to record the action of source_sharding_spec->new_sharding_spec
                if len(f_shard_list) < len(f_target_pair[1]):
                    gather_dim = f_index
                    shard_dim = b_index
                    logical_process_axis = f_target_pair[1][-1]
                else:
                    gather_dim = b_index
                    shard_dim = f_index
                    logical_process_axis = b_target_pair[1][-1]
                comm_spec = CommSpec(
                    comm_pattern,
                    sharding_spec=source_spec,
                    gather_dim=gather_dim,
                    shard_dim=shard_dim,
                    logical_process_axis=logical_process_axis,
                    forward_only=self.forward_only,
                )

                # compute the communication cost with CommSpec
                cost_dict = comm_spec.get_comm_cost()
                new_dim_partition_dict = deepcopy(source_spec.dim_partition_dict)

                # We won't add empty list into dim_partition_dict
                # The key will be popped if the related shard_list is empty
                if f_shard_list:
                    new_dim_partition_dict[f_index] = f_shard_list
                else:
                    new_dim_partition_dict.pop(f_index)
                if b_shard_list:
                    new_dim_partition_dict[b_index] = b_shard_list
                else:
                    new_dim_partition_dict.pop(b_index)

                # generate new sharding spec; invalid specs are simply skipped
                try:
                    new_sharding_spec = ShardingSpec(
                        source_spec.device_mesh, source_spec.entire_shape, dim_partition_dict=new_dim_partition_dict
                    )
                    for phase, cost in cost_dict.items():
                        cost_dict[phase] = cost + orig_cost_dict[phase]
                    valid_spec_dict[new_sharding_spec] = (comm_spec, cost_dict)
                except ShardingSpecException:
                    pass

        return valid_spec_dict

    def get_all_shard_spec(self, source_spec: ShardingSpec, orig_cost_dict):
        """
        Get all valid sharding specs from source_spec with single shard operation, and
        accumulate communication cost on origin cost which will finally be used in auto sharding solver.
        For the sharding operation, we just care about legal sharding dimensions.

        Argument:
            source_spec(ShardingSpec): the ShardingSpec of the source_spec.
            orig_cost_dict(Dict[str, float]): the original communication cost before this operation.

        Return:
            valid_spec_dict(Dict[ShardingSpec, float]): all valid sharding specs from source_spec with
                single shard operation.

        Example:
            dim_partition_dict = {0: [0]}
            # DistSpec:
            #     shard_sequence: S0,R,R
            #     device_mesh_shape: (4, 4)
            sharding_spec = ShardingSpec(device_mesh, entire_shape, dim_partition_dict)
            shape_consistency_manager = ShapeConsistencyManager()
            rst_dict = shape_consistency_manager.get_all_shard_spec(
                sharding_spec, {'forward': 0, 'backward': 0, 'total': 0})
            print(rst_dict)

        Output:
            {DistSpec: shard_sequence: S01,R,R device_mesh_shape: (4, 4): 0,
             DistSpec: shard_sequence: S0,S1,R device_mesh_shape: (4, 4): 0,
             DistSpec: shard_sequence: S0,R,S1 device_mesh_shape: (4, 4): 0}
        """
        valid_spec_dict = {}
        comm_pattern = CollectiveCommPattern.SPLIT_FWD_GATHER_BWD

        # legal sharding dims means the mesh_id is still available to use.
        legal_sharding_dims = [i for i in range(len(source_spec.device_mesh.shape))]
        for dim, shard_list in source_spec.dim_partition_dict.items():
            for element in shard_list:
                legal_sharding_dims.remove(element)
        if len(legal_sharding_dims) == 0:
            return valid_spec_dict

        tensor_dims = len(source_spec.entire_shape)
        for index in range(tensor_dims):
            if index not in source_spec.dim_partition_dict:
                shard_list_list = shard_simulator((index, []), legal_sharding_dims)
            else:
                shard_list_list = shard_simulator((index, source_spec.dim_partition_dict[index]), legal_sharding_dims)
            if not shard_list_list:
                continue
            for shard_list in shard_list_list:
                new_dim_partition_dict = deepcopy(source_spec.dim_partition_dict)
                new_dim_partition_dict[index] = shard_list

                # generate the CommSpec to record the action of source_sharding_spec->new_sharding_spec
                shard_dim = index
                logical_process_axis = shard_list[-1]
                comm_spec = CommSpec(
                    comm_pattern,
                    sharding_spec=source_spec,
                    gather_dim=shard_dim,
                    shard_dim=shard_dim,
                    logical_process_axis=logical_process_axis,
                    forward_only=self.forward_only,
                )

                # compute the communication cost with CommSpec
                cost_dict = comm_spec.get_comm_cost()

                # generate new sharding spec; invalid specs are simply skipped
                try:
                    new_sharding_spec = ShardingSpec(
                        source_spec.device_mesh, source_spec.entire_shape, dim_partition_dict=new_dim_partition_dict
                    )
                    for phase, cost in cost_dict.items():
                        cost_dict[phase] = cost + orig_cost_dict[phase]
                    valid_spec_dict[new_sharding_spec] = (comm_spec, cost_dict)
                except ShardingSpecException:
                    pass
        return valid_spec_dict

    def get_all_mix_gather_spec(
        self, source_spec: ShardingSpec, orig_cost_dict: Dict[str, float]
    ) -> Dict[ShardingSpec, float]:
        """
        Enumerate mix-gather transforms, which gather two sharded tensor
        dimensions to fully replicated in one step:

        S0S1 -> RR
        S1S0 -> RR
        S01R -> RR
        RS01 -> RR
        """
        valid_spec_dict = {}
        comm_pattern = CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD
        tensor_dims = len(source_spec.entire_shape)
        for f_index in range(tensor_dims - 1):
            for b_index in range(f_index + 1, tensor_dims):
                if (f_index not in source_spec.dim_partition_dict) and (b_index not in source_spec.dim_partition_dict):
                    continue
                else:
                    # BUGFIX: build the target pairs *before* inspecting them; the
                    # original code read f_target_pair/b_target_pair in the skip
                    # checks before they were assigned (UnboundLocalError).
                    if f_index in source_spec.dim_partition_dict:
                        f_target_pair = (f_index, deepcopy(source_spec.dim_partition_dict[f_index]))
                        # skip (S10, R) -> (R, R)
                        if len(f_target_pair[1]) == 2 and f_target_pair[1][0] >= f_target_pair[1][1]:
                            continue
                    else:
                        f_target_pair = (f_index, [])
                    if b_index in source_spec.dim_partition_dict:
                        b_target_pair = (b_index, deepcopy(source_spec.dim_partition_dict[b_index]))
                        # skip (R, S10) -> (R, R)
                        if len(b_target_pair[1]) == 2 and b_target_pair[1][0] >= b_target_pair[1][1]:
                            continue
                    else:
                        b_target_pair = (b_index, [])

                gather_dim, logical_process_axes = mix_gather_simulator(f_target_pair, b_target_pair)
                comm_spec = CommSpec(
                    comm_pattern,
                    sharding_spec=source_spec,
                    gather_dim=gather_dim,
                    logical_process_axis=logical_process_axes,
                    forward_only=self.forward_only,
                    mix_gather=True,
                )
                cost_dict = comm_spec.get_comm_cost()
                # the result of a mix-gather is fully replicated
                new_dim_partition_dict = {}

                # generate new sharding spec; invalid specs are simply skipped
                try:
                    new_sharding_spec = ShardingSpec(
                        source_spec.device_mesh, source_spec.entire_shape, dim_partition_dict=new_dim_partition_dict
                    )
                    for phase, cost in cost_dict.items():
                        cost_dict[phase] = cost + orig_cost_dict[phase]
                    valid_spec_dict[new_sharding_spec] = (comm_spec, cost_dict)
                except ShardingSpecException:
                    pass
        return valid_spec_dict

    def get_all_one_step_transform_spec(self, source_spec: ShardingSpec, orig_cost_dict) -> Dict[ShardingSpec, float]:
        """
        Get all valid sharding specs from source_spec with one step transform, and
        accumulate communication cost on origin cost which will finally be used in auto sharding solver.

        Note:
            all-gather will eliminate a sharding dimension, all-to-all will keep sharding dimension
            same as before, and shard will add a sharding dimension. Therefore, the results of above
            operations are mutually exclusive, and we could safely put them together.

        Argument:
            source_spec(ShardingSpec): the ShardingSpec of the source_spec.
            orig_cost_dict(Dict[str, float]): the original communication cost before this operation.

        Return:
            valid_spec_dict(Dict[ShardingSpec, float]): all valid sharding specs from source_spec
                with one step transform.
        """
        valid_spec_dict = {}
        valid_spec_dict.update(self.get_all_all_gather_spec(source_spec, orig_cost_dict))
        valid_spec_dict.update(self.get_all_all_to_all_spec(source_spec, orig_cost_dict))
        valid_spec_dict.update(self.get_all_shard_spec(source_spec, orig_cost_dict))
        return valid_spec_dict

    def mem_cost(self, comm_action_sequence: List[CommSpec]) -> TrainCycleItem:
        """memory cost of the communication action sequence

        Args:
            comm_action_sequence (List[CommSpec]): list of communication actions

        Returns:
            TrainCycleItem: memory (numel) cost of such comm_action_sequence
        """

        def compute_shape(sharding_spec: ShardingSpec):
            # local (per-device) shape implied by the sharding spec
            shape = sharding_spec.entire_shape
            new_shape = []
            for dim, shard in sharding_spec.dim_partition_dict.items():
                new_shape.append(shape[dim] // len(shard))
            return new_shape

        def gather_analysis(comm_spec: CommSpec, discard_input: bool, alloc_numel: int, peak_numel: int):
            """analyze all_gather memory footprint
            all_gather will allocate memory for the output tensor, and there will be temp memory for
            all_gather operation, which is twice the size of output tensor

            Args:
                comm_spec (CommSpec): input CommSpec
                discard_input (bool): whether to discard the input tensor
                alloc_numel (int): current allocated numel
                peak_numel (int): current peak numel
            """
            input_shape = compute_shape(comm_spec.sharding_spec)
            input_numel = np.prod(input_shape)
            output_numel = input_numel * comm_spec.device_mesh.shape[comm_spec.logical_process_axis]
            peak_numel = max(peak_numel, alloc_numel + output_numel * 2)
            alloc_numel += output_numel
            if discard_input:
                alloc_numel -= input_numel
            return alloc_numel, peak_numel

        def split_analysis(comm_spec: CommSpec, discard_input: bool, alloc_numel: int, peak_numel: int):
            """analyze split memory footprint
            split will allocate memory for the output tensor if we don't apply shard on the first
            dimension of the input tensor. If we apply shard on the first dimension, the
            `torch.tensor.contiguous()` will not generate new tensor in this case, so no memory
            will be allocated.

            Args:
                comm_spec (CommSpec): input CommSpec
                discard_input (bool): whether to discard the input tensor
                alloc_numel (int): current allocated numel
                peak_numel (int): current peak numel
            """
            shard_dim = comm_spec.shard_dim
            if shard_dim != 0:
                # if we don't shard the tensor on the first dimension, the split action will
                # generate a new tensor
                input_shape = compute_shape(comm_spec.sharding_spec)
                input_numel = np.prod(input_shape)
                output_numel = input_numel // comm_spec.device_mesh.shape[comm_spec.logical_process_axis]
                alloc_numel += output_numel
                peak_numel = max(peak_numel, alloc_numel)
                if discard_input:
                    alloc_numel -= input_numel
            else:
                # if we shard the tensor on the first dimension, the split action will not generate
                # a new tensor, and as it will preserve a reference to the input tensor, we could
                # override the discard_input option here
                # NOTE: this special case might fail in some weird cases, e.g. if we have three split
                # actions in the comm actions sequence, the first split action operates on the second
                # dimension, the second on the first dimension, and the third, again, on the second
                # dimension. After the first two actions we will allocate memory the same size as the
                # output of the first split action. However, the third split action will discard the
                # input tensor, while it actually should discard the tensor generated by the first
                # split action, so in the current memory estimation framework we will overestimate
                # the memory usage. But the above case is kind of weird, and I think we could ignore
                # it for now.
                pass
            return alloc_numel, peak_numel

        def reduce_analysis(comm_spec: CommSpec, discard_input: bool, alloc_numel: int, peak_numel: int):
            """
            a dummy function for reduce memory footprint analysis, as the reduce action doesn't
            allocate extra memory
            """
            return alloc_numel, peak_numel

        def all2all_analysis(comm_spec: CommSpec, discard_input: bool, alloc_numel: int, peak_numel: int):
            """analyze all_to_all memory footprint
            all_to_all will allocate memory for the output tensor, and temp memory of all_to_all
            action is twice the size of output tensor if we shard input tensor on the first
            dimension, otherwise the temp memory is three times the size of output tensor

            Args:
                comm_spec (CommSpec): input CommSpec
                discard_input (bool): whether to discard the input tensor
                alloc_numel (int): current allocated numel
                peak_numel (int): current peak numel
            """
            input_shape = compute_shape(comm_spec.sharding_spec)
            input_numel = np.prod(input_shape)
            output_numel = input_numel
            shard_dim = comm_spec.shard_dim
            if shard_dim != 0:
                peak_numel = max(peak_numel, alloc_numel + output_numel * 3)
            else:
                peak_numel = max(peak_numel, alloc_numel + output_numel * 2)
            alloc_numel += output_numel
            if discard_input:
                alloc_numel -= input_numel
            return alloc_numel, peak_numel

        def identity_analysis(comm_spec: CommSpec, discard_input: bool, alloc_numel: int, peak_numel: int):
            """
            a dummy function for identity memory footprint analysis, as the identity action doesn't
            allocate extra memory
            """
            return alloc_numel, peak_numel

        pattern_to_func_dict = {
            CollectiveCommPattern.GATHER_FWD_SPLIT_BWD: [gather_analysis, split_analysis],
            CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD: [all2all_analysis, all2all_analysis],
            CollectiveCommPattern.SPLIT_FWD_GATHER_BWD: [split_analysis, gather_analysis],
            CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD: [reduce_analysis, identity_analysis],
            CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD: [identity_analysis, reduce_analysis],
            CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD: [],
        }

        fwd_actions = []
        bwd_actions = []

        # construct forward and backward comm actions sequence
        for comm_spec in comm_action_sequence:
            comm_spec: CommSpec
            fwd_action, bwd_action = pattern_to_func_dict[comm_spec.comm_pattern]
            fwd_actions.append(fwd_action)
            bwd_actions.append(bwd_action)

        # analyze memory footprint of forward comm actions sequence
        fwd_alloc_numel = 0
        fwd_peak_numel = 0
        for idx, action_spec_pair in enumerate(zip(fwd_actions, comm_action_sequence)):
            # the first forward comm action will not discard input
            fwd_action, comm_spec = action_spec_pair
            fwd_alloc_numel, fwd_peak_numel = (
                fwd_action(comm_spec, False, fwd_alloc_numel, fwd_peak_numel)
                if idx == 0
                else fwd_action(comm_spec, True, fwd_alloc_numel, fwd_peak_numel)
            )

        # analyze memory footprint for backward comm actions sequence
        bwd_alloc_numel = 0
        bwd_peak_numel = 0
        for idx, action_spec_pair in enumerate(zip(reversed(bwd_actions), reversed(comm_action_sequence))):
            bwd_action, comm_spec = action_spec_pair
            bwd_alloc_numel, bwd_peak_numel = (
                bwd_action(comm_spec, False, bwd_alloc_numel, bwd_peak_numel)
                if idx == 0
                else bwd_action(comm_spec, True, bwd_alloc_numel, bwd_peak_numel)
            )

        fwd_mem = MemoryCost(activation=fwd_alloc_numel, temp=fwd_peak_numel - fwd_alloc_numel)
        bwd_mem = MemoryCost(activation=bwd_alloc_numel, temp=bwd_peak_numel - bwd_alloc_numel)
        total_mem = MemoryCost(activation=fwd_alloc_numel + bwd_alloc_numel)

        return TrainCycleItem(fwd_mem, bwd_mem, total_mem)

    def shape_consistency(
        self, source_spec: ShardingSpec, target_spec: ShardingSpec
    ) -> Tuple[List[ShardingSpec], List[CommSpec], float]:
        """
        This method will find a path to transform source_spec to target_spec with
        a greedy algorithm.
        The basic idea is:
        Step1:
            Generate all one-step transform sequences from source_spec.
        Step2:
            Pick the 'best' sharding spec following the heuristic function.
        Step3:
            Repeat above steps until the source spec transform to target spec.

        During finding the transform path, communication cost will be accumulated, and it
        will be finally used in auto parallel solver. Additionally, to avoid repeating the
        path search in runtime, we cached all solved path in auto parallel strategy building
        time, which could handle most of cases in runtime.

        Argument:
            source_spec(ShardingSpec): ShardingSpec of the source activation.
            target_spec(ShardingSpec): ShardingSpec of the target activation.

        Return:
            transform_path(List[ShardingSpec]): The transform path from source_spec to target_spec,
                it contains the source_spec and target_spec.
            comm_action_sequence(List[CommSpec]): Keep the communication operations to complete the
                shape consistency in order.
            total_cost_dict(Dict[str, float]): total cost to complete shape consistency transform.

        Example:
            dim_partition_source = {1: [0, 1]}
            dim_partition_target = {0: [0, 1]}
            # DistSpec:
            #     shard_sequence: R,S01,R
            #     device_mesh_shape: (4, 4)
            sharding_spec_source = ShardingSpec(device_mesh, entire_shape, dim_partition_source)
            # DistSpec:
            #     shard_sequence: S01,R,R
            #     device_mesh_shape: (4, 4)
            sharding_spec_target = ShardingSpec(device_mesh, entire_shape, dim_partition_target)
            transform_path, comm_action_sequence, total_cost = shape_consistency_manager.shape_consistency(
                sharding_spec_source, sharding_spec_target)
            print(f'transform_path: {transform_path}')
            print(f'comm_action_sequence: {comm_action_sequence}')
            print(f'total_cost: {total_cost}')

        Output:
            transform_path: [DistSpec: shard_sequence: R,S01,R device_mesh_shape: (4, 4),
                             DistSpec: shard_sequence: R,S0,R device_mesh_shape: (4, 4),
                             DistSpec: shard_sequence: S0,R,R device_mesh_shape: (4, 4),
                             DistSpec: shard_sequence: S01,R,R device_mesh_shape: (4, 4)]
            comm_action_sequence: [CommSpec:(comm_pattern:allgather, gather_dim:1, logical_process_axis:1),
                                   CommSpec:(comm_pattern:all2all, gather_dim:1, shard_dim:0, logical_process_axis: 0),
                                   CommSpec:(comm_pattern:shard, shard_dim:0, logical_process_axis:1)]
            total_cost: 12294.402000000002
        """
        MAX_TRANSFORM_STEPS = 20
        total_cost_dict = {"forward": 0, "backward": 0, "total": 0}
        total_steps = 0
        transform_path = []
        comm_action_sequence = []
        spec_pairs = (str(source_spec.sharding_sequence), str(target_spec.sharding_sequence))
        self.cached_spec_pairs_transform_path[spec_pairs] = (None, None)

        # We do nothing if the sharding spec is all the same.
        if source_spec.sharding_sequence_difference(target_spec) == 0:
            self.cached_spec_pairs_transform_path[spec_pairs] = (transform_path, comm_action_sequence)
            return (transform_path, comm_action_sequence, total_cost_dict)

        temp_sharding_spec = source_spec
        transform_path.append(temp_sharding_spec)
        # To avoid dead loop, the loop will break after MAX_TRANSFORM_STEPS transforms
        while total_steps <= MAX_TRANSFORM_STEPS:
            valid_transform_spec_dict = self.get_all_one_step_transform_spec(temp_sharding_spec, total_cost_dict)
            best_difference_score = math.inf

            for sharding_spec, info_pairs in valid_transform_spec_dict.items():
                comm_spec, cost_dict = info_pairs
                spec_difference = sharding_spec.sharding_sequence_difference(target_spec)

                if spec_difference == 0:
                    for phase, cost in total_cost_dict.items():
                        total_cost_dict[phase] = cost + cost_dict[phase]
                    transform_path.append(sharding_spec)
                    comm_action_sequence.append(comm_spec)
                    self.cached_spec_pairs_transform_path[spec_pairs] = (transform_path, comm_action_sequence)
                    return (transform_path, comm_action_sequence, total_cost_dict)

                if spec_difference < best_difference_score:
                    temp_sharding_spec = sharding_spec
                    temp_cost_dict = cost_dict
                    temp_comm_spec = comm_spec
                    best_difference_score = spec_difference

            # BUGFIX: if no candidate transform exists, bail out with the explicit
            # RuntimeError below instead of crashing with UnboundLocalError on
            # temp_cost_dict/temp_comm_spec.
            if math.isinf(best_difference_score):
                break

            transform_path.append(temp_sharding_spec)
            comm_action_sequence.append(temp_comm_spec)
            for phase, cost in total_cost_dict.items():
                total_cost_dict[phase] = cost + temp_cost_dict[phase]
            total_steps += 1

        raise RuntimeError(f"Could not find a valid transform path within {MAX_TRANSFORM_STEPS} steps.")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/comm_spec.py
colossalai/tensor/comm_spec.py
import operator from enum import Enum from functools import reduce import torch import torch.distributed as dist from torch.distributed import ReduceOp __all__ = [ "CollectiveCommPattern", "CommSpec", ] def _all_gather(tensor, comm_spec): """ Implement all gather operation on device mesh based on information provided by comm_spec. """ process_groups = comm_spec.device_mesh.get_process_group_for_all_axes() process_group = process_groups[comm_spec.logical_process_axis] tensor_list = [ torch.zeros(tensor.shape, dtype=tensor.dtype, device=tensor.device) for _ in range(comm_spec.device_mesh.shape[comm_spec.logical_process_axis]) ] # without this contiguous operation, the all gather may get some unexpected results. tensor = tensor.contiguous() dist.all_gather(tensor_list, tensor, group=process_group) output = torch.cat(tuple(tensor_list), comm_spec.gather_dim).contiguous() return output def _split(tensor, comm_spec): """ Implement shard operation on device mesh based on information provided by comm_spec. """ process_groups = comm_spec.device_mesh.get_process_group_for_all_axes() process_group = process_groups[comm_spec.logical_process_axis] dim = comm_spec.shard_dim length = tensor.shape[comm_spec.shard_dim] // dist.get_world_size(process_group) start = length * dist.get_rank(process_group) output = torch.narrow(tensor, dim, start, length).contiguous() return output def _all_to_all(tensor, comm_spec): """ Implement all to all operation on device mesh based on information provided by comm_spec. 
""" process_groups = comm_spec.device_mesh.get_process_group_for_all_axes() process_group = process_groups[comm_spec.logical_process_axis] world_size = dist.get_world_size(process_group) new_shape = list(tensor.shape) new_shape[comm_spec.shard_dim] = new_shape[comm_spec.shard_dim] // world_size new_shape = torch.Size(new_shape) output_tensor_list = [torch.zeros(new_shape, dtype=tensor.dtype, device=tensor.device) for _ in range(world_size)] dim = comm_spec.shard_dim length = tensor.shape[comm_spec.shard_dim] // world_size input_tensor_list = [torch.narrow(tensor, dim, length * i, length).contiguous() for i in range(world_size)] group = process_group dist.all_to_all(output_tensor_list, input_tensor_list, group) output = torch.cat(tuple(output_tensor_list), comm_spec.gather_dim).contiguous() return output def _all_reduce(tensor, comm_spec, async_op=False): """ Implement all reduce operation on device mesh based on information provided by comm_spec. """ process_groups = comm_spec.device_mesh.get_process_group_for_all_axes() process_group = process_groups[comm_spec.logical_process_axis] if not tensor.is_contiguous(): tensor = tensor.contiguous() dist.all_reduce(tensor, op=ReduceOp.SUM, group=process_group, async_op=async_op) return tensor def _mix_gather(tensor, comm_spec): """ Implement mix gather operation on device mesh based on information provided by comm_spec. Mix gather is the all-gather operation on all devices in the device_mesh(FlattenDeviceMesh) of the comm_spec. It is different from _all_gather because _mix_gather does all-gather in two dimensions of device mesh, while _all_gather only does all-gather in one dimension. 
Assume index of f and b target pairs are 'f' and 'b' ShardingSpec => gather_dim, logical_process_axes S0S1 => [b, f], (1, 0) S1S0 => [b, f], (0, 1) S01R => [f], (1, 1) RS01 => [b], (1, 1) Example: mesh_shape = (2,4) # [[0, 1, 2, 3], # [4, 5, 6, 7]] # return {0: [0, 4, 1, 5, 2, 6, 3, 7], 1: [0, 1, 2, 3, 4, 5, 6, 7]} S0S1: leading_group_dim = 1 process_group = "[0, 1, 2, 3, 4, 5, 6, 7]" tensor_list = [(0,0),(0,1),(0,2),(0,3),(1,0),(1,1),(1,2),(1,3)] # [(slice_id_f, slice_id_b),...] mesh_shape = (2,4) cat_slice = [4,2] tmp_tensor_list = [(...,shape[f],shape[b]*4,...),(...,shape[f],shape[b]*4,...)] tmp_tensor_list[0] = torch.cat(((0,0),(0,1),(0,2),(0,3)), dim=b) tmp_tensor_list[1] = torch.cat(((1,0),(1,1),(1,2),(1,3)), dim=b) output = torch.cat((tmp_tensor_list[0],tmp_tensor_list[1]), dim=a) S1S0: leading_group_dim = 0 process_group = "[0, 4, 1, 5, 2, 6, 3, 7]" tensor_list = [(0,0),(0,1),(1,0),(1,1),(2,0),(2,1),(3,0),(3,1)] mesh_shape = (2,4) cat_slice = [2,4] tmp_tensor_list = [(...,shape[f],shape[b]*2,...),(...,shape[f],shape[b]*2,...),(...,shape[f],shape[b]*2,...),(...,shape[f],shape[b]*2,...)] tmp_tensor_list[0] = torch.cat(((0,0),(0,1)), dim=b) tmp_tensor_list[1] = torch.cat(((1,0),(1,1)), dim=b) tmp_tensor_list[2] = torch.cat(((2,0),(2,1)), dim=b) tmp_tensor_list[3] = torch.cat(((3,0),(3,1)), dim=b) S10R: leading_group_dim = 0 process_group = "[0, 4, 1, 5, 2, 6, 3, 7]" tensor_list = [(0,0),(1,0),(2,0),(3,0),(4,0),(5,0),(6,0),(7,0)] S01R: leading_group_dim = 1 process_group = "[0, 1, 2, 3, 4, 5, 6, 7]" tensor_list = [(0,0),(1,0),(2,0),(3,0),(4,0),(5,0),(6,0),(7,0)] """ total_slices = comm_spec.device_mesh.shape[0] tensor_list = [torch.zeros(tensor.shape, dtype=tensor.dtype, device=tensor.device) for _ in range(total_slices)] leading_group_dim = comm_spec.logical_process_axes[0] assert len(comm_spec.device_mesh.process_groups_dict) == 1 _, process_group = comm_spec.device_mesh.process_groups_dict[0][0] process_number_list = 
comm_spec.device_meshes.process_number_dict[leading_group_dim] # Global all_gather dist.all_gather(tensor_list, tensor, group=process_group) # This is very ugly. I'm figuring out more elegant methods tensor_list_sorted = [ torch.zeros(tensor.shape, dtype=tensor.dtype, device=tensor.device) for _ in range(total_slices) ] for i in range(total_slices): tensor_list_sorted[i] = tensor_list[process_number_list[i]] tensor_list = tensor_list_sorted if comm_spec.logical_process_axes[0] == comm_spec.logical_process_axes[1]: output = torch.cat(tuple(tensor_list), comm_spec.gather_dim[0]).contiguous() else: mesh_shape = comm_spec.device_meshes.shape cat_slice = [mesh_shape[comm_spec.logical_process_axes[0]], mesh_shape[comm_spec.logical_process_axes[1]]] tmp_tensor_shape = list(tensor.shape) tmp_tensor_shape[comm_spec.gather_dim[0]] *= cat_slice[0] tmp_tensor_shape = torch.Size(tmp_tensor_shape) tmp_tensor_list = [ torch.zeros(tmp_tensor_shape, dtype=tensor.dtype, device=tensor.device) for _ in range(cat_slice[1]) ] for i in range(cat_slice[1]): tmp_tensor_list[i] = torch.cat( tuple(tensor_list[i * cat_slice[0] : (i + 1) * cat_slice[0]]), comm_spec.gather_dim[0] ).contiguous() output = torch.cat(tuple(tmp_tensor_list), comm_spec.gather_dim[1]).contiguous() return output def _mix_split(tensor, comm_spec): """ Implement mix split operation. Mix split is only called for the backward of mix gather (Use ctx to keep consistent) Mix split shards the tensor on device mesh based on information provided by comm_spec. It is different from split because _mix_split shards the tensor in two dimensions of device mesh, while _split only shards in one dimension. 
Assume index of f and b target pairs are 'f' and 'b' S0S1 => [b, f], (1, 0) S1S0 => [b, f], (0, 1) S01R => [f], (0, 0) RS01 => [b], (0, 0) Example: mesh_shape = (2,4) # [[0, 1, 2, 3], # [4, 5, 6, 7]] # return {0: [0, 4, 1, 5, 2, 6, 3, 7], 1: [0, 1, 2, 3, 4, 5, 6, 7]} """ mesh_shape = comm_spec.device_meshes.shape dim = comm_spec.gather_dim total_slices = comm_spec.device_mesh.shape[0] # Get global rank rank = dist.get_rank() leading_group_dim = comm_spec.logical_process_axes[0] process_number_list = comm_spec.device_meshes.process_number_dict[leading_group_dim] rank = process_number_list.index(rank) if comm_spec.logical_process_axes[0] == comm_spec.logical_process_axes[1]: length = tensor.shape[dim[0]] // total_slices start = length * rank output = torch.narrow(tensor, dim[0], start, length).contiguous() else: tensor_shape = [tensor.shape[dim[0]], tensor.shape[dim[1]]] rank_slice = [mesh_shape[comm_spec.logical_process_axes[0]], mesh_shape[comm_spec.logical_process_axes[1]]] length = [tensor_shape[0] // rank_slice[0], tensor_shape[1] // rank_slice[1]] start = [(rank % rank_slice[0]) * length[0], (rank // rank_slice[0]) * length[1]] tmp_output = torch.narrow(tensor, dim[0], start[0], length[0]).contiguous() output = torch.narrow(tmp_output, dim[1], start[1], length[1]).contiguous() return output class _ReduceGrad(torch.autograd.Function): """ A customized communication operation which forward is an identity operation, backward is all_reduce operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. """ @staticmethod def symbolic(graph, input_): return input_ @staticmethod def forward(ctx, input_, comm_spec): ctx.comm_spec = comm_spec return input_ @staticmethod def backward(ctx, grad_output): return _all_reduce(grad_output, ctx.comm_spec), None class _ReduceInput(torch.autograd.Function): """ A customized communication operation which forward is all_reduce operation, backward is an identity operation. 
Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. """ @staticmethod def symbolic(graph, input_): return _all_reduce(input_) @staticmethod def forward(ctx, input_, comm_spec): return _all_reduce(input_, comm_spec) @staticmethod def backward(ctx, grad_output): return grad_output, None class _SplitForwardGatherBackward(torch.autograd.Function): """ A customized communication operation which forward is split operation, backward is an all gather operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. """ @staticmethod def symbolic(graph, input_): return _split(input_) @staticmethod def forward(ctx, input_, comm_spec): ctx.comm_spec = comm_spec return _split(input_, comm_spec) @staticmethod def backward(ctx, grad_output): return _all_gather(grad_output, ctx.comm_spec), None class _GatherForwardSplitBackward(torch.autograd.Function): """ A customized communication operation which forward is an all gather operation, backward is split operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. """ @staticmethod def symbolic(graph, input_): return _all_gather(input_) @staticmethod def forward(ctx, input_, comm_spec): ctx.comm_spec = comm_spec return _all_gather(input_, comm_spec) @staticmethod def backward(ctx, grad_output): return _split(grad_output, ctx.comm_spec), None class _AllToAll(torch.autograd.Function): """ A customized communication operation which forward is an all to all operation, backward is an all to all operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. 
""" @staticmethod def symbolic(graph, input_): return _all_to_all(input_) @staticmethod def forward(ctx, input_, comm_spec): output = _all_to_all(input_, comm_spec) comm_spec_for_backward = CommSpec( comm_pattern=comm_spec.comm_pattern, sharding_spec=comm_spec.sharding_spec, gather_dim=comm_spec.shard_dim, shard_dim=comm_spec.gather_dim, logical_process_axis=comm_spec.logical_process_axis, ) ctx.comm_spec = comm_spec_for_backward return output @staticmethod def backward(ctx, grad_outputs): return _all_to_all(grad_outputs, ctx.comm_spec), None class _MixGatherForwardMixSplitBackward(torch.autograd.Function): @staticmethod def symbolic(graph, input_): return _mix_gather(input_) @staticmethod def forward(ctx, input_, comm_spec): ctx.comm_spec = comm_spec return _mix_gather(input_, comm_spec) @staticmethod def backward(ctx, grad_output): return _mix_split(grad_output, ctx.comm_spec), None def reduce_grad(input_, comm_spec): return _ReduceGrad.apply(input_, comm_spec) def reduce_input(input_, comm_spec): return _ReduceInput.apply(input_, comm_spec) def split_forward_gather_backward(input_, comm_spec): return _SplitForwardGatherBackward.apply(input_, comm_spec) def gather_forward_split_backward(input_, comm_spec): return _GatherForwardSplitBackward.apply(input_, comm_spec) def all_to_all(input_, comm_spec): return _AllToAll.apply(input_, comm_spec) def mixgather_forward_split_backward(input_, comm_spec): return _MixGatherForwardMixSplitBackward.apply(input_, comm_spec) class CollectiveCommPattern(Enum): GATHER_FWD_SPLIT_BWD = "gather_fwd_split_bwd" ALL2ALL_FWD_ALL2ALL_BWD = "all2all_fwd_all2all_bwd" SPLIT_FWD_GATHER_BWD = "split_fwd_gather_bwd" ALLREDUCE_FWD_IDENTITY_BWD = "all_reduce_fwd_identity_bwd" IDENTITY_FWD_ALLREDUCE_BWD = "identity_fwd_all_reduce_bwd" MIXGATHER_FWD_SPLIT_BWD = "mixgather_fwd_split_bwd" class CommSpec: """ Communication spec is used to record the communication action. It has two main functions: 1. 
Compute the communication cost which will be used in auto parallel solver. 2. Convert the communication spec to real action which will be used in runtime. It contains comm_pattern to determine the communication method, sharding_spec to determine the communication size, gather_dim and shard_dim to determine the buffer shape, and logical_process_axis Argument: comm_pattern(CollectiveCommPattern): describe the communication method used in this spec. sharding_spec(ShardingSpec): This is sharding spec of the tensor which will join the communication action. gather_dim(int, Optional): The gather_dim of the tensor will be gathered. shard_dim(int, Optional): The shard_dim of the tensor will be sharded. logical_process_axis(Union(int, List[int]), Optional): The mesh_dim to implement the communication action. """ def __init__( self, comm_pattern, sharding_spec, gather_dim=None, shard_dim=None, logical_process_axis=None, forward_only=False, mix_gather=False, ): self.comm_pattern = comm_pattern self.sharding_spec = sharding_spec self.gather_dim = gather_dim self.shard_dim = shard_dim self.logical_process_axis = logical_process_axis self.forward_only = forward_only if isinstance(self.logical_process_axis, list): if not mix_gather: self.device_mesh = self.sharding_spec.device_mesh.flatten() self.logical_process_axis = 0 else: self.device_meshes = self.sharding_spec.device_mesh.flatten_device_meshes self.device_mesh = self.sharding_spec.device_mesh.flatten_device_mesh # Create a new member `logical_process_axes` to distinguish from original flatten self.logical_process_axes = logical_process_axis else: self.device_mesh = self.sharding_spec.device_mesh def __repr__(self): res_list = ["CommSpec:("] if self.comm_pattern == CollectiveCommPattern.GATHER_FWD_SPLIT_BWD: res_list.append(f"comm_pattern:GATHER_FWD_SPLIT_BWD, ") res_list.append(f"gather_dim:{self.gather_dim}, ") res_list.append(f"shard_dim:{self.shard_dim}, ") 
res_list.append(f"logical_process_axis:{self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD: res_list.append(f"comm_pattern:ALL2ALL_FWD_ALL2ALL_BWD, ") res_list.append(f"gather_dim:{self.gather_dim}, ") res_list.append(f"shard_dim:{self.shard_dim}, ") res_list.append(f"logical_process_axis: {self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.SPLIT_FWD_GATHER_BWD: res_list.append(f"comm_pattern:SPLIT_FWD_GATHER_BWD, ") res_list.append(f"gather_dim:{self.gather_dim}, ") res_list.append(f"shard_dim:{self.shard_dim}, ") res_list.append(f"logical_process_axis:{self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD: res_list.append(f"comm_pattern:ALLREDUCE_FWD_IDENTITY_BWD, ") res_list.append(f"logical_process_axis:{self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD: res_list.append(f"comm_pattern:IDENTITY_FWD_ALLREDUCE_BWD, ") res_list.append(f"logical_process_axis:{self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD: res_list.append(f"comm_pattern:MIXGATHER_FWD_SPLIT_BWD, ") res_list.append(f"gather_dim:{self.gather_dim}, ") res_list.append(f"logical_process_axes:{self.logical_process_axes})") return "".join(res_list) def get_comm_cost(self): """ For all_gather, all2all, and all_reduce operation, the formula provided in DeviceMesh with alpha-beta model is used to compute the communication cost. For shard operation, it is an on-chip operation, so the communication cost is zero. 
""" comm_size = reduce(operator.mul, self.sharding_spec.get_sharded_shape_per_device(), 1) cost_dict = {} if self.comm_pattern == CollectiveCommPattern.GATHER_FWD_SPLIT_BWD: forward_communication_cost = self.device_mesh.all_gather_cost(comm_size, self.logical_process_axis) # give a tiny cost to shard backward_communication_cost = 100 if self.comm_pattern == CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD: forward_communication_cost = self.device_mesh.all_to_all_cost(comm_size, self.logical_process_axis) # grad should have same shape as input tensor # all to all operation has same logical process axis as forward. backward_communication_cost = self.device_mesh.all_to_all_cost(comm_size, self.logical_process_axis) if self.comm_pattern == CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD: forward_communication_cost = self.device_mesh.all_reduce_cost(comm_size, self.logical_process_axis) backward_communication_cost = 0 if self.comm_pattern == CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD: forward_communication_cost = 0 backward_communication_cost = self.device_mesh.all_reduce_cost(comm_size, self.logical_process_axis) if self.comm_pattern == CollectiveCommPattern.SPLIT_FWD_GATHER_BWD: # give a tiny cost to shard forward_communication_cost = 100 backward_communication_cost = self.device_mesh.all_gather_cost(comm_size, self.logical_process_axis) if self.comm_pattern == CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD: # no need for axis because all devices are used in mix_gather forward_communication_cost = self.device_mesh.mix_gather_cost(comm_size) backward_communication_cost = 100 if self.forward_only: cost_dict["forward"] = forward_communication_cost cost_dict["backward"] = 0 cost_dict["total"] = cost_dict["forward"] + cost_dict["backward"] else: cost_dict["forward"] = forward_communication_cost cost_dict["backward"] = backward_communication_cost cost_dict["total"] = cost_dict["forward"] + cost_dict["backward"] return cost_dict def covert_spec_to_action(self, tensor): 
""" Convert CommSpec into runtime action, implement real collection communication to target tensor. The collection communication action is directed by the CommSpec. Argument: tensor(torch.Tensor): Tensor stored in each device, which could be different in different ranks. """ if self.comm_pattern in pattern_to_func_dict: tensor = pattern_to_func_dict[self.comm_pattern](tensor, self) else: tensor = tensor return tensor pattern_to_func_dict = { CollectiveCommPattern.GATHER_FWD_SPLIT_BWD: gather_forward_split_backward, CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD: all_to_all, CollectiveCommPattern.SPLIT_FWD_GATHER_BWD: split_forward_gather_backward, CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD: reduce_input, CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD: reduce_grad, CollectiveCommPattern.MIXGATHER_FWD_SPLIT_BWD: mixgather_forward_split_backward, }
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/colo_tensor.py
colossalai/tensor/colo_tensor.py
from functools import lru_cache from typing import Callable, Set import torch INPALCE_MAPPING = { torch.Tensor.add_: torch.Tensor.add, torch.Tensor.sub_: torch.Tensor.sub, torch.Tensor.mul_: torch.Tensor.mul, torch.Tensor.div_: torch.Tensor.div, } @lru_cache(None) def _get_my_nowrap_functions() -> Set[Callable]: Tensor = torch.Tensor return { Tensor._base.__get__, Tensor.grad.__get__, Tensor._grad.__get__, Tensor.data.__get__, # make .data returns torch.Tensor rather than ColoTensor } def _convert(output): if isinstance(output, torch.Tensor) and not isinstance(output, ColoTensor): output.__class__ = ColoTensor elif isinstance(output, (list, tuple)): output = type(output)(_convert(o) for o in output) return output def _convert_output(output, func): if func in _get_my_nowrap_functions(): return output return _convert(output) class ColoTensor(torch.Tensor): """Data Structure for Tensor in Colossal-AI. It is a subclass of torch.Tensor. It is only used to trigger the torch function hook. Args: data (torch.Tensor): a torch tensor used as the payload the colotensor. """ torch_major = int(torch.__version__.split(".")[0]) torch_minor = int(torch.__version__.split(".")[1]) def __new__(cls, data: torch.Tensor) -> "ColoTensor": """ The signature of the __new__ has to be consistent with the torch.Tensor. Args: data (torch.Tensor): a torch tensor used as the payload the colotensor. Returns: ColoTensor: a ColoTensor wrappers the data. 
""" if data is None: data = torch.empty(0) return torch.Tensor._make_subclass(cls, data, data.requires_grad) @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): if kwargs is None: kwargs = {} if not all(issubclass(cls, t) for t in types): return NotImplemented if cls.torch_major > 1 or (cls.torch_major == 1 and cls.torch_minor >= 12): # in order to trigger pre-op hook in the forward of checkpoint module # we have to capture the `backward` function # and make sure that it does not in `torch._C.DisableTorchFunction()` context if func is torch.Tensor.backward: assert len(args) == 1 # only has 1 parameter backward_tensor = torch.Tensor(args[0]) tensor_kwargs = {k: torch.Tensor(v) if torch.is_tensor(v) else v for k, v in kwargs.items()} return backward_tensor.backward(**tensor_kwargs) # replace the in-place function if func in INPALCE_MAPPING: func = INPALCE_MAPPING[func] # set the 'inplace' kwargs to False if "inplace" in kwargs: kwargs["inplace"] = False with torch._C.DisableTorchFunction(): ret = func(*args, **kwargs) return _convert_output(ret, func) def __deepcopy__(self, memo): if id(self) in memo: return memo[id(self)] else: with torch._C.DisableTorchFunction(): data = self.data.clone() tensor = ColoTensor(data) memo[id(self)] = tensor return tensor
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/utils.py
colossalai/tensor/utils.py
from typing import Dict, Iterator, List, Tuple, Union import torch import torch.nn as nn from colossalai.tensor.colo_tensor import ColoTensor def all_gather_simulator(target_pair): """ Simulating all-gather operation, analyze the communication cost and simulate the influence of the DimSpec. We don't allow uncontiguous layout, such as all-gather(S012)->S02 is NOT allowed. Therefore, all gather operation just remove the last element in shard list, e.g.: all-gather(S01) -> S0 Argument: target_pair(Tuple[int, List[int]]): The first element is the dimension of tensor to be sharded, and the second element describes which logical axis will be sharded in that dimension. """ _, shard_list = target_pair new_shard_list = shard_list[:-1] return new_shard_list def all_to_all_simulator(f_target_pair, b_target_pair): """ Simulating all-to-all operation, analyze the communication cost and simulate the influence of the DimSpec. We BANNED all representations which shard_list in decreasing order, such as S10, so all-to-all(S0, S1) -> RS01 is NOT allowed. Therefore, if the behind shard_list is not None, we just extend it to the front shard_list. Argument: target_pair(Tuple[int, List[int]]): The first element is the dimension of tensor to be sharded, and the second element describes which logical axis will be sharded in that dimension. e.g.: all-to-all(S0, S1) -> [S01, R] all-to-all(S0, R) -> [R, S0] Otherwise, we extend the front shard_list to behind. e.g.: all-to-all(R, S1) -> [S1, R] Argument: target_pair(Tuple[int, List[int]]): The first element is the dimension of tensor to be sharded, and the second element describes which logical axis will be sharded in that dimension. 
""" _, f_shard_list = f_target_pair _, b_shard_list = b_target_pair if not len(b_shard_list): b_shard_list.extend(f_shard_list) f_shard_list = [] else: f_shard_list.extend(b_shard_list) b_shard_list = [] return f_shard_list, b_shard_list def shard_simulator(target_pair, legal_sharding_dims): """ Simulating shard operation, analyze the communication cost(always ZERO) and simulate the influence of the DimSpec. We don't allow uncontiguous layout, such as shard(S0)->S02 is NOT allowed. In addition, We BANNED all representations which shard_list in decreasing order, such as S10, so shard(S0) -> S10 is NOT allowed. Therefore, for the R dimension, we could just append any legal sharding dim on it. e.g.: shard(R) -> S0 For the S dimension, we need to make sure the shard_list after sharding still keep rising order. e.g: shard(S0) -> S01 Argument: target_pair(Tuple[int, List[int]]): The first element is the dimension of tensor to be sharded, and the second element describes which logical axis will be sharded in that dimension. 
""" _, shard_list = target_pair shard_list_list = [] for dim in legal_sharding_dims: if len(shard_list) != 0 and dim <= shard_list[-1]: continue new_shard_list = shard_list + [dim] shard_list_list.append(new_shard_list) return shard_list_list def mix_gather_simulator(f_target_pair, b_target_pair): """ Assume index of f and b target pairs are 'f' and 'b' S0S1 => Input: (f, [0]), (b, [1]) Output: [b, f], (1, 0) S1S0 => Input: (f, [1]), (b, [0]) Output: [b, f], (0, 1) S01R => Input: (f, [0, 1]), (b, []) Output: [f], (1, 1) RS01 => Input: (f, []), (b, [0, 1]) Output: [b], (1, 1) S10R => Input: (f, [0, 1]), (b, []) Output: [f], (0, 0) RS10 => Input: (f, []), (b, [0, 1]) Output: [b], (0, 0) """ if f_target_pair[1] and b_target_pair[1]: leading_dim = b_target_pair[1] > f_target_pair[1] return [b_target_pair[0], f_target_pair[0]], [int(leading_dim), int(leading_dim ^ 1)] if f_target_pair[1]: leading_dim = f_target_pair[1][0] < f_target_pair[1][1] return [ f_target_pair[0], ], [int(leading_dim), int(leading_dim)] if b_target_pair[1]: leading_dim = b_target_pair[1][0] < b_target_pair[1][1] return [ b_target_pair[0], ], [int(leading_dim), int(leading_dim)] # The function is credited to PyTorch Team def named_params_with_colotensor( module: nn.Module, prefix: str = "", recurse: bool = True, ) -> Iterator[Tuple[str, Union[nn.Parameter, ColoTensor]]]: r"""Returns an iterator over module parameters (together with the ColoTensor parameters), yielding both the name of the parameter as well as the parameter itself. This is typically passed to a :class:torchshard._shard.sharded_optim.ShardedOptimizer Args: prefix (str): prefix to prepend to all parameter names. recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. 
Yields: (string, Union[Tensor, ColoTensor]): Tuple containing the name and parameter (or ColoTensor parameter) Example: >>> model = torch.nn.Linear(*linear_size) >>> delattr(model.weight) >>> setattr(model.weight, ColoTensor(...)) >>> for name, param in named_params_with_colotensor(model): >>> if name in ['weight']: >>> print(param.size()) """ modules = module.named_modules(prefix=prefix) if recurse else [(prefix, module)] memo = set() for mod_prefix, mod in modules: # find all sharded tensor params for name, val in vars(mod).items(): if isinstance(val, ColoTensor) and val not in memo: memo.add(val) name = mod_prefix + ("." if mod_prefix else "") + name yield name, val # find all nn.Parameters for name, val in module.named_parameters(): yield name, val def _convert_tensor(tensor: torch.Tensor) -> ColoTensor: return ColoTensor(tensor) def convert_parameter(module: torch.nn.Module, param_name: str): # Perform some validation first. if not hasattr(module, param_name): raise ValueError(f"module: {module} does not have parameter with name: {param_name}") tensor = getattr(module, param_name) if not isinstance(tensor, torch.Tensor): raise ValueError( f"Expected {type(module).__name__}.{param_name} to be a Tensor, but found {type(tensor).__name__}" ) if not tensor.is_contiguous(): raise ValueError(f"param: {param_name} is not a contiguous Tensor") st = _convert_tensor(tensor) # Replace param with ColoTensor. # Need to delete the attribute first since param_name might be # torch.nn.Parameter and can't be replaced with ColoTensor which is # not torch.nn.Parameter. delattr(module, param_name) # Now we can set the attribute appropriately. setattr(module, param_name, st) def convert_dim_partition_dict(dim_size: int, dim_partition_dict: Dict[int, List[int]]) -> Dict[int, List[int]]: """ This method is used to convert the negative dim value to positive. 
""" dims_to_convert = [] for dim, mesh_list in dim_partition_dict.items(): if dim < 0: dims_to_convert.append(dim) for dim in dims_to_convert: dim_partition_dict.pop(dim) dim_partition_dict[dim_size + dim] = mesh_list return dim_partition_dict def merge_same_dim_mesh_list(dim_size: int, dim_partition_dict: Dict[int, List[int]]) -> Dict[int, List[int]]: """ This method is used to merge the different key value which points to same physical position. For example: dim_partition_dict: {1 :[0], -1: [1]} or {1: [0], 1: [1]} for a 2d tensor, the dim 1 and -1 point same physical position. In this method, above dim_partition_dict will be converted to {1: [0, 1]} """ converted_dim_partition_dict = {} for dim, mesh_list in dim_partition_dict.items(): if dim < 0: dim = dim_size + dim if dim not in converted_dim_partition_dict: converted_dim_partition_dict[dim] = mesh_list else: converted_dim_partition_dict[dim].extend(mesh_list) return converted_dim_partition_dict
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/param_op_hook.py
colossalai/tensor/param_op_hook.py
from abc import ABC, abstractmethod
from contextlib import contextmanager
from typing import Any, List, Tuple

import torch
from torch.utils._pytree import TreeSpec, tree_flatten, tree_unflatten


class ColoParamOpHook(ABC):
    """
    Hook triggered by each operation whose operands contain ColoParameter.

    Subclasses must implement ``pre_forward``, ``post_forward``,
    ``pre_backward`` and ``post_backward``; each receives the list of
    ColoParameter operands involved in the operation.
    """

    @abstractmethod
    def pre_forward(self, params: List[torch.Tensor]) -> None:
        pass

    @abstractmethod
    def post_forward(self, params: List[torch.Tensor]) -> None:
        pass

    @abstractmethod
    def pre_backward(self, params: List[torch.Tensor]) -> None:
        pass

    @abstractmethod
    def post_backward(self, params: List[torch.Tensor]) -> None:
        pass

    def rewrite_op(self, func) -> Any:
        # Default implementation: leave the op untouched.
        return func


class ColoParamOpHookManager:
    """
    Static registry of the currently-active param op hooks.

    Only ``use_hooks(*hooks)`` is meant to be called by users; it swaps the
    active hook tuple for the duration of a ``with`` block. Nested calls are
    allowed and restore the previous hook set on exit.
    """

    hooks: Tuple[ColoParamOpHook, ...] = tuple()

    @staticmethod
    @contextmanager
    def use_hooks(*hooks: ColoParamOpHook):
        """Temporarily install ``hooks`` as the active hook set.

        Example:
            >>> with ColoParamOpHookManager.use_hooks(*hooks):
            >>>     do_something()
            >>> with ColoParamOpHookManager.use_hooks():
            >>>     # no hooks active here
            >>>     do_something()
        """
        prev_hooks = ColoParamOpHookManager.hooks
        try:
            ColoParamOpHookManager.hooks = hooks
            yield
        finally:
            ColoParamOpHookManager.hooks = prev_hooks

    @staticmethod
    def _trigger_pre_forward(params: List[torch.Tensor]) -> None:
        for h in ColoParamOpHookManager.hooks:
            h.pre_forward(params)

    @staticmethod
    def _trigger_post_forward(params: List[torch.Tensor]) -> None:
        for h in ColoParamOpHookManager.hooks:
            h.post_forward(params)

    @staticmethod
    def _trigger_pre_backward(params: List[torch.Tensor]) -> None:
        for h in ColoParamOpHookManager.hooks:
            h.pre_backward(params)

    @staticmethod
    def _trigger_post_backward(params: List[torch.Tensor]) -> None:
        for h in ColoParamOpHookManager.hooks:
            h.post_backward(params)

    @staticmethod
    def pre_op(params: List[torch.Tensor], *args: Any) -> list:
        ColoParamOpHookManager._trigger_pre_forward(params)
        # Autograd functions only recognize torch.Tensor, so the input pytree
        # is flattened first. Only leaves that participate in autograd are
        # routed through the autograd function; otherwise every output would
        # pick up a grad_fn even when the corresponding input needs no grad.
        grad_leaves, plain_leaves, grad_flags, spec = _flatten_grad_args(args)
        hooked_leaves = PreFwdPostBwd.apply(params, *grad_leaves)
        return _merge_args(hooked_leaves, plain_leaves, grad_flags, spec)

    @staticmethod
    def post_op(params: List[torch.Tensor], arg: Any) -> Any:
        ColoParamOpHookManager._trigger_post_forward(params)
        # The op output may be an arbitrary pytree (e.g. a tuple) — flatten it
        # the same way as the inputs before threading it through autograd.
        grad_leaves, plain_leaves, grad_flags, spec = _flatten_grad_args(arg)
        hooked_leaves = PostFwdPreBwd.apply(params, *grad_leaves)
        return _merge_args(hooked_leaves, plain_leaves, grad_flags, spec)

    @staticmethod
    def has_hook() -> bool:
        return bool(ColoParamOpHookManager.hooks)

    @staticmethod
    def rewrite_op(func) -> Any:
        for h in ColoParamOpHookManager.hooks:
            func = h.rewrite_op(func)
        return func


class PreFwdPostBwd(torch.autograd.Function):
    # Identity in forward; fires the post_backward hooks when grads flow back.
    @staticmethod
    def forward(ctx, params, *args):
        ctx.params = params
        return args

    @staticmethod
    def backward(ctx, *grads):
        ColoParamOpHookManager._trigger_post_backward(ctx.params)
        return (None,) + grads


class PostFwdPreBwd(torch.autograd.Function):
    # Identity in forward; fires the pre_backward hooks when grads flow back.
    @staticmethod
    def forward(ctx, params, *args):
        ctx.params = params
        return args

    @staticmethod
    def backward(ctx, *grads):
        ColoParamOpHookManager._trigger_pre_backward(ctx.params)
        return (None,) + grads


def _is_grad_tensor(obj) -> bool:
    # A leaf participates in autograd if it is a tensor that already has a
    # grad_fn or requires grad itself.
    return torch.is_tensor(obj) and (obj.grad_fn is not None or obj.requires_grad)


def _flatten_grad_args(args) -> Tuple[list, list, List[bool], TreeSpec]:
    """Split a pytree into grad-participating tensors and everything else.

    Returns ``(grad_args, other_args, grad_flags, spec)`` where ``grad_flags``
    records, per flattened leaf, which bucket it went into.
    """
    leaves, spec = tree_flatten(args)
    grad_args: list = []
    other_args: list = []
    grad_flags: List[bool] = []
    for leaf in leaves:
        is_grad = _is_grad_tensor(leaf)
        grad_flags.append(is_grad)
        (grad_args if is_grad else other_args).append(leaf)
    return grad_args, other_args, grad_flags, spec


def _merge_args(grad_args, other_args, grad_flags, spec):
    """Inverse of ``_flatten_grad_args``: re-interleave the leaves and unflatten."""
    grad_iter = iter(grad_args)
    other_iter = iter(other_args)
    leaves = []
    for flag in grad_flags:
        leaves.append(next(grad_iter) if flag else next(other_iter))
    return tree_unflatten(leaves, spec)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/__init__.py
colossalai/tensor/__init__.py
from .colo_parameter import ColoParameter from .colo_tensor import ColoTensor from .comm_spec import CollectiveCommPattern, CommSpec from .param_op_hook import ColoParamOpHook, ColoParamOpHookManager from .utils import convert_dim_partition_dict, convert_parameter, merge_same_dim_mesh_list, named_params_with_colotensor __all__ = [ "ColoTensor", "convert_parameter", "named_params_with_colotensor", "ColoParameter", "ColoParamOpHook", "ColoParamOpHookManager", "CommSpec", "CollectiveCommPattern", "convert_dim_partition_dict", "merge_same_dim_mesh_list", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/sharding_spec.py
colossalai/tensor/sharding_spec.py
import operator
from functools import reduce

import torch

from colossalai.device.device_mesh import DeviceMesh

from .utils import merge_same_dim_mesh_list

# NOTE(review): "ShardingException" was previously listed here but never
# defined anywhere in this module, so `from ... import *` raised
# AttributeError. Export the class that actually exists.
__all__ = ["_DimSpec", "ShardingSpecException", "ShardingSpec"]

ALLGATHER_COST = 20
SHARD_COST = 5
STEP_PENALTY = 6
NAN = "nan"


class _DimSpec:
    """
    Sharding spec for a single dimension of a sharded tensor.

    It records which logical device-mesh axes this dimension is sharded over
    and provides a lookup for the transition cost between two dim specs.
    This class is used internally in ShardingSpec.

    Argument:
        shard_list(List[int]): if shard_list is empty, the dim spec is of 'R'
            (replica) type. Otherwise each element is a logical mesh axis the
            data is sharded over in this dimension.
    """

    # Class-level cache of the pairwise difference table; built lazily.
    _DIFFERENCE_DICT = None

    def __init__(self, shard_list):
        self.is_replica = len(shard_list) == 0
        self.shard_list = shard_list

    def __eq__(self, other):
        return str(self) == str(other)

    def __hash__(self):
        # __eq__ is defined, so a consistent __hash__ must be defined too,
        # otherwise _DimSpec becomes unhashable. str(self) fully determines
        # equality, so it is a valid hash key.
        return hash(str(self))

    def __repr__(self):
        if self.is_replica:
            return "R"
        return "S" + "".join(str(dim) for dim in self.shard_list)

    @property
    def difference_dict(self):
        """
        Return the difference dict, lazily building it on first access.

        Return:
            difference_dict(Dict[Tuple[str, str], Union[int, float, str]]): difference dict
        """
        # Assign the cache on the class, not the instance: the previous
        # instance-attribute assignment shadowed the class attribute and
        # rebuilt the table once per _DimSpec object.
        if _DimSpec._DIFFERENCE_DICT is None:
            _DimSpec._DIFFERENCE_DICT = self._build_difference_2d_dict()
        return _DimSpec._DIFFERENCE_DICT

    def difference(self, other):
        """
        The difference between two _DimSpec.

        Argument:
            other(_DimSpec): the dim spec to compare with.

        Return:
            difference(int): the difference between two _DimSpec.

        Example:
            dim_spec = _DimSpec([0])
            other_dim_spec = _DimSpec([0, 1])
            print(dim_spec.difference(other_dim_spec))

        Output:
            5
        """
        return self.difference_dict[(str(self), str(other))]

    @classmethod
    def _build_difference_2d_dict(cls):
        """
        Build the pairwise transition-cost table for the 2D device mesh case.

        Each (source_spec, target_spec) pair maps to an estimated
        communication cost, or NAN when no conversion rule applies.
        """
        source_spec_list = ["R", "S0", "S1", "S01"]
        target_spec_list = ["R", "S0", "S1", "S01"]
        difference_dict = {}
        for source_spec in source_spec_list:
            for target_spec in target_spec_list:
                source_shard_list = cls._convert_str_to_shard_list(source_spec)
                target_shard_list = cls._convert_str_to_shard_list(target_spec)

                # source same as target
                if source_shard_list == target_shard_list:
                    difference = 0
                # all_gather(source) -> target
                elif (
                    len(source_shard_list) == len(target_shard_list) + 1
                    and source_shard_list[:-1] == target_shard_list
                ):
                    difference = ALLGATHER_COST
                # shard(source) -> target
                elif (
                    len(source_shard_list) == len(target_shard_list) - 1
                    and source_shard_list == target_shard_list[:-1]
                    and target_shard_list[-1] not in source_shard_list
                ):
                    difference = SHARD_COST
                # S1 -> S0 or S0 -> S1
                elif len(source_shard_list) == len(target_shard_list):
                    # source -> R -> target
                    difference = ALLGATHER_COST + STEP_PENALTY + SHARD_COST
                # R -> S01
                elif len(source_shard_list) == len(target_shard_list) - 2:
                    difference = SHARD_COST + STEP_PENALTY + SHARD_COST
                # S01 -> R
                elif len(source_shard_list) == len(target_shard_list) + 2:
                    difference = ALLGATHER_COST + STEP_PENALTY + ALLGATHER_COST
                # S1 -> S01
                elif len(source_shard_list) == len(target_shard_list) - 1:
                    difference = ALLGATHER_COST + STEP_PENALTY + SHARD_COST + STEP_PENALTY + SHARD_COST
                # S01 -> S1
                elif len(source_shard_list) == len(target_shard_list) + 1:
                    difference = ALLGATHER_COST + STEP_PENALTY + ALLGATHER_COST + STEP_PENALTY + SHARD_COST
                else:
                    difference = NAN
                difference_dict[(source_spec, target_spec)] = difference

        return difference_dict

    @staticmethod
    def _convert_str_to_shard_list(str_spec):
        """
        Convert str_spec into shard_list.

        Argument:
            str_spec(str): dim spec in str type.

        Raises:
            ValueError: if str_spec is not one of 'R', 'S0', 'S1', 'S01'.
        """
        if str_spec == "R":
            return []
        if str_spec == "S0":
            return [0]
        if str_spec == "S1":
            return [1]
        if str_spec == "S01":
            return [0, 1]
        # Previously fell through and returned None silently, which surfaced
        # later as a confusing TypeError; fail loudly at the source instead.
        raise ValueError(f"Unsupported dim spec string: {str_spec!r}")


class ShardingSpecException(Exception):
    pass


class ShardingOutOfIndexError(ShardingSpecException):
    pass


class DuplicatedShardingDimensionError(ShardingSpecException):
    pass


class ShardingNotDivisibleError(ShardingSpecException):
    pass


class ShardingSpec:
    """
    Sharding spec for a tensor: it records the logical device mesh this tensor
    belongs to, the entire (unsharded) shape of the tensor, and the sharding
    sequence, which looks like [R, R, S0, S1].

    Argument:
        device_mesh(DeviceMesh): A logical view of a physical mesh.
        entire_shape(torch.Size): The entire shape of tensor before sharded.
        dim_partition_dict(Dict[int, List[int]], optional): The key is the
            dimension of the tensor to be sharded, and the value describes
            which logical mesh axes shard that dimension.
        sharding_sequence(List[_DimSpec], optional): A straight view of
            ShardingSpec looks like [R, R, S0, S1].
    """

    def __init__(
        self, device_mesh: DeviceMesh, entire_shape: torch.Size, dim_partition_dict=None, sharding_sequence=None
    ):
        self.device_mesh = device_mesh

        if isinstance(entire_shape, (list, tuple)):
            entire_shape = torch.Size(entire_shape)
        self.entire_shape = entire_shape
        self.dim_partition_dict = dim_partition_dict
        self.sharding_sequence = sharding_sequence

        # Exactly one of the two representations must be supplied; the other
        # is derived from it so both stay available afterwards.
        if self.sharding_sequence is None:
            assert (
                self.dim_partition_dict is not None
            ), f"dim_partition_dict should not be None, if sharding_sequence is NoneType object."
            self.dim_partition_dict = merge_same_dim_mesh_list(
                dim_size=len(entire_shape), dim_partition_dict=self.dim_partition_dict
            )
            self.convert_dict_to_shard_sequence()
        elif self.dim_partition_dict is None:
            assert (
                self.sharding_sequence is not None
            ), f"sharding_sequence should not be None, if dim_partition_dict is NoneType object."
            self.convert_shard_sequence_to_dict()

        self._sanity_check()

    def __repr__(self):
        res_list = ["DistSpec:"]
        res_list.append(f"\n\tshard_sequence: " + ",".join(str(dimspec) for dimspec in self.sharding_sequence))
        res_list.append(f"\n\tdevice_mesh_shape: {self.device_mesh.shape}")
        return " ".join(res_list)

    def _sanity_check(self):
        """Validate the spec: mesh axes used at most once, tensor dims in
        range, and every sharded dim divisible by its device count."""
        # make sure all axes in logical device mesh are only used once
        dim_check_list = list(range(self.device_mesh.logical_mesh_id.dim()))
        for dim, shard_list in self.dim_partition_dict.items():
            for element in shard_list:
                if element in dim_check_list:
                    dim_check_list.remove(element)
                else:
                    raise DuplicatedShardingDimensionError(
                        f"find an invalid sharding axis {element} in dim_partition_dict in tensor dimension {dim}."
                    )

        # make sure that the dimension is not out of index
        for dim in self.dim_partition_dict.keys():
            if dim >= len(self.entire_shape):
                raise ShardingOutOfIndexError(
                    f"The dim_partition_dict specifies to shard dimension {dim} but the entire_shape only has {len(self.entire_shape)} dimensions"
                )

        # make sure each sharded dimension is divisible by its device count
        for dim, shard_list in self.dim_partition_dict.items():
            tensor_dim_size = self.entire_shape[dim]
            num_devices = 1
            for element in shard_list:
                num_devices *= self.device_mesh.shape[element]
            if tensor_dim_size % num_devices != 0:
                raise ShardingNotDivisibleError(
                    f"The size of dimension at index {dim} is {tensor_dim_size}, it cannot be sharded over {num_devices} devices."
                )

    def convert_dict_to_shard_sequence(self):
        """
        Convert dim_partition_dict into a list of _DimSpec and assign it to
        sharding_sequence.
        """
        # Build a fresh replica spec per dimension (avoids sharing one
        # _DimSpec instance across all replicated dims).
        sharding_sequence = [_DimSpec([]) for _ in range(len(self.entire_shape))]
        for dim, shard_list in self.dim_partition_dict.items():
            sharding_sequence[dim] = _DimSpec(shard_list)
        self.sharding_sequence = sharding_sequence

    def convert_shard_sequence_to_dict(self):
        """
        Convert sharding_sequence into dim_partition_dict.
        """
        new_dim_partition_dict = {}
        for index, dim_spec in enumerate(self.sharding_sequence):
            if not dim_spec.is_replica:
                if index not in new_dim_partition_dict:
                    new_dim_partition_dict[index] = []
                new_dim_partition_dict[index].extend(dim_spec.shard_list)
        self.dim_partition_dict = new_dim_partition_dict

    def sharding_sequence_difference(self, other):
        """
        Naive difference computation: accumulate the per-dimension difference
        over the pair of sharding sequences.

        Example:
            dim_partition_dict = {0: [0, 1]}
            # DistSpec:
            #     shard_sequence: S01,R,R
            #     device_mesh_shape: (4, 4)
            sharding_spec = ShardingSpec(device_mesh, entire_shape, dim_partition_dict)
            dim_partition_dict_to_compare = {0: [0], 1: [1]}
            # DistSpec:
            #     shard_sequence: S0,S1,R
            #     device_mesh_shape: (4, 4)
            sharding_spec_to_compare = ShardingSpec(device_mesh, entire_shape, dim_partition_dict_to_compare)
            print(sharding_spec.sharding_sequence_difference(sharding_spec_to_compare))

        Output:
            25

        Argument:
            other(ShardingSpec): The ShardingSpec to compared with.

        Return:
            difference(int): Difference between two ShardingSpec.
        """
        assert len(self.sharding_sequence) == len(
            other.sharding_sequence
        ), f"Cannot compare difference for two sharding specs with different length."
        difference = 0
        for orig_dim_spec, other_dim_spec in zip(self.sharding_sequence, other.sharding_sequence):
            difference += orig_dim_spec.difference(other_dim_spec)
        return difference

    def get_sharded_shape_per_device(self):
        """Return the local (per-device) shape implied by this sharding spec."""
        sharded_shape = list(self.entire_shape)
        for dim, shard_list in self.dim_partition_dict.items():
            mesh_list = [self.device_mesh.shape[mesh_dim] for mesh_dim in shard_list]
            shard_partitions = reduce(operator.mul, mesh_list, 1)
            assert (
                sharded_shape[dim] % shard_partitions == 0
            ), f"Cannot shard dimension {dim} into {shard_partitions} partitions."
            sharded_shape[dim] //= shard_partitions
        return torch.Size(sharded_shape)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/moe_tensor/api.py
colossalai/tensor/moe_tensor/api.py
from typing import List

import torch
import torch.distributed as dist
from torch.distributed import ProcessGroup

from .moe_info import MoeParallelInfo


def is_moe_tensor(tensor: torch.Tensor) -> bool:
    """
    Check whether the given tensor is a moe tensor.

    A tensor counts as a moe tensor once an ``ep_group`` attribute has been
    attached to it (see ``set_moe_tensor_ep_group``).

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        bool: Whether the given tensor is a moe tensor.
    """
    return hasattr(tensor, "ep_group")


def set_moe_tensor_ep_group(tensor: torch.Tensor, ep_group: ProcessGroup) -> None:
    """
    Attach the expert parallel process group to the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be tagged.
        ep_group (ProcessGroup): The expert parallel process group to attach.
    """
    # Plain attribute assignment; the explicit __setattr__ call was redundant.
    tensor.ep_group = ep_group


def get_moe_info(ep_size: int, dp_size: int, pp_size: int, ep_inside: bool) -> MoeParallelInfo:
    """
    Build a MoeParallelInfo from the given parallel sizes.

    Args:
        ep_size (int): The expert parallel size.
        dp_size (int): The data parallel size.
        pp_size (int): The pipeline parallel size.
        ep_inside (bool): Use ep inside dp if True, dp inside ep if False.

    Returns:
        MoeParallelInfo: The moe parallel info built from the given sizes.
    """
    return MoeParallelInfo(ep_inside, ep_size, dp_size, pp_size)


def get_ep_group(tensor: torch.Tensor) -> ProcessGroup:
    """
    Get the expert parallel group of the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        torch.distributed.ProcessGroup: The expert parallel group of the given tensor.
    """
    return tensor.ep_group


def get_ep_size(tensor: torch.Tensor) -> int:
    """
    Get the expert parallel size of the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        int: The expert parallel size of the given tensor.
    """
    # The previous `getattr(tensor, "ep_group")` raised AttributeError before
    # the assert message could ever fire; supply a default so the assert
    # actually reports the problem.
    assert getattr(tensor, "ep_group", None) is not None, "The tensor does not have expert parallel group."
    return dist.get_world_size(tensor.ep_group)


def get_dp_size(tensor: torch.Tensor) -> int:
    """
    Get the data parallel size of the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        int: The data parallel size of the given tensor.
    """
    # assumes a MoeParallelInfo was attached as `tensor.moe_info` by the
    # caller — TODO(review): confirm against the code paths that set it
    return tensor.moe_info.dp_size


def get_dp_group(tensor: torch.Tensor) -> ProcessGroup:
    """
    Get the data parallel group of the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        torch.distributed.ProcessGroup: The data parallel group of the given tensor.
    """
    return tensor.moe_info.dp_group


def get_ep_rank(tensor: torch.Tensor) -> int:
    """
    Get the expert parallel rank of the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        int: The expert parallel rank of the given tensor.
    """
    return dist.get_rank(get_ep_group(tensor))


def get_dp_rank(tensor: torch.Tensor) -> int:
    """
    Get the data parallel rank of the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        int: The data parallel rank of the given tensor.
    """
    return dist.get_rank(get_dp_group(tensor))


def get_ep_group_ranks(tensor: torch.Tensor) -> List[int]:
    """
    Get the expert parallel group ranks of the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        List[int]: The expert parallel group ranks of the given tensor.
    """
    return tensor.moe_info.ep_group_ranks


def get_dp_group_ranks(tensor: torch.Tensor) -> List[int]:
    """
    Get the data parallel group ranks of the given tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        List[int]: The data parallel group ranks of the given tensor.
    """
    return tensor.moe_info.dp_group_ranks
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/moe_tensor/moe_info.py
colossalai/tensor/moe_tensor/moe_info.py
from colossalai.cluster import ProcessGroupMesh class MoeParallelInfo: """Moe parallelism information, storing parallel sizes and groups.""" def __init__(self, ep_inside: bool, ep_size: int, dp_size: int, pp_size: int = 1): """ init MoeParallelInfo with ep_size, dp_size and pp_size Args: ep_size (int): expert parallel size dp_size (int): data parallel (zero) size pp_size (int, optional): pipeline parallel size. Defaults to 1. ep_inside (bool, optional): Use ep inside dp if True, dp inside ep if False. Defaults to True. """ self.pp_size, self.dp_size, self.ep_size = pp_size, dp_size, ep_size if ep_inside: self.pp_axis, self.dp_axis, self.ep_axis = 0, 1, 2 self.pg = ProcessGroupMesh(self.pp_size, self.dp_size, self.ep_size) else: self.pp_axis, self.ep_axis, self.dp_axis = 0, 1, 2 self.pg = ProcessGroupMesh(self.pp_size, self.ep_size, self.dp_size) self.ep_group = self.pg.get_group_along_axis(self.ep_axis) self.ep_group_ranks = self.pg.get_ranks_in_group(self.ep_group) self.dp_group = self.pg.get_group_along_axis(self.dp_axis) self.dp_group_ranks = self.pg.get_ranks_in_group(self.dp_group) self.ep_rank = self.pg.coordinate(self.ep_axis) self.dp_rank = self.pg.coordinate(self.dp_axis)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/moe_tensor/__init__.py
colossalai/tensor/moe_tensor/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/padded_tensor/api.py
colossalai/tensor/padded_tensor/api.py
import torch


def _hijack_detach_and_clone(ptensor: torch.Tensor) -> torch.Tensor:
    """
    Wrap the tensor's ``detach`` and ``clone`` methods so the padding metadata
    (``_padding_dim``, ``_origin_length``, ``_current_length``) is carried
    over to the result tensor.

    NOTE(review): the previous docstring claimed this copies ``dist_layout``;
    that text was copied from the d_tensor helpers — what it actually
    propagates is the padding metadata.

    Args:
        ptensor (torch.Tensor): The padded tensor to be hijacked.

    Returns:
        torch.Tensor: The same tensor with hijacked methods.
    """
    # Stash the original bound methods so they can be restored later.
    ptensor._unpad_detach = ptensor.detach
    ptensor._unpad_clone = ptensor.clone

    def new_detach(self):
        t_ = self._unpad_detach()
        t_._padding_dim = self._padding_dim
        t_._origin_length = self._origin_length
        t_._current_length = self._current_length
        return t_

    def new_clone(self, *args, **kwargs):
        t_ = self._unpad_clone(*args, **kwargs)
        t_._padding_dim = self._padding_dim
        t_._origin_length = self._origin_length
        t_._current_length = self._current_length
        return t_

    # bind the new methods to this particular tensor instance
    ptensor.detach = new_detach.__get__(ptensor)
    ptensor.clone = new_clone.__get__(ptensor)
    return ptensor


def _hijack_back_detach_and_clone(ptensor: torch.Tensor) -> torch.Tensor:
    """
    Restore the original ``detach`` and ``clone`` methods of the tensor and
    drop the stashed originals (inverse of ``_hijack_detach_and_clone``).

    Args:
        ptensor (torch.Tensor): The tensor whose methods are restored.

    Returns:
        torch.Tensor: The same tensor with its original methods restored.
    """
    ptensor.detach = ptensor._unpad_detach
    ptensor.clone = ptensor._unpad_clone

    delattr(ptensor, "_unpad_detach")
    delattr(ptensor, "_unpad_clone")

    return ptensor


def is_padded_tensor(tensor: torch.Tensor) -> bool:
    """
    Check whether the given tensor is a padded tensor.

    Args:
        tensor (torch.Tensor): The tensor to be checked.

    Returns:
        bool: Whether the given tensor is a padded tensor.
    """
    # Padding metadata is attached by to_padded_tensor / init_as_padded_tensor.
    return hasattr(tensor, "_padding_dim")


def to_padded_tensor(
    tensor: torch.Tensor,
    current_length: int,
    padding_dim: int,
) -> torch.Tensor:
    """
    Zero-pad ``tensor`` along ``padding_dim`` up to ``current_length``,
    in place (``tensor.data`` is replaced), and record the padding metadata
    on the tensor. No-op if the tensor is already padded.

    Args:
        tensor (torch.Tensor): The tensor to pad.
        current_length (int): Target size of ``padding_dim`` after padding.
        padding_dim (int): The dimension to pad along.

    Returns:
        torch.Tensor: The same tensor, now padded and tagged with metadata.
    """
    assert (
        padding_dim < tensor.dim()
    ), f"Please passing a valid padding_dim.\nthe dimension of the tensor is {tensor.dim()}"

    if is_padded_tensor(tensor):
        return tensor

    origin_length = tensor.shape[padding_dim]
    padding_num = current_length - origin_length
    # Guard against shrinking: a negative padding size would otherwise make
    # torch.zeros fail with an opaque error.
    assert padding_num >= 0, f"current_length ({current_length}) must be >= dim size ({origin_length})"
    padding_data = torch.zeros(
        *tensor.shape[:padding_dim],
        padding_num,
        *tensor.shape[padding_dim + 1 :],
        device=tensor.device,
        dtype=tensor.dtype,
    )
    tensor.data = torch.cat((tensor.data, padding_data), dim=padding_dim).contiguous()

    tensor._padding_dim = padding_dim
    tensor._origin_length = origin_length
    tensor._current_length = current_length

    _hijack_detach_and_clone(tensor)

    return tensor


def to_unpadded_tensor(ptensor: torch.Tensor):
    """
    Strip the padding from a padded tensor in place, remove the padding
    metadata and restore the original ``detach``/``clone`` methods.
    No-op if the tensor is not padded.

    Args:
        ptensor (torch.Tensor): The padded tensor.

    Returns:
        torch.Tensor: The same tensor, restored to its original length.
    """
    if not is_padded_tensor(ptensor):
        return ptensor

    unpad_slices = [slice(None)] * ptensor.dim()
    unpad_slices[ptensor._padding_dim] = slice(None, ptensor._origin_length)
    ptensor.data = ptensor.data[tuple(unpad_slices)]

    delattr(ptensor, "_padding_dim")
    delattr(ptensor, "_origin_length")
    delattr(ptensor, "_current_length")

    _hijack_back_detach_and_clone(ptensor)

    return ptensor


def init_as_padded_tensor(tensor: torch.Tensor, current_length: int, origin_length: int, padding_dim: int):
    """
    Tag an already-padded tensor with padding metadata without changing its
    data (e.g. when the padding was applied elsewhere). No-op if the tensor
    is already tagged.

    Args:
        tensor (torch.Tensor): The tensor to tag.
        current_length (int): Size of ``padding_dim`` including padding.
        origin_length (int): Size of ``padding_dim`` before padding.
        padding_dim (int): The padded dimension.

    Returns:
        torch.Tensor: The same tensor, tagged with padding metadata.
    """
    if is_padded_tensor(tensor):
        return tensor

    tensor._padding_dim = padding_dim
    tensor._origin_length = origin_length
    tensor._current_length = current_length

    _hijack_detach_and_clone(tensor)

    return tensor
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/padded_tensor/__init__.py
colossalai/tensor/padded_tensor/__init__.py
from .api import init_as_padded_tensor, is_padded_tensor, to_padded_tensor, to_unpadded_tensor __all__ = ["is_padded_tensor", "to_padded_tensor", "to_unpadded_tensor", "init_as_padded_tensor"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/d_tensor/api.py
colossalai/tensor/d_tensor/api.py
import copy import operator from functools import reduce from typing import Union import torch import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.device.device_mesh import DeviceMesh from colossalai.tensor.d_tensor.sharding_spec import DimSpec from .layout import Layout from .layout_converter import LayoutConverter from .sharding_spec import ShardingSpec layout_converter = LayoutConverter() _SHARD_DIM = DimSpec([0]) def get_shard_dim_1d(p: torch.Tensor): """ Get the dimension along which the tensor is sharded, for example in 1D Tensor Parallel. Args: p (torch.Tensor): the input tensor Returns: int: the dimension along which the tensor is sharded """ if not is_distributed_tensor(p): raise ValueError("p is not a distributed tensor") sharding = p.dist_layout.sharding_spec.sharding_sequence return sharding.index(_SHARD_DIM) def clear_layout_converter(): global layout_converter layout_converter.cached_solution.clear() def is_distributed_tensor(tensor: torch.Tensor) -> bool: """ Check whether the given tensor is a distributed tensor. Args: tensor (torch.Tensor): The tensor to be checked. Returns: bool: Whether the given tensor is a distributed tensor. """ return hasattr(tensor, "dist_layout") def is_sharded(dtensor: torch.Tensor) -> bool: """ Check if a tensor is sharded. Args: tensor (torch.Tensor): The tensor to be checked. Returns: bool: True if the tensor is sharded, False otherwise. """ assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." return list(dtensor.shape) == list(dtensor.dist_layout.global_shape) def _hijack_detach_and_clone(dtensor: torch.Tensor) -> torch.Tensor: """ Hijack the detach and clone methods of the tensor to make sure the dist_layout is copied. Args: tensor (torch.Tensor): The tensor to be hijacked. Returns: torch.Tensor: The hijacked tensor. 
""" dtensor._old_detach = dtensor.detach dtensor._old_clone = dtensor.clone def new_detach(self): t_ = self._old_detach() t_.dist_layout = copy.deepcopy(self.dist_layout) return t_ def new_clone(self, *args, **kwargs): t_ = self._old_clone(*args, **kwargs) t_.dist_layout = copy.deepcopy(self.dist_layout) return t_ # bind the new methods to the tensor dtensor.detach = new_detach.__get__(dtensor) dtensor.clone = new_clone.__get__(dtensor) return dtensor def _construct_default_sharding_spec( tensor: torch.Tensor, ) -> ShardingSpec: """ Construct the default sharding specification for the tensor. Args: tensor (`torch.Tensor`): the tensor to be sharded. Returns: A `ShardingSpec` object without any sharding specified. """ return ShardingSpec(dim_size=tensor.dim(), dim_partition_dict={}) def _apply_layout(tensor, layout): """ Apply the layout to the local tensor during initializing process. """ # layout converter requires a source and target layout # we construct the source layer for an unsharded tensor # and use self.dist_layer as the target layout for the sharded tensor source_spec = _construct_default_sharding_spec(tensor) source_layout = Layout(device_mesh=layout.device_mesh, sharding_spec=source_spec, global_shape=tensor.shape) sharded_tensor = layout_converter.apply(tensor=tensor, source_layout=source_layout, target_layout=layout) return sharded_tensor def distribute_tensor(tensor: torch.Tensor, device_mesh: DeviceMesh, sharding_spec: ShardingSpec) -> torch.Tensor: """ Convert the given tensor to a distributed tensor. Args: tensor (torch.Tensor): The tensor to be converted. device_mesh (DeviceMesh): The device mesh for abstraction of the compute devices. sharding_spec (ShardingSpec): The sharding specification which describes how the tensor will be sharded. Returns: torch.Tensor: The distributed tensor. """ assert not is_distributed_tensor(tensor), "The input tensor is already a distributed tensor." 
dist_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec, global_shape=tensor.shape) # shard tensor sharded_tensor = _apply_layout(tensor, dist_layout) # hack some tensor methods _hijack_detach_and_clone(sharded_tensor) return sharded_tensor def init_as_dtensor( tensor: torch.Tensor, device_mesh: DeviceMesh, sharding_spec: ShardingSpec, global_shape: torch.Size ) -> torch.Tensor: assert not is_distributed_tensor(tensor), "The input tensor is already a distributed tensor." dist_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec, global_shape=global_shape) # shard tensor tensor.dist_layout = dist_layout # hack some tensor methods _hijack_detach_and_clone(tensor) return tensor def redistribute(dtensor: torch.Tensor, device_mesh: DeviceMesh, sharding_spec: ShardingSpec) -> None: """ Convert the layout of the tensor from source_spec to target_spec. This will update the `local_tensor` and `dist_layout` in place. Args: dtensor (torch.Tensor): the distributed tensor to be converted. device_mesh (DeviceMesh): the device mesh for abstraction of the compute devices. target_layout (Layout): the target layout specification. """ assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." global_shape = get_global_shape(dtensor) target_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec, global_shape=global_shape) resharded_tensor = layout_converter.apply( tensor=dtensor, source_layout=dtensor.dist_layout, target_layout=target_layout ) return resharded_tensor def to_global(dtensor: torch.Tensor) -> torch.Tensor: """ Convert a distributed tensor to the global tensor with the given layout. This function returns a native `torch.Tensor` object. Args: dtensor (torch.Tensor): the distributed tensor to be converted. Returns: torch.Tensor: the global tensor. """ assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." 
layout_converter = LayoutConverter() global_sharding_spec = ShardingSpec(dtensor.dim(), {}) device_mesh = get_device_mesh(dtensor) global_shape = get_global_shape(dtensor) global_layout = Layout(device_mesh=device_mesh, sharding_spec=global_sharding_spec, global_shape=global_shape) global_tensor = layout_converter.apply(dtensor, dtensor.dist_layout, global_layout) return global_tensor def shard_rowwise( tensor: torch.Tensor, group_or_device_mesh: Union[ProcessGroup, DeviceMesh] = None, ) -> torch.Tensor: """ Shard the first dim of the given tensor. Args: tensor (torch.Tensor): The tensor to be sharded. group_or_device_mesh (Union[ProcessGroup, DeviceMesh], optional): The group or device mesh to shard the tensor. If None, the tensor will be sharded with respect to the global process group. Defaults to None. inplace (bool, optional): Whether to shard the tensor in-place. Defaults to False. Returns: torch.Tensor: The sharded tensor. """ # if the group_or_device_mesh is None, we shard the tensor with respect to the global process group if group_or_device_mesh is None: group_or_device_mesh = dist.GroupMember.WORLD if isinstance(group_or_device_mesh, ProcessGroup): device_mesh = DeviceMesh.from_process_group(group_or_device_mesh) else: assert len(group_or_device_mesh.shape) == 1, "Only 1D DeviceMesh is accepted for row-wise sharding." device_mesh = group_or_device_mesh sharding_spec = ShardingSpec(dim_size=tensor.dim(), dim_partition_dict={0: [0]}) return distribute_tensor(tensor, device_mesh, sharding_spec) def shard_colwise(tensor: torch.Tensor, group_or_device_mesh: Union[ProcessGroup, DeviceMesh] = None) -> torch.Tensor: """ Shard the first dim of the given tensor. Args: tensor (torch.Tensor): The tensor to be sharded. group_or_device_mesh (Union[ProcessGroup, DeviceMesh], optional): The group or device mesh to shard the tensor. If None, the tensor will be sharded with respect to the global process group. Defaults to None. 
inplace (bool, optional): Whether to shard the tensor in-place. Defaults to False. Returns: torch.Tensor: The sharded tensor. """ # if the group_or_device_mesh is None, we shard the tensor with respect to the global process group if group_or_device_mesh is None: group_or_device_mesh = dist.GroupMember.WORLD if isinstance(group_or_device_mesh, ProcessGroup): device_mesh = DeviceMesh.from_process_group(group_or_device_mesh) else: assert len(group_or_device_mesh.shape) == 1, "Only 1D DeviceMesh is accepted for row-wise sharding." device_mesh = group_or_device_mesh sharding_spec = ShardingSpec(dim_size=tensor.dim(), dim_partition_dict={-1: [0]}) return distribute_tensor(tensor, device_mesh, sharding_spec) def sharded_tensor_to_param(dtensor: torch.Tensor, requires_grad: bool = True): assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." param = torch.nn.Parameter(dtensor, requires_grad=requires_grad) # make it distributed as well param.dist_layout = dtensor.dist_layout _hijack_detach_and_clone(param) return param def sharded_tensor_to_existing_param(dtensor: torch.Tensor, param: torch.nn.Parameter) -> None: assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." param.data = dtensor # make it distributed as well param.dist_layout = dtensor.dist_layout _hijack_detach_and_clone(param) def compute_global_numel(dtensor: torch.Tensor) -> int: """ Compute the global number of elements in the distributed tensor. Args: dtensor (torch.Tensor): The distributed tensor. Returns: int: The global number of elements in the distributed tensor. """ assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." numel = reduce(operator.mul, dtensor.dist_layout.global_shape) return numel def get_layout(dtensor: torch.Tensor) -> Layout: """ Get the layout of the distributed tensor. Args: dtensor (torch.Tensor): The distributed tensor. Returns: Layout: The layout of the distributed tensor. 
""" assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." return dtensor.dist_layout def get_global_shape(dtensor: torch.Tensor) -> torch.Size: """ Get the global shape of the distributed tensor. Args: dtensor (torch.Tensor): The distributed tensor. Returns: torch.Size: The global shape of the distributed tensor. """ assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." return dtensor.dist_layout.global_shape def get_device_mesh(dtensor: torch.Tensor) -> DeviceMesh: """ Get the device mesh of the distributed tensor. Args: dtensor (torch.Tensor): The distributed tensor. Returns: DeviceMesh: The device mesh of the distributed tensor. """ assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." return dtensor.dist_layout.device_mesh def get_sharding_spec(dtensor: torch.Tensor) -> ShardingSpec: """ Get the sharding spec of the distributed tensor. Args: dtensor (torch.Tensor): The distributed tensor. Returns: ShardingSpec: The sharding spec of the distributed tensor. """ assert is_distributed_tensor(dtensor), "The input tensor is not a distributed tensor." return dtensor.dist_layout.sharding_spec # ====================================================== # Some sharding does not obey the SPMD style # e.g. Fused QKV layer in GPT2 # we support customize sharding with the following APIs # ====================================================== def is_customized_distributed_tensor(tensor: torch.Tensor): """ Check whether the given tensor is a customized distributed tensor. Args: tensor (torch.Tensor): The tensor to be checked. Returns: bool: Whether the given tensor is a customized distributed tensor. """ return hasattr(tensor, "shard_fn") and hasattr(tensor, "gather_fn") def _hijack_detach_and_clone_for_customized_distributed_tensor(dtensor: torch.Tensor) -> torch.Tensor: """ Hijack the detach and clone methods of the tensor to make sure the dist_layout is copied. 
Args: tensor (torch.Tensor): The tensor to be hijacked. Returns: torch.Tensor: The hijacked tensor. """ dtensor._old_detach = dtensor.detach dtensor._old_clone = dtensor.clone def new_detach(self): t_ = self._old_detach() t_.shard_fn = self.shard_fn t_.gather_fn = self.gather_fn return t_ def new_clone(self, *args, **kwargs): t_ = self._old_clone(*args, **kwargs) t_.shard_fn = self.shard_fn t_.gather_fn = self.gather_fn return t_ # bind the new methods to the tensor dtensor.detach = new_detach.__get__(dtensor) dtensor.clone = new_clone.__get__(dtensor) return dtensor def distribute_tensor_with_customization(tensor: torch.Tensor, shard_fn, gather_fn: callable): """ Distribute the given tensor with the given shard_fn and gather_fn. Example: ```python # define shard and gather functions def shard_fn(tensor): rank = torch.distributed.get_rank() world_size = torch.distributed.get_world_size() return tensor.chunk(world_size, dim=0)[rank] def gather_fn(tensor): rank = torch.distributed.get_rank() world_size = torch.distributed.get_world_size() shard_list = [torch.zeros_like(tensor) for _ in range(world_size)] torch.distributed.all_gather(shard_list, tensor) return torch.cat(shard_list, dim=0) # create a distributed tensor tensor = torch.rand(4, 4) dtensor = distribute_tensor_with_customization(tensor, shard_fn, gather_fn) ``` Args: tensor (torch.Tensor): The tensor to be distributed. shard_fn (callable): The function to shard the tensor. gather_fn (callable): The function to gather the tensor. Returns: torch.Tensor: The distributed tensor. """ assert callable(shard_fn), "The shard_fn must be callable." assert callable(gather_fn), "The gather_fn must be callable." assert not is_distributed_tensor(tensor), "The input tensor is already a distributed tensor." 
sharded_tensor = shard_fn(tensor) # set the shard_fn and gather_fn as attributes of the distributed tensor sharded_tensor.shard_fn = shard_fn sharded_tensor.gather_fn = gather_fn # set the shard_fn and gather_fn as attributes of the distributed tensor _hijack_detach_and_clone_for_customized_distributed_tensor(sharded_tensor) return sharded_tensor def init_tensor_as_customization_distributed(tensor: torch.Tensor, shard_fn, gather_fn: callable): """ Distribute the given tensor with the given shard_fn and gather_fn. Example: ```python # define shard and gather functions def shard_fn(tensor): rank = torch.distributed.get_rank() world_size = torch.distributed.get_world_size() return tensor.chunk(world_size, dim=0)[rank] def gather_fn(tensor): rank = torch.distributed.get_rank() world_size = torch.distributed.get_world_size() shard_list = [torch.zeros_like(tensor) for _ in range(world_size)] torch.distributed.all_gather(shard_list, tensor) return torch.cat(shard_list, dim=0) # create a distributed tensor tensor = torch.rand(4, 4) dtensor = init_tensor_as_customization_distributed(tensor, shard_fn, gather_fn) ``` Args: tensor (torch.Tensor): The tensor to be distributed. shard_fn (callable): The function to shard the tensor. gather_fn (callable): The function to gather the tensor. Returns: torch.Tensor: The distributed tensor. """ assert callable(shard_fn), "The shard_fn must be callable." assert callable(gather_fn), "The gather_fn must be callable." assert not is_distributed_tensor(tensor), "The input tensor is already a distributed tensor." # set the shard_fn and gather_fn as attributes of the distributed tensor tensor.shard_fn = shard_fn tensor.gather_fn = gather_fn # set the shard_fn and gather_fn as attributes of the distributed tensor _hijack_detach_and_clone_for_customized_distributed_tensor(tensor) return tensor def to_global_for_customized_distributed_tensor(dtensor: torch.Tensor) -> torch.Tensor: """ Gather the given tensor to the global tensor. 
Args: dtensor (torch.Tensor): The distributed tensor. Returns: torch.Tensor: The global tensor. """ assert is_customized_distributed_tensor(dtensor), "The input tensor is not a customized distributed tensor." return dtensor.gather_fn(dtensor) def customized_distributed_tensor_to_param(dtensor: torch.Tensor, requires_grad: bool = True): """ Convert the given customized distributed tensor to a parameter. """ assert is_customized_distributed_tensor(dtensor), "The input tensor is not a customized distributed tensor." param = torch.nn.Parameter(dtensor, requires_grad=requires_grad) # make it distributed as well param.shard_fn = dtensor.shard_fn param.gather_fn = dtensor.gather_fn _hijack_detach_and_clone_for_customized_distributed_tensor(param) return param def customized_distributed_tensor_to_existing_param(dtensor: torch.Tensor, param: torch.nn.Parameter): """ Convert the given customized distributed tensor to an existing parameter. """ assert is_customized_distributed_tensor(dtensor), "The input tensor is not a customized distributed tensor." param.data = dtensor.data param.shard_fn = dtensor.shard_fn param.gather_fn = dtensor.gather_fn _hijack_detach_and_clone_for_customized_distributed_tensor(param)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/d_tensor/comm_spec.py
colossalai/tensor/d_tensor/comm_spec.py
from enum import Enum from typing import Dict import torch import torch.distributed as dist from torch.distributed import ReduceOp __all__ = [ "CollectiveCommPattern", "CommSpec", ] class CollectiveCommPattern(Enum): GATHER_FWD_SPLIT_BWD = "gather_fwd_split_bwd" ALL2ALL_FWD_ALL2ALL_BWD = "all2all_fwd_all2all_bwd" SPLIT_FWD_GATHER_BWD = "split_fwd_gather_bwd" ALLREDUCE_FWD_IDENTITY_BWD = "all_reduce_fwd_identity_bwd" IDENTITY_FWD_ALLREDUCE_BWD = "identity_fwd_all_reduce_bwd" MIXGATHER_FWD_SPLIT_BWD = "mixgather_fwd_split_bwd" class CommSpec: """ Communication spec is used to record the communication action. It converts the communication spec to real action which will be used in runtime. It contains comm_pattern to determine the communication method, process_group_dict to determine the process groups, gather_dim and shard_dim to determine the buffer shape, and logical_process_axis Argument: comm_pattern(CollectiveCommPattern): describe the communication method used in this spec. process_group_dict(Dict): A dict which contains the process groups used to apply this CommSpec. gather_dim(int, Optional): The gather_dim of the tensor will be gathered. shard_dim(int, Optional): The shard_dim of the tensor will be sharded. logical_process_axis(Union(int, List[int]), Optional): The mesh_dim to implement the communication action. 
""" def __init__( self, comm_pattern: CollectiveCommPattern, process_group_dict: Dict, gather_dim: int = None, shard_dim: int = None, logical_process_axis: int = None, ): self.comm_pattern = comm_pattern self.gather_dim = gather_dim self.shard_dim = shard_dim self.logical_process_axis = logical_process_axis self.process_group_dict = process_group_dict def __repr__(self): res_list = ["CommSpec:("] if self.comm_pattern == CollectiveCommPattern.GATHER_FWD_SPLIT_BWD: res_list.append(f"comm_pattern:GATHER_FWD_SPLIT_BWD, ") res_list.append(f"gather_dim:{self.gather_dim}, ") res_list.append(f"shard_dim:{self.gather_dim}, ") res_list.append(f"logical_process_axis:{self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD: res_list.append(f"comm_pattern:ALL2ALL_FWD_ALL2ALL_BWD, ") res_list.append(f"gather_dim:{self.gather_dim}, ") res_list.append(f"shard_dim:{self.shard_dim}, ") res_list.append(f"logical_process_axis: {self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.SPLIT_FWD_GATHER_BWD: res_list.append(f"comm_pattern:SPLIT_FWD_GATHER_BWD, ") res_list.append(f"gather_dim:{self.gather_dim}, ") res_list.append(f"shard_dim:{self.shard_dim}, ") res_list.append(f"logical_process_axis:{self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD: res_list.append(f"comm_pattern:ALLREDUCE_FWD_IDENTITY_BWD, ") res_list.append(f"logical_process_axis:{self.logical_process_axis})") elif self.comm_pattern == CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD: res_list.append(f"comm_pattern:IDENTITY_FWD_ALLREDUCE_BWD, ") res_list.append(f"logical_process_axis:{self.logical_process_axis})") return "".join(res_list) def covert_spec_to_action(self, tensor): """ Convert CommSpec into runtime action, implement real collection communication to target tensor. The collection communication action is directed by the CommSpec. 
Argument: tensor(torch.Tensor): Tensor stored in each device, which could be different in different ranks. """ if self.comm_pattern in pattern_to_func_dict: tensor = pattern_to_func_dict[self.comm_pattern](tensor, self) else: tensor = tensor return tensor def _all_gather(tensor: torch.Tensor, comm_spec: CommSpec): """ Implement all gather operation on device mesh based on information provided by comm_spec. """ process_group = comm_spec.process_group_dict[comm_spec.logical_process_axis] world_size = dist.get_world_size(process_group) tensor_list = [torch.zeros(tensor.shape, dtype=tensor.dtype, device=tensor.device) for _ in range(world_size)] # without this contiguous operation, the all gather may get some unexpected results. tensor = tensor.contiguous() dist.all_gather(tensor_list, tensor, group=process_group) output = torch.cat(tuple(tensor_list), comm_spec.gather_dim).contiguous() return output def _split(tensor: torch.Tensor, comm_spec: CommSpec): """ Implement shard operation on device mesh based on information provided by comm_spec. """ process_group = comm_spec.process_group_dict[comm_spec.logical_process_axis] dim = comm_spec.shard_dim length = tensor.shape[comm_spec.shard_dim] // dist.get_world_size(process_group) start = length * dist.get_rank(process_group) output = torch.narrow(tensor, dim, start, length).clone().contiguous() return output def _all_to_all(tensor: torch.Tensor, comm_spec: CommSpec): """ Implement all to all operation on device mesh based on information provided by comm_spec. 
""" process_group = comm_spec.process_group_dict[comm_spec.logical_process_axis] world_size = dist.get_world_size(process_group) new_shape = list(tensor.shape) new_shape[comm_spec.shard_dim] = new_shape[comm_spec.shard_dim] // world_size new_shape = torch.Size(new_shape) output_tensor_list = [torch.zeros(new_shape, dtype=tensor.dtype, device=tensor.device) for _ in range(world_size)] dim = comm_spec.shard_dim length = tensor.shape[comm_spec.shard_dim] // world_size input_tensor_list = [torch.narrow(tensor, dim, length * i, length).contiguous() for i in range(world_size)] group = process_group dist.all_to_all(output_tensor_list, input_tensor_list, group) output = torch.cat(tuple(output_tensor_list), comm_spec.gather_dim).contiguous() return output def _all_reduce(tensor: torch.Tensor, comm_spec: CommSpec, async_op: bool = False): """ Implement all reduce operation on device mesh based on information provided by comm_spec. """ process_group = comm_spec.process_group_dict[comm_spec.logical_process_axis] if not tensor.is_contiguous(): tensor = tensor.contiguous() dist.all_reduce(tensor, op=ReduceOp.SUM, group=process_group, async_op=async_op) return tensor class _ReduceGrad(torch.autograd.Function): """ A customized communication operation which forward is an identity operation, backward is all_reduce operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. """ @staticmethod def symbolic(graph, input_): return input_ @staticmethod def forward(ctx, input_, comm_spec): ctx.comm_spec = comm_spec return input_ @staticmethod def backward(ctx, grad_output): return _all_reduce(grad_output, ctx.comm_spec), None class _ReduceInput(torch.autograd.Function): """ A customized communication operation which forward is all_reduce operation, backward is an identity operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. 
""" @staticmethod def symbolic(graph, input_): return _all_reduce(input_) @staticmethod def forward(ctx, input_, comm_spec): return _all_reduce(input_, comm_spec) @staticmethod def backward(ctx, grad_output): return grad_output, None class _SplitForwardGatherBackward(torch.autograd.Function): """ A customized communication operation which forward is split operation, backward is an all gather operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. """ @staticmethod def symbolic(graph, input_): return _split(input_) @staticmethod def forward(ctx, input_, comm_spec): ctx.comm_spec = comm_spec return _split(input_, comm_spec) @staticmethod def backward(ctx, grad_output): return _all_gather(grad_output, ctx.comm_spec), None class _GatherForwardSplitBackward(torch.autograd.Function): """ A customized communication operation which forward is an all gather operation, backward is split operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. """ @staticmethod def symbolic(graph, input_): return _all_gather(input_) @staticmethod def forward(ctx, input_, comm_spec): ctx.comm_spec = comm_spec return _all_gather(input_, comm_spec) @staticmethod def backward(ctx, grad_output): return _split(grad_output, ctx.comm_spec), None class _AllToAll(torch.autograd.Function): """ A customized communication operation which forward is an all to all operation, backward is an all to all operation. Args: input_: input matrix. comm_spec: comm_spec will give information like process group, rank list, etc. 
""" @staticmethod def symbolic(graph, input_): return _all_to_all(input_) @staticmethod def forward(ctx, input_, comm_spec): output = _all_to_all(input_, comm_spec) comm_spec_for_backward = CommSpec( comm_pattern=comm_spec.comm_pattern, process_group_dict=comm_spec.process_group_dict, gather_dim=comm_spec.shard_dim, shard_dim=comm_spec.gather_dim, logical_process_axis=comm_spec.logical_process_axis, ) ctx.comm_spec = comm_spec_for_backward return output @staticmethod def backward(ctx, grad_outputs): return _all_to_all(grad_outputs, ctx.comm_spec), None def reduce_grad(input_, comm_spec): return _ReduceGrad.apply(input_, comm_spec) def reduce_input(input_, comm_spec): return _ReduceInput.apply(input_, comm_spec) def split_forward_gather_backward(input_, comm_spec): return _SplitForwardGatherBackward.apply(input_, comm_spec) def gather_forward_split_backward(input_, comm_spec): return _GatherForwardSplitBackward.apply(input_, comm_spec) def all_to_all(input_, comm_spec): return _AllToAll.apply(input_, comm_spec) pattern_to_func_dict = { CollectiveCommPattern.GATHER_FWD_SPLIT_BWD: gather_forward_split_backward, CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD: all_to_all, CollectiveCommPattern.SPLIT_FWD_GATHER_BWD: split_forward_gather_backward, CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD: reduce_input, CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD: reduce_grad, }
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/d_tensor/layout_converter.py
colossalai/tensor/d_tensor/layout_converter.py
import math
from copy import deepcopy
from dataclasses import dataclass
from typing import Dict, List, Tuple

import torch
import torch.distributed as dist

from colossalai.context.singleton_meta import SingletonMeta
from colossalai.tensor.d_tensor.comm_spec import *
from colossalai.tensor.d_tensor.layout import Layout
from colossalai.tensor.d_tensor.misc import LayoutException
from colossalai.tensor.padded_tensor.api import init_as_padded_tensor, is_padded_tensor
from colossalai.tensor.utils import all_gather_simulator, all_to_all_simulator, shard_simulator

from .sharding_spec import ShardingSpec
from .utils import get_comm_cost

__all__ = ["LayoutConverter", "LayoutConverterOptions", "set_layout_converting_options"]


@dataclass
class LayoutConverterOptions:
    """
    LayoutConverterOptions is a dataclass which specifies the preferences for layout converting.
    """

    # TODO: layout converter option is not implemented yet


def set_layout_converting_options(options: LayoutConverterOptions):
    """
    Configure the shape consistency manager via function call.
    """
    manager = LayoutConverter()
    manager.options = options


class LayoutConverter(metaclass=SingletonMeta):
    """
    LayoutConverter is a singleton class which converts the layout of a distributed tensor.
    """

    def __init__(self):
        self._options = None
        self._forward_only = False
        # Maps (source, target) spec/shape pairs to a previously solved
        # (transform_path, comm_action_sequence) so repeated conversions skip
        # the greedy search.
        self.cached_solution = {}

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, options_: LayoutConverterOptions):
        assert isinstance(options_, LayoutConverterOptions)
        self._options = options_

    @property
    def forward_only(self):
        return self._forward_only

    @forward_only.setter
    def forward_only(self, value):
        assert isinstance(value, bool)
        self._forward_only = value

    def all_gather_transform_layouts(self, source_layout: Layout) -> Dict[Layout, CommSpec]:
        """
        Get all valid layouts from source_layout with single all-gather operation.
        For the all-gather operation, we just care about the S dimension.

        Argument:
            source_layout: the layout to be transformed.

        Return:
            valid_spec_dict(Dict[Layout, CommSpec]): all valid layouts from source_layout
                with single all-gather operation.

        Example:
            layout_converter = LayoutConverter()
            physical_mesh_id = torch.arange(0, 4)
            mesh_shape = (2, 2)
            # [[0, 1,
            #  [2, 3]]
            device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
            global_shape = (4, 4, 4)

            dim_partition_dict = {0: [0], 1: [1]}

            # [S0,S1,R]
            sharding_spec = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_dict)
            layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec, global_shape=global_shape)

            rst_dict = layout_converter.all_gather_transform_layouts(layout)
            for layout, comm_spec in rst_dict.items():
                print(f'{layout.sharding_spec.sharding_sequence}: {comm_spec}')

        Output:
            [R, S1, R]: CommSpec:(comm_pattern:GATHER_FWD_SPLIT_BWD, gather_dim:0, shard_dim:0, logical_process_axis:0)
            [S0, R, R]: CommSpec:(comm_pattern:GATHER_FWD_SPLIT_BWD, gather_dim:1, shard_dim:1, logical_process_axis:1)
        """
        valid_spec_dict = {}
        comm_pattern = CollectiveCommPattern.GATHER_FWD_SPLIT_BWD
        source_spec = source_layout.sharding_spec

        # the key of the dict is the axis
        # the value is the process group
        current_rank = source_layout.device_mesh._global_rank_of_current_process
        process_group_dict = source_layout.device_mesh._process_group_dict[current_rank]

        for target_pair in source_spec.dim_partition_dict.items():
            shard_list = all_gather_simulator(target_pair)
            index = target_pair[0]
            new_dim_partition_dict = deepcopy(source_spec.dim_partition_dict)

            # We won't add empty list into dim_partition_dict
            # The key will be popped if the related shard_list is empty
            if shard_list:
                new_dim_partition_dict[index] = shard_list
            else:
                new_dim_partition_dict.pop(index)

            # generate the CommSpec to record the action of source_sharding_spec->new_sharding_spec
            gather_dim = index
            logical_process_axis = target_pair[1][-1]
            comm_spec = CommSpec(
                comm_pattern,
                process_group_dict=process_group_dict,
                gather_dim=gather_dim,
                # shard_dim will be used during backward
                shard_dim=gather_dim,
                logical_process_axis=logical_process_axis,
            )

            # generate new sharding spec
            try:
                new_sharding_spec = ShardingSpec(source_spec.dims, dim_partition_dict=new_dim_partition_dict)
                new_layout = Layout(
                    device_mesh=source_layout.device_mesh,
                    sharding_spec=new_sharding_spec,
                    global_shape=source_layout.global_shape,
                )
                valid_spec_dict[new_layout] = comm_spec
            except LayoutException:
                pass
        return valid_spec_dict

    def all_to_all_transform_layout(self, source_layout: Layout) -> Dict[Layout, CommSpec]:
        """
        Get all valid layouts from source_layout with single all-to-all operation.
        For the all-to-all operation, we just care about the pairs containing S dimension.

        Argument:
            source_layout(Layout): the layout to be transformed.

        Return:
            valid_spec_dict(Dict[Layout, CommSpec]): all valid layouts from source_layout
                with single all-to-all operation.

        Example:
            layout_converter = LayoutConverter()
            physical_mesh_id = torch.arange(0, 4)
            mesh_shape = (2, 2)
            # [[0, 1,
            #  [2, 3]]
            device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
            global_shape = (4, 4, 4)

            dim_partition_dict = {0: [0], 1: [1]}

            # [S0,S1,R]
            sharding_spec = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_dict)
            layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec, global_shape=global_shape)

            rst_dict = layout_converter.all_to_all_transform_layout(layout)
            for layout, comm_spec in rst_dict.items():
                print(f'{layout.sharding_spec.sharding_sequence}: {comm_spec}')

        Output:
            [S01, R, R]: CommSpec:(comm_pattern:ALL2ALL_FWD_ALL2ALL_BWD, gather_dim:1, shard_dim:0, logical_process_axis: 1)
            [R, S1, S0]: CommSpec:(comm_pattern:ALL2ALL_FWD_ALL2ALL_BWD, gather_dim:0, shard_dim:2, logical_process_axis: 0)
            [S0, R, S1]: CommSpec:(comm_pattern:ALL2ALL_FWD_ALL2ALL_BWD, gather_dim:1, shard_dim:2, logical_process_axis: 1)
        """
        valid_spec_dict = {}
        comm_pattern = CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD

        # the key of the dict is the axis
        # the value is the process group
        current_rank = source_layout.device_mesh._global_rank_of_current_process
        process_group_dict = source_layout.device_mesh._process_group_dict[current_rank]

        source_spec = source_layout.sharding_spec
        tensor_dims = source_spec.dims
        for f_index in range(tensor_dims - 1):
            for b_index in range(f_index + 1, tensor_dims):
                # skip (R, R) cases
                if f_index not in source_spec.dim_partition_dict and b_index not in source_spec.dim_partition_dict:
                    continue
                else:
                    if f_index in source_spec.dim_partition_dict:
                        # skip (S01, R) -> (R, S01) is NOT allowed
                        if len(source_spec.dim_partition_dict[f_index]) >= 2:
                            continue
                        f_target_pair = (f_index, deepcopy(source_spec.dim_partition_dict[f_index]))
                    else:
                        f_target_pair = (f_index, [])
                    if b_index in source_spec.dim_partition_dict:
                        # skip (R, S01) -> (S01, R) is NOT allowed
                        if len(source_spec.dim_partition_dict[b_index]) >= 2:
                            continue
                        b_target_pair = (b_index, deepcopy(source_spec.dim_partition_dict[b_index]))
                    else:
                        b_target_pair = (b_index, [])

                # skip (S1, S0) -> S10
                if f_target_pair[1] and b_target_pair[1] and f_target_pair[1][0] >= b_target_pair[1][0]:
                    continue
                f_shard_list, b_shard_list = all_to_all_simulator(f_target_pair, b_target_pair)
                f_index = f_target_pair[0]
                b_index = b_target_pair[0]

                # generate the CommSpec to record the action of source_sharding_spec->new_sharding_spec
                if len(f_shard_list) < len(f_target_pair[1]):
                    gather_dim = f_index
                    shard_dim = b_index
                    logical_process_axis = f_target_pair[1][-1]
                else:
                    gather_dim = b_index
                    shard_dim = f_index
                    logical_process_axis = b_target_pair[1][-1]

                comm_spec = CommSpec(
                    comm_pattern,
                    process_group_dict=process_group_dict,
                    gather_dim=gather_dim,
                    shard_dim=shard_dim,
                    logical_process_axis=logical_process_axis,
                )

                new_dim_partition_dict = deepcopy(source_spec.dim_partition_dict)

                # We won't add empty list into dim_partition_dict
                # The key will be popped if the related shard_list is empty
                if f_shard_list:
                    new_dim_partition_dict[f_index] = f_shard_list
                else:
                    new_dim_partition_dict.pop(f_index)
                if b_shard_list:
                    new_dim_partition_dict[b_index] = b_shard_list
                else:
                    new_dim_partition_dict.pop(b_index)

                # generate new sharding spec
                try:
                    new_sharding_spec = ShardingSpec(source_spec.dims, dim_partition_dict=new_dim_partition_dict)
                    new_layout = Layout(
                        device_mesh=source_layout.device_mesh,
                        sharding_spec=new_sharding_spec,
                        global_shape=source_layout.global_shape,
                    )
                    valid_spec_dict[new_layout] = comm_spec
                except LayoutException:
                    pass

        return valid_spec_dict

    def shard_transform_layout(self, source_layout: Layout) -> Dict[Layout, CommSpec]:
        """
        Get all valid layouts from source_layout with single shard operation.
        For the sharding operation, we just care about legal sharding dimensions.

        Argument:
            source_layout(Layout): the layout to be transformed.

        Return:
            valid_spec_dict(Dict[Layout, CommSpec]): all valid layouts from source_layout
                with single shard operation.

        Example:
            layout_converter = LayoutConverter()
            physical_mesh_id = torch.arange(0, 4)
            mesh_shape = (2, 2)
            # [[0, 1,
            #  [2, 3]]
            device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
            global_shape = (4, 4, 4)

            dim_partition_dict = {0: [0]}

            # [S0,R,R]
            sharding_spec = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_dict)
            layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec, global_shape=global_shape)

            rst_dict = layout_converter.shard_transform_layout(layout)
            for layout, comm_spec in rst_dict.items():
                print(f'{layout.sharding_spec.sharding_sequence}: {comm_spec}')

        Output:
            [S01, R, R]: CommSpec:(comm_pattern:SPLIT_FWD_GATHER_BWD, gather_dim:0, shard_dim:0, logical_process_axis:1)
            [S0, S1, R]: CommSpec:(comm_pattern:SPLIT_FWD_GATHER_BWD, gather_dim:1, shard_dim:1, logical_process_axis:1)
            [S0, R, S1]: CommSpec:(comm_pattern:SPLIT_FWD_GATHER_BWD, gather_dim:2, shard_dim:2, logical_process_axis:1)
        """
        valid_spec_dict = {}
        comm_pattern = CollectiveCommPattern.SPLIT_FWD_GATHER_BWD
        source_spec = source_layout.sharding_spec

        # the key of the dict is the axis
        # the value is the process group
        current_rank = source_layout.device_mesh._global_rank_of_current_process
        process_group_dict = source_layout.device_mesh._process_group_dict[current_rank]

        # legal sharding dims means the mesh_id is still available to use.
        legal_sharding_dims = [i for i in range(len(source_layout.device_mesh.shape))]
        for dim, shard_list in source_spec.dim_partition_dict.items():
            for element in shard_list:
                legal_sharding_dims.remove(element)

        if len(legal_sharding_dims) == 0:
            return valid_spec_dict

        tensor_dims = source_spec.dims

        for index in range(tensor_dims):
            if index not in source_spec.dim_partition_dict:
                shard_list_list = shard_simulator((index, []), legal_sharding_dims)
            else:
                shard_list_list = shard_simulator((index, source_spec.dim_partition_dict[index]), legal_sharding_dims)
            if not shard_list_list:
                continue
            for shard_list in shard_list_list:
                new_dim_partition_dict = deepcopy(source_spec.dim_partition_dict)
                new_dim_partition_dict[index] = shard_list

                # generate the CommSpec to record the action of source_sharding_spec->new_sharding_spec
                shard_dim = index
                logical_process_axis = shard_list[-1]
                comm_spec = CommSpec(
                    comm_pattern,
                    process_group_dict=process_group_dict,
                    gather_dim=shard_dim,
                    shard_dim=shard_dim,
                    logical_process_axis=logical_process_axis,
                )

                # generate new sharding spec
                try:
                    new_sharding_spec = ShardingSpec(
                        dim_size=source_spec.dims, dim_partition_dict=new_dim_partition_dict
                    )
                    new_layout = Layout(
                        device_mesh=source_layout.device_mesh,
                        sharding_spec=new_sharding_spec,
                        global_shape=source_layout.global_shape,
                    )
                    valid_spec_dict[new_layout] = comm_spec
                except LayoutException:
                    pass
        return valid_spec_dict

    def get_all_one_step_transform_spec(self, source_layout: Layout) -> Dict[Layout, CommSpec]:
        """
        Get all valid layouts from source_layout with one step transform.

        Note:
            all-gather will eliminate a sharding dimension, all-to-all will keep sharding dimension
            same as before, and shard will add a sharding dimension. Therefore, the results of the
            above operations are mutually exclusive, so we could safely put them together.

        Argument:
            source_layout(Layout): the layout to be transformed.

        Return:
            valid_spec_dict(Dict[Layout, CommSpec]): all valid layouts from source_layout
                with one step transform.
        """
        valid_spec_dict = {}
        valid_spec_dict.update(self.all_gather_transform_layouts(source_layout))
        valid_spec_dict.update(self.all_to_all_transform_layout(source_layout))
        valid_spec_dict.update(self.shard_transform_layout(source_layout))
        return valid_spec_dict

    def layout_converting(
        self, source_layout: Layout, target_layout: Layout
    ) -> Tuple[List[Layout], List[CommSpec], float]:
        """
        This method will find a path to transform source_layout to target_layout with
        a greedy algorithm. The basic idea is:

        Step1:
            Generate all one-step transform sequences from source_layout.
        Step2:
            Pick the 'best' layout following the heuristic function.
        Step3:
            Repeat above steps until the source layout transform to target layout.

        Additionally, to avoid repeating the path search in runtime, we cached all solved paths
        at auto-parallel strategy building time, which could handle most of the cases in runtime.

        Args:
            source_layout(Layout): the layout to be transformed.
            target_layout(Layout): the layout to be achieved after a series of transforms.

        Return:
            transform_path(List[Layout]): The transform path from source_layout to target_layout,
                it contains the source_layout and target_layout.
            comm_action_sequence(List[CommSpec]): Keep the communication operations to complete
                the layout converting in order.

        Example:
            physical_mesh_id = torch.arange(0, 4)
            mesh_shape = (2, 2)
            # [[0, 1,
            #  [2, 3]]
            device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
            global_shape = (4, 4, 4)

            dim_partition_source = {1: [0, 1]}
            dim_partition_target = {0: [0, 1]}

            # [R,S01,R]
            sharding_spec_source = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_source)
            source_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_source, global_shape=global_shape)

            # [S01,R,R]
            sharding_spec_target = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_target)
            target_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_target, global_shape=global_shape)

            transform_path, comm_action_sequence = layout_converter.layout_converting(source_layout, target_layout)
            transform_path_str = '->'.join([str(layout.sharding_spec.sharding_sequence) for layout in transform_path])
            print(transform_path_str)

        output:
            [R, S01, R]->[R, S0, R]->[S0, R, R]->[S01, R, R]
        """
        source_spec = source_layout.sharding_spec
        target_spec = target_layout.sharding_spec
        MAX_TRANSFORM_STEPS = 20
        total_steps = 0
        transform_path = []
        comm_action_sequence: List[CommSpec] = []

        src_shape = source_layout.get_sharded_shape_per_device()
        dst_shape = target_layout.get_sharded_shape_per_device()

        spec_pairs = ((str(source_spec.sharding_sequence), src_shape), (str(target_spec.sharding_sequence), dst_shape))

        if spec_pairs in self.cached_solution:
            # Solution Cache hit

            def _group_alive_check(cached_comm_action_sequence):
                r"""
                Check if the process groups required for sharding have been deleted by the
                torch.distributed.destroy_process_group method. If not deleted, return True;
                otherwise, return False.

                Args:
                    cached_comm_action_sequence (List[CommSpec]): A list of communication
                        specifications representing actions.

                Returns:
                    bool: True if all process groups are still registered, False if at least
                        one has been deleted.

                Raises:
                    RuntimeError: If there is an error while checking the status of a process group.
                """
                # Collect all process groups used in communication actions from the cached sequence
                used_process_groups = [
                    pg for comm_spec in cached_comm_action_sequence for pg in comm_spec.process_group_dict.values()
                ]

                # Check if each process group is still alive
                for process_group in used_process_groups:
                    try:
                        dist.get_rank(process_group)
                    except (ValueError, RuntimeError) as e:
                        # If the group is not registered, it means it has been deleted
                        if str(e) == (
                            f"Group {process_group} is not registered, please create group with torch.distributed.new_group API"
                        ):
                            return False
                        elif str(e) == "The given group does not exist":
                            return False
                        else:
                            # Re-raise the exception if it's not related to group deletion
                            raise e
                # All process groups are alive
                return True

            cached_transform_path, cached_comm_action_sequence = self.cached_solution[spec_pairs]
            if _group_alive_check(cached_comm_action_sequence):
                # If all process groups have not been deleted, the cache is valid
                return cached_transform_path, cached_comm_action_sequence
            else:
                # If at least one process group has been deleted, the cache is invalid, so delete it
                del self.cached_solution[spec_pairs]

        # We do nothing if the sharding spec is all the same.
        if source_spec.spec_diff(target_spec) == 0:
            self.cached_solution[spec_pairs] = (transform_path, comm_action_sequence)
            return (
                transform_path,
                comm_action_sequence,
            )

        temp_sharding_layout = source_layout

        transform_path.append(temp_sharding_layout)
        # To avoid dead loop, the loop will break after MAX_TRANSFORM_STEPS transforms
        while total_steps <= MAX_TRANSFORM_STEPS:
            valid_transform_spec_dict = self.get_all_one_step_transform_spec(temp_sharding_layout)
            best_difference_score = math.inf

            for layout, comm_spec in valid_transform_spec_dict.items():
                sharding_spec = layout.sharding_spec
                spec_difference = sharding_spec.spec_diff(target_spec)

                if spec_difference == 0:
                    transform_path.append(layout)
                    comm_action_sequence.append(comm_spec)
                    self.cached_solution[spec_pairs] = (transform_path, comm_action_sequence)
                    return (transform_path, comm_action_sequence)

                if spec_difference < best_difference_score:
                    temp_sharding_layout = layout
                    temp_comm_spec = comm_spec
                    best_difference_score = spec_difference

            transform_path.append(temp_sharding_layout)
            comm_action_sequence.append(temp_comm_spec)

            total_steps += 1

        raise RuntimeError(f"Could not find a valid transform path with in {MAX_TRANSFORM_STEPS} steps.")

    def get_total_comm_cost(self, source_layout: Layout, target_layout: Layout) -> Dict[str, float]:
        """
        Get the total communication cost of the layout converting process.
        """
        transform_path, comm_action_sequence = self.layout_converting(source_layout, target_layout)
        total_cost = {"forward": 0.0, "backward": 0.0, "total": 0.0}
        for layout, comm_spec in zip(transform_path, comm_action_sequence):
            cost_dict = get_comm_cost(layout, comm_spec, self.forward_only)
            for key in total_cost:
                total_cost[key] += cost_dict[key]
        return total_cost

    def apply(self, tensor: torch.Tensor, source_layout: Layout, target_layout: Layout) -> torch.Tensor:
        """
        Apply target_layout to tensor with source layout, the transform path is generated by the
        layout_converting method.

        Argument:
            tensor (torch.Tensor): The tensor to be redistributed.
            source_layout(Layout): The source layout of the tensor.
            target_layout (Layout): The tensor will be redistributed to the target_layout.

        Example:
            layout_converter = LayoutConverter()
            dim_partition_source = {0: [0]}
            dim_partition_target = {1: [0]}
            physical_mesh_id = torch.arange(0, 4)
            mesh_shape = (2, 2)
            # [[0, 1,
            #  [2, 3]]
            device_mesh = DeviceMesh(physical_mesh_id, mesh_shape, init_process_group=True)
            global_shape = (4, 4, 4)

            # [S0,R,R]
            sharding_spec_source = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_source)
            source_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_source, global_shape=global_shape)

            # [R,S0,R]
            sharding_spec_target = ShardingSpec(dim_size=3, dim_partition_dict=dim_partition_target)
            target_layout = Layout(device_mesh=device_mesh, sharding_spec=sharding_spec_target, global_shape=global_shape)

            if rank in (0, 1):
                sharded_tensor_0 = torch.zeros(2, 1)
                sharded_tensor_1 = torch.ones(2, 1)
                # tensor([[0., 1.],
                #         [0., 1.]])
                tensor_to_comm = torch.cat((sharded_tensor_0, sharded_tensor_1), 1).cuda()
            if rank in (2, 3):
                sharded_tensor_0 = torch.ones(2, 1) * 2
                sharded_tensor_1 = torch.ones(2, 1) * 3
                # tensor([[2., 3.],
                #         [2., 3.]])
                tensor_to_comm = torch.cat((sharded_tensor_0, sharded_tensor_1), 1).cuda()

            # converted_tensor: [R, S0, R]
            converted_tensor = layout_converter.apply(tensor_to_comm, source_layout, target_layout)
            print(converted_tensor)

        Output in rank0 and rank1:
            tensor([[0.],
                    [0.],
                    [2.],
                    [2.]])

        Output in rank2 and rank3:
            tensor([[1.],
                    [1.],
                    [3.],
                    [3.]])
        """
        _, comm_action_sequence = self.layout_converting(source_layout, target_layout)

        target_tensor = tensor
        for comm_spec in comm_action_sequence:
            target_tensor = comm_spec.covert_spec_to_action(target_tensor)
        target_tensor.dist_layout = target_layout

        # restore the padding information
        if is_padded_tensor(tensor) and not is_padded_tensor(target_tensor):
            target_tensor = init_as_padded_tensor(
                target_tensor, tensor._current_length, tensor._origin_length, tensor._padding_dim
            )

        return target_tensor
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/d_tensor/utils.py
colossalai/tensor/d_tensor/utils.py
import operator
from functools import reduce
from typing import Dict

from colossalai.tensor.d_tensor.comm_spec import CollectiveCommPattern, CommSpec
from colossalai.tensor.d_tensor.layout import Layout


def get_comm_cost(layout: Layout, comm_spec: CommSpec, forward_only: bool = False) -> Dict[str, float]:
    """
    Compute the communication cost for a given layout and comm_spec.

    For all-gather, all-to-all, and all-reduce operations, the formula provided
    by DeviceMesh with the alpha-beta model is used to compute the communication
    cost. A shard operation is on-chip, so it is charged only a tiny constant cost.

    Args:
        layout: the layout of the tensor.
        comm_spec: the comm_spec to instruct the communication operation.
        forward_only: if True, only the forward communication cost is counted;
            otherwise both forward and backward communication costs are counted.

    Raises:
        ValueError: if ``comm_spec.comm_pattern`` is not a recognized pattern.
            (The original code fell through and crashed with UnboundLocalError;
            the explicit error names the actual problem.)
    """
    comm_size = reduce(operator.mul, layout.get_sharded_shape_per_device(), 1)
    device_mesh = layout.device_mesh
    comm_pattern = comm_spec.comm_pattern
    logical_process_axis = comm_spec.logical_process_axis

    if comm_pattern == CollectiveCommPattern.GATHER_FWD_SPLIT_BWD:
        # the comm size for all-gather is the size of the *gathered* tensor:
        # the per-device size times the mesh extent along the gathered axis
        gather_dim = comm_spec.gather_dim
        all_gather_axis = layout.sharding_spec.dim_partition_dict[gather_dim][-1]
        all_gather_size = device_mesh.shape[all_gather_axis]
        comm_size_for_all_gather = comm_size * all_gather_size
        forward_communication_cost = device_mesh.all_gather_cost(comm_size_for_all_gather, logical_process_axis)
        # give a tiny cost to shard
        backward_communication_cost = 100
    elif comm_pattern == CollectiveCommPattern.ALL2ALL_FWD_ALL2ALL_BWD:
        forward_communication_cost = device_mesh.all_to_all_cost(comm_size, logical_process_axis)
        # grad should have same shape as input tensor
        # all to all operation has same logical process axis as forward.
        backward_communication_cost = device_mesh.all_to_all_cost(comm_size, logical_process_axis)
    elif comm_pattern == CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD:
        forward_communication_cost = device_mesh.all_reduce_cost(comm_size, logical_process_axis)
        backward_communication_cost = 0
    elif comm_pattern == CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD:
        forward_communication_cost = 0
        backward_communication_cost = device_mesh.all_reduce_cost(comm_size, logical_process_axis)
    elif comm_pattern == CollectiveCommPattern.SPLIT_FWD_GATHER_BWD:
        # give a tiny cost to shard
        forward_communication_cost = 100
        backward_communication_cost = device_mesh.all_gather_cost(comm_size, logical_process_axis)
    else:
        raise ValueError(f"Unsupported communication pattern: {comm_pattern}")

    cost_dict = {}
    cost_dict["forward"] = forward_communication_cost
    cost_dict["backward"] = 0 if forward_only else backward_communication_cost
    cost_dict["total"] = cost_dict["forward"] + cost_dict["backward"]
    return cost_dict
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/d_tensor/misc.py
colossalai/tensor/d_tensor/misc.py
class LayoutException(Exception):
    """Base class for layout-related errors; catch this to handle any of the subtypes below."""

    pass


class DuplicatedShardingDimensionError(LayoutException):
    """Layout error: duplicated sharding dimension."""

    pass


class ShardingNotDivisibleError(LayoutException):
    """Layout error: sharding is not divisible."""

    pass


class ShardingOutOfIndexError(LayoutException):
    """Layout error: sharding index is out of range."""

    pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/d_tensor/__init__.py
colossalai/tensor/d_tensor/__init__.py
from .api import (
    compute_global_numel,
    customized_distributed_tensor_to_param,
    distribute_tensor,
    distribute_tensor_with_customization,
    get_device_mesh,
    get_global_shape,
    get_layout,
    get_shard_dim_1d,
    get_sharding_spec,
    init_as_dtensor,
    init_tensor_as_customization_distributed,
    is_customized_distributed_tensor,
    is_distributed_tensor,
    is_sharded,
    redistribute,
    shard_colwise,
    shard_rowwise,
    sharded_tensor_to_param,
    to_global,
    to_global_for_customized_distributed_tensor,
)
from .layout import Layout
from .sharding_spec import ShardingSpec

# Public API of the d_tensor package.
__all__ = [
    "is_distributed_tensor",
    "distribute_tensor",
    "init_as_dtensor",
    "to_global",
    "is_sharded",
    "shard_rowwise",
    "shard_colwise",
    "sharded_tensor_to_param",
    "compute_global_numel",
    "get_sharding_spec",
    "get_global_shape",
    "get_device_mesh",
    "redistribute",
    "get_layout",
    "get_shard_dim_1d",
    "is_customized_distributed_tensor",
    "distribute_tensor_with_customization",
    "init_tensor_as_customization_distributed",
    "to_global_for_customized_distributed_tensor",
    "customized_distributed_tensor_to_param",
    "Layout",
    "ShardingSpec",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/d_tensor/sharding_spec.py
colossalai/tensor/d_tensor/sharding_spec.py
from typing import Dict, List

from ..utils import merge_same_dim_mesh_list
from .misc import ShardingOutOfIndexError

# NOTE(review): the original __all__ also exported "ShardingException", a name
# that is not defined anywhere in this module, which broke `import *`.
__all__ = ["DimSpec", "ShardingSpec"]

# Heuristic cost constants used to score the communication required to convert
# one dim spec into another (see DimSpec._build_difference_2d_dict).
ALLGATHER_COST = 20
SHARD_COST = 5
STEP_PENALTY = 6
NAN = "nan"  # sentinel for "no known conversion path"


class DimSpec:
    """
    Sharding spec for a single dimension of a sharded tensor.

    It describes along which logical device-mesh axes the dimension is sharded
    and provides a method to compute the difference (an estimated communication
    cost) between two specs. This class is used internally in ShardingSpec.

    Argument:
        shard_list (List[int]): if shard_list is empty, the dim spec will be
            'R' (replica) type. Otherwise, each element in shard_list is a
            device-mesh axis along which the data of this dimension is sharded.
    """

    # Class-level lazy cache of the pairwise difference table, shared by all
    # instances (see difference_dict below).
    _DIFFERENCE_DICT = None

    def __init__(self, shard_list):
        self.is_replica = len(shard_list) == 0
        self.shard_list = shard_list

    def __eq__(self, other):
        # Two dim specs are equal iff their string forms ("R", "S0", ...) match.
        return str(self) == str(other)

    def __repr__(self):
        if self.is_replica:
            return "R"
        target = "S"
        for dim in self.shard_list:
            target += str(dim)
        return target

    @property
    def difference_dict(self):
        """
        Return the difference dict, lazily building it on first access.

        Return:
            difference_dict (Dict[Tuple[str, str], Union[int, str]]): maps a
                (source, target) spec-string pair to its conversion cost.
        """
        # Bug fix: the original assigned to ``self._DIFFERENCE_DICT``, which
        # creates an *instance* attribute and leaves the class attribute None,
        # so every new DimSpec rebuilt the whole table. Cache on the class so
        # it is built at most once per process.
        if DimSpec._DIFFERENCE_DICT is None:
            DimSpec._DIFFERENCE_DICT = self._build_difference_2d_dict()
        return DimSpec._DIFFERENCE_DICT

    def dim_diff(self, other):
        """
        The difference between two DimSpecs.

        Argument:
            other (DimSpec): the dim spec to compare with.

        Return:
            difference (int): the difference between the two DimSpecs.

        Example:
            dim_spec = DimSpec([0])
            other_dim_spec = DimSpec([0, 1])
            print(dim_spec.dim_diff(other_dim_spec))

        Output:
            5
        """
        difference = self.difference_dict[(str(self), str(other))]
        return difference

    @classmethod
    def _build_difference_2d_dict(cls):
        """
        Build a difference mapping for the 2D device-mesh case. It is used to
        compute the difference between any pair of DimSpecs.
        """
        source_spec_list = ["R", "S0", "S1", "S01"]
        target_spec_list = ["R", "S0", "S1", "S01"]
        difference_dict = {}
        for source_spec in source_spec_list:
            for target_spec in target_spec_list:
                source_shard_list = cls._convert_str_to_shard_list(source_spec)
                target_shard_list = cls._convert_str_to_shard_list(target_spec)

                # source same as target
                if source_shard_list == target_shard_list:
                    difference = 0
                # all_gather(source) -> target
                elif (
                    len(source_shard_list) == len(target_shard_list) + 1
                    and source_shard_list[:-1] == target_shard_list
                ):
                    difference = ALLGATHER_COST
                # shard(source) -> target
                elif (
                    len(source_shard_list) == len(target_shard_list) - 1
                    and source_shard_list == target_shard_list[:-1]
                    and target_shard_list[-1] not in source_shard_list
                ):
                    difference = SHARD_COST
                # S1 -> S0 or S0 -> S1
                elif len(source_shard_list) == len(target_shard_list):
                    # source -> R -> target
                    difference = ALLGATHER_COST + STEP_PENALTY + SHARD_COST
                # R -> S01
                elif len(source_shard_list) == len(target_shard_list) - 2:
                    difference = SHARD_COST + STEP_PENALTY + SHARD_COST
                # S01 -> R
                elif len(source_shard_list) == len(target_shard_list) + 2:
                    difference = ALLGATHER_COST + STEP_PENALTY + ALLGATHER_COST
                # S1 -> S01
                elif len(source_shard_list) == len(target_shard_list) - 1:
                    difference = ALLGATHER_COST + STEP_PENALTY + SHARD_COST + STEP_PENALTY + SHARD_COST
                # S01 -> S1
                elif len(source_shard_list) == len(target_shard_list) + 1:
                    difference = ALLGATHER_COST + STEP_PENALTY + ALLGATHER_COST + STEP_PENALTY + SHARD_COST
                else:
                    difference = NAN
                difference_dict[(source_spec, target_spec)] = difference

        return difference_dict

    @staticmethod
    def _convert_str_to_shard_list(str_spec):
        """
        Convert str_spec into its shard_list form.

        Argument:
            str_spec (str): dim spec in str type ("R", "S0", "S1" or "S01").

        Raises:
            ValueError: if str_spec is not one of the supported spec strings.
        """
        if str_spec == "R":
            return []
        if str_spec == "S0":
            return [0]
        if str_spec == "S1":
            return [1]
        if str_spec == "S01":
            return [0, 1]
        # Robustness fix: the original silently returned None for an unknown
        # spec string, which surfaced later as a confusing TypeError.
        raise ValueError(f"Unsupported dim spec string: {str_spec}")


class ShardingSpec:
    """
    Sharding spec describes how to shard a tensor with dim_size dimensions.

    For example, for a 3D tensor, the sharding sequence [R, S0, S1] means not
    sharding the first dim, sharding the 2nd dim along the 1st device-mesh axis
    (process group) and sharding the 3rd dim along the 2nd device-mesh axis.
    Useful for, say, 2D tensor parallelism.

    Argument:
        dim_partition_dict (Dict[int, List[int]], optional): The key is the
            dimension of the tensor to be sharded, and the value describes
            which logical device-mesh axes will shard that dimension.
        sharding_sequence (List[DimSpec], optional): A straight view of a
            ShardingSpec, e.g. [R, R, S0, S1].
    """

    def __init__(
        self, dim_size: int, dim_partition_dict: Dict[int, List[int]] = None, sharding_sequence: List[DimSpec] = None
    ):
        self.dims = dim_size
        self.dim_partition_dict = dim_partition_dict
        self.sharding_sequence = sharding_sequence
        # Exactly one of the two representations must be provided; the other
        # is derived from it so both stay available.
        if self.sharding_sequence is None:
            assert (
                self.dim_partition_dict is not None
            ), "dim_partition_dict should not be None, if sharding_sequence is NoneType object."
            self.dim_partition_dict = merge_same_dim_mesh_list(
                dim_size=self.dims, dim_partition_dict=self.dim_partition_dict
            )
            self.sharding_sequence = self.convert_dict_to_shard_sequence()
        elif self.dim_partition_dict is None:
            assert (
                self.sharding_sequence is not None
            ), "sharding_sequence should not be None, if dim_partition_dict is NoneType object."
            self.dim_partition_dict = self.convert_shard_sequence_to_dict()
        self._sanity_check()

    def _sanity_check(self):
        """Validate that the spec does not reference tensor dims beyond self.dims."""
        if len(self.sharding_sequence) > self.dims:
            raise ShardingOutOfIndexError(
                f"sharding_sequence should have {self.dims} elements, but got index {len(self.sharding_sequence)}."
            )

        if list(self.dim_partition_dict.keys()) and max(list(self.dim_partition_dict.keys())) >= self.dims:
            raise ShardingOutOfIndexError(
                f"the key of dim_partition_dict should be less than {self.dims}, but got {max(list(self.dim_partition_dict.keys()))}."
            )

    def __repr__(self):
        res_list = ["ShardingSpec:"]
        res_list.append(f"\n\tshard_sequence: " + ",".join(str(dimspec) for dimspec in self.sharding_sequence))
        return " ".join(res_list)

    def convert_dict_to_shard_sequence(self):
        """
        Convert dim_partition_dict into a list of DimSpec (the sharding sequence).
        """
        # Default every dim to replica, then overwrite the sharded dims.
        sharding_sequence = [DimSpec([])] * self.dims
        for dim, shard_list in self.dim_partition_dict.items():
            sharding_sequence[dim] = DimSpec(shard_list)
        return sharding_sequence

    def convert_shard_sequence_to_dict(self):
        """
        Convert sharding_sequence into dim_partition_dict.
        """
        new_dim_partition_dict = {}
        for index, dim_spec in enumerate(self.sharding_sequence):
            if not dim_spec.is_replica:
                if index not in new_dim_partition_dict:
                    new_dim_partition_dict[index] = []
                new_dim_partition_dict[index].extend(dim_spec.shard_list)
        return new_dim_partition_dict

    def spec_diff(self, other):
        """
        Naive difference computation between two sharding specs: accumulate the
        per-dimension DimSpec differences over the pair of sharding sequences.

        Example:
            dim_partition_dict = {0: [0, 1]}
            # DistSpec:
            #     shard_sequence: S01,R,R
            #     device_mesh_shape: (4, 4)
            sharding_spec = ShardingSpec(device_mesh, entire_shape, dim_partition_dict)
            dim_partition_dict_to_compare = {0: [0], 1: [1]}
            # DistSpec:
            #     shard_sequence: S0,S1,R
            #     device_mesh_shape: (4, 4)
            sharding_spec_to_compare = ShardingSpec(device_mesh, entire_shape, dim_partition_dict_to_compare)
            print(sharding_spec.sharding_sequence_difference(sharding_spec_to_compare))

        Output:
            25

        Argument:
            other (ShardingSpec): The ShardingSpec to compare with.

        Return:
            difference (int): Difference between the two ShardingSpecs.
        """
        assert len(self.sharding_sequence) == len(
            other.sharding_sequence
        ), "Cannot compare difference for two sharding specs with different length."
        difference = 0
        for orig_dim_spec, other_dim_spec in zip(self.sharding_sequence, other.sharding_sequence):
            difference += orig_dim_spec.dim_diff(other_dim_spec)
        return difference
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/tensor/d_tensor/layout.py
colossalai/tensor/d_tensor/layout.py
import operator
from functools import reduce

import torch

from colossalai.device.device_mesh import DeviceMesh

from .misc import DuplicatedShardingDimensionError, ShardingNotDivisibleError
from .sharding_spec import ShardingSpec


class Layout:
    """Layout of a tensor.

    Attributes:
        device_mesh: the device mesh on which the tensor is distributed.
        sharding_spec: the sharding specification describing how the tensor is sharded.
        global_shape: the entire shape of the global (unsharded) tensor.
    """

    def __init__(self, device_mesh: DeviceMesh, sharding_spec: ShardingSpec, global_shape: torch.Size):
        self.device_mesh = device_mesh
        self.sharding_spec = sharding_spec
        self.global_shape = global_shape
        self._sanity_check()

    def __hash__(self) -> int:
        # Hash only on the sharding spec's string form.
        return hash(f"{self.sharding_spec}")

    def get_sharded_shape_per_device(self):
        """Compute the shape of the local shard implied by this layout."""
        local_shape = list(self.global_shape)
        for dim, shard_list in self.sharding_spec.dim_partition_dict.items():
            # Number of partitions for this dim is the product of the sizes of
            # every device-mesh axis it is sharded along.
            shard_partitions = 1
            for mesh_dim in shard_list:
                shard_partitions *= self.device_mesh.shape[mesh_dim]
            assert (
                local_shape[dim] % shard_partitions == 0
            ), f"Cannot shard dimension {dim} into {shard_partitions} partitions."
            local_shape[dim] //= shard_partitions
        return torch.Size(local_shape)

    def _sanity_check(self):
        """Validate the sharding spec against the device mesh and global shape."""
        sharding_spec = self.sharding_spec

        # Each logical device-mesh axis may shard at most one tensor dimension.
        if self.device_mesh.logical_mesh_id is not None:
            available_axes = set(range(self.device_mesh.logical_mesh_id.dim()))
            for dim, shard_list in sharding_spec.dim_partition_dict.items():
                for element in shard_list:
                    if element not in available_axes:
                        raise DuplicatedShardingDimensionError(
                            f"find an invalid sharding axis {element} in dim_partition_dict in tensor dimension {dim}."
                        )
                    available_axes.remove(element)

        # Each sharded tensor dimension must be divisible by the number of
        # devices it is split across.
        for dim, shard_list in sharding_spec.dim_partition_dict.items():
            tensor_dim_size = self.global_shape[dim]
            num_devices = 1
            for element in shard_list:
                num_devices *= self.device_mesh.shape[element]

            if tensor_dim_size % num_devices != 0:
                raise ShardingNotDivisibleError(
                    f"The size of dimension at index {dim} is {tensor_dim_size}, it cannot be sharded over {num_devices} devices."
                )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/checkpoint_io/checkpoint_io_base.py
colossalai/checkpoint_io/checkpoint_io_base.py
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, Optional, Union

import torch
import torch.nn as nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler

from colossalai.interface import ModelWrapper
from colossalai.logging import get_dist_logger

from .utils import SAFE_WEIGHTS_NAME, WEIGHTS_NAME, has_index_file

__all__ = ["CheckpointIO"]


class CheckpointIO(ABC):
    """
    CheckpointIO is the base class for all checkpoint IO classes. It defines the interface for checkpoint IO.

    Examples:
        >>> from colossalai.checkpoint_io import GeneralCheckpointIO
        >>> checkpoint_io = CheckpointIO()
        >>>
        >>> # load model from checkpoint
        >>> model = checkpoint_io.load_model(model, 'model.pt')
        >>>
        >>> # save model to checkpoint, any distributed tensor is gathered by default
        >>> checkpoint_io.save_model(model, 'model.pt')
        >>>
        >>> # if the model contains distributed tensor, and you don't want to gather it
        >>> # each rank will save its own shard of the distributed tensor
        >>> checkpoint_io.save_model(model, 'model.pt', gather_dtensor=False)
        >>>
        >>> # save model to sharded checkpoints
        >>> checkpoint_io.save_model(model, './checkpoints/', shard=True)
        >>>
        >>> # save model to sharded and assume we don't want to gather distributed tensors
        >>> checkpoint_io.save_model(model, './checkpoints/', shard=True, gather_dtensor=False)
        >>>
        >>> # Note:
        >>> # 1. we don't support loading from distributed tensors, conversion from distributed tensors
        >>> #    checkpoints to full tensor checkpoint should be done offline via our CLI
        >>> # 2. you don't have to specify whether the model is sharded or not when loading the model
        >>> #    as it will be automatically detected
        >>>
        >>> # load model from sharded checkpoints
        >>> model = checkpoint_io.load_model(model, './checkpoints/')
        >>>
        >>> # load model from unsharded checkpoints
        >>> model = checkpoint_io.load_model(model, './checkpoints/')
        >>>
        >>> # load optimizer from checkpoint
        >>> optimizer = checkpoint_io.load_optimizer(optimizer, 'optimizer.pt')
        >>>
        >>> # save optimizer to checkpoint
        >>> checkpoint_io.save_optimizer(optimizer, 'optimizer.pt')
    """

    # ======================================
    # Public methods
    # ======================================
    def __init__(self):
        super().__init__()
        # Pinned-memory copies of state dicts, keyed by hash(model)/id, reused
        # across async saves to avoid re-allocating pinned buffers.
        self.pinned_state_dicts: Dict[int, dict] = {}
        # In-flight async checkpoint writers; must be synchronized before the
        # next save or before the process exits.
        self.async_writers = []

    def _sync_io(self):
        # Wait for all pending async disk writes to finish, then drop the writers.
        for writer in self.async_writers:
            writer.synchronize()
        self.async_writers.clear()

    def _sync_d2h(self):
        # Wait for all pending device-to-host copies of the async writers.
        for writer in self.async_writers:
            writer.sync_before_step()

    def synchronize(self):
        """This method must be called before updating the model weights."""
        self._sync_d2h()

    def __del__(self):
        # Best-effort flush of pending async work on garbage collection.
        # NOTE(review): running this during interpreter shutdown relies on the
        # writers still being usable at that point — confirm if issues arise.
        self._sync_d2h()
        self._sync_io()

    def load_model(
        self,
        model: Union[nn.Module, ModelWrapper],
        checkpoint: str,
        strict: bool = True,
        low_cpu_mem_mode: bool = True,
        num_threads: int = 1,
    ) -> Union[nn.Module, ModelWrapper]:
        """
        Load model from checkpoint.

        Args:
            model (nn.Module): model to be loaded.
            checkpoint (str): checkpoint path. This value is made compatible with the model checkpoints in the
                mainstream model zoos such as Hugging Face and TIMM. The checkpoint path can be:
                    1. a file path, e.g. 'model.pt'
                    2. a path to a json file which defines the index to the sharded checkpoint
                    3. a path to a folder containing a unique .index.json file for sharded checkpoint
                Distributed tensors cannot be loaded directly unless gathered offline via our CLI.
            strict (bool): whether to strictly enforce that the param name in
                the checkpoint match the keys returned by this module's.
            low_cpu_mem_mode (bool): whether to load the model in low cpu memory mode. If false, it will use RAM cache to accelerate loading. Default: True.
            num_threads (int): number of threads to use when loading the model. Only useful when disabling low cpu mem mode. Default: 1.
        """
        # since we only support loaded sharded and unsharded weight format
        # containing no distributed tensors, dtensor -> full tensor conversion
        # should be done offline via our CLI
        # the existence of index file means it is a sharded checkpoint
        index_file_exists, index_file_path = has_index_file(checkpoint)

        # return the origin model instead of the unwrapped model
        origin_model = model

        if index_file_exists:
            self.load_sharded_model(
                model, index_file_path, strict, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads
            )
        else:
            # Prefer a safetensors weight file in the directory, then the
            # legacy .bin name, then treat the path itself as a weight file.
            path = Path(checkpoint, SAFE_WEIGHTS_NAME)
            if path.is_file():
                self.load_unsharded_model(
                    model, str(path), strict, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads
                )
            else:
                path = Path(checkpoint, WEIGHTS_NAME)
                if path.is_file():
                    self.load_unsharded_model(
                        model, str(path), strict, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads
                    )
                else:
                    self.load_unsharded_model(
                        model, checkpoint, strict, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads
                    )

        return origin_model

    def save_model(
        self,
        model: Union[nn.Module, ModelWrapper],
        checkpoint: str,
        shard: bool = False,
        gather_dtensor: bool = True,
        prefix: str = None,
        size_per_shard: int = 1024,
        use_safetensors: bool = False,
        use_async: bool = False,
    ):
        """
        Save model to checkpoint.

        Examples:
            >>> from colossalai.checkpoint_io import GeneralCheckpointIO
            >>> checkpoint_io = CheckpointIO()
            >>>
            >>> # save model to a single file
            >>> save_model(model, 'model.pt')
            >>>
            >>> # save model to a sharded checkpoint
            >>> save_model(model, './checkpoints/', shard=True)

        Args:
            model (nn.Module): model to be saved.
            checkpoint (str): checkpoint path. The checkpoint path can be :
                1. a file path, e.g. 'model.pt'
                2. a directory path to save the sharded checkpoint, e.g. './checkpoints/' when shard = True.
            shard (bool): whether to shard the checkpoint. Default: False. If set to True, the checkpoint will be sharded into
                multiple files. The model shards will be specified by a `model.index.json` file. When shard = True, please ensure
                that the checkpoint path is a directory path instead of a file path.
            gather_dtensor (bool): whether to gather the distributed tensor to the first device. Default: True.
            prefix (str): If specified, weights are saved in the format pytorch_model.<prefix>.bin. Default: None.
            size_per_shard (int): size per shard in MB. Default: 1024. This value is only used when shard = True.
            use_safetensors (bool): whether to use safe tensors. Default: False. If set to True, the checkpoint will be saved
                in safetensors format.
            use_async (bool): whether to save the state_dict asynchronously. Requires safetensors. Default: False.
        """
        # Any previous async save must finish before starting a new one.
        self._sync_io()
        if use_async and not use_safetensors:
            logger = get_dist_logger()
            logger.warning(
                "Async save is only supported when use_safetensors is set to True. "
                "Setting use_safetensors to True for async save."
            )
            use_safetensors = True

        if shard:
            self.save_sharded_model(
                model, checkpoint, gather_dtensor, prefix, size_per_shard, use_safetensors, use_async
            )
        else:
            self.save_unsharded_model(model, checkpoint, gather_dtensor, use_safetensors, use_async)

    def load_optimizer(
        self,
        optimizer: Optimizer,
        checkpoint: str,
        prefix: str = None,
        low_cpu_mem_mode: bool = True,
        num_threads: int = 1,
    ):
        """
        Load optimizer from checkpoint.

        Args:
            optimizer (Optimizer): optimizer to be loaded.
            checkpoint (str): checkpoint path. This value is made compatible with the model checkpoints in the
                mainstream model zoos.
            prefix (str, optional): A prefix added to parameter and buffer
                names to compose the keys in state_dict. Defaults to None.
            low_cpu_mem_mode (bool): whether to load the model in low cpu memory mode. If false, it will use RAM cache to accelerate loading. Default: True.
            num_threads (int): number of threads to use when loading the model. Only useful when disabling low cpu mem mode. Default: 1.
        """
        index_file_exists, index_file_path = has_index_file(checkpoint)

        if Path(checkpoint).is_dir() and not index_file_exists:
            # if the checkpoint is a directory and there is no index file, raise error
            raise ValueError(f"Cannot find index file in {checkpoint}")

        if index_file_exists:
            # the existence of index file means it is a sharded checkpoint
            self.load_sharded_optimizer(
                optimizer, index_file_path, prefix, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads
            )
        else:
            self.load_unsharded_optimizer(
                optimizer, checkpoint, low_cpu_mem_mode=low_cpu_mem_mode, num_threads=num_threads
            )

    def save_optimizer(
        self,
        optimizer: Optimizer,
        checkpoint: str,
        shard: bool = False,
        gather_dtensor=True,
        prefix: str = None,
        size_per_shard: int = 1024,
        use_async: bool = False,
    ):
        """
        Save optimizer to checkpoint. Optimizer states saving is not compatible with safetensors.

        Args:
            optimizer (Optimizer): optimizer to be saved.
            checkpoint (str): checkpoint path. The checkpoint path can be :
                1. a file path, e.g. 'model.pt'
                2. a path to a json file which defines the index to the sharded checkpoint for the optimizer
                3. a path to a folder containing a unique .index.json file for sharded checkpoint
            shard (bool): whether to shard the checkpoint. Default: False. If set to True, the checkpoint will be sharded into
                multiple files. The optimizer shards will be specified by a `optimizer.index.json` file.
            gather_dtensor (bool): whether to gather the distributed tensor to the first device. Default: True.
            prefix (str): prefix for the optimizer checkpoint when shard = True. Default: None.
            size_per_shard (int): size per shard in MB. Default: 1024. This value is only used when shard is set to True.
            use_async (bool): whether to save the state_dict asynchronously. Default: False.
        """
        if shard:
            self.save_sharded_optimizer(
                optimizer, checkpoint, gather_dtensor, prefix, size_per_shard, use_async=use_async
            )
        else:
            self.save_unsharded_optimizer(optimizer, checkpoint, gather_dtensor, use_async=use_async)

    # ========================================================
    # Abstract methods for model loading/saving implementation
    # ========================================================

    @abstractmethod
    def load_sharded_model(
        self, model: nn.Module, index_file_path: str, strict: bool, low_cpu_mem_mode: bool = True, num_threads: int = 1
    ):
        """
        Load model from sharded checkpoint.

        Args:
            model (nn.Module): model to be loaded.
            index_file_path (str): checkpoint path. It should be path to the .index.json file or a path to a directory which contains a .index.json file.
            strict (bool): whether to strictly enforce that the param name in
                the checkpoint match the keys returned by this module's.
            low_cpu_mem_mode (bool): whether to load the model in low cpu memory mode. If false, it will use RAM cache to accelerate loading. Default: True.
            num_threads (int): number of threads to use when loading the model. Only useful when disabling low cpu mem mode. Default: 1.
        """

    @abstractmethod
    def load_unsharded_model(
        self, model: nn.Module, checkpoint: str, strict: bool, low_cpu_mem_mode: bool = True, num_threads: int = 1
    ):
        """
        Load model from unsharded checkpoint.

        Args:
            model (nn.Module): model to be loaded.
            checkpoint (str): checkpoint path. It should be a single file path pointing to a model weight binary.
            strict (bool): whether to strictly enforce that the param name in
                the checkpoint match the keys returned by this module's.
            low_cpu_mem_mode (bool): whether to load the model in low cpu memory mode. If false, it will use RAM cache to accelerate loading. Default: True.
            num_threads (int): number of threads to use when loading the model. Only useful when disabling low cpu mem mode. Default: 1.
        """

    @abstractmethod
    def save_sharded_model(
        self,
        model: nn.Module,
        checkpoint: str,
        gather_dtensor: bool,
        prefix: Optional[str],
        size_per_shard: int,
        use_safetensors: bool,
        use_async: bool = False,
    ):
        """
        Save model to sharded checkpoint.

        Args:
            model (nn.Module): model to be saved.
            checkpoint (str): checkpoint path. It should be a directory path.
            gather_dtensor (bool): whether to gather the distributed tensor to the first device.
            prefix (str): prefix for the model checkpoint.
            size_per_shard (int): size per shard in MB.
            use_safetensors (bool): whether to use safe tensors.
        """

    @abstractmethod
    def save_unsharded_model(
        self, model: nn.Module, checkpoint: str, gather_dtensor: bool, use_safetensors: bool, use_async: bool = False
    ):
        """
        Save model to unsharded checkpoint.

        Args:
            model (nn.Module): model to be saved.
            checkpoint (str): checkpoint path. It should be a single file path pointing to a model weight binary.
            gather_dtensor (bool): whether to gather the distributed tensor to the first device.
            use_safetensors (bool): whether to use safe tensors.
        """

    # ========================================================
    # Abstract methods for optimizer loading/saving implementation
    # ========================================================

    @abstractmethod
    def load_sharded_optimizer(
        self,
        optimizer: Optimizer,
        index_file_path: str,
        prefix: str,
        low_cpu_mem_mode: bool = True,
        num_threads: int = 1,
    ):
        """
        Load optimizer from sharded checkpoint.

        Args:
            optimizer (Optimizer): optimizer to be loaded.
            index_file_path (str): checkpoint path. It should be path to the .index.json file or a path to a directory which contains a .index.json file.
            prefix (str): prefix for the optimizer checkpoint.
            low_cpu_mem_mode (bool): whether to load the model in low cpu memory mode. If false, it will use RAM cache to accelerate loading. Default: True.
            num_threads (int): number of threads to use when loading the model. Only useful when disabling low cpu mem mode. Default: 1.
        """

    @abstractmethod
    def load_unsharded_optimizer(
        self, optimizer: Optimizer, checkpoint: Path, low_cpu_mem_mode: bool = True, num_threads: int = 1
    ):
        """
        Load optimizer from unsharded checkpoint.

        Args:
            optimizer (Optimizer): optimizer to be loaded.
            checkpoint (str): checkpoint path. It should be a single file path pointing to a model weight binary.
            low_cpu_mem_mode (bool): whether to load the model in low cpu memory mode. If false, it will use RAM cache to accelerate loading. Default: True.
            num_threads (int): number of threads to use when loading the model. Only useful when disabling low cpu mem mode. Default: 1.
        """

    @abstractmethod
    def save_sharded_optimizer(
        self,
        optimizer: Optimizer,
        checkpoint: Path,
        gather_dtensor: bool,
        prefix: str,
        size_per_shard: int,
        use_async: bool = False,
    ):
        """
        Save optimizer to sharded checkpoint.

        Args:
            optimizer (Optimizer): optimizer to be saved.
            checkpoint (Path): checkpoint path. It should be a directory path.
            gather_dtensor (bool): whether to gather the distributed tensor to the first device.
            prefix (str): prefix for the optimizer checkpoint.
            size_per_shard (int): size per shard in MB.
        """

    @abstractmethod
    def save_unsharded_optimizer(
        self, optimizer: Optimizer, checkpoint: Path, gather_dtensor: bool, use_async: bool = False
    ):
        """
        Save optimizer to unsharded checkpoint.

        Args:
            optimizer (Optimizer): optimizer to be saved.
            checkpoint (str): checkpoint path. It should be a single file path pointing to a model weight binary.
            gather_dtensor (bool): whether to gather the distributed tensor to the first device.
        """

    # ============================================
    # methods for loading and saving lr scheduler
    # as this is quite standard, there is no need
    # to make them abstract
    # ============================================
    def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
        """
        Save lr scheduler to checkpoint.

        Args:
            lr_scheduler (LRScheduler): lr scheduler to be saved.
            checkpoint: checkpoint path. The checkpoint path can only be a file path.
        """
        torch.save(lr_scheduler.state_dict(), checkpoint)

    def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):
        """
        Load lr scheduler from checkpoint.

        Args:
            lr_scheduler (LRScheduler): lr scheduler to be loaded.
            checkpoint (str): the path for a single checkpoint file.
        """
        state_dict = torch.load(checkpoint)
        lr_scheduler.load_state_dict(state_dict)

    # ================================================================================
    # Abstract method for lora saving implementation.
    # ================================================================================

    @abstractmethod
    def save_lora_as_pretrained(
        self,
        model: Union[nn.Module, ModelWrapper],
        checkpoint: str,
        use_safetensors: bool = False,
        state_dict: Optional[dict] = None,
    ) -> None:
        """
        Save the lora adapters and adapter configuration file to a pretrained checkpoint directory.

        Args:
            model (Union[nn.Module, ModelWrapper]): A model boosted by Booster.
            checkpoint (str): Path to the checkpoint directory. It must be a local path.
            use_safetensors (bool, optional): Whether to use safe tensors when saving. Defaults to False.
            state_dict (Optional[dict], optional): The state dict to save. Defaults to None.
        """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/checkpoint_io/index_file.py
colossalai/checkpoint_io/index_file.py
import json
import os
from collections import OrderedDict
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union

from .utils import is_dtensor_checkpoint

__all__ = ["CheckpointIndexFile"]


class CheckpointIndexFile:
    """
    This class is a data structure to keep the content in the index.json file for sharded checkpoint.

    Example:
        >>> index = CheckpointIndexFile.from_file('model.index.json')
        >>> index.append_meta_data('model_type', 'bert')
        >>> index.append_weight_map('bert.embeddings.word_embeddings.weight', 'model_0001-of-0002.bin')
        >>> index.export('new_index.json')
    """

    # NOTE(review): the docstring example previously called ``append_metadata``,
    # which does not exist; the method is ``append_meta_data``.

    def __init__(self, root_path=None) -> None:
        # root_path may be None until load() is called — methods that resolve
        # absolute paths (get_checkpoint_filenames, write_index_file, ...)
        # require it to be set.
        self.root_path = root_path

        # use ordered dict to preserve the tensor checkpoint order
        self.metadata: Dict = OrderedDict()
        self.weight_map: Dict = OrderedDict()

    @staticmethod
    def from_file(index_path: Union[str, Path]):
        """
        Create a CheckpointIndexFile object from a json file.

        Args:
            index_path (str): path to the json file.

        Returns:
            CheckpointIndexFile: CheckpointIndexFile object.
        """
        index = CheckpointIndexFile()
        index.load(index_path)
        return index

    def load(self, json_path: Union[str, Path]):
        """
        Load the index file from a json file.

        Args:
            json_path (str): path to the json file.
        """
        # load the json file
        with open(json_path, "r") as f:
            index = json.load(f)

        # assign attributes if exists
        if "metadata" in index:
            self.metadata = index["metadata"]
        if "weight_map" in index:
            self.weight_map = index["weight_map"]

        # assign the root directory for the index file
        self.root_path = Path(json_path).absolute().parent

    def export(self, json_path: str):
        """
        Export the index file to a json file.

        Args:
            json_path (str): path to the json file.
        """
        # create the index file
        index = dict()
        index["metadata"] = self.metadata
        index["weight_map"] = self.weight_map

        # export the index file
        with open(json_path, "w") as f:
            json.dump(index, f, indent=4)

    def append_weight_map(self, param_name: str, shard_file: str):
        """
        Append a weight map entry to the index file.

        Args:
            param_name (str): name of the parameter.
            shard_file (str): name of the shard file.
        """
        self.weight_map[param_name] = shard_file

    def append_meta_data(self, name: str, val: Any):
        """
        Append a metadata entry to the index file.

        Args:
            name (str): name of the metadata.
            val (Any): value of the metadata.
        """
        self.metadata[name] = val

    def contains_dtensor(self) -> bool:
        """
        Check if the index file contains any distributed tensor. The distributed tensors will be stored in
        `dtensor/module.linear.weight.*.bin` or `dtensor/module.linear.weight.*.safetensors` in the weight map.

        Returns:
            bool: True if the index file contains any distributed tensor, False otherwise.
        """
        for value in self.weight_map.values():
            if value.endswith(".*.bin") or value.endswith(".*.safetensors"):
                return True
        return False

    def get_checkpoint_filenames(self) -> Tuple[List[str], List[str]]:
        """
        Get the absolute paths of all checkpoint files in the weight map, split into
        regular shard files and distributed-tensor shard files.

        Bug fix: this method was annotated/documented as returning a single list,
        but it has always returned a 2-tuple; the annotation and docstring now
        reflect the actual behavior.

        Returns:
            Tuple[List[str], List[str]]: (checkpoint_list, dtensor_list) — the
                absolute paths of regular checkpoint shards and of distributed
                tensor shards respectively.
        """
        # read the checkpoint file list from the json file and get a list of unique file names
        checkpoint_files = sorted(list(set(self.weight_map.values())))

        # get the absolute paths for all checkpoint files
        checkpoint_files = [str(self.root_path.joinpath(f)) for f in checkpoint_files]

        dtensor_list = []
        checkpoint_list = []

        for ckpt_file in checkpoint_files:
            if is_dtensor_checkpoint(ckpt_file):
                dtensor_list.append(ckpt_file)
            else:
                checkpoint_list.append(ckpt_file)

        return checkpoint_list, dtensor_list

    def assert_no_dtensor_checkpoint(self):
        """Raise ValueError if any weight-map entry points at a distributed-tensor shard."""
        for val in self.weight_map.values():
            if is_dtensor_checkpoint(val):
                raise ValueError(f"Checkpoint file {val} contains distributed tensor")

    def get_checkpoint_file(self, param_name: str) -> str:
        """
        Get the checkpoint file name for a parameter.

        Args:
            param_name (str): name of the parameter.

        Returns:
            str: checkpoint file name.
        """
        ckpt_path = self.weight_map[param_name]
        return ckpt_path

    def get_all_param_names(self):
        """
        Get all the weight keys.
        """
        return list(self.weight_map.keys())

    def get_param_group_filename(self) -> Union[str, None]:
        """
        Get the file name of param_group file if this is a checkpoint for optimizer.

        Returns:
            str: param_group file name, or None if no param_groups entry exists.
        """
        filename = self.metadata.get("param_groups", None)
        if filename:
            return str(self.root_path.joinpath(filename))
        else:
            return None

    def write_index_file(self, save_index_file):
        """
        Write index file under root_path.
        """
        save_index_file = os.path.join(self.root_path, save_index_file)
        index = {"metadata": self.metadata, "weight_map": self.weight_map}
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2) + "\n"
            f.write(content)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/checkpoint_io/general_checkpoint_io.py
colossalai/checkpoint_io/general_checkpoint_io.py
import logging import os from functools import reduce from pathlib import Path from typing import Optional import torch.nn as nn from torch.optim import Optimizer from colossalai.utils.safetensors import load_flat from .checkpoint_io_base import CheckpointIO from .index_file import CheckpointIndexFile from .utils import ( async_move_save_state_dict_shards, create_pinned_state_dict, get_model_base_filenames, get_optimizer_base_filenames, is_safetensors_available, load_param_groups_into_optimizer, load_state_dict, load_state_dict_into_model, load_state_dict_shards, load_states_into_optimizer, save_config_file, save_param_groups, save_state_dict, save_state_dict_shards, shard_model_checkpoint, shard_optimizer_checkpoint, sharded_optimizer_loading_epilogue, ) __all__ = ["GeneralCheckpointIO"] class GeneralCheckpointIO(CheckpointIO): """ Checkpoint IO """ def load_unsharded_model( self, model: nn.Module, checkpoint: str, strict: bool, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): checkpoint = load_state_dict(checkpoint) if not low_cpu_mem_mode: checkpoint = create_pinned_state_dict(checkpoint, empty=False, num_threads=num_threads) model.load_state_dict(checkpoint, strict=strict) def save_unsharded_model( self, model: nn.Module, checkpoint: str, gather_dtensor: bool, use_safetensors: bool, use_async: bool = False ): state_dict = model.state_dict() if use_async: from colossalai.utils.safetensors import move_and_save if hash(model) not in self.pinned_state_dicts: self.pinned_state_dicts[hash(model)] = create_pinned_state_dict(state_dict) writer = move_and_save(checkpoint, state_dict, self.pinned_state_dicts[hash(model)]) self.async_writers.append(writer) else: # save the checkpoint save_state_dict(state_dict, checkpoint, use_safetensors) def load_sharded_optimizer( self, optimizer: Optimizer, index_file_path: str, prefix: str, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load sharded optimizer with the given path to index file. 
""" # Read checkpoint index file. ckpt_index_file = CheckpointIndexFile.from_file(index_file_path) # Load param_groups param_group_path = ckpt_index_file.get_param_group_filename() if param_group_path is None: raise RuntimeError( f"Invalid index file path {index_file_path} for an optimizer. \ Lacking param group file under current directory." ) id_map = load_param_groups_into_optimizer(optimizer, param_group_path) checkpoint_files, _ = ckpt_index_file.get_checkpoint_filenames() for state_dict in load_state_dict_shards(checkpoint_files, True, False, low_cpu_mem_mode): if not low_cpu_mem_mode: state_dict = create_pinned_state_dict(state_dict, empty=False, num_threads=num_threads) load_states_into_optimizer(optimizer, state_dict, id_map) sharded_optimizer_loading_epilogue(optimizer) def save_sharded_optimizer( self, optimizer: Optimizer, checkpoint: Path, gather_dtensor: bool, prefix: str, size_per_shard: int, use_async: bool = False, ): """ Save sharded optimizer checkpoint under the given checkpointing path. The following files will be created under the path: - An index file (pytorch_optim.bin.index.json) containing a map between optimizer states and file names - A group file (pytorch_optim_group.bin) recording information of param_groups - Multiple files (pytorch_optim-000XX.bin) that store state tensors of optimizer in a sharding way """ if os.path.isfile(checkpoint): logging.error(f"Provided path ({checkpoint}) should be a directory, not a file") return Path(checkpoint).mkdir(parents=True, exist_ok=True) # Offload optimizer states. States are broken into shards within max_shard_size. state_dict = optimizer.state_dict() sharded_state = shard_optimizer_checkpoint(state_dict, max_shard_size=size_per_shard) # Preparing file paths and index file. states_name, save_index_file, param_group_file = get_optimizer_base_filenames(prefix, use_safetensors=use_async) index_file = CheckpointIndexFile(checkpoint) # Store the information of param groups to param_group_file. 
index_file.append_meta_data("param_groups", param_group_file) group_file_path = os.path.join(checkpoint, param_group_file) save_param_groups(state_dict, group_file_path) # Save shards of optimizer states. # In general cases, is_master is set to True to get the right behavior. if use_async: pinned_state_dict = self.pinned_state_dicts.get(id(optimizer), None) total_size, new_pinned_state_dict, writers = async_move_save_state_dict_shards( sharded_state_dict=sharded_state, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=True, pinned_state_dict=pinned_state_dict, state_preprocess=True, ) self.pinned_state_dicts[id(optimizer)] = new_pinned_state_dict self.async_writers.extend(writers) else: total_size = save_state_dict_shards( sharded_state_dict=sharded_state, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=True, use_safetensors=False, ) # Wrap up index file. index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) logging.info( f"The optimizer is going to be split to checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}." 
) def load_unsharded_optimizer( self, optimizer: Optimizer, checkpoint: Path, low_cpu_mem_mode: bool = True, num_threads: int = 1 ): if checkpoint.endswith(".safetensors"): checkpoint = load_flat(checkpoint) else: checkpoint = load_state_dict(checkpoint) if not low_cpu_mem_mode: checkpoint = create_pinned_state_dict(checkpoint, empty=False, num_threads=num_threads) optimizer.load_state_dict(checkpoint) def save_unsharded_optimizer( self, optimizer: Optimizer, checkpoint: Path, gather_dtensor: bool, use_async: bool = False, ): # TODO(FrankLeeeee): handle distributed tensors state_dict = optimizer.state_dict() if use_async: from colossalai.utils.safetensors import _flatten_optim_state_dict, move_and_save flatten_state_dict, metadata = _flatten_optim_state_dict(state_dict) if id(optimizer) not in self.pinned_state_dicts: self.pinned_state_dicts[id(optimizer)] = create_pinned_state_dict(flatten_state_dict) writer = move_and_save( path=checkpoint, state_dict=flatten_state_dict, state_dict_pinned=self.pinned_state_dicts[id(optimizer)], metadata=metadata, ) self.async_writers.append(writer) else: save_state_dict(state_dict, checkpoint, use_safetensors=False) def save_sharded_model( self, model: nn.Module, checkpoint_path: str, gather_dtensor: bool = False, prefix: Optional[str] = None, max_shard_size: int = 1024, use_safetensors: bool = False, use_async: bool = False, ): """ implement this method as it can be supported by Huggingface model, save shard model, save model to multiple files """ if os.path.isfile(checkpoint_path): logging.error(f"Provided path ({checkpoint_path}) should be a directory, not a file") return Path(checkpoint_path).mkdir(parents=True, exist_ok=True) # shard checkpoint state_dict = model.state_dict() state_dict_shard = shard_model_checkpoint(state_dict, max_shard_size=max_shard_size) weights_name, save_index_file = get_model_base_filenames(prefix, use_safetensors) index_file = CheckpointIndexFile(checkpoint_path) if use_async: pinned_state_dict = 
self.pinned_state_dicts.get(hash(model), None) total_size, new_pinned_state_dict, writers = async_move_save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint_path, index_file=index_file, base_filename=weights_name, is_master=True, pinned_state_dict=pinned_state_dict, ) self.pinned_state_dicts[hash(model)] = new_pinned_state_dict self.async_writers.extend(writers) else: # Save shards of optimizer states. # In general cases, is_master is set to True to get the right behavior. total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint_path, index_file=index_file, base_filename=weights_name, is_master=True, use_safetensors=use_safetensors, ) index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) save_config_file(model, checkpoint_path, is_master=True) logging.info( f"The model is going to be split to checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}." 
) def load_sharded_model( self, model: nn.Module, checkpoint_index_file: Path, strict: bool = False, use_safetensors: bool = False, load_sub_module: bool = True, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ load shard model, load model from multiple files """ use_safetensors = False if "safetensors" in checkpoint_index_file.name: use_safetensors = True if use_safetensors and not is_safetensors_available(): raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.") # read checkpoint index file ckpt_index_file = CheckpointIndexFile.from_file(checkpoint_index_file) checkpoint_files, _ = ckpt_index_file.get_checkpoint_filenames() missing_keys = [] for state_dict in load_state_dict_shards(checkpoint_files, False, use_safetensors, low_cpu_mem_mode): if not low_cpu_mem_mode: state_dict = create_pinned_state_dict(state_dict, empty=False, num_threads=num_threads) load_state_dict_into_model(model, state_dict, missing_keys, strict, load_sub_module) if strict: remain_keys = reduce(lambda a, b: a & b, map(set, missing_keys)) if len(remain_keys) > 0: error_msgs = [ "Missing key(s) in state_dict: {}. ".format(", ".join('"{}"'.format(k) for k in remain_keys)) ] raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( self.__class__.__name__, "\n\t".join(error_msgs) ) ) def save_lora_as_pretrained( self, model: nn.Module, checkpoint: str, use_safetensors: bool = False, state_dict: Optional[dict] = None ) -> None: raise NotImplementedError
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/checkpoint_io/utils.py
colossalai/checkpoint_io/utils.py
# coding=utf-8 import concurrent.futures import os import re import warnings from collections import abc as container_abcs from collections import defaultdict from itertools import chain from pathlib import Path from typing import Dict, Generator, Iterator, List, Mapping, Optional, OrderedDict, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from packaging.version import Version from peft import PeftModel, PeftType from peft.utils.other import EMBEDDING_LAYER_NAMES, check_file_exists_on_hf_hub from peft.utils.save_and_load import get_embedding_layer_name, has_valid_embedding_base_layer from torch.optim import Optimizer from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten from colossalai.accelerator import get_accelerator from colossalai.interface.model import PeftUnwrapMixin from colossalai.tensor.d_tensor import ( is_customized_distributed_tensor, is_distributed_tensor, to_global, to_global_for_customized_distributed_tensor, ) from colossalai.utils import get_current_device from colossalai.utils.safetensors import _flatten_optim_state_dict, load_flat SAFE_WEIGHTS_NAME = "model.safetensors" WEIGHTS_NAME = "pytorch_model.bin" STATES_NAME = "pytorch_optim.bin" SAFE_STATE_NAME = "optimizer.safetensors" SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json" WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json" STATES_INDEX_NAME = "pytorch_optim.bin.index.json" SAFE_STATES_INDEX_NAME = "optimizer.safetensors.index.json" GROUP_FILE_NAME = "pytorch_optim_group.bin" # ====================================== # General helper functions # ====================================== def calculate_tensor_size(tensor: torch.Tensor) -> float: """ Calculate the size of a parameter in MB. Used to compute whether a group of params exceed the shard size. If so, a new shard should be created. Args: tensor (torch.Tensor): the tensor to calculate size for. Returns: float: size of the tensor in MB. 
""" return tensor.numel() * tensor.element_size() / 1024 / 1024 def is_safetensors_available() -> bool: """ Check whether safetensors is available. Returns: bool: whether safetensors is available. """ try: return True except ImportError: return False def is_dtensor_checkpoint(checkpoint_file_path: str) -> bool: """ Check whether the checkpoint file is a dtensor checkpoint. Args: checkpoint_file_path (str): path to the checkpoint file. Returns: bool: whether the checkpoint file is a dtensor checkpoint. """ if checkpoint_file_path.endswith(".*.safetensors") or checkpoint_file_path.endswith(".*.bin"): return True else: return False def is_safetensor_checkpoint(checkpoint_file_path: str) -> bool: """ Check whether the checkpoint file is a safetensor checkpoint. Args: checkpoint_file_path (str): path to the checkpoint file. Returns: bool: whether the checkpoint file is a safetensor checkpoint. """ if checkpoint_file_path.endswith(".safetensors"): return True else: return False def search_tp_partition_dim(current_shape: torch.Size, original_shape: torch.Size, tp_size: int) -> Optional[int]: """ Given the current shape of parameter and the shape of parameter before sharding, return the dimension along which the parameter is sharded when using tensor parallel. If tensor parallel is not used, return None. Args: current_shape (torch.Size): The current shape of parameter after sharding. original_shape (torch.Size): The shape of parameter before sharding. tp_size (int): The size of tp group. Returns: Optional[int]: The dimension along which parameter is partitioned. 
""" partition_dim = None for dim, length in enumerate(original_shape): if length > current_shape[dim]: partition_dim = dim break if partition_dim is not None: assert ( original_shape[partition_dim] == tp_size * current_shape[partition_dim] ), f"The parameter isn't evenly distributed among tensor parallel group: \ shape before sharding {original_shape}, shape after sharding {current_shape}" return partition_dim def search_padding_dim(global_shape: torch.Size, original_shape: torch.Size) -> Optional[int]: padding_dim = None for dim, length in enumerate(global_shape): if length > original_shape[dim]: padding_dim = dim break return padding_dim # ====================================== # Helper classes and functions for saving shard file # ====================================== class StateDictSharder: def __init__(self, size_per_shard: int) -> None: self.max_shard_size = size_per_shard self.current_block = OrderedDict() self.current_block_size = 0 def append_param(self, name: str, tensor: torch.Tensor) -> Tuple[Optional[OrderedDict], int]: tensor_size = calculate_tensor_size(tensor) ret_block = None ret_block_size = 0 # before we return the current block and create a new block, # we need to ensure that the current block is not empty if self.current_block_size + tensor_size > self.max_shard_size and self.current_block_size > 0: ret_block = self.current_block ret_block_size = self.current_block_size self.current_block = OrderedDict() self.current_block_size = 0 self.current_block[name] = tensor self.current_block_size += tensor_size return ret_block, ret_block_size def append_optim_state(self, param_id: int, state: OrderedDict) -> Tuple[Optional[OrderedDict], int]: # A state might contain more than one tensors. # e.g. 
each Adam state includes: 'step', 'exp_avg', 'exp_avg_sq' state_size = 0 isDTensor = False for state_tensor in state.values(): # When state_tensor is not of Tensor class, # e.g., a SGD optimizer with momentum set to 0 can have None as state # The calculation of tensor size should be skipped to avoid error. if not isinstance(state_tensor, torch.Tensor): continue # If the states are stored as DTensors, mark isDTensor as true. if is_distributed_tensor(state_tensor): isDTensor = True state_size += calculate_tensor_size(state_tensor) ret_block = None ret_block_size = 0 # directly return if state is stored as distributed tensor if isDTensor: return ret_block, ret_block_size # before we return the current block and create a new block, # we need to ensure that the current block is not empty if self.current_block_size + state_size > self.max_shard_size and self.current_block_size > 0: ret_block = self.current_block ret_block_size = self.current_block_size self.current_block = OrderedDict() self.current_block_size = 0 self.current_block[param_id] = state self.current_block_size += state_size return ret_block, ret_block_size def gather_distributed_param(param: torch.Tensor, keep_vars: bool = False) -> torch.Tensor: """ Gather the complete parameter for saving if passed in param is distributed under tp setting. Args: param (torch.Tensor): A model parameter, might be d_tensor. keep_vars (bool, optional): Whether to return the parameter in calculation graph. Defaults to False. 
Returns: torch.Tensor: the complete parameter """ param_ = param if keep_vars else param.detach() if is_distributed_tensor(param_): return to_global(param_) elif is_customized_distributed_tensor(param_): return to_global_for_customized_distributed_tensor(param_) else: return param_ def save_state_dict_shards( sharded_state_dict: Iterator[Tuple[OrderedDict, int]], checkpoint: str, index_file: "CheckpointIndexFile", base_filename: str, is_master: bool, use_safetensors: bool = False, use_pp_format: bool = False, ) -> int: """ Save sharded state dict only on master rank, this method can be used by both model and optimizer states. Args: sharded_state_dict (Iterator[Tuple[OrderedDict, int]]): a generator of shards, each shard contains state dict and shard size. checkpoint (str): The path of checkpoint directory as string. index_file (CheckpointIndexFile): The index file object to be updated. base_filename (str): Decides the prefix of filenames of shards. is_master (bool): Whether current rank is main process. use_safetensors (bool, optional): Whether to use safetensors to save checkpoint. Defaults to False. use_pp_format: (bool, optional): Whether to save the files in pipeline format including stage information. Defaults to False. Returns: int: the total size of shards """ total_size = 0 shard_filenames = [] for idx, shard_pair in enumerate(sharded_state_dict): shard, current_size = shard_pair # Just loop over the sharder and gather to other ranks if not master if not is_master: del shard continue shard_file = get_shard_filename(base_filename, idx) total_size = total_size + current_size for key in shard.keys(): index_file.append_weight_map(key, shard_file) checkpoint_file_path = os.path.join(checkpoint, shard_file) # Only save on master rank. save_state_dict(shard, checkpoint_file_path, use_safetensors=use_safetensors) shard_filenames.append(shard_file) del shard # Clean folder, deleted unneeded files. 
clean_folder(checkpoint, base_filename, shard_filenames, is_master=is_master, use_pp_format=use_pp_format) return total_size def async_save_state_dict_shards( sharded_state_dict: Iterator[Tuple[OrderedDict, int]], checkpoint: str, index_file: "CheckpointIndexFile", base_filename: str, is_master: bool, use_pp_format: bool = False, state_preprocess: bool = False, ) -> Tuple[int, list]: """ Save sharded state dict only on master rank, this method can be used by both model and optimizer states. Args: sharded_state_dict (Iterator[Tuple[OrderedDict, int]]): a generator of shards, each shard contains state dict and shard size. checkpoint (str): The path of checkpoint directory as string. index_file (CheckpointIndexFile): The index file object to be updated. base_filename (str): Decides the prefix of filenames of shards. is_master (bool): Whether current rank is main process. use_safetensors (bool, optional): Whether to use safetensors to save checkpoint. Defaults to False. use_pp_format: (bool, optional): Whether to save the files in pipeline format including stage information. Defaults to False. Returns: int: the total size of shards """ from colossalai.utils.safetensors import save total_size = 0 shard_filenames = [] writers = [] for idx, shard_pair in enumerate(sharded_state_dict): shard, current_size = shard_pair # Just loop over the sharder and gather to other ranks if not master if not is_master: del shard continue shard_file = get_shard_filename(base_filename, idx) total_size = total_size + current_size for key in shard.keys(): index_file.append_weight_map(key, shard_file) checkpoint_file_path = os.path.join(checkpoint, shard_file) if state_preprocess: state_dict, metadata = _flatten_optim_state_dict(state_dict=shard, seperator=".") else: state_dict = shard metadata = None # Only save on master rank. 
writer = save(checkpoint_file_path, state_dict=state_dict, metadata=metadata) writers.append(writer) shard_filenames.append(shard_file) del shard # Clean folder, deleted unneeded files. clean_folder(checkpoint, base_filename, shard_filenames, is_master=is_master, use_pp_format=use_pp_format) return total_size, writers def async_move_save_state_dict_shards( sharded_state_dict: Iterator[Tuple[OrderedDict, int]], checkpoint: str, index_file: "CheckpointIndexFile", base_filename: str, is_master: bool, pinned_state_dict: Optional[Dict[str, torch.Tensor]], use_pp_format: bool = False, state_preprocess: bool = False, ) -> Tuple[int, Dict[str, torch.Tensor], list]: """ Save sharded state dict only on master rank, this method can be used by both model and optimizer states. Args: sharded_state_dict (Iterator[Tuple[OrderedDict, int]]): a generator of shards, each shard contains state dict and shard size. checkpoint (str): The path of checkpoint directory as string. index_file (CheckpointIndexFile): The index file object to be updated. base_filename (str): Decides the prefix of filenames of shards. is_master (bool): Whether current rank is main process. use_safetensors (bool, optional): Whether to use safetensors to save checkpoint. Defaults to False. use_pp_format: (bool, optional): Whether to save the files in pipeline format including stage information. Defaults to False. 
Returns: int: the total size of shards """ from colossalai.utils.safetensors import move_and_save total_size = 0 shard_filenames = [] if pinned_state_dict is None: returned_state_dict = {} else: returned_state_dict = pinned_state_dict writers = [] for idx, shard_pair in enumerate(sharded_state_dict): shard, current_size = shard_pair # Just loop over the sharder and gather to other ranks if not master if not is_master: del shard continue shard_file = get_shard_filename(base_filename, idx) total_size = total_size + current_size for key in shard.keys(): index_file.append_weight_map(key, shard_file) checkpoint_file_path = os.path.join(checkpoint, shard_file) if state_preprocess: state_dict, metadata = _flatten_optim_state_dict(state_dict=shard) else: state_dict = shard metadata = None if pinned_state_dict is not None: sub_pinned_state_dict = {k: pinned_state_dict[k] for k in state_dict.keys()} else: sub_pinned_state_dict = create_pinned_state_dict(state_dict) returned_state_dict.update(sub_pinned_state_dict) # Only save on master rank. writer = move_and_save(checkpoint_file_path, state_dict, sub_pinned_state_dict, metadata) writers.append(writer) shard_filenames.append(shard_file) del shard # Clean folder, deleted unneeded files. clean_folder(checkpoint, base_filename, shard_filenames, is_master=is_master, use_pp_format=use_pp_format) return total_size, returned_state_dict, writers def shard_model_checkpoint( state_dict: torch.Tensor, max_shard_size: int = 1024, pinned_state_dicts: Optional[Dict[int, Dict[str, torch.Tensor]]] = None, ) -> Iterator[Tuple[OrderedDict, int]]: """ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. 
""" state_dict_sharder = StateDictSharder(max_shard_size) for key, weight in state_dict.items(): if not is_distributed_tensor(weight): if pinned_state_dicts is not None: if key not in pinned_state_dicts: pinned_state_dicts[key] = torch.empty_like(weight, pin_memory=True, device="cpu") pinned_state_dicts[key].copy_(weight) weight = pinned_state_dicts[key] block, block_size = state_dict_sharder.append_param(key, weight) if block != None: yield block, block_size # Return the last block in sharder. yield state_dict_sharder.current_block, state_dict_sharder.current_block_size def shard_optimizer_checkpoint( state_dict: dict, max_shard_size: int = 1024, pinned_state_dicts: Optional[Dict[str, torch.Tensor]] = None ) -> Iterator[Tuple[OrderedDict, int]]: """ Splits an optimizer state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. """ # Only split state_dict['state']; state_dict['param_group'] is not considered in this function. states = state_dict["state"] state_dict_sharder = StateDictSharder(max_shard_size) for param_id, state in states.items(): if pinned_state_dicts is not None: if param_id not in pinned_state_dicts: pinned_state_dicts[param_id] = {} for k, v in state.items(): if k not in pinned_state_dicts[param_id]: pinned_state_dicts[param_id][k] = torch.empty_like(v, pin_memory=True, device="cpu") pinned_state_dicts[param_id][k].copy_(v) state[k] = pinned_state_dicts[param_id][k] block, block_size = state_dict_sharder.append_optim_state(param_id, state) if block != None: yield block, block_size # Return the last block in sharder. yield state_dict_sharder.current_block, state_dict_sharder.current_block_size # ====================================== # Helper functions for saving state dict # ====================================== def save_state_dict( state_dict: dict, checkpoint_file_path: str, use_safetensors: bool, ) -> None: """ Save state dict to checkpoint. Args: state_dict (dict): state dict. 
checkpoint_file_path (str): path to the checkpoint file. use_safetensors (bool): whether to use safetensors to save the checkpoint. """ # Move all tensors in the state_dict to CPU before saving to avoid serialization issues state_dict_cpu = tree_map(lambda x: x.data.cpu() if torch.is_tensor(x) else x, state_dict) if use_safetensors: assert is_safetensors_available(), "safetensors is not available." assert checkpoint_file_path.endswith( ".safetensors" ), "safetensors only supports .safetensors suffix for checkpoint file." from safetensors.torch import save_file as safe_save_file safe_save_file(state_dict_cpu, checkpoint_file_path, metadata={"format": "pt"}) else: torch.save(state_dict_cpu, checkpoint_file_path) def save_param_groups(state_dict: dict, group_file_path: str) -> None: """ Save information of param_groups to given file path. Args: state_dict (dict): state dict. group_file_path (str): path to the group file. """ param_groups = state_dict["param_groups"] torch.save(param_groups, group_file_path) def clean_folder( checkpoint_path: str, weights_name: str, shard_filenames: List[str], is_master: bool = True, use_pp_format: bool = False, ): """ Clean the unneeded files in checkpoint directory after shards of state_dict have been saved. Args: checkpoint_path (str): Path to the checkpoint directory. weights_name (str): Decides the prefix of filenames of weight shards. shard_filenames (List[str]): The list of saved shard filenames which should not be removed. is_master (bool, optional): Whether current rank is main process. Defaults to True. use_pp_format: (bool, optional): Whether to save the files in pipeline format including stage information. Defaults to False. 
""" if is_master: for filename in os.listdir(checkpoint_path): full_filename = os.path.join(checkpoint_path, filename) weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "") filename_no_suffix = filename.replace(".bin", "").replace(".safetensors", "") if not use_pp_format: reg = re.compile(r"(.*?)-\d{5}") else: # When this checkpoint is created by pipeline parallel process, the pattern is a little different. reg = re.compile(r"(.*?)-stage-\d{5}-shard-\d{5}") if ( filename.startswith(weights_no_suffix) and filename not in shard_filenames and reg.fullmatch(filename_no_suffix) is not None ): os.remove(full_filename) def save_config_file(model: nn.Module, checkpoint_path: str, is_master: bool = True): """ Save config.json/generation_config.json if model is a Huggingface pretrained model. This method can only be called when a model is saved in a sharded way. Args: model (nn.Module): The model whose config should be saved if it's a huggingface model. checkpoint_path (str): Path to the checkpoint directory. is_master (bool): Whether current rank is main process. """ try: from transformers.modeling_utils import PreTrainedModel, get_parameter_dtype from transformers.modeling_utils import unwrap_model as unwrap_huggingface_model except ImportError: return if isinstance(model, PeftUnwrapMixin): model = model.base_model if not isinstance(model, PreTrainedModel): return model = unwrap_huggingface_model(model) # save the string version of dtype to the config, e.g. 
convert torch.float32 => "float32" dtype = get_parameter_dtype(model) model.config.torch_dtype = str(dtype).split(".")[1] # Attach architecture to the config model.config.architectures = [model.__class__.__name__] # Save the config if is_master: model.config.save_pretrained(checkpoint_path) if model.can_generate(): model.generation_config.save_pretrained(checkpoint_path) def save_dtensor(name: str, tensor: torch.Tensor, index_file: "CheckpointIndexFile", use_safetensors: bool) -> None: """ Save distributed tensor to checkpoint. This checkpoint will be a dictionary which contains only one tensor. Args: tensor (Tensor): tensor to be saved. index_file (CheckpointIndexFile): path to the checkpoint file. size_per_shard (int): size per shard in MB. """ root_path = index_file.root_path output_root_path = root_path.joinpath("dtensor") # create directory output_root_path.mkdir(exist_ok=True) # save tensor to this directory # TODO(YuliangLiu): get index of the tensor shard # e.g. index = index = 0 # save tensor to file ckpt_file_name = generate_dtensor_file_name(name, index, use_safetensors) ckpt_file_path = output_root_path.joinpath(ckpt_file_name) # dtensor ckpt file always contains only one tensor state_dict = {name: tensor} save_state_dict(state_dict, str(ckpt_file_path), use_safetensors) # update the weight map # * means all shards ckpt_file_name_in_weight_map = "dtensor/" + generate_dtensor_file_name(name, "*", use_safetensors) index_file.append_weight_map(name, ckpt_file_name_in_weight_map) def get_checkpoint_file_suffix(use_safetensors: bool) -> str: """ Get checkpoint file suffix. Args: use_safetensors (bool): whether to use safetensors to save the checkpoint. Returns: str: checkpoint file suffix. """ if use_safetensors: return ".safetensors" else: return ".bin" def generate_checkpoint_shard_file_name( index: int, total_number: int, use_safetensors: bool, prefix: str = None ) -> str: """ Generate checkpoint shard file name. Args: index (int): index of the shard. 
total_number (int): total number of shards. use_safetensors (bool): whether to use safetensors to save the checkpoint. prefix (str): prefix of the shard file name. Default: None. Returns: str: checkpoint shard file name. """ suffix = get_checkpoint_file_suffix(use_safetensors) if prefix is None: return f"{index:05d}-of-{total_number:05d}.{suffix}" else: return f"{prefix}-{index:05d}-of-{total_number:05d}.{suffix}" def generate_dtensor_file_name(param_name: str, index: int, use_safetensors: bool) -> str: """ Generate dtensor file name. Args: param_name (str): name of the distributed parameter. index (int): index of the shard. use_safetensors (bool): whether to use safetensors to save the checkpoint. Returns: str: dtensor file name. """ suffix = get_checkpoint_file_suffix(use_safetensors) return f"{param_name}.{index}.{suffix}" # ======================================== # Helper functions for loading state dict # ======================================== def load_shard_state_dict(checkpoint_file: Path, use_safetensors: bool = False): """ load shard state dict into model """ if use_safetensors and not checkpoint_file.suffix == ".safetensors": raise Exception("load the model using `safetensors`, but no file endwith .safetensors") if use_safetensors: from safetensors.torch import load_file as safe_load_file return safe_load_file(checkpoint_file) else: return torch.load(checkpoint_file, map_location=torch.device("cpu")) def load_state_dict_into_model( model: nn.Module, state_dict: torch.Tensor, missing_keys: List, strict: bool = False, load_sub_module: bool = True ): r"""Copies parameters and buffers from :attr:`state_dict` into this module and its descendants. Args: state_dict (dict): a dict containing parameters and persistent buffers. 
""" if isinstance(model, PeftUnwrapMixin): state_dict = model.patch_state_dict(state_dict) model = model.base_model if not isinstance(state_dict, Mapping): raise TypeError("Expected state_dict to be dict-like, got {}.".format(type(state_dict))) unexpected_keys: List[str] = [] sub_missing_keys: List[str] = [] error_msgs: List[str] = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = OrderedDict(state_dict) if metadata is not None: state_dict._metadata = metadata def load(module: nn.Module, state_dict, prefix="", load_sub_module: bool = True): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) args = (state_dict, prefix, local_metadata, True, sub_missing_keys, unexpected_keys, error_msgs) # Parameters of module and children will start with prefix. We can exit early if there are none in this # state_dict if strict or len([key for key in state_dict if key.startswith(prefix)]) > 0: module._load_from_state_dict(*args) if load_sub_module: for name, child in module._modules.items(): if child is not None: load(child, state_dict, prefix + name + ".") load(model, state_dict, "", load_sub_module) del load missing_keys = missing_keys.append(sub_missing_keys) if strict: if len(unexpected_keys) > 0: error_msgs = [ "Unexpected key(s) in state_dict: {}. ".format(", ".join('"{}"'.format(k) for k in unexpected_keys)) ] raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs)) ) def load_param_groups_into_optimizer(optimizer: Optimizer, param_group_path: str) -> dict: """ Load information of param_groups into an initialized optimizer. """ # Load list of param_groups from given file path. # The params in saved_groups are in the form of integer indices. 
saved_groups = torch.load(param_group_path, map_location=torch.device("cpu")) if not isinstance(saved_groups, List): raise ValueError(f"The param_groups saved at {param_group_path} is not of List type") # The params in param_groups are in the form of pytorch tensors. # For more details, please view source code of Optimizer class in pytorch. param_groups = optimizer.param_groups # Check the compatibility of saved_groups and param_groups. if len(param_groups) != len(saved_groups): raise ValueError("loaded state dict has a different number of original parameter groups") param_lens = (len(g["params"]) for g in param_groups) saved_lens = (len(g["params"]) for g in saved_groups) if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): raise ValueError( "loaded state dict contains a parameter group " "that doesn't match the size of optimizer's group" ) # Creating mapping from id to parameters. id_map = { old_id: p for old_id, p in zip( chain.from_iterable((g["params"] for g in saved_groups)), chain.from_iterable((g["params"] for g in param_groups)), ) } # Update parameter groups, setting their 'params' value. def update_group(group, new_group): new_group["params"] = group["params"] return new_group updated_groups = [update_group(g, ng) for g, ng in zip(param_groups, saved_groups)] optimizer.__dict__.update({"param_groups": updated_groups}) return id_map def load_states_into_optimizer(optimizer: Optimizer, state_dict: dict, id_map: dict, strict: bool = False): r"""Copies states from `state_dict` into an Optimizer object. Args: optimizer(Optimizer): An initialized Optimizer object to be loaded state_dict(dict): A mapping from tensor index (an integer) to its states to be loaded (a mapping from state name to a tensor). id_map(dict): A mapping from tensor index (an integer) to its corresponding parameter (a tensor) whose states will be updated. strict(bool, optional): If set to True, only load the parameters with its id in id_map. Defaults to False. 
""" # Ensure that the keys of state_dict are integers. state_dict = {int(k): v for k, v in state_dict.items()} def cast(param, value, key=None): r"""Make a deep copy of value, casting all tensors to device of param.""" if isinstance(value, torch.Tensor): # Floating-point types are a bit special here. They are the only ones # that are assumed to always match the type of params. # Make sure state['step'] is not casted https://github.com/pytorch/pytorch/issues/74424 if key != "step": if param.is_floating_point(): value = value.to(param.dtype) value = value.to(param.device, non_blocking=True) return value elif isinstance(value, dict): return {k: cast(param, v, key=k) for k, v in value.items()} elif isinstance(value, container_abcs.Iterable): return type(value)(cast(param, v) for v in value) else: return value # Copy state assigned to params (and cast tensors to appropriate types). # State that is not assigned to params is copied as is (needed for # backward compatibility). new_states = defaultdict(dict) for k, v in state_dict.items(): if k in id_map: param = id_map[k] new_states[param] = cast(param, v) elif not strict: new_states[k] = v get_accelerator().synchronize() optimizer.state.update(new_states) def sharded_optimizer_loading_epilogue(optimizer: Optimizer): r"""Do the cleaning up work after state_dict has been loaded into optimizer Args: optimizer(Optimizer): An optimizer object whose state has just been loaded. """ # Do the cleaning up as in src code of Pytorch. if Version(torch.__version__) >= Version("2.0.0"): optimizer._patch_step_function() # To support multiprocessing pickle/unpickle else: optimizer._hook_for_profile() # To support multiprocessing pickle/unpickle. optimizer.defaults.setdefault("differentiable", False) def has_index_file(checkpoint_path: str) -> Tuple[bool, Optional[Path]]: """ Check whether the checkpoint has an index file. Args: checkpoint_path (str): path to the checkpoint. Returns:
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/checkpoint_io/__init__.py
colossalai/checkpoint_io/__init__.py
from .checkpoint_io_base import CheckpointIO from .general_checkpoint_io import GeneralCheckpointIO from .hybrid_parallel_checkpoint_io import HybridParallelCheckpointIO from .index_file import CheckpointIndexFile from .moe_checkpoint import MoECheckpointIO __all__ = [ "CheckpointIO", "CheckpointIndexFile", "GeneralCheckpointIO", "HybridParallelCheckpointIO", "MoECheckpointIO", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/checkpoint_io/hybrid_parallel_checkpoint_io.py
colossalai/checkpoint_io/hybrid_parallel_checkpoint_io.py
import copy import logging import os from collections import defaultdict from functools import reduce from pathlib import Path from shutil import rmtree from typing import Dict, Iterator, Optional, OrderedDict, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler as LRScheduler from torch.utils._pytree import tree_map from colossalai.cluster import DistCoordinator from colossalai.interface import ModelWrapper, OptimizerWrapper from colossalai.tensor.padded_tensor import ( init_as_padded_tensor, is_padded_tensor, to_padded_tensor, to_unpadded_tensor, ) from colossalai.utils import get_current_device, get_non_persistent_buffers_set from colossalai.utils.safetensors import _flatten_optim_state_dict, load_flat from .general_checkpoint_io import GeneralCheckpointIO from .index_file import CheckpointIndexFile from .utils import ( StateDictSharder, async_save_state_dict_shards, create_pinned_state_dict, gather_distributed_param, gather_state_dict_fast, get_lora_state_dict, get_model_base_filenames, get_optimizer_base_filenames, is_safetensors_available, load_shard_state_dict, load_state_dict, load_state_dict_into_model, save_config_file, save_param_groups, save_state_dict, save_state_dict_shards, search_padding_dim, search_tp_partition_dim, sharded_optimizer_loading_epilogue, ) try: from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX except ImportError: _EXTRA_STATE_KEY_SUFFIX = "_extra_state" class HybridParallelCheckpointIO(GeneralCheckpointIO): """ CheckpointIO for Hybrid Parallel Training. Args: dp_group (ProcessGroup): Process group along data parallel dimension. pp_group (ProcessGroup): Process group along pipeline parallel dimension. tp_group (ProcessGroup): Process group along tensor parallel dimension. zero_stage (int): The zero stage of plugin. Should be in [0, 1, 2]. 
verbose (bool, optional): Whether to print logging massage when saving/loading has been successfully executed. Defaults to True. """ def __init__( self, dp_group: ProcessGroup, pp_group: ProcessGroup, tp_group: ProcessGroup, sp_group: ProcessGroup, zero_stage: int, verbose: bool = True, ) -> None: super().__init__() self.global_dp_group = dp_group self.pp_group = pp_group self.tp_group = tp_group self.sp_group = sp_group self.dp_rank = dist.get_rank(self.global_dp_group) self.tp_rank = dist.get_rank(self.tp_group) self.pp_rank = dist.get_rank(self.pp_group) self.sp_rank = dist.get_rank(self.sp_group) self.global_dp_size = dist.get_world_size(dp_group) self.pp_size = dist.get_world_size(pp_group) self.tp_size = dist.get_world_size(tp_group) self.use_zero = zero_stage > 0 self.verbose = verbose self.coordinator = DistCoordinator() @staticmethod def _model_sharder( model: nn.Module, prefix: str = "", keep_vars: bool = False, size_per_shard: int = 1024, pinned_state_dicts: Optional[Dict[str, torch.Tensor]] = None, ) -> Iterator[Tuple[OrderedDict, int]]: # An internel method that breaks state_dict of model into shards within limited size. state_dict_sharder = StateDictSharder(size_per_shard) # Save parameters. for name, param in model.named_parameters(): if param is None: continue # Gather tensor pieces when using tensor parallel. param_ = gather_distributed_param(param, keep_vars=False) if is_padded_tensor(param_): param_ = to_unpadded_tensor(param_) if pinned_state_dicts is not None: if (prefix + name) not in pinned_state_dicts: pinned_state_dicts[prefix + name] = torch.empty_like(param_, pin_memory=True, device="cpu") pinned_state_dicts[prefix + name].copy_(param_) param_ = pinned_state_dicts[prefix + name] block, block_size = state_dict_sharder.append_param(prefix + name, param_) if block is not None: yield block, block_size # Save buffers. 
non_persist_buffers_set = get_non_persistent_buffers_set(model) for name, buf in model.named_buffers(): if buf is not None and name not in non_persist_buffers_set: buffer = buf if keep_vars else buf.detach() if pinned_state_dicts is not None: if (prefix + name) not in pinned_state_dicts: pinned_state_dicts[prefix + name] = torch.empty_like(param_, pin_memory=True, device="cpu") pinned_state_dicts[prefix + name].copy_(buffer) buffer = pinned_state_dicts[prefix + name] block, block_size = state_dict_sharder.append_param(prefix + name, buffer) if block is not None: yield block, block_size # Save extra states. extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if ( getattr(model.__class__, "get_extra_state", torch.nn.Module.get_extra_state) is not torch.nn.Module.get_extra_state ): extra_state = model.get_extra_state() if pinned_state_dicts is not None: if extra_state_key not in pinned_state_dicts: pinned_state_dicts[extra_state_key] = torch.empty_like(param_, pin_memory=True, device="cpu") pinned_state_dicts[extra_state_key].copy_(extra_state) extra_state = pinned_state_dicts[extra_state_key] block, block_size = state_dict_sharder.append_param(extra_state_key, extra_state) if block is not None: yield block, block_size # Return the last block in sharder. yield state_dict_sharder.current_block, state_dict_sharder.current_block_size @staticmethod def _optimizer_sharder( optimizer: OptimizerWrapper, use_zero: bool, dp_group: ProcessGroup, tp_group: ProcessGroup, size_per_shard: int = 1024, pinned_state_dicts: Optional[Dict[int, Dict[str, torch.Tensor]]] = None, ): # An internel method that breaks state_dict of optimizer into shards within limited size. 
state_dict_sharder = StateDictSharder(size_per_shard) param_info = optimizer.param_info master_to_working_map = optimizer.get_master_to_working_map() for param, state in optimizer.optim.state.items(): if param is None: continue if master_to_working_map is not None: working_param = master_to_working_map[id(param)] else: working_param = param param_id = param_info["param2id"][id(working_param)] if pinned_state_dicts is not None: if param_id not in pinned_state_dicts: pinned_state_dicts[param_id] = {} original_shape = param_info["param2shape"][id(working_param)] state_ = HybridParallelCheckpointIO.gather_from_sharded_optimizer_state( state, working_param, original_shape=original_shape, dp_group=dp_group, tp_group=tp_group, use_zero=use_zero, inplace=False, pinned_state_dicts=pinned_state_dicts[param_id] if pinned_state_dicts is not None else None, ) block, block_size = state_dict_sharder.append_optim_state(param_id, state_) if block is not None: yield block, block_size # Return the last block in sharder. yield state_dict_sharder.current_block, state_dict_sharder.current_block_size def save_sharded_model( self, model: ModelWrapper, checkpoint: str, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_safetensors: bool = False, use_async: bool = False, ) -> None: """ Save sharded model checkpoint under the given checkpointing path. The following files will be created under the path: - An index file (pytorch_model.bin.index.json) containing a map between model params/buffers and file names. - Multiple files that store state tensors of models. If pipeline parallelism is used, the filenames are in the form of "pytorch_model.<prefix>-stage-000XX-shard-000XX.bin". If pipeline parallelism is not used, "pytorch_model.<prefix>-000XX.bin" Args: model (nn.Module): Model on local device to be saved. checkpoint (str): Checkpointing path which should be a directory path. 
gather_dtensor (bool, optional): Whether to gather_dtensor, currently not used. Defaults to True. prefix (str, optional): Perfix of file to save. Defaults to None. size_per_shard (int, optional): Size per shard in MB. Defaults to 1024. use_safetensors (bool, optional): Whether to use safe tensors. Defaults to False. use_async (bool, optional): Whether to save the state_dicts of model asynchronously. Defaults to False. """ assert isinstance(model, ModelWrapper), "Please boost the model before saving!" model._force_wait_all_gather() model = model.unwrap() if os.path.isfile(checkpoint): logging.error(f"Provided path ({checkpoint}) should be a directory, not a file") return Path(checkpoint).mkdir(parents=True, exist_ok=True) # Devices along the same dp_group share the same copies of model. # So only let the device with dp_rank == 0 save the model. if self.dp_rank != 0: return # Then collect the sharded parameters & buffers along tp_group. # Only devices with tp_rank == 0 are responsible for model saving. 
control_saving = self.tp_rank == 0 and self.sp_rank == 0 if control_saving and use_async: if hash(model) not in self.pinned_state_dicts: self.pinned_state_dicts[hash(model)] = {} pinned_state_dicts = self.pinned_state_dicts[hash(model)] else: pinned_state_dicts = None state_dict_shard = HybridParallelCheckpointIO._model_sharder( model, size_per_shard=size_per_shard, pinned_state_dicts=pinned_state_dicts ) weights_name, save_index_file = get_model_base_filenames(prefix, use_safetensors) index_file = CheckpointIndexFile(checkpoint) if self.pp_size == 1: # When pipeline is not used, save the model shards as in general checkpointIO if use_async: total_size, writers = async_save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=weights_name, is_master=control_saving, state_preprocess=False, ) self.async_writers.extend(writers) else: total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=weights_name, is_master=control_saving, use_safetensors=use_safetensors, ) if control_saving: index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) save_config_file(model, checkpoint) if self.verbose and self.coordinator.is_master(): logging.info( f"The model is split into checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) else: # When pipeline is used, each stage produces its own shard files and index files. # Index files belonging to each stage are saved under a temporary folder ./tmp_index_files/ # After all the state_dicts have been saved, the master rank integrates all the index files into one final index file and deletes the tmp folder. 
final_index_file_path = copy.deepcopy(save_index_file) tmp_index_file_folder = os.path.join(checkpoint, "tmp_index_files") Path(tmp_index_file_folder).mkdir(parents=True, exist_ok=True) # Manage filenames of sharded weights and index file for each pipeline stage. weights_name = weights_name.replace(".bin", f"-stage-{self.pp_rank+1:05d}-shard.bin") weights_name = weights_name.replace(".safetensors", f"-stage-{self.pp_rank+1:05d}-shard.safetensors") save_index_file = save_index_file.replace(".json", f"-stage-{self.pp_rank+1:05d}.json") save_index_file = os.path.join("tmp_index_files", save_index_file) if use_async: total_size, writers = async_save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=weights_name, is_master=control_saving, state_preprocess=False, ) self.async_writers.extend(writers) else: total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=weights_name, is_master=control_saving, use_safetensors=use_safetensors, use_pp_format=True, ) if control_saving: assert ( self.dp_rank == 0 and self.tp_rank == 0 ), "The saving process should have both dp_rank and tp_rank as 0." index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) else: return dist.barrier(self.pp_group) # The global master rank integrates the index files and clean the folder. 
if self.pp_rank == 0: final_index_file = CheckpointIndexFile(checkpoint) final_index_file.append_meta_data("total_size", 0) for filename in os.listdir(tmp_index_file_folder): stage_index_file = CheckpointIndexFile.from_file(os.path.join(tmp_index_file_folder, filename)) final_index_file.metadata["total_size"] += stage_index_file.metadata["total_size"] for weight, weight_filename in stage_index_file.weight_map.items(): final_index_file.append_weight_map(weight, weight_filename) final_index_file.write_index_file(final_index_file_path) save_config_file(model, checkpoint) rmtree(tmp_index_file_folder) if self.verbose and self.coordinator.is_master(): logging.info( f"The model is split into checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {final_index_file_path}." ) def load_sharded_model( self, model: ModelWrapper, checkpoint_index_file: Path, strict: bool = False, low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load sharded model with the given path to index file of checkpoint folder. Args: model (nn.Module): The model to be loaded. checkpoint_index_file (str): Path to the index file of checkpointing folder. strict (bool, optional): For name matching during loading state_dict. Defaults to False. This argument should be manually set to False since params on same device might be stored in different files. """ assert isinstance(model, ModelWrapper), "Please boost the model before loading!" model._force_wait_all_gather() model_before_wrapping = model # backup for model before wrapping model = model.unwrap() # Check whether the checkpoint uses safetensors. use_safetensors = False if "safetensors" in checkpoint_index_file.name: use_safetensors = True if use_safetensors and not is_safetensors_available(): raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.") # Read checkpoint index file. 
ckpt_index_file = CheckpointIndexFile.from_file(checkpoint_index_file) ckpt_root_path = ckpt_index_file.root_path weight_map = ckpt_index_file.weight_map strict = False # Load params & buffers to model. # Keep a record of loaded files so that file will not be repeatedly loaded. loaded_file = set() missing_keys = [] missing_file_keys = [] def _load(name: str): if name not in weight_map: missing_file_keys.append(name) return filename = weight_map[name] # If this param/buffer has been loaded before, directly return. if filename in loaded_file: return file_path = os.path.join(ckpt_root_path, filename) state_dict = load_shard_state_dict(Path(file_path), use_safetensors) if not low_cpu_mem_mode: state_dict = create_pinned_state_dict(state_dict, empty=False, num_threads=num_threads) load_state_dict_into_model( model, state_dict, missing_keys=missing_keys, strict=strict, load_sub_module=True ) loaded_file.add(filename) # Load parameters. for name, _ in model.named_parameters(): _load(name) # Load buffers. non_persistent_buffers = get_non_persistent_buffers_set(model) for name, buf in model.named_buffers(): if buf is not None and name not in non_persistent_buffers: _load(name) # Load extra states. extra_state_key = _EXTRA_STATE_KEY_SUFFIX if ( getattr(model.__class__, "get_extra_state", torch.nn.Module.get_extra_state) is not torch.nn.Module.get_extra_state ): _load(extra_state_key) # Update master params if mixed-precision training is enabled. model_before_wrapping.update_master_params() if self.verbose and self.coordinator.is_master(): logging.info(f"The model has been successfully loaded from sharded checkpoint: {ckpt_root_path}.") if len(missing_keys) == 0: raise RuntimeError( "No weigth is loaded into the model. Please check the checkpoint files and the model structure." 
) remain_keys = reduce(lambda a, b: a & b, map(set, missing_keys)) remain_keys = remain_keys.union(set(missing_file_keys)) if len(remain_keys) > 0: if strict: error_msgs = [ "Missing key(s) in state_dict: {}. ".format(", ".join('"{}"'.format(k) for k in missing_keys)) ] raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( self.__class__.__name__, "\n\t".join(error_msgs) ) ) else: if self.coordinator.is_master(): logging.info(f"The following keys are not loaded from checkpoint: {remain_keys}") def save_sharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_async: bool = False, ): """ Save sharded optimizer checkpoint under the given checkpointing path. The following files will be created under the path: - An index file (pytorch_optim.bin.index.json) containing a map between optimizer states and file names - A group file (pytorch_optim_group.bin) recording information of param_groups - Multiple files that store state tensors of optimizers. If pipeline parallelism is used, the filenames are in the form of "pytorch_optim.<prefix>-stage-000XX-shard-000XX.bin". If pipeline parallelism is not used, "pytorch_optim.<prefix>-000XX.bin" Args: optimizer (OptimizerWrapper): Optimizer to save sharded state_dict checkpoint (str): Path to save optimizer state_dict gather_dtensor (bool): Whether to gather_dtensor, not used prefix (str): Perfix of file to save size_per_shard (int): Max file size of each file shard that store state tensors """ assert isinstance(optimizer, OptimizerWrapper), "Please boost the optimizer before saving!" if os.path.isfile(checkpoint): logging.error(f"Provided path ({checkpoint}) should be a directory, not a file") return Path(checkpoint).mkdir(parents=True, exist_ok=True) # Devices along the same dp_group share the same copies of states when zero is not used. # In this case only let the device with dp_rank == 0 save the model. 
if not self.use_zero and self.dp_rank != 0: return # Then collect the sharded states along dp_group(if using zero)/tp_group. # Only devices with (dp_rank == 0 and tp_rank == 0) are responsible for states saving. control_saving = self.dp_rank == 0 and self.tp_rank == 0 and self.sp_rank == 0 if use_async and control_saving: if id(optimizer) not in self.pinned_state_dicts: self.pinned_state_dicts[id(optimizer)] = {} pinned_state_dicts = self.pinned_state_dicts[id(optimizer)] else: pinned_state_dicts = None state_dict_shard = HybridParallelCheckpointIO._optimizer_sharder( optimizer, use_zero=self.use_zero, dp_group=self.global_dp_group, tp_group=self.tp_group, size_per_shard=size_per_shard, pinned_state_dicts=pinned_state_dicts, ) states_name, save_index_file, param_group_file = get_optimizer_base_filenames(prefix, use_safetensors=use_async) index_file = CheckpointIndexFile(checkpoint) if self.pp_size == 1: # When pipeline is not used, save the optimizer shards as in general checkpointIO if use_async: total_size, writers = async_save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=control_saving, use_pp_format=True, state_preprocess=True, ) self.async_writers.extend(writers) else: total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=control_saving, ) if control_saving: # Store param groups. index_file.append_meta_data("param_groups", param_group_file) group_file_path = os.path.join(checkpoint, param_group_file) param_groups = [ {**group, "params": group_info["params"]} for group, group_info in zip(optimizer.param_groups, optimizer.param_info["param_groups"]) ] save_param_groups({"param_groups": param_groups}, group_file_path) # Store index file. 
index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) if self.verbose and self.coordinator.is_master(): logging.info( f"The optimizer is going to be split to checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) else: # When pipeline is used, each stage produces its own shard files and index files. # Index files belonging to each stage are saved under a temporary folder ./tmp_index_files/ # After all the state_dicts have been saved, the master rank integrates all the index files into one final index file and deletes the tmp folder. final_index_file_path = copy.deepcopy(save_index_file) tmp_index_file_folder = os.path.join(checkpoint, "tmp_index_files") Path(tmp_index_file_folder).mkdir(parents=True, exist_ok=True) # Manage filenames of sharded weights and index file for each pipeline stage. if not use_async: states_name = states_name.replace(".bin", f"-stage-{self.pp_rank+1:05d}-shard.bin") else: states_name = states_name.replace(".safetensors", f"-stage-{self.pp_rank+1:05d}-shard.safetensors") save_index_file = save_index_file.replace(".json", f"-stage-{self.pp_rank+1:05d}.json") save_index_file = os.path.join("tmp_index_files", save_index_file) if use_async: total_size, writers = async_save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=control_saving, use_pp_format=True, state_preprocess=True, ) self.async_writers.extend(writers) else: total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=control_saving, use_pp_format=True, ) if control_saving: assert ( self.dp_rank == 0 and self.tp_rank == 0 ), "The saving process should have both dp_rank and tp_rank as 0." 
index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) else: return dist.barrier(self.pp_group) # The global master rank integrates the index files and clean the folder. if self.pp_rank == 0: final_index_file = CheckpointIndexFile(checkpoint) final_index_file.append_meta_data("total_size", 0) for filename in os.listdir(tmp_index_file_folder): stage_index_file = CheckpointIndexFile.from_file(os.path.join(tmp_index_file_folder, filename)) final_index_file.metadata["total_size"] += stage_index_file.metadata["total_size"] for param_id, state_filename in stage_index_file.weight_map.items(): final_index_file.append_weight_map(param_id, state_filename) # Store param groups. final_index_file.append_meta_data("param_groups", param_group_file) group_file_path = os.path.join(checkpoint, param_group_file) param_groups = [ {**group, "params": group_info["params"]} for group, group_info in zip(optimizer.param_groups, optimizer.param_info["param_groups"]) ] save_param_groups({"param_groups": param_groups}, group_file_path) final_index_file.write_index_file(final_index_file_path) rmtree(tmp_index_file_folder) if self.verbose and self.coordinator.is_master(): logging.info( f"The model is split into checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {final_index_file_path}." ) def load_sharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint_index_file: str, prefix: str = "", low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load sharded optimizer with the given path to index file of checkpoint folder. Args: optimizer (OptimizerWrapper): The optimizer to be loaded. checkpoint_index_file (str): Path to the index file of checkpointing folder. prefix (str): Not used. """ assert isinstance(optimizer, OptimizerWrapper), "Please boost the optimizer before loading!" 
def _get_param_id_from_optimizer_param( param: torch.Tensor, master_to_working_map: Optional[Dict[int, torch.Tensor]] = None ): if master_to_working_map is not None: working_param = master_to_working_map[id(param)] else: working_param = param return optimizer.param_info["param2id"][id(working_param)] # id_map is a mapping from param ids kept by current pipeline, to their corresponding parameter objects. # When Zero is used, the mapped parameter objects should be fp32 master parameters. # IDs should be obtained through saved param2id mapping earlier saved in optimizer.param_info. id_map = {} master_to_working_map = optimizer.get_master_to_working_map() for pg in optimizer.optim.param_groups: for param in pg["params"]: param_id = _get_param_id_from_optimizer_param(param, master_to_working_map) id_map[param_id] = param # Read checkpoint index file. ckpt_index_file = CheckpointIndexFile.from_file(checkpoint_index_file) ckpt_root_path = ckpt_index_file.root_path weight_map = ckpt_index_file.weight_map weight_map = {int(k): v for k, v in weight_map.items()} # convert saved id from str to int # Load param_groups param_group_path = ckpt_index_file.get_param_group_filename() if param_group_path is None: raise RuntimeError( f"Invalid index file path {checkpoint_index_file} for an optimizer. \ Lacking param group file under current directory." ) saved_groups = torch.load(param_group_path) updated_groups = [] for old_pg, saved_pg in zip(optimizer.optim.param_groups, saved_groups): # obtain updated param group new_pg = copy.deepcopy(saved_pg) new_pg["params"] = old_pg["params"] # The parameters in the same group shouldn't change. updated_groups.append(new_pg) optimizer.optim.__dict__.update({"param_groups": updated_groups}) # Load saved states to optimizer. # Keep a record of loaded files so that file will not be repeatedly loaded. 
loaded_file = set() for pg in optimizer.optim.param_groups: for param in pg["params"]: if param is None: continue param_id = _get_param_id_from_optimizer_param(param, master_to_working_map) if param_id not in weight_map: continue filename = weight_map[param_id]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/checkpoint_io/moe_checkpoint.py
colossalai/checkpoint_io/moe_checkpoint.py
import copy import logging import os from pathlib import Path from shutil import rmtree from typing import Dict, Iterator, Optional, OrderedDict, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from torch.distributed.distributed_c10d import get_global_rank from torch.utils._pytree import tree_map from colossalai.checkpoint_io import CheckpointIndexFile from colossalai.checkpoint_io.hybrid_parallel_checkpoint_io import HybridParallelCheckpointIO from colossalai.checkpoint_io.index_file import CheckpointIndexFile from colossalai.checkpoint_io.utils import ( StateDictSharder, gather_distributed_param, gather_state_dict_fast, get_lora_state_dict, get_model_base_filenames, get_optimizer_base_filenames, load_shard_state_dict, load_state_dict, load_states_into_optimizer, save_config_file, save_param_groups, save_state_dict, save_state_dict_shards, search_tp_partition_dim, sharded_optimizer_loading_epilogue, ) from colossalai.interface import ModelWrapper, OptimizerWrapper from colossalai.tensor.moe_tensor.api import is_moe_tensor try: from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX except ImportError: _EXTRA_STATE_KEY_SUFFIX = "_extra_state" class MoECheckpointIO(HybridParallelCheckpointIO): def __init__( self, global_dp_group: ProcessGroup, pp_group: ProcessGroup, tp_group: ProcessGroup, sp_group: ProcessGroup, ep_group: ProcessGroup, moe_dp_group: ProcessGroup, zero_stage: int, verbose: bool = True, ) -> None: super().__init__(global_dp_group, pp_group, tp_group, sp_group, zero_stage, verbose) self.global_dp_group = global_dp_group self.global_dp_rank = dist.get_rank(global_dp_group) self.global_dp_size = dist.get_world_size(global_dp_group) self.pp_group = pp_group self.tp_group = tp_group self.moe_dp_group = moe_dp_group self.moe_dp_size = dist.get_world_size(moe_dp_group) self.moe_dp_rank = dist.get_rank(moe_dp_group) self.ep_group = ep_group self.ep_size = dist.get_world_size(ep_group) 
self.ep_rank = dist.get_rank(ep_group) @staticmethod def _model_sharder( model: nn.Module, prefix: str = "", keep_vars: bool = False, size_per_shard: int = 1024, param_name_pattern: Optional[str] = None, ) -> Iterator[Tuple[OrderedDict, int]]: # An internel method that breaks state_dict of model into shards within limited size. state_dict_sharder = StateDictSharder(size_per_shard) # Save parameters. for name, param in model.named_parameters(): if param is None: continue if param_name_pattern is not None and param_name_pattern not in name: continue # Gather tensor pieces when using tensor parallel. param_ = gather_distributed_param(param, keep_vars=False) block, block_size = state_dict_sharder.append_param(prefix + name, param_) if block is not None: yield block, block_size # Save buffers. for name, buf in model.named_buffers(): if buf is not None and name not in model._non_persistent_buffers_set: buffer = buf if keep_vars else buf.detach() block, block_size = state_dict_sharder.append_param(prefix + name, buffer) if block is not None: yield block, block_size # Save extra states. extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if ( getattr(model.__class__, "get_extra_state", torch.nn.Module.get_extra_state) is not torch.nn.Module.get_extra_state ): extra_state = model.get_extra_state() block, block_size = state_dict_sharder.append_param(extra_state_key, extra_state) if block is not None: yield block, block_size # Return the last block in sharder. yield state_dict_sharder.current_block, state_dict_sharder.current_block_size def save_sharded_model( self, model: ModelWrapper, checkpoint: str, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_safetensors: bool = False, use_async: bool = False, ) -> None: """ Save sharded model checkpoint under the given checkpointing path. 
The following files will be created under the path: - An index file (pytorch_model.bin.index.json) containing a map between model params/buffers and file names. - Multiple files that store state tensors of models. If pipeline parallelism is used, the filenames are in the form of "pytorch_model.<prefix>-stage-000XX-shard-000XX.bin". If pipeline parallelism is not used, "pytorch_model.<prefix>-000XX.bin" Args: model (nn.Module): Model on local device to be saved. checkpoint (str): Checkpointing path which should be a directory path. gather_dtensor (bool, optional): Whether to gather_dtensor, currently not used. Defaults to True. prefix (str, optional): Perfix of file to save. Defaults to None. size_per_shard (int, optional): Size per shard in MB. Defaults to 1024. use_safetensors (bool, optional): Whether to use safe tensors. Defaults to False. """ assert isinstance(model, ModelWrapper), "Please boost the model before saving!" model = model.unwrap() if os.path.isfile(checkpoint): logging.error(f"Provided path ({checkpoint}) should be a directory, not a file") return Path(checkpoint).mkdir(parents=True, exist_ok=True) if self.moe_dp_rank != 0: dist.barrier() return # ep_rank 0 saves all the parameters and buffers. # other ep_ranks save only experts # Then collect the sharded parameters & buffers along tp_group. # Only devices with tp_rank == 0 are responsible for model saving. 
state_dict_shard = MoECheckpointIO._model_sharder(model, size_per_shard=size_per_shard) weights_name, save_index_file = get_model_base_filenames(prefix, use_safetensors) index_file = CheckpointIndexFile(checkpoint) control_saving = self.tp_rank == 0 and self.sp_rank == 0 if self.pp_size == 1 and self.ep_size == 1: # When pipeline is not used, save the model shards as in general checkpointIO if use_async: super().save_unsharded_model(model, checkpoint, gather_dtensor, use_safetensors, use_async=use_async) else: total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=weights_name, is_master=control_saving, use_safetensors=use_safetensors, ) if control_saving: index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) save_config_file(model, checkpoint) if self.verbose and self.coordinator.is_master(): logging.info( f"The model is split into checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) dist.barrier() else: # When pipeline is used, each stage produces its own shard files and index files. # Index files belonging to each stage are saved under a temporary folder ./tmp_index_files/ # After all the state_dicts have been saved, the master rank integrates all the index files into one final index file and deletes the tmp folder. final_index_file_path = copy.deepcopy(save_index_file) tmp_index_file_folder = os.path.join(checkpoint, "tmp_index_files") Path(tmp_index_file_folder).mkdir(parents=True, exist_ok=True) # Manage filenames of sharded weights and index file for each pipeline stage. 
weights_name = weights_name.replace(".bin", f"-stage-{self.pp_rank+1:05d}-{self.ep_rank+1:05d}-shard.bin") weights_name = weights_name.replace( ".safetensors", f"-stage-{self.pp_rank+1:05d}-{self.ep_rank+1:05d}-shard.safetensors" ) save_index_file = save_index_file.replace(".json", f"-stage-{self.pp_rank+1:05d}-{self.ep_rank+1:05d}.json") save_index_file = os.path.join("tmp_index_files", save_index_file) total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=weights_name, is_master=control_saving, use_safetensors=use_safetensors, use_pp_format=True, ) if control_saving: index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) else: dist.barrier() return dist.barrier() # The global master rank integrates the index files and clean the folder. if self.coordinator.is_master(): final_index_file = CheckpointIndexFile(checkpoint) final_index_file.append_meta_data("total_size", 0) for filename in os.listdir(tmp_index_file_folder): stage_index_file = CheckpointIndexFile.from_file(os.path.join(tmp_index_file_folder, filename)) final_index_file.metadata["total_size"] += stage_index_file.metadata["total_size"] for weight, weight_filename in stage_index_file.weight_map.items(): final_index_file.append_weight_map(weight, weight_filename) final_index_file.write_index_file(final_index_file_path) save_config_file(model, checkpoint) rmtree(tmp_index_file_folder) if self.verbose and self.coordinator.is_master(): logging.info( f"The model is split into checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {final_index_file_path}." 
) @staticmethod def gather_from_sharded_optimizer_state( state: OrderedDict, param: torch.Tensor, original_shape: torch.Size, global_dp_group: ProcessGroup, tp_group: ProcessGroup, use_zero: bool, inplace: bool, is_moe_param: bool, moe_dp_group: ProcessGroup = None, device: torch.device = torch.device("cpu"), ) -> OrderedDict: """ With given parameter and its optimizer states, gather the complete optimizer state for saving. Args: state (OrderedDict): Optimizer states of given parameter, might be distributed among tp/dp group if using TP/Zero. param (torch.Tensor): The given parameter. It should be working_param when using Zero. original_shape (torch.Size): The size of parameter before sharding. global_dp_group (ProcessGroup): The process group of data parallel. tp_group (ProcessGroup): The process group of tensor parallel. use_zero (bool): Whether Zero is used. inplace (bool): If set to True, will update the values of argument 'state' in place. Else will make a copy of state. device (torch.device): The destination device of loaded optimizer states. Defaults to torch.device('cpu'). Returns: OrderedDict: The complete optimizer state of given parameter. """ global_dp_size = dist.get_world_size(global_dp_group) tp_size = dist.get_world_size(tp_group) moe_dp_size = dist.get_world_size(moe_dp_group) if moe_dp_group is not None else 1 current_shape = param.shape state_ = state if inplace else copy.deepcopy(state) for k, v in state_.items(): if isinstance(v, torch.Tensor) and k != "step": v = v.cuda() # First gather Zero shards. 
if use_zero and is_moe_param and moe_dp_size > 1: moe_dp_rank = dist.get_rank(moe_dp_group) dst = get_global_rank(moe_dp_group, 0) if moe_dp_rank == 0: gather_tensor = [torch.zeros_like(v) for _ in range(moe_dp_size)] dist.gather(v, gather_tensor, group=moe_dp_group, dst=dst) v = torch.stack(gather_tensor).view(-1)[: param.numel()].reshape_as(param) else: dist.gather(v, group=moe_dp_group, dst=dst) elif use_zero and not is_moe_param and global_dp_size > 1: dp_rank = dist.get_rank(global_dp_group) dst = get_global_rank(global_dp_group, 0) if dp_rank == 0: gather_tensor = [torch.zeros_like(v) for _ in range(global_dp_size)] dist.gather(v, gather_tensor, group=global_dp_group, dst=dst) v = torch.stack(gather_tensor).view(-1)[: param.numel()].reshape_as(param) else: dist.gather(v, group=global_dp_group, dst=dst) # Then gather TP shards. partition_dim = search_tp_partition_dim(current_shape, original_shape, tp_size) if partition_dim is not None: tp_rank = dist.get_rank(tp_group) dst = get_global_rank(tp_group, 0) if tp_rank == 0: gather_tensor = [torch.zeros_like(v) for _ in range(tp_size)] dist.gather(v, gather_tensor, group=tp_group, dst=dst) v = torch.cat(gather_tensor, dim=partition_dim) else: dist.gather(v, group=tp_group, dst=dst) state_[k] = v.detach().clone().to(device) return state_ @staticmethod def _optimizer_sharder( optimizer: OptimizerWrapper, use_zero: bool, global_dp_group: ProcessGroup, tp_group: ProcessGroup, moe_dp_group: ProcessGroup, size_per_shard: int = 1024, only_moe_param: bool = False, ): # An internel method that breaks state_dict of optimizer into shards within limited size. 
state_dict_sharder = StateDictSharder(size_per_shard) param_info = optimizer.param_info master_to_working_map = optimizer.get_master_to_working_map() for param, state in optimizer.optim.state.items(): if param is None: continue if master_to_working_map is not None: working_param = master_to_working_map[id(param)] else: working_param = param param_id = param_info["param2id"][id(working_param)] original_shape = param_info["param2shape"][id(working_param)] state_ = MoECheckpointIO.gather_from_sharded_optimizer_state( state, working_param, original_shape=original_shape, global_dp_group=global_dp_group, moe_dp_group=moe_dp_group, tp_group=tp_group, use_zero=use_zero, inplace=False, is_moe_param=is_moe_tensor(working_param), # TODO: Check correctness here ) if only_moe_param and not is_moe_tensor(working_param): continue block, block_size = state_dict_sharder.append_optim_state(param_id, state_) if block is not None: yield block, block_size # Return the last block in sharder. yield state_dict_sharder.current_block, state_dict_sharder.current_block_size def save_sharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint: str, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_async: bool = False, ): """ Save sharded optimizer checkpoint under the given checkpointing path. The following files will be created under the path: - An index file (pytorch_optim.bin.index.json) containing a map between optimizer states and file names - A group file (pytorch_optim_group.bin) recording information of param_groups - Multiple files that store state tensors of optimizers. If pipeline parallelism is used, the filenames are in the form of "pytorch_optim.<prefix>-stage-000XX-shard-000XX.bin". 
If pipeline parallelism is not used, "pytorch_optim.<prefix>-000XX.bin" Args: optimizer (OptimizerWrapper): Optimizer to save sharded state_dict checkpoint (str): Path to save optimizer state_dict gather_dtensor (bool): Whether to gather_dtensor, not used prefix (str): Perfix of file to save size_per_shard (int): Max file size of each file shard that store state tensors """ assert isinstance(optimizer, OptimizerWrapper), "Please boost the optimizer before saving!" if os.path.isfile(checkpoint): logging.error(f"Provided path ({checkpoint}) should be a directory, not a file") return Path(checkpoint).mkdir(parents=True, exist_ok=True) # If optim states are not sharded, other ranks don't need to participate in gather. if not self.use_zero and self.moe_dp_rank != 0: dist.barrier() return # Then collect the sharded states along dp_group(if using zero)/tp_group. # Only devices with (dp_rank == 0 and tp_rank == 0) are responsible for states saving. state_dict_shard = MoECheckpointIO._optimizer_sharder( optimizer, use_zero=self.use_zero, global_dp_group=self.global_dp_group, tp_group=self.tp_group, moe_dp_group=self.moe_dp_group, size_per_shard=size_per_shard, only_moe_param=self.ep_rank != 0, ) states_name, save_index_file, param_group_file = get_optimizer_base_filenames(prefix) index_file = CheckpointIndexFile(checkpoint) # e.g. dp_size = 4, moe_dp_size = 2, ep_size = 2 and use gather # rank 0 saves moe & non-moe params; rank 1 only saves moe params # rank 3 & 4 save nothing control_saving = self.tp_rank == 0 and self.moe_dp_rank == 0 and self.sp_rank == 0 if self.pp_size == 1 and self.ep_size == 1: # When pipeline is not used, save the optimizer shards as in general checkpointIO total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=control_saving, ) if control_saving: # Store param groups. 
index_file.append_meta_data("param_groups", param_group_file) group_file_path = os.path.join(checkpoint, param_group_file) param_groups = [ {**group, "params": group_info["params"]} for group, group_info in zip(optimizer.param_groups, optimizer.param_info["param_groups"]) ] save_param_groups({"param_groups": param_groups}, group_file_path) # Store index file. index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) if self.verbose and self.coordinator.is_master(): logging.info( f"The optimizer is going to be split to checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) dist.barrier() else: # When pipeline is used, each stage produces its own shard files and index files. # Index files belonging to each stage are saved under a temporary folder ./tmp_index_files/ # After all the state_dicts have been saved, the master rank integrates all the index files into one final index file and deletes the tmp folder. final_index_file_path = copy.deepcopy(save_index_file) tmp_index_file_folder = os.path.join(checkpoint, "tmp_index_files") Path(tmp_index_file_folder).mkdir(parents=True, exist_ok=True) # Manage filenames of sharded weights and index file for each pipeline stage. 
states_name = states_name.replace(".bin", f"-stage-{self.pp_rank+1:05d}-{self.ep_rank+1:05d}-shard.bin") save_index_file = save_index_file.replace(".json", f"-stage-{self.pp_rank+1:05d}-{self.ep_rank+1:05d}.json") save_index_file = os.path.join("tmp_index_files", save_index_file) total_size = save_state_dict_shards( sharded_state_dict=state_dict_shard, checkpoint=checkpoint, index_file=index_file, base_filename=states_name, is_master=control_saving, use_pp_format=True, ) if control_saving: index_file.append_meta_data("total_size", total_size) index_file.write_index_file(save_index_file) else: dist.barrier() return dist.barrier() # The global master rank integrates the index files and clean the folder. if self.coordinator.is_master(): final_index_file = CheckpointIndexFile(checkpoint) final_index_file.append_meta_data("total_size", 0) for filename in os.listdir(tmp_index_file_folder): stage_index_file = CheckpointIndexFile.from_file(os.path.join(tmp_index_file_folder, filename)) final_index_file.metadata["total_size"] += stage_index_file.metadata["total_size"] for param_id, state_filename in stage_index_file.weight_map.items(): final_index_file.append_weight_map(param_id, state_filename) # Store param groups. final_index_file.append_meta_data("param_groups", param_group_file) group_file_path = os.path.join(checkpoint, param_group_file) param_groups = [ {**group, "params": group_info["params"]} for group, group_info in zip(optimizer.param_groups, optimizer.param_info["param_groups"]) ] save_param_groups({"param_groups": param_groups}, group_file_path) final_index_file.write_index_file(final_index_file_path) rmtree(tmp_index_file_folder) if self.verbose and self.coordinator.is_master(): logging.info( f"The model is split into checkpoint shards. " f"You can find where each parameters has been saved in the " f"index located at {final_index_file_path}." 
) def load_sharded_optimizer( self, optimizer: OptimizerWrapper, checkpoint_index_file: str, prefix: str = "", low_cpu_mem_mode: bool = True, num_threads: int = 1, ): """ Load sharded optimizer with the given path to index file of checkpoint folder. Args: optimizer (OptimizerWrapper): The optimizer to be loaded. checkpoint_index_file (str): Path to the index file of checkpointing folder. prefix (str): Not used. """ assert isinstance(optimizer, OptimizerWrapper), "Please boost the optimizer before loading!" def _get_param_id_from_optimizer_param( param: torch.Tensor, master_to_working_map: Optional[Dict[int, torch.Tensor]] = None ): if master_to_working_map is not None: working_param = master_to_working_map[id(param)] else: working_param = param return optimizer.param_info["param2id"][id(working_param)] # id_map is a mapping from param ids kept by current pipeline, to their corresponding parameter objects. # When Zero is used, the mapped parameter objects should be fp32 master parameters. # IDs should be obtained through saved param2id mapping earlier saved in optimizer.param_info. id_map = {} master_to_working_map = optimizer.get_master_to_working_map() for pg in optimizer.optim.param_groups: for param in pg["params"]: param_id = _get_param_id_from_optimizer_param(param, master_to_working_map) id_map[param_id] = param # Read checkpoint index file. ckpt_index_file = CheckpointIndexFile.from_file(checkpoint_index_file) ckpt_root_path = ckpt_index_file.root_path weight_map = ckpt_index_file.weight_map weight_map = {int(k): v for k, v in weight_map.items()} # convert saved id from str to int # Load param_groups param_group_path = ckpt_index_file.get_param_group_filename() if param_group_path is None: raise RuntimeError( f"Invalid index file path {checkpoint_index_file} for an optimizer. \ Lacking param group file under current directory." 
) saved_groups = torch.load(param_group_path) updated_groups = [] for old_pg, saved_pg in zip(optimizer.optim.param_groups, saved_groups): # obtain updated param group new_pg = copy.deepcopy(saved_pg) new_pg["params"] = old_pg["params"] # The parameters in the same group shouln't change. updated_groups.append(new_pg) # ep param groups if len(optimizer.optim.param_groups) == len(saved_groups) + 1: new_pg = copy.deepcopy(saved_pg) new_pg["params"] = optimizer.optim.param_groups[-1]["params"] updated_groups.append(new_pg) optimizer.optim.__dict__.update({"param_groups": updated_groups}) # Load saved states to optimizer. # Keep a record of loaded files so that file will not be repeatedly loaded. loaded_file = set() for pg in optimizer.optim.param_groups: for param in pg["params"]: if param is None: continue param_id = _get_param_id_from_optimizer_param(param, master_to_working_map) if param_id not in weight_map: continue filename = weight_map[param_id] # If this param's states has been loaded before, directly return. if filename in loaded_file: continue file_path = os.path.join(ckpt_root_path, filename) state_dict = load_shard_state_dict(Path(file_path), use_safetensors=False) load_states_into_optimizer(optimizer.optim, state_dict, id_map, strict=True) loaded_file.add(filename) # Then shard the loaded optimizer states if using tp/zero. 
for param, state in optimizer.optim.state.items(): device = param.device if master_to_working_map is not None: working_param = master_to_working_map[id(param)] else: working_param = param original_shape = optimizer.param_info["param2shape"][id(working_param)] sharded_state = self.shard_from_complete_optimizer_state( state, current_shape=working_param.shape, original_shape=original_shape, device=device, inplace=True, is_moe_param=is_moe_tensor(working_param), ) optimizer.optim.state[param] = sharded_state sharded_optimizer_loading_epilogue(optimizer.optim) if self.verbose and self.coordinator.is_master(): logging.info(f"The optimizer has been successfully loaded from sharded checkpoint: {ckpt_root_path}.") def shard_from_complete_optimizer_state( self, state: OrderedDict, current_shape: torch.Size, original_shape: torch.Size, device: torch.device, inplace: bool, is_moe_param: bool, ) -> OrderedDict: """ With complete optimizer states of a specific parameter loaded from checkpoint, slice out the sharded optimizer states kept by current device. Args: state (OrderedDict): Complete optimizer states of a given parameter, loaded from checkpoint. current_shape (torch.Size): The size of parameter after sharding. original_shape (torch.Size): The size of parameter before sharding. device (torch.device): The destination device of loaded optimizer states. inplace (bool): If set to True, will update the values of argument 'state' in place. Else will make a copy of state. Returns: OrderedDict: The sharded optimizer state of the given parameter. """ state_ = state if inplace else copy.deepcopy(state) for k, v in state_.items(): if isinstance(v, torch.Tensor) and k != "step": # Shard state along tensor parallel group. 
partition_dim = search_tp_partition_dim(current_shape, original_shape, self.tp_size) if partition_dim is not None: slice_size = current_shape[partition_dim] v = v.split(slice_size, dim=partition_dim)[self.tp_rank] # Shard state along data parallel group when using Zero. if self.use_zero and not is_moe_param and self.global_dp_size > 1: padding_size = (self.global_dp_size - v.numel() % self.global_dp_size) % self.global_dp_size with torch.no_grad(): v = v.flatten() if padding_size > 0: v = torch.nn.functional.pad(v, [0, padding_size]) slice_size = v.numel() // self.global_dp_size v = v.split(slice_size, dim=0)[self.global_dp_rank] elif self.use_zero and is_moe_param and self.moe_dp_size > 1: # LowLevelZeRO pads by global dp size for now. # TODO: update both to use moe dp size padding_size = (self.global_dp_size - v.numel() % self.global_dp_size) % self.global_dp_size with torch.no_grad(): v = v.flatten() if padding_size > 0: v = torch.nn.functional.pad(v, [0, padding_size]) slice_size = v.numel() // self.moe_dp_size v = v.split(slice_size, dim=0)[self.moe_dp_rank] state_[k] = v.detach().clone().to(device) return state_ """Migration from MoEHybridParallelCheckpointIO. These functions mostly deals with unsharded saving, and can be savely deleted since large MoE models are often saved in shards. """ # Copied from colossalai.moe def pre_save_model(self, model: nn.Module) -> dict: state_dict = model.state_dict() for name, param in model.named_parameters(): if ".experts." in name and is_moe_tensor(param): ep_group = param.ep_group ep_rank = dist.get_rank(ep_group) ep_size = dist.get_world_size(ep_group) # TODO: check correctness here # dp_rank = get_dp_rank(param)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/accelerator/npu_accelerator.py
colossalai/accelerator/npu_accelerator.py
#!/usr/bin/env python from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch import torch.distributed as dist from .base_accelerator import BaseAccelerator try: import torch_npu # noqa except ImportError: pass __all__ = ["NpuAccelerator"] class NpuAccelerator(BaseAccelerator): """ Accelerator class for Huawei NPU devices. """ def __init__(self): super().__init__(name="npu", communication_backend="hccl", is_synchronous=False) # ======================= # device APIs # ======================= def get_version(self) -> str: """ Return the version of the accelerator which torch is built against. """ return torch.version.cann def get_current_device(self) -> torch.device: """ Return the current device. """ return torch.device(f"npu:{torch.npu.current_device()}") def current_device(self) -> int: """ Return the current device index. """ return torch.npu.current_device() def set_device(self, device: Optional[Union[torch.device, int]] = None) -> None: """ Bind the current process to a device. """ if device is None: if not dist.is_initialized(): raise RuntimeError("Cannot get current device when distributed is not initialized.") device = dist.get_rank() % self.device_count() torch.npu.set_device(device) def get_device_name(self, device: Union[torch.device, int]) -> str: """ Return the name of the device. """ return torch.npu.get_device_name(device) def synchronize(self, device: Union[torch.device, int] = None): """ Synchronize the current process. """ torch.npu.synchronize(device) def is_available(self): """ Check if the accelerator is available. """ return torch.npu.is_available() def device_count(self): """ Return the number of devices on the machine. """ return torch.npu.device_count() def get_device_capability(self, device=None) -> Tuple[int, int]: """ Gets the npu capability of a device. """ return torch.npu.get_device_capability(device) def get_device_name(self, device=None) -> str: """ Gets the name of a device. 
""" return torch.npu.get_device_name(device) def get_device_properties(self, device): """ Gets the properties of a device. """ return torch.npu.get_device_properties(device) def utilization(self, device=None) -> int: """ Returns the percent of time over the past sample period during which one or more kernels was executing on the GPU as given by nvidia-smi """ return torch.npu.utilization(device) # ======================= # random number generator APIs # ======================= def get_rng_state(self, device="npu") -> torch.Tensor: """ Returns the random number generator state of the specified GPU as a ByteTensor. """ return torch.npu.get_rng_state(device) def get_rng_state_all(self) -> List[torch.Tensor]: """ Returns a list of ByteTensor representing the random number states of all devices. """ return torch.npu.get_rng_state_all() def set_rng_state(self, new_state: torch.ByteTensor, device: str = "npu") -> None: """ Sets the random number generator state of the specified GPU. """ torch.npu.set_rng_state(new_state, device) def set_rng_state_all(self, new_states: List[torch.ByteTensor]) -> None: """ Sets the random number generator state of all devices. """ torch.npu.set_rng_state_all(new_states) def manual_seed(self, seed: int) -> None: """ Sets the seed for generating random numbers for the current GPU. """ torch.npu.manual_seed(seed) def manual_seed_all(self, seed: int) -> None: """ Set the random seed for the all processes. """ torch.npu.manual_seed_all(seed) def seed(self) -> None: """ Sets the seed for generating random numbers to a random number for the current GPU. """ torch.npu.seed() def seed_all(self) -> None: """ Sets the seed for generating random numbers to a random number on all GPUs. """ torch.npu.seed_all() def initial_seed(self) -> int: """ Returns the current random seed of the current GPU. 
""" return torch.npu.initial_seed() # ======================= # memory management APIs # ======================= def empty_cache(self) -> None: """ Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other GPU application and visible in nvidia-smi. """ torch.npu.empty_cache() def memory_stats(self, device=None) -> Dict[str, Any]: """ Returns a dictionary of npu memory allocator statistics for a given device. """ return torch.npu.memory_stats(device=device) def memory_summary(self, device=None, abbreviated=False) -> str: """ Returns a human-readable printout of the current memory allocator statistics for a given device. """ return torch.npu.memory_summary(device=device, abbreviated=abbreviated) def memory_snapshot(self): """ Returns a snapshot of the npu memory allocator state across all devices. """ return torch.npu.memory_snapshot() def memory_allocated(self, device=None) -> int: """ Returns the current GPU memory occupied by tensors in bytes for a given device. """ return torch.npu.memory_allocated(device=device) def max_memory_allocated(self, device=None) -> int: """ Returns the maximum GPU memory occupied by tensors in bytes for a given device. """ return torch.npu.max_memory_allocated(device=device) def reset_max_memory_allocated(self, device=None) -> None: """ Resets the starting point in tracking maximum GPU memory occupied by tensors for a given device. """ torch.npu.reset_max_memory_allocated(device=device) def reset_max_memory_cached(self, device=None) -> None: """ Resets the starting point in tracking maximum GPU memory managed by the caching allocator for a given device. """ torch.npu.reset_max_memory_cached(device=device) def memory_reserved(self, device=None) -> int: """ Returns the current GPU memory managed by the caching allocator in bytes for a given device. 
""" return torch.npu.memory_reserved(device=device) def max_memory_reserved(self, device=None) -> int: """ Returns the maximum GPU memory managed by the caching allocator in bytes for a given device. """ return torch.npu.max_memory_reserved(device=device) def set_per_process_memory_fraction(self, fraction: float, device=None) -> None: """ Set memory fraction for a process. """ torch.npu.set_per_process_memory_fraction(fraction, device=device) def reset_peak_memory_stats(self, device=None) -> None: """ Resets the "peak" stats tracked by the npu memory allocator. """ torch.npu.reset_peak_memory_stats(device=device) # ======================= # streams and events APIs # ======================= def Stream(self, device=None, priority=0, **kwargs): """ A npu stream is a linear sequence of execution that belongs to a specific device, independent from other streams. See npu-semantics for details. """ return torch.npu.Stream(device, priority, **kwargs) def Event(self, enable_timing: bool = False, blocking: bool = False, interprocess: bool = False): """ npu events are synchronization markers that can be used to monitor the device's progress, to accurately measure timing, and to synchronize npu streams. """ return torch.npu.Event(enable_timing, blocking, interprocess) def current_stream(self, device=None): """ Returns the currently selected Stream for a given device. """ return torch.npu.current_stream(device) def default_stream(self, device=None): """ Returns the default Stream for a given device. """ return torch.npu.default_stream(device) def set_stream(self, stream_): """ Sets the current stream.This is a wrapper API to set the stream. """ torch.npu.set_stream(stream_) def stream(self, stream_): """ Wrapper around the Context-manager StreamContext that selects a given stream. 
""" return torch.npu.stream(stream_) # ======================= # amp APIs # ======================= def autocast( self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True ) -> Callable: """ Return autocast function """ return torch.npu.amp.autocast(enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/accelerator/api.py
colossalai/accelerator/api.py
#!/usr/bin/env python

from collections import OrderedDict
from typing import Union

from .base_accelerator import BaseAccelerator
from .cpu_accelerator import CpuAccelerator
from .cuda_accelerator import CudaAccelerator
from .npu_accelerator import NpuAccelerator

__all__ = ["set_accelerator", "auto_set_accelerator", "get_accelerator"]

# Process-wide accelerator singleton; lazily initialized by get_accelerator().
_ACCELERATOR = None


# we use ordered dictionary here to associate the
# order with device check priority
# i.e. auto_set_accelerator will check cuda first
_ACCELERATOR_MAPPING = OrderedDict(cuda=CudaAccelerator, npu=NpuAccelerator, cpu=CpuAccelerator)


def set_accelerator(accelerator: Union[str, BaseAccelerator]) -> None:
    """
    Set the global accelerator for the current process.

    Args:
        accelerator (Union[str, BaseAccelerator]): the type of accelerator to which the current device belongs.

    Raises:
        KeyError: if ``accelerator`` is a string that is not a key of ``_ACCELERATOR_MAPPING``.
        TypeError: if ``accelerator`` is neither a string nor a ``BaseAccelerator`` instance.
    """
    global _ACCELERATOR

    if isinstance(accelerator, str):
        _ACCELERATOR = _ACCELERATOR_MAPPING[accelerator]()
    elif isinstance(accelerator, BaseAccelerator):
        _ACCELERATOR = accelerator
    else:
        raise TypeError("accelerator must be either a string or an instance of BaseAccelerator")


def auto_set_accelerator() -> None:
    """
    Automatically check if any accelerator is available.
    If an accelerator is available, set it as the global accelerator.

    Raises:
        RuntimeError: if no accelerator is available.
    """
    global _ACCELERATOR

    for accelerator_name, accelerator_cls in _ACCELERATOR_MAPPING.items():
        try:
            accelerator = accelerator_cls()
            if accelerator_name == "cpu" or accelerator.is_available():
                _ACCELERATOR = accelerator
                break
        # Probing a backend that is not installed may raise anything; catch
        # Exception (not a bare ``except:``) so that KeyboardInterrupt and
        # SystemExit still propagate.
        except Exception:
            pass

    if _ACCELERATOR is None:
        raise RuntimeError("No accelerator is available.")


def get_accelerator() -> BaseAccelerator:
    """
    Return the accelerator for the current process. If the accelerator is not initialized, it will be initialized
    to the default accelerator type.

    Returns:
        the accelerator for the current process.
    """
    global _ACCELERATOR

    if _ACCELERATOR is None:
        auto_set_accelerator()
    return _ACCELERATOR
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/accelerator/cpu_accelerator.py
colossalai/accelerator/cpu_accelerator.py
#!/usr/bin/env python

import resource
from contextlib import nullcontext
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import psutil
import torch

from .base_accelerator import BaseAccelerator

__all__ = ["CpuAccelerator"]


class CpuAccelerator(BaseAccelerator):
    """
    Accelerator class for cpu.
    """

    # Device binding is meaningless for the CPU backend, so launchers must not
    # call ``set_device`` on this accelerator.
    support_set_device: bool = False

    def __init__(self):
        super().__init__(name="cpu", communication_backend="gloo", is_synchronous=False)

    # =======================
    # device APIs
    # =======================
    def get_version(self) -> str:
        """
        Return the version of the accelerator which torch is built against.
        """
        # There is no device runtime version for the CPU backend.
        return ""

    def get_current_device(self) -> torch.device:
        """
        Return the current device.
        """
        return torch.device("cpu")

    def current_device(self) -> int:
        """
        Return the current device index.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def set_device(self, device: Optional[Union[torch.device, int]] = None) -> None:
        """
        Bind the current process to a device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def synchronize(self, device: Union[torch.device, int] = None):
        """
        Synchronize the current process.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def is_available(self):
        """
        Check if the accelerator is available.
        """
        # The CPU is always present.
        return True

    def device_count(self):
        """
        Return the number of devices on the machine.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def get_device_capability(self, device=None) -> Tuple[int, int]:
        """
        Gets the cuda capability of a device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def get_device_name(self, device=None) -> str:
        """
        Gets the name of a device.
        """
        # Single definition kept; an identical duplicate existed earlier in the
        # class and was shadowed by this one.
        raise RuntimeError("this method is not supported for cpu accelerator")

    def get_device_properties(self, device):
        """
        Gets the properties of a device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def utilization(self, device=None) -> int:
        """
        Returns the percent of time over the past sample period during which one or more kernels was executing on the
        GPU as given by nvidia-smi
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    # =======================
    # random number generator APIs
    # =======================
    def get_rng_state(self, device=None) -> torch.Tensor:
        """
        Returns the CPU random number generator state as a ByteTensor.

        ``device`` is accepted for interface compatibility and ignored: the CPU
        RNG state is process-global.
        """
        # Fix: torch.get_rng_state() takes no arguments; forwarding ``device``
        # raised a TypeError on every call.
        return torch.get_rng_state()

    def get_rng_state_all(self) -> List[torch.Tensor]:
        """
        Returns a list of ByteTensor representing the random number states of all devices.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def set_rng_state(self, new_state: torch.ByteTensor, device: str = None) -> None:
        """
        Sets the CPU random number generator state. ``device`` is ignored.
        """
        torch.set_rng_state(new_state)

    def set_rng_state_all(self, new_states: List[torch.ByteTensor]) -> None:
        """
        Sets the random number generator state of all devices.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def manual_seed(self, seed: int) -> None:
        """
        Sets the seed for generating random numbers for the current GPU.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def manual_seed_all(self, seed: int) -> None:
        """
        Set the random seed for the all processes.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def seed(self) -> None:
        """
        Sets the seed for generating random numbers to a random number for the current GPU.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def seed_all(self) -> None:
        """
        Sets the seed for generating random numbers to a random number on all GPUs.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def initial_seed(self) -> int:
        """
        Returns the current random seed of the current GPU.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    # =======================
    # memory management APIs
    # =======================
    def empty_cache(self) -> None:
        """
        Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other
        GPU application and visible in nvidia-smi.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def memory_stats(self, device=None) -> Dict[str, Any]:
        """
        Returns a dictionary of CUDA memory allocator statistics for a given device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def memory_summary(self, device=None, abbreviated=False) -> str:
        """
        Returns a human-readable printout of the current memory allocator statistics for a given device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def memory_snapshot(self):
        """
        Returns a snapshot of the CUDA memory allocator state across all devices.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def memory_allocated(self, device=None) -> int:
        """
        Returns the current GPU memory occupied by tensors in bytes for a given device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def max_memory_allocated(self, device=None) -> int:
        """
        Returns the maximum GPU memory occupied by tensors in bytes for a given device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def reset_max_memory_allocated(self, device=None) -> None:
        """
        Resets the starting point in tracking maximum GPU memory occupied by tensors for a given device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def reset_max_memory_cached(self, device=None) -> None:
        """
        Resets the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def memory_reserved(self, device=None) -> int:
        """
        Returns the resident set size of the current process in bytes, as the
        CPU analogue of reserved device memory.
        """
        return psutil.Process().memory_info().rss

    def max_memory_reserved(self, device=None) -> int:
        """
        Returns the peak resident set size of the current process, as the CPU
        analogue of peak reserved device memory.

        NOTE(review): ``ru_maxrss`` is reported in kilobytes on Linux but bytes
        on macOS — callers that assume bytes should confirm the platform.
        """
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    def set_per_process_memory_fraction(self, fraction: float, device=None) -> None:
        """
        Set memory fraction for a process by capping its address-space rlimit
        to ``fraction`` of total system memory.
        """
        max_memory = int(psutil.virtual_memory().total * fraction)
        # Preserve the existing hard limit; only the soft limit is lowered.
        _, hard = resource.getrlimit(resource.RLIMIT_AS)
        resource.setrlimit(resource.RLIMIT_AS, (max_memory, hard))

    def reset_peak_memory_stats(self, device=None) -> None:
        """
        Resets the "peak" stats tracked by the CUDA memory allocator.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    # =======================
    # streams and events APIs
    # =======================
    def Stream(self, device=None, priority=0, **kwargs):
        """
        A CUDA stream is a linear sequence of execution that belongs to a specific device, independent from other streams.
        See cuda-semantics for details.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def Event(self, enable_timing: bool = False, blocking: bool = False, interprocess: bool = False):
        """
        CUDA events are synchronization markers that can be used to monitor the device's progress, to accurately measure
        timing, and to synchronize CUDA streams.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def current_stream(self, device=None):
        """
        Returns the currently selected Stream for a given device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def default_stream(self, device=None):
        """
        Returns the default Stream for a given device.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def set_stream(self, stream_):
        """
        Sets the current stream. This is a wrapper API to set the stream.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    def stream(self, stream_):
        """
        Wrapper around the Context-manager StreamContext that selects a given stream.
        """
        raise RuntimeError("this method is not supported for cpu accelerator")

    # =======================
    # amp APIs
    # =======================
    def autocast(
        self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True
    ) -> Callable:
        """
        Return autocast function. On CPU this is a no-op context manager.
        """
        return nullcontext
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/accelerator/cuda_accelerator.py
colossalai/accelerator/cuda_accelerator.py
#!/usr/bin/env python

from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
import torch.distributed as dist

from .base_accelerator import BaseAccelerator

__all__ = ["CudaAccelerator"]


class CudaAccelerator(BaseAccelerator):
    """
    Accelerator class for Nvidia CUDA devices. Every method is a thin wrapper
    around the corresponding ``torch.cuda`` API.
    """

    def __init__(self):
        super().__init__(name="cuda", communication_backend="nccl", is_synchronous=False)

    # =======================
    # device APIs
    # =======================
    def get_version(self) -> str:
        """
        Return the version of the accelerator which torch is built against.
        """
        return torch.version.cuda

    def get_current_device(self) -> torch.device:
        """
        Return the current device.
        """
        return torch.device(f"cuda:{torch.cuda.current_device()}")

    def current_device(self) -> int:
        """
        Return the current device index.
        """
        return torch.cuda.current_device()

    def set_device(self, device: Optional[Union[torch.device, int]] = None) -> None:
        """
        Bind the current process to a device.

        If ``device`` is None, the device is derived from the process rank so
        that ranks on one node map round-robin onto the local GPUs.
        """
        if device is None:
            if not dist.is_initialized():
                raise RuntimeError("Cannot get current device when distributed is not initialized.")
            device = dist.get_rank() % self.device_count()
        torch.cuda.set_device(device)

    def synchronize(self, device: Union[torch.device, int] = None):
        """
        Synchronize the current process.
        """
        torch.cuda.synchronize(device)

    def is_available(self):
        """
        Check if the accelerator is available.
        """
        return torch.cuda.is_available()

    def device_count(self):
        """
        Return the number of devices on the machine.
        """
        return torch.cuda.device_count()

    def get_device_capability(self, device=None) -> Tuple[int, int]:
        """
        Gets the cuda capability of a device.
        """
        return torch.cuda.get_device_capability(device)

    def get_device_name(self, device=None) -> str:
        """
        Gets the name of a device.
        """
        # Single definition kept; an identical duplicate existed earlier in the
        # class and was shadowed by this one.
        return torch.cuda.get_device_name(device)

    def get_device_properties(self, device):
        """
        Gets the properties of a device.
        """
        return torch.cuda.get_device_properties(device)

    def utilization(self, device=None) -> int:
        """
        Returns the percent of time over the past sample period during which one or more kernels was executing on the
        GPU as given by nvidia-smi
        """
        return torch.cuda.utilization(device)

    # =======================
    # random number generator APIs
    # =======================
    def get_rng_state(self, device="cuda") -> torch.Tensor:
        """
        Returns the random number generator state of the specified GPU as a ByteTensor.
        """
        return torch.cuda.get_rng_state(device)

    def get_rng_state_all(self) -> List[torch.Tensor]:
        """
        Returns a list of ByteTensor representing the random number states of all devices.
        """
        return torch.cuda.get_rng_state_all()

    def set_rng_state(self, new_state: torch.ByteTensor, device: str = "cuda") -> None:
        """
        Sets the random number generator state of the specified GPU.
        """
        torch.cuda.set_rng_state(new_state, device)

    def set_rng_state_all(self, new_states: List[torch.ByteTensor]) -> None:
        """
        Sets the random number generator state of all devices.
        """
        torch.cuda.set_rng_state_all(new_states)

    def manual_seed(self, seed: int) -> None:
        """
        Sets the seed for generating random numbers for the current GPU.
        """
        torch.cuda.manual_seed(seed)

    def manual_seed_all(self, seed: int) -> None:
        """
        Set the random seed for the all processes.
        """
        torch.cuda.manual_seed_all(seed)

    def seed(self) -> None:
        """
        Sets the seed for generating random numbers to a random number for the current GPU.
        """
        torch.cuda.seed()

    def seed_all(self) -> None:
        """
        Sets the seed for generating random numbers to a random number on all GPUs.
        """
        torch.cuda.seed_all()

    def initial_seed(self) -> int:
        """
        Returns the current random seed of the current GPU.
        """
        return torch.cuda.initial_seed()

    # =======================
    # memory management APIs
    # =======================
    def empty_cache(self) -> None:
        """
        Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other
        GPU application and visible in nvidia-smi.
        """
        torch.cuda.empty_cache()

    def memory_stats(self, device=None) -> Dict[str, Any]:
        """
        Returns a dictionary of CUDA memory allocator statistics for a given device.
        """
        return torch.cuda.memory_stats(device=device)

    def memory_summary(self, device=None, abbreviated=False) -> str:
        """
        Returns a human-readable printout of the current memory allocator statistics for a given device.
        """
        return torch.cuda.memory_summary(device=device, abbreviated=abbreviated)

    def memory_snapshot(self):
        """
        Returns a snapshot of the CUDA memory allocator state across all devices.
        """
        return torch.cuda.memory_snapshot()

    def memory_allocated(self, device=None) -> int:
        """
        Returns the current GPU memory occupied by tensors in bytes for a given device.
        """
        return torch.cuda.memory_allocated(device=device)

    def max_memory_allocated(self, device=None) -> int:
        """
        Returns the maximum GPU memory occupied by tensors in bytes for a given device.
        """
        return torch.cuda.max_memory_allocated(device=device)

    def reset_max_memory_allocated(self, device=None) -> None:
        """
        Resets the starting point in tracking maximum GPU memory occupied by tensors for a given device.
        """
        torch.cuda.reset_max_memory_allocated(device=device)

    def reset_max_memory_cached(self, device=None) -> None:
        """
        Resets the starting point in tracking maximum GPU memory managed by the caching allocator for a given device.
        """
        torch.cuda.reset_max_memory_cached(device=device)

    def memory_reserved(self, device=None) -> int:
        """
        Returns the current GPU memory managed by the caching allocator in bytes for a given device.
        """
        return torch.cuda.memory_reserved(device=device)

    def max_memory_reserved(self, device=None) -> int:
        """
        Returns the maximum GPU memory managed by the caching allocator in bytes for a given device.
        """
        return torch.cuda.max_memory_reserved(device=device)

    def set_per_process_memory_fraction(self, fraction: float, device=None) -> None:
        """
        Set memory fraction for a process.
        """
        torch.cuda.set_per_process_memory_fraction(fraction, device=device)

    def reset_peak_memory_stats(self, device=None) -> None:
        """
        Resets the "peak" stats tracked by the CUDA memory allocator.
        """
        torch.cuda.reset_peak_memory_stats(device=device)

    # =======================
    # streams and events APIs
    # =======================
    def Stream(self, device=None, priority=0, **kwargs):
        """
        A CUDA stream is a linear sequence of execution that belongs to a specific device, independent from other streams.
        See cuda-semantics for details.
        """
        return torch.cuda.Stream(device, priority, **kwargs)

    def Event(self, enable_timing: bool = False, blocking: bool = False, interprocess: bool = False):
        """
        CUDA events are synchronization markers that can be used to monitor the device's progress, to accurately measure
        timing, and to synchronize CUDA streams.
        """
        return torch.cuda.Event(enable_timing, blocking, interprocess)

    def current_stream(self, device=None):
        """
        Returns the currently selected Stream for a given device.
        """
        return torch.cuda.current_stream(device)

    def default_stream(self, device=None):
        """
        Returns the default Stream for a given device.
        """
        return torch.cuda.default_stream(device)

    def set_stream(self, stream_):
        """
        Sets the current stream. This is a wrapper API to set the stream.
        """
        torch.cuda.set_stream(stream_)

    def stream(self, stream_):
        """
        Wrapper around the Context-manager StreamContext that selects a given stream.
        """
        return torch.cuda.stream(stream_)

    # =======================
    # amp APIs
    # =======================
    def autocast(
        self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True
    ) -> Callable:
        """
        Return autocast function
        """
        return torch.amp.autocast(device_type="cuda", enabled=enabled, dtype=dtype, cache_enabled=cache_enabled)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/accelerator/__init__.py
colossalai/accelerator/__init__.py
# Public entry points of the accelerator subsystem: the process-wide registry
# helpers from ``api`` plus the concrete accelerator implementations.
from .api import auto_set_accelerator, get_accelerator, set_accelerator
from .base_accelerator import BaseAccelerator
from .cpu_accelerator import CpuAccelerator
from .cuda_accelerator import CudaAccelerator
from .npu_accelerator import NpuAccelerator

# Names re-exported via ``from colossalai.accelerator import *``.
__all__ = [
    "get_accelerator",
    "set_accelerator",
    "auto_set_accelerator",
    "BaseAccelerator",
    "CudaAccelerator",
    "NpuAccelerator",
    "CpuAccelerator",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/accelerator/base_accelerator.py
colossalai/accelerator/base_accelerator.py
#!/usr/bin/env python

from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch

__all__ = ["BaseAccelerator"]


class BaseAccelerator(ABC):
    """
    Abstract interface that every accelerator backend (cuda, npu, cpu, ...)
    implements. Concrete subclasses wrap the matching ``torch.<backend>`` APIs.
    """

    # Whether the launcher may call ``set_device`` on this accelerator;
    # backends without device binding (e.g. cpu) override this to False.
    support_set_device: bool = True

    def __init__(self, name: str, communication_backend: str, is_synchronous: bool) -> None:
        self._name = name
        self._communication_backend = communication_backend
        self._is_synchronous = is_synchronous

    # =======================
    # immutable attributes
    # =======================
    @property
    def name(self) -> str:
        """
        Return the name of the accelerator.
        """
        return self._name

    @property
    def communication_backend(self) -> str:
        """
        Return the name of the backend communication library.
        """
        return self._communication_backend

    @property
    def is_synchronous(self) -> bool:
        """
        Return whether the accelerator is a synchronous device.
        """
        return self._is_synchronous

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        return f"{cls_name}(name={self._name}, communication_backend={self._communication_backend}, is_synchronous={self._is_synchronous})"

    # =======================
    # device APIs
    # =======================
    @abstractmethod
    def get_version(self) -> str:
        """
        Return the version of the accelerator which torch is built against.
        """

    @abstractmethod
    def get_current_device(self) -> torch.device:
        """
        Return the current device.
        """

    @abstractmethod
    def current_device(self) -> int:
        """
        Return the current device index.
        """

    @abstractmethod
    def set_device(self, device: Optional[Union[torch.device, int]] = None) -> None:
        """
        Bind the current process to a device.
        """

    @abstractmethod
    def synchronize(self, device: Union[torch.device, int] = None):
        """
        Synchronize the current process.
        """

    @abstractmethod
    def is_available(self):
        """
        Check if the accelerator is available.
        """

    @abstractmethod
    def device_count(self):
        """
        Return the number of devices on the machine.
        """

    def set_to_device(self, models: Any) -> Any:
        """
        Send model to device.

        :param models: nn.module or a list of module
        """
        # NOTE(review): a one-element list returns the bare module rather than
        # a list — callers appear to rely on this asymmetry, so it is kept.
        if isinstance(models, list) and len(models) > 1:
            ret = []
            for model in models:
                ret.append(model.to(self.get_current_device()))
            return ret
        elif isinstance(models, list):
            return models[0].to(self.get_current_device())
        else:
            return models.to(self.get_current_device())

    @abstractmethod
    def get_device_capability(self, device=None) -> Tuple[int, int]:
        """
        Gets the capability of a device.
        """

    @abstractmethod
    def get_device_name(self, device=None) -> str:
        """
        Gets the name of a device.
        """
        # Single abstract declaration kept; an identical duplicate existed
        # earlier in the class and was shadowed by this one.

    @abstractmethod
    def get_device_properties(self, device):
        """
        Gets the properties of a device.
        """

    @abstractmethod
    def utilization(self, device=None) -> int:
        """
        Returns the percent of time over the past sample period during which one or more kernels was executing on the
        device as given by nvidia-smi or npu-smi, etc.
        """

    # =======================
    # random number generator APIs
    # =======================
    @abstractmethod
    def get_rng_state(self, device="cuda") -> torch.Tensor:
        """
        Returns the random number generator state of the specified device as a ByteTensor.
        """

    @abstractmethod
    def get_rng_state_all(self) -> List[torch.Tensor]:
        """
        Returns a list of ByteTensor representing the random number states of all devices.
        """

    @abstractmethod
    def set_rng_state(self, new_state: torch.ByteTensor, device: str = "cuda") -> None:
        """
        Sets the random number generator state of the specified device.
        """

    @abstractmethod
    def set_rng_state_all(self, new_states: List[torch.ByteTensor]) -> None:
        """
        Sets the random number generator state of all devices.
        """

    @abstractmethod
    def manual_seed(self, seed: int) -> None:
        """
        Sets the seed for generating random numbers for the current device.
        """

    @abstractmethod
    def manual_seed_all(self, seed: int) -> None:
        """
        Sets the seed for generating random numbers on all devices.
        """

    @abstractmethod
    def seed(self) -> None:
        """
        Sets the seed for generating random numbers to a random number for the current device.
        """

    @abstractmethod
    def seed_all(self) -> None:
        """
        Sets the seed for generating random numbers to a random number on all devices.
        """

    @abstractmethod
    def initial_seed(self) -> int:
        """
        Returns the current random seed of the current device.
        """

    # =======================
    # memory management APIs
    # =======================
    @abstractmethod
    def empty_cache(self) -> None:
        """
        Releases all unoccupied cached memory currently held by the caching allocator so that those can be used in other
        device application and visible in nvidia-smi.
        """

    @abstractmethod
    def memory_stats(self, device=None) -> Dict[str, Any]:
        """
        Returns a dictionary of CUDA memory allocator statistics for a given device.
        """

    @abstractmethod
    def memory_summary(self, device=None, abbreviated=False) -> str:
        """
        Returns a human-readable printout of the current memory allocator statistics for a given device.
        """

    @abstractmethod
    def memory_snapshot(self):
        """
        Returns a snapshot of the CUDA memory allocator state across all devices.
        """

    @abstractmethod
    def memory_allocated(self, device=None) -> int:
        """
        Returns the current device memory occupied by tensors in bytes for a given device.
        """

    @abstractmethod
    def max_memory_allocated(self, device=None) -> int:
        """
        Returns the maximum device memory occupied by tensors in bytes for a given device.
        """

    @abstractmethod
    def reset_max_memory_allocated(self, device=None) -> None:
        """
        Resets the starting point in tracking maximum device memory occupied by tensors for a given device.
        """

    @abstractmethod
    def reset_max_memory_cached(self, device=None) -> None:
        """
        Resets the starting point in tracking maximum device memory managed by the caching allocator for a given device.
        """

    @abstractmethod
    def memory_reserved(self, device=None) -> int:
        """
        Returns the current device memory managed by the caching allocator in bytes for a given device.
        """

    @abstractmethod
    def max_memory_reserved(self, device=None) -> int:
        """
        Returns the maximum device memory managed by the caching allocator in bytes for a given device.
        """

    @abstractmethod
    def set_per_process_memory_fraction(self, fraction: float, device=None) -> None:
        """
        Set memory fraction for a process.
        """

    @abstractmethod
    def reset_peak_memory_stats(self, device=None) -> None:
        """
        Resets the "peak" stats tracked by the device memory allocator.
        """

    # =======================
    # streams and events APIs
    # =======================
    @abstractmethod
    def Stream(self, device=None, priority=0, **kwargs):
        """
        A device stream is a linear sequence of execution that belongs to a specific device, independent from other streams.
        See cuda-semantics for details.
        """

    @abstractmethod
    def Event(self, enable_timing: bool = False, blocking: bool = False, interprocess: bool = False):
        """
        device events are synchronization markers that can be used to monitor the device's progress, to accurately measure
        timing, and to synchronize CUDA streams.
        """

    @abstractmethod
    def current_stream(self, device=None):
        """
        Returns the currently selected Stream for a given device.
        """

    @abstractmethod
    def default_stream(self, device=None):
        """
        Returns the default Stream for a given device.
        """

    @abstractmethod
    def set_stream(self, stream_):
        """
        Sets the current stream. This is a wrapper API to set the stream.
        """

    @abstractmethod
    def stream(self, stream_):
        """
        Wrapper around the Context-manager StreamContext that selects a given stream.
        """

    # =======================
    # amp APIs
    # =======================
    @abstractmethod
    def autocast(
        self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True
    ) -> Callable:
        """
        Return autocast function
        """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cluster/dist_coordinator.py
colossalai/cluster/dist_coordinator.py
import functools import os from contextlib import contextmanager import torch.distributed as dist from torch.distributed import ProcessGroup from colossalai.context.singleton_meta import SingletonMeta class DistCoordinator(metaclass=SingletonMeta): """ This class is used to coordinate distributed training. It is a singleton class, which means that there is only one instance of this class in the whole program. There are some terms that are used in this class: - rank: the rank of the current process - world size: the total number of processes - local rank: the rank of the current process on the current node - master: the process with rank 0 - node master: the process with local rank 0 on the current node ```python from colossalai.cluster.dist_coordinator import DistCoordinator coordinator = DistCoordinator() if coordinator.is_master(): do_something() coordinator.print_on_master('hello world') ``` Attributes: rank (int): the rank of the current process world_size (int): the total number of processes local_rank (int): the rank of the current process on the current node """ def __init__(self): assert ( dist.is_initialized() ), "Distributed is not initialized. Please call `torch.distributed.init_process_group` or `colossalai.launch` first." self._rank = dist.get_rank() self._world_size = dist.get_world_size() # this is often passed by launchers such as torchrun self._local_rank = int(os.environ.get("LOCAL_RANK", -1)) @property def rank(self) -> int: return self._rank @property def world_size(self) -> int: return self._world_size @property def local_rank(self) -> int: return self._local_rank def _assert_local_rank_set(self): """ Assert that the local rank is set. This is often passed by launchers such as torchrun. """ assert ( self.local_rank >= 0 ), "The environment variable LOCAL_RANK is not set, thus the coordinator is not aware of the local rank of the current process." 
def is_master(self, process_group: ProcessGroup = None) -> bool: """ Check if the current process is the master process (rank is 0). It can accept a sub process group to check the rank 0 with respect to the process. Args: process_group (ProcessGroup, optional): process group to use for the rank 0 check. Defaults to None, which refers to the default process group. Returns: bool: True if the current process is the master process, False otherwise """ rank = dist.get_rank(group=process_group) return rank == 0 def is_node_master(self) -> bool: """ Check if the current process is the master process on the current node (local rank is 0). Returns: bool: True if the current process is the master process on the current node, False otherwise """ self._assert_local_rank_set() return self.local_rank == 0 def is_last_process(self, process_group: ProcessGroup = None) -> bool: """ Check if the current process is the last process (rank is world size - 1). It can accept a sub process group to check the last rank with respect to the process. Args: process_group (ProcessGroup, optional): process group to use for the last rank check. Defaults to None, which refers to the default process group. Returns: bool: True if the current process is the last process, False otherwise """ rank = dist.get_rank(group=process_group) world_size = dist.get_world_size(group=process_group) return rank == world_size - 1 def print_on_master(self, msg: str, process_group: ProcessGroup = None): """ Print message only from rank 0. Args: msg (str): message to print process_group (ProcessGroup, optional): process group to use for the rank 0 check. Defaults to None, which refers to the default process group. """ rank = dist.get_rank(group=process_group) if rank == 0: print(msg) def print_on_node_master(self, msg: str): """ Print message only from local rank 0. Local rank 0 refers to the 0th process running the current node. 
Args: msg (str): message to print """ self._assert_local_rank_set() if self.local_rank == 0: print(msg) @contextmanager def priority_execution(self, executor_rank: int = 0, process_group: ProcessGroup = None): """ This context manager is used to allow one process to execute while blocking all other processes in the same process group. This is often useful when downloading is required as we only want to download in one process to prevent file corruption. ```python from colossalai.cluster import DistCoordinator dist_coordinator = DistCoordinator() with dist_coordinator.priority_execution(): dataset = CIFAR10(root='./data', download=True) ``` Args: executor_rank (int): the process rank to execute without blocking, all other processes will be blocked process_group (ProcessGroup, optional): process group to use for the executor rank check. Defaults to None, which refers to the default process group. """ rank = dist.get_rank(group=process_group) should_block = rank != executor_rank if should_block: self.block_all(process_group) yield if not should_block: self.block_all(process_group) def destroy(self, process_group: ProcessGroup = None): """ Destroy the distributed process group. Args: process_group (ProcessGroup, optional): process group to destroy. Defaults to None, which refers to the default process group. """ dist.destroy_process_group(process_group) def block_all(self, process_group: ProcessGroup = None): """ Block all processes in the process group. Args: process_group (ProcessGroup, optional): process group to block. Defaults to None, which refers to the default process group. """ dist.barrier(group=process_group) def on_master_only(self, process_group: ProcessGroup = None): """ A function wrapper that only executes the wrapped function on the master process (rank 0). 
```python from colossalai.cluster import DistCoordinator dist_coordinator = DistCoordinator() @dist_coordinator.on_master_only() def print_on_master(msg): print(msg) ``` """ is_master = self.is_master(process_group) # define an inner function def decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): if is_master: return func(*args, **kwargs) return wrapper return decorator
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cluster/__init__.py
colossalai/cluster/__init__.py
from .device_mesh_manager import DeviceMeshManager from .dist_coordinator import DistCoordinator from .process_group_manager import ProcessGroupManager from .process_group_mesh import ProcessGroupMesh __all__ = ["DistCoordinator", "ProcessGroupManager", "DeviceMeshManager", "ProcessGroupMesh"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cluster/process_group_mesh.py
colossalai/cluster/process_group_mesh.py
import gc import itertools from functools import reduce from operator import mul from typing import Dict, List, Optional, Tuple, Union import numpy as np import torch.distributed as dist from torch.distributed import ProcessGroup from torch.distributed.distributed_c10d import GroupMember def prod(nums: List[int]) -> int: """Product of a list of numbers. Args: nums (List[int]): A list of numbers. Returns: int: The product of the numbers. """ return reduce(mul, nums) class ProcessGroupMesh: """A helper class to manage the process group mesh. It only describes how to organize process groups, and it's decoupled with parallel method. It just initialize process groups and cache them. The parallel method should manage them and use them to do the parallel computation. We use a ND-tuple to represent the process group mesh. And a ND-coordinate is to represent each process. For example, ``(0, 1, 0)`` represents the process whose rank is 2 in a 3D process group mesh with size ``(2, 2, 2)``. Args: *size (int): The size of each dimension of the process group mesh. The product of the size must be equal to the world size. Attributes: shape (Tuple[int, ...]): The shape of the process group mesh. rank (int): The rank of the current process. """ def __init__(self, *size: int) -> None: assert dist.is_initialized(), "Please initialize torch.distributed first." world_size = dist.get_world_size() prod_size = prod(size) assert ( prod_size == world_size ), f"The product of the size({prod_size}) must be equal to the world size({world_size})." self._shape = size self._rank = dist.get_rank() self._coord = ProcessGroupMesh.unravel(self._rank, self._shape) self._ranks_to_group: Dict[Tuple[int, ...], Union[ProcessGroup, GroupMember.NON_GROUP_MEMBER]] = {} self._group_to_ranks: Dict[ProcessGroup, Tuple[int, ...]] = {} def destroy_mesh_process_groups(self): r""" Destructor method for the ProcessGroupMesh class. 
When the ProcessGroupMesh object is deleted or goes out of scope, this method is called. It is responsible for cleaning up any process groups that were created during the lifetime of the object. Note: All process groups in PyTorch are represented as global variables, and they may not be automatically destroyed when the ProcessGroupMesh's lifetime ends. This method manually destroys the process groups to release system resources. """ for group in self._ranks_to_group.values(): try: dist.destroy_process_group(group) except ValueError: pass # Manually clear all process groups to save memory gc.collect() @property def shape(self) -> Tuple[int, ...]: return self._shape @property def rank(self) -> int: return self._rank def size(self, dim: Optional[int] = None) -> Union[int, Tuple[int, ...]]: """Get the size of the process group mesh. Args: dim (Optional[int], optional): Dimension of the process group mesh. `None` means all dimensions. Defaults to None. Returns: Union[int, Tuple[int, ...]]: Size of the target dimension or the whole process group mesh. """ if dim is None: return self._shape else: return self._shape[dim] def coordinate(self, dim: Optional[int] = None) -> Union[int, Tuple[int, ...]]: """Get the coordinate of the process group mesh. Args: dim (Optional[int], optional): Dimension of the process group mesh. `None` means all dimensions. Defaults to None. Returns: Union[int, Tuple[int, ...]]: Coordinate of the target dimension or the whole process group mesh. """ if dim is None: return self._coord else: return self._coord[dim] @staticmethod def unravel(rank: int, shape: Tuple[int, ...]) -> Tuple[int, ...]: """Convert a rank to a coordinate. Args: rank (int): Rank to be converted. shape (Tuple[int, ...]): Shape of the process group mesh. Returns: Tuple[int, ...]: Coordinate of the rank. """ return np.unravel_index(rank, shape) @staticmethod def ravel(coord: Tuple[int, ...], shape: Tuple[int, ...], mode: str = "raise") -> int: """Convert a coordinate to a rank. 
mode: ['raise', 'wrap', 'clip'], see https://numpy.org/doc/stable/reference/generated/numpy.ravel_multi_index.html. with wrap, index out of range would be wrapped around. For instance, ravel((0, i, 0), (1, 2, 1), 'wrap') returns (i % 2) Args: coords (Tuple[int, ...]): Coordinate to be converted. shape (Tuple[int, ...]): Shape of the process group mesh. mode (Optional[str]): The mode for numpy.ravel_multi_index. Returns: int: Rank of the coordinate. """ assert mode in ["raise", "wrap", "clip"] return int(np.ravel_multi_index(coord, shape, mode)) def _get_group(self, ranks_in_group: List[int], backend: Optional[str] = None) -> ProcessGroup: """Get the process group with the given ranks. It the process group doesn't exist, it will be created. Args: ranks_in_group (List[int]): Ranks in the process group. backend (Optional[str], optional): Backend of the process group. Defaults to None. Returns: ProcessGroup: The process group with the given ranks. """ ranks_in_group = sorted(ranks_in_group) if tuple(ranks_in_group) not in self._ranks_to_group: group = dist.new_group(ranks_in_group, backend=backend) self._ranks_to_group[tuple(ranks_in_group)] = group if group is not GroupMember.NON_GROUP_MEMBER: self._group_to_ranks[group] = tuple(ranks_in_group) return self._ranks_to_group[tuple(ranks_in_group)] def get_ranks_in_group(self, group: ProcessGroup) -> List[int]: """Get the ranks in the given process group. The process group must be created by this class. Args: group (ProcessGroup): The process group. Returns: List[int]: Ranks in the process group. """ return list(self._group_to_ranks[group]) @staticmethod def get_coords_along_axis( base_coord: Tuple[int, ...], axis: Union[int, List[int]], indices_at_axis: Union[List[int], List[List[int]]] ) -> List[Tuple[int, ...]]: """Get coordinates along the given axis. Args: base_coord (Tuple[int, ...]): Base coordinate which the coordinates along the axis are based on. axis (int): Axis along which the coordinates are generated. 
indices_at_axis (List[int]): Indices at the axis. Returns: List[Tuple[int, ...]]: Coordinates along the axis. """ if isinstance(axis, int): axis = [ axis, ] assert isinstance(indices_at_axis[0], int), f"Expected int, but got {type(indices_at_axis[0])}." indices_at_axis = [ indices_at_axis, ] def add_index(base_coord, axis, indices_at_axis): coords_in_group = [] for idx in indices_at_axis: coords_in_group.append(base_coord[:axis] + (idx,) + base_coord[axis + 1 :]) return coords_in_group coords_in_group = [base_coord] for ax, indices_at_ax in zip(axis, indices_at_axis): new_coords_in_group = [] for coords in coords_in_group: new_coords_in_group += add_index(coords, ax, indices_at_ax) coords_in_group = new_coords_in_group return coords_in_group def create_group_along_axis( self, axis: Union[int, List[int]], indices_at_axis: Optional[Union[List[int], List[List[int]]]] = None, backend: Optional[str] = None, ) -> ProcessGroup: """Create all process groups along the given axis, and return the one which the current process belongs to. Args: axis (int): Axis along which the process groups are created. indices_at_axis (Optional[List[int]], optional): Indices at the axis. Defaults to None. backend (Optional[str], optional): Backend of the process group. Defaults to None. Returns: ProcessGroup: The process group along the given axis which the current process belongs to. 
""" if isinstance(axis, int): axis = [ axis, ] if indices_at_axis is not None: assert isinstance(indices_at_axis[0], int) indices_at_axis = [ indices_at_axis, ] indices_at_axis = indices_at_axis or [list(range(self._shape[ax])) for ax in axis] reduced_shape = list(self._shape) # the choices on the axis are reduced to 1, since it's determined by `indices_at_axis` for ax in axis: reduced_shape[ax] = 1 target_group = None # use Cartesian product to generate all combinations of coordinates for base_coord in itertools.product(*[range(s) for s in reduced_shape]): coords_in_group = ProcessGroupMesh.get_coords_along_axis(base_coord, axis, indices_at_axis) ranks_in_group = tuple([ProcessGroupMesh.ravel(coord, self._shape) for coord in coords_in_group]) group = self._get_group(ranks_in_group, backend=backend) if self._rank in ranks_in_group: target_group = group return target_group def get_group_along_axis( self, axis: Union[int, List[int]], indices_at_axis: Optional[List[int]] = None, backend: Optional[str] = None ) -> ProcessGroup: """Get the process group along the given axis which the current process belongs to. If the process group doesn't exist, it will be created. Args: axis (int or list of int): Axes along which the process groups are created. indices_at_axis (Optional[List[int]], optional): Indices at the axis. Defaults to None. backend (Optional[str], optional): Backend of the process group. Defaults to None. Returns: ProcessGroup: The process group along the given axis which the current process belongs to. 
""" indices_at_axis = indices_at_axis if indices_at_axis is None: if isinstance(axis, (list, tuple)): indices_at_axis = list(list(range(self._shape[ax])) for ax in axis) else: indices_at_axis = list(range(self._shape[axis])) coords_in_group = ProcessGroupMesh.get_coords_along_axis(self._coord, axis, indices_at_axis) ranks_in_group = tuple([ProcessGroupMesh.ravel(coord, self._shape) for coord in coords_in_group]) if ranks_in_group not in self._ranks_to_group: # no need to cache it explicitly, since it will be cached in `create_group_along_axis` return self.create_group_along_axis(axis, indices_at_axis, backend=backend) return self._ranks_to_group[ranks_in_group]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cluster/process_group_manager.py
colossalai/cluster/process_group_manager.py
from typing import List import torch.distributed as dist from torch.distributed import ProcessGroup class ProcessGroupManager: """ ProcessGroupManager is used to manage the process groups in the cluster. There are some terms used in this class: - pg: the short name for process group - pg_name: the name of the process group - pg_size: the world size of the process group - rank: the rank of the current process in the process group - world_size: the total number of processes in the process group """ def __init__(self): self.pg_store = dict() def create_process_group(self, name: str, ranks: List[int], backend: str = "nccl") -> ProcessGroup: """ Get a process group by name. If the process group does not exist, it will be created. Args: name (str): name of the process group ranks (List[int]): ranks of the process group backend (str, optional): backend of the process group. Defaults to 'nccl'. Returns: ProcessGroup: the process group """ if name not in self.pg_store: pg = dist.new_group(ranks=ranks, backend=backend) self.pg_store[name] = pg return pg else: raise ValueError(f"Process group {name} already exists.") def get(self, name: str) -> ProcessGroup: """ Get a process group by name. Args: name (str): name of the process group Returns: ProcessGroup: the process group """ if name in self.pg_store: return self.pg_store[name] else: raise ValueError(f"Process group {name} does not exist.") def destroy(self, name: str) -> None: """ Destroy a process group by name. Args: name (str): name of the process group """ if name in self.pg_store: dist.destroy_process_group(self.pg_store[name]) del self.pg_store[name] else: raise ValueError(f"Process group {name} does not exist.") def destroy_all(self) -> None: """ Destroy all process groups. """ for name in self.pg_store: dist.destroy_process_group(self.pg_store[name]) self.pg_store.clear()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cluster/device_mesh_manager.py
colossalai/cluster/device_mesh_manager.py
from dataclasses import dataclass from typing import Dict, List, Tuple, Union import torch import torch.distributed as dist from colossalai.device.alpha_beta_profiler import AlphaBetaProfiler from colossalai.device.device_mesh import DeviceMesh @dataclass class DeviceMeshInfo: """ This class is used to store the information used to initialize the device mesh. Args: physical_ids (List[int]): The physical ids of the current booster. For example, if we have the last 4 GPUs on a 8-devices cluster, then the physical ids should be [4, 5, 6, 7]. mesh_shapes (List[Union[torch.Size, List[int], Tuple[int]]]): The shape of the mesh. For example, if we have 4 GPUs and we want to use 2D mesh with mesh shape [2, 2], then the mesh shape should be [2, 2]. """ physical_ids: List[int] mesh_shape: Union[torch.Size, List[int], Tuple[int]] = None def __post_init__(self): if self.mesh_shape is not None: world_size = len(self.physical_ids) mesh_shape_numel = torch.Size(self.mesh_shape).numel() assert ( world_size == mesh_shape_numel ), f"the numel of mesh_shape should be equal to world size, but got {world_size} != {mesh_shape_numel}" def initialize_device_mesh(device_mesh_info: DeviceMeshInfo): """ This method is used to initialize the device mesh. Args: device_mesh_info (DeviceMeshInfo): The information used to initialize device mesh. 
""" # parse the device mesh info physical_devices = device_mesh_info.physical_ids physical_mesh = torch.tensor(physical_devices) logical_mesh_shape = device_mesh_info.mesh_shape if logical_mesh_shape is None: ab_profiler = AlphaBetaProfiler(physical_devices) # search for the best logical mesh shape logical_mesh_id = ab_profiler.search_best_logical_mesh() logical_mesh_id = torch.Tensor(logical_mesh_id).to(torch.int) else: logical_mesh_id = physical_mesh.reshape(logical_mesh_shape) device_mesh = DeviceMesh(physical_mesh_id=physical_mesh, logical_mesh_id=logical_mesh_id, init_process_group=True) return device_mesh class DeviceMeshManager: """ Device mesh manager is responsible for creating and managing device meshes. """ def __init__(self): self.device_mesh_store: Dict[str, DeviceMesh] = dict() def create_device_mesh(self, name, device_mesh_info: DeviceMeshInfo) -> DeviceMesh: """ Create a device mesh and store it in the manager. Args: name (str): name of the device mesh device_mesh_info (DeviceMeshInfo): the information used to initialize the device mesh """ if name not in self.device_mesh_store: device_mesh = initialize_device_mesh(device_mesh_info) self.device_mesh_store[name] = device_mesh return device_mesh else: raise ValueError(f"Device mesh {name} already exists.") def get(self, name: str) -> DeviceMesh: """ Get a device mesh by name. Args: name (str): name of the device mesh Returns: DeviceMesh: the device mesh """ if name in self.device_mesh_store: return self.device_mesh_store[name] else: raise ValueError(f"Device mesh {name} does not exist.") def destroy(self, name: str) -> None: """ Destroy a device mesh by name. Args: name (str): name of the device mesh """ if name in self.device_mesh_store: for pgs in self.device_mesh_store[name].process_groups_dict.values(): for pg in pgs: dist.destroy_process_group(pg) del self.device_mesh_store[name] else: raise ValueError(f"Device mesh {name} does not exist.") def destroy_all(self): """ Destroy all device meshes. 
""" for name in self.device_mesh_store: for pgs in self.device_mesh_store[name].process_groups_dict.values(): for pg in pgs: dist.destroy_process_group(pg) self.device_mesh_store.clear()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/testing/pytest_wrapper.py
colossalai/testing/pytest_wrapper.py
""" This file will not be automatically imported by `colossalai.testing` as this file has a dependency on `pytest`. Therefore, you need to explicitly import this file `from colossalai.testing.pytest_wrapper import <func>`.from """ import os def run_on_environment_flag(name: str): """ Conditionally run a test based on the environment variable. If this environment variable is set to 1, this test will be executed. Otherwise, this test is skipped. The environment variable is default to 0. Args: name (str): the name of the environment variable flag. Usage: # in your pytest file @run_on_environment_flag(name='SOME_FLAG') def test_for_something(): do_something() # in your terminal # this will execute your test SOME_FLAG=1 pytest test_for_something.py # this will skip your test pytest test_for_something.py """ try: import pytest except ImportError: raise ImportError( "This function requires `pytest` to be installed, please do `pip install pytest` and try again." ) assert isinstance(name, str) flag = os.environ.get(name.upper(), "0") reason = f"Environment variable {name} is {flag}" if flag == "1": return pytest.mark.skipif(False, reason=reason) else: return pytest.mark.skipif(True, reason=reason)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/testing/comparison.py
colossalai/testing/comparison.py
from typing import Any, List, OrderedDict import torch import torch.distributed as dist from torch import Tensor from torch.distributed import ProcessGroup from torch.testing import assert_close from torch.utils._pytree import tree_flatten def assert_equal(a: Tensor, b: Tensor): assert torch.all(a == b), f"expected a and b to be equal but they are not, {a} vs {b}" def assert_not_equal(a: Tensor, b: Tensor): assert not torch.all(a == b), f"expected a and b to be not equal but they are, {a} vs {b}" def assert_close_loose(a: Tensor, b: Tensor, rtol: float = 1e-3, atol: float = 1e-3): assert_close( a, b, rtol=rtol, atol=atol, ) def assert_equal_in_group(tensor: Tensor, process_group: ProcessGroup = None): # all gather tensors from different ranks world_size = dist.get_world_size(process_group) tensor_list = [torch.empty_like(tensor) for _ in range(world_size)] dist.all_gather(tensor_list, tensor, group=process_group) # check if they are equal one by one for i in range(world_size - 1): a = tensor_list[i] b = tensor_list[i + 1] assert torch.all(a == b), f"expected tensors on rank {i} and {i + 1} to be equal but they are not, {a} vs {b}" def check_state_dict_equal( d1: OrderedDict, d2: OrderedDict, ignore_device: bool = True, ignore_dtype: bool = False, ): assert len(list(d1.keys())) == len( list(d2.keys()) ), f"Number of keys unequal: {len(list(d1.keys()))} vs {len(list(d2.keys()))}" for k, v1 in d1.items(): assert k in d2 v2 = d2[k] if isinstance(v1, dict): assert isinstance(v2, dict) check_state_dict_equal(v1, v2, ignore_device) elif isinstance(v1, list): assert isinstance(v2, list) for v1_i, v2_i in zip(v1, v2): if isinstance(v1_i, torch.Tensor): assert isinstance(v2_i, torch.Tensor) if not ignore_device: v1_i = v1_i.to("cpu") v2_i = v2_i.to("cpu") if ignore_dtype: v1_i = v1_i.to(v2_i.dtype) assert_close_loose(v1_i, v2_i) elif isinstance(v1_i, dict): assert isinstance(v2_i, dict) check_state_dict_equal(v1_i, v2_i, ignore_device) else: assert v1_i == v2_i, f"{v1_i} not 
equals to {v2_i}" elif isinstance(v1, torch.Tensor): assert isinstance(v2, torch.Tensor) if not ignore_device: v1 = v1.to("cpu") v2 = v2.to("cpu") if ignore_dtype: v1 = v1.to(v2.dtype) assert_close_loose(v1, v2) else: assert v1 == v2, f"{v1} not equals to {v2}" def check_state_dict_equal_pytree(d1: OrderedDict, d2: OrderedDict, ignore_device: bool = True): flat_d1, _ = tree_flatten(d1) flat_d2, _ = tree_flatten(d2) assert len(flat_d1) == len(flat_d2) for v1, v2 in zip(flat_d1, flat_d2): if isinstance(v1, torch.Tensor): assert isinstance(v2, torch.Tensor) if not ignore_device: v1 = v1.to("cpu") v2 = v2.to("cpu") assert_close_loose(v1, v2) else: assert v1 == v2, f"{v1} not equals to {v2}" def assert_hf_output_close( out1: Any, out2: Any, ignore_keys: List[str] = None, track_name: str = "", atol=1e-5, rtol=1e-5, ): """ Check if two outputs from huggingface are equal. Args: out1 (Any): the first output out2 (Any): the second output ignore_keys (List[str]): the keys to ignore when comparing two dicts track_name (str): the name of the value compared, used to track the path """ if isinstance(out1, dict) and isinstance(out2, dict): # if two values are dict # we recursively check the keys assert set(out1.keys()) == set(out2.keys()) for k in out1.keys(): if ignore_keys is not None and k in ignore_keys: continue assert_hf_output_close( out1[k], out2[k], track_name=f"{track_name}.{k}", ignore_keys=ignore_keys, atol=atol, rtol=rtol, ) elif isinstance(out1, (list, tuple)) and isinstance(out2, (list, tuple)): # if two values are list # we recursively check the elements assert len(out1) == len(out2) for i in range(len(out1)): assert_hf_output_close( out1[i], out2[i], track_name=f"{track_name}.{i}", ignore_keys=ignore_keys, atol=atol, rtol=rtol, ) elif isinstance(out1, Tensor) and isinstance(out2, Tensor): if out1.shape != out2.shape: raise AssertionError(f"{track_name}: shape mismatch: {out1.shape} vs {out2.shape}") assert_close( out1, out2, atol=atol, rtol=rtol ), f"{track_name}: 
tensor value mismatch\nvalue 1: {out1}\nvalue 2: {out2}, \nmean error: {torch.abs(out1 - out2).mean()}" else: assert out1 == out2, f"{track_name}: value mismatch.\nout1: {out1}\nout2: {out2}"
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/testing/utils.py
colossalai/testing/utils.py
import gc import random import re import socket from functools import partial from inspect import signature from typing import Any, Callable, List import torch import torch.multiprocessing as mp from packaging import version from colossalai.accelerator import get_accelerator def parameterize(argument: str, values: List[Any]) -> Callable: """ This function is to simulate the same behavior as pytest.mark.parameterize. As we want to avoid the number of distributed network initialization, we need to have this extra decorator on the function launched by torch.multiprocessing. If a function is wrapped with this wrapper, non-parametrized arguments must be keyword arguments, positional arguments are not allowed. Usage:: # Example 1: @parameterize('person', ['xavier', 'davis']) def say_something(person, msg): print(f'{person}: {msg}') say_something(msg='hello') # This will generate output: # > xavier: hello # > davis: hello # Example 2: @parameterize('person', ['xavier', 'davis']) @parameterize('msg', ['hello', 'bye', 'stop']) def say_something(person, msg): print(f'{person}: {msg}') say_something() # This will generate output: # > xavier: hello # > xavier: bye # > xavier: stop # > davis: hello # > davis: bye # > davis: stop Args: argument (str): the name of the argument to parameterize values (List[Any]): a list of values to iterate for this argument """ def _wrapper(func): def _execute_function_by_param(**kwargs): for val in values: arg_map = {argument: val} partial_func = partial(func, **arg_map) partial_func(**kwargs) return _execute_function_by_param return _wrapper def rerun_on_exception(exception_type: Exception = Exception, pattern: str = None, max_try: int = 5) -> Callable: """ A decorator on a function to re-run when an exception occurs. 
Usage:: # rerun for all kinds of exception @rerun_on_exception() def test_method(): print('hey') raise RuntimeError('Address already in use') # rerun for RuntimeError only @rerun_on_exception(exception_type=RuntimeError) def test_method(): print('hey') raise RuntimeError('Address already in use') # rerun for maximum 10 times if Runtime error occurs @rerun_on_exception(exception_type=RuntimeError, max_try=10) def test_method(): print('hey') raise RuntimeError('Address already in use') # rerun for infinite times if Runtime error occurs @rerun_on_exception(exception_type=RuntimeError, max_try=None) def test_method(): print('hey') raise RuntimeError('Address already in use') # rerun only the exception message is matched with pattern # for infinite times if Runtime error occurs @rerun_on_exception(exception_type=RuntimeError, pattern="^Address.*$") def test_method(): print('hey') raise RuntimeError('Address already in use') Args: exception_type (Exception, Optional): The type of exception to detect for rerun pattern (str, Optional): The pattern to match the exception message. If the pattern is not None and matches the exception message, the exception will be detected for rerun max_try (int, Optional): Maximum reruns for this function. The default value is 5. 
If max_try is None, it will rerun forever if exception keeps occurring """ def _match_lines(lines, pattern): for line in lines: if re.match(pattern, line): return True return False def _wrapper(func): def _run_until_success(*args, **kwargs): try_count = 0 assert max_try is None or isinstance( max_try, int ), f"Expected max_try to be None or int, but got {type(max_try)}" while max_try is None or try_count < max_try: try: try_count += 1 ret = func(*args, **kwargs) return ret except exception_type as e: error_lines = str(e).split("\n") if try_count < max_try and (pattern is None or _match_lines(error_lines, pattern)): print("Exception is caught, retrying...") # when pattern is not specified, we always skip the exception # when pattern is specified, we only skip when pattern is matched continue else: print("Maximum number of attempts is reached or pattern is not matched, no more retrying...") raise e # Override signature # otherwise pytest.mark.parameterize will raise the following error: # function does not use argument xxx sig = signature(func) _run_until_success.__signature__ = sig return _run_until_success return _wrapper def rerun_if_address_is_in_use(): """ This function reruns a wrapped function if "address already in use" occurs in testing spawned with torch.multiprocessing Usage:: @rerun_if_address_is_in_use() def test_something(): ... """ # check version torch_version = version.parse(torch.__version__) assert torch_version.major >= 1 # only torch >= 1.8 has ProcessRaisedException if torch_version >= version.parse("1.8.0"): exception = torch.multiprocessing.ProcessRaisedException else: exception = Exception func_wrapper = rerun_on_exception(exception_type=exception, pattern=".*(A|a)ddress already in use.*") return func_wrapper def skip_if_not_enough_gpus(min_gpus: int): """ This function is used to check the number of available GPUs on the system and automatically skip the test cases which require more GPUs. 
Note: The wrapped function must have `world_size` in its keyword argument. Usage: @skip_if_not_enough_gpus(min_gpus=8) def test_something(): # will be skipped if there are fewer than 8 GPUs available do_something() Arg: min_gpus (int): the minimum number of GPUs required to run this test. """ def _wrap_func(f): def _execute_by_gpu_num(*args, **kwargs): num_avail_gpu = get_accelerator().device_count() if num_avail_gpu >= min_gpus: f(*args, **kwargs) return _execute_by_gpu_num return _wrap_func def free_port() -> int: """Get a free port on localhost. Returns: int: A free port on localhost. """ while True: port = random.randint(20000, 65000) try: with socket.socket() as sock: sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.bind(("localhost", port)) return port except OSError: continue def spawn(func, nprocs=1, **kwargs): """ This function is used to spawn processes for testing. Usage: # must contains arguments rank, world_size, port def do_something(rank, world_size, port): ... spawn(do_something, nprocs=8) # can also pass other arguments def do_something(rank, world_size, port, arg1, arg2): ... spawn(do_something, nprocs=8, arg1=1, arg2=2) Args: func (Callable): The function to be spawned. nprocs (int, optional): The number of processes to spawn. Defaults to 1. """ port = free_port() wrapped_func = partial(func, world_size=nprocs, port=port, **kwargs) mp.spawn(wrapped_func, nprocs=nprocs) def clear_cache_before_run(): """ This function is a wrapper to clear CUDA and python cache before executing the function. Usage: @clear_cache_before_run() def test_something(): ... 
""" def _wrap_func(f): def _clear_cache(*args, **kwargs): get_accelerator().empty_cache() get_accelerator().reset_peak_memory_stats() get_accelerator().reset_max_memory_allocated() get_accelerator().reset_max_memory_cached() get_accelerator().synchronize() gc.collect() f(*args, **kwargs) return _clear_cache return _wrap_func class DummyDataloader: def __init__(self, data_gen_fn: Callable, length: int = 10): self.data_gen_fn = data_gen_fn self.length = length self.step = 0 def __iter__(self): self.step = 0 return self def __next__(self): if self.step < self.length: self.step += 1 return self.data_gen_fn() else: raise StopIteration def __len__(self): return self.length
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/testing/random.py
colossalai/testing/random.py
import random import numpy as np import torch def seed_all(seed, cuda_deterministic=False): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if cuda_deterministic: # slower, more reproducible torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False else: torch.backends.cudnn.deterministic = False torch.backends.cudnn.benchmark = True
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/testing/__init__.py
colossalai/testing/__init__.py
from .comparison import ( assert_close, assert_close_loose, assert_equal, assert_equal_in_group, assert_hf_output_close, assert_not_equal, check_state_dict_equal, ) from .pytest_wrapper import run_on_environment_flag from .utils import ( DummyDataloader, clear_cache_before_run, free_port, parameterize, rerun_if_address_is_in_use, rerun_on_exception, skip_if_not_enough_gpus, spawn, ) __all__ = [ "assert_equal", "assert_not_equal", "assert_close", "assert_close_loose", "assert_equal_in_group", "parameterize", "rerun_on_exception", "rerun_if_address_is_in_use", "skip_if_not_enough_gpus", "free_port", "spawn", "clear_cache_before_run", "run_on_environment_flag", "check_state_dict_equal", "assert_hf_output_close", "DummyDataloader", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/constants.py
colossalai/legacy/constants.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- ALLOWED_MODES = [None, "1d", "2d", "2.5d", "3d", "sequence"] TENSOR_PARALLEL_MODE = "tensor_parallel_mode" # initializer INITIALIZER_MAPPING = { "data": "Initializer_Data", "tensor": "Initializer_Tensor", "pipeline": "Initializer_Pipeline", "embedding": "Initializer_Embedding", "1d": "Initializer_1D", "2d": "Initializer_2D", "2.5d": "Initializer_2p5D", "3d": "Initializer_3D", "sequence": "Initializer_Sequence", "model": "Initializer_Model", "moe": "Initializer_Moe", } # 3D parallelism groups INPUT_GROUP_3D = "input_group_3d" WEIGHT_GROUP_3D = "weight_group_3d" OUTPUT_GROUP_3D = "output_group_3d" INPUT_X_WEIGHT_3D = "input_x_weight_group_3d" OUTPUT_X_WEIGHT_3D = "output_x_weight_group_3d" # Attributes of tensor parallel parameters IS_TENSOR_PARALLEL = "is_tensor_parallel" NUM_PARTITIONS = "num_partitions" TENSOR_PARALLEL_ATTRIBUTES = [IS_TENSOR_PARALLEL, NUM_PARTITIONS]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/core.py
colossalai/legacy/core.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- from colossalai.legacy.context.parallel_context import global_context __all__ = ["global_context"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/global_variables.py
colossalai/legacy/global_variables.py
from typing import Optional class TensorParallelEnv(object): _instance = None def __new__(cls, *args, **kwargs): if cls._instance is None: cls._instance = object.__new__(cls, *args, **kwargs) return cls._instance def __init__(self, *args, **kwargs): self.load(*args, **kwargs) def load( self, mode: Optional[str] = None, vocab_parallel: bool = False, parallel_input_1d: bool = False, summa_dim: int = None, tesseract_dim: int = None, tesseract_dep: int = None, depth_3d: int = None, input_group_3d=None, weight_group_3d=None, output_group_3d=None, input_x_weight_group_3d=None, output_x_weight_group_3d=None, ): self.mode = mode self.vocab_parallel = vocab_parallel self.parallel_input_1d = parallel_input_1d self.summa_dim = summa_dim self.tesseract_dim = tesseract_dim self.tesseract_dep = tesseract_dep self.depth_3d = depth_3d self.input_group_3d = input_group_3d self.weight_group_3d = weight_group_3d self.output_group_3d = output_group_3d self.input_x_weight_group_3d = input_x_weight_group_3d self.output_x_weight_group_3d = output_x_weight_group_3d def save(self): return dict( mode=self.mode, vocab_parallel=self.vocab_parallel, parallel_input_1d=self.parallel_input_1d, summa_dim=self.summa_dim, tesseract_dim=self.tesseract_dim, tesseract_dep=self.tesseract_dep, depth_3d=self.depth_3d, input_group_3d=self.input_group_3d, weight_group_3d=self.weight_group_3d, output_group_3d=self.output_group_3d, input_x_weight_group_3d=self.input_x_weight_group_3d, output_x_weight_group_3d=self.output_x_weight_group_3d, ) tensor_parallel_env = TensorParallelEnv()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/__init__.py
colossalai/legacy/__init__.py
from .initialize import ( get_default_parser, initialize, launch, launch_from_openmpi, launch_from_slurm, launch_from_torch, ) __all__ = [ "launch", "launch_from_openmpi", "launch_from_slurm", "launch_from_torch", "initialize", "get_default_parser", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/initialize.py
colossalai/legacy/initialize.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import argparse import os import pprint from pathlib import Path from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union import torch import torch.nn as nn from torch.nn.modules.loss import _Loss from torch.nn.parallel import DistributedDataParallel as DDP from torch.optim.lr_scheduler import _LRScheduler from torch.optim.optimizer import Optimizer from torch.utils.data import DataLoader from colossalai.accelerator import get_accelerator from colossalai.context import Config, ConfigException from colossalai.interface import OptimizerWrapper from colossalai.legacy.amp import AMP_TYPE, convert_to_amp from colossalai.legacy.amp.naive_amp import NaiveAMPModel from colossalai.legacy.builder.builder import build_gradient_handler from colossalai.legacy.context import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.engine import Engine from colossalai.legacy.engine.gradient_accumulation import accumulate_gradient from colossalai.legacy.engine.schedule import ( InterleavedPipelineSchedule, NonPipelineSchedule, PipelineSchedule, get_tensor_shape, ) from colossalai.legacy.utils import is_using_ddp, is_using_pp, is_using_sequence, sync_model_param from colossalai.legacy.zero import ShardedOptimizerV2, convert_to_zero_v2 from colossalai.legacy.zero.gemini.ophooks import BaseOpHook from colossalai.logging import get_dist_logger def get_default_parser(): """Reads user command line and uses an argument parser to parse the input arguments. Input arguments include configuration, host, port, world size, local rank, backend for torch.distributed. Returns: Namespace: Returns the parser with the default arguments, the user may add customized arguments into this parser. 
""" parser = argparse.ArgumentParser() parser.add_argument("--config", type=str, help="path to the config file") parser.add_argument("--host", type=str, help="the master address for distributed training") parser.add_argument("--port", type=int, help="the master port for distributed training") parser.add_argument("--world_size", type=int, help="world size for distributed training") parser.add_argument("--rank", type=int, help="rank for the default process group") parser.add_argument("--local_rank", type=int, help="local rank on the node") parser.add_argument("--backend", type=str, default="nccl", help="backend for distributed communication") return parser def launch( config: Union[str, Path, Config, Dict], rank: int, world_size: int, host: str, port: int, backend: str = "nccl", local_rank: int = None, seed: int = 1024, verbose: bool = True, ): """This function first parses the configuration arguments, using :func:`parse_args()` in case one of the input arguments are not given. Then initialize and set distributed environment by calling global_context's functions. Args: config (Union[str, dict, Config]): Config file or config file path are both acceptable rank (int): Rank for the default process group world_size (int): World size of the default process group host (str): The master address for distributed training port (str): The master port for distributed training backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl`` local_rank (int, optional): Rank for the process on the node and is used to set the default CUDA device, defaults to None. If local_rank = None, the default device ordinal will be calculated automatically. seed (int, optional): Specified random seed for every process. Defaults to 1024. verbose (bool, optional): Whether to print logs. Defaults to True. 
Raises: Exception: Raise exception when config type is wrong """ gpc.verbose = verbose # set config assert isinstance( config, (Config, str, Path, dict) ), f"expected argument config to be Config, str or Path, but got {type(config)}" if not isinstance(config, Config) and isinstance(config, dict): config = Config(config) if isinstance(config, (str, Path)): config = Config.from_file(config) gpc.load_config(config) # init default process group gpc.init_global_dist(rank, world_size, backend, host, port) # init process groups for different parallel modes from config gpc.init_parallel_groups() # set cuda device if torch.cuda.is_available(): # if local rank is not given, calculate automatically gpc.set_device(local_rank) # set the number of processes running on the same node gpc.detect_num_processes_on_current_node() gpc.set_seed(seed) if verbose: logger = get_dist_logger() logger.info( f"Distributed environment is initialized, " f"data parallel size: {gpc.data_parallel_size}, pipeline parallel size: {gpc.pipeline_parallel_size}, " f"tensor parallel size: {gpc.tensor_parallel_size}", ranks=[0], ) def launch_from_slurm( config: Union[str, Path, Config, Dict], host: str, port: int, backend: str = "nccl", seed: int = 1024, verbose: bool = True, ): """A wrapper for colossalai.launch for SLURM launcher by reading rank and world size from the environment variables set by SLURM Args: config (Union[str, dict, Config]): Config file or config file path are both acceptable host (str): The master address for distributed training port (str): The master port for distributed training backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl`` seed (int, optional): Specified random seed for every process. Defaults to 1024. verbose (bool, optional): Whether to print logs. Defaults to True. 
""" try: rank = int(os.environ["SLURM_PROCID"]) world_size = int(os.environ["SLURM_NPROCS"]) except KeyError as e: raise RuntimeError( f"Could not find {e} in the SLURM environment, visit https://www.colossalai.org/ for more information on launching with SLURM" ) launch( config=config, rank=rank, world_size=world_size, host=host, port=port, backend=backend, seed=seed, verbose=verbose, ) def launch_from_openmpi( config: Union[str, Path, Config, Dict], host: str, port: int, backend: str = "nccl", seed: int = 1024, verbose: bool = True, ): """A wrapper for colossalai.launch for OpenMPI launcher by reading rank and world size from the environment variables set by OpenMPI Args: config (Union[str, dict, Config]): Config file or config file path are both acceptable host (str): The master address for distributed training port (str): The master port for distributed training backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl`` seed (int, optional): Specified random seed for every process. Defaults to 1024. verbose (bool, optional): Whether to print logs. Defaults to True. 
""" try: rank = int(os.environ["OMPI_COMM_WORLD_RANK"]) local_rank = int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"]) world_size = int(os.environ["OMPI_COMM_WORLD_SIZE"]) except KeyError as e: raise RuntimeError( f"Could not find {e} in the OpenMPI environment, visit https://www.colossalai.org/ for more information on launching with OpenMPI" ) launch( config=config, local_rank=local_rank, rank=rank, world_size=world_size, host=host, port=port, backend=backend, seed=seed, verbose=verbose, ) def launch_from_torch( config: Union[str, Path, Config, Dict], backend: str = "nccl", seed: int = 1024, verbose: bool = True ): """A wrapper for colossalai.launch for torchrun or torch.distributed.launch by reading rank and world size from the environment variables set by PyTorch Args: config (Union[str, dict, Config]): Config file or config file path are both acceptable backend (str, optional): Backend for ``torch.distributed``, defaults to ``nccl`` seed (int, optional): Specified random seed for every process. Defaults to 1024. verbose (bool, optional): Whether to print logs. Defaults to True. 
""" try: rank = int(os.environ["RANK"]) local_rank = int(os.environ["LOCAL_RANK"]) world_size = int(os.environ["WORLD_SIZE"]) host = os.environ["MASTER_ADDR"] port = int(os.environ["MASTER_PORT"]) except KeyError as e: raise RuntimeError( f"Could not find {e} in the torch environment, visit https://www.colossalai.org/ for more information on launching with torch" ) launch( config=config, local_rank=local_rank, rank=rank, world_size=world_size, host=host, port=port, backend=backend, seed=seed, verbose=verbose, ) def initialize( model: nn.Module, optimizer: Optimizer, criterion: Optional[_Loss] = None, train_dataloader: Optional[Iterable] = None, test_dataloader: Optional[Iterable] = None, lr_scheduler: Optional[_LRScheduler] = None, ophooks: Optional[List[BaseOpHook]] = None, verbose: bool = True, ) -> Tuple[Engine, DataLoader, DataLoader, _LRScheduler]: """Core function to wrap the essential training components with our functionality based on the config which is loaded into gpc.config. Args: model (:class:`torch.nn.Module` or Callable): Your model instance or a function to build the model. optimizer (:class:`torch.optim.optimizer.Optimizer` or :class:`Type[torch.optim.optimizer]`): Your optimizer instance. criterion (:class:`torch.nn.modules.loss._Loss`, optional): Your criterion instance. train_dataloader (:class:`torch.utils.data.DataLoader`, optional): Dataloader for training. test_dataloader (:class:`torch.utils.data.DataLoader`, optional): Dataloader for testing. lr_scheduler (:class:`torch.nn.lr_scheduler._LRScheduler`, optional): Your lr scheduler instance, optional. verbose (bool, optional): Whether to print logs. Returns: Tuple (engine, train_dataloader, test_dataloader, lr_scheduler): A tuple of ``(engine, train_dataloader, test_dataloader, lr_scheduler)`` where only ``engine`` could not be None. 
""" # get logger logger = get_dist_logger() gpc.verbose = verbose # get config from gpc config = gpc.config # print config if verbose: logger.info( f"\n========== Your Config ========\n" f"{pprint.pformat(gpc.config)}\n" f"================================\n", ranks=[0], ) # cudnn cudnn_benchmark = config.get("cudnn_benchmark", False) cudnn_deterministic = config.get("cudnn_deterministic", False) torch.backends.cudnn.benchmark = cudnn_benchmark torch.backends.cudnn.deterministic = cudnn_deterministic if verbose: logger.info(f"cuDNN benchmark = {cudnn_benchmark}, deterministic = {cudnn_deterministic}", ranks=[0]) # zero use_zero = hasattr(gpc.config, "zero") if use_zero: zero_cfg = gpc.config.get("zero", None) if zero_cfg is not None: cfg_ = zero_cfg.copy() else: cfg_ = {} optimizer_config = zero_cfg.get("optimizer_config", None) model_config = zero_cfg.get("model_config", None) model, optimizer = convert_to_zero_v2( model, optimizer, model_config=model_config, optimizer_config=optimizer_config ) logger.info("Initializing ZeRO model and optimizer finished!", ranks=[0]) else: if isinstance(model, nn.Module): # first sync model across dp ranks model.to(get_accelerator().get_current_device()) elif isinstance(model, Callable): model = model().to(get_accelerator().get_current_device()) # optimizer maybe a optimizer_cls if isinstance(optimizer, Callable): optimizer = optimizer(model.parameters()) logger.warning("Initializing an non ZeRO model with optimizer class") if not use_zero: if is_using_sequence(): sync_model_param(model, ParallelMode.SEQUENCE_DP) elif is_using_ddp(): sync_model_param(model, ParallelMode.DATA) else: logger.warning( "The parameters of models is not automatically synchronized.\n" "Please make sure that all parameters are the same in data parallel group.", ranks=[0], ) # check amp and zero fp16_cfg = gpc.config.get("fp16", None) if fp16_cfg is not None and fp16_cfg.mode is not None and use_zero: raise ConfigException( "It is not allowed to set fp16 and 
zero configuration in your config file at the same time" ) # clip grad norm clip_grad_norm = gpc.config.get("clip_grad_norm", 0.0) # initialize amp amp_mode = None if fp16_cfg is not None and fp16_cfg.mode is not None: cfg_ = fp16_cfg.copy() amp_mode = cfg_.pop("mode") if is_using_pp(): assert amp_mode == AMP_TYPE.NAIVE, "Pipeline only support NaiveAMP currently" if amp_mode == AMP_TYPE.NAIVE: cfg_["clip_grad_norm"] = clip_grad_norm model, optimizer, criterion = convert_to_amp( model=model, optimizer=optimizer, criterion=criterion, mode=amp_mode, amp_config=cfg_ ) # get torch ddp config torch_ddp_cfg = gpc.config.get("torch_ddp", dict()) # gradient handler gradient_handler_cfg = gpc.config.get("gradient_handler", None) if gradient_handler_cfg is None: # if gradient handler is not specified in the configuration file, # check in the following order # 1. if optimizer is ZERO, then use zero grad handler # 2. if dp size is larger than 1 and pipeline is not used, use pytorch ddp # 3. if using pipeline and dp size larger than 1, use data parallel grad handler if isinstance(optimizer, ShardedOptimizerV2): gradient_handler_cfg = [dict(type="ZeROGradientHandler")] if verbose: logger.info( "Training with zero is detected, ZeROGradientHandler is automatically " "added even though not specified in the configuration", ranks=[0], ) elif is_using_sequence(): model = DDP( model, process_group=gpc.get_group(ParallelMode.SEQUENCE_DP), device_ids=[torch.cuda.current_device()], **torch_ddp_cfg, ) if verbose: logger.info( "Model is using torch.nn.parallel.DistributedDataParallel for Sequence Parallelism", ranks=[0] ) elif is_using_ddp() and not is_using_pp() and amp_mode != AMP_TYPE.NAIVE: model = DDP( model, process_group=gpc.get_group(ParallelMode.DATA), device_ids=[torch.cuda.current_device()], **torch_ddp_cfg, ) if verbose: logger.info("Model is using torch.nn.parallel.DistributedDataParallel for Data Parallelism", ranks=[0]) elif is_using_ddp(): gradient_handler_cfg = 
[dict(type="DataParallelGradientHandler")] if verbose: logger.info( "Data parallel training is detected when using pipeline parallel, " "DataParallelGradientHandler is automatically " "added even though not specified in the configuration", ranks=[0], ) # add pipeline parallel gradient handler, if pipeline shared module is detected for param in model.parameters(): if getattr(param, "pipeline_shared_module_pg", None) is not None: if gradient_handler_cfg is None: gradient_handler_cfg = [dict(type="PipelineSharedModuleGradientHandler")] else: gradient_handler_cfg.append(dict(type="PipelineSharedModuleGradientHandler")) if verbose: logger.info( "pipeline_shared_module is detected, PipelineSharedModuleGradientHandler is automatically " "added even though not specified in the configuration", ranks=[0], ) break else: if not isinstance(gradient_handler_cfg, list): raise ConfigException( f"expected gradient_handler in the configuration file to be a list but got {type(gradient_handler_cfg)}" ) # turn off sync buffer for NaiveAMPModel if using torch DDP and NaiveAMPModel at the same time # to avoid duplicated buffer synchronization if isinstance(model, DDP) and isinstance(model.module, NaiveAMPModel): model.module.sync_buffer = False # initialize schedule for engine if is_using_pp(): tensor_shape = get_tensor_shape() use_interleaved = hasattr(gpc.config, "model") and hasattr(gpc.config.model, "num_chunks") if gpc.is_initialized(ParallelMode.PARALLEL_1D): scatter_gather = True else: scatter_gather = False if use_interleaved: if isinstance(model, nn.Sequential): model = nn.ModuleList([model]) schedule = InterleavedPipelineSchedule( gpc.config.NUM_MICRO_BATCHES, gpc.config.model.num_chunks, tensor_shape=tensor_shape, scatter_gather_tensors=scatter_gather, ) else: schedule = PipelineSchedule( gpc.config.NUM_MICRO_BATCHES, tensor_shape=tensor_shape, scatter_gather_tensors=scatter_gather ) else: schedule = NonPipelineSchedule() if gradient_handler_cfg is None: gradient_handlers = 
None if verbose and not isinstance(model, DDP): logger.warning( "No PyTorch DDP or gradient handler is set up, please make sure you do not need " "to all-reduce the gradients after a training step.", ranks=[0], ) else: gradient_handlers = [build_gradient_handler(cfg, model, optimizer) for cfg in gradient_handler_cfg] # check if optimizer is OptimizerWrapper if not isinstance(optimizer, (OptimizerWrapper, ShardedOptimizerV2)): optimizer = OptimizerWrapper(optim=optimizer) # gradient accumulation grad_accum_size = gpc.config.get("gradient_accumulation", None) if grad_accum_size is not None: optimizer, train_dataloader, gradient_handlers, lr_scheduler = accumulate_gradient( model=model, optimizer=optimizer, dataloader=train_dataloader, accumulate_size=grad_accum_size, gradient_handlers=gradient_handlers, lr_scheduler=lr_scheduler, ) engine = Engine( model=model, optimizer=optimizer, criterion=criterion, gradient_handlers=gradient_handlers, clip_grad_norm=clip_grad_norm, ophook_list=ophooks, schedule=schedule, ) return engine, train_dataloader, test_dataloader, lr_scheduler
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/__init__.py
colossalai/legacy/zero/__init__.py
from typing import Tuple import torch import torch.nn as nn from colossalai.logging import get_dist_logger from .init_ctx import ZeroInitContext, no_shard_zero_context, no_shard_zero_decrator from .shard_utils import BucketTensorShardStrategy, TensorShardStrategy from .sharded_model import ShardedModelV2 from .sharded_optim import ShardedOptimizerV2 def convert_to_zero_v2( model: nn.Module, optimizer: torch.optim.Optimizer, model_config, optimizer_config ) -> Tuple[ShardedModelV2, ShardedOptimizerV2]: """ A helper function to integrate the model and optimizer with ZeRO optimizer and off-loading :param model: Your model object :type model: :class:`torch.nn.Module` :param optimizer_config: Your optimizer object :type optimizer_config: :class:`dict` :return: (model, optimizer) :rtype: Tuple """ logger = get_dist_logger("convert_to_zero_v2") logger.info(f"optimizer_config is {optimizer_config}", ranks=[0]) if optimizer_config is None: optimizer_config = dict() logger.info(f"model_config is {model_config}", ranks=[0]) if model_config is None: model_config = dict() zero_model = ShardedModelV2(model, **model_config) zero_optimizer = ShardedOptimizerV2(zero_model, optimizer, **optimizer_config) return zero_model, zero_optimizer __all__ = [ "convert_to_zero_v2", "ShardedModelV2", "ShardedOptimizerV2", "ZeroInitContext", "no_shard_zero_context", "no_shard_zero_decrator", "TensorShardStrategy", "BucketTensorShardStrategy", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_param/sharded_tensor.py
colossalai/legacy/zero/sharded_param/sharded_tensor.py
import torch

from colossalai.legacy.zero.gemini.stateful_tensor import StatefulTensor, TensorState


class ShardedTensor(StatefulTensor):
    def __init__(self, tensor: torch.Tensor, state: TensorState = TensorState.HOLD) -> None:
        r"""
        A tensor sharded in multiple processes. Constructed from an existing torch.Tensor instance.

        The geometry (shape, numel, dtype) of the unsharded tensor is recorded at
        construction so the full tensor can be rebuilt after a gather.
        """
        # Gradients are tracked separately by the ZeRO machinery; the payload must
        # not carry autograd state.
        assert tensor.requires_grad is False
        super().__init__(tensor, state)

        # kept the shape, numel and dtype of the init tensor.
        self._origin_shape = tensor.shape
        self._origin_numel = tensor.numel()
        self._origin_dtype = tensor.dtype
        self._is_sharded = False

    @property
    def dtype(self) -> torch.dtype:
        # Sharding never changes the element type; this invariant guards
        # against a payload_reset() with a mismatched dtype.
        assert self._payload.dtype == self._origin_dtype
        return self._payload.dtype

    @property
    def origin_numel(self) -> int:
        """Element count of the original (unsharded) tensor."""
        return self._origin_numel

    @property
    def origin_shape(self) -> torch.Size:
        # BUGFIX: annotation was `-> int`, but this returns the recorded
        # `tensor.shape`, which is a `torch.Size`.
        """Shape of the original (unsharded) tensor."""
        return self._origin_shape

    @property
    def is_sharded(self) -> bool:
        return self._is_sharded

    @is_sharded.setter
    def is_sharded(self, flag: bool):
        self._is_sharded = flag
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_param/__init__.py
colossalai/legacy/zero/sharded_param/__init__.py
from .sharded_param import ShardedParamV2 from .sharded_tensor import ShardedTensor __all__ = ["ShardedTensor", "ShardedParamV2"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_param/sharded_param.py
colossalai/legacy/zero/sharded_param/sharded_param.py
from typing import List, Optional, Tuple

import torch

from colossalai.legacy.zero.gemini.stateful_tensor import StatefulTensor, TensorState
from colossalai.legacy.zero.gemini.tensor_utils import colo_tensor_mem_usage

from .sharded_tensor import ShardedTensor

# Process-wide cache of zero-element tensors, keyed by (device, dtype), used to
# release a param's storage while keeping `.data` a valid tensor object.
EMPTY_TENSOR_DICT = {}


def get_empty_tensor(device: torch.device, dtype: torch.dtype):
    """Return a cached 0-element tensor for (device, dtype), creating it on first use."""
    key = (device, dtype)
    if key not in EMPTY_TENSOR_DICT:
        EMPTY_TENSOR_DICT[key] = torch.empty(0, dtype=dtype, device=device)
    return EMPTY_TENSOR_DICT[key]


class ShardedParamV2(object):
    """Per-parameter ZeRO bookkeeping: wraps ``param.data`` in a ShardedTensor and
    keeps the saved gradient as a separate StatefulTensor."""

    def __init__(self, param: torch.nn.Parameter, set_data_none: bool = False) -> None:
        # The sharded view over the parameter's payload.
        self._sharded_data_tensor: ShardedTensor = ShardedTensor(param.data)
        # Gradient storage; starts FREE (no payload) until backward saves into it.
        self.saved_grad: StatefulTensor = StatefulTensor(None, TensorState.FREE)
        # This attribute must be initialized in ShardedModel
        self.offload_grad: bool = False

        # make sure the shared param is the only owner of payload
        # The param.data maybe used to init the other part of the model.
        # For example: File "resnet.py", line 190, in __init__
        # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        # So we can not empty the .data at this time
        self.param = param
        if set_data_none:
            self.set_data_none()

    def get_payload_tensors(self) -> List[StatefulTensor]:
        """returns stateful tensors kept by this class."""
        return [self._sharded_data_tensor]

    def set_data_none(self):
        # Point param.data at a shared empty tensor so the real payload can be freed.
        self.param.data = get_empty_tensor(self.sharded_data_tensor.device, self.sharded_data_tensor.dtype)

    def set_grad_none(self):
        # Drop the saved gradient's payload (state -> FREE).
        self.saved_grad.set_null()

    @property
    def sharded_data_tensor(self):
        return self._sharded_data_tensor

    @property
    def data_payload(self):
        # Raw tensor behind the sharded data; must currently hold a payload.
        assert not self.sharded_data_tensor.is_null()
        return self.sharded_data_tensor.payload

    @property
    def grad_payload(self):
        # Raw tensor behind the saved gradient; must currently hold a payload.
        assert not self.saved_grad.is_null()
        return self.saved_grad.payload

    @property
    def param_is_sharded(self):
        return self.sharded_data_tensor.is_sharded

    def data_payload_reset(self, tensor: torch.Tensor):
        # Replace the data payload; plain (non-autograd) torch.Tensor only.
        assert type(tensor) is torch.Tensor
        assert tensor.requires_grad is False
        self.sharded_data_tensor.payload_reset(tensor)

    def grad_payload_reset(self, tensor: torch.Tensor):
        # Replace the gradient payload; plain (non-autograd) torch.Tensor only.
        assert type(tensor) is torch.Tensor
        assert tensor.requires_grad is False
        self.saved_grad.payload_reset(tensor)

    def get_memory_usage(self) -> Tuple[int, int]:
        """
        get the memory usage of the param, including data and grad

        Returns:
            Tuple[int, int]: cuda mem usage in Byte, cpu memory usage in Byte
        """
        cuda_mem_use, cpu_mem_use = 0, 0

        def _update_mem_use(t: Optional[torch.Tensor]):
            # Accumulate t's cuda/cpu footprint into the enclosing counters.
            if t is None:
                return
            assert isinstance(t, torch.Tensor)
            nonlocal cuda_mem_use
            nonlocal cpu_mem_use
            t_cuda, t_cpu = colo_tensor_mem_usage(t)
            cuda_mem_use += t_cuda
            cpu_mem_use += t_cpu

        # Track data pointers already counted so aliased storages (e.g. grad
        # reusing the data shard's buffer) are not double-counted.
        address_set = set()
        _update_mem_use(self.data_payload)
        address_set.add(self.data_payload.data_ptr())

        if not self.saved_grad.is_null() and self.saved_grad.data_ptr() not in address_set:
            _update_mem_use(self.grad_payload)
            address_set.add(self.saved_grad.data_ptr())

        if self.param.data is not None and self.param.data.data_ptr() not in address_set:
            _update_mem_use(self.param.data)
            address_set.add(self.param.data.data_ptr())

        if self.param.grad is not None and self.param.grad.data_ptr() not in address_set:
            _update_mem_use(self.param.grad)

        return cuda_mem_use, cpu_mem_use
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/init_ctx/__init__.py
colossalai/legacy/zero/init_ctx/__init__.py
from .init_context import ZeroInitContext, no_shard_zero_context, no_shard_zero_decrator __all__ = ["ZeroInitContext", "no_shard_zero_context", "no_shard_zero_decrator"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/init_ctx/init_context.py
colossalai/legacy/zero/init_ctx/init_context.py
import contextlib
import functools
from contextlib import AbstractContextManager
from dataclasses import dataclass
from typing import Optional

import torch
import torch.distributed as dist
import torch.nn as nn

from colossalai.context.singleton_meta import SingletonMeta
from colossalai.legacy.context.parallel_mode import ParallelMode
from colossalai.legacy.core import global_context as gpc
from colossalai.legacy.zero.shard_utils import BaseShardStrategy
from colossalai.legacy.zero.sharded_model._utils import cast_tensor_to_bf16, cast_tensor_to_fp16
from colossalai.legacy.zero.sharded_model.sharded_model_v2 import ShardedModelV2
from colossalai.legacy.zero.sharded_param import ShardedParamV2
from colossalai.logging import get_dist_logger
from colossalai.utils.model.utils import InsertPostInitMethodToModuleSubClasses


@dataclass
class ZeroContextConfig:
    """The configuration used to control zero context initialization.

    Args:
        target_device (torch.device): The device where param data are after exiting the context.
        is_replicated (bool, optional): Whether the param is replicated across data parallel group.
            Some parameters are not replicated, e.g. parameters in MOE experts.
        shard_param (bool, optional): Is param sharded after exiting the context. Defaults to False.
    """

    target_device: torch.device
    is_replicated: bool = True
    shard_param: bool = False

    def __post_init__(self):
        # Sharding only makes sense for replicated (data-parallel) parameters.
        if self.shard_param:
            assert self.is_replicated, "Non-replicated parameters can't be sharded."

        # Replicated, unsharded params are broadcast on exit and must live on cuda.
        if self.is_replicated and not self.shard_param:
            assert self.target_device.type == "cuda", "Replicated no-shard parameters should be located in cuda."


class ZeroInitContext(InsertPostInitMethodToModuleSubClasses):
    """A context to initialize model.

    1. Convert the model to fp16.
    2. The parameters of the module are adapted to type ShardedParameter.
    3. Shard the param and grad according to flags.

    Args:
        target_device (torch.device): The device where param data are after exiting the context.
        shard_strategy (BaseShardStrategy): Shard strategy instance.
        seed (int, optional): Random seed for weight initialization
        shard_param (bool, optional): Is param sharded after exiting the context. Defaults to False.
        default_dtype (torch.dtype, optional): If it's not None, parameters will be initialized as ``default_dtype`` then converted to fp16.
        bf16 (bool, optional): If it's True, parameters will be initialized as ``torch.bfloat16``.
            Otherwise, parameters will be initialized as ``torch.float16``. Defaults to False.
        model_numel_tensor (torch.Tensor, optional): A tensor which will store the number of elements of model.
            Defaults to None, in which case a fresh ``torch.zeros(1, dtype=torch.long)`` is allocated per context.
    """

    def __init__(
        self,
        target_device: torch.device,
        shard_strategy: BaseShardStrategy,
        seed: int = 2**10 - 1,
        shard_param: bool = False,
        default_dtype: Optional[torch.dtype] = None,
        bf16: bool = False,
        model_numel_tensor: Optional[torch.Tensor] = None,
    ):
        super().__init__(default_dtype=default_dtype)
        self.shard_strategy = shard_strategy
        self.param_list = []
        # BUGFIX: the previous default `torch.zeros(1, dtype=torch.long)` was a
        # mutable default argument shared by every context constructed without
        # an explicit tensor; allocate a fresh counter per instance instead.
        self.model_numel_tensor = (
            model_numel_tensor if model_numel_tensor is not None else torch.zeros(1, dtype=torch.long)
        )
        self.seed = seed
        self.bf16 = bf16
        self.dp_process_group = gpc.get_group(ParallelMode.DATA)

        self.config = ZeroContextConfig(target_device=target_device, is_replicated=True, shard_param=shard_param)

        # Register this context as the active one so nested no-shard contexts
        # can temporarily swap its config.
        ZeroContextMgr().current_context = self

        self.param_numel = {}
        self.top_module = None

    @property
    def target_device(self):
        return self.config.target_device

    @property
    def is_replicated(self):
        return self.config.is_replicated

    @property
    def shard_param(self):
        return self.config.shard_param

    @staticmethod
    def calc_fanin_fanout(tensor: torch.Tensor):
        """We use this function to substitute fan-in and fan-out calculation in torch.nn.init.
        This can help us get correct fan-in and fan-out for sharded tensor.
        """
        assert isinstance(tensor, nn.Parameter), "Sharded tensor initialization is only allowed for parameters"

        # get correct shape of input tensor: a sharded param's visible shape is
        # the shard's, so use the recorded origin shape instead.
        if not hasattr(tensor, "colo_attr") or not tensor.colo_attr.param_is_sharded:
            tensor_shape = tensor.shape
        else:
            tensor_shape = tensor.colo_attr.sharded_data_tensor.origin_shape

        dimensions = len(tensor_shape)
        if dimensions < 2:
            raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")

        num_input_fmaps = tensor_shape[1]
        num_output_fmaps = tensor_shape[0]
        receptive_field_size = 1
        if dimensions > 2:
            # math.prod is not always available, accumulate the product manually
            # we could use functools.reduce but that is not supported by TorchScript
            for s in tensor_shape[2:]:
                receptive_field_size *= s
        fan_in = num_input_fmaps * receptive_field_size
        fan_out = num_output_fmaps * receptive_field_size

        return fan_in, fan_out

    def _pre_context_exec(self):
        """
        The Callback function when entering the context
        """
        self.logger = get_dist_logger("ZeroInitContext")

        # substitute fan-in and fan-out calculation
        self.nn_fanin_fanout = nn.init._calculate_fan_in_and_fan_out
        nn.init._calculate_fan_in_and_fan_out = self.calc_fanin_fanout

        # Patch state-dict load/save so they are shard-aware while inside the context.
        self.module_load_from_state_dict = nn.Module._load_from_state_dict
        shard_strategy = self.shard_strategy if self.config.shard_param else None
        nn.Module._load_from_state_dict = functools.partialmethod(
            ShardedModelV2._colo_load_from_state_dict, shard_strategy=shard_strategy
        )
        self.module_state_dict = nn.Module.state_dict
        nn.Module.state_dict = functools.partialmethod(
            ShardedModelV2._colo_state_dict,
            shard_strategy=shard_strategy,
            state_dict_func=self.module_state_dict,
            process_group=self.dp_process_group,
        )

        # reserve rng states so exiting the context restores the caller's RNG stream
        self.cpu_rng_state = torch.get_rng_state()
        self.cuda_rng_state = torch.cuda.get_rng_state()

        # set new seed for initialization, since we initialize sharded tensors separately
        # we don't want all processes to have the same seed,
        # otherwise all sharded tensors would be identical after init
        offset = self.seed + 1  # we want to have more 1 in binary format seed
        torch.manual_seed(self.seed + offset * dist.get_rank())

    def _post_context_exec(self):
        """The callback function when exiting context."""
        # broadcast replicated no-shard parameters so all ranks agree on their values
        src_rank = gpc.get_ranks_in_group(ParallelMode.DATA)[0]
        for param in self.param_list:
            assert hasattr(param, "colo_attr")
            if not param.colo_attr.param_is_sharded and param.colo_attr.is_replicated:
                dist.broadcast(tensor=param.data, src=src_rank, group=self.dp_process_group)
                param.colo_attr.set_data_none()

        del self.param_list

        # Undo the monkey patches installed in _pre_context_exec.
        nn.init._calculate_fan_in_and_fan_out = self.nn_fanin_fanout
        # BUGFIX: the saved function is ``_load_from_state_dict`` (that is what
        # _pre_context_exec patched), but it was previously assigned back to
        # ``nn.Module.load_state_dict`` — clobbering load_state_dict and leaving
        # _load_from_state_dict patched after the context exited.
        nn.Module._load_from_state_dict = self.module_load_from_state_dict
        nn.Module.state_dict = self.module_state_dict
        torch.set_rng_state(self.cpu_rng_state)
        torch.cuda.set_rng_state(self.cuda_rng_state)

        # Params recorded during init but no longer reachable from the final
        # module (e.g. replaced submodules) must not count toward the total.
        params = frozenset(self.top_module.parameters())
        for param in self.param_numel.keys():
            if param not in params:
                self.param_numel[param] = 0
        self.model_numel_tensor.fill_(sum(self.param_numel.values()))

    def _post_init_method(self, module: torch.nn.Module, *args, **kwargs):
        """
        The function to call at the end of the constructor of each module.
        NOTE() The module may be passed to this function multiple times.
        """
        self.top_module = module
        half_dtype = torch.float16 if not self.bf16 else torch.bfloat16

        def half_fn(t: torch.Tensor):
            # Only floating-point tensors are converted; integer params keep their dtype.
            return t.to(half_dtype) if t.is_floating_point() else t

        for param in module.parameters(recurse=False):
            # avoid adapting a param to ShardedParam twice
            if hasattr(param, "colo_attr"):
                continue

            self.param_numel[param] = param.numel()

            # convert parameters to half
            param_half = half_fn(param)
            param.data = param_half
            if param.grad is not None:
                grad_half = half_fn(param.grad)
                param.grad.data = grad_half

            # move torch parameters to the target device
            target_device = self.target_device
            param.data = param.data.to(target_device)
            if param.grad is not None:
                param.grad = param.grad.to(target_device)

            param.colo_attr = ShardedParamV2(param, set_data_none=True)

            if self.shard_param:
                self.shard_strategy.shard([param.colo_attr.sharded_data_tensor], self.dp_process_group)

            param.data = param.colo_attr.data_payload  # set param.data to payload

            # mark whether the param is replicated
            param.colo_attr.is_replicated = self.is_replicated

            # mark whether the param should keep not sharded
            # if True, the param is used as Zero stage 2
            param.colo_attr.keep_not_shard = not self.shard_param

            self.param_list.append(param)

        # We must cast buffers
        # If we use BN, buffers may be on CPU and Float
        # We must cast them
        cast_fn = cast_tensor_to_fp16 if not self.bf16 else cast_tensor_to_bf16
        for buffer in module.buffers(recurse=False):
            buffer.data = buffer.data.to(device=torch.cuda.current_device())
            buffer.data = cast_fn(buffer.data)


class ZeroContextMgr(metaclass=SingletonMeta):
    """Singleton holding the active ZeroInitContext (if any)."""

    current_context: Optional[ZeroInitContext] = None

    @contextlib.contextmanager
    def hijack_context_config(self, **kwargs):
        """Temporarily replace the active context's config; no-op without a context."""
        if self.current_context is None:
            yield
        else:
            old_config = self.current_context.config
            self.current_context.config = ZeroContextConfig(**kwargs)
            yield
            self.current_context.config = old_config


def no_shard_zero_context(is_replicated: bool = True) -> AbstractContextManager:
    """Context manager under which newly created params are kept unsharded on cuda."""
    return ZeroContextMgr().hijack_context_config(
        target_device=torch.device("cuda", torch.cuda.current_device()), is_replicated=is_replicated, shard_param=False
    )


def no_shard_zero_decrator(is_replicated: bool = True):
    """Decorator form of :func:`no_shard_zero_context` for module ``__init__`` functions."""

    def _wrapper(init_func):
        def _no_shard(*args, **kwargs):
            with no_shard_zero_context(is_replicated):
                ret = init_func(*args, **kwargs)
            return ret

        return _no_shard

    return _wrapper
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/shard_utils/base_shard_strategy.py
colossalai/legacy/zero/shard_utils/base_shard_strategy.py
from abc import ABC, abstractmethod
from typing import List, Optional

import torch.distributed as dist

from colossalai.legacy.zero.sharded_param.sharded_tensor import ShardedTensor


class BaseShardStrategy(ABC):
    def __init__(self) -> None:
        """Abstract base class for tensor sharding strategies.

        A concrete strategy knows how to split tensors across the ranks of a
        process group and how to reassemble them afterwards.
        """
        super().__init__()

    @abstractmethod
    def shard(self, tensor_list: List[ShardedTensor], process_group: Optional[dist.ProcessGroup] = None):
        """Split each tensor in ``tensor_list`` among the ranks of ``process_group``."""

    @abstractmethod
    def gather(self, tensor_list: List[ShardedTensor], process_group: Optional[dist.ProcessGroup] = None):
        """Reassemble each sharded tensor in ``tensor_list`` on every rank."""
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/shard_utils/bucket_tensor_shard_strategy.py
colossalai/legacy/zero/shard_utils/bucket_tensor_shard_strategy.py
from typing import List, Optional

import torch
import torch.distributed as dist
from torch._utils import _flatten_dense_tensors as flatten

from colossalai.accelerator import get_accelerator
from colossalai.legacy.zero.sharded_param.sharded_tensor import ShardedTensor

from .tensor_shard_strategy import TensorShardStrategy


class BucketTensorShardStrategy(TensorShardStrategy):
    """Use the same shard scheme as `TensorShardStrategy`'s, but it gathers tensors of a sub-module together,
    which will fully utilize network bandwidth.
    It is especially useful when sub-module contains bias,
    since we cannot utilize network bandwidth well if we only gather a bias tensor (bias is usually small).
    """

    def gather(self, tensor_list: List[ShardedTensor], process_group: Optional[dist.ProcessGroup] = None):
        # Only tensors that are actually sharded need gathering.
        tensor_list: List[ShardedTensor] = [t for t in tensor_list if t.is_sharded]
        if len(tensor_list) == 0:
            return
        # NOTE(review): assumes every tensor in the bucket shares the first
        # tensor's device and dtype — confirm against callers.
        target_device = tensor_list[0].device
        dtype = tensor_list[0].dtype
        buffer_list: List[torch.Tensor] = []
        # Shard sizes per tensor; all ranks hold equally-sized (padded) shards.
        tensor_numels = [t.payload.numel() for t in tensor_list]
        buffer_size = sum(tensor_numels)
        world_size = dist.get_world_size(process_group)
        rank = dist.get_rank(process_group)
        for i in range(world_size):
            if i == rank:
                # Our slot: flatten all local shards into a single contiguous buffer.
                buffer_list.append(
                    flatten([t.payload for t in tensor_list]).cuda(get_accelerator().get_current_device())
                )
            else:
                # Receive buffers for the other ranks' flattened shards.
                buffer_list.append(torch.zeros(buffer_size, dtype=dtype, device=get_accelerator().get_current_device()))
        # One collective for the whole bucket instead of one all-gather per tensor.
        dist.all_gather(buffer_list, buffer_list[rank], group=process_group)
        # Move to target device before splitting buffer
        # Ensure we utilize maximum PCIE bandwidth
        buffer_list = [buffer.to(target_device) for buffer in buffer_list]
        offset = 0
        for i, t in enumerate(tensor_list):
            # Stitch this tensor's slice from each rank's buffer, trim the padding
            # added by sharding, and restore the original shape.
            gathered_payload = [buffer[offset : offset + tensor_numels[i]] for buffer in buffer_list]
            gathered_payload = torch.cat(gathered_payload)[: t.origin_numel].view(t.origin_shape)
            t.payload_reset(gathered_payload)
            t.is_sharded = False
            offset += tensor_numels[i]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/shard_utils/tensor_shard_strategy.py
colossalai/legacy/zero/shard_utils/tensor_shard_strategy.py
from typing import List, Optional

import torch
import torch.distributed as dist

from colossalai.accelerator import get_accelerator
from colossalai.legacy.zero.gemini.tensor_utils import colo_model_data_tensor_move_inline
from colossalai.legacy.zero.shard_utils import BaseShardStrategy
from colossalai.legacy.zero.shard_utils.commons import get_shard
from colossalai.legacy.zero.sharded_param.sharded_tensor import ShardedTensor


class TensorShardStrategy(BaseShardStrategy):
    """
    A naive implementation which shard each tensor evenly over all ranks
    """

    def shard(self, tensor_list: List[ShardedTensor], process_group: Optional[dist.ProcessGroup] = None):
        for t in tensor_list:
            self._shard_tensor(t, process_group)

    def gather(self, tensor_list: List[ShardedTensor], process_group: Optional[dist.ProcessGroup] = None):
        for t in tensor_list:
            self._gather_tensor(t, process_group)

    def _shard_tensor(self, t: ShardedTensor, process_group: Optional[dist.ProcessGroup] = None):
        """Shard tensor among processes.

        Args:
            t (ShardedTensor): a tensor to be sharded.
            process_group (Optional[dist.ProcessGroup], optional): the process group among which tensor shards.
                Defaults to None.
        """
        # Idempotent: an already-sharded tensor is left untouched.
        if t.is_sharded:
            return
        if t.payload.device.type == "cuda":
            # NOTE(review): compares a torch.device against the value returned by
            # get_accelerator().get_current_device() — assumes that returns a
            # comparable device object; confirm.
            assert t.payload.device == get_accelerator().get_current_device(), (
                f"shard tensor on cuda device index {t.payload.device.index},"
                f" but current cuda device is {get_accelerator().get_current_device()}"
            )
        # Keep only this rank's (zero-padded) chunk of the flattened payload.
        sharded_payload, _ = get_shard(t.payload, dist.get_rank(process_group), dist.get_world_size(process_group))
        t.payload_reset(sharded_payload)
        t.is_sharded = True

    def _gather_tensor(self, t: ShardedTensor, process_group: Optional[dist.ProcessGroup] = None):
        # Idempotent: nothing to gather for an unsharded tensor.
        if not t.is_sharded:
            return
        target_device = t.device
        payload_numel = t.payload.numel()
        world_size = dist.get_world_size(process_group)
        rank = dist.get_rank(process_group)
        # All-gather every rank's equally-sized shard into one flat buffer on
        # the current accelerator device.
        buffer = torch.empty(
            payload_numel * world_size, dtype=t.payload.dtype, device=get_accelerator().get_current_device()
        )
        buffer_list = list(torch.chunk(buffer, chunks=world_size, dim=0))
        buffer_list[rank].copy_(t.payload)
        dist.all_gather(buffer_list, buffer_list[rank], group=process_group, async_op=False)
        # Drop the zero padding added by get_shard() and restore the original shape.
        gathered_payload = torch.narrow(buffer, 0, 0, t.origin_numel).reshape(t.origin_shape)
        t.payload_reset(gathered_payload)
        # Move the reassembled tensor back to where the shard used to live.
        colo_model_data_tensor_move_inline(t, target_device)
        t.is_sharded = False
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/shard_utils/commons.py
colossalai/legacy/zero/shard_utils/commons.py
from typing import Tuple

import torch


def get_shard(tensor: torch.Tensor, rank: int, world_size: int) -> Tuple[torch.Tensor, int]:
    """Return the local shard of a full tensor.

    The flattened tensor is partitioned with ``torch.chunk`` so the layout
    matches all-gather / reduce-scatter.  Every rank's shard is zero-padded
    up to the size of the first chunk so all shards have identical length.

    Args:
        tensor: the full (unsharded) tensor.
        rank: index of the calling process within the group.
        world_size: number of processes in the group.

    Returns:
        A ``(shard, num_to_pad)`` pair: ``shard`` has the first chunk's size
        and ``num_to_pad`` is the count of trailing zero elements in it.
    """
    pieces = list(tensor.flatten().chunk(world_size))
    # torch.chunk may yield fewer than world_size pieces; pad with empty tensors.
    pieces.extend(pieces[0].new_empty(0) for _ in range(world_size - len(pieces)))

    local = pieces[rank]
    num_to_pad = pieces[0].numel() - local.numel()
    assert num_to_pad >= 0, num_to_pad

    # Copy the local piece into a zero-filled buffer of the common shard length.
    shard = torch.zeros_like(pieces[0])
    shard[: local.size(0)].copy_(local)
    return shard, num_to_pad
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/shard_utils/__init__.py
colossalai/legacy/zero/shard_utils/__init__.py
from .base_shard_strategy import BaseShardStrategy from .bucket_tensor_shard_strategy import BucketTensorShardStrategy from .tensor_shard_strategy import TensorShardStrategy __all__ = ["BaseShardStrategy", "TensorShardStrategy", "BucketTensorShardStrategy"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_optim/sharded_optim_v2.py
colossalai/legacy/zero/sharded_optim/sharded_optim_v2.py
# this code is inspired by the DeepSpeed library and implemented with our own design from scratch from enum import Enum from typing import Dict, Optional, Tuple import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch.distributed import ProcessGroup from torch.nn.parameter import Parameter from torch.optim import Optimizer from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler from colossalai.interface import OptimizerWrapper from colossalai.legacy.context.parallel_mode import ParallelMode from colossalai.legacy.core import global_context as gpc from colossalai.legacy.zero.gemini.stateful_tensor import StatefulTensor, TensorState from colossalai.legacy.zero.gemini.tensor_placement_policy import AutoTensorPlacementPolicy from colossalai.legacy.zero.gemini.tensor_utils import colo_model_data_tensor_move_inline, colo_tensor_mem_usage from colossalai.legacy.zero.sharded_model import ShardedModelV2 from colossalai.legacy.zero.sharded_model._utils import cast_tensor_to_fp32 from colossalai.logging import get_dist_logger class OptimState(Enum): SCALED = 1 UNSCALED = 2 class ShardedOptimizerV2(OptimizerWrapper): """A wrapper for optimizer. ``ShardedOptimizerV2`` and ``ShardedModelV2`` implement Zero Redundancy Optimizer (ZeRO). By default the ZeRO optimizer stage 3 offload Optimizer States on CPU. We apply the Device-aware Operator Placement technique for OS placement from the following paper. `PatrickStar: Parallel Training of Pre-trained Models via Chunk-based Memory Management`_ GPU margin space is the remaining space after removing peak non-model data from the overall GPU memory, which is detected by a runtime memory tracer. We place as many OS chunks in the margin space as possible. The size of margin space can be controlled by ``gpu_margin_mem_ratio``. If it is set as ``0.0``, it is the same as classical ZeRO optimizer. Note: You must use ``ShardedOptimizerV2`` with ``ShardedModelV2``. 
Note: Make sure you set ``tensor_placement_policy`` in ``ShardedModelV2`` to `"auto"`, if you set ``gpu_margin_mem_ratio > 0``. Args: sharded_model (ShardedModelV2): A sharded model initialized by class ShardedModelV2. The optimizer will use the shard strategy provided by sharded model to shard param fp32 tensors. optimizer (Optimizer): An Optimizer instance. gpu_margin_mem_ratio (float, optional): The ratio of GPU remaining memory (after the first forward-backward) which will be used when using hybrid CPU optimizer. This argument is meaningless when `tensor_placement_policy` of `ShardedModelV2` is not "auto". Defaults to 0.0. initial_scale (float, optional): Initial scale used by DynamicGradScaler. Defaults to 2**32. min_scale (float, optional): Min scale used by DynamicGradScaler. Defaults to 1. growth_factor (float, optional): growth_factor used by DynamicGradScaler. Defaults to 2. backoff_factor (float, optional): backoff_factor used by DynamicGradScaler. Defaults to 0.5. growth_interval (float, optional): growth_interval used by DynamicGradScaler. Defaults to 1000. hysteresis (float, optional): hysteresis used by DynamicGradScaler. Defaults to 2. max_scale (int, optional): max_scale used by DynamicGradScaler. Defaults to 2**32. dp_process_group (Optional[ProcessGroup], optional): data parallel process group. Defaults to None. mp_process_group (Optional[ProcessGroup], optional): model parallel process group. Defaults to None. .. 
_PatrickStar\: Parallel Training of Pre-trained Models via Chunk-based Memory Management: https://arxiv.org/abs/2108.05818 """ def __init__( self, sharded_model: ShardedModelV2, optimizer: Optimizer, gpu_margin_mem_ratio: float = 0.0, initial_scale: float = 2**32, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 2**32, dp_process_group: Optional[ProcessGroup] = None, mp_process_group: Optional[ProcessGroup] = None, verbose: bool = False, ) -> None: assert isinstance(sharded_model, ShardedModelV2), "model must be wrapped with ShardedModel" assert not isinstance(optimizer, ShardedOptimizerV2), "Nested ShardedOptimizerV2 is not supported." super().__init__(optimizer) self.shard_strategy = sharded_model.shard_strategy self.model: ShardedModelV2 = sharded_model self.bf16 = sharded_model.bf16 self.gpu_margin_mem_ratio: float = float(gpu_margin_mem_ratio) assert 0.0 <= self.gpu_margin_mem_ratio <= 1.0, f"gpu_margin_mem_ratio must >=0.0 and <=1.0" # Only move fp32 shards from CPU to GPU when user allows and inner optimizer is valid # Inner optimizer must support optimizing hybrid (CPU and CUDA) tensors, # and it must set `num_fp32_shards_per_param` correctly self._should_move_fp32_shards_h2d: bool = ( sharded_model.cpu_offload and self.gpu_margin_mem_ratio > 0.0 and getattr(optimizer, "num_fp32_shards_per_param", 0) >= 2 ) self.device = sharded_model._tensor_placement_policy.device or torch.device("cpu") self.optim_state: OptimState = OptimState.UNSCALED self.dp_process_group = dp_process_group or gpc.get_group(ParallelMode.DATA) self.mp_process_group = mp_process_group or gpc.get_group(ParallelMode.MODEL) # Grad scaler self.grad_scaler = DynamicGradScaler( initial_scale=initial_scale, min_scale=min_scale, growth_factor=growth_factor, backoff_factor=backoff_factor, growth_interval=growth_interval, hysteresis=hysteresis, max_scale=max_scale, ) self._found_overflow: Tensor = 
torch.IntTensor([0]).to(torch.cuda.current_device()) self._logger = get_dist_logger("ShardedOptimizerV2") self._verbose = verbose self._grad_prepared: bool = ( False # this should be set to true when _prepare_grads() and reset to false when backward ) # Store fp32 param shards self._register_master_weight() if self.gpu_margin_mem_ratio != 0.0 and not isinstance( sharded_model._tensor_placement_policy, AutoTensorPlacementPolicy ): self._logger.warning( f'gpu_margin_mem_ratio is meaningless when tensor_placement_policy is not "auto"', ranks=[0] ) if self._verbose: self._logger.debug( f"After init ShardedOptimizerV2 consumes {self.get_memory_usage()[0] / 1e6} MB CUDA Memory!", ranks=[0] ) self._use_memory_tracer = self.model.use_memory_tracer @property def loss_scale(self): return self.grad_scaler.scale.item() def get_memory_usage(self) -> Tuple[int, int]: """Get the memory usage of the optimizer. Including master_params (param fp32), momentum (``self.state[p]['exp_avg']``) variance (``self.state[p]['exp_avg_sq']``) Returns: Tuple[int, int]: cuda/cpu memory usage in Byte. 
""" cuda_use = 0 cpu_use = 0 def update_mem_use(t): nonlocal cuda_use nonlocal cpu_use t_cuda_use, t_cpu_use = colo_tensor_mem_usage(t) cuda_use += t_cuda_use cpu_use += t_cpu_use for _, p_fp32 in self.master_params.items(): update_mem_use(p_fp32) for group in self.optim.param_groups: for p in group["params"]: state = self.optim.state[p] for k, v in state.items(): update_mem_use(v) return cuda_use, cpu_use def zero_grad(self, *args, **kwargs): self._zero_grad() def backward(self, loss: Tensor) -> None: if not self.bf16: loss = self.loss_scale * loss self.optim_state = OptimState.SCALED self._grad_prepared = False self.model.backward(loss) def backward_by_grad(self, tensor: Tensor, grad: Tensor) -> None: # This function is called except the last stage of pipeline parallel # It receives the scaled grad from the previous rank # No need to scale the grad again # Need to unscale when optimizing if not self.bf16: self.optim_state = OptimState.SCALED self._grad_prepared = False self.model.backward_by_grad(tensor, grad) def clip_grad_norm(self, model: nn.Module, max_norm: float): self._prepare_grads() if not self.bf16 and self.optim_state == OptimState.SCALED: self._unscale_grads() return super().clip_grad_norm(model, max_norm) def step(self, *args, **kwargs): self._prepare_grads() # unscale grads if scaled if not self.bf16 and self.optim_state == OptimState.SCALED: self._unscale_grads() self._maybe_move_fp32_shards() if not self.bf16: found_inf = self._check_overflow() self.grad_scaler.update(found_inf) if found_inf: self._logger.warning("found inf during ShardedOptimV2 step") self._zero_grad(recover_data=True) return self._point_param_fp16_to_master_param() if self._verbose: gpu_mem, cpu_mem = self.get_memory_usage() self._logger.debug( f"Before step ShardedOptimizerV2 consumes {gpu_mem / 1e6} MB CUDA Memory, {cpu_mem / 1e6} MB CUDA Memory!", ranks=[0], ) ret = self.optim.step(*args, **kwargs) if self._verbose: gpu_mem, cpu_mem = self.get_memory_usage() 
self._logger.debug( f"After step ShardedOptimizerV2 consumes {gpu_mem / 1e6} MB CUDA Memory, {cpu_mem / 1e6} MB CUDA Memory!", ranks=[0], ) self._copy_master_model_to_model_fp16() return ret def _check_overflow(self): # clear previous overflow record self._found_overflow.fill_(self.model.overflow_counter) # all-reduce across dp group dist.all_reduce(self._found_overflow, group=self.dp_process_group) # all-reduce over model parallel group dist.all_reduce(self._found_overflow, group=self.mp_process_group) return self._found_overflow.item() > 0 def _unscale_grads(self): assert self.optim_state == OptimState.SCALED for group in self.optim.param_groups: for p in group["params"]: if p.grad is not None: p.grad.data.div_(self.loss_scale) self.optim_state = OptimState.UNSCALED def _zero_grad(self, recover_data: bool = False): """zero grad and maybe recover fp16 params When `reuse_fp16_shard` is enabled, p.colo_attr.sharded_data_tensor stores grad here. We have to recover them from fp32 params. Args: recover_data (bool, optional): Whether to recover fp16 param from fp32 param. Defaults to False. 
""" # We must set grad to None # Because grad here is sharded # But next backward pass will create a full grad first # Which leads to wrong accumulation self.optim.zero_grad(set_to_none=True) for group in self.optim.param_groups: for p in group["params"]: # p.colo_attr.sharded_data_tensor stores grad now # we have to recover fp16 param reuse_fp16_shard = p.colo_attr.sharded_data_tensor.payload_size == 0 if recover_data and reuse_fp16_shard: self._copy_master_param_to_param_fp16(p) else: # release saved gradient p.colo_attr.saved_grad.set_null() self.model.overflow_counter = 0 # set overflow counter to zero def sync_grad(self): pass def _register_master_weight(self): self.master_params: Dict[Parameter, StatefulTensor] = {} for group in self.optim.param_groups: for p in group["params"]: assert hasattr(p, "colo_attr"), "The parameter must be wrapped with ShardedParam" shard_flag = not p.colo_attr.sharded_data_tensor.is_sharded and p.colo_attr.is_replicated if shard_flag: # we always shard replicated parameters self.shard_strategy.shard([p.colo_attr.sharded_data_tensor], self.dp_process_group) self.master_params[p] = StatefulTensor(cast_tensor_to_fp32(p.colo_attr.data_payload.to(self.device))) if shard_flag: # In this branch, there's no need to shard param # So we gather here self.shard_strategy.gather([p.colo_attr.sharded_data_tensor], self.dp_process_group) def _maybe_move_fp32_shards(self): if self._should_move_fp32_shards_h2d: self._should_move_fp32_shards_h2d = False available_cuda_margin_mem = self.model.cuda_margin_space * self.gpu_margin_mem_ratio fp32_shards_available_cuda_margin_mem = available_cuda_margin_mem / self.optim.num_fp32_shards_per_param fp32_shards_used_cuda_margin_mem = 0 for group in self.optim.param_groups: for p in group["params"]: if p.colo_attr.saved_grad.is_null(): continue shard_mem = self.master_params[p].payload.numel() * self.master_params[p].payload.element_size() if fp32_shards_used_cuda_margin_mem + shard_mem < 
fp32_shards_available_cuda_margin_mem: colo_model_data_tensor_move_inline(self.master_params[p], torch.cuda.current_device()) colo_model_data_tensor_move_inline(p.colo_attr.saved_grad, torch.cuda.current_device()) p.colo_attr.offload_grad = False fp32_shards_used_cuda_margin_mem += shard_mem state = self.optim.state[p] for k, v in state.items(): if isinstance(v, Tensor): state[k] = v.cuda() def _prepare_grads(self): if self._grad_prepared: return for group in self.optim.param_groups: for p in group["params"]: if p.colo_attr.saved_grad.is_null(): continue p.colo_attr.saved_grad.trans_state(TensorState.COMPUTE) # If reuse_fp16_shard, grad fp16 which wasn't be offloaded may be evicted to CPU if not p.colo_attr.offload_grad: colo_model_data_tensor_move_inline(p.colo_attr.saved_grad, torch.cuda.current_device()) # FIXME(ver217): p.data here is an empty tensor on CUDA and has no useful information # If we change p.grad directly # it may raise error because of different shape/dtype/device of p.data and p.grad # We just set p.data = p.colo_attr.saved_grad.payload here p.data = p.colo_attr.grad_payload p.grad = p.colo_attr.grad_payload # Set p.data to empty tensor, in case of memory leaking p.colo_attr.set_data_none() self._grad_prepared = True def _point_param_fp16_to_master_param(self): # assign master param pointers to p.data. # We will not trigger data copy here. for group in self.optim.param_groups: for p in group["params"]: self.master_params[p].trans_state(TensorState.COMPUTE) p.data = self.master_params[p].payload # Now p.data is sharded # So optimizer states are sharded naturally def _copy_master_model_to_model_fp16(self): # Copy master param data (fp32) to payload of colo_attr (fp16) # TODO() improve efficiency by gathering tensors into a chunk and transferring # a chunk. 
for group in self.optim.param_groups: for p in group["params"]: self._copy_master_param_to_param_fp16(p) def _copy_master_param_to_param_fp16(self, p): # flush gradient if p.colo_attr.sharded_data_tensor.payload_size == 0: # here reuse_fp16_shard is True # in order to use copy below, we should give sharded data tensor a payload p.colo_attr.sharded_data_tensor.payload_relay(p.colo_attr.saved_grad) else: p.colo_attr.saved_grad.set_null() p.data = self.master_params[p].payload # we need to allocate new memory for keep_not_shard parameters # in order to use copy, otherwise, the sizes of tensor is not compatible if p.colo_attr.data_payload.numel() != p.data.numel(): p.colo_attr.data_payload_reset( torch.empty(p.data.shape, dtype=p.colo_attr.data_payload.dtype, device=p.colo_attr.data_payload.device) ) # TODO() optimize this line CPU (fp32) -> GPU (fp16) half_dtype = torch.bfloat16 if self.bf16 else torch.float16 p.colo_attr.sharded_data_tensor.payload_copy(p.to(half_dtype).detach()) p.colo_attr.set_data_none() if p.colo_attr.keep_not_shard and p.colo_attr.is_replicated: # We gather full fp16 param here p.colo_attr.sharded_data_tensor.is_sharded = True # since only gradient is sharded, we should set to True self.shard_strategy.gather([p.colo_attr.sharded_data_tensor], self.dp_process_group) self.master_params[p].trans_state(TensorState.HOLD) def state_dict(self): optim_state_dict = super().state_dict() scaler_state_dict = self.grad_scaler.state_dict() optim_state_dict["scaler"] = scaler_state_dict return optim_state_dict def load_state_dict(self, *args, **kwargs): if "scaler" not in args[0]: self._logger.warning("Missing scaler when loading optimizer state dict", ranks=[0]) else: scaler_state_dict = args[0].pop("scaler") self.grad_scaler.load_state_dict(scaler_state_dict) super().load_state_dict(*args, **kwargs) for group in self.optim.param_groups: for p in group["params"]: state = self.optim.state[p] for k, v in state.items(): if isinstance(v, Tensor): state[k] = 
v.to(dtype=self.master_params[p].dtype, device=self.master_params[p].device)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/legacy/zero/sharded_optim/__init__.py
colossalai/legacy/zero/sharded_optim/__init__.py
from .sharded_optim_v2 import ShardedOptimizerV2 __all__ = ["ShardedOptimizerV2"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false