repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/constants.py
colossalai/auto_parallel/tensor_shard/constants.py
import operator import torch __all__ = [ "ELEMENTWISE_MODULE_OP", "ELEMENTWISE_FUNC_OP", "RESHAPE_FUNC_OP", "CONV_MODULE_OP", "CONV_FUNC_OP", "LINEAR_MODULE_OP", "LINEAR_FUNC_OP", "BATCHNORM_MODULE_OP", "POOL_MODULE_OP", "NON_PARAM_FUNC_OP", "BCAST_FUNC_OP", "EMBEDDING_MODULE_OP", "LAYERNORM_MODULE_OP", "ELEMENTWISE_METHOD_OP", "RESHAPE_METHOD_OP", "INFINITY_COST", ] ELEMENTWISE_MODULE_OP = [torch.nn.Dropout, torch.nn.ReLU] ELEMENTWISE_FUNC_OP = [ torch.abs, torch.cos, torch.exp, operator.neg, torch.multiply, torch.nn.functional.relu, torch.nn.functional.dropout, # softmax should not be here torch.nn.functional.softmax, ] ELEMENTWISE_METHOD_OP = [ torch.Tensor.to, torch.Tensor.type, # TODO: contiguous maybe need some extra processes. torch.Tensor.contiguous, ] RESHAPE_FUNC_OP = [ torch.flatten, torch.reshape, torch.transpose, torch.split, torch.permute, operator.getitem, ] RESHAPE_METHOD_OP = [ torch.Tensor.view, torch.Tensor.unsqueeze, torch.Tensor.split, torch.Tensor.permute, torch.Tensor.transpose, ] BCAST_FUNC_OP = [ torch.add, torch.sub, torch.mul, torch.div, torch.floor_divide, torch.true_divide, operator.add, operator.sub, operator.mul, operator.floordiv, operator.truediv, torch.matmul, operator.pow, torch.pow, ] CONV_MODULE_OP = [ torch.nn.Conv1d, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d, ] CONV_FUNC_OP = [ torch.conv1d, torch.conv2d, torch.conv3d, torch.conv_transpose1d, torch.conv_transpose2d, torch.conv_transpose3d, ] EMBEDDING_MODULE_OP = [torch.nn.modules.sparse.Embedding] LINEAR_MODULE_OP = [torch.nn.Linear] LINEAR_FUNC_OP = [torch.nn.functional.linear, torch.matmul, torch.bmm] BATCHNORM_MODULE_OP = [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm] LAYERNORM_MODULE_OP = [torch.nn.LayerNorm] POOL_MODULE_OP = [torch.nn.MaxPool1d, torch.nn.MaxPool2d, torch.nn.MaxPool3d, torch.nn.AdaptiveAvgPool2d] NON_PARAM_FUNC_OP = [ torch.flatten, 
torch.reshape, torch.abs, torch.cos, torch.exp, operator.neg, torch.multiply, torch.nn.functional.relu, torch.nn.functional.dropout, torch.flatten, torch.where, operator.pow, torch.pow, torch.tanh, torch.add, torch.sub, torch.mul, torch.div, torch.floor_divide, torch.true_divide, operator.add, operator.sub, operator.mul, operator.floordiv, operator.truediv, # softmax should not be here torch.nn.functional.softmax, ] INFINITY_COST = 1e13
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/__init__.py
colossalai/auto_parallel/tensor_shard/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/sharding_strategy.py
colossalai/auto_parallel/tensor_shard/sharding_strategy.py
from copy import deepcopy
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Tuple, Union

import torch
from torch.fx.node import Node

from colossalai.tensor.comm_spec import CommSpec
from colossalai.tensor.sharding_spec import ShardingSpec

from .constants import (
    ELEMENTWISE_FUNC_OP,
    ELEMENTWISE_METHOD_OP,
    ELEMENTWISE_MODULE_OP,
    RESHAPE_FUNC_OP,
    RESHAPE_METHOD_OP,
)

__all__ = ["OperationDataType", "OperationData", "TrainCycleItem", "MemoryCost", "ShardingStrategy", "StrategiesVector"]


class OperationDataType(Enum):
    """
    An operation can come from the argument list of an operator or the parameter list of a module.
    """

    INPUT = 0
    ARG = 1
    PARAM = 2
    BUFFER = 3
    OUTPUT = 4


@dataclass
class OperationData:
    """
    OperationData is the data related to an operator, the data can be the operand or the output.

    Args:
        name (str): the name of the operation-related data
        type (OperationDataType): the type of the operation data
        data (Any): the value for this data, usually it is a meta tensor.
        logical_shape (Tuple[int]): the logical shape of the data, it can be different
            from its actual shape in memory.
    """

    name: str
    type: OperationDataType
    data: Any
    logical_shape: Tuple[int] = None

    def __post_init__(self):
        # if no logical shape is specified, use the data shape as the logical shape
        if self.logical_shape is None:

            # FIX: the annotation previously used the builtin ``any`` instead of
            # ``typing.Any``.
            def _infer_logical_shape(data: Any):
                """
                Infer the logical shape of the data.

                Tensors contribute their ``shape``; tuples/lists are mapped
                element-wise (preserving the container type); anything else —
                including ``torch.Size`` — yields None.
                """
                if isinstance(data, torch.Tensor):
                    return data.shape
                elif isinstance(data, torch.Size):
                    return None
                elif isinstance(data, (tuple, list)):
                    data_type = type(data)
                    return data_type([_infer_logical_shape(d) for d in data])
                else:
                    return None

            self.logical_shape = _infer_logical_shape(self.data)

    def __repr__(self) -> str:
        return f"OperationData(name={self.name}, type={self.type})"

    def __eq__(self, other) -> bool:
        # Equality (and hashing below) is by name only, so OperationData can be
        # used as a dict key looked up across node handlers.
        return other.name == self.name

    def __hash__(self) -> int:
        return hash(f"{self.name}")


@dataclass
class TrainCycleItem:
    """
    TrainCycleItem is a dataclass to store the items which have different values for the forward and
    backward pass in a training iteration.

    Args:
        fwd (Any): the item for the forward pass
        bwd (Any): the item for the backward pass
        total (Any): the combined item for the whole iteration
    """

    fwd: Any
    bwd: Any
    total: Any


@dataclass
class MemoryCost:
    """
    MemoryCost is a dataclass which stores the memory usage in the program.

    Args:
        activation (int): the memory cost incurred by the activations in bytes.
        parameter (int): the memory cost incurred by the module parameter in bytes.
        temp (int): the memory cost incurred by the temporary tensors in bytes.
        buffer (int): the memory cost incurred by the module buffer in bytes.
    """

    activation: int = 0
    parameter: int = 0
    temp: int = 0
    buffer: int = 0


class CommType(Enum):
    """
    CommType describes the sequential order of a communication action and a computation action.

    Meaning:
        BEFORE: the communication action happens just before the computation operation.
        AFTER: the communication action happens after the computation operation.
        HOOK: the communication action is used to do the grad all reduce.
        IMPLICIT: the communication action happens during the kernel execution, such as SyncBatchNorm
    """

    BEFORE = 0
    AFTER = 1
    HOOK = 2
    IMPLICIT = 3


@dataclass
class CommAction:
    """
    CommAction is used to record the communication action.

    Args:
        comm_spec: express the communication pattern and the process groups to execute
            the communication action.
        comm_type: describes the sequential order of a communication action and a computation action.
        arg_index: record the location of tensor which join the communication, we cannot use
            name of node or op_data at runtime, because the args of node may be changed by
            graph transform passes.
        key_for_kwarg: the kwarg key when the communicated tensor is passed by keyword.
    """

    comm_spec: CommSpec = None
    comm_type: CommType = None
    arg_index: int = -1
    # FIX: annotation previously used the builtin ``any`` instead of ``typing.Any``.
    key_for_kwarg: Any = None


@dataclass
class ShardingStrategy:
    """
    ShardingStrategy is a dataclass to store the meta information on tensor sharding for a node.

    Args:
        name (str): express the sharding strategies in string, such as 'S0S1 = S0R x RS1'.
        sharding_specs (Dict[OperationData, Union[ShardingSpec, Tuple[ShardingSpec]]]): the
            ShardingSpec for every operation data of the node (inputs, params, buffers, output).
        compute_cost (TrainCycleItem): Computation cost to complete this strategy. (default to None)
        communication_cost (TrainCycleItem): Communication cost to complete this strategy. (default to None)
        memory_cost (TrainCycleItem): Memory cost of the output node using this strategy. (default to None)
        communication_actions (Dict[OperationData, CommAction]): the communication actions
            required by this strategy. (default to None)
        resharding_costs (Dict[Node, List[TrainCycleItem]]): resharding cost from each
            predecessor node's strategies to this strategy. (default to None)
    """

    name: str
    sharding_specs: Dict[OperationData, Union[ShardingSpec, Tuple[ShardingSpec]]] = None
    compute_cost: TrainCycleItem = None
    communication_cost: TrainCycleItem = None
    memory_cost: TrainCycleItem = None
    communication_actions: Dict[OperationData, CommAction] = None
    resharding_costs: Dict[Node, List[TrainCycleItem]] = None

    @property
    def input_sharding_specs(self) -> Dict[OperationData, ShardingSpec]:
        # inputs = explicit arguments + module parameters
        specs = {}
        specs.update(self._get_sharding_spec(OperationDataType.ARG))
        specs.update(self._get_sharding_spec(OperationDataType.PARAM))
        return specs

    @property
    def argument_sharding_specs(self) -> Dict[OperationData, ShardingSpec]:
        return self._get_sharding_spec(OperationDataType.ARG)

    @property
    def param_sharding_specs(self) -> Dict[OperationData, ShardingSpec]:
        return self._get_sharding_spec(OperationDataType.PARAM)

    @property
    def output_sharding_specs(self) -> Dict[OperationData, ShardingSpec]:
        return self._get_sharding_spec(OperationDataType.OUTPUT)

    def _get_sharding_spec(self, operation_data_type: OperationDataType):
        """Filter sharding_specs down to entries of the given operation data type."""
        specs = {k: v for k, v in self.sharding_specs.items() if k.type == operation_data_type}
        return specs

    def get_op_data_by_name(self, name: str):
        """Return the OperationData with the given name, or raise KeyError."""
        for op_data in self.sharding_specs.keys():
            if op_data.name == name:
                return op_data
        raise KeyError(f"Could not find the OperationData with name {name}")

    def get_sharding_spec_by_name(self, name: str):
        """Return the ShardingSpec of the OperationData with the given name, or raise KeyError."""
        for op_data, sharding_spec in self.sharding_specs.items():
            if op_data.name == name:
                return sharding_spec
        raise KeyError(f"Could not find the ShardingSpec for OperationData with name {name}")

    def clone(self):
        """Return a copy of this strategy with deep-copied specs, costs and actions."""

        def _deepcopy_dict_vals(data: Dict):
            return {k: deepcopy(v) for k, v in data.items()}

        sharding_specs = _deepcopy_dict_vals(self.sharding_specs) if self.sharding_specs is not None else None

        # We need to deepcopy it when self.communication_actions is not None, instead of
        # checking its __bool__ value. Consider the examples below:
        # If self.communication_actions is an empty dictionary {}, then
        # self.communication_actions is not None, but its __bool__ value is False. In this
        # case, if we set None to the new object, program will crash when we try to access
        # the communication_actions.items.
        communication_actions = (
            _deepcopy_dict_vals(self.communication_actions) if self.communication_actions is not None else None
        )

        # same reason as communication_actions
        resharding_costs = _deepcopy_dict_vals(self.resharding_costs) if self.resharding_costs is not None else None

        compute_cost = deepcopy(self.compute_cost)
        communication_cost = deepcopy(self.communication_cost)
        memory_cost = deepcopy(self.memory_cost)

        return ShardingStrategy(
            name=self.name,
            sharding_specs=sharding_specs,
            compute_cost=compute_cost,
            communication_cost=communication_cost,
            memory_cost=memory_cost,
            communication_actions=communication_actions,
            resharding_costs=resharding_costs,
        )


class StrategiesVector(list):
    """
    Each node in fx graph will have a corresponding StrategiesVector, to store all the possible
    strategies of the node.

    Argument:
        node (Node): node for which the list of sharding strategies are generated.
    """

    def __init__(self, node: Node):
        super().__init__()
        self.node = node
        # fetch its input and output nodes
        # TODO: placeholder input nodes
        self.predecessor_nodes = list(node._input_nodes.keys())
        self.successor_nodes = list(node.users.keys())

    def check_merge(self):
        """Return True if this node is computationally trivial enough to be merged into its source nodes."""
        merge_label = False
        if self.node.op == "call_module":
            target = self.node.target
            root_module = self.node.graph.owning_module
            submod = root_module.get_submodule(target)
            submod_type = type(submod)
            # merge elementwise module node into source nodes
            # we could merge element-wise op, because the output sharding spec is always
            # same as the input sharding spec.
            if submod_type in ELEMENTWISE_MODULE_OP:
                merge_label = True

        if self.node.op == "call_function":
            # we could merge element-wise op, because the output sharding spec is always
            # same as the input sharding spec.
            if self.node.target in ELEMENTWISE_FUNC_OP:
                merge_label = True
            # we could merge bcast op if the rhs is a scalar, because it will fall back to
            # the element-wise case.
            # TODO: remove this after we support the fall back logic.
            # if self.node.target in BCAST_FUNC_OP and len(self.predecessor_nodes) == 1:
            #     merge_label = True
            # we could merge reshape op, because their computation costs are negligible.
            if self.node.target in RESHAPE_FUNC_OP:
                merge_label = True

        if self.node.op == "call_method":
            # we could merge reshape op, because their computation costs are negligible.
            method = getattr(self.node.args[0]._meta_data.__class__, self.node.target)
            if method in RESHAPE_METHOD_OP:
                merge_label = True
            if method in ELEMENTWISE_METHOD_OP:
                merge_label = True

        return merge_label
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/initialize.py
colossalai/auto_parallel/tensor_shard/initialize.py
from typing import Dict, List, Tuple

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.fx.graph import Graph

from colossalai._analyzer.fx.codegen import ActivationCheckpointCodeGen
from colossalai._analyzer.fx.graph_module import ColoGraphModule
from colossalai._analyzer.fx.passes import shape_prop_pass
from colossalai._analyzer.fx.tracer.tracer import ColoTracer
from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply_pass
from colossalai.auto_parallel.passes.runtime_preparation_pass import runtime_preparation_pass
from colossalai.auto_parallel.tensor_shard.options import DataloaderOption, ShardOption, SolverOptions, SolverPerference
from colossalai.auto_parallel.tensor_shard.sharding_strategy import CommAction
from colossalai.auto_parallel.tensor_shard.solver import CostGraph, Solver, StrategiesConstructor
from colossalai.device.alpha_beta_profiler import AlphaBetaProfiler
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.sharding_spec import ShardingSpec


class ModuleWrapper(nn.Module):
    """
    This class is used to wrap the original module, and add the sharding_spec_dict,
    origin_spec_dict, comm_actions_dict into the forward function.
    """

    def __init__(
        self,
        module: ColoGraphModule,
        sharding_spec_dict: Dict[int, List[ShardingSpec]],
        origin_spec_dict: Dict[int, ShardingSpec],
        comm_actions_dict: Dict[int, Dict[str, CommAction]],
    ):
        """
        Args:
            module: the original module
            sharding_spec_dict: The sharding_spec_dict is used to record the target sharding specs
                of each tensor required in user node.
            origin_spec_dict: The origin_spec_dict is used to record the original sharding spec of
                each tensor.
            comm_actions_dict: The comm_actions_dict is used to record the communication actions of
                each tensor.
        """
        super(ModuleWrapper, self).__init__()
        self.module = module
        self.sharding_spec_dict = sharding_spec_dict
        self.origin_spec_dict = origin_spec_dict
        self.comm_actions_dict = comm_actions_dict

    def forward(self, *args, **kwargs):
        # Forward user args untouched; the three spec dicts are injected as extra
        # keyword arguments that the transformed graph expects.
        return self.module(
            *args,
            sharding_spec_convert_dict=self.sharding_spec_dict,
            origin_node_sharding_spec_dict=self.origin_spec_dict,
            comm_actions_dict=self.comm_actions_dict,
            **kwargs,
        )


def extract_meta_args_from_dataloader(data_loader: torch.utils.data.DataLoader, data_process_func: callable):
    """
    This method is used to extract the meta_args from the dataloader under the instruction of the
    data_process_func.

    NOTE(review): this is an unimplemented stub — it implicitly returns None, so
    autoparallelize() will pass meta_args=None on to initialize_model when the caller
    relies on this path.
    """
    # TODO: implement this function


def extract_alpha_beta_for_device_mesh(alpha_beta_dict: Dict[Tuple[int], Tuple[float]], logical_mesh_shape: Tuple[int]):
    """
    This method is used to extract the mesh_alpha and mesh_beta for the given logical_mesh_shape
    from the alpha_beta_dict. These two values will be used to estimate the communication cost.

    NOTE(review): unimplemented stub — implicitly returns None, which makes the
    tuple-unpacking at its call site in initialize_device_mesh fail if that branch runs.
    """
    # TODO: implement this function


def build_strategy_constructor(
    graph: Graph, device_mesh: DeviceMesh, solver_preference: str, dataloader_option: str, shard_option: str
):
    """
    This method is used to build the strategy_constructor for the given graph. After this method,
    each node in the graph will have a strategies_vector which is constructed by the related
    node handler.

    Raises:
        ValueError: if solver_preference, dataloader_option or shard_option is not one of
            the accepted string values.
    """
    # Map the user-facing string options onto their enum counterparts, rejecting
    # anything unrecognised early.
    if solver_preference == "standard":
        solver_preference = SolverPerference.STANDARD
    elif solver_preference == "tp":
        solver_preference = SolverPerference.TP
    elif solver_preference == "dp":
        solver_preference = SolverPerference.DP
    else:
        raise ValueError(f"Invalid solver_preference: {solver_preference}")

    if dataloader_option == "replicated":
        dataloader_option = DataloaderOption.REPLICATED
    elif dataloader_option == "distributed":
        dataloader_option = DataloaderOption.DISTRIBUTED
    else:
        raise ValueError(f"Invalid dataloader_option: {dataloader_option}")

    if shard_option == "standard":
        shard_option = ShardOption.STANDARD
    elif shard_option == "shard":
        shard_option = ShardOption.SHARD
    elif shard_option == "shard_last_axis":
        shard_option = ShardOption.SHARD_LAST_AXIS
    elif shard_option == "full_shard":
        shard_option = ShardOption.FULL_SHARD
    else:
        raise ValueError(f"Invalid shard_option: {shard_option}")

    solver_options = SolverOptions(
        solver_perference=solver_preference, dataloader_option=dataloader_option, shard_option=shard_option
    )
    strategies_constructor = StrategiesConstructor(graph, device_mesh, solver_options)
    strategies_constructor.build_strategies_and_cost()

    return strategies_constructor


def solve_solution(gm: ColoGraphModule, strategy_constructor: StrategiesConstructor, memory_budget: float = -1.0):
    """
    This method is used to solve the best solution for the given graph. The solution is a list of
    integers, each integer represents the best strategy index of the corresponding node.
    """
    # temporarily we use all nodes as liveness list, we count the backward memory cost together
    # with forward memory cost into the node memory cost, and no activation checkpoint is used
    # in this phase.
    # graph_analyser = GraphAnalyser(gm)
    # liveness_list = graph_analyser.liveness_analysis()
    cost_graph = CostGraph(strategy_constructor.leaf_strategies)
    cost_graph.simplify_graph()
    solver = Solver(gm.graph, strategy_constructor, cost_graph, memory_budget=memory_budget)
    ret = solver.call_solver_serialized_args()
    solution = list(ret[0])

    return solution


def transform_to_sharded_model(
    gm: ColoGraphModule,
    meta_args: Dict,
    solution: List[int],
    device_mesh: DeviceMesh,
    strategies_constructor: StrategiesConstructor,
    overlap: bool = False,
):
    """
    This method is used to transform the original graph to the sharded graph. The model parameters
    will be sharded according to the solution and the grad hooks will be added to the sharded graph
    using the runtime_preparation_pass. The communication node will be added into the graph using
    the runtime_apply_pass.
    """
    gm, sharding_spec_dict, origin_spec_dict, comm_actions_dict = runtime_preparation_pass(
        gm, solution, device_mesh, strategies_constructor, overlap=overlap
    )
    gm = runtime_apply_pass(gm)
    # re-propagate shapes so the injected runtime nodes carry metadata, then recompile
    # the python code of the transformed graph
    shape_prop_pass(gm, *meta_args.values(), sharding_spec_dict, origin_spec_dict, comm_actions_dict)
    gm.recompile()
    sharding_spec_dicts = (sharding_spec_dict, origin_spec_dict, comm_actions_dict)

    return gm, sharding_spec_dicts


def initialize_device_mesh(
    world_size: int = -1,
    physical_devices: List[int] = None,
    alpha_beta_dict: Dict[Tuple[int], Tuple[float]] = None,
    logical_mesh_shape: Tuple[int] = None,
    logical_mesh_id: torch.Tensor = None,
):
    """
    This method is used to initialize the device mesh.

    Args:
        world_size: the size of device mesh. If the world_size is -1, the world size will be
            set to the number of GPUs in the current machine.
        physical_devices: the physical devices used to initialize the device mesh.
        alpha_beta_dict(optional): the alpha_beta_dict contains the alpha and beta values for
            each devices. if the alpha_beta_dict is None, the alpha_beta_dict will be generated
            by profile_alpha_beta function.
        logical_mesh_shape(optional): the logical_mesh_shape is used to specify the logical
            mesh shape.
        logical_mesh_id(optional): the logical_mesh_id is used to specify the logical mesh id.
    """
    # if world_size is not set, use the world size from torch.distributed
    if world_size == -1:
        world_size = dist.get_world_size()

    if physical_devices is None:
        physical_devices = [i for i in range(world_size)]
    physical_mesh = torch.tensor(physical_devices)

    if alpha_beta_dict is None:
        # if alpha_beta_dict is not given, use a series of executions to profile alpha and beta
        # values for each device
        ab_profiler = AlphaBetaProfiler(physical_devices)
        alpha_beta_dict = ab_profiler.alpha_beta_dict
    else:
        ab_profiler = AlphaBetaProfiler(physical_devices, alpha_beta_dict=alpha_beta_dict)

    # NOTE(review): mesh_alpha/mesh_beta are only bound in the two branches below;
    # if BOTH logical_mesh_shape and logical_mesh_id are supplied, neither branch runs
    # and the DeviceMesh construction raises UnboundLocalError — confirm intended.
    if logical_mesh_shape is None and logical_mesh_id is None:
        # search for the best logical mesh shape
        logical_mesh_id = ab_profiler.search_best_logical_mesh()
        logical_mesh_id = torch.Tensor(logical_mesh_id).to(torch.int)
        logical_mesh_shape = logical_mesh_id.shape

        # extract alpha and beta values for the chosen logical mesh shape
        mesh_alpha, mesh_beta = ab_profiler.extract_alpha_beta_for_device_mesh()

    elif logical_mesh_shape is not None and logical_mesh_id is None:
        logical_mesh_id = physical_mesh.reshape(logical_mesh_shape)

        # extract alpha and beta values for the chosen logical mesh shape
        # NOTE(review): the module-level helper takes logical_mesh_shape as its second
        # parameter but is passed logical_mesh_id here, and the helper is an
        # unimplemented stub returning None — this unpacking would fail at runtime.
        mesh_alpha, mesh_beta = extract_alpha_beta_for_device_mesh(alpha_beta_dict, logical_mesh_id)

    device_mesh = DeviceMesh(
        physical_mesh_id=physical_mesh,
        logical_mesh_id=logical_mesh_id,
        mesh_alpha=mesh_alpha,
        mesh_beta=mesh_beta,
        init_process_group=True,
    )
    return device_mesh


def initialize_model(
    model: nn.Module,
    meta_args: Dict[str, torch.Tensor],
    device_mesh: DeviceMesh,
    memory_budget: float = -1.0,
    overlap: bool = False,
    solver_preference: str = "standard",
    dataloader_option: str = "replicated",
    shard_option: str = "standard",
    save_solver_solution: bool = False,
    load_solver_solution: bool = False,
    solution_path: str = None,
    return_solution: bool = False,
):
    """
    This method is used to initialize the sharded model which could be used as normal pytorch
    model.

    Args:
        model: the model to be sharded.
        meta_args: the meta_args is used to specify the input shapes of the model.
        device_mesh: the device mesh to execute the model.
        memory_budget(optional): the max cuda memory could be used. If the memory budget is -1.0,
            the memory budget will be infinity.
        overlap(optional): the overlap is used to specify whether to overlap gradient
            communication and backward computing.
        solver_preference(optional): the solver_preference is used to specify which parallelism
            algorithm has higher priority. The valid solver_preference could be 'standard', 'tp',
            or 'dp'.
        dataloader_option(optional): the dataloader_option is used to specify which kind of
            data_loader will be used. The valid dataloader_option could be 'replicated' or
            'distributed'.
        shard_option(optional): the shard_option is used to specify how many axes will be used to
            shard the model. The valid shard_option could be 'standard', 'shard',
            'shard_last_axis', or 'full_shard'.
        save_solver_solution(optional): if the save_solver_solution is True, the solution will be
            saved to the solution_path.
        load_solver_solution(optional): if the load_solver_solution is True, the solution will be
            loaded from the solution_path.
        solution_path(optional): the path to save or load the solution.
        return_solution(optional): if the return_solution is True, the solution will be returned.
            The returned solution will be used to debug or help to analyze the sharding result.
            Therefore, we will not just return a series of integers, but return the best
            strategies.
    """
    # trace the model into an fx graph with activation-checkpoint awareness
    tracer = ColoTracer(trace_act_ckpt=True, bias_addition_split=True)
    graph = tracer.trace(root=model, meta_args=meta_args)
    graph.set_codegen(ActivationCheckpointCodeGen())
    gm = ColoGraphModule(model, graph, model.__class__.__name__)
    shape_prop_pass(gm, *meta_args.values())
    gm.recompile()

    strategies_constructor = build_strategy_constructor(
        graph,
        device_mesh,
        solver_preference=solver_preference,
        dataloader_option=dataloader_option,
        shard_option=shard_option,
    )

    # either reuse a previously-saved solver solution or run the solver now
    if load_solver_solution:
        solution = torch.load(solution_path)
    else:
        solution = solve_solution(gm, strategies_constructor, memory_budget)
        if save_solver_solution:
            torch.save(solution, solution_path)

    gm, sharding_spec_dicts = transform_to_sharded_model(
        gm, meta_args, solution, device_mesh, strategies_constructor, overlap
    )
    model_to_return = ModuleWrapper(gm, *sharding_spec_dicts)

    if return_solution:
        # render each chosen strategy as "<node name> <strategy name>" for debugging
        solution_to_return = []
        nodes = [strategies_vector.node for strategies_vector in strategies_constructor.leaf_strategies]
        for index, node in enumerate(nodes):
            solution_to_return.append(f"{node.name} {node.strategies_vector[solution[index]].name}")
        return model_to_return, solution_to_return
    else:
        return model_to_return


def autoparallelize(
    model: nn.Module,
    meta_args: Dict[str, torch.Tensor] = None,
    data_loader: torch.utils.data.DataLoader = None,
    data_process_func: callable = None,
    alpha_beta_dict: Dict[Tuple[int], Tuple[float]] = None,
    logical_mesh_shape: Tuple[int] = None,
    logical_mesh_id: torch.Tensor = None,
    solver_preference: str = "standard",
    dataloader_option: str = "replicated",
    shard_option: str = "standard",
    save_solver_solution: bool = False,
    load_solver_solution: bool = False,
    solver_solution_path: str = None,
    return_solution: bool = False,
    memory_budget: float = -1.0,
):
    """
    This method is used to initialize the device mesh, extract the meta_args, and use them to
    create a sharded model.

    Args:
        model: the model to be sharded.
        meta_args(optional): the meta_args is used to specify the input shapes of the model. If
            the meta_args is None, the meta_args will be extracted from the data_loader.
        data_loader(optional): the data_loader to be used in normal training loop.
        data_process_func(optional): the data_process_func is used to process the data from the
            data_loader.
        alpha_beta_dict(optional): the alpha_beta_dict contains the alpha and beta values for
            each devices. if the alpha_beta_dict is None, the alpha_beta_dict will be generated
            by profile_alpha_beta function.
        logical_mesh_shape(optional): the logical_mesh_shape is used to specify the logical mesh
            shape. If the logical_mesh_shape is None, the logical_mesh_shape will be generated by
            search_best_logical_mesh_shape function.
        logical_mesh_id(optional): the logical_mesh_id is used to specify the logical mesh id.
        solver_preference(optional): the solver_preference is used to specify which parallelism
            algorithm has higher priority. The valid solver_preference could be 'standard', 'tp',
            or 'dp'.
        dataloader_option(optional): the dataloader_option is used to specify which kind of
            data_loader will be used. The valid dataloader_option could be 'replicated' or
            'distributed'.
        shard_option(optional): the shard_option is used to specify how many axes will be used to
            shard the model. The valid shard_option could be 'standard', 'shard',
            'shard_last_axis', or 'full_shard'.
        save_solver_solution(optional): if the save_solver_solution is True, the solution will be
            saved to the solution_path.
        load_solver_solution(optional): if the load_solver_solution is True, the solution will be
            loaded from the solution_path.
        solver_solution_path(optional): the path to save or load the solution.
        return_solution(optional): if the return_solution is True, the solution will be returned.
        memory_budget(optional): the max cuda memory could be used. If the memory budget is -1.0,
            the memory budget will be infinity.
    """
    device_mesh = initialize_device_mesh(
        alpha_beta_dict=alpha_beta_dict, logical_mesh_shape=logical_mesh_shape, logical_mesh_id=logical_mesh_id
    )
    if meta_args is None:
        # NOTE(review): extract_meta_args_from_dataloader is an unimplemented stub that
        # returns None, so this fallback currently yields meta_args=None.
        meta_args = extract_meta_args_from_dataloader(data_loader, data_process_func)
    rst_to_unpack = initialize_model(
        model,
        meta_args,
        device_mesh,
        solver_preference=solver_preference,
        dataloader_option=dataloader_option,
        shard_option=shard_option,
        save_solver_solution=save_solver_solution,
        load_solver_solution=load_solver_solution,
        solution_path=solver_solution_path,
        return_solution=return_solution,
        memory_budget=memory_budget,
    )

    if return_solution:
        model, solution = rst_to_unpack
        return model, solution
    else:
        model = rst_to_unpack
        return model
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/options.py
colossalai/auto_parallel/tensor_shard/options.py
from dataclasses import dataclass
from enum import Enum

__all__ = ["SolverOptions", "SolverPerference", "DataloaderOption", "ShardOption"]


class SolverPerference(Enum):
    """
    This enum class is to define the solver preference.

    NOTE: the class name keeps the historical misspelling of "preference" because it is
    part of the public API (exported via __all__ and referenced by callers).
    """

    STANDARD = 0
    DP = 1
    TP = 2


class ShardOption(Enum):
    """
    This enum class is to define the shard level required in node strategies.

    Notes:
        STANDARD: We do not add any extra shard requirements.
        SHARD: We require the node to be shard using at least one device mesh axis.
        SHARD_LAST_AXIS: We require the node to be shard using the last device mesh axis.
        FULL_SHARD: We require the node to be shard using all device mesh axes.
    """

    STANDARD = 0
    SHARD = 1
    SHARD_LAST_AXIS = 2
    FULL_SHARD = 3


class DataloaderOption(Enum):
    """
    This enum class is to define the dataloader option.
    """

    REPLICATED = 0
    DISTRIBUTED = 1


@dataclass
class SolverOptions:
    """
    SolverOptions is a dataclass used to configure the preferences for the parallel execution
    plan search.

    Args:
        solver_perference: which parallelism algorithm the solver prioritises.
        dataloader_option: whether dataloader outputs are replicated or distributed.
        shard_option: the shard level required in node strategies.
    """

    solver_perference: SolverPerference = SolverPerference.STANDARD
    dataloader_option: DataloaderOption = DataloaderOption.REPLICATED
    shard_option: ShardOption = ShardOption.STANDARD
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/solver/cost_graph.py
colossalai/auto_parallel/tensor_shard/solver/cost_graph.py
import torch from colossalai.auto_parallel.tensor_shard.constants import INFINITY_COST class CostGraph: """ A graph data structure to simplify the edge cost graph. It has two main functions: 1. To feed the quadratic resharding costs into solver, we need to linearize it. We build edge_cost in CostGraph, and it stored every combinations of strategies for a src-dst node pair in an 1D list. 2. To reduce the searching space, we merge computationally-trivial operators, such as element-wise operators, transpose, and reduction, into their following nodes. The merging information will be given by the StrategiesVector depending on the type of target node and following nodes. Argument: leaf_strategies(List[StrategiesVector]): It stores StrategiesVector of every nodes on the graph. simplify(bool, optional): The generated cost graph will be simplified if it is true. (default to True) """ def __init__(self, leaf_strategies, simplify=True, forward_only=False): self.leaf_strategies = leaf_strategies self.nodes = [strategies_vector.node for strategies_vector in self.leaf_strategies] # stores number of strategies in each node self.node_lens = {strategies_vector.node: len(strategies_vector) for strategies_vector in self.leaf_strategies} # extra_node_costs will store the extra costs introduced by merging nodes self.extra_node_costs = {} self.following_dict = {} self.simplify = simplify self.forward_only = forward_only self._build_cost_graph() def _remove_invalid_node(self, node, attr_name): remove_list = [] target_node_list = getattr(node, attr_name, []) for target_node in target_node_list: if target_node not in self.nodes: remove_list.append(target_node) for element in remove_list: target_node_list.remove(element) def _build_cost_graph(self): """ This method will generate edge_cost for adjacent node pair. Additionally, 'parents' and 'children' attribute will be set to node. 
""" self.edge_costs = {} if self.simplify: self.merge_pair = [] for strategies_vector in self.leaf_strategies: # build edge_cost dst_node = strategies_vector.node for src_node in strategies_vector.predecessor_nodes: if src_node not in self.nodes: continue node_pair = (src_node, dst_node) edge_cost = {} for i in range(len(strategies_vector)): for j in range(len(src_node.strategies_vector)): resharding_cost_item = strategies_vector[i].resharding_costs[src_node][j] if self.forward_only: edge_cost[(j, i)] = resharding_cost_item.fwd else: edge_cost[(j, i)] = resharding_cost_item.total self.edge_costs[node_pair] = edge_cost parent_nodes = [] children_nodes = [] def _check_tensor_in_node(data): """ This method is used to check whether the data has a tensor inside or not. """ has_tensor_flag = False if isinstance(data, torch.Tensor): return True elif isinstance(data, (tuple, list)): for d in data: has_tensor_flag = has_tensor_flag or _check_tensor_in_node(d) return has_tensor_flag for node in strategies_vector.predecessor_nodes: if _check_tensor_in_node(node._meta_data): parent_nodes.append(node) for node in strategies_vector.successor_nodes: if _check_tensor_in_node(node._meta_data): children_nodes.append(node) setattr(dst_node, "parents", parent_nodes) setattr(dst_node, "children", children_nodes) if self.simplify and strategies_vector.check_merge(): for followed_node in strategies_vector.predecessor_nodes: # we only merge node pairs which src node has a tensor element inside. # This is necessary because the node without a tensor element inside will not # be assigned any strategy. if _check_tensor_in_node(followed_node._meta_data): self.merge_pair.append((followed_node, dst_node)) def get_edge_cost(self, src_node, dst_node): return self.edge_costs[(src_node, dst_node)] def merge_node(self, src_node, dst_node): """ To merge dst_node into src_node, we need to do it in following steps: 1. 
For each strategy in dst_node, we need to pick an appropriate strategy of src_node to merge, it is important because the logical resharding costs between the parents node of src_node and merged node depend on the src_node strategies dispatching. For example, for the graph 0->1->2, after merging node 1 into node 2, edge_costs[(node 0, node 2)][(0, 0)] = edge_costs[(node 0, node 1)][(0, x)] x represents the picking strategy of node 1 merged into node 2 strategy 0. 2. We need to accumulate the extra costs introduced by merging nodes, the extra costs contains two parts, one is resharding costs between src_node strategy and dst_node strategy, another is the origin extra costs in src_node strategy. 3. Build connections between new node pairs, and remove the src_node after all consumer nodes detached from it. Argument: src_node(Node): The node will be merged into dst_node. dst_node(Node): The node to integrate src_node. """ # build merge_map merge_map = {} for src_index, _ in enumerate(src_node.strategies_vector): min_cost = INFINITY_COST lowest_cost_index = -1 for dst_index, dst_strategy in enumerate(dst_node.strategies_vector): resharding_cost_item = dst_strategy.resharding_costs[src_node][src_index] if self.forward_only: resharding_cost = resharding_cost_item.fwd else: resharding_cost = resharding_cost_item.total if resharding_cost <= min_cost: min_cost = resharding_cost lowest_cost_index = dst_index merge_map[src_index] = lowest_cost_index # extra_node_cost for src node self.extra_node_costs[src_node] = [0.0] * self.node_lens[src_node] for src_index, strategy in enumerate(src_node.strategies_vector): target_strate_index = merge_map[src_index] target_strategy = dst_node.strategies_vector[target_strate_index] resharding_cost_item = target_strategy.resharding_costs[src_node][src_index] if self.forward_only: resharding_cost_to_add = resharding_cost_item.fwd else: resharding_cost_to_add = resharding_cost_item.total self.extra_node_costs[src_node][src_index] += 
resharding_cost_to_add if dst_node in self.extra_node_costs: self.extra_node_costs[src_node][src_index] += self.extra_node_costs[dst_node][target_strate_index] # add new node pair to cost graph for child_node in dst_node.children: new_node_pair = (src_node, child_node) old_node_pair = (dst_node, child_node) if new_node_pair in self.edge_costs: continue edge_cost = {} for i in range(self.node_lens[src_node]): for j in range(self.node_lens[child_node]): dst_strate_index = merge_map[i] edge_cost[(i, j)] = self.edge_costs[old_node_pair][(dst_strate_index, j)] if new_node_pair not in self.edge_costs: self.edge_costs[new_node_pair] = edge_cost else: # we should accumulate the resharding costs if args of child node contain # both src node and dst node. for index_pair, resharding_cost in self.edge_costs[new_node_pair]: self.edge_costs[new_node_pair][index_pair] += edge_cost[index_pair] # connect src node and children of dst node dst_node.parents.remove(src_node) src_node.children.remove(dst_node) self.edge_costs.pop((src_node, dst_node)) for child_node in dst_node.children: if child_node not in src_node.children: src_node.children.append(child_node) if src_node not in child_node.parents: child_node.parents.append(src_node) # remove dst node from cost graph when dst node has no producer. 
if len(dst_node.parents) == 0: child_node.parents.remove(dst_node) node_pair = (dst_node, child_node) self.edge_costs.pop(node_pair) if len(dst_node.parents) == 0: self.following_dict[dst_node] = src_node dst_node.children = [] def _reindexing_src(self, src): if src not in self.following_dict: return src return self._reindexing_src(self.following_dict[src]) def simplify_graph(self): if not self.simplify: return self.merge_pair.reverse() for src_node, dst_node in self.merge_pair: self.merge_node(src_node, dst_node) self.merge_pair.reverse() reindexing_following_dict = {} for dst, src in self.following_dict.items(): reindexing_following_dict[dst] = self._reindexing_src(src) self.following_dict = reindexing_following_dict
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/solver/strategies_constructor.py
colossalai/auto_parallel/tensor_shard/solver/strategies_constructor.py
import torch from torch.fx import Graph from colossalai.auto_parallel.tensor_shard.node_handler import ( GetattrHandler, OutputHandler, PlaceholderHandler, operator_registry, ) from colossalai.auto_parallel.tensor_shard.sharding_strategy import StrategiesVector from colossalai.auto_parallel.tensor_shard.utils.factory import find_repeat_blocks from colossalai.device.device_mesh import DeviceMesh from ..options import DataloaderOption, SolverOptions __all__ = ["StrategiesConstructor"] class StrategiesConstructor: """ StrategiesConstructor is used to construct the parallelization plan for the model execution. Args: graph (Graph): a Graph object used for analysis and strategy generation. device_mesh (DeviceMesh): a DeviceMesh object which contains the meta information about the cluster. solver_options (SolverOptions): a SolverOptions object which specifies the preferences for plan searching. """ def __init__(self, graph: Graph, device_mesh: DeviceMesh, solver_options: SolverOptions): self.graph = graph assert graph.owning_module is not None, "The given graph is not associated with a owning_module" self.root_module = self.graph.owning_module self.nodes = list(graph.nodes) self.device_mesh = device_mesh self.leaf_strategies = [] self.strategy_map = {} self.solver_options = solver_options self.no_strategy_nodes = [] self.alias_set = None def remove_duplicated_strategy(self, strategies_vector): """ In build_strategies_and_cost method, we may produce some duplicated strategies. In this method, we will remove the duplicated strategies depending on the strategies name. Note that this operation is in-place. 
""" name_checklist = [] remove_list = [] for strategy in strategies_vector: if strategy.name not in name_checklist: name_checklist.append(strategy.name) else: remove_list.append(strategy) for strategy in remove_list: strategies_vector.remove(strategy) def generate_alias_set(self): node_list = [strategy_vector.node for strategy_vector in self.leaf_strategies] common_blocks = find_repeat_blocks(node_list, self.root_module, common_length_threshold=10) repeat_block_nums = len(common_blocks) alias_set = {} if repeat_block_nums == 0: return alias_set for index, common_node in enumerate(common_blocks[0]): for i in range(1, repeat_block_nums): alias_set[node_list.index(common_blocks[i][index])] = node_list.index(common_node) return alias_set def build_strategies_and_cost(self): """ This method is to build the strategy vector for each node in the computation graph. """ def _check_no_strategy_for_node(node): if node.op in ("placeholder", "get_attr", "output"): return False def _check_no_strategy_for_data(data): label = True if isinstance(data, torch.Tensor): return False elif isinstance(data, (tuple, list)): for d in data: label = label and _check_no_strategy_for_data(d) return label return _check_no_strategy_for_data(node._meta_data) for node in self.nodes: strategies_vector = StrategiesVector(node) if _check_no_strategy_for_node(node): self.no_strategy_nodes.append(node) # placeholder node elif node.op == "placeholder": if self.solver_options.dataloader_option == DataloaderOption.DISTRIBUTED: placeholder_option = "distributed" else: assert ( self.solver_options.dataloader_option == DataloaderOption.REPLICATED ), f"placeholder_option {self.solver_options.dataloader_option} is not supported" placeholder_option = "replicated" placeholder_handler = PlaceholderHandler( node, self.device_mesh, strategies_vector, placeholder_option=placeholder_option ) placeholder_handler.register_strategy() # get_attr node elif node.op == "get_attr": getattr_handler = GetattrHandler( node, 
self.device_mesh, strategies_vector, shard_option=self.solver_options.shard_option, solver_perference=self.solver_options.solver_perference, ) getattr_handler.register_strategy() # call_module node elif node.op == "call_module": target = node.target submod = self.root_module.get_submodule(target) submod_type = type(submod) handler = operator_registry.get(submod_type)( node, self.device_mesh, strategies_vector, shard_option=self.solver_options.shard_option, solver_perference=self.solver_options.solver_perference, ) handler.register_strategy() # attach strategies_info to node if hasattr(handler, "strategies_info"): setattr(node, "strategies_info", handler.strategies_info) # call_function node elif node.op == "call_function": target = node.target handler = operator_registry.get(target)( node, self.device_mesh, strategies_vector, shard_option=self.solver_options.shard_option, solver_perference=self.solver_options.solver_perference, ) handler.register_strategy() # attach strategies_info to node if hasattr(handler, "strategies_info"): setattr(node, "strategies_info", handler.strategies_info) # call_method node elif node.op == "call_method": method = getattr(node.args[0]._meta_data.__class__, node.target) handler = operator_registry.get(method)( node, self.device_mesh, strategies_vector, shard_option=self.solver_options.shard_option, solver_perference=self.solver_options.solver_perference, ) handler.register_strategy() # attach strategies_info to node if hasattr(handler, "strategies_info"): setattr(node, "strategies_info", handler.strategies_info) # output node elif node.op == "output": if self.solver_options.dataloader_option == DataloaderOption.DISTRIBUTED: output_option = "distributed" else: assert ( self.solver_options.dataloader_option == DataloaderOption.REPLICATED ), f"placeholder_option {self.solver_options.dataloader_option} is not supported" output_option = "replicated" output_handler = OutputHandler(node, self.device_mesh, strategies_vector, 
output_option=output_option) output_handler.register_strategy() self.remove_duplicated_strategy(strategies_vector) setattr(node, "strategies_vector", strategies_vector) self.leaf_strategies.append(strategies_vector) self.strategy_map[node] = strategies_vector # remove no strategy nodes remove_list = [] for strategies_vector in self.leaf_strategies: if len(strategies_vector) == 0: remove_list.append(strategies_vector.node) for node in remove_list: if node.strategies_vector in self.leaf_strategies: self.leaf_strategies.remove(node.strategies_vector) if node in self.strategy_map: self.strategy_map.pop(node) alias_set = self.generate_alias_set() self.alias_set = alias_set
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/solver/graph_analysis.py
colossalai/auto_parallel/tensor_shard/solver/graph_analysis.py
from dataclasses import dataclass from typing import List from torch.fx.graph import Graph from torch.fx.graph_module import GraphModule from torch.fx.node import Node from colossalai.fx.passes.utils import get_node_module __all__ = ["LiveVariable", "LiveVariableVector", "LiveStage", "GraphAnalyser"] @dataclass class LiveVariable: """ LiveVariable is a data structure to store the meta information of a variable for liveness analysis. """ name: str node: Node is_inplace: bool class LiveVariableVector(list): """ LiveVariableVector is a data structure to store the list of LiveVariable objects. """ def exists(self, name) -> bool: """ Check if a variable has already existed in the current list by name. """ for var in self: if name == var.name: return True return False def get(self, name) -> LiveVariable: for var in self: if name == var.name: return var raise KeyError(f"Variable {name} is not found") def copy(self) -> "LiveVariableVector": """ Create a copy of this vector """ vector = LiveVariableVector() for var in self: vector.append(var) return vector @dataclass class LiveStage: """ LiveStage is a data structure to record the living variables at this current node. """ name: str node: Node all_live_vars: LiveVariableVector unique_live_vars: LiveVariableVector class GraphAnalyser: def __init__(self, gm: GraphModule): self._gm = gm self._graph = gm.graph @property def gm(self) -> GraphModule: """ Return the GraphModule object associated with this analyser. """ return self._gm @property def graph(self) -> Graph: """ Return the Graph object associated with this analyser. """ return self._graph def liveness_analysis(self) -> List[LiveStage]: """ Analyses the graph to obtain the variable liveness information. This function returns an ordered dictionary where the key is the compute stage ID and the value is a LivenessStage object. 
""" compute_nodes = self.graph.nodes liveness_list = [] # checked: record all variables created since the first stage # all: record the live variables only exist until the current stage. # this can be different from the `checked list`` as some variables may be destroyed prior to this stage. # unique: record the unique live variables only exist until the current stage. # this is different from `all list` as some variables are duplicated. checked_variables = LiveVariableVector() all_live_variables = LiveVariableVector() unique_live_vars = LiveVariableVector() for idx, node in enumerate(compute_nodes): ############################# # find new living variables # ############################# # detect whether the current op is an in-place op # if it is an in-place op, we would deem it as a duplicate var is_inplace = False if node.op == "call_function": # check if this is an inplace op such as torch.nn.functional.relu(x, inplace=True) if node.kwargs.get("inplace", False): is_inplace = True elif node.op == "call_module": # to check if this is an inplace op such as torch.nn.Relu(inplace=True) module = get_node_module(node) if getattr(module, "inplace", False): is_inplace = True # add the output var getattr(node, "_meta_data", None) live_var = LiveVariable(name=node.name, node=node, is_inplace=is_inplace) if not is_inplace: unique_live_vars.append(live_var) checked_variables.append(live_var) all_live_variables.append(live_var) # check if any input is not checked yet for arg in node.args: if not isinstance(arg, Node): continue arg_name = arg.name if not checked_variables.exists(arg_name): live_var_from_arg = LiveVariable(name=arg_name, node=node, is_inplace=False) all_live_variables.append(live_var_from_arg) checked_variables.append(live_var_from_arg) unique_live_vars.append(live_var_from_arg) # TODO: add the logic to remove live variables # this should be completed if we are able to trace the backward compute graph # add this stage to liveness dict stage = LiveStage( 
name=node.name, node=node, all_live_vars=all_live_variables.copy(), unique_live_vars=unique_live_vars.copy(), ) # if a LiveStage is covered by another LiveStage, we just keep the larger one. replace = False for index, prev_stage in enumerate(liveness_list): all_covered = True for ele in prev_stage.unique_live_vars: if ele not in stage.unique_live_vars: all_covered = False break if all_covered: replace = True break if replace: liveness_list[index] = stage else: liveness_list.append(stage) return liveness_list def get_alias_set(self): pass
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/solver/solver.py
colossalai/auto_parallel/tensor_shard/solver/solver.py
"""This code is adapted from Alpa https://github.com/alpa-projects/alpa/ with some changes. """ import multiprocessing import time import warnings from typing import Dict import numpy as np from torch.fx.graph import Graph from torch.fx.node import Node from colossalai.auto_parallel.tensor_shard.constants import INFINITY_COST from .cost_graph import CostGraph from .graph_analysis import GraphAnalyser from .strategies_constructor import StrategiesConstructor try: import pulp from pulp import LpMinimize, LpProblem, LpStatus, LpVariable, lpDot, lpSum except: warnings.warn(f"please install the pulp") __all___ = ["Solver"] class Solver: def __init__( self, graph: Graph, strategies_constructor: StrategiesConstructor, cost_graph: CostGraph, graph_analyser: GraphAnalyser = None, memory_budget: float = -1.0, solution_numbers: int = 1, forward_only: bool = False, memory_increasing_coefficient: float = 1.3, verbose=False, ): """ Solver class will integrate information provided by the components and use ILP solver to find a possible optimal strategies combination for target computing graph. Argument: graph: The computing graph to be optimized. strategies_constructor: It will provide all the possible strategies for each node in the computing graph. cost_graph: A graph data structure to simplify the edge cost graph. graph_analyser: graph_analyser will analyses the graph to obtain the variable liveness information, which will be used to generate memory constraints. memory_budget: Memory constraint for the solution. solution_numbers: If solution_numbers is larger than one, solver will us a serious of solutions based on different memory budget. memory_increasing_coefficient: If solution_numbers is larger than one, we will use this coefficient to generate new memory budget. 
""" self.graph = graph self.strategies_constructor = strategies_constructor self.cost_graph = cost_graph self.graph_analyser = graph_analyser self.leaf_strategies = self.strategies_constructor.leaf_strategies self.nodes = [strategies_vector.node for strategies_vector in self.leaf_strategies] self.strategy_map = self.strategies_constructor.strategy_map self.memory_budget = memory_budget self.solution_numbers = solution_numbers self.forward_only = forward_only if self.solution_numbers > 1: self.memory_increasing_coefficient = memory_increasing_coefficient else: self.memory_increasing_coefficient = 1 # temporarily we use all nodes as liveness list, we count the backward memory cost together with # forward memory cost into the node memory cost, and no activation checkpoint is used in this phase. # self.liveness_list = self.graph_analyser.liveness_analysis() self.liveness_list = self.nodes self.node_index_dict = self._generate_node_index_dict() # The last solution vector of auto sharding. self.last_s_val = None # The last objective value of the best ILP solution. self.last_objective = None self.verbose = verbose def _recover_merged_node_strategy(self): """ During cost graph constructing, some nodes, such as unary element-wise node or ReshapeOp, were merged into the previous node. Therefore, the index of those strategies are copied from the previous node. This method is used to recover the strategy index of those merged node. 
""" for node_index, node in enumerate(self.nodes): if node.strategies_vector.check_merge(): # the merged node has only one input, and its strategies follow the input sharding strategy input_strategies_vector = node.args[0].strategies_vector input_best_strategy_index = self.last_s_val[node_index - 1] input_sharding_spec = input_strategies_vector[input_best_strategy_index].output_sharding_spec for strategy_index, strategy in enumerate(node.strategies_vector): if strategy.input_shardings[0].sharding_sequence == input_sharding_spec.sharding_sequence: self.last_s_val[node_index] = strategy_index break def _generate_node_index_dict(self) -> Dict[Node, int]: node_index_dict = {} for index, strategies_vector in enumerate(self.leaf_strategies): node_index_dict[strategies_vector.node] = index return node_index_dict def _prepare_data_for_solver(self): """ Extract information from components for solver. """ node_nums = len(self.leaf_strategies) memory_budget = self.memory_budget # prepare strategies_len strategies_len = [] for node in self.nodes: strategies_len.append(self.cost_graph.node_lens[node]) strategies_len = np.array(strategies_len) # prepare following_nodes following_nodes = self.cost_graph.following_dict index_following_nodes = {} for src, target in following_nodes.items(): src_index = self.node_index_dict[src] target_index = self.node_index_dict[target] index_following_nodes[src_index] = target_index following_nodes = index_following_nodes for index in range(node_nums): if index not in following_nodes: following_nodes[index] = -1 # prepare edge_pairs and resharding costs edge_pairs = [] resharding_costs = [] for pairs, edge_cost in self.cost_graph.edge_costs.items(): src_node = pairs[0] dst_node = pairs[1] src_node_index = self.node_index_dict[src_node] dst_node_index = self.node_index_dict[dst_node] edge_pairs.append(src_node_index) edge_pairs.append(dst_node_index) for i in range(strategies_len[src_node_index]): for j in range(strategies_len[dst_node_index]): 
resharding_costs.append(edge_cost[(i, j)]) edge_pairs = np.array(edge_pairs) resharding_costs = np.array(resharding_costs) # prepare liveness_set liveness_set = self.liveness_list # omit alias_set now alias_set = self.strategies_constructor.alias_set alias_convert_costs = None # prepare compute_costs, communication_costs and memory_costs compute_costs = [] communication_costs = [] memory_costs = [] extra_node_costs = self.cost_graph.extra_node_costs for strategies_vector in self.leaf_strategies: node = strategies_vector.node for index, strategy in enumerate(strategies_vector): compute_cost_item = strategy.compute_cost communication_cost_item = strategy.communication_cost memory_cost_item = strategy.memory_cost if self.forward_only: origin_communication_cost = communication_cost_item.fwd compute_cost = compute_cost_item.fwd # extract MemoryCost item from the memory TrainCycleItem memory_cost = memory_cost_item.fwd else: origin_communication_cost = communication_cost_item.total compute_cost = compute_cost_item.total # extract MemoryCost item from the memory TrainCycleItem memory_cost = memory_cost_item.total # extract the memory cost in float from MemoryCost item and sum them up memory_cost = memory_cost.parameter + memory_cost.activation + memory_cost.buffer compute_costs.append(compute_cost) # node in extra_node_costs means it has some extra communication # cost from node merging, so we need to add those extra communication # cost into if node in extra_node_costs: extra_node_cost = extra_node_costs[node][index] communication_cost = origin_communication_cost + extra_node_cost communication_costs.append(communication_cost) else: communication_costs.append(origin_communication_cost) memory_costs.append(memory_cost) compute_costs = np.array(compute_costs) communication_costs = np.array(communication_costs) memory_costs = np.array(memory_costs) # omit initial value for nodes s_init_np = None return ( node_nums, memory_budget, strategies_len, following_nodes, edge_pairs, 
alias_set, liveness_set, compute_costs, communication_costs, memory_costs, resharding_costs, alias_convert_costs, s_init_np, self.verbose, ) def _call_solver_serialized_args( self, node_nums, memory_budget, strategies_len, following_nodes, edge_pairs, alias_set, liveness_set, compute_costs, communication_costs, memory_costs, resharding_costs, alias_convert_costs, s_init_np=None, verbose=True, ): """ Call the solver with serialized arguments. """ tic = time.time() for x in [strategies_len, edge_pairs, compute_costs, communication_costs, memory_costs, resharding_costs]: assert isinstance(x, np.ndarray) assert len(strategies_len) == node_nums, "strategies_len" def get_non_zero_index(binary_vector): """ Get the index of non-zero item in a vector. """ ct = 0 ret = None for i, elem in enumerate(binary_vector): if pulp.value(elem): ret = i ct += 1 assert ct == 1 return ret # 0. Unpack flatten numpy arrays s_follow = following_nodes s_alias = alias_set E = edge_pairs.reshape((-1, 2)) # noqa r = [] pt = 0 edge_set = set() for i, j in E: prod_length = strategies_len[i] * strategies_len[j] if (i, j) in edge_set: raise ValueError(f"Duplicated edges: {(i, j)}") edge_set.add((i, j)) r.append(resharding_costs[pt : pt + prod_length]) pt += prod_length assert pt == len(resharding_costs) ###################### # omit alias set now # ###################### # A = alias_set.reshape((-1, 2)) # noqa # for (i, j) in A: # prod_length = strategies_len[i] * strategies_len[j] # v.append(alias_convert_costs[pt:pt + prod_length]) # pt += prod_length # assert pt == len(alias_convert_costs) # L = [] # noqa # pt = node_nums # for i in range(node_nums): # length = liveness_set[i] # L.append(liveness_set[pt:pt + length]) # pt += length # assert pt == len(liveness_set) pt = 0 c = [] d = [] m = [] pt = 0 for i in range(node_nums): length = strategies_len[i] c.append(compute_costs[pt : pt + length]) d.append(communication_costs[pt : pt + length]) m.append(memory_costs[pt : pt + length]) pt += length 
assert pt == len(compute_costs), f"{pt} == {len(compute_costs)}" assert pt == len(communication_costs), f"{pt} == {len(communication_costs)}" assert pt == len(memory_costs), f"{pt} == {len(memory_costs)}" # 1. Create variables ############################# # create variables for node # ############################# s = [] num_nodes = 0 reverse_follow_backpatch = [] for i in range(node_nums): if s_follow[i] < 0: if strategies_len[i] == 1: s.append([1]) else: if i not in s_alias: num_nodes += 1 s.append(LpVariable.matrix(f"s[{i}]", (range(strategies_len[i]),), cat="Binary")) else: s.append(s[s_alias[i]]) else: if s_follow[i] < len(s): s.append(s[s_follow[i]]) else: s.append(None) reverse_follow_backpatch.append(i) for i in reverse_follow_backpatch: s[i] = s[s_follow[i]] ############################# # create variables for edge # ############################# e = [] num_edges = 0 map_edge_to_idx = {} for idx, (i, j) in enumerate(E): if len(s[i]) == 1: e.append(s[j]) elif len(s[j]) == 1: e.append(s[i]) else: if i in s_alias and j in s_alias and (s_alias[i], s_alias[j]) in map_edge_to_idx: e.append(e[map_edge_to_idx[(s_alias[i], s_alias[j])]]) else: num_edges += 1 e.append(LpVariable.matrix(f"e[{i},{j}]", (range(len(s[i]) * len(s[j])),), cat="Binary")) assert len(e[idx]) == len(r[idx]) map_edge_to_idx[(i, j)] = idx for element in s: assert len(element) > 0 # 2. Set initial value ###################################### # set a initial value for warm start # ###################################### if s_init_np is not None: s_init = s_init_np.reshape((-1, 3)) for idx, value, fix in s_init: for i in range(len(s[idx])): s[idx][i].setInitialValue(i == value) if fix: s[idx][i].fixValue() # 3. 
Objective prob = LpProblem("myProblem", LpMinimize) ################################################################### # computing the node cost(computing cost and communication cost) # ################################################################### obj = 0 for i in range(node_nums): assert len(s[i]) == len(c[i]) assert len(s[i]) == len(d[i]) obj += lpDot(s[i], c[i]) + lpDot(s[i], d[i]) ############################################# # computing the edge cost(resharding cost) # ############################################# for i in range(len(E)): assert len(e[i]) == len(r[i]) obj += lpDot(e[i], r[i]) prob += obj # 4. Constraints # (a). specified by `cat="Binary"` # (b) ################################################# # make sure each node only choose one strategy # ################################################# for i in range(node_nums): if s_follow[i] < 0: prob += lpSum(s[i]) == 1 # (c) ################################################# # compute memory consumption with liveness set # ################################################# if memory_budget > 0: mem = 0 for node in liveness_set: if node not in self.node_index_dict: continue node_index = self.node_index_dict[node] mem += lpSum(s[node_index][j] * m[node_index][j] for j in range(len(s[node_index]))) prob += mem <= memory_budget # (d). 
specified by `cat="Binary"` for idx, (i, j) in enumerate(E): if strategies_len[i] == 1 or strategies_len[j] == 1: continue # (e) prob += lpSum(e[idx]) == 1 # (f) for row in range(len(s[i])): C = len(s[j]) # noqa prob += lpSum(e[idx][row * C + col] for col in range(0, C)) <= s[i][row] # (g) for col in range(len(s[j])): R = len(s[i]) # noqa C = len(s[j]) # noqa prob += lpSum(e[idx][row * C + col] for row in range(0, R)) <= s[j][col] # (h) ###################### # omit alias set now # ###################### # alias_set = set() # for (idx, (i, j)) in enumerate(A): # R = len(s[i]) # noqa # C = len(s[j]) # noqa # if (i, j) in alias_set: # raise ValueError(f"Duplicated edges: {(i, j)}") # alias_set.add((i, j)) # alias_set.add((j, i)) # for row in range(len(s[i])): # for col in range(len(s[j])): # if v[idx][row * C + col] > 0.5: # prob += s[i][row] + s[j][col] <= 1 msg = verbose time_limit = 600 assert "COIN_CMD" in pulp.listSolvers( onlyAvailable=True ), "Please install ILP solvers by 'sudo apt install coinor-cbc'" solver = pulp.COIN_CMD(mip=True, msg=msg, timeLimit=time_limit, threads=multiprocessing.cpu_count()) # solver = pulp.GLPK_CMD(mip=True, msg=msg, timeLimit=time_limit) prob.solve(solver) status = prob.status objective = pulp.value(prob.objective) objective = float(objective) if objective is not None else -1.0 if verbose: print(f"ILP Status: {LpStatus[status]}\tObjective: {objective}\t" f"Time: {time.time() - tic}") print(f"#nodes: {num_nodes}, #edges: {num_edges}") if prob.status in [pulp.LpStatusInfeasible]: raise RuntimeError( "Cannot run the function under the given memory budget. " "Please increase the memory budget." 
) # Get and check results s_val = np.full((node_nums,), -1, dtype=np.int32) for i in range(node_nums): s_val[i] = get_non_zero_index(s[i]) e_val = np.full((len(E),), -1, dtype=np.int32) for idx, (i, j) in enumerate(E): e_val[idx] = get_non_zero_index(e[idx]) i_spec_index = e_val[idx] // len(s[j]) j_spec_index = e_val[idx] % len(s[j]) assert i_spec_index == s_val[i], f"e_val[{i}][{j}]" assert j_spec_index == s_val[j], f"e_val[{i}][{j}]" if verbose and r[idx][e_val[idx]] > 0: print(f"Edge cost {(i, j)} : {r[idx][e_val[idx]]}") self.last_s_val = list(s_val) # self._recover_merged_node_strategy() self.last_objective = objective if objective > INFINITY_COST: warnings.warn("Detect unexpected behaviors in the auto-sharding pass.") return self.last_s_val, e_val, self.last_objective, status def call_solver_serialized_args(self): """ Call the solver with serialized arguments and handle python errors. Additionally, we could give a serious of solutions with different memory budget. """ if self.solution_numbers == 1: args = self._prepare_data_for_solver() ret = self._call_solver_serialized_args(*args) return ret origin_memory_budget = self.memory_budget memory_budget_list = [ origin_memory_budget * self.memory_increasing_coefficient**i for i in range(self.solution_numbers) ] ret_list = [] for memory_budget in memory_budget_list: self.memory_budget = memory_budget args = self._prepare_data_for_solver() ret = self._call_solver_serialized_args(*args) ret_list.append(ret) return ret_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/solver/__init__.py
colossalai/auto_parallel/tensor_shard/solver/__init__.py
from .cost_graph import CostGraph from .graph_analysis import GraphAnalyser from .solver import Solver from .strategies_constructor import StrategiesConstructor __all__ = ["GraphAnalyser", "Solver", "StrategiesConstructor", "CostGraph"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/utils/broadcast.py
colossalai/auto_parallel/tensor_shard/utils/broadcast.py
from enum import Enum, auto
from typing import List

import torch
from torch.fx.node import Node

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    OperationData,
    OperationDataType,
)
from colossalai.tensor.comm_spec import CollectiveCommPattern, CommSpec
from colossalai.tensor.sharding_spec import ShardingSpec

__all__ = [
    "BroadcastType",
    "is_broadcastable",
    "get_broadcast_shape",
    "recover_sharding_spec_for_broadcast_shape",
    "comm_actions_for_oprands",
]


class BroadcastType(Enum):
    """Relation between a logical (post-broadcast) dimension and the physical tensor dimension."""

    # logical and physical dimension sizes are identical
    EQUAL = auto()
    # the dimension exists only in the logical shape (prepended by broadcasting)
    PADDING = auto()
    # the physical dimension has size 1 and is expanded to a larger logical size
    MULTIPLE = auto()


def is_broadcastable(shape1: torch.Size, shape2: torch.Size) -> bool:
    """
    Check if two shapes are broadcastable to each other.
    """
    # Walk trailing dimensions; a pair is compatible when equal or when either side is 1.
    # Extra leading dims on either side are always compatible (zip stops at the shorter one).
    for s1, s2 in zip(shape1[::-1], shape2[::-1]):
        if s1 == 1 or s2 == 1 or s1 == s2:
            pass
        else:
            return False
    return True


def get_broadcast_shape(shape1: torch.Size, shape2: torch.Size) -> List[int]:
    """
    Compute the broadcast shape given two shapes.
    """
    assert is_broadcastable(shape1, shape2), f"{shape1} and {shape2} are not broadcastable"
    shape1_reverse = shape1[::-1]
    shape2_reverse = shape2[::-1]
    min_common_dim = min(len(shape1), len(shape2))
    dims = []
    # trailing (common) dims take the max of the two sizes (one of them is 1 or they are equal)
    for s1, s2 in zip(shape1_reverse, shape2_reverse):
        dims.append(max(s1, s2))
    # append the remaining dims; at most one of these slices is non-empty
    dims.extend(shape1_reverse[min_common_dim:])
    dims.extend(shape2_reverse[min_common_dim:])
    return dims[::-1]


def get_broadcast_dim_info(logical_shape, physical_shape):
    """
    Map every logical dimension index to its BroadcastType with respect to the physical shape.

    Args:
        logical_shape: shape after broadcasting (must have at least as many dims as physical_shape)
        physical_shape: shape of the tensor before broadcasting
    """
    # get the number of dimensions
    logical_num_dims = len(logical_shape)
    physical_num_dims = len(physical_shape)

    assert (
        logical_num_dims >= physical_num_dims
    ), "The number of dimensions in the logical shape is smaller than that of the physical shape, this tensor is not broadcast!"

    # track the dim and its broadcasting type
    logical_dim_broadcast_info = {}

    for i in range(logical_num_dims):
        # get the trailing dim size
        logical_dim_idx = logical_num_dims - i - 1
        physical_dim_idx = physical_num_dims - i - 1
        logical_dim_size = logical_shape[logical_dim_idx]

        if physical_dim_idx >= 0:
            physical_dim_size = physical_shape[physical_dim_idx]

            if physical_dim_size == logical_dim_size:
                logical_dim_broadcast_info[logical_dim_idx] = BroadcastType.EQUAL
            elif physical_dim_size == 1 and physical_dim_size != logical_dim_size:
                logical_dim_broadcast_info[logical_dim_idx] = BroadcastType.MULTIPLE
        else:
            # no matching physical dim: this logical dim was prepended by broadcasting
            logical_dim_broadcast_info[logical_dim_idx] = BroadcastType.PADDING

    return logical_dim_broadcast_info


def recover_sharding_spec_for_broadcast_shape(
    logical_sharding_spec: ShardingSpec, logical_shape: torch.Size, physical_shape: torch.Size
) -> ShardingSpec:
    """
    This function computes the sharding spec for the physical shape of a broadcast tensor.

    Args:
        logical_sharding_spec (ShardingSpec): the sharding spec for the broadcast tensor
        logical_shape (torch.Size): logical shape is the broadcast shape of a tensor
        physical_shape (torch.Size): the shape of the tensor before broadcasting

    Returns:
        A pair ``(physical_sharding_spec, removed_dims)`` where ``removed_dims`` lists the
        mesh dimensions whose sharding could not be kept on the physical tensor.
    """
    # if the two shapes are the same, no broadcast occurs
    # we directly return the current sharding spec
    # recording the sharding dimensions removed during logical shape converting to physical one
    removed_dims = []
    if list(logical_shape) == list(physical_shape):
        return logical_sharding_spec, removed_dims

    # get the number of dimensions
    logical_num_dims = len(logical_shape)
    physical_num_dims = len(physical_shape)

    # get the broadcast info
    logical_dim_broadcast_info = get_broadcast_dim_info(logical_shape, physical_shape)

    # generate the sharding spec for the physical shape
    physical_dim_partition = {}
    logical_dim_partition = logical_sharding_spec.dim_partition_dict

    for shape_dim, mesh_dim in logical_dim_partition.items():
        logical_broadcast_type = logical_dim_broadcast_info[shape_dim]

        if logical_broadcast_type == BroadcastType.PADDING or logical_broadcast_type == BroadcastType.MULTIPLE:
            # a PADDING/MULTIPLE dim cannot stay sharded on the physical tensor;
            # remember its mesh dims so the caller can emit communication actions
            removed_dims.extend(mesh_dim)
        else:
            # get the corresponding physical dim
            physical_dim = physical_num_dims - (logical_num_dims - shape_dim)
            physical_dim_partition[physical_dim] = mesh_dim

    physical_sharding_spec = ShardingSpec(
        device_mesh=logical_sharding_spec.device_mesh,
        entire_shape=physical_shape,
        dim_partition_dict=physical_dim_partition,
    )

    return physical_sharding_spec, removed_dims


def comm_actions_for_oprands(
    node: Node, removed_dims: List[int], op_data: OperationData, sharding_spec: ShardingSpec
) -> CommAction:
    """
    Generate a communication action for an operand that lost sharding information when its
    logical (broadcast) shape was converted to the physical shape.

    Args:
        node (Node): the fx node whose argument needs the communication action
        removed_dims (List[int]): mesh dims removed by ``recover_sharding_spec_for_broadcast_shape``
        op_data (OperationData): the operand the action applies to
        sharding_spec (ShardingSpec): the physical sharding spec of the operand
    """
    if len(removed_dims) == 1:
        # if list length is 1, extract element from list to avoid using flatten device mesh
        removed_dims = removed_dims[0]
    comm_spec = CommSpec(
        comm_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
        sharding_spec=sharding_spec,
        logical_process_axis=removed_dims,
    )
    # parameters get their gradient reduction via a hook; activations need the action before the op
    if op_data.type == OperationDataType.PARAM:
        comm_type = CommType.HOOK
    else:
        comm_type = CommType.BEFORE
    # locate the operand's position in node.args by name
    arg_index = -1
    for index, arg in enumerate(node.args):
        if op_data.name == str(arg):
            arg_index = index
    assert arg_index >= 0, f"op_data should be an argument of node."

    comm_action = CommAction(
        comm_spec=comm_spec,
        comm_type=comm_type,
        arg_index=arg_index,
    )

    return comm_action
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/utils/reshape.py
colossalai/auto_parallel/tensor_shard/utils/reshape.py
from enum import Enum
from typing import Dict, List, Tuple

import torch


class PreviousStatus(Enum):
    """
    This class shows the status of previous comparison.
    """

    RESET = 0
    # ORIGIN means the dimension size of original tensor is larger in the previous comparison.
    ORIGIN = 1
    # TGT means the dimension size of target tensor is larger in the previous comparison.
    TGT = 2


def detect_reshape_mapping(origin_shape: torch.Size, tgt_shape: torch.Size) -> Dict[Tuple[int], Tuple[int]]:
    """
    This method is used to detect the reshape mapping between original tensor and target tensor.

    Returns:
        reshape_mapping_dict: The dictionary shows how a tuple of origin dims(keys) mapping to the related
            target dims(values) during reshaping operation.

    Examples:
        import torch
        origin_shape = torch.Size([4, 4, 4])
        tgt_shape = torch.Size([2, 8, 2, 2])
        reshape_mapping_dict = detect_reshape_mapping(origin_shape, tgt_shape)
        print(reshape_mapping_dict)

    Output:
        {(2,): (3, 2), (1, 0): (1,), (0,): (0, 1)}
    """
    # reverse the shape object so we match trailing dimensions first
    origin_shape = list(origin_shape)
    tgt_shape = list(tgt_shape)
    origin_shape.reverse()
    tgt_shape.reverse()

    # initialize arguments
    reshape_mapping_dict = {}
    origin_len = len(origin_shape)
    tgt_len = len(tgt_shape)
    origin_index = 0
    tgt_index = 0
    original_dimension_size = origin_shape[origin_index]
    tgt_dimension_size = tgt_shape[tgt_index]
    tgt_dims = [tgt_len - tgt_index - 1]
    origin_dims = [origin_len - origin_index - 1]
    previous_label = PreviousStatus.RESET

    while origin_index != len(origin_shape) or tgt_index != len(tgt_shape):
        if original_dimension_size == tgt_dimension_size:
            # the accumulated sizes match: record the mapping and start a fresh group
            reshape_mapping_dict[tuple(origin_dims)] = tuple(tgt_dims)
            # if the origin_dims has no element, it means the original tensor has been fully matched.
            # Therefore, we do not have to increase the origin_index for that case.
            if len(origin_dims) > 0:
                origin_index += 1
            # if the tgt_dims has no element, it means the original tensor has been fully matched.
            # Therefore, we do not have to increase the tgt_index for that case.
            if len(tgt_dims) > 0:
                tgt_index += 1
            # the last step of loop should always end with condition
            # so we need to manually skip the preparation for next step
            # in the last step.
            if origin_index == len(origin_shape) and tgt_index == len(tgt_shape):
                continue

            # If origin_index equals to origin_len, we just need to set the original_dimension_size
            # to 1 to match the remaining '1's in the target tensor shape.
            if origin_index == len(origin_shape):
                original_dimension_size = 1
                origin_dims = []
            else:
                original_dimension_size = origin_shape[origin_index]
                origin_dims = [origin_len - origin_index - 1]

            # If tgt_index equals to tgt_len, we just need to set the tgt_dimension_size
            # to 1 to match the remaining '1's in the original tensor shape.
            if tgt_index == len(tgt_shape):
                tgt_dimension_size = 1
                tgt_dims = []
            else:
                tgt_dimension_size = tgt_shape[tgt_index]
                tgt_dims = [tgt_len - tgt_index - 1]

            previous_label = PreviousStatus.RESET

        elif original_dimension_size > tgt_dimension_size:
            tgt_index += 1
            if previous_label == PreviousStatus.TGT:
                # if the target dimension size is larger in the previous comparison, which means
                # the origin dimension size has already accumulated larger than target dimension size, so
                # we need to offload the origin dims and tgt dims into the reshape_mapping_dict.
                reshape_mapping_dict[tuple(origin_dims)] = tuple(tgt_dims)
                original_dimension_size = original_dimension_size // tgt_dimension_size
                origin_dims = [origin_len - origin_index - 1]
                tgt_dimension_size = tgt_shape[tgt_index]
                tgt_dims = [tgt_len - tgt_index - 1, tgt_len - tgt_index]
                # reset the previous_label after offloading the origin dims and tgt dims
                previous_label = PreviousStatus.RESET
            else:
                # accumulate the tgt_dimension_size until tgt_dimension_size larger than original_dimension_size
                tgt_dimension_size *= tgt_shape[tgt_index]
                tgt_dims.append(tgt_len - tgt_index - 1)
                previous_label = PreviousStatus.ORIGIN

        else:
            origin_index += 1
            if previous_label == PreviousStatus.ORIGIN:
                # if the origin element is larger in the previous comparison, which means
                # the target element has already accumulated larger than origin element, so
                # we need to offload the origin dims and tgt dims into the reshape_mapping_dict.
                reshape_mapping_dict[tuple(origin_dims)] = tuple(tgt_dims)
                tgt_dimension_size = tgt_dimension_size // original_dimension_size
                tgt_dims = [tgt_len - tgt_index - 1]
                original_dimension_size = origin_shape[origin_index]
                origin_dims = [origin_len - origin_index - 1, origin_len - origin_index]
                # reset the previous_label after offloading the origin dims and tgt dims
                previous_label = PreviousStatus.RESET
            else:
                # accumulate the original_dimension_size until original_dimension_size larger than tgt_dimension_size
                original_dimension_size *= origin_shape[origin_index]
                origin_dims.append(origin_len - origin_index - 1)
                previous_label = PreviousStatus.TGT

    return reshape_mapping_dict


def check_keep_sharding_status(
    input_dim_partition_dict: Dict[int, List[int]], reshape_mapping_dict: Dict[Tuple[int], Tuple[int]]
) -> bool:
    """
    This method is used to check whether the reshape operation could implement without converting
    the input to fully replicated status.

    Rule:
        For a sharded dimension of input tensor, if it is not the minimum element of the input tuple,
        the function will return false.
        To illustrate this issue, there are two cases to analyze:
        1. no sharded dims in the input tuple: we could do the reshape operation safely just as the normal
        operation without distributed tensor.
        2. sharded dims in the input tuple: the sharded dim must be the minimum element, then during shape
        consistency process, torch.cat will be implemented on the sharded dim, and everything after the sharded
        dim get recovered.

    Examples:
        # the second dimension of the input has been sharded.
        input_dim_partition_dict = {1: [1]}
        origin_shape = torch.Size([8, 4, 2])
        tgt_shape = torch.Size([2, 4, 8])
        reshape_mapping_dict = detect_reshape_mapping(origin_shape, tgt_shape)
        # {(2, 1): (2,), (0,): (1, 0)}
        # the sharded dim of input is 1, which is the minimum element of the tuple (2, 1),
        # so we do not have to convert the input to fully replicated status.
        print(check_keep_sharding_status(input_dim_partition_dict, reshape_mapping_dict))

    Output:
        True
    """
    sharded_dims = list(input_dim_partition_dict.keys())
    for input_dims in reshape_mapping_dict.keys():
        # if input_dims has no element, we could just skip this iteration.
        if len(input_dims) == 0:
            continue
        min_element = min(input_dims)
        for dim in input_dims:
            # FIX: compare by value, not identity. The original `dim is not min_element`
            # only worked because CPython happens to hand back the tuple's own int object;
            # `!=` expresses the intended semantics and is implementation-independent.
            if dim in sharded_dims and dim != min_element:
                return False
    return True


def infer_output_dim_partition_dict(
    input_dim_partition_dict: Dict[int, List[int]], reshape_mapping_dict: Dict[Tuple[int], Tuple[int]]
) -> Dict[int, List[int]]:
    """
    This method is used to infer the output dim partition dict for a reshape operation, given the
    input dim partition dict and reshape mapping dict.

    Note: the return type annotation was corrected to match the actual return value
    (a mapping from a single output dim to its mesh dims).
    """
    assert check_keep_sharding_status(
        input_dim_partition_dict, reshape_mapping_dict
    ), "we only infer output dim partition dict for the reshape operation could keep sharding spec."
    sharded_dims = list(input_dim_partition_dict.keys())
    output_dim_partition_dict = {}
    for input_dims, output_dims in reshape_mapping_dict.items():
        for dim in input_dims:
            if dim in sharded_dims:
                # the sharded input dim maps onto the smallest (leading) output dim of the group
                output_dim_partition_dict[min(output_dims)] = input_dim_partition_dict[dim]
                # we could break because input dims cannot contain two sharded dims, otherwise
                # the keep sharding status check will fail.
                break
    return output_dim_partition_dict
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/utils/misc.py
colossalai/auto_parallel/tensor_shard/utils/misc.py
import functools
from typing import Any, Callable, Tuple, Type, Union

import torch

from colossalai.logging import get_dist_logger
from colossalai.tensor.sharding_spec import ShardingSpec, ShardingSpecException

__all__ = ["ignore_sharding_exception", "pytree_map"]


def ignore_sharding_exception(func):
    """
    A function wrapper to handle the ShardingSpecException in the function.
    If ShardingSpecException occurs, this function will return None.

    Usage:
        # mute the assertion error in the function
        @ignore_sharding_exception
        def do_something():
            ...
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            logger = get_dist_logger()
            rst = func(*args, **kwargs)
            return rst
        except ShardingSpecException as e:
            # swallow only sharding-spec failures; any other exception still propagates
            logger.debug(e)
            return None

    return wrapper


def check_sharding_spec_validity(sharding_spec: ShardingSpec, tensor: torch.Tensor):
    """
    This function checks whether the ShardingSpec is valid for the physical tensor.
    This check includes 3 items:
    1. the sharding spec covers all dimensions of the physical tensor
    2. the sharding spec for each dimension is divisible by the number of devices.
    3. the sharding spec's entire shape must match the tensor shape
    """
    # make sure all dims are covered in sharding spec
    sharding_len = len(sharding_spec.sharding_sequence)
    tensor_num_dim = tensor.dim()
    # NOTE(review): assumes a 2D device mesh (rows x cols) — confirm against DeviceMesh usage
    num_devices_in_col = sharding_spec.device_mesh.shape[0]
    num_devices_in_row = sharding_spec.device_mesh.shape[1]
    assert (
        sharding_len == tensor_num_dim
    ), f"The ShardingSpec ({sharding_spec.sharding_sequence}) is created for {sharding_len}-dimension tensor, but the given tensor is {tensor_num_dim}-dimension ({tensor.shape})."

    # make sure the sharding is valid for each dim
    for i in range(tensor_num_dim):
        dim_size = tensor.shape[i]
        dim_spec = sharding_spec.sharding_sequence[i]

        if str(dim_spec).startswith("S"):
            # a sharded dim spec looks like "S0", "S1" or "S01"; the digits name the mesh axes
            devices_str = str(dim_spec).lstrip("S")
            num_devices = 1

            if "0" in devices_str:
                num_devices *= num_devices_in_col

            if "1" in devices_str:
                num_devices *= num_devices_in_row

            assert (
                dim_size >= num_devices and dim_size % num_devices == 0
            ), f"The dimension at index {i} has value {dim_size}, but it is sharded over {num_devices} devices."

    # make sure the entire shape matches the physical tensor shape
    assert (
        sharding_spec.entire_shape == tensor.shape
    ), f"The entire_shape of the sharding spec {sharding_spec.entire_shape} does not match the tensor shape {tensor.shape}"


def pytree_map(obj: Any, fn: Callable, process_types: Union[Type, Tuple[Type]] = (), map_all: bool = False) -> Any:
    """process object recursively, like pytree

    Args:
        obj (:class:`Any`): object to process
        fn (:class:`Callable`): a function to process subobject in obj
        process_types (:class: `type | tuple[type]`): types to determine the type to process
        map_all (:class: `bool`): if map_all is True, then any type of element will use fn

    Returns:
        :class:`Any`: returns have the same structure of `obj` and type in process_types after map of `fn`
    """
    if isinstance(obj, dict):
        return {k: pytree_map(obj[k], fn, process_types, map_all) for k in obj}
    elif isinstance(obj, tuple):
        return tuple(pytree_map(o, fn, process_types, map_all) for o in obj)
    elif isinstance(obj, list):
        return list(pytree_map(o, fn, process_types, map_all) for o in obj)
    elif isinstance(obj, process_types):
        return fn(obj)
    else:
        # leaves outside process_types are only transformed when map_all is requested
        return fn(obj) if map_all else obj
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/utils/factory.py
colossalai/auto_parallel/tensor_shard/utils/factory.py
import copy
import operator
import warnings
from functools import reduce
from typing import Dict, List, Optional, Union

import torch
from torch.fx.node import Node
from torch.utils._pytree import tree_map

from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.tensor.sharding_spec import ShardingSpec

from ..constants import INFINITY_COST

__all__ = ["generate_sharding_spec", "generate_resharding_costs"]


def generate_sharding_spec(
    input_: Union[Node, torch.Tensor], device_mesh: DeviceMesh, dim_partition_dict: Dict[int, List[int]]
) -> ShardingSpec:
    """
    Generate the sharding spec of the tensor based on the given dim_partition_dict.

    Args:
        input_ (Union[Node, torch.Tensor]): the input can be a Node object or a PyTorch tensor. If a node is used,
            it will look for its meta data associated with this node.
        device_mesh (DeviceMesh): a DeviceMesh object which contains the meta information about the cluster.
        dim_partition_dict (Dict[int, List[int]]): a dictionary to specify the sharding specs, the key is the tensor
            dimension and the value is the mesh dimension for sharding.
    """
    if isinstance(input_, Node):
        assert hasattr(input_, "_meta_data"), f"The given node has no attribute _meta_data"
        meta_tensor = input_._meta_data
        assert meta_tensor is not None, "The given node's _meta_data attribute is None"
        shape = meta_tensor.shape
    elif isinstance(input_, torch.Tensor):
        shape = input_.shape
    else:
        raise TypeError(
            f"We cannot generate sharding spec for {type(input_)} type, only torch.fx.Node or torch.Tensor is expected."
        )
    # validate that every sharded dim is divisible by the product of its mesh dim sizes
    for dim_index, sharding_index_list in dim_partition_dict.items():
        sharding_list = [device_mesh.mesh_shape[sharding_index] for sharding_index in sharding_index_list]
        sharding_size = reduce(operator.mul, sharding_list, 1)
        assert (
            shape[dim_index] % sharding_size == 0
        ), f"we cannot shard the {dim_index} dimension of tensor into {sharding_size} partitions."

    sharding_spec = ShardingSpec(device_mesh=device_mesh, entire_shape=shape, dim_partition_dict=dim_partition_dict)
    return sharding_spec


def generate_resharding_costs(
    nodes: List[Node],
    sharding_specs: List[ShardingSpec],
    count_backward: Optional[bool] = True,
    dtype: Optional[torch.dtype] = None,
    index=None,
):
    """
    Compute the resharding costs with this specific strategy.

    Argument:
        nodes (List[Node]): a list of nodes
        sharding_spec_for_input(ShardingSpec): a list of ShardingSpec for the nodes.
        count_backward (Optional[bool]): whether to include the cost of resharding in the backward pass, default is True.
            False can be used for inference.
        dtype (Optional[torch.dtype]): the data type for cost calculation, default is None.
        index (Optional[int]): when a strategy's output sharding spec is a list, pick the spec at this index.
    """
    # The resharding_cost of weight is counted due to sharing weight cases.
    resharding_costs = {}
    size_per_elem_bytes = torch.tensor([], dtype=dtype).element_size()

    # shape consistency manager is a singleton class
    shape_consistency_manager = ShapeConsistencyManager()

    for input_node, input_spec in zip(nodes, sharding_specs):
        resharding_costs[input_node] = []
        for strategy in input_node.strategies_vector:
            input_sharding_spec = strategy.output_sharding_spec
            if not isinstance(input_sharding_spec, ShardingSpec):
                assert isinstance(input_sharding_spec, list), "only ShardingSpec or List[ShardingSpec] is expected."
                input_sharding_spec = input_sharding_spec[index]
            assert isinstance(input_sharding_spec, ShardingSpec), f"The input node should NOT be a tuple of tensor."

            try:
                # compute the resharding cost
                _, _, total_resharding_cost = shape_consistency_manager.shape_consistency(
                    input_sharding_spec, input_spec
                )

                # we need multiply the size of elem dtype to get correct communication cost
                resharding_cost = total_resharding_cost["total"] * size_per_elem_bytes
            except AssertionError as e:
                # infeasible conversion: penalize with an effectively infinite cost instead of failing
                warnings.warn(f"{e}")
                resharding_cost = INFINITY_COST
            resharding_costs[input_node].append(resharding_cost)
    return resharding_costs


def find_repeat_blocks(node_list: List[torch.fx.Node], root_module, common_length_threshold: int = 20):
    """
    Find the largest repeat blocks in the graph, whose length is larger than the threshold.

    Args:
        node_list (List[torch.fx.Node]): the graph nodes to be analyzed, in execution order.
        root_module: the root GraphModule, used to resolve call_module targets to submodule types.
        common_length_threshold (int): the threshold of the repeat block length.
    """

    # graph = gm.graph

    def _process_args(args):
        # normalize node args into hashable meta descriptions (tensor -> size, slice -> tuple)
        new_args = []
        for arg in args:
            if hasattr(arg, "_meta_data"):
                meta_data = arg._meta_data
            else:
                meta_data = arg

            def _process_arg(data):
                if isinstance(data, torch.Tensor):
                    data = data.size()
                elif isinstance(data, slice):
                    data = (data.start, data.step, data.stop)
                return data

            new_meta_data = tree_map(_process_arg, meta_data)
            new_args.append(new_meta_data)

        return new_args

    def _all_equal(check_list, check_fn):
        # True iff every element equals the last one under check_fn
        base_value = check_list[-1]
        for e in check_list:
            if not check_fn(e, base_value):
                return False
        return True

    def _check_node_list_equal(l1, l2):
        # two node windows are equal when their hash_keys match pairwise
        if len(l1) != len(l2):
            return False
        for node1, node2 in zip(l1, l2):
            if hash(node1.hash_key) != hash(node2.hash_key):
                return False
        return True

    def _check_node_equal(node1, node2):
        if hash(node1.hash_key) == hash(node2.hash_key):
            return True
        return False

    # attach a structural hash key to every node: (op, resolved target, normalized args)
    for index, node in enumerate(node_list):
        if node.op == "call_module":
            target = node.target
            submod = root_module.get_submodule(target)
            submod_type = type(submod)
            # use the submodule type (not its path) so identical layers in different blocks match
            target = submod_type
        else:
            target = node.target

        new_args = _process_args(node.args)

        if node.op != "get_attr":
            hash_key = (node.op, target, *new_args)
        else:
            hash_key = (node.op,)

        setattr(node, "hash_key", hash_key)

    # bucket node indices by their hash value to find candidate repeat starts
    hash_value_to_node_dict = {}

    for index, node in enumerate(node_list):
        hash_value = hash(node.hash_key)
        if hash_value not in hash_value_to_node_dict:
            hash_value_to_node_dict[hash_value] = []
        hash_value_to_node_dict[hash_value].append(index)

    # node_list = list(graph.nodes)
    node_list_start = 0
    max_common_length = common_length_threshold
    common_blocks_index = []

    for index, node in enumerate(node_list):
        # the comparison will be triggered if a common node appears
        if len(hash_value_to_node_dict[hash(node.hash_key)]) >= 2:
            start_index_list = hash_value_to_node_dict[hash(node.hash_key)]
            check_block_list = [node_list[start : start + max_common_length] for start in start_index_list]
            common_label = True
            if not _all_equal(check_block_list, _check_node_list_equal):
                common_label = False

            if common_label:
                common_blocks_index = copy.deepcopy(start_index_list)
                # greedily extend the common window beyond the threshold until the blocks diverge
                max_step = len(node_list) - common_blocks_index[-1] - max_common_length - 1
                for i in range(max_step):
                    # add assertion to avoid out of index
                    next_node_list = [node_list[index + max_common_length + i] for index in start_index_list]
                    if not _all_equal(next_node_list, _check_node_equal):
                        max_step = i
                        break
                max_common_length += max_step
            node_list_start += max_common_length

    # recover common subgraph from the index
    common_blocks = []
    for start in common_blocks_index:
        common_blocks.append(node_list[start : start + max_common_length])

    return common_blocks
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/utils/__init__.py
colossalai/auto_parallel/tensor_shard/utils/__init__.py
from .broadcast import (
    BroadcastType,
    comm_actions_for_oprands,
    get_broadcast_shape,
    is_broadcastable,
    recover_sharding_spec_for_broadcast_shape,
)
from .factory import generate_resharding_costs, generate_sharding_spec
from .misc import check_sharding_spec_validity, ignore_sharding_exception, pytree_map
from .reshape import check_keep_sharding_status, detect_reshape_mapping, infer_output_dim_partition_dict
from .sharding import (
    enumerate_all_possible_1d_sharding,
    enumerate_all_possible_2d_sharding,
    generate_sharding_size,
    transpose_partition_dim,
    update_partition_dim,
)

# Public API of the tensor-shard utils subpackage.
__all__ = [
    "BroadcastType",
    "get_broadcast_shape",
    "is_broadcastable",
    "recover_sharding_spec_for_broadcast_shape",
    "generate_resharding_costs",
    "generate_sharding_spec",
    "ignore_sharding_exception",
    # FIX: a missing comma previously fused the next two entries into the single bogus
    # name "check_sharding_spec_validitytranspose_partition_dim" via implicit string
    # concatenation, so neither symbol was exported by `from ... import *`.
    "check_sharding_spec_validity",
    "transpose_partition_dim",
    "update_partition_dim",
    "enumerate_all_possible_1d_sharding",
    "enumerate_all_possible_2d_sharding",
    "generate_sharding_size",
    "comm_actions_for_oprands",
    "pytree_map",
    "detect_reshape_mapping",
    "check_keep_sharding_status",
    "infer_output_dim_partition_dict",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/utils/sharding.py
colossalai/auto_parallel/tensor_shard/utils/sharding.py
import operator
from copy import deepcopy
from functools import reduce
from typing import Dict

import torch

from colossalai.tensor.sharding_spec import ShardingSpec

__all__ = [
    "transpose_partition_dim",
    "update_partition_dim",
    "enumerate_all_possible_1d_sharding",
    "enumerate_all_possible_2d_sharding",
    "generate_sharding_size",
]


def transpose_partition_dim(sharding_spec: ShardingSpec, dim1: int, dim2: int) -> ShardingSpec:
    """
    Switch the sharding mesh dimensions for two tensor dimensions. This operation is in-place.

    Args:
        sharding_spec (ShardingSpec): the sharding spec for which partition dim are switched
        dim1 (int): the tensor dimension to switch
        dim2 (int): the tensor dimension to switch
    """
    assert len(sharding_spec.entire_shape) >= 2, "The entire_shape of the sharding spec must have at least 2 dimensions"
    dim_partition_dict = sharding_spec.dim_partition_dict

    # transpose the dim partition
    dim1_partition = dim_partition_dict.pop(dim1, None)
    dim2_partition = dim_partition_dict.pop(dim2, None)

    if dim1_partition:
        dim_partition_dict[dim2] = dim1_partition

    if dim2_partition:
        dim_partition_dict[dim1] = dim2_partition

    # get the transposed shape
    new_shape = list(sharding_spec.entire_shape[:])
    new_shape[dim2], new_shape[dim1] = new_shape[dim1], new_shape[dim2]
    new_shape = torch.Size(new_shape)

    # re-init the sharding spec
    # NOTE: __init__ is called directly to mutate the existing spec object in place
    sharding_spec.__init__(sharding_spec.device_mesh, new_shape, dim_partition_dict)
    return sharding_spec


def update_partition_dim(
    sharding_spec: ShardingSpec, dim_mapping: Dict[int, int], physical_shape: torch.Size, inplace: bool = False
):
    """
    This method is used to update the partition dim dict from the logical one to the physical one.

    Args:
        sharding_spec (ShardingSpec): the sharding spec for which partition dims are updated
        dim_mapping (Dict[int, int]): the mapping from the logical tensor dimension to the physical tensor dimension
        physical_shape (torch.Size): the physical shape for the tensor
        inplace (bool): whether to mutate the given spec or work on a deep copy (default False)
    """
    if inplace:
        current_sharding_spec = sharding_spec
    else:
        current_sharding_spec = deepcopy(sharding_spec)

    old_dim_partition_dict = current_sharding_spec.dim_partition_dict
    new_dim_partition_dict = {}

    # assign new dim
    for old_dim, new_dim in dim_mapping.items():
        mesh_dims = old_dim_partition_dict.pop(old_dim)
        new_dim_partition_dict[new_dim] = mesh_dims

    # carry over the unmapped dims, guarding against collisions with remapped ones
    for tensor_dim, mesh_dims in old_dim_partition_dict.items():
        if tensor_dim in new_dim_partition_dict:
            raise KeyError(f"There are duplicated entries for the tensor sharding dimension {tensor_dim}")
        else:
            new_dim_partition_dict[tensor_dim] = mesh_dims

    # update sharding spec
    current_sharding_spec.__init__(
        device_mesh=sharding_spec.device_mesh, entire_shape=physical_shape, dim_partition_dict=new_dim_partition_dict
    )
    return current_sharding_spec


def enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, dim_size):
    """
    Enumerate every dim_partition_dict that shards a dim_size-dimensional tensor over two mesh axes:
    each ordered pair of distinct tensor dims, plus flattening both mesh axes onto one tensor dim.
    """
    dim_partition_list = []
    # enumerate all the 2D sharding cases
    for i in range(dim_size):
        for j in range(i + 1, dim_size):
            dim_partition_dict_0 = {i: [mesh_dim_0], j: [mesh_dim_1]}
            dim_partition_dict_1 = {i: [mesh_dim_1], j: [mesh_dim_0]}
            dim_partition_list.append(dim_partition_dict_0)
            dim_partition_list.append(dim_partition_dict_1)
    for i in range(dim_size):
        dim_partition_dict_flatten = {i: [mesh_dim_0, mesh_dim_1]}
        dim_partition_list.append(dim_partition_dict_flatten)

    return dim_partition_list


def enumerate_all_possible_1d_sharding(mesh_dim_0, dim_size):
    """
    Enumerate every dim_partition_dict that shards exactly one tensor dim over the given mesh axis.
    """
    dim_partition_list = []
    # enumerate all the 1D sharding cases
    for i in range(dim_size):
        dim_partition_dict_0 = {i: [mesh_dim_0]}
        dim_partition_list.append(dim_partition_dict_0)

    return dim_partition_list


def generate_sharding_size(dim_partition_dict, device_mesh):
    """
    Return the total number of shards implied by a dim_partition_dict, i.e. the product of
    the device-mesh sizes along every mesh axis used for sharding.
    """
    total_sharding_size = 1
    for mesh_dim_list in dim_partition_dict.values():
        mesh_dim_sharding_size = [device_mesh.shape[mesh_dim] for mesh_dim in mesh_dim_list]
        sharding_size = reduce(operator.mul, mesh_dim_sharding_size)
        total_sharding_size *= sharding_size

    return total_sharding_size
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/default_reshape_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/default_reshape_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import MetaInfoNodeHandler
from .registry import operator_registry
from .strategy import DefaultReshapeGenerator, StrategyGenerator

__all__ = ["DefaultReshapeHandler"]


@operator_registry.register(torch.flatten)
@operator_registry.register(torch.Tensor.unsqueeze)
@operator_registry.register(torch.nn.AdaptiveAvgPool2d)
class DefaultReshapeHandler(MetaInfoNodeHandler):
    """
    Node handler producing sharding strategies for generic reshape-like
    operators (flatten, unsqueeze, adaptive average pooling).
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        mapping = self.get_operation_data_mapping()
        return [DefaultReshapeGenerator(mapping, self.device_mesh, self.node.args[0])]

    def infer_logical_shape(self, data):
        """
        Infer the logical shape of an operand.

        The operand may be a single tensor or a tuple of tensors; in the
        tuple case, a tuple of the member shapes is returned.
        """
        if isinstance(data, torch.Tensor):
            return data.shape
        assert isinstance(data, tuple), "input_data should be a tuple of tensor or a tensor."
        for tensor in data:
            assert isinstance(tensor, torch.Tensor), "input_data should be a tuple of tensor or a tensor."
        return tuple(tensor.shape for tensor in data)

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # the input may itself be a parameter (e.g. reshaping a weight); tag it
        # as PARAM so the solver treats it accordingly
        input_meta = self.node.args[0]._meta_data
        data_type = (
            OperationDataType.PARAM
            if isinstance(input_meta, torch.nn.parameter.Parameter)
            else OperationDataType.ARG
        )

        input_operand = OperationData(
            name=str(self.node.args[0]),
            type=data_type,
            data=input_meta,
            logical_shape=self.infer_logical_shape(input_meta),
        )
        output_meta = self.node._meta_data
        output_operand = OperationData(
            name=str(self.node),
            type=OperationDataType.OUTPUT,
            data=output_meta,
            logical_shape=self.infer_logical_shape(output_meta),
        )
        return {"input": input_operand, "output": output_operand}
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/tensor_constructor_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/tensor_constructor_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator
from .strategy.tensor_constructor_generator import TensorConstructorGenerator

__all__ = ["TensorConstructorHandler"]


@operator_registry.register(torch.arange)
class TensorConstructorHandler(NodeHandler):
    """
    Node handler producing sharding strategies for tensor-constructor
    operations such as torch.arange (no tensor inputs, only an output).
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        mapping = self.get_operation_data_mapping()
        return [TensorConstructorGenerator(mapping, self.device_mesh)]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # constructors have no tensor operands; the mapping carries only the output
        output_operand = OperationData(
            name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data
        )
        return {"output": output_operand}
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/layer_norm_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/layer_norm_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import MetaInfoModuleHandler
from .registry import operator_registry
from .strategy import LayerNormGenerator, StrategyGenerator

__all__ = ["LayerNormModuleHandler"]


@operator_registry.register(torch.nn.LayerNorm)
class LayerNormModuleHandler(MetaInfoModuleHandler):
    """
    Node handler producing sharding strategies for the nn.LayerNorm module.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        mapping = self.get_operation_data_mapping()
        return [LayerNormGenerator(mapping, self.device_mesh)]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # use transposed shape for strategies
        # the strategies will be transformed back to its original shape in self.post_process
        weight = self.named_parameters["weight"]
        mapping = {
            "input": OperationData(
                name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data
            ),
            "other": OperationData(
                name="weight",
                type=OperationDataType.PARAM,
                data=weight,
                logical_shape=weight.shape,
            ),
            "output": OperationData(
                name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data
            ),
        }
        # bias is optional (elementwise_affine without bias); only map it when present
        bias = self.named_parameters["bias"]
        if bias is not None:
            mapping["bias"] = OperationData(name="bias", type=OperationDataType.PARAM, data=bias)
        return mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/registry.py
colossalai/auto_parallel/tensor_shard/node_handler/registry.py
class Registry:
    """A simple source -> handler registry.

    Handlers are attached with the ``register`` decorator and looked up with
    ``get``; ``has`` tests membership. Sources are arbitrary hashables
    (typically torch operators or modules).
    """

    def __init__(self, name):
        # human-readable registry name, used in the lookup error message
        self.name = name
        # mapping from source to the registered callable
        self.store = {}

    def register(self, source):
        """Return a decorator registering the wrapped callable under *source*.

        *source* may be a list/tuple, in which case the callable is registered
        under every element.
        """

        def wrapper(func):
            keys = source if isinstance(source, (list, tuple)) else (source,)
            for key in keys:
                self.store[key] = func
            return func

        return wrapper

    def get(self, source):
        """Look up the callable registered under *source*."""
        assert source in self.store, f"{source} not found in the {self.name} registry"
        target = self.store[source]
        return target

    def has(self, source):
        """Return True when *source* has a registered callable."""
        return source in self.store


operator_registry = Registry("operator")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/matmul_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/matmul_handler.py
import operator
from abc import ABC, abstractmethod
from copy import deepcopy
from enum import Enum
from functools import reduce
from typing import Dict, List, Union

import torch

from colossalai.auto_parallel.tensor_shard.utils.broadcast import (
    BroadcastType,
    get_broadcast_dim_info,
    get_broadcast_shape,
)
from colossalai.tensor.sharding_spec import ShardingSpecException

from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy
from ..utils import recover_sharding_spec_for_broadcast_shape
from .node_handler import MetaInfoNodeHandler
from .registry import operator_registry
from .strategy import (
    BatchedMatMulStrategyGenerator,
    DotProductStrategyGenerator,
    LinearProjectionStrategyGenerator,
    MatVecStrategyGenerator,
    StrategyGenerator,
)


class MatMulType(Enum):
    """
    The MatMulType is categorized into 4 types based on the reference of torch.matmul
    in https://pytorch.org/docs/stable/generated/torch.matmul.html.

    DOT: dot product, both tensors are 1D, these two tensors need to have the same number of elements
    MM: matrix-matrix product, both tensors are 2D or the 1st tensor is 1D and the 2nd tensor is 2D
    MV: matrix-vector product: the 1st tensor is 2D and the 2nd tensor is 1D
    BMM: batched matrix-matrix multiplication, one tensor is at least 1D and the other is at least 3D
    """

    DOT = 0
    MM = 1
    MV = 2
    BMM = 3


def get_matmul_type(input_dim: int, other_dim: int):
    """
    Determine which type of matmul operation should be executed for the given tensor dimensions.

    Args:
        input_dim (int): the number of dimensions for the input tensor
        other_dim (int): the number of dimensions for the other tensor

    Raises:
        ValueError: if the dimensionality pair does not map to any torch.matmul mode.
    """
    if input_dim == 1 and other_dim == 1:
        matmul_type = MatMulType.DOT
    elif input_dim in [1, 2] and other_dim == 2:
        matmul_type = MatMulType.MM
    elif input_dim == 2 and other_dim == 1:
        matmul_type = MatMulType.MV
    elif input_dim >= 1 and other_dim >= 1 and (input_dim > 2 or other_dim > 2):
        matmul_type = MatMulType.BMM
    else:
        raise ValueError(
            f"The input and other tensors are of {input_dim} and {other_dim} which cannot used to execute matmul operation"
        )
    return matmul_type


class BmmTransform(ABC):
    """
    BmmTransform is an abstraction of the shape conversion between logical and physical
    operation data during the strategy generation.
    """

    @abstractmethod
    def apply(self, shape_mapping: Dict[str, List[int]]):
        pass

    @abstractmethod
    def recover(self, op_data_mapping: Dict[str, OperationData], strategy: ShardingStrategy):
        pass


class Padder(BmmTransform):
    """
    Add padding to the matrix dimensions for batched matrix multiplication.
    """

    def __init__(self) -> None:
        # keep the padding dim, op_name -> padded_dim
        self.padded_dim_mapping = {}

    def apply(self, shape_mapping: Dict[str, List[int]]):
        mapping_copy = deepcopy(shape_mapping)
        input_shape = mapping_copy["input"]
        other_shape = mapping_copy["other"]

        if len(input_shape) == 1:
            # if the input is a 1D tensor, 1 is prepended to its shape
            # and it will be removed afterwards
            input_shape.insert(0, 1)
            self.padded_dim_mapping["input"] = -2
            self.padded_dim_mapping["output"] = -2
        elif len(other_shape) == 1:
            # if the other is a 1D tensor, 1 is appended to its shape
            # and it will be removed afterwards.
            # BUGFIX: the original `other_shape = other_shape.append(1)` rebound the
            # local to None (list.append returns None); mutate in place instead.
            other_shape.append(1)
            self.padded_dim_mapping["other"] = -1
            self.padded_dim_mapping["output"] = -1
        return mapping_copy

    def recover(self, op_data_mapping: Dict[str, OperationData], strategy: ShardingStrategy):
        def _remove_padded_dim(key, strategy):
            # strip the previously padded unit dimension out of the sharding spec
            op_data = op_data_mapping[key]
            sharding_spec = strategy.get_sharding_spec_by_name(op_data.name)
            tensor_shape = list(sharding_spec.entire_shape)
            dim_partition_list = [None] * len(tensor_shape)

            # padded dim is a negative number as the padded dim must be a matrix dim
            padded_dim = self.padded_dim_mapping[key]

            # compute the new dim partition
            for tensor_dim, mesh_dims in sharding_spec.dim_partition_dict.items():
                dim_partition_list[tensor_dim] = mesh_dims
            dim_partition_list.pop(padded_dim)
            unpadded_dim_partition_list = {k: v for k, v in enumerate(dim_partition_list) if v is not None}

            # compute unpadded tensor shape
            tensor_shape.pop(padded_dim)

            assert tensor_shape == list(op_data.data.shape), f"{tensor_shape} vs {list(op_data.data.shape)}"

            # update sharding spec
            sharding_spec.__init__(sharding_spec.device_mesh, tensor_shape, unpadded_dim_partition_list)

        # enumerate all sharding strategies
        strategies = []
        try:
            strategy_copy = strategy.clone()

            # only one of input and other will be padded
            if "input" in self.padded_dim_mapping:
                _remove_padded_dim("input", strategy_copy)
                _remove_padded_dim("output", strategy_copy)
            elif "other" in self.padded_dim_mapping:
                _remove_padded_dim("other", strategy_copy)
                _remove_padded_dim("output", strategy_copy)

            strategies.append(strategy_copy)
        except ShardingSpecException:
            pass
        return strategies


class Broadcaster(BmmTransform):
    """
    Broadcast the non-matrix dimensions for batched matrix multiplication.
    """

    def __init__(self) -> None:
        self.broadcast_dim_info = {}

    def apply(self, shape_mapping: Dict[str, List[int]]):
        mapping_copy = shape_mapping.copy()

        # get shapes
        input_shape = mapping_copy["input"]
        other_shape = mapping_copy["other"]

        # sanity check
        assert len(input_shape) > 1 and len(other_shape) > 1

        # broadcast the batch dim and record
        bcast_non_matrix_dims = get_broadcast_shape(input_shape[:-2], other_shape[:-2])

        # store the broadcast dim info
        input_broadcast_dim_info = get_broadcast_dim_info(bcast_non_matrix_dims, input_shape[:-2])
        other_broadcast_dim_info = get_broadcast_dim_info(bcast_non_matrix_dims, other_shape[:-2])
        self.broadcast_dim_info["input"] = input_broadcast_dim_info
        self.broadcast_dim_info["other"] = other_broadcast_dim_info

        # create the full logical shape
        input_shape = bcast_non_matrix_dims + input_shape[-2:]
        other_shape = bcast_non_matrix_dims + other_shape[-2:]
        assert len(input_shape) == len(other_shape)
        mapping_copy["input"] = input_shape
        mapping_copy["other"] = other_shape

        return mapping_copy

    def recover(self, op_data_mapping: Dict[str, OperationData], strategy: ShardingStrategy):
        # remove sharding on the broadcast dim
        def _remove_sharding_on_broadcast_dim(key, strategy):
            op_data = op_data_mapping[key]
            sharding_spec = strategy.get_sharding_spec_by_name(op_data.name)
            tensor_shape = list(sharding_spec.entire_shape)

            for dim_idx, broadcast_type in self.broadcast_dim_info[key].items():
                if broadcast_type == BroadcastType.MULTIPLE:
                    # if the dim is originally 1 and multiplied during broadcast
                    # we set its sharding to R
                    # e.g. [1, 2, 4] x [4, 4, 8] -> [4, 2, 8]
                    # the dim 0 of [1, 2, 4] is multiplied to 4
                    tensor_shape[dim_idx] = 1
                elif broadcast_type == BroadcastType.PADDING:
                    # if the dim is padded
                    # we remove its sharding
                    tensor_shape[dim_idx] = None

            tensor_shape_before_broadcast = [dim for dim in tensor_shape if dim is not None]

            physical_sharding_spec, removed_dims = recover_sharding_spec_for_broadcast_shape(
                logical_sharding_spec=sharding_spec,
                logical_shape=sharding_spec.entire_shape,
                physical_shape=tensor_shape_before_broadcast,
            )
            strategy.sharding_specs[op_data] = physical_sharding_spec

        # enumerate all sharding strategies
        strategies = []
        try:
            strategy_copy = strategy.clone()
            _remove_sharding_on_broadcast_dim("input", strategy_copy)
            _remove_sharding_on_broadcast_dim("other", strategy_copy)
            strategies.append(strategy_copy)
        except ShardingSpecException:
            pass
        return strategies


class Viewer(BmmTransform):
    """
    Change the shape of the tensor from N-D to 3D
    """

    def __init__(self) -> None:
        self.batch_dims_before_view = None

    def apply(self, shape_mapping: Dict[str, List[int]]):
        mapping_copy = shape_mapping.copy()
        self.batch_dims_before_view = list(mapping_copy["input"][:-2])

        # get shapes
        input_shape = shape_mapping["input"]
        other_shape = shape_mapping["other"]

        # view to 3d tensor
        assert len(input_shape) >= 3 and len(other_shape) >= 3
        input_shape = [reduce(operator.mul, input_shape[:-2])] + input_shape[-2:]
        other_shape = [reduce(operator.mul, other_shape[:-2])] + other_shape[-2:]
        output_shape = input_shape[:2] + other_shape[2:]
        mapping_copy["input"] = input_shape
        mapping_copy["other"] = other_shape
        mapping_copy["output"] = output_shape
        return mapping_copy

    def recover(self, op_data_mapping: Dict[str, OperationData], strategy: ShardingStrategy):
        # get operation data
        def _update_sharding_spec(key, strategy, physical_batch_dim):
            """
            Map the logical batch dim to the physical batch dim
            """
            op_data = op_data_mapping[key]
            sharding_spec = strategy.get_sharding_spec_by_name(op_data.name)
            dim_partition_dict = sharding_spec.dim_partition_dict
            entire_shape = sharding_spec.entire_shape

            # update the dimension index for the matrix dimensions
            if 2 in dim_partition_dict:
                dim_partition_dict[len(self.batch_dims_before_view) + 1] = dim_partition_dict.pop(2)
            if 1 in dim_partition_dict:
                dim_partition_dict[len(self.batch_dims_before_view)] = dim_partition_dict.pop(1)

            # map the logical batch dim to physical batch dim
            if 0 in dim_partition_dict:
                batch_dim_shard = dim_partition_dict.pop(0)
                dim_partition_dict[physical_batch_dim] = batch_dim_shard

            # the new shape will be the batch dims + the last 2 matrix dims
            shape_before_view = self.batch_dims_before_view + list(entire_shape[-2:])
            sharding_spec.__init__(sharding_spec.device_mesh, shape_before_view, dim_partition_dict)

        num_batch_dim_before_view = len(self.batch_dims_before_view)

        # enumerate all sharding strategies
        strategies = []
        for i in range(num_batch_dim_before_view):
            # create a new strategy
            strategy_copy = strategy.clone()
            try:
                _update_sharding_spec("input", strategy_copy, i)
                _update_sharding_spec("other", strategy_copy, i)
                _update_sharding_spec("output", strategy_copy, i)
                strategies.append(strategy_copy)
            except ShardingSpecException:
                continue
        return strategies


def _get_bmm_logical_shape(input_shape, other_shape, transforms):
    """
    Compute the logical shapes for BMM operation. BMM has a general representation
    [b, i, k] = [b, i, j] x [b, j, k]

    The dimension b is called non-matrix (batch) dimension and the remaining dimensions are called matrix dimensions
    The logical shape for the bmm operands will undergo three stages
        1. append/prepend the 1 to the 1D tensor if there is any
        2. broadcast the non-matrix dimensions
        3. reshape to 3 dimensions
    """
    shape_mapping = {"input": input_shape, "other": other_shape}

    for transform in transforms:
        shape_mapping = transform.apply(shape_mapping)

    input_shape = shape_mapping.get("input", None)
    other_shape = shape_mapping.get("other", None)
    output_shape = shape_mapping.get("output", None)

    return input_shape, other_shape, output_shape


@operator_registry.register(torch.matmul)
@operator_registry.register(torch.Tensor.matmul)
class MatMulHandler(MetaInfoNodeHandler):
    """
    The MatMulHandler is a node handler which handles the sharding strategy generation for the matmul operation.
    According to https://pytorch.org/docs/stable/generated/torch.matmul.html, the operations will vary depending on
    the operands.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # check which type of operation this matmul will call
        self.input_meta_data = self.node.args[0]._meta_data
        self.other_meta_data = self.node.args[1]._meta_data
        self.output_meta_data = self.node._meta_data

        input_dim = self.input_meta_data.dim()
        other_dim = self.other_meta_data.dim()
        self.matmul_type = get_matmul_type(input_dim, other_dim)

        if self.matmul_type == MatMulType.BMM:
            # bmm operation can possibly involve padding, broadcasting and view
            # these transforms will be used to create logical shape and
            # recover physical sharding spec
            self.transforms = [Padder(), Broadcaster(), Viewer()]
        else:
            self.transforms = None

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        generators = []
        op_data_mapping = self.get_operation_data_mapping()
        if self.matmul_type == MatMulType.BMM:
            generators.append(BatchedMatMulStrategyGenerator(op_data_mapping, self.device_mesh))
        elif self.matmul_type == MatMulType.DOT:
            generators.append(DotProductStrategyGenerator(op_data_mapping, self.device_mesh))
        elif self.matmul_type == MatMulType.MV:
            generators.append(MatVecStrategyGenerator(op_data_mapping, self.device_mesh))
        elif self.matmul_type == MatMulType.MM:
            generators.append(
                LinearProjectionStrategyGenerator(op_data_mapping, self.device_mesh, linear_projection_type="linear")
            )
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # dispatch to the matmul-mode-specific logical shape computation
        logical_shape_func = {
            MatMulType.DOT: self._get_logical_shape_for_dot,
            MatMulType.MM: self._get_logical_shape_for_mm,
            MatMulType.MV: self._get_logical_shape_for_mv,
            MatMulType.BMM: self._get_logical_shape_for_bmm,
        }
        logical_shapes = logical_shape_func[self.matmul_type]()
        op_data_mapping = self._get_op_data_mapping(*logical_shapes)
        return op_data_mapping

    def _get_op_data_mapping(self, input_logical_shape, other_logical_shape, output_logical_shape):
        # convert list to torch.Size
        if input_logical_shape:
            input_logical_shape = torch.Size(input_logical_shape)

        if other_logical_shape:
            other_logical_shape = torch.Size(other_logical_shape)

        if output_logical_shape:
            output_logical_shape = torch.Size(output_logical_shape)

        # create op data
        input_op_data = OperationData(
            name=str(self.node.args[0]),
            type=OperationDataType.ARG,
            data=self.input_meta_data,
            logical_shape=input_logical_shape,
        )
        other_op_data = OperationData(
            name=str(self.node.args[1]),
            type=OperationDataType.ARG,
            data=self.other_meta_data,
            logical_shape=other_logical_shape,
        )
        output_op_data = OperationData(
            name=str(self.node),
            type=OperationDataType.OUTPUT,
            data=self.output_meta_data,
            logical_shape=output_logical_shape,
        )

        mapping = {"input": input_op_data, "other": other_op_data, "output": output_op_data}
        return mapping

    def _get_logical_shape_for_dot(self):
        """
        The operands for the dot operation have the same logical shape as the physical shape
        """
        return None, None, None

    def _get_logical_shape_for_mm(self):
        """
        We need to handle the input tensor for a matrix-matrix multiplication as the input
        tensor can be a 1D or 2D tensor. If it is a 1D tensor, 1 will be prepended to its shape
        (e.g. [4] -> [1, 4]).
        """
        if self.input_meta_data.dim() == 1:
            input_logical_shape = [1] + list(self.input_meta_data.shape)
            input_logical_shape = torch.Size(input_logical_shape)
        else:
            input_logical_shape = None
        return input_logical_shape, None, None

    def _get_logical_shape_for_mv(self):
        """
        No broadcasting or dim insertion occurs for matrix-vector operation.
        """
        return None, None, None

    def _get_logical_shape_for_bmm(self):
        input_physical_shape = list(self.input_meta_data.shape)
        other_physical_shape = list(self.other_meta_data.shape)
        return _get_bmm_logical_shape(input_physical_shape, other_physical_shape, self.transforms)

    def post_process(self, strategy: ShardingStrategy) -> Union[ShardingStrategy, List[ShardingStrategy]]:
        if self.matmul_type in [MatMulType.DOT, MatMulType.MV]:
            return strategy
        elif self.matmul_type == MatMulType.MM:
            if self.input_meta_data.dim() == 1:
                # if a 1 is prepended to the input shape (this occurs when input is a 1D tensor)
                # we need to remove that dim
                input_sharding_spec = strategy.get_sharding_spec_by_name(str(self.node.args[0]))
                input_physical_shape = self.node.args[0]._meta_data.shape
                dim_partition_dict = input_sharding_spec.dim_partition_dict

                # remove the partitioning in the dim 0
                if 0 in dim_partition_dict:
                    dim_partition_dict.pop(0, None)

                # move the partitioning in dim 1 to dim 0
                if -1 in dim_partition_dict:
                    shard = dim_partition_dict.pop(-1)
                    dim_partition_dict[0] = shard

                if 1 in dim_partition_dict:
                    shard = dim_partition_dict.pop(1)
                    dim_partition_dict[0] = shard

                # re-init the sharding spec
                input_sharding_spec.__init__(
                    input_sharding_spec.device_mesh,
                    entire_shape=input_physical_shape,
                    dim_partition_dict=dim_partition_dict,
                )
                return strategy
            else:
                return strategy
        elif self.matmul_type == MatMulType.BMM:
            op_data_mapping = self.get_operation_data_mapping()

            strategies = [strategy]
            # recover the physical sharding spec
            for transform in self.transforms[::-1]:
                recovered_strategies = []
                for strategy_ in strategies:
                    output = transform.recover(op_data_mapping, strategy_)
                    if isinstance(output, ShardingStrategy):
                        recovered_strategies.append(output)
                    elif isinstance(output, (list, tuple)):
                        recovered_strategies.extend(output)
                    else:
                        raise TypeError(
                            f"Found unexpected output type {type(output)} from the recover method of BmmTransform"
                        )
                strategies = recovered_strategies

            # BUGFIX: the loop variable previously shadowed the `strategies` list
            # (`for index, strategies in enumerate(strategies)`), so the method
            # returned only the LAST strategy instead of the full list.
            for index, recovered in enumerate(strategies):
                recovered.name = f"{recovered.name}_{index}"
            return strategies
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/where_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/where_handler.py
import copy
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy
from ..utils import recover_sharding_spec_for_broadcast_shape
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, WhereGenerator

__all__ = ["WhereHandler"]


@operator_registry.register(torch.where)
class WhereHandler(NodeHandler):
    """
    A WhereHandler which deals with the sharding strategies for torch.where.

    Strategies are generated against *logical* operands (all broadcast to the
    output shape); post_process maps the resulting sharding specs back onto the
    physical operands.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        # strategies are enumerated over the logical (broadcast) operands only
        logical_op_data_mapping, _ = self.get_operation_data_mapping()
        generators = []
        generators.append(WhereGenerator(logical_op_data_mapping, self.device_mesh))
        return generators

    def get_operation_data_mapping(self) -> "tuple[Dict[str, OperationData], Dict[str, OperationData]]":
        # Build physical operands for (condition, x, y, output), then derive a
        # logical counterpart for each by broadcasting to the output shape.
        # Returns (logical_mapping, physical_mapping).
        # use transposed shape for strategies
        # the strategies will be transformed back to its original shape in self.post_process
        physical_condition_operand = OperationData(
            name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data
        )
        physical_x_operand = OperationData(
            name=str(self.node.args[1]), type=OperationDataType.ARG, data=self.node.args[1]._meta_data
        )
        physical_y_operand = OperationData(
            name=str(self.node.args[2]), type=OperationDataType.ARG, data=self.node.args[2]._meta_data
        )
        physical_output = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data)
        physical_mapping = {
            "condition": physical_condition_operand,
            "x": physical_x_operand,
            "y": physical_y_operand,
            "output": physical_output,
        }
        # torch.where broadcasts every operand to the output shape, so the
        # output shape is the shared logical shape for all operands
        logical_shape_for_all = self.node._meta_data.shape
        logical_mapping = {}
        for key, physical_operand in physical_mapping.items():
            logical_mapping[key] = self.convert_physical_operand_to_logical_operand(
                physical_operand, logical_shape_for_all
            )

        return logical_mapping, physical_mapping

    def convert_physical_operand_to_logical_operand(self, physical_operand, target_shape):
        # deepcopy so the physical operand's logical_shape stays untouched
        logical_operand = copy.deepcopy(physical_operand)
        logical_operand.logical_shape = target_shape
        return logical_operand

    def post_process(self, strategy: ShardingStrategy):
        # Replace each logical operand's sharding spec in the strategy with a
        # physical one recovered from the broadcast shape, then rebuild the
        # strategy name from the physical sharding sequences.
        logical_op_data_mapping, physical_op_data_mapping = self.get_operation_data_mapping()
        for key in logical_op_data_mapping.keys():
            logical_sharding_spec = strategy.sharding_specs[logical_op_data_mapping[key]]
            logical_shape = logical_op_data_mapping[key].logical_shape
            physical_shape = physical_op_data_mapping[key].logical_shape
            physical_sharding_spec, removed_dims = recover_sharding_spec_for_broadcast_shape(
                logical_sharding_spec, logical_shape, physical_shape
            )
            strategy.sharding_specs.pop(logical_op_data_mapping[key])
            strategy.sharding_specs[physical_op_data_mapping[key]] = physical_sharding_spec
        strategy.name = f"{strategy.sharding_specs[physical_op_data_mapping['output']].sharding_sequence} = {strategy.sharding_specs[physical_op_data_mapping['condition']].sharding_sequence} x {strategy.sharding_specs[physical_op_data_mapping['x']].sharding_sequence} x {strategy.sharding_specs[physical_op_data_mapping['y']].sharding_sequence}"
        return strategy
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/unary_elementwise_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/unary_elementwise_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import MetaInfoNodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, UnaryElementwiseGenerator

__all__ = ["UnaryElementwiseHandler"]


@operator_registry.register(torch.Tensor.to)
@operator_registry.register(torch.Tensor.type)
@operator_registry.register(torch.abs)
@operator_registry.register(torch.nn.ReLU)
@operator_registry.register(torch.nn.Tanh)
@operator_registry.register(torch.tanh)
@operator_registry.register(torch.nn.modules.dropout.Dropout)
@operator_registry.register(torch.Tensor.contiguous)
@operator_registry.register(torch.Tensor.type)
@operator_registry.register(torch.nn.functional.dropout)
class UnaryElementwiseHandler(MetaInfoNodeHandler):
    """
    Node handler producing sharding strategies for unary element-wise
    operators (relu, tanh, dropout, dtype casts, contiguous, ...).
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        mapping = self.get_operation_data_mapping()
        return [UnaryElementwiseGenerator(mapping, self.device_mesh, self.node.args[0])]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # unary element-wise ops have exactly one tensor input and one output
        src_node = self.node.args[0]
        input_operand = OperationData(name=str(src_node), type=OperationDataType.ARG, data=src_node._meta_data)
        output_operand = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data)
        return {"input": input_operand, "output": output_operand}
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/view_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/view_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, ViewGenerator

__all__ = ["ViewHandler"]


@operator_registry.register(torch.Tensor.reshape)
@operator_registry.register(torch.reshape)
@operator_registry.register(torch.Tensor.view)
class ViewHandler(NodeHandler):
    """
    Node handler producing sharding strategies for view/reshape operators
    such as torch.reshape and Tensor.view.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        mapping = self.get_operation_data_mapping()
        return [ViewGenerator(mapping, self.device_mesh, self.node.args[0])]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # the input may itself be a parameter (e.g. viewing a weight); tag it
        # as PARAM so the solver treats it accordingly
        input_meta = self.node.args[0]._meta_data
        data_type = (
            OperationDataType.PARAM
            if isinstance(input_meta, torch.nn.parameter.Parameter)
            else OperationDataType.ARG
        )

        input_operand = OperationData(name=str(self.node.args[0]), type=data_type, data=input_meta)
        # the target shape of the view is carried as an extra ARG operand
        shape_operand = OperationData(name="tgt_shape", type=OperationDataType.ARG, data=self.node._meta_data.shape)
        output_operand = OperationData(
            name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data
        )
        return {
            "input": input_operand,
            "tgt_shape": shape_operand,
            "output": output_operand,
        }
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/split_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/split_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import SplitGenerator, StrategyGenerator

__all__ = ["SplitHandler"]


@operator_registry.register(torch.Tensor.split)
@operator_registry.register(torch.split)
class SplitHandler(NodeHandler):
    """
    A SplitHandler which deals with the sharding strategies for torch.split or torch.Tensor.split.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        # A single SplitGenerator produces every sharding strategy for this node.
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(SplitGenerator(op_data_mapping, self.device_mesh, self.node.args[0]))
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map the logical operands ("input", "split_info", "output") to physical data."""
        # check if the input operand is a parameter
        if isinstance(self.node.args[0]._meta_data, torch.nn.parameter.Parameter):
            data_type = OperationDataType.PARAM
        else:
            data_type = OperationDataType.ARG

        input_data = self.node.args[0]._meta_data
        physical_input_operand = OperationData(name=str(self.node.args[0]), type=data_type, data=input_data)
        split_size = self.node.args[1]
        # The split dimension may arrive as the third positional arg, as the
        # "dim" keyword, or be omitted (torch defaults to dim 0).
        if len(self.node.args) == 3:
            # (input, split_size, split_dim)
            split_dim = self.node.args[2]
        else:
            if self.node.kwargs:
                split_dim = self.node.kwargs["dim"]
            else:
                split_dim = 0
        num_dims = self.node.args[0]._meta_data.dim()
        # recover negative value to positive
        if split_dim < 0:
            split_dim += num_dims
        # Bundle (split_size, split_dim) as one pseudo-operand for the generator.
        split_info = (split_size, split_dim)
        physical_shape_operand = OperationData(name="split_info", type=OperationDataType.ARG, data=split_info)
        output_data = self.node._meta_data
        physical_output_operand = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=output_data)
        mapping = {
            "input": physical_input_operand,
            "split_info": physical_shape_operand,
            "output": physical_output_operand,
        }
        return mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/binary_elementwise_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/binary_elementwise_handler.py
from typing import Dict, List, Union

import torch
from torch.fx.node import Node

from colossalai.auto_parallel.tensor_shard.sharding_strategy import OperationData, OperationDataType, ShardingStrategy

from ..constants import BCAST_FUNC_OP
from ..utils import comm_actions_for_oprands, recover_sharding_spec_for_broadcast_shape
from .node_handler import MetaInfoNodeHandler
from .registry import operator_registry
from .strategy import BinaryElementwiseStrategyGenerator, StrategyGenerator

__all__ = ["BinaryElementwiseHandler"]


@operator_registry.register(BCAST_FUNC_OP)
class BinaryElementwiseHandler(MetaInfoNodeHandler):
    """
    A BinaryElementwiseHandler is a node handler which deals with operations which have two
    operands and broadcasting occurs, such as torch.add.
    """

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map the logical operands ("input", "other", "output") to physical data.

        Both operands are given the broadcast output shape as their logical shape;
        post_process recovers the physical shapes afterwards.
        """
        bcast_shape = self.node._meta_data.shape

        def _get_op_data_type(tensor):
            # A parameter operand is tagged differently from a plain argument.
            if isinstance(tensor, torch.nn.parameter.Parameter):
                return OperationDataType.PARAM
            else:
                return OperationDataType.ARG

        def _get_arg_value(idx):
            # Returns (meta_data, non_tensor): scalar operands are wrapped into a
            # 1-element meta tensor so strategy generation can proceed uniformly.
            non_tensor = False
            if isinstance(self.node.args[idx], Node):
                meta_data = self.node.args[idx]._meta_data
                # The meta_data of node type argument could also possibly be a non-tensor object.
                if not isinstance(meta_data, torch.Tensor):
                    assert isinstance(meta_data, (int, float))
                    meta_data = torch.Tensor([meta_data]).to("meta")
                    non_tensor = True

            else:
                # this is in fact a real data like int 1
                # but we can deem it as meta data
                # as it won't affect the strategy generation
                assert isinstance(self.node.args[idx], (int, float))
                meta_data = torch.Tensor([self.node.args[idx]]).to("meta")
                non_tensor = True

            return meta_data, non_tensor

        input_meta_data, non_tensor_input = _get_arg_value(0)
        other_meta_data, non_tensor_other = _get_arg_value(1)
        output_meta_data = self.node._meta_data
        # we need record op_data with non-tensor data in this list,
        # and filter the non-tensor op_data in post_process.
        self.non_tensor_list = []
        input_op_data = OperationData(
            name=str(self.node.args[0]),
            type=_get_op_data_type(input_meta_data),
            data=input_meta_data,
            logical_shape=bcast_shape,
        )
        other_op_data = OperationData(
            name=str(self.node.args[1]),
            type=_get_op_data_type(other_meta_data),
            data=other_meta_data,
            logical_shape=bcast_shape,
        )
        output_op_data = OperationData(
            name=str(self.node), type=OperationDataType.OUTPUT, data=output_meta_data, logical_shape=bcast_shape
        )

        if non_tensor_input:
            self.non_tensor_list.append(input_op_data)
        if non_tensor_other:
            self.non_tensor_list.append(other_op_data)
        mapping = {"input": input_op_data, "other": other_op_data, "output": output_op_data}
        return mapping

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        # A single BinaryElementwiseStrategyGenerator produces every strategy for this node.
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(BinaryElementwiseStrategyGenerator(op_data_mapping, self.device_mesh))
        return generators

    def post_process(self, strategy: ShardingStrategy) -> Union[ShardingStrategy, List[ShardingStrategy]]:
        # convert bias from its logical sharding spec to its physical sharding spec
        # NOTE: calling get_operation_data_mapping again also refreshes self.non_tensor_list.
        op_data_mapping = self.get_operation_data_mapping()

        for op_name, op_data in op_data_mapping.items():
            if op_data in self.non_tensor_list:
                # remove the sharding spec if the op_data is not a tensor, e.g. torch.pow(tensor, 2)
                strategy.sharding_specs.pop(op_data)

            else:
                # convert the logical sharding spec to physical sharding spec if broadcast
                # e.g. torch.rand(4, 4) + torch.rand(4)
                physical_shape = op_data.data.shape
                logical_shape = op_data.logical_shape
                sharding_spec = strategy.get_sharding_spec_by_name(op_data.name)
                sharding_spec, removed_dims = recover_sharding_spec_for_broadcast_shape(
                    sharding_spec, logical_shape, physical_shape
                )
                strategy.sharding_specs[op_data] = sharding_spec
                # Dims dropped by broadcasting need a communication action to keep
                # the result consistent across devices.
                if len(removed_dims) > 0:
                    comm_action = comm_actions_for_oprands(
                        node=self.node, removed_dims=removed_dims, op_data=op_data, sharding_spec=sharding_spec
                    )
                    strategy.communication_actions[op_data] = comm_action

        return strategy
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/placeholder_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/placeholder_handler.py
from typing import Dict, List

from torch.fx.node import Node

from colossalai.device.device_mesh import DeviceMesh

from ..sharding_strategy import OperationData, OperationDataType, StrategiesVector
from .node_handler import NodeHandler
from .strategy import PlaceholderGenerator, StrategyGenerator

__all__ = ["PlaceholderHandler"]


class PlaceholderHandler(NodeHandler):
    """
    A PlaceholderHandler which deals with the sharding strategies for Placeholder Node.
    """

    def __init__(
        self, node: Node, device_mesh: DeviceMesh, strategies_vector: StrategiesVector, placeholder_option: str
    ) -> None:
        super().__init__(node, device_mesh, strategies_vector)
        # Controls how the placeholder (graph input) is sharded by the generator.
        self.placeholder_option = placeholder_option

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        # A single PlaceholderGenerator produces every sharding strategy for this node.
        mapping = self.get_operation_data_mapping()
        generator = PlaceholderGenerator(
            mapping, self.device_mesh, placeholder_option=self.placeholder_option
        )
        return [generator]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """A placeholder has no inputs, so only the "output" operand is mapped."""
        output_operand = OperationData(
            name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data
        )
        return {"output": output_operand}
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/transpose_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/transpose_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import StrategyGenerator, TransposeGenerator

__all__ = ["TransposeHandler"]


@operator_registry.register(torch.Tensor.transpose)
@operator_registry.register(torch.transpose)
class TransposeHandler(NodeHandler):
    """
    A TransposeHandler which deals with the sharding strategies for torch.transpose or torch.Tensor.transpose.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        # A single TransposeGenerator produces every sharding strategy for this node.
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(TransposeGenerator(op_data_mapping, self.device_mesh, self.node.args[0]))
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """Map the logical operands ("input", "transpose_dims", "output") to physical data."""
        # check if the input operand is a parameter
        if isinstance(self.node.args[0]._meta_data, torch.nn.parameter.Parameter):
            data_type = OperationDataType.PARAM
        else:
            data_type = OperationDataType.ARG

        input_data = self.node.args[0]._meta_data
        physical_input_operand = OperationData(name=str(self.node.args[0]), type=data_type, data=input_data)

        transpose_dims = []
        # torch.transpose (input, dim0, dim1)
        # Collect the two dim arguments; they may be literal ints or fx Nodes
        # whose meta data is an int. The input tensor node itself is skipped
        # because its meta data is a tensor, not an int.
        for arg in self.node.args:
            if isinstance(arg, torch.fx.Node):
                if isinstance(arg._meta_data, int):
                    transpose_dims.append(arg._meta_data)
            else:
                transpose_dims.append(arg)

        num_dims = self.node._meta_data.dim()
        for i in range(2):
            # recover negative value to positive
            if transpose_dims[i] < 0:
                transpose_dims[i] += num_dims

        physical_shape_operand = OperationData(
            name="transpose_dims", type=OperationDataType.ARG, data=list(transpose_dims)
        )

        output_data = self.node._meta_data
        physical_output_operand = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=output_data)
        mapping = {
            "input": physical_input_operand,
            "transpose_dims": physical_shape_operand,
            "output": physical_output_operand,
        }
        return mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/conv_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/conv_handler.py
from typing import Dict, List

import torch
import torch.nn.functional as F

from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy
from ..utils import transpose_partition_dim
from .node_handler import MetaInfoModuleHandler, MetaInfoNodeHandler
from .registry import operator_registry
from .strategy import ConvStrategyGenerator, StrategyGenerator

__all__ = ["ConvModuleHandler", "ConvFunctionHandler"]


@operator_registry.register(torch.nn.Conv1d)
@operator_registry.register(torch.nn.Conv2d)
@operator_registry.register(torch.nn.Conv3d)
class ConvModuleHandler(MetaInfoModuleHandler):
    """
    A ConvModuleHandler which deals with the sharding strategies for nn.Convxd module.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        # A single ConvStrategyGenerator produces every sharding strategy for this node.
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(ConvStrategyGenerator(op_data_mapping, self.device_mesh))
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # use transposed shape for strategies
        # the strategies will be transformed back to its original shape in self.post_process
        physical_input_operand = OperationData(
            name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data
        )
        # Swap the first two dims of the weight for the logical shape so the
        # strategy generator sees a (in, out, ...)-like layout.
        logical_shape_for_weight = list(self.named_parameters["weight"].shape)
        logical_shape_for_weight[0], logical_shape_for_weight[1] = (
            logical_shape_for_weight[1],
            logical_shape_for_weight[0],
        )
        physical_other_operand = OperationData(
            name="weight",
            type=OperationDataType.PARAM,
            data=self.named_parameters["weight"],
            logical_shape=torch.Size(logical_shape_for_weight),
        )
        physical_output = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data)

        mapping = {"input": physical_input_operand, "other": physical_other_operand, "output": physical_output}

        # bias is optional for conv modules
        if "bias" in self.named_parameters:
            physical_bias_operand = OperationData(
                name="bias", type=OperationDataType.PARAM, data=self.named_parameters["bias"]
            )
            mapping["bias"] = physical_bias_operand
        return mapping

    def post_process(self, strategy: ShardingStrategy):
        """
        Convert the sharding spec of the weight parameter back to its original shape.
        """
        for op_data, sharding_spec in strategy.input_sharding_specs.items():
            if op_data.name == "weight":
                # undo the dim-0/dim-1 swap applied in get_operation_data_mapping
                transpose_partition_dim(sharding_spec, 0, 1)
        return strategy


@operator_registry.register(F.conv1d)
@operator_registry.register(F.conv2d)
@operator_registry.register(F.conv3d)
class ConvFunctionHandler(MetaInfoNodeHandler):
    """
    A ConvFunctionHandler which deals with the sharding strategies for nn.functional.ConvXd functions.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        # A single ConvStrategyGenerator produces every sharding strategy for this node.
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(ConvStrategyGenerator(op_data_mapping, self.device_mesh))
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # use transposed shape for strategies
        # the strategies will be transformed back to its original shape in self.post_process
        physical_input_operand = OperationData(
            name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data
        )

        # check if the other operand is a parameter
        if isinstance(self.node.args[1]._meta_data, torch.nn.parameter.Parameter):
            data_type = OperationDataType.PARAM
        else:
            data_type = OperationDataType.ARG

        # Swap the first two dims of the weight for the logical shape so the
        # strategy generator sees a (in, out, ...)-like layout.
        logical_shape_for_weight = list(self.node.args[1]._meta_data.shape)
        logical_shape_for_weight[0], logical_shape_for_weight[1] = (
            logical_shape_for_weight[1],
            logical_shape_for_weight[0],
        )
        physical_other_operand = OperationData(
            name=str(self.node.args[1]),
            type=data_type,
            data=self.node.args[1]._meta_data,
            logical_shape=torch.Size(logical_shape_for_weight),
        )
        physical_output = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data)

        mapping = {"input": physical_input_operand, "other": physical_other_operand, "output": physical_output}

        if "bias" in self.node.kwargs and self.node.kwargs["bias"] is not None:
            # check if the bias operand is a parameter
            if isinstance(self.node.kwargs["bias"]._meta_data, torch.nn.parameter.Parameter):
                data_type = OperationDataType.PARAM
            else:
                data_type = OperationDataType.ARG
            physical_bias_operand = OperationData(
                name=str(self.node.kwargs["bias"]), type=data_type, data=self.node.kwargs["bias"]._meta_data
            )
            mapping["bias"] = physical_bias_operand
        return mapping

    def post_process(self, strategy: ShardingStrategy):
        """
        Convert the sharding spec of the weight parameter back to its original shape.
        """
        for op_data, sharding_spec in strategy.input_sharding_specs.items():
            if op_data.name == str(self.node.args[1]):
                # undo the dim-0/dim-1 swap applied in get_operation_data_mapping
                transpose_partition_dim(sharding_spec, 0, 1)
        return strategy
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/node_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/node_handler.py
from abc import ABC, abstractmethod
from typing import Dict, List, Tuple, Union

import torch
from torch.fx.node import Node

from colossalai.auto_parallel.meta_profiler.shard_metainfo import ShardMetaInfo, meta_register
from colossalai.auto_parallel.tensor_shard.options import ShardOption, SolverPerference
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    OperationData,
    ShardingSpec,
    ShardingStrategy,
    StrategiesVector,
    TrainCycleItem,
)
from colossalai.auto_parallel.tensor_shard.utils import check_sharding_spec_validity
from colossalai.device.device_mesh import DeviceMesh
from colossalai.logging import get_dist_logger
from colossalai.tensor.shape_consistency import ShapeConsistencyManager

from .strategy import StrategyGenerator


class NodeHandler(ABC):
    """
    The NodeHandler is an abstract class used to generate every possible strategies for an operator node.

    Args:
        node (Node): the input node in node argument list.
        device_mesh (DeviceMesh): A logical view of a physical mesh.
        strategies_vector (StrategiesVector): all the strategies generated in this handler will be recorded into the strategies_vector.
    """

    def __init__(
        self,
        node: Node,
        device_mesh: DeviceMesh,
        strategies_vector: StrategiesVector,
        shard_option: ShardOption = ShardOption.STANDARD,
        solver_perference: SolverPerference = SolverPerference.STANDARD,
    ) -> None:
        self.node = node
        self.predecessor_node = list(node._input_nodes.keys())
        self.successor_node = list(node.users.keys())
        self.device_mesh = device_mesh
        self.strategies_vector = strategies_vector
        self.shard_option = shard_option
        self.solver_perference = solver_perference

    def update_resharding_cost(self, strategy: ShardingStrategy) -> ShardingStrategy:
        """
        Compute the resharding costs and save the costs in the ShardingStrategy object.

        Returns:
            ShardingStrategy: the same strategy with its ``resharding_costs`` populated.
        """
        # TODO: test this function when other handlers are ready
        resharding_costs = {}
        shape_consistency_manager = ShapeConsistencyManager()
        for node in self.predecessor_node:
            node_name = str(node)
            # get the current sharding spec generated by this node handler
            # we will not compute the resharding costs for the node not counted in the strategy.
            # And the node with tuple or list output need to be handled below.
            node_in_strategy = [op_data.name for op_data in strategy.sharding_specs.keys()]
            if str(node) not in node_in_strategy:
                continue
            op_data = strategy.get_op_data_by_name(node_name)
            current_sharding_spec = strategy.sharding_specs[op_data]
            # get the sharding specs for this node generated
            # in its own node handler
            assert hasattr(
                node, "strategies_vector"
            ), f"The predecessor node {node_name} has no strategy vector to compute the resharding cost."
            prev_strategy_vector = node.strategies_vector
            prev_sharding_specs = [
                prev_strategy.get_sharding_spec_by_name(node_name) for prev_strategy in prev_strategy_vector
            ]
            # create data structure to store costs
            if node not in resharding_costs:
                resharding_costs[node] = []

            def _compute_resharding_cost(
                prev_sharding_spec: Union[ShardingSpec, List[ShardingSpec]],
                current_sharding_spec: Union[ShardingSpec, List[ShardingSpec]],
                data: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
            ) -> TrainCycleItem:
                """
                This is a helper function to compute the resharding cost for a specific strategy of a node.
                Recurses element-wise when the specs are tuples/lists (multi-output nodes).
                """
                if prev_sharding_spec is None:
                    return TrainCycleItem(fwd=0, bwd=0, total=0)
                elif isinstance(prev_sharding_spec, ShardingSpec):
                    if isinstance(data, torch.Tensor):
                        dtype = data.dtype
                        # costs from the consistency manager are in element counts;
                        # scale by element size to get bytes
                        size_per_elem_bytes = torch.tensor([], dtype=dtype).element_size()
                        _, _, consistency_cost = shape_consistency_manager.shape_consistency(
                            prev_sharding_spec, current_sharding_spec
                        )

                        resharding_cost = TrainCycleItem(
                            fwd=consistency_cost["forward"] * size_per_elem_bytes,
                            bwd=consistency_cost["backward"] * size_per_elem_bytes,
                            total=consistency_cost["total"] * size_per_elem_bytes,
                        )
                        return resharding_cost
                    else:
                        # This raise is used to check if we have missed any type of data.
                        # It could be merged into Parameter branch, which means we won't handle
                        # non-tensor arguments.
                        raise ValueError(f"Unsupported data type {type(data)}")
                else:
                    assert isinstance(
                        prev_sharding_spec, (tuple, list)
                    ), f"prev_sharding_spec should be in type of ShardingSpec, List[ShardingSpec], \
                            or Tuple[ShardingSpec], but got {type(prev_sharding_spec)}"

                    fwd_cost = 0
                    bwd_cost = 0
                    total_cost = 0
                    for index, (prev_sharding_spec_item, current_sharding_spec_item) in enumerate(
                        zip(prev_sharding_spec, current_sharding_spec)
                    ):
                        item_cost = _compute_resharding_cost(
                            prev_sharding_spec_item, current_sharding_spec_item, data[index]
                        )
                        fwd_cost += item_cost.fwd
                        bwd_cost += item_cost.bwd
                        total_cost += item_cost.total
                    resharding_cost = TrainCycleItem(fwd=fwd_cost, bwd=bwd_cost, total=total_cost)
                    return resharding_cost

            # for each sharding spec generated by the predecessor's node handler
            # compute the resharding cost to switch to the sharding spec generated
            # by the current node handler
            for prev_sharding_spec in prev_sharding_specs:
                resharding_cost = _compute_resharding_cost(prev_sharding_spec, current_sharding_spec, op_data.data)
                resharding_costs[node].append(resharding_cost)
        strategy.resharding_costs = resharding_costs
        return strategy

    def get_target_function(self) -> callable:
        """
        This function is used to get the target function for the node handler.
        The target function is used to analyze the costs of strategies.

        Returns:
            The resolved module/function/unbound method, or None for
            placeholder/get_attr/output nodes which have no target callable.
        """
        if self.node.op in ("placeholder", "get_attr", "output"):
            return None

        if self.node.op == "call_module":
            target = self.node.graph.owning_module.get_submodule(self.node.target)
        elif self.node.op == "call_function":
            target = self.node.target
        elif self.node.op == "call_method":
            target = getattr(self.node.args[0]._meta_data.__class__, self.node.target)
        else:
            raise ValueError(f"Unsupported node type: {self.node.op}")

        return target

    def register_strategy(self, compute_resharding_cost: bool = True) -> StrategiesVector:
        """
        Register different sharding strategies for the current node.

        Generates strategies, post-processes them, optionally attaches resharding
        costs, validates every sharding spec, then filters strategies according to
        the handler's ShardOption.
        """
        strategy_generators = self.get_strategy_generator()

        for generator in strategy_generators:
            strategies = generator.generate()

            # postprocess a strategy
            # postprocess can produce one strategy or multiple strategies
            post_processed_strategies_map = map(self.post_process, strategies)
            post_processed_strategies = []

            for strategy in post_processed_strategies_map:
                if isinstance(strategy, (list, tuple)):
                    post_processed_strategies.extend(strategy)
                else:
                    post_processed_strategies.append(strategy)

            # compute the resharding costs based on the previous node
            # strategies if specified
            if compute_resharding_cost:
                updated_strategies = map(self.update_resharding_cost, post_processed_strategies)
                post_processed_strategies = list(updated_strategies)

            self.strategies_vector.extend(post_processed_strategies)

        # validating the correctness of the sharding strategy
        for strategy in self.strategies_vector:
            for op_data, sharding_spec in strategy.sharding_specs.items():
                if op_data.data is not None and isinstance(op_data.data, torch.Tensor):
                    check_sharding_spec_validity(sharding_spec, op_data.data)

        # filter strategies that violate the configured shard option
        remove_strategy_list = []
        for strategy in self.strategies_vector:
            shard_axis_list = []
            last_axis = len(self.device_mesh.shape) - 1
            for op_data, sharding_spec in strategy.sharding_specs.items():
                if op_data.data is not None and isinstance(op_data.data, torch.Tensor):
                    for dim, shard_axes in sharding_spec.dim_partition_dict.items():
                        for shard_axis in shard_axes:
                            if shard_axis not in shard_axis_list:
                                shard_axis_list.append(shard_axis)

            shard_level = len(shard_axis_list)
            using_last_axis = last_axis in shard_axis_list or -1 in shard_axis_list
            if self.shard_option == ShardOption.SHARD and shard_level == 0:
                remove_strategy_list.append(strategy)
            if self.shard_option == ShardOption.FULL_SHARD and shard_level <= 1:
                remove_strategy_list.append(strategy)
            if self.shard_option == ShardOption.SHARD_LAST_AXIS:
                # fixed idiom: was `using_last_axis == False`
                if shard_level != 1 or not using_last_axis:
                    remove_strategy_list.append(strategy)

        for strategy in remove_strategy_list:
            self.strategies_vector.remove(strategy)

        return self.strategies_vector

    def post_process(self, strategy: ShardingStrategy) -> Union[ShardingStrategy, List[ShardingStrategy]]:
        # transform the strategy generated
        # e.g. to process the sharding strategy for the transposed weights
        return strategy

    @abstractmethod
    def get_strategy_generator(self) -> List[StrategyGenerator]:
        """
        Define which generators should be used by this NodeHandler object.
        """

    @abstractmethod
    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        """
        Returns the mapping between the logical operation data to its physical data.
        A logical operation data is a data associated with an operation, which can be input and output. It is
        defined by the strategy generator, for example, a matrix multiplication operation has two operands "input"
        and "other" and one result "output". For a nn.Linear module, the physical operand for "input" is
        the module input, the physical operand for "other" is the module weight, and the physical result for "output"
        is the module output.
        Note that the operand name is specified by the StrategyGenerator object.

        For example:

            # for a linear layer
            mapping = {
                "input": Operand(name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data),
                "other": Operand(name="weight", type=OperationDataType.PARAM, data=self.named_parameters['weight']),
                "bias": Operand(name="bias", type=OperationDataType.PARAM, data=self.named_parameters['bias']),
                "output": Operand(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data),
            }
        """


class MetaInfoNodeHandler(NodeHandler):
    """
    This is a base class to handle the nodes patched in the meta profiler.

    Note: this class will be integrated into the NodeHandler class in the future,
    after all the functions are patched.
    """

    def register_strategy(self, compute_resharding_cost: bool = True) -> StrategiesVector:
        """
        This method is inherited from NodeHandler. It will register the strategies first,
        and rewrite the memory_cost and compute_cost of the strategy using the ShardMetaInfo class.
        """
        super().register_strategy(compute_resharding_cost=compute_resharding_cost)
        target = self.get_target_function()
        # Currently we haven't patched all the torch functions and modules, so if the target
        # is not patched, we will use the default cost model to compute the cost.
        # TODO: patch all torch functions and modules to make it clean
        if meta_register.has(target.__class__) or meta_register.has(target):
            strategies_info = []
            for strategy in self.strategies_vector:
                metainfo = ShardMetaInfo(strategy, target)
                strategy.compute_cost = metainfo.compute_cost
                strategy.memory_cost = metainfo.memory_cost
                strategies_info.append(metainfo)

            # attach metainfos to the handler
            setattr(self, "strategies_info", strategies_info)

        else:
            logger = get_dist_logger()
            # fixed: message previously ended with a dangling ", " and was
            # inconsistent with MetaInfoModuleHandler's warning
            logger.warning(f"The target function {target} is not patched yet")

        return self.strategies_vector


class ModuleHandler(NodeHandler):
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        # set attributes to access module parameters for convenience
        assert (
            self.node.graph.owning_module is not None
        ), "The graph is not associated with a module, please make sure it can be used to instantiate a GraphModule object."
        module = self.node.graph.owning_module.get_submodule(self.node.target)
        named_parameters = list(module.named_parameters(recurse=False))
        named_buffers = list(module.named_buffers(recurse=False))
        # convert named parameters from list to dict
        named_parameters = {k: v for k, v in named_parameters}
        named_buffers = {k: v for k, v in named_buffers}
        self.module = module
        self.named_parameters = named_parameters
        self.named_buffers = named_buffers


class MetaInfoModuleHandler(ModuleHandler):
    """
    This is a base class to handle the module patched in the meta profiler.

    Note: this class will be integrated into the ModuleHandler class in the future,
    after all the modules are patched.
    """

    def register_strategy(self, compute_resharding_cost: bool = True) -> StrategiesVector:
        """
        This method is inherited from NodeHandler. It will register the strategies first,
        and rewrite the memory_cost and compute_cost of the strategy using the ShardMetaInfo class.
        """
        super().register_strategy(compute_resharding_cost=compute_resharding_cost)
        target = self.get_target_function()

        # Currently we haven't patched all the torch functions and modules, so if the target
        # is not patched, we will use the default cost model to compute the cost.
        # TODO: patch all torch functions and modules to make it clean
        if meta_register.has(target.__class__) or meta_register.has(target):
            strategies_info = []
            for strategy in self.strategies_vector:
                metainfo = ShardMetaInfo(strategy, target)
                strategy.compute_cost = metainfo.compute_cost
                strategy.memory_cost = metainfo.memory_cost
                strategies_info.append(metainfo)

            # attach metainfos to the handler
            setattr(self, "strategies_info", strategies_info)

        else:
            logger = get_dist_logger()
            logger.warning(f"The target function {target} is not patched yet")

        return self.strategies_vector
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/addmm_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/addmm_handler.py
from typing import Dict, List, Union

import torch

from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy
from ..utils import comm_actions_for_oprands, recover_sharding_spec_for_broadcast_shape
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import LinearProjectionStrategyGenerator, StrategyGenerator

__all__ = ["ADDMMFunctionHandler"]


@operator_registry.register(torch.addmm)
@operator_registry.register(torch.Tensor.addmm)
class ADDMMFunctionHandler(NodeHandler):
    """
    A NodeHandler which deals with the sharding strategies for the addmm operation
    (``torch.addmm`` / ``torch.Tensor.addmm``), i.e. ``bias + input @ other``.

    The node arguments are ``(bias, input, other)``. The bias operand is
    broadcastable to the output shape, so its sharding spec is generated against
    the logical (broadcast/output) shape and recovered to its physical shape in
    ``post_process``.
    """

    def _infer_op_data_type(self, tensor: torch.Tensor) -> OperationDataType:
        """Classify an operand as PARAM (an ``nn.Parameter``) or a plain ARG."""
        if isinstance(tensor, torch.nn.parameter.Parameter):
            data_type = OperationDataType.PARAM
        else:
            data_type = OperationDataType.ARG
        return data_type

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # input operand: the first factor of the matrix product, node.args[1]
        input_data = self.node.args[1]._meta_data
        physical_input_operand = OperationData(
            name=str(self.node.args[1]), type=self._infer_op_data_type(input_data), data=input_data
        )

        # other operand: the second factor of the matrix product, node.args[2]
        other_data = self.node.args[2]._meta_data
        physical_other_operand = OperationData(
            name=str(self.node.args[2]), type=self._infer_op_data_type(other_data), data=other_data
        )

        # bias operand: node.args[0]. Its logical shape is the broadcast
        # (output) shape; its physical shape may have fewer dimensions.
        bias_logical_shape = self.node._meta_data.shape
        bias_data = self.node.args[0]._meta_data
        physical_bias_operand = OperationData(
            name=str(self.node.args[0]),
            type=self._infer_op_data_type(bias_data),
            data=bias_data,
            logical_shape=bias_logical_shape,
        )

        # output
        physical_output = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data)

        mapping = {
            "input": physical_input_operand,
            "other": physical_other_operand,
            "output": physical_output,
            "bias": physical_bias_operand,
        }

        return mapping

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(
            LinearProjectionStrategyGenerator(op_data_mapping, self.device_mesh, linear_projection_type="addmm")
        )
        return generators

    def post_process(self, strategy: ShardingStrategy) -> Union[ShardingStrategy, List[ShardingStrategy]]:
        # convert bias from its logical sharding spec to its physical sharding spec
        op_data_mapping = self.get_operation_data_mapping()

        bias_op_data = op_data_mapping["bias"]
        bias_physical_shape = bias_op_data.data.shape
        bias_logical_shape = bias_op_data.logical_shape
        bias_sharding_spec = strategy.get_sharding_spec_by_name(bias_op_data.name)
        bias_sharding_spec, removed_dims = recover_sharding_spec_for_broadcast_shape(
            bias_sharding_spec, bias_logical_shape, bias_physical_shape
        )
        strategy.sharding_specs[bias_op_data] = bias_sharding_spec

        # dims removed by broadcasting need a communication action so the
        # physically smaller bias stays consistent across devices
        if len(removed_dims) > 0:
            comm_action = comm_actions_for_oprands(
                node=self.node, removed_dims=removed_dims, op_data=bias_op_data, sharding_spec=bias_sharding_spec
            )
            strategy.communication_actions[bias_op_data] = comm_action

        return strategy
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/getattr_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/getattr_handler.py
from typing import Dict, List

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .strategy import GetattrGenerator, StrategyGenerator

__all__ = ["GetattrHandler"]


class GetattrHandler(NodeHandler):
    """
    A GetattrHandler which deals with the sharding strategies for Getattr Node.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        # a single generator covers everything a get_attr node needs
        return [GetattrGenerator(self.get_operation_data_mapping(), self.device_mesh)]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # A get_attr node can in principle resolve to either:
        #   1. a torch.Tensor (torch.nn.Parameter or buffer)
        #   2. a torch.nn.Module
        # For now the tracer only produces the first case, so node._meta_data
        # is always tensor-like here and no special-casing is required.
        output_operand = OperationData(
            name=str(self.node),
            type=OperationDataType.OUTPUT,
            data=self.node._meta_data,
        )
        return {"output": output_operand}
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/normal_pooling_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/normal_pooling_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import MetaInfoModuleHandler
from .registry import operator_registry
from .strategy import NormalPoolStrategyGenerator, StrategyGenerator

__all__ = ["NormPoolingHandler"]


# Fix: MaxPool1d was registered twice and MaxPool3d was never registered;
# the handler is meant to cover the full 1d/2d/3d Max/Avg pooling families.
@operator_registry.register(torch.nn.MaxPool1d)
@operator_registry.register(torch.nn.MaxPool2d)
@operator_registry.register(torch.nn.MaxPool3d)
@operator_registry.register(torch.nn.AvgPool1d)
@operator_registry.register(torch.nn.AvgPool2d)
@operator_registry.register(torch.nn.AvgPool3d)
class NormPoolingHandler(MetaInfoModuleHandler):
    """
    A NormPoolingHandler which deals with the sharding strategies for nn.MaxPoolxd
    and nn.AvgPoolxd modules.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(NormalPoolStrategyGenerator(op_data_mapping, self.device_mesh))
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # use transposed shape for strategies
        # the strategies will be transformed back to its original shape in self.post_process
        physical_input_operand = OperationData(
            name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data
        )
        # the pooling kernel size plays the role of the "other" operand
        physical_weight_operand = OperationData(name="kernel", type=OperationDataType.ARG, data=self.module.kernel_size)
        physical_output = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data)

        mapping = {"input": physical_input_operand, "other": physical_weight_operand, "output": physical_output}
        return mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/__init__.py
colossalai/auto_parallel/tensor_shard/node_handler/__init__.py
from .addmm_handler import ADDMMFunctionHandler
from .batch_norm_handler import BatchNormModuleHandler
from .binary_elementwise_handler import BinaryElementwiseHandler
from .bmm_handler import AddBMMFunctionHandler, BMMFunctionHandler
from .conv_handler import ConvFunctionHandler, ConvModuleHandler
from .default_reshape_handler import DefaultReshapeHandler
from .embedding_handler import EmbeddingFunctionHandler, EmbeddingModuleHandler
from .getattr_handler import GetattrHandler
from .getitem_handler import GetItemHandler
from .layer_norm_handler import LayerNormModuleHandler
from .linear_handler import LinearFunctionHandler, LinearModuleHandler
from .matmul_handler import MatMulHandler
from .normal_pooling_handler import NormPoolingHandler
from .output_handler import OutputHandler
from .permute_handler import PermuteHandler
from .placeholder_handler import PlaceholderHandler
from .registry import operator_registry
from .softmax_handler import SoftmaxHandler
from .split_handler import SplitHandler
from .sum_handler import SumHandler
from .tensor_constructor_handler import TensorConstructorHandler
from .transpose_handler import TransposeHandler
from .unary_elementwise_handler import UnaryElementwiseHandler
from .view_handler import ViewHandler
from .where_handler import WhereHandler

# Public re-exports of every node handler plus the shared operator registry,
# kept sorted so additions are easy to diff.
__all__ = [
    "ADDMMFunctionHandler",
    "AddBMMFunctionHandler",
    "BMMFunctionHandler",
    "BatchNormModuleHandler",
    "BinaryElementwiseHandler",
    "ConvFunctionHandler",
    "ConvModuleHandler",
    "DefaultReshapeHandler",
    "EmbeddingFunctionHandler",
    "EmbeddingModuleHandler",
    "GetItemHandler",
    "GetattrHandler",
    "LayerNormModuleHandler",
    "LinearFunctionHandler",
    "LinearModuleHandler",
    "MatMulHandler",
    "NormPoolingHandler",
    "OutputHandler",
    "PermuteHandler",
    "PlaceholderHandler",
    "SoftmaxHandler",
    "SplitHandler",
    "SumHandler",
    "TensorConstructorHandler",
    "TransposeHandler",
    "UnaryElementwiseHandler",
    "ViewHandler",
    "WhereHandler",
    "operator_registry",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/permute_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/permute_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import PermuteGenerator, StrategyGenerator

__all__ = ["PermuteHandler"]


@operator_registry.register(torch.Tensor.permute)
@operator_registry.register(torch.permute)
class PermuteHandler(NodeHandler):
    """
    A PermuteHandler which deals with the sharding strategies for torch.permute or torch.transpose.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        mapping = self.get_operation_data_mapping()
        return [PermuteGenerator(mapping, self.device_mesh, self.node.args[0])]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # the tensor being permuted may itself be a parameter
        input_meta = self.node.args[0]._meta_data
        if isinstance(input_meta, torch.nn.parameter.Parameter):
            data_type = OperationDataType.PARAM
        else:
            data_type = OperationDataType.ARG
        physical_input_operand = OperationData(name=str(self.node.args[0]), type=data_type, data=input_meta)

        permute_dims: List[int] = []
        if self.node.op == "call_method":
            # torch.Tensor.permute(input, *dims): each dim is its own argument,
            # either an fx proxy Node carrying an int or a plain int.
            # (The input Node at args[0] has a tensor _meta_data and is skipped.)
            for arg in self.node.args:
                if isinstance(arg, torch.fx.Node):
                    if isinstance(arg._meta_data, int):
                        permute_dims.append(arg._meta_data)
                else:
                    assert isinstance(arg, int), "The argument in permute node should be either type of Node or int."
                    permute_dims.append(arg)
        else:
            # torch.permute(input, dims): dims arrives as one tuple/list,
            # either proxied by a Node or given literally.
            for arg in self.node.args:
                if isinstance(arg, torch.fx.Node):
                    if isinstance(arg._meta_data, (tuple, list)):
                        permute_dims.extend(arg._meta_data)
                else:
                    assert isinstance(
                        arg, (tuple, list)
                    ), "The argument in permute node should be type of Node, Tuple[int] or List[int]."
                    permute_dims.extend(arg)

        # normalize negative dims into the [0, num_dims) range
        num_dims = self.node._meta_data.dim()
        permute_dims = [dim + num_dims if dim < 0 else dim for dim in permute_dims]

        physical_shape_operand = OperationData(name="permute_dims", type=OperationDataType.ARG, data=permute_dims)

        physical_output_operand = OperationData(
            name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data
        )

        return {
            "input": physical_input_operand,
            "permute_dims": physical_shape_operand,
            "output": physical_output_operand,
        }
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/softmax_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/softmax_handler.py
from typing import Dict, List

import torch

from ..sharding_strategy import OperationData, OperationDataType
from .node_handler import NodeHandler
from .registry import operator_registry
from .strategy import SoftmaxGenerator, StrategyGenerator

__all__ = ["SoftmaxHandler"]


@operator_registry.register(torch.nn.Softmax)
@operator_registry.register(torch.nn.functional.softmax)
class SoftmaxHandler(NodeHandler):
    """
    A SoftmaxHandler which deals with the sharding strategies for
    torch.nn.Softmax or torch.nn.functional.softmax.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        mapping = self.get_operation_data_mapping()
        return [SoftmaxGenerator(mapping, self.device_mesh, self.node.args[0])]

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # the input operand may itself be a parameter
        input_meta = self.node.args[0]._meta_data
        data_type = (
            OperationDataType.PARAM
            if isinstance(input_meta, torch.nn.parameter.Parameter)
            else OperationDataType.ARG
        )
        physical_input_operand = OperationData(name=str(self.node.args[0]), type=data_type, data=input_meta)

        # normalize a negative softmax dim to its positive counterpart
        softmax_dim = self.node.kwargs["dim"]
        if softmax_dim < 0:
            softmax_dim += input_meta.dim()

        physical_dim_operand = OperationData(name="softmax_dim", type=OperationDataType.ARG, data=softmax_dim)

        physical_output_operand = OperationData(
            name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data
        )

        return {
            "input": physical_input_operand,
            "softmax_dim": physical_dim_operand,
            "output": physical_output_operand,
        }
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/linear_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/linear_handler.py
from typing import Dict, List, Union

import torch
import torch.nn.functional as F

from colossalai.auto_parallel.tensor_shard.utils import transpose_partition_dim, update_partition_dim
from colossalai.logging import get_dist_logger
from colossalai.tensor.sharding_spec import ShardingNotDivisibleError

from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy
from .node_handler import MetaInfoModuleHandler, MetaInfoNodeHandler
from .registry import operator_registry
from .strategy import LinearProjectionStrategyGenerator, StrategyGenerator

__all__ = ["LinearModuleHandler", "LinearFunctionHandler"]


def _update_sharding_spec_for_transposed_weight_for_linear(
    strategy: ShardingStrategy, weight_name: str
) -> ShardingStrategy:
    """
    This function is a helper function used by both module node handler and function node
    handler. This function will convert the sharding spec for the transposed weight to the
    correct partition spec.

    Args:
        strategy (ShardingStrategy): the strategy generated by the strategy generator.
        weight_name (str): the name of the OperationData object for the weight.
    """
    # switch the dimensions of the transposed weight
    sharding_spec = strategy.get_sharding_spec_by_name(weight_name)
    op_data = strategy.get_op_data_by_name(weight_name)
    assert (
        op_data.logical_shape[0] == op_data.data.shape[1] and op_data.logical_shape[1] == op_data.data.shape[0]
    ), "Expected the logical shape of the linear operator's weight is equal to transposed physical shape"
    dim_size = len(op_data.logical_shape)
    transpose_partition_dim(sharding_spec, 0, dim_size - 1)
    return strategy


def _convert_logical_sharding_to_physical_sharding_spec_for_linear(
    strategy: ShardingStrategy, input_name: str, output_name: str
) -> List[ShardingStrategy]:
    """
    This function converts the logical sharding spec to the physical sharding spec for both
    the input and output of the linear operation. The input and output should have the same
    sharding spec.

    Args:
        strategy (ShardingStrategy): the logical strategy generated by the strategy generator.
        input_name (str): the name of the OperationData object for the input.
        output_name (str): the name of the OperationData object for the output.
    """
    # the result will be a list of strategies
    sharding_strategies = []

    # get operation data
    input_op_data = strategy.get_op_data_by_name(input_name)
    output_op_data = strategy.get_op_data_by_name(output_name)
    input_sharding_spec = strategy.get_sharding_spec_by_name(input_op_data.name)
    output_sharding_spec = strategy.get_sharding_spec_by_name(output_op_data.name)

    # recover the last logical dimension to physical dimension
    last_logical_input_dims = len(input_op_data.logical_shape) - 1
    last_logical_output_dims = len(output_op_data.logical_shape) - 1
    last_physical_input_dims = input_op_data.data.dim() - 1
    last_physical_output_dims = output_op_data.data.dim() - 1

    if last_logical_input_dims in input_sharding_spec.dim_partition_dict:
        input_last_dim_mapping = {last_logical_input_dims: last_physical_input_dims}
    else:
        input_last_dim_mapping = {}

    if last_logical_output_dims in output_sharding_spec.dim_partition_dict:
        output_last_dim_mapping = {last_logical_output_dims: last_physical_output_dims}
    else:
        output_last_dim_mapping = {}

    # get logger for debug message
    logger = get_dist_logger()

    # for the input of the linear operation, it can be multi-dimensional. The sharding spec generated is only
    # 2D, where the first dimension is non-matrix dimension and the last dimension is the matrix dimension.
    # the logical non-matrix dimension can belong to the 0th to (N-1)th dimension of the physical input shape.
    # Thus, we enumerate to get all possible cases.
    if 0 in input_sharding_spec.dim_partition_dict:
        # if 0 is in the dim_partition_dict, it means that the
        # the generated sharding strategy does shard the non-matrix dimension,
        # in this case, we need to do enumeration
        num_input_dims = input_op_data.data.dim()
        for i in range(num_input_dims - 1):
            strategy_copy = strategy.clone()
            input_sharding_spec = strategy_copy.get_sharding_spec_by_name(input_op_data.name)
            output_sharding_spec = strategy_copy.get_sharding_spec_by_name(output_op_data.name)
            try:
                # replace the 0th dimension in the logical sharding with ith dimension in the physical sharding
                input_dim_mapping = {0: i}
                input_dim_mapping.update(input_last_dim_mapping)
                update_partition_dim(
                    sharding_spec=input_sharding_spec,
                    dim_mapping=input_dim_mapping,
                    physical_shape=input_op_data.data.shape,
                    inplace=True,
                )
                output_dim_mapping = {0: i}
                output_dim_mapping.update(output_last_dim_mapping)
                update_partition_dim(
                    sharding_spec=output_sharding_spec,
                    dim_mapping=output_dim_mapping,
                    physical_shape=output_op_data.data.shape,
                    inplace=True,
                )
                strategy_copy.name = f"{strategy.name}_{i}"
                sharding_strategies.append(strategy_copy)
            except ShardingNotDivisibleError as e:
                # an invalid enumeration case is simply dropped, not fatal
                logger.debug(
                    f"Error occurred when converting the logical sharding spec to the physical one. Error details: {e}"
                )
    else:
        # the generated sharding strategy does not shard the non-matrix dimension,
        # in this case, we don't need to do enumeration
        # but instead, we still need to convert the logical shape to physical shape
        strategy_copy = strategy.clone()
        input_sharding_spec = strategy_copy.get_sharding_spec_by_name(input_op_data.name)
        output_sharding_spec = strategy_copy.get_sharding_spec_by_name(output_op_data.name)

        # after updating, the logical shape will be replaced by the physical shape
        input_dim_mapping = {}
        input_dim_mapping.update(input_last_dim_mapping)
        update_partition_dim(
            sharding_spec=input_sharding_spec,
            dim_mapping=input_dim_mapping,
            physical_shape=input_op_data.data.shape,
            inplace=True,
        )
        output_dim_mapping = {}
        output_dim_mapping.update(output_last_dim_mapping)
        update_partition_dim(
            sharding_spec=output_sharding_spec,
            dim_mapping=output_dim_mapping,
            physical_shape=output_op_data.data.shape,
            inplace=True,
        )
        sharding_strategies.append(strategy_copy)

    return sharding_strategies


@operator_registry.register(torch.nn.Linear)
class LinearModuleHandler(MetaInfoModuleHandler):
    """
    A LinearModuleHandler which deals with the sharding strategies for nn.Linear module.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(
            LinearProjectionStrategyGenerator(
                op_data_mapping,
                self.device_mesh,
                linear_projection_type="linear",
                solver_perference=self.solver_perference,
            )
        )
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # use transposed shape for strategies
        # the strategies will be transformed back to its original shape in self.post_process
        input_meta_data = self.node.args[0]._meta_data
        # flatten all leading dims so strategies are generated against a 2D view
        input_logical_shape = input_meta_data.view(-1, input_meta_data.shape[-1]).shape
        physical_input_operand = OperationData(
            name=str(self.node.args[0]),
            type=OperationDataType.ARG,
            data=input_meta_data,
            logical_shape=input_logical_shape,
        )
        # nn.Linear stores weight as (out_features, in_features); the logical
        # shape is the transposed (in_features, out_features) view
        physical_other_operand = OperationData(
            name="weight",
            type=OperationDataType.PARAM,
            data=self.named_parameters["weight"],
            logical_shape=self.named_parameters["weight"].shape[::-1],
        )
        output_meta_data = self.node._meta_data
        output_logical_shape = output_meta_data.view(-1, output_meta_data.shape[-1]).shape
        physical_output = OperationData(
            name=str(self.node),
            type=OperationDataType.OUTPUT,
            data=output_meta_data,
            logical_shape=output_logical_shape,
        )

        mapping = {"input": physical_input_operand, "other": physical_other_operand, "output": physical_output}

        # Fix: the original `if "bias" in self.named_parameters is not None:` is a
        # chained comparison that parses as
        # `("bias" in self.named_parameters) and (self.named_parameters is not None)`;
        # the intended condition is the plain membership test.
        if "bias" in self.named_parameters:
            physical_bias_operand = OperationData(
                name="bias", type=OperationDataType.PARAM, data=self.named_parameters["bias"]
            )
            mapping["bias"] = physical_bias_operand
        return mapping

    def post_process(self, strategy: ShardingStrategy) -> Union[ShardingStrategy, List[ShardingStrategy]]:
        """
        Convert the sharding spec from the logical shape to the physical shape. In this function, two tasks are completed:
        1. the sharding spec is updated for the transposed weight
        2. the input and output sharding specs are updated to physical shape.
        """
        # switch the dimensions of the transposed weight
        strategy = _update_sharding_spec_for_transposed_weight_for_linear(strategy=strategy, weight_name="weight")

        # create multiple sharding strategies for the inputs
        # as input can be multi-dimensional and the partition dim is only 2D,
        # we need to map the partition at dim 0 to one of the first few dimensions of the input
        strategies = _convert_logical_sharding_to_physical_sharding_spec_for_linear(
            strategy=strategy, input_name=str(self.node.args[0]), output_name=str(self.node)
        )
        return strategies


@operator_registry.register(F.linear)
class LinearFunctionHandler(MetaInfoNodeHandler):
    """
    A LinearFunctionHandler which deals with the sharding strategies for F.linear.
    """

    def get_strategy_generator(self) -> List[StrategyGenerator]:
        op_data_mapping = self.get_operation_data_mapping()
        generators = []
        generators.append(
            LinearProjectionStrategyGenerator(op_data_mapping, self.device_mesh, linear_projection_type="linear")
        )
        return generators

    def get_operation_data_mapping(self) -> Dict[str, OperationData]:
        # use transposed shape for strategies
        # the strategies will be transformed back to its original shape in self.post_process
        input_meta_data = self.node.args[0]._meta_data
        input_logical_shape = input_meta_data.view(-1, input_meta_data.shape[-1]).shape
        physical_input_operand = OperationData(
            name=str(self.node.args[0]),
            type=OperationDataType.ARG,
            data=self.node.args[0]._meta_data,
            logical_shape=input_logical_shape,
        )

        # check if the other operand is a parameter
        if isinstance(self.node.args[1]._meta_data, torch.nn.parameter.Parameter):
            data_type = OperationDataType.PARAM
        else:
            data_type = OperationDataType.ARG
        # weight arrives as (out_features, in_features); use the transposed logical shape
        physical_other_operand = OperationData(
            name=str(self.node.args[1]),
            type=data_type,
            data=self.node.args[1]._meta_data,
            logical_shape=self.node.args[1]._meta_data.shape[::-1],
        )
        output_meta_data = self.node._meta_data
        output_logical_shape = output_meta_data.view(-1, output_meta_data.shape[-1]).shape
        physical_output = OperationData(
            name=str(self.node),
            type=OperationDataType.OUTPUT,
            data=self.node._meta_data,
            logical_shape=output_logical_shape,
        )

        mapping = {"input": physical_input_operand, "other": physical_other_operand, "output": physical_output}

        if "bias" in self.node.kwargs and self.node.kwargs["bias"] is not None:
            # check if the bias operand is a parameter
            if isinstance(self.node.kwargs["bias"]._meta_data, torch.nn.parameter.Parameter):
                data_type = OperationDataType.PARAM
            else:
                data_type = OperationDataType.ARG
            physical_bias_operand = OperationData(
                name=str(self.node.kwargs["bias"]), type=data_type, data=self.node.kwargs["bias"]._meta_data
            )
            mapping["bias"] = physical_bias_operand

        return mapping

    def post_process(self, strategy: ShardingStrategy):
        # switch the dimensions of the transposed weight
        strategy = _update_sharding_spec_for_transposed_weight_for_linear(
            strategy=strategy, weight_name=str(self.node.args[1])
        )
        # create multiple sharding strategies for the inputs
        # as input can be multi-dimensional and the partition dim is only 2D,
        # we need to map the partition at dim 0 to one of the first few dimensions of the input
        strategies = _convert_logical_sharding_to_physical_sharding_spec_for_linear(
            strategy=strategy, input_name=str(self.node.args[0]), output_name=str(self.node)
        )
        return strategies
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/embedding_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/embedding_handler.py
from typing import Dict, List, Union import torch import torch.nn.functional as F from colossalai.auto_parallel.tensor_shard.utils import update_partition_dim from colossalai.logging import get_dist_logger from colossalai.tensor.sharding_spec import ShardingNotDivisibleError from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy from .node_handler import ModuleHandler, NodeHandler from .registry import operator_registry from .strategy import EmbeddingStrategyGenerator, StrategyGenerator __all__ = ["EmbeddingModuleHandler", "EmbeddingFunctionHandler"] def _convert_logical_sharding_to_physical_sharding_spec_for_embedding( strategy: ShardingStrategy, input_name: str, output_name: str ) -> List[ShardingStrategy]: """ This function converts the logical sharding spec to the physical sharding spec for both the input and output of the embedding operation. Args: strategy (ShardingStrategy): the logical strategy generated by the strategy generator. input_name (str): the name of the OperationData object for the input. output_name (str): the name of the OperationData object for the output. """ # the result will be a list of strategies sharding_strategies = [] # get operation data input_op_data = strategy.get_op_data_by_name(input_name) output_op_data = strategy.get_op_data_by_name(output_name) input_sharding_spec = strategy.get_sharding_spec_by_name(input_op_data.name) output_sharding_spec = strategy.get_sharding_spec_by_name(output_op_data.name) # recover the last logical dimension to physical dimension last_logical_output_dims = len(output_op_data.logical_shape) - 1 last_physical_output_dims = output_op_data.data.dim() - 1 # get logger for debug message logger = get_dist_logger() # For the input of the embedding operation, it can be multi-dimensional. The sharding spec is only generated for # logical 1D non-matrix dimension, the logical non-matrix dimension can belong to the 0th to Nth dimension of the # physical input shape. 
Thus, we enumerate to get all possible cases. if input_sharding_spec.dim_partition_dict: # if bool(input_sharding_spec.dim_partition_dict), it means that the # the generated sharding strategy does shard the non-matrix dimension, # in this case, we need to do enumeration num_input_dims = input_op_data.data.dim() for i in range(num_input_dims): strategy_copy = strategy.clone() input_sharding_spec = strategy_copy.get_sharding_spec_by_name(input_op_data.name) output_sharding_spec = strategy_copy.get_sharding_spec_by_name(output_op_data.name) try: # replace the 0th dimension in the logical sharding with ith dimension in the physical sharding update_partition_dim( sharding_spec=input_sharding_spec, dim_mapping={0: i}, physical_shape=input_op_data.data.shape, inplace=True, ) if last_logical_output_dims in output_sharding_spec.dim_partition_dict: dim_mapping = {0: i, last_logical_output_dims: last_physical_output_dims} else: dim_mapping = {0: i} update_partition_dim( sharding_spec=output_sharding_spec, dim_mapping=dim_mapping, physical_shape=output_op_data.data.shape, inplace=True, ) strategy_copy.name = f"{strategy.name}_{i}" sharding_strategies.append(strategy_copy) except ShardingNotDivisibleError as e: logger.debug( f"Errored occurred when converting the logical sharding spec to the physical one. 
Error details: {e}" ) else: # the generated sharding strategy does not shard the non-matrix dimension, # in this case, we don't need to do enumeration # but instead, we still need to convert the logical shape to physical shape strategy_copy = strategy.clone() input_sharding_spec = strategy_copy.get_sharding_spec_by_name(input_op_data.name) output_sharding_spec = strategy_copy.get_sharding_spec_by_name(output_op_data.name) # after updating, the logical shape will be replaced by the physical shape update_partition_dim( sharding_spec=input_sharding_spec, dim_mapping={}, physical_shape=input_op_data.data.shape, inplace=True ) if last_logical_output_dims in output_sharding_spec.dim_partition_dict: dim_mapping = {last_logical_output_dims: last_physical_output_dims} else: dim_mapping = {} update_partition_dim( sharding_spec=output_sharding_spec, dim_mapping=dim_mapping, physical_shape=output_op_data.data.shape, inplace=True, ) sharding_strategies.append(strategy_copy) return sharding_strategies @operator_registry.register(torch.nn.Embedding) class EmbeddingModuleHandler(ModuleHandler): """ A EmbeddingModuleHandler which deals with the sharding strategies for nn.Embedding module. """ def get_strategy_generator(self) -> List[StrategyGenerator]: op_data_mapping = self.get_operation_data_mapping() generators = [] generators.append(EmbeddingStrategyGenerator(op_data_mapping, self.device_mesh)) return generators def get_operation_data_mapping(self) -> Dict[str, OperationData]: # In nn.Embedding operation, all the dimensions of input will be treated as the batch dimension, # and then the sharding spec will be generated based on the logical 1D tensor. # After that, the logical sharding info will be enumerated among all the physical dimensions. 
# Finally, the input will be transformed back to its original shape in self.post_process input_meta_data = self.node.args[0]._meta_data input_logical_shape = input_meta_data.view(-1).shape physical_input_operand = OperationData( name=str(self.node.args[0]), type=OperationDataType.ARG, data=input_meta_data, logical_shape=input_logical_shape, ) physical_other_operand = OperationData( name="weight", type=OperationDataType.PARAM, data=self.named_parameters["weight"] ) # Same as input, in nn.Embedding operation, all the dimensions of output will be treated as # (batch dimension, embedding dimension), and then the sharding spec will be generated based # on the logical 2D tensor. # After that, the logical sharding info of batch dimension will be enumerated among all the physical dimensions. # Finally, the output will be transformed back to its original shape in self.post_process output_meta_data = self.node._meta_data output_logical_shape = output_meta_data.view(-1, output_meta_data.shape[-1]).shape physical_output = OperationData( name=str(self.node), type=OperationDataType.OUTPUT, data=output_meta_data, logical_shape=output_logical_shape, ) mapping = {"input": physical_input_operand, "other": physical_other_operand, "output": physical_output} return mapping def post_process(self, strategy: ShardingStrategy) -> Union[ShardingStrategy, List[ShardingStrategy]]: """ Convert the sharding spec from the logical shape to the physical shape. 
""" # create multiple sharding strategies for the inputs # as input can be multi-dimensional and the partition dim is only 2D, # we need to map the partition at logical dim 0 to one of the first few dimensions of the input and output strategies = _convert_logical_sharding_to_physical_sharding_spec_for_embedding( strategy=strategy, input_name=str(self.node.args[0]), output_name=str(self.node) ) return strategies @operator_registry.register(F.embedding) class EmbeddingFunctionHandler(NodeHandler): """ A EmbeddingFunctionHandler which deals with the sharding strategies for F.embedding. """ def get_strategy_generator(self) -> List[StrategyGenerator]: op_data_mapping = self.get_operation_data_mapping() generators = [] generators.append(EmbeddingStrategyGenerator(op_data_mapping, self.device_mesh)) return generators def get_operation_data_mapping(self) -> Dict[str, OperationData]: # In F.embedding operation, all the dimensions of input will be treated as the batch dimension, # and then the sharding spec will be generated based on the logical 1D tensor. # After that, the logical sharding info will be enumerated among all the physical dimensions. 
# Finally, the input will be transformed back to its original shape in self.post_process input_meta_data = self.node.args[0]._meta_data input_logical_shape = input_meta_data.view(-1).shape physical_input_operand = OperationData( name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data, logical_shape=input_logical_shape, ) # check if the other operand is a parameter if isinstance(self.node.args[1]._meta_data, torch.nn.parameter.Parameter): data_type = OperationDataType.PARAM else: data_type = OperationDataType.ARG physical_other_operand = OperationData( name=str(self.node.args[1]), type=data_type, data=self.node.args[1]._meta_data ) # Same as input, in F.embedding operation, all the dimensions of output will be treated as # (batch dimension, embedding dimension), and then the sharding spec will be generated based # on the logical 2D tensor. # After that, the logical sharding info of batch dimension will be enumerated among all the physical dimensions. # Finally, the output will be transformed back to its original shape in self.post_process output_meta_data = self.node._meta_data output_logical_shape = output_meta_data.view(-1, output_meta_data.shape[-1]).shape physical_output = OperationData( name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data, logical_shape=output_logical_shape, ) mapping = {"input": physical_input_operand, "other": physical_other_operand, "output": physical_output} return mapping def post_process(self, strategy: ShardingStrategy): """ Convert the sharding spec from the logical shape to the physical shape. 
""" # create multiple sharding strategies for the inputs # as input can be multi-dimensional and the partition dim is only 2D, # we need to map the partition at logical dim 0 to one of the first few dimensions of the input and output strategies = _convert_logical_sharding_to_physical_sharding_spec_for_embedding( strategy=strategy, input_name=str(self.node.args[0]), output_name=str(self.node) ) return strategies
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/batch_norm_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/batch_norm_handler.py
from typing import Dict, List import torch from ..sharding_strategy import OperationData, OperationDataType from .node_handler import MetaInfoModuleHandler from .registry import operator_registry from .strategy import BatchNormStrategyGenerator, StrategyGenerator __all__ = ["BatchNormModuleHandler"] @operator_registry.register(torch.nn.BatchNorm1d) @operator_registry.register(torch.nn.BatchNorm2d) @operator_registry.register(torch.nn.BatchNorm3d) class BatchNormModuleHandler(MetaInfoModuleHandler): """ A BatchNormModuleHandler which deals with the sharding strategies for nn.BatchNormXd module. """ def get_strategy_generator(self) -> List[StrategyGenerator]: op_data_mapping = self.get_operation_data_mapping() generators = [] generators.append(BatchNormStrategyGenerator(op_data_mapping, self.device_mesh)) return generators def get_operation_data_mapping(self) -> Dict[str, OperationData]: # use transposed shape for strategies # the strategies will be transformed back to its original shape in self.post_process physical_input_operand = OperationData( name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data ) physical_other_operand = OperationData( name="weight", type=OperationDataType.PARAM, data=self.named_parameters["weight"], logical_shape=self.named_parameters["weight"].shape, ) physical_output = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data) physical_running_mean_operand = OperationData( name="running_mean", type=OperationDataType.BUFFER, data=self.named_buffers["running_mean"], logical_shape=self.named_buffers["running_mean"].shape, ) physical_running_var_operand = OperationData( name="running_var", type=OperationDataType.BUFFER, data=self.named_buffers["running_var"], logical_shape=self.named_buffers["running_var"].shape, ) physical_num_batches_tracked_operand = OperationData( name="num_batches_tracked", type=OperationDataType.BUFFER, data=self.named_buffers["num_batches_tracked"], 
logical_shape=self.named_buffers["num_batches_tracked"].shape, ) mapping = { "input": physical_input_operand, "other": physical_other_operand, "output": physical_output, "running_mean": physical_running_mean_operand, "running_var": physical_running_var_operand, "num_batches_tracked": physical_num_batches_tracked_operand, } if self.named_parameters["bias"] is not None: physical_bias_operand = OperationData( name="bias", type=OperationDataType.PARAM, data=self.named_parameters["bias"] ) mapping["bias"] = physical_bias_operand return mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/getitem_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/getitem_handler.py
import operator from typing import Dict, List import torch from ..sharding_strategy import OperationData, OperationDataType from .node_handler import NodeHandler from .registry import operator_registry from .strategy import StrategyGenerator, TensorStrategyGenerator, TensorTupleStrategyGenerator __all__ = ["GetItemHandler"] @operator_registry.register(operator.getitem) class GetItemHandler(NodeHandler): """ A GetItemHandler which deals with the sharding strategies for operator.getitem. """ def get_strategy_generator(self) -> List[StrategyGenerator]: op_data_mapping = self.get_operation_data_mapping() generators = [] if isinstance(op_data_mapping["input"].data, torch.Tensor): generators.append(TensorStrategyGenerator(op_data_mapping, self.device_mesh, self.node.args[0])) else: generators.append(TensorTupleStrategyGenerator(op_data_mapping, self.device_mesh, self.node.args[0])) return generators def get_operation_data_mapping(self) -> Dict[str, OperationData]: # use transposed shape for strategies # the strategies will be transformed back to its original shape in self.post_process physical_input_operand = OperationData( name=str(self.node.args[0]), type=OperationDataType.ARG, data=self.node.args[0]._meta_data ) physical_other_operand = OperationData(name="index", type=OperationDataType.ARG, data=self.node.args[1]) physical_output = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data) mapping = {"input": physical_input_operand, "index": physical_other_operand, "output": physical_output} return mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/output_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/output_handler.py
from typing import Dict, List import torch from colossalai.device.device_mesh import DeviceMesh from ..sharding_strategy import OperationData, OperationDataType, StrategiesVector from .node_handler import NodeHandler from .strategy import OutputGenerator, StrategyGenerator __all__ = ["OutputHandler"] class OutputHandler(NodeHandler): """ A OutputHandler which deals with the sharding strategies for Output Node. """ def __init__( self, node: torch.fx.node.Node, device_mesh: DeviceMesh, strategies_vector: StrategiesVector, output_option: str ) -> None: super().__init__(node, device_mesh, strategies_vector) self.output_option = output_option def get_strategy_generator(self) -> List[StrategyGenerator]: op_data_mapping = self.get_operation_data_mapping() generators = [] generators.append(OutputGenerator(op_data_mapping, self.device_mesh, self.predecessor_node, self.output_option)) return generators def get_operation_data_mapping(self) -> Dict[str, OperationData]: # use transposed shape for strategies # the strategies will be transformed back to its original shape in self.post_process mapping = {} output_meta_data = [] for index, input_node in enumerate(self.predecessor_node): input_meta_data = input_node._meta_data physical_inputs = OperationData(name=str(input_node), type=OperationDataType.ARG, data=input_meta_data) name_key = f"input_{index}" mapping[name_key] = physical_inputs output_meta_data.append(input_meta_data) assert len(output_meta_data) > 0, f"Output node {self.node} has no input node." if len(output_meta_data) == 1: output_meta_data = output_meta_data[0] else: output_meta_data = tuple(output_meta_data) self.node._meta_data = output_meta_data physical_output = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=self.node._meta_data) mapping["output"] = physical_output return mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/sum_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/sum_handler.py
from typing import Dict, List import torch from ..sharding_strategy import OperationData, OperationDataType from .node_handler import NodeHandler from .registry import operator_registry from .strategy import StrategyGenerator, SumGenerator __all__ = ["SumHandler"] @operator_registry.register(torch.Tensor.sum) @operator_registry.register(torch.sum) class SumHandler(NodeHandler): """ A SumHandler which deals with the sharding strategies for torch.sum or torch.Tensor.sum. """ def get_strategy_generator(self) -> List[StrategyGenerator]: op_data_mapping = self.get_operation_data_mapping() generators = [] generators.append(SumGenerator(op_data_mapping, self.device_mesh, self.node.args[0])) return generators def get_operation_data_mapping(self) -> Dict[str, OperationData]: # check if the input operand is a parameter if isinstance(self.node.args[0]._meta_data, torch.nn.parameter.Parameter): data_type = OperationDataType.PARAM else: data_type = OperationDataType.ARG input_data = self.node.args[0]._meta_data physical_input_operand = OperationData(name=str(self.node.args[0]), type=data_type, data=input_data) if len(self.node.args) > 1: sum_dims = self.node.args[1] else: sum_dims = tuple(range(self.node.args[0]._meta_data.dim())) if isinstance(sum_dims, int): sum_dims = (sum_dims,) # recover negative value to positive num_dims = self.node.args[0]._meta_data.dim() for i in range(len(sum_dims)): if sum_dims[i] < 0: sum_dims[i] += num_dims # mapping the input dims to output dims # For examples: # input: torch.rand(2, 3, 4, 5) # output: torch.sum(input, (0, 2)) # sum_mapping_dict = {1: 0, 3: 1} # sum_mapping_dict[1] = 0 means the 0th dim of output is the 1st dim of input # sum_mapping_dict[3] = 1 means the 1st dim of output is the 3rd dim of input sum_mapping_dict = {} if "keepdim" in self.node.kwargs and self.node.kwargs["keepdim"]: for i in range(num_dims): sum_mapping_dict.update({i: i}) else: output_index = 0 for i in range(num_dims): if i not in sum_dims: 
sum_mapping_dict.update({i: output_index}) output_index += 1 assert output_index == self.node._meta_data.dim() sum_info = (sum_dims, sum_mapping_dict) physical_shape_operand = OperationData(name="sum_info", type=OperationDataType.ARG, data=sum_info) output_data = self.node._meta_data physical_output_operand = OperationData(name=str(self.node), type=OperationDataType.OUTPUT, data=output_data) mapping = { "input": physical_input_operand, "sum_info": physical_shape_operand, "output": physical_output_operand, } return mapping
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/bmm_handler.py
colossalai/auto_parallel/tensor_shard/node_handler/bmm_handler.py
from typing import Dict, List, Union import torch from ..sharding_strategy import OperationData, OperationDataType, ShardingStrategy from ..utils import comm_actions_for_oprands, recover_sharding_spec_for_broadcast_shape from .node_handler import NodeHandler from .registry import operator_registry from .strategy import BatchedMatMulStrategyGenerator, StrategyGenerator __all__ = ["BMMFunctionHandler", "AddBMMFunctionHandler"] def _get_data_mapping_for_bmm_op(node, input_idx, other_idx, bias_idx=None): """ This function is a helper function which extracts the common logic for both `bmm` and `addbmm` node handler to reduce code redundancy. """ # input operand physical_input_operand = OperationData( name=str(node.args[input_idx]), type=OperationDataType.ARG, data=node.args[input_idx]._meta_data ) # other operand physical_other_operand = OperationData( name=str(node.args[other_idx]), type=OperationDataType.ARG, data=node.args[other_idx]._meta_data ) # output physical_output = OperationData(name=str(node), type=OperationDataType.OUTPUT, data=node._meta_data) mapping = {"input": physical_input_operand, "other": physical_other_operand, "output": physical_output} if bias_idx is not None: # bias physical shape bias_logical_shape = node._meta_data.shape physical_bias_operand = OperationData( name=str(node.args[bias_idx]), type=OperationDataType.ARG, data=node.args[bias_idx]._meta_data, logical_shape=bias_logical_shape, ) mapping["bias"] = physical_bias_operand return mapping @operator_registry.register(torch.bmm) @operator_registry.register(torch.Tensor.bmm) class BMMFunctionHandler(NodeHandler): """ This is a NodeHandler class which deals with the batched matrix multiplication operation in PyTorch. Such operations including `torch.bmm` and `torch.Tensor.bmm` require the tensor to be 3D, thus, there is no logical-physical shape conversion in this handler. 
""" def get_operation_data_mapping(self) -> Dict[str, OperationData]: mapping = _get_data_mapping_for_bmm_op(node=self.node, input_idx=0, other_idx=1) return mapping def get_strategy_generator(self) -> List[StrategyGenerator]: op_data_mapping = self.get_operation_data_mapping() generators = [] generators.append(BatchedMatMulStrategyGenerator(op_data_mapping, self.device_mesh)) return generators @operator_registry.register(torch.addbmm) @operator_registry.register(torch.Tensor.addbmm) class AddBMMFunctionHandler(NodeHandler): """ This is a NodeHandler class which deals with the addition + batched matrix multiplication operation in PyTorch. Such operations including `torch.addbmm` and `torch.Tensor.addbmm` require the two matmul tensor to be 3D. However, due to the addition, logical-physical shape conversion is required for the bias term. As the addbmm operation will reduce the batch dimension, the bias is maximum 2D. """ def get_operation_data_mapping(self) -> Dict[str, OperationData]: mapping = _get_data_mapping_for_bmm_op(node=self.node, input_idx=1, other_idx=2, bias_idx=0) return mapping def get_strategy_generator(self) -> List[StrategyGenerator]: op_data_mapping = self.get_operation_data_mapping() generators = [] generator = BatchedMatMulStrategyGenerator(op_data_mapping, self.device_mesh) # addbmm will shrink the first batch dim generator.squeeze_batch_dim = True generators.append(generator) return generators def post_process(self, strategy: ShardingStrategy) -> Union[ShardingStrategy, List[ShardingStrategy]]: # convert bias from its logical sharding spec to its physical sharding spec op_data_mapping = self.get_operation_data_mapping() if "bias" in op_data_mapping: bias_op_data = op_data_mapping["bias"] bias_physical_shape = bias_op_data.data.shape bias_logical_shape = bias_op_data.logical_shape bias_sharding_spec = strategy.get_sharding_spec_by_name(bias_op_data.name) bias_sharding_spec, removed_dims = recover_sharding_spec_for_broadcast_shape( 
bias_sharding_spec, bias_logical_shape, bias_physical_shape ) strategy.sharding_specs[bias_op_data] = bias_sharding_spec if len(removed_dims) > 0: comm_action = comm_actions_for_oprands( node=self.node, removed_dims=removed_dims, op_data=bias_op_data, sharding_spec=bias_sharding_spec ) strategy.communication_actions[bias_op_data] = comm_action return strategy
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/getattr_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/getattr_generator.py
from typing import List from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem from colossalai.auto_parallel.tensor_shard.utils import ( enumerate_all_possible_1d_sharding, enumerate_all_possible_2d_sharding, ignore_sharding_exception, ) from colossalai.tensor.sharding_spec import ShardingSpecException from .strategy_generator import StrategyGenerator __all__ = ["GetattrGenerator"] class GetattrGenerator(StrategyGenerator): """ PlaceholderGenerator is a generic class to generate strategies for placeholder node. """ def validate(self) -> bool: return super().validate() def update_compute_cost(self, strategy: ShardingStrategy): compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20) strategy.compute_cost = compute_cost def update_memory_cost(self, strategy: ShardingStrategy): """ Compute the memory cost per device with this specific strategy. """ forward_size_mapping = {"output": self._compute_size_in_bytes(strategy, "output")} # compute fwd cost incurred # fwd_cost = output fwd_activation_cost = sum([v for k, v in forward_size_mapping.items()]) fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=0) bwd_mem_cost = MemoryCost(activation=0, parameter=0) # compute total cost total_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=0) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost @ignore_sharding_exception def enumerate_all_possible_output(self, mesh_dim_0, mesh_dim_1): # we check for the output logical shape to get the number of dimensions dim_partition_list = [] dim_size = len(self.op_data["output"].logical_shape) # enumerate all the 2D sharding cases sharding_list_2d = enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, dim_size) dim_partition_list.extend(sharding_list_2d) # enumerate all the 1D sharding cases sharding_list_1d_on_dim_0 = enumerate_all_possible_1d_sharding(mesh_dim_0, dim_size) 
dim_partition_list.extend(sharding_list_1d_on_dim_0) sharding_list_1d_on_dim_1 = enumerate_all_possible_1d_sharding(mesh_dim_1, dim_size) dim_partition_list.extend(sharding_list_1d_on_dim_1) # add empty dict for fully replicated case dim_partition_list.append({}) # sharding strategy bookkeeping strategy_list = [] # convert these dim partition dict to sharding strategy for dim_partition_dict in dim_partition_list: dim_partition_dict_mapping = dict(output=dim_partition_dict) try: sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) communication_action_mapping = {} # get name name = f"get_attr {sharding_spec_mapping['output'].sharding_sequence}" sharding_strategy = self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) strategy_list.append(sharding_strategy) except ShardingSpecException: continue return strategy_list def collate_strategies(self) -> List[ShardingStrategy]: return self.enumerate_all_possible_output(0, 1)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/reshape_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/reshape_generator.py
import copy
from typing import List

from colossalai.auto_parallel.tensor_shard.node_handler.strategy.strategy_generator import FollowingStrategyGenerator
from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.auto_parallel.tensor_shard.utils import (
    check_keep_sharding_status,
    detect_reshape_mapping,
    infer_output_dim_partition_dict,
)
from colossalai.tensor.shape_consistency import CollectiveCommPattern
from colossalai.tensor.sharding_spec import ShardingSpec

# Fix: DefaultReshapeGenerator is defined in this module but was missing from
# __all__, so `from ... import *` silently dropped it.
__all__ = [
    "ReshapeGenerator",
    "ViewGenerator",
    "PermuteGenerator",
    "TransposeGenerator",
    "SplitGenerator",
    "DefaultReshapeGenerator",
]


class ReshapeGenerator(FollowingStrategyGenerator):
    """
    ReshapeGenerator is the base class for all the reshape operation.

    It supplies shared compute/memory cost accounting; subclasses override
    ``collate_strategies`` to enumerate op-specific sharding strategies.
    """

    def validate(self) -> bool:
        return super().validate()

    def update_compute_cost(self, strategy: ShardingStrategy):
        # Reshape-like ops are treated as (nearly) free: a small constant
        # placeholder cost keeps the solver from modeling them as zero.
        compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20)
        strategy.compute_cost = compute_cost

    def update_memory_cost(self, strategy: ShardingStrategy):
        """
        Compute the memory cost per device with this specific strategy.
        """
        forward_size_mapping = {
            "input": self._compute_size_in_bytes(strategy, "input"),
            "output": self._compute_size_in_bytes(strategy, "output"),
        }

        backward_size_mapping = copy.deepcopy(forward_size_mapping)
        backward_size_mapping.pop("output")
        # compute fwd cost incurred
        # fwd_cost = input + output
        fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)])
        fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)])
        fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost)

        # compute bwd cost incurred
        # bwd_cost = input_grad
        bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)])
        bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)])
        bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost)

        # compute total cost
        total_mem_cost = MemoryCost(
            activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost
        )
        memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost)
        strategy.memory_cost = memory_cost

    def collate_strategies(self) -> List[ShardingStrategy]:
        return super().collate_strategies()


class ViewGenerator(ReshapeGenerator):
    """
    ViewGenerator deals with the sharding strategies of view op.

    If the input sharding can be mapped through the reshape, the output keeps
    an equivalent sharding; otherwise the input is gathered to fully
    replicated before the view.
    """

    def collate_strategies(self) -> List[ShardingStrategy]:
        strategy_list = []
        for index, strategy in enumerate(self.predecessor_node.strategies_vector):
            dim_partition_dict_mapping = {}
            communication_action_mapping = {}
            input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]]

            origin_shape = self.op_data["input"].data.shape
            tgt_shape = self.op_data["tgt_shape"].data
            reshape_mapping_dict = detect_reshape_mapping(origin_shape, tgt_shape)

            dim_partition_dict_for_input = input_sharding_spec.dim_partition_dict
            keep_sharding_status = check_keep_sharding_status(dim_partition_dict_for_input, reshape_mapping_dict)

            if keep_sharding_status:
                dim_partition_dict_for_output = infer_output_dim_partition_dict(
                    dim_partition_dict_for_input, reshape_mapping_dict
                )
            else:
                dim_partition_dict_for_output = {}

            dim_partition_dict_mapping = {
                "input": dim_partition_dict_for_input,
                "output": dim_partition_dict_for_output,
            }
            sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
            # add index into name to pass the duplicated check
            # we keep same strategies with different name for node merging, and it will not increase the searching space,
            # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node.
            if keep_sharding_status:
                name = f'{sharding_spec_mapping["input"].sharding_sequence} -> {sharding_spec_mapping["output"].sharding_sequence}_{index}'
            else:
                name = f'{sharding_spec_mapping["input"].sharding_sequence} -> FULLY REPLICATED_{index}'

                # add comm action for converting input to fully replicated
                total_mesh_dim_list = []
                for mesh_dim_list in dim_partition_dict_for_input.values():
                    total_mesh_dim_list.extend(mesh_dim_list)
                # if there is only one sharding dimension, we should use the value instead of list as logical_process_axis.
                if len(total_mesh_dim_list) == 1:
                    total_mesh_dim_list = total_mesh_dim_list[0]
                    # the total mesh dim list only has one element, so the shard dim has only one element as well.
                    shard_dim = list(dim_partition_dict_for_input.keys())[0]
                    input_comm_action = self.get_communication_action(
                        sharding_spec=sharding_spec_mapping["input"],
                        communication_pattern=CollectiveCommPattern.GATHER_FWD_SPLIT_BWD,
                        logical_process_axis=total_mesh_dim_list,
                        comm_type=CommType.BEFORE,
                        arg_index=0,
                    )
                    # it will gather the input through gather_dim during forward phase.
                    input_comm_action.comm_spec.gather_dim = shard_dim
                    # it will split the input activation grad through shard_dim during backward phase.
                    input_comm_action.comm_spec.shard_dim = shard_dim
                elif len(total_mesh_dim_list) >= 2:
                    # multi-axis sharding: describe the conversion as a generic
                    # src->tgt resharding to the fully replicated spec.
                    source_spec = sharding_spec_mapping["input"]
                    target_spec = ShardingSpec(
                        device_mesh=self.device_mesh, entire_shape=source_spec.entire_shape, dim_partition_dict={}
                    )
                    comm_spec = {"src_spec": source_spec, "tgt_spec": target_spec}
                    input_comm_action = CommAction(comm_spec=comm_spec, comm_type=CommType.BEFORE, arg_index=0)
                else:
                    # input is already fully replicated; nothing to do
                    input_comm_action = None

                if input_comm_action is not None:
                    communication_action_mapping["input"] = input_comm_action

            strategy = self.get_sharding_strategy(
                name=name,
                sharding_spec_mapping=sharding_spec_mapping,
                communication_action_mapping=communication_action_mapping,
            )
            strategy_list.append(strategy)

        return strategy_list


class PermuteGenerator(ReshapeGenerator):
    """
    PermuteGenerator deals with the sharding strategies of permute op.

    The input sharding is carried through: a dimension sharded before the
    permutation stays sharded at its permuted position.
    """

    def collate_strategies(self) -> List[ShardingStrategy]:
        strategy_list = []
        for index, strategy in enumerate(self.predecessor_node.strategies_vector):
            dim_partition_dict_mapping = {}
            communication_action_mapping = {}
            input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]]

            permute_dims = self.op_data["permute_dims"].data
            dim_partition_dict_for_input = input_sharding_spec.dim_partition_dict
            dim_partition_dict_for_output = {}
            for dim_index, permute_dim in enumerate(permute_dims):
                if permute_dim in dim_partition_dict_for_input:
                    dim_partition_dict_for_output[dim_index] = dim_partition_dict_for_input[permute_dim]

            dim_partition_dict_mapping = {
                "input": dim_partition_dict_for_input,
                "output": dim_partition_dict_for_output,
            }
            sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
            # add index into name to pass the duplicated check
            # we keep same strategies with different name for node merging, and it will not increase the searching space,
            # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node.
            name = f'{sharding_spec_mapping["input"].sharding_sequence} -> {sharding_spec_mapping["output"].sharding_sequence}_{index}'

            strategy = self.get_sharding_strategy(
                name=name,
                sharding_spec_mapping=sharding_spec_mapping,
                communication_action_mapping=communication_action_mapping,
            )
            strategy_list.append(strategy)

        return strategy_list


class TransposeGenerator(ReshapeGenerator):
    """
    TransposeGenerator deals with the sharding strategies of transpose op.

    The shardings of the two transposed dimensions are swapped; all other
    dimensions keep their sharding unchanged.
    """

    def collate_strategies(self) -> List[ShardingStrategy]:
        strategy_list = []
        for index, strategy in enumerate(self.predecessor_node.strategies_vector):
            dim_partition_dict_mapping = {}
            communication_action_mapping = {}
            input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]]
            dim_partition_dict_for_input = input_sharding_spec.dim_partition_dict
            dim_partition_dict_for_output = {}

            transpose_dims = self.op_data["transpose_dims"].data
            dim_0 = transpose_dims[0]
            dim_1 = transpose_dims[1]
            for dim, sharded_dims in dim_partition_dict_for_input.items():
                if dim == dim_0:
                    dim_partition_dict_for_output[dim_1] = dim_partition_dict_for_input[dim_0]
                elif dim == dim_1:
                    dim_partition_dict_for_output[dim_0] = dim_partition_dict_for_input[dim_1]
                else:
                    dim_partition_dict_for_output[dim] = sharded_dims

            dim_partition_dict_mapping = {
                "input": dim_partition_dict_for_input,
                "output": dim_partition_dict_for_output,
            }
            sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
            # add index into name to pass the duplicated check
            # we keep same strategies with different name for node merging, and it will not increase the searching space,
            # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node.
            name = f'{sharding_spec_mapping["input"].sharding_sequence} -> {sharding_spec_mapping["output"].sharding_sequence}_{index}'

            strategy = self.get_sharding_strategy(
                name=name,
                sharding_spec_mapping=sharding_spec_mapping,
                communication_action_mapping=communication_action_mapping,
            )
            strategy_list.append(strategy)

        return strategy_list


class SplitGenerator(ReshapeGenerator):
    """
    SplitGenerator deals with the sharding strategies of split op.

    If the input happens to be sharded along the split dimension, that
    sharding is removed (and a gather comm action inserted) so the split can
    be computed locally; each output chunk inherits the remaining sharding.
    """

    def collate_strategies(self) -> List[ShardingStrategy]:
        strategy_list = []
        for index, strategy in enumerate(self.predecessor_node.strategies_vector):
            recover_dims = None
            dim_partition_dict_mapping = {}
            communication_action_mapping = {}
            input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]]
            dim_partition_dict_for_input = copy.deepcopy(input_sharding_spec.dim_partition_dict)
            split_size, split_dim = self.op_data["split_info"].data

            if split_dim in dim_partition_dict_for_input:
                # the split dimension cannot stay sharded; remember the mesh
                # axes so the input can be gathered back to replica on them.
                recover_dims = dim_partition_dict_for_input.pop(split_dim)

            dim_partition_dict_for_output = [
                copy.deepcopy(dim_partition_dict_for_input) for _ in range(len(self.op_data["output"].data))
            ]
            assert len(dim_partition_dict_for_output) >= 2

            dim_partition_dict_mapping = {
                "input": dim_partition_dict_for_input,
                "output": dim_partition_dict_for_output,
            }
            sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
            # add index into name to pass the duplicated check
            # we keep same strategies with different name for node merging, and it will not increase the searching space,
            # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node.
            name = f'{sharding_spec_mapping["input"].sharding_sequence}_{index}'

            # add comm action if the input need to be recovered to replica in the split dimension.
            if recover_dims:
                # if there is only one sharding dimension, we should use the value instead of list as logical_process_axis.
                if len(recover_dims) == 1:
                    recover_dims = recover_dims[0]
                    input_comm_action = self.get_communication_action(
                        sharding_spec=sharding_spec_mapping["input"],
                        communication_pattern=CollectiveCommPattern.GATHER_FWD_SPLIT_BWD,
                        logical_process_axis=recover_dims,
                        comm_type=CommType.BEFORE,
                        arg_index=0,
                    )
                    # it will gather the input through gather_dim during forward phase.
                    input_comm_action.comm_spec.gather_dim = split_dim
                    # it will split the input activation grad through split_dim during backward phase.
                    input_comm_action.comm_spec.shard_dim = split_dim
                elif len(recover_dims) >= 2:
                    # original sharding spec
                    source_spec = input_sharding_spec
                    # target sharding spec
                    target_spec = sharding_spec_mapping["input"]
                    comm_spec = {"src_spec": source_spec, "tgt_spec": target_spec}
                    input_comm_action = CommAction(comm_spec=comm_spec, comm_type=CommType.BEFORE, arg_index=0)
            else:
                input_comm_action = None

            if input_comm_action is not None:
                communication_action_mapping["input"] = input_comm_action

            strategy = self.get_sharding_strategy(
                name=name,
                sharding_spec_mapping=sharding_spec_mapping,
                communication_action_mapping=communication_action_mapping,
            )
            strategy_list.append(strategy)

        return strategy_list


class DefaultReshapeGenerator(ReshapeGenerator):
    """
    DefaultReshapeGenerator which deals with the sharding strategies of
    Reshape Op which have to recover the tensor to Replica status.
    """

    def collate_strategies(self) -> List[ShardingStrategy]:
        strategy_list = []
        # For default reshape strategy, to keep the computing correctness we keep the
        # sharding spec of input is fully replicated. In addition, we will keep the output
        # in replica status and let the successor node choose the way to resharding the
        # output node. Therefore, the different strategies of input node with same
        # output sharding spec will generate same strategy for reshape function.
        for index, strategy in enumerate(self.predecessor_node.strategies_vector):
            dim_partition_dict_mapping = {}
            communication_action_mapping = {}
            input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]]

            dim_partition_dict_for_input = input_sharding_spec.dim_partition_dict
            dim_partition_dict_for_output = {}
            if isinstance(self.op_data["output"].data, tuple):
                dim_partition_dict_for_output = [{} for _ in range(len(self.op_data["output"].data))]

            dim_partition_dict_mapping = {
                "input": dim_partition_dict_for_input,
                "output": dim_partition_dict_for_output,
            }
            sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
            # add index into name to pass the duplicated check
            # we keep same strategies with different name for node merging, and it will not increase the searching space,
            # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node.
            name = f'{sharding_spec_mapping["input"].sharding_sequence} -> FULLY REPLICATED_{index}'

            total_mesh_dim_list = []
            for mesh_dim_list in dim_partition_dict_for_input.values():
                total_mesh_dim_list.extend(mesh_dim_list)
            # if there is only one sharding dimension, we should use the value instead of list as logical_process_axis.
            if len(total_mesh_dim_list) == 1:
                total_mesh_dim_list = total_mesh_dim_list[0]
                input_comm_action = self.get_communication_action(
                    sharding_spec=sharding_spec_mapping["input"],
                    communication_pattern=CollectiveCommPattern.GATHER_FWD_SPLIT_BWD,
                    logical_process_axis=total_mesh_dim_list,
                    comm_type=CommType.BEFORE,
                    arg_index=0,
                )
                # NOTE(review): unlike ViewGenerator, gather_dim/shard_dim are
                # assigned the mesh-axis value rather than a tensor dimension —
                # looks suspicious; confirm against the comm-spec contract.
                input_comm_action.comm_spec.gather_dim = total_mesh_dim_list
                input_comm_action.comm_spec.shard_dim = total_mesh_dim_list
            elif len(total_mesh_dim_list) >= 2:
                source_spec = sharding_spec_mapping["input"]
                target_spec = ShardingSpec(
                    device_mesh=self.device_mesh, entire_shape=source_spec.entire_shape, dim_partition_dict={}
                )
                comm_spec = {"src_spec": source_spec, "tgt_spec": target_spec}
                input_comm_action = CommAction(comm_spec=comm_spec, comm_type=CommType.BEFORE, arg_index=0)
            else:
                input_comm_action = None

            if input_comm_action is not None:
                communication_action_mapping["input"] = input_comm_action

            strategy = self.get_sharding_strategy(
                name=name,
                sharding_spec_mapping=sharding_spec_mapping,
                communication_action_mapping=communication_action_mapping,
            )
            strategy_list.append(strategy)

        return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/binary_elementwise_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/binary_elementwise_generator.py
import operator
from functools import reduce
from typing import List

import torch

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem
from colossalai.auto_parallel.tensor_shard.utils import (
    enumerate_all_possible_1d_sharding,
    enumerate_all_possible_2d_sharding,
    ignore_sharding_exception,
)
from colossalai.tensor.sharding_spec import ShardingSpecException

from .strategy_generator import StrategyGenerator

__all__ = ["BinaryElementwiseStrategyGenerator"]


class BinaryElementwiseStrategyGenerator(StrategyGenerator):
    """
    A BinaryElementwiseStrategyGenerator is a node handler which deals with elementwise operations
    which have two operands and broadcasting occurs such as torch.add.

    The logical shape for this operation will be `input <op> other`.
    """

    def validate(self) -> bool:
        assert (
            len(self.op_data) == 3
        ), f"BinaryElementwiseStrategyGenerator only accepts three operation data (input, other and output), but got {len(self.op_data)}"
        for name, op_data in self.op_data.items():
            if not isinstance(op_data.data, (torch.Tensor, int, float)):
                raise TypeError(f"The operation data {name} is not a torch.Tensor/int/float.")

    def update_compute_cost(self, strategy: ShardingStrategy) -> ShardingStrategy:
        shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device()

        # since elementwise ops are not compute-intensive,
        # we approximate the backward compute cost
        # to be twice the fwd compute cost
        fwd_compute_cost = reduce(operator.mul, shape)
        bwd_compute_cost = fwd_compute_cost * 2
        compute_cost = TrainCycleItem(
            fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost
        )
        strategy.compute_cost = compute_cost

    def update_memory_cost(self, strategy: ShardingStrategy) -> ShardingStrategy:
        # Fix: removed a leftover no-op statement that called
        # get_sharded_shape_per_device() on the input spec and discarded the
        # result (dead code from an earlier revision).
        # compute fwd memory cost in bytes
        # as the elementwise ops are not memory-intensive
        # we approximate the fwd memory cost to be the output
        # and the backward memory cost to be grad of input and other
        input_bytes = self._compute_size_in_bytes(strategy, "input")
        other_bytes = self._compute_size_in_bytes(strategy, "other")
        output_bytes = self._compute_size_in_bytes(strategy, "output")
        fwd_memory_cost = MemoryCost(activation=output_bytes)
        bwd_memory_cost = MemoryCost(activation=input_bytes + other_bytes)
        total_memory_cost = MemoryCost(activation=input_bytes + other_bytes + output_bytes)
        memory_cost = TrainCycleItem(fwd=fwd_memory_cost, bwd=bwd_memory_cost, total=total_memory_cost)
        strategy.memory_cost = memory_cost

    @ignore_sharding_exception
    def enumerate_all_possible_output(self, mesh_dim_0: int, mesh_dim_1: int) -> List[ShardingStrategy]:
        """
        Build one strategy per feasible identical sharding of input, other and
        output (the same layout is applied to all three operands).
        """
        # we check for the output logical shape to get the number of dimensions
        dim_partition_list = []
        dim_size = len(self.op_data["output"].logical_shape)

        # enumerate all the 2D sharding cases
        sharding_list_2d = enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, dim_size)
        dim_partition_list.extend(sharding_list_2d)

        # enumerate all the 1D sharding cases
        sharding_list_1d_on_dim_0 = enumerate_all_possible_1d_sharding(mesh_dim_0, dim_size)
        dim_partition_list.extend(sharding_list_1d_on_dim_0)
        sharding_list_1d_on_dim_1 = enumerate_all_possible_1d_sharding(mesh_dim_1, dim_size)
        dim_partition_list.extend(sharding_list_1d_on_dim_1)

        # add empty dict for fully replicated case
        dim_partition_list.append({})

        # sharding strategy bookkeeping
        strategy_list = []

        # convert these dim partition dict to sharding strategy
        for dim_partition_dict in dim_partition_list:
            dim_partition_dict_mapping = dict(
                input=dim_partition_dict, other=dim_partition_dict, output=dim_partition_dict
            )

            try:
                sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
                communication_action_mapping = {}

                # get name
                sharding_seq = sharding_spec_mapping["input"].sharding_sequence
                name = f"{sharding_seq} = {sharding_seq} <binary-elementwise-op> {sharding_seq}"
                sharding_strategy = self.get_sharding_strategy(
                    name=name,
                    sharding_spec_mapping=sharding_spec_mapping,
                    communication_action_mapping=communication_action_mapping,
                )
                strategy_list.append(sharding_strategy)
            except ShardingSpecException:
                # this layout is not a valid sharding spec for these shapes
                continue

        return strategy_list

    def collate_strategies(self) -> List[ShardingStrategy]:
        # mesh axes 0 and 1 — assumes a 2D logical device mesh; TODO confirm.
        strategy_list = self.enumerate_all_possible_output(0, 1)
        return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/matmul_strategy_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/matmul_strategy_generator.py
import operator from functools import reduce from typing import List from colossalai.auto_parallel.tensor_shard.options import SolverPerference from colossalai.auto_parallel.tensor_shard.sharding_strategy import ( CommType, MemoryCost, ShardingStrategy, TrainCycleItem, ) from colossalai.auto_parallel.tensor_shard.utils import ignore_sharding_exception from colossalai.tensor.shape_consistency import CollectiveCommPattern from .strategy_generator import StrategyGenerator class MatMulStrategyGenerator(StrategyGenerator): """ MatMulStrategyGenerator is a generic class to cover all matrix multiplication cases. The operation data is defined as `output = input x other + bias`. """ def update_memory_cost(self, strategy: ShardingStrategy) -> ShardingStrategy: size_mapping = { "input": self._compute_size_in_bytes(strategy, "input"), "other": self._compute_size_in_bytes(strategy, "other"), "output": self._compute_size_in_bytes(strategy, "output"), } if self.has_bias: bias_size = self._compute_size_in_bytes(strategy, "bias") size_mapping["bias"] = bias_size # compute fwd cost incurred # fwd_cost = input + other + bias + output fwd_activation_cost = sum([v for k, v in size_mapping.items() if not self.is_param(k)]) fwd_parameter_cost = sum([v for k, v in size_mapping.items() if self.is_param(k)]) fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost) # compute bwd cost incurred # bwd_cost = input_grad + bias_grad bwd_activation_cost = sum([v for k, v in size_mapping.items() if k in ["input", "other", "bias"]]) bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=0) # compute total cost total_mem_cost = MemoryCost( activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + 0 ) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost class DotProductStrategyGenerator(MatMulStrategyGenerator): def validate(self) -> bool: input_op_data = 
self.op_data["input"] other_op_data = self.op_data["other"] assert input_op_data.data.dim() == 1 and other_op_data.data.dim() == 1 def update_compute_cost(self, strategy: ShardingStrategy) -> ShardingStrategy: sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device() fwd_compute_cost = sharded_input_shape[0] bwd_compute_cost = fwd_compute_cost * 2 compute_cost = TrainCycleItem( fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost ) return compute_cost @ignore_sharding_exception def no_split(self): name = f"R = R dot R" dim_partition_dict = {"input": {}, "other": {}, "output": {}, "bias": {}} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict) communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_one_dim(self, mesh_dim): name = f"R = S{mesh_dim} dot S{mesh_dim}" # get sharding spec dim_partition_dict = {"input": {0: [mesh_dim]}, "other": {0: [mesh_dim]}, "output": {}, "bias": {0: [mesh_dim]}} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict) # get communication action output_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=mesh_dim, comm_type=CommType.AFTER, ) communication_action_mapping = {"output": output_comm_action} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) def collate_strategies(self) -> List[ShardingStrategy]: strategy_list = [] # do not split dimensions for dot product # R = R dot R strategy_list.append(self.no_split()) # split two tensors in the same dimensions # S = S dot S strategy_list.append(self.split_one_dim(0)) 
strategy_list.append(self.split_one_dim(1)) return strategy_list class MatVecStrategyGenerator(MatMulStrategyGenerator): def validate(self) -> bool: input_op_data = self.op_data["input"] other_op_data = self.op_data["other"] assert input_op_data.data.dim() == 2 and other_op_data.data.dim() == 1 def update_compute_cost(self, strategy: ShardingStrategy) -> ShardingStrategy: sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device() fwd_compute_cost = sharded_input_shape[0] bwd_compute_cost = fwd_compute_cost * 2 compute_cost = TrainCycleItem( fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost ) return compute_cost @ignore_sharding_exception def no_split(self): name = "R = R x R" dim_partition_dict = {"input": {}, "other": {}, "output": {}} if self.has_bias: dim_partition_dict["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict) return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping={} ) @ignore_sharding_exception def split_input_batch(self, mesh_dim): name = f"S{mesh_dim}R = S{mesh_dim}R x R" # get sharding spec dim_partition_dict = { "input": {0: [mesh_dim]}, "other": {}, "output": {0: [mesh_dim]}, } if self.has_bias: dim_partition_dict["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict) # get communication action communication_action_mapping = {} if self.is_param("other"): other_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim, comm_type=CommType.HOOK, ) else: other_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim, comm_type=CommType.BEFORE, arg_index=1, ) 
communication_action_mapping["other"] = other_comm_action if self.has_bias: if self.is_param("bias"): bias_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim, comm_type=CommType.HOOK, ) else: bias_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim, comm_type=CommType.BEFORE, arg_index=2, ) communication_action_mapping["bias"] = bias_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) def collate_strategies(self) -> List[ShardingStrategy]: strategy_list = [] # no split strategy_list.append(self.no_split()) # split the batch dim for the first tensor only strategy_list.append(self.split_input_batch(0)) strategy_list.append(self.split_input_batch(1)) return strategy_list class LinearProjectionStrategyGenerator(MatMulStrategyGenerator): def __init__( self, operation_data_mapping, device_mesh, linear_projection_type="linear", solver_perference=SolverPerference.STANDARD, ): super().__init__(operation_data_mapping, device_mesh) self.linear_projection_type = linear_projection_type self.solver_perference = solver_perference def update_compute_cost(self, strategy: ShardingStrategy) -> ShardingStrategy: # C = AB # C: [M, N], A: [M, P], B: [P, N] # fwd cost = MNP (only count mul) # bwd: 2 x fwd_cost sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device() sharded_other_shape = strategy.sharding_specs[self.op_data["other"]].get_sharded_shape_per_device() dim_m_val = reduce(operator.mul, sharded_input_shape[:-1]) dim_n_val = sharded_other_shape[-1] dim_p_val = sharded_other_shape[0] fwd_compute_cost = dim_m_val * dim_n_val * dim_p_val bwd_compute_cost 
= fwd_compute_cost * 2 compute_cost = TrainCycleItem( fwd=bwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost ) strategy.compute_cost = compute_cost def dp_strategies(self) -> List[ShardingStrategy]: strategies = [] # S01R = S01R x RR strategies.append(self.split_lhs_1st_dim_1d(0, 1)) return strategies def tp_strategies(self) -> List[ShardingStrategy]: strategies = [] # RR = RS01 x S01R strategies.append(self.split_lhs_2nd_dim_1d(0, 1)) # RS01 = RR x RS01 strategies.append(self.split_rhs_2nd_dim_1d(0, 1)) # RS = RS x SS strategies.append(self.split_rhs_space_both_contract(0, 1)) strategies.append(self.split_rhs_space_both_contract(1, 0)) # RR= RS x SR strategies.append(self.recompute_split_both_contract(0)) strategies.append(self.recompute_split_both_contract(1)) # RS = RR x RS strategies.append(self.split_rhs_space_only(0)) strategies.append(self.split_rhs_space_only(1)) return strategies def mix_strategies(self) -> List[ShardingStrategy]: strategies = [] # SS = SR x RS strategies.append(self.split_lhs_space_rhs_space(0, 1)) strategies.append(self.split_lhs_space_rhs_space(1, 0)) # SR = SS x SR strategies.append(self.split_lhs_space_both_contract(0, 1)) strategies.append(self.split_lhs_space_both_contract(1, 0)) # RR = RR x RR strategies.append(self.non_split()) return strategies def collate_strategies(self) -> List[ShardingStrategy]: strategies = [] if self.solver_perference == SolverPerference.STANDARD: strategies.extend(self.dp_strategies()) strategies.extend(self.tp_strategies()) strategies.extend(self.mix_strategies()) elif self.solver_perference == SolverPerference.DP: strategies.extend(self.dp_strategies()) elif self.solver_perference == SolverPerference.TP: strategies.extend(self.tp_strategies()) return strategies @ignore_sharding_exception def split_lhs_space_rhs_space(self, mesh_dim_0, mesh_dim_1): # handle case SS = SR x RS name = f"S{mesh_dim_0}S{mesh_dim_1} = S{mesh_dim_0}R x RS{mesh_dim_1}" dim_partition_dict_mapping = { 
"input": {0: [mesh_dim_0]}, "other": {-1: [mesh_dim_1]}, "output": {0: [mesh_dim_0], -1: [mesh_dim_1]}, } # linear bias only has one dimension, but addmm bias has same dimensions # as the output logically. if self.linear_projection_type == "linear": dim_partition_dict_mapping["bias"] = {-1: [mesh_dim_1]} elif self.linear_projection_type == "addmm": dim_partition_dict_mapping["bias"] = {0: [mesh_dim_0], -1: [mesh_dim_1]} else: raise ("Unsupported linear projection type") sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action communication_action_mapping = {} input_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["input"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_1, comm_type=CommType.BEFORE, arg_index=0, ) if self.is_param("other"): other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, arg_index=1, ) communication_action_mapping["input"] = input_comm_action communication_action_mapping["other"] = other_comm_action # we only add allreduce comm action for linear bias, because # allreduce comm action for addmm bias will be considered in post processing if self.has_bias and self.linear_projection_type == "linear": if self.is_param("bias"): bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], 
communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, key_for_kwarg="bias", ) communication_action_mapping["bias"] = bias_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_lhs_space_both_contract(self, mesh_dim_0, mesh_dim_1): # handle the case SR = SS x SR name = f"S{mesh_dim_0}R = S{mesh_dim_0}S{mesh_dim_1} x S{mesh_dim_1}R" # get sharding spec mapping dim_partition_dict_mapping = { "input": {0: [mesh_dim_0], -1: [mesh_dim_1]}, "other": {0: [mesh_dim_1]}, "bias": {}, "output": {0: [mesh_dim_0]}, } # linear bias only has one dimension, but addmm bias has same dimensions # as the output logically. if self.linear_projection_type == "linear": dim_partition_dict_mapping["bias"] = {} elif self.linear_projection_type == "addmm": dim_partition_dict_mapping["bias"] = {0: [mesh_dim_0]} else: raise ("Unsupported linear projection type") sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # get communication action mapping communication_action_mapping = {} output_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=mesh_dim_1, comm_type=CommType.AFTER, ) if self.is_param("other"): other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, arg_index=1, ) communication_action_mapping["other"] = other_comm_action 
communication_action_mapping["output"] = output_comm_action # we only add allreduce comm action for linear bias, because # allreduce comm action for addmm bias will be considered in post processing if self.has_bias and self.linear_projection_type == "linear": if self.is_param("bias"): bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, key_for_kwarg="bias", ) communication_action_mapping["bias"] = bias_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_rhs_space_both_contract(self, mesh_dim_0, mesh_dim_1): name = f"RS{mesh_dim_1} = RS{mesh_dim_0} x S{mesh_dim_0}S{mesh_dim_1}" # get sharding specs dim_partition_dict_mapping = { "input": {-1: [mesh_dim_0]}, "other": {0: [mesh_dim_0], -1: [mesh_dim_1]}, "bias": {-1: [mesh_dim_1]}, "output": {-1: [mesh_dim_1]}, } # We don't have to do anything special for bias here, because # the bias is already the same sharding spec as the output. 
sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # get communication actions communication_action_mapping = {} output_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.AFTER, ) input_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["input"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_1, comm_type=CommType.BEFORE, arg_index=0, ) communication_action_mapping["input"] = input_comm_action communication_action_mapping["output"] = output_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def recompute_split_both_contract(self, mesh_dim): name = f"RR = RS{mesh_dim} x S{mesh_dim}R" # get sharding spec dim_partition_dict_mapping = { "input": {-1: [mesh_dim]}, "other": {0: [mesh_dim]}, "bias": {}, "output": {}, } # We don't have to do anything special for bias here, because # the bias is already the same sharding spec as the output. 
sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # get communication action communication_action_mapping = {} output_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=mesh_dim, comm_type=CommType.AFTER, ) communication_action_mapping["output"] = output_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_rhs_space_only(self, mesh_dim): name = f"RS{mesh_dim} = RR x RS{mesh_dim}" # get sharding spec dim_partition_dict_mapping = { "input": {}, "other": {-1: [mesh_dim]}, "bias": {-1: [mesh_dim]}, "output": {-1: [mesh_dim]}, } # We don't have to do anything special for bias here, because # the bias is already the same sharding spec as the output. sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # get communication actions communication_action_mapping = {} input_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["input"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim, comm_type=CommType.BEFORE, arg_index=0, ) communication_action_mapping["input"] = input_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_lhs_1st_dim_1d(self, mesh_dim_0, mesh_dim_1): name = f"S{mesh_dim_0}{mesh_dim_1}R = S{mesh_dim_0}{mesh_dim_1}R x RR" # get sharding spec dim_partition_dict_mapping = { "input": {0: [mesh_dim_0, mesh_dim_1]}, "other": {}, "bias": {}, "output": {0: [mesh_dim_0, mesh_dim_1]}, } # linear bias only has one dimension, but addmm bias has same dimensions # as the output logically. 
if self.linear_projection_type == "linear": dim_partition_dict_mapping["bias"] = {} elif self.linear_projection_type == "addmm": dim_partition_dict_mapping["bias"] = {0: [mesh_dim_0, mesh_dim_1]} else: raise ("Unsupported linear projection type") sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # get communication action communication_action_mapping = {} if self.is_param("other"): other_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.HOOK, ) else: other_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.BEFORE, arg_index=1, ) communication_action_mapping["other"] = other_comm_action # we only add allreduce comm action for linear bias, because # allreduce comm action for addmm bias will be considered in post processing if self.has_bias and self.linear_projection_type == "linear": if self.is_param("bias"): bias_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.HOOK, ) else: bias_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.BEFORE, key_for_kwarg="bias", ) communication_action_mapping["bias"] = bias_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_lhs_2nd_dim_1d(self, mesh_dim_0, 
mesh_dim_1): name = f"RR = RS{mesh_dim_0}{mesh_dim_1} x S{mesh_dim_0}{mesh_dim_1}R" # get sharding spec dim_partition_dict_mapping = { "input": {-1: [mesh_dim_0, mesh_dim_1]}, "other": {0: [mesh_dim_0, mesh_dim_1]}, "bias": {}, "output": {}, } # We don't have to do anything special for bias here, because # the bias is already the same sharding spec as the output. sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # get communication action communication_action_mapping = {} output_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.AFTER, ) communication_action_mapping["output"] = output_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_rhs_2nd_dim_1d(self, mesh_dim_0, mesh_dim_1): name = f"RS{mesh_dim_0}{mesh_dim_1} = RR x RS{mesh_dim_0}{mesh_dim_1}" # get sharding spec dim_partition_dict_mapping = { "input": {}, "other": {-1: [mesh_dim_0, mesh_dim_1]}, "bias": {-1: [mesh_dim_0, mesh_dim_1]}, "output": {-1: [mesh_dim_0, mesh_dim_1]}, } # We don't have to do anything special for bias here, because # the bias is already the same sharding spec as the output. 
sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # get communication action communication_action_mapping = {} input_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["input"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.BEFORE, arg_index=0, ) communication_action_mapping["input"] = input_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def non_split(self): name = f"RR = RR x RR" # get sharding spec dim_partition_dict_mapping = { "input": {}, "other": {}, "bias": {}, "output": {}, } # We don't have to do anything special for bias here, because # the bias is already the same sharding spec as the output. sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # get communication action communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) def validate(self) -> bool: assert "input" in self.op_data assert "other" in self.op_data # make sure the other has 2 dim input_data = self.op_data["input"] other_data = self.op_data["other"] assert input_data.data.dim() > 0 and other_data.data.dim() == 2 assert other_data.logical_shape[0] == input_data.logical_shape[-1] if self.has_bias: bias_data = self.op_data["bias"] assert bias_data.logical_shape[-1] == other_data.logical_shape[-1] class BatchedMatMulStrategyGenerator(MatMulStrategyGenerator): """ Generate sharding strategies for the batched matrix multiplication. A batched matrix multiplication can be viewed as [b, i, k] x [b, k, j] -> [b, i, j] The bias term is considered to have a 2D logical shape. 
Note: This class will be used to generate strategies for torch.bmm and torch.addbmm. However, the result of torch.addbmm is not correct, some extra runtime apply actions are required to keep numerical correctness. """ # TODO: torch.addbmm correctness issue need to be fixed. def __init__(self, *args, **kwargs): self.squeeze_batch_dim = False super().__init__(*args, **kwargs) def _pop_batch_dim_sharding_for_output(self, dim_partition_dict): # remove partition dict for dim 0 dim_partition_dict["output"].pop(0, None) # decrease the remaining dim index by 1 temp_dim_partition = {} keys = list(dim_partition_dict["output"].keys()) for key in keys: val = dim_partition_dict["output"].pop(key) temp_dim_partition[key - 1] = val dim_partition_dict["output"].update(temp_dim_partition) def validate(self) -> bool: input_op_data = self.op_data["input"] other_op_data = self.op_data["other"] assert len(input_op_data.logical_shape) == 3 or len(other_op_data.logical_shape) == 3 if "bias" in self.op_data: bias_op_data = self.op_data["bias"] assert bias_op_data.data.dim() < 3 and len(bias_op_data.logical_shape) == 2 def update_compute_cost(self, strategy: ShardingStrategy) -> ShardingStrategy: fwd_compute_cost = self.op_data["input"].data.shape[-1] * reduce( operator.mul, self.op_data["output"].data.shape ) bwd_compute_cost = fwd_compute_cost * 2 compute_cost = TrainCycleItem( fwd=fwd_compute_cost, bwd=bwd_compute_cost, total=fwd_compute_cost + bwd_compute_cost ) strategy.compute_cost = compute_cost @ignore_sharding_exception def split_one_batch_dim(self, mesh_dim): name = f"Sb{mesh_dim} = Sb{mesh_dim} x Sb{mesh_dim}" # get sharding_spec dim_partition_dict = {"input": {0: [mesh_dim]}, "other": {0: [mesh_dim]}, "bias": {}, "output": {0: [mesh_dim]}} if self.squeeze_batch_dim: self._pop_batch_dim_sharding_for_output(dim_partition_dict) sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict) # get communication actions communication_action_mapping = {} if self.has_bias: 
bias_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["bias"],
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/output_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/output_generator.py
from typing import Dict, List from torch.fx import Node from colossalai.auto_parallel.tensor_shard.sharding_strategy import ( MemoryCost, OperationData, ShardingStrategy, TrainCycleItem, ) from colossalai.device.device_mesh import DeviceMesh from .strategy_generator import OutputStrategyGenerator __all__ = ["OutputGenerator"] class OutputGenerator(OutputStrategyGenerator): """ OutputGenerator is a generic class to generate strategies for Output Node. """ def __init__( self, operation_data_mapping: Dict[str, OperationData], device_mesh: DeviceMesh, predecessor_nodes: List[Node], output_option: str, ): super().__init__(operation_data_mapping, device_mesh, predecessor_nodes) self.output_option = output_option def validate(self) -> bool: return super().validate() def update_compute_cost(self, strategy: ShardingStrategy): compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20) strategy.compute_cost = compute_cost def update_memory_cost(self, strategy: ShardingStrategy): """ Compute the memory cost per device with this specific strategy. """ fwd_mem_cost = MemoryCost(activation=0, parameter=0) bwd_mem_cost = MemoryCost(activation=0, parameter=0) # compute total cost total_mem_cost = MemoryCost(activation=0, parameter=0) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost def replica_strategy(self) -> List[ShardingStrategy]: """ Generate replica strategy for output node. 
""" dim_partition_dict_mapping = {} dim_partition_dict_for_output = [] for index, _ in enumerate(self.predecessor_nodes): mapping_name = f"input_{index}" if isinstance(self.op_data[mapping_name].data, (tuple, list)): dim_partition_dict_for_input = [{} for _ in range(len(self.op_data[mapping_name].data))] else: dim_partition_dict_for_input = {} dim_partition_dict_mapping[mapping_name] = dim_partition_dict_for_input dim_partition_dict_for_output.append(dim_partition_dict_for_input) if len(dim_partition_dict_for_output) == 1: dim_partition_dict_for_output = dim_partition_dict_for_output[0] else: dim_partition_dict_for_output = tuple(dim_partition_dict_for_output) dim_partition_dict_mapping["output"] = dim_partition_dict_for_output communication_action_mapping = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) name = "Replica Output" strategy = self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) return strategy def distributed_strategy(self, mesh_list: List[List[int]] = None) -> List[ShardingStrategy]: """ Generate distributed strategy for output node. """ # TODO: need to take care of the case when the first element of output only need to be sharded. 
output_op_data = self.op_data["output"] if isinstance(output_op_data.data, tuple): length = len(output_op_data.data) dim_partition_dict_mapping = { "output": [{0: mesh_list}] * length, } else: dim_partition_dict_mapping = { "output": {0: mesh_list}, } for index, _ in enumerate(self.predecessor_nodes): mapping_name = f"input_{index}" dim_partition_dict_mapping[mapping_name] = {0: mesh_list} communication_action_mapping = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) name = "Distributed Output" strategy = self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) return strategy def collate_strategies(self) -> List[ShardingStrategy]: strategy_list = [] mesh_list = [0, 1] if self.output_option == "replicated": strategy_list.append(self.replica_strategy()) elif self.output_option == "distributed": strategy_list.append(self.distributed_strategy(mesh_list)) return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/batch_norm_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/batch_norm_generator.py
import copy import operator from functools import reduce from typing import List from colossalai.auto_parallel.tensor_shard.sharding_strategy import ( CommType, MemoryCost, ShardingStrategy, TrainCycleItem, ) from colossalai.auto_parallel.tensor_shard.utils import ignore_sharding_exception from colossalai.tensor.shape_consistency import CollectiveCommPattern from .strategy_generator import StrategyGenerator __all__ = ["BatchNormStrategyGenerator"] class BatchNormStrategyGenerator(StrategyGenerator): """ A StrategyGenerator which deals with the sharding strategies of batch normalization. To keep the math consistency, there are two way to do BatchNorm if the input shards on batch dimension: 1. We gather the input partitions through batch dimension, then do the normal BatchNorm. 2. We do the SyncBatchNorm on the each input partition separately, the SyncBN op will help us to keep the computing correctness. In this generator, both methods will be considered. """ def validate(self) -> bool: """ In sanity check, we need make sure the input data having correct dimension size. For BatchNorm1d, the dim of input data should be 3([N, C, L]). For BatchNorm2d, the dim of input data should be 4([N, C, H, W]). For BatchNorm3d, the dim of input data should be 5([N, C, H, W, D]). """ input_op_data = self.op_data["input"] assert input_op_data.data.dim() in ( 3, 4, 5, ), f"We suppose the dim of input fed into conv op should in range of [3, 5]." def update_compute_cost(self, strategy: ShardingStrategy): """ Compute the computation cost per device with this specific strategy. Note: compute_cost need to be divided by TFLOPS, now it just shows the computation size. """ # TODO: a constant coefficient need to be added. 
# 1D: (L) * N * Cin # 2D: (H * W) * N * Cin # 3D: (H * W * D) * N * Cin sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device() sharded_output_shape = strategy.sharding_specs[self.op_data["output"]].get_sharded_shape_per_device() if self.has_bias: # bias add is an element wise operation, so the cost is equal to product of output shape. bias_compute_cost = reduce(operator.mul, sharded_output_shape) input_product = reduce(operator.mul, sharded_input_shape, 1) forward_compute_cost = input_product backward_activation_compute_cost = input_product backward_weight_compute_cost = input_product backward_compute_cost = backward_weight_compute_cost + backward_activation_compute_cost if self.has_bias: forward_compute_cost += bias_compute_cost backward_compute_cost += bias_compute_cost total_compute_cost = forward_compute_cost + backward_compute_cost compute_cost = TrainCycleItem(fwd=forward_compute_cost, bwd=backward_compute_cost, total=total_compute_cost) strategy.compute_cost = compute_cost def update_memory_cost(self, strategy: ShardingStrategy): forward_size_mapping = { "input": self._compute_size_in_bytes(strategy, "input"), "other": self._compute_size_in_bytes(strategy, "other"), "output": self._compute_size_in_bytes(strategy, "output"), "running_mean": self._compute_size_in_bytes(strategy, "running_mean"), "running_var": self._compute_size_in_bytes(strategy, "running_var"), } if self.has_bias: bias_size = self._compute_size_in_bytes(strategy, "bias") forward_size_mapping["bias"] = bias_size backward_size_mapping = copy.deepcopy(forward_size_mapping) backward_size_mapping.pop("output") # compute fwd cost incurred # fwd_cost = input + other + bias + output fwd_activation_cost = sum( [v for k, v in forward_size_mapping.items() if not self.is_param(k) and not self.is_buffer(k)] ) fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)]) fwd_buffer_cost = sum([v for k, v in 
forward_size_mapping.items() if self.is_buffer(k)]) fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost, buffer=fwd_buffer_cost) # compute bwd cost incurred # bwd_cost = input_grad + other_grad + bias_grad bwd_activation_cost = sum( [v for k, v in backward_size_mapping.items() if not self.is_param(k) and not self.is_buffer(k)] ) bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)]) bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost) # compute total cost total_mem_cost = MemoryCost( activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost, buffer=fwd_buffer_cost, ) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost @ignore_sharding_exception def split_input_channel(self, mesh_dim_0): name = f"RS{mesh_dim_0} = RS{mesh_dim_0} x S{mesh_dim_0}" dim_partition_dict_mapping = { "input": {1: [mesh_dim_0]}, "other": {0: [mesh_dim_0]}, "output": {1: [mesh_dim_0]}, "running_mean": {0: [mesh_dim_0]}, "running_var": {0: [mesh_dim_0]}, "num_batches_tracked": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {0: [mesh_dim_0]} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_input_channel_1d(self, mesh_dim_0, mesh_dim_1): name = f"RS{mesh_dim_0}{mesh_dim_1} = RS{mesh_dim_0}{mesh_dim_1} x S{mesh_dim_0}{mesh_dim_1}" dim_partition_dict_mapping = { "input": {1: [mesh_dim_0, mesh_dim_1]}, "other": {0: [mesh_dim_0, mesh_dim_1]}, "output": {1: [mesh_dim_0, mesh_dim_1]}, "running_mean": {0: [mesh_dim_0, mesh_dim_1]}, "running_var": {0: [mesh_dim_0, mesh_dim_1]}, "num_batches_tracked": {}, } if 
self.has_bias: dim_partition_dict_mapping["bias"] = {0: [mesh_dim_0, mesh_dim_1]} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def non_split(self): name = f"RR = RR x R" dim_partition_dict_mapping = { "input": {}, "other": {}, "output": {}, "running_mean": {}, "running_var": {}, "num_batches_tracked": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_input_batch(self, mesh_dim_0): name = f"S{mesh_dim_0}R = S{mesh_dim_0}R x R WITH SYNC_BN" dim_partition_dict_mapping = { "input": {0: [mesh_dim_0]}, "other": {}, "output": {0: [mesh_dim_0]}, "running_mean": {}, "running_var": {}, "num_batches_tracked": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action # For SyncBN case, we don't need to do communication for weight and bias. # TODO: the communication happens internally at SyncBN operation. We need to replace the BN operation # to SyncBN operation instead of inserting a communication node. output_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.IMPLICIT, ) # TODO: Temporary solution has no communication cost, # above action should be added after the SyncBN replace pass completed. 
communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_input_batch_1d(self, mesh_dim_0, mesh_dim_1): name = f"S{mesh_dim_0}{mesh_dim_1}R = S{mesh_dim_0}{mesh_dim_1}R x R WITH SYNC_BN" dim_partition_dict_mapping = { "input": {0: [mesh_dim_0, mesh_dim_1]}, "other": {}, "output": {0: [mesh_dim_0, mesh_dim_1]}, "running_mean": {}, "running_var": {}, "num_batches_tracked": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action # For SyncBN case, we don't need to do communication for gradients of weight and bias. # TODO: the communication happens internally at SyncBN operation. We need to replace the BN operation # to SyncBN operation instead of inserting a communication node. output_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.IMPLICIT, ) # TODO: Temporary solution has no communication cost, # above action should be added after the SyncBN replace pass completed. 
communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_input_both_dim(self, mesh_dim_0, mesh_dim_1): name = f"S{mesh_dim_0}S{mesh_dim_1} = S{mesh_dim_0}S{mesh_dim_1} x S{mesh_dim_1} WITH SYNC_BN" dim_partition_dict_mapping = { "input": { 0: [mesh_dim_0], 1: [mesh_dim_1], }, "other": { 0: [mesh_dim_1], }, "output": { 0: [mesh_dim_0], 1: [mesh_dim_1], }, "running_mean": { 0: [mesh_dim_1], }, "running_var": { 0: [mesh_dim_1], }, "num_batches_tracked": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = { 0: [mesh_dim_1], } sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action # For SyncBN case, we don't need to do communication for gradients of weight and bias. # TODO: the communication happens internally at SyncBN operation. We need to replace the BN operation # to SyncBN operation instead of inserting a communication node. output_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=[mesh_dim_0], comm_type=CommType.IMPLICIT, ) # TODO: Temporary solution has no communication cost, # above action should be added after the SyncBN replace pass completed. communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) def collate_strategies(self) -> List[ShardingStrategy]: """ Generate every possible strategies for a BatchNorm node, and record all strategies into the strategies_vector. 
""" strategy_list = [] # RS = RS x S strategy_list.append(self.split_input_channel(0)) strategy_list.append(self.split_input_channel(1)) # RR = RR x R strategy_list.append(self.non_split()) # RS01 = RS01 x S01 strategy_list.append(self.split_input_channel_1d(0, 1)) # The strategies with SYNC_BN are temporarily commented, # because it requires some additional passes to keep runtime # computation correctness. # TODO: The strategies below should be uncommented after runtime # passes ready. # SR = SR x R WITH SYNC_BN strategy_list.append(self.split_input_batch(0)) strategy_list.append(self.split_input_batch(1)) # SS = SS x S WITH SYNC_BN strategy_list.append(self.split_input_both_dim(0, 1)) strategy_list.append(self.split_input_both_dim(1, 0)) # S01R = S01R x R WITH SYNC_BN strategy_list.append(self.split_input_batch_1d(0, 1)) return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/sum_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/sum_generator.py
import copy import operator from functools import reduce from typing import List from colossalai.auto_parallel.tensor_shard.node_handler.strategy.strategy_generator import FollowingStrategyGenerator from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem __all__ = ["SumGenerator"] class SumGenerator(FollowingStrategyGenerator): """ SumGenerator deals with the sharding strategies of torch.sum op. """ def validate(self) -> bool: return super().validate() def update_compute_cost(self, strategy: ShardingStrategy): sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device() sharded_output_shape = strategy.sharding_specs[self.op_data["output"]].get_sharded_shape_per_device() input_size_product = reduce(operator.mul, sharded_input_shape) output_size_product = reduce(operator.mul, sharded_output_shape) compute_cost = TrainCycleItem( fwd=input_size_product, bwd=output_size_product, total=input_size_product + output_size_product ) strategy.compute_cost = compute_cost def update_memory_cost(self, strategy: ShardingStrategy): """ Compute the memory cost per device with this specific strategy. 
""" forward_size_mapping = { "input": self._compute_size_in_bytes(strategy, "input"), "output": self._compute_size_in_bytes(strategy, "output"), } backward_size_mapping = copy.deepcopy(forward_size_mapping) backward_size_mapping.pop("output") # compute fwd cost incurred # fwd_cost = input + output fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)]) fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)]) fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost) # compute bwd cost incurred # bwd_cost = input_grad bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)]) bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)]) bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost) # compute total cost total_mem_cost = MemoryCost( activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost ) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost def collate_strategies(self) -> List[ShardingStrategy]: strategy_list = [] for index, strategy in enumerate(self.predecessor_node.strategies_vector): dim_partition_dict_mapping = {} communication_action_mapping = {} input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]] dim_partition_dict_for_input = copy.deepcopy(input_sharding_spec.dim_partition_dict) sum_dims, sum_mapping_dict = self.op_data["sum_info"].data # TODO: a better way to handle the distributed sum is sum all the data on chip and then do all reduce # among all the shard groups recover_dims = [] dim_partition_dict_for_output = {} for dim in dim_partition_dict_for_input: if dim in sum_dims: recover_dims.append(dim) elif dim in sum_mapping_dict: dim_partition_dict_for_output[sum_mapping_dict[dim]] = 
dim_partition_dict_for_input[dim] else: raise RuntimeError(f"dim {dim} is not in sum_mapping_dict or sum_dims") for dim in recover_dims: dim_partition_dict_for_input.pop(dim) dim_partition_dict_mapping = { "input": dim_partition_dict_for_input, "output": dim_partition_dict_for_output, } sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # add index into name to pass the duplicated check # we keep same strategies with different name for node merging, and it will not increase the searching space, # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node. name = f'{sharding_spec_mapping["input"].sharding_sequence} -> {sharding_spec_mapping["output"].sharding_sequence}_{index}' strategy = self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) strategy_list.append(strategy) return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/getitem_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/getitem_generator.py
import copy from typing import List from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem from colossalai.logging import get_dist_logger from colossalai.tensor.sharding_spec import ShardingSpecException from .strategy_generator import FollowingStrategyGenerator __all__ = ["GetItemStrategyGenerator", "TensorStrategyGenerator", "TensorTupleStrategyGenerator"] class GetItemStrategyGenerator(FollowingStrategyGenerator): """ GetItemStrategyGenerator is a generic class to generate strategies for operator.getitem. The operation data is defined as `output = input[other]`. There are mainly three use cases: 1. args_0._meta_data: torch.Tensor, args_1._meta_data: int 2. args_0._meta_data: torch.Tensor, args_1._meta_data: slice 3. args_0._meta_data: Tuple[torch.Tensor], args_1._meta_data: int """ def validate(self) -> bool: return super().validate() def update_compute_cost(self, strategy: ShardingStrategy): compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20) strategy.compute_cost = compute_cost def update_memory_cost(self, strategy: ShardingStrategy): """ Compute the memory cost per device with this specific strategy. 
""" forward_size_mapping = { "input": self._compute_size_in_bytes(strategy, "input"), "output": self._compute_size_in_bytes(strategy, "output"), } backward_size_mapping = copy.deepcopy(forward_size_mapping) backward_size_mapping.pop("output") # compute fwd cost incurred # fwd_cost = input + output fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)]) fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)]) fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost) # compute bwd cost incurred # bwd_cost = input_grad bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)]) bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)]) bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost) # compute total cost total_mem_cost = MemoryCost( activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost ) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost class TensorStrategyGenerator(GetItemStrategyGenerator): """ Deal with case 1 and 2. 
""" def collate_strategies(self) -> List[ShardingStrategy]: strategy_list = [] getitem_index = self.op_data["index"].data for index, strategy in enumerate(self.predecessor_node.strategies_vector): try: logger = get_dist_logger() dim_partition_dict_mapping = {} communication_action_mapping = {} dim_partition_dict_for_input = copy.deepcopy( strategy.output_sharding_specs[self.op_data["input"]].dim_partition_dict ) int_index = False if isinstance(getitem_index, int): int_index = True getitem_dims = [ 0, ] shift_length = 1 elif isinstance(getitem_index, slice): getitem_dims = [ 0, ] else: getitem_dims = [i for i in range(len(getitem_index))] if isinstance(getitem_index[0], int): int_index = True shift_length = len(getitem_index) gather_dims = [] for dim in getitem_dims: if dim in dim_partition_dict_for_input: gather_dims.append(dim) for dim in gather_dims: dim_partition_dict_for_input.pop(dim) dim_partition_dict_for_output = copy.deepcopy(dim_partition_dict_for_input) if int_index: shift_dim_partition_dict_for_output = {} for dim, mesh_dim_list in dim_partition_dict_for_output.items(): shift_dim_partition_dict_for_output[dim - shift_length] = mesh_dim_list dim_partition_dict_for_output = shift_dim_partition_dict_for_output dim_partition_dict_mapping = { "input": dim_partition_dict_for_input, "output": dim_partition_dict_for_output, } sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) name = f'{sharding_spec_mapping["output"].sharding_sequence} = {sharding_spec_mapping["input"].sharding_sequence}_{index}' strategy = self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) except ShardingSpecException as e: logger.debug(e) continue strategy_list.append(strategy) for strategy in strategy_list: self.update_communication_cost(strategy) self.update_compute_cost(strategy) self.update_memory_cost(strategy) return strategy_list class 
TensorTupleStrategyGenerator(GetItemStrategyGenerator): """ Deal with case 3. """ def collate_strategies(self) -> List[ShardingStrategy]: strategy_list = [] index = self.op_data["index"].data for strategy_index, strategy in enumerate(self.predecessor_node.strategies_vector): # the sharding spec for input in this case is a tuple of ShardingSpec. sharding_spec_for_input = strategy.output_sharding_specs[self.op_data["input"]] dim_partition_dict_for_output = sharding_spec_for_input[index].dim_partition_dict dim_partition_dict_mapping = {} communication_action_mapping = {} dim_partition_dict_mapping = { "output": dim_partition_dict_for_output, } sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) sharding_spec_mapping["input"] = sharding_spec_for_input input_sharding_info = f"get the {index} element from (" for sharding_spec in sharding_spec_for_input: input_sharding_info += f"{sharding_spec.sharding_sequence}, " input_sharding_info += ")" name = f'{sharding_spec_mapping["output"].sharding_sequence} = {input_sharding_info}_{strategy_index}' strategy = self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) strategy_list.append(strategy) return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/unary_elementwise_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/unary_elementwise_generator.py
import copy from typing import List from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem from .strategy_generator import FollowingStrategyGenerator __all__ = ["UnaryElementwiseGenerator"] class UnaryElementwiseGenerator(FollowingStrategyGenerator): """ UnaryElementwiseGenerator which deals with the sharding strategies of UnaryElementwiseOp. """ def validate(self) -> bool: return super().validate() def update_compute_cost(self, strategy: ShardingStrategy): compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20) strategy.compute_cost = compute_cost def update_memory_cost(self, strategy: ShardingStrategy): """ Compute the memory cost per device with this specific strategy. """ forward_size_mapping = { "input": self._compute_size_in_bytes(strategy, "input"), "output": self._compute_size_in_bytes(strategy, "output"), } backward_size_mapping = copy.deepcopy(forward_size_mapping) backward_size_mapping.pop("output") # compute fwd cost incurred # fwd_cost = input + output fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)]) fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)]) fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost) # compute bwd cost incurred # bwd_cost = input_grad bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)]) bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)]) bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost) # compute total cost total_mem_cost = MemoryCost( activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost ) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost def collate_strategies(self) -> List[ShardingStrategy]: strategy_list = [] # 
For element-wise function, we keep the sharding spec of output node same as # the input. Therefore, the different strategies of input node with same # output sharding spec will generate same strategy for element-wise function. for index, strategy in enumerate(self.predecessor_node.strategies_vector): dim_partition_dict_mapping = {} communication_action_mapping = {} input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]] dim_partition_dict_for_input = input_sharding_spec.dim_partition_dict dim_partition_dict_for_output = copy.deepcopy(dim_partition_dict_for_input) dim_partition_dict_mapping = { "input": dim_partition_dict_for_input, "output": dim_partition_dict_for_output, } sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # add index into name to pass the duplicated check # we keep same strategies with different name for node merging, and it will not increase the searching space, # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node. name = f'{sharding_spec_mapping["input"].sharding_sequence} -> {sharding_spec_mapping["output"].sharding_sequence}_{index}' strategy = self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) strategy_list.append(strategy) return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/conv_strategy_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/conv_strategy_generator.py
import copy import operator from functools import reduce from typing import List from colossalai.auto_parallel.tensor_shard.sharding_strategy import ( CommType, MemoryCost, ShardingStrategy, TrainCycleItem, ) from colossalai.auto_parallel.tensor_shard.utils import ignore_sharding_exception from colossalai.tensor.shape_consistency import CollectiveCommPattern from .strategy_generator import StrategyGenerator class ConvStrategyGenerator(StrategyGenerator): """ ConvStrategyGenerator is a generic class to generate strategies. The operation data is defined as `output = input x other + bias`. """ def validate(self) -> bool: """ In sanity check, we need make sure the input data having correct dimension size. For Conv1d, the dim of input data should be 3([N, C, L]). For Conv2d, the dim of input data should be 4([N, C, H, W]). For Conv3d, the dim of input data should be 5([N, C, H, W, D]). """ input_op_data = self.op_data["input"] assert input_op_data.data.dim() in ( 3, 4, 5, ), f"We suppose the dim of input fed into conv op should in range of [3, 5]." def update_compute_cost(self, strategy: ShardingStrategy): """ Compute the computation cost per device with this specific strategy. Note: compute_cost need to be divided by TFLOPS, now it just shows the computation size. """ # TODO: compute_cost need to be divided by TFLOPS, now it just shows the computation size. # 1D: (L) * N * Cout * Cin * kernel # 2D: (H * W) * N * Cout * Cin * kernel # 3D: (H * W * D) * N * Cout * Cin * kernel sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device() sharded_other_shape = strategy.sharding_specs[self.op_data["other"]].get_sharded_shape_per_device() sharded_output_shape = strategy.sharding_specs[self.op_data["output"]].get_sharded_shape_per_device() if self.has_bias: # bias add is an element wise operation, so the cost is equal to product of output shape. 
bias_compute_cost = reduce(operator.mul, sharded_output_shape) output_size = sharded_output_shape[2:] output_size_product = reduce(operator.mul, output_size) input_size = sharded_input_shape[2:] input_size_product = reduce(operator.mul, input_size, 1) kernel_size = sharded_other_shape[2:] kernel_size_product = reduce(operator.mul, kernel_size, 1) batch_size = sharded_input_shape[0] channel_in = sharded_input_shape[1] channel_out = sharded_other_shape[1] forward_compute_cost = output_size_product * batch_size * channel_in * channel_out * kernel_size_product backward_activation_cost = input_size_product * batch_size * channel_in * channel_out * kernel_size_product backward_weight_cost = output_size_product * batch_size * channel_in * channel_out * kernel_size_product backward_compute_cost = backward_weight_cost + backward_activation_cost if self.has_bias: forward_compute_cost += bias_compute_cost backward_compute_cost += bias_compute_cost total_compute_cost = forward_compute_cost + backward_compute_cost compute_cost = TrainCycleItem(fwd=forward_compute_cost, bwd=backward_compute_cost, total=total_compute_cost) strategy.compute_cost = compute_cost def update_memory_cost(self, strategy: ShardingStrategy): forward_size_mapping = { "input": self._compute_size_in_bytes(strategy, "input"), "other": self._compute_size_in_bytes(strategy, "other"), "output": self._compute_size_in_bytes(strategy, "output"), } if self.has_bias: bias_size = self._compute_size_in_bytes(strategy, "bias") forward_size_mapping["bias"] = bias_size backward_size_mapping = copy.deepcopy(forward_size_mapping) backward_size_mapping.pop("output") # compute fwd cost incurred # fwd_cost = input + other + bias + output fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)]) fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)]) fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost) # compute bwd cost 
incurred # bwd_cost = input_grad + other_grad + bias_grad bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)]) bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)]) bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost) # compute total cost total_mem_cost = MemoryCost( activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost ) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost @ignore_sharding_exception def split_input_batch_weight_out_channel(self, mesh_dim_0, mesh_dim_1): name = f"S{mesh_dim_0}S{mesh_dim_1} = S{mesh_dim_0}R x RS{mesh_dim_1}" dim_partition_dict_mapping = { "input": {0: [mesh_dim_0]}, "other": {1: [mesh_dim_1]}, "output": {0: [mesh_dim_0], 1: [mesh_dim_1]}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {0: [mesh_dim_1]} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action input_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["input"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_1, comm_type=CommType.BEFORE, arg_index=0, ) communication_action_mapping = {"input": input_comm_action} if self.is_param("other"): other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, arg_index=1, ) communication_action_mapping["other"] = other_comm_action if self.has_bias: if self.is_param("bias"): 
bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, key_for_kwarg="bias", ) communication_action_mapping["bias"] = bias_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_input_batch(self, mesh_dim_0): name = f"S{mesh_dim_0}R = S{mesh_dim_0}R x RR" dim_partition_dict_mapping = { "input": {0: [mesh_dim_0]}, "other": {}, "output": { 0: [mesh_dim_0], }, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) communication_action_mapping = {} if self.is_param("other"): other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, arg_index=1, ) communication_action_mapping["other"] = other_comm_action if self.has_bias: if self.is_param("bias"): bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], 
communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, key_for_kwarg="bias", ) communication_action_mapping["bias"] = bias_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_input_both_dim_weight_in_channel(self, mesh_dim_0, mesh_dim_1): name = f"S{mesh_dim_0}R = S{mesh_dim_0}S{mesh_dim_1} x S{mesh_dim_1}R" dim_partition_dict_mapping = { "input": { 0: [mesh_dim_0], 1: [mesh_dim_1], }, "other": {0: [mesh_dim_1]}, "output": { 0: [mesh_dim_0], }, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action output_comm_action = self.get_communication_action( sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=mesh_dim_1, comm_type=CommType.AFTER, ) communication_action_mapping = {"output": output_comm_action} if self.is_param("other"): other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, arg_index=1, ) communication_action_mapping["other"] = other_comm_action if self.has_bias: if self.is_param("bias"): bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.HOOK, ) else: bias_comm_action = self.get_communication_action( 
sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, key_for_kwarg="bias", ) communication_action_mapping["bias"] = bias_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_input_in_channel_weight_both_channel(self, mesh_dim_0, mesh_dim_1): name = f"RS{mesh_dim_1} = RS{mesh_dim_0} x S{mesh_dim_0}S{mesh_dim_1}" dim_partition_dict_mapping = { "input": { 1: [mesh_dim_0], }, "other": { 0: [mesh_dim_0], 1: [mesh_dim_1], }, "output": { 1: [mesh_dim_1], }, } if self.has_bias: dim_partition_dict_mapping["bias"] = { 0: [mesh_dim_1], } sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action output_comm_action = self.get_communication_action( sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.AFTER, ) input_comm_action = self.get_communication_action( sharding_spec_mapping["input"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_1, comm_type=CommType.BEFORE, arg_index=0, ) communication_action_mapping = {"output": output_comm_action, "input": input_comm_action} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_input_in_channel_weight_in_channel(self, mesh_dim_0): name = f"RR = RS{mesh_dim_0} x S{mesh_dim_0}R" dim_partition_dict_mapping = { "input": { 1: [mesh_dim_0], }, "other": { 0: [mesh_dim_0], }, "output": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication 
action output_comm_action = self.get_communication_action( sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.AFTER, ) communication_action_mapping = {"output": output_comm_action} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_weight_out_channel(self, mesh_dim_0): name = f"RS{mesh_dim_0} = RR x RS{mesh_dim_0}" dim_partition_dict_mapping = { "input": {}, "other": { 1: [mesh_dim_0], }, "output": { 1: [mesh_dim_0], }, } if self.has_bias: dim_partition_dict_mapping["bias"] = { 0: [mesh_dim_0], } sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action input_comm_action = self.get_communication_action( sharding_spec_mapping["input"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=mesh_dim_0, comm_type=CommType.BEFORE, arg_index=0, ) communication_action_mapping = {"input": input_comm_action} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def non_split(self): name = f"RR = RR x RR" dim_partition_dict_mapping = { "input": {}, "other": {}, "output": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping={} ) @ignore_sharding_exception def split_1d_parallel_on_input_batch(self, mesh_dim_0, mesh_dim_1): name = f"S{mesh_dim_0}{mesh_dim_1}R = S{mesh_dim_0}{mesh_dim_1}R x RR" dim_partition_dict_mapping = { "input": { 0: [mesh_dim_0, mesh_dim_1], }, "other": {}, "output": { 0: [mesh_dim_0, mesh_dim_1], 
}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) communication_action_mapping = {} if self.is_param("other"): other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.HOOK, ) else: other_comm_action = self.get_communication_action( sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.BEFORE, arg_index=1, ) communication_action_mapping["other"] = other_comm_action if self.has_bias: if self.is_param("bias"): bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.HOOK, ) else: bias_comm_action = self.get_communication_action( sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.BEFORE, key_for_kwarg="bias", ) communication_action_mapping["bias"] = bias_comm_action return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_1d_parallel_on_in_channel(self, mesh_dim_0, mesh_dim_1): name = f"RR = RS{mesh_dim_0}{mesh_dim_1} x S{mesh_dim_0}{mesh_dim_1}R" dim_partition_dict_mapping = { "input": { 1: [mesh_dim_0, mesh_dim_1], }, "other": { 0: [mesh_dim_0, mesh_dim_1], }, "output": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action output_comm_action = self.get_communication_action( 
sharding_spec_mapping["output"], communication_pattern=CollectiveCommPattern.ALLREDUCE_FWD_IDENTITY_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.AFTER, ) communication_action_mapping = {"output": output_comm_action} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) @ignore_sharding_exception def split_1d_parallel_on_out_channel(self, mesh_dim_0, mesh_dim_1): name = f"RS{mesh_dim_0}{mesh_dim_1} = RR x RS{mesh_dim_0}{mesh_dim_1}" dim_partition_dict_mapping = { "input": {}, "other": { 1: [mesh_dim_0, mesh_dim_1], }, "output": { 1: [mesh_dim_0, mesh_dim_1], }, } if self.has_bias: dim_partition_dict_mapping["bias"] = { 0: [mesh_dim_0, mesh_dim_1], } sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) # set communication action input_comm_action = self.get_communication_action( sharding_spec_mapping["input"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=[mesh_dim_0, mesh_dim_1], comm_type=CommType.BEFORE, arg_index=0, ) communication_action_mapping = {"input": input_comm_action} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) def collate_strategies(self) -> List[ShardingStrategy]: strategies = [] # SS = SR x RS strategies.append(self.split_input_batch_weight_out_channel(0, 1)) strategies.append(self.split_input_batch_weight_out_channel(1, 0)) # SR = SR x RR strategies.append(self.split_input_batch(0)) strategies.append(self.split_input_batch(1)) # SR = SS x SR strategies.append(self.split_input_both_dim_weight_in_channel(0, 1)) strategies.append(self.split_input_both_dim_weight_in_channel(1, 0)) # RS = RS x SS strategies.append(self.split_input_in_channel_weight_both_channel(0, 1)) strategies.append(self.split_input_in_channel_weight_both_channel(1, 0)) # RR = RS x 
SR strategies.append(self.split_input_in_channel_weight_in_channel(0)) strategies.append(self.split_input_in_channel_weight_in_channel(1)) # RS = RR x RS strategies.append(self.split_weight_out_channel(0)) strategies.append(self.split_weight_out_channel(1)) # RR= RR x RR strategies.append(self.non_split()) # S01R = S01R x RR strategies.append(self.split_1d_parallel_on_input_batch(0, 1)) # RR = RS01 x S01R strategies.append(self.split_1d_parallel_on_in_channel(0, 1)) # RS01 = RR x RS01 strategies.append(self.split_1d_parallel_on_out_channel(0, 1)) return strategies
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/layer_norm_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/layer_norm_generator.py
import copy import operator from functools import reduce from typing import List from colossalai.auto_parallel.tensor_shard.sharding_strategy import ( CommType, MemoryCost, ShardingStrategy, TrainCycleItem, ) from colossalai.auto_parallel.tensor_shard.utils import ( enumerate_all_possible_1d_sharding, enumerate_all_possible_2d_sharding, ignore_sharding_exception, ) from colossalai.tensor.shape_consistency import CollectiveCommPattern from .strategy_generator import StrategyGenerator __all__ = ["LayerNormGenerator"] class LayerNormGenerator(StrategyGenerator): """ LayerNormGenerator is a generic class to generate strategies for LayerNorm operation. The operation data is defined as `output = input x other + bias`. """ def validate(self) -> bool: return super().validate() def update_compute_cost(self, strategy: ShardingStrategy): """ Compute the computation cost per device with this specific strategy. Note: compute_cost need to be divided by TFLOPS, now it just shows the computation size. """ # TODO: compute_cost need to be divided by TFLOPS, now it just shows the computation size. # TODO: a constant coefficient need to be added. sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device() sharded_weight_shape = strategy.sharding_specs[self.op_data["other"]].get_sharded_shape_per_device() if self.has_bias: # bias add is an element wise operation, so the cost is equal to product of output shape. bias_compute_cost = reduce(operator.mul, sharded_weight_shape) # in LayerNorm context, batch dimensions mean all the dimensions do not join the normalization. 
input_batch_shape = sharded_input_shape[: -len(sharded_weight_shape)] input_batch_product = reduce(operator.mul, input_batch_shape, 1) norm_kernel_product = reduce(operator.mul, sharded_weight_shape, 1) forward_compute_cost = input_batch_product * norm_kernel_product backward_activation_compute_cost = input_batch_product * norm_kernel_product # To compute gradient of on norm kernel element requires input_batch_product times computation, so # the total cost is input_batch_product * norm_kernel_product backward_weight_compute_cost = input_batch_product * norm_kernel_product backward_compute_cost = backward_activation_compute_cost + backward_weight_compute_cost if self.has_bias: forward_compute_cost += bias_compute_cost backward_compute_cost += bias_compute_cost total_compute_cost = forward_compute_cost + backward_compute_cost compute_cost = TrainCycleItem(fwd=forward_compute_cost, bwd=backward_compute_cost, total=total_compute_cost) strategy.compute_cost = compute_cost def update_memory_cost(self, strategy: ShardingStrategy): """ Compute the memory cost per device with this specific strategy. 
""" forward_size_mapping = { "input": self._compute_size_in_bytes(strategy, "input"), "other": self._compute_size_in_bytes(strategy, "other"), "output": self._compute_size_in_bytes(strategy, "output"), } if self.has_bias: bias_size = self._compute_size_in_bytes(strategy, "bias") forward_size_mapping["bias"] = bias_size backward_size_mapping = copy.deepcopy(forward_size_mapping) backward_size_mapping.pop("output") # compute fwd cost incurred # fwd_cost = input + other + bias + output fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)]) fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)]) fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost) # compute bwd cost incurred # bwd_cost = input_grad + other_grad + bias_grad bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)]) bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)]) bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost) # compute total cost total_mem_cost = MemoryCost( activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost ) memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost) strategy.memory_cost = memory_cost @ignore_sharding_exception def _generate_strategy_with_dim_partition(self, dim_partition): dim_partition_dict_mapping = { "input": dim_partition, "other": {}, "output": dim_partition, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping) name = f'{sharding_spec_mapping["output"].sharding_sequence} = {sharding_spec_mapping["input"].sharding_sequence} x {sharding_spec_mapping["other"].sharding_sequence}' total_mesh_dim_list = [] for mesh_dim_list in dim_partition.values(): 
total_mesh_dim_list.extend(mesh_dim_list) # if there is only one sharding dimension, we should use the value instead of list as logical_process_axis. if len(total_mesh_dim_list) == 1: total_mesh_dim_list = total_mesh_dim_list[0] communication_action_mapping = {} other_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["other"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=total_mesh_dim_list, comm_type=CommType.HOOK, ) communication_action_mapping["other"] = other_comm_action if self.has_bias: bias_comm_action = self.get_communication_action( sharding_spec=sharding_spec_mapping["bias"], communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD, logical_process_axis=total_mesh_dim_list, comm_type=CommType.HOOK, ) communication_action_mapping["bias"] = bias_comm_action strategy = self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) return strategy def split_input_batch_single_mesh_dim(self, mesh_dim_0, batch_dimension_length): strategy_list = [] dim_partition_list = enumerate_all_possible_1d_sharding(mesh_dim_0, batch_dimension_length) for dim_partition in dim_partition_list: strategy = self._generate_strategy_with_dim_partition(dim_partition) strategy_list.append(strategy) return strategy_list def split_input_batch_both_mesh_dim(self, mesh_dim_0, mesh_dim_1, batch_dimension_length): strategy_list = [] dim_partition_list = enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, batch_dimension_length) for dim_partition in dim_partition_list: strategy = self._generate_strategy_with_dim_partition(dim_partition) strategy_list.append(strategy) return strategy_list @ignore_sharding_exception def non_split(self): name = f"RR = RR x R" dim_partition_dict_mapping = { "input": {}, "other": {}, "output": {}, } if self.has_bias: dim_partition_dict_mapping["bias"] = {} sharding_spec_mapping = 
self.to_sharding_spec_mapping(dim_partition_dict_mapping) communication_action_mapping = {} return self.get_sharding_strategy( name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping=communication_action_mapping, ) def collate_strategies(self) -> List[ShardingStrategy]: """ Generate every possible strategies for a LayerNorm node, and record all strategies into the strategies_vector. """ strategy_list = [] input_data_dim = len(self.op_data["input"].logical_shape) weight_data_dim = len(self.op_data["other"].logical_shape) # in LayerNorm context, batch dimensions mean all the dimensions do not join the normalization. batch_dimension_length = input_data_dim - weight_data_dim # SR = SR x R with single mesh dim on batch dimensions strategy_list.extend(self.split_input_batch_single_mesh_dim(0, batch_dimension_length)) strategy_list.extend(self.split_input_batch_single_mesh_dim(1, batch_dimension_length)) # SR = SR x R with both mesh dims on batch dimensions strategy_list.extend(self.split_input_batch_both_mesh_dim(0, 1, batch_dimension_length)) # RR = RR x R strategy_list.append(self.non_split()) return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/strategy_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/strategy_generator.py
import operator
from abc import ABC, abstractmethod
from functools import reduce
from typing import Any, Dict, List, Union

import torch
from torch.fx import Node

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommAction,
    CommType,
    OperationData,
    OperationDataType,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.device.device_mesh import DeviceMesh
from colossalai.tensor.shape_consistency import CollectiveCommPattern, CommSpec, ShapeConsistencyManager
from colossalai.tensor.sharding_spec import ShardingSpec
from colossalai.tensor.utils import convert_dim_partition_dict


class StrategyGenerator(ABC):
    """
    StrategyGenerator is used to generate the same group of sharding strategies.

    TODO: remove the original strategy_generator.py after refactoring
    """

    def __init__(self, operation_data_mapping: Dict[str, OperationData], device_mesh: DeviceMesh):
        # mapping from an operand name (e.g. "input", "other", "bias") to its OperationData
        self.op_data = operation_data_mapping
        self.device_mesh = device_mesh

        # validate the whether operation data is of desired value
        self.validate()

    @property
    def has_bias(self) -> bool:
        """
        A utility method to check for the existence of bias operand for convenience.
        """
        return "bias" in self.op_data

    def is_param(self, op_data_name) -> bool:
        # True if the named operand is a trainable parameter (counted towards parameter memory)
        other_data = self.op_data[op_data_name]
        return other_data.type == OperationDataType.PARAM

    def is_buffer(self, op_data_name) -> bool:
        # True if the named operand is a non-trainable buffer
        other_data = self.op_data[op_data_name]
        return other_data.type == OperationDataType.BUFFER

    def get_sharding_strategy(
        self,
        name: str,
        sharding_spec_mapping: Dict[str, ShardingSpec],
        communication_action_mapping: Dict[str, CommSpec],
    ):
        """
        A factory method to produce a ShardingStrategy object.

        Args:
            sharding_spec_mapping (Dict[str, ShardingSpec]): the mapping between the operation data name and the ShardingSpec object.
            communication_action_mapping (Dict[str, CommSpec]): the mapping between the operation data name and the CommSpec object.
        """
        # re-key both mappings from operand *name* to the OperationData object itself
        sharding_specs = self.replace_op_name_with_op_data(sharding_spec_mapping)
        communication_actions = self.replace_op_name_with_op_data(communication_action_mapping)
        return ShardingStrategy(name=name, sharding_specs=sharding_specs, communication_actions=communication_actions)

    def to_sharding_spec_mapping(self, mapping: Dict[str, Dict[int, List[int]]]):
        """
        A utility method to convert the the dim partition dict to a ShardingSpec object.

        Args:
            mapping (Dict[str, Dict[int, List[int]]]): the key of the mapping is the operation data name and the value is a dim partition dictionary.

        Notes:
            The op_data.data is commonly type of torch.Tensor, torch.nn.Parameter, so the sharding spec is easy to create from the shape of the data.
            However, if the op_data.data is of other non-iterative types, such as float or int, we should return None. If the op_data.data is of some iterative types, such as
            list or tuple, we should return a list of ShardingSpec objects follow the same rule as above mentioned.
        """
        results = {}
        for op_data_name, dim_partition_dict in mapping.items():
            # operand names not present in op_data are silently skipped
            if op_data_name in self.op_data:
                op_data = self.op_data[op_data_name]

                def _to_sharding_spec(
                    data: Any, logical_shape: Any, dim_partition_dict: Dict[int, List[int]]
                ) -> Union[ShardingSpec, List[ShardingSpec], None]:
                    """
                    This is a recursive function to convert the dim partition dict to a ShardingSpec object.
                    """
                    if isinstance(data, torch.Tensor):
                        dim_size = len(logical_shape)
                        # normalize e.g. negative dims before building the spec
                        dim_partition_dict = convert_dim_partition_dict(dim_size, dim_partition_dict)
                        sharding_spec = ShardingSpec(
                            device_mesh=self.device_mesh,
                            entire_shape=logical_shape,
                            dim_partition_dict=dim_partition_dict,
                        )
                        return sharding_spec
                    elif isinstance(data, (list, tuple)):
                        # recurse element-wise; the three iterables are assumed to be parallel
                        sharding_spec = []
                        for data_element, logical_shape_element, dim_partition_dict_element in zip(
                            data, logical_shape, dim_partition_dict
                        ):
                            sharding_spec.append(
                                _to_sharding_spec(data_element, logical_shape_element, dim_partition_dict_element)
                            )
                        return sharding_spec
                    else:
                        # non-tensor scalars (float/int/...) carry no sharding spec
                        return None

                sharding_spec = _to_sharding_spec(op_data.data, op_data.logical_shape, dim_partition_dict)
                results[op_data_name] = sharding_spec
        return results

    def replace_op_name_with_op_data(self, mapping: Dict[str, Any]):
        """
        Convert the key of the dictionary from the operation data name to an OperationData object.
        """
        results = {}
        for k, v in mapping.items():
            op_data = self.op_data[k]
            results[op_data] = v
        return results

    def get_communication_spec(
        self,
        sharding_spec: ShardingSpec,
        communication_pattern: CollectiveCommPattern,
        logical_process_axis: Union[int, List[int]],
    ):
        """
        A factory method to produce a CommSpec object.
        """
        return CommSpec(
            comm_pattern=communication_pattern, sharding_spec=sharding_spec, logical_process_axis=logical_process_axis
        )

    def get_communication_action(
        self,
        sharding_spec: ShardingSpec,
        communication_pattern: CollectiveCommPattern,
        logical_process_axis: Union[int, List[int]],
        comm_type: CommType,
        arg_index: int = -1,
        key_for_kwarg: Any = None,
    ) -> CommAction:
        """
        A factory method to produce a CommAction object.
        """
        return CommAction(
            comm_spec=self.get_communication_spec(
                sharding_spec=sharding_spec,
                communication_pattern=communication_pattern,
                logical_process_axis=logical_process_axis,
            ),
            comm_type=comm_type,
            arg_index=arg_index,
            key_for_kwarg=key_for_kwarg,
        )

    def update_communication_cost(self, strategy: ShardingStrategy) -> ShardingStrategy:
        """
        Compute the communication cost involved in the forward and backward iteration.
        """

        comm_cost = TrainCycleItem(fwd=0, bwd=0, total=0)

        def _compute_and_add(op_data: OperationData, comm_spec: CommSpec):
            # convert element counts to bytes using the operand's dtype, then
            # accumulate into the enclosing comm_cost (mutated in place)
            num_ele_in_comm = comm_spec.get_comm_cost()
            dtype = op_data.data.dtype
            size_per_elem_bytes = torch.tensor([], dtype=dtype).element_size()

            for phase, cost in num_ele_in_comm.items():
                num_ele_in_comm[phase] = num_ele_in_comm[phase] * size_per_elem_bytes
            comm_cost.fwd += num_ele_in_comm["forward"]
            comm_cost.bwd += num_ele_in_comm["backward"]
            comm_cost.total += num_ele_in_comm["total"]

        # check if communication action exists
        # if so, loop over each action and compute the cost of each action
        if strategy.communication_actions is not None:
            for operand, comm_action in strategy.communication_actions.items():
                if isinstance(comm_action, CommAction):
                    comm_spec = comm_action.comm_spec
                else:
                    # this condition branch will be removed after all the handler updated.
                    comm_spec = comm_action
                if isinstance(comm_spec, dict):
                    # a dict encodes a src->tgt spec conversion; expand it into the
                    # concrete sequence of collective ops and cost each one
                    src_spec = comm_spec["src_spec"]
                    tgt_spec = comm_spec["tgt_spec"]
                    shape_consistency_manager = ShapeConsistencyManager()
                    _, comm_action_sequence, _ = shape_consistency_manager.shape_consistency(src_spec, tgt_spec)
                    for comm_spec_ in comm_action_sequence:
                        _compute_and_add(operand, comm_spec_)
                else:
                    _compute_and_add(operand, comm_spec)

        # update the communication cost attribute in-place
        strategy.communication_cost = comm_cost
        return strategy

    @abstractmethod
    def update_compute_cost(self, strategy: ShardingStrategy) -> ShardingStrategy:
        """
        Customize this method to compute the computation flops.
        """

    @abstractmethod
    def update_memory_cost(self, strategy: ShardingStrategy) -> ShardingStrategy:
        """
        Customize this method to compute the memory cost in bytes.
        """

    def _compute_size_in_bytes(self, strategy: ShardingStrategy, key: str):
        """
        Compute the size of a tensor in bytes.

        Args:
            strategy (ShardingStrategy): the ShardingStrategy generated.
            key (str): the name of the operation data defined by the generator.
        """
        op_data = self.op_data[key]

        def _compute_size_in_bytes_helper(sharding_spec, meta_data):
            # bytes = (product of per-device sharded shape) * element size of the dtype
            sharded_shape = sharding_spec.get_sharded_shape_per_device()
            if len(sharded_shape) == 0:
                # a 0-d tensor still holds one element
                num_elements = 1
            else:
                num_elements = reduce(operator.mul, sharded_shape)
            dtype = getattr(meta_data, "dtype")
            size_per_elem_bytes = torch.tensor([], dtype=dtype).element_size()
            return num_elements * size_per_elem_bytes

        if isinstance(op_data.data, tuple):
            assert isinstance(
                strategy.sharding_specs[op_data], list
            ), "sharding_spec of op_data should be a list of sharding specs if op_data.data is a tuple."
            total_bytes = 0
            for index, sharding_spec in enumerate(strategy.sharding_specs[op_data]):
                meta_data = op_data.data[index]
                if isinstance(meta_data, torch.Tensor):
                    element_bytes = _compute_size_in_bytes_helper(sharding_spec, meta_data)
                else:
                    # if meta_data is not a tensor, we count the memory as 0
                    element_bytes = 0
                total_bytes += element_bytes
        else:
            if isinstance(op_data.data, torch.Tensor):
                total_bytes = _compute_size_in_bytes_helper(strategy.sharding_specs[op_data], op_data.data)
            else:
                # if op_data.data is not a tensor, we count the memory as 0
                total_bytes = 0

        return total_bytes

    def generate(self) -> List[ShardingStrategy]:
        """
        Generate all possible sharding strategies for this operation.
        """
        strategies = self.collate_strategies()

        # some strategies may be None as ignore_sharding_exception may return None
        # when ShardingSpecException occurs.
        # thus, remove those None values
        strategies = [strategy for strategy in strategies if strategy]

        # update the costs
        # update meta info on cost
        # these update methods are all in-place, the default method will do nothing
        # the cost info will only be added if the child class overrides these methods
        for strategy in strategies:
            self.update_communication_cost(strategy)
            self.update_compute_cost(strategy)
            self.update_memory_cost(strategy)

        return strategies

    @abstractmethod
    def collate_strategies(self) -> List[ShardingStrategy]:
        pass

    @abstractmethod
    def validate(self) -> bool:
        """
        Validate if the operands are of desired shape.
        If True, means this generator can be used for the current operation.
        """


class FollowingStrategyGenerator(StrategyGenerator):
    """
    FollowingStrategyGenerator is used to generate the sharding strategies which depends on its predecessor node.

    TODO: remove the original strategy_generator.py after refactoring
    """

    def __init__(
        self, operation_data_mapping: Dict[str, OperationData], device_mesh: DeviceMesh, predecessor_node: Node
    ):
        # NOTE(review): deliberately does NOT call super().__init__, so self.validate()
        # is not invoked here — confirm this is intentional before changing it
        self.op_data = operation_data_mapping
        self.device_mesh = device_mesh
        self.predecessor_node = predecessor_node


class OutputStrategyGenerator(StrategyGenerator):
    """
    OutputStrategyGenerator is used to generate the sharding strategies for Output Node.
    """

    def __init__(
        self, operation_data_mapping: Dict[str, OperationData], device_mesh: DeviceMesh, predecessor_nodes: List[Node]
    ):
        super().__init__(operation_data_mapping, device_mesh)
        self.predecessor_nodes = predecessor_nodes
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/normal_pooling_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/normal_pooling_generator.py
import copy
import operator
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem
from colossalai.auto_parallel.tensor_shard.utils import (
    enumerate_all_possible_1d_sharding,
    enumerate_all_possible_2d_sharding,
    ignore_sharding_exception,
)

from .strategy_generator import StrategyGenerator


class NormalPoolStrategyGenerator(StrategyGenerator):
    """
    NormalPoolStrategyGenerator is a generic class to generate strategies for pool operation like MaxPoolxd.
    The reason we call this normal pool is AvgPoolxd and MaxPoolxd are taking the kernel size element from image,
    and reduce them depending on the operation type.
    """

    def validate(self) -> bool:
        """
        In sanity check, we need make sure the input data having correct dimension size.
        For Pool1d, the dim of input data should be 3([N, C, L]).
        For Pool2d, the dim of input data should be 4([N, C, H, W]).
        For Pool3d, the dim of input data should be 5([N, C, H, W, D]).
        """
        input_op_data = self.op_data["input"]
        assert input_op_data.data.dim() in (
            3,
            4,
            5,
        ), f"We suppose the dim of input fed into Pool op should in range of [3, 5]."

    def update_compute_cost(self, strategy: ShardingStrategy) -> TrainCycleItem:
        """
        Compute the computation cost per device with this specific strategy.

        Note: compute_cost need to be divided by TFLOPS, now it just shows the computation size.
        """
        # TODO: compute_cost need to be divided by TFLOPS, now it just shows the computation size.
        # 1D: (Lout) * N * C * kernel
        # 2D: (H * W) * N * Cout * Cin * kernel
        # 3D: (H * W * D) * N * Cout * Cin * kernel
        sharded_output_shape = strategy.sharding_specs[self.op_data["output"]].get_sharded_shape_per_device()
        sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device()

        # "other" holds the kernel size here; a scalar means the same size on every spatial dim
        kernel_size = self.op_data["other"].data
        if isinstance(kernel_size, int):
            kernel_size = [kernel_size] * (len(sharded_output_shape) - 2)
        kernel_size_product = reduce(operator.mul, kernel_size)
        output_size_product = reduce(operator.mul, sharded_output_shape)
        input_size_product = reduce(operator.mul, sharded_input_shape)

        # forward visits each output element once per kernel element;
        # backward scatters back over the input
        forward_compute_cost = output_size_product * kernel_size_product
        backward_compute_cost = input_size_product * kernel_size_product

        total_compute_cost = forward_compute_cost + backward_compute_cost

        compute_cost = TrainCycleItem(fwd=forward_compute_cost, bwd=backward_compute_cost, total=total_compute_cost)
        strategy.compute_cost = compute_cost

    def update_memory_cost(self, strategy: ShardingStrategy) -> ShardingStrategy:
        # Pooling has no parameters, so only activation memory is accounted for.
        forward_size_mapping = {
            "input": self._compute_size_in_bytes(strategy, "input"),
            "output": self._compute_size_in_bytes(strategy, "output"),
        }

        backward_size_mapping = copy.deepcopy(forward_size_mapping)
        backward_size_mapping.pop("output")
        # compute fwd cost incurred
        # fwd_cost = input + output
        fwd_activation_cost = sum([v for k, v in forward_size_mapping.items()])
        fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=0)

        # compute bwd cost incurred
        # bwd_cost = input_grad
        bwd_activation_cost = sum([v for k, v in backward_size_mapping.items()])
        bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=0)

        # compute total cost
        total_mem_cost = MemoryCost(activation=fwd_activation_cost + bwd_activation_cost, parameter=0)
        memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost)
        strategy.memory_cost = memory_cost

    @ignore_sharding_exception
    def _generate_strategy_with_dim_partition(self, dim_partition):
        # Pooling is applied independently per element of the batch/channel dims,
        # so input and output share the same partition; returns None (via the
        # decorator) if the partition is invalid for these shapes.
        dim_partition_dict_mapping = {"input": dim_partition, "output": dim_partition}

        sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)

        name = (
            f'{sharding_spec_mapping["output"].sharding_sequence} = {sharding_spec_mapping["input"].sharding_sequence}'
        )
        communication_action_mapping = {}

        strategy = self.get_sharding_strategy(
            name=name,
            sharding_spec_mapping=sharding_spec_mapping,
            communication_action_mapping=communication_action_mapping,
        )

        return strategy

    def enumerate_all_possible_batch_dimensions_dim_partition(self, mesh_dim_0, mesh_dim_1):
        # Only the first two dims (N, C) are candidates for sharding, hence the literal 2.
        dim_partition_list = []
        dim_partition_list.extend(enumerate_all_possible_1d_sharding(mesh_dim_0, 2))
        dim_partition_list.extend(enumerate_all_possible_1d_sharding(mesh_dim_1, 2))
        dim_partition_list.extend(enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, 2))
        # append {} for non_split case
        dim_partition_list.append({})

        return dim_partition_list

    def collate_strategies(self) -> List[ShardingStrategy]:
        strategy_list = []

        dim_partition_list = self.enumerate_all_possible_batch_dimensions_dim_partition(0, 1)
        for dim_partition in dim_partition_list:
            strategy = self._generate_strategy_with_dim_partition(dim_partition)
            strategy_list.append(strategy)

        return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/tensor_constructor_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/tensor_constructor_generator.py
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem

from .strategy_generator import StrategyGenerator

__all__ = ["TensorConstructorGenerator"]


class TensorConstructorGenerator(StrategyGenerator):
    """
    Handles the sharding strategies for tensor-constructor operations such as
    torch.arange, which take no tensor inputs and only produce a replicated output.
    """

    def validate(self) -> bool:
        return super().validate()

    def update_compute_cost(self, strategy: ShardingStrategy):
        # constructors are trivially cheap; a small constant keeps the solver well-behaved
        strategy.compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20)

    def update_memory_cost(self, strategy: ShardingStrategy):
        """
        Compute the memory cost per device with this specific strategy.
        """
        output_bytes = self._compute_size_in_bytes(strategy, "output")

        # classify the sole "output" operand as parameter or activation memory
        if self.is_param("output"):
            activation_bytes, parameter_bytes = 0, output_bytes
        else:
            activation_bytes, parameter_bytes = output_bytes, 0

        fwd_cost = MemoryCost(activation=activation_bytes, parameter=parameter_bytes)
        # nothing flows backward through a constructor
        bwd_cost = MemoryCost(activation=0, parameter=0)
        total_cost = MemoryCost(activation=activation_bytes, parameter=parameter_bytes)
        strategy.memory_cost = TrainCycleItem(fwd=fwd_cost, bwd=bwd_cost, total=total_cost)

    def collate_strategies(self) -> List[ShardingStrategy]:
        # the single available strategy: fully replicated output, no communication
        spec_mapping = self.to_sharding_spec_mapping({"output": {}})
        strategy = self.get_sharding_strategy(
            name="Replica Tensor Constructor",
            sharding_spec_mapping=spec_mapping,
            communication_action_mapping={},
        )
        return [strategy]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/placeholder_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/placeholder_generator.py
from typing import Dict, List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    MemoryCost,
    OperationData,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.device.device_mesh import DeviceMesh

from .strategy_generator import StrategyGenerator

__all__ = ["PlaceholderGenerator"]


class PlaceholderGenerator(StrategyGenerator):
    """
    Produces sharding strategies for graph placeholder (input) nodes.
    """

    def __init__(
        self, operation_data_mapping: Dict[str, OperationData], device_mesh: DeviceMesh, placeholder_option: str
    ):
        super().__init__(operation_data_mapping, device_mesh)
        # either "replicated" or "distributed"; selects which strategy is emitted
        self.placeholder_option = placeholder_option

    def validate(self) -> bool:
        return super().validate()

    def update_compute_cost(self, strategy: ShardingStrategy):
        # placeholders perform no real computation; a small constant keeps the solver well-behaved
        strategy.compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20)

    def update_memory_cost(self, strategy: ShardingStrategy):
        """
        Compute the memory cost per device with this specific strategy.
        """
        # only the output tensor exists at a placeholder node
        output_bytes = self._compute_size_in_bytes(strategy, "output")
        fwd_cost = MemoryCost(activation=output_bytes, parameter=0)
        bwd_cost = MemoryCost(activation=0, parameter=0)
        total_cost = MemoryCost(activation=output_bytes, parameter=0)
        strategy.memory_cost = TrainCycleItem(fwd=fwd_cost, bwd=bwd_cost, total=total_cost)

    def _build_strategy(self, name: str, output_dim_partition) -> ShardingStrategy:
        # shared construction path for the replicated and distributed variants
        spec_mapping = self.to_sharding_spec_mapping({"output": output_dim_partition})
        return self.get_sharding_strategy(
            name=name,
            sharding_spec_mapping=spec_mapping,
            communication_action_mapping={},
        )

    def replica_placeholder(self) -> ShardingStrategy:
        """
        Generate replica strategy for placeholder node.
        """
        return self._build_strategy("Replica Placeholder", {})

    def distributed_placeholder(self, mesh_list) -> ShardingStrategy:
        """
        Generate distributed strategy for placeholder node.
        """
        # shard dim 0 (the batch dimension) across the given mesh axes
        return self._build_strategy("Distributed Placeholder", {0: mesh_list})

    def collate_strategies(self) -> List[ShardingStrategy]:
        if self.placeholder_option == "distributed":
            chosen = self.distributed_placeholder([0, 1])
        else:
            assert (
                self.placeholder_option == "replicated"
            ), f"placeholder_option {self.placeholder_option} is not supported"
            chosen = self.replica_placeholder()
        return [chosen]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/__init__.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/__init__.py
from .batch_norm_generator import BatchNormStrategyGenerator from .binary_elementwise_generator import BinaryElementwiseStrategyGenerator from .conv_strategy_generator import ConvStrategyGenerator from .embedding_generator import EmbeddingStrategyGenerator from .getattr_generator import GetattrGenerator from .getitem_generator import GetItemStrategyGenerator, TensorStrategyGenerator, TensorTupleStrategyGenerator from .layer_norm_generator import LayerNormGenerator from .matmul_strategy_generator import ( BatchedMatMulStrategyGenerator, DotProductStrategyGenerator, LinearProjectionStrategyGenerator, MatVecStrategyGenerator, ) from .normal_pooling_generator import NormalPoolStrategyGenerator from .output_generator import OutputGenerator from .placeholder_generator import PlaceholderGenerator from .reshape_generator import ( DefaultReshapeGenerator, PermuteGenerator, SplitGenerator, TransposeGenerator, ViewGenerator, ) from .softmax_generator import SoftmaxGenerator from .strategy_generator import StrategyGenerator from .sum_generator import SumGenerator from .tensor_constructor_generator import TensorConstructorGenerator from .unary_elementwise_generator import UnaryElementwiseGenerator from .where_generator import WhereGenerator __all__ = [ "StrategyGenerator", "DotProductStrategyGenerator", "MatVecStrategyGenerator", "LinearProjectionStrategyGenerator", "BatchedMatMulStrategyGenerator", "ConvStrategyGenerator", "UnaryElementwiseGenerator", "BatchNormStrategyGenerator", "GetItemStrategyGenerator", "TensorStrategyGenerator", "TensorTupleStrategyGenerator", "LayerNormGenerator", "PlaceholderGenerator", "OutputGenerator", "WhereGenerator", "NormalPoolStrategyGenerator", "BinaryElementwiseStrategyGenerator", "GetattrGenerator", "TensorConstructorGenerator", "EmbeddingStrategyGenerator", "SumGenerator", "SoftmaxGenerator", "ViewGenerator", "PermuteGenerator", "TransposeGenerator", "SplitGenerator", "DefaultReshapeGenerator", ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/softmax_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/softmax_generator.py
import copy
import operator
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.node_handler.strategy.strategy_generator import FollowingStrategyGenerator
from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem

__all__ = ["SoftmaxGenerator"]


class SoftmaxGenerator(FollowingStrategyGenerator):
    """
    SoftmaxGenerator is used to generate strategies for torch.nn.Softmax or F.softmax.
    """

    def validate(self) -> bool:
        return super().validate()

    def update_compute_cost(self, strategy: ShardingStrategy):
        """
        Compute the computation cost per device with this specific strategy.
        """
        sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device()
        sharded_output_shape = strategy.sharding_specs[self.op_data["output"]].get_sharded_shape_per_device()
        input_size_product = reduce(operator.mul, sharded_input_shape)
        output_size_product = reduce(operator.mul, sharded_output_shape)
        # factor 2 ~ exp pass + normalize pass; backward touches each input element once
        # (rough element-count estimate, not exact FLOPs)
        forward_compute_cost = output_size_product * 2
        backward_compute_cost = input_size_product
        total_compute_cost = forward_compute_cost + backward_compute_cost
        compute_cost = TrainCycleItem(fwd=forward_compute_cost, bwd=backward_compute_cost, total=total_compute_cost)
        strategy.compute_cost = compute_cost

    def update_memory_cost(self, strategy: ShardingStrategy):
        """
        Compute the memory cost per device with this specific strategy.
        """
        forward_size_mapping = {
            "input": self._compute_size_in_bytes(strategy, "input"),
            "output": self._compute_size_in_bytes(strategy, "output"),
        }

        backward_size_mapping = copy.deepcopy(forward_size_mapping)
        backward_size_mapping.pop("output")
        # compute fwd cost incurred
        # fwd_cost = input + output
        fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)])
        fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)])
        fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost)

        # compute bwd cost incurred
        # bwd_cost = input_grad
        bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)])
        bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)])
        bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost)

        # compute total cost
        total_mem_cost = MemoryCost(
            activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost
        )
        memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost)
        strategy.memory_cost = memory_cost

    def collate_strategies(self) -> List[ShardingStrategy]:
        # Follow each strategy of the predecessor node: keep its input partition,
        # but never shard the dimension softmax normalizes over.
        strategy_list = []
        for index, strategy in enumerate(self.predecessor_node.strategies_vector):
            dim_partition_dict_mapping = {}
            communication_action_mapping = {}
            input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]]
            dim_partition_dict_for_input = copy.deepcopy(input_sharding_spec.dim_partition_dict)
            softmax_dim = self.op_data["softmax_dim"].data

            # drop any sharding on the softmax dim: normalization needs the full axis locally
            if softmax_dim in dim_partition_dict_for_input:
                dim_partition_dict_for_input.pop(softmax_dim)

            dim_partition_dict_for_output = copy.deepcopy(dim_partition_dict_for_input)
            dim_partition_dict_mapping = {
                "input": dim_partition_dict_for_input,
                "output": dim_partition_dict_for_output,
            }
            sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
            # add index into name to pass the duplicated check
            # we keep same strategies with different name for node merging, and it will not increase the searching space,
            # because in solver, this node will be merged into other nodes, and solver will not create a new variable for this node.
            name = f'{sharding_spec_mapping["input"].sharding_sequence} -> {sharding_spec_mapping["output"].sharding_sequence}_{index}'

            strategy = self.get_sharding_strategy(
                name=name,
                sharding_spec_mapping=sharding_spec_mapping,
                communication_action_mapping=communication_action_mapping,
            )
            strategy_list.append(strategy)

        return strategy_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/where_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/where_generator.py
import copy
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem
from colossalai.auto_parallel.tensor_shard.utils import (
    enumerate_all_possible_1d_sharding,
    enumerate_all_possible_2d_sharding,
    ignore_sharding_exception,
)

from .strategy_generator import StrategyGenerator

__all__ = ["WhereGenerator"]


class WhereGenerator(StrategyGenerator):
    """
    Produces sharding strategies for the Where operation
    (condition, x, y -> output, all sharing one partition).
    """

    def validate(self) -> bool:
        return super().validate()

    def update_compute_cost(self, strategy: ShardingStrategy):
        # element-wise select is cheap; a small constant keeps the solver well-behaved
        strategy.compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20)

    def update_memory_cost(self, strategy: ShardingStrategy):
        """
        Compute the memory cost per device with this specific strategy.
        """
        operand_bytes = {
            operand: self._compute_size_in_bytes(strategy, operand)
            for operand in ("condition", "x", "y", "output")
        }

        # forward holds condition + x + y + output
        fwd_activation = sum(operand_bytes.values())
        fwd_cost = MemoryCost(activation=fwd_activation, parameter=0)

        # backward holds the gradients of condition, x and y only
        bwd_activation = fwd_activation - operand_bytes["output"]
        bwd_cost = MemoryCost(activation=bwd_activation, parameter=0)

        total_cost = MemoryCost(activation=fwd_activation + bwd_activation, parameter=0)
        strategy.memory_cost = TrainCycleItem(fwd=fwd_cost, bwd=bwd_cost, total=total_cost)

    @ignore_sharding_exception
    def _generate_strategy_with_dim_partition(self, dim_partition):
        # element-wise op: all four operands share the exact same partition
        sharding_spec_mapping = self.to_sharding_spec_mapping(
            dict.fromkeys(("condition", "x", "y", "output"), dim_partition)
        )

        name = f'{sharding_spec_mapping["output"].sharding_sequence} = {sharding_spec_mapping["condition"].sharding_sequence} x {sharding_spec_mapping["x"].sharding_sequence} x {sharding_spec_mapping["y"].sharding_sequence}'

        return self.get_sharding_strategy(
            name=name,
            sharding_spec_mapping=sharding_spec_mapping,
            communication_action_mapping={},
        )

    def enumerate_all_possible_output_spec(self, mesh_dim_0, mesh_dim_1, dimension_length):
        # every 1D sharding on either mesh axis, every 2D sharding on both,
        # plus {} for the fully-replicated (non-split) case
        candidates = []
        candidates.extend(enumerate_all_possible_1d_sharding(mesh_dim_0, dimension_length))
        candidates.extend(enumerate_all_possible_1d_sharding(mesh_dim_1, dimension_length))
        candidates.extend(enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, dimension_length))
        candidates.append({})
        return candidates

    def collate_strategies(self) -> List[ShardingStrategy]:
        """
        Generate every possible strategies for a where node, and record all strategies
        into the strategies_vector.
        """
        dimension_length = len(self.op_data["output"].logical_shape)
        return [
            self._generate_strategy_with_dim_partition(dim_partition)
            for dim_partition in self.enumerate_all_possible_output_spec(0, 1, dimension_length)
        ]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/tensor_shard/node_handler/strategy/embedding_generator.py
colossalai/auto_parallel/tensor_shard/node_handler/strategy/embedding_generator.py
import copy
import operator
from functools import reduce
from typing import List

from colossalai.auto_parallel.tensor_shard.sharding_strategy import (
    CommType,
    MemoryCost,
    ShardingStrategy,
    TrainCycleItem,
)
from colossalai.auto_parallel.tensor_shard.utils import ignore_sharding_exception
from colossalai.tensor.shape_consistency import CollectiveCommPattern

from .strategy_generator import StrategyGenerator


class EmbeddingStrategyGenerator(StrategyGenerator):
    """
    EmbeddingStrategyGenerator is a generic class to generate strategies for nn.Embedding or F.embedding.
    The operation data is defined as `output = input x other`.

    Here ``input`` is the index tensor and ``other`` is the embedding weight
    table; in the strategy names, input dims map to the leading output dims and
    the weight's embedding dim maps to the trailing output dim.
    """

    def validate(self) -> bool:
        # No constraints beyond the base generator's checks.
        return super().validate()

    def update_compute_cost(self, strategy: ShardingStrategy):
        """
        Compute the computation cost per device with this specific strategy.

        Note: The computation cost for the embedding handler is estimated as dense computing now.
        It may not be accurate.
        """
        # TODO: estimate the embedding computation cost as sparse operation
        sharded_input_shape = strategy.sharding_specs[self.op_data["input"]].get_sharded_shape_per_device()
        sharded_other_shape = strategy.sharding_specs[self.op_data["other"]].get_sharded_shape_per_device()
        sharded_output_shape = strategy.sharding_specs[self.op_data["output"]].get_sharded_shape_per_device()

        # Total element counts per device, used as dense matmul-style proxies.
        input_size_product = reduce(operator.mul, sharded_input_shape)
        other_size_product = reduce(operator.mul, sharded_other_shape)
        output_size_product = reduce(operator.mul, sharded_output_shape)

        forward_compute_cost = input_size_product * other_size_product

        # Dividing by the last output dim removes the embedding dimension from
        # the activation-gradient estimate.
        backward_activation_cost = other_size_product * output_size_product / sharded_output_shape[-1]
        backward_weight_cost = input_size_product * other_size_product
        backward_compute_cost = backward_weight_cost + backward_activation_cost

        total_compute_cost = forward_compute_cost + backward_compute_cost

        compute_cost = TrainCycleItem(fwd=forward_compute_cost, bwd=backward_compute_cost, total=total_compute_cost)
        strategy.compute_cost = compute_cost

    def update_memory_cost(self, strategy: ShardingStrategy):
        # Per-operand sizes in bytes under this strategy's sharding.
        forward_size_mapping = {
            "input": self._compute_size_in_bytes(strategy, "input"),
            "other": self._compute_size_in_bytes(strategy, "other"),
            "output": self._compute_size_in_bytes(strategy, "output"),
        }

        # Backward holds gradients for the forward operands except the output.
        backward_size_mapping = copy.deepcopy(forward_size_mapping)
        backward_size_mapping.pop("output")
        # compute fwd cost incurred
        # fwd_cost = input + other + output
        fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)])
        fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)])
        fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost)

        # compute bwd cost incurred
        # bwd_cost = input_grad + other_grad
        bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)])
        bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)])
        bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost)

        # compute total cost
        total_mem_cost = MemoryCost(
            activation=fwd_activation_cost + bwd_activation_cost, parameter=fwd_parameter_cost + bwd_parameter_cost
        )
        memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost)
        strategy.memory_cost = memory_cost

    @ignore_sharding_exception
    def non_split(self):
        # Fully replicated: no operand is sharded, no communication needed.
        name = f"RR = R x RR"

        dim_partition_dict_mapping = {
            "input": {},
            "other": {},
            "output": {},
        }

        sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)

        return self.get_sharding_strategy(
            name=name, sharding_spec_mapping=sharding_spec_mapping, communication_action_mapping={}
        )

    @ignore_sharding_exception
    def split_input(self, mesh_dim_0):
        # Shard the index tensor's batch dim; the weight stays replicated, so
        # its gradient must be all-reduced across the sharded axis in backward.
        name = f"S{mesh_dim_0}R = S{mesh_dim_0} x RR"

        dim_partition_dict_mapping = {
            "input": {0: [mesh_dim_0]},
            "other": {},
            "output": {
                0: [mesh_dim_0],
            },
        }

        sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)

        communication_action_mapping = {}
        # A parameter weight syncs its grad via a backward HOOK; a plain tensor
        # argument is handled with a BEFORE comm action at arg position 1.
        if self.is_param("other"):
            other_comm_action = self.get_communication_action(
                sharding_spec_mapping["other"],
                communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
                logical_process_axis=mesh_dim_0,
                comm_type=CommType.HOOK,
            )
        else:
            other_comm_action = self.get_communication_action(
                sharding_spec_mapping["other"],
                communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
                logical_process_axis=mesh_dim_0,
                comm_type=CommType.BEFORE,
                arg_index=1,
            )

        communication_action_mapping["other"] = other_comm_action

        return self.get_sharding_strategy(
            name=name,
            sharding_spec_mapping=sharding_spec_mapping,
            communication_action_mapping=communication_action_mapping,
        )

    @ignore_sharding_exception
    def split_input_and_embedding_dim(self, mesh_dim_0, mesh_dim_1):
        # 2D parallelism: input batch dim on mesh axis 0, weight embedding dim
        # on mesh axis 1. Each operand's grad is reduced over the other axis.
        name = f"S{mesh_dim_0}S{mesh_dim_1} = S{mesh_dim_0} x RS{mesh_dim_1}"

        dim_partition_dict_mapping = {
            "input": {
                0: [mesh_dim_0],
            },
            "other": {
                1: [mesh_dim_1],
            },
            "output": {
                0: [mesh_dim_0],
                1: [mesh_dim_1],
            },
        }

        sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)

        # set communication action
        input_comm_action = self.get_communication_action(
            sharding_spec_mapping["input"],
            communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
            logical_process_axis=mesh_dim_1,
            comm_type=CommType.BEFORE,
            arg_index=0,
        )

        communication_action_mapping = {"input": input_comm_action}
        if self.is_param("other"):
            other_comm_action = self.get_communication_action(
                sharding_spec_mapping["other"],
                communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
                logical_process_axis=mesh_dim_0,
                comm_type=CommType.HOOK,
            )
        else:
            other_comm_action = self.get_communication_action(
                sharding_spec_mapping["other"],
                communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
                logical_process_axis=mesh_dim_0,
                comm_type=CommType.BEFORE,
                arg_index=1,
            )

        communication_action_mapping["other"] = other_comm_action

        return self.get_sharding_strategy(
            name=name,
            sharding_spec_mapping=sharding_spec_mapping,
            communication_action_mapping=communication_action_mapping,
        )

    @ignore_sharding_exception
    def split_1d_parallel_on_input(self, mesh_dim_0, mesh_dim_1):
        # Flatten both mesh axes into one logical axis and shard only the input
        # batch dim along it; the replicated weight's grad is reduced over both.
        name = f"S{mesh_dim_0}{mesh_dim_1}R = S{mesh_dim_0}{mesh_dim_1} x RR"

        dim_partition_dict_mapping = {
            "input": {0: [mesh_dim_0, mesh_dim_1]},
            "other": {},
            "output": {
                0: [mesh_dim_0, mesh_dim_1],
            },
        }

        sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)

        # set communication action
        communication_action_mapping = {}
        if self.is_param("other"):
            other_comm_action = self.get_communication_action(
                sharding_spec_mapping["other"],
                communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
                logical_process_axis=[mesh_dim_0, mesh_dim_1],
                comm_type=CommType.HOOK,
            )
        else:
            other_comm_action = self.get_communication_action(
                sharding_spec_mapping["other"],
                communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
                logical_process_axis=[mesh_dim_0, mesh_dim_1],
                comm_type=CommType.BEFORE,
                arg_index=1,
            )

        communication_action_mapping["other"] = other_comm_action

        return self.get_sharding_strategy(
            name=name,
            sharding_spec_mapping=sharding_spec_mapping,
            communication_action_mapping=communication_action_mapping,
        )

    @ignore_sharding_exception
    def split_embedding_dim(self, mesh_dim_0):
        # Shard only the weight's embedding dim; the input grad needs an
        # all-reduce over that axis in backward.
        name = f"RS{mesh_dim_0} = R x RS{mesh_dim_0}"

        dim_partition_dict_mapping = {
            "input": {},
            "other": {
                1: [mesh_dim_0],
            },
            "output": {
                1: [mesh_dim_0],
            },
        }

        sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)

        # set communication action
        input_comm_action = self.get_communication_action(
            sharding_spec_mapping["input"],
            communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
            logical_process_axis=mesh_dim_0,
            comm_type=CommType.BEFORE,
            arg_index=0,
        )

        communication_action_mapping = {"input": input_comm_action}

        return self.get_sharding_strategy(
            name=name,
            sharding_spec_mapping=sharding_spec_mapping,
            communication_action_mapping=communication_action_mapping,
        )

    @ignore_sharding_exception
    def split_1d_parallel_on_embedding_dim(self, mesh_dim_0, mesh_dim_1):
        # Same as split_embedding_dim but over the flattened 1D logical axis
        # made of both mesh dims.
        name = f"RS{mesh_dim_0}{mesh_dim_1} = R x RS{mesh_dim_0}{mesh_dim_1}"

        dim_partition_dict_mapping = {
            "input": {},
            "other": {
                1: [mesh_dim_0, mesh_dim_1],
            },
            "output": {
                1: [mesh_dim_0, mesh_dim_1],
            },
        }

        sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)

        # set communication action
        input_comm_action = self.get_communication_action(
            sharding_spec_mapping["input"],
            communication_pattern=CollectiveCommPattern.IDENTITY_FWD_ALLREDUCE_BWD,
            logical_process_axis=[mesh_dim_0, mesh_dim_1],
            comm_type=CommType.BEFORE,
            arg_index=0,
        )

        communication_action_mapping = {"input": input_comm_action}

        return self.get_sharding_strategy(
            name=name,
            sharding_spec_mapping=sharding_spec_mapping,
            communication_action_mapping=communication_action_mapping,
        )

    def collate_strategies(self) -> List[ShardingStrategy]:
        # Enumerate all supported layouts on a 2D device mesh (axes 0 and 1).
        strategies = []

        # RR= R x RR
        strategies.append(self.non_split())

        # SR = S x RR
        strategies.append(self.split_input(0))
        strategies.append(self.split_input(1))

        # SS = S x RS
        strategies.append(self.split_input_and_embedding_dim(0, 1))
        strategies.append(self.split_input_and_embedding_dim(1, 0))

        # S01R = S01 x RR
        strategies.append(self.split_1d_parallel_on_input(0, 1))

        # RS = R x RS
        strategies.append(self.split_embedding_dim(0))
        strategies.append(self.split_embedding_dim(1))

        # RS01 = R x RS01
        strategies.append(self.split_1d_parallel_on_embedding_dim(0, 1))

        return strategies
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/pipeline_shard/__init__.py
colossalai/auto_parallel/pipeline_shard/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/amp_optimizer.py
colossalai/auto_parallel/offload/amp_optimizer.py
from enum import Enum
from typing import Dict, Tuple

import torch
from torch.optim import Optimizer

from colossalai.accelerator import get_accelerator
from colossalai.amp.naive_amp.grad_scaler import DynamicGradScaler
from colossalai.interface import OptimizerWrapper
from colossalai.logging import get_dist_logger

from .base_offload_module import BaseOffloadModule
from .region import Region
from .region_manager import RegionManager


class OptimState(Enum):
    # Gradients currently carry the loss-scale factor.
    SCALED = 0
    # Gradients have been (or need no) unscaling.
    UNSCALED = 1


class AMPOptimizer(OptimizerWrapper):
    """
    A wrapper for Optimizer.
    Code reference: https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/optimizer/zero_optimizer.py

    Args:
        optimizer (Optimizer): An Optimizer instance.
        module (BaseOffloadModule): A ``BaseOffloadModule`` instance.
        initial_scale (float, optional): Initial scale used by DynamicGradScaler. Defaults to 2**16.
        growth_factor (float, optional): growth_factor used by DynamicGradScaler. Defaults to 2.
        backoff_factor (float, optional): backoff_factor used by DynamicGradScaler. Defaults to 0.5.
        growth_interval (float, optional): growth_interval used by DynamicGradScaler. Defaults to 1000.
        hysteresis (float, optional): hysteresis used by DynamicGradScaler. Defaults to 2.
        min_scale (float, optional): Min scale used by DynamicGradScaler. Defaults to 1.
        max_scale (int, optional): max_scale used by DynamicGradScaler. Defaults to 2**32.
        norm_type (float, optional): norm_type used for `clip_grad_norm`.
    """

    def __init__(
        self,
        optimizer: Optimizer,
        module: BaseOffloadModule,
        initial_scale: float = 2**16,
        growth_factor: float = 2,
        backoff_factor: float = 0.5,
        growth_interval: int = 1000,
        hysteresis: int = 2,
        min_scale: float = 1,
        max_scale: float = 2**32,
        clipping_norm: float = 0.0,
        norm_type: float = 2.0,
    ):
        super().__init__(optimizer)

        self.module = module
        self.optim_state = OptimState.UNSCALED
        self.clipping_flag = clipping_norm > 0.0
        self.max_norm = clipping_norm

        self.region_manager: RegionManager = self.module.region_manager
        # Maps each fake (flattened-slice) parameter back to its region slice.
        self.param_to_range: Dict[torch.nn.Parameter, Tuple[int, int]] = dict()
        self.param_to_region: Dict[torch.nn.Parameter, Region] = dict()

        self.fp32_to_fp16_params: Dict[torch.Tensor, torch.nn.Parameter] = dict()

        if self.clipping_flag:
            assert norm_type == 2.0, "AMPOptimizer only supports L2 norm now"

        # NOTE: because of Python name mangling, this resolves to
        # _AMPOptimizer__init__optimizer — it is NOT a dunder.
        self.__init__optimizer()

        # Grad scaler
        self.grad_scaler = DynamicGradScaler(
            initial_scale=initial_scale,
            min_scale=min_scale,
            growth_factor=growth_factor,
            backoff_factor=backoff_factor,
            growth_interval=growth_interval,
            hysteresis=hysteresis,
            max_scale=max_scale,
        )
        self._found_overflow: torch.Tensor = torch.zeros(
            1, dtype=torch.int64, device=get_accelerator().get_current_device()
        )
        self._logger = get_dist_logger()

    def _set_grad_ptr(self):
        # Point each fake param's .grad at the region's CPU grad slice so the
        # wrapped optimizer steps on offloaded gradients, while .data views the
        # fp32 master slice.
        for group in self.param_groups:
            for fake_param in group["params"]:
                region = self.param_to_region[fake_param]
                begin, end = self.param_to_range[fake_param]

                fake_param.data = region.cpu_grad[begin:end]
                fake_param.grad = fake_param.data
                fake_param.data = region.fp32_data[begin:end]

    def _update_fp16_params(self):
        # Detach fake params from region storage and release the CPU grads.
        none_tensor = torch.empty([0])
        for group in self.param_groups:
            for fake_param in group["params"]:
                assert fake_param.grad is None
                fake_param.data = none_tensor
                self.param_to_region[fake_param].cpu_grad = None

    def _check_overflow(self):
        # clear previous overflow record
        self._found_overflow.fill_(self.module.overflow_counter.item())
        return self._found_overflow.item() > 0

    def _get_combined_scale(self):
        loss_scale = 1

        if self.optim_state == OptimState.SCALED:
            loss_scale = self.loss_scale
            self.optim_state = OptimState.UNSCALED

        combined_scale = loss_scale

        # -1 is a sentinel meaning "no division needed" for the wrapped
        # optimizer's div_scale argument.
        if combined_scale == 1:
            return -1
        else:
            return combined_scale

    @property
    def loss_scale(self):
        return self.grad_scaler.scale.item()

    def zero_grad(self, *args, **kwargs):
        self.module.overflow_counter = torch.tensor([0], dtype=torch.int, device=get_accelerator().get_current_device())
        return self.optim.zero_grad(set_to_none=True)

    def step(self, *args, **kwargs):
        # Copy gradients from model params to main params.
        self._set_grad_ptr()

        found_inf = self._check_overflow()

        if found_inf:
            self.optim_state = OptimState.UNSCALED  # no need to unscale grad
            self.grad_scaler.update(found_inf)  # update gradient scaler
            self._logger.info(f"Found overflow. Skip step")
            self.zero_grad()  # reset all gradients
            self._update_fp16_params()
            return

        # get combined scale. combined scale = loss scale * clipping norm
        # so that gradient = gradient / combined scale
        combined_scale = self._get_combined_scale()
        self.grad_scaler.update(found_inf)

        ret = self.optim.step(div_scale=combined_scale, *args, **kwargs)
        self.zero_grad()
        self._update_fp16_params()
        return ret

    def clip_grad_norm(self, model: torch.nn.Module, max_norm: float, norm_type: float = 2.0):
        raise NotImplementedError

    def backward(self, loss: torch.Tensor):
        # Scale the loss before backward; step()/_get_combined_scale() undo it.
        loss = self.loss_scale * loss
        self.optim_state = OptimState.SCALED
        self.module.backward(loss)

    def __init__optimizer(self):
        # Replace every real parameter in the wrapped optimizer's groups with a
        # lightweight "fake" parameter that later aliases its region slice.
        for group in self.optim.param_groups:
            fake_params_list = list()

            for param in group["params"]:
                region = self.region_manager.get_region(param)
                fake_param = torch.nn.Parameter(torch.empty([0]))
                self.param_to_range[fake_param] = region.param_to_range[param]
                self.param_to_region[fake_param] = region
                fake_params_list.append(fake_param)

                # Reset existing state dict key to the new main param.
                if param in self.optim.state:
                    self.optim.state[fake_param] = self.optim.state.pop(param)

            group["params"] = fake_params_list

        # Leverage state_dict() and load_state_dict() to
        # recast preexisting per-param state tensors
        self.optim.load_state_dict(self.optim.state_dict())
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/runtime.py
colossalai/auto_parallel/offload/runtime.py
from typing import List

import torch
from torch.fx.node import Node

from .region import Region
from .util import GlobalRuntimeInfo, requires_upload_p_in_fwd


class SynPreFwdPostBwdOP(torch.autograd.Function):
    """
    A customized prefetch and offload operation.

    Args:
        input_: input tensor.
        fwd_info: information dict, which contains region indices
            that need to be uploaded or freed during forward pass.
        bwd_info: information dict, which contains region indices
            that need to be uploaded during backward pass.
    """

    @staticmethod
    def forward(ctx, input_, fwd_info, bwd_info):
        # Stash the backward plan; forward synchronously frees / uploads
        # region parameters on the current stream.
        ctx.bwd_info = bwd_info
        d2h_rid = fwd_info.get("d2h_rid", None)
        if d2h_rid is not None:
            free_region = GlobalRuntimeInfo().region_list[d2h_rid]
            assert isinstance(free_region, Region)
            free_region.free_cuda_data()

        h2d_rid = fwd_info.get("h2d_rid", None)
        if h2d_rid is not None:
            h2d_region = GlobalRuntimeInfo().region_list[h2d_rid]
            assert isinstance(h2d_region, Region)
            h2d_region.move_param_to_cuda()

        return input_

    @staticmethod
    def backward(ctx, grad_output):
        # Synchronously re-upload the region scheduled in bwd_info.
        h2d_rid = ctx.bwd_info.get("h2d_rid", None)
        if h2d_rid is not None:
            pref_region = GlobalRuntimeInfo().region_list[h2d_rid]
            assert isinstance(pref_region, Region)
            pref_region.move_param_to_cuda()
        # One gradient per forward input: (input_, fwd_info, bwd_info).
        return grad_output, None, None


class AsynPreFwdPostBwdOP(torch.autograd.Function):
    """
    A customized prefetch and offload operation.

    Args:
        input_: input tensor.
        fwd_info: information dict, which contains region indices
            that need to be prefetched, waited, or freed during forward pass.
        bwd_info: information dict, which contains region indices
            that need to be prefetched or waited during backward pass.
    """

    @staticmethod
    def forward(ctx, input_, fwd_info, bwd_info):
        ctx.bwd_info = bwd_info

        # Wait for a previously issued prefetch of the region we are about
        # to compute with.
        sync_rid = fwd_info.get("sync_rid", None)
        if sync_rid is not None:
            prefetch_event = GlobalRuntimeInfo().fwd_prefetch_event_map.get(sync_rid, None)
            if prefetch_event:
                prefetch_event.wait()

        # Launch the next region's H2D copy on the side stream, recording an
        # event so a later op can wait on it.
        h2d_rid = fwd_info.get("h2d_rid", None)
        if h2d_rid is not None:
            pref_region = GlobalRuntimeInfo().region_list[h2d_rid]
            assert isinstance(pref_region, Region)
            master_stream = torch.cuda.current_stream()
            with torch.cuda.stream(GlobalRuntimeInfo().h2d_stream):
                GlobalRuntimeInfo().h2d_stream.wait_stream(master_stream)
                pref_region.move_param_to_cuda()

                prefetch_event = torch.cuda.Event()
                prefetch_event.record(GlobalRuntimeInfo().h2d_stream)
                GlobalRuntimeInfo().fwd_prefetch_event_map[h2d_rid] = prefetch_event
        return input_

    @staticmethod
    def backward(ctx, grad_output):
        sync_rid = ctx.bwd_info.get("sync_rid", None)
        if sync_rid is not None:
            wait_region = GlobalRuntimeInfo().region_list[sync_rid]
            assert isinstance(wait_region, Region)
            prefetch_event = GlobalRuntimeInfo().bwd_prefetch_event_map.get(sync_rid, None)
            if prefetch_event:
                prefetch_event.wait()
            else:
                # No prefetch was issued (or already consumed): upload now.
                wait_region.move_param_to_cuda()

        # Prefetch the next region needed by backward on the side stream.
        h2d_rid = ctx.bwd_info.get("h2d_rid", None)
        if h2d_rid is not None:
            pref_region = GlobalRuntimeInfo().region_list[h2d_rid]
            assert isinstance(pref_region, Region)
            master_stream = torch.cuda.current_stream()
            with torch.cuda.stream(GlobalRuntimeInfo().h2d_stream):
                GlobalRuntimeInfo().h2d_stream.wait_stream(master_stream)
                pref_region.move_param_to_cuda()

                prefetch_event = torch.cuda.Event()
                prefetch_event.record(GlobalRuntimeInfo().h2d_stream)
                GlobalRuntimeInfo().bwd_prefetch_event_map[h2d_rid] = prefetch_event
        return grad_output, None, None


def convert_fwd_upload_bwd_offload_to_action(tensor, fwd_info, bwd_info):
    """
    Convert Upload and Offload operation into runtime action.

    Argument:
        tensor(torch.Tensor): input tensor.
        fwd_info(dict): information dict, which contains region indices
            that need to be uploaded, or freed during forward pass.
        bwd_info(dict): information dict, which contains region indices
            that need to be uploaded during backward pass.
    """
    # DisableTorchFunction keeps __torch_function__ overrides from intercepting
    # this internal autograd-op application.
    with torch._C.DisableTorchFunction():
        ret = SynPreFwdPostBwdOP.apply(tensor, fwd_info, bwd_info)
    return ret


def convert_fwd_prefetch_bwd_offload_to_action(tensor, fwd_info, bwd_info):
    """
    Convert Prefetch and Offload operation into runtime action.

    Argument:
        tensor(torch.Tensor): input tensor.
        fwd_info(dict): information dict, which contains region indices
            that need to be prefetched, waited, or freed during forward pass.
        bwd_info(dict): information dict, which contains region indices
            that need to be prefetched or waited during backward pass.
    """
    with torch._C.DisableTorchFunction():
        ret = AsynPreFwdPostBwdOP.apply(tensor, fwd_info, bwd_info)
    return ret


def replace_node_users(orig_node: Node, inserted_node: Node, rep_user_nodes: List[Node] = None):
    # Rewire users of orig_node to consume inserted_node instead, so the new
    # action node sits on the dataflow path.
    user_list = list(orig_node.users.keys())
    if rep_user_nodes is not None:
        user_list = rep_user_nodes
    for user in user_list:
        if user == inserted_node:
            continue
        new_args = list(user.args)
        new_kwargs = dict(user.kwargs)
        # the origin node may be a positional argument or key word argument of user node
        if orig_node in new_args:
            # substitute the origin node with offload_apply_node
            new_args[new_args.index(orig_node)] = inserted_node
            user.args = tuple(new_args)
        elif str(orig_node) in new_kwargs:
            # substitute the origin node with offload_apply_node
            new_kwargs[str(orig_node)] = inserted_node
            user.kwargs = new_kwargs


def runtime_syn_offload_apply_pass(gm: torch.fx.GraphModule, region_list: List[Region]):
    """
    This pass is used to add the synchronous upload and offload spec apply node to the origin graph.
    """
    mod_graph = gm.graph
    # Insert after the first graph node (presumably the placeholder/input —
    # TODO confirm against the tracer's output).
    last_inp_node = tuple(mod_graph.nodes)[0]

    for r_idx, region in enumerate(region_list):
        # forward upload
        fwd_info = {}
        if requires_upload_p_in_fwd(region_list[region.shared_rid]):
            fwd_info["h2d_rid"] = region.r_id

        # forward offload
        if r_idx > 0 and region_list[r_idx - 1].need_offload:
            fwd_info["d2h_rid"] = r_idx - 1

        bwd_info = {}
        # backward upload
        if r_idx > 0 and region_list[r_idx - 1].need_offload:
            bwd_info["h2d_rid"] = region_list[r_idx - 1].r_id

        if fwd_info or bwd_info:
            with mod_graph.inserting_after(last_inp_node):
                new_node = mod_graph.create_node(
                    "call_function", convert_fwd_upload_bwd_offload_to_action, args=(last_inp_node, fwd_info, bwd_info)
                )
            replace_node_users(last_inp_node, new_node)

        last_inp_node = region.nodes[-1]

    return gm


def runtime_asyn_offload_apply_pass(gm: torch.fx.GraphModule, region_list: List[Region]):
    """
    This pass is used to add the asynchronous prefetch and offload spec apply node to the origin graph.
    """
    mod_graph = gm.graph

    # upload parameters of the first region
    last_inp_node = tuple(mod_graph.nodes)[0]
    first_region_with_p = [region for region in region_list if region.param_size][0]
    fwd_info = {"h2d_rid": first_region_with_p.r_id}
    with mod_graph.inserting_after(last_inp_node):
        upload_apply_node = mod_graph.create_node(
            "call_function", convert_fwd_upload_bwd_offload_to_action, args=(last_inp_node, fwd_info, {})
        )
    replace_node_users(last_inp_node, upload_apply_node)
    last_inp_node = upload_apply_node

    for r_idx, region in enumerate(region_list):
        # forward prefetch
        fwd_info = {}
        if region.param_size:
            fwd_info["sync_rid"] = region.r_id
        fwd_prefetch_region = region.fwd_prefetch_region
        if fwd_prefetch_region and requires_upload_p_in_fwd(region_list[fwd_prefetch_region.shared_rid]):
            fwd_info["h2d_rid"] = fwd_prefetch_region.r_id

        # forward offload
        if r_idx > 0 and region_list[r_idx - 1].need_offload:
            fwd_info["d2h_rid"] = r_idx - 1

        bwd_info = {}
        # backward prefetch
        if r_idx > 0 and region_list[r_idx - 1].need_offload:
            bwd_info["sync_rid"] = r_idx - 1
        if r_idx > 0 and region_list[r_idx - 1].bwd_prefetch_region:
            bwd_info["h2d_rid"] = region_list[r_idx - 1].bwd_prefetch_region.r_id

        if fwd_info or bwd_info:
            with mod_graph.inserting_after(last_inp_node):
                new_node = mod_graph.create_node(
                    "call_function",
                    convert_fwd_prefetch_bwd_offload_to_action,
                    args=(last_inp_node, fwd_info, bwd_info),
                )
            replace_node_users(last_inp_node, new_node)

        last_inp_node = region.nodes[-1]

        # The last region of forward doubles as the first of backward: schedule
        # its backward prefetch via an extra action node after the region.
        if region.bwd_prefetch_region:
            bwd_info = {"h2d_rid": region.bwd_prefetch_region.r_id}
            with mod_graph.inserting_after(last_inp_node):
                new_node = mod_graph.create_node(
                    "call_function", convert_fwd_prefetch_bwd_offload_to_action, args=(last_inp_node, {}, bwd_info)
                )
            replace_node_users(last_inp_node, new_node)

    # gm.graph.print_tabular()
    return gm
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/base_offload_module.py
colossalai/auto_parallel/offload/base_offload_module.py
from functools import partial
from typing import Optional, Set

import torch
import torch.nn as nn

from colossalai.utils import _cast_float, get_current_device
from colossalai.utils.common import free_storage

from .region_manager import RegionManager
from .util import GlobalRuntimeInfo


class BaseOffloadModule:
    """
    BaseOffloadModule: A model wrapper for parameter offloading.

    Args:
        model (nn.Module): model to apply offloading.
        region_manager (RegionManager): a ``RegionManager`` instance.
        is_sync (bool): synchronous mode or not.
    """

    def __init__(self, model: nn.Module, region_manager: RegionManager, is_sync=True):
        self.model = model
        self.region_manager = region_manager
        self.grad_hook_list = []
        # Incremented (from grad_handle) when a region's grads contain inf/nan;
        # read by AMPOptimizer's overflow check.
        self.overflow_counter = torch.tensor([0], dtype=torch.int, device=get_current_device())

        # Sync mode offloads grads on the compute stream; async mode uses the
        # dedicated D2H side stream.
        self.grad_offload_stream = torch.cuda.current_stream() if is_sync else GlobalRuntimeInfo.d2h_stream

        self._cast_buffers()

    def register_grad_hook(self):
        for p in self.model.parameters():
            if p.requires_grad:
                self.grad_hook_list.append(p.register_hook(partial(self.grad_handle, p)))

    def remove_grad_hook(self):
        for hook in self.grad_hook_list:
            hook.remove()

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)

    def _pre_forward(self):
        self.register_grad_hook()
        # Drop any stale CPU gradient buffers from the previous iteration.
        for region in self.region_manager.region_list:
            region.cpu_grad = None

    def forward(self, *args, **kwargs):
        # Inputs are cast to fp16; the wrapped model is run as-is.
        args, kwargs = _cast_float(args, torch.half), _cast_float(kwargs, torch.half)
        self.model.zero_grad(set_to_none=True)
        self._pre_forward()
        outputs = self.model(*args, **kwargs)
        return outputs

    def backward(self, loss):
        loss.backward()
        self._post_backward()

    def _post_backward(self):
        # Full device sync ensures all async grad offloads have completed
        # before hooks/events are torn down.
        torch.cuda.synchronize()
        self.remove_grad_hook()

        for p in self.model.parameters():
            p.grad = None

        GlobalRuntimeInfo().fwd_prefetch_event_map.clear()
        GlobalRuntimeInfo().bwd_prefetch_event_map.clear()

    def grad_handle(self, p, grad):
        # Returning an empty (storage-freed) tensor releases the original grad's
        # CUDA memory as soon as it has been copied into the region slice.
        empty_grad = torch.empty_like(grad)
        free_storage(empty_grad)
        with torch._C.DisableTorchFunction():
            region = self.region_manager.get_region(p)
            region.copy_grad_to_region_slice(p, grad)
            if region.can_release:
                self.overflow_counter += region.has_inf_or_nan
                master_stream = torch.cuda.current_stream()
                # NOTE(review): the context enters self.grad_offload_stream but
                # the wait is issued on GlobalRuntimeInfo().d2h_stream — in sync
                # mode these differ; confirm this is intentional.
                with torch.cuda.stream(self.grad_offload_stream):
                    GlobalRuntimeInfo().d2h_stream.wait_stream(master_stream)
                    region.move_grad_to_cpu()
        return empty_grad

    def _cast_buffers(self):
        # Buffers are moved to CUDA but keep their original dtype.
        for buffer in self.model.buffers():
            buffer.data = buffer.cuda()

    def parameters(self, recurse: bool = True):
        return self.model.parameters(recurse)

    def named_parameters(self, prefix: str = "", recurse: bool = True):
        return self.model.named_parameters(prefix, recurse)

    def named_buffers(self, prefix: str = "", recurse: bool = True):
        return self.model.named_buffers(prefix, recurse)

    def named_children(self):
        return self.model.named_children()

    def named_modules(
        self, memo: Optional[Set[torch.nn.Module]] = None, prefix: str = "", remove_duplicate: bool = True
    ):
        return self.model.named_modules(memo, prefix, remove_duplicate)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/util.py
colossalai/auto_parallel/offload/util.py
from dataclasses import dataclass
from typing import List

import torch

from colossalai.context.singleton_meta import SingletonMeta
from colossalai.fx.profiler import calculate_fwd_out, calculate_fwd_tmp

from .region import Region


@dataclass
class NodeInfo:
    # fx node identifier and its peak forward/backward runtime memory (bytes).
    node_id: int = 0
    runtime_fwd_mem: float = 0
    runtime_bwd_mem: float = 0


class NvDevicePower:
    """
    NVIDIA GPU computing performance (TFLOPs).
    """

    RTX3080_FP16 = 70
    RTX3080_FP32 = 34.1

    RTX3090_FP16 = 71
    RTX3090_FP32 = 35.7

    V100_FP16 = 31.4
    V100_FP32 = 15.7

    A100_FP16 = 78
    A100_FP32 = 19.5


class GlobalRuntimeInfo(metaclass=SingletonMeta):
    """Process-wide singleton holding the CUDA side streams, prefetch event
    maps and region list shared by the offload runtime ops and passes."""

    def __init__(self):
        self.h2d_stream = torch.cuda.Stream()
        self.d2h_stream = torch.cuda.Stream()
        self.fwd_prefetch_event_map = {}
        self.bwd_prefetch_event_map = {}
        self.region_list = []


def compute_act_peak_mem(region_list: List[Region]) -> float:
    """Simulate forward then backward over the linearized regions and return
    the peak activation memory (bytes) reached at any point."""
    act_peak_mem = 0
    runtime_mem = 0
    # forward: each node adds its temporary and output activation memory
    for region in region_list:
        for node in region.nodes:
            runtime_mem = runtime_mem + calculate_fwd_tmp(node) + calculate_fwd_out(node)
            act_peak_mem = max(runtime_mem, act_peak_mem)
    # backward: walk regions and nodes in reverse execution order
    bwd_deps = {}
    for region in reversed(region_list):
        for node in reversed(region.nodes):
            runtime_mem -= calculate_fwd_out(node)
            runtime_mem = runtime_mem + node.meta["bwd_mem_tmp"] + node.meta["bwd_mem_out"]

            act_peak_mem = max(runtime_mem, act_peak_mem)

            runtime_mem = runtime_mem - node.meta["bwd_mem_tmp"] - calculate_fwd_tmp(node)

            # free bwd_mem_out once all of a user's inputs have produced grads
            bwd_deps[node] = len(node.all_input_nodes)
            for user_node in node.users:
                if user_node in bwd_deps:
                    bwd_deps[user_node] -= 1
                    if bwd_deps[user_node] <= 0:
                        runtime_mem -= user_node.meta["bwd_mem_out"]

    return act_peak_mem


def compute_max_param_mem(region_list: List[Region]) -> float:
    """Return the largest single-region parameter size (bytes)."""
    return max(region.param_size for region in region_list)


def compute_total_param_mem(region_list: List[Region]) -> float:
    """Return total parameter size (bytes), counting each shared parameter
    once (only the region whose r_id <= shared_rid owns the storage)."""
    return sum(region.param_size for region in region_list if region.r_id <= region.shared_rid)


def requires_upload_p_in_fwd(shared_reg: Region):
    """True if the region's parameters must be uploaded (H2D) before its
    forward pass: it owns/follows the shared storage, or the earlier sharer
    has already offloaded it."""
    return (shared_reg.r_id >= shared_reg.shared_rid) or (
        shared_reg.r_id < shared_reg.shared_rid and shared_reg.need_offload
    )


def requires_release_p_in_bwd(shared_reg: Region):
    # NOTE: currently the same predicate as requires_upload_p_in_fwd — kept as
    # a separate name because the two decisions are conceptually distinct.
    return (shared_reg.r_id >= shared_reg.shared_rid) or (
        shared_reg.r_id < shared_reg.shared_rid and shared_reg.need_offload
    )


def requires_offload_g_in_bwd(region: Region):
    """True if the region's gradients should be offloaded (D2H) in backward:
    it holds parameters and is the owning side of any shared storage."""
    return region.param_size and (region.r_id <= region.shared_rid)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/region_manager.py
colossalai/auto_parallel/offload/region_manager.py
from typing import Any, Dict, List, Tuple import torch from torch.fx import Graph, Node from .region import Region from .solver import SolverFactory from .training_simulator import TrainingSimulator from .util import NodeInfo class RegionManager: """ RegionManager is used to construct and manage the offload plan for the model execution. Args: graph (Graph): a Graph object used for analysis and strategy generation. solver_name (str): a solver name which specifies the preferences for plan searching. memory_budget (float): the given memory budget. cnode (List[str], optional): Common node List, should be the subset of input. """ def __init__(self, graph: Graph, solver_name: str = "asyn", memory_budget: float = -1.0, cnode: List[str] = None): self.graph = graph assert graph.owning_module is not None, "The given graph is not associated with a owning_module" self.root_module = self.graph.owning_module self.nodes = list(graph.nodes) self.cnode = cnode self.only_param_ops = [] self.param_region_map: Dict[torch.nn.Parameter, Region] = dict() self.shared_region_pairs: List[Tuple[Region, Region]] = list() self.region_list: List[Region] = list() self.rid_in_pool: List[int] = list() self.mem_block_size: int = 0 self.memory_budget = memory_budget self.solver_name = solver_name self.require_pool: bool = solver_name == "asyn" self.reg_to_block: Dict[int, int] = dict() def _build_regions(self): """ 1. Pre-processing, mainly contains linearized computing graph and merge smaller regions into larger ones. 2. Construct a solver to search for an efficient offload strategy. 3. Post-processing, mainly contains early region placement if using asynchronous mode, and initialize region data. 
""" self._pre_process() solver_cls = SolverFactory.create(self.solver_name) solver = solver_cls(self.region_list, self.memory_budget) solver._call_solver() self._post_process(solver.best_ts) def _pre_process(self): init_region_list = self._linearize_graph() if len(self.shared_region_pairs) > 1: raise NotImplementedError("The current version only considers at most one pair of parameter sharing.") elif len(self.shared_region_pairs) == 1: shared_regs = self.shared_region_pairs[0] assert shared_regs[0].shared_rid == shared_regs[1].r_id and shared_regs[1].shared_rid == shared_regs[0].r_id fst_id = shared_regs[0].r_id lst_id = shared_regs[1].r_id regs_left_out = init_region_list[: fst_id + 1] regs_right_out = init_region_list[lst_id:] hold_regs = init_region_list[fst_id + 1 : lst_id] else: regs_left_out = [] regs_right_out = [] hold_regs = init_region_list self.mem_block_size = self._search_block_size(hold_regs) hold_regs = self._merge_small_regions(hold_regs) if self.require_pool: for reg in hold_regs: reg.in_mem_pool_flag = True self.rid_in_pool.append(reg.r_id) self.region_list.extend(regs_left_out) self.region_list.extend(hold_regs) for reg in regs_right_out: reg.r_id = self.region_list[-1].r_id + 1 self.region_list[reg.shared_rid].shared_rid = reg.r_id self.region_list.append(reg) self._process_shared_region() self.max_param_num = max([reg.param_num for reg in self.region_list]) self.memory_budget -= self.max_param_num * torch.tensor([], dtype=torch.float32).element_size() def _post_process(self, ts: TrainingSimulator = None): if self.require_pool: self._early_region_placement(ts) self._init_region_data() def _early_region_placement(self, ts: TrainingSimulator): """ Implemented the early region placement strategy to avoid GPU memory fragmentation. It maps all region data into a contiguous memory space and reuses the same memory space for regions that do not coexist. Args: ts (TrainingSimulator): the best training simulator, which records region execution flow. 
Raises: NotImplementedError: due to the naive implementation, it may not find a suitable region placement strategy for the given execution flow. """ reg_flow = torch.cat([ts.fwd_reg_flow, ts.bwd_reg_flow], dim=0) mem_block_num = torch.max(torch.sum(reg_flow[:, self.rid_in_pool], dim=1)) coexist_matrix = torch.logical_or(ts.fwd_reg_flow, ts.bwd_reg_flow) block_to_regs = {} for block_idx in range(mem_block_num): block_to_regs[block_idx] = [] for reg in self.region_list: if reg.r_id in self.rid_in_pool: cur_reg_appears = coexist_matrix[:, reg.r_id] cur_reg_coexists = torch.sum(coexist_matrix[cur_reg_appears], dim=0).bool() for block_idx in range(mem_block_num): if not any(cur_reg_coexists[block_to_regs[block_idx]]): block_to_regs[block_idx].append(reg.r_id) self.reg_to_block[reg.r_id] = block_idx break if reg.r_id not in self.reg_to_block: raise NotImplementedError( f"can not find a block from the memory pool to store parameters of the region" ) self.memory_pool = torch.chunk( torch.zeros(int(mem_block_num * self.mem_block_size / 2), dtype=torch.half, device="cuda"), chunks=int(mem_block_num), ) def _merge_small_regions(self, orig_reg_list: List[Region]) -> List[Region]: """ Merge smaller regions into larger ones for better bandwidth utilization and easier management. It is inspired by Gemini. Args: orig_reg_list (List[Region]): original region list. Returns: List[Region]: region list after merging. 
""" r_id = orig_reg_list[0].r_id region = Region(r_id=r_id) region_list = [region] for orig_reg in orig_reg_list: if region_list[-1].param_size + orig_reg.param_size > self.mem_block_size: r_id += 1 region = Region(r_id=r_id) region_list.append(region) region.param_size += orig_reg.param_size region.param_num += orig_reg.param_num region.nodes.extend(orig_reg.nodes) region.fp16_params.extend(orig_reg.fp16_params) self.__update_param_region_map(orig_reg.fp16_params, region) return region_list def _search_block_size( self, region_list: List[Region], search_interval_byte: int = 1024, search_range_byte: int = 128 * 1024**2 ) -> int: """ Search for a suitable memory block size. Args: region_list (List[Region]): region list. search_interval_byte (int): searching interval in byte. search_range_byte (int): searching range in byte. Returns: int: the best memory block size. """ def _get_wasted_mem(size_list: List[int], blk_size: int): """ Get wasted byte for a certain block size. """ acc_wasted = 0 left = 0 for s in size_list: if left + s > blk_size: acc_wasted += blk_size - left left = s left += s acc_wasted += blk_size - left return acc_wasted param_size_list = [region.param_size for region in region_list if region.r_id == region.shared_rid] start_size = max(param_size_list) min_mem_waste = float("+inf") best_block_size = start_size for block_size in range(start_size, start_size + search_range_byte + 1, search_interval_byte): temp_waste = 0 temp_waste += _get_wasted_mem(param_size_list, block_size) if temp_waste < min_mem_waste: min_mem_waste = temp_waste best_block_size = block_size return best_block_size def _init_region_data(self): """ Initialize region data, which maps the parameters in the region to a contiguous memory space. 
""" self.temp_fp32_data = torch.zeros(self.max_param_num, device="cuda", dtype=torch.float32) for region in self.region_list: pre_alloc_tensor = None if self.require_pool and region.r_id in self.rid_in_pool: block_idx = self.reg_to_block[region.r_id] pre_alloc_tensor = self.memory_pool[block_idx] if region.r_id <= region.shared_rid: region.init_param_data(pre_alloc_tensor) else: shared_region = self.region_list[region.shared_rid] region.fp16_data = shared_region.fp16_data region.fp32_data = shared_region.fp32_data region.param_to_range = shared_region.param_to_range region.temp_fp32_data = self.temp_fp32_data[: region.param_num].detach() torch.cuda.empty_cache() def _process_shared_region(self): """ Special processing for the shared region, which uses GPT2 and Bert case as a priori knowledge. """ if len(self.shared_region_pairs): assert len(self.shared_region_pairs) <= 1 former_reg, latter_reg = self.shared_region_pairs[0] assert latter_reg.param_num >= former_reg.param_num embedding_node = former_reg.nodes[-1] assert embedding_node.op == "call_module" and isinstance( self.root_module.get_submodule(embedding_node.target), torch.nn.Embedding ) if latter_reg.param_num > former_reg.param_num: for idx, n in enumerate(latter_reg.nodes): if ( n.op == "call_module" and isinstance(self.root_module.get_submodule(n.target), torch.nn.Linear) ) or (n.op == "call_function" and n.target is torch.nn.functional.linear): cut_node_idx = idx + 1 break assert len(latter_reg.fp16_params) == 2 new_reg = latter_reg.split(cut_node_idx, 1) for p in new_reg.fp16_params: self.param_region_map[p] = new_reg self.region_list.insert(new_reg.r_id, new_reg) for reg in self.region_list[new_reg.r_id + 1 :]: reg.r_id += 1 latter_reg.shared_rid = former_reg.r_id former_reg.shared_rid = latter_reg.r_id def _linearize_graph(self) -> List[Region]: """Linearizing the graph Args: graph (Graph): The computing graph to be optimized. 
Returns: List[Region]: each region contains the actual 'node' in linearized manner. Remarks: Do merge the inplace ops and shape-consistency ops into the previous node. """ # List of target name that could be seen as common node common_ops = ["getattr", "getitem", "size"] def _is_cop(target: Any) -> bool: """Check if an op could be seen as common node Args: target (Any): node target Returns: bool """ if isinstance(target, str): return target in common_ops else: return target.__name__ in common_ops def _is_act(data: Any) -> bool: """Check if an op could be seen as parameter computation start Args: data (Any): meta_data Returns: bool """ label = False if isinstance(data, torch.Tensor): return True elif isinstance(data, (tuple, list)): for d in data: label = label or _is_act(d) return label def _maybe_param_comp_start() -> bool: """Check if an op could be seen as parameter computation start Args: n (Node): node Returns: bool """ label = False if n.op == "get_attr": label = True elif n.op == "call_module": target = n.target submod = self.root_module.get_submodule(target) if ( len(list(submod.named_parameters(recurse=False))) != 0 or len(list(submod.named_buffers(recurse=False))) != 0 ): label = True return label and not sum([v for _, v in param_op_deps.items()]) def _is_param_comp_end() -> bool: """Check if an op could be seen as parameter computation end Args: n (Node): node Returns: bool """ def _is_inplace(n: Node): """Get the inplace argument from ``torch.fx.Node``""" inplace = False if n.op == "call_function": inplace = n.kwargs.get("inplace", False) elif n.op == "call_module": inplace = getattr(n.graph.owning_module.get_submodule(n.target), "inplace", False) return inplace label = False if n.op == "call_module": target = n.target submod = self.root_module.get_submodule(target) if ( len(list(submod.named_parameters(recurse=False))) != 0 or len(list(submod.named_buffers(recurse=False))) != 0 ): label = True elif n.op == "call_function": label = any(map(lambda x: 
x.name in self.only_param_ops, n.all_input_nodes)) and any( map(lambda x: x.name not in self.only_param_ops and not _is_cop(n.target), n.all_input_nodes) ) return label and not sum([v for _, v in param_op_deps.items()]) and not any(map(_is_inplace, n.users)) def _exception_node_handling(): # TODO meta info prop bug if n.name.__contains__("transpose") and n.meta["fwd_out"][0].dim() <= 2: n.meta["fwd_out"] = [] # make sure that item in cnode is valid if self.cnode: for name in self.cnode: try: assert ( next(node for node in self.graph.nodes if node.name == name).op == "placeholder" ), f"Common node {name} is not an input of the model." except StopIteration: raise ValueError(f"Common node name {name} not in graph.") else: self.cnode = [] node_id = 0 region_id = 0 param_op_deps = {} deps = {} region_list = [] region = Region(r_id=region_id) act_n = None for n in self.graph.nodes: if n.op != "placeholder" and n.op != "output": for n_par in n.all_input_nodes: if n_par.op != "placeholder" and n_par.name not in self.cnode: deps[n_par] -= 1 if n_par.op != "placeholder" and n_par.name in self.only_param_ops: param_op_deps[n_par] -= 1 if act_n in region.nodes and _maybe_param_comp_start(): ns = [] border_n_idx = region.nodes.index(act_n) if border_n_idx < len(region.nodes): ns = region.nodes[border_n_idx + 1 :] region.nodes = region.nodes[: border_n_idx + 1] region_list.append(region) region_id += 1 region = Region(r_id=region_id) region.nodes = ns _exception_node_handling() region.nodes.append(n) self._set_node_and_region_info(node_id, n, region) node_id += 1 # if the node could free all dependencies in graph # we could begin a new region if _is_param_comp_end(): region_list.append(region) region_id += 1 region = Region(r_id=region_id) # propagate common node attr if possible if len(n.all_input_nodes) == len( [node for node in n.all_input_nodes if node.name in self.cnode] ) or _is_cop(n.target): self.cnode.append(n.name) else: deps[n] = len([user for user in n.users if 
user.op != "output"]) # propagate param node attr if possible if ( len(n.all_input_nodes) == len([node for node in n.all_input_nodes if node.name in self.only_param_ops]) or n.op == "get_attr" ): self.only_param_ops.append(n.name) param_op_deps[n] = len([user for user in n.users if user.op != "output"]) # record last activation node if _is_act(n._meta_data): act_n = n if len(region.nodes): region_list.append(region) return region_list def _set_node_and_region_info(self, node_id: int, cur_n: Node, cur_reg: Region): cur_n.node_info = NodeInfo(node_id) if cur_n.op == "call_module": target = cur_n.target submod = self.root_module.get_submodule(target) for p in list(submod.parameters(recurse=False)): if p in self.param_region_map: cur_reg.shared_rid = self.param_region_map[p].r_id self.param_region_map[p].shared_rid = cur_reg.r_id self.shared_region_pairs.append((self.param_region_map[p], cur_reg)) else: self.param_region_map[p] = cur_reg cur_reg.fp16_params.append(p) cur_reg.param_num += p.data.numel() cur_reg.param_size += p.data.numel() * p.data.element_size() elif cur_n.op == "get_attr": attr_itr = self.root_module atoms = cur_n.target.split(".") for atom in atoms: attr_itr = getattr(attr_itr, atom) if isinstance(attr_itr, torch.nn.Parameter): if attr_itr in self.param_region_map: cur_reg.shared_rid = self.param_region_map[attr_itr].r_id self.param_region_map[attr_itr].shared_rid = cur_reg.r_id self.shared_region_pairs.append((self.param_region_map[attr_itr], cur_reg)) else: self.param_region_map[attr_itr] = cur_reg cur_reg.fp16_params.append(attr_itr) cur_reg.param_num += attr_itr.data.numel() cur_reg.param_size += attr_itr.data.numel() * attr_itr.data.element_size() def get_region(self, param: torch.nn.Parameter) -> Region: """ Return the region owning the parameter. 
Args: param (torch.nn.Parameter): a torch parameter object """ return self.param_region_map[param] def __update_param_region_map(self, params: List[torch.nn.Parameter], region: Region): for p in params: self.param_region_map[p] = region
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/mem_optimize.py
colossalai/auto_parallel/offload/mem_optimize.py
from typing import Dict import torch import torch.fx from torch.fx import GraphModule from torch.utils._pytree import tree_map from colossalai.fx import ColoTracer, is_compatible_with_meta from colossalai.fx.passes.meta_info_prop import MetaInfoProp from .base_offload_module import BaseOffloadModule from .region_manager import RegionManager from .runtime import runtime_asyn_offload_apply_pass, runtime_syn_offload_apply_pass from .util import GlobalRuntimeInfo, compute_act_peak_mem, compute_max_param_mem, compute_total_param_mem def memory_optimize( model: torch.nn.Module, inps: Dict[str, torch.Tensor], memory_budget: float = -1.0, solver_name: str = "asyn" ): model = model.cpu().half() tracer = ColoTracer() assert is_compatible_with_meta() wrap_fn = lambda x: x.to("meta") if isinstance(x, torch.Tensor) else x meta_args = tree_map(wrap_fn, inps) graph = tracer.trace(model, meta_args=meta_args) gm = GraphModule(model, graph, model.__class__.__name__) interp = MetaInfoProp(gm) interp.propagate(*meta_args.values()) region_manager = RegionManager(graph, solver_name=solver_name, memory_budget=memory_budget) region_manager._build_regions() GlobalRuntimeInfo().region_list = region_manager.region_list act_peak_mem = compute_act_peak_mem(region_manager.region_list) / 1024**2 max_param_mem = compute_max_param_mem(region_manager.region_list) / 1024**2 total_param_mem = compute_total_param_mem(region_manager.region_list) / 1024**2 print( f"act_peak_mem={act_peak_mem:.3f} MB | max_param_mem={max_param_mem:.3f} MB | total_param_mem={total_param_mem:.3f}" ) if solver_name == "syn": gm = runtime_syn_offload_apply_pass(gm, region_manager.region_list) elif solver_name == "asyn": gm = runtime_asyn_offload_apply_pass(gm, region_manager.region_list) else: raise TypeError(f"Unknown solver name {solver_name}!") gm.recompile() optimized_model = BaseOffloadModule(gm, region_manager, solver_name == "syn") return optimized_model
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/region.py
colossalai/auto_parallel/offload/region.py
from typing import Dict, List, Tuple import torch from torch.fx import Node from colossalai.utils.common import free_storage from colossalai.zero.gemini.chunk.chunk import alloc_storage class Region: """ Region: A container owning a piece of contiguous nodes in the DNN computing graph. Args: r_id (int): the index of the region in the computing graph. """ def __init__(self, r_id: int = 0) -> None: self.r_id: int = r_id self.fp16_params: List[torch.nn.Parameter] = [] self.param_size: int = 0 self.shared_rid: int = self.r_id self.param_num: int = 0 self.grad_num: int = 0 self.fp16_data = None self.fp32_data = None self.cpu_grad = None self.temp_fp32_data = None self.param_to_range: Dict[torch.nn.Parameter, Tuple[int, int]] = dict() self.need_offload: bool = False self.is_syn: bool = False self.nodes: List[Node] = [] self.fwd_prefetch_region = None self.bwd_prefetch_region = None self.in_mem_pool_flag: bool = False @property def can_release(self) -> bool: """ Check if the region can be released. """ return self.grad_num == self.param_num @property def has_inf_or_nan(self) -> bool: """ Check if the grad of the region has inf or nan values on CUDA. """ return torch.isinf(self.fp16_data).any() | torch.isnan(self.fp16_data).any() def init_param_data(self, pre_alloc_tensor: torch.Tensor = None): """ Map the parameters in the region to a contiguous memory space. 
""" self.fp16_data = torch.zeros(self.param_num, dtype=torch.half, device="cuda") offset = 0 for param in self.fp16_params: param.data = param.data.cuda() p_num = param.data.numel() self.fp16_data[offset : offset + p_num].copy_(param.data.flatten()) param.data = self.fp16_data[offset : offset + p_num].view(param.data.shape) self.param_to_range[param] = (offset, offset + p_num) offset += p_num self.fp32_data = self.fp16_data.float().cpu().pin_memory() free_storage(self.fp16_data) if self.in_mem_pool_flag and pre_alloc_tensor is not None: self.fp16_data = pre_alloc_tensor def move_param_to_cuda(self): """ Move parameters from CPU to GPU. It first moves float32 parameters to GPU and then transforms float32 parameters to half-precision on the GPU. The reason is that the performance of precision conversion on the CPU is much slower than the data transfer overhead. """ self.temp_fp32_data.copy_(self.fp32_data, non_blocking=True) self.temp_fp32_data.record_stream(torch.cuda.current_stream()) if not self.in_mem_pool_flag: alloc_storage(self.fp16_data) self.fp16_data[: self.param_num].copy_(self.temp_fp32_data) self.fp16_data.record_stream(torch.cuda.current_stream()) self.__update_params_ptr() def move_grad_to_cpu(self): """ Move gradients from GPU to CPU. """ self.cpu_grad = torch.empty(self.param_num, dtype=torch.half, pin_memory=True) self.cpu_grad.copy_(self.fp16_data[: self.param_num], non_blocking=True) self.fp16_data.record_stream(torch.cuda.current_stream()) if not self.in_mem_pool_flag: self.free_cuda_data() self.grad_num = 0 def free_cuda_data(self): free_storage(self.fp16_data) # torch.cuda.empty_cache() def copy_grad_to_region_slice(self, param: torch.nn.Parameter, data_slice: torch.Tensor) -> None: """ Copy data slice to the memory space indexed by the input tensor in the region. 
Args: param (torch.nn.Parameter): the param used to retrieve meta information data_slice (torch.Tensor): the tensor to be copied to the region """ begin, end = self.param_to_range[param] self.fp16_data[begin:end].copy_(data_slice.data.flatten()) param.data = self.fp16_data[begin:end].view(param.data.shape) self.grad_num += data_slice.numel() def split(self, cut_node_idx: int, cut_param_idx: int): """ Split the region into two and return the latter. """ new_reg = Region(r_id=self.r_id + 1) new_reg.nodes = self.nodes[cut_node_idx:] new_reg.fp16_params = self.fp16_params[cut_param_idx:] for p in new_reg.fp16_params: new_reg.param_size += p.data.numel() * p.data.element_size() new_reg.param_num += p.data.numel() self.nodes = self.nodes[:cut_node_idx] self.fp16_params = self.fp16_params[:cut_param_idx] self.param_size -= new_reg.param_size self.param_num -= new_reg.param_num return new_reg def __update_params_ptr(self) -> None: for param in self.fp16_params: begin, end = self.param_to_range[param] param.data = self.fp16_data[begin:end].view(param.data.shape)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/solver.py
colossalai/auto_parallel/offload/solver.py
import time
from abc import ABC, abstractmethod
from typing import Dict, List, Type

# pynvml is optional: it is only needed to identify the GPU model when
# estimating its computing power. Record its absence instead of crashing
# later with a NameError on nvmlInit.
NOT_NVML = False
try:
    from pynvml import *
except ImportError:
    NOT_NVML = True

import torch
from torch.fx.node import Node

from colossalai.accelerator import get_accelerator

from .region import Region
from .training_simulator import AsynTrainingSimulator, SynTrainingSimulator, TrainingSimulator
from .util import NodeInfo, NvDevicePower


def benchmark_func(func, number=1, repeat=1, warmup=3):
    """
    Benchmark data transfer cost.

    Args:
        func (callable): the operation to time.
        number (int): calls per timed repetition; the measured time is averaged over them.
        repeat (int): number of timed repetitions.
        warmup (int): untimed warm-up calls before measuring.

    Returns:
        float: mean wall-clock time of one call, in seconds.
    """
    for _ in range(warmup):
        func()
    costs = []
    for _ in range(repeat):
        # Synchronize so pending asynchronous CUDA work does not leak into the timing.
        torch.cuda.synchronize()
        begin = time.time()
        for _ in range(number):
            func()
        torch.cuda.synchronize()
        costs.append((time.time() - begin) / number)
    return sum(costs) / len(costs)


class Solver(ABC):
    """
    The parameter offload solver.

    Args:
        region_list (List[Region]): represents the linearized DNN computing graph.
        memory_budget (float): the given memory budget.
        error_factor (float): the error factor.
            It is used to reduce the memory budget. Due to some errors in the estimation
            of peak memory and execution time.
    """

    def __init__(self, region_list: List[Region], memory_budget: float = -1.0, error_factor: float = 0.95) -> None:
        self.region_list = region_list

        self.error_factor: float = error_factor
        if memory_budget > 0:
            self.memory_budget = memory_budget * self.error_factor
        else:
            # Default to the full device memory, scaled down by the error factor.
            self.memory_budget = (
                torch.cuda.get_device_properties(get_accelerator().get_current_device()).total_memory
                * self.error_factor
            )

        self.link_to_bandwidth: Dict[str, Dict[float, float]] = self._profile_bandwidth()
        self.comp_power: float = self._extract_computing_power()

    @abstractmethod
    def _call_solver(self):
        raise NotImplementedError

    @abstractmethod
    def _try_to_offload(self, *args):
        raise NotImplementedError

    @abstractmethod
    def _eval_one_choice(self, *args):
        raise NotImplementedError

    def _compute_offload_profit(self, total_mem_saving: float, peak_mem_saving: float, extra_cost: float):
        """
        Compute the profits of the offload strategies,
        which packages the memory savings information for subsequent comparisons.

        Args:
            total_mem_saving (float): the total memory saving of the offload strategy.
            peak_mem_saving (float): the peak memory saving of the offload strategy.
            extra_cost (float): extra data transfer cost.

        Returns:
            tuple: profit information, the first term represents memory savings per unit of time.
        """
        if extra_cost == 0:
            # means data transfer overhead can be completely overlapped
            return (float("inf"), total_mem_saving, peak_mem_saving)
        return (total_mem_saving / extra_cost, total_mem_saving, peak_mem_saving)

    def _compare_profit(self, profit_a: tuple, profit_b: tuple) -> bool:
        """
        Compare the profits of the two offload strategies using the dictionary order algorithm.

        Args:
            profit_a (tuple): the profit of a offload strategy.
            profit_b (tuple): the profit of another offload strategy.

        Returns:
            bool: whether profit_a is greater than profit_b.
        """
        for val1, val2 in zip(profit_a, profit_b):
            if val1 != val2:
                return val1 > val2
        return False

    def _update_state(self, best_ts: TrainingSimulator):
        """
        Update the solver state.
        """
        self.best_ts = best_ts
        self._update_node_mem_info(best_ts.fwd_node_mem, best_ts.bwd_node_mem)

    def _update_node_mem_info(self, fwd_mem_info: Dict[Node, float], bwd_mem_info: Dict[Node, float]):
        """
        Update the runtime memory information of the node.

        Args:
            fwd_mem_info (Dict[Node, float]): the runtime memory of each node in forward pass.
            bwd_mem_info (Dict[Node, float]): the runtime memory of each node in backward pass.
        """
        for node, mem in fwd_mem_info.items():
            assert hasattr(node, "node_info") and isinstance(node.node_info, NodeInfo)
            node.node_info.runtime_fwd_mem = mem
        for node, mem in bwd_mem_info.items():
            assert hasattr(node, "node_info") and isinstance(node.node_info, NodeInfo)
            node.node_info.runtime_bwd_mem = mem

    def _extract_computing_power(self):
        """
        Return the FP16 computing performance of the current NVIDIA GPU, in FLOPS.

        Raises:
            RuntimeError: pynvml is not installed.
            TypeError: Unknown NVIDIA GPU device.
        """
        if NOT_NVML:
            # Fail with a clear message instead of a NameError on nvmlInit.
            raise RuntimeError("pynvml is required to detect the GPU computing power, please install it first.")
        nvmlInit()
        handle = nvmlDeviceGetHandleByIndex(0)
        device_name = nvmlDeviceGetName(handle)
        # NOTE(review): some pynvml versions return the name as bytes — normalize.
        if isinstance(device_name, bytes):
            device_name = device_name.decode()
        units = 1e12  # NvDevicePower values are expressed in TFLOPS

        if "RTX 3080" in device_name:
            return NvDevicePower.RTX3080_FP16 * units
        elif "RTX 3090" in device_name:
            return NvDevicePower.RTX3090_FP16 * units
        elif "V100" in device_name:
            return NvDevicePower.V100_FP16 * units
        elif "A100" in device_name:
            return NvDevicePower.A100_FP16 * units
        else:
            raise TypeError(f"Unknown NVIDIA GPU device name {device_name}")

    def _profile_bandwidth(self):
        """
        Profile the bidirectional communication bandwidth between CPU and GPU
        using data volumes ranging from 1KB to 1GB.

        Returns:
            Dict[str, Dict[float, float]]: per link ("h2d"/"d2h"),
                a map from transfer size in bytes to measured bandwidth in bytes/s.
        """
        print("profiling bandwidth ......")
        link_to_bandwidth = {}
        links = ["h2d", "d2h"]

        for link in links:
            t_size = 1024
            size_to_bandwidth = {}

            # from 1KB to 1GB
            for _ in range(21):
                if link == "h2d":
                    src_tensor = torch.ones(int(t_size), dtype=torch.int8, pin_memory=True)
                    dst_tensor = torch.ones((int(t_size)), dtype=torch.int8, device="cuda")
                elif link == "d2h":
                    src_tensor = torch.ones(int(t_size), dtype=torch.int8, device="cuda")
                    dst_tensor = torch.ones((int(t_size)), dtype=torch.int8, pin_memory=True)

                def func():
                    dst_tensor.copy_(src_tensor)

                size_to_bandwidth[t_size] = t_size / benchmark_func(func, number=5, repeat=3)
                print(
                    f"size: {t_size / 1024 ** 2:.3f} MB, "
                    f"{src_tensor.device.type}-to-{dst_tensor.device.type} "
                    f"bandwidth: {size_to_bandwidth[t_size] / 1024 ** 3:.3f} GB/s"
                )

                t_size *= 2

            link_to_bandwidth[link] = size_to_bandwidth
        return link_to_bandwidth


class SynGreedySolver(Solver):
    def __init__(self, region_list: List[Region], memory_budget: float = -1.0) -> None:
        super().__init__(region_list, memory_budget)

        self.best_ts: SynTrainingSimulator = None
        self._init_state()

    def _init_state(self):
        """
        Initialize the solver state when without offloading.
        """
        ts = SynTrainingSimulator(self.region_list, self.comp_power, self.link_to_bandwidth)
        ts.execute()
        self._update_state(ts)

    def _call_solver(self):
        """
        Call the solver to search an efficient parameter offloading strategy for the linearized graph.
        The solver adopts greedy algorithm.

        Raises:
            NotImplementedError: Unable to find a solution for the given memory budget.
        """
        print("search offloading strategy ......")
        while self.best_ts.peak_mem > self.memory_budget:
            offload_region = None
            best_ts = None
            max_profit = (0,)

            # search which region should be offloaded,
            # the last region does not need to be offloaded.
            for region in self.region_list[:-1]:
                if region.param_size and not region.need_offload:
                    temp_ts, profit = self._try_to_offload(region)
                    if self._compare_profit(profit, max_profit):
                        offload_region = region
                        max_profit = profit
                        best_ts = temp_ts

            if offload_region is not None and best_ts is not None:
                offload_region.need_offload = True
                offload_region.is_syn = True
                self._update_state(best_ts)
            else:
                raise NotImplementedError(
                    f"can't find the offload strategy met the memory budget {self.memory_budget / 1024 ** 2} MB, "
                    f"it needs {self.best_ts.peak_mem / 1024 ** 2:.3f} MB at least!"
                )

    def _call_solver_l2l(self):
        """
        The layer-wise offload strategy.
        """
        for region in self.region_list[:-1]:
            region.need_offload = True
            region.is_syn = True

    def _try_to_offload(self, offload_region: Region):
        # record previous information
        orig_need_offload = offload_region.need_offload
        assert not orig_need_offload
        offload_region.need_offload = True

        ts, profit = self._eval_one_choice(offload_region)

        # restore previous information
        offload_region.need_offload = orig_need_offload
        return ts, profit

    def _eval_one_choice(self, offload_region: Region):
        """
        Evaluate the profit of a strategy choice.

        Args:
            offload_region (Region): the offload region of current choice.

        Returns:
            SynTrainingSimulator: the training simulator corresponding to the current strategy.
            tuple: contains memory saving and cost information of the current strategy.
        """
        ts = SynTrainingSimulator(self.region_list, self.comp_power, self.link_to_bandwidth)
        ts.execute()

        # parameters are uploaded twice (forward and backward), hence the factor 2
        extra_comm_cost = 2.0 * ts._get_communication_overhead("h2d", offload_region.param_size)
        # the shared region needs to be moved twice
        if offload_region.r_id < offload_region.shared_rid:
            extra_comm_cost *= 2.0
        profit = self._compute_offload_profit(ts.total_mem_saving, self.best_ts.peak_mem - ts.peak_mem, extra_comm_cost)

        return ts, profit


class AsynGreedySolver(Solver):
    def __init__(self, region_list: List[Region], memory_budget: float = -1.0, search_window_size: int = 3):
        super().__init__(region_list, memory_budget)

        self.search_window_size = search_window_size
        # Records the prefetch execution location of the offloaded region
        self.region_to_region_map = {}
        self.best_ts: AsynTrainingSimulator = None

        self._init_state()

    def _init_state(self):
        """
        Initialize the solver state when without offloading.
        """
        ts = AsynTrainingSimulator(self.region_list, self.comp_power, self.link_to_bandwidth)
        ts.execute()
        self._update_state(ts)
        print("init peak memory", self.best_ts.peak_mem / 1024**2, "MB")

    def _call_solver(self):
        """
        Call the solver to search an efficient parameter offloading strategy for the linearized graph.
        The solver adopts greedy algorithm.

        Raises:
            NotImplementedError: Unable to find a solution for the given memory budget.
        """
        print("search for offloading strategy ......")
        # Records the prefetch execution location of the offloaded region
        region_to_region_map = {}
        while self.best_ts.peak_mem > self.memory_budget:
            region_to_offload = None
            max_offload_profit = (0,)
            best_offl_ts = None

            # search which region should be offloaded,
            # the last region does not need to be offloaded
            for region in self.region_list[:-1]:
                if region.param_size and not region.need_offload:
                    max_prefetch_profit = (0,)
                    best_pref_ts = None

                    # search when to prefetch the region offloaded
                    for host_region in self.region_list[region.r_id + 1 : region.r_id + 1 + self.search_window_size]:
                        if host_region.bwd_prefetch_region is not None:
                            continue

                        temp_ts, profit = self._try_to_offload(host_region, region)

                        if self._compare_profit(profit, max_prefetch_profit):
                            region_to_region_map[region.r_id] = host_region
                            max_prefetch_profit = profit
                            best_pref_ts = temp_ts
                            if profit[0] == float("inf"):
                                # transfer fully overlapped — no better host exists
                                break

                    if self._compare_profit(max_prefetch_profit, max_offload_profit):
                        region_to_offload = region
                        max_offload_profit = max_prefetch_profit
                        best_offl_ts = best_pref_ts

            if (region_to_offload is not None) and (best_offl_ts is not None):
                region_to_offload.need_offload = True
                if region_to_region_map[region_to_offload.r_id] == region_to_offload:
                    # prefetching into itself degenerates to a synchronous upload
                    region_to_offload.is_syn = True
                else:
                    region_to_region_map[region_to_offload.r_id].bwd_prefetch_region = region_to_offload
                    self.region_to_region_map[region_to_offload.r_id] = region_to_region_map[region_to_offload.r_id]

                self._update_state(best_offl_ts)

            elif len(self.region_to_region_map) > 0:
                self._repair_strategy()
            else:
                raise NotImplementedError(
                    f"can't find the offload strategy met the memory budget {self.memory_budget / 1024 ** 2} MB, "
                    f"it needs {self.best_ts.peak_mem / 1024 ** 2:.3f} MB at least!"
                )

            region_to_region_map.clear()

    def _try_to_offload(self, host_region: Region, offload_region: Region):
        """
        Attempts to offload the region and prefetch it in backward pass.
        """
        # record previous information
        orig_prefetch = host_region.bwd_prefetch_region
        orig_is_syn = offload_region.is_syn
        orig_need_offload = offload_region.need_offload

        if host_region == offload_region:
            offload_region.is_syn = True
        else:
            host_region.bwd_prefetch_region = offload_region
        offload_region.need_offload = True

        ts, profit = self._eval_one_choice()

        # restore previous information
        host_region.bwd_prefetch_region = orig_prefetch
        offload_region.is_syn = orig_is_syn
        offload_region.need_offload = orig_need_offload

        return ts, profit

    def _try_convert_to_syn_upload(self, host_region: Region, offload_region: Region):
        """
        Attempts to convert asynchronous prefetch into synchronous upload operations.
        """
        # record previous information
        orig_prefetch = host_region.bwd_prefetch_region
        orig_is_syn = offload_region.is_syn
        assert orig_prefetch is not None and not orig_is_syn

        host_region.bwd_prefetch_region = None
        offload_region.is_syn = True

        ts, profit = self._eval_one_choice()

        # restore previous information
        host_region.bwd_prefetch_region = orig_prefetch
        offload_region.is_syn = orig_is_syn

        return ts, profit

    def _repair_strategy(self):
        """
        Repair offload strategy.
        It attempts to convert asynchronous prefetch into synchronous upload operations and selects the best one.
        The repair process does not end until peak memory is reduced or there is no asynchronous prefetch operation.
        """
        print("repair strategy ......")
        peak_mem_saving = 0
        while len(self.region_to_region_map) and peak_mem_saving <= 0:
            max_profit = (0,)
            best_ts = None
            undo_host_region = None
            undo_offload_region = None

            for offload_region_id, host_region in self.region_to_region_map.items():
                offload_region = self.region_list[offload_region_id]
                assert host_region.bwd_prefetch_region == offload_region
                assert offload_region.need_offload
                assert not offload_region.is_syn
                ts, profit = self._try_convert_to_syn_upload(host_region, offload_region)

                if self._compare_profit(profit, max_profit):
                    undo_host_region = host_region
                    undo_offload_region = offload_region
                    max_profit = profit
                    best_ts = ts

            if best_ts is None:
                raise NotImplementedError("repair error!")

            assert not undo_offload_region.is_syn
            undo_offload_region.is_syn = True
            undo_host_region.bwd_prefetch_region = None

            peak_mem_saving = self.best_ts.peak_mem - best_ts.peak_mem

            self._update_state(best_ts)
            self.region_to_region_map.pop(undo_offload_region.r_id)

        return best_ts

    def _eval_one_choice(self):
        """
        Evaluate the profit of a strategy choice.

        Returns:
            AsynTrainingSimulator: the training simulator corresponding to the current strategy.
            tuple: contains memory saving and cost information of the current strategy.
        """
        ts = AsynTrainingSimulator(self.region_list, self.comp_power, self.link_to_bandwidth)
        ts.execute()

        # only non-overlapped transfer time counts as extra cost
        extra_comm_cost = max(ts.iter_end_time - self.best_ts.iter_end_time, 0)
        profit = self._compute_offload_profit(ts.total_mem_saving, self.best_ts.peak_mem - ts.peak_mem, extra_comm_cost)

        return ts, profit


class SolverFactory:
    solvers: Dict[str, Type[Solver]] = {"syn": SynGreedySolver, "asyn": AsynGreedySolver}

    @staticmethod
    def create(solver_name: str) -> Type[Solver]:
        if solver_name not in SolverFactory.solvers:
            raise TypeError(f"Unknown parameter offload policy {solver_name}")
        return SolverFactory.solvers[solver_name]

    @staticmethod
    def get_solver_names():
        return tuple(SolverFactory.solvers.keys())
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/__init__.py
colossalai/auto_parallel/offload/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/offload/training_simulator.py
colossalai/auto_parallel/offload/training_simulator.py
import bisect
from abc import ABC, abstractmethod
from collections import OrderedDict
from dataclasses import dataclass
from typing import Dict, List

from torch.fx.node import Node

from .region import Region
from .util import *


@dataclass
class ExecutionPeriod:
    # A [start_time, end_time] interval on one of the simulated streams
    # (compute, h2d, or d2h), in seconds.
    start_time: float = 0
    end_time: float = 0


class TrainingSimulator(ABC):
    """
    The Training Simulator is used to simulate the training process.
    It records computation, communication, and runtime memory during forward and backward passes.

    Args:
        region_list (List[Region]): represents the linearized DNN computing graph.
        comp_power (float): the NVIDIA GPU FP16 computing power.
        link_to_bw (Dict[str, Dict[float, float]]): communication links and the corresponding bandwidth.
    """

    def __init__(self, region_list: List[Region], comp_power: float, link_to_bw: Dict[str, Dict[float, float]]) -> None:
        self.region_list = region_list
        self.region_num = len(region_list)

        self.runtime_mem: int = 0
        self.peak_mem: int = 0
        self.total_mem_saving: int = 0

        self.fwd_node_mem: Dict[Node, float] = {}
        self.bwd_node_mem: Dict[Node, float] = {}

        # Node dependencies in backward pass
        self.bwd_node_deps: Dict[Node, int] = {}

        self.comp_power: float = comp_power
        self.link_to_bandwidth: Dict[str, Dict[float, float]] = link_to_bw

    @abstractmethod
    def execute(self):
        raise NotImplementedError

    @abstractmethod
    def _eval_fwd_mem_per_region(self, region: Region):
        raise NotImplementedError

    @abstractmethod
    def _eval_bwd_mem_per_region(self, region: Region):
        raise NotImplementedError

    def _get_bandwidth(self, link: str, comm_volumn: float) -> float:
        """
        Get the data transfer bandwidth.

        Args:
            link (str): the data transfer link.
            comm_volumn (float): the amount of data transferred.

        Returns:
            float: the data transfer bandwidth.
        """
        assert len(self.link_to_bandwidth)
        if link not in self.link_to_bandwidth:
            raise TypeError(f"Unknown data transfer link {link}")

        size_list = sorted(self.link_to_bandwidth[link].keys())
        # Clamp to the largest profiled size: volumes beyond the profiling range
        # would otherwise index one past the end of the list (IndexError).
        d_idx = min(bisect.bisect_left(size_list, comm_volumn), len(size_list) - 1)
        return self.link_to_bandwidth[link][size_list[d_idx]]

    def _get_communication_overhead(self, link: str, comm_volumn: float) -> float:
        # transfer time = volume / bandwidth
        return comm_volumn / self._get_bandwidth(link, comm_volumn)

    def _get_computing_overhead(self, flop: float) -> float:
        return flop / self.comp_power


class SynTrainingSimulator(TrainingSimulator):
    def __init__(self, region_list: List[Region], comp_power: float, link_to_bw: Dict[str, Dict[float, float]]) -> None:
        super().__init__(region_list, comp_power, link_to_bw)

    def execute(self):
        """
        Simulate synchronous training process.
        """
        for reg in self.region_list:
            self._eval_fwd_mem_per_region(reg)

        for reg in reversed(self.region_list):
            self._eval_bwd_mem_per_region(reg)

    def _eval_fwd_mem_per_region(self, region: Region):
        """
        Evaluate the runtime and peak memory when the forward execution reaches the current region.
        """
        # upload parameters of the current region
        if requires_upload_p_in_fwd(self.region_list[region.shared_rid]):
            self.runtime_mem += region.param_size

        for node in region.nodes:
            self.runtime_mem += calculate_fwd_tmp(node) + calculate_fwd_out(node)
            self.fwd_node_mem[node] = self.runtime_mem
            self.peak_mem = max(self.runtime_mem, self.peak_mem)
            self.total_mem_saving += node.node_info.runtime_fwd_mem - self.runtime_mem

        if region.need_offload:
            self.runtime_mem -= region.param_size

    def _eval_bwd_mem_per_region(self, region: Region):
        """
        Evaluate the runtime and peak memory when the backward execution reaches the current region.
        """
        # upload parameters of the current region
        if region.need_offload:
            self.runtime_mem += region.param_size

        # add the gradient of the parameter
        if region.r_id < region.shared_rid:
            # gradient accumulation is required for shared parameters
            self.runtime_mem += 2.0 * region.param_size
        else:
            self.runtime_mem += region.param_size

        for node in reversed(region.nodes):
            self.runtime_mem -= calculate_fwd_out(node)
            self.runtime_mem += node.meta["bwd_mem_tmp"] + node.meta["bwd_mem_out"]
            self.peak_mem = max(self.runtime_mem, self.peak_mem)

            # The memory savings of a node may be negative due to parameter prefetch.
            self.total_mem_saving += node.node_info.runtime_bwd_mem - self.runtime_mem
            self.bwd_node_mem[node] = self.runtime_mem

            self.runtime_mem -= node.meta["bwd_mem_tmp"] + calculate_fwd_tmp(node)

            # free bwd_mem_out once every user of a node has been back-propagated
            self.bwd_node_deps[node] = len(node.all_input_nodes)
            for user_node in node.users:
                if user_node in self.bwd_node_deps:
                    self.bwd_node_deps[user_node] -= 1
                    if self.bwd_node_deps[user_node] <= 0:
                        self.runtime_mem -= user_node.meta["bwd_mem_out"]

            if self.runtime_mem < 0:
                raise ValueError(
                    f"region id: {region.r_id}, node name: {node.name}, "
                    f"runtime_mem: {self.runtime_mem / 1024 ** 2:.3f}MB ---"
                    f"runtime memory computed less than 0, which is miscalculated!"
                )

        # release parameter and offload gradient in region
        if region.r_id == region.shared_rid:
            self.runtime_mem -= 2.0 * region.param_size
        elif region.r_id < region.shared_rid:
            self.runtime_mem -= 3.0 * region.param_size
        elif self.region_list[region.shared_rid].need_offload:
            self.runtime_mem -= region.param_size


class AsynTrainingSimulator(TrainingSimulator):
    def __init__(self, region_list: List[Region], comp_power: float, link_to_bw: Dict[str, Dict[float, float]]) -> None:
        super().__init__(region_list, comp_power, link_to_bw)

        self.iter_end_time: int = 0
        # the last computation execution period
        self.last_comp: ExecutionPeriod = ExecutionPeriod(start_time=0, end_time=0)
        # the last parameter prefetch execution period
        self.last_h2d: ExecutionPeriod = ExecutionPeriod(start_time=0, end_time=0)
        # the last gradient offload execution period
        self.last_d2h: ExecutionPeriod = ExecutionPeriod(start_time=0, end_time=0)
        # the forward computation execution period of the region
        self.fwd_reg_to_comp: OrderedDict[int, ExecutionPeriod] = OrderedDict()
        # the forward parameter prefetch execution period of the region
        self.fwd_reg_to_pref: OrderedDict[int, ExecutionPeriod] = OrderedDict()
        # the backward computation execution period of the region
        self.bwd_reg_to_comp: OrderedDict[int, ExecutionPeriod] = OrderedDict()
        # the backward parameter prefetch execution period of the region
        self.bwd_reg_to_pref: OrderedDict[int, ExecutionPeriod] = OrderedDict()
        # the gradient offload execution period of the region,
        # which is divided into those that are waiting and those that have been released
        self.bwd_reg_to_offl_waiting: OrderedDict[int, ExecutionPeriod] = OrderedDict()
        self.bwd_reg_to_offl_freed: OrderedDict[int, ExecutionPeriod] = OrderedDict()
        # the region buffer, which records regions that are offloaded but not released
        self.reg_buffer_to_free: List[int] = []

        # node dependencies in backward pass
        self.bwd_node_deps: Dict[Node, int] = {}

        # the region execution flow,
        # where fwd_reg_flow[i,j] denotes whether the parameters of j-th region are in the GPU
        # when the execution reaches the i-th region.
        self.fwd_reg_flow = torch.zeros((self.region_num, self.region_num)).bool()
        self.bwd_reg_flow = torch.zeros((self.region_num, self.region_num)).bool()

    def execute(self):
        """
        Simulate asynchronous training process.
        In forward pass, parameter prefetching is advanced by one region.
        In backward pass, parameter prefetching is executed at the specified location,
        and gradient offloading is urgent.
        """
        for reg in self.region_list:
            if reg.param_size and reg.r_id < self.region_num - 1:
                # pick the next parameterized region as the forward prefetch target
                for nr in self.region_list[reg.r_id + 1 :]:
                    if nr.param_size and requires_upload_p_in_fwd(self.region_list[nr.shared_rid]):
                        reg.fwd_prefetch_region = nr
                        break
            self._eval_fwd_cost_per_region(reg)
            self._eval_fwd_mem_per_region(reg)

        for reg in reversed(self.region_list):
            self._eval_bwd_cost_per_region(reg)
            self._eval_bwd_mem_per_region(reg)

        # release remaining grads
        for reg_id, offl_exec in self.bwd_reg_to_offl_waiting.items():
            self.bwd_reg_to_offl_freed[reg_id] = offl_exec
            self.runtime_mem -= self.region_list[reg_id].param_size
        self.bwd_reg_to_offl_waiting.clear()

        self.iter_end_time = max(self.last_comp.end_time, self.last_d2h.end_time)

    def _insert_h2d_exec(self, region: Region, is_fwd: bool = True):
        """
        Insert parameter prefetch execution period of the current region to the end of the h2d stream.
        """
        pref_start_time = max(self.last_h2d.end_time, self.last_comp.end_time)
        # NOTE(review): factor 2.0 mirrors the fp32->fp16 staging cost — confirm against Region.move_param_to_cuda
        pref_end_time = pref_start_time + 2.0 * self._get_communication_overhead("h2d", region.param_size)
        pref_ep = ExecutionPeriod(start_time=pref_start_time, end_time=pref_end_time)
        if is_fwd:
            self.fwd_reg_to_pref[region.r_id] = pref_ep
        else:
            self.bwd_reg_to_pref[region.r_id] = pref_ep
        self.last_h2d = pref_ep

    def _insert_comp_exec(self, region: Region, is_fwd: bool = True):
        """
        Insert computation execution period of the current region to the end of the computing stream.
        """
        if is_fwd:
            reg_to_comp = self.fwd_reg_to_comp
            reg_to_pref = self.fwd_reg_to_pref
            flop_key = "fwd_flop"
        else:
            reg_to_comp = self.bwd_reg_to_comp
            reg_to_pref = self.bwd_reg_to_pref
            flop_key = "bwd_flop"
        # computation can only start after both the previous computation
        # and this region's own parameter prefetch (if any) have finished
        comp_start_time = max(self.last_comp.end_time, reg_to_pref.get(region.r_id, ExecutionPeriod(0, 0)).end_time)
        comp_end_time = comp_start_time + sum(
            [self._get_computing_overhead(node.meta.get(flop_key, 0)) for node in region.nodes]
        )
        comp_ep = ExecutionPeriod(start_time=comp_start_time, end_time=comp_end_time)
        reg_to_comp[region.r_id] = comp_ep
        self.last_comp = comp_ep

    def _insert_d2h_exec(self, region: Region):
        """
        Insert gradient offload execution period of the current region to the end of the d2h stream.
        """
        offl_start_time = max(self.last_d2h.end_time, self.last_comp.end_time)
        offl_end_time = offl_start_time + self._get_communication_overhead("d2h", region.param_size)
        offl_ep = ExecutionPeriod(start_time=offl_start_time, end_time=offl_end_time)
        self.bwd_reg_to_offl_waiting[region.r_id] = offl_ep
        self.last_d2h = offl_ep

    def _eval_fwd_cost_per_region(self, region: Region):
        """
        Evaluate computation and communication execution period of the region in forward pass.
        """
        # upload parameters of the first region
        if region.r_id == 0:
            self._insert_h2d_exec(region)

        # prefetch parameters of the next region
        fwd_prefetch_region = region.fwd_prefetch_region
        if fwd_prefetch_region and requires_upload_p_in_fwd(self.region_list[fwd_prefetch_region.shared_rid]):
            self._insert_h2d_exec(fwd_prefetch_region)

        # execute computation
        self._insert_comp_exec(region)

    def _eval_fwd_mem_per_region(self, region: Region):
        """
        Evaluate the runtime and peak memory when the forward execution reaches the current region.
        """
        # upload parameters of the current region
        if region.r_id <= 0:
            self.runtime_mem += region.param_size
            self.fwd_reg_flow[region.r_id, region.r_id] = True
        else:
            # inherit residency from the previous step, minus regions freed meanwhile
            self.fwd_reg_flow[region.r_id] = self.fwd_reg_flow[region.r_id - 1]
            self.fwd_reg_flow[region.r_id, self.reg_buffer_to_free] = False
            self.reg_buffer_to_free.clear()

        # prefetch parameters of the next region
        fwd_prefetch_region = region.fwd_prefetch_region
        if fwd_prefetch_region and requires_upload_p_in_fwd(self.region_list[fwd_prefetch_region.shared_rid]):
            self.runtime_mem += fwd_prefetch_region.param_size
            self.fwd_reg_flow[region.r_id, fwd_prefetch_region.r_id] = True

        for node in region.nodes:
            self.runtime_mem += calculate_fwd_tmp(node) + calculate_fwd_out(node)
            self.peak_mem = max(self.runtime_mem, self.peak_mem)

            self.total_mem_saving += node.node_info.runtime_fwd_mem - self.runtime_mem
            self.fwd_node_mem[node] = self.runtime_mem

        if region.need_offload:
            self.runtime_mem -= region.param_size

            assert len(self.reg_buffer_to_free) <= 1, f"{len(self.reg_buffer_to_free)}"
            self.reg_buffer_to_free.append(region.r_id)

    def _eval_bwd_cost_per_region(self, region: Region):
        """
        Evaluate computation and communication execution period of the region in backward pass.
        """
        # upload parameters of the current region
        if region.is_syn:
            assert region.need_offload
            self._insert_h2d_exec(region, is_fwd=False)

        # prefetch parameters of the region choiced, which is parallel to computation
        if region.bwd_prefetch_region is not None:
            self._insert_h2d_exec(region.bwd_prefetch_region, is_fwd=False)

        # execute computation
        self._insert_comp_exec(region, is_fwd=False)

        # offload gradient
        if requires_offload_g_in_bwd(region):
            self._insert_d2h_exec(region)

        assert len(self.reg_buffer_to_free) == 0
        # gradients whose offload finished before the current computation started can be freed
        for reg_id, offl_exec in self.bwd_reg_to_offl_waiting.items():
            if offl_exec.end_time >= self.last_comp.start_time:
                break
            self.reg_buffer_to_free.append(reg_id)
            self.bwd_reg_to_offl_freed[reg_id] = offl_exec

        for reg_id in self.reg_buffer_to_free:
            self.bwd_reg_to_offl_waiting.pop(reg_id)

    def _eval_bwd_mem_per_region(self, region: Region):
        """
        Evaluate the runtime and peak memory when the backward execution reaches the current region.
        """
        if region.r_id + 1 < self.region_num:
            self.bwd_reg_flow[region.r_id] = self.bwd_reg_flow[region.r_id + 1]
        else:
            # the first backward step inherits residency from the last forward step
            self.bwd_reg_flow[region.r_id] = self.fwd_reg_flow[-1]
        self.bwd_reg_flow[region.r_id, self.reg_buffer_to_free] = False

        # free gradients in the buffer
        while len(self.reg_buffer_to_free):
            reg_id = self.reg_buffer_to_free.pop(0)
            self.runtime_mem -= self.region_list[reg_id].param_size

        # upload parameters of the current region
        if region.is_syn:
            self.runtime_mem += region.param_size
            self.bwd_reg_flow[region.r_id, region.r_id] = True

        # prefetch parameters of the region choiced
        bwd_prefetch_region = region.bwd_prefetch_region
        if bwd_prefetch_region:
            self.runtime_mem += bwd_prefetch_region.param_size
            self.bwd_reg_flow[region.r_id, bwd_prefetch_region.r_id] = True

        # add the gradient of the parameter
        if region.r_id < region.shared_rid:
            # gradient accumulation is required for shared parameters
            self.runtime_mem += 2.0 * region.param_size
        else:
            self.runtime_mem += region.param_size

        for node in reversed(region.nodes):
            self.runtime_mem -= calculate_fwd_out(node)
            self.runtime_mem += node.meta["bwd_mem_tmp"] + node.meta["bwd_mem_out"]
            self.peak_mem = max(self.runtime_mem, self.peak_mem)

            # The memory savings of a node may be negative due to parameter prefetch.
            self.total_mem_saving += node.node_info.runtime_bwd_mem - self.runtime_mem

            self.bwd_node_mem[node] = self.runtime_mem

            self.runtime_mem -= node.meta["bwd_mem_tmp"] + calculate_fwd_tmp(node)

            # free bwd_mem_out once every user of a node has been back-propagated
            self.bwd_node_deps[node] = len(node.all_input_nodes)
            for user_node in node.users:
                if user_node in self.bwd_node_deps:
                    self.bwd_node_deps[user_node] -= 1
                    if self.bwd_node_deps[user_node] <= 0:
                        self.runtime_mem -= user_node.meta["bwd_mem_out"]

            if self.runtime_mem < 0:
                raise ValueError(
                    f"region id: {region.r_id}, node name: {node.name}, "
                    f"runtime_mem: {self.runtime_mem / 1024 ** 2:.3f}MB ---"
                    f"runtime memory computed less than 0, which is miscalculated!"
                )

        # release parameters of the region
        if requires_release_p_in_bwd(self.region_list[region.shared_rid]):
            self.runtime_mem -= region.param_size
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/checkpoint/build_c_ext.py
colossalai/auto_parallel/checkpoint/build_c_ext.py
import os

from setuptools import Extension, setup

# Resolve the C source relative to this file so the build works from any cwd.
this_dir = os.path.dirname(os.path.abspath(__file__))

# The single native extension: the rotor dynamic-programming kernel.
ext_modules = [
    Extension(
        "rotorc",
        sources=[os.path.join(this_dir, "ckpt_solver_rotor.c")],
    ),
]

setup(
    name="rotor c extension",
    version="0.1",
    description="rotor c extension for faster dp computing",
    ext_modules=ext_modules,
)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/checkpoint/ckpt_solver_chen.py
colossalai/auto_parallel/checkpoint/ckpt_solver_chen.py
import math from copy import deepcopy from typing import List, Set, Tuple from torch.fx import Graph, Node from colossalai.fx.profiler import calculate_fwd_in, calculate_fwd_tmp from .ckpt_solver_base import CheckpointSolverBase __all__ = ["CheckpointSolverChen"] class CheckpointSolverChen(CheckpointSolverBase): def __init__(self, graph: Graph, cnode: List[str] = None, num_grids: int = 6): """ This is the simple implementation of Algorithm 3 in https://arxiv.org/abs/1604.06174. Note that this algorithm targets at memory optimization only, using techniques in appendix A. Usage: Assume that we have a ``GraphModule``, and we have already done the extractions to the graph to retrieve all information needed, then we could use the following code to find a solution using ``CheckpointSolverChen``: >>> solver = CheckpointSolverChen(gm.graph) >>> chen_graph = solver.solve() >>> gm.graph = chen_graph # set the graph to a new graph Args: graph (Graph): The computing graph to be optimized. cnode (List[str], optional): Common node List, should be the subset of input. Defaults to None. num_grids (int, optional): Number of grids to search for b. Defaults to 6. """ super().__init__(graph, 0, 0, True, cnode) self.num_grids = num_grids def solve(self) -> Graph: """Solve the checkpointing problem using Algorithm 3. Returns: graph (Graph): The optimized graph, should be a copy of the original graph. """ checkpointable_op = ["call_module", "call_method", "call_function", "get_attr"] ckpt = self.grid_search() for i, seg in enumerate(ckpt): for idx in range(*seg): nodes = self.node_list[idx] for n in nodes: if n.op in checkpointable_op: n.meta["activation_checkpoint"] = i return deepcopy(self.graph) def run_chen_greedy(self, b: int = 0) -> Tuple[Set, int]: """ This is the simple implementation of Algorithm 3 in https://arxiv.org/abs/1604.06174. 
""" ckpt_intv = [] temp = 0 x = 0 y = 0 prev_idx = 2 for idx, nodes in enumerate(self.node_list): for n in nodes: n: Node temp += calculate_fwd_in(n) + calculate_fwd_tmp(n) y = max(y, temp) if temp > b and idx > prev_idx: x += calculate_fwd_in(nodes[0]) temp = 0 ckpt_intv.append((prev_idx, idx + 1)) prev_idx = idx + 1 return ckpt_intv, math.floor(math.sqrt(x * y)) def grid_search(self) -> Set: """ Search ckpt strategy with b = 0, then run the allocation algorithm again with b = √xy. Grid search over [√2/2 b, √2 b] for ``ckpt_opt`` over ``num_grids`` as in appendix A. """ _, b_approx = self.run_chen_greedy(0) b_min, b_max = math.floor(b_approx / math.sqrt(2)), math.ceil(b_approx * math.sqrt(2)) b_opt = math.inf for b in range(b_min, b_max, (b_max - b_min) // self.num_grids): ckpt_intv, b_approx = self.run_chen_greedy(b) if b_approx < b_opt: b_opt = b_approx ckpt_opt = ckpt_intv return ckpt_opt
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/checkpoint/ckpt_solver_base.py
colossalai/auto_parallel/checkpoint/ckpt_solver_base.py
from abc import ABC, abstractmethod from copy import deepcopy from typing import Any, List import torch from torch.fx import Graph, Node from colossalai.auto_parallel.passes.runtime_apply_pass import ( runtime_apply, runtime_apply_for_iterable_object, runtime_comm_spec_apply, ) from colossalai.fx.codegen.activation_checkpoint_codegen import ActivationCheckpointCodeGen __all___ = ["CheckpointSolverBase"] def _copy_output(src: Graph, dst: Graph): """Copy the output node from src to dst""" for n_src, n_dst in zip(src.nodes, dst.nodes): if n_src.op == "output": n_dst.meta = n_src.meta def _get_param_size(module: torch.nn.Module): """Get the size of the parameters in the module""" return sum([p.numel() * torch.tensor([], dtype=p.dtype).element_size() for p in module.parameters()]) class CheckpointSolverBase(ABC): def __init__( self, graph: Graph, free_memory: float = -1.0, requires_linearize: bool = False, cnode: List[str] = None, optim_multiplier: float = 1.0, ): """``CheckpointSolverBase`` class will integrate information provided by the components and use an existing solver to find a possible optimal strategies combination for target computing graph. Existing Solvers: Chen's Greedy solver: https://arxiv.org/abs/1604.06174 (CheckpointSolverChen) Rotor solver: https://hal.inria.fr/hal-02352969 (CheckpointSolverRotor) Args: graph (Graph): The computing graph to be optimized. free_memory (float): Memory constraint for the solution. requires_linearize (bool): Whether the graph needs to be linearized. cnode (List[str], optional): Common node List, should be the subset of input. Default to None. optim_multiplier (float, optional): The multiplier of extra weight storage for the ``torch.optim.Optimizer``. Default to 1.0. Warnings: Meta information of the graph is required for any ``CheckpointSolver``. """ # super-dainiu: this graph is a temporary graph which can refer to # the owning module, but we will return another deepcopy of it after # the solver is executed. 
self.graph = deepcopy(graph) self.graph.owning_module = graph.owning_module _copy_output(graph, self.graph) self.graph.set_codegen(ActivationCheckpointCodeGen()) # check if has meta information if any(len(node.meta) == 0 for node in self.graph.nodes): raise RuntimeError( "Nodes meta information hasn't been prepared! Please extract from graph before constructing the solver!" ) # parameter memory = parameter size + optimizer extra weight storage self.free_memory = free_memory - _get_param_size(self.graph.owning_module) * (optim_multiplier + 1) self.cnode = cnode self.requires_linearize = requires_linearize if self.requires_linearize: self.node_list = self._linearize_graph() else: self.node_list = self.get_node_list() @abstractmethod def solve(self): """Solve the checkpointing problem and return the solution.""" def get_node_list(self): """Get the node list.""" return [[node] for node in self.graph.nodes] def _linearize_graph(self) -> List[List[Node]]: """Linearizing the graph Args: graph (Graph): The computing graph to be optimized. Returns: List[List[Node]]: List of list, each inside list of Node presents the actual 'node' in linearized manner. Remarks: Do merge the inplace ops and shape-consistency ops into the previous node. """ # Common nodes are type of nodes that could be seen as attributes and remain # unchanged throughout the whole model, it will be used several times by # different blocks of model, so that it is hard for us to linearize the graph # when we encounter those kinds of nodes. We let users to annotate some of the # input as common node, such as attention mask, and the followings are some of # the ops that could actually be seen as common nodes. With our common node prop, # we could find some of the "real" common nodes (e.g. the real attention mask # used in BERT and GPT), the rule is simple, for node who's parents are all common # nodes or it's op belongs to the following operations, we view this node as a # newly born common node. 
# List of target name that could be seen as common node common_ops = ["getattr", "getitem", "size"] def _is_cop(target: Any) -> bool: """Check if an op could be seen as common node Args: target (Any): node target Returns: bool """ if isinstance(target, str): return target in common_ops else: return target.__name__ in common_ops def _is_sink() -> bool: """Check if we can free all dependencies Returns: bool """ def _is_inplace(n: Node): """Get the inplace argument from ``torch.fx.Node``""" inplace = False if n.op == "call_function": inplace = n.kwargs.get("inplace", False) elif n.op == "call_module": inplace = getattr(n.graph.owning_module.get_submodule(n.target), "inplace", False) return inplace def _is_shape_consistency(n: Node): """Check if this node is shape-consistency node (i.e. ``runtime_apply`` or ``runtime_apply_for_iterable_object``)""" return n.target in [runtime_apply, runtime_apply_for_iterable_object, runtime_comm_spec_apply] return ( not sum([v for _, v in deps.items()]) and not any(map(_is_inplace, n.users)) and not any(map(_is_shape_consistency, n.users)) ) # make sure that item in cnode is valid if self.cnode: for name in self.cnode: try: assert ( next(node for node in self.graph.nodes if node.name == name).op == "placeholder" ), f"Common node {name} is not an input of the model." 
except StopIteration: raise ValueError(f"Common node name {name} not in graph.") else: self.cnode = [] deps = {} node_list = [] region = [] for n in self.graph.nodes: if n.op != "placeholder" and n.op != "output": for n_par in n.all_input_nodes: if n_par.op != "placeholder" and n_par.name not in self.cnode: deps[n_par] -= 1 region.append(n) # if the node could free all dependencies in graph # we could begin a new node if _is_sink(): node_list.append(region) region = [] # propagate common node attr if possible if len(n.all_input_nodes) == len( [node for node in n.all_input_nodes if node.name in self.cnode] ) or _is_cop(n.target): self.cnode.append(n.name) else: deps[n] = len([user for user in n.users if user.op != "output"]) return node_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/checkpoint/ckpt_solver_rotor.py
colossalai/auto_parallel/checkpoint/ckpt_solver_rotor.py
from copy import deepcopy from typing import Any, List, Tuple from torch import Tensor from torch.fx import Graph, Node from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply, runtime_comm_spec_apply from colossalai.fx.codegen.activation_checkpoint_codegen import _find_nested_ckpt_regions from colossalai.fx.profiler import ( activation_size, calculate_bwd_time, calculate_fwd_out, calculate_fwd_time, calculate_fwd_tmp, ) from colossalai.logging import get_dist_logger from .ckpt_solver_base import CheckpointSolverBase from .operation import Backward, Chain, ForwardCheck, ForwardEnable, ForwardNograd, Loss, Sequence __all__ = ["CheckpointSolverRotor"] class CheckpointSolverRotor(CheckpointSolverBase): def __init__( self, graph: Graph, free_memory: float = -1, cnode: List[str] = None, memory_slots: int = 500, optim_multiplier: float = 1.0, ): """This is the simple implementation of dynamic programming algorithm rotor in https://hal.inria.fr/hal-02352969. Some code are adapted from https://gitlab.inria.fr/hiepacs/rotor. Usage: Assume that we have a ``GraphModule``, and we have already done the extractions to the graph to retrieve all information needed, then we could use the following code to find a solution using ``CheckpointSolverRotor``: >>> solver = CheckpointSolverRotor(gm.graph, free_memory=torch.cuda.mem_get_info(device=0)[0]) >>> rotor_graph = solver.solve(force_python=True) # otherwise use C solver >>> gm.graph = rotor_graph # set the graph to a new graph Args: graph (Graph): The computing graph to be optimized. free_memory (float, optional): Memory constraint for the solution, unit is byte. Use ``torch.cuda.mem_get_info(device=0)[0]`` to estimate the free_memory. Defaults to -1. cnode (List[str], optional): Common node List, should be the subset of input. Defaults to None. memory_slots (int, optional): Number of slots for discretizing memory budget. Defaults to 500. 
optim_multiplier (float, optional): The multiplier of extra weight storage for the ``torch.optim.Optimizer``. Default to 1.0. """ super().__init__(graph, free_memory, True, cnode, optim_multiplier) self.memory_slots = memory_slots # construct chain unit = self.free_memory // self.memory_slots self.chain = self._construct_chain(self.graph, self.node_list) self.chain.discretize_all(unit) self.cost_table = None self.back_ptr = None self.sequence = None def solve(self, force_python: bool = False, verbose: bool = False) -> Graph: """Solve the checkpointing problem using rotor algorithm. Args: force_python (bool, optional): Use Python version of solver, else use C version. Defaults to False. verbose (bool, optional): Print verbose information. Defaults to False. Returns: graph (Graph): The optimized graph, should be a copy of the original graph. """ chain = self.chain # compute cost table if force_python: self.cost_table, self.back_ptr = self._compute_table(chain, self.memory_slots) else: self.cost_table, self.back_ptr = self._compute_table_c(chain, self.memory_slots) if verbose: self.print_chain() # backtrack try: self.sequence = self._backtrack( chain, 0, len(chain), self.memory_slots - chain.x[0], self.cost_table, self.back_ptr ) self._annotate_from_sequence(self.sequence, self.node_list) except ValueError as e: # using logger to annonce that the solver is failed logger = get_dist_logger() logger.warning(f"Checkpoint solver failed: {e}") raise ValueError if verbose: self.print_sequence() return deepcopy(self.graph) def print_chain(self): print("[input]", self.chain.x[0], self.chain.xbar[0], self.chain.ftmp[0], self.chain.btmp[0]) for idx in range(len(self.node_list) - 1): print( self.node_list[idx], self.chain.x[idx + 1], self.chain.xbar[idx + 1], self.chain.ftmp[idx], self.chain.btmp[idx], ) print(f"Chain = {self.chain}") def print_sequence(self): print(f"Sequence = {self.sequence}") @classmethod def _construct_chain(cls, graph: Graph, node_list: List[List[Node]]) -> 
Chain: input_tensors = cls._extract_input(graph) ftime, btime, ftmp, btmp = list(), list(), list(), list() xbar, x = [activation_size(input_tensors)], [activation_size(input_tensors)] for node in node_list: node_info = cls._extract_node_info(node) ftime.append(node_info[0]) btime.append(node_info[1]) x.append(node_info[2]) xbar.append(node_info[3]) ftmp.append(node_info[4]) btmp.append(node_info[5]) # currently we view loss backward temp as zero btime.append(0) btmp.append(0) return Chain(ftime, btime, x, xbar, ftmp, btmp) @classmethod def _extract_node_info(cls, node: List[Node]) -> Tuple[int, ...]: """Extract node info from a list of nodes""" xbar = 0 ftime = 0 btime = 0 fwd_mem_peak = 0 for n in node: assert isinstance(n, Node), f"{n} is not a Node" if n.target == runtime_apply or n.target == runtime_comm_spec_apply: # in this case we need to calculate memory usage directly based on the statics that hooked in node.meta xbar += n.meta["fwd_mem_out"] fwd_mem_peak = max(fwd_mem_peak, xbar + n.meta["fwd_mem_tmp"]) else: xbar += calculate_fwd_tmp(n) + calculate_fwd_out(n) fwd_mem_peak = max(fwd_mem_peak, xbar + n.meta["fwd_mem_tmp"] + cls._extract_unused_output(n)) # minimum flop count is required ftime += max(calculate_fwd_time(n), 1.0) btime += max(calculate_bwd_time(n), 1.0) x = calculate_fwd_out(node[-1]) xbar = max(x, xbar) ftmp = fwd_mem_peak - xbar btmp = cls._extract_btmp(node) return ftime, btime, x, xbar, ftmp, btmp @staticmethod def _extract_input(graph: Graph) -> Tuple[Tensor, ...]: """Extract input tensors from a Graph""" input_tensors = [] for node in graph.nodes: if node.op == "placeholder": input_tensors.append(node.meta["fwd_out"]) return input_tensors @staticmethod def _extract_unused_output(node: Node) -> int: """Extract unused output from `torch.fx.Node`""" return activation_size(node.meta["fwd_out"]) - calculate_fwd_out(node) @staticmethod def _extract_btmp(node: List[Node]) -> int: """Extract btmp from a list of nodes""" def 
_extract_deps_size(): deps_size = 0 for k, v in deps.items(): k: Node if v > 0: deps_size += k.meta["bwd_mem_out"] if v == float("-inf"): deps_size -= calculate_fwd_tmp(k) + calculate_fwd_out(k) return deps_size btmp = 0 deps = {} for n in reversed(node): deps[n] = len(n.all_input_nodes) btmp = max(btmp, _extract_deps_size() + n.meta["bwd_mem_tmp"]) for child in n.users: if child in deps: deps[child] -= 1 if deps[child] <= 0: deps[child] = float("-inf") # free return btmp @staticmethod def _compute_table(chain: Chain, mmax: int) -> Tuple: """Compute the table using dynamic programming. Returns the cost table and the backtracking pointer. Args: chain (Chain): A basic linearized structure for solving the dynamic programming problem. mmax (int): Maximum number of memory slots. Returns: cost_table (List): cost_table[m][lhs][rhs] indicates the optimal cost of the subproblem from lhs to rhs with m memory slots. back_ptr (List): back_ptr[m][lhs][rhs] indicates the best operation at this point. It is (True,) if the optimal choice is a chain checkpoint, it is (False, j) if the optimal choice is a leaf checkpoint of length j """ ftime = chain.ftime + [0.0] btime = chain.btime x = chain.x + [0] xbar = chain.xbar + [0] ftmp = chain.ftmp + [0] btmp = chain.btmp + [0] # Build table cost_table = [[{} for _ in range(len(chain) + 1)] for _ in range(mmax + 1)] back_ptr = [[{} for _ in range(len(chain) + 1)] for _ in range(mmax + 1)] # Initialize corner cases where length of sequence equals to 1, i.e. 
lhs == rhs for m in range(mmax + 1): for i in range(len(chain) + 1): limit = max(x[i + 1] + xbar[i + 1] + ftmp[i], x[i + 1] + xbar[i + 1] + btmp[i]) if m >= limit: cost_table[m][i][i] = ftime[i] + btime[i] else: cost_table[m][i][i] = float("inf") # Compute tables for m in range(mmax + 1): for d in range(1, len(chain) + 1): for i in range(len(chain) + 1 - d): idx = i + d mmin = x[idx + 1] + x[i + 1] + ftmp[i] if idx > i + 1: mmin = max(mmin, x[idx + 1] + max(x[j] + x[j + 1] + ftmp[j] for j in range(i + 1, idx))) if m < mmin: cost_table[m][i][idx] = float("inf") else: leaf_checkpoints = [ (j, sum(ftime[i:j]) + cost_table[m - x[j]][j][idx] + cost_table[m][i][j - 1]) for j in range(i + 1, idx + 1) if m >= x[j] ] if leaf_checkpoints: best_leaf = min(leaf_checkpoints, key=lambda t: t[1]) else: best_leaf = None if m >= xbar[i + 1]: chain_checkpoint = cost_table[m][i][i] + cost_table[m - xbar[i + 1]][i + 1][idx] else: chain_checkpoint = float("inf") if best_leaf and best_leaf[1] <= chain_checkpoint: cost_table[m][i][idx] = best_leaf[1] back_ptr[m][i][idx] = (False, best_leaf[0]) else: cost_table[m][i][idx] = chain_checkpoint back_ptr[m][i][idx] = (True,) return cost_table, back_ptr @staticmethod def _compute_table_c(chain: Chain, mmax: int) -> Tuple: try: from .rotorc import compute_table # build module if module not found except ModuleNotFoundError: import os import subprocess import sys logger = get_dist_logger() logger.info("rotorc hasn't been built! Building library...", ranks=[0]) this_dir = os.path.dirname(os.path.abspath(__file__)) result = subprocess.Popen( [ f"{sys.executable}", f"{os.path.join(this_dir, 'build_c_ext.py')}", "build_ext", f"--build-lib={this_dir}", ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) if result.wait() == 0: logger.info("rotorc has been built!", ranks=[0]) from .rotorc import compute_table else: logger.warning("rotorc built failed! 
Using python version!", ranks=[0]) return CheckpointSolverRotor._compute_table(chain, mmax) return compute_table(chain, mmax) @staticmethod def _backtrack( chain: Chain, lhs: int, rhs: int, budget: int, cost_table: List[Any], back_ptr: List[Any] ) -> "Sequence": """Backtrack the cost table and retrieve the optimal checkpointing strategy. Args: chain (Chain): A basic linearized structure for solving the dynamic programming problem. lhs (int): The left index of the interval to backtrack. rhs (int): The right index of the interval to backtrack. budget (int): The memory budget for processing this interval. cost_table (List[Any]): See ``._compute_table()`` for definitions back_ptr (List[Any]): See ``._compute_table()`` for definitions Raises: ValueError: Can not process the chain. Returns: sequence (Sequence): The sequence of executing nodes with checkpoints. """ if budget <= 0: raise ValueError(f"Can not process a chain with negative memory {budget}") elif cost_table[budget][lhs][rhs] == float("inf"): raise ValueError(f"Can not process this chain from index {lhs} to {rhs} with memory {budget}") sequence = Sequence() if rhs == lhs: if lhs == len(chain): sequence += [Loss()] else: sequence += [ForwardEnable(lhs), Backward(lhs)] return sequence if back_ptr[budget][lhs][rhs][0]: sequence += [ ForwardEnable(lhs), CheckpointSolverRotor._backtrack( chain, lhs + 1, rhs, budget - chain.xbar[lhs + 1], cost_table, back_ptr ), Backward(lhs), ] else: best_leaf = back_ptr[budget][lhs][rhs][1] sequence += [ForwardCheck(lhs)] sequence += [ForwardNograd(k) for k in range(lhs + 1, best_leaf)] sequence += [ CheckpointSolverRotor._backtrack( chain, best_leaf, rhs, budget - chain.x[best_leaf], cost_table, back_ptr ), CheckpointSolverRotor._backtrack(chain, lhs, best_leaf - 1, budget, cost_table, back_ptr), ] return sequence @staticmethod def _annotate_from_sequence(sequence: Sequence, node_list: List[List[Node]]): """Annotate the nodes in the ``node_list`` with activation checkpoint from 
the sequence. Args: sequence (Sequence): The sequence of executing nodes with activation checkpoint annotations. node_list (List[List[Node]]): The list of nodes to annotate. """ op_list = sequence.list_operations() loss_op = next(op for op in op_list if isinstance(op, Loss)) fwd_list = op_list[: op_list.index(loss_op)] bwd_list = op_list[op_list.index(loss_op) + 1 :] ckpt_idx = 0 in_ckpt = False ckpt_region = [] # forward annotation for idx, op in enumerate(fwd_list, 0): if in_ckpt: if isinstance(op, ForwardNograd): ckpt_region.append(idx) elif isinstance(op, ForwardEnable): in_ckpt = False for node_idx in ckpt_region: for n in node_list[node_idx]: n.meta["activation_checkpoint"] = [ckpt_idx] ckpt_idx += 1 ckpt_region = [] elif isinstance(op, ForwardCheck): for node_idx in ckpt_region: for n in node_list[node_idx]: n.meta["activation_checkpoint"] = [ckpt_idx] ckpt_idx += 1 ckpt_region = [idx] else: if isinstance(op, ForwardCheck): in_ckpt = True ckpt_region.append(idx) # annotate the backward if there is any nested activation checkpoint in_recompute = False for op in bwd_list: if in_recompute: if isinstance(op, ForwardNograd): ckpt_region.append(op.index) elif isinstance(op, ForwardEnable): for node_idx in ckpt_region: for n in node_list[node_idx]: n.meta["activation_checkpoint"].append(ckpt_idx) ckpt_idx += 1 ckpt_region = [] elif isinstance(op, ForwardCheck): for node_idx in ckpt_region: for n in node_list[node_idx]: n.meta["activation_checkpoint"].append(ckpt_idx) ckpt_idx += 1 ckpt_region = [op.index] elif isinstance(op, Backward): for node_idx in ckpt_region: for n in node_list[node_idx]: n.meta["activation_checkpoint"].append(ckpt_idx) in_recompute = False else: if not isinstance(op, Backward): in_recompute = True ckpt_idx = 0 ckpt_region = [] if isinstance(op, ForwardCheck): ckpt_region.append(op.index) # postprocess, make sure every activation checkpoint label in the # same activation checkpoint region (level = 0) has the same length op_list = [] for node 
in node_list: op_list += node ckpt_regions = _find_nested_ckpt_regions(op_list) for start_idx, end_idx in ckpt_regions: nested_length = max( len(op_list[idx].meta["activation_checkpoint"]) for idx in range(start_idx, end_idx + 1) ) for idx in range(start_idx, end_idx + 1): op_list[idx].meta["activation_checkpoint"] += [None] * ( nested_length - len(op_list[idx].meta["activation_checkpoint"]) )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/checkpoint/__init__.py
colossalai/auto_parallel/checkpoint/__init__.py
from .ckpt_solver_base import CheckpointSolverBase from .ckpt_solver_chen import CheckpointSolverChen from .ckpt_solver_rotor import CheckpointSolverRotor
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/checkpoint/operation.py
colossalai/auto_parallel/checkpoint/operation.py
import math from abc import ABC from typing import List from torch.utils._pytree import tree_map class Chain: def __init__( self, ftime: List[float], btime: List[float], x: List[int], xbar: List[int], ftmp: List[int], btmp: List[int], check_consistency: bool = True, ): """The chain is a basic linearized structure for solving the dynamic programming problem for activation checkpoint. See paper https://hal.inria.fr/hal-02352969 for details. Args: ftime (List[float]): The forward time of each node. btime (List[float]): The backward time of each node. x (List[int]): The forward memory of each node (if save_output). Same as `a` in the paper. xbar (List[int]): The forward memory of each node (if save_all). Same as `a_bar` in the paper. ftmp (List[int]): The temporary forward memory of each node. btmp (List[int]): The temporary backward memory of each node, can be used to control memory budget. check_consistency (bool, optional): Check the lengths consistency for the `Chain`. Defaults to True. 
""" self.ftime = ftime self.btime = btime self.x = x self.xbar = xbar self.ftmp = ftmp self.btmp = btmp if check_consistency and not self.check_lengths(): raise AttributeError("In Chain, input lists do not have consistent lengths") def check_lengths(self): return ( (len(self.ftime) == len(self)) and (len(self.btime) == len(self) + 1) and (len(self.x) == len(self) + 1) and (len(self.ftmp) == len(self)) and (len(self.btmp) == len(self) + 1) and (len(self.xbar) == len(self) + 1) ) def __repr__(self): chain_list = [] for i in range(len(self)): chain_list.append((self.ftime[i], self.btime[i], self.x[i], self.xbar[i], self.ftmp[i], self.btmp[i])) i = len(self) chain_list.append((None, self.btime[i], self.x[i], self.xbar[i], None, self.btmp[i])) return chain_list.__repr__() def __len__(self): return len(self.ftime) def discretize_all(self, unit: int): """Discretize the chain into a list of chains according to unit size.""" discretizer = lambda val: math.ceil(val / unit) self.x = tree_map(discretizer, self.x) self.xbar = tree_map(discretizer, self.xbar) self.ftmp = tree_map(discretizer, self.ftmp) self.btmp = tree_map(discretizer, self.btmp) class Operation(ABC): name = "Op" def __repr__(self) -> str: return f"{self.name}_{self.index}" def shift(self, value): if type(self.index) is tuple: self.index = tuple(x + value for x in self.index) else: self.index += value class Forward(Operation): name = "F" def __init__(self, index): self.index = index def cost(self, chain: Chain): if chain is not None: return chain.ftime[self.index] else: return 1 class ForwardEnable(Forward): name = "Fe" class ForwardNograd(Forward): name = "Fn" class ForwardCheck(Forward): name = "CF" class Forwards(Operation): def __init__(self, start, end): self.index = (start, end) def __repr__(self): return "F_{i}->{j}".format(i=self.index[0], j=self.index[1]) def cost(self, chain: Chain): if chain is not None: return sum(chain.ftime[self.index[0] : self.index[1] + 1]) else: return self.index[1] - 
self.index[0] + 1 def isForward(op): return type(op) is Forward or type(op) is Forwards class Backward(Operation): name = "B" def __init__(self, index): self.index = index def cost(self, chain: Chain): if chain is not None: return chain.btime[self.index] else: return 1 class Loss(Operation): def __init__(self): pass def __repr__(self): return "L" def cost(self, chain): return 0 class MemoryAccess(Operation): name = "MA" def __init__(self, index): self.index = index def cost(self, chain: Chain): return 0 class WriteMemory(MemoryAccess): name = "WM" class ReadMemory(MemoryAccess): name = "RM" class DiscardMemory(MemoryAccess): name = "DM" class Sequence(list): def __init__(self): super().__init__() def __repr__(self): return repr(self.list_operations()) def list_operations(self): op_list = [] for x in self: if isinstance(x, Operation): op_list.append(x) else: assert isinstance(x, Sequence) op_list += x.list_operations() return op_list
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/passes/runtime_apply_pass.py
colossalai/auto_parallel/passes/runtime_apply_pass.py
from typing import Dict, List

import torch
from torch.fx.node import Node

from colossalai._analyzer.fx.node_util import MetaInfo
from colossalai.auto_parallel.tensor_shard.sharding_strategy import CommType, OperationDataType
from colossalai.tensor.comm_spec import CommSpec
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.tensor.sharding_spec import ShardingSpec

# Module-level singleton shared by the runtime helpers and the graph passes below.
shape_consistency_manager = ShapeConsistencyManager()


def runtime_apply(node: Node, origin_dict: Dict, input_dict: Dict, node_index: int, user_node_index: int):
    """Convert ``node``'s activation into the sharding form its user expects.

    This function is not called directly: ``_shape_consistency_apply`` inserts it
    into the graph as a ``call_function`` node, so it runs during the actual
    forward pass.

    Args:
        node: at runtime this is the value produced by the original node
            (annotated ``Node`` because the pass wires an FX node here).
        origin_dict: node index -> sharding spec the value was produced with.
        input_dict: node index -> per-user list of expected sharding specs.
        node_index: index of the producer in the solver's node ordering.
        user_node_index: index of the consumer among the producer's successors.
    """
    origin_sharding_spec = origin_dict[node_index]
    target_sharding_spec = input_dict[node_index][user_node_index]
    return shape_consistency_manager.apply_for_autoparallel_runtime(node, origin_sharding_spec, target_sharding_spec)


def runtime_apply_for_iterable_object(
    node: Node, origin_dict: Dict, input_dict: Dict, node_index: int, user_node_index: int
):
    """Element-wise variant of :func:`runtime_apply` for tuple/list activations.

    Each element of the iterable is converted with its own (origin, target)
    sharding-spec pair; the result is rebuilt with the input's container type.
    """
    rst = []
    for index, (origin_sharding_spec, target_sharding_spec) in enumerate(
        zip(origin_dict[node_index], input_dict[node_index][user_node_index])
    ):
        rst.append(
            shape_consistency_manager.apply_for_autoparallel_runtime(
                node[index], origin_sharding_spec, target_sharding_spec
            )
        )
    # Preserve the container type (tuple stays tuple, list stays list).
    rst = type(node)(rst)
    return rst


def runtime_comm_spec_apply(tensor: torch.Tensor, comm_actions_dict: Dict, node_index: int, op_data_name: str):
    """Apply the communication action recorded for (``node_index``, ``op_data_name``).

    Inserted into the graph by ``_comm_spec_apply``; runs during the forward pass.
    A ``CommSpec`` action is executed directly, otherwise the recorded
    src/tgt sharding-spec pair is resolved through the shape-consistency manager.
    """
    comm_action = comm_actions_dict[node_index][op_data_name]
    if isinstance(comm_action.comm_spec, CommSpec):
        # NOTE(review): "covert" looks like a typo for "convert", but the name
        # belongs to the external CommSpec API and must match it.
        rst = comm_action.comm_spec.covert_spec_to_action(tensor)
    else:
        origin_sharding_spec = comm_action.comm_spec["src_spec"]
        tgt_sharding_spec = comm_action.comm_spec["tgt_spec"]
        rst = shape_consistency_manager.apply_for_autoparallel_runtime(tensor, origin_sharding_spec, tgt_sharding_spec)
    return rst


def _preprocess_graph(nodes: List[Node]):
    """Locate the three dict placeholders and index the strategy-annotated nodes.

    Returns:
        (input_dict_node, origin_dict_node, comm_actions_dict_node,
        node_to_index_dict) where the first three are the placeholder nodes
        named by their targets and the dict maps each node that carries a
        ``best_strategy`` attribute to its position in the solver ordering.

    Note: raises ``NameError`` if any of the three placeholders is absent —
    callers assume a graph already prepared by the runtime preparation pass.
    """
    # mapping the node into the origin graph index
    node_to_index_dict = {}
    index = 0
    for node in nodes:
        if node.target == "sharding_spec_convert_dict":
            input_dict_node = node
            continue
        if node.target == "origin_node_sharding_spec_dict":
            origin_dict_node = node
            continue
        if node.target == "comm_actions_dict":
            comm_actions_dict_node = node
            continue
        # Only nodes the solver annotated participate in the index mapping.
        if not hasattr(node, "best_strategy"):
            continue
        node_to_index_dict[node] = index
        index += 1

    return input_dict_node, origin_dict_node, comm_actions_dict_node, node_to_index_dict


def _shape_consistency_apply(gm: torch.fx.GraphModule):
    """Insert shape-consistency conversion nodes into the origin graph.

    For every strategy-annotated node whose output sharding differs from what a
    given consumer expects, a ``runtime_apply`` (or
    ``runtime_apply_for_iterable_object`` for tuple/list outputs) node is
    inserted just before that consumer, and the consumer's args/kwargs are
    rewired to read from the new node.
    """
    mod_graph = gm.graph
    nodes = tuple(mod_graph.nodes)

    input_dict_node, origin_dict_node, _, node_to_index_dict = _preprocess_graph(nodes)

    for node in nodes:
        if not hasattr(node, "best_strategy") or node.op == "output":
            continue

        for user_node_index, user_node in enumerate(node.strategies_vector.successor_nodes):
            if isinstance(node.sharding_spec, (list, tuple)):
                assert isinstance(
                    node.target_sharding_specs, (list, tuple)
                ), "target sharding specs should be tuple or list when node.sharding_spec is tuple or list"
                # Sum the per-element differences; zero means every element
                # already matches and no conversion node is needed.
                total_difference = 0
                for sharding_spec, target_sharding_spec in zip(
                    node.sharding_spec, node.target_sharding_specs[user_node_index]
                ):
                    total_difference += sharding_spec.sharding_sequence_difference(target_sharding_spec)
                if total_difference == 0:
                    continue
                with mod_graph.inserting_before(user_node):
                    shape_consistency_node = mod_graph.create_node(
                        "call_function",
                        runtime_apply_for_iterable_object,
                        args=(node, origin_dict_node, input_dict_node, node_to_index_dict[node], user_node_index),
                    )
            else:
                assert isinstance(
                    node.sharding_spec, ShardingSpec
                ), "node.sharding_spec should be type of ShardingSpec, tuple or list."
                # Producer and consumer agree on the sharding — nothing to do.
                if node.sharding_spec.sharding_sequence_difference(node.target_sharding_specs[user_node_index]) == 0:
                    continue
                with mod_graph.inserting_before(user_node):
                    shape_consistency_node = mod_graph.create_node(
                        "call_function",
                        runtime_apply,
                        args=(node, origin_dict_node, input_dict_node, node_to_index_dict[node], user_node_index),
                    )

            # Propagate activation-checkpoint annotation from the consumer onto
            # the freshly inserted node (MetaInfo attaches itself on creation —
            # presumably via side effect; confirm against node_util.MetaInfo).
            if hasattr(user_node.meta["info"], "activation_checkpoint"):
                MetaInfo(
                    shape_consistency_node,
                    mod_dir=user_node.meta["info"].mod_dir,
                    activation_checkpoint=tuple(user_node.meta["info"].activation_checkpoint),
                )

            new_args = list(user_node.args)
            new_kwargs = dict(user_node.kwargs)
            # the origin node may be a positional argument or key word argument of user node
            if node in new_args:
                # substitute the origin node with shape_consistency_node
                origin_index_args = new_args.index(node)
                new_args[origin_index_args] = shape_consistency_node
                user_node.args = tuple(new_args)
            elif str(node) in new_kwargs:
                # substitute the origin node with shape_consistency_node
                new_kwargs[str(node)] = shape_consistency_node
                user_node.kwargs = new_kwargs

    return gm


def _comm_spec_apply(gm: torch.fx.GraphModule):
    """Insert ``runtime_comm_spec_apply`` nodes for every communication action.

    BEFORE-type actions are inserted ahead of the node and replace the relevant
    argument; AFTER-type actions are inserted behind the node and every user is
    rewired to consume the new node instead. HOOK-type actions are skipped here.
    """
    mod_graph = gm.graph
    nodes = tuple(mod_graph.nodes)

    _, _, comm_actions_dict_node, node_to_index_dict = _preprocess_graph(nodes)

    for node in nodes:
        if not hasattr(node, "best_strategy") or node.op == "output":
            continue

        comm_actions = node.best_strategy.communication_actions
        for op_data, comm_action in comm_actions.items():
            if comm_action.comm_type == CommType.HOOK:
                continue
            if comm_action.comm_type == CommType.BEFORE:
                # Pick the object the communication acts on: the node itself
                # for OUTPUT operands, otherwise the matching kwarg or arg.
                if op_data.type == OperationDataType.OUTPUT:
                    comm_object = node
                elif comm_action.key_for_kwarg is not None:
                    comm_object = node.kwargs[comm_action.key_for_kwarg]
                else:
                    comm_object = node.args[comm_action.arg_index]
                with mod_graph.inserting_before(node):
                    comm_spec_apply_node = mod_graph.create_node(
                        "call_function",
                        runtime_comm_spec_apply,
                        args=(comm_object, comm_actions_dict_node, node_to_index_dict[node], op_data.name),
                    )
                # the origin node may be a positional argument or key word argument of user node
                if comm_action.key_for_kwarg is not None:
                    # substitute the origin node with comm_spec_apply_node
                    new_kwargs = dict(node.kwargs)
                    new_kwargs[comm_action.key_for_kwarg] = comm_spec_apply_node
                    node.kwargs = new_kwargs
                else:
                    # substitute the origin node with comm_spec_apply_node
                    new_args = list(node.args)
                    new_args[comm_action.arg_index] = comm_spec_apply_node
                    node.args = tuple(new_args)
            elif comm_action.comm_type == CommType.AFTER:
                with mod_graph.inserting_after(node):
                    comm_spec_apply_node = mod_graph.create_node(
                        "call_function",
                        runtime_comm_spec_apply,
                        args=(node, comm_actions_dict_node, node_to_index_dict[node], op_data.name),
                    )
                user_list = list(node.users.keys())
                for user in user_list:
                    # The new node is itself a user of ``node`` — skip it.
                    if user == comm_spec_apply_node:
                        continue
                    new_args = list(user.args)
                    new_kwargs = dict(user.kwargs)
                    # the origin node may be a positional argument or key word argument of user node
                    if node in new_args:
                        # substitute the origin node with comm_spec_apply_node
                        new_args[new_args.index(node)] = comm_spec_apply_node
                        user.args = tuple(new_args)
                    elif str(node) in new_kwargs:
                        # substitute the origin node with comm_spec_apply_node
                        new_kwargs[str(node)] = comm_spec_apply_node
                        user.kwargs = new_kwargs
            # Copy the activation-checkpoint annotation onto the inserted node.
            if hasattr(node.meta["info"], "activation_checkpoint"):
                MetaInfo(
                    comm_spec_apply_node,
                    mod_dir=node.meta["info"].mod_dir,
                    activation_checkpoint=tuple(node.meta["info"].activation_checkpoint),
                )

    return gm


def _act_annotation_pass(gm: torch.fx.GraphModule):
    """Propagate activation-checkpoint annotations onto newly inserted nodes.

    A node inherits the annotation only when its first annotated user and its
    first annotated input agree on the same checkpoint region.

    NOTE(review): this pass is not invoked by ``runtime_apply_pass`` below, and
    the ``hasattr(node.meta, ...)`` guard is a no-op (``pass`` instead of
    ``continue``, and ``node.meta`` is a dict so ``hasattr`` never matches a
    key) — confirm intent before relying on it.
    """
    mod_graph = gm.graph
    nodes = tuple(mod_graph.nodes)

    for node in nodes:
        if not hasattr(node.meta, "activation_checkpoint"):
            pass

        user_act_annotation = -1
        input_act_annotation = -1
        # First annotated user wins.
        for user_node in node.users.keys():
            if "activation_checkpoint" in user_node.meta:
                user_act_annotation = user_node.meta["activation_checkpoint"]
                break
        # First annotated input wins.
        for input_node in node._input_nodes.keys():
            if "activation_checkpoint" in input_node.meta:
                input_act_annotation = input_node.meta["activation_checkpoint"]
                break
        if user_act_annotation == input_act_annotation and user_act_annotation != -1:
            node.meta["activation_checkpoint"] = user_act_annotation

    return gm


def runtime_apply_pass(gm: torch.fx.GraphModule):
    """Run all runtime graph passes: shape consistency, then comm-spec apply."""
    gm = _shape_consistency_apply(gm)
    gm = _comm_spec_apply(gm)
    return gm
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/passes/constants.py
colossalai/auto_parallel/passes/constants.py
import torch

# Functional ops / call_function targets whose output is saved for backward.
OUTPUT_SAVED_OPS = [
    torch.nn.functional.relu,
    torch.nn.functional.softmax,
    torch.flatten,
]

# Module classes (call_module targets) whose output is saved for backward.
OUTPUT_SAVED_MOD = [torch.nn.ReLU, torch.nn.Softmax]

# Targets taking (input, *shape)-style arguments, like view/reshape.
# Extend this list for any other method sharing that argument style.
SHAPE_ARGUMENT_OPS = [
    torch.Tensor.view,
    torch.Tensor.reshape,
    torch.reshape,
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/passes/comm_metainfo_pass.py
colossalai/auto_parallel/passes/comm_metainfo_pass.py
from typing import Dict

import torch
from torch.fx import GraphModule
from torch.fx.node import Node

from colossalai.auto_parallel.meta_profiler import ShardMetaInfo
from colossalai.auto_parallel.passes.runtime_apply_pass import runtime_apply, runtime_comm_spec_apply
from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, TrainCycleItem
from colossalai.tensor.comm_spec import CommSpec
from colossalai.tensor.shape_consistency import ShapeConsistencyManager
from colossalai.tensor.sharding_spec import ShardingSpec

shape_consistency_manager = ShapeConsistencyManager()


def _construct_shard_meta_info(
    node: Node, origin_sharding_spec: ShardingSpec, target_sharding_spec: ShardingSpec
) -> ShardMetaInfo:
    """Build a ``ShardMetaInfo`` for converting ``origin_sharding_spec`` to
    ``target_sharding_spec``.

    Memory and compute figures come from the shape-consistency manager and are
    scaled by the element size (bytes) of the node's first input that carries
    ``_meta_data``; fwd_in/fwd_out record the sharded per-device shapes as meta
    tensors.
    """
    # get comm_action_sequence and total_cost from shape_consistency_manager
    _, comm_action_sequence, total_cost = shape_consistency_manager.shape_consistency(
        origin_sharding_spec, target_sharding_spec
    )

    meta_info = ShardMetaInfo()

    # NOTE: shape_consistency_manager.mem_cost counts numel, so every figure
    # below is multiplied by the element size to get bytes.
    mem_cost = shape_consistency_manager.mem_cost(comm_action_sequence)
    # Any input node carrying `_meta_data` is enough to recover the dtype size.
    input_node = next(n for n in node._input_nodes if hasattr(n, "_meta_data"))
    element_length = input_node._meta_data.element_size()

    mem_cost.fwd.activation *= element_length
    mem_cost.fwd.temp *= element_length
    mem_cost.bwd.activation *= element_length
    mem_cost.bwd.temp *= element_length
    mem_cost.total.activation *= element_length
    # NOTE(review): mem_cost.total.temp is left unscaled — confirm against
    # ShapeConsistencyManager.mem_cost whether that is intentional.

    meta_info.memory_cost = mem_cost

    # Compute cost, also scaled from numel to bytes moved.
    meta_info.compute_cost = TrainCycleItem(
        total_cost["forward"] * element_length,
        total_cost["backward"] * element_length,
        total_cost["total"] * element_length,
    )

    # Record the sharded per-device tensor shapes for this conversion.
    input_shape = origin_sharding_spec.get_sharded_shape_per_device()
    output_shape = target_sharding_spec.get_sharded_shape_per_device()
    meta_info.fwd_in = [torch.rand(input_shape, device="meta")]
    meta_info.fwd_buffer = []
    meta_info.fwd_out = [torch.rand(output_shape, device="meta")]

    return meta_info


def _runtime_apply_meta_info(node: Node, origin_spec_dict, sharding_spec_dict) -> ShardMetaInfo:
    """Construct the ``ShardMetaInfo`` for a shape-consistency (``runtime_apply``) node."""
    # args layout mirrors runtime_apply(node, origin_dict, input_dict,
    # node_index, user_node_index) — extract the two indices.
    args = node.args
    node_index, user_node_index = args[3], args[4]

    origin_sharding_spec, target_sharding_spec = (
        origin_spec_dict[node_index],
        sharding_spec_dict[node_index][user_node_index],
    )

    return _construct_shard_meta_info(node, origin_sharding_spec, target_sharding_spec)


def _runtime_comm_spec_apply_meta_info(node: Node, comm_actions_dict: Dict) -> ShardMetaInfo:
    """Construct the ``ShardMetaInfo`` for a ``runtime_comm_spec_apply`` node."""
    # args layout mirrors runtime_comm_spec_apply(tensor, comm_actions_dict,
    # node_index, op_data_name).
    node_index, op_data_name = node.args[2], node.args[3]

    comm_action = comm_actions_dict[node_index][op_data_name]
    if isinstance(comm_action.comm_spec, CommSpec):
        # all_reduce-style action: no extra memory cost, only communication time.
        meta_info = ShardMetaInfo()
        # Bug fix: the original passed the MemoryCost *class* as the third
        # argument; an instance is required so consumers can read its fields.
        meta_info.memory_cost = TrainCycleItem(MemoryCost(), MemoryCost(), MemoryCost())
        output_node = next(n for n in node.users if hasattr(n, "_meta_data"))
        element_length = output_node._meta_data.element_size()

        total_cost = comm_action.comm_spec.get_comm_cost()
        meta_info.compute_cost = TrainCycleItem(
            total_cost["forward"] * element_length,
            total_cost["backward"] * element_length,
            total_cost["total"] * element_length,
        )
        # The communication does not change the sharded shape.
        input_shape = output_shape = comm_action.comm_spec.sharding_spec.get_sharded_shape_per_device()
        meta_info.fwd_in = [torch.rand(input_shape, device="meta")]
        meta_info.fwd_buffer = []
        meta_info.fwd_out = [torch.rand(output_shape, device="meta")]
    else:
        # src/tgt spec pair: delegate to the shape-consistency cost model.
        origin_sharding_spec, target_sharding_spec = (
            comm_action.comm_spec["src_spec"],
            comm_action.comm_spec["tgt_spec"],
        )
        meta_info = _construct_shard_meta_info(node, origin_sharding_spec, target_sharding_spec)

    return meta_info


def comm_metainfo_pass(
    gm: GraphModule, sharding_spec_dict: Dict, origin_spec_dict: Dict, comm_actions_dict: Dict
) -> GraphModule:
    """Attach ``best_strategy_info`` metainfo to every communication node
    (``runtime_apply`` / ``runtime_comm_spec_apply``) in the graph.
    """
    for node in gm.graph.nodes:
        if node.target == runtime_apply:
            setattr(node, "best_strategy_info", _runtime_apply_meta_info(node, origin_spec_dict, sharding_spec_dict))
        elif node.target == runtime_comm_spec_apply:
            setattr(node, "best_strategy_info", _runtime_comm_spec_apply_meta_info(node, comm_actions_dict))
    return gm
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/passes/meta_info_prop.py
colossalai/auto_parallel/passes/meta_info_prop.py
import uuid
from dataclasses import asdict
from typing import List

import torch
import torch.fx
from torch.fx import GraphModule
from torch.fx.node import Node

from colossalai.auto_parallel.meta_profiler import ShardMetaInfo
from colossalai.auto_parallel.passes.constants import OUTPUT_SAVED_MOD, OUTPUT_SAVED_OPS
from colossalai.fx._compatibility import compatibility
from colossalai.fx.profiler import GraphInfo


def _normalize_tuple(x):
    # Wrap a single value into a 1-tuple; tuples pass through unchanged.
    if not isinstance(x, tuple):
        return (x,)
    return x


@compatibility(is_backward_compatible=False)
class MetaInfoProp:
    """Propagate per-node meta information (tensor identity via fake
    ``data_ptr``s, memory and timing figures from ``best_strategy_info``)
    through an FX graph, writing the result into each node's ``meta`` dict.
    """

    def __init__(self, module: GraphModule) -> None:
        self.module = module
        # Dispatch table: FX opcode -> handler method.
        self.func_dict = {
            "placeholder": self.placeholder_handler,
            "get_attr": self.get_attr_handler,
            "output": self.output_handler,
            "call_function": self.node_handler,
            "call_module": self.node_handler,
            "call_method": self.node_handler,
        }

    def _set_data_ptr(self, x):
        """Assign a fresh uuid-backed ``data_ptr`` to ``x`` if it reports none.

        Monkey-patches ``x.data_ptr`` so each logical buffer gets a unique,
        comparable identity (meta tensors report ``data_ptr() == 0``).
        """
        if isinstance(x, torch.Tensor):
            if not x.data_ptr():
                data_ptr = uuid.uuid4()
                x.data_ptr = lambda: data_ptr

    def _is_inplace(self, node: Node):
        """Return True when ``node``'s target is listed in OUTPUT_SAVED_MOD
        (for call_module) or OUTPUT_SAVED_OPS (for call_function)."""
        if node.op == "call_module":
            return node.graph.owning_module.get_submodule(node.target).__class__ in OUTPUT_SAVED_MOD
        elif node.op == "call_function":
            return node.target in OUTPUT_SAVED_OPS
        return False

    def run(self) -> None:
        """
        Run the meta information propagation pass on the module.

        Mutates ``self.module``'s node metadata in place and returns nothing
        (the upstream ``-> GraphModule`` annotation was inaccurate).
        """
        for node in self.module.graph.nodes:
            node: Node
            self.func_dict[node.op](node)

    @compatibility(is_backward_compatible=False)
    def placeholder_handler(self, node: Node) -> None:
        """
        Handle the placeholder node: expose its ``_meta_data`` (if any) as fwd_out.
        """
        graph_info = GraphInfo()
        out = _normalize_tuple(getattr(node, "_meta_data", None))
        graph_info.fwd_out = list(out) if out[0] is not None else []
        node.meta = {**asdict(graph_info)}

    @compatibility(is_backward_compatible=False)
    def get_attr_handler(self, node: Node) -> None:
        """
        Handle the get_attr node: attach an empty GraphInfo.
        """
        graph_info = GraphInfo()
        node.meta = {**asdict(graph_info)}

    @compatibility(is_backward_compatible=False)
    def output_handler(self, node: Node) -> None:
        """
        Handle the output node: collect every parent's fwd_out as this node's fwd_in.
        """
        graph_info = GraphInfo()
        output_tensors = []
        for par in node._input_nodes:
            if par.meta:
                output_tensors += par.meta["fwd_out"]
        graph_info.fwd_in = output_tensors
        node.meta = {**asdict(graph_info)}

    @compatibility(is_backward_compatible=False)
    def node_handler(self, node: Node) -> None:
        """
        Handle other kind of nodes (call_function / call_module / call_method):
        alias tensor identities with the parents' outputs and copy the memory
        and timing figures from ``node.best_strategy_info``.
        """
        assert hasattr(node, "best_strategy_info"), f"Cannot find best_strategy_info in node {node}, {node.op}"
        graph_info = GraphInfo()
        meta_info = node.best_strategy_info
        meta_info: ShardMetaInfo

        # set data_ptr for input_tensor in ShardMetaInfo class
        input_tensors: List[torch.Tensor] = meta_info.fwd_in
        buffer_tensors: List[torch.Tensor] = meta_info.fwd_buffer
        output_tensors: List[torch.Tensor] = meta_info.fwd_out

        if self._is_inplace(node):
            # inplace operation will not create new tensor, and it only has one parent node
            # TODO: Verify this observation
            # set data_ptr for input_tensor, buffer_tensor and output_tensor of current node
            parent_node = list(node._input_nodes.keys())[0]
            # NOTE(review): assumes the parent's fwd_out is non-empty — an
            # IndexError here means the parent produced no meta outputs.
            parent_tensor = parent_node.meta.get("fwd_out")[0]
            parent_tensor: torch.Tensor
            for tensor in input_tensors:
                tensor.data_ptr = parent_tensor.data_ptr
            for tensor in buffer_tensors:
                tensor.data_ptr = parent_tensor.data_ptr
            for tensor in output_tensors:
                tensor.data_ptr = parent_tensor.data_ptr

        else:
            for par in node._input_nodes:
                # set data_ptr for the input_tensor of current node from the output_tensor of its parent node
                for tensor in par.meta.get("fwd_out", []):
                    tensor: torch.Tensor
                    # Match by shape among inputs that have no identity yet.
                    target_input_tensor = next(
                        (x for x in input_tensors if not x.data_ptr() and x.shape == tensor.shape), None
                    )
                    if target_input_tensor is not None:
                        target_input_tensor.data_ptr = tensor.data_ptr

            # set data_ptr for tensor in input_tensor that is not set
            for tensor in input_tensors:
                if not tensor.data_ptr():
                    self._set_data_ptr(tensor)

            # set data_ptr for buffer_tensor
            for tensor in buffer_tensors:
                self._set_data_ptr(tensor)

            # set data_ptr for output_tensor
            for tensor in output_tensors:
                self._set_data_ptr(tensor)

        # attach them to graph_info
        graph_info.fwd_in = input_tensors
        graph_info.fwd_tmp = buffer_tensors
        graph_info.fwd_out = output_tensors

        # fetch other memory information
        memory_cost = meta_info.memory_cost
        graph_info.fwd_mem_tmp = memory_cost.fwd.temp
        graph_info.fwd_mem_out = memory_cost.fwd.activation
        graph_info.bwd_mem_tmp = memory_cost.bwd.temp
        graph_info.bwd_mem_out = memory_cost.bwd.activation

        # fetch flop information
        # here we use fwd_time and bwd_time to deal with the case that
        # communication cost is a float
        compute_cost = meta_info.compute_cost
        graph_info.fwd_time = compute_cost.fwd
        graph_info.bwd_time = compute_cost.bwd

        node.meta = {**asdict(graph_info)}
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/passes/__init__.py
colossalai/auto_parallel/passes/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/auto_parallel/passes/runtime_preparation_pass.py
colossalai/auto_parallel/passes/runtime_preparation_pass.py
import operator from typing import Dict, List, Union import torch from torch.fx.node import Node from colossalai._analyzer.fx.node_util import MetaInfo from colossalai.auto_parallel.tensor_shard.constants import RESHAPE_FUNC_OP from colossalai.auto_parallel.tensor_shard.sharding_strategy import CommType, OperationDataType from colossalai.auto_parallel.tensor_shard.solver.strategies_constructor import StrategiesConstructor from colossalai.device.device_mesh import DeviceMesh from colossalai.tensor.comm_spec import _all_reduce from colossalai.tensor.shape_consistency import ShapeConsistencyManager from colossalai.tensor.sharding_spec import ShardingSpec from .constants import SHAPE_ARGUMENT_OPS shape_consistency_manager = ShapeConsistencyManager() def size_processing( size: Union[int, torch.Size], dim_partition_dict: Dict[int, List[int]], device_mesh_info: Dict[int, int], target_dim: int = None, node_name: str = None, ): """ This method will be invoked during runtime to convert size node value depending on distributed information. """ if target_dim is not None: assert isinstance(size, int) if target_dim in dim_partition_dict: total_shard_size = 1 for shard_dim in dim_partition_dict[target_dim]: total_shard_size *= device_mesh_info[shard_dim] size = size * total_shard_size else: size = list(size) for dim, dim_size in enumerate(size): if dim in dim_partition_dict: total_shard_size = 1 for shard_dim in dim_partition_dict[dim]: total_shard_size *= device_mesh_info[shard_dim] size[dim] = dim_size * total_shard_size size = torch.Size(size) return size def solution_annotation_pass( gm: torch.fx.GraphModule, solution: List[int], strategies_constructor: StrategiesConstructor ): """ This method is used to stick the solution strategy to the nodes and add the information required in runtime into graph as placeholder nodes. 
""" mod_graph = gm.graph nodes = [strategies_vector.node for strategies_vector in strategies_constructor.leaf_strategies] no_strategy_nodes = strategies_constructor.no_strategy_nodes # the dict to get origin sharding spec of node origin_node_sharding_spec_dict = {} for node_index, (node, strategy_index) in enumerate(zip(nodes, solution)): strategies_vector = node.strategies_vector # stick the solution strategy to the corresponding node setattr(node, "best_strategy", strategies_vector[strategy_index]) setattr(node, "sharding_spec", strategies_vector[strategy_index].get_sharding_spec_by_name(str(node))) origin_node_sharding_spec_dict[node_index] = strategies_vector[strategy_index].get_sharding_spec_by_name( str(node) ) # attach the corresponding metainfo if node has the attribute `strategies_info` if hasattr(node, "strategies_info"): setattr(node, "best_strategy_info", node.strategies_info[strategy_index]) # the dict to get input sharding specs of user node sharding_spec_convert_dict = {} # the dict to record comm actions of nodes comm_actions_dict = {} for index, node in enumerate(nodes): target_sharding_specs = [] for user_node in node.strategies_vector.successor_nodes: if user_node in no_strategy_nodes: target_sharding_spec = node.best_strategy.get_sharding_spec_by_name(str(node.name)) else: target_sharding_spec = user_node.best_strategy.get_sharding_spec_by_name(str(node.name)) target_sharding_specs.append(target_sharding_spec) sharding_spec_convert_dict[index] = target_sharding_specs setattr(node, "target_sharding_specs", target_sharding_specs) # the get_attr node strategy is kind of pending strategy, which means we will change it # to the same strategy of the user node. if node.op == "get_attr": assert len(target_sharding_specs) == 1, f"sharing weight is not supported in current version." 
target_node = node.strategies_vector.successor_nodes[0] node_name = str(node) if target_node.op == "call_function" and target_node.target in RESHAPE_FUNC_OP: node_name = str(target_node) target_node = target_node.strategies_vector.successor_nodes[0] user_strategy = target_node.best_strategy op_data_in_user = user_strategy.get_op_data_by_name(node_name) origin_pending_strategy = node.best_strategy origin_op_data = origin_pending_strategy.get_op_data_by_name(str(node)) new_communication_actions = {} if op_data_in_user in user_strategy.communication_actions: new_communication_action = user_strategy.communication_actions.pop(op_data_in_user) new_communication_action.arg_index = 0 new_communication_actions[origin_op_data] = new_communication_action node.best_strategy.communication_actions = new_communication_actions comm_action_dict = {} for op_data, comm_action in node.best_strategy.communication_actions.items(): comm_action_dict[op_data.name] = comm_action comm_actions_dict[index] = comm_action_dict # add above dicts into graph for node in nodes: if node.op != "placeholder": with mod_graph.inserting_before(node): input_specs_node = mod_graph.create_node("placeholder", target="sharding_spec_convert_dict") origin_specs_node = mod_graph.create_node("placeholder", target="origin_node_sharding_spec_dict") comm_actions_dict_node = mod_graph.create_node("placeholder", target="comm_actions_dict") break return gm, sharding_spec_convert_dict, origin_node_sharding_spec_dict, comm_actions_dict def size_value_converting_pass(gm: torch.fx.GraphModule, device_mesh: DeviceMesh): """ In the auto parallel system, tensors may get shard on different devices, so the size of tensors need to be converted to the size of original tensor and managed by the users, such as torch.view, torch.reshape, etc. These nodes have enough information like input sharding_spec and output sharding_spec to decide how to convert the size value. 
""" mod_graph = gm.graph nodes = tuple(mod_graph.nodes) node_pairs = {} # DeviceMesh information instructs the scaling of the size value device_mesh_info = {} for dim, dim_size in enumerate(device_mesh.shape): device_mesh_info[dim] = dim_size def _extract_target_dim(node): """ A helper function to extract the target dimension from size node. There are two usages of torch.Tensor.size: 1. tensor.size() 2. tensor.size(dim) If a target_dim is assigned, then the output will be in type of int, instead of torch.Size. Otherwise, the output will be in type of torch.Size and this function will return None. """ target_dim = None if len(node.args) > 1: target_dim = node.args[1] if target_dim < 0: target_dim += node.args[0]._meta_data.dim() return target_dim def _post_processing(node, size_processing_node): """ This function is used to process the dependency between the size node and its users after inserting the size_process_node. """ # store original node and processing node pair in node_pairs dictionary # It will be used to replace the original node with processing node in slice object node_pairs[node] = size_processing_node size_processing_node._meta_data = node._meta_data if hasattr(node.meta["info"], "activation_checkpoint"): MetaInfo( size_processing_node, mod_dir=node.meta["info"].mod_dir, activation_checkpoint=tuple(node.meta["info"].activation_checkpoint), ) user_list = list(node.users.keys()) for user in user_list: if user == size_processing_node: continue new_args = list(user.args) new_kwargs = dict(user.kwargs) # the origin node may be a positional argument or key word argument of user node if node in new_args: # substitute the origin node with size_processing_node new_args[new_args.index(node)] = size_processing_node user.args = tuple(new_args) elif str(node) in new_kwargs: # substitute the origin node with size_processing_node new_kwargs[str(node)] = size_processing_node user.kwargs = new_kwargs def _update_slice_object_args(slice_object): """ This function is 
used to update the slice object argument list. If the slice object contains the Node argument, then the size node will be replaced with """ if isinstance(slice_object, slice): start = slice_object.start stop = slice_object.stop step = slice_object.step if start in node_pairs: start = node_pairs[start] if stop in node_pairs: stop = node_pairs[stop] if step in node_pairs: step = node_pairs[step] return slice(start, stop, step) elif isinstance(slice_object, int): if slice_object in node_pairs: return node_pairs[slice_object] else: return slice_object else: raise RuntimeError(f"Unsupported slice object type: {type(slice_object)}") for node in nodes: if node.op == "call_method" and node.target == "size": # extract useful information from size node # dim_partition_dict will instruct the size value on which # dimension should be enlarged. sharding_spec = node.args[0].sharding_spec dim_partition_dict = sharding_spec.dim_partition_dict target_dim = _extract_target_dim(node) # insert size_processing node with mod_graph.inserting_after(node): size_processing_node = mod_graph.create_node( "call_function", size_processing, args=(node, dim_partition_dict, device_mesh_info, target_dim, node.name), ) _post_processing(node, size_processing_node) if node.op == "call_function" and node.target == operator.getitem: getitem_index = node.args[1] # slice object is quite special in torch.fx graph, # On one side, we treat slice object same as type of int, # so we do not create a node for slice object. On the other side, # slice object could take fx.Node as its argument. And the user # relationship cannot be tracked in fx graph. # Therefore, I record the node_pairs in this pass, and use the it # to replace the original node argument inside the slice object if # it has been processed in above pass. 
# There are three main usages of operator.getitem: # getitem(input, int) # getitem(input, slice) # getitem(input, Tuple[slice]) # In this pass, we need process the last two cases because # node arguments may potentially appear in these cases. if isinstance(getitem_index, slice): new_slice_item = _update_slice_object_args(getitem_index) new_args = (node.args[0], new_slice_item) node.args = new_args elif isinstance(getitem_index, (tuple, list)): if not isinstance(getitem_index[0], slice): continue new_slice_items = [] for slice_item in getitem_index: if slice_item is None: new_slice_items.append(None) continue new_slice_item = _update_slice_object_args(slice_item) new_slice_items.append(new_slice_item) new_args = (node.args[0], tuple(new_slice_items)) node.args = new_args return gm def node_args_converting_pass(gm: torch.fx.GraphModule, device_mesh: DeviceMesh): """ This pass will process node args to adapt the distributed tensor layout. """ mod_graph = gm.graph nodes = tuple(mod_graph.nodes) def _extract_info_from_sharding_spec(sharding_spec): """ This function is used to extract the dim_partition_dict and device_mesh from sharding spec instance or a list of sharding spec. """ if isinstance(sharding_spec, ShardingSpec): dim_partition_dict = sharding_spec.dim_partition_dict device_mesh = sharding_spec.device_mesh return dim_partition_dict, device_mesh if sharding_spec is None: return None, None assert isinstance( sharding_spec, (tuple, list) ), "sharding_spec should be type of ShardingSpec, tuple, list or None" device_mesh = sharding_spec[0].device_mesh dim_partition_dict = [] for element in sharding_spec: dim_partition_dict.append(_extract_info_from_sharding_spec(element)) return dim_partition_dict, sharding_spec def _process_node_arguments(node): new_args = [] for arg in node.args: # There are two args style: # 1. (input, *shape) # 2. 
(input, shape) # We will extract the elements from shape and add them into the new_args # Finally, the args style of new_args will be unified to (input, *shape) if isinstance(arg, Node): if isinstance(arg._meta_data, (tuple, list)): new_args.extend(arg._meta_data) elif isinstance(arg._meta_data, int): new_args.append(arg._meta_data) else: new_args.append(arg) else: assert isinstance( arg, (int, tuple, list) ), "The argument in view node should be either type of Node or int." if isinstance(arg, (tuple, list)): new_args.extend(arg) else: new_args.append(arg) return new_args def _scale_args_adapt_sharding_spec(dim_partition_dict, device_mesh, node): new_args = _process_node_arguments(node) if node.op == "call_method": args_to_process = list(new_args[1:]) else: args_to_process = list(new_args) for dim, shard_dims in dim_partition_dict.items(): total_shard_size = 1 for shard_dim in shard_dims: total_shard_size *= device_mesh.shape[shard_dim] # we will skip the dim with -1 value if args_to_process[dim] == -1: continue else: # TODO: add assertion here to make sure the dim size is divisible by total_shard_size args_to_process[dim] //= total_shard_size args_to_process = tuple(args_to_process) if node.op == "call_method": new_args = (new_args[0],) + args_to_process else: new_args = args_to_process node.args = new_args def _filter_node_with_shape_args(node): if node.op == "call_method": target = getattr(node.args[0]._meta_data.__class__, node.target) elif node.op == "call_function": target = node.target else: target = None if target in SHAPE_ARGUMENT_OPS: return True return False for node in nodes: # skip the placeholder node added in _solution_annotation pass if not hasattr(node, "sharding_spec"): continue output_dim_partition_dict, device_mesh = _extract_info_from_sharding_spec(node.sharding_spec) if _filter_node_with_shape_args(node): _scale_args_adapt_sharding_spec(output_dim_partition_dict, device_mesh, node) return gm def module_params_sharding_pass(gm: 
torch.fx.GraphModule, device_mesh: DeviceMesh, overlap=False): """ Apply the sharding action to the module parameters and buffers following the instructions of solver solution. """ mod_graph = gm.graph nodes = tuple(mod_graph.nodes) # This stream is created for overlapping the communication and computation. reduction_stream = torch.cuda.Stream() def _add_hook_for_grad_communication(node, param, name=None): comm_actions = node.best_strategy.communication_actions def _filter_param_to_hook(node, op_data, comm_action, name): if ( node.op == "call_module" and op_data.type == OperationDataType.PARAM and op_data.name == name and comm_action.comm_type == CommType.HOOK ): return True if ( node.op == "get_attr" and isinstance(node._meta_data, torch.nn.parameter.Parameter) and comm_action.comm_type == CommType.HOOK ): return True return False for operation_data, comm_action in comm_actions.items(): comm_spec_to_use = comm_action.comm_spec # register hook to the parameters if _filter_param_to_hook(node, operation_data, comm_action, name=name): def wrapper(param, comm_spec, stream, overlap): def hook_fn(grad): if overlap: with torch.cuda.stream(stream): _all_reduce(grad, comm_spec, async_op=True) else: _all_reduce(grad, comm_spec, async_op=False) param.register_hook(hook_fn) wrapper(param, comm_spec_to_use, reduction_stream, overlap=overlap) def _shard_param(param, target_sharding_spec): # apply the sharding spec of parameters if target_sharding_spec.dim_partition_dict != {}: origin_sharding_spec = ShardingSpec(device_mesh, param.shape, {}) setattr(param, "sharding_spec", origin_sharding_spec) # TODO: build a ColoParameter class to manager the distributed parameters # we could use .data here, because all the operations just happen before the real training # loop, so we don't need to track these operations in the autograd graph. 
param = torch.nn.Parameter( shape_consistency_manager.apply_for_autoparallel_runtime( param.data, param.sharding_spec, target_sharding_spec ) .detach() .clone() ) return param for node in nodes: if node.op == "call_module": target_module = node.graph.owning_module.get_submodule(node.target) # TODO: we need to do more actions to take care of the shared parameters. if hasattr(target_module, "processed") and target_module.processed: continue setattr(target_module, "processed", True) for name, param in target_module.named_parameters(): target_sharding_spec = node.best_strategy.get_sharding_spec_by_name(name) param = _shard_param(param, target_sharding_spec) setattr(target_module, name, param) _add_hook_for_grad_communication(node, param, name) sharded_buffer_dict = {} # apply the sharding spec of buffers for name, buffer in target_module.named_buffers(): origin_sharding_spec = ShardingSpec(device_mesh, buffer.shape, {}) setattr(buffer, "sharding_spec", origin_sharding_spec) target_sharding_spec = node.best_strategy.get_sharding_spec_by_name(name) buffer_sharded = shape_consistency_manager.apply(buffer, target_sharding_spec) sharded_buffer_dict[name] = buffer_sharded for name, buffer_sharded in sharded_buffer_dict.items(): setattr(target_module, name, buffer_sharded.detach().clone()) if node.op == "get_attr": root = node.graph.owning_module atoms = node.target.split(".") attr_len = len(atoms) if attr_len == 1: target_module = root target = getattr(root, atoms[0]) else: target_module = root for atom in atoms[:-1]: target_module = getattr(target_module, atom) target = getattr(target_module, atoms[-1]) target_sharding_spec = node.sharding_spec target = _shard_param(target, target_sharding_spec) assert hasattr(target_module, atoms[-1]) setattr(target_module, atoms[-1], target) _add_hook_for_grad_communication(node, target) return gm def implicit_comm_action_apply(gm: torch.fx.GraphModule): """ replace the origin kernel into kernel with implicit communication inside. 
""" def runtime_preparation_pass( gm: torch.fx.GraphModule, solution: List[int], device_mesh: DeviceMesh, strategies_constructor: StrategiesConstructor, overlap=False, ): gm, sharding_spec_convert_dict, origin_node_sharding_spec_dict, comm_actions_dict = solution_annotation_pass( gm, solution, strategies_constructor ) gm = size_value_converting_pass(gm, device_mesh) gm = node_args_converting_pass(gm, device_mesh) # TODO: the pass below should be uncommented after the implementation of implicit_comm_action_apply_pass completed. # gm = implicit_comm_action_apply(gm) gm = module_params_sharding_pass(gm, device_mesh, overlap=overlap) return gm, sharding_spec_convert_dict, origin_node_sharding_spec_dict, comm_actions_dict
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cli/cli.py
colossalai/cli/cli.py
import click from .check import check from .launcher import run class Arguments: def __init__(self, arg_dict): for k, v in arg_dict.items(): self.__dict__[k] = v @click.group() def cli(): pass cli.add_command(run) cli.add_command(check) if __name__ == "__main__": cli()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cli/__init__.py
colossalai/cli/__init__.py
from .cli import cli __all__ = ["cli"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cli/launcher/run.py
colossalai/cli/launcher/run.py
import os import sys from typing import List import click import torch from packaging import version from colossalai.context import Config from .hostinfo import HostInfo, HostInfoList from .multinode_runner import MultiNodeRunner # Constants that define our syntax NODE_SEP = "," def fetch_hostfile(hostfile_path: str, ssh_port: int) -> HostInfoList: """ Parse the hostfile to obtain a list of hosts. A hostfile should look like: worker-0 worker-1 worker-2 ... Args: hostfile_path (str): the path to the hostfile ssh_port (int): the port to connect to the host """ if not os.path.isfile(hostfile_path): click.echo(f"Error: Unable to find the hostfile, no such file: {hostfile_path}") exit() with open(hostfile_path, "r") as fd: device_pool = HostInfoList() for line in fd.readlines(): line = line.strip() if line == "": # skip empty lines continue # build the HostInfo object hostname = line.strip() hostinfo = HostInfo(hostname=hostname, port=ssh_port) if device_pool.has(hostname): click.echo(f"Error: found duplicate host {hostname} in the hostfile") exit() device_pool.append(hostinfo) return device_pool def parse_device_filter(device_pool: HostInfoList, include_str=None, exclude_str=None) -> HostInfoList: """Parse an inclusion or exclusion string and filter a hostfile dictionary. Examples: include_str="worker-0,worker-1" will execute jobs only on worker-0 and worker-1. exclude_str="worker-1" will use all available devices except worker-1. 
Args: device_pool (HostInfoList): a list of HostInfo objects include_str (str): --include option passed by user, default None exclude_str (str): --exclude option passed by user, default None Returns: filtered_hosts (HostInfoList): filtered hosts after inclusion/exclusion """ # Ensure include/exclude are mutually exclusive if include_str and exclude_str: click.echo("--include and --exclude are mutually exclusive, only one can be used") exit() # no-op if include_str is None and exclude_str is None: return device_pool # Either build from scratch or remove items if include_str: parse_str = include_str filtered_hosts = HostInfoList() elif exclude_str: parse_str = exclude_str filtered_hosts = device_pool # foreach node in the list for node_config in parse_str.split(NODE_SEP): hostname = node_config hostinfo = device_pool.get_hostinfo(hostname) # sanity check hostname if not device_pool.has(hostname): click.echo(f"Error: Hostname '{hostname}' not found in hostfile") exit() if include_str: filtered_hosts.append(hostinfo) elif exclude_str: filtered_hosts.remove(hostname) return filtered_hosts def get_launch_command( master_addr: str, master_port: int, nproc_per_node: int, user_script: str, user_args: List[str], node_rank: int, num_nodes: int, run_as_module: bool, extra_launch_args: str = None, ) -> str: """ Generate a command for distributed training. 
Args: master_addr (str): the host of the master node master_port (str): the port of the master node nproc_per_node (str): the number of processes to launch on each node user_script (str): the user Python file user_args (str): the arguments for the user script node_rank (int): the unique ID for the node num_nodes (int): the number of nodes to execute jobs Returns: cmd (str): the command the start distributed training """ def _arg_dict_to_list(arg_dict): ret = [] for k, v in arg_dict.items(): if v: ret.append(f"--{k}={v}") else: ret.append(f"--{k}") return ret if extra_launch_args: extra_launch_args_dict = dict() for arg in extra_launch_args.split(","): if "=" in arg: k, v = arg.split("=") extra_launch_args_dict[k] = v else: extra_launch_args_dict[arg] = None extra_launch_args = extra_launch_args_dict else: extra_launch_args = dict() torch_version = version.parse(torch.__version__) assert torch_version.major >= 1 if torch_version.major < 2 and run_as_module: raise ValueError("Torch version < 2.0 does not support running as module") if torch_version.major == 1 and torch_version.minor < 9: # torch distributed launch cmd with torch < 1.9 cmd = [ sys.executable, "-m", "torch.distributed.launch", f"--nproc_per_node={nproc_per_node}", f"--master_addr={master_addr}", f"--master_port={master_port}", f"--nnodes={num_nodes}", f"--node_rank={node_rank}", ] else: # extra launch args for torch distributed launcher with torch >= 1.9 default_torchrun_rdzv_args = dict(master_addr=master_addr, master_port=master_port) # update rdzv arguments for key in default_torchrun_rdzv_args.keys(): if key in extra_launch_args: value = extra_launch_args.pop(key) default_torchrun_rdzv_args[key] = value if torch_version.major == 1 and torch_version.minor == 9: # torch distributed launch cmd with torch == 1.9 cmd = [ sys.executable, "-m", "torch.distributed.run", f"--nproc_per_node={nproc_per_node}", f"--nnodes={num_nodes}", f"--node_rank={node_rank}", ] else: # torch distributed launch cmd with 
torch > 1.9 cmd = [ "torchrun", f"--nproc_per_node={nproc_per_node}", f"--nnodes={num_nodes}", f"--node_rank={node_rank}", ] cmd += _arg_dict_to_list(default_torchrun_rdzv_args) cmd += _arg_dict_to_list(extra_launch_args) if run_as_module: cmd.append("-m") cmd += [user_script] + user_args cmd = " ".join(cmd) return cmd def launch_multi_processes(args: Config) -> None: """ Launch multiple processes on a single node or multiple nodes. The overall logic can be summarized as the pseudo code below: if hostfile given: hostinfo = parse_hostfile(hostfile) hostinfo = include_or_exclude_hosts(hostinfo) launch_on_multi_nodes(hostinfo) elif hosts given: hostinfo = parse_hosts(hosts) launch_on_multi_nodes(hostinfo) else: launch_on_current_node() Args: args (Config): the arguments taken from command line """ assert isinstance(args, Config) if args.nproc_per_node is None: click.echo("--nproc_per_node did not receive any value") exit() # cannot accept hosts and hostfile at the same time if args.host and args.hostfile: click.echo("Error: hostfile and hosts are mutually exclusive, only one is required") # check if hostfile is given if args.hostfile: device_pool = fetch_hostfile(args.hostfile, ssh_port=args.ssh_port) active_device_pool = parse_device_filter(device_pool, args.include, args.exclude) if args.num_nodes > 0: # only keep the first num_nodes to execute jobs updated_active_device_pool = HostInfoList() for count, hostinfo in enumerate(active_device_pool): if args.num_nodes == count: break updated_active_device_pool.append(hostinfo) active_device_pool = updated_active_device_pool else: active_device_pool = None env = os.environ.copy() # use hosts if hostfile is not given if args.host and active_device_pool is None: active_device_pool = HostInfoList() host_list = args.host.strip().split(NODE_SEP) for hostname in host_list: hostinfo = HostInfo(hostname=hostname, port=args.ssh_port) active_device_pool.append(hostinfo) if not active_device_pool: # run on local node if not hosts or 
hostfile is given # add local node to host info list active_device_pool = HostInfoList() localhost_info = HostInfo(hostname="127.0.0.1", port=args.ssh_port) active_device_pool.append(localhost_info) # launch distributed processes runner = MultiNodeRunner() curr_path = os.path.abspath(".") # collect current path env env = dict() for k, v in os.environ.items(): # do not support multi-line env var if v and "\n" not in v: env[k] = v # establish remote connection runner.connect(host_info_list=active_device_pool, workdir=curr_path, env=env) # overwrite master addr when num_nodes > 1 and not specified if len(active_device_pool) > 1 and args.master_addr == "127.0.0.1": args.master_addr = active_device_pool.hostinfo_list[0].hostname # execute distributed launching command for node_id, hostinfo in enumerate(active_device_pool): cmd = get_launch_command( master_addr=args.master_addr, master_port=args.master_port, nproc_per_node=args.nproc_per_node, user_script=args.user_script, user_args=args.user_args, node_rank=node_id, num_nodes=len(active_device_pool), run_as_module=args.m, extra_launch_args=args.extra_launch_args, ) runner.send(hostinfo=hostinfo, cmd=cmd) # start training msg_from_node = runner.recv_from_all() has_error = False # print node status click.echo("\n====== Training on All Nodes =====") for hostname, msg in msg_from_node.items(): click.echo(f"{hostname}: {msg}") # check if a process failed if msg == "failure": has_error = True # stop all nodes runner.stop_all() # receive the stop status msg_from_node = runner.recv_from_all() # print node status click.echo("\n====== Stopping All Nodes =====") for hostname, msg in msg_from_node.items(): click.echo(f"{hostname}: {msg}") # give the process an exit code # so that it behaves like a normal process if has_error: sys.exit(1) else: sys.exit(0)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cli/launcher/hostinfo.py
colossalai/cli/launcher/hostinfo.py
import socket class HostInfo: """ A data class to store host connection-related data. Args: hostname (str): name or IP address of the host port (str): the port for ssh connection """ def __init__( self, hostname: str, port: str = None, ): self.hostname = hostname self.port = port self.is_local_host = HostInfo.is_host_localhost(hostname, port) @staticmethod def is_host_localhost(hostname: str, port: str = None) -> None: """ Check if the host refers to the local machine. Args: hostname (str): name or IP address of the host port (str): the port for ssh connection Returns: bool: True if it is local, False otherwise """ if port is None: port = 22 # no port specified, lets just use the ssh port # socket.getfqdn("127.0.0.1") does not return localhost # on some users' machines # thus, we directly return True if hostname is localhost, 127.0.0.1 or 0.0.0.0 if hostname in ("localhost", "127.0.0.1", "0.0.0.0"): return True hostname = socket.getfqdn(hostname) localhost = socket.gethostname() localaddrs = socket.getaddrinfo(localhost, port) targetaddrs = socket.getaddrinfo(hostname, port) return localaddrs == targetaddrs def __str__(self): return f"hostname: {self.hostname}, port: {self.port}" def __repr__(self): return self.__str__() class HostInfoList: """ A data class to store a list of HostInfo objects. """ def __init__(self): self.hostinfo_list = [] def append(self, hostinfo: HostInfo) -> None: """ Add an HostInfo object to the list. Args: hostinfo (HostInfo): host information """ self.hostinfo_list.append(hostinfo) def remove(self, hostname: str) -> None: """ Add an HostInfo object to the list. Args: hostname (str): the name of the host """ hostinfo = self.get_hostinfo(hostname) self.hostinfo_list.remove(hostinfo) def get_hostinfo(self, hostname: str) -> HostInfo: """ Return the HostInfo object which matches with the hostname. 
Args: hostname (str): the name of the host Returns: hostinfo (HostInfo): the HostInfo object which matches with the hostname """ for hostinfo in self.hostinfo_list: if hostinfo.hostname == hostname: return hostinfo raise Exception(f"Hostname {hostname} is not found") def has(self, hostname: str) -> bool: """ Check if the hostname has been added. Args: hostname (str): the name of the host Returns: bool: True if added, False otherwise """ for hostinfo in self.hostinfo_list: if hostinfo.hostname == hostname: return True return False def __iter__(self): return iter(self.hostinfo_list) def __len__(self): return len(self.hostinfo_list)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cli/launcher/multinode_runner.py
colossalai/cli/launcher/multinode_runner.py
from multiprocessing import Pipe, Process from multiprocessing import connection as mp_connection import click import fabric from .hostinfo import HostInfo, HostInfoList def run_on_host( hostinfo: HostInfo, workdir: str, recv_conn: mp_connection.Connection, send_conn: mp_connection.Connection, env: dict, ) -> None: """ Use fabric connection to execute command on local or remote hosts. Args: hostinfo (HostInfo): host information workdir (str): the directory to execute the command recv_conn (multiprocessing.connection.Connection): receive messages from the master sender send_conn (multiprocessing.connection.Connection): send messages to the master receiver env (dict): a dictionary for environment variables """ fab_conn = fabric.Connection(hostinfo.hostname, port=hostinfo.port) finish = False env_msg = " ".join([f'{k}="{v}"' for k, v in env.items()]) # keep listening until exit while not finish: # receive cmd cmds = recv_conn.recv() if cmds == "exit": # exit from the loop finish = True break else: # execute the commands try: # cd to execute directory with fab_conn.cd(workdir): # propagate the runtime environment with fab_conn.prefix(f"export {env_msg}"): if hostinfo.is_local_host: # execute on the local machine fab_conn.local(cmds, hide=False) else: # execute on the remote machine fab_conn.run(cmds, hide=False) send_conn.send("success") except Exception as e: click.echo( f"Error: failed to run {cmds} on {hostinfo.hostname}, is localhost: {hostinfo.is_local_host}, exception: {e}" ) send_conn.send("failure") # shutdown send_conn.send("finish") fab_conn.close() class MultiNodeRunner: """ A runner to execute commands on an array of machines. This runner is inspired by Nezha (https://github.com/zhuzilin/NeZha). 
""" def __init__(self): self.processes = {} self.master_send_conns = {} self.master_recv_conns = {} def connect(self, host_info_list: HostInfoList, workdir: str, env: dict) -> None: """ Establish connections to a list of hosts Args: host_info_list (HostInfoList): a list of HostInfo objects workdir (str): the directory where command is executed env (dict): environment variables to propagate to hosts """ for hostinfo in host_info_list: master_send_conn, worker_recv_conn = Pipe() master_recv_conn, worker_send_conn = Pipe() p = Process(target=run_on_host, args=(hostinfo, workdir, worker_recv_conn, worker_send_conn, env)) p.start() self.processes[hostinfo.hostname] = p self.master_recv_conns[hostinfo.hostname] = master_recv_conn self.master_send_conns[hostinfo.hostname] = master_send_conn def send(self, hostinfo: HostInfo, cmd: str) -> None: """ Send a command to a local/remote host. Args: hostinfo (HostInfo): host information cmd (str): the command to execute """ assert hostinfo.hostname in self.master_send_conns, f"{hostinfo} is not found in the current connections" conn = self.master_send_conns[hostinfo.hostname] conn.send(cmd) def stop_all(self) -> None: """ Stop connections to all hosts. """ for hostname, conn in self.master_send_conns.items(): conn.send("exit") def recv_from_all(self) -> dict: """ Receive messages from all hosts Returns: msg_from_node (dict): a dictionary which contains messages from each node """ msg_from_node = dict() for hostname, conn in self.master_recv_conns.items(): msg_from_node[hostname] = conn.recv() return msg_from_node
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cli/launcher/__init__.py
colossalai/cli/launcher/__init__.py
import click from colossalai.context import Config from .run import launch_multi_processes @click.command( help="Launch distributed training on a single node or multiple nodes", context_settings=dict(ignore_unknown_options=True), ) @click.option( "-H", "-host", "--host", type=str, default=None, help="the list of hostnames to launch in the format <host1>,<host2>", ) @click.option( "--hostfile", type=str, default=None, help="Hostfile path that defines the device pool available to the job, each line in the file is a hostname", ) @click.option( "--include", type=str, default=None, help="Specify computing devices to use during execution. String format is <host1>,<host2>," " only effective when used with --hostfile.", ) @click.option( "--exclude", type=str, default=None, help="Specify computing devices to NOT use during execution. Mutually exclusive with --include. Formatting is the same as --include," " only effective when used with --hostfile.", ) @click.option( "--num_nodes", type=int, default=-1, help="Total number of worker nodes to use, only effective when used with --hostfile.", ) @click.option("--nproc_per_node", type=int, default=None, help="Number of GPUs to use on each node.") @click.option( "--master_port", type=int, default=29500, help="(optional) Port used by PyTorch distributed for communication during distributed training.", ) @click.option( "--master_addr", type=str, default="127.0.0.1", help="(optional) IP address of node 0, will be inferred via 'hostname -I' if not specified.", ) @click.option( "--extra_launch_args", type=str, default=None, help="Set additional torch distributed launcher arguments such as --standalone. The format is --extra_launch_args arg1=1,arg2=2. 
" "This will be converted to --arg1=1 --arg2=2 during execution", ) @click.option("--ssh-port", type=int, default=None, help="(optional) the port used for ssh connection") @click.option("-m", type=str, default=None, help="run library module as a script (terminates option list)") @click.argument("user_script", type=str, required=False, default=None) @click.argument("user_args", nargs=-1) def run( host: str, hostfile: str, num_nodes: int, nproc_per_node: int, include: str, exclude: str, master_addr: str, master_port: int, extra_launch_args: str, ssh_port: int, m: str, user_script: str, user_args: tuple, ) -> None: """ To launch multiple processes on a single node or multiple nodes via command line. Usage:: # run with 4 GPUs on the current node use default port 29500 colossalai run --nprocs_per_node 4 train.py # run with 2 GPUs on the current node at port 29550 colossalai run --nprocs_per_node 4 --master_port 29550 train.py # run on two nodes colossalai run --host <host1>,<host2> --master_addr host1 --nprocs_per_node 4 train.py # run with hostfile colossalai run --hostfile <file_path> --master_addr <host> --nprocs_per_node 4 train.py # run with hostfile with only included hosts colossalai run --hostfile <file_path> --master_addr host1 --include host1,host2 --nprocs_per_node 4 train.py # run with hostfile excluding the hosts selected colossalai run --hostfile <file_path> --master_addr host1 --exclude host2 --nprocs_per_node 4 train.py """ if m is not None: if m.endswith(".py"): click.echo(f"Error: invalid Python module {m}. Did you use a wrong option? Try colossalai run --help") exit() if user_script is not None: user_args = (user_script,) + user_args user_script = m m = True else: if user_script is None: click.echo("Error: missing script argument. Did you use a wrong option? Try colossalai run --help") exit() if not user_script.endswith(".py"): click.echo( f"Error: invalid Python file {user_script}. Did you use a wrong option? 
Try colossalai run --help" ) exit() m = False args_dict = locals() args = Config(args_dict) args.user_args = list(args.user_args) launch_multi_processes(args)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cli/check/check_installation.py
colossalai/cli/check/check_installation.py
import subprocess import click import torch from torch.utils.cpp_extension import CUDA_HOME import colossalai def to_click_output(val): # installation check output to understandable symbols for readability VAL_TO_SYMBOL = {True: "\u2713", False: "x", None: "N/A"} if val in VAL_TO_SYMBOL: return VAL_TO_SYMBOL[val] else: return val def check_installation(): """ This function will check the installation of colossalai, specifically, the version compatibility of colossalai, pytorch and cuda. Example: ```text ``` Returns: A table of installation information. """ found_aot_cuda_ext = _check_aot_built_cuda_extension_installed() cuda_version = _check_cuda_version() torch_version, torch_cuda_version = _check_torch_version() colossalai_version, prebuilt_torch_version_required, prebuilt_cuda_version_required = _parse_colossalai_version() # if cuda_version is None, that means either # CUDA_HOME is not found, thus cannot compare the version compatibility if not cuda_version: sys_torch_cuda_compatibility = None else: sys_torch_cuda_compatibility = _is_compatible([cuda_version, torch_cuda_version]) # if cuda_version or cuda_version_required is None, that means either # CUDA_HOME is not found or AOT compilation is not enabled # thus, there is no need to compare the version compatibility at all if not cuda_version or not prebuilt_cuda_version_required: sys_colossalai_cuda_compatibility = None else: sys_colossalai_cuda_compatibility = _is_compatible([cuda_version, prebuilt_cuda_version_required]) # if torch_version_required is None, that means AOT compilation is not enabled # thus there is no need to compare the versions if prebuilt_torch_version_required is None: torch_compatibility = None else: torch_compatibility = _is_compatible([torch_version, prebuilt_torch_version_required]) click.echo(f"#### Installation Report ####") click.echo(f"\n------------ Environment ------------") click.echo(f"Colossal-AI version: {to_click_output(colossalai_version)}") click.echo(f"PyTorch version: 
{to_click_output(torch_version)}") click.echo(f"System CUDA version: {to_click_output(cuda_version)}") click.echo(f"CUDA version required by PyTorch: {to_click_output(torch_cuda_version)}") click.echo("") click.echo(f"Note:") click.echo(f"1. The table above checks the versions of the libraries/tools in the current environment") click.echo(f"2. If the System CUDA version is N/A, you can set the CUDA_HOME environment variable to locate it") click.echo( f"3. If the CUDA version required by PyTorch is N/A, you probably did not install a CUDA-compatible PyTorch. This value is give by torch.version.cuda and you can go to https://pytorch.org/get-started/locally/ to download the correct version." ) click.echo(f"\n------------ CUDA Extensions AOT Compilation ------------") click.echo(f"Found AOT CUDA Extension: {to_click_output(found_aot_cuda_ext)}") click.echo(f"PyTorch version used for AOT compilation: {to_click_output(prebuilt_torch_version_required)}") click.echo(f"CUDA version used for AOT compilation: {to_click_output(prebuilt_cuda_version_required)}") click.echo("") click.echo(f"Note:") click.echo( f"1. AOT (ahead-of-time) compilation of the CUDA kernels occurs during installation when the environment variable BUILD_EXT=1 is set" ) click.echo(f"2. If AOT compilation is not enabled, stay calm as the CUDA kernels can still be built during runtime") click.echo(f"\n------------ Compatibility ------------") click.echo(f"PyTorch version match: {to_click_output(torch_compatibility)}") click.echo(f"System and PyTorch CUDA version match: {to_click_output(sys_torch_cuda_compatibility)}") click.echo(f"System and Colossal-AI CUDA version match: {to_click_output(sys_colossalai_cuda_compatibility)}") click.echo(f"") click.echo(f"Note:") click.echo(f"1. 
The table above checks the version compatibility of the libraries/tools in the current environment") click.echo( f" - PyTorch version mismatch: whether the PyTorch version in the current environment is compatible with the PyTorch version used for AOT compilation" ) click.echo( f" - System and PyTorch CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version required by PyTorch" ) click.echo( f" - System and Colossal-AI CUDA version match: whether the CUDA version in the current environment is compatible with the CUDA version used for AOT compilation" ) def _is_compatible(versions): """ Compare the list of versions and return whether they are compatible. """ if None in versions: return False # split version into [major, minor, patch] versions = [version.split(".") for version in versions] for version in versions: if len(version) == 2: # x means unknown version.append("x") for idx, version_values in enumerate(zip(*versions)): equal = len(set(version_values)) == 1 if idx in [0, 1] and not equal: return False elif idx == 1: return True else: continue def _parse_colossalai_version(): """ Get the Colossal-AI version information. Returns: colossalai_version: Colossal-AI version. torch_version_for_aot_build: PyTorch version used for AOT compilation of CUDA kernels. cuda_version_for_aot_build: CUDA version used for AOT compilation of CUDA kernels. """ # colossalai version can be in two formats # 1. X.X.X+torchX.XXcuXX.X (when colossalai is installed with CUDA extensions) # 2. X.X.X (when colossalai is not installed with CUDA extensions) # where X represents an integer. 
colossalai_version = colossalai.__version__.split("+")[0] try: torch_version_for_aot_build = colossalai.__version__.split("torch")[1].split("cu")[0] cuda_version_for_aot_build = colossalai.__version__.split("cu")[1] except: torch_version_for_aot_build = None cuda_version_for_aot_build = None return colossalai_version, torch_version_for_aot_build, cuda_version_for_aot_build def _check_aot_built_cuda_extension_installed(): """ According to `op_builder/README.md`, the CUDA extension can be built with either AOT (ahead-of-time) or JIT (just-in-time) compilation. AOT compilation will build CUDA extensions to `colossalai._C` during installation. JIT (just-in-time) compilation will build CUDA extensions to `~/.cache/colossalai/torch_extensions` during runtime. """ try: found_aot_cuda_ext = True except ImportError: found_aot_cuda_ext = False return found_aot_cuda_ext def _check_torch_version(): """ Get the PyTorch version information. Returns: torch_version: PyTorch version. torch_cuda_version: CUDA version required by PyTorch. """ # get torch version # torch version can be of two formats # - 1.13.1+cu113 # - 1.13.1.devxxx torch_version = torch.__version__.split("+")[0] torch_version = ".".join(torch_version.split(".")[:3]) # get cuda version in pytorch build try: torch_cuda_major = torch.version.cuda.split(".")[0] torch_cuda_minor = torch.version.cuda.split(".")[1] torch_cuda_version = f"{torch_cuda_major}.{torch_cuda_minor}" except: torch_cuda_version = None return torch_version, torch_cuda_version def _check_cuda_version(): """ Get the CUDA version information. Returns: cuda_version: CUDA version found on the system. 
""" # get cuda version if CUDA_HOME is None: cuda_version = CUDA_HOME else: try: raw_output = subprocess.check_output([CUDA_HOME + "/bin/nvcc", "-V"], universal_newlines=True) output = raw_output.split() release_idx = output.index("release") + 1 release = output[release_idx].split(".") bare_metal_major = release[0] bare_metal_minor = release[1][0] cuda_version = f"{bare_metal_major}.{bare_metal_minor}" except: cuda_version = None return cuda_version
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/cli/check/__init__.py
colossalai/cli/check/__init__.py
import click from .check_installation import check_installation __all__ = ["check"] @click.command(help="Check if Colossal-AI is correct based on the given option") @click.option("-i", "--installation", is_flag=True, help="Check if Colossal-AI is built correctly") def check(installation): if installation: check_installation() return click.echo("No option is given")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/logit_processors.py
colossalai/inference/logit_processors.py
# This code is adapted from huggingface transformers: https://github.com/huggingface/transformers/blob/v4.36.2/src/transformers/generation/logits_process.py import logging from typing import List, Union import torch import torch.nn.functional as F _LOGITS_PROCESSOR_MAP = {} def register_logits_processor(process_type): """ register flops computation function for operation. """ def register(func): global _LOGITS_PROCESSOR_MAP _LOGITS_PROCESSOR_MAP[process_type] = func return func return register @register_logits_processor("no_repeat_ngram_size") def apply_no_repeat_ngram_size(logits, ngram_size: int, batch_token_ids: List[List[int]]): """ enforces no repetition of n-grams to avoid repetitions of word sequences. """ if not isinstance(ngram_size, int) or ngram_size < 0: raise ValueError(f"'temperature={ngram_size}' should be a strictly positive integer.") if ngram_size != 0: batch_size = len(batch_token_ids) for batch_id in range(batch_size): current_token_ids = batch_token_ids[batch_id] current_len = len(current_token_ids) if current_len + 1 < ngram_size: continue ngrams_dict = {} for ngram in zip(*[current_token_ids[i:] for i in range(ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) ngrams_dict[prev_ngram_tuple] = ngrams_dict.get(prev_ngram_tuple, []) + [ngram[-1]] prev_ngrams = tuple(current_token_ids[current_len + 1 - ngram_size : current_len]) banned_token = ngrams_dict.get(prev_ngrams, []) logits[batch_id, banned_token] = -float("inf") return logits @register_logits_processor("repetition_penalty") def apply_repetition_penalty(logits, penalty: float, batch_token_ids: List[List[int]]): """ apply the penalty to the tokens present in the prompt. """ if not isinstance(penalty, float) or not (penalty > 0): raise ValueError(f"'penalty={penalty}' has to be a strictly positive float and greater than 0.") logits_list = [] # TODO(yuehuayingxueluo) This is only a temporary implementation. 
Later, we will implement presence_penalties, frequency_penalties, and repetition_penalties using CUDA kernels. if penalty != 1.0: for batch_id in range(len(batch_token_ids)): current_logit = logits[batch_id] current_token = torch.tensor(batch_token_ids[batch_id], dtype=torch.long, device=logits.device) curretn_socre = torch.gather(current_logit, 0, current_token) curretn_socre = torch.where(curretn_socre < 0, curretn_socre * penalty, curretn_socre / penalty) logits_list.append(current_logit.scatter(0, current_token, curretn_socre)) logits = torch.stack(logits_list) return logits @register_logits_processor("temperature") def apply_temperature(logits, temperature: float): """ apply temperature scaling. """ if not isinstance(temperature, float) or not (0.0 < temperature <= 1.0): except_msg = f"'temperature={temperature}' should be a strictly positive float, less than or equal to 1.0 and greater than 0." if temperature == 0.0: except_msg += "if you want to use greedy decoding strategies, set `do_sample=False`." 
raise ValueError(except_msg) return logits if temperature == 1.0 else logits / temperature @register_logits_processor("top_k") def apply_top_k(logits, top_k: int): """ top_k logit processor """ if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` should be a strictly positive integer, but got {top_k}.") indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None] logits[indices_to_remove] = -float("inf") return logits @register_logits_processor("top_p") def apply_top_p(logits, top_p: float): """ top_p logit processor """ if top_p < 0 or top_p > 1.0: raise ValueError(f"`top_p` should be a float > 0 and < 1, but got {top_p}.") sorted_logits, sorted_indices = torch.sort(logits, descending=True) cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cumulative_probs > top_p sorted_indices_to_remove = torch.roll(sorted_indices_to_remove, 1, -1) sorted_indices_to_remove[..., 0] = 0 indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove) logits[indices_to_remove] = -float("inf") return logits @register_logits_processor("forced_eos_token_id") def apply_forced_eos_token_id( logits: torch.Tensor, sequence_lengths: Union[torch.Tensor, List[int]], max_lengths: Union[torch.Tensor, List[int]], eos_token_id: Union[int, List[int]], ): """ Enforces the specified token as the last generated token when the maximum output length is reached. Notice that the maximum output lengths for different sequences, even if they're in the same batch, can be different. 
Args: logits(torch.Tensor): logits sequence_lengths(torch.Tensor): sequence lengths including prompt and output tokens max_lengths(torch.Tensor): the maximum length for each sequence eos_token_id(Union[int, List[int]]): forced eos token id """ if isinstance(eos_token_id, int): eos_token_id = [eos_token_id] if isinstance(sequence_lengths, torch.Tensor): sequence_lengths = sequence_lengths.tolist() if isinstance(max_lengths, torch.Tensor): max_lengths = max_lengths.tolist() select_indexes = [] num_sequences = logits.shape[0] sequence_lengths = sequence_lengths[:num_sequences] max_lengths = max_lengths[:num_sequences] for i, (sequence_length, max_out_length) in enumerate(zip(sequence_lengths, max_lengths)): if sequence_length == max_out_length - 1: select_indexes.append(i) if select_indexes: logits[select_indexes, :] = -float("inf") logits[select_indexes, eos_token_id] = 0 return logits def get_logits_processor(processor: str, logits, *args, **kwargs): """ do logit process for given logits. Args: processor(str): the type of logit processor logits(torch.Tensor): input logits Returns: logits after process """ if processor not in _LOGITS_PROCESSOR_MAP: logging.warning(f"Unsupported processor {processor}. Fall back to the original logits.") else: func = _LOGITS_PROCESSOR_MAP[processor] logits = func(logits, *args, **kwargs) return logits
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/struct.py
colossalai/inference/struct.py
import enum from dataclasses import dataclass from typing import Any, List from colossalai.inference.config import DiffusionGenerationConfig from colossalai.logging import get_dist_logger logger = get_dist_logger(__name__) """ The abstraction of request and sequence are defined here. """ class RequestStatus(enum.Enum): """ The status of Sentences """ # running status WAITING = enum.auto() RUNNING = enum.auto() ABORTED = enum.auto() # completion status OVERLENGTH = enum.auto() COMPLETED = enum.auto() LENGTH_CAPPED = enum.auto() # recycle status RECYCLED = enum.auto() @staticmethod def is_finished(status: "RequestStatus") -> bool: return status in [ RequestStatus.OVERLENGTH, RequestStatus.COMPLETED, RequestStatus.LENGTH_CAPPED, ] @staticmethod def is_running(status: "RequestStatus") -> bool: return status == RequestStatus.RUNNING @staticmethod def is_waiting(status: "RequestStatus") -> bool: return status == RequestStatus.WAITING @dataclass class DiffusionSequence: """ parameters for diffusion """ request_id: int prompt: str generation_config: DiffusionGenerationConfig @dataclass class Sequence: """Store information of input sequence. Args: request_id (int): The ID of input sequence. prompt (str): The prompt of input sequence. input_token_id (List[int]): The tokens ID of input sequence. block_size (int): The block size of input sequence. sample_params (SampleParams): The sample_params of input sequence. block_table (torch.Tensor): The index of input sequence in block_table. eos_token_id (int): The eos token id for this inference process. pad_token_id (int): The pad token id for this inference process. max_output_len (int): Maximum output length. ignore_eos(bool): Whether to ignore the EOS token and continue generating tokens when encountering the EOS token. output(str): The output of sequence """ request_id: int prompt: str input_token_id: List[int] block_size: int sample_params: Any # SampleParams needs to be imported later. 
eos_token_id: int pad_token_id: int max_output_len: int = 256 # NOTE(caidi) This is a temporary solution. It's better to move the logic to turn on or off the flag in sampling module in future. ignore_eos: bool = False output: str = None def __post_init__(self): self.output_token_id = [] self.status = RequestStatus.WAITING @property def sentence_len(self) -> int: """ Get length of current sentence. """ return len(self.input_token_id) + len(self.output_token_id) @property def input_len(self) -> int: """ Get length of input sentence. """ return len(self.input_token_id) @property def output_len(self) -> int: """ Get length of output sentence. """ return len(self.output_token_id) def check_finish(self) -> bool: """ Check whether the inference is finished. Returns: bool: Whether the inference is finished. """ if RequestStatus.is_finished(self.status): return True if self.output_token_id: if ( self.output_token_id[-1] == self.eos_token_id and not self.ignore_eos ) or self.output_len >= self.max_output_len: self.status = RequestStatus.COMPLETED return True return False def revoke_finished_status(self) -> None: """ Revoke the finished status of the sequence. This is only used by speculative decoding for now. """ if RequestStatus.is_finished(self.status): self.status = RequestStatus.RUNNING def __hash__(self): return hash(self.request_id) def mark_running(self) -> None: """ Set status for prefill reqs. """ assert ( self.status == RequestStatus.WAITING or RequestStatus.RECYCLED ), "Sequence is not in WAITTING/RECYCLED STATUS" self.status = RequestStatus.RUNNING def mark_finished(self) -> None: """ Set status for finished reqs. """ self.status = RequestStatus.COMPLETED def mark_aborted(self) -> None: """ Set status for aborted reqs. 
""" self.status = RequestStatus.ABORTED def recycle(self) -> None: """ Recycle a running sequnce to waiitting list """ assert ( not self.check_finish() and not self.status == RequestStatus.ABORTED ), "The running sequence \ is already done but it still in running list" self.status = RequestStatus.RECYCLED def __repr__(self) -> str: return ( f"(request_id={self.request_id}, " f"prompt={self.prompt},\n" f"output_token_id={self.output_token_id},\n" f"output={self.output},\n" f"status={self.status.name},\n" f"sample_params={self.sample_params},\n" f"input_len={self.input_len},\n" f"output_len={self.output_len})\n" ) def _pad_to_max(x: List[int], max_len: int, pad: int) -> List[int]: assert len(x) <= max_len return [pad] * (max_len - len(x)) + x
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/sampler.py
colossalai/inference/sampler.py
from typing import List, Optional, Tuple, Union import torch from transformers.generation import GenerationConfig from colossalai.inference.logit_processors import get_logits_processor def greedy_sample( logprobs: torch.Tensor, ) -> torch.Tensor: """ Sample tokens greedyly. """ results = torch.argmax(logprobs, dim=-1) return results def multinomial_sample( probs: torch.Tensor, ) -> torch.Tensor: """ Sample tokens in a random phase. """ random_results = torch.multinomial(probs, num_samples=1).squeeze(1) return random_results def beam_search_sample( beam_width: int, logprobs: torch.Tensor, is_prompt: bool = False, ) -> List[Tuple[List[int], List[int]]]: """ Sample tokens with beam search. We sample 2 * beam_width candidates to make sure that with high probability we can get `beam_width` candidates in addition to the finished sequences for the next iteration. ref: https://github.com/tensorflow/tensor2tensor/blob/bafdc1b67730430d38d6ab802cbd51f9d053ba2e/tensor2tensor/utils/beam_search.py#L557-L563 for details. See also HF reference: https://github.com/huggingface/transformers/blob/a4dd53d88e4852f023332d284ff07a01afcd5681/src/transformers/generation/utils.py#L3063-L3065 # NOTE: this beam search sample function is wrong now. """ results = [] if is_prompt: # Prompt phase. parent_ids = [0] * (2 * beam_width) _, next_token_ids = torch.topk(logprobs[0], 2 * beam_width) next_token_ids = next_token_ids.tolist() else: # Generation phase. 
# cumulative_logprobs = [seq_data[seq_id].cumulative_logprob for seq_id in seq_ids] cumulative_logprobs = torch.tensor(logprobs, dtype=torch.float, device=seq_group_logprobs.device) seq_group_logprobs = seq_group_logprobs + cumulative_logprobs.unsqueeze(dim=1) _, topk_ids = torch.topk(logprobs.flatten(), 2 * beam_width) results.append((next_token_ids, parent_ids)) return results def search_tokens( generation_config: Union[GenerationConfig, dict], logits, is_prompt: bool = False, batch_token_ids: Optional[List[List[int]]] = None, ): """ Sample tokens for finished requests. """ # NOTE: need to decide the granularity to process logits (sequence or batch) # convert GenerationConfig to dict # temporary fix for compatibility with the usage of RPCInferenceEngine if isinstance(generation_config, GenerationConfig): generation_config = generation_config.to_dict() if (repetition_penalty := generation_config.get("repetition_penalty", 1.0)) != 1.0: logits = get_logits_processor("repetition_penalty", logits, repetition_penalty, batch_token_ids) if (no_repeat_ngram_size := generation_config.get("no_repeat_ngram_size", 0)) > 0: logits = get_logits_processor("no_repeat_ngram_size", logits, no_repeat_ngram_size, batch_token_ids) if (forced_eos_token_id := generation_config.get("forced_eos_token_id", None)) is not None: sequence_lengths = [len(batch_token_ids[i]) for i in range(len(batch_token_ids))] max_out_lengths = [generation_config.max_length for _ in range(len(batch_token_ids))] logits = get_logits_processor( "forced_eos_token_id", logits, sequence_lengths, max_out_lengths, forced_eos_token_id ) if generation_config.get("do_sample"): if (temperature := generation_config.get("temperature", 1.0)) != 1.0: logits = get_logits_processor("temperature", logits, temperature) if (top_k := generation_config.get("top_k", 0)) != 0: logits = get_logits_processor("top_k", logits, top_k) if (top_p := generation_config.get("top_p", 1.0)) < 1.0: logits = get_logits_processor("top_p", logits, 
top_p) # calculate probs probs = torch.softmax(logits, dim=-1, dtype=torch.float) logprobs = torch.log_softmax(logits, dim=-1, dtype=torch.float) # sample the next tokens if generation_config.get("num_beams", 1) != 1: raise NotImplementedError("Beam search is not supported yet.") if generation_config.get("do_sample", False): sample_tokens = multinomial_sample(probs) else: sample_tokens = greedy_sample(logprobs) return sample_tokens
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/graph_runner.py
colossalai/inference/graph_runner.py
from typing import Dict, List

import torch
from torch import nn

from colossalai.inference.config import InputMetaData
from colossalai.logging import get_dist_logger


class CUDAGraphRunner:
    """Captures one decoding forward pass of ``model`` into a CUDA graph and
    replays it on subsequent calls.

    Replay reuses the exact virtual-memory addresses recorded at capture time,
    so every later call must copy its inputs into the saved ``input_buffers``
    and read results out of the saved ``output_buffers`` — the tensors passed
    to ``forward`` are never used directly by the graph.
    """

    def __init__(self, model: nn.Module):
        self.model = model
        # The captured torch.cuda.CUDAGraph; None until capture() has run.
        self.graph = None
        # Tensors whose addresses were baked into the graph at capture time.
        self.input_buffers: Dict[str, torch.Tensor] = {}
        self.output_buffers: Dict[str, torch.Tensor] = {}
        self.logger = get_dist_logger(__name__)

    def capture(
        self,
        input_tokens_ids: torch.Tensor,
        output_tensor: torch.Tensor,
        inputmetadata: InputMetaData,
        k_caches: List[torch.Tensor] = None,
        v_caches: List[torch.Tensor] = None,
        memory_pool=None,
    ) -> None:
        """Record a forward pass into a CUDA graph.

        Must be called exactly once per runner (asserted below). ``memory_pool``
        may be shared between runners so captures reuse one memory pool.
        """
        assert self.graph is None
        # run kernel once to cache the kernel, avoid stream capture error
        # (the warm-up output itself is discarded)
        hidden_states_origin_model = self.model(
            input_tokens_ids,
            output_tensor,
            inputmetadata,
            k_caches,
            v_caches,
        )
        torch.cuda.synchronize()

        # Capture the graph.
        # self.logger.info(f"begin capture model...")
        self.graph = torch.cuda.CUDAGraph()
        with torch.cuda.graph(self.graph, pool=memory_pool):
            hidden_states_cuda_graph = self.model(
                input_tokens_ids,
                output_tensor,
                inputmetadata,
                k_caches,
                v_caches,
            )
        torch.cuda.synchronize()

        # Save the input and output buffers, because replay always uses the same virtual memory space
        self.input_buffers = {
            "input_tokens_ids": input_tokens_ids,
            "output_tensor": output_tensor,
            "block_tables": inputmetadata.block_tables,
            "sequence_lengths": inputmetadata.sequence_lengths,
            # "fd_inter_tensor_mid_output": inputmetadata.fd_inter_tensor._mid_output,
            # "fd_inter_tensor_mid_output_lse": inputmetadata.fd_inter_tensor._mid_output_lse,
            "k_caches": k_caches,
            "v_caches": v_caches,
        }
        self.output_buffers = {"logits": hidden_states_cuda_graph}
        return

    def forward(
        self,
        input_tokens_ids: torch.Tensor,
        output_tensor: torch.Tensor,
        inputmetadata: InputMetaData,
        k_caches: List[torch.Tensor] = None,
        v_caches: List[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Copy the step's inputs into the captured buffers, replay the graph,
        and return the logits tensor recorded at capture time.

        NOTE(review): assumes the current batch fits inside the captured buffer
        shapes (block_tables is padded with -1 up to the captured shape).
        """
        # Copy the input tensors to the input buffers.
        self.input_buffers["input_tokens_ids"].copy_(input_tokens_ids, non_blocking=True)
        self.input_buffers["output_tensor"].copy_(output_tensor, non_blocking=True)

        # for flexible block_table
        self.input_buffers["block_tables"].fill_(-1)
        M, N = inputmetadata.block_tables.shape
        self.input_buffers["block_tables"][:M, :N].copy_(inputmetadata.block_tables, non_blocking=True)

        self.input_buffers["sequence_lengths"].copy_(inputmetadata.sequence_lengths, non_blocking=True)

        # we only have a global fd_inter_tensor so we don't need to copy them
        # self.input_buffers["fd_inter_tensor_mid_output"].copy_(inputmetadata.fd_inter_tensor.mid_output, non_blocking=True)
        # self.input_buffers["fd_inter_tensor_mid_output_lse"].copy_(inputmetadata.fd_inter_tensor.mid_output_lse, non_blocking=True)

        # KV caches are fixed tensors, so we don't need to copy them.
        # self.input_buffers["k_caches"].copy_(k_caches, non_blocking=True)
        # self.input_buffers["v_caches"].copy_(v_caches, non_blocking=True)

        # Run the graph.
        self.graph.replay()

        # Return the output tensor.
        return self.output_buffers["logits"]

    def __call__(self, *args, **kwargs):
        return self.forward(*args, **kwargs)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/flash_decoding_utils.py
colossalai/inference/flash_decoding_utils.py
import torch

from colossalai.context.singleton_meta import SingletonMeta
from colossalai.utils import get_current_device


class FDIntermTensors(metaclass=SingletonMeta):
    """Singleton class to hold tensors used for storing intermediate values in flash-decoding.
    For now, it holds intermediate output and logsumexp (which will be used in reduction step along kv)
    """

    def __init__(self):
        self._tensors_initialized = False

    def _reset(self):
        """Mark the singleton uninitialized and drop any allocated tensors.

        BUGFIX: the original unconditionally ``del``-ed the four tensor
        attributes, which raised AttributeError when ``_reset`` was called
        before ``initialize`` had ever run; delete defensively instead.
        """
        self._tensors_initialized = False
        for name in ("_mid_output", "_mid_output_lse", "_exp_sums", "_max_logits"):
            if hasattr(self, name):
                delattr(self, name)

    @property
    def is_initialized(self):
        # True once initialize() has allocated the intermediate tensors.
        return self._tensors_initialized

    @property
    def mid_output(self):
        assert self.is_initialized, "Intermediate tensors not initialized yet"
        return self._mid_output

    @property
    def mid_output_lse(self):
        assert self.is_initialized, "Intermediate tensors not initialized yet"
        return self._mid_output_lse

    @property
    def exp_sums(self):
        assert self.is_initialized, "Intermediate tensors not initialized yet"
        return self._exp_sums

    @property
    def max_logits(self):
        assert self.is_initialized, "Intermediate tensors not initialized yet"
        return self._max_logits

    def initialize(
        self,
        max_batch_size: int,
        num_attn_heads: int,
        kv_max_split_num: int,
        head_dim: int,
        dtype: torch.dtype = torch.float32,
        device: torch.device = None,
    ) -> None:
        """Initialize tensors.

        Args:
            max_batch_size (int): The maximum batch size over all the model forward.
                This could be greater than the batch size in attention forward func when using dynamic batch size.
            num_attn_heads (int): Number of attention heads.
            kv_max_split_num (int): The maximum number of blocks splitted on kv in flash-decoding algorithm.
                **The maximum length/size of blocks splitted on kv should be the kv cache block size.**
            head_dim (int): Head dimension.
            dtype (torch.dtype, optional): Data type to be assigned to intermediate tensors.
            device (torch.device, optional): Device used to initialize intermediate tensors.
                Defaults to the current device at call time.
        """
        assert not self.is_initialized, "Intermediate tensors used for Flash-Decoding have been initialized."

        # BUGFIX: the original signature used ``device=get_current_device()``,
        # which is evaluated once at import time and silently pins the device
        # chosen when this module was first imported. Resolve it per call.
        if device is None:
            device = get_current_device()

        self._mid_output = torch.empty(
            size=(max_batch_size, num_attn_heads, kv_max_split_num, head_dim), dtype=dtype, device=device
        )
        self._mid_output_lse = torch.empty(
            size=(max_batch_size, num_attn_heads, kv_max_split_num), dtype=dtype, device=device
        )
        self._exp_sums = torch.empty(
            size=(max_batch_size, num_attn_heads, kv_max_split_num), dtype=dtype, device=device
        )
        self._max_logits = torch.empty(
            size=(max_batch_size, num_attn_heads, kv_max_split_num), dtype=dtype, device=device
        )
        self._tensors_initialized = True
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/utils.py
colossalai/inference/utils.py
""" Utils for model inference """ import math import os import re from enum import Enum from pathlib import Path from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline from torch import nn from colossalai.logging import get_dist_logger from colossalai.testing import free_port logger = get_dist_logger(__name__) def init_to_get_rotary(self, base=10000, use_elem=False): """ This function initializes the rotary positional embedding, it is compatible for all models and is called in ShardFormer Args: self : Model that holds the rotary positional embedding base : calculation arg use_elem : activated when using chatglm-based models """ self.config.head_dim_ = self.config.hidden_size // self.config.num_attention_heads if not hasattr(self.config, "rope_scaling"): rope_scaling_factor = 1.0 else: rope_scaling_factor = self.config.rope_scaling.factor if self.config.rope_scaling is not None else 1.0 if hasattr(self.config, "max_sequence_length"): max_seq_len = self.config.max_sequence_length elif hasattr(self.config, "max_position_embeddings"): max_seq_len = self.config.max_position_embeddings * rope_scaling_factor else: max_seq_len = 2048 * rope_scaling_factor base = float(base) # NTK ref: https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ ntk_alpha = os.environ.get("INFER_NTK_ALPHA", None) if ntk_alpha is not None: ntk_alpha = float(ntk_alpha) assert ntk_alpha >= 1, "NTK alpha must be greater than or equal to 1" if ntk_alpha > 1: print(f"Note: NTK enabled, alpha set to {ntk_alpha}") max_seq_len *= ntk_alpha base = base * (ntk_alpha ** (self.head_dim_ / (self.head_dim_ - 2))) # Base change formula n_elem = self.config.head_dim_ if use_elem: n_elem //= 2 inv_freq = 1.0 / (base ** (torch.arange(0, n_elem, 2, device="cpu", dtype=torch.float32) / n_elem)) t = torch.arange(max_seq_len + 1024 * 64, device="cpu", dtype=torch.float32) / rope_scaling_factor freqs = torch.outer(t, inv_freq) 
self._cos_cached = torch.cos(freqs).to(self.dtype).cuda() self._sin_cached = torch.sin(freqs).to(self.dtype).cuda() def has_index_file(checkpoint_path: str) -> Tuple[bool, Optional[Path]]: """ Check whether the checkpoint has an index file. Args: checkpoint_path (str): path to the checkpoint. Returns: Tuple[bool, Optional[Path]]: a tuple of (has_index_file, index_file_path) """ checkpoint_path = Path(checkpoint_path) if checkpoint_path.is_file(): # check if it is .index.json reg = re.compile("(.*?).index((\..*)?).json") if reg.fullmatch(checkpoint_path.name) is not None: return True, checkpoint_path else: return False, None elif checkpoint_path.is_dir(): index_files = list(checkpoint_path.glob("*.index.*json")) for index_file in index_files: if "safetensors" in index_file.__str__(): return True, index_file.__str__() # return the safetensors file first if len(index_files) == 1: return True, index_files[0] else: assert ( len(index_files) == 1 ), f"Expected to find one .index.json file in {checkpoint_path}, but found {len(index_files)}" return False, None else: raise RuntimeError(f"Invalid checkpoint path {checkpoint_path}. Expected a file or a directory.") def get_model_size(model: nn.Module): """Calculates the total size of the model weights (including biases) in bytes. Args: model: The PyTorch model to analyze. Returns: The total size of the model weights in bytes. 
""" total_size = 0 for key, param in model.named_parameters(): total_size += param.element_size() * param.numel() return total_size / (1024**3) def find_available_ports(num: int): try: free_ports = [free_port() for i in range(num)] except OSError as e: print(f"An OS error occurred: {e}") raise RuntimeError("Error finding available ports") return free_ports def get_alibi_slopes(num_heads: int, device: torch.device) -> torch.Tensor: """ Alibi slopes calculation adapted from https://github.com/huggingface/transformers/blob/v4.36.0/src/transformers/models/bloom/modeling_bloom.py#L57 Args: num_heads (int): The number of attention heads. device (torch.device): The device to use. Returns: torch.Tensor: The Alibi slopes. """ closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor(2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), dtype=torch.float32, device=device) powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32, device=device) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor( 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), dtype=torch.float32, device=device ) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, dtype=torch.int32, device=device) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) return slopes def can_use_flash_attn2(dtype: torch.dtype) -> bool: """ Check flash attention2 availability. 
""" if dtype not in (torch.float16, torch.bfloat16): return False try: from flash_attn import flash_attn_varlen_func # noqa return True except ImportError: logger.warning(f"flash_attn2 has not been installed yet, we will use triton flash attn instead.") return False class ModelType(Enum): DIFFUSION_MODEL = "Diffusion Model" LLM = "Large Language Model (LLM)" UNKNOWN = "Unknown Model Type" def get_model_type(model_or_path: Union[nn.Module, str, DiffusionPipeline]): if isinstance(model_or_path, DiffusionPipeline): return ModelType.DIFFUSION_MODEL elif isinstance(model_or_path, nn.Module): return ModelType.LLM elif isinstance(model_or_path, str): try: from transformers import AutoConfig hf_config = AutoConfig.from_pretrained(model_or_path, trust_remote_code=True) return ModelType.LLM except: """ model type is not `ModelType.LLM` """ try: DiffusionPipeline.load_config(model_or_path) return ModelType.DIFFUSION_MODEL except: """ model type is not `ModelType.DIFFUSION_MODEL` """ else: return ModelType.UNKNOWN
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/config.py
colossalai/inference/config.py
""" Our config contains various options for inference optimization, it is a unified API that wraps all the configurations for inference. """ import logging from abc import ABC, abstractmethod from dataclasses import dataclass, fields from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers.generation import GenerationConfig from colossalai.inference.flash_decoding_utils import FDIntermTensors from colossalai.inference.utils import can_use_flash_attn2 GibiByte = 1024**3 logger = logging.Logger(__name__) _DTYPE_MAPPING = { "fp16": torch.float16, "bf16": torch.bfloat16, "fp32": torch.float32, } _ALLOWED_DTYPES = [torch.float16, torch.bfloat16, torch.float32] _DEFAULT_PROMPT_TEMPLATES = { "llama": "[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\n{input_text}[/INST]", "baichuan": " <reserved_106> {input_text} <reserved_107> ", "vicuna": "A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user input. USER: {input_text}\nASSISTANT: ", } class RPC_PARAM(ABC): """ NOTE(lry89757) We use rpyc to transport param between client and server. Rpyc only support the type of `POD` in python as the param, so we should take some smart ways to transport the data like tensor or some sophisticated classes. Drawing on the logic of `__setstate__`, `__getstate__`, we will let some classes(will be rpc param later) inherit this base class, and rewrite the to_rpc_param and from_rpc_param. 
We will invoke `to_rpc_param` in client to pass the params and recover the param in server side by `from_rpc_param`. """ @abstractmethod def to_rpc_param(self): return NotImplementedError @staticmethod @abstractmethod def from_rpc_param(): return NotImplementedError @dataclass class InputMetaData(RPC_PARAM): """The input info for a single step Args: block_tables (torch.Tensor, optional): Sequences' BlockTables Defaults to None. sequence_lengths (torch.Tensor): A tensor containing sequence lengths. fd_inter_tensor (torch.Tensor, optional): A tensor representing intermediate data for flash decoding. Defaults to None. batch_size (int, optional): The current batch size. Defaults to 64. is_prompts (bool, optional): Indicates whether prefill or decoding. Defaults to False(decoding). use_cuda_kernel(bool): Whether to use cuda kernel, faster but lose some precision occasionally use_cuda_graph (bool, optional): Indicates whether to use the CUDA graph. Defaults to False. kv_seq_len (int, optional): Key-value sequence length. Defaults to 512. head_dim (int, optional): Head dimension. Defaults to 32. high_precision(bool, optional): Whether to use float32 for underlying calculations of float16 data to achieve higher precision, Defaults to False. dtype (torch.dtype, optional): The computation type of tensor, Defaults to torch.float32. use_spec_dec (bool): Indicate whether to use speculative decoding. num_tokens_to_verify (int): The number of tokens to verify in speculative decoding. Only valid when `use_spec_dec` is set to True. batch_token_ids (List[List[int]], optional): input_token_ids + output_token_ids of current batch. Only used for `repetition_penalty`, `no_repeat_ngram_size` in sampler process. 
""" block_tables: torch.Tensor = None sequence_lengths: torch.Tensor = None fd_inter_tensor: FDIntermTensors = None batch_size: int = 64 # current_batch_size is_prompts: bool = False use_cuda_kernel: bool = False use_cuda_graph: bool = False kv_seq_len: int = 512 head_dim: int = 32 high_precision: bool = False dtype: torch.dtype = torch.float32 use_spec_dec: bool = False num_tokens_to_verify: int = 0 batch_token_ids: Optional[List[List[int]]] = ( None # for `repetition_penalty`, `no_repeat_ngram_size` in sampler process ) def to_rpc_param(self) -> Dict[str, any]: return { "block_tables": self.block_tables.tolist(), "sequence_lengths": self.sequence_lengths.tolist(), "batch_size": self.batch_size, "is_prompts": self.is_prompts, "use_cuda_kernel": self.use_cuda_kernel, "use_cuda_graph": self.use_cuda_graph, "kv_seq_len": self.kv_seq_len, "head_dim": self.head_dim, "high_precision": self.high_precision, "dtype": str(self.dtype).split(".")[-1], "use_spec_dec": self.use_spec_dec, "num_tokens_to_verify": self.num_tokens_to_verify, "batch_token_ids": self.batch_token_ids, } @staticmethod def from_rpc_param(rpc_dict: Dict[str, any]) -> "InputMetaData": """ We intentionally don't use `dict.get` method to ensure we pass the right rpc param, or program will show error message """ from colossalai.accelerator import get_accelerator dtype = getattr(torch, rpc_dict["dtype"]) return InputMetaData( block_tables=torch.tensor( rpc_dict["block_tables"], dtype=torch.int, device=get_accelerator().get_current_device() ), sequence_lengths=torch.tensor( rpc_dict["sequence_lengths"], dtype=torch.int, device=get_accelerator().get_current_device() ), batch_size=rpc_dict["batch_size"], is_prompts=rpc_dict["is_prompts"], use_cuda_kernel=rpc_dict["use_cuda_kernel"], use_cuda_graph=rpc_dict["use_cuda_graph"], kv_seq_len=rpc_dict["kv_seq_len"], head_dim=rpc_dict["head_dim"], high_precision=rpc_dict["high_precision"], dtype=dtype, use_spec_dec=rpc_dict["use_spec_dec"], 
num_tokens_to_verify=rpc_dict["num_tokens_to_verify"], batch_token_ids=rpc_dict["batch_token_ids"], ) def __repr__(self) -> str: return ( f"InputMetaData(block_tables={self.block_tables}, " f"sequence_lengths={self.sequence_lengths}, " f"fd_inter_tensor={self.fd_inter_tensor}, " f"batch_size={self.batch_size}, " f"is_prompts={self.is_prompts}, " f"use_cuda_kernel={self.use_cuda_kernel}, " f"use_cuda_graph={self.use_cuda_graph}, " f"kv_seq_len={self.kv_seq_len}, " f"use_spec_dec={self.use_spec_dec}, " f"num_tokens_to_verify={self.num_tokens_to_verify})" ) @dataclass class InferenceConfig(RPC_PARAM): """The inference configuration. Args: max_batch_size (int): Maximum batch size, defaults to 8. max_output_len (int): Maximum output length, defaults to 256. max_input_len (int): Maximum input length, defaults to 256. dtype (Union[str, torch.dtype]): The data type for weights and activations. kv_cache_dtype (Optional[str]): The data type of kv_cache, defaults to None. prompt_template (Optional[str]): The prompt template for generation, defaults to None. do_sample (bool): Whether to use sampling for generation, defaults to False. beam_width (int): The maximum beam width used to initialize KV Cache, defaults to 1. During generation, the beam width provided as sampling parameter should be less than or equivalent to this value. prefill_ratio (Optional[float]): A controling ratio for prefill and decoding in running list, defaults to 1.2. We will do a step of prefill when the actual value exceeds this ratio. pad_input: Whether to pad all inputs to the max length. early_stopping (Optional[bool]): Whether to stop the generation when all beam hypotheses have finished or not, defaults to False. top_k (Optional[int]): The number of highest probability vocabulary tokens to keep for top-k-filtering, defaults to None. top_p (Optional[float]): The cumulative probability threshold for retaining tokens with a total probability above it, defaults to None. 
temperature (Optional[float]): Randomness used to control randomization, defaults to 1.0. no_repeat_ngram_size (Optional[int]): If no_repeat_ngram_size > 0, the consecutive tokens of ngram size can only appear once in inference sentences. repetition_penalty (Optional[float]): The parameter that influences the model's treatment of new tokens in relation to their appearance in the prompt and the generated text. Values greater than 1 incentivize the model to introduce new tokens, whereas values less than 1 incentivize token repetition., defaults to 1.0. ignore_eos(bool): Whether to ignore the EOS token and continue generating tokens when encountering the EOS token. use_spec_dec (bool): Indicate whether to use speculative decoding, defaults to False. max_n_spec_tokens (int): The maximum number of speculating tokens, defaults to None. glimpse_large_kv (bool): Whether to use large KV in drafter model, defaults to False. block_size (int): The number of blocks in a logical block, defaults to 16. tp_size (int): Tensor parallel size, defaults to 1. pp_size (int): Pipeline parallel size, defaults to 1. micro_batch_size (int): the micro batch size, defaults to 1. Only useful when `pp_size` > 1. micro_batch_buffer_size (int): the buffer size for micro batch. Normally, it should be the same as the number of pipeline stages. use_cuda_kernel(bool): Whether to use cuda kernel, faster but lose some precision occasionally high_precision(Optional[bool]): Whether to use float32 for underlying calculations of float16 data to achieve higher precision, defaults to False. use_cuda_graph (bool): Whether to enforce CUDA graph execution. If False, we will disable CUDA graph and always execute the model in eager mode. If True, we will use eager execution in hybrid. 
max_context_len_to_capture (int): max context len that could be captured by CUDA Graph, per sequence enable_streamingllm(bool): Whether to use StreamingLLM, the relevant algorithms refer to the paper at https://arxiv.org/pdf/2309.17453 for implementation. start_token_size(int): The size of the start tokens, when using StreamingLLM. generated_token_size(int): The size of the generated tokens, When using StreamingLLM. patched_parallelism_size(int): Patched Parallelism Size, When using Distrifusion """ # NOTE: arrange configs according to their importance and frequency of usage # runtime limit max_batch_size: int = 8 max_output_len: int = 256 max_input_len: int = 256 # general configs dtype: Union[str, torch.dtype] = torch.float16 # use fp16 by default kv_cache_dtype: Optional[str] = None # generation configs prompt_template: Optional[str] = None do_sample: bool = False beam_width: int = 1 # TODO: beam search is not support for now prefill_ratio: Optional[float] = ( 1.2 # the ratio of prefill sequences to decoding sequences, we do prefill step once the actual value exceeds ratio ) pad_input: bool = False early_stopping: Optional[bool] = False top_k: Optional[int] = 50 top_p: Optional[float] = 1.0 temperature: Optional[float] = 1.0 no_repeat_ngram_size: Optional[int] = 0 repetition_penalty: Optional[float] = 1.0 forced_eos_token_id: int = None ignore_eos: bool = False # speculative decoding configs use_spec_dec: bool = False max_n_spec_tokens: int = 5 glimpse_large_kv: bool = False # paged attention configs block_size: int = 16 # model parallelism configs tp_size: int = 1 pp_size: int = 1 micro_batch_size: int = 1 micro_batch_buffer_size: int = None # cuda kernel option use_cuda_kernel: bool = False high_precision: Optional[bool] = False # cuda_graph use_cuda_graph: bool = ( False # NOTE only when we have the graph for specific decoding batch size can we use the cuda graph for inference ) max_context_len_to_capture: int = 512 # StreamingLLM (sliding window attention 
with attention sinks) enable_streamingllm: bool = False start_token_size: int = 4 generated_token_size: int = 512 # Acceleration for Diffusion Model(PipeFusion or Distrifusion) patched_parallelism_size: int = 1 # for distrifusion # pipeFusion_m_size: int = 1 # for pipefusion # pipeFusion_n_size: int = 1 # for pipefusion def __post_init__(self): self.max_context_len_to_capture = self.max_input_len + self.max_output_len self._verify_config() def _verify_config(self) -> None: """ Verify the input config """ # check dtype if isinstance(self.dtype, str): # convert string dtype to torch dtype assert ( self.dtype in _DTYPE_MAPPING ), f"Expected the dtype string argument to be in {list(_DTYPE_MAPPING.keys())} but found an unknown dtype: {self.dtype}" self.dtype = _DTYPE_MAPPING[self.dtype] assert ( self.dtype in _ALLOWED_DTYPES ), f"Expected dtype to be in {_ALLOWED_DTYPES} but found an unknown dtype: {self.dtype}" if self.kv_cache_dtype: assert ( self.use_cuda_kernel and self.kv_cache_dtype == "fp8" ), f"FP8 kv_cache is only supported with use_cuda_kernel open now" self.kv_cache_dtype = torch.uint8 # skip using casting when the data type is float32 if self.dtype == torch.float32: self.high_precision = False # check StreamingLLM assert ( self.start_token_size <= self.block_size ), f"According to the paper https://arxiv.org/pdf/2309.17453, the start_token_size greater than 4 has little impact on inference performance. Therefore, we assume that the start_token_size should be less or equal than the block_size={self.block_size}, but got {self.start_token_size}." assert ( self.generated_token_size % self.block_size == 0 ), f"We assume that the generated_token_size should be a multiple of the block_size, got generated_token_size={self.generated_token_size}." # Our StreamingLLM implementation (sliding window attention with attention sinks) references https://arxiv.org/pdf/2309.17453 and has been optimized # based on our framework's kvcache management mechanism. 
According to the paper, a start_token_size of 4 is sufficient. Therefore, # we assume the start_token_size is less than or equal to the block size. When the start_token_size is smaller than the block size, # we fill the first block with the start_token_size and subsequently generated tokens, using these as the "start tokens." # Thereafter, we swap out tokens in units of blocks, and always swapping out the second block when the generated tokens exceeded the limit. self.start_token_size = self.block_size # check Distrifusion # TODO(@lry89757) need more detailed check if self.patched_parallelism_size > 1: # self.use_patched_parallelism = True self.tp_size = ( self.patched_parallelism_size ) # this is not a real tp, because some annoying check, so we have to set this to patched_parallelism_size # check prompt template if self.prompt_template is None: return if self.prompt_template in _DEFAULT_PROMPT_TEMPLATES: self.prompt_template = _DEFAULT_PROMPT_TEMPLATES[self.prompt_template] else: # make sure the template can be formatted with input_text assert ( "{input_text}" in self.prompt_template ), "The prompt template should contain '{input_text}' for formatting the input text. 
For example: 'USER: {input_text}\n\nASSISTANT: '" def to_generation_config(self, model_config) -> GenerationConfig: meta_config = { "max_length": self.max_input_len + self.max_output_len, "max_new_tokens": self.max_output_len, "early_stopping": self.early_stopping, "do_sample": self.do_sample, "num_beams": self.beam_width, } for type in ["repetition_penalty", "no_repeat_ngram_size", "temperature", "top_k", "top_p"]: if hasattr(self, type): meta_config[type] = getattr(self, type) for type in ["pad_token_id", "bos_token_id", "eos_token_id"]: if hasattr(model_config, type): meta_config[type] = getattr(model_config, type) return GenerationConfig.from_dict(meta_config) def to_model_shard_inference_config(self) -> "ModelShardInferenceConfig": use_flash_attn = can_use_flash_attn2(self.dtype) model_inference_config = ModelShardInferenceConfig( dtype=self.dtype, use_cuda_kernel=self.use_cuda_kernel, use_spec_dec=self.use_spec_dec, use_flash_attn=use_flash_attn, patched_parallelism_size=self.patched_parallelism_size, ) return model_inference_config def to_rpc_param(self) -> dict: kwargs = { "dtype": str(self.dtype).split(".")[-1], "max_n_spec_tokens": self.max_n_spec_tokens, "max_batch_size": self.max_batch_size, "max_input_len": self.max_input_len, "max_output_len": self.max_output_len, "tp_size": self.tp_size, "pp_size": self.pp_size, "pad_input": self.pad_input, "early_stopping": self.early_stopping, "do_sample": self.do_sample, "beam_width": self.beam_width, "kv_cache_dtype": str(self.kv_cache_dtype).split(".")[-1], } return kwargs @staticmethod def from_rpc_param(rpc_dict: dict) -> "InferenceConfig": """ We intentionally don't use `dict.get` method to ensure we pass the right rpc param, or program will show error message """ return InferenceConfig( dtype=getattr(torch, rpc_dict["dtype"]), max_n_spec_tokens=rpc_dict["max_n_spec_tokens"], max_batch_size=rpc_dict["max_batch_size"], max_input_len=rpc_dict["max_input_len"], max_output_len=rpc_dict["max_output_len"], 
tp_size=rpc_dict["tp_size"], pp_size=rpc_dict["pp_size"], pad_input=rpc_dict["pad_input"], early_stopping=rpc_dict["early_stopping"], do_sample=rpc_dict["do_sample"], beam_width=rpc_dict["beam_width"], kv_cache_dtype=getattr(torch, rpc_dict["kv_cache_dtype"], None), ) @classmethod def from_dict(cls, config_dict: Dict[str, Any]) -> "InferenceConfig": # Get the list of attributes of this dataclass. attrs = [attr.name for attr in fields(cls)] inference_config_args = {} for attr in attrs: if attr in config_dict: inference_config_args[attr] = config_dict[attr] else: inference_config_args[attr] = getattr(cls, attr) # Set the attributes from the parsed arguments. inference_config = cls(**inference_config_args) return inference_config @dataclass class ModelShardInferenceConfig: """ Configurations used during init of module for inference modeling. Args: dtype (torch.dtype): The data type for weights and activations. use_cuda_kernel (bool): Whether to use cuda kernel, faster but lose some precision occasionally use_spec_dec (bool): Indicate whether to use speculative decoding. use_flash_attn (bool): Indicate whether to use flash attention. 
""" dtype: torch.dtype = None use_cuda_kernel: bool = False use_spec_dec: bool = False use_flash_attn: bool = False patched_parallelism_size: int = 1 # for diffusion model, Distrifusion Technique @dataclass class DiffusionGenerationConfig: """ Param for diffusion model forward """ prompt_2: Optional[Union[str, List[str]]] = None prompt_3: Optional[Union[str, List[str]]] = None height: Optional[int] = None width: Optional[int] = None num_inference_steps: int = None timesteps: List[int] = None guidance_scale: float = None negative_prompt: Optional[Union[str, List[str]]] = ( None # NOTE(@lry89757) in pixart default to "", in sd3 default to None ) negative_prompt_2: Optional[Union[str, List[str]]] = None negative_prompt_3: Optional[Union[str, List[str]]] = None num_images_per_prompt: Optional[int] = None generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None latents: Optional[torch.FloatTensor] = None prompt_embeds: Optional[torch.FloatTensor] = None negative_prompt_embeds: Optional[torch.FloatTensor] = None pooled_prompt_embeds: Optional[torch.FloatTensor] = None negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None output_type: Optional[str] = None # "pil" return_dict: bool = None joint_attention_kwargs: Optional[Dict[str, Any]] = None clip_skip: Optional[int] = None callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None callback_on_step_end_tensor_inputs: List[str] = None def to_dict(self) -> Dict[str, Any]: # NOTE(@lry89757) Only return the dict that not the default value None result = {} for field in fields(self): value = getattr(self, field.name) if value is not None: result[field.name] = value return result @classmethod def from_kwargs(cls, **kwargs) -> "DiffusionGenerationConfig": return cls(**kwargs)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false