repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/__init__.py
colossalai/inference/__init__.py
# Public entry points of the colossalai.inference subpackage.
from .config import InferenceConfig
from .core import InferenceEngine

__all__ = ["InferenceConfig", "InferenceEngine"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/batch_bucket.py
colossalai/inference/batch_bucket.py
from typing import Callable, List, Optional, Tuple, Union

import torch

from colossalai.inference.struct import Sequence
from colossalai.utils import get_current_device


class BatchBucket:
    """Container for a batch of Sequences, which is used to manage the batch of sequences.

    Attrs:
        _sequences_dict (Dict[int, Sequence]): Map sequence uid to sequence struct
            seq_uid -> Sequence
        _sequences_indexes (Dict[int, int]): Map sequence uid to index in the batch
            seq_uid -> index in the batch (indexing used in sequence_lengths and block_tables)
        _sequence_lengths (torch.Tensor): Length of each sequence in the batch.
            The size of the tensor is (max_batch_size,)
        _block_tables (torch.Tensor): Block table of each sequence in the batch
            The size of the tensor is (max_batch_size, max_blocks_per_seq)
    """

    def __init__(
        self,
        num_heads,
        head_dim,
        max_batch_size,
        max_length,
        block_size,
        kv_max_split_num,
        fd_interm_tensor=None,
        device=None,
        dtype=torch.float16,
        enable_streamingllm: bool = False,
        start_token_size: int = 4,
        generated_token_size: int = 512,
    ):
        self.num_heads = num_heads
        self.head_dim = head_dim
        self.max_batch_size = max_batch_size
        self.max_length = max_length  # in + out len
        self.block_size = block_size
        self.kv_max_split_num = kv_max_split_num  # Hint used for flash decoding
        self.fd_interm_tensor = fd_interm_tensor
        self.device = device or get_current_device()
        self.dtype = dtype

        # Speculative-decoding state, toggled via set_use_spec_dec/reset_use_spec_dec.
        self._use_spec_dec = False
        self._num_tokens_to_verify = None

        self.enable_streamingllm = enable_streamingllm
        self.start_token_size = start_token_size
        self.generated_token_size = generated_token_size

        self._current_batch_size = 0
        self._sequences_dict = dict()
        self._sequences_indexes = dict()  # deque(maxlen=self.max_batch_size)
        self._sequence_lengths = torch.zeros((self.max_batch_size,), dtype=torch.int32)
        self._sequence_lengths_helper = torch.zeros_like(self._sequence_lengths)
        if enable_streamingllm:
            # One extra block holds the start tokens that are never evicted.
            max_blocks_per_seq = (start_token_size + generated_token_size + block_size - 1) // block_size + 1
        else:
            max_blocks_per_seq = (self.max_length + block_size - 1) // block_size
        self._block_tables = torch.full((self.max_batch_size, max_blocks_per_seq), -1, dtype=torch.int32)
        self._block_tables_helper = torch.full_like(self._block_tables, -1)

    @property
    def is_empty(self):
        return self._current_batch_size == 0

    @property
    def current_batch_size(self):
        return self._current_batch_size

    def __len__(self):
        return self._current_batch_size

    @property
    def available_batch_size(self):
        return self.max_batch_size - self._current_batch_size

    @property
    def block_tables(self):
        return self._block_tables

    @property
    def seq_lengths(self):
        return self._sequence_lengths

    @property
    def seqs_ids(self):
        return list(self._sequences_dict.keys())

    @property
    def seqs_li(self):
        return list(self._sequences_dict.values())

    @property
    def is_compact(self):
        # A compact batch has all live sequences packed at the front of the
        # sequence-length and block-table tensors, with no gaps.
        assert len(self._sequences_dict) == len(self._sequences_indexes), "BatchBucket indexing is not consistent"
        return (
            len(self._sequences_dict)
            == torch.nonzero(self._sequence_lengths).view(-1).numel()
            == torch.nonzero(self._block_tables[:, 0] >= 0).numel()
        )

    @property
    def use_spec_dec(self) -> bool:
        return self._use_spec_dec

    @property
    def num_tokens_to_verify(self) -> int:
        return self._num_tokens_to_verify

    @property
    def batch_token_ids(self) -> List[List[int]]:
        out = []
        for seq in self.seqs_li:
            out.append(seq.input_token_id + seq.output_token_id)
        return out

    def streamingllm_update_batch(self, start_token_size: int, generated_token_size: int):
        """
        Update sequence_lengths and block_tables when it is necessary to swap out a block.
        """

        updated_block_ids = []

        if self.current_batch_size > 0:
            need_update = False
            sequence_lengths_list = self._sequence_lengths.tolist()
            block_tables_list = self._block_tables[: self._current_batch_size].tolist()
            for batch_id in range(self.current_batch_size):
                # We assume that the start token occupies the entire first block.
                if sequence_lengths_list[batch_id] == start_token_size + generated_token_size + self.block_size - 1:
                    need_update = True
                    sequence_lengths_list[batch_id] = start_token_size + generated_token_size - 1
                    # Evict the oldest non-start block (index 1); block 0 keeps the start tokens.
                    block_id = block_tables_list[batch_id].pop(1)
                    updated_block_ids.append(block_id)
                    block_tables_list[batch_id].append(-1)
            if need_update:
                # NOTE(review): the rebuilt tensors only cover the first
                # current_batch_size rows, shrinking the (max_batch_size, ...)
                # buffers — confirm this is intended for streamingllm mode.
                self._sequence_lengths = torch.tensor(
                    sequence_lengths_list, dtype=self._sequence_lengths.dtype, device=self.device
                )
                self._block_tables = torch.tensor(
                    block_tables_list, dtype=self._block_tables.dtype, device=self.device
                )

        return updated_block_ids

    def set_use_spec_dec(self, num_tokens_to_verify: int = 5) -> None:
        """Set batch bucket to use speculative decoding.
        This will notify the engine to adjust the lengths of inputs during modeling,
        and let the main model verify tokens in parallel.
        """
        self._use_spec_dec = True
        self._num_tokens_to_verify = num_tokens_to_verify

    def reset_use_spec_dec(self) -> None:
        """Reset the usage of speculative decoding for the batch bucket"""
        self._use_spec_dec = False
        self._num_tokens_to_verify = None

    def _make_compact(self) -> None:
        # Clean and compress the batch based on its sequences dict.
        # Namely, compress sequences to the front and clean the seq lengths and block tables tensors.
        # NOTE Prevent calling this method multiple times in a single step
        if self.is_compact:
            return
        valid_seq_ids = self._sequences_dict.keys()
        valid_num = len(valid_seq_ids)
        valid_indexes = [self._sequences_indexes[seq_id] for seq_id in valid_seq_ids]
        assert valid_num == len(self._sequences_indexes), "BatchBucket indexing is not consistent"
        # Gather live rows into the helpers, then copy back so live data sits at the front.
        self._sequence_lengths_helper[:valid_num] = self._sequence_lengths[valid_indexes]
        self._sequence_lengths[:] = self._sequence_lengths_helper[:]
        self._block_tables_helper[:valid_num, :] = self.block_tables[valid_indexes]
        self.block_tables[:] = self._block_tables_helper[:]
        new_idx = 0
        for seq_id in valid_seq_ids:
            self._sequences_indexes[seq_id] = new_idx
            new_idx += 1
        self._sequence_lengths_helper.fill_(0)
        self._block_tables_helper.fill_(-1)
        self._current_batch_size = valid_num

    def add_seq(
        self,
        seq: Sequence,
        alloc_block_table: torch.Tensor = None,
        alloc_block_table_fn: Callable[[torch.Tensor, int], None] = None,
    ) -> Union[torch.Tensor, None]:
        """Add a single sequence to the batch.
        User could opt to provide either a block table or a function to allocate block tables.

        Args:
            seq (Sequence): The sequence to be added to the batch
            alloc_block_table (torch.Tensor): The block tables to be copied and used for the sequence
            alloc_block_table_fn (Callable[[torch.Tensor, int], None]): The function to allocate blocks for the sequence,
                which is expected to reserve blocks and update status of kv-cache manager.

        Returns:
            block_table (torch.Tensor): The block table of the added sequence, used for block allocation in kv-cache manager.
                None if the sequence cannot be added.
        """
        block_table = None
        # TODO might consider sorting by length
        if self._current_batch_size < self.max_batch_size:
            new_idx = self._current_batch_size
            self._sequences_dict[seq.request_id] = seq
            self._sequences_indexes[seq.request_id] = new_idx
            self._sequence_lengths[new_idx] = seq.sentence_len
            # NOTE the added seq still require block table allocation by kvcache manager
            # BUGFIX: the block-table row must be the same index recorded in
            # _sequences_indexes (new_idx, not new_idx - 1); otherwise the first
            # sequence added to an empty bucket would use row -1 (the last row)
            # and _make_compact/pop_seq_update_batch would read the wrong row.
            block_table = self._block_tables[new_idx]
            if alloc_block_table is not None:
                # copy block ids from provided block tables
                self._block_tables[new_idx] = alloc_block_table
            elif alloc_block_table_fn:
                alloc_block_table_fn(block_table, self._sequence_lengths[new_idx].item())
            self._current_batch_size += 1
        return block_table

    def add_seqs(
        self,
        seqs: List[Sequence],
        alloc_block_tables: torch.Tensor = None,
        alloc_block_tables_fn: Callable[[torch.Tensor, torch.Tensor], None] = None,
    ) -> Union[torch.Tensor, None]:
        """Add a list of sequences to the batch.
        User could opt to provide either block tables or a function to allocate block tables.

        Args:
            seqs (List[Sequence]): The sequences to be added to the batch
            alloc_block_tables (torch.Tensor): The block tables to be copied and used for the sequence
            alloc_block_table_fn (Callable[[torch.Tensor, torch.Tensor], None]): The function to allocate blocks for multiple sequences,
                which is expected to reserve blocks and update status of kv-cache manager.

        Returns:
            block_tables (torch.Tensor): The block tables of the added sequences, used for block allocation in kv-cache manager.
                None if the sequences cannot be added.
        """
        assert (
            alloc_block_tables is None or alloc_block_tables_fn is None
        ), "`alloc_block_tables` and `alloc_block_tables_fn` cannot be provided at the same time"

        num_seqs_to_add = min(self.max_batch_size - self._current_batch_size, len(seqs))
        block_tables = None

        if num_seqs_to_add > 0:
            for i, seq in enumerate(seqs[:num_seqs_to_add]):
                self._sequences_dict[seq.request_id] = seq
                self._sequences_indexes[seq.request_id] = self._current_batch_size + i
            # TODO external (rename): modify Sequence.sentence_len to seq_len
            self._sequence_lengths[self._current_batch_size : self._current_batch_size + num_seqs_to_add] = (
                torch.tensor([seq.sentence_len for seq in seqs[:num_seqs_to_add]], dtype=torch.int32)
            )
            # NOTE block tables to be updated by kvcache manager
            block_tables = self._block_tables[self._current_batch_size : self._current_batch_size + num_seqs_to_add]
            if alloc_block_tables is not None:
                # copy block ids from provided block tables
                self._block_tables[self._current_batch_size : self._current_batch_size + num_seqs_to_add] = (
                    alloc_block_tables
                )
            elif alloc_block_tables_fn:
                alloc_block_tables_fn(
                    block_tables,
                    self._sequence_lengths[self._current_batch_size : self._current_batch_size + num_seqs_to_add],
                )
            self._current_batch_size += num_seqs_to_add
            # Consume the added sequences from the caller's list in place.
            seqs[:] = seqs[num_seqs_to_add:]

        return block_tables

    def pop_seq_update_batch(
        self, request_id: int, free_block_table_fn: Callable[[torch.Tensor], None] = None
    ) -> Tuple[Sequence, Union[torch.Tensor, None]]:
        """Pop a single sequence by id from the batch, and update the batch bucket status.

        Args:
            request_id (int): The uid of the sequence
            free_block_table_fn (Callable): The function to free the block table of a sequence,
                if not provided, then we have to release the block table manually after calling this method

        Returns:
            A tuple of: seq (Sequence): The target sequence
            and block_table (torch.Tensor): block table of the target sequence indicating corresponding blocks,
                none if the sequence is not found or free_block_table_fn is provided.
        """
        seq: Sequence = self._sequences_dict.get(request_id)
        block_table = None
        if seq is not None:
            assert request_id in self._sequences_indexes, "Inconsistency in BatchBucket indexing"
            self._sequences_dict.pop(request_id)
            seq_b_idx = self._sequences_indexes.get(request_id)

            if self.current_batch_size > 1:
                # replace seq length of the target seq with that of the last seq in the batch
                last_seq_b_idx = self.current_batch_size - 1
                last_seq_id = next(
                    (uid for uid, index in self._sequences_indexes.items() if index == last_seq_b_idx),
                    None,
                )
                assert last_seq_id is not None
                self._sequences_indexes[last_seq_id] = seq_b_idx
                self._sequence_lengths[seq_b_idx] = self._sequence_lengths[last_seq_b_idx]
                self._sequence_lengths[last_seq_b_idx].fill_(0)
                # free the block table of the seq, or return a copy of the block table (to be processed outside)
                if free_block_table_fn:
                    free_block_table_fn(self._block_tables[seq_b_idx])
                else:
                    block_table = self._block_tables[seq_b_idx].detach().clone()
                # replace block table of the target seq with that of the last seq in the batch
                self._block_tables[seq_b_idx] = self._block_tables[last_seq_b_idx]
                self._block_tables[last_seq_b_idx].fill_(-1)
            else:
                if free_block_table_fn:
                    free_block_table_fn(self._block_tables[0])
                else:
                    block_table = self._block_tables[0].detach().clone()
                self._sequence_lengths[0].fill_(0)
                self._block_tables[0].fill_(-1)
            self._sequences_indexes.pop(request_id)
            self._current_batch_size -= 1

        return seq, block_table

    def pop_seqs(
        self, request_ids: List[int], free_block_table_fn: Callable[[torch.Tensor], None] = None
    ) -> Tuple[List[Sequence], List[torch.Tensor]]:
        """Iteratively pop a list of sequences by uid.

        Args:
            request_ids (List[int]): The uids of the sequences
            free_block_table_fn (Callable): The function to free the block table of a sequence,
                if not provided, then we have to release the block table manually after calling this method

        Returns:
            A tuple of: seqs (List[Sequence]): The target sequences
            and block_tables (List[torch.Tensor]): block tables of the target sequences indicating corresponding blocks
        """
        seqs = []
        block_tables = []
        for request_id in request_ids:
            seq, block_table = self.pop_seq_update_batch(request_id, free_block_table_fn)
            if seq is not None:
                seqs.append(seq)
            if block_table is not None:
                block_tables.append(block_table)
        return seqs, block_tables

    def pop_n_seqs(
        self, n: int, free_block_table_fn: Callable[[torch.Tensor], None] = None
    ) -> Tuple[List[Sequence], List[torch.Tensor]]:
        """Pop the first n sequences in the batch (FIFO).
        If n is greater than the current batch size, pop all the sequences in the batch.

        Args:
            n (int): The number of sequences to pop out
            free_block_table_fn (Callable): The function to free the block table of a single sequence

        Returns:
            A tuple of: seqs (List[Sequence]): The target sequences,
            and block_tables (List[torch.Tensor]): block tables of the target sequences indicating corresponding blocks
        """
        # NOTE Prevent calling this method multiple times in a single step
        seqs = []
        block_tables = []
        n = min(n, self.current_batch_size)
        seq_ids = list(self._sequences_dict.keys())[:n]
        for seq_id in seq_ids:
            seq = self._sequences_dict.pop(seq_id)
            seq_b_idx = self._sequences_indexes.pop(seq_id)
            if free_block_table_fn:
                free_block_table_fn(self.block_tables[seq_b_idx])
            else:
                block_tables.append(self.block_tables[seq_b_idx].detach().clone())
            seqs.append(seq)
        if not self.is_compact:
            self._make_compact()
        return seqs, block_tables

    def pop_finished(
        self, free_block_table_fn: Callable[[torch.Tensor], None] = None
    ) -> Tuple[List[Sequence], List[torch.Tensor]]:
        """Pop finished sequences in the batch and a list of block tables of the finished sequences,
        if free_block_table_fn is not provided.

        Args:
            free_block_table_fn (Callable): The function to free the block table of a single sequence

        Returns:
            A tuple of: finished_seqs (List[Sequence]): The finished sequences,
            and finished_block_tables (List[torch.Tensor]): block tables of the finished sequences.
        """
        finished_seqs = []
        finished_block_tables = []
        for seq in self._sequences_dict.values():
            if seq.check_finish():
                finished_seqs.append(seq)

        # Use `pop_seq_update_batch`` to update the batch status for just a few of finished seqs,
        # otherwise, pop seqs directly and then call `_make_compact` to compress the batch.
        # For now, the performance difference is not significant, so we use the first method to pop seqs.
        # Precise evaluations to be done.
        for seq in finished_seqs:
            _, block_table = self.pop_seq_update_batch(seq.request_id, free_block_table_fn)
            if block_table is not None:
                finished_block_tables.append(block_table)

        return finished_seqs, finished_block_tables

    # TODO arg type not support beam search sampling yet
    def append_batch_tokens(self, tokens: torch.Tensor) -> None:
        """Append a batch of tokens to the sequences in the batch"""
        assert self.current_batch_size == tokens.size(0), "Batch size mismatch"

        if self.current_batch_size > 0:
            tokens = tokens.tolist()
            for seq_id, seq in self._sequences_dict.items():
                index_in_b = self._sequences_indexes[seq_id]
                curr_tokens = tokens[index_in_b]
                if not isinstance(curr_tokens, list):
                    curr_tokens = [curr_tokens]
                seq.output_token_id += curr_tokens
                seq.check_finish()
            self._sequence_lengths[: self.current_batch_size] += 1

    def revoke_batch_tokens(self, n_tokens: int, n_seqs: int = 1) -> None:
        """Revoke the last n output tokens of the sequences in the batch

        Args:
            n_tokens (int): The number of output tokens to revoke from each sequence.
                It does not count in the context tokens (input tokens).
            n_seqs (int): The first n sequences to revoke tokens from. Defaults to 1.
                For now, speculative decoding only supports batch size 1.
        """
        if n_tokens >= 1:
            seqs_iter = iter(self._sequences_dict.items())
            for _ in range(n_seqs):
                seq_id, seq = next(seqs_iter)
                assert seq.output_len >= n_tokens, "Revoking len exceeds the current output len of the sequence"
                seq.output_token_id = seq.output_token_id[:-n_tokens]
                seq.revoke_finished_status()
                self._sequence_lengths[self._sequences_indexes[seq_id]] -= n_tokens

    def clear(self, free_block_tables_fn: Optional[Callable[[torch.Tensor, int], None]]) -> List[Sequence]:
        """Clear all the sequences in the batch.

        Args:
            free_block_tables_fn (Optional[Callable]): The function to free the block tables of all the sequences in a batch

        Returns:
            The sequences that were held in the batch before clearing.
        """
        seqs = list(self._sequences_dict.values())
        self._sequences_dict.clear()
        self._sequences_indexes.clear()
        if free_block_tables_fn:
            free_block_tables_fn(self.block_tables, self._current_batch_size)
        self._block_tables.fill_(-1)
        self._sequence_lengths.fill_(0)
        self._current_batch_size = 0
        return seqs

    def merge(self, other: "BatchBucket") -> List[int]:
        """Merge the sequences in the other batch into the current batch.
        Merge as many sequences as the current batch can hold, if it does not have
        available spaces holding all the sequences in the other batch

        Usage:
            > New incoming sequence added to prefill batch
                prefill bb curr batch size < prefill_ratio * prefill bb max batch size
            > New incoming sequence added to prefill batch
                prefill bb curr batch size == prefill_ratio * prefill bb max batch size
            > Pause Decoding
            > Prefill
            > Move sequences in prefill bb => decoding bb
            > Put back the out-of-volume sequences into the running pool

        Returns:
            unmerged_ids (List[int]): a list of sequence uids that are not merged into the current batch
        """
        unmerged_ids = []
        num_seqs_to_merge = min(self.available_batch_size, other.current_batch_size)
        if num_seqs_to_merge > 0:
            seqs, block_tables_li = other.pop_n_seqs(num_seqs_to_merge)
            block_tables = torch.stack(block_tables_li)
            self.add_seqs(seqs, alloc_block_tables=block_tables)
            unmerged_ids = other.seqs_ids
        return unmerged_ids

    ########## The following methods are expected to be used in modeling ###########

    # For compatibility.
    # NOTE: This is an assumption way to determine the stage of the batch.
    @property
    def is_prompts(self) -> bool:
        assert len(self._sequences_dict) > 0, "No sequence in the batch"
        first_seq = next(iter(self._sequences_dict.values()))
        if first_seq.output_len == 0:
            return True
        return False

    def get_1D_inputs_spec_dec(self, n: int) -> torch.Tensor:
        # Used for main model verification in **Decoding Stage**
        # `n` is the number of tokens to be verified,
        # and so that prepare the last `n` tokens of each sequence as the inputs
        assert len(self._sequences_dict) > 0, "No sequence in the batch"
        assert all(
            seq.output_len >= n for seq in self._sequences_dict.values()
        ), "Sequence output tokens must be greater than or equal to the number of tokens to be verified."
        out_li = []
        seq_ids = sorted(self._sequences_indexes.keys(), key=lambda x: self._sequences_indexes[x])
        for seq_id in seq_ids:
            seq: Sequence = self._sequences_dict[seq_id]
            out_li.extend(seq.output_token_id[-n:])
        return torch.tensor(out_li, dtype=torch.long, device=self.device)

    # For compatibility
    def get_1D_inputs(self) -> torch.Tensor:
        assert len(self._sequences_dict) > 0, "No sequence in the batch"
        first_seq = next(iter(self._sequences_dict.values()))  # not exactly the first sequence
        if first_seq.output_len == 0:
            # Assume prefill stage
            assert all(
                seq.output_len == 0 for seq in self._sequences_dict.values()
            ), "Sequence stage (Prefill/Decoding) must be the same in the batch"
            out_li = []
            seq_ids = sorted(self._sequences_indexes.keys(), key=lambda x: self._sequences_indexes[x])
            for seq_id in seq_ids:
                seq: Sequence = self._sequences_dict[seq_id]
                out_li.extend(seq.input_token_id)
            return torch.tensor(out_li, dtype=torch.long, device=self.device)
        else:
            # Assume decoding stage
            if self.use_spec_dec:
                # For Speculative Decoding
                # the number of tokens to be verified in parallel plus the correct token in the last step
                return self.get_1D_inputs_spec_dec(self.num_tokens_to_verify + 1)
            assert all(
                seq.output_len > 0 for seq in self._sequences_dict.values()
            ), "Sequence stage (Prefill/Decoding) must be the same in the batch"
            assert self.is_compact, "BatchBucket is not compact"
            out = torch.empty([self.current_batch_size], dtype=torch.long)
            for seq_id, index_in_b in self._sequences_indexes.items():
                seq: Sequence = self._sequences_dict[seq_id]
                out[index_in_b] = seq.output_token_id[-1]
            return out.to(device=self.device)

    # For compatibility
    def get_block_table_tensor(self) -> torch.Tensor:
        assert self.is_compact  # Debug usage
        block_table = self.block_tables[: self.current_batch_size]
        return block_table.to(device=self.device)

    # For compatibility
    def get_sequence_lengths(self) -> torch.Tensor:
        assert self.is_compact  # Debug usage
        sequence_lengths = self.seq_lengths[: self.current_batch_size]
        return sequence_lengths.to(device=self.device)

    # For compatibility
    @property
    def fd_inter_tensor(self):
        # Returns the flash-decoding intermediate tensor supplied at construction.
        assert self.fd_interm_tensor is not None, "fd_interm_tensor is not provided"
        return self.fd_interm_tensor

    def __repr__(self) -> str:
        return f"(sequences_dict={self._sequences_dict}, is_prompts={self.is_prompts})"
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/kv_cache/kvcache_manager.py
colossalai/inference/kv_cache/kvcache_manager.py
from typing import List, Tuple import torch from transformers.configuration_utils import PretrainedConfig from colossalai.inference.config import InferenceConfig from colossalai.inference.struct import Sequence from colossalai.logging import get_dist_logger from colossalai.utils import get_current_device from .block_cache import CacheBlock __all__ = ["KVCacheManager"] GIGABYTE = 1024**3 class KVCacheManager: """KVCacheManager manages both the logical cache blocks and physical KV cache (tensors). NOTE: The KVCacheManager is designed to be interacted with indices of logical blocks. That is, it won't allocate and return a physical cache to the engine or scheduler; instead, it will mark the logical block as allocated and update the block id representing the physical cache to the caller. The physical cache is actually used and updated in kernels. Example A block table of a single sequence before block allocation might be: | -1 | -1 | -1 | -1 | -1 | -1 | where the maximum blocks per sequence is 6 The block table after block allocation might be: | 0 | 1 | 2 | -1 | -1 | -1 | Then the logical blocks with id 0, 1, and 2, are allocated for this sequence, and the physical caches, each with size of `block_size * kv_head_num * head_size * elem_size` for a single layer, corresponding to these blocks will be used to read/write KV Caches in kernels. For a batch of sequences, the block tables after allocation might be: | 0 | 1 | 2 | -1 | -1 | -1 | | 3 | 4 | 5 | 6 | 7 | -1 | | 8 | 9 | 10 | 11 | -1 | -1 | | 12 | 13 | 14 | 15 | -1 | -1 | where 16 logical cache blocks are allocated and the same number of physical cache blocks will be used in kernels. Currently, allocations and updates are done at granularity of a single sequence. That is, the block table should be a 1D tensor of shape [max_blocks_per_sequence]. And it's possible to have a batch of sequences with different lengths of block tables. 
""" def __init__(self, config: InferenceConfig, model_config: PretrainedConfig) -> None: self.logger = get_dist_logger(__name__) self.device = get_current_device() # Parallel settings self.tp_size = config.tp_size # Model settings self.dtype = config.dtype if config.kv_cache_dtype is None: self.kv_cache_dtype = config.dtype else: self.kv_cache_dtype = config.kv_cache_dtype self.elem_size_in_bytes = torch.tensor([], dtype=self.dtype).element_size() self.num_layers = model_config.num_hidden_layers self.head_num = model_config.num_attention_heads self.head_size = model_config.hidden_size // self.head_num if hasattr(model_config, "num_key_value_heads"): self.kv_head_num = model_config.num_key_value_heads else: self.kv_head_num = self.head_num assert ( self.kv_head_num % self.tp_size == 0 ), f"Cannot shard {self.kv_head_num} heads with tp size {self.tp_size}" self.kv_head_num //= self.tp_size self.beam_width = config.beam_width self.max_batch_size = config.max_batch_size self.max_input_length = config.max_input_len self.max_output_length = config.max_output_len # Cache block settings self.block_size = config.block_size # NOTE: `num_blocks` is not prompted, but evaluated from the maximum input/output length, and the maximum batch size if config.enable_streamingllm: self.max_blocks_per_sequence = ( config.start_token_size + config.generated_token_size + self.block_size - 1 ) // self.block_size + 1 else: self.max_blocks_per_sequence = ( self.max_input_length + self.max_output_length + self.block_size - 1 ) // self.block_size self.num_blocks = self.max_blocks_per_sequence * self.max_batch_size * self.beam_width # Physical cache allocation if config.use_cuda_kernel: x = 16 // torch.tensor([], dtype=config.dtype).element_size() kalloc_shape = (self.num_blocks, self.kv_head_num, self.head_size // x, self.block_size, x) valloc_shape = (self.num_blocks, self.kv_head_num, self.block_size, self.head_size) self.logger.info( f"Allocating K cache with shape: {kalloc_shape}, V cache 
with shape: {valloc_shape} consisting of {self.num_blocks} blocks." ) self._kv_caches = self._init_device_caches(kalloc_shape, valloc_shape) else: alloc_shape = (self.num_blocks, self.kv_head_num, self.block_size, self.head_size) self.logger.info(f"Allocating KV cache with shape: {alloc_shape} consisting of {self.num_blocks} blocks.") self._kv_caches = self._init_device_caches(alloc_shape, alloc_shape) self.total_physical_cache_size_in_bytes = ( self.elem_size_in_bytes * self.num_layers * 2 * self.num_blocks * self.block_size * self.kv_head_num * self.head_size ) self.logger.info( f"Allocated {self.total_physical_cache_size_in_bytes / GIGABYTE:.2f} GB of KV cache on device {self.device}." ) # Logical cache blocks allocation self._available_blocks = self.num_blocks self._cache_blocks = tuple(self._init_logical_caches()) # block availablity state 0->allocated, 1->free self._block_states = torch.ones((self.num_blocks,), dtype=torch.bool) self._block_states_cum = torch.zeros(size=(self.num_blocks + 1,), dtype=torch.int64) self._block_finder = torch.zeros((self.num_blocks,), dtype=torch.int64) @property def total_num_blocks(self) -> int: """Get the total number of logical cache blocks.""" return self.num_blocks @property def num_available_blocks(self) -> int: """Get the number of available cache blocks.""" return self._available_blocks def get_head_size(self): return self.head_size def get_kv_cache(self): """Get k_cache and v_cache""" return self._kv_caches def get_max_blocks_per_sequence(self) -> int: """Get the maximum number of blocks that can be allocated for a single sequence.""" # TODO Consider removing this function as we plan to implement "half-dynamic" batching in schduler/request handler, # which will make the max_blocks_per_sequence dynamic based on the prompt lengths of sequences # in the current batch. 
return self.max_blocks_per_sequence def check_allocation(self, seq: Sequence) -> bool: num_blocks_needed = (seq.input_len + self.max_output_length + self.block_size - 1) // self.block_size return num_blocks_needed <= self.num_available_blocks def get_block_kv_ptrs(self, block_id: int, layer_id: int) -> Tuple[List[int], List[int]]: """Get the key and value pointers of physical caches (of specific layer) corresponding to a logical cache block.""" block: CacheBlock = self._cache_blocks[block_id] return block.k_ptrs[layer_id], block.v_ptrs[layer_id] def get_block_table_kv_ptrs(self, block_table: torch.Tensor, layer_id: int) -> Tuple[int, int]: """Get the key and value pointers of physical caches (of specific layer) corresponding to logical cache blocks indicated by the block table.""" k_ptrs = [] v_ptrs = [] for block_id in block_table: if block_id >= 0: block: CacheBlock = self._cache_blocks[block_id] k_ptrs.append(block.k_ptrs[layer_id]) v_ptrs.append(block.v_ptrs[layer_id]) return k_ptrs, v_ptrs def allocate_context_from_block_table(self, block_table: torch.Tensor, context_len: int) -> None: """Allocate the logical cache blocks for a single sequence during prefill stage, and updates the provided block table with the allocated block ids. Args: block_table: A 1D tensor of shape [max_blocks_per_sequence], mapping of token_position_id -> block_id. context_len: The length of the processing sequnece. """ assert block_table.dim() == 1 if not torch.all(block_table < 0): self.logger.error("Some slots on provided block table have been allocated.") blocks_required = (context_len + self.block_size - 1) // self.block_size if blocks_required > self._available_blocks: self.logger.warning( f"No enough blocks to allocate. Available blocks {self._available_blocks}; context length {context_len}." 
) return # Try contiguous allocation torch.cumsum(self._block_states, dim=-1, out=self._block_states_cum[1:]) torch.subtract( self._block_states_cum[blocks_required:], self._block_states_cum[:-blocks_required], out=self._block_finder[blocks_required - 1 :], ) end_indexes = torch.nonzero(self._block_finder == blocks_required, as_tuple=False).view(-1) if end_indexes.numel() > 0: # contiguous cache exists end_idx = end_indexes[0].item() + 1 # open interval start_idx = end_idx - blocks_required # closed interval block_indexes = torch.arange(start_idx, end_idx, device=block_table.device) else: # non-contiguous cache available_block_indexes = torch.nonzero(self._block_states == 0).view(-1) block_indexes = available_block_indexes[:blocks_required] # Update block table block_table[:blocks_required] = block_indexes # Update cache blocks self._block_states[block_indexes] = 0 self._available_blocks -= blocks_required for block_id in block_indexes.tolist(): block: CacheBlock = self._cache_blocks[block_id] block.add_ref() if block_id == block_indexes[-1].item(): self._allocate_on_block( block, block.block_size if context_len % block.block_size == 0 else context_len % block.block_size ) else: self._allocate_on_block(block, block.block_size) def allocate_context_from_block_tables(self, block_tables: torch.Tensor, context_lengths: torch.Tensor) -> None: """Allocate logical cache blocks for a batch of sequences during prefill stage. 
Args: block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] context_lengths (torch.Tensor): [bsz]] """ assert block_tables.dim() == 2 assert block_tables.size(0) == context_lengths.size(0) if not torch.all(block_tables < 0): self.logger.error("Some slots on provided block table have been allocated.") blocks_required = (context_lengths + self.block_size - 1) // self.block_size num_blocks_required = torch.sum(blocks_required).item() assert isinstance(num_blocks_required, int) if num_blocks_required > self._available_blocks: self.logger.warning( f"Lacking blocks to allocate. Available blocks {self._available_blocks}; blocks asked {num_blocks_required}." ) return bsz = block_tables.size(0) # Try contiguous allocation torch.cumsum(self._block_states, dim=-1, out=self._block_states_cum[1:]) torch.subtract( self._block_states_cum[num_blocks_required:], self._block_states_cum[:-num_blocks_required], out=self._block_finder[num_blocks_required - 1 :], ) end_indexes = torch.nonzero(self._block_finder == num_blocks_required, as_tuple=False).view(-1) if end_indexes.numel() > 0: # contiguous cache exists end_idx = end_indexes[0].item() + 1 # open interval start_idx = end_idx - num_blocks_required # closed interval alloc_block_ids = torch.arange(start_idx, end_idx) for i in range(bsz): curr_required = blocks_required[i] block_tables[i, :curr_required] = torch.arange( start_idx, start_idx + curr_required, device=block_tables.device ) start_idx += curr_required else: # non-contiguous cache available_block_ids = torch.nonzero(self._block_states > 0).view(-1) alloc_block_ids = available_block_ids[:num_blocks_required] alloc_block_ids = alloc_block_ids.to(dtype=block_tables.dtype, device=block_tables.device) start_idx = 0 for i in range(bsz): curr_required = blocks_required[i] block_tables[i, :curr_required] = alloc_block_ids[start_idx, start_idx + curr_required] start_idx += curr_required # Update cache blocks self._block_states[alloc_block_ids] = 0 self._available_blocks -= 
num_blocks_required last_block_locs = torch.cumsum(blocks_required, dim=0) - 1 last_block_locs = last_block_locs.to(device=alloc_block_ids.device) for i, block_id in enumerate(alloc_block_ids[last_block_locs]): block: CacheBlock = self._cache_blocks[block_id] block.add_ref() self._allocate_on_block( block, ( block.block_size if context_lengths[i] % block.block_size == 0 else context_lengths[i].item() % block.block_size ), ) for block_id in alloc_block_ids: if block_id in alloc_block_ids[last_block_locs]: continue block: CacheBlock = self._cache_blocks[block_id] block.add_ref() self._allocate_on_block(block, block.block_size) def allocate_token_from_block_table(self, block_table: torch.Tensor, context_len: int) -> None: """Allocate the logical cache block for a single sequence during decoding stage, and updates the provided block table if a new cache block is needed. Args: block_table: A 1D tensor of shape [max_blocks_per_sequence], mapping of token_position_id -> block_id. context_len: The length of the processing sequnece (already-allocated length). """ assert block_table.dim() == 1 # The last allocated block may be either partially or fully occupied. # `alloc_local_block_idx` is the index of block to be allocated on provided block table. alloc_local_block_idx = context_len // self.block_size return self.allocate_single_block(block_table, alloc_local_block_idx) def allocate_tokens_from_block_tables( self, block_tables: torch.Tensor, context_lens: torch.Tensor, bsz: int = None ) -> List[int]: """Allocate logical cache blocks for a batch of sequences during decoding stage. Usage: allocate_context_from_block_tables model forward (block tables & context lengths passed) update context lengths allocate_tokens_from_block_tables model forward update context lengths allocate_tokens_from_block_tables model forward update context lengths ... 
Args: block_tables (torch.Tensor): [bsz, max_blocks_per_sequence] context_lengths (torch.Tensor): [bsz] Returns: List[int]: list of sequence uid to be recycled """ assert block_tables.dim() == 2 assert context_lens.dim() == 1 bsz = block_tables.size(0) if bsz is None else bsz alloc_local_block_indexes = (context_lens[:bsz]) // self.block_size block_global_ids = block_tables[torch.arange(0, bsz), alloc_local_block_indexes] seqs_to_recycle = [] new_blocks_required = torch.sum(block_global_ids < 0).item() seqs_req_new_blocks = torch.nonzero(block_global_ids < 0).squeeze() if new_blocks_required > 0: if new_blocks_required > self._available_blocks: # TODO might want to revise the logic here # Process the first (_available_blocks) sequences that require new blocks # Put the rest of the sequences back to recycled seqs_req_new_blocks, seqs_to_recycle = ( seqs_req_new_blocks[: self._available_blocks], seqs_req_new_blocks[self._available_blocks :], ) for seq_id in seqs_to_recycle: self.free_block_table(block_tables[seq_id]) new_blocks_required = self._available_blocks # NOTE might want to alloc contiguous logic free_block_ids = torch.nonzero(self._block_states > 0).view(-1) alloc_block_ids = free_block_ids[:new_blocks_required].to( dtype=block_tables.dtype, device=block_tables.device ) for block_id in alloc_block_ids: block: CacheBlock = self._cache_blocks[block_id] block.add_ref() self._block_states[block_id] = 0 self._available_blocks -= 1 block_tables[seqs_req_new_blocks, alloc_local_block_indexes[seqs_req_new_blocks]] = alloc_block_ids block_global_ids = block_tables[torch.arange(0, bsz), alloc_local_block_indexes] for block_id in block_global_ids: self._allocate_on_block(self._cache_blocks[block_id], 1) return seqs_to_recycle def allocate_n_tokens_from_block_tables( self, block_tables: torch.Tensor, context_lens: torch.Tensor, bsz: int, n: int, ) -> List[int]: """Allocate logical cache blocks for `n` new tokens for a batch of sequences during decoding stage.""" assert 
block_tables.dim() == 2 assert context_lens.dim() == 1 bsz = block_tables.size(0) if bsz is None else bsz assert bsz == 1, "Support bsz 1 for now" # TODO support bsz > 1 seqs_to_recycle = [] for i in range(n): seqs_to_recycle += self.allocate_tokens_from_block_tables(block_tables, context_lens - n + i + 1, bsz) return seqs_to_recycle def allocate_single_block(self, block_table: torch.Tensor, block_local_idx: int) -> int: """Allocate space asked on a single block in the block table, specified by the provided position id, and updates the provided block table with the allocated block. Args: block_table: A 1D tensor of shape [max_blocks_per_sequence], mapping of token_position_id -> block_id. block_local_idx: The index of the block in the block table. space_asked: i.e. The number of tokens to be assigned space for. Returns: The remaining space required to be allocated (in other blocks). """ space_asked = 1 block_global_id = block_table[block_local_idx].item() if block_global_id < 0: # Allocate a new block if the current position is not assigned a block yet if self._available_blocks <= 0: # No available blocks to allocate, we free current sequence and return it to self.free_block_table(block_table) return True free_block_id = torch.nonzero(self._block_states == 1).view(-1)[0] block: CacheBlock = self._cache_blocks[free_block_id] block.add_ref() block_global_id = block.block_id self._available_blocks -= 1 self._block_states[block_global_id] = 0 block_table[block_local_idx] = block_global_id block: CacheBlock = self._cache_blocks[block_global_id] return self._allocate_on_block(block, space_asked) # only when space asked if fully satisfied, the return value will be zero. 
# NOTE(review): the following defs are methods of KVCacheManager, whose class
# header lies outside this chunk; formatting restored from a whitespace-mangled dump.

def free_block_table(self, block_table: torch.Tensor) -> None:
    """Free the logical cache blocks for **a single sequence**."""
    assert block_table.dim() == 1
    for i, global_block_id in enumerate(block_table.tolist()):
        if global_block_id < 0:
            # Block tables are filled front-to-back by the allocators, so the
            # first -1 marks the end of the allocated prefix; nothing follows.
            return
        block: CacheBlock = self._cache_blocks[global_block_id]
        block.remove_ref()
        if not block.has_ref():
            # Last reference gone: return the block to the free pool.
            block.allocated_size = 0
            self._available_blocks += 1
            self._block_states[global_block_id] = 1
            # reset the block id in the block table (if we maintain a 2D tensors as block tables in Engine)
            block_table[i] = -1

def free_block_tables(self, block_tables: torch.Tensor, first_n: int = None) -> None:
    """Release the logical cache blocks for a batch of sequences.

    If `first_n` is provided, only the blocks for the first several sequences will be released.
    """
    assert block_tables.dim() == 2
    first_n = block_tables.size(0) if first_n is None else first_n
    for block_table in block_tables[:first_n]:
        self.free_block_table(block_table)

def clear_all(self) -> None:
    """Clear all the references and allocations on all the cache blocks."""
    for block in self._cache_blocks:
        block.clear()
    self._available_blocks = self.num_blocks
    self._block_states[:] = 1  # 1 -> free

def streamingllm_free_block_tables(self, updated_block_ids: List[int]):
    """
    Free the block that needs to be swapped out.
    """
    for global_block_id in updated_block_ids:
        if global_block_id < 0:
            # NOTE(review): mirrors free_block_table's prefix early-exit; if the
            # swapped-out ids are not prefix-ordered this stops early — confirm intent
            # (a `continue` may have been meant here).
            return
        block: CacheBlock = self._cache_blocks[global_block_id]
        block.remove_ref()
        if not block.has_ref():
            block.allocated_size = 0
            self._available_blocks += 1
            self._block_states[global_block_id] = 1

def get_physical_cache(self, layer_id: int, block_idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """Get the tensor corresponding to the cache block with the prompted id for a specific layer."""
    # Returns the (key, value) slabs for the given block of the given layer.
    return self._kv_caches[0][layer_id][block_idx], self._kv_caches[1][layer_id][block_idx]

def _allocate_on_block(self, block: CacheBlock, space_asked: int) -> int:
    """Allocate a specific size of space on a provided cache block.

    Args:
        block: The logical cache block to occupy slots on.
        space_asked: Number of token slots requested.

    Returns:
        The remaining space required to be allocated (in other blocks).
    """
    assert block.available_space > 0, f"Found no available space left in the chosen block {block}."
    # Grant as much as this block can hold; the caller handles any remainder.
    space_to_allocate = min(block.available_space, space_asked)
    block.allocate(space_to_allocate)
    return space_asked - space_to_allocate

def _init_logical_caches(self):
    """Initialize the logical cache blocks.

    NOTE This function should be called only after the physical caches have been allocated.
    The data pointers of physical caches will be binded to each logical cache block.
    """
    assert self._kv_caches is not None and len(self._kv_caches[0]) > 0
    blocks = []
    # Size in bytes of one block's K (or V) slab within a single layer's cache.
    physical_block_size = self.elem_size_in_bytes * self.block_size * self.kv_head_num * self.head_size
    # Start one block *before* the base pointer so the first loop iteration
    # below advances each pointer onto block 0.
    k_ptrs = [
        self._kv_caches[0][layer_idx].data_ptr() - physical_block_size for layer_idx in range(self.num_layers)
    ]
    v_ptrs = [
        self._kv_caches[1][layer_idx].data_ptr() - physical_block_size for layer_idx in range(self.num_layers)
    ]
    for i in range(self.num_blocks):
        # Advance every per-layer pointer by one physical block per iteration.
        k_ptrs = [first_block_ptr + physical_block_size for first_block_ptr in k_ptrs]
        v_ptrs = [first_block_ptr + physical_block_size for first_block_ptr in v_ptrs]
        cache_block = CacheBlock(i, self.block_size, self.elem_size_in_bytes, k_ptrs, v_ptrs)
        blocks.append(cache_block)
    return blocks

def _init_device_caches(
    self, kalloc_shape: Tuple[int, ...], valloc_shape: Tuple[int, ...]
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Initialize the physical cache on the device.

    For each layer of the model, we allocate two tensors for key and value respectively,
    with shape of [num_blocks, num_kv_heads, block_size, head_size]
    """
    k_cache: List[torch.Tensor] = []
    v_cache: List[torch.Tensor] = []
    for _ in range(self.num_layers):
        k_cache.append(torch.zeros(kalloc_shape, dtype=self.kv_cache_dtype, device=self.device))
        v_cache.append(torch.zeros(valloc_shape, dtype=self.kv_cache_dtype, device=self.device))
    return k_cache, v_cache


class RPCKVCacheManager(KVCacheManager):
    """KVCacheManager variant for RPC workers: tracks logical blocks only and
    leaves physical cache allocation to the remote side."""

    def __init__(self, config: InferenceConfig, model_config: PretrainedConfig, verbose: bool = False) -> None:
        # NOTE(review): `verbose` is accepted but never read in this block.
        self.logger = get_dist_logger(__name__)
        self.device = get_current_device()
        self.config = config

        # Parallel settings
        self.tp_size = config.tp_size
        # Model settings
        self.dtype = config.dtype
        self.elem_size_in_bytes = torch.tensor([], dtype=self.dtype).element_size()
        self.num_layers = model_config.num_hidden_layers
        self.head_num = model_config.num_attention_heads
        self.head_size = model_config.hidden_size // self.head_num

        # Fall back to MHA (kv heads == attention heads) when the config does
        # not declare grouped/multi-query attention.
        if hasattr(model_config, "num_key_value_heads"):
            self.kv_head_num = model_config.num_key_value_heads
        else:
            self.kv_head_num = self.head_num

        if config.kv_cache_dtype is None:
            self.kv_cache_dtype = config.dtype
        else:
            self.kv_cache_dtype = config.kv_cache_dtype

        assert (
            self.kv_head_num % self.tp_size == 0
        ), f"Cannot shard {self.kv_head_num} heads with tp size {self.tp_size}"
        self.kv_head_num //= self.tp_size
        self.beam_width = config.beam_width
        self.max_batch_size = config.max_batch_size
        self.max_input_length = config.max_input_len
        self.max_output_length = config.max_output_len
        # Cache block settings
        self.block_size = config.block_size

        # NOTE: `num_blocks` is not prompted, but evaluated from the maximum input/output length, and the maximum batch size
        if config.enable_streamingllm:
            self.max_blocks_per_sequence = (
                config.start_token_size + config.generated_token_size + self.block_size - 1
            ) // self.block_size + 1
        else:
            self.max_blocks_per_sequence = (
                self.max_input_length + self.max_output_length + self.block_size - 1
            ) // self.block_size
        self.num_blocks = self.max_blocks_per_sequence * self.max_batch_size * self.beam_width

        # Logical cache blocks allocation
        self._available_blocks = self.num_blocks
        self._cache_blocks = tuple(self._init_logical_caches())
        # block availablity state 0->allocated, 1->free
        self._block_states = torch.ones((self.num_blocks,), dtype=torch.bool)
        self._block_states_cum = torch.zeros(size=(self.num_blocks + 1,), dtype=torch.int64)
        self._block_finder = torch.zeros((self.num_blocks,), dtype=torch.int64)

    def get_physical_cache_shape(self) -> Tuple[Tuple[int, ...], Tuple[int, ...]]:
        """Return the (K, V) tensor shapes the remote side should allocate per layer."""
        # Physical cache allocation
        if self.config.use_cuda_kernel:
            # The CUDA kernel packs `x` contiguous elements per slot along the last dim.
            x = 16 // torch.tensor([], dtype=self.config.dtype).element_size()
            kalloc_shape = (self.num_blocks, self.kv_head_num, self.head_size // x, self.block_size, x)
            valloc_shape = (self.num_blocks, self.kv_head_num, self.block_size, self.head_size)
            self.logger.info(
                f"Allocating K cache with shape: {kalloc_shape}, V cache with shape: {valloc_shape} consisting of {self.num_blocks} blocks."
            )
        else:
            alloc_shape = (self.num_blocks, self.kv_head_num, self.block_size, self.head_size)
            kalloc_shape = alloc_shape
            valloc_shape = alloc_shape
            self.logger.info(f"Allocating KV cache with shape: {alloc_shape} consisting of {self.num_blocks} blocks.")
        return kalloc_shape, valloc_shape

    def get_kv_cache(self):
        """Get k_cache and v_cache"""
        # NOTE(review): this *returns* the exception class instead of raising it;
        # callers get `NotImplementedError` as a value. Likely should be
        # `raise NotImplementedError` — confirm before changing.
        return NotImplementedError

    def _init_logical_caches(self):
        """Initialize the logical cache blocks."""
        # No physical caches on this side, so blocks carry no device pointers.
        blocks = []
        for i in range(self.num_blocks):
            cache_block = CacheBlock(i, self.block_size, self.elem_size_in_bytes, k_ptrs=None, v_ptrs=None)
            blocks.append(cache_block)
        return blocks
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/kv_cache/__init__.py
colossalai/inference/kv_cache/__init__.py
from .block_cache import CacheBlock from .kvcache_manager import KVCacheManager, RPCKVCacheManager __all__ = ["CacheBlock", "KVCacheManager", "RPCKVCacheManager"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/kv_cache/block_cache.py
colossalai/inference/kv_cache/block_cache.py
from typing import Any

__all__ = ["CacheBlock"]


class CacheBlock:
    """Lightweight logical KV-cache block used for Paged Attention.

    A block records how many token slots are occupied (``allocated_size``),
    how many sequences currently reference it (``ref_count``), and may carry
    the raw device pointers of the physical K/V caches it maps to.
    """

    def __init__(self, block_id: int, block_size: int, elem_size: int, k_ptrs: Any = None, v_ptrs: Any = None):
        # Identity and geometry of the block.
        self.block_id = block_id  # unique id of this cache block
        self.block_size = block_size  # capacity in number of tokens
        self.elem_size = elem_size  # element size in bytes

        # The logical<->physical mapping normally lives in the KV Cache Manager;
        # these optional pointers let the block track its physical cache itself.
        self.k_ptrs = k_ptrs
        self.v_ptrs = v_ptrs

        # Bookkeeping: reference count and number of occupied token slots.
        self.ref_count = 0
        self.allocated_size = 0

        # Token ids whose KV cache would be written into this block.
        # TODO add logics to update token_ids
        self.token_ids = [None] * self.block_size

    @property
    def available_space(self) -> int:
        # ``allocated_size`` never exceeds ``block_size``, so this is >= 0.
        return self.block_size - self.allocated_size

    def add_ref(self) -> None:
        """Register one more sequence referencing this block."""
        self.ref_count += 1

    def remove_ref(self) -> None:
        """Drop one reference; the block must currently be referenced."""
        assert self.ref_count > 0, f"Block#{self.block_id} has no reference to remove."
        self.ref_count -= 1

    def has_ref(self) -> bool:
        """Whether any sequence still references this block."""
        return bool(self.ref_count)

    def allocate(self, size: int) -> None:
        """Occupy ``size`` additional token slots on this block."""
        assert size <= self.available_space, f"Block#{self.block_id} has no available space to allocate."
        self.allocated_size += size

    def is_empty(self):
        """Whether no token slot is currently occupied."""
        return 1 > self.allocated_size

    def clear(self) -> None:
        """Reset both the reference count and the occupied-slot count."""
        self.ref_count = 0
        self.allocated_size = 0

    def __repr__(self):
        return f"CacheBlock#{self.block_id}(ref#{self.ref_count}, allocated#{self.allocated_size})"
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/__init__.py
colossalai/inference/modeling/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/models/pixart_alpha.py
colossalai/inference/modeling/models/pixart_alpha.py
# Code adapted from:
# https://github.com/huggingface/diffusers/blob/v0.29.0-release/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py

from typing import Callable, List, Optional, Union

import PIL.Image
import torch
from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha import (
    ASPECT_RATIO_256_BIN,
    ASPECT_RATIO_512_BIN,
    ASPECT_RATIO_1024_BIN,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import retrieve_timesteps

from colossalai.logging import get_dist_logger

from ..layers.diffusion import DiffusionPipe

logger = get_dist_logger(__name__)


@torch.no_grad()
def pixart_alpha_forward(
    self: DiffusionPipe,
    prompt: Union[str, List[str]] = None,
    negative_prompt: str = "",
    num_inference_steps: int = 20,
    timesteps: List[int] = None,
    sigmas: List[float] = None,
    guidance_scale: float = 4.5,
    num_images_per_prompt: Optional[int] = 1,
    height: Optional[int] = None,
    width: Optional[int] = None,
    eta: float = 0.0,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.Tensor] = None,
    prompt_embeds: Optional[torch.Tensor] = None,
    prompt_attention_mask: Optional[torch.Tensor] = None,
    negative_prompt_embeds: Optional[torch.Tensor] = None,
    negative_prompt_attention_mask: Optional[torch.Tensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
    callback_steps: int = 1,
    clean_caption: bool = True,
    use_resolution_binning: bool = True,
    max_sequence_length: int = 120,
    **kwargs,
) -> PIL.Image:
    """Run the PixArt-Alpha denoising pipeline and return decoded image(s).

    Adapted from diffusers' ``PixArtAlphaPipeline.__call__``, rebound onto a
    ``DiffusionPipe`` instance; relies on pipeline helpers (``encode_prompt``,
    ``prepare_latents``, scheduler, VAE, image processor) provided by ``self``.
    """
    # 1. Check inputs. Raise error if not correct
    height = height or self.transformer.config.sample_size * self.vae_scale_factor
    width = width or self.transformer.config.sample_size * self.vae_scale_factor
    if use_resolution_binning:
        # Pick the aspect-ratio bin table matching the transformer's sample size.
        if self.transformer.config.sample_size == 128:
            aspect_ratio_bin = ASPECT_RATIO_1024_BIN
        elif self.transformer.config.sample_size == 64:
            aspect_ratio_bin = ASPECT_RATIO_512_BIN
        elif self.transformer.config.sample_size == 32:
            aspect_ratio_bin = ASPECT_RATIO_256_BIN
        else:
            raise ValueError("Invalid sample size")
        # Remember the requested size so the output can be resized back later.
        orig_height, orig_width = height, width
        height, width = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin)

    self.check_inputs(
        prompt,
        height,
        width,
        negative_prompt,
        callback_steps,
        prompt_embeds,
        negative_prompt_embeds,
        prompt_attention_mask,
        negative_prompt_attention_mask,
    )

    # 2. Determine the batch size from the prompt(s) or precomputed embeddings
    if prompt is not None and isinstance(prompt, str):
        batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        batch_size = prompt_embeds.shape[0]

    device = self._execution_device

    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0

    # 3. Encode input prompt
    (
        prompt_embeds,
        prompt_attention_mask,
        negative_prompt_embeds,
        negative_prompt_attention_mask,
    ) = self.encode_prompt(
        prompt,
        do_classifier_free_guidance,
        negative_prompt=negative_prompt,
        num_images_per_prompt=num_images_per_prompt,
        device=device,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        prompt_attention_mask=prompt_attention_mask,
        negative_prompt_attention_mask=negative_prompt_attention_mask,
        clean_caption=clean_caption,
        max_sequence_length=max_sequence_length,
    )
    if do_classifier_free_guidance:
        # Concatenate unconditional + conditional so one forward pass serves both.
        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
        prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)

    # 4. Prepare timesteps
    timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps, sigmas)

    # 5. Prepare latents.
    latent_channels = self.transformer.config.in_channels
    latents = self.prepare_latents(
        batch_size * num_images_per_prompt,
        latent_channels,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        latents,
    )

    # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
    extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

    # 6.1 Prepare micro-conditions.
    added_cond_kwargs = {"resolution": None, "aspect_ratio": None}
    if self.transformer.config.sample_size == 128:
        resolution = torch.tensor([height, width]).repeat(batch_size * num_images_per_prompt, 1)
        aspect_ratio = torch.tensor([float(height / width)]).repeat(batch_size * num_images_per_prompt, 1)
        resolution = resolution.to(dtype=prompt_embeds.dtype, device=device)
        aspect_ratio = aspect_ratio.to(dtype=prompt_embeds.dtype, device=device)

        if do_classifier_free_guidance:
            resolution = torch.cat([resolution, resolution], dim=0)
            aspect_ratio = torch.cat([aspect_ratio, aspect_ratio], dim=0)

        added_cond_kwargs = {"resolution": resolution, "aspect_ratio": aspect_ratio}

    # 7. Denoising loop
    num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

    with self.progress_bar(total=num_inference_steps) as progress_bar:
        for i, t in enumerate(timesteps):
            # Duplicate latents for the (uncond, cond) pair under CFG.
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            current_timestep = t
            if not torch.is_tensor(current_timestep):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(current_timestep, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device)
            elif len(current_timestep.shape) == 0:
                current_timestep = current_timestep[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            current_timestep = current_timestep.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input,
                encoder_hidden_states=prompt_embeds,
                encoder_attention_mask=prompt_attention_mask,
                timestep=current_timestep,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # learned sigma: keep only the noise half when the model also predicts variance
            if self.transformer.config.out_channels // 2 == latent_channels:
                noise_pred = noise_pred.chunk(2, dim=1)[0]
            else:
                noise_pred = noise_pred

            # compute previous image: x_t -> x_t-1
            if num_inference_steps == 1:
                # For DMD one step sampling: https://arxiv.org/abs/2311.18828
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).pred_original_sample
            else:
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

            # call the callback, if provided
            if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                progress_bar.update()
                if callback is not None and i % callback_steps == 0:
                    step_idx = i // getattr(self.scheduler, "order", 1)
                    callback(step_idx, t, latents)

    # NOTE(review): `output_type` is forcibly overwritten here, so the
    # "latent" branches below are currently unreachable (see TODO).
    output_type = "pil"  # TODO(@lry89757) temporarily image, please support more return output
    if not output_type == "latent":
        image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
        if use_resolution_binning:
            image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height)
    else:
        image = latents

    if not output_type == "latent":
        image = self.image_processor.postprocess(image, output_type=output_type)

    # Offload all models
    # self.maybe_free_model_hooks()

    return image
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/models/stablediffusion3.py
colossalai/inference/modeling/models/stablediffusion3.py
# This code is adapted from huggingface diffusers: https://github.com/huggingface/diffusers/blob/v0.29.0-release/src/diffusers/pipelines/stable_diffusion_3/pipeline_stable_diffusion_3.py
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3 import retrieve_timesteps

from ..layers.diffusion import DiffusionPipe


# TODO(@lry89757) temporarily image, please support more return output
@torch.no_grad()
def sd3_forward(
    self: DiffusionPipe,
    prompt: Union[str, List[str]] = None,
    prompt_2: Optional[Union[str, List[str]]] = None,
    prompt_3: Optional[Union[str, List[str]]] = None,
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 28,
    timesteps: List[int] = None,
    guidance_scale: float = 7.0,
    negative_prompt: Optional[Union[str, List[str]]] = None,
    negative_prompt_2: Optional[Union[str, List[str]]] = None,
    negative_prompt_3: Optional[Union[str, List[str]]] = None,
    num_images_per_prompt: Optional[int] = 1,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    prompt_embeds: Optional[torch.FloatTensor] = None,
    negative_prompt_embeds: Optional[torch.FloatTensor] = None,
    pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
    negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    joint_attention_kwargs: Optional[Dict[str, Any]] = None,
    clip_skip: Optional[int] = None,
    callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
    # NOTE(review): mutable default argument; harmless here since it is only
    # iterated, never mutated — but a tuple default would be safer.
    callback_on_step_end_tensor_inputs: List[str] = ["latents"],
):
    """Run the Stable Diffusion 3 denoising pipeline and return image(s).

    Adapted from diffusers' ``StableDiffusion3Pipeline.__call__``, rebound onto
    a ``DiffusionPipe`` instance; relies on pipeline helpers (``encode_prompt``,
    ``prepare_latents``, scheduler, VAE, image processor) provided by ``self``.
    """
    height = height or self.default_sample_size * self.vae_scale_factor
    width = width or self.default_sample_size * self.vae_scale_factor

    # 1. Check inputs. Raise error if not correct
    self.check_inputs(
        prompt,
        prompt_2,
        prompt_3,
        height,
        width,
        negative_prompt=negative_prompt,
        negative_prompt_2=negative_prompt_2,
        negative_prompt_3=negative_prompt_3,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
    )

    # Stash call-level settings on the pipeline; the `self.*` properties read these.
    self._guidance_scale = guidance_scale
    self._clip_skip = clip_skip
    self._joint_attention_kwargs = joint_attention_kwargs
    self._interrupt = False

    # 2. Define call parameters
    if prompt is not None and isinstance(prompt, str):
        batch_size = 1
    elif prompt is not None and isinstance(prompt, list):
        batch_size = len(prompt)
    else:
        batch_size = prompt_embeds.shape[0]

    device = self._execution_device

    # 3. Encode the three prompts (and their negatives) into embeddings
    (
        prompt_embeds,
        negative_prompt_embeds,
        pooled_prompt_embeds,
        negative_pooled_prompt_embeds,
    ) = self.encode_prompt(
        prompt=prompt,
        prompt_2=prompt_2,
        prompt_3=prompt_3,
        negative_prompt=negative_prompt,
        negative_prompt_2=negative_prompt_2,
        negative_prompt_3=negative_prompt_3,
        do_classifier_free_guidance=self.do_classifier_free_guidance,
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        pooled_prompt_embeds=pooled_prompt_embeds,
        negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        device=device,
        clip_skip=self.clip_skip,
        num_images_per_prompt=num_images_per_prompt,
    )

    if self.do_classifier_free_guidance:
        # Concatenate unconditional + conditional so one forward pass serves both.
        prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
        pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)

    # 4. Prepare timesteps
    timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
    num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
    self._num_timesteps = len(timesteps)

    # 5. Prepare latent variables
    num_channels_latents = self.transformer.config.in_channels
    latents = self.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        prompt_embeds.dtype,
        device,
        generator,
        latents,
    )

    # 6. Denoising loop
    with self.progress_bar(total=num_inference_steps) as progress_bar:
        for i, t in enumerate(timesteps):
            if self.interrupt:
                continue

            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timestep = t.expand(latent_model_input.shape[0])

            noise_pred = self.transformer(
                hidden_states=latent_model_input,
                timestep=timestep,
                encoder_hidden_states=prompt_embeds,
                pooled_projections=pooled_prompt_embeds,
                joint_attention_kwargs=self.joint_attention_kwargs,
                return_dict=False,
            )[0]

            # perform guidance
            if self.do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents_dtype = latents.dtype
            latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]

            if latents.dtype != latents_dtype:
                if torch.backends.mps.is_available():
                    # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                    latents = latents.to(latents_dtype)

            if callback_on_step_end is not None:
                # Hand the named local tensors to the callback and let it override them.
                callback_kwargs = {}
                for k in callback_on_step_end_tensor_inputs:
                    callback_kwargs[k] = locals()[k]
                callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                latents = callback_outputs.pop("latents", latents)
                prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
                negative_pooled_prompt_embeds = callback_outputs.pop(
                    "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
                )

            # call the callback, if provided
            if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                progress_bar.update()

    if output_type == "latent":
        image = latents
    else:
        # Undo the VAE's latent scaling/shift before decoding.
        latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
        image = self.vae.decode(latents, return_dict=False)[0]
        image = self.image_processor.postprocess(image, output_type=output_type)

    return image
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/models/__init__.py
colossalai/inference/modeling/models/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/models/glide_llama.py
colossalai/inference/modeling/models/glide_llama.py
# This is modified from huggingface transformers
# https://github.com/huggingface/transformers/blob/v4.36.2/src/transformers/models/llama/modeling_llama.py
# GLIDE drafter model for speculative decoding: a small Llama whose decoder layers
# additionally cross-attend ("glimpse") into the large verifier model's KV caches.
# Refer to "GLIDE with a CAPE" https://arxiv.org/pdf/2402.02082.pdf
import warnings
from types import MethodType
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from transformers.cache_utils import DynamicCache
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from transformers.models.llama.modeling_llama import (
    LlamaAttention,
    LlamaConfig,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaRMSNorm,
)

from colossalai.inference.spec import GlideInput
from colossalai.kernel.triton import flash_decoding_attention
from colossalai.logging import get_dist_logger

logger = get_dist_logger(__name__)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_single_rotary_pos_emb(q, cos, sin, position_ids):
    """Apply rotary position embedding to the query tensor only (no key rotation)."""
    # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
    q_embed = (q * cos) + (rotate_half(q) * sin)
    return q_embed


def glide_llama_causal_lm_forward(
    self: LlamaForCausalLM,
    input_ids: torch.LongTensor = None,
    glide_input: Optional[GlideInput] = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
) -> Union[Tuple, CausalLMOutputWithPast]:
    r"""
    Args:
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

    Returns:

    Example:

    ```python
    >>> from transformers import AutoTokenizer, LlamaForCausalLM

    >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
    >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)

    >>> prompt = "Hey, are you conscious? Can you talk to me?"
    >>> inputs = tokenizer(prompt, return_tensors="pt")

    >>> # Generate
    >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
    >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
    "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
    ```"""
    # Replaces LlamaForCausalLM.forward: identical to HF's version except that
    # `glide_input` is threaded through to the model so decoder layers can glimpse
    # the verifier's KV caches. NOTE loss computation for `labels` is omitted
    # (loss is always None) — this model is only used for drafting at inference.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs = self.model(
        input_ids=input_ids,
        glide_input=glide_input,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )

    hidden_states = outputs[0]
    logits = self.lm_head(hidden_states)
    logits = logits.float()

    if not return_dict:
        output = (logits,) + outputs[1:]
        return output

    return CausalLMOutputWithPast(
        loss=None,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )


def glide_llama_model_forward(
    self: LlamaModel,
    input_ids: torch.LongTensor = None,
    glide_input: GlideInput = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[List[torch.FloatTensor]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
) -> Union[Tuple, BaseModelOutputWithPast]:
    # Replaces LlamaModel.forward; mirrors HF's implementation but passes
    # `glide_input` into every (Glide) decoder layer and shifts position ids
    # by the number of speculated tokens when provided.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    use_cache = use_cache if use_cache is not None else self.config.use_cache
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    # retrieve input_ids and inputs_embeds
    if (input_ids is None) ^ (inputs_embeds is not None):
        raise ValueError(
            "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
        )

    if self.gradient_checkpointing and self.training and use_cache:
        logger.warning_once("`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.")
        use_cache = False

    if inputs_embeds is None:
        inputs_embeds = self.embed_tokens(input_ids)

    if use_cache and past_key_values is None:
        past_key_values = DynamicCache()

    if cache_position is None:
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        cache_position = torch.arange(
            past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
        )

    if position_ids is None:
        position_ids = cache_position.unsqueeze(0)

    attention_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values)

    # Offset positions past the tokens currently being speculated on the verifier side.
    if hasattr(glide_input, "n_spec_tokens"):
        position_ids = position_ids + glide_input.n_spec_tokens

    # embed positions
    hidden_states = inputs_embeds
    position_embeddings = self.rotary_emb(hidden_states, position_ids)

    # decoder layers
    all_hidden_states = () if output_hidden_states else None
    all_self_attns = () if output_attentions else None

    for decoder_layer in self.layers:
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        # GlideLlamaDecoderLayer
        layer_outputs = decoder_layer(
            hidden_states,
            position_embeddings=position_embeddings,
            glide_input=glide_input,
            attention_mask=attention_mask,
            past_key_value=past_key_values,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
        )

        hidden_states = layer_outputs[0]

        if output_attentions:
            all_self_attns += (layer_outputs[1],)

    hidden_states = self.norm(hidden_states)

    # add hidden states from the last decoder layer
    if output_hidden_states:
        all_hidden_states += (hidden_states,)

    if not return_dict:
        return tuple(v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None)
    return BaseModelOutputWithPast(
        last_hidden_state=hidden_states,
        past_key_values=past_key_values,
        hidden_states=all_hidden_states,
        attentions=all_self_attns,
    )


class GlideLlamaConfig(LlamaConfig):
    """Configuration class with specific arguments used by GLIDE llama model as a drafter"""

    def __init__(
        self,
        large_hidden_size=4096,
        large_num_attention_heads=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Dimensions of the *large* (verifier) model, needed so the drafter's
        # cross-attention can project queries into the verifier's KV-cache space.
        self.large_hidden_size = large_hidden_size
        self.large_num_attention_heads = large_num_attention_heads


class LlamaCrossAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: GlideLlamaConfig):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        # large model (verifier) configs
        self.large_hidden_size = config.large_hidden_size
        self.large_num_heads = config.large_num_attention_heads
        self.large_head_dim = self.large_hidden_size // self.large_num_heads

        # Project drafter hidden states up to the verifier's head layout, and back down.
        self.q_proj = nn.Linear(self.hidden_size, self.large_num_heads * self.large_head_dim, bias=False)
        self.o_proj = nn.Linear(self.large_num_heads * self.large_head_dim, self.hidden_size, bias=False)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        position_ids: Optional[torch.LongTensor] = None,
        glide_input: GlideInput = None,  # Used for glimpsing main model's KV caches
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Optional[torch.Tensor]:
        # Queries come from the drafter; keys/values are read directly from the
        # verifier's paged KV caches via flash_decoding_attention (no K/V projection here).
        # NOTE(review): flash_decoding_attention output is flattened as
        # [bsz * q_len, num_heads * head_dim]; callers use q_len == 1 during
        # speculation (see GlideLlamaDecoderLayer) — confirm for other q_len.
        bsz, q_len, _ = hidden_states.size()

        block_tables = glide_input.block_tables
        large_k_cache = glide_input.large_k_cache
        large_v_cache = glide_input.large_v_cache
        sequence_lengths = glide_input.sequence_lengths
        cache_block_size = large_k_cache.size(-2)

        query_states = self.q_proj(hidden_states)
        kv_seq_len = sequence_lengths.max().item()

        query_states = query_states.view(bsz, -1, self.large_num_heads, self.large_head_dim).transpose(1, 2)

        # for RoPE
        cos, sin = position_embeddings
        query_states = apply_single_rotary_pos_emb(query_states, cos, sin, position_ids)
        query_states = query_states.transpose(1, 2)
        query_states = query_states.reshape(-1, self.large_num_heads, self.large_head_dim)

        attn_output = flash_decoding_attention(
            q=query_states,
            k_cache=large_k_cache,
            v_cache=large_v_cache,
            kv_seq_len=sequence_lengths,
            block_tables=block_tables,
            block_size=cache_block_size,
            max_seq_len_in_batch=kv_seq_len,
        )  # attn_output: [bsz * q_len, num_heads * head_dim]

        attn_output = attn_output.reshape(bsz, q_len, self.large_hidden_size)
        attn_output = self.o_proj(attn_output)

        return attn_output


# A class to be used to replace LlamaDecoderLayer in a Llama Model as Drafter in speculative decoding.
# Refer to GLIDE with a CAPE https://arxiv.org/pdf/2402.02082.pdf
class GlideLlamaDecoderLayer(nn.Module):
    def __init__(self, config: GlideLlamaConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = LlamaAttention(config=config, layer_idx=layer_idx)
        self.cross_attn = LlamaCrossAttention(config=config)
        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    @staticmethod
    def from_native_module(module: LlamaDecoderLayer, *args, **kwargs) -> "GlideLlamaDecoderLayer":
        """Build a GlideLlamaDecoderLayer from a native LlamaDecoderLayer"""
        # Config is recovered from the MLP submodule rather than the layer itself.
        config: LlamaConfig = module.mlp.config  # XXX
        layer_idx = module.self_attn.layer_idx
        glide_config = GlideLlamaConfig(**config.to_dict())
        glide_decoder_layer = GlideLlamaDecoderLayer(glide_config, layer_idx=layer_idx)

        return glide_decoder_layer

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: torch.Tensor = None,
        position_ids: Optional[torch.LongTensor] = None,
        glide_input: GlideInput = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
        """
        if "padding_mask" in kwargs:
            warnings.warn(
                "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
            )

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            position_embeddings=position_embeddings,
            attention_mask=attention_mask,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        curr_q_len = hidden_states.size(1)
        # Cross attention
        if glide_input is None or not glide_input.glimpse_ready:
            warnings.warn(
                "Data used for glimpsing the past KV caches of the main model (verifier) is not complete. "
                "Fall back to normal decoder layer modeling (drafter). "
                "This might lead to incorrect results when using the Glide Models for speculative decoding."
            )
        elif curr_q_len == 1:
            # Notice that we skip prefill stage
            # always use the output of the main model as the inputs for the next round of speculation
            residual = hidden_states

            hidden_states = self.cross_attn(
                hidden_states=hidden_states,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                glide_input=glide_input,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                use_cache=True,
            )
            hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        return outputs


class GlideLlamaForCausalLM(LlamaForCausalLM):
    def __init__(self, config: GlideLlamaConfig):
        super().__init__(config)
        self.config = config
        # Monkey-patch the forward methods on this instance (not the class) and
        # swap every decoder layer for a GlideLlamaDecoderLayer.
        bound_method = MethodType(glide_llama_causal_lm_forward, self)
        setattr(self, "forward", bound_method)
        bound_method = MethodType(glide_llama_model_forward, self.model)
        model = getattr(self, "model")
        setattr(model, "forward", bound_method)
        replaced_layers = nn.ModuleList(
            [GlideLlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        setattr(model, "layers", replaced_layers)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/models/nopadding_baichuan.py
colossalai/inference/modeling/models/nopadding_baichuan.py
# This code is adapted from huggingface baichuan model: https://huggingface.co/baichuan-inc/Baichuan2-13B-Base/blob/main/modeling_baichuan.py
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.distributed import ProcessGroup

from colossalai.accelerator import get_accelerator
from colossalai.inference.config import ModelShardInferenceConfig
from colossalai.inference.flash_decoding_utils import FDIntermTensors
from colossalai.inference.modeling.backends.attention_backend import AttentionMetaData, get_attention_backend
from colossalai.inference.modeling.backends.pre_attention_backend import get_pre_attention_backend
from colossalai.inference.modeling.models.nopadding_llama import NopadLlamaMLP
from colossalai.inference.utils import get_alibi_slopes
from colossalai.kernel.kernel_loader import InferenceOpsLoader
from colossalai.kernel.triton import rms_layernorm
from colossalai.logging import get_dist_logger
from colossalai.shardformer.layer.parallel_module import ParallelModule
from colossalai.tensor.d_tensor import is_distributed_tensor

inference_ops = InferenceOpsLoader().load()

logger = get_dist_logger(__name__)


def baichuan_rmsnorm_forward(
    self,
    hidden_states: torch.Tensor,
    norm_output: torch.Tensor,
    residual: torch.Tensor = None,
    use_cuda_kernel: bool = True,
):
    """RMSNorm forward replacement for Baichuan, dispatching to CUDA or triton kernels.

    Args:
        hidden_states (torch.Tensor): input of shape [token_num, embed_dim].
        norm_output (torch.Tensor): preallocated output buffer; may be None.
        residual (torch.Tensor, optional): residual to be fused-added before the norm.
        use_cuda_kernel (bool, optional): whether to use the CUDA fused kernels. Defaults to True.

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: (normalized output, residual to carry forward).

    Raises:
        TypeError: if the module exposes neither `variance_epsilon` nor `epsilon`.
    """
    # Used to address the issue of inconsistent epsilon variable names in baichuan2 7b and 13b.
    if hasattr(self, "variance_epsilon"):
        eps = self.variance_epsilon
    elif hasattr(self, "epsilon"):
        eps = self.epsilon
    else:
        # FIX: the TypeError was previously constructed but never raised, so execution
        # fell through and crashed later with an unrelated NameError on `eps`.
        raise TypeError(
            "Currently, the variable name for the epsilon of baichuan7B/13B should be 'variance_epsilon' or 'epsilon'."
        )

    if use_cuda_kernel:
        if residual is not None:
            # Fused add + rmsnorm writes in place into hidden_states/residual.
            inference_ops.fused_add_rms_layernorm(hidden_states, residual, self.weight.data, eps)
            return hidden_states, residual

        if norm_output is None:
            norm_output = torch.empty_like(hidden_states)
        inference_ops.rms_layernorm(norm_output, hidden_states, self.weight.data, eps)
        return norm_output, hidden_states
    else:
        return rms_layernorm(hidden_states, self.weight.data, eps, norm_output, residual)


class NopadBaichuanAttention(ParallelModule):
    def __init__(
        self,
        config,
        W_pack: ParallelModule = None,
        attn_oproj: ParallelModule = None,
        num_heads: int = None,
        hidden_size: int = None,
        model_shard_infer_config: ModelShardInferenceConfig = None,
        process_group: ProcessGroup = None,
    ):
        """This layer will replace the BaichuanAttention.

        Args:
            config (BaichuanConfig): Holding the Baichuan model config.
            W_pack (ParallelModule, optional): The packed weight. Defaults to None.
            attn_oproj (Linear1D_Row, optional): The Linear1D_Row o_proj. Defaults to None.
        """
        ParallelModule.__init__(self)
        self.config = config
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.head_dim = self.hidden_size // self.num_heads
        self.process_group = process_group
        self.W_pack = W_pack
        self.o_proj = attn_oproj
        self.use_cuda_kernel = model_shard_infer_config.use_cuda_kernel
        self.attention_backend = get_attention_backend(model_shard_infer_config)
        self.pre_attention_backend = get_pre_attention_backend(model_shard_infer_config)
        self.alibi_slopes = None
        self.use_alibi_attn = False
        # Used for Baichuan13B — hidden_size 5120 identifies the 13B variant, which
        # uses ALiBi positional bias instead of RoPE. Each TP rank keeps only its
        # own contiguous slice of the slopes.
        if config.hidden_size == 5120:
            slopes_start = self.process_group.rank() * num_heads
            self.use_alibi_attn = True
            self.alibi_slopes = get_alibi_slopes(
                config.num_attention_heads, device=get_accelerator().get_current_device()
            )[slopes_start : slopes_start + num_heads].contiguous()
            self.alibi_slopes = nn.Parameter(self.alibi_slopes)

    @staticmethod
    def from_native_module(
        module: nn.Module, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs
    ) -> "NopadBaichuanAttention":
        """Used for initialize the weight of NopadBaichuanAttention by origin BaichuanAttention.

        Args:
            module (nn.Module): The origin BaichuanAttention layer.
        """
        config = module.config
        W_pack = module.W_pack
        attn_oproj = module.o_proj
        model_shard_infer_config = kwargs.get("model_shard_infer_config", None)

        attn_layer = NopadBaichuanAttention(
            config=config,
            W_pack=W_pack,
            attn_oproj=attn_oproj,
            model_shard_infer_config=model_shard_infer_config,
            num_heads=module.num_heads,
            hidden_size=module.hidden_size,
            process_group=process_group,
        )

        return attn_layer

    def forward(
        self,
        hidden_states: torch.Tensor,
        block_tables: torch.Tensor,
        k_cache: torch.Tensor,
        v_cache: torch.Tensor,
        sequence_lengths: torch.Tensor,
        cos_sin: Tuple[torch.Tensor],
        fd_inter_tensor: FDIntermTensors,
        is_prompts: bool = True,
        is_verifier: bool = False,
        tokens_to_verify: int = None,
        kv_seq_len: int = 0,
        output_tensor: torch.Tensor = None,
        sm_scale: int = None,
        cu_seqlens: torch.Tensor = None,
        high_precision: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """
        Args:
            hidden_states (torch.Tensor): input to the layer of shape [token_num, embed_dim].
            block_tables (torch.Tensor): A 2D tensor of shape [batch_size, max_blocks_per_sequence],
                storing mapping of token_position_id -> block_id.
            k_cache (torch.Tensor): It holds the GPU memory for the key cache.
            v_cache (torch.Tensor): It holds the GPU memory for the key cache.
            sequence_lengths (torch.Tensor, optional): Holding the sequence length of each sequence.
            cos_sin (Tuple[torch.Tensor], optional): Holding cos and sin.
            fd_inter_tensor (FDIntermTensors, optional): Holding tensors used for
                storing intermediate values in flash-decoding.
            is_prompts (bool, optional): Whether the current inference process is in the context input phase. Defaults to True.
            kv_seq_len (int, optional): The max sequence length of input sequences. Defaults to 0.
            output_tensor (torch.Tensor, optional): The mid tensor holds the output of attention. Defaults to None.
            sm_scale (int, optional): Used for flash attention. Defaults to None.
            cu_seqlens(torch.Tensor, optional): Holding the cumulative sum of sequence length.
            high_precision(Optional[bool]): Whether to use float32 for underlying calculations of float16 data to achieve higher precision, defaults to False.
        """
        token_nums = hidden_states.size(0)
        # W_pack projects q, k, v in one matmul; unflatten/transpose splits the packed
        # output into three tensors of shape [token_num, num_heads, head_dim].
        proj = self.W_pack(hidden_states)
        proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2)
        query_states = proj[0].view(token_nums, self.num_heads, self.head_dim)
        key_states = proj[1].view(token_nums, self.num_heads, self.head_dim)
        value_states = proj[2].view(token_nums, self.num_heads, self.head_dim)

        block_size = k_cache.size(-2)

        attn_metadata = AttentionMetaData(
            query_states=query_states,
            key_states=key_states,
            value_states=value_states,
            k_cache=k_cache,
            v_cache=v_cache,
            block_tables=block_tables,
            block_size=block_size,
            kv_seq_len=kv_seq_len,
            sequence_lengths=sequence_lengths,
            sm_scale=sm_scale,
            alibi_slopes=self.alibi_slopes,
            cu_seqlens=cu_seqlens,
            output_tensor=output_tensor,
            use_spec_dec=is_verifier,
            use_alibi_attn=self.use_alibi_attn,
        )

        if is_prompts:  # prefilling stage
            self.pre_attention_backend.prefill(
                attn_metadata,
                cos=cos_sin[0],
                sin=cos_sin[1],
                high_precision=high_precision,
            )
            attn_output = self.attention_backend.prefill(
                attn_metadata,
                token_nums=token_nums,
            )
        else:  # decoding stage
            # When verifying speculated tokens, q_len covers them plus the last accepted token.
            q_len = tokens_to_verify + 1 if is_verifier else 1

            self.pre_attention_backend.decode(
                attn_metadata,
                q_len=q_len,
            )
            attn_output = self.attention_backend.decode(
                attn_metadata,
                fd_inter_tensor=fd_inter_tensor,
                q_len=q_len,
            )

        attn_output = attn_output.view(-1, self.hidden_size)
        attn_output = self.o_proj(attn_output)
        return attn_output


# NOTE This will cause difference as out length increases.
class NopadBaichuanMLP(NopadLlamaMLP):
    @staticmethod
    def from_native_module(
        module: nn.Module, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs
    ) -> ParallelModule:
        """Used for initialize the weight of NopadBaichuanMLP by origin MLP(Baichuan).

        Args:
            module (nn.Module): The origin MLP(Baichuan) layer.
        """
        mlp_gproj_w = module.gate_proj.weight
        assert is_distributed_tensor(
            module.gate_proj.weight
        ), "gate_proj.weight must be dtensor so we could get the layout of the weight"
        mlp_uproj_w = module.up_proj.weight
        mlp_dproj = module.down_proj

        mlp_layer = NopadBaichuanMLP(
            config=None,
            mlp_gproj_w=mlp_gproj_w,
            mlp_uproj_w=mlp_uproj_w,
            mlp_dproj=mlp_dproj,
            process_group=process_group,
        )

        return mlp_layer
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/models/nopadding_llama.py
colossalai/inference/modeling/models/nopadding_llama.py
# This code is adapted from huggingface transformers: https://github.com/huggingface/transformers/blob/v4.34.1/src/transformers/models/llama/modeling_llama.py
import itertools
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn
from torch.distributed import ProcessGroup
from transformers.models.llama.modeling_llama import (
    LlamaAttention,
    LlamaConfig,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaMLP,
    LlamaModel,
    LlamaRMSNorm,
)

from colossalai.inference.config import InputMetaData, ModelShardInferenceConfig
from colossalai.inference.flash_decoding_utils import FDIntermTensors
from colossalai.inference.modeling.backends.attention_backend import AttentionMetaData, get_attention_backend
from colossalai.inference.modeling.backends.pre_attention_backend import get_pre_attention_backend
from colossalai.inference.utils import can_use_flash_attn2
from colossalai.kernel.kernel_loader import InferenceOpsLoader
from colossalai.kernel.triton import get_xine_cache, rms_layernorm
from colossalai.logging import get_dist_logger
from colossalai.shardformer.layer.parallel_module import ParallelModule
from colossalai.tensor.d_tensor import distribute_tensor, is_distributed_tensor

inference_ops = InferenceOpsLoader().load()

logger = get_dist_logger(__name__)


def llama_causal_lm_forward(
    self: LlamaForCausalLM,
    input_tokens_ids: torch.Tensor,
    output_tensor: torch.Tensor,
    inputmetadata: InputMetaData,
    k_caches: List[torch.Tensor] = None,
    v_caches: List[torch.Tensor] = None,
) -> torch.Tensor:
    """This function will replace the forward function of LlamaForCausalLM.

    Args:
        batch (BatchInfo): It stores the necessary input information for this inference.
        k_caches (List[torch.Tensor]): It holds the GPU memory for the key cache.
        v_caches (List[torch.Tensor]): It holds the GPU memory for the value cache.
        high_precision(Optional[bool]): Whether to use float32 for underlying calculations of float16 data to achieve higher precision, defaults to False.

    Returns:
        torch.Tensor: logits produced by the LM head.
    """
    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    hidden_states = llama_model_forward(
        self.model,
        input_tokens_ids=input_tokens_ids,
        output_tensor=output_tensor,
        inputmetadata=inputmetadata,
        k_caches=k_caches,
        v_caches=v_caches,
        use_cuda_kernel=inputmetadata.use_cuda_kernel,  # Note currently the cuda kernel of layernorm, rotary_embedding_and_cache_copy couldn't pass the unitest but triton kernel could
        high_precision=inputmetadata.high_precision,
    )
    logits = self.lm_head(hidden_states)
    return logits


def llama_model_forward(
    self: LlamaModel,
    input_tokens_ids: torch.Tensor,
    output_tensor: torch.Tensor,
    inputmetadata: InputMetaData,
    k_caches: List[torch.Tensor] = None,
    v_caches: List[torch.Tensor] = None,
    use_cuda_kernel: Optional[bool] = True,
    high_precision: bool = False,
) -> torch.Tensor:
    """This function will replace the forward function of LlamaModel.

    Args:
        batch (BatchInfo, optional): It stores the necessary input information for this inference.. Defaults to None.
        k_caches (List[torch.Tensor], optional): It holds the GPU memory for the key cache. Defaults to None.
        v_caches (List[torch.Tensor], optional): It holds the GPU memory for the value cache. Defaults to None.
        high_precision(Optional[bool]): Whether to use float32 for underlying calculations of float16 data to achieve higher precision, defaults to False.
    """
    block_tables = inputmetadata.block_tables
    sequence_lengths = inputmetadata.sequence_lengths
    kv_seq_len = inputmetadata.kv_seq_len

    # NOTE (yuanheng-zhao): fow now, only triton kernels support verification process
    # during speculative-decoding (`q_len > 1`)
    # We will expicitly disable `use_cuda_kernel` here when speculative-decoding is enabled
    if inputmetadata.use_spec_dec and use_cuda_kernel:
        use_cuda_kernel = False
        logger.warning("CUDA kernel is disabled for speculative-decoding.")

    hidden_states = self.embed_tokens(input_tokens_ids)

    cu_seqlens = None

    # NOTE (yuanheng-zhao): we do not use cuda kernels for speculative-decoding for now
    # Three mutually exclusive ways of building the rotary cos/sin tables:
    # spec-dec (index directly), cuda kernel (get_cos_and_sin), or triton (get_xine_cache).
    if inputmetadata.use_spec_dec:
        # For speculative-decoding Prefill and Verifying Stage
        if inputmetadata.is_prompts:
            # output tensor shape is the same as normal Prefill Stage
            rotary_indexes = [torch.arange(0, length) for length in sequence_lengths]
        else:
            # the number of tokens to be verified in parallel plus the correct token in the last step
            n_tokens = inputmetadata.num_tokens_to_verify + 1
            assert n_tokens == hidden_states.size(0)
            rotary_indexes = [(length - n_tokens + i).view(-1) for i in range(n_tokens) for length in sequence_lengths]
        rotary_indexes = torch.cat(rotary_indexes, dim=-1)
        cos_sin = (self._cos_cached[rotary_indexes], self._sin_cached[rotary_indexes])

    elif use_cuda_kernel:
        if can_use_flash_attn2(inputmetadata.dtype):
            cu_seqlens = F.pad(torch.cumsum(sequence_lengths, dim=0, dtype=torch.int32), (1, 0))

        hidden_dim = self._cos_cached.size(-1)
        total_length = hidden_states.size(0)
        cos = torch.empty((total_length, hidden_dim), dtype=self._cos_cached.dtype, device=self._cos_cached.device)
        sin = torch.empty((total_length, hidden_dim), dtype=self._sin_cached.dtype, device=self._sin_cached.device)
        inference_ops.get_cos_and_sin(
            self._cos_cached, self._sin_cached, cos, sin, sequence_lengths, kv_seq_len, inputmetadata.is_prompts
        )
        cos_sin = (cos, sin)

    else:
        cos_sin = get_xine_cache(sequence_lengths, self._cos_cached, self._sin_cached, inputmetadata.is_prompts)

    sm_scale = 1.0 / (inputmetadata.head_dim**0.5)

    norm_output = torch.empty_like(hidden_states)
    tokens_to_verify = inputmetadata.num_tokens_to_verify if inputmetadata.use_spec_dec else None
    residual = None

    for layer_id, decoder_layer in enumerate(self.layers):
        hidden_states, residual = decoder_layer(
            hidden_states,
            residual=residual,
            block_tables=block_tables,
            k_cache=k_caches[layer_id],
            v_cache=v_caches[layer_id],
            is_prompts=inputmetadata.is_prompts,
            is_verifier=inputmetadata.use_spec_dec,
            tokens_to_verify=tokens_to_verify,
            sequence_lengths=sequence_lengths,
            cos_sin=cos_sin,
            fd_inter_tensor=inputmetadata.fd_inter_tensor,
            kv_seq_len=kv_seq_len,
            output_tensor=output_tensor,
            norm_output=norm_output,
            sm_scale=sm_scale,
            use_cuda_kernel=use_cuda_kernel,
            cu_seqlens=cu_seqlens,
            high_precision=high_precision,
        )

    if inputmetadata.is_prompts:
        # For prefill, only the last token of each sequence is needed for the next step.
        seq_len_cumsum = sequence_lengths.cumsum(dim=0)
        hidden_states = hidden_states[seq_len_cumsum - 1].contiguous()
        residual = residual[seq_len_cumsum - 1].contiguous()
        norm_output = torch.empty_like(hidden_states)
    hidden_states, _ = self.norm(hidden_states, norm_output, residual, use_cuda_kernel)

    return hidden_states


def llama_decoder_layer_forward(
    self: LlamaDecoderLayer,
    hidden_states: torch.Tensor,
    residual: torch.Tensor,
    block_tables: torch.Tensor,
    k_cache: torch.Tensor,
    v_cache: torch.Tensor,
    sequence_lengths: torch.Tensor,
    cos_sin: Tuple[torch.Tensor],
    fd_inter_tensor: FDIntermTensors,
    is_prompts: bool = True,
    is_verifier: bool = False,
    tokens_to_verify: int = None,
    kv_seq_len: int = 0,
    output_tensor: torch.Tensor = None,
    norm_output: torch.Tensor = None,
    sm_scale: int = None,
    use_cuda_kernel: bool = True,
    cu_seqlens: torch.Tensor = None,
    high_precision: bool = False,
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
    """This function will replace the forward function of LlamaDecoderLayer.

    Args:
        hidden_states (torch.Tensor): input to the layer of shape [token_num, embed_dim].
        residual (torch.Tensor): shape [token_num, embed_dim], used to be added to hidden_states in out_proj.
        block_tables (torch.Tensor): A 2D tensor of shape [batch_size, max_blocks_per_sequence],
            storing mapping of token_position_id -> block_id.
        k_cache (torch.Tensor): It holds the GPU memory for the key cache.
        v_cache (torch.Tensor): It holds the GPU memory for the key cache.
        sequence_lengths (torch.Tensor): Holding the sequence length of each sequence.
        cos_sin (Tuple[torch.Tensor]): Holding cos and sin.
        fd_inter_tensor (FDIntermTensors): Holding tensors used for
            storing intermediate values in flash-decoding.
        is_prompts (bool, optional): Whether the current inference process is in the context input phase. Defaults to True.
        kv_seq_len (int, optional): The max sequence length of input sequences. Defaults to 0.
        output_tensor (torch.Tensor, optional): The mid tensor holds the output of attention. Defaults to None.
        norm_output (torch.Tensor, optional): The mid tensor holds the output of layernorm. Defaults to None.
        sm_scale (int, optional): Used for flash attention. Defaults to None.
        use_cuda_kernel: (bool, optional): Whether to use cuda kernel. Defaults to True.
        cu_seqlens(torch.Tensor, optional): Holding the cumulative sum of sequence length.
        high_precision(Optional[bool]): Whether to use float32 for underlying calculations of float16 data to achieve higher precision, defaults to False.
    """
    # Fused rmsnorm returns (normed hidden_states, updated residual).
    hidden_states, residual = self.input_layernorm(hidden_states, norm_output, residual, use_cuda_kernel)
    # Self Attention
    hidden_states = self.self_attn(
        hidden_states=hidden_states,
        block_tables=block_tables,
        k_cache=k_cache,
        v_cache=v_cache,
        is_prompts=is_prompts,
        is_verifier=is_verifier,
        tokens_to_verify=tokens_to_verify,
        sequence_lengths=sequence_lengths,
        cos_sin=cos_sin,
        fd_inter_tensor=fd_inter_tensor,
        kv_seq_len=kv_seq_len,
        output_tensor=output_tensor,
        sm_scale=sm_scale,
        cu_seqlens=cu_seqlens,
        high_precision=high_precision,
    )

    # Fully Connected
    hidden_states, residual = self.post_attention_layernorm(hidden_states, norm_output, residual, use_cuda_kernel)
    hidden_states = self.mlp(hidden_states)

    return hidden_states, residual


def llama_rmsnorm_forward(
    self: LlamaRMSNorm,
    hidden_states: torch.Tensor,
    norm_output: torch.Tensor,
    residual: torch.Tensor = None,
    use_cuda_kernel: bool = True,
):
    # RMSNorm replacement: fused CUDA kernels when enabled, triton fallback otherwise.
    # Returns (normalized output, residual to carry forward).
    if use_cuda_kernel:
        if residual is not None:
            inference_ops.fused_add_rms_layernorm(hidden_states, residual, self.weight.data, self.variance_epsilon)
            return hidden_states, residual

        if norm_output is None:
            norm_output = torch.empty_like(hidden_states)
        inference_ops.rms_layernorm(norm_output, hidden_states, self.weight.data, self.variance_epsilon)
        return norm_output, hidden_states
    else:
        return rms_layernorm(hidden_states, self.weight.data, self.variance_epsilon, norm_output, residual)


class NopadLlamaMLP(LlamaMLP, ParallelModule):
    def __init__(
        self,
        config: LlamaConfig,
        mlp_gproj_w: torch.Tensor = None,
        mlp_uproj_w: torch.Tensor = None,
        mlp_dproj: ParallelModule = None,
        process_group: ProcessGroup = None,
    ):
        """Replacement of LlamaMLP layer.

        Args:
            config (LlamaConfig): Holding the Llama model config.
            mlp_gproj_w (torch.Tensor, optional): The transposed gate_proj weight. Defaults to None.
            mlp_uproj_w (torch.Tensor, optional): The transposed up_proj weight. Defaults to None.
            mlp_dproj (Linear1D_Row, optional): The Linear1D_Row mlp_dproj weight. Defaults to None.
        """
        ParallelModule.__init__(self)
        self.config = config
        assert is_distributed_tensor(
            mlp_gproj_w
        ), "mlp_gproj_w must be dtensor so we could get the layout of the weight"
        self.helper_layout = (
            mlp_gproj_w.dist_layout
        )  # NOTE this is a hack for the right load/shard of gate_up_weight(used in _load_from_state_dict)
        # Gate and up projections stacked into one [2, in, out] tensor for a single bmm.
        self.gate_up_weight = nn.Parameter(
            torch.stack([mlp_gproj_w.transpose(0, 1), mlp_uproj_w.transpose(0, 1)], dim=0)
        )
        self.gate_up_dict = {
            "gate_proj.weight": None,
            "up_proj.weight": None,
        }  # used and delattr in load/shard of gate/up weight
        self.down_proj = mlp_dproj
        self.process_group = process_group

    @staticmethod
    def from_native_module(
        module: LlamaMLP, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs
    ) -> ParallelModule:
        """Used for initialize the weight of NopadLlamaMLP by origin LlamaMLP.

        Args:
            module (LlamaMLP): The origin LlamaMLP layer.
        """
        config = module.config
        mlp_gproj_w = module.gate_proj.weight
        assert is_distributed_tensor(
            module.gate_proj.weight
        ), "gate_proj.weight must be dtensor so we could get the layout of the weight"
        mlp_uproj_w = module.up_proj.weight
        mlp_dproj = module.down_proj

        mlp_layer = NopadLlamaMLP(
            config=config,
            mlp_gproj_w=mlp_gproj_w,
            mlp_uproj_w=mlp_uproj_w,
            mlp_dproj=mlp_dproj,
            process_group=process_group,
        )

        return mlp_layer

    def _load_from_state_dict(
        self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
    ):
        # NOTE This is a hack to ensure we could load the right weight from LlamaMLP checkpoint due to the use of torch.stack(gate_weight, up_weight)
        # gate_up_dict exists only until both halves have been loaded; afterwards it is deleted
        # so subsequent loads fall through to the default path.
        if hasattr(self, "gate_up_dict"):
            for hook in self._load_state_dict_pre_hooks.values():
                hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs)

            persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set}
            local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items())
            local_state = {k: v for k, v in local_name_params if v is not None}

            device_mesh = self.helper_layout.device_mesh
            sharding_spec = self.helper_layout.sharding_spec
            for weight_name in self.gate_up_dict:
                prefix_weight_name = prefix + weight_name
                if prefix_weight_name in state_dict.keys():
                    # Shard the checkpoint weight like the original gate_proj, then transpose.
                    w = distribute_tensor(state_dict[prefix_weight_name], device_mesh, sharding_spec)
                    self.gate_up_dict[weight_name] = w.T

            if None not in self.gate_up_dict.values():
                # we've got all the weights of gate/up
                gate_up_w = torch.stack(list(self.gate_up_dict.values()), dim=0)

                input_param = nn.Parameter(
                    gate_up_w
                )  # NOTE gate_up_weight doesn't have to be a distensor, Like input_param = sharded_tensor_to_param(input_param)

                key = "gate_up_weight"
                param = local_state.get(key, None)

                try:
                    with torch.no_grad():
                        param.copy_(input_param)
                except Exception as ex:
                    error_msgs.append(
                        'While copying the parameter named "{}", '
                        "whose dimensions in the model are {} and "
                        "whose dimensions in the checkpoint are {}, "
                        "an exception occurred : {}.".format(key, param.size(), input_param.size(), ex.args)
                    )

                del self.gate_up_dict

        strict = False  # to avoid unexpected_keys
        super()._load_from_state_dict(
            state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """
        Args:
            hidden_states (torch.Tensor): input to the layer of shape [token_num, embed_dim].
        """
        # Broadcast the input against the stacked [2, in, out] gate/up weight so one
        # bmm produces both projections; silu_and_mul fuses the activation and product.
        hidden_states = hidden_states.expand(2, -1, -1)
        gate_up_proj_out = torch.bmm(hidden_states, self.gate_up_weight)
        act_out = inference_ops.silu_and_mul(gate_up_proj_out)
        return self.down_proj(act_out)

    def extra_repr(self) -> str:
        return f"gate_up_proj MergedLinear1D_Col: in_features={self.gate_up_weight.shape[1]}x2, out_features={self.gate_up_weight.shape[2]}, bias=False"


class NopadLlamaAttention(LlamaAttention, ParallelModule):
    def __init__(
        self,
        config: LlamaConfig,
        layer_idx: Optional[int] = None,
        attn_qproj_w: torch.Tensor = None,
        attn_kproj_w: torch.Tensor = None,
        attn_vproj_w: torch.Tensor = None,
        attn_oproj: ParallelModule = None,
        process_group: ProcessGroup = None,
        model_shard_infer_config: ModelShardInferenceConfig = None,
        num_heads: int = None,
        hidden_size: int = None,
        num_key_value_heads: int = None,
    ):
        """This layer will replace the LlamaAttention.

        Args:
            config (LlamaConfig): Holding the Llama model config.
            layer_idx (Optional[int], optional): The decode layer id of this attention layer. Defaults to None.
            attn_qproj_w (torch.Tensor, optional): The transposed q_proj weight. Defaults to None.
            attn_kproj_w (torch.Tensor, optional): The transposed k_proj weight. Defaults to None.
            attn_vproj_w (torch.Tensor, optional): The transposed v_proj weight. Defaults to None.
            attn_oproj (Linear1D_Row, optional): The Linear1D_Row o_proj weight. Defaults to None.
""" ParallelModule.__init__(self) self.config = config self.layer_idx = layer_idx self.o_proj = attn_oproj self.process_group = process_group self.attention_dropout = config.attention_dropout self.hidden_size = hidden_size self.num_heads = num_heads self.head_dim = self.hidden_size // self.num_heads self.num_key_value_heads = num_key_value_heads self.num_key_value_groups = self.num_heads // self.num_key_value_heads self.max_position_embeddings = config.max_position_embeddings self.rope_theta = config.rope_theta self.is_causal = True self.attention_backend = get_attention_backend(model_shard_infer_config) self.pre_attention_backend = get_pre_attention_backend(model_shard_infer_config) if self.num_heads == self.num_key_value_heads: qkv_weight_list = [attn_qproj_w.transpose(0, 1), attn_kproj_w.transpose(0, 1), attn_vproj_w.transpose(0, 1)] self.qkv_weight = nn.Parameter(torch.stack(qkv_weight_list, dim=0)) self.helper_layout = ( attn_qproj_w.dist_layout ) # NOTE this is a hack for the right load/shard of qkv_weight(used in _load_from_state_dict) self.qkv_dict = { "q_proj.weight": None, "k_proj.weight": None, "v_proj.weight": None, } # used and delattr in load/shard of qkv weight else: self.helper_layout = ( attn_qproj_w.dist_layout ) # NOTE this is a hack for the right load/shard of qkv_weight(used in _load_from_state_dict) self.q_proj_weight = nn.Parameter(attn_qproj_w.transpose(0, 1).contiguous()) self.k_proj_weight = nn.Parameter(attn_kproj_w.transpose(0, 1).contiguous()) self.v_proj_weight = nn.Parameter(attn_vproj_w.transpose(0, 1).contiguous()) @staticmethod def from_native_module( module: LlamaAttention, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs ) -> ParallelModule: """Used for initialize the weight of NopadLlamaAttention by origin LlamaAttention. Args: module (LlamaAttention): The origin LlamaAttention layer. 
""" config = module.config layer_idx = module.layer_idx attn_qproj_w = module.q_proj.weight attn_kproj_w = module.k_proj.weight attn_vproj_w = module.v_proj.weight assert is_distributed_tensor(attn_qproj_w), "attn_qproj_w must be dist tensor" attn_oproj = module.o_proj model_shard_infer_config = kwargs.get("model_shard_infer_config", None) attn_layer = NopadLlamaAttention( config=config, layer_idx=layer_idx, attn_qproj_w=attn_qproj_w, attn_kproj_w=attn_kproj_w, attn_vproj_w=attn_vproj_w, attn_oproj=attn_oproj, process_group=process_group, model_shard_infer_config=model_shard_infer_config, num_heads=module.config.num_attention_heads, hidden_size=module.config.hidden_size, num_key_value_heads=module.config.num_key_value_heads, ) return attn_layer # Replace transformers.models.llama.modeling_llama.LlamaAttention.forward def forward( self, hidden_states: torch.Tensor, block_tables: torch.Tensor, k_cache: torch.Tensor, v_cache: torch.Tensor, sequence_lengths: torch.Tensor, cos_sin: Tuple[torch.Tensor], fd_inter_tensor: FDIntermTensors, is_prompts: bool = True, is_verifier: bool = False, tokens_to_verify: int = None, kv_seq_len: int = 0, output_tensor: torch.Tensor = None, sm_scale: int = None, use_cuda_kernel: bool = True, cu_seqlens: torch.Tensor = None, high_precision: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """ Args: hidden_states (torch.Tensor): input to the layer of shape [token_num, embed_dim]. block_tables (torch.Tensor): A 2D tensor of shape [batch_size, max_blocks_per_sequence], storing mapping of token_position_id -> block_id. k_cache (torch.Tensor): It holds the GPU memory for the key cache. v_cache (torch.Tensor): It holds the GPU memory for the key cache. sequence_lengths (torch.Tensor, optional): Holding the sequence length of each sequence. cos_sin (Tuple[torch.Tensor], optional): Holding cos and sin. 
fd_inter_tensor (FDIntermTensors, optional): Holding tensors used for storing intermediate values in flash-decoding. is_prompts (bool, optional): Whether the current inference process is in the context input phase. Defaults to True. kv_seq_len (int, optional): The max sequence length of input sequences. Defaults to 0. output_tensor (torch.Tensor, optional): The mid tensor holds the output of attention. Defaults to None. sm_scale (int, optional): Used for flash attention. Defaults to None. use_cuda_kernel: (bool, optional): Whether to use cuda kernel. Defaults to True. cu_seqlens(torch.Tensor, optional): Holding the cumulative sum of sequence length. high_precision(Optional[bool]): Whether to use float32 for underlying calculations of float16 data to achieve higher precision, defaults to False. """ token_nums = hidden_states.size(0) if self.num_heads != self.num_key_value_heads: query_states = torch.mm(hidden_states, self.q_proj_weight).view(-1, self.num_heads, self.head_dim) key_states = torch.mm(hidden_states, self.k_proj_weight).view(-1, self.num_key_value_heads, self.head_dim) value_states = torch.mm(hidden_states, self.v_proj_weight).view(-1, self.num_key_value_heads, self.head_dim) else: # fused qkv hidden_states = hidden_states.expand(3, -1, -1) query_states, key_states, value_states = ( torch.bmm(hidden_states, self.qkv_weight).view(3, token_nums, self.num_heads, self.head_dim).unbind(0) ) block_size = k_cache.size(-2) attn_metadata = AttentionMetaData( query_states=query_states, key_states=key_states, value_states=value_states, k_cache=k_cache, v_cache=v_cache, block_tables=block_tables, block_size=block_size, kv_seq_len=kv_seq_len, sequence_lengths=sequence_lengths, sm_scale=sm_scale, alibi_slopes=None, cu_seqlens=cu_seqlens, output_tensor=output_tensor, use_spec_dec=is_verifier, use_alibi_attn=False, ) if is_prompts: # prefilling stage self.pre_attention_backend.prefill( attn_metadata, cos=cos_sin[0], sin=cos_sin[1], high_precision=high_precision, ) 
attn_output = self.attention_backend.prefill( attn_metadata, token_nums=token_nums, ) else: # decoding stage q_len = tokens_to_verify + 1 if is_verifier else 1 self.pre_attention_backend.decode( attn_metadata, cos=cos_sin[0], sin=cos_sin[1], q_len=q_len, ) attn_output = self.attention_backend.decode( attn_metadata, fd_inter_tensor=fd_inter_tensor, num_key_value_groups=self.num_key_value_groups, q_len=q_len, ) attn_output = attn_output.view(-1, self.hidden_size) attn_output = self.o_proj(attn_output) return attn_output def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): for hook in self._load_state_dict_pre_hooks.values(): hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items()) local_state = {k: v for k, v in local_name_params if v is not None} device_mesh = self.helper_layout.device_mesh sharding_spec = self.helper_layout.sharding_spec if self.num_heads == self.num_key_value_heads and hasattr(self, "qkv_dict"): # NOTE This is a hack to ensure we could load the right weight from LlamaAttention checkpoint due to the use of torch.stack(q_weight, k_weight, v_weight) key = "qkv_weight" # NOTE(@lry89757) We will load the sharded checkpoint file according to the weight map from *.index.json # Here we need the weight of q,k,v to stack the weights of q,k,v into one qkv weight. # Unfortunately, it is highly like that all weights of q,k,v are not in the same sharded checkpoint file(like meta-llama/llama3-70B) # so here we will stack them when we really collect all the three weights. 
for weight_name in self.qkv_dict: prefix_weight_name = prefix + weight_name if prefix_weight_name in state_dict.keys(): w = distribute_tensor(state_dict[prefix_weight_name], device_mesh, sharding_spec) self.qkv_dict[weight_name] = w.T if None not in self.qkv_dict.values(): # we've got all the weights of q, k, v qkv_w = torch.stack(list(self.qkv_dict.values()), dim=0) input_param = nn.Parameter( qkv_w ) # NOTE qkv_weight doesn't have to be a distensor, Like input_param = sharded_tensor_to_param(input_param) param = local_state[key] try: with torch.no_grad(): param.copy_(input_param) except Exception as ex: error_msgs.append( 'While copying the parameter named "{}", ' "whose dimensions in the model are {} and " "whose dimensions in the checkpoint are {}, " "an exception occurred : {}.".format(key, param.size(), input_param.size(), ex.args) ) del self.qkv_dict else: def _load(origin_weight_name="q_proj.weight", local_weight_name="q_proj_weight"): if prefix + origin_weight_name in state_dict.keys(): attn_qproj_w = state_dict[prefix + origin_weight_name] w = distribute_tensor(attn_qproj_w, device_mesh, sharding_spec) input_param = nn.Parameter(w.T) param = local_state[local_weight_name] try: with torch.no_grad(): param.copy_(input_param) except Exception as ex: key = local_weight_name error_msgs.append( 'While copying the parameter named "{}", ' "whose dimensions in the model are {} and " "whose dimensions in the checkpoint are {}, " "an exception occurred : {}.".format(key, param.size(), input_param.size(), ex.args) ) if prefix + "q_proj.weight" in state_dict.keys(): _load(origin_weight_name="q_proj.weight", local_weight_name="q_proj_weight") if prefix + "k_proj.weight" in state_dict.keys(): _load(origin_weight_name="k_proj.weight", local_weight_name="k_proj_weight") if prefix + "v_proj.weight" in state_dict.keys(): _load(origin_weight_name="v_proj.weight", local_weight_name="v_proj_weight") strict = False # to avoid unexpected_keys super()._load_from_state_dict( 
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def extra_repr(self) -> str: return f"qkv_weight_proj MergedLinear1D_Col: in_features={self.qkv_weight.shape[1]}x3, out_features={self.qkv_weight.shape[2]}, bias=False"
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/layers/baichuan_tp_linear.py
colossalai/inference/modeling/layers/baichuan_tp_linear.py
from typing import List, Union import torch.distributed as dist import torch.nn as nn from torch.distributed import ProcessGroup from colossalai.lazy import LazyInitContext from colossalai.shardformer.layer import Linear1D_Col from colossalai.shardformer.layer.parallel_module import ParallelModule class BaichuanLMHeadLinear1D_Col(Linear1D_Col): @staticmethod def from_native_module( module: nn.Module, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs ) -> ParallelModule: LazyInitContext.materialize(module) module.in_features = module.weight.size(1) module.out_features = module.weight.size(0) module.bias = None module.weight.data = nn.functional.normalize( module.weight ) # NOTE(lry89757) This behavior may not apply to lazy init. When we use lazy init, the weight of shardformer is not the real weight. # So we should rewrite our own load_from_state_dict of `BaichuanLMHeadLinear1D_Col` to fix this potential issue. # get the attributes in_features = module.in_features out_features = module.out_features bias = module.bias is not None device = module.weight.device # ensure only one process group is passed if isinstance(process_group, (list, tuple)): assert len(process_group) == 1, f"Expected only one process group, got {len(process_group)}." process_group = process_group[0] tp_size = dist.get_world_size(process_group) if out_features < tp_size: return module if out_features % tp_size != 0: raise ValueError( f"The size of out_features:{out_features} is not integer multiples of tensor parallel size: {tp_size}!" 
) lmhead_1d = BaichuanLMHeadLinear1D_Col( in_features=in_features, out_features=out_features, bias=bias, device=device, process_group=process_group, weight=module.weight, bias_=module.bias, **kwargs, ) return lmhead_1d def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): state_dict[prefix + "weight"] = nn.functional.normalize(state_dict[prefix + "weight"]) super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/layers/diffusion.py
colossalai/inference/modeling/layers/diffusion.py
import inspect import types import torch from torch import nn class DiffusionPipe(nn.Module): """ This Class convert a class of `DiffusionPipeline` into `nn.Module` and reserve most of origin attr,function and property. """ def __init__(self, source_obj) -> None: super(DiffusionPipe, self).__init__() for k, v in source_obj.__dict__.items(): if isinstance(v, nn.Module): self.add_module(k, v) else: setattr(self, k, v) skip_list = ["_execution_device", "to", "device"] # this for name, member in inspect.getmembers(source_obj.__class__): if name in skip_list: continue if not name.startswith("__") and not name.endswith("__"): if isinstance(member, property): setattr(self.__class__, name, member) elif inspect.isfunction(member) or inspect.ismethod(member): bound_method = types.MethodType(member, self) setattr(self, name, bound_method) elif not callable(member) and not isinstance(member, property): setattr(self, name, member) elif name == "__call__": bound_method = types.MethodType(member, self) setattr(self, "_forward", bound_method) @property def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from Accelerate's module hooks. """ # return self.device return torch.device("cuda") @property def device(self): next(self.parameters()).device def forward(self, *args, **kwargs): return self._forward(*args, **kwargs)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/layers/distrifusion.py
colossalai/inference/modeling/layers/distrifusion.py
# Code refer and adapted from: # https://github.com/huggingface/diffusers/blob/v0.29.0-release/src/diffusers # https://github.com/PipeFusion/PipeFusion import inspect from typing import Any, Dict, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn.functional as F from diffusers.models import attention_processor from diffusers.models.attention import Attention from diffusers.models.embeddings import PatchEmbed, get_2d_sincos_pos_embed from diffusers.models.transformers.pixart_transformer_2d import PixArtTransformer2DModel from diffusers.models.transformers.transformer_sd3 import SD3Transformer2DModel from torch import nn from torch.distributed import ProcessGroup from colossalai.inference.config import ModelShardInferenceConfig from colossalai.logging import get_dist_logger from colossalai.shardformer.layer.parallel_module import ParallelModule from colossalai.utils import get_current_device try: from flash_attn import flash_attn_func HAS_FLASH_ATTN = True except ImportError: HAS_FLASH_ATTN = False logger = get_dist_logger(__name__) # adapted from https://github.com/huggingface/diffusers/blob/v0.29.0-release/src/diffusers/models/transformers/transformer_2d.py def PixArtAlphaTransformer2DModel_forward( self: PixArtTransformer2DModel, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, added_cond_kwargs: Dict[str, torch.Tensor] = None, class_labels: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ): assert hasattr( self, "patched_parallel_size" ), "please check your policy, `Transformer2DModel` Must have attribute `patched_parallel_size`" if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is 
deprecated. `scale` will be ignored.") # ensure attention_mask is a bias, and give it a singleton query_tokens dimension. # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward. # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias. # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None and attention_mask.ndim == 2: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 1. Input batch_size = hidden_states.shape[0] height, width = ( hidden_states.shape[-2] // self.config.patch_size, hidden_states.shape[-1] // self.config.patch_size, ) hidden_states = self.pos_embed(hidden_states) timestep, embedded_timestep = self.adaln_single( timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype ) if self.caption_projection is not None: encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) # 2. 
Blocks for block in self.transformer_blocks: hidden_states = block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None].to(self.scale_shift_table.device)).chunk( 2, dim=1 ) hidden_states = self.norm_out(hidden_states) # Modulation hidden_states = hidden_states * (1 + scale.to(hidden_states.device)) + shift.to(hidden_states.device) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.squeeze(1) # unpatchify hidden_states = hidden_states.reshape( shape=( -1, height // self.patched_parallel_size, width, self.config.patch_size, self.config.patch_size, self.out_channels, ) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=( -1, self.out_channels, height // self.patched_parallel_size * self.config.patch_size, width * self.config.patch_size, ) ) # enable Distrifusion Optimization if hasattr(self, "patched_parallel_size"): from torch import distributed as dist if (getattr(self, "output_buffer", None) is None) or (self.output_buffer.shape != output.shape): self.output_buffer = torch.empty_like(output) if (getattr(self, "buffer_list", None) is None) or (self.buffer_list[0].shape != output.shape): self.buffer_list = [torch.empty_like(output) for _ in range(self.patched_parallel_size)] output = output.contiguous() dist.all_gather(self.buffer_list, output, async_op=False) torch.cat(self.buffer_list, dim=2, out=self.output_buffer) output = self.output_buffer return (output,) # adapted from https://github.com/huggingface/diffusers/blob/v0.29.0-release/src/diffusers/models/transformers/transformer_sd3.py def SD3Transformer2DModel_forward( self: SD3Transformer2DModel, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = 
None, pooled_projections: torch.FloatTensor = None, timestep: torch.LongTensor = None, joint_attention_kwargs: Optional[Dict[str, Any]] = None, return_dict: bool = True, ) -> Union[torch.FloatTensor]: assert hasattr( self, "patched_parallel_size" ), "please check your policy, `Transformer2DModel` Must have attribute `patched_parallel_size`" height, width = hidden_states.shape[-2:] hidden_states = self.pos_embed(hidden_states) # takes care of adding positional embeddings too. temb = self.time_text_embed(timestep, pooled_projections) encoder_hidden_states = self.context_embedder(encoder_hidden_states) for block in self.transformer_blocks: encoder_hidden_states, hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, temb=temb ) hidden_states = self.norm_out(hidden_states, temb) hidden_states = self.proj_out(hidden_states) # unpatchify patch_size = self.config.patch_size height = height // patch_size // self.patched_parallel_size width = width // patch_size hidden_states = hidden_states.reshape( shape=(hidden_states.shape[0], height, width, patch_size, patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(hidden_states.shape[0], self.out_channels, height * patch_size, width * patch_size) ) # enable Distrifusion Optimization if hasattr(self, "patched_parallel_size"): from torch import distributed as dist if (getattr(self, "output_buffer", None) is None) or (self.output_buffer.shape != output.shape): self.output_buffer = torch.empty_like(output) if (getattr(self, "buffer_list", None) is None) or (self.buffer_list[0].shape != output.shape): self.buffer_list = [torch.empty_like(output) for _ in range(self.patched_parallel_size)] output = output.contiguous() dist.all_gather(self.buffer_list, output, async_op=False) torch.cat(self.buffer_list, dim=2, out=self.output_buffer) output = self.output_buffer return (output,) # Code adapted from: 
https://github.com/PipeFusion/PipeFusion/blob/main/pipefuser/modules/dit/patch_parallel/patchembed.py class DistrifusionPatchEmbed(ParallelModule): def __init__( self, module: PatchEmbed, process_group: Union[ProcessGroup, List[ProcessGroup]], model_shard_infer_config: ModelShardInferenceConfig = None, ): super().__init__() self.module = module self.rank = dist.get_rank(group=process_group) self.patched_parallelism_size = model_shard_infer_config.patched_parallelism_size @staticmethod def from_native_module(module: PatchEmbed, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs): model_shard_infer_config = kwargs.get("model_shard_infer_config", None) distrifusion_embed = DistrifusionPatchEmbed( module, process_group, model_shard_infer_config=model_shard_infer_config ) return distrifusion_embed def forward(self, latent): module = self.module if module.pos_embed_max_size is not None: height, width = latent.shape[-2:] else: height, width = latent.shape[-2] // module.patch_size, latent.shape[-1] // module.patch_size latent = module.proj(latent) if module.flatten: latent = latent.flatten(2).transpose(1, 2) # BCHW -> BNC if module.layer_norm: latent = module.norm(latent) if module.pos_embed is None: return latent.to(latent.dtype) # Interpolate or crop positional embeddings as needed if module.pos_embed_max_size: pos_embed = module.cropped_pos_embed(height, width) else: if module.height != height or module.width != width: pos_embed = get_2d_sincos_pos_embed( embed_dim=module.pos_embed.shape[-1], grid_size=(height, width), base_size=module.base_size, interpolation_scale=module.interpolation_scale, ) pos_embed = torch.from_numpy(pos_embed).float().unsqueeze(0).to(latent.device) else: pos_embed = module.pos_embed b, c, h = pos_embed.shape pos_embed = pos_embed.view(b, self.patched_parallelism_size, -1, h)[:, self.rank] return (latent + pos_embed).to(latent.dtype) # Code adapted from: 
https://github.com/PipeFusion/PipeFusion/blob/main/pipefuser/modules/dit/patch_parallel/conv2d.py class DistrifusionConv2D(ParallelModule): def __init__( self, module: nn.Conv2d, process_group: Union[ProcessGroup, List[ProcessGroup]], model_shard_infer_config: ModelShardInferenceConfig = None, ): super().__init__() self.module = module self.rank = dist.get_rank(group=process_group) self.patched_parallelism_size = model_shard_infer_config.patched_parallelism_size @staticmethod def from_native_module(module: nn.Conv2d, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs): model_shard_infer_config = kwargs.get("model_shard_infer_config", None) distrifusion_conv = DistrifusionConv2D(module, process_group, model_shard_infer_config=model_shard_infer_config) return distrifusion_conv def sliced_forward(self, x: torch.Tensor) -> torch.Tensor: b, c, h, w = x.shape stride = self.module.stride[0] padding = self.module.padding[0] output_h = x.shape[2] // stride // self.patched_parallelism_size idx = dist.get_rank() h_begin = output_h * idx * stride - padding h_end = output_h * (idx + 1) * stride + padding final_padding = [padding, padding, 0, 0] if h_begin < 0: h_begin = 0 final_padding[2] = padding if h_end > h: h_end = h final_padding[3] = padding sliced_input = x[:, :, h_begin:h_end, :] padded_input = F.pad(sliced_input, final_padding, mode="constant") return F.conv2d( padded_input, self.module.weight, self.module.bias, stride=stride, padding="valid", ) def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: output = self.sliced_forward(input) return output # Code adapted from: https://github.com/huggingface/diffusers/blob/v0.29.0-release/src/diffusers/models/attention_processor.py class DistrifusionFusedAttention(ParallelModule): def __init__( self, module: attention_processor.Attention, process_group: Union[ProcessGroup, List[ProcessGroup]], model_shard_infer_config: ModelShardInferenceConfig = None, ): super().__init__() 
self.counter = 0 self.module = module self.buffer_list = None self.kv_buffer_idx = dist.get_rank(group=process_group) self.patched_parallelism_size = model_shard_infer_config.patched_parallelism_size self.handle = None self.process_group = process_group self.warm_step = 5 # for warmup @staticmethod def from_native_module( module: attention_processor.Attention, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs ) -> ParallelModule: model_shard_infer_config = kwargs.get("model_shard_infer_config", None) return DistrifusionFusedAttention( module=module, process_group=process_group, model_shard_infer_config=model_shard_infer_config, ) def _forward( self, attn: Attention, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor = None, attention_mask: Optional[torch.FloatTensor] = None, *args, **kwargs, ) -> torch.FloatTensor: residual = hidden_states input_ndim = hidden_states.ndim if input_ndim == 4: batch_size, channel, height, width = hidden_states.shape hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2) context_input_ndim = encoder_hidden_states.ndim if context_input_ndim == 4: batch_size, channel, height, width = encoder_hidden_states.shape encoder_hidden_states = encoder_hidden_states.view(batch_size, channel, height * width).transpose(1, 2) batch_size = encoder_hidden_states.shape[0] # `sample` projections. 
query = attn.to_q(hidden_states) key = attn.to_k(hidden_states) value = attn.to_v(hidden_states) kv = torch.cat([key, value], dim=-1) # shape of kv now: (bs, seq_len // parallel_size, dim * 2) if self.patched_parallelism_size == 1: full_kv = kv else: if self.buffer_list is None: # buffer not created full_kv = torch.cat([kv for _ in range(self.patched_parallelism_size)], dim=1) elif self.counter <= self.warm_step: # logger.info(f"warmup: {self.counter}") dist.all_gather( self.buffer_list, kv, group=self.process_group, async_op=False, ) full_kv = torch.cat(self.buffer_list, dim=1) else: # logger.info(f"use old kv to infer: {self.counter}") self.buffer_list[self.kv_buffer_idx].copy_(kv) full_kv = torch.cat(self.buffer_list, dim=1) assert self.handle is None, "we should maintain the kv of last step" self.handle = dist.all_gather(self.buffer_list, kv, group=self.process_group, async_op=True) key, value = torch.split(full_kv, full_kv.shape[-1] // 2, dim=-1) # `context` projections. encoder_hidden_states_query_proj = attn.add_q_proj(encoder_hidden_states) encoder_hidden_states_key_proj = attn.add_k_proj(encoder_hidden_states) encoder_hidden_states_value_proj = attn.add_v_proj(encoder_hidden_states) # attention query = torch.cat([query, encoder_hidden_states_query_proj], dim=1) key = torch.cat([key, encoder_hidden_states_key_proj], dim=1) value = torch.cat([value, encoder_hidden_states_value_proj], dim=1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) hidden_states = hidden_states = F.scaled_dot_product_attention( query, key, value, dropout_p=0.0, is_causal=False ) # NOTE(@lry89757) for torch >= 2.2, flash attn has been already integrated into scaled_dot_product_attention, https://pytorch.org/blog/pytorch2-2/ hidden_states = 
hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # Split the attention outputs. hidden_states, encoder_hidden_states = ( hidden_states[:, : residual.shape[1]], hidden_states[:, residual.shape[1] :], ) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if not attn.context_pre_only: encoder_hidden_states = attn.to_add_out(encoder_hidden_states) if input_ndim == 4: hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) if context_input_ndim == 4: encoder_hidden_states = encoder_hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width) return hidden_states, encoder_hidden_states def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, **cross_attention_kwargs, ) -> torch.Tensor: if self.handle is not None: self.handle.wait() self.handle = None b, l, c = hidden_states.shape kv_shape = (b, l, self.module.to_k.out_features * 2) if self.patched_parallelism_size > 1 and (self.buffer_list is None or self.buffer_list[0].shape != kv_shape): self.buffer_list = [ torch.empty(kv_shape, dtype=hidden_states.dtype, device=get_current_device()) for _ in range(self.patched_parallelism_size) ] self.counter = 0 attn_parameters = set(inspect.signature(self.module.processor.__call__).parameters.keys()) quiet_attn_parameters = {"ip_adapter_masks"} unused_kwargs = [ k for k, _ in cross_attention_kwargs.items() if k not in attn_parameters and k not in quiet_attn_parameters ] if len(unused_kwargs) > 0: logger.warning( f"cross_attention_kwargs {unused_kwargs} are not expected by {self.module.processor.__class__.__name__} and will be ignored." 
) cross_attention_kwargs = {k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters} output = self._forward( self.module, hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask, **cross_attention_kwargs, ) self.counter += 1 return output # Code adapted from: https://github.com/PipeFusion/PipeFusion/blob/main/pipefuser/modules/dit/patch_parallel/attn.py class DistriSelfAttention(ParallelModule): def __init__( self, module: Attention, process_group: Union[ProcessGroup, List[ProcessGroup]], model_shard_infer_config: ModelShardInferenceConfig = None, ): super().__init__() self.counter = 0 self.module = module self.buffer_list = None self.kv_buffer_idx = dist.get_rank(group=process_group) self.patched_parallelism_size = model_shard_infer_config.patched_parallelism_size self.handle = None self.process_group = process_group self.warm_step = 3 # for warmup @staticmethod def from_native_module( module: Attention, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs ) -> ParallelModule: model_shard_infer_config = kwargs.get("model_shard_infer_config", None) return DistriSelfAttention( module=module, process_group=process_group, model_shard_infer_config=model_shard_infer_config, ) def _forward(self, hidden_states: torch.FloatTensor, scale: float = 1.0): attn = self.module assert isinstance(attn, Attention) residual = hidden_states batch_size, sequence_length, _ = hidden_states.shape query = attn.to_q(hidden_states) encoder_hidden_states = hidden_states k = self.module.to_k(encoder_hidden_states) v = self.module.to_v(encoder_hidden_states) kv = torch.cat([k, v], dim=-1) # shape of kv now: (bs, seq_len // parallel_size, dim * 2) if self.patched_parallelism_size == 1: full_kv = kv else: if self.buffer_list is None: # buffer not created full_kv = torch.cat([kv for _ in range(self.patched_parallelism_size)], dim=1) elif self.counter <= self.warm_step: # logger.info(f"warmup: {self.counter}") dist.all_gather( 
self.buffer_list, kv, group=self.process_group, async_op=False, ) full_kv = torch.cat(self.buffer_list, dim=1) else: # logger.info(f"use old kv to infer: {self.counter}") self.buffer_list[self.kv_buffer_idx].copy_(kv) full_kv = torch.cat(self.buffer_list, dim=1) assert self.handle is None, "we should maintain the kv of last step" self.handle = dist.all_gather(self.buffer_list, kv, group=self.process_group, async_op=True) if HAS_FLASH_ATTN: # flash attn key, value = torch.split(full_kv, full_kv.shape[-1] // 2, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim) key = key.view(batch_size, -1, attn.heads, head_dim) value = value.view(batch_size, -1, attn.heads, head_dim) hidden_states = flash_attn_func(query, key, value, dropout_p=0.0, causal=False) hidden_states = hidden_states.reshape(batch_size, -1, attn.heads * head_dim).to(query.dtype) else: # naive attn key, value = torch.split(full_kv, full_kv.shape[-1] // 2, dim=-1) inner_dim = key.shape[-1] head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention(query, key, value, dropout_p=0.0, is_causal=False) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) if attn.residual_connection: hidden_states = hidden_states + residual hidden_states = hidden_states / attn.rescale_output_factor return hidden_states def forward( self, hidden_states: torch.FloatTensor, encoder_hidden_states: 
Optional[torch.FloatTensor] = None, scale: float = 1.0, *args, **kwargs, ) -> torch.FloatTensor: # async preallocates memo buffer if self.handle is not None: self.handle.wait() self.handle = None b, l, c = hidden_states.shape kv_shape = (b, l, self.module.to_k.out_features * 2) if self.patched_parallelism_size > 1 and (self.buffer_list is None or self.buffer_list[0].shape != kv_shape): self.buffer_list = [ torch.empty(kv_shape, dtype=hidden_states.dtype, device=get_current_device()) for _ in range(self.patched_parallelism_size) ] self.counter = 0 output = self._forward(hidden_states, scale=scale) self.counter += 1 return output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/layers/__init__.py
colossalai/inference/modeling/layers/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/layers/attention.py
colossalai/inference/modeling/layers/attention.py
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.modeling_attn_mask_utils import AttentionMaskConverter


def copy_to_cache(source, cache, lengths, block_tables, type: str = "prefill"):
    """
    Func: copy key/value into key/value cache.

    Args: key/value(source): shape [bsz,seq_len,num_heads,head_size]
          cache: shape [num_blocks, num_kv_heads, head_size, block_size]
          lengths: key/value lengths
          block_tables
    """
    num_blocks, num_heads, block_size, head_size = cache.shape
    bsz, max_blocks_per_seq = block_tables.shape
    # number of cache blocks each sequence occupies (ceil division)
    needed_blocks = (lengths + block_size - 1) // block_size

    if type == "prefill":
        # prompt phase: write all full blocks, then the partial last block
        for i in range(bsz):
            seq_len = lengths[i]
            block_num = needed_blocks[i]
            token_id = 0
            for block_idx in range(block_num - 1):
                # permute (tokens, heads, head_size) -> (heads, tokens, head_size) to match cache layout
                cache[block_tables[i][block_idx]] = source[i][token_id : token_id + block_size].permute(1, 0, 2)
                token_id += block_size
            cache[block_tables[i][block_num - 1], :, : seq_len - token_id, :] = source[i][token_id:seq_len].permute(
                1, 0, 2
            )
    elif type == "decoding":
        assert source.size(1) == 1, "seq_len should be equal to 1 when decoding."
        source = source.squeeze(1)
        # slot of the newest token within its last block, i.e. (lengths - 1) % block_size
        slot_idx = (lengths + block_size - 1) % block_size
        for i in range(bsz):
            cache[block_tables[i, needed_blocks[i] - 1], :, slot_idx[i], :] = source[i]

    return cache


def convert_kvcache(cache, lengths, block_tables, pad_id=0):
    """
    Func: convert key/value cache for calculation

    Args: cache: shape [num_blocks, num_heads, block_size, head_size]
          lengths: key/value length
          block_tables
          pad_id: padded_id
    """
    num_blocks, num_heads, block_size, head_size = cache.shape

    needed_blocks = (lengths + block_size - 1) // block_size
    # tokens in the last (possibly partial) block; a full last block keeps block_size tokens
    num_remaing_tokens = lengths % block_size
    num_remaing_tokens[num_remaing_tokens == 0] += block_size
    bsz = block_tables.shape[0]
    seq_len = max(lengths)
    padded_cache = []
    for i in range(bsz):
        # flatten full blocks to (tokens, heads, head_size), then append the partial last block
        _cache = torch.cat(
            (
                cache[block_tables[i][: needed_blocks[i] - 1]].permute((0, 2, 1, 3)).reshape(-1, num_heads, head_size),
                cache[block_tables[i][needed_blocks[i] - 1], :, : num_remaing_tokens[i], :].permute(1, 0, 2),
            ),
            dim=0,
        )
        # pad along the token dimension up to the batch max length
        padding = seq_len - _cache.size(0)
        if padding > 0:
            _cache = F.pad(_cache, (0, 0, 0, 0, 0, padding), value=pad_id)
        padded_cache.append(_cache)
    return torch.stack(padded_cache, dim=0)


class PagedAttention:
    """
    Pure Torch implementation version of paged_attention.
    Holds different types of forward function and useful components.
    """

    @staticmethod
    def pad_and_reshape(tensor, seq_lengths, max_seq_len, num_heads, head_size):
        """
        Transform 1D no_pad tensor into 2D padded tensor with shape [bsz,seq_len,num_heads,head_size]
        """
        bsz = len(seq_lengths)
        padded_tensor = torch.zeros(bsz, max_seq_len, num_heads, head_size, dtype=tensor.dtype)

        token_idx = 0
        for i, seq_len in enumerate(seq_lengths):
            seq_tensor = tensor[token_idx : token_idx + seq_len]
            padded_tensor[i, :seq_len, :, :] = seq_tensor
            token_idx += seq_len
        return padded_tensor

    @staticmethod
    def generate_padding_mask(lengths, max_seq_len):
        # boolean mask: True where the position is a real token, False where padding
        range_tensor = torch.arange(max_seq_len).expand(len(lengths), max_seq_len)
        padding_mask = range_tensor < lengths.unsqueeze(1)
        return padding_mask

    @staticmethod
    def repeat_kv(hidden_states: torch.Tensor, n_rep: int = 1) -> torch.Tensor:
        """
        Essential component for MQA. Equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep).

        Args: hidden_states(batch, num_key_value_heads, seqlen, head_dim)
              n_rep: times of repeatition.

        Output: hidden_states (batch, num_attention_heads, seqlen, head_dim)
        """
        if n_rep == 1:
            return hidden_states

        batch, num_key_value_heads, seq_len, head_dim = hidden_states.shape
        num_attention_heads = n_rep * num_key_value_heads
        # expand inserts a repeat axis without copying; reshape materializes the repeats
        hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, seq_len, head_dim)

        return hidden_states.reshape(batch, num_attention_heads, seq_len, head_dim)

    @staticmethod
    def nopad_context_forward(
        q: torch.Tensor,  # [num_tokens, num_heads, head_size]
        k: torch.Tensor,  # [num_tokens, num_kv_heads, head_size]
        v: torch.Tensor,
        k_cache: torch.Tensor,  # [num_blocks, num_heads, block_size, head_size]
        v_cache: torch.Tensor,
        context_lengths: torch.Tensor,  # [num_seqs]
        block_tables: torch.Tensor,  # [num_seqs,max_blocks_per_sequence]
    ):
        """
        NOTE: q,k,v are projected and applied rotary embedding, all aligned with triton version.
        """
        # First, do shape verification
        num_tokens, num_heads, head_size = q.shape
        num_kv_heads = k.shape[-2]
        assert num_heads % num_kv_heads == 0, "num_kv_heads should be divisible by num_heads"
        num_kv_groups = num_heads // num_kv_heads
        block_size = k_cache.size(-2)
        bsz, max_blocks_per_sequence = block_tables.shape
        max_seq_len = max_blocks_per_sequence * block_size
        assert q.shape[-1] == k.shape[-1] == v.shape[-1]
        assert q.shape[0] == k.shape[0] == v.shape[0]
        assert context_lengths.shape[0] == block_tables.shape[0]
        shape = (bsz, max_seq_len, num_heads, head_size)
        input_shape = shape[:2]
        q = PagedAttention.pad_and_reshape(
            q, context_lengths, max_seq_len, num_heads, head_size
        )  # bsz,seqlen,num_heads,head_size
        # NOTE(review): k/v are padded with num_heads rather than num_kv_heads —
        # this assumes num_kv_heads == num_heads here; confirm for GQA/MQA inputs.
        k = PagedAttention.pad_and_reshape(k, context_lengths, max_seq_len, num_heads, head_size)
        v = PagedAttention.pad_and_reshape(v, context_lengths, max_seq_len, num_heads, head_size)

        copy_to_cache(k, k_cache, lengths=context_lengths, block_tables=block_tables)
        copy_to_cache(v, v_cache, lengths=context_lengths, block_tables=block_tables)

        attn_mask = AttentionMaskConverter._make_causal_mask(input_shape, q.dtype, q.device, past_key_values_length=0)
        # NOTE(review): adds a boolean padding mask onto a float causal mask —
        # relies on implicit bool->float promotion; verify intended semantics.
        attn_mask = attn_mask + PagedAttention.generate_padding_mask(context_lengths, max_seq_len)
        q = q.transpose(1, 2)
        k = PagedAttention.repeat_kv(k.transpose(1, 2), num_kv_groups)
        v = PagedAttention.repeat_kv(v.transpose(1, 2), num_kv_groups)
        # position_ids = torch.arange(0, max_seq_len, dtype=torch.long, device=query.device)
        # position_ids = position_ids.unsqueeze(0)
        # cos, sin = self.rotary_emb(value, max_seq_len)
        # query, key = apply_rotary_pos_emb(query, key, cos, sin, position_ids)

        attn_weights = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(head_size)
        if attn_weights.size() != (bsz, num_heads, max_seq_len, max_seq_len):
            raise ValueError(f"Got wrong attn_weights, should be in shape {(bsz,num_heads,max_seq_len,max_seq_len)}.")
        if attn_mask is not None:
            attn_weights += attn_mask

        # softmax in fp32 for numerical stability, then cast back
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype)
        attn_output = torch.matmul(attn_weights, v)

        if attn_output.size() != (bsz, num_heads, max_seq_len, head_size):
            raise ValueError(f"Got wrong attn_output, should be in shape {(bsz,num_heads,max_seq_len,head_size)}.")
        attn_output = attn_output.transpose(1, 2).contiguous().reshape(bsz, max_seq_len, -1)
        del attn_weights

        return attn_output

    @staticmethod
    def pad_context_forward(
        q: torch.Tensor,  # [batch_size, seq_len, num_heads, head_size]
        k: torch.Tensor,  # [batch_size, seq_len, num_kv_heads, head_size]
        v: torch.Tensor,
        k_cache: torch.Tensor,  # [num_blocks, num_heads, block_size, head_size]
        v_cache: torch.Tensor,
        context_lengths: torch.Tensor,  # [num_seqs]
        block_tables: torch.Tensor,  # [num_seqs,max_blocks_per_sequence]
        attn_mask: torch.Tensor = None,  # [bsz, input_lengths + output_lengths]
    ):
        """Prefill attention over padded batched inputs; also fills the KV cache."""
        # First, do shape verification
        bsz, seq_len, num_heads, head_size = q.shape
        num_kv_heads = k.shape[-2]
        assert num_heads % num_kv_heads == 0, "num_kv_heads should be divisible by num_heads"
        num_kv_groups = num_heads // num_kv_heads
        block_size = k_cache.size(-2)
        assert q.shape[0] == k.shape[0] == v.shape[0] == block_tables.shape[0]
        # NOTE(review): the result of the next expression is discarded —
        # looks like a leftover max_seq_len computation.
        block_tables.shape[-1] * block_size

        # Copy kv to memory(rotary embedded)
        copy_to_cache(k, k_cache, lengths=context_lengths, block_tables=block_tables)
        copy_to_cache(v, v_cache, lengths=context_lengths, block_tables=block_tables)

        q = q.transpose(1, 2)
        k = PagedAttention.repeat_kv(k.transpose(1, 2), num_kv_groups)
        v = PagedAttention.repeat_kv(v.transpose(1, 2), num_kv_groups)

        attn_weights = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(head_size)
        padding_mask = None

        if attn_mask is not None:
            padding_mask = AttentionMaskConverter._expand_mask(attn_mask, q.dtype, seq_len)

        attn_mask = AttentionMaskConverter._make_causal_mask(
            (bsz, seq_len), q.dtype, q.device, past_key_values_length=seq_len - seq_len
        )

        if padding_mask is not None:
            attn_mask = attn_mask.masked_fill(padding_mask.bool(), torch.finfo(q.dtype).min)

        if attn_weights.size() != (bsz, num_heads, seq_len, seq_len):
            raise ValueError(f"Got wrong attn_weights, should be in shape {(bsz,num_heads,seq_len,seq_len)}.")
        if attn_mask is not None:
            attn_weights += attn_mask
        # softmax in fp32 for numerical stability, then cast back
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype)
        attn_output = torch.matmul(attn_weights, v)

        if attn_output.size() != (bsz, num_heads, seq_len, head_size):
            raise ValueError(f"Got wrong attn_output, should be in shape {(bsz,num_heads,seq_len,head_size)}.")

        attn_output = attn_output.transpose(1, 2).contiguous().reshape(bsz, seq_len, -1)

        return attn_output

    @staticmethod
    def pad_decoding_forward(
        q: torch.Tensor,  # [bsz, 1, num_heads, head_size]
        k: torch.Tensor,  # [bsz, 1, num_kv_heads, head_size]
        v: torch.Tensor,
        k_cache: torch.Tensor,  # [num_blocks, num_heads, block_size, head_size]
        v_cache: torch.Tensor,
        lengths: torch.Tensor,  # [num_seqs]: input_lengths + output_lengths
        block_tables: torch.Tensor,  # [num_seqs,max_blocks_per_sequence]
        attn_mask: torch.Tensor = None,  # [bsz, input_lengths + output_lengths]
    ):
        """Single-step decode: append the new token's KV to cache, then attend
        the single query over the full cached history."""
        # First, do shape verification.
        bsz, q_length, num_heads, head_size = q.shape
        num_kv_heads = k.shape[-2]
        assert num_heads % num_kv_heads == 0, "num_kv_heads should be divisible by num_heads"
        num_kv_groups = num_heads // num_kv_heads
        seq_len = max(lengths)
        assert q.shape[0] == k.shape[0] == v.shape[0] == block_tables.shape[0]

        copy_to_cache(k, k_cache, lengths=lengths, block_tables=block_tables, type="decoding")
        copy_to_cache(v, v_cache, lengths=lengths, block_tables=block_tables, type="decoding")

        # gather the full padded history back out of the block caches
        k = convert_kvcache(k_cache, lengths, block_tables)  # bsz, seqlen,
        v = convert_kvcache(v_cache, lengths, block_tables)

        q = q.transpose(1, 2)
        k = PagedAttention.repeat_kv(k.transpose(1, 2), num_kv_groups)
        v = PagedAttention.repeat_kv(v.transpose(1, 2), num_kv_groups)

        attn_weights = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(head_size)
        if attn_weights.size() != (bsz, num_heads, 1, seq_len):
            raise ValueError(f"Got wrong attn_weights, should be in shape {(bsz,num_heads,1,seq_len)}.")

        padding_mask = None
        if attn_mask is not None:
            padding_mask = AttentionMaskConverter._expand_mask(attn_mask, q.dtype, q_length)

        attn_mask = AttentionMaskConverter._make_causal_mask(
            (bsz, q_length), q.dtype, q.device, past_key_values_length=seq_len - q_length
        )

        if padding_mask is not None:
            attn_mask = attn_mask.masked_fill(padding_mask.bool(), torch.finfo(q.dtype).min)

        attn_weights += attn_mask
        # softmax in fp32 for numerical stability, then cast back
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(q.dtype)
        attn_output = torch.matmul(attn_weights, v)

        if attn_output.size() != (bsz, num_heads, 1, head_size):
            raise ValueError(f"Got wrong attn_output, should be in shape {(bsz,num_heads,1,head_size)}.")
        attn_output = attn_output.transpose(1, 2).contiguous().reshape(bsz, 1, -1)

        return attn_output

    @staticmethod
    def no_pad_decoding_forward(
        self,
        q: torch.Tensor,  # [num_tokens, num_heads, head_size]
        k: torch.Tensor,
        v: torch.Tensor,
        k_cache: torch.Tensor,  # [num_blocks, num_heads, head_size, block_size]
        v_cache: torch.Tensor,
        lengths: torch.Tensor,  # [num_seqs]: input_lengths + output_lengths
        block_tables: torch.Tensor,  # [num_seqs,max_blocks_per_sequence]
    ):
        # Unpadded decode entry: unsqueeze to padded layout, delegate to pad_decoding_forward.
        # NOTE(review): declared @staticmethod yet takes `self` and dispatches via
        # `self.pad_decoding_forward` — inconsistent signature; confirm intended usage.
        return self.pad_decoding_forward(
            q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1), k_cache, v_cache, lengths, block_tables
        )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/backends/__init__.py
colossalai/inference/modeling/backends/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/backends/attention_backend.py
colossalai/inference/modeling/backends/attention_backend.py
from abc import ABC, abstractmethod
from dataclasses import dataclass

import torch

from colossalai.inference.config import ModelShardInferenceConfig
from colossalai.kernel.kernel_loader import InferenceOpsLoader
from colossalai.kernel.triton import context_attention_unpadded, flash_decoding_attention


@dataclass
class AttentionMetaData:
    """Bundle of tensors and scalars passed to the attention backends."""

    query_states: torch.Tensor
    key_states: torch.Tensor
    value_states: torch.Tensor
    k_cache: torch.Tensor
    v_cache: torch.Tensor
    block_tables: torch.Tensor
    block_size: int
    kv_seq_len: int = None
    sequence_lengths: torch.Tensor = None
    cu_seqlens: torch.Tensor = None
    # softmax scale — annotation improved from `int` (a scale is fractional)
    sm_scale: float = None
    alibi_slopes: torch.Tensor = None
    output_tensor: torch.Tensor = None
    use_spec_dec: bool = False
    use_alibi_attn: bool = False


class AttentionBackend(ABC):
    """Interface: one implementation each for the prefill and decode phases."""

    @abstractmethod
    def prefill(self, attn_metadata: AttentionMetaData, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def decode(self, attn_metadatas: AttentionMetaData, **kwargs):
        raise NotImplementedError


class CudaAttentionBackend(AttentionBackend):
    """
    Attention backend when use_cuda_kernel is True but flash-attn not found. If flash-attn is not found,
    it uses Triton op `context_attention_unpadded` for prefilling and our cuda op `flash_decoding_attention` for decoding.
    """

    def __init__(self, use_flash_attn: bool = False):
        super().__init__()
        self.inference_ops = InferenceOpsLoader().load()
        self.use_flash_attn = use_flash_attn

    def prefill(self, attn_metadata: AttentionMetaData, **kwargs):
        if self.use_flash_attn:
            token_nums = kwargs.get("token_nums", -1)

            # imported lazily so the module loads without flash-attn installed
            from flash_attn import flash_attn_varlen_func

            attn_output = flash_attn_varlen_func(
                attn_metadata.query_states,
                attn_metadata.key_states,
                attn_metadata.value_states,
                cu_seqlens_q=attn_metadata.cu_seqlens,
                cu_seqlens_k=attn_metadata.cu_seqlens,
                max_seqlen_q=attn_metadata.kv_seq_len,
                max_seqlen_k=attn_metadata.kv_seq_len,
                dropout_p=0.0,
                softmax_scale=attn_metadata.sm_scale,
                causal=True,
                alibi_slopes=attn_metadata.alibi_slopes,
            )
            attn_output = attn_output.view(token_nums, -1)
        else:
            attn_output = context_attention_unpadded(
                q=attn_metadata.query_states,
                k=attn_metadata.key_states,
                v=attn_metadata.value_states,
                k_cache=attn_metadata.k_cache,
                v_cache=attn_metadata.v_cache,
                context_lengths=attn_metadata.sequence_lengths,
                block_tables=attn_metadata.block_tables,
                block_size=attn_metadata.block_size,
                output=attn_metadata.output_tensor,
                alibi_slopes=attn_metadata.alibi_slopes,
                max_seq_len=attn_metadata.kv_seq_len,
                sm_scale=attn_metadata.sm_scale,
                use_new_kcache_layout=True,  # use new k-cache layout
            )
        return attn_output

    def decode(self, attn_metadata: AttentionMetaData, **kwargs):
        # fd_inter_tensor holds preallocated intermediates for flash decoding
        fd_inter_tensor = kwargs.get("fd_inter_tensor", None)
        output_tensor = attn_metadata.output_tensor
        self.inference_ops.flash_decoding_attention(
            output_tensor,
            attn_metadata.query_states,
            attn_metadata.k_cache,
            attn_metadata.v_cache,
            attn_metadata.sequence_lengths,
            attn_metadata.block_tables,
            attn_metadata.block_size,
            attn_metadata.kv_seq_len,
            fd_inter_tensor.mid_output,
            fd_inter_tensor.exp_sums,
            fd_inter_tensor.max_logits,
            attn_metadata.alibi_slopes,
            attn_metadata.sm_scale,
        )
        return output_tensor


class TritonAttentionBackend(AttentionBackend):
    """
    Attention backend when use_cuda_kernel is False. It uses pure Triton ops for prefilling and decoding.
    """

    def prefill(self, attn_metadata: AttentionMetaData, **kwargs):
        return context_attention_unpadded(
            q=attn_metadata.query_states,
            k=attn_metadata.key_states,
            v=attn_metadata.value_states,
            k_cache=attn_metadata.k_cache,
            v_cache=attn_metadata.v_cache,
            context_lengths=attn_metadata.sequence_lengths,
            block_tables=attn_metadata.block_tables,
            block_size=attn_metadata.block_size,
            output=attn_metadata.output_tensor,
            alibi_slopes=attn_metadata.alibi_slopes,
            max_seq_len=attn_metadata.kv_seq_len,
            sm_scale=attn_metadata.sm_scale,
        )

    def decode(self, attn_metadata: AttentionMetaData, **kwargs):
        # fd_inter_tensor holds preallocated intermediates for flash decoding
        fd_inter_tensor = kwargs.get("fd_inter_tensor", None)
        return flash_decoding_attention(
            q=attn_metadata.query_states,
            k_cache=attn_metadata.k_cache,
            v_cache=attn_metadata.v_cache,
            kv_seq_len=attn_metadata.sequence_lengths,
            block_tables=attn_metadata.block_tables,
            block_size=attn_metadata.block_size,
            max_seq_len_in_batch=attn_metadata.kv_seq_len,
            output=attn_metadata.output_tensor,
            mid_output=fd_inter_tensor.mid_output,
            mid_output_lse=fd_inter_tensor.mid_output_lse,
            alibi_slopes=attn_metadata.alibi_slopes,
            sm_scale=attn_metadata.sm_scale,
            kv_group_num=kwargs.get("num_key_value_groups", 1),
            q_len=kwargs.get("q_len", 1),
        )


def get_attention_backend(
    model_shard_infer_config: ModelShardInferenceConfig,
) -> AttentionBackend:
    """
    Get the attention backend based on the inference configurations. The modeling will use CUDA-kernel-based backend
    for attention module calculation only when:
        1. using CUDA kernel (use_cuda_kernel=True)
        2. can use flash attention (flash-attn installed and dtype is fp16 or bf16)
        3. not using speculative decoding (currently cuda kernel not support speculative decoding)
    Otherwise, use Triton attention backend. If found flash-attn not installed while `use_cuda_kernel` is True,
    the Triton backend will use a new k cache layout for Triton kernels.
    """
    # Currently only triton kernels support speculative decoding
    if model_shard_infer_config.use_spec_dec:
        return TritonAttentionBackend()

    if model_shard_infer_config.use_cuda_kernel:
        return CudaAttentionBackend(model_shard_infer_config.use_flash_attn)

    return TritonAttentionBackend()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/backends/pre_attention_backend.py
colossalai/inference/modeling/backends/pre_attention_backend.py
from abc import ABC, abstractmethod

from colossalai.inference.config import ModelShardInferenceConfig
from colossalai.inference.modeling.backends.attention_backend import AttentionMetaData
from colossalai.kernel.kernel_loader import InferenceOpsLoader
from colossalai.kernel.triton import copy_k_to_blocked_cache, decoding_fused_rotary_embedding, rotary_embedding


class PreAttentionBackend(ABC):
    """Interface for pre-attention work (RoPE + KV-cache writes), one hook per phase."""

    @abstractmethod
    def prefill(self, attn_metadata: AttentionMetaData, **kwargs):
        raise NotImplementedError

    @abstractmethod
    def decode(self, attn_metadata: AttentionMetaData, **kwargs):
        raise NotImplementedError


class CudaPreAttentionBackend(PreAttentionBackend):
    """
    CudaPreAttentionBackend handles KV cache initialization and positional encoding for CudaAttentionBackend.
    """

    def __init__(self, use_flash_attn: bool):
        super().__init__()
        self.inference_ops = InferenceOpsLoader().load()
        self.use_flash_attn = use_flash_attn

    def prefill(self, attn_metadata: AttentionMetaData, **kwargs):
        if self.use_flash_attn:
            # alibi attention encodes position via slopes, so RoPE is skipped
            if not attn_metadata.use_alibi_attn:
                self.inference_ops.rotary_embedding(
                    attn_metadata.query_states,
                    attn_metadata.key_states,
                    kwargs.get("cos", None),
                    kwargs.get("sin", None),
                    kwargs.get("high_precision", False),
                )
            # bulk-copy the prompt's K/V into the block caches
            self.inference_ops.context_kv_cache_memcpy(
                attn_metadata.key_states,
                attn_metadata.value_states,
                attn_metadata.k_cache,
                attn_metadata.v_cache,
                attn_metadata.sequence_lengths,
                attn_metadata.cu_seqlens,
                attn_metadata.block_tables,
                attn_metadata.kv_seq_len,
            )
        elif not attn_metadata.use_alibi_attn:
            # no flash-attn: Triton RoPE only; the Triton prefill kernel writes the cache itself
            rotary_embedding(
                attn_metadata.query_states,
                attn_metadata.key_states,
                kwargs.get("cos", None),
                kwargs.get("sin", None),
            )

    def decode(self, attn_metadata: AttentionMetaData, **kwargs):
        if not attn_metadata.use_alibi_attn:
            # fused op: apply RoPE and append the new token's K/V to cache in one pass
            self.inference_ops.rotary_embedding_and_cache_copy(
                attn_metadata.query_states,
                attn_metadata.key_states,
                attn_metadata.value_states,
                kwargs.get("cos", None),
                kwargs.get("sin", None),
                attn_metadata.k_cache,
                attn_metadata.v_cache,
                attn_metadata.sequence_lengths,
                attn_metadata.block_tables,
                kwargs.get("high_precision", None),
            )
        else:
            # alibi path: no RoPE, only the cache append
            self.inference_ops.decode_kv_cache_memcpy(
                attn_metadata.key_states,
                attn_metadata.value_states,
                attn_metadata.k_cache,
                attn_metadata.v_cache,
                attn_metadata.sequence_lengths,
                attn_metadata.block_tables,
            )


class TritonPreAttentionBackend(PreAttentionBackend):
    """
    TritonPreAttentionBackend handles KV cache initialization and positional encoding for TritonAttentionBackend.
    """

    def prefill(self, attn_metadata: AttentionMetaData, **kwargs):
        if not attn_metadata.use_alibi_attn:
            rotary_embedding(
                attn_metadata.query_states,
                attn_metadata.key_states,
                kwargs.get("cos", None),
                kwargs.get("sin", None),
            )

    def decode(self, attn_metadata: AttentionMetaData, **kwargs):
        if not attn_metadata.use_spec_dec and not attn_metadata.use_alibi_attn:
            # fused RoPE + cache append for the common single-token decode
            decoding_fused_rotary_embedding(
                attn_metadata.query_states,
                attn_metadata.key_states,
                attn_metadata.value_states,
                kwargs.get("cos", None),
                kwargs.get("sin", None),
                attn_metadata.k_cache,
                attn_metadata.v_cache,
                attn_metadata.block_tables,
                attn_metadata.sequence_lengths,
            )
        else:  # else if using speculative decoding
            if not attn_metadata.use_alibi_attn:
                rotary_embedding(
                    attn_metadata.query_states,
                    attn_metadata.key_states,
                    kwargs.get("cos", None),
                    kwargs.get("sin", None),
                )
            copy_k_to_blocked_cache(
                attn_metadata.key_states,
                attn_metadata.k_cache,
                kv_lengths=attn_metadata.sequence_lengths,
                block_tables=attn_metadata.block_tables,
                n=kwargs.get("q_len", 1),
            )
            # NOTE(review): the "k" copy kernel is reused for V here — assumes
            # the kernel is layout-agnostic for both caches; confirm.
            copy_k_to_blocked_cache(
                attn_metadata.value_states,
                attn_metadata.v_cache,
                kv_lengths=attn_metadata.sequence_lengths,
                block_tables=attn_metadata.block_tables,
                n=kwargs.get("q_len", 1),
            )


def get_pre_attention_backend(
    model_shard_infer_config: ModelShardInferenceConfig,
) -> PreAttentionBackend:
    """
    Get the backend for pre-attention computations, including positional encoding like
    RoPE and KV cache initialization. It adopts the same selection logic as attention_backend/get_attention_backend.
    """
    if model_shard_infer_config.use_spec_dec:
        return TritonPreAttentionBackend()

    if model_shard_infer_config.use_cuda_kernel:
        return CudaPreAttentionBackend(model_shard_infer_config.use_flash_attn)

    return TritonPreAttentionBackend()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/policy/pixart_alpha.py
colossalai/inference/modeling/policy/pixart_alpha.py
from diffusers.models.attention import BasicTransformerBlock
from diffusers.models.transformers.pixart_transformer_2d import PixArtTransformer2DModel
from torch import nn

from colossalai.inference.config import RPC_PARAM
from colossalai.inference.modeling.layers.diffusion import DiffusionPipe
from colossalai.inference.modeling.layers.distrifusion import (
    DistrifusionConv2D,
    DistrifusionPatchEmbed,
    DistriSelfAttention,
    PixArtAlphaTransformer2DModel_forward,
)
from colossalai.inference.modeling.models.pixart_alpha import pixart_alpha_forward
from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription


class PixArtAlphaInferPolicy(Policy, RPC_PARAM):
    """Shardformer inference policy for PixArt-alpha.

    Installs Distrifusion patch-parallel layer replacements when patched
    parallelism is enabled, and always rebinds the pipeline forward to
    `pixart_alpha_forward`.
    """

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        """Assemble the module-replacement policy for inference."""
        policy = {}
        infer_config = self.shard_config.extra_kwargs["model_shard_infer_config"]

        # Distrifusion replacements only apply under patch parallelism.
        if infer_config.patched_parallelism_size > 1:
            policy[PixArtTransformer2DModel] = ModulePolicyDescription(
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="pos_embed.proj",
                        target_module=DistrifusionConv2D,
                        kwargs={"model_shard_infer_config": infer_config},
                    ),
                    SubModuleReplacementDescription(
                        suffix="pos_embed",
                        target_module=DistrifusionPatchEmbed,
                        kwargs={"model_shard_infer_config": infer_config},
                    ),
                ],
                attribute_replacement={"patched_parallel_size": infer_config.patched_parallelism_size},
                method_replacement={"forward": PixArtAlphaTransformer2DModel_forward},
            )

            policy[BasicTransformerBlock] = ModulePolicyDescription(
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="attn1",
                        target_module=DistriSelfAttention,
                        kwargs={"model_shard_infer_config": infer_config},
                    )
                ]
            )

        self.append_or_create_method_replacement(
            description={"forward": pixart_alpha_forward}, policy=policy, target_key=DiffusionPipe
        )

        return policy

    def preprocess(self) -> nn.Module:
        """No-op hook: the model is returned unchanged."""
        return self.model

    def postprocess(self):
        """No-op hook: the model is returned unchanged."""
        return self.model

    def config_sanity_check(self):
        pass

    def to_rpc_param(self) -> str:
        """Serialize this policy for RPC as its class name."""
        return __class__.__name__

    @staticmethod
    def from_rpc_param() -> "PixArtAlphaInferPolicy":
        """Reconstruct the policy from its RPC parameter."""
        return PixArtAlphaInferPolicy()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/policy/stablediffusion3.py
colossalai/inference/modeling/policy/stablediffusion3.py
from diffusers.models.attention import JointTransformerBlock
from diffusers.models.transformers import SD3Transformer2DModel
from torch import nn

from colossalai.inference.config import RPC_PARAM
from colossalai.inference.modeling.layers.diffusion import DiffusionPipe
from colossalai.inference.modeling.layers.distrifusion import (
    DistrifusionConv2D,
    DistrifusionFusedAttention,
    DistrifusionPatchEmbed,
    SD3Transformer2DModel_forward,
)
from colossalai.inference.modeling.models.stablediffusion3 import sd3_forward
from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription


class StableDiffusion3InferPolicy(Policy, RPC_PARAM):
    """Shardformer inference policy for Stable Diffusion 3 pipelines.

    When the Distrifusion patched-parallelism degree is greater than 1, the
    transformer's patch-embedding conv, the patch embed module itself, and each
    joint block's fused attention are swapped for their distributed variants.
    The ``DiffusionPipe`` forward is always replaced with ``sd3_forward``.
    """

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        policy = {}

        infer_config = self.shard_config.extra_kwargs["model_shard_infer_config"]
        if infer_config.patched_parallelism_size > 1:
            # Distribute the patch embedding across the patched-parallel group and
            # route the transformer through the Distrifusion forward.
            embed_replacements = [
                SubModuleReplacementDescription(
                    suffix="pos_embed.proj",
                    target_module=DistrifusionConv2D,
                    kwargs={"model_shard_infer_config": infer_config},
                ),
                SubModuleReplacementDescription(
                    suffix="pos_embed",
                    target_module=DistrifusionPatchEmbed,
                    kwargs={"model_shard_infer_config": infer_config},
                ),
            ]
            policy[SD3Transformer2DModel] = ModulePolicyDescription(
                sub_module_replacement=embed_replacements,
                attribute_replacement={"patched_parallel_size": infer_config.patched_parallelism_size},
                method_replacement={"forward": SD3Transformer2DModel_forward},
            )

            # Replace each joint transformer block's attention with the fused distributed variant.
            policy[JointTransformerBlock] = ModulePolicyDescription(
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="attn",
                        target_module=DistrifusionFusedAttention,
                        kwargs={"model_shard_infer_config": infer_config},
                    )
                ]
            )

        self.append_or_create_method_replacement(
            description={"forward": sd3_forward}, policy=policy, target_key=DiffusionPipe
        )

        return policy

    def preprocess(self) -> nn.Module:
        # No pre-shard transformation is needed for this pipeline.
        return self.model

    def postprocess(self):
        # No post-shard transformation is needed for this pipeline.
        return self.model

    def config_sanity_check(self):
        pass

    def to_rpc_param(self) -> str:
        # The policy is stateless, so its class name is enough to rebuild it remotely.
        return __class__.__name__

    @staticmethod
    def from_rpc_param() -> "StableDiffusion3InferPolicy":
        return StableDiffusion3InferPolicy()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/policy/__init__.py
colossalai/inference/modeling/policy/__init__.py
from .glide_llama import GlideLlamaModelPolicy
from .nopadding_baichuan import NoPaddingBaichuanModelInferPolicy
from .nopadding_llama import NoPaddingLlamaModelInferPolicy
from .pixart_alpha import PixArtAlphaInferPolicy
from .stablediffusion3 import StableDiffusion3InferPolicy

# Registry mapping model-type / pipeline names to their inference policy classes.
model_policy_map = {
    "nopadding_llama": NoPaddingLlamaModelInferPolicy,
    "nopadding_baichuan": NoPaddingBaichuanModelInferPolicy,
    "glide_llama": GlideLlamaModelPolicy,
    "StableDiffusion3Pipeline": StableDiffusion3InferPolicy,
    "PixArtAlphaPipeline": PixArtAlphaInferPolicy,
}

__all__ = [
    "NoPaddingLlamaModelInferPolicy",
    "NoPaddingBaichuanModelInferPolicy",
    "GlideLlamaModelPolicy",
    "StableDiffusion3InferPolicy",
    "PixArtAlphaInferPolicy",
    # Fixed typo: was "model_polic_map", which made `from ... import *`
    # raise AttributeError since no such name exists in this module.
    "model_policy_map",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/policy/glide_llama.py
colossalai/inference/modeling/policy/glide_llama.py
from transformers.models.llama.modeling_llama import LlamaForCausalLM, LlamaModel

from colossalai.inference.modeling.models.glide_llama import (
    GlideLlamaDecoderLayer,
    glide_llama_causal_lm_forward,
    glide_llama_model_forward,
)
from colossalai.inference.utils import init_to_get_rotary
from colossalai.shardformer.policies.base_policy import SubModuleReplacementDescription
from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy


class GlideLlamaModelPolicy(LlamaForCausalLMPolicy):
    """Policy that turns a Llama causal-LM into a Glide drafter.

    Every decoder layer is replaced by ``GlideLlamaDecoderLayer``, and the
    model / causal-LM forwards are swapped for their Glide counterparts.
    """

    def module_policy(self):
        policy = super().module_policy()

        num_layers = self.model.config.num_hidden_layers
        # One replacement description per decoder layer, addressed by index.
        layer_replacements = [
            SubModuleReplacementDescription(
                suffix=f"layers[{idx}]",
                target_module=GlideLlamaDecoderLayer,
            )
            for idx in range(num_layers)
        ]
        self.append_or_create_submodule_replacement(
            description=layer_replacements,
            policy=policy,
            target_key=LlamaModel,
        )

        for target_key, forward_fn in (
            (LlamaModel, glide_llama_model_forward),
            (LlamaForCausalLM, glide_llama_causal_lm_forward),
        ):
            self.append_or_create_method_replacement(
                description={"forward": forward_fn},
                policy=policy,
                target_key=target_key,
            )

        return policy

    def postprocess(self):
        # Precompute rotary embedding tables for each layer's cross-attention.
        for layer in self.model.model.layers:
            init_to_get_rotary(layer.cross_attn)
        return self.model
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/policy/nopadding_baichuan.py
colossalai/inference/modeling/policy/nopadding_baichuan.py
from colossalai.inference.config import RPC_PARAM
from colossalai.inference.modeling.layers.baichuan_tp_linear import BaichuanLMHeadLinear1D_Col
from colossalai.inference.modeling.models.nopadding_baichuan import (
    NopadBaichuanAttention,
    NopadBaichuanMLP,
    baichuan_rmsnorm_forward,
)
from colossalai.inference.modeling.models.nopadding_llama import (
    llama_causal_lm_forward,
    llama_decoder_layer_forward,
    llama_model_forward,
)
from colossalai.inference.utils import init_to_get_rotary
from colossalai.shardformer.layer import FusedLinear1D_Col, Linear1D_Col, Linear1D_Row
from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription
from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy


class NoPaddingBaichuanModelInferPolicy(LlamaForCausalLMPolicy, RPC_PARAM):
    """Inference policy for Baichuan models without input padding.

    Baichuan layers are addressed by class *name* (strings) because the model
    classes live in remote code: 7B uses ``DecoderLayer`` while 13B uses
    ``BaichuanLayer``. Llama forward implementations are reused for the
    decoder-layer / model / causal-LM forwards.
    """

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        policy = super().module_policy()

        config = self.model.config
        tp_size = self.shard_config.tensor_parallel_size
        if self.shard_config.enable_tensor_parallelism:
            # Shrink per-rank attention attributes to the tensor-parallel shard.
            decoder_attribute_replacement = {
                "self_attn.hidden_size": config.hidden_size // tp_size,
                "self_attn.num_heads": config.num_attention_heads // tp_size,
            }
            if getattr(config, "num_key_value_heads", False):
                decoder_attribute_replacement["self_attn.num_key_value_heads"] = (
                    config.num_key_value_heads // tp_size
                )
        else:
            decoder_attribute_replacement = None

        # Covers both Baichuan 7B ("DecoderLayer") and 13B ("BaichuanLayer").
        for layer_cls_name in ["DecoderLayer", "BaichuanLayer"]:
            # NOTE: order matters — child projections must be replaced before
            # their parent "mlp" / "self_attn" modules.
            replacements = [
                SubModuleReplacementDescription(suffix="mlp.gate_proj", target_module=Linear1D_Col),
                SubModuleReplacementDescription(suffix="mlp.up_proj", target_module=Linear1D_Col),
                SubModuleReplacementDescription(suffix="mlp.down_proj", target_module=Linear1D_Row),
                SubModuleReplacementDescription(suffix="mlp", target_module=NopadBaichuanMLP),
                SubModuleReplacementDescription(
                    suffix="self_attn.W_pack",
                    target_module=FusedLinear1D_Col,
                    # W_pack fuses q/k/v projections; split into three equal parts.
                    kwargs={"split_sizes": [config.hidden_size] * 3},
                ),
                SubModuleReplacementDescription(suffix="self_attn.o_proj", target_module=Linear1D_Row),
                SubModuleReplacementDescription(
                    suffix="self_attn",
                    target_module=NopadBaichuanAttention,
                    kwargs={
                        "model_shard_infer_config": self.shard_config.extra_kwargs["model_shard_infer_config"],
                    },
                ),
            ]
            policy[layer_cls_name] = ModulePolicyDescription(
                attribute_replacement=decoder_attribute_replacement,
                sub_module_replacement=replacements,
            )
            self.append_or_create_method_replacement(
                description={"forward": llama_decoder_layer_forward}, policy=policy, target_key=layer_cls_name
            )

        policy["BaichuanForCausalLM"] = ModulePolicyDescription(
            sub_module_replacement=[
                SubModuleReplacementDescription(
                    suffix="lm_head", target_module=BaichuanLMHeadLinear1D_Col, kwargs={"gather_output": True}
                )
            ],
        )

        for target_key, forward_fn in (
            ("BaichuanForCausalLM", llama_causal_lm_forward),
            ("BaichuanModel", llama_model_forward),
            ("RMSNorm", baichuan_rmsnorm_forward),
        ):
            self.append_or_create_method_replacement(
                description={"forward": forward_fn}, policy=policy, target_key=target_key
            )

        return policy

    def postprocess(self):
        # Precompute rotary embedding tables on the (sharded) base model.
        init_to_get_rotary(self.model.model)
        return self.model

    def to_rpc_param(self) -> str:
        # The policy is stateless, so its class name is enough to rebuild it remotely.
        return __class__.__name__

    @staticmethod
    def from_rpc_param() -> "NoPaddingBaichuanModelInferPolicy":
        return NoPaddingBaichuanModelInferPolicy()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/modeling/policy/nopadding_llama.py
colossalai/inference/modeling/policy/nopadding_llama.py
from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, LlamaRMSNorm

from colossalai.inference.config import RPC_PARAM
from colossalai.inference.modeling.models.nopadding_llama import (
    NopadLlamaAttention,
    NopadLlamaMLP,
    llama_causal_lm_forward,
    llama_decoder_layer_forward,
    llama_model_forward,
    llama_rmsnorm_forward,
)
from colossalai.inference.utils import init_to_get_rotary
from colossalai.shardformer.layer import Linear1D_Col, Linear1D_Row
from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription
from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy


class NoPaddingLlamaModelInferPolicy(LlamaForCausalLMPolicy, RPC_PARAM):
    """Inference policy for Llama models without input padding.

    Decoder-layer projections are column/row-sharded for tensor parallelism,
    the MLP and attention modules are replaced with their no-padding variants,
    and model/layer/norm forwards are swapped for the inference implementations.
    """

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        policy = super().module_policy()

        config = self.model.config
        tp_size = self.shard_config.tensor_parallel_size
        if self.shard_config.enable_tensor_parallelism:
            # Shrink per-rank attention attributes to the tensor-parallel shard.
            decoder_attribute_replacement = {
                "self_attn.hidden_size": config.hidden_size // tp_size,
                "self_attn.num_heads": config.num_attention_heads // tp_size,
            }
            if getattr(config, "num_key_value_heads", False):
                decoder_attribute_replacement["self_attn.num_key_value_heads"] = (
                    config.num_key_value_heads // tp_size
                )
        else:
            decoder_attribute_replacement = None

        # NOTE: order matters — child projections must be replaced before
        # their parent "mlp" / "self_attn" modules.
        submodule_replacements = [
            SubModuleReplacementDescription(suffix=suffix, target_module=target)
            for suffix, target in (
                ("mlp.gate_proj", Linear1D_Col),
                ("mlp.up_proj", Linear1D_Col),
                ("mlp.down_proj", Linear1D_Row),
                ("mlp", NopadLlamaMLP),
                ("self_attn.q_proj", Linear1D_Col),
                ("self_attn.k_proj", Linear1D_Col),
                ("self_attn.v_proj", Linear1D_Col),
                ("self_attn.o_proj", Linear1D_Row),
            )
        ]
        submodule_replacements.append(
            SubModuleReplacementDescription(
                suffix="self_attn",
                target_module=NopadLlamaAttention,
                kwargs={
                    "model_shard_infer_config": self.shard_config.extra_kwargs["model_shard_infer_config"],
                },
            )
        )
        policy[LlamaDecoderLayer] = ModulePolicyDescription(
            attribute_replacement=decoder_attribute_replacement,
            sub_module_replacement=submodule_replacements,
        )

        policy[LlamaForCausalLM] = ModulePolicyDescription(
            sub_module_replacement=[
                SubModuleReplacementDescription(
                    suffix="lm_head", target_module=Linear1D_Col, kwargs={"gather_output": True}
                )
            ],
        )

        for target_key, forward_fn in (
            (LlamaForCausalLM, llama_causal_lm_forward),
            (LlamaModel, llama_model_forward),
            (LlamaDecoderLayer, llama_decoder_layer_forward),
            (LlamaRMSNorm, llama_rmsnorm_forward),
        ):
            self.append_or_create_method_replacement(
                description={"forward": forward_fn}, policy=policy, target_key=target_key
            )

        return policy

    def postprocess(self):
        # Precompute rotary embedding tables using the model's configured rope theta.
        init_to_get_rotary(self.model.model, self.model.config.rope_theta)
        return self.model

    def to_rpc_param(self) -> str:
        # The policy is stateless, so its class name is enough to rebuild it remotely.
        return __class__.__name__

    @staticmethod
    def from_rpc_param() -> "NoPaddingLlamaModelInferPolicy":
        return NoPaddingLlamaModelInferPolicy()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/spec/struct.py
colossalai/inference/spec/struct.py
from dataclasses import dataclass
from typing import Optional, Tuple

import torch


@dataclass
class DrafterOutput:
    """
    Dataclass for drafter model outputs.

    Args:
        speculated_length (int): Speculated length of the output sequence
            It is always less than or equal to spec_num during drafter's speculation process
        logits (torch.FloatTensor): Logits of the output sequence
        next_tokens (torch.Tensor): Next token ids
        past_key_values (Optional[Tuple[Tuple[torch.FloatTensor]]]): Past key values of the output sequence
    """

    speculated_length: int = None
    logits: torch.FloatTensor = None
    next_tokens: torch.Tensor = None
    past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None

    def __post_init__(self):
        # A length must always be supplied; zero is valid (e.g. no tokens accepted).
        assert self.speculated_length is not None and self.speculated_length >= 0
        if self.past_key_values is not None:
            assert isinstance(self.past_key_values, tuple), "Past key values should be a tuple"
            # Generator form instead of all([...]) — avoids building a throwaway list.
            assert all(isinstance(past_key_value, tuple) for past_key_value in self.past_key_values)


@dataclass
class GlideInput:
    """Dataclass for Glide Models (e.g. `colossalai/inference/modeling/models/glide_llama.py`).
    Used for pack data that will be used during glimpsing KV Caches of the main model.

    Args:
        block_tables (torch.Tensor): [num_seqs, max_blocks_per_seq] The block table of KV Caches.
        large_k_cache (torch.Tensor): [num_blocks, num_kv_heads, block_size, head_size]
            Blocked key cache of the main model
        large_v_cache (torch.Tensor): Blocked value cache of the main model. It has the same shape as k cache.
        sequence_lengths (torch.Tensor): [num_seqs] Sequence lengths of the current batch.
    """

    block_tables: torch.Tensor = None
    large_k_cache: torch.Tensor = None
    large_v_cache: torch.Tensor = None
    sequence_lengths: torch.Tensor = None
    n_spec_tokens: int = 5

    @property
    def glimpse_ready(self):
        # Glimpsing requires every cache/table field to have been populated.
        return all(
            attr is not None
            for attr in [self.block_tables, self.large_k_cache, self.large_v_cache, self.sequence_lengths]
        )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/spec/drafter.py
colossalai/inference/spec/drafter.py
from typing import Optional, Tuple

import torch
import torch.nn as nn
from transformers import PreTrainedTokenizer
from transformers.cache_utils import DynamicCache

from colossalai.utils import get_current_device

from .struct import DrafterOutput, GlideInput


class Drafter:
    """Container for the Drafter Model (Assistant Model) used in Speculative Decoding.

    Args:
        model (nn.Module): The drafter model.
        tokenizer (transformers.PreTrainedTokenizer): The tokenizer for the drafter model.
        device (torch.device): The device for the drafter model.
    """

    def __init__(
        self,
        model: nn.Module,
        tokenizer: PreTrainedTokenizer,
        device: torch.device = None,
        dtype: torch.dtype = torch.float16,
    ):
        self._tokenizer = tokenizer
        self._device = device or get_current_device()
        self._dtype = dtype
        # nn.Module.to returns the module itself, so chaining the device and
        # dtype moves is equivalent to two separate calls.
        self._drafter_model = model.to(self._device).to(self._dtype)
        self._drafter_model.eval()

    def get_model(self) -> nn.Module:
        return self._drafter_model

    @staticmethod
    def trim_kv_cache(
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]], invalid_token_num: int
    ) -> Tuple[Tuple[torch.FloatTensor]]:
        """Trim the last `invalid_token_num` kv caches.

        past_key_values (Tuple[Tuple[torch.FloatTensor]]): The past key values with shape
            num_layers x 2 x (bsz x num_heads x seq_len x head_dim)
        invalid_token_num (int): The number of invalid tokens to trim.
        """
        if past_key_values is None or invalid_token_num < 1:
            return past_key_values
        # Drop the trailing `invalid_token_num` positions along the seq_len dim
        # of both the key and the value tensor of every layer.
        return tuple(
            (
                layer[0][:, :, :-invalid_token_num, :],
                layer[1][:, :, :-invalid_token_num, :],
            )
            for layer in past_key_values
        )

    @torch.inference_mode()
    def speculate(
        self,
        input_ids: torch.Tensor,
        n_spec_tokens: int,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        glide_input: Optional[GlideInput] = None,
    ) -> DrafterOutput:
        """Generate n_spec_tokens tokens using the drafter model.

        Args:
            input_ids (torch.Tensor): Input token ids.
            n_spec_tokens (int): Number of tokens to speculate.
            past_key_values (Tuple[Tuple[torch.FloatTensor]]): The past key values of the input sequence.
            glide_input (Optional[GlideInput]): The packed input for glimpsing kv caches of the main model,
                when using the glide model as a drafter.
        """
        assert n_spec_tokens >= 1, f"Invalid number {n_spec_tokens} to speculate"

        # For compatibility with transformers of versions before 4.38.0
        if input_ids.dim() == 1:
            input_ids = input_ids.unsqueeze(0)

        step_logits = []
        step_token_ids = []
        model_kwargs = {"return_dict": True, "use_cache": True}
        if glide_input:
            # required only when using glide model
            model_kwargs["glide_input"] = glide_input

        for _ in range(n_spec_tokens):
            # update past key values
            outputs = self._drafter_model(input_ids, past_key_values=past_key_values, **model_kwargs)
            last_logits = outputs.logits[:, -1, :]

            # NOTE Only use greedy search for speculating.
            # As the drafter model usually has only a few layers with few parameters,
            # introducing sampling will make the speculation unstable and lead to worse performance.
            greedy_ids = torch.argmax(last_logits, dim=-1)
            step_logits.append(last_logits)
            step_token_ids.append(greedy_ids)

            if greedy_ids.item() == self._tokenizer.eos_token_id:
                # TODO(yuanheng-zhao) support bsz > 1
                break

            input_ids = greedy_ids[:, None]
            past_key_values = outputs.past_key_values

        speculated_length = len(step_token_ids)  # For now, only support bsz 1
        logits = torch.cat(step_logits, dim=0)
        token_ids = torch.cat(step_token_ids, dim=-1)
        if isinstance(past_key_values, DynamicCache):
            past_key_values = past_key_values.to_legacy_cache()

        return DrafterOutput(
            speculated_length=speculated_length, logits=logits, next_tokens=token_ids, past_key_values=past_key_values
        )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/spec/__init__.py
colossalai/inference/spec/__init__.py
from .drafter import Drafter from .struct import DrafterOutput, GlideInput __all__ = ["Drafter", "DrafterOutput", "GlideInput"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/executor/__init__.py
colossalai/inference/executor/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/executor/rpc_worker.py
colossalai/inference/executor/rpc_worker.py
from typing import List, Tuple, Union

import rpyc
import torch
import torch.distributed as dist
from torch import nn
from transformers import AutoConfig, AutoModelForCausalLM
from transformers.models.llama.modeling_llama import LlamaForCausalLM

import colossalai
from colossalai.accelerator import get_accelerator
from colossalai.cluster import ProcessGroupMesh
from colossalai.inference.config import InferenceConfig, InputMetaData
from colossalai.inference.flash_decoding_utils import FDIntermTensors
from colossalai.inference.modeling.policy import (
    NoPaddingBaichuanModelInferPolicy,
    NoPaddingLlamaModelInferPolicy,
    model_policy_map,
)
from colossalai.inference.sampler import search_tokens
from colossalai.inference.utils import get_model_size, has_index_file
from colossalai.interface import ModelWrapper
from colossalai.lazy import LazyInitContext
from colossalai.logging import get_dist_logger
from colossalai.pipeline.stage_manager import PipelineStageManager
from colossalai.shardformer import ShardConfig, ShardFormer
from colossalai.shardformer.policies.base_policy import Policy

# Axis indices into the (pp, tp) process-group mesh.
PP_AXIS, TP_AXIS = 0, 1

_SUPPORTED_MODELS = {
    "LlamaForCausalLM": LlamaForCausalLM,
    "BaichuanForCausalLM": AutoModelForCausalLM,
}

_SUPPORTED_MODEL_POLICIES = {
    "NoPaddingLlamaModelInferPolicy": NoPaddingLlamaModelInferPolicy,
    "NoPaddingBaichuanModelInferPolicy": NoPaddingBaichuanModelInferPolicy,
}

logger = get_dist_logger(__name__)


class rpcWorkerService(rpyc.Service):

    """
    Execute the computation tasks and manage its own kv cache

    Func with prefix `exposed_` will be invoked by client.
    """

    def exposed_init_dist_env(self, rank, world_size, master_address, master_port):
        """Initialize the torch distributed environment for this worker rank."""
        logger.info(f"init process group for rank {rank}")
        colossalai.launch(rank=rank, world_size=world_size, port=master_port, host=master_address)
        logger.info(f"init process group done for rank {rank}")

    def exposed_init_model(
        self, inference_config_param: dict, model_or_path: Union[nn.Module, str], model_policy_param: str = None
    ):
        """Build the inference config, load/shard the model, and allocate auxiliary tensors.

        Args:
            inference_config_param (dict): RPC-serialized InferenceConfig.
            model_or_path (Union[nn.Module, str]): A model instance or a checkpoint path / hub id.
            model_policy_param (str, optional): Name of a supported policy class; None selects by model type.
        """
        assert dist.is_initialized(), "invoke init_dist_env first please!"

        self.inference_config = InferenceConfig.from_rpc_param(inference_config_param)
        model_policy = _SUPPORTED_MODEL_POLICIES[model_policy_param]() if model_policy_param else None

        self.dtype = self.inference_config.dtype
        self.verbose = True

        self._init_model(model_or_path, model_policy)
        self._init_fd_tensor()
        self._init_output_tensor()
        logger.info(f"init model done for rank {dist.get_rank()}")

    def exposed_init_cache(self, alloc_shape: Tuple[Tuple[int, ...], Tuple[int, ...]]):
        """Initialize the physical cache on the device.

        For each layer of the model, we allocate two tensors for key and value respectively,
        with shape of [num_blocks, num_kv_heads, block_size, head_size]
        """
        kalloc_shape, valloc_shape = alloc_shape
        num_layers = self.model_config.num_hidden_layers

        self.k_cache: List[torch.Tensor] = []
        self.v_cache: List[torch.Tensor] = []
        for _ in range(num_layers):
            self.k_cache.append(
                torch.zeros(
                    kalloc_shape,
                    dtype=self.inference_config.kv_cache_dtype,
                    device=get_accelerator().get_current_device(),
                )
            )
            self.v_cache.append(
                torch.zeros(
                    valloc_shape,
                    dtype=self.inference_config.kv_cache_dtype,
                    device=get_accelerator().get_current_device(),
                )
            )
        logger.info("physical cache init over")

    def exposed_execute_model_forward(
        self, input_token_ids_param: List[int], input_meta_data_param: dict, generation_config_param: dict
    ):
        """Run one forward pass plus sampling; returns the generated token ids as a list."""
        # prepare the data for model forward
        input_meta_data = InputMetaData.from_rpc_param(input_meta_data_param)
        input_meta_data.fd_inter_tensor = self.fd_inter_tensor
        if input_meta_data.is_prompts:
            # Prefill: one output row per input token across the batch.
            n_tokens = input_meta_data.sequence_lengths.sum().item()
        else:
            # Decode: one output row per sequence.
            n_tokens = input_meta_data.batch_size
        input_token_ids = torch.tensor(input_token_ids_param, dtype=torch.int, device=self.device)

        # execute the model
        logits = self.model(
            input_token_ids,
            self.output_tensor[:n_tokens],
            input_meta_data,
            self.k_cache,
            self.v_cache,
        )

        # sampler
        if self.inference_config.pad_input:
            logits = logits[:, -1, :]
        next_tokens = search_tokens(
            generation_config_param,
            logits,
            input_meta_data.is_prompts,
            input_meta_data.batch_token_ids,
        )

        # return the tokens generated to scheduler
        return next_tokens.tolist()

    def _init_output_tensor(self):
        """Pre-allocate the flat hidden-state output buffer reused across forwards."""
        alloc_shape = (
            self.inference_config.max_batch_size
            * (self.inference_config.max_input_len + self.inference_config.max_output_len),
            self.model_config.hidden_size // self.inference_config.tp_size,
        )
        self.output_tensor = torch.zeros(alloc_shape, dtype=self.dtype, device=self.device)

    def _init_fd_tensor(self):
        """Initialize the intermediate tensors used by flash-decoding attention."""
        fd_inter_tensor = FDIntermTensors()
        if fd_inter_tensor._tensors_initialized:
            fd_inter_tensor._reset()

        # For Spec-Dec, process the speculated tokens plus the token in the last step for each seq
        max_n_tokens = self.inference_config.max_batch_size
        max_n_tokens *= self.inference_config.max_n_spec_tokens + 1

        inference_config = self.inference_config
        kv_max_split_num = (
            inference_config.max_input_len + inference_config.max_output_len + inference_config.block_size - 1
        ) // inference_config.block_size
        head_dim = self.model_config.hidden_size // self.model_config.num_attention_heads

        fd_inter_tensor.initialize(
            max_batch_size=max_n_tokens,
            num_attn_heads=self.model_config.num_attention_heads // self.inference_config.tp_size,
            kv_max_split_num=kv_max_split_num,
            head_dim=head_dim,
            dtype=self.dtype,
            device=get_accelerator().get_current_device(),
        )

        self.fd_inter_tensor = fd_inter_tensor

    def _init_model(self, model_or_path: Union[nn.Module, str], model_policy: Policy = None):
        """
        Shard model or/and Load weight

        Shard model: When we set tp_size > 1, we will shard the model by given model_policy.
        Load Weight: If we pass a local model path, we will load the model weight by checkpoint_io.
            If it is a remote-transformer url, we will use `AutoModel.from_pretrained` api of transformers lib

        Args:
            model_or_path Union[nn.Module, str]: path to the checkpoint or model of transformer format.
            model_policy (Policy): the policy to replace the model
        """
        pretrained_path = None
        if isinstance(model_or_path, str):
            import colossalai.interface.pretrained as pretrained_utils

            try:
                hf_config = AutoConfig.from_pretrained(model_or_path, trust_remote_code=True, torch_dtype=self.dtype)
                arch = getattr(hf_config, "architectures")[0]
                # FIX: was `arch is "BaichuanForCausalLM"` — identity comparison on a
                # string literal is almost always False (and a SyntaxWarning), so the
                # warning never fired; use equality.
                if arch == "BaichuanForCausalLM":
                    # FIX: was `self.logger.warning(...)` — the service has no `logger`
                    # attribute (the module-level `logger` is used everywhere else), so
                    # this raised AttributeError, was swallowed by the broad except
                    # below, and model loading silently failed for Baichuan.
                    logger.warning(
                        "Attention ! We use lazy init by default, which could be faster for model loading. For baichuan model, the output maybe have a slight difference with transformers"
                    )
                ctx = LazyInitContext(default_device="cuda")
                with ctx:
                    model = _SUPPORTED_MODELS[arch].from_pretrained(
                        model_or_path, trust_remote_code=True, torch_dtype=self.dtype
                    )
                pretrained_path = pretrained_utils.get_pretrained_path(model)
            except Exception as e:
                logger.error(
                    f"An exception occurred during loading model: {e}, model should be loaded by transformers\n"
                )
        else:
            model = model_or_path

        self.model_config = model.config

        torch.cuda.empty_cache()
        init_gpu_memory = torch.cuda.mem_get_info()[0]

        self.device = get_accelerator().get_current_device()
        torch.cuda.set_device(self.device)
        if self.verbose:
            logger.info(f"the device is {self.device}")

        model = model.to(dtype=self.dtype, non_blocking=False).eval()

        if self.verbose:
            logger.info(
                f"Before the shard, Rank: [{dist.get_rank()}], model size: {get_model_size(model)} GB, model's device is: {model.device}"
            )

        if model_policy is None:
            # Fall back to the registered policy for this model type.
            if self.inference_config.pad_input:
                model_type = "padding_" + self.model_config.model_type
            else:
                model_type = "nopadding_" + self.model_config.model_type
            model_policy = model_policy_map[model_type]()

        pg_mesh = ProcessGroupMesh(self.inference_config.pp_size, self.inference_config.tp_size)
        tp_group = pg_mesh.get_group_along_axis(TP_AXIS)

        self.model = self._shardformer(
            model,
            model_policy,
            None,
            tp_group=tp_group,
        )

        # NOTE(review): wraps `model` (sharded in place by shardformer) rather than the
        # returned shard; presumed to be the same object — kept as-is to preserve behavior.
        self.model = ModelWrapper(model).to(device=get_accelerator().get_current_device())

        if self.verbose:
            logger.info(
                f"After the shard, Rank: [{dist.get_rank()}], model size: {get_model_size(self.model)} GB, model's device is: {model.device}"
            )

        if pretrained_path:
            from colossalai.inference.core.plugin import InferCheckpoint_io

            cpt_io = InferCheckpoint_io()
            if_has_index_file, model_index_file = has_index_file(pretrained_path)
            assert if_has_index_file, "the model path is invalid"
            cpt_io.load_model(self.model, model_index_file)

        free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
        peak_memory = init_gpu_memory - free_gpu_memory
        if self.verbose:
            logger.info(
                f"Rank [{dist.get_rank()}], Model Weight Max Occupy {peak_memory / (1024 ** 3)} GB, Model size: {get_model_size(self.model)} GB"
            )

    def _shardformer(
        self,
        model: nn.Module,
        model_policy: Policy,
        stage_manager: PipelineStageManager = None,
        tp_group: ProcessGroupMesh = None,
    ) -> nn.Module:
        """
        Initialize ShardConfig and replace the model with shardformer.

        Args:
            model (nn.Module): Path or nn.Module of this model.
            model_policy (Policy): The policy to shardformer model which is determined by the model type.
            stage_manager (PipelineStageManager, optional): Used to manage pipeline stages. Defaults to None.
            tp_group (ProcessGroupMesh, optional): Used to manage the process TP group mesh. Defaults to None.

        Returns:
            nn.Module: The model optimized by Shardformer.
        """
        shardconfig = ShardConfig(
            tensor_parallel_process_group=tp_group,
            pipeline_stage_manager=stage_manager,
            enable_tensor_parallelism=(self.inference_config.tp_size > 1),
            enable_fused_normalization=False,
            enable_all_optimization=False,
            enable_flash_attention=False,
            enable_jit_fused=False,
            enable_sequence_parallelism=False,
        )
        shardformer = ShardFormer(shard_config=shardconfig)
        shard_model, _ = shardformer.optimize(model, model_policy)
        return shard_model

    def exposed_compute_only_for_test(self):
        """Sanity-check helper: all-reduce each rank's id and return the sum."""
        dist_rank = dist.get_rank()

        # Dummy data for each worker
        data = torch.tensor([dist_rank], dtype=torch.float).cuda(dist_rank)
        dist.barrier()

        # Perform distributed all_reduce
        dist.all_reduce(data, op=dist.ReduceOp.SUM)
        dist.barrier()
        logger.info(f"Worker rank {dist_rank}: Sum after all_reduce: {data.item()}")

        return data.item()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/async_engine.py
colossalai/inference/core/async_engine.py
import asyncio
import logging
from functools import partial
from typing import AsyncIterator, Dict, Iterable, List, Optional, Set, Tuple, Type

from colossalai.inference.core.engine import InferenceEngine
from colossalai.inference.sampler import search_tokens

# CLI logger
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("colossalai-inference")


def _raise_exception_on_finish(task: asyncio.Task, request_tracker: "Tracer") -> None:
    """Done-callback for the background engine task.

    The background loop is expected to run forever; if the task ever
    completes (with or without an exception), propagate the failure to all
    pending request streams so callers waiting on them are unblocked, then
    re-raise so the error surfaces in the event loop.
    """
    msg = "Task finished unexpectedly. This should never happen! "
    try:
        try:
            task.result()
        except asyncio.CancelledError:
            # Normal shutdown path: cancellation is not an error.
            return
        except Exception as exc:
            raise RuntimeError(msg + " See stack trace above for the actual cause.") from exc
        raise RuntimeError(msg)
    except Exception as exc:
        # Fail every in-flight request stream before re-raising.
        request_tracker.propagate_exception(exc)
        raise exc


class RequstStream:
    """
    A stream of Output for a request that can be iterated over asynchronously.
    Attributes: 1.request_id: The id of the request.
    2._future: A future that will be set when the request is finished.
    Methods: set_result and get_result, results will be set when finished, for once, and
    the `self.future` will be set to done.
    """

    def __init__(self, request_id: int) -> None:
        self.request_id = request_id
        self._future = asyncio.Future()

    def set_result(self, result) -> None:
        """Set final result and signal that it's ready."""
        # Setting a result twice raises InvalidStateError; guard so that an
        # abort racing a normal completion is harmless.
        if not self._future.done():
            self._future.set_result(result)

    async def get_result(self):
        """Wait for the result to be set and return it."""
        return await self._future

    @property
    def finished(self) -> bool:
        """Check if the stream has finished by checking if the future is done."""
        return self._future.done()


class Tracer:
    """
    Recording new requests and finished requests.
    Attributes: 1._request_streams: We create one stream for each request to trace the output.
    2._finished_requests: A queue to store the finished requests.
    3._new_requests: New requests will be stored in this queue first, before sending them to the engine.
    4.new_requests_event: An event to notify the engine that there are new requests.
    """

    def __init__(self) -> None:
        self._request_streams: Dict[int, RequstStream] = {}
        self._finished_requests: asyncio.Queue[int] = asyncio.Queue()
        self._new_requests: asyncio.Queue[Tuple[RequstStream, dict]] = asyncio.Queue()
        # Created lazily in init_event() so the Tracer can be built before an
        # event loop exists (asyncio.Event binds to the running loop).
        self.new_requests_event = None

    def __contains__(self, item):
        return item in self._request_streams

    def init_event(self):
        """Create the new-request event; must be called from within the event loop."""
        self.new_requests_event = asyncio.Event()

    def propagate_exception(self, exc: Exception, request_id: Optional[int] = None) -> None:
        """
        Propagate an exception to request streams (all if request_id is None).
        """
        if request_id is not None:
            self._request_streams[request_id].set_result(exc)
        else:
            for stream in self._request_streams.values():
                stream.set_result(exc)

    def process_finished_request(self, finished_request) -> None:
        """Process a finished request from the engine.

        Sets the finished request as the result of its stream, then marks the
        request as finished/aborted so it is cleaned up on the next loop
        iteration.

        Raises:
            RuntimeError: if no stream is registered for the request id.
        """
        request_id = finished_request.request_id
        try:
            self._request_streams[request_id].set_result(finished_request)
        except KeyError as e:
            # Narrowed from a bare `except:` — only a missing stream is the
            # "unknown request" case; chain the cause for debuggability.
            raise RuntimeError(f"The request_id {request_id} is not found in our stream, please check") from e
        self.abort_request(request_id)

    def add_request(self, request_id: int, **engine_add_request_kwargs) -> RequstStream:
        """
        Add a request to be sent to the engine on the next background loop iteration.

        Returns:
            RequstStream: the stream on which the final result will be delivered.

        Raises:
            KeyError: if a stream for this request id already exists.
        """
        if request_id in self._request_streams:
            raise KeyError(f"Request {request_id} already exists.")
        stream = RequstStream(request_id)
        logger.info(f"Added request {request_id}.")
        self._new_requests.put_nowait((stream, {"request_id": request_id, **engine_add_request_kwargs}))
        self.new_requests_event.set()
        return stream

    def abort_request(self, request_id: int, *, verbose: bool = False) -> None:
        """Abort a request during next background loop iteration."""
        if verbose:
            logger.info(f"Aborted request {request_id}.")
        self._finished_requests.put_nowait(request_id)
        if request_id not in self._request_streams or self._request_streams[request_id].finished:
            # The request has already finished or been aborted.
            # The requests in new_requests will be aborted when try to get them (if marked aborted)
            return
        self._request_streams[request_id].set_result(None)

    def get_new_requests(self):
        """
        Get new requests from http server.

        Drains the finished-request queue first so that requests aborted
        before being scheduled are dropped instead of handed to the engine.
        """
        new_requests: List[Dict] = []
        finished_requests: Set[int] = set()

        while not self._finished_requests.empty():
            request_id = self._finished_requests.get_nowait()
            finished_requests.add(request_id)

        while not self._new_requests.empty():
            stream, new_request = self._new_requests.get_nowait()
            if new_request["request_id"] in finished_requests:
                # The request has been aborted.
                stream.set_result(None)
                continue
            self._request_streams[stream.request_id] = stream
            new_requests.append(new_request)

        self.new_requests_event.clear()

        return new_requests

    async def wait_for_new_requests(self):
        """Block until at least one new request has been added."""
        await self.new_requests_event.wait()


class _AsyncInferenceEngine(InferenceEngine):
    """
    Async methods for Inference Engine. This engine is an extension for InferenceEngine,
    and the additional methods will only be used for
    Methods: 1. async_step: The async version of Engine.step()
    """

    async def async_step(self) -> List[str]:
        """
        The async version of Engine.step()
        Performs one decoding iteration and returns newly generated results.

        It first schedules the sequences to be executed in the next iteration.
        Then, it executes the model and updates the scheduler with the model
        outputs. Finally, it decodes the sequences and returns the newly
        generated results.

        Returns:
            Tuple of (finished sequences, whether any requests are still running).
        """
        batch = self.request_handler.schedule()
        input_token_ids, output_tensor, input_meta_data = self.prepare_input(batch)

        loop = asyncio.get_running_loop()

        if input_meta_data.use_cuda_graph:
            model_executable = self.graph_runners[input_meta_data.batch_size]
        else:
            model_executable = self.model

        # Use run_in_executor to asynchronously run the sync method model.forward().
        logits = await loop.run_in_executor(
            None,
            model_executable,
            input_token_ids,
            output_tensor,
            input_meta_data,
            self.k_cache,
            self.v_cache,
        )

        if self.inference_config.pad_input:
            logits = logits[:, -1, :]
        next_tokens = search_tokens(
            self.generation_config, logits, input_meta_data.is_prompts, batch_token_ids=input_meta_data.batch_token_ids
        )
        self.request_handler.append_next_tokens(next_tokens)
        finished_sequences = self.request_handler.update()
        for sequence in finished_sequences:
            sequence.output = self.tokenizer.decode(sequence.output_token_id)

        return finished_sequences, not self.request_handler.running_list.is_empty()

    def add_single_request(self, request_id: int, prompt: str, prompt_token_ids, generation_config=None):
        """Wrap a single prompt and forward it to the engine's add_request."""
        prompts = [prompt]
        gen_config_dict = generation_config.to_dict() if generation_config is not None else {}
        self.add_request(request_ids=request_id, prompts=prompts, prompts_token_ids=prompt_token_ids, **gen_config_dict)


class AsyncInferenceEngine:
    """An asynchronous wrapper for the InferenceEngine class.

    This class is used to wrap the InferenceEngine class to make it asynchronous.
    It uses asyncio to create a background loop that keeps processing incoming
    requests. Note that this class does not hold model directly, when incoming a
    new request, it first called `add_request` and the Tracer will record the
    request, putting it to the background `InferenceEngine` (done in background
    loop) to process. You can consider this engine as an interface.
    """

    _engine_class: Type[_AsyncInferenceEngine] = _AsyncInferenceEngine

    def __init__(self, start_engine_loop: bool = True, **kwargs):
        self.engine = self._init_engine(**kwargs)
        self.background_loop = None
        # reference to the unshielded loop
        self._background_loop_unshielded = None
        self.start_engine_loop = start_engine_loop
        self._request_tracer = Tracer()

    @property
    def background_loop_status(self):
        """True while the background loop task exists and has not completed."""
        return self.background_loop is not None and not self.background_loop.done()

    def start_background_loop(self):
        """Start the background loop task; raises if one is already running."""
        if self.background_loop_status:
            raise RuntimeError("Existing loop is running")

        self._request_tracer.init_event()

        self._background_loop_unshielded = asyncio.get_event_loop().create_task(self.run_engine_loop())
        self._background_loop_unshielded.add_done_callback(
            partial(_raise_exception_on_finish, request_tracker=self._request_tracer)
        )
        # Shield so awaiting callers being cancelled does not kill the loop.
        self.background_loop = asyncio.shield(self._background_loop_unshielded)

    def _init_engine(self, **kwargs):
        return self._engine_class(**kwargs)

    async def step(self):
        """
        Run engine to process requests

        Returns True if there are in-progress requests.
        """
        new_requests = self._request_tracer.get_new_requests()
        for new_request in new_requests:
            self.engine.add_single_request(**new_request)
        newly_finished_seqs, has_running_requests = await self.engine.async_step()

        for seq in newly_finished_seqs:
            self._request_tracer.process_finished_request(seq)

        return has_running_requests

    async def _engine_abort(self, request_ids: Iterable[int]):
        self.engine.abort_request(request_ids)

    async def abort(self, request_id: int):
        """
        Abort a single request

        Raises:
            RuntimeError: if the background loop is not running.
        """
        if not self.background_loop_status:
            raise RuntimeError("Background loop is not running or launched correctly.")
        return self._abort(request_id)

    def _abort(self, request_id: int):
        self._request_tracer.abort_request(request_id)

    async def run_engine_loop(self):
        """Forever loop: wait for requests when idle, then step the engine."""
        processing_requests = False
        while True:
            if not processing_requests:
                # Idle: park until a new request arrives instead of spinning.
                await self._request_tracer.wait_for_new_requests()
            processing_requests = await self.step()
            # Yield control so other coroutines (e.g. add_request) can run.
            await asyncio.sleep(0)

    async def add_request(
        self,
        request_id: int,
        prompt: Optional[str],
        prompt_token_ids: Optional[List[int]] = None,
        generation_config=None,
    ) -> RequstStream:
        """
        Add a request to the background tracker (waiting queue), start the
        background loop if needed.
        """
        if not self.background_loop_status:
            if self.start_engine_loop:
                self.start_background_loop()
            else:
                raise RuntimeError("Background loop is not running.")
        stream = self._request_tracer.add_request(
            request_id,
            prompt=prompt,
            prompt_token_ids=prompt_token_ids,
            generation_config=generation_config,
        )
        return stream

    async def generate(
        self,
        request_id: int,
        prompt: Optional[str],
        prompt_token_ids: Optional[List[int]] = None,
        generation_config=None,
    ) -> AsyncIterator[str]:
        """
        Generate output from a request.

        It receives the request from http server, adds it into the waiting
        queue of Async Engine and streams the output sequence.
        """
        try:
            stream = await self.add_request(
                request_id, prompt, prompt_token_ids=prompt_token_ids, generation_config=generation_config
            )
            return await stream.get_result()

        except (Exception, asyncio.CancelledError) as e:
            # If there is an exception or coroutine is cancelled, abort the request.
            self._abort(request_id)
            raise e
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/llm_engine.py
colossalai/inference/core/llm_engine.py
import time from itertools import count from typing import Dict, List, Optional, Tuple, Type, Union import numpy as np import torch import torch.nn as nn from torch import distributed as dist from transformers import ( AutoConfig, AutoModelForCausalLM, GenerationConfig, PreTrainedTokenizer, PreTrainedTokenizerFast, ) from transformers.models.llama.modeling_llama import LlamaForCausalLM from colossalai.accelerator import get_accelerator from colossalai.cluster import ProcessGroupMesh from colossalai.inference.batch_bucket import BatchBucket from colossalai.inference.config import InferenceConfig, InputMetaData, ModelShardInferenceConfig from colossalai.inference.graph_runner import CUDAGraphRunner from colossalai.inference.modeling.policy import model_policy_map from colossalai.inference.sampler import search_tokens from colossalai.inference.spec import Drafter, GlideInput from colossalai.inference.struct import Sequence from colossalai.inference.utils import get_model_size, has_index_file from colossalai.interface import ModelWrapper from colossalai.lazy import LazyInitContext from colossalai.logging import get_dist_logger from colossalai.shardformer.policies.base_policy import Policy from .base_engine import BaseEngine from .request_handler import RequestHandler PP_AXIS, TP_AXIS = 0, 1 _supported_models = { "LlamaForCausalLM": LlamaForCausalLM, "BaichuanForCausalLM": AutoModelForCausalLM, } _BATCH_SIZES_TO_CAPTURE = [1, 2, 4] + [8 * i for i in range(1, 33)] class LLMEngine(BaseEngine): """ InferenceEngine which manages the inference process.. Args: model_or_path (nn.Module or str): Path or nn.Module of this model. tokenizer Optional[(Union[PreTrainedTokenizer, PreTrainedTokenizerFast])]: Path of the tokenizer to use. inference_config (Optional[InferenceConfig], optional): Store the configuration information related to inference. verbose (bool): Determine whether or not to log the generation process. model_policy ("Policy"): the policy to shardformer model. 
It will be determined by the model type if not provided. """ def __init__( self, model_or_path: Union[nn.Module, str], tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast] = None, inference_config: InferenceConfig = None, verbose: bool = False, model_policy: Union[Policy, type[Policy]] = None, ) -> None: self.inference_config = inference_config self.dtype = inference_config.dtype self.high_precision = inference_config.high_precision self.verbose = verbose self.logger = get_dist_logger(__name__) self.model_shard_infer_config = inference_config.to_model_shard_inference_config() self.init_model(model_or_path, model_policy, self.model_shard_infer_config) self.generation_config = inference_config.to_generation_config(self.model_config) self.generation_config_dict = self.generation_config.to_dict() self.tokenizer = tokenizer self.tokenizer.pad_token = self.tokenizer.eos_token self.request_handler = RequestHandler(self.inference_config, self.model_config) self.k_cache, self.v_cache = self.request_handler.get_kvcache() # DISCUSS maybe move this into batch info? self.counter = count() self.use_cuda_graph = self.inference_config.use_cuda_graph if self.use_cuda_graph: self.graph_runners: Dict[int, CUDAGraphRunner] = {} self.graph_memory_pool = None # Set during graph capture. 
if verbose: self.logger.info("Colossal AI CUDA Graph Capture on") self.capture_model(self.k_cache, self.v_cache) # Model and relatable attrs of speculative decoding will be set by `enable_spec_dec` self.use_spec_dec = self.inference_config.use_spec_dec self.drafter_model = None self.drafter = None self.use_glide = False self.n_spec_tokens = self.inference_config.max_n_spec_tokens self._verify_args() def init_model( self, model_or_path: Union[nn.Module, str], model_policy: Union[Policy, Type[Policy]] = None, model_shard_infer_config: ModelShardInferenceConfig = None, ): """ Shard model or/and Load weight Args: model_or_path Union[nn.Module, str]: path to the checkpoint or model of transformer format. model_policy (Policy): the policy to replace the model. model_inference_config: the configuration for modeling initialization when inference. model_shard_infer_config (ModelShardInferenceConfig): the configuration for init of module when inference. """ pretrained_path = None if isinstance(model_or_path, str): import colossalai.interface.pretrained as pretrained_utils try: hf_config = AutoConfig.from_pretrained(model_or_path, trust_remote_code=True, torch_dtype=self.dtype) arch = getattr(hf_config, "architectures")[0] if arch in _supported_models.keys(): if arch == "BaichuanForCausalLM": self.logger.warning( "Attention ! We use lazy init by default, which could be faster for model loading. 
For baichuan model, the output maybe have a slight difference with transformers" ) ctx = LazyInitContext(default_device="cuda") with ctx: model = _supported_models[arch].from_pretrained( model_or_path, trust_remote_code=True, torch_dtype=self.dtype ) pretrained_path = pretrained_utils.get_pretrained_path(model) else: # TODO(char-1ee): if the model not supported, use transformers APIs to load and generate raise ValueError(f"Model {arch} is not supported.") except Exception as e: self.logger.error( f"An exception occurred during loading model: {e}, model should be loaded by transformers\n" ) else: model = model_or_path self.model_config = model.config torch.cuda.empty_cache() init_gpu_memory = torch.cuda.mem_get_info()[0] self.device = get_accelerator().get_current_device() if self.verbose: self.logger.info(f"the device is {self.device}") model = model.to(self.dtype).eval() if self.verbose: self.logger.info( f"Before the shard, Rank: [{dist.get_rank()}], model size: {get_model_size(model)} GB, model's device is: {model.device}" ) if model_policy is None: prefix = "nopadding" if not self.inference_config.pad_input else "padding" model_policy_key = f"{prefix}_{getattr(self.model_config, 'model_type', None)}" model_policy = model_policy_map.get(model_policy_key) if not isinstance(model_policy, Policy): try: model_policy = model_policy() except Exception as e: raise ValueError(f"Unable to instantiate model policy: {e}") assert isinstance(model_policy, Policy), f"Invalid type of model policy: {type(model_policy)}" pg_mesh = ProcessGroupMesh(self.inference_config.pp_size, self.inference_config.tp_size) tp_group = pg_mesh.get_group_along_axis(TP_AXIS) self.model = self._shardformer( model, model_policy, model_shard_infer_config, None, tp_group=tp_group, ) self.model = ModelWrapper(model).to(self.device) if self.verbose: self.logger.info( f"After the shard, Rank: [{dist.get_rank()}], model size: {get_model_size(self.model)} GB, model's device is: {model.device}" ) if 
pretrained_path: from colossalai.inference.core.plugin import InferCheckpoint_io cpt_io = InferCheckpoint_io() if_has_index_file, model_index_file = has_index_file(pretrained_path) assert if_has_index_file, "the model path is invalid" cpt_io.load_model(self.model, model_index_file) free_gpu_memory, _ = torch.cuda.mem_get_info() peak_memory = init_gpu_memory - free_gpu_memory if self.verbose: self.logger.info( f"Rank [{dist.get_rank()}], Model Weight Max Occupy {peak_memory / (1024 ** 3)} GB, Model size: {get_model_size(self.model)} GB" ) @torch.inference_mode() def capture_model(self, k_cache: List[torch.Tensor], v_cache: List[torch.Tensor]): assert self.use_cuda_graph, "please turn on the cuda graph" if self.verbose: self.logger.info("Colossal AI CUDA Graph Capture begin") t_capture_begin = time.perf_counter() block_size = self.inference_config.block_size head_dim = self.model_config.hidden_size // self.model_config.num_attention_heads # Prepare dummy inputs. These will be reused for all batch sizes. 
max_batch_size = max(_BATCH_SIZES_TO_CAPTURE) max_context_len_to_capture = self.inference_config.max_context_len_to_capture max_num_blocks = (max_context_len_to_capture + block_size - 1) // block_size input_tokens_ids = torch.zeros(max_batch_size, dtype=torch.long).cuda() # self.graph_block_tables = np.zeros((max(_BATCH_SIZES_TO_CAPTURE), max_num_blocks), dtype=np.int32) self.graph_block_tables = np.full((max(_BATCH_SIZES_TO_CAPTURE), max_num_blocks), -1, dtype=np.int32) self.graph_block_tables[:, 0] = np.arange(max_num_blocks, max_num_blocks + max(_BATCH_SIZES_TO_CAPTURE)) self.graph_block_tables[0, :] = np.arange( 0, max_num_blocks ) # NOTE this is a hack to insure cuda grpah could capture the fixed cuda kernel grid in flash decoding, to make the first seqlen as the max_seq_len block_tables = torch.from_numpy(self.graph_block_tables).cuda() output_tensor = torch.zeros( (max_batch_size, self.model_config.num_attention_heads * head_dim), dtype=self.dtype, device=self.device ) fd_inter_tensor = self.request_handler.running_bb.fd_inter_tensor max_num_seqs = self.inference_config.max_batch_size batch_size_capture_list = [bs for bs in _BATCH_SIZES_TO_CAPTURE if bs <= max_num_seqs] sequence_lengths = torch.ones(max_batch_size, dtype=torch.int).cuda() # NOTE this is a hack to insure cuda grpah could capture the fixed cuda kernel grid in flash decoding, to make the first seqlen as the max_seq_len sequence_lengths[0] = torch.tensor( self.inference_config.max_context_len_to_capture - 1, dtype=torch.int32 ).cuda() # NOTE: Capturing the largest batch size first may help reduce the # memory usage of CUDA graph. 
for batch_size in reversed(batch_size_capture_list): if self.verbose: self.logger.info(f"batch size {batch_size} graph capturing") input_meta_data = InputMetaData( block_tables=block_tables[:batch_size], sequence_lengths=sequence_lengths[:batch_size], fd_inter_tensor=fd_inter_tensor, batch_size=batch_size, is_prompts=False, use_cuda_graph=True, high_precision=False, kv_seq_len=sequence_lengths[:batch_size].max().item(), head_dim=head_dim, dtype=self.dtype, ) graph_runner = CUDAGraphRunner(self.model) graph_runner.capture( input_tokens_ids[:batch_size], output_tensor[:batch_size], input_meta_data, k_caches=k_cache, v_caches=v_cache, memory_pool=self.graph_memory_pool, ) self.graph_memory_pool = graph_runner.graph.pool() self.graph_runners[batch_size] = graph_runner t_capture_end = time.perf_counter() if self.verbose: self.logger.info(f"CUDA Graph capture time: {t_capture_end - t_capture_begin} s") def _verify_args(self) -> None: """Verify the input args""" if not isinstance(self.inference_config, InferenceConfig): raise TypeError("Invalid type of inference config provided.") if not isinstance(self.model, nn.Module): raise TypeError(f"the model type must be nn.Module, but got {type(self.model)}") if not isinstance(self.tokenizer, (PreTrainedTokenizerFast, PreTrainedTokenizer)): raise TypeError( f"the tokenizer type must be PreTrainedTokenizer or PreTrainedTokenizerFast, but got {type(self.tokenizer)}" ) if isinstance(self.model, ModelWrapper): model = self.model.module assert ( model.__class__.__name__ in _supported_models.keys() ), f"Model {self.model.__class__.__name__} is not supported." def enable_spec_dec( self, drafter_model: nn.Module = None, n_spec_tokens: int = None, use_glide_drafter: bool = False, ) -> None: """Initialize drafter (if it has not yet), and enable Speculative Decoding for subsequent generations. Args: drafter_model (nn.Module): The drafter model (small model) used to speculate tokens. 
If provided, the previous drafter and drafter model, if exist, will be overwritten. n_spec_tokens (Optional[int]): The number of tokens to speculate in each round of speculating-verifying. If not provided, `max_n_spec_tokens` in InferenceConfig will be used. use_glide_drafter (bool): Whether to use glide model for speculative decoding. Defaults to False. If True, the drafter model will be replaced by a glide model. ```python ... engine = InferenceEngine(model, tokenizer, inference_config) engine.enable_spec_dec(drafter_model, n_spec_tokens=5) engine.generate(...) # Speculative Decoding engine.disable_spec_dec() engine.generate(...) # Normal generation engine.enable_spec_dec() engine.generate(...) # Speculative-Decoding using previously set drafter model and number of spec tokens engine.clear_spec_dec() ``` """ if drafter_model is None and self.drafter is None: raise ValueError("Drafter not initialized. Please provide a Drafter Model") if n_spec_tokens is not None: assert 1 < n_spec_tokens <= self.inference_config.max_n_spec_tokens self.n_spec_tokens = n_spec_tokens if drafter_model is not None: assert isinstance(drafter_model, nn.Module) # overwrite the drafter, if exists self.clear_spec_dec() self.drafter_model = drafter_model self.drafter = Drafter( self.drafter_model, self.tokenizer, device=self.device, dtype=self.dtype, ) # check if the provided drafter model is compatible with GLIDE structure # when `use_glide_drafter` is set to True if ( use_glide_drafter and hasattr(drafter_model, "model") and hasattr(drafter_model.model, "layers") and hasattr(drafter_model.model.layers[0], "cross_attn") ): self.use_glide = use_glide_drafter elif use_glide_drafter: self.logger.warning( f"`use_glide_drafter` is provided as {use_glide_drafter}, " f"but the provided drafter model is not compatible with GLIDE structure." f"Falling back to use the default drafter model (non-GLIDE)." 
) self.request_handler.set_spec_dec_mode(self.n_spec_tokens) # using speculative decoding for subsequent generations self.use_spec_dec = True def disable_spec_dec(self) -> None: """Disable using speculative decoding for subsequent generations.""" self.request_handler.unset_spec_dec_mode() # set back to the maximum number of tokens to speculate self.n_spec_tokens = self.inference_config.max_n_spec_tokens self.use_glide = False self.use_spec_dec = False def clear_spec_dec(self) -> None: """Clear relatable structures of speculative decoding, if exist.""" if self.use_spec_dec: self.disable_spec_dec() if self.drafter_model or self.drafter: self.drafter_model = None self.drafter = None torch.cuda.empty_cache() self.use_glide = False self.use_spec_dec = False def steps_spec_dec(self) -> List[Sequence]: """ Run Speculative Decoding steps. This is like retrieving a single batch and launch inference with many steps of speculating by a drafter model as well as verifying by a main model. Returns: List[Sequence]: finished sequences generated by one step. """ batch = self.request_handler.schedule() # prefill batch assert batch.current_batch_size == 1, "Only support bsz 1 for speculative decoding for now." input_token_ids, output_tensor, input_meta_data = self.prepare_input(batch) if input_meta_data.use_cuda_graph: model_executable = self.graph_runners[input_meta_data.batch_size] else: model_executable = self.model # 1. Prefill small model (Drafter) - fill past kv cache for drafter model # NOTE For glide drafter models, we won't actually apply glide during prefill stage drafter_out = self.drafter.speculate(input_token_ids, 1, None) next_token_ids_spec = drafter_out.next_tokens drafter_past_key_values = drafter_out.past_key_values # 2. 
Prefill main model (Verifier) - fill past kv cache for main model logits = model_executable(input_token_ids, output_tensor, input_meta_data, self.k_cache, self.v_cache) next_tokens = search_tokens(self.generation_config, logits, batch_token_ids=batch.batch_token_ids) # append new inputs to the batch, temporarily batch.append_batch_tokens(next_tokens) self.request_handler.allocate_batch_spec_dec(batch, 1) already_allocated_kv_len = batch.seq_lengths[0].item() input_token_ids = batch.get_1D_inputs_spec_dec(1) finished_sequences = self.request_handler.update() while True: # HACK Retrieve the running batch # Using RequestHandler.schedule here will re-allocate same kv cache for the batch batch = self.request_handler.running_bb # running batch assert batch.current_batch_size == 1, "Only support bsz 1 for speculative decoding for now." # 3. Decoding - Drafter model speculates `n` tokens glide_input = None if self.use_glide: glide_input = GlideInput( batch.get_block_table_tensor(), self.k_cache[-1], # use kv cahces of the last layer self.v_cache[-1], batch.get_sequence_lengths(), n_spec_tokens=self.n_spec_tokens, ) drafter_out = self.drafter.speculate( input_token_ids, self.n_spec_tokens, drafter_past_key_values, glide_input=glide_input, ) next_token_ids_spec = drafter_out.next_tokens drafter_past_key_values = drafter_out.past_key_values drafter_spec_length = drafter_out.speculated_length for next_token_id_spec in next_token_ids_spec: self.request_handler.append_next_tokens(next_token_id_spec.unsqueeze(0)) cur_length = batch.seq_lengths[0].item() if already_allocated_kv_len < cur_length: self.request_handler.allocate_batch_spec_dec(batch, n=cur_length - already_allocated_kv_len) already_allocated_kv_len = cur_length # 4. 
Decoding - Main model verifies `n` tokens in parallel if drafter_spec_length < batch.num_tokens_to_verify: batch.set_use_spec_dec(num_tokens_to_verify=drafter_spec_length) input_token_ids, output_tensor, input_meta_data = self.prepare_input(batch) logits = model_executable(input_token_ids, output_tensor, input_meta_data, self.k_cache, self.v_cache) next_tokens = search_tokens(self.generation_config, logits, batch_token_ids=batch.batch_token_ids) # 5. Compare and process the results diff_indexes = torch.nonzero(~(next_tokens[:-1] == next_token_ids_spec)) n_matches = drafter_spec_length if diff_indexes.size(0) == 0 else diff_indexes[0][0].item() # revoke appended tokens for each Sequence in the current batch batch.revoke_batch_tokens(drafter_spec_length - n_matches) # revoke drafted tokens # append the last correct token generated by the main model self.request_handler.append_next_tokens(next_tokens[n_matches].unsqueeze(0)) # trim past key values of the drafter model drafter_past_key_values = Drafter.trim_kv_cache( drafter_past_key_values, drafter_spec_length - n_matches - 1 ) # prepare inputs for the next round of speculation n = 1 if n_matches < drafter_spec_length else 2 input_token_ids = batch.get_1D_inputs_spec_dec(n) self.request_handler.update_batch_finished(batch, generation_config=self.generation_config) finished_sequences = self.request_handler.update() if len(finished_sequences) > 0: break # Reset back the number of speculated tokens of the batch, # this is used to handle the last round of speculation, in which case the number of speculated tokens # by the drafter is less than the number of speculated tokens set to the engine. 
batch.set_use_spec_dec(num_tokens_to_verify=self.n_spec_tokens) return finished_sequences def generate( self, request_ids: Union[List[int], int] = None, prompts: Union[List[str], str] = None, prompts_token_ids: Union[List[int], torch.Tensor, np.ndarray] = None, return_token_ids: bool = False, generation_config: Optional[GenerationConfig] = None, ) -> Union[List[str], Tuple[List[str], List[List[int]]]]: """ Executing the inference step. Args: request_ids (List[int], optional): The request ID. Defaults to None. prompts (Union[List[str], optional): Input prompts. Defaults to None. prompts_token_ids (Union[List[int], torch.Tensor, np.ndarray], optional): token ids of input prompts. Defaults to None. return_token_ids (bool, optional): Whether to return output token ids. Defaults to False. generation_config (Optional[GenerationConfig], optional): Huggingface GenerationConfig used for inference. Defaults to None. Returns: Union[List[str], Tuple[List[str], List[List[int]]]]: Inference result returned by one generation. """ gen_config_dict = generation_config.to_dict() if generation_config is not None else {} prompts = [prompts] if isinstance(prompts, str) else prompts request_ids = [request_ids] if isinstance(request_ids, int) else request_ids with torch.inference_mode(): if prompts is not None or prompts_token_ids is not None: self.add_request( request_ids=request_ids, prompts=prompts, prompts_token_ids=prompts_token_ids, **gen_config_dict, ) output_seqs_list = [] total_tokens_list = [] # intuition: If user provide a generation config, we should replace the existing one. if generation_config is not None: self.generation_config = generation_config self.generation_config_dict = gen_config_dict if self.use_spec_dec: assert self.drafter is not None, "Drafter Model is not initialized." 
while self.request_handler.check_unfinished_reqs(): output_seqs_list += self.steps_spec_dec() else: while self.request_handler.check_unfinished_reqs(): output_seqs_list += self.step() output_seqs_list = sorted(output_seqs_list, key=lambda x: int(x.request_id)) for seq in output_seqs_list: total_tokens_list.append(seq.input_token_id + seq.output_token_id) output_str = self.tokenizer.batch_decode(total_tokens_list, skip_special_tokens=True) if return_token_ids: output_tokens_list = [seq.output_token_id for seq in output_seqs_list] return output_str, output_tokens_list else: return output_str @property def has_prompt_template(self) -> bool: """ """ return self.inference_config.prompt_template is not None def format_prompt(self, prompts: Union[List[str], str]) -> Union[List[str], str]: """ This method will format the input prompt according to the prompt template given to the InferenceConfig. """ assert ( self.has_prompt_template ), "Found the prompt_template is None. Please provide a valid prompt_template in InferenceConfig." if isinstance(prompts, (list, tuple)): return [self.inference_config.prompt_template.format(input_text=prompt) for prompt in prompts] elif isinstance(prompts, str): return self.inference_config.prompt_template.format(input_text=prompts) else: raise TypeError(f"Expected the input prompt to be one of list, tuple, or str, but got {type(prompts)}.") def add_request( self, request_ids: Union[List[int], int] = None, prompts: Union[List[str], str] = None, prompts_token_ids: Union[List[int], torch.Tensor, np.ndarray] = None, **kwargs, ) -> None: """ Add requests. Args: request_ids (List[int], optional): The request ID. Defaults to None. prompts (Union[List[str], optional): Input prompts. Defaults to None. prompts_token_ids (List[List[int]], optional): token ids of input prompts. Defaults to None. 
""" # apply the prompt template to the input prompts if self.has_prompt_template and prompts is not None: prompts = self.format_prompt(prompts) block_size = self.inference_config.block_size if request_ids is not None and not isinstance(request_ids, list): request_ids = [request_ids] if prompts is not None and not isinstance(prompts, list): prompts = [prompts] if prompts_token_ids is None: assert prompts, "When the prompts_token_ids is none, the input prompt list must be provided." prompts_token_ids = self.tokenizer.batch_encode_plus(prompts, padding=self.inference_config.pad_input)[ "input_ids" ] # list of torch Tensor if isinstance(prompts_token_ids, list): if isinstance(prompts_token_ids[0], torch.Tensor): prompts_token_ids = [prompt_token_id.tolist() for prompt_token_id in prompts_token_ids] elif isinstance(prompts_token_ids, torch.Tensor) or isinstance(prompts_token_ids, np.ndarray): prompts_token_ids = prompts_token_ids.tolist() else: raise TypeError( f"The dtype of prompts_token_ids must be one of list, torch.Tensor, np.ndarray, but got {type(prompts_token_ids)}." ) assert ( len(prompts_token_ids[0]) <= self.inference_config.max_input_len ), f"The length of input prompts {len(prompts_token_ids[0])} must be less than max_input_len {self.inference_config.max_input_len}." 
prompts_num = len(prompts_token_ids) for i in range(prompts_num): if request_ids: assert isinstance( request_ids[0], int ), f"The request_id type must be int, but got {type(request_ids[0])}" assert len(request_ids) == prompts_num request_id = request_ids[i] else: request_id = next(self.counter) if prompts == None: prompt = None else: prompt = prompts[i] max_length = kwargs.get("max_length", None) max_new_tokens = kwargs.get("max_new_tokens", None) if max_length is None and max_new_tokens is None: max_new_tokens = self.generation_config.max_new_tokens or self.inference_config.max_output_len elif max_length is not None: max_new_tokens = max_length - len(prompts_token_ids[i]) if not self.inference_config.enable_streamingllm: assert ( self.inference_config.max_output_len >= max_new_tokens ), f"max_new_tokens={max_new_tokens} must be less than max_output_len={self.inference_config.max_output_len}." sequence = Sequence( request_id, prompt, prompts_token_ids[i], block_size, None, self.tokenizer.eos_token_id, self.tokenizer.pad_token_id, max_output_len=max_new_tokens, ignore_eos=self.inference_config.ignore_eos, ) self.request_handler.add_sequence(sequence) def prepare_input(self, batch: BatchBucket) -> Tuple[torch.Tensor, torch.Tensor, InputMetaData]: input_ids = batch.get_1D_inputs() sequence_lengths = batch.get_sequence_lengths() if batch.is_prompts: n_tokens = sequence_lengths.sum().item() else: n_tokens = batch.current_batch_size if batch.use_spec_dec: n_tokens = batch.num_tokens_to_verify + 1 assert n_tokens == input_ids.size(0) n_tokens = n_tokens * batch.current_batch_size output_tensor = torch.zeros( (n_tokens, batch.num_heads * batch.head_dim), dtype=batch.dtype, device=batch.device ) batch_token_ids = None if ( self.generation_config.repetition_penalty != 1.0 or self.generation_config.no_repeat_ngram_size > 0 or self.generation_config.forced_eos_token_id is not None ): batch_token_ids = batch.batch_token_ids # only when we have the graph for specific decoding 
batch size can we use the cuda graph for inference use_cuda_graph = False if self.use_cuda_graph and not batch.is_prompts and batch.current_batch_size in self.graph_runners.keys(): use_cuda_graph = True input_meta_data = InputMetaData( block_tables=batch.get_block_table_tensor(), sequence_lengths=sequence_lengths, fd_inter_tensor=batch.fd_inter_tensor, batch_size=batch.current_batch_size, is_prompts=batch.is_prompts, use_cuda_kernel=self.inference_config.use_cuda_kernel, use_cuda_graph=use_cuda_graph, high_precision=self.high_precision, kv_seq_len=sequence_lengths.max().item(), head_dim=batch.head_dim, dtype=batch.dtype, use_spec_dec=batch.use_spec_dec, num_tokens_to_verify=batch.num_tokens_to_verify, batch_token_ids=batch_token_ids, ) return input_ids, output_tensor, input_meta_data def step(self) -> List[str]: """ In each step, do the follows: 1. Run RequestHandler.schedule() and get the batch used for inference. 2. Get the input, inputinfo and output placeholder from the batchbucket
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/plugin.py
colossalai/inference/core/plugin.py
import logging import os from functools import reduce from pathlib import Path from typing import Optional import torch from colossalai.checkpoint_io.general_checkpoint_io import GeneralCheckpointIO from colossalai.checkpoint_io.index_file import CheckpointIndexFile from colossalai.checkpoint_io.utils import is_safetensors_available, load_shard_state_dict, load_state_dict_into_model from colossalai.cluster import DistCoordinator from colossalai.interface import ModelWrapper try: from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX except ImportError: _EXTRA_STATE_KEY_SUFFIX = "_extra_state" class InferCheckpoint_io(GeneralCheckpointIO): """ This class is for inference model loading, most codes are copied from colossalai.checkpoint_io.hybrid_parallel_checkpoint_io.HybridParallelCheckpointIO. Origin HybridParallelCheckpointIO contains some codes about MixPrecision-Training, so we remove them and build a relatively clean class specifically for Inference. """ def __init__( self, verbose: bool = True, ) -> None: super().__init__() self.verbose = verbose self.coordinator = DistCoordinator() def load_sharded_model(self, model: ModelWrapper, checkpoint_index_file: Path, strict: bool = False): """ Load sharded model with the given path to index file of checkpoint folder. Args: model (nn.Module): The model to be loaded. checkpoint_index_file (str): Path to the index file of checkpointing folder. strict (bool, optional): For name matching during loading state_dict. Defaults to False. This argument should be manually set to False since params on same device might be stored in different files. """ assert isinstance(model, ModelWrapper), "Please boost the model before loading!" model = model.unwrap() # Check whether the checkpoint uses safetensors. 
use_safetensors = False if "safetensors" in checkpoint_index_file.name: use_safetensors = True if use_safetensors and not is_safetensors_available(): raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.") # Read checkpoint index file. ckpt_index_file = CheckpointIndexFile.from_file(checkpoint_index_file) ckpt_root_path = ckpt_index_file.root_path weight_map = ckpt_index_file.weight_map strict = False # Load params & buffers to model. # Keep a record of loaded files so that file will not be repeatedly loaded. loaded_file = set() missing_keys = [] missing_file_keys = [] def _load(name: str): if name not in weight_map: missing_file_keys.append(name) return filename = weight_map[name] # If this param/buffer has been loaded before, directly return. if filename in loaded_file: return file_path = os.path.join(ckpt_root_path, filename) state_dict = load_shard_state_dict(Path(file_path), use_safetensors) load_state_dict_into_model( model, state_dict, missing_keys=missing_keys, strict=strict, load_sub_module=True ) loaded_file.add(filename) # Load parameters. for name, _ in model.named_parameters(): _load(name) # Load buffers. non_persistent_buffers = set() for n, m in model.named_modules(): non_persistent_buffers |= set(".".join((n, b)) for b in m._non_persistent_buffers_set) for name, buf in model.named_buffers(): if buf is not None and name not in non_persistent_buffers: _load(name) # Load extra states. extra_state_key = _EXTRA_STATE_KEY_SUFFIX if ( getattr(model.__class__, "get_extra_state", torch.nn.Module.get_extra_state) is not torch.nn.Module.get_extra_state ): _load(extra_state_key) if self.verbose and self.coordinator.is_master(): logging.info(f"The model has been successfully loaded from sharded checkpoint: {ckpt_root_path}.") if len(missing_keys) == 0: raise RuntimeError( "No weigth is loaded into the model. Please check the checkpoint files and the model structure." 
) remain_keys = reduce(lambda a, b: a & b, map(set, missing_keys)) remain_keys = remain_keys.union(set(missing_file_keys)) if len(remain_keys) > 0: if strict: error_msgs = [ "Missing key(s) in state_dict: {}. ".format(", ".join('"{}"'.format(k) for k in missing_keys)) ] raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( self.__class__.__name__, "\n\t".join(error_msgs) ) ) else: if self.coordinator.is_master(): logging.info(f"The following keys are not loaded from checkpoint: {remain_keys}") def save_sharded_model( self, model: ModelWrapper, checkpoint: str, gather_dtensor: bool = True, prefix: Optional[str] = None, size_per_shard: int = 1024, use_safetensors: bool = False, ) -> None: return NotImplementedError
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/rpc_engine.py
colossalai/inference/core/rpc_engine.py
import asyncio from itertools import count from time import sleep from typing import List, Tuple, Union import rpyc import torch import torch.nn as nn from rpyc.utils.server import ThreadedServer from torch import multiprocessing as mp from transformers import AutoConfig, PreTrainedTokenizer, PreTrainedTokenizerFast from transformers.configuration_utils import PretrainedConfig from colossalai.inference.batch_bucket import BatchBucket from colossalai.inference.config import InferenceConfig, InputMetaData from colossalai.inference.executor.rpc_worker import rpcWorkerService from colossalai.inference.utils import find_available_ports from colossalai.logging import get_dist_logger from colossalai.shardformer.policies.base_policy import Policy from .engine import InferenceEngine from .request_handler import RPCRequestHandler __all__ = ["RPCInferenceEngine"] def run_server(host, port, event: mp.Event = None): server = ThreadedServer( rpcWorkerService, port=port, protocol_config={"allow_public_attrs": True, "allow_all_attrs": True} ) if event: event.set() server.start() class RPCInferenceEngine(InferenceEngine): """ InferenceEngine which manages the inference process.. NOTE This `RPCInferenceEngine` is designed for multiple-card/online serving. Original `InferenceEngine` is designed for single card and offline service, though it supports multi-card offline inference. Args: model_or_path (nn.Module or str): Path or nn.Module of this model, Currently we don't support `nn.Module` Format tokenizer Optional[(Union[PreTrainedTokenizer, PreTrainedTokenizerFast])]: Path of the tokenizer to use. inference_config (Optional[InferenceConfig], optional): Store the configuration information related to inference. verbose (bool): Determine whether or not to log the generation process. model_policy ("Policy"): the policy to shardformer model. It will be determined by the model type if not provided. 
""" def __init__( self, model_or_path: Union[nn.Module, str], tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], inference_config: InferenceConfig, verbose: bool = False, model_policy: Policy = None, ) -> None: """ If you input a real model loaded by transformers, the init will take quite a long time Currently we don't support model(nn.Module) format as the param. """ torch.multiprocessing.set_start_method("spawn", force=True) self.inference_config = inference_config self.tokenizer = tokenizer self.tokenizer.pad_token = self.tokenizer.eos_token self.verbose = verbose self.logger = get_dist_logger(__name__) try: if isinstance(model_or_path, str): self.model_config = AutoConfig.from_pretrained( model_or_path, trust_remote_code=True, torch_dtype=self.dtype ) elif isinstance(model_or_path, nn.Module): self.logger.error( f"An exception occurred during loading model Config: For {__class__.__name__}, we don't support param like nn.Module currently\n" ) # self.model_config = model_or_path.config else: self.logger.error( f"An exception occurred during loading model Config: Please pass right param for {__class__.__name__}\n" ) except Exception as e: self.logger.error( f"An exception occurred during loading model Config: {e}, The path should be transformers-like\n" ) self.generation_config = inference_config.to_generation_config(self.model_config) self.tp_size = inference_config.tp_size self.events = [mp.Event() for _ in range(self.tp_size)] # This operation will init the dist env and models self.workers: List[rpcWorkerService] = [] self.init_workers() asyncio.run(self.init_model(model_or_path, model_policy)) # init the scheduler and logic block manager self.request_handler = self.init_scheduler(self.inference_config, self.model_config) # init the physical cache alloc_shape = self.request_handler.cache_manager.get_physical_cache_shape() self.init_device_cache(alloc_shape) self.use_cuda_graph = self.inference_config.use_cuda_graph self.high_precision = 
inference_config.high_precision self.dtype = inference_config.dtype # Model and relatable attrs of speculative decoding will be set by `enable_spec_dec` self.use_spec_dec = False self.drafter_model = None self.drafter = None self.use_glide = False self.n_spec_tokens = self.inference_config.max_n_spec_tokens self.counter = count() self._verify_args() self.logger.info("engine init over ") def _verify_args(self) -> None: """Verify the input args""" if not isinstance(self.inference_config, InferenceConfig): raise TypeError("Invalid type of inference config provided.") if not isinstance(self.tokenizer, (PreTrainedTokenizerFast, PreTrainedTokenizer)): raise TypeError( f"the tokenizer type must be PreTrainedTokenizer or PreTrainedTokenizerFast, but got {type(self.tokenizer)}" ) def init_workers(self): rpc_ports = find_available_ports(self.tp_size) self.worker_processes = [] # mp.set_start_method('spawn') for event, rpc_port in zip(self.events, rpc_ports): p = mp.Process(target=run_server, args=("localhost", rpc_port, event)) p.start() self.worker_processes.append(p) self.logger.info(f"Starting RPC Worker on localhost:{rpc_port}...") # Wait for all servers to start for event in self.events: event.wait() event.clear() sleep(0.05) self.logger.info(f"init rpc server done.") for rpc_port in rpc_ports: try: conn = rpyc.connect( "localhost", rpc_port, config={"allow_pickle": True, "allow_public_attrs": True, "allow_all_attrs": True}, ) self.workers.append(conn.root) except: raise Exception("conn error!") self.logger.info(f"Build RPC Connection Success! 
Begin to load model...") asyncio.run(self.init_worker_env()) self.logger.info(f"init dist env over") async def async_parallel_wrapper(self, f, *args, **kwargs): async_res = rpyc.async_(f)(*args, **kwargs) await asyncio.to_thread(async_res.wait) assert async_res.ready return async_res.value async def init_worker_env(self): assert len(self.workers) == self.tp_size, "init workers first" dist_group_port = find_available_ports(1)[0] init_tasks = [ self.async_parallel_wrapper( worker.init_dist_env, rank, self.inference_config.tp_size, "127.0.0.1", dist_group_port ) for rank, worker in enumerate(self.workers) ] await asyncio.gather(*init_tasks) async def init_model(self, model_or_path: Union[nn.Module, str], model_policy: Policy = None): assert len(self.workers) == self.tp_size, "init workers first" inference_config_param = self.inference_config.to_rpc_param() model_path = model_or_path model_policy_param = model_policy.to_rpc_param() if model_policy else None init_tasks = [ self.async_parallel_wrapper(worker.init_model, inference_config_param, model_path, model_policy_param) for rank, worker in enumerate(self.workers) ] await asyncio.gather(*init_tasks) def init_scheduler(self, inference_config: InferenceConfig, model_config: PretrainedConfig) -> RPCRequestHandler: return RPCRequestHandler(inference_config, model_config) async def _init_device_cache(self, alloc_shape: Tuple[int, int, int, int]): assert len(self.workers) == self.tp_size, "init workers first" init_tasks = [self.async_parallel_wrapper(worker.init_cache, alloc_shape) for worker in self.workers] await asyncio.gather(*init_tasks) def init_device_cache(self, alloc_shape: Tuple[Tuple[int, ...], Tuple[int, ...]]): asyncio.run(self._init_device_cache(alloc_shape)) def prepare_input(self, batch: BatchBucket) -> Tuple[List[int], InputMetaData]: input_ids = batch.get_1D_inputs() sequence_lengths = batch.get_sequence_lengths() if batch.is_prompts: n_tokens = sequence_lengths.sum().item() else: n_tokens = 
batch.current_batch_size if batch.use_spec_dec: n_tokens = batch.num_tokens_to_verify + 1 assert n_tokens == input_ids.size(0) n_tokens = n_tokens * batch.current_batch_size batch_token_ids = None config_dict = self.generation_config.to_dict() # process repetition_penalty, no_repeat_ngram_size for type in ["repetition_penalty", "no_repeat_ngram_size"]: if type in config_dict and config_dict[type] is not None: batch_token_ids = batch.batch_token_ids # only when we have the graph for specific decoding batch size can we use the cuda graph for inference use_cuda_graph = False if self.use_cuda_graph and not batch.is_prompts and batch.current_batch_size in self.graph_runners.keys(): use_cuda_graph = True input_meta_data = InputMetaData( block_tables=batch.get_block_table_tensor(), sequence_lengths=sequence_lengths, fd_inter_tensor=None, batch_size=batch.current_batch_size, is_prompts=batch.is_prompts, use_cuda_kernel=self.inference_config.use_cuda_kernel, use_cuda_graph=use_cuda_graph, high_precision=self.high_precision, kv_seq_len=sequence_lengths.max().item(), head_dim=batch.head_dim, dtype=batch.dtype, use_spec_dec=batch.use_spec_dec, num_tokens_to_verify=batch.num_tokens_to_verify, batch_token_ids=batch_token_ids, ) return input_ids.tolist(), input_meta_data async def step_(self, input_token_ids, input_meta_data: InputMetaData): assert len(self.workers) == self.tp_size, "init workers first" init_tasks = [ self.async_parallel_wrapper( worker.execute_model_forward, input_token_ids, input_meta_data.to_rpc_param(), self.generation_config_dict, ) for worker in self.workers ] ret = await asyncio.gather(*init_tasks) return ret[0] def step(self) -> List[str]: batch = self.request_handler.schedule() input_token_ids, input_meta_data = self.prepare_input(batch) # TODO: padding_id is used for generating attn_mask and will be removed if nopad version is supported. 
next_tokens = asyncio.run(self.step_(input_token_ids, input_meta_data)) # update the request_handler next_tokens = torch.tensor(next_tokens, dtype=torch.int) self.request_handler.append_next_tokens(next_tokens) finished_sequences = self.request_handler.update() return finished_sequences def kill_workers(self): """ I don't find a good way to implicit invoke self.kill_workers """ assert len(self.workers) != 0 for proc in self.worker_processes: proc.kill() proc.join() self.logger.info(f"worker killed, serving end") def __del__(self): self.kill_workers()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/request_handler.py
colossalai/inference/core/request_handler.py
from typing import Dict, List, Union import torch from transformers.configuration_utils import PretrainedConfig from transformers.generation import GenerationConfig from colossalai.inference.batch_bucket import BatchBucket from colossalai.inference.config import InferenceConfig from colossalai.inference.flash_decoding_utils import FDIntermTensors from colossalai.inference.kv_cache import KVCacheManager, RPCKVCacheManager from colossalai.inference.struct import DiffusionSequence, RequestStatus, Sequence from colossalai.logging import get_dist_logger logger = get_dist_logger(__name__) __all__ = ["RunningList", "RequestHandler"] class RunningList: """ RunningList is an structure for recording the running sequences, contains prefill and decoding list. Prefilling samples will be hold until the actual ratio of prefill samples versus decoding samples exceeds ratio. Args: prefill_ratio: (float) A ratio for determing whether to perform prefill or not. _prefill (OrderedDict[Sequence]): Mapping of sequence uid -> Sequence. _decoding (OrderedDict[Sequence]): Mapping of sequence uid -> Sequence. """ def __init__(self, prefill_ratio: int, prefill: List[Sequence] = None) -> None: self.prefill_ratio = prefill_ratio self._decoding: Dict[int, Sequence] = dict() self._prefill: Dict[int, Sequence] = ( dict({seq.request_id: seq for seq in self._prefill}) if prefill is not None else dict() ) @property def decoding(self): return list(self._decoding.values()) @property def prefill(self): return list(self._prefill.values()) @property def prefill_seq_num(self): return len(self._prefill) @property def decoding_seq_num(self): return len(self._decoding) @property def total_seq_num(self): return self.prefill_seq_num + self.decoding_seq_num def append(self, seq: Sequence): assert (seq.request_id not in self._prefill) and ( seq.request_id not in self._decoding ), f"Sequence uid {seq.request_id} already exists." 
self._prefill[seq.request_id] = seq def extend(self, seqs: List[Sequence]): for seq in seqs: self._prefill[seq.request_id] = seq def find_seq(self, request_id) -> Union[Sequence, None]: seq = None if request_id in self._decoding: seq = self._decoding[request_id] elif request_id in self._prefill: seq = self._prefill[request_id] return seq def remove(self, seq: Sequence) -> None: if seq.request_id in self._decoding: self._decoding.pop(seq.request_id) elif seq.request_id in self._prefill: self._prefill.pop(seq.request_id) else: raise ValueError(f"Sequence {seq.request_id} is not in running list") def ready_for_prefill(self): if not self._decoding: return len(self._prefill) > 0 return len(self._prefill) / len(self._decoding) >= self.prefill_ratio def is_empty(self): return not self._decoding and not self._prefill def mark_prefill_running(self) -> None: for seq_id in self._prefill: self._prefill[seq_id].mark_running() def move_prefill_to_decoding(self, seq_ids: List[int]) -> None: for seq_id in seq_ids: assert seq_id in self._prefill, f"Sequence {seq_id} is not in prefill list" self._decoding[seq_id] = self._prefill.pop(seq_id) class NaiveRequestHandler: def __init__(self) -> None: self.running_list: List[DiffusionSequence] = [] self.waiting_list: List[str] = [] def _has_waiting(self) -> bool: return any(lst for lst in self.waiting_list) def _has_running(self) -> bool: return any(lst for lst in self.running_list) def check_unfinished_reqs(self): return self._has_waiting() or self._has_running() def add_sequence(self, seq: DiffusionSequence): """ Add the request to waiting list. """ assert not self._find_sequence(seq.request_id), f"Sequence {seq.request_id} already exists." self.waiting_list.append(seq) def _find_sequence(self, request_id: int) -> DiffusionSequence: """ Find the request by request_id. 
""" for lst in enumerate(self.waiting_list + self.running_list): for seq in lst: if seq.request_id == request_id: return seq return None def schedule(self): ret = None if self._has_waiting: ret = self.waiting_list[0] self.waiting_list = self.waiting_list[1:] return ret class RequestHandler(NaiveRequestHandler): """ RequestHandler is the core for handling existing requests and updating current batch. During generation process, we call schedule function each iteration to update current batch. Args: inference_config: Configuration for initialize and manage kv cache. model_config: Configuration for model dtype (torch.dtype): The data type for weights and activations. """ def __init__(self, inference_config: InferenceConfig, model_config: PretrainedConfig) -> None: self.inference_config = inference_config self.running_list: RunningList = RunningList(inference_config.prefill_ratio) self.waiting_list: List[List] = [[], [], []] self.done_list: List[Sequence] = [] self.dtype = inference_config.dtype self.max_batch_size = inference_config.max_batch_size # initialize cache self._init_cache(model_config) # initialize batch device = torch.cuda.current_device() kv_max_split_num = ( inference_config.max_input_len + inference_config.max_output_len + inference_config.block_size - 1 ) // inference_config.block_size head_dim = model_config.hidden_size // model_config.num_attention_heads fd_inter_tensor = FDIntermTensors() if fd_inter_tensor._tensors_initialized: fd_inter_tensor._reset() # For Spec-Dec, process the speculated tokens plus the token in the last step for each seq max_n_tokens = self.max_batch_size max_n_tokens *= self.inference_config.max_n_spec_tokens + 1 fd_inter_tensor.initialize( max_batch_size=max_n_tokens, num_attn_heads=model_config.num_attention_heads // inference_config.tp_size, kv_max_split_num=kv_max_split_num, head_dim=head_dim, dtype=self.dtype, device=device, ) # TODO In the continuous batching scenario, the batch size may be greater than max_batch_size, # 
which may cause bugs and this issue should be fixed later. self.running_bb = BatchBucket( num_heads=model_config.num_attention_heads // inference_config.tp_size, head_dim=head_dim, max_batch_size=self.max_batch_size, max_length=inference_config.max_input_len + inference_config.max_output_len, block_size=inference_config.block_size, kv_max_split_num=kv_max_split_num, fd_interm_tensor=fd_inter_tensor, dtype=self.dtype, device=device, enable_streamingllm=inference_config.enable_streamingllm, start_token_size=inference_config.start_token_size, generated_token_size=inference_config.generated_token_size, ) self.prefill_bb = BatchBucket( num_heads=model_config.num_attention_heads // inference_config.tp_size, head_dim=head_dim, max_batch_size=self.max_batch_size, max_length=inference_config.max_input_len + inference_config.max_output_len, block_size=inference_config.block_size, kv_max_split_num=kv_max_split_num, fd_interm_tensor=fd_inter_tensor, dtype=self.dtype, device=device, enable_streamingllm=inference_config.enable_streamingllm, start_token_size=inference_config.start_token_size, generated_token_size=inference_config.generated_token_size, ) def _has_running(self) -> bool: return not self.running_bb.is_empty() def _init_cache(self, model_config): self.cache_manager = KVCacheManager(self.inference_config, model_config) def get_kvcache(self): return self.cache_manager.get_kv_cache() def set_spec_dec_mode(self, n_spec_tokens: int): self.prefill_bb.set_use_spec_dec(n_spec_tokens) self.running_bb.set_use_spec_dec(n_spec_tokens) def unset_spec_dec_mode(self): self.prefill_bb.reset_use_spec_dec() self.running_bb.reset_use_spec_dec() def schedule(self): """ The main logic of request handler. """ if self._has_waiting(): # Try to allocate cache blocks for the sequence using a priority of prompt length. 
for lst in reversed(self.waiting_list): if lst: remove_list = [] for seq in lst: if seq.input_len > self.inference_config.max_input_len: # If the prompt length is longer than max_input_len, abort the sequence. logger.warning( f"the prompt(Request id = {seq.request_id}) length is longer than max_input_len, abort this sequence." ) self.abort_sequence(seq.request_id) remove_list.append(seq) break num_seqs_to_add = min(len(lst), self.max_batch_size - self.running_list.total_seq_num) # for now the recycle logic is not working remove_list.extend(lst[:num_seqs_to_add]) self.running_list.extend(lst[:num_seqs_to_add]) for seq in remove_list: lst.remove(seq) if self.running_list.ready_for_prefill(): num_seqs_to_add = min(self.running_list.prefill_seq_num, self.prefill_bb.available_batch_size) # overwrite the number of sequences to add to 1 if use_spec_dec is enabled # TODO (zhaoyuanheng): support speculative decoding for batch size > 1 if self.prefill_bb.use_spec_dec: num_seqs_to_add = 1 for seq in self.running_list.prefill[:num_seqs_to_add]: seq.mark_running() # allocate blocks for the prefill batch self.prefill_bb.add_seqs( self.running_list.prefill[:num_seqs_to_add], alloc_block_tables_fn=self.cache_manager.allocate_context_from_block_tables, ) return self.prefill_bb if not self.running_bb.is_empty: seqs_ids_to_recycle = self.cache_manager.allocate_tokens_from_block_tables( self.running_bb.block_tables, self.running_bb.seq_lengths, self.running_bb.current_batch_size ) if seqs_ids_to_recycle: seqs_to_recycle = self.running_bb.pop_seqs(seqs_ids_to_recycle) for seq in seqs_to_recycle: seq.recycle() self.running_list.remove(seq) self.waiting_list[-1].append(seq) # the recycled sequences are handled with highest priority. 
return self.running_bb def allocate_batch_spec_dec(self, batch: BatchBucket, n: int): assert batch.use_spec_dec if n > 0: self.cache_manager.allocate_n_tokens_from_block_tables( batch.block_tables, batch.seq_lengths, batch.current_batch_size, n=n ) def add_sequence(self, req: Sequence): """ Add the request to waiting list. """ assert not self._find_sequence(req.request_id), f"Sequence {req.request_id} already exists." assert ( req.input_len <= self.inference_config.max_input_len ), f"Sequence {req.request_id} exceeds input length limit" self.waiting_list[req.input_len * 3 // (self.inference_config.max_input_len + 1)].append(req) def abort_sequence(self, request_id: int): """ Abort the request. """ result = self._find_sequence(request_id) if result is not None: seq, priority = result if seq.status == RequestStatus.WAITING: seq.mark_aborted() self.waiting_list[priority].remove(seq) elif seq.status.is_running(): self.running_bb.pop_seq_update_batch(seq.request_id, self.cache_manager.free_block_table) self.running_list.remove(seq) else: try: self.done_list.remove(seq) except: return return def _find_sequence(self, request_id: int) -> Sequence: """ Find the request by request_id. 
""" for priority, lst in enumerate(self.waiting_list): for seq in lst: if seq.request_id == request_id: return seq, priority if self.running_list.find_seq(request_id): return seq, None return None def update_seq_finished(self, sequence: Sequence, generation_config: GenerationConfig): if ( sequence.output_token_id[-1] == generation_config.eos_token_id or sequence.output_len >= generation_config.max_length ): sequence.mark_finished() def update_batch_finished(self, batch: BatchBucket, generation_config: GenerationConfig): for seq in batch.seqs_li: max_length = generation_config.max_length max_new_tokens = generation_config.max_new_tokens if max_length is not None: max_new_tokens = max_length - seq.input_len if seq.output_token_id[-1] == generation_config.eos_token_id or seq.output_len >= max_new_tokens: seq.mark_finished() def check_unfinished_reqs(self) -> bool: return self._has_waiting() or not self.running_list.is_empty() def total_requests_in_batch_bucket(self) -> int: return self.prefill_bb.current_batch_size + self.running_bb.current_batch_size def append_next_tokens(self, sample_tokens: torch.Tensor): assert sample_tokens.dim() == 1 n_elements = sample_tokens.size(0) if not self.prefill_bb.is_empty: assert ( self.prefill_bb.current_batch_size == n_elements ), f"Incompatible size: {n_elements} tokens to append while prefill batch size {self.prefill_bb.current_batch_size}" self.prefill_bb.append_batch_tokens(sample_tokens) else: assert ( self.running_bb.current_batch_size == n_elements ), f"Incompatible size: {n_elements} tokens to append while running batch size {self.running_bb.current_batch_size}" self.running_bb.append_batch_tokens(sample_tokens) def update(self): """ Update current running list and done list """ if not self.prefill_bb.is_empty: self.running_list.move_prefill_to_decoding(self.prefill_bb.seqs_ids) self.running_bb.merge(self.prefill_bb) # clear the prefill batch without assigning a free_block_tables_fn # since we want to reuse the memory 
recorded on the block tables self.prefill_bb.clear(free_block_tables_fn=None) finished_seqs, _ = self.running_bb.pop_finished(self.cache_manager.free_block_table) for seq in finished_seqs: self.running_list.remove(seq) self.done_list.extend(finished_seqs) return finished_seqs def streamingllm_free_block_tables(self, updated_block_ids: List[int]): """ Free the block that needs to be swapped out. """ self.cache_manager.streamingllm_free_block_tables(updated_block_ids) class RPCRequestHandler(RequestHandler): """ RPC Version of request handler """ def __init__(self, inference_config: InferenceConfig, model_config: PretrainedConfig) -> None: self.inference_config = inference_config self.running_list: RunningList = RunningList(inference_config.prefill_ratio) self.waiting_list: List[List] = [[], [], []] self.done_list: List[Sequence] = [] self.dtype = inference_config.dtype self.max_batch_size = inference_config.max_batch_size # initialize cache self._init_cache(model_config) # initialize batch torch.cuda.current_device() kv_max_split_num = ( inference_config.max_input_len + inference_config.max_output_len + inference_config.block_size - 1 ) // inference_config.block_size head_dim = model_config.hidden_size // model_config.num_attention_heads # TODO In the continuous batching scenario, the batch size may be greater than max_batch_size, # which may cause bugs and this issue should be fixed later. 
self.running_bb = BatchBucket( num_heads=model_config.num_attention_heads // inference_config.tp_size, head_dim=head_dim, max_batch_size=self.max_batch_size, max_length=inference_config.max_input_len + inference_config.max_output_len, block_size=inference_config.block_size, kv_max_split_num=kv_max_split_num, fd_interm_tensor=None, dtype=self.dtype, ) self.prefill_bb = BatchBucket( num_heads=model_config.num_attention_heads // inference_config.tp_size, head_dim=head_dim, max_batch_size=self.max_batch_size, max_length=inference_config.max_input_len + inference_config.max_output_len, block_size=inference_config.block_size, kv_max_split_num=kv_max_split_num, fd_interm_tensor=None, dtype=self.dtype, ) def _init_cache(self, model_config): self.cache_manager = RPCKVCacheManager(self.inference_config, model_config)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/__init__.py
colossalai/inference/core/__init__.py
from .engine import InferenceEngine from .request_handler import RequestHandler __all__ = ["InferenceEngine", "RequestHandler"]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/base_engine.py
colossalai/inference/core/base_engine.py
from abc import ABC, abstractmethod import torch import torch.nn as nn from colossalai.cluster import ProcessGroupMesh from colossalai.inference.config import ModelShardInferenceConfig from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer import ShardConfig, ShardFormer from colossalai.shardformer.policies.base_policy import Policy class BaseEngine(ABC): @abstractmethod def __init__(self, model_or_path, inference_config=None, verbose=False, model_policy=None): pass @abstractmethod def init_model(self, model_or_path, model_policy=None, model_shard_infer_config=None): """ Init Model for Engine """ @abstractmethod def generate(self, request_ids=None, prompts=None, generation_config=None, **kwargs): """ Generate ouptput for coming requests """ @abstractmethod def add_request(self, prompts, request_ids=None, **kwargs): """ Add new request to Engine """ @abstractmethod def step(self): """ Perform one new step forward """ @abstractmethod def _verify_args(self): """ Verify the parameters and members of class """ @torch.inference_mode() def capture_model(self): """ Use cuda graph to capture model """ return NotImplementedError("This method should be implemented by subclasses") def _shardformer( self, model: nn.Module, model_policy: Policy, model_shard_infer_config: ModelShardInferenceConfig = None, stage_manager: PipelineStageManager = None, tp_group: ProcessGroupMesh = None, **kwargs, ) -> nn.Module: """ Initialize ShardConfig and replace the model with shardformer. Args: model (nn.Module): Path or nn.Module of this model. model_policy (Policy): The policy to shardformer model which is determined by the model type. stage_manager (PipelineStageManager, optional): Used to manage pipeline stages. Defaults to None. tp_group (ProcessGroupMesh, optional): Used to manage the process TP group mesh. Defaults to None. Returns: nn.Module: The model optimized by Shardformer. 
""" shardconfig = ShardConfig( tensor_parallel_process_group=tp_group, pipeline_stage_manager=stage_manager, enable_tensor_parallelism=(self.inference_config.tp_size > 1), enable_fused_normalization=False, enable_all_optimization=False, enable_flash_attention=False, enable_jit_fused=False, enable_sequence_parallelism=False, extra_kwargs={"model_shard_infer_config": model_shard_infer_config, **kwargs}, ) shardformer = ShardFormer(shard_config=shardconfig) shard_model, _ = shardformer.optimize(model, model_policy) return shard_model
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/diffusion_engine.py
colossalai/inference/core/diffusion_engine.py
from itertools import count from typing import List, Tuple, Type, Union import numpy as np import PIL.Image import torch import torch.nn as nn from diffusers.pipelines.pipeline_utils import DiffusionPipeline from torch import distributed as dist from colossalai.accelerator import get_accelerator from colossalai.cluster import ProcessGroupMesh from colossalai.inference.config import DiffusionGenerationConfig, InferenceConfig, ModelShardInferenceConfig from colossalai.inference.modeling.layers.diffusion import DiffusionPipe from colossalai.inference.modeling.policy import model_policy_map from colossalai.inference.struct import DiffusionSequence from colossalai.inference.utils import get_model_size, get_model_type from colossalai.logging import get_dist_logger from colossalai.shardformer.policies.base_policy import Policy from .base_engine import BaseEngine from .request_handler import NaiveRequestHandler PP_AXIS, TP_AXIS = 0, 1 class DiffusionEngine(BaseEngine): def __init__( self, model_or_path: DiffusionPipeline | str, inference_config: InferenceConfig = None, verbose: bool = False, model_policy: Policy | type[Policy] = None, ) -> None: self.inference_config = inference_config self.dtype = inference_config.dtype self.high_precision = inference_config.high_precision self.verbose = verbose self.logger = get_dist_logger(__name__) self.model_shard_infer_config = inference_config.to_model_shard_inference_config() self.model_type = get_model_type(model_or_path=model_or_path) self.init_model(model_or_path, model_policy, self.model_shard_infer_config) self.request_handler = NaiveRequestHandler() self.counter = count() self._verify_args() def _verify_args(self) -> None: assert isinstance(self.model, DiffusionPipe), "model must be DiffusionPipe" def init_model( self, model_or_path: Union[str, nn.Module, DiffusionPipeline], model_policy: Union[Policy, Type[Policy]] = None, model_shard_infer_config: ModelShardInferenceConfig = None, ): """ Shard model or/and Load weight Args: 
model_or_path Union[nn.Module, str]: path to the checkpoint or model of transformer format. model_policy (Policy): the policy to replace the model. model_inference_config: the configuration for modeling initialization when inference. model_shard_infer_config (ModelShardInferenceConfig): the configuration for init of module when inference. """ if isinstance(model_or_path, str): model = DiffusionPipeline.from_pretrained(model_or_path, torch_dtype=self.dtype) policy_map_key = model.__class__.__name__ model = DiffusionPipe(model) elif isinstance(model_or_path, DiffusionPipeline): policy_map_key = model_or_path.__class__.__name__ model = DiffusionPipe(model_or_path) else: self.logger.error(f"model_or_path support only str or DiffusionPipeline currently!") torch.cuda.empty_cache() init_gpu_memory = torch.cuda.mem_get_info()[0] self.device = get_accelerator().get_current_device() if self.verbose: self.logger.info(f"the device is {self.device}") if self.verbose: self.logger.info( f"Before the shard, Rank: [{dist.get_rank()}], model size: {get_model_size(model)} GB, model's device is: {model.device}" ) if model_policy is None: model_policy = model_policy_map.get(policy_map_key) if not isinstance(model_policy, Policy): try: model_policy = model_policy() except Exception as e: raise ValueError(f"Unable to instantiate model policy: {e}") assert isinstance(model_policy, Policy), f"Invalid type of model policy: {type(model_policy)}" pg_mesh = ProcessGroupMesh(self.inference_config.pp_size, self.inference_config.tp_size) tp_group = pg_mesh.get_group_along_axis(TP_AXIS) self.model = self._shardformer( model, model_policy, model_shard_infer_config, None, tp_group=tp_group, ) self.model = model.to(self.device) if self.verbose: self.logger.info( f"After the shard, Rank: [{dist.get_rank()}], model size: {get_model_size(self.model)} GB, model's device is: {model.device}" ) free_gpu_memory, _ = torch.cuda.mem_get_info() peak_memory = init_gpu_memory - free_gpu_memory if self.verbose: 
self.logger.info( f"Rank [{dist.get_rank()}], Model Weight Max Occupy {peak_memory / (1024 ** 3)} GB, Model size: {get_model_size(self.model)} GB" ) def generate( self, request_ids: Union[List[int], int] = None, prompts: Union[List[str], str] = None, generation_config: DiffusionGenerationConfig = None, **kwargs, ) -> Union[List[Union[str, List[PIL.Image.Image], np.ndarray]], Tuple[List[str], List[List[int]]]]: """ """ gen_config_dict = generation_config.to_dict() if generation_config is not None else {} prompts = [prompts] if isinstance(prompts, str) else prompts request_ids = [request_ids] if isinstance(request_ids, int) else request_ids with torch.inference_mode(): if prompts is not None: self.add_request( request_ids=request_ids, prompts=prompts, **gen_config_dict, **kwargs, ) output_reqs_list = [] # intuition: If user provide a generation config, we should replace the existing one. if generation_config is not None: self.generation_config = generation_config self.generation_config_dict = gen_config_dict while self.request_handler.check_unfinished_reqs(): output_reqs_list += self.step() return output_reqs_list def add_request( self, prompts: Union[List[str], str], request_ids: Union[List[int], int] = None, **kwargs, ): if request_ids is not None and not isinstance(request_ids, list): request_ids = [request_ids] if not isinstance(prompts, list): prompts = [prompts] generation_config = DiffusionGenerationConfig.from_kwargs(**kwargs) prompts_num = len(prompts) for i in range(prompts_num): if request_ids: assert isinstance( request_ids[0], int ), f"The request_id type must be int, but got {type(request_ids[0])}" assert len(request_ids) == prompts_num request_id = request_ids[i] else: request_id = next(self.counter) seq = DiffusionSequence(request_id=request_id, prompt=prompts[i], generation_config=generation_config) self.request_handler.add_sequence(seq) def step(self) -> List[PIL.Image.Image]: """ In each step, do the follows: 1. 
Run RequestHandler.schedule() and get the batch used for inference. 2. run forward to get List[Image] Returns: List[PIL.Image.Image]: Image Generated by one step. """ input = self.request_handler.schedule() ret = self.model(prompt=input.prompt, **input.generation_config.to_dict()) return ret
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/core/engine.py
colossalai/inference/core/engine.py
from typing import List, Tuple, Type, Union import numpy as np import PIL.Image import torch.nn as nn from diffusers import DiffusionPipeline from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast from colossalai.inference.config import InferenceConfig from colossalai.inference.utils import ModelType, get_model_type from colossalai.shardformer.policies.base_policy import Policy __all__ = ["InferenceEngine"] class InferenceEngine: """ InferenceEngine which manages the inference process.. Args: model_or_path (nn.Module or DiffusionPipeline or str): Path or nn.Module or DiffusionPipeline of this model. tokenizer Optional[(Union[PreTrainedTokenizer, PreTrainedTokenizerFast])]: Path of the tokenizer to use. inference_config (Optional[InferenceConfig], optional): Store the configuration information related to inference. verbose (bool): Determine whether or not to log the generation process. model_policy ("Policy"): the policy to shardformer model. It will be determined by the model type if not provided. 
""" def __init__( self, model_or_path: Union[nn.Module, str, DiffusionPipeline], tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast] = None, inference_config: InferenceConfig = None, verbose: bool = False, model_policy: Union[Policy, Type[Policy]] = None, ) -> None: self.__dict__["_initialized"] = False # use __dict__ directly to avoid calling __setattr__ self.model_type = get_model_type(model_or_path=model_or_path) self.engine = None if self.model_type == ModelType.LLM: from .llm_engine import LLMEngine self.engine = LLMEngine( model_or_path=model_or_path, tokenizer=tokenizer, inference_config=inference_config, verbose=verbose, model_policy=model_policy, ) elif self.model_type == ModelType.DIFFUSION_MODEL: from .diffusion_engine import DiffusionEngine self.engine = DiffusionEngine( model_or_path=model_or_path, inference_config=inference_config, verbose=verbose, model_policy=model_policy, ) elif self.model_type == ModelType.UNKNOWN: self.logger.error(f"Model Type either Difffusion or LLM!") self._initialized = True self._verify_args() def _verify_args(self) -> None: """Verify the input args""" assert self.engine is not None, "Please init Engine first" assert self._initialized, "Engine must be initialized" def generate( self, request_ids: Union[List[int], int] = None, prompts: Union[List[str], str] = None, *args, **kwargs, ) -> Union[List[Union[str, List[PIL.Image.Image], np.ndarray]], Tuple[List[str], List[List[int]]]]: """ Executing the inference step. Args: request_ids (List[int], optional): The request ID. Defaults to None. prompts (Union[List[str], optional): Input prompts. Defaults to None. """ assert self.engine is not None, "Please init Engine first" return self.engine.generate(request_ids=request_ids, prompts=prompts, *args, **kwargs) def add_request( self, request_ids: Union[List[int], int] = None, prompts: Union[List[str], str] = None, *args, **kwargs, ) -> None: """ Add requests. Args: request_ids (List[int], optional): The request ID. 
Defaults to None. prompts (Union[List[str], optional): Input prompts. Defaults to None. prompts_token_ids (List[List[int]], optional): token ids of input prompts. Defaults to None. kwargs: for LLM, it could be max_length, max_new_tokens, etc for diffusion, it could be prompt_2, prompt_3, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, negative_prompt_2, negative_prompt_3, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, clip_skip, which aligns with diffusers """ assert self.engine is not None, "Please init Engine first" self.engine.add_request(request_ids=request_ids, prompts=prompts, *args, **kwargs) def step(self): assert self.engine is not None, "Please init Engine first" return self.engine.step() def __getattr__(self, name): """ The Design logic of getattr, setattr: 1. Since InferenceEngine is a wrapper for DiffusionEngine/LLMEngine, we hope to invoke all the member of DiffusionEngine/LLMEngine like we just call the member of InferenceEngine. 2. When we call the __init__ of InferenceEngine, we don't want to setattr using self.__dict__["xxx"] = xxx, we want to use origin ways like self.xxx = xxx So we set the attribute `_initialized`. And after initialized, if we couldn't get the member from InferenceEngine, we will try to get the member from self.engine(DiffusionEngine/LLMEngine) """ if self.__dict__.get("_initialized", False): if name in self.__dict__: return self.__dict__[name] else: return getattr(self.engine, name) else: return self.__dict__[name] def __setattr__(self, name, value): if self.__dict__.get("_initialized", False): if name in self.__dict__: self.__dict__[name] = value else: setattr(self.engine, name, value) else: self.__dict__[name] = value
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/server/completion_service.py
colossalai/inference/server/completion_service.py
import asyncio from colossalai.inference.core.async_engine import AsyncInferenceEngine from .utils import id_generator class CompletionServing: def __init__(self, engine: AsyncInferenceEngine, served_model: str): self.engine = engine self.served_model = served_model try: asyncio.get_running_loop() except RuntimeError: pass async def create_completion(self, request, generation_config): request_dict = await request.json() request_id = id_generator() prompt = request_dict.pop("prompt") # it is not a intuitive way self.engine.engine.generation_config = generation_config result_generator = self.engine.generate(request_id, prompt=prompt, generation_config=generation_config) if await request.is_disconnected(): # Abort the request if the client disconnects. await self.engine.abort(request_id) raise RuntimeError("Client disconnected") final_res = await result_generator return final_res
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/server/chat_service.py
colossalai/inference/server/chat_service.py
import asyncio
import codecs
import logging

from fastapi import Request

from colossalai.inference.core.async_engine import AsyncInferenceEngine

from .utils import ChatCompletionResponseStreamChoice, ChatMessage, DeltaMessage, id_generator

logger = logging.getLogger("colossalai-inference")


class ChatServing:
    """Serve OpenAI-style chat requests on top of an AsyncInferenceEngine."""

    def __init__(
        self, engine: AsyncInferenceEngine, served_model: str, tokenizer, response_role: str, chat_template=None
    ):
        """Store engine/tokenizer handles and load the chat template.

        Args:
            engine: async inference engine that performs generation.
            served_model: public model name reported by the API.
            tokenizer: tokenizer providing ``apply_chat_template``.
            response_role: role returned when ``add_generation_prompt`` is set.
            chat_template: path to a template file, or the template text itself.
        """
        self.engine = engine
        self.served_model = served_model
        self.tokenizer = tokenizer
        self.response_role = response_role
        self._load_chat_template(chat_template)
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            pass

    async def create_chat(self, request: Request, generation_config):
        """Dispatch a chat request to the streaming or full-response path.

        Raises:
            RuntimeError: when applying the chat template fails.
        """
        request_dict = await request.json()
        messages = request_dict["messages"]
        # Fix: a JSON boolean (``"stream": false``) has no ``.lower()``;
        # normalize through str() so both "false" and false are accepted.
        stream = str(request_dict.pop("stream", "false")).lower()
        # Fix: use .get() instead of .pop() — get_chat_request_role() reads
        # this same key from request_dict later; popping it made the role
        # logic always fall through to the last message's role.
        add_generation_prompt = request_dict.get("add_generation_prompt", False)
        request_id = id_generator()
        try:
            prompt = self.tokenizer.apply_chat_template(
                conversation=messages,
                tokenize=False,
                add_generation_prompt=add_generation_prompt,
            )
        except Exception as e:
            raise RuntimeError(f"Error in applying chat template from request: {str(e)}")

        # Push the per-request generation config down onto the wrapped engine.
        self.engine.engine.generation_config = generation_config
        result_generator = self.engine.generate(request_id, prompt=prompt)

        if stream == "true":
            return self.chat_completion_stream_generator(request, request_dict, result_generator, request_id)
        else:
            return await self.chat_completion_full_generator(request, request_dict, result_generator, request_id)

    async def chat_completion_stream_generator(self, request, request_dict, result_generator, request_id: int):
        """Yield SSE ``data:`` frames: role preamble, optional echo, result, [DONE]."""
        # Send first response for each request.n (index) with the role
        role = self.get_chat_request_role(request, request_dict)
        n = request_dict.get("n", 1)
        # Normalize through str() so a JSON boolean does not crash .lower().
        echo = str(request_dict.get("echo", "false")).lower()
        for i in range(n):
            choice_data = ChatCompletionResponseStreamChoice(index=i, message=DeltaMessage(role=role))
            data = choice_data.model_dump_json(exclude_unset=True)
            yield f"data: {data}\n\n"

        # Send response to echo the input portion of the last message
        if echo == "true":
            last_msg_content = ""
            if (
                request_dict["messages"]
                and isinstance(request_dict["messages"], list)
                and request_dict["messages"][-1].get("content")
                and request_dict["messages"][-1].get("role") == role
            ):
                last_msg_content = request_dict["messages"][-1]["content"]
            if last_msg_content:
                for i in range(n):
                    choice_data = ChatCompletionResponseStreamChoice(
                        index=i, message=DeltaMessage(content=last_msg_content)
                    )
                    data = choice_data.model_dump_json(exclude_unset=True)
                    yield f"data: {data}\n\n"

        result = await result_generator
        choice_data = DeltaMessage(content=result.output)
        data = choice_data.model_dump_json(exclude_unset=True, exclude_none=True)
        yield f"data: {data}\n\n"

        # Send the final done message after all response.n are finished
        yield "data: [DONE]\n\n"

    async def chat_completion_full_generator(
        self,
        request: Request,
        request_dict: dict,
        result_generator,
        request_id,
    ):
        """Await the full generation result and return it as a ChatMessage."""
        if await request.is_disconnected():
            # Abort the request if the client disconnects.
            await self.engine.abort(request_id)
            return {"error_msg": "Client disconnected"}

        result = await result_generator
        assert result is not None
        role = self.get_chat_request_role(request, request_dict)
        choice_data = ChatMessage(role=role, content=result.output)

        # Normalize through str() so a JSON boolean does not crash .lower().
        echo = str(request_dict.get("echo", "false")).lower()
        if echo == "true":
            last_msg_content = ""
            # Fix: the messages live in the parsed body (request_dict), not on
            # the Starlette Request object — ``request.messages`` raised
            # AttributeError whenever echo was enabled.
            if (
                request_dict["messages"]
                and isinstance(request_dict["messages"], list)
                and request_dict["messages"][-1].get("content")
                and request_dict["messages"][-1].get("role") == role
            ):
                last_msg_content = request_dict["messages"][-1]["content"]
            full_message = last_msg_content + choice_data.content
            choice_data.content = full_message

        return choice_data

    def get_chat_request_role(self, request: Request, request_dict: dict) -> str:
        """Return the configured response role, or the last message's role."""
        add_generation_prompt = request_dict.get("add_generation_prompt", False)
        if add_generation_prompt:
            return self.response_role
        else:
            return request_dict["messages"][-1]["role"]

    def _load_chat_template(self, chat_template):
        """Install a chat template from a file path or inline text onto the tokenizer."""
        if chat_template is not None:
            try:
                with open(chat_template, "r") as f:
                    self.tokenizer.chat_template = f.read()
            except OSError:
                # If opening a file fails, set chat template to be args to
                # ensure we decode so our escape are interpreted correctly
                self.tokenizer.chat_template = codecs.decode(chat_template, "unicode_escape")

            logger.info(f"Using supplied chat template:\n{self.tokenizer.chat_template}")
        elif self.tokenizer.chat_template is not None:
            logger.info(f"Using default chat template:\n{self.tokenizer.chat_template}")
        else:
            logger.warning("No chat template provided. Chat API will not work.")
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/server/utils.py
colossalai/inference/server/utils.py
from typing import Any, Optional

from pydantic import BaseModel


# make it singleton
class NumericIDGenerator:
    """Process-wide source of monotonically increasing integer IDs (singleton)."""

    _instance = None

    def __new__(cls):
        # Lazily build the one shared instance and seed its counter at zero.
        if cls._instance is None:
            shared = super().__new__(cls)
            shared.current_id = 0
            cls._instance = shared
        return cls._instance

    def __call__(self):
        """Return the next ID; the first call yields 1."""
        self.current_id += 1
        return self.current_id


id_generator = NumericIDGenerator()


class ChatMessage(BaseModel):
    # A fully materialized chat message: role plus arbitrary content payload.
    role: str
    content: Any


class DeltaMessage(BaseModel):
    # A partial (streamed) message fragment; either field may be absent.
    role: Optional[str] = None
    content: Optional[Any] = None


class ChatCompletionResponseStreamChoice(BaseModel):
    # One streamed choice: its index within request.n and the delta payload.
    index: int
    message: DeltaMessage
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/server/__init__.py
colossalai/inference/server/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/inference/server/api_server.py
colossalai/inference/server/api_server.py
""" Doc: Feature: - FastAPI based http server for Colossal-Inference - Completion Service Supported Usage: (for local user) - First, Lauch an API locally. `python3 -m colossalai.inference.server.api_server --model path of your llama2 model` - Second, you can turn to the page `http://127.0.0.1:8000/docs` to check the api - For completion service, you can invoke it by using `curl -X POST http://127.0.0.1:8000/completion \ -H 'Content-Type: application/json' \ -d '{"prompt":"hello, who are you? ","stream":"False"}'` Version: V1.0 """ import argparse import json import uvicorn from fastapi import FastAPI, Request from fastapi.responses import JSONResponse, Response, StreamingResponse from transformers import AutoModelForCausalLM, AutoTokenizer import colossalai from colossalai.inference.config import InferenceConfig from colossalai.inference.server.chat_service import ChatServing from colossalai.inference.server.completion_service import CompletionServing from colossalai.inference.server.utils import id_generator from colossalai.inference.utils import find_available_ports from colossalai.inference.core.async_engine import AsyncInferenceEngine, InferenceEngine # noqa TIMEOUT_KEEP_ALIVE = 5 # seconds. prompt_template_choices = ["llama", "vicuna"] async_engine = None chat_serving = None completion_serving = None app = FastAPI() @app.get("/ping") def health_check() -> JSONResponse: """Health Check for server.""" return JSONResponse({"status": "Healthy"}) @app.get("/engine_check") def engine_check() -> bool: """Check if the background loop is running.""" loop_status = async_engine.background_loop_status if loop_status == False: return JSONResponse({"status": "Error"}) return JSONResponse({"status": "Running"}) @app.post("/generate") async def generate(request: Request) -> Response: """Generate completion for the request. NOTE: THIS API IS USED ONLY FOR TESTING, DO NOT USE THIS IF YOU ARE IN ACTUAL APPLICATION. 
A request should be a JSON object with the following fields: - prompts: the prompts to use for the generation. - stream: whether to stream the results or not. - other fields: """ request_dict = await request.json() prompt = request_dict.pop("prompt") stream = request_dict.pop("stream", "false") if isinstance(stream, str): stream = stream.lower() request_id = id_generator() generation_config = get_generation_config(request_dict) results = engine.generate(request_id, prompt, generation_config=generation_config) # Streaming case def stream_results(): for request_output in results: ret = {"text": request_output[len(prompt) :]} yield (json.dumps(ret) + "\0").encode("utf-8") if stream == "true" or stream == True: return StreamingResponse(stream_results()) # Non-streaming case final_output = None for request_output in results: if request.is_disconnected(): # Abort the request if the client disconnects. engine.abort(request_id) return Response(status_code=499) final_output = request_output[len(prompt) :] assert final_output is not None ret = {"text": final_output} return JSONResponse(ret) @app.post("/completion") async def create_completion(request: Request): request_dict = await request.json() stream = request_dict.pop("stream", "false") if isinstance(stream, str): stream = stream.lower() generation_config = get_generation_config(request_dict) result = await completion_serving.create_completion(request, generation_config) ret = {"request_id": result.request_id, "text": result.output} if stream == "true" or stream == True: return StreamingResponse(content=json.dumps(ret) + "\0", media_type="text/event-stream") else: return JSONResponse(content=ret) @app.post("/chat") async def create_chat(request: Request): request_dict = await request.json() stream = request_dict.get("stream", "false") if isinstance(stream, str): stream = stream.lower() generation_config = get_generation_config(request_dict) message = await chat_serving.create_chat(request, generation_config) if stream == 
"true" or stream == True: return StreamingResponse(content=message, media_type="text/event-stream") else: ret = {"role": message.role, "text": message.content} return ret def get_generation_config(request): generation_config = async_engine.engine.generation_config for arg in request: if hasattr(generation_config, arg): setattr(generation_config, arg, request[arg]) return generation_config def add_engine_config(parser): parser.add_argument( "-m", "--model", type=str, default="llama2-7b", help="name or path of the huggingface model to use" ) # Parallel arguments not supported now # KV cache arguments parser.add_argument("--block_size", type=int, default=16, choices=[16, 32], help="token block size") parser.add_argument("--max_batch_size", type=int, default=8, help="maximum number of batch size") parser.add_argument("-i", "--max_input_len", type=int, default=128, help="max input length") parser.add_argument("-o", "--max_output_len", type=int, default=128, help="max output length") parser.add_argument("-d", "--dtype", type=str, default="fp16", help="Data type", choices=["fp16", "fp32", "bf16"]) parser.add_argument("--use_cuda_kernel", action="store_true", help="Use CUDA kernel, use Triton by default") # generation arguments parser.add_argument( "--prompt_template", choices=prompt_template_choices, default=None, help=f"Allowed choices are {','.join(prompt_template_choices)}. 
Default to None.", ) return parser def parse_args(): parser = argparse.ArgumentParser(description="Colossal-Inference API server.") parser.add_argument("--host", type=str, default="127.0.0.1") parser.add_argument("--port", type=int, default=8000, help="port of FastAPI server.") parser.add_argument("--ssl-keyfile", type=str, default=None) parser.add_argument("--ssl-certfile", type=str, default=None) parser.add_argument( "--root-path", type=str, default=None, help="FastAPI root_path when app is behind a path based routing proxy" ) parser.add_argument( "--model-name", type=str, default=None, help="The model name used in the API. If not " "specified, the model name will be the same as " "the huggingface name.", ) parser.add_argument( "--chat-template", type=str, default=None, help="The file path to the chat template, " "or the template in single-line form " "for the specified model", ) parser.add_argument( "--response-role", type=str, default="assistant", help="The role name to return if " "`request.add_generation_prompt=true`.", ) parser = add_engine_config(parser) return parser.parse_args() if __name__ == "__main__": args = parse_args() inference_config = InferenceConfig.from_dict(vars(args)) tokenizer = AutoTokenizer.from_pretrained(args.model) colossalai_backend_port = find_available_ports(1)[0] colossalai.launch( rank=0, world_size=1, host=args.host, port=colossalai_backend_port, backend="nccl", ) model = AutoModelForCausalLM.from_pretrained(args.model) async_engine = AsyncInferenceEngine( start_engine_loop=True, model_or_path=model, tokenizer=tokenizer, inference_config=inference_config ) engine = async_engine.engine completion_serving = CompletionServing(async_engine, model.__class__.__name__) chat_serving = ChatServing( async_engine, served_model=model.__class__.__name__, tokenizer=tokenizer, response_role=args.response_role, chat_template=args.chat_template, ) app.root_path = args.root_path uvicorn.run( app=app, host=args.host, port=args.port, 
log_level="debug", timeout_keep_alive=TIMEOUT_KEEP_ALIVE, ssl_keyfile=args.ssl_keyfile, ssl_certfile=args.ssl_certfile, )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/__init__.py
colossalai/shardformer/__init__.py
from .shard import GradientCheckpointConfig, ModelSharder, PipelineGradientCheckpointConfig, ShardConfig, ShardFormer
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/_utils.py
colossalai/shardformer/_utils.py
import re


def get_obj_list_element(obj, attr: str):
    r"""
    Resolve a single attribute segment, supporting list indexing.

    A plain name such as ``layers`` is looked up with ``getattr``; a name
    carrying an index such as ``layers[0]`` is first resolved to the
    container attribute and then indexed.

    Args:
        obj (Object): The object to read from
        attr (str): The attribute segment to resolve
    """
    found = re.search(r"\[\d+\]", attr)
    if found is None:
        return getattr(obj, attr)
    bracket = found.group()
    container = getattr(obj, attr.replace(bracket, ""))
    return container[int(bracket[1:-1])]


def set_obj_list_element(obj, attr: str, value):
    r"""
    Assign through a single attribute segment, supporting list indexing.

    ``set_obj_list_element(obj, 'layers[0]', new_layer)`` stores
    ``new_layer`` into ``obj.layers[0]``; a plain name falls back to
    ``setattr``.

    Args:
        obj (object): The object to write to
        attr (str): The segment, possibly carrying an index like ``layers[0]``
        value: The value to store
    """
    found = re.search(r"\[\d+\]", attr)
    if found is None:
        setattr(obj, attr, value)
        return
    bracket = found.group()
    container = getattr(obj, attr.replace(bracket, ""))
    container[int(bracket[1:-1])] = value


def hasattr_(obj, attr: str):
    r"""
    Report whether a dotted, possibly indexed, attribute path exists.

    Args:
        obj (object): The root object
        attr (str): The dotted path to probe, e.g. ``decoder.layers[0]``
    """
    try:
        for segment in attr.split("."):
            obj = get_obj_list_element(obj, segment)
    except AttributeError:
        return False
    return True


def setattr_(obj, attr: str, value, ignore: bool = False):
    r"""
    Assign ``value`` at the end of a dotted, possibly indexed, path.

    Args:
        obj (object): The root object
        attr (str): The dotted path to the target attribute
        value (Any): The value to store
        ignore (bool): When True, silently do nothing if the path is missing
    """
    *parents, leaf = attr.split(".")
    for segment in parents:
        try:
            obj = get_obj_list_element(obj, segment)
        except AttributeError:
            if ignore:
                return
            raise AttributeError(f"Object {obj.__class__.__name__} has no attribute {attr}")
    set_obj_list_element(obj, leaf, value)


def getattr_(obj, attr: str, ignore: bool = False):
    r"""
    Read the value at the end of a dotted, possibly indexed, path.

    Args:
        obj (object): The root object
        attr (str): The dotted path to the target attribute
        ignore (bool): When True, return None instead of raising if missing
    """
    for segment in attr.split("."):
        try:
            obj = get_obj_list_element(obj, segment)
        except AttributeError:
            if ignore:
                return None
            raise AttributeError(f"Object {obj.__class__.__name__} has no attribute {attr}")
    return obj
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/opt.py
colossalai/shardformer/policies/opt.py
import warnings
from functools import partial
from typing import Callable, Dict, List

import torch.nn as nn
from torch import Tensor, nn

from colossalai.shardformer.layer import (
    FusedLayerNorm,
    LayerNorm,
    Linear1D_Col,
    Linear1D_Row,
    LinearWithGradAccum,
    PaddingEmbedding,
    PaddingLMHead,
    VocabParallelEmbedding1D,
    VocabParallelLMHead1D,
)

from .._utils import getattr_
from ..modeling.jit import get_jit_fused_dropout_add_func
from ..modeling.opt import (
    OPTPipelineForwards,
    get_jit_fused_opt_decoder_layer_forward,
    get_lm_forward_with_dist_cross_entropy,
    get_opt_decoder_forward_for_flash_attention,
    get_opt_flash_attention_forward,
)
from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription

__all__ = [
    "OPTPolicy",
    "OPTModelPolicy",
    "OPTForCausalLMPolicy",
    "OPTForSequenceClassificationPolicy",
    "OPTForQuestionAnsweringPolicy",
]


class OPTPolicy(Policy):
    """Shardformer policy for HuggingFace OPT: describes which submodules to
    replace (tensor parallel / fused norm / flash attention / JIT fusion) and
    how to partition decoder layers across pipeline stages."""

    def __init__(self) -> None:
        super().__init__()

    def config_sanity_check(self):
        # No OPT-specific config constraints to validate.
        pass

    def preprocess(self):
        """Record tie-weight status and the original attention implementation."""
        self.tie_weight = self.tie_weight_check()
        self.origin_attn_implement = self.model.config._attn_implementation
        return self.model

    def module_policy(self):
        """Build the module-replacement policy keyed by transformer classes."""
        from transformers.models.opt.modeling_opt import OPTAttention, OPTDecoder, OPTDecoderLayer, OptFlashAttention2

        # Map the configured attention implementation name to its class.
        ATTN_IMPLEMENTATION = {
            "eager": OPTAttention,
            "flash_attention_2": OptFlashAttention2,
        }
        policy = {}

        attn_cls = ATTN_IMPLEMENTATION[self.model.config._attn_implementation]

        # Embedding replacement: vocab-parallel under TP; padded embedding when
        # weights are tied (keeps vocab size divisible) without TP.
        embedding_cls = None
        if self.shard_config.enable_tensor_parallelism:
            embedding_cls = VocabParallelEmbedding1D
        else:
            if self.tie_weight:
                embedding_cls = PaddingEmbedding

        if self.shard_config.enable_fused_normalization:
            norm_cls = FusedLayerNorm
        else:
            norm_cls = LayerNorm

        # OPT has no sequence-parallel support; force the flag off with a warning.
        if self.shard_config.enable_sequence_parallelism:
            self.shard_config.enable_sequence_parallelism = False
            warnings.warn("OPT doesn't support sequence parallelism now, will ignore the sequence parallelism flag.")

        # ZBV (zero-bubble-V) pipeline schedule requires grad-accumulating linears.
        use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv

        if self.shard_config.enable_tensor_parallelism:
            assert (
                self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0
            ), f"The number of attention heads must be divisible by tensor parallel size."
            # Column-parallel fc1 feeding row-parallel fc2 (classic Megatron MLP split).
            policy[OPTDecoderLayer] = ModulePolicyDescription(
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="fc1",
                        target_module=Linear1D_Col,
                        kwargs=dict(
                            use_zbv=use_zbv,
                        ),
                    ),
                    SubModuleReplacementDescription(
                        suffix="fc2",
                        target_module=Linear1D_Row,
                        kwargs=dict(
                            use_zbv=use_zbv,
                        ),
                    ),
                ]
            )

            # Shrink per-rank attention attributes and split q/k/v column-wise,
            # out_proj row-wise.
            policy[attn_cls] = ModulePolicyDescription(
                attribute_replacement={
                    "embed_dim": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,
                    "num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size,
                },
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="q_proj",
                        target_module=Linear1D_Col,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="k_proj",
                        target_module=Linear1D_Col,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="v_proj",
                        target_module=Linear1D_Col,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="out_proj",
                        target_module=Linear1D_Row,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                ],
            )
        elif use_zbv:
            # No TP, but ZBV still needs grad-accumulating linear wrappers.
            policy[OPTDecoderLayer] = ModulePolicyDescription(
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="fc1",
                        target_module=LinearWithGradAccum,
                        kwargs=dict(
                            use_zbv=use_zbv,
                        ),
                    ),
                    SubModuleReplacementDescription(
                        suffix="fc2",
                        target_module=LinearWithGradAccum,
                        kwargs=dict(
                            use_zbv=use_zbv,
                        ),
                    ),
                ]
            )

            policy[attn_cls] = ModulePolicyDescription(
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="q_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="k_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="v_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="out_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                ],
            )

        if embedding_cls is not None:
            self.append_or_create_submodule_replacement(
                description=SubModuleReplacementDescription(
                    suffix="embed_tokens",
                    target_module=embedding_cls,
                    kwargs=(
                        {
                            "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by,
                            "fp8_communication": self.shard_config.fp8_communication,
                        }
                        if self.shard_config.enable_tensor_parallelism
                        else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}
                    ),
                ),
                policy=policy,
                target_key=OPTDecoder,
            )

        # optimization configuration
        self.append_or_create_submodule_replacement(
            description=SubModuleReplacementDescription(
                suffix="final_layer_norm",
                target_module=norm_cls,
                ignore_if_not_exist=True,
            ),
            policy=policy,
            target_key=OPTDecoder,
        )
        self.append_or_create_submodule_replacement(
            description=[
                SubModuleReplacementDescription(
                    suffix="self_attn_layer_norm",
                    target_module=norm_cls,
                    ignore_if_not_exist=True,
                ),
                SubModuleReplacementDescription(
                    suffix="final_layer_norm",
                    target_module=norm_cls,
                    ignore_if_not_exist=True,
                ),
            ],
            policy=policy,
            target_key=OPTDecoderLayer,
        )

        # use flash attention
        if self.shard_config.enable_flash_attention:
            self.append_or_create_method_replacement(
                description={
                    "forward": get_opt_flash_attention_forward(self.shard_config),
                },
                policy=policy,
                target_key=attn_cls,
            )
            # Decoder-level flash-attn forward is only swapped in without pipeline
            # parallelism (the pipeline forwards handle it otherwise).
            if not self.shard_config.pipeline_stage_manager:
                self.append_or_create_method_replacement(
                    description={
                        "forward": get_opt_decoder_forward_for_flash_attention(self.shard_config),
                    },
                    policy=policy,
                    target_key=OPTDecoder,
                )

        # use jit fused operator
        if self.shard_config.enable_jit_fused:
            self.append_or_create_method_replacement(
                description={
                    "forward": get_jit_fused_opt_decoder_layer_forward(),
                    "dropout_add": get_jit_fused_dropout_add_func(),
                },
                policy=policy,
                target_key=OPTDecoderLayer,
            )

        return policy

    def postprocess(self):
        # Base policy needs no post-shard fixups.
        return self.model

    def get_held_layers(self) -> List[nn.Module]:
        """Get pipeline layers for current stage."""
        assert self.pipeline_stage_manager is not None

        if self.model.__class__.__name__ == "OPTModel":
            module = self.model.decoder
        else:
            module = self.model.model.decoder
        stage_manager = self.pipeline_stage_manager

        held_layers = []
        layers_per_stage = stage_manager.distribute_layers(len(module.layers))
        if stage_manager.is_interleave:
            # Interleaved schedule: this rank may hold several non-contiguous chunks.
            assert stage_manager.num_model_chunks is not None
            stage_indices = stage_manager.get_stage_index(layers_per_stage)
            if stage_manager.is_first_stage(ignore_chunk=True):
                held_layers.append(module.embed_tokens)
                held_layers.append(module.embed_positions)
                held_layers.append(module.project_in)
            for start_idx, end_idx in stage_indices:
                held_layers.extend(module.layers[start_idx:end_idx])
            # Under ZBV the "last" model chunk lives on the first stage; otherwise
            # the tail modules belong to the last stage.
            if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
                not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
            ):
                held_layers.append(module.final_layer_norm)
                held_layers.append(module.project_out)
        else:
            # 1F1B-style schedule: one contiguous slice of layers per stage.
            if stage_manager.is_first_stage():
                held_layers.append(module.embed_tokens)
                held_layers.append(module.embed_positions)
                held_layers.append(module.project_in)
            start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage)
            held_layers.extend(module.layers[start_idx:end_idx])
            if stage_manager.is_last_stage():
                held_layers.append(module.final_layer_norm)
                held_layers.append(module.project_out)

        return held_layers

    def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None:
        """If under pipeline parallel setting, replacing the original forward method of huggingface
        to customized forward method, and add this changing to policy."""
        if self.pipeline_stage_manager:
            stage_manager = self.pipeline_stage_manager
            if self.model.__class__.__name__ == "OPTModel":
                module = self.model.decoder
            else:
                module = self.model.model.decoder

            layers_per_stage = stage_manager.distribute_layers(len(module.layers))
            stage_index = stage_manager.get_stage_index(layers_per_stage)
            method_replacement = {
                "forward": partial(
                    new_forward,
                    stage_manager=stage_manager,
                    stage_index=stage_index,
                    shard_config=self.shard_config,
                )
            }
            self.append_or_create_method_replacement(
                description=method_replacement, policy=policy, target_key=model_cls
            )


class OPTModelPolicy(OPTPolicy):
    """Policy for the bare OPTModel (no task head)."""

    def module_policy(self):
        from transformers.models.opt.modeling_opt import OPTModel

        policy = super().module_policy()
        if self.pipeline_stage_manager:
            self.set_pipeline_forward(
                model_cls=OPTModel,
                new_forward=OPTPipelineForwards.opt_model_forward,
                policy=policy,
            )
        return policy

    def get_held_layers(self) -> List[nn.Module]:
        return super().get_held_layers()

    def get_shared_params(self) -> List[Dict[int, Tensor]]:
        """No shared params in OPTModel."""
        return []


class OPTForCausalLMPolicy(OPTPolicy):
    """Policy for OPTForCausalLM: adds lm_head handling and embed/lm_head weight tying."""

    def module_policy(self):
        from transformers.models.opt.modeling_opt import OPTForCausalLM

        policy = super().module_policy()
        if self.shard_config.enable_tensor_parallelism:
            self.append_or_create_submodule_replacement(
                description=SubModuleReplacementDescription(
                    suffix="lm_head",
                    target_module=VocabParallelLMHead1D,
                    kwargs=dict(
                        gather_output=not self.shard_config.parallel_output,
                        make_vocab_size_divisible_by=self.shard_config.make_vocab_size_divisible_by,
                        fp8_communication=self.shard_config.fp8_communication,
                    ),
                ),
                policy=policy,
                target_key=OPTForCausalLM,
            )
            if self.shard_config.parallel_output:
                # Keep logits sharded and fuse the cross entropy across TP ranks.
                method_replacement = {"forward": get_lm_forward_with_dist_cross_entropy(self.shard_config)}
                self.append_or_create_method_replacement(
                    description=method_replacement, policy=policy, target_key=OPTForCausalLM
                )
        else:
            self.append_or_create_submodule_replacement(
                description=SubModuleReplacementDescription(
                    suffix="lm_head",
                    target_module=PaddingLMHead,
                    kwargs=dict(make_vocab_size_divisible_by=self.shard_config.make_vocab_size_divisible_by),
                ),
                policy=policy,
                target_key=OPTForCausalLM,
            )

        if self.pipeline_stage_manager:
            self.set_pipeline_forward(
                model_cls=OPTForCausalLM,
                new_forward=OPTPipelineForwards.opt_for_causal_lm_forward,
                policy=policy,
            )
        return policy

    def get_held_layers(self) -> List[nn.Module]:
        # Base decoder layers plus the lm_head on the stage that owns the tail.
        held_layers = super().get_held_layers()
        stage_manager = self.pipeline_stage_manager
        if stage_manager.is_interleave:
            if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
                not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
            ):
                held_layers.append(self.model.lm_head)
        else:
            if self.pipeline_stage_manager.is_last_stage():
                held_layers.append(self.model.lm_head)
        return held_layers

    def get_shared_params(self) -> List[Dict[int, Tensor]]:
        # When embed_tokens and lm_head share one weight tensor, report it as a
        # parameter shared between the first and last pipeline stages.
        opt_model = self.model
        if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1:
            num_stages = self.pipeline_stage_manager.num_stages
            if id(opt_model.model.decoder.embed_tokens.weight) == id(opt_model.lm_head.weight):
                return [
                    {
                        0: opt_model.model.decoder.embed_tokens.weight,
                        num_stages - 1: opt_model.lm_head.weight,
                    }
                ]
        return []

    def postprocess(self):
        # Re-tie embed_tokens and lm_head after TP sharding (no pipeline case only).
        if self.shard_config.enable_tensor_parallelism and self.pipeline_stage_manager is None:
            binding_map = {
                "model.decoder.embed_tokens": "lm_head",
            }
            for k, v in binding_map.items():
                src_mod = getattr_(self.model, k)
                dst_mod = getattr_(self.model, v)
                dst_mod.weight = src_mod.weight
        return self.model


class OPTForSequenceClassificationPolicy(OPTPolicy):
    """Policy for OPTForSequenceClassification: base policy plus the score head."""

    def module_policy(self):
        from transformers.models.opt.modeling_opt import OPTForSequenceClassification

        policy = super().module_policy()
        if self.pipeline_stage_manager:
            self.set_pipeline_forward(
                model_cls=OPTForSequenceClassification,
                new_forward=OPTPipelineForwards.opt_for_sequence_classification_forward,
                policy=policy,
            )

        return policy

    def get_held_layers(self) -> List[nn.Module]:
        held_layers = super().get_held_layers()
        if self.pipeline_stage_manager.is_last_stage():
            held_layers.append(self.model.score)
        return held_layers

    def get_shared_params(self) -> List[Dict[int, Tensor]]:
        "no shared params in OPTForSequenceClassification"
        return []


class OPTForQuestionAnsweringPolicy(OPTPolicy):
    """Policy for OPTForQuestionAnswering: base policy plus the qa_outputs head."""

    def module_policy(self):
        from transformers.models.opt.modeling_opt import OPTForQuestionAnswering

        policy = super().module_policy()
        if self.pipeline_stage_manager:
            self.set_pipeline_forward(
                model_cls=OPTForQuestionAnswering,
                new_forward=OPTPipelineForwards.opt_for_question_answering_forward,
                policy=policy,
            )

        return policy

    def get_held_layers(self) -> List[nn.Module]:
        held_layers = super().get_held_layers()
        stage_manager = self.pipeline_stage_manager
        if stage_manager.is_interleave:
            if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
                not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
            ):
                held_layers.append(self.model.qa_outputs)
        else:
            if self.pipeline_stage_manager.is_last_stage():
                held_layers.append(self.model.qa_outputs)
        return held_layers

    def get_shared_params(self) -> List[Dict[int, Tensor]]:
        "no shared params in OPTForSequenceClassification"
        return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/qwen2.py
colossalai/shardformer/policies/qwen2.py
from functools import partial from typing import Callable, Dict, List, Union import torch.nn as nn from torch import Tensor from torch.nn import Module from transformers.models.qwen2.modeling_qwen2 import ( Qwen2Attention, Qwen2DecoderLayer, Qwen2ForCausalLM, Qwen2ForSequenceClassification, Qwen2Model, ) from colossalai.shardformer.layer import ( FusedRMSNorm, Linear1D_Col, Linear1D_Row, LinearWithGradAccum, PaddingEmbedding, RMSNorm, VocabParallelEmbedding1D, VocabParallelLMHead1D, ) from ..modeling.qwen2 import ( Qwen2PipelineForwards, get_lm_forward_with_dist_cross_entropy, get_qwen2_flash_attention_forward, get_qwen2_model_forward_for_flash_attn, ) from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["Qwen2Policy", "Qwen2ForCausalLMPolicy", "Qwen2ForSequenceClassificationPolicy"] class Qwen2Policy(Policy): def __init__(self) -> None: super().__init__() import transformers from packaging.version import Version assert Version(transformers.__version__) >= Version( "4.39.1" ), "The Qwen2 model should run on a transformers version of 4.39.1." 
def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.origin_attn_implement = self.model.config._attn_implementation return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding norm_cls = FusedRMSNorm if self.shard_config.enable_fused_normalization else RMSNorm sp_mode = self.shard_config.sequence_parallelism_mode or None sp_size = self.shard_config.sequence_parallel_size or None sp_group = self.shard_config.sequence_parallel_process_group or None sp_partial_derived = sp_mode in ["split_gather", "ring"] if sp_mode == "all_to_all": decoder_attribute_replacement = { "num_heads": self.model.config.num_attention_heads // sp_size, } if getattr(self.model.config, "num_key_value_heads", False): decoder_attribute_replacement["num_key_value_heads"] = self.model.config.num_key_value_heads // sp_size policy[Qwen2Attention] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, ) use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." if hasattr(self.model.config, "num_key_value_heads"): assert ( self.model.config.num_key_value_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of key_value heads must be divisible by tensor parallel size." 
decoder_attribute_replacement = { "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "self_attn.num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, } if getattr(self.model.config, "num_key_value_heads", False): decoder_attribute_replacement["self_attn.num_key_value_heads"] = ( self.model.config.num_key_value_heads // self.shard_config.tensor_parallel_size ) policy[Qwen2DecoderLayer] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=Linear1D_Row, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.gate_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.up_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.down_proj", target_module=Linear1D_Row, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, 
use_zbv=use_zbv, ), ), ], ) elif use_zbv: policy[Qwen2DecoderLayer] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.gate_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.up_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.down_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="embed_tokens", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if 
self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, target_key=Qwen2Model, ) # optimization configuration self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="input_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key=Qwen2DecoderLayer, ) self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="norm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), policy=policy, target_key=Qwen2Model, ) if self.shard_config.enable_flash_attention or self.shard_config.enable_sequence_parallelism: self.append_or_create_method_replacement( description={ "forward": get_qwen2_flash_attention_forward(self.shard_config, sp_mode, sp_size, sp_group), }, policy=policy, target_key=Qwen2Attention, ) if self.pipeline_stage_manager is None: # replace qwen2 model forward method self.append_or_create_method_replacement( description={ "forward": get_qwen2_model_forward_for_flash_attn( self.shard_config, sp_mode, sp_size, sp_group ), }, policy=policy, target_key=Qwen2Model, ) return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager is None: return stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "Qwen2Model": module = self.model else: module = self.model.model if stage_manager.is_interleave: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) 
stage_manager.stage_indices = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial(new_forward, stage_manager=stage_manager, shard_config=self.shard_config) } else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config ) } self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=model_cls ) self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "Qwen2Model": module = self.model else: module = self.model.model stage_manager = self.pipeline_stage_manager held_layers = [] held_layers.append(module.rotary_emb) if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embed_tokens) for start_idx, end_idx in stage_indices: held_layers.extend(module.layers[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.norm) else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) if stage_manager.is_first_stage(): held_layers.append(module.embed_tokens) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.layers[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.norm) return held_layers 
class Qwen2ModelPolicy(Qwen2Policy): def module_policy(self): policy = super().module_policy() if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=Qwen2Model, new_forward=Qwen2PipelineForwards.qwen2_model_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in Qwen2 model""" return [] class Qwen2ForCausalLMPolicy(Qwen2Policy): def module_policy(self): policy = super().module_policy() setattr(self.shard_config, "causal_lm", True) use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: # add a new item for casual lm new_item = { Qwen2ForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=VocabParallelLMHead1D, kwargs=dict( gather_output=not self.shard_config.parallel_output, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ], method_replacement={"forward": get_lm_forward_with_dist_cross_entropy(self.shard_config)}, ) } policy.update(new_item) elif use_zbv: # add a new item for casual lm new_item = { Qwen2ForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=LinearWithGradAccum, kwargs=dict(fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv), ), SubModuleReplacementDescription( suffix="lm_head", target_module=VocabParallelLMHead1D, kwargs={ "gather_output": not self.shard_config.parallel_output, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ), ], method_replacement={"forward": get_lm_forward_with_dist_cross_entropy(self.shard_config)}, ) } 
policy.update(new_item) if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=Qwen2ForCausalLM, new_forward=Qwen2PipelineForwards.qwen2_for_causal_lm_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) else: if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: qwen2_model = self.model.model if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if ( id(qwen2_model.embed_tokens.weight) == id(self.model.lm_head.weight) and self.pipeline_stage_manager.num_stages > 1 ): # tie weights return [ { 0: qwen2_model.embed_tokens.weight, self.pipeline_stage_manager.num_stages - 1: self.model.lm_head.weight, } ] return [] class Qwen2ForSequenceClassificationPolicy(Qwen2Policy): def module_policy(self): policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: # add a new item for sequence classification new_item = { Qwen2ForSequenceClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="score", target_module=Linear1D_Col, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) elif use_zbv: new_item = { Qwen2ForSequenceClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="score", target_module=LinearWithGradAccum, kwargs=dict( 
gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) # to be confirmed if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=Qwen2ForSequenceClassification, new_forward=Qwen2PipelineForwards.qwen2_for_sequence_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.score) else: if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.score) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in Qwen2 for sequence classification model""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/base_policy.py
colossalai/shardformer/policies/base_policy.py
# part of code modified from https://github.com/tunib-ai/parallelformers from abc import ABC, abstractmethod from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import torch.nn as nn from torch import Tensor from torch.nn import Module from colossalai.pipeline.stage_manager import PipelineStageManager from ..layer.normalization import BaseLayerNorm from ..layer.parallel_module import ParallelModule from ..shard.shard_config import ShardConfig __all__ = ["ParallelModule", "SubModuleReplacementDescription", "ModulePolicyDescription", "Policy"] @dataclass class SubModuleReplacementDescription: r""" Describe how a submodule will be replaced Args: suffix (str): used to get the submodule object target_module (ParallelModule): specifies the module class used to replace to submodule kwargs (Dict[str, Any]): the dictionary used to pass extra arguments to the `ParallelModule.from_native_module` method. ignore_if_not_exist (bool): if the submodule does not exist, ignore it or raise an exception """ suffix: str target_module: Union[ParallelModule, BaseLayerNorm] kwargs: Dict[str, Any] = None ignore_if_not_exist: bool = False @dataclass class ModulePolicyDescription: r""" Describe how the attributes and parameters will be transformed in a policy. Args: attribute_replacement (Dict[str, Any]): key is the attribute name, value is the attribute value after sharding param_replacement (List[Callable]): a list of functions to perform in-place param replacement. The function must receive only one arguments: module. One example is ```python def example_replace_weight(module: torch.nn.Module): weight = module.weight new_weight = shard_rowwise(weight, process_group) module.weight = torch.nn.Parameter(new_weight) ``` sub_module_replacement (List[SubModuleReplacementDescription]): each element in the list is a SubModuleReplacementDescription object which specifies the module to be replaced and the target module used to replacement. 
method_replace (Dict[str, Callable]): key is the method name, value is the method for replacement """ attribute_replacement: Dict[str, Any] = None param_replacement: List[Callable] = None sub_module_replacement: List[SubModuleReplacementDescription] = None method_replacement: Dict[str, Callable] = None class Policy(ABC): r""" The base class for all the policies. For each different model, it should have a different policy class, like BertPolicy for Bert Model or OPTPolicy for OPT model. Shardformer has provided many built-in sharding policies for the mainstream models. You can use the built-in policies by setting `policy = None`, which is already the default argument for `Shardformer.optimize`. If you want to define your own policy, you can inherit from this class and overwrite the methods you want to modify. """ def __init__(self) -> None: self.shard_config: Optional[ShardConfig] = None self.model: Optional[Module] = None self.is_causal = None # Whether we're doing causal lm, i.e. using cross entropy def set_model(self, model: nn.Module) -> None: r""" Set model as an attribute of the Policy object so that we can access the model's attributes. Args: model (:class:`nn.Module`): The model to be perform """ self.model = model def set_shard_config(self, shard_config: ShardConfig) -> None: r""" Set shard config as an attribute of the Policy object. Args: shard_config (:class:`ShardConfig`): The shard config to be perform """ self.shard_config = shard_config self.config_sanity_check() @property def pipeline_stage_manager(self) -> Optional[PipelineStageManager]: if self.shard_config is not None: return self.shard_config.pipeline_stage_manager return None @abstractmethod def config_sanity_check(self): """ Check if the shard config is valid for the model. Raise an exception if the config is invalid. This method is made abstractmethod with no default implementation because we want to the policy writer to take note of the feature supported by his/her model and policy. 
""" @abstractmethod def preprocess(self) -> nn.Module: r""" Perform some preprocessing of the model, like reshaping the embedding layer. """ @abstractmethod def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: r""" This method returns the module policy, which is a dictionary. The key is the module name or the module object, and the value is the ModulePolicyDescription object. The ModulePolicyDescription object describes how the module will be transformed. """ @abstractmethod def postprocess(self) -> nn.Module: r""" Perform some postprocessing of the model, like binding the weight of embedding layer with the classifier layer """ def append_or_create_submodule_replacement( self, description: Union[SubModuleReplacementDescription, List[SubModuleReplacementDescription]], policy: Dict[Union[str, nn.Module], ModulePolicyDescription], target_key: Union[str, nn.Module], ) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: r""" Append or create a new submodule replacement description to the policy for the given key. 
Args: submodule_replace_desc (Union[SubModuleReplacementDescription, List[SubModuleReplacementDescription]]): the submodule replacement description to be appended policy (Dict[Union[str, nn.Module], ModulePolicyDescription]): the policy to be updated target_key (Union[str, nn.Module]): the key of the policy to be updated """ # convert to list if isinstance(description, SubModuleReplacementDescription): description = [description] # append or create a new description if target_key in policy: if policy[target_key].sub_module_replacement is None: policy[target_key].sub_module_replacement = description else: policy[target_key].sub_module_replacement.extend(description) else: policy[target_key] = ModulePolicyDescription(sub_module_replacement=description) return policy def append_or_create_method_replacement( self, description: Dict[str, Callable], policy: Dict[Union[str, nn.Module], ModulePolicyDescription], target_key: Union[str, nn.Module], ) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: r""" Append or create a new method replacement description to the policy for the given key. Args: description (Union[SubModuleReplacementDescription, List[SubModuleReplacementDescription]]): the submodule replacement description to be appended policy (Dict[Union[str, nn.Module], ModulePolicyDescription]): the policy to be updated target_key (Union[str, nn.Module]): the key of the policy to be updated """ if target_key in policy: if policy[target_key].method_replacement is None: policy[target_key].method_replacement = description else: policy[target_key].method_replacement.update(description) else: policy[target_key] = ModulePolicyDescription(method_replacement=description) return policy def get_held_layers(self) -> List[Module]: """Get layers that should be held in current stage. This method should be implemented by subclass. 
Returns: List[Module]: List of layers that should be hold in current stage """ raise NotImplementedError def get_shared_params(self) -> List[Dict[int, Tensor]]: """Get parameters that should be shared across stages. This method should be implemented by subclass. Returns: List[Dict[int, Tensor]]: List of parameters that should be shared across stages. E.g. [{0: module.model.embed_tokens.weight, 3: module.lm_head.weight}] """ return [] def tie_weight_check(self): input_embedding = self.model.get_input_embeddings() output_embedding = self.model.get_output_embeddings() return ( input_embedding is not None and output_embedding is not None and id(input_embedding.weight) == id(output_embedding.weight) )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/auto_policy.py
colossalai/shardformer/policies/auto_policy.py
import importlib from dataclasses import dataclass import torch.nn as nn from .base_policy import Policy __all__ = ["PolicyLocation", "get_autopolicy", "import_policy"] @dataclass class PolicyLocation: """ PolicyLocation describes the location of a policy class. Args: file_name (str): The file name of the policy under colossalai.shardformer.policies class_name (str): The class name of the policy class """ file_name: str class_name: str # we don't want to import all policies here # as each policy file imports its own model zoo library # we will allow the user to only import the policy file needed _POLICY_LIST = { # BERT "transformers.models.bert.modeling_bert.BertModel": PolicyLocation(file_name="bert", class_name="BertModelPolicy"), "transformers.models.bert.modeling_bert.BertForPreTraining": PolicyLocation( file_name="bert", class_name="BertForPreTrainingPolicy" ), "transformers.models.bert.modeling_bert.BertLMHeadModel": PolicyLocation( file_name="bert", class_name="BertLMHeadModelPolicy" ), "transformers.models.bert.modeling_bert.BertForMaskedLM": PolicyLocation( file_name="bert", class_name="BertForMaskedLMPolicy" ), "transformers.models.bert.modeling_bert.BertForSequenceClassification": PolicyLocation( file_name="bert", class_name="BertForSequenceClassificationPolicy" ), "transformers.models.bert.modeling_bert.BertForTokenClassification": PolicyLocation( file_name="bert", class_name="BertForTokenClassificationPolicy" ), "transformers.models.bert.modeling_bert.BertForNextSentencePrediction": PolicyLocation( file_name="bert", class_name="BertForNextSentencePredictionPolicy" ), "transformers.models.bert.modeling_bert.BertForMultipleChoice": PolicyLocation( file_name="bert", class_name="BertForMultipleChoicePolicy" ), "transformers.models.bert.modeling_bert.BertForQuestionAnswering": PolicyLocation( file_name="bert", class_name="BertForQuestionAnsweringPolicy" ), # LLaMA "transformers.models.llama.modeling_llama.LlamaModel": PolicyLocation( file_name="llama", 
class_name="LlamaModelPolicy" ), "transformers.models.llama.modeling_llama.LlamaForCausalLM": PolicyLocation( file_name="llama", class_name="LlamaForCausalLMPolicy" ), "transformers.models.llama.modeling_llama.LlamaForSequenceClassification": PolicyLocation( file_name="llama", class_name="LlamaForSequenceClassificationPolicy" ), # T5 "transformers.models.t5.modeling_t5.T5Model": PolicyLocation(file_name="t5", class_name="T5ModelPolicy"), "transformers.models.t5.modeling_t5.T5ForConditionalGeneration": PolicyLocation( file_name="t5", class_name="T5ForConditionalGenerationPolicy" ), "transformers.models.t5.modeling_t5.T5EncoderModel": PolicyLocation(file_name="t5", class_name="T5EncoderPolicy"), "transformers.models.t5.modeling_t5.T5ForTokenClassification": PolicyLocation( file_name="t5", class_name="T5ForTokenClassificationPolicy" ), # GPT2 "transformers.models.gpt2.modeling_gpt2.GPT2Model": PolicyLocation(file_name="gpt2", class_name="GPT2ModelPolicy"), "transformers.models.gpt2.modeling_gpt2.GPT2LMHeadModel": PolicyLocation( file_name="gpt2", class_name="GPT2LMHeadModelPolicy" ), "transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModel": PolicyLocation( file_name="gpt2", class_name="GPT2DoubleHeadsModelPolicy" ), "transformers.models.gpt2.modeling_gpt2.GPT2ForQuestionAnswering": PolicyLocation( file_name="gpt2", class_name="GPT2ForQuestionAnsweringPolicy" ), "transformers.models.gpt2.modeling_gpt2.GPT2ForTokenClassification": PolicyLocation( file_name="gpt2", class_name="GPT2ForTokenClassificationPolicy" ), "transformers.models.gpt2.modeling_gpt2.GPT2ForSequenceClassification": PolicyLocation( file_name="gpt2", class_name="GPT2ForSequenceClassificationPolicy" ), # GPTJ "transformers.models.gptj.modeling_gptj.GPTJModel": PolicyLocation(file_name="gptj", class_name="GPTJModelPolicy"), "transformers.models.gptj.modeling_gptj.GPTJForCausalLM": PolicyLocation( file_name="gptj", class_name="GPTJForCausalLMPolicy" ), 
"transformers.models.gptj.modeling_gptj.GPTJForQuestionAnswering": PolicyLocation( file_name="gptj", class_name="GPTJForQuestionAnsweringPolicy" ), "transformers.models.gptj.modeling_gptj.GPTJForSequenceClassification": PolicyLocation( file_name="gptj", class_name="GPTJForSequenceClassificationPolicy" ), # ViT "transformers.models.vit.modeling_vit.ViTModel": PolicyLocation(file_name="vit", class_name="ViTModelPolicy"), "transformers.models.vit.modeling_vit.ViTForImageClassification": PolicyLocation( file_name="vit", class_name="ViTForImageClassificationPolicy" ), "transformers.models.vit.modeling_vit.ViTForMaskedImageModeling": PolicyLocation( file_name="vit", class_name="ViTForMaskedImageModelingPolicy" ), # OPT "transformers.models.opt.modeling_opt.OPTModel": PolicyLocation(file_name="opt", class_name="OPTModelPolicy"), "transformers.models.opt.modeling_opt.OPTForCausalLM": PolicyLocation( file_name="opt", class_name="OPTForCausalLMPolicy" ), "transformers.models.opt.modeling_opt.OPTForSequenceClassification": PolicyLocation( file_name="opt", class_name="OPTForSequenceClassificationPolicy" ), "transformers.models.opt.modeling_opt.OPTForQuestionAnswering": PolicyLocation( file_name="opt", class_name="OPTForQuestionAnsweringPolicy" ), # Bloom "transformers.models.bloom.modeling_bloom.BloomModel": PolicyLocation( file_name="bloom", class_name="BloomModelPolicy" ), "transformers.models.bloom.modeling_bloom.BloomForCausalLM": PolicyLocation( file_name="bloom", class_name="BloomForCausalLMPolicy" ), "transformers.models.bloom.modeling_bloom.BloomForSequenceClassification": PolicyLocation( file_name="bloom", class_name="BloomForSequenceClassificationPolicy" ), "transformers.models.bloom.modeling_bloom.BloomForTokenClassification": PolicyLocation( file_name="bloom", class_name="BloomForTokenClassificationPolicy" ), "transformers.models.bloom.modeling_bloom.BloomForQuestionAnswering": PolicyLocation( file_name="bloom", class_name="BloomForQuestionAnsweringPolicy" ), # 
Whisper "transformers.models.whisper.modeling_whisper.WhisperModel": PolicyLocation( file_name="whisper", class_name="WhisperModelPolicy" ), "transformers.models.whisper.modeling_whisper.WhisperForConditionalGeneration": PolicyLocation( file_name="whisper", class_name="WhisperForConditionalGenerationPolicy" ), "transformers.models.whisper.modeling_whisper.WhisperForAudioClassification": PolicyLocation( file_name="whisper", class_name="WhisperForAudioClassificationPolicy" ), # Sam "transformers.models.sam.modeling_sam.SamModel": PolicyLocation(file_name="sam", class_name="SamModelPolicy"), # Blip2 "transformers.models.blip_2.modeling_blip_2.Blip2Model": PolicyLocation( file_name="blip2", class_name="Blip2ModelPolicy" ), "transformers.models.blip_2.modeling_blip_2.Blip2ForConditionalGeneration": PolicyLocation( file_name="blip2", class_name="Blip2ForConditionalGenerationPolicy" ), # ChatGLM "transformers_modules.modeling_chatglm.ChatGLMModel": PolicyLocation( file_name="chatglm2", class_name="ChatGLMModelPolicy" ), "transformers_modules.modeling_chatglm.ChatGLMForConditionalGeneration": PolicyLocation( file_name="chatglm2", class_name="ChatGLMForConditionalGenerationPolicy" ), # Deepseek "transformers_modules.modeling_deepseek.DeepseekModel": PolicyLocation( file_name="deepseek", class_name="DeepseekModelPolicy" ), "transformers_modules.modeling_deepseek.DeepseekForCausalLM": PolicyLocation( file_name="deepseek", class_name="DeepseekForCausalLMPolicy" ), # DeepseekV3 "transformers_modules.modeling_deepseek.DeepseekV3Model": PolicyLocation( file_name="deepseek_v3", class_name="DeepseekV3ModelPolicy" ), "transformers_modules.modeling_deepseek.DeepseekV3ForCausalLM": PolicyLocation( file_name="deepseek_v3", class_name="DeepseekV3ForCausalLMPolicy" ), # Falcon "transformers.models.falcon.modeling_falcon.FalconModel": PolicyLocation( file_name="falcon", class_name="FalconModelPolicy" ), "transformers.models.falcon.modeling_falcon.FalconForCausalLM": PolicyLocation( 
file_name="falcon", class_name="FalconForCausalLMPolicy" ), "transformers.models.falcon.modeling_falcon.FalconForSequenceClassification": PolicyLocation( file_name="falcon", class_name="FalconForSequenceClassificationPolicy" ), "transformers.models.falcon.modeling_falcon.FalconForTokenClassification": PolicyLocation( file_name="falcon", class_name="FalconForTokenClassificationPolicy" ), "transformers.models.falcon.modeling_falcon.FalconForQuestionAnswering": PolicyLocation( file_name="falcon", class_name="FalconForQuestionAnsweringPolicy" ), # mistral "transformers.models.mistral.modeling_mistral.MistralModel": PolicyLocation( file_name="mistral", class_name="MistralModelPolicy" ), "transformers.models.mistral.modeling_mistral.MistralForCausalLM": PolicyLocation( file_name="mistral", class_name="MistralForCausalLMPolicy" ), "transformers.models.mistral.modeling_mistral.MistralForSequenceClassification": PolicyLocation( file_name="mistral", class_name="MistralForSequenceClassificationPolicy" ), # mixtral "transformers.models.mixtral.modeling_mixtral.MixtralModel": PolicyLocation( file_name="mixtral", class_name="MixtralModelPolicy" ), "transformers.models.mixtral.modeling_mixtral.MixtralForCausalLM": PolicyLocation( file_name="mixtral", class_name="MixtralForCausalLMPolicy" ), "transformers.models.mixtral.modeling_mixtral.MixtralForSequenceClassification": PolicyLocation( file_name="mixtral", class_name="MixtralForSequenceClassificationPolicy" ), # Qwen2 "transformers.models.qwen2.modeling_qwen2.Qwen2Model": PolicyLocation( file_name="qwen2", class_name="Qwen2ModelPolicy" ), "transformers.models.qwen2.modeling_qwen2.Qwen2ForCausalLM": PolicyLocation( file_name="qwen2", class_name="Qwen2ForCausalLMPolicy" ), "transformers.models.qwen2.modeling_qwen2.Qwen2ForSequenceClassification": PolicyLocation( file_name="qwen2", class_name="Qwen2ForSequenceClassificationPolicy" ), # Qwen3 "transformers.models.qwen3.modeling_qwen3.Qwen3Model": PolicyLocation( file_name="qwen3", 
class_name="Qwen3ModelPolicy" ), "transformers.models.qwen3.modeling_qwen3.Qwen3ForCausalLM": PolicyLocation( file_name="qwen3", class_name="Qwen3ForCausalLMPolicy" ), "transformers.models.qwen3.modeling_qwen3.Qwen3ForSequenceClassification": PolicyLocation( file_name="qwen3", class_name="Qwen3ForSequenceClassificationPolicy" ), # command "transformers.models.cohere.modeling_cohere.CohereModel": PolicyLocation( file_name="command", class_name="CommandModelPolicy" ), "transformers.models.cohere.modeling_cohere.CohereForCausalLM": PolicyLocation( file_name="command", class_name="CommandForCausalLMPolicy" ), } def import_policy(policy_location: PolicyLocation) -> Policy: """ Dynamically import a Policy class based on the policy location. """ module_name = f"colossalai.shardformer.policies.{policy_location.file_name}" module = importlib.import_module(module_name) return getattr(module, policy_location.class_name) def _fullname(obj): """ Return the full name of an object, including the module name. """ klass = obj.__class__ module = klass.__module__ if module == "builtins": return klass.__qualname__ # avoid outputs like 'builtins.str' # patch custom models which are not in transformers # it can be like 'transformers_modules.THUDM.chatglm3-6b.103caa40027ebfd8450289ca2f278eac4ff26405.modeling_chatglm' (from huggingface hub) # or like 'transformers_modules.chatglm.modeling_chatglm' (from local directory) if module.startswith("peft"): klass = obj.base_model.model.__class__ module = klass.__module__ if module.startswith("transformers_modules"): split_module = module.split(".") if len(split_module) >= 2: module = f"{split_module[0]}.{split_module[-1]}" return module + "." 
+ klass.__qualname__ def get_autopolicy(model: nn.Module) -> Policy: r""" Return the auto policy for the model Args: model (:class:`nn.Module`): The model to get the auto policy Return: :class:`Policy`: The auto policy for the model """ full_name = _fullname(model) policy_location = _POLICY_LIST.get(full_name, None) if policy_location is None: raise NotImplementedError( f"Auto policy for {model.__class__.__qualname__} ({full_name}) is not implemented\n. Supported models are {list(_POLICY_LIST.keys())}" ) else: policy = import_policy(policy_location) return policy()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/deepseek.py
colossalai/shardformer/policies/deepseek.py
from functools import partial from typing import Callable, Dict, List, Union import torch.nn as nn from torch import Tensor from torch.nn import Module from transformers.utils import is_flash_attn_greater_or_equal_2_10 from colossalai.shardformer.layer import FusedRMSNorm, Linear1D_Col, LinearWithGradAccum from colossalai.shardformer.layer.embedding import PaddingEmbedding, VocabParallelEmbedding1D from colossalai.shardformer.layer.linear import Linear1D_Row from colossalai.shardformer.modeling.deepseek import ( DeepseekMoEGate_Col, DeepseekPipelineForwards, EPDeepseekMoE, get_deepseek_flash_attention_forward, get_deepseek_flash_attention_model_forward, ) from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["DeepseekPolicy", "DeepseekForCausalLMPolicy"] class DeepseekPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.origin_attn_implement = self.model.config._attn_implementation """ Because transformers library's bug for AutoModel/AutoConfig, who pop “attn_implement” twice from modeling_utils.py and configuration_utils.py. This bug causes attn_cls to be set to sdpa. Here we assign it to "flash_attention_2". 
""" # self.origin_attn_implement = "flash_attention_2" if self.shard_config.enable_tensor_parallelism: # Resize embedding vocab_size = self.model.config.vocab_size world_size = self.shard_config.tensor_parallel_size if vocab_size % world_size != 0: new_vocab_size = vocab_size + world_size - vocab_size % world_size self.model.resize_token_embeddings(new_vocab_size) return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: ATTN_IMPLEMENTATION = { "eager": "DeepseekAttention", "flash_attention_2": "DeepseekFlashAttention2", "sdpa": "DeepseekSdpaAttention", } policy = {} attn_cls = ATTN_IMPLEMENTATION[self.origin_attn_implement] sp_mode = self.shard_config.sequence_parallelism_mode or None sp_size = self.shard_config.sequence_parallel_size or None sp_group = self.shard_config.sequence_parallel_process_group or None sp_partial_derived = sp_mode in ["split_gather", "ring"] tp_size = self.shard_config.tensor_parallel_size # modified for both SP and TP num_q_heads = self.model.config.num_attention_heads num_kv_heads = getattr(self.model.config, "num_key_value_heads", None) if sp_mode == "all_to_all": num_q_heads //= sp_size decoder_attribute_replacement = { "num_heads": num_q_heads, } if getattr(self.model.config, "num_key_value_heads", False): num_kv_heads //= sp_size decoder_attribute_replacement["num_key_value_heads"] = num_kv_heads policy[attn_cls] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, ) if self.shard_config.enable_sequence_parallelism: if self.pipeline_stage_manager is not None: # NOTE: we are replacing model forward for both sequence parallelism and pipeline parallelism # if both are enabled, one of them will be ignored raise NotImplementedError("Sequence parallelism is not supported with pipeline parallelism.") self.append_or_create_method_replacement( description={ "forward": get_deepseek_flash_attention_forward(self.shard_config, sp_mode, sp_size, sp_group), }, policy=policy, 
target_key=attn_cls, ) if self.pipeline_stage_manager is None: self.append_or_create_method_replacement( description={ "forward": get_deepseek_flash_attention_model_forward( self.shard_config, sp_mode=sp_mode, sp_size=sp_size, sp_group=sp_group, ), }, policy=policy, target_key="DeepseekModel", ) embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: # tensor parallelism for non-moe params assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." assert ( self.model.config.num_key_value_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of key_value heads must be divisible by tensor parallel size." decoder_attribute_replacement = { "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, } num_q_heads //= tp_size decoder_attribute_replacement = { "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "self_attn.num_heads": num_q_heads, } if num_kv_heads: num_kv_heads //= tp_size decoder_attribute_replacement["self_attn.num_key_value_heads"] = num_kv_heads policy["DeepseekDecoderLayer"] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=Linear1D_Col, kwargs={"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv}, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=Linear1D_Col, kwargs={"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv}, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", 
target_module=Linear1D_Col, kwargs={"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv}, ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=Linear1D_Row, kwargs={"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv}, ), SubModuleReplacementDescription( suffix="mlp.gate", target_module=DeepseekMoEGate_Col, kwargs={ "gather_output": True, "fp8_communication": self.shard_config.fp8_communication, "config": self.model.config, }, ignore_if_not_exist=True, ), ], ) elif use_zbv: policy["DeepseekDecoderLayer"] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=LinearWithGradAccum, kwargs={"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv}, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=LinearWithGradAccum, kwargs={"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv}, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=LinearWithGradAccum, kwargs={"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv}, ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=LinearWithGradAccum, kwargs={"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv}, ), SubModuleReplacementDescription( suffix="mlp.gate", target_module=DeepseekMoEGate_Col, kwargs={ "gather_output": True, "fp8_communication": self.shard_config.fp8_communication, "config": self.model.config, }, ignore_if_not_exist=True, ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="embed_tokens", target_module=embedding_cls, kwargs={ "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ), 
policy=policy, target_key="DeepseekModel", ) if self.shard_config.ep_group: # expert parallel self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="mlp", target_module=EPDeepseekMoE, kwargs={ "ep_group": self.shard_config.ep_group, "tp_group": self.shard_config.tensor_parallel_process_group, "moe_dp_group": self.shard_config.moe_dp_group, "fp8_communication": self.shard_config.fp8_communication, }, ) ], policy=policy, target_key="DeepseekDecoderLayer", ) # optimization configuration if self.shard_config.enable_fused_normalization: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="input_layernorm", target_module=FusedRMSNorm, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=FusedRMSNorm, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key="DeepseekDecoderLayer", ) self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="norm", target_module=FusedRMSNorm, kwargs={"sp_partial_derived": sp_partial_derived}, ), policy=policy, target_key="DeepseekModel", ) if self.shard_config.enable_flash_attention: # NOTE: there is a bug for toggling flash attention in AutoModel, which has to be used for deepseek right now from transformers.dynamic_module_utils import get_class_from_dynamic_module flash_attn_cls = get_class_from_dynamic_module( "deepseek-ai/deepseek-moe-16b-base--modeling_deepseek.DeepseekFlashAttention2", "deepseek-ai/deepseek-moe-16b-base", ) class TargetFlashAttn: def __init__(self): raise RuntimeError("This class should not be instantiated") @staticmethod def from_native_module(original_attn: nn.Module, *args, **kwargs) -> nn.Module: original_attn.__class__ = flash_attn_cls original_attn._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() return original_attn 
self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="self_attn", target_module=TargetFlashAttn, ), policy=policy, target_key="DeepseekDecoderLayer", ) return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager: if self.shard_config.enable_sequence_parallelism: # NOTE: we are replacing model forward for both sequence parallelism and pipeline parallelism # if both are enabled, one of them will be ignored raise NotImplementedError("Pipeline parallelism is not supported with sequence parallelism.") stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "DeepseekModel": module = self.model else: module = self.model.model layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = {"forward": partial(new_forward, stage_manager=stage_manager, stage_index=stage_index)} self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=model_cls ) return def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "DeepseekModel": module = self.model else: module = self.model.model stage_manager = self.pipeline_stage_manager held_layers = [] if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embed_tokens) for start_idx, end_idx in stage_indices: 
held_layers.extend(module.layers[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.norm) else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) if stage_manager.is_first_stage(): held_layers.append(module.embed_tokens) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.layers[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.norm) return held_layers class DeepseekModelPolicy(DeepseekPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): policy = super().module_policy() if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls="DeepseekModel", new_forward=DeepseekPipelineForwards.deepseek_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in llama model""" return [] class DeepseekForCausalLMPolicy(DeepseekPolicy): def module_policy(self): policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # TODO: assign pg mesh from plugin to all modules if self.shard_config.enable_tensor_parallelism: # add a new item for casual lm new_item = { "DeepseekForCausalLM": ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=Linear1D_Col, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) elif use_zbv: # add a new item for casual lm new_item = { "DeepseekForCausalLM": ModulePolicyDescription( sub_module_replacement=[ 
SubModuleReplacementDescription( suffix="lm_head", target_module=LinearWithGradAccum, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls="DeepseekForCausalLM", new_forward=DeepseekPipelineForwards.deepseek_for_causal_lm_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) else: if stage_manager.is_last_stage(): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: deepseek_model = self.model.model if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if ( id(deepseek_model.embed_tokens.weight) == id(self.model.lm_head.weight) and self.pipeline_stage_manager.num_stages > 1 ): # tie weights return [ { 0: deepseek_model.embed_tokens.weight, self.pipeline_stage_manager.num_stages - 1: self.model.lm_head.weight, } ] return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/bert.py
colossalai/shardformer/policies/bert.py
import warnings from functools import partial from typing import Callable, Dict, List import torch.nn as nn from torch import Tensor from torch.nn import Module import colossalai.shardformer.layer as col_nn from ..modeling.bert import ( BertPipelineForwards, bert_sequence_parallel_forward_fn, get_bert_sequence_parallel_attention_forward, get_jit_fused_bert_intermediate_forward, get_jit_fused_bert_output_forward, get_jit_fused_bert_self_output_forward, ) from ..modeling.jit import get_jit_fused_dropout_add_func from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = [ "BertPolicy", "BertModelPolicy", "BertForPreTrainingPolicy", "BertLMHeadModelPolicy", "BertForMaskedLMPolicy", "BertForNextSentencePredictionPolicy", "BertForSequenceClassificationPolicy", "BertForTokenClassificationPolicy", "BertForMultipleChoicePolicy", "BertForQuestionAnsweringPolicy", ] class BertPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.enable_bias_gelu_fused = self.shard_config.enable_jit_fused and self.model.config.hidden_act == "gelu" return self.model def module_policy(self): from transformers.models.bert.modeling_bert import ( BertEmbeddings, BertIntermediate, BertLayer, BertModel, BertOutput, BertSdpaSelfAttention, BertSelfOutput, ) policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = col_nn.VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = col_nn.PaddingEmbedding if self.shard_config.enable_fused_normalization: norm_cls = col_nn.FusedLayerNorm else: norm_cls = col_nn.LayerNorm sp_mode = self.shard_config.sequence_parallelism_mode or None assert sp_mode != "all_to_all", "all_to_all sequence parallelism is not supported for Bert" if sp_mode == "ring": warnings.warn( f"For Bert, sequence parallelism is currently not support mode {sp_mode}, will set to be split_gather" ) sp_mode = "split_gather" sp_partial_derived 
= sp_mode == "split_gather" use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_sequence_parallelism: # Fix the tgt_len size in bert sequence parallel attention forward. self.append_or_create_method_replacement( description={ "forward": get_bert_sequence_parallel_attention_forward(self.shard_config), }, policy=policy, target_key=BertSdpaSelfAttention, ) if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." policy[BertLayer] = ModulePolicyDescription( attribute_replacement={ "attention.self.all_head_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "crossattention.self.all_head_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "attention.self.num_attention_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, "crossattention.self.num_attention_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="attention.self.query", target_module=col_nn.Linear1D_Col, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.self.key", target_module=col_nn.Linear1D_Col, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.self.value", target_module=col_nn.Linear1D_Col, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.self.dropout", target_module=col_nn.DropoutForParallelInput, ), 
SubModuleReplacementDescription( suffix="attention.output.dense", target_module=col_nn.Linear1D_Row, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.output.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="intermediate.dense", target_module=col_nn.Linear1D_Col, kwargs={ "seq_parallel_mode": sp_mode, "skip_bias_add": self.enable_bias_gelu_fused, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output.dense", target_module=col_nn.Linear1D_Row, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output.dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) policy[BertEmbeddings] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForReplicatedInput, ), ] ) if self.enable_bias_gelu_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_bert_intermediate_forward(), }, policy=policy, target_key=BertIntermediate, ) elif use_zbv: policy[BertLayer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="attention.self.query", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.self.key", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.self.value", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, 
"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.self.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="attention.output.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.output.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="intermediate.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "skip_bias_add": self.enable_bias_gelu_fused, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output.dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) policy[BertEmbeddings] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForReplicatedInput, ), ] ) if self.enable_bias_gelu_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_bert_intermediate_forward(), }, policy=policy, target_key=BertIntermediate, ) if sp_mode == "split_gather": self.append_or_create_method_replacement( description={"forward": bert_sequence_parallel_forward_fn(self.shard_config)}, policy=policy, target_key=BertModel, ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="word_embeddings", target_module=embedding_cls, kwargs=( { "fp8_communication": self.shard_config.fp8_communication, } if 
self.shard_config.enable_tensor_parallelism else {} ), ) ], policy=policy, target_key=BertEmbeddings, ) # optimization configuration # Handle bert layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="attention.output.LayerNorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="output.LayerNorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key=BertLayer, ) # handle embedding layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="LayerNorm", target_module=norm_cls, ) ], policy=policy, target_key=BertEmbeddings, ) # use jit operator if self.shard_config.enable_jit_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_bert_self_output_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=BertSelfOutput, ) self.append_or_create_method_replacement( description={ "forward": get_jit_fused_bert_output_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=BertOutput, ) return policy def add_lm_head_policy(self, base_policy): from transformers.models.bert.modeling_bert import BertLMPredictionHead # optimize for tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="decoder", target_module=col_nn.VocabParallelLMHead1D, kwargs={ "gather_output": True, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ), policy=base_policy, target_key=BertLMPredictionHead, ) else: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="decoder", target_module=col_nn.PaddingLMHead, kwargs={"make_vocab_size_divisible_by": 
self.shard_config.make_vocab_size_divisible_by}, ), policy=base_policy, target_key=BertLMPredictionHead, ) # optimize with fused normalization if self.shard_config.enable_fused_normalization: # Handle bert lm prediction head self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="transform.LayerNorm", target_module=col_nn.FusedLayerNorm, ), policy=base_policy, target_key=BertLMPredictionHead, ) return base_policy def add_lm_prediction_policy(self, base_policy): from transformers.models.bert.modeling_bert import BertLMPredictionHead method_replacement = { "_save_to_state_dict": col_nn.ParallelModule._save_to_state_dict, "_load_from_state_dict": col_nn.ParallelModule._load_from_state_dict, } self.append_or_create_method_replacement( description=method_replacement, policy=base_policy, target_key=BertLMPredictionHead, ) return base_policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """ If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy. 
""" if self.pipeline_stage_manager is None: return stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "BertModel": module = self.model else: module = self.model.bert if stage_manager.is_interleave: layers_per_stage = stage_manager.distribute_layers(len(module.encoder.layer)) stage_manager.stage_indices = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, shard_config=self.shard_config, ) } else: layers_per_stage = stage_manager.distribute_layers(len(module.encoder.layer)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config, ) } self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "BertModel": module = self.model else: module = self.model.bert stage_manager = self.pipeline_stage_manager held_layers = [] if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.encoder.layer)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embeddings) for start_idx, end_idx in stage_indices: held_layers.extend(module.encoder.layer[start_idx:end_idx]) if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(module.pooler) else: layers_per_stage = stage_manager.distribute_layers(len(module.encoder.layer)) if stage_manager.is_first_stage(): held_layers.append(module.embeddings) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.encoder.layer[start_idx:end_idx]) if 
stage_manager.is_last_stage(): held_layers.append(module.pooler) return held_layers # BertModel class BertModelPolicy(BertPolicy): def module_policy(self): policy = super().module_policy() from transformers.models.bert.modeling_bert import BertModel if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertModel, new_forward=BertPipelineForwards.bert_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in bert model""" return [] # BertForPreTraining class BertForPreTrainingPolicy(BertPolicy): def module_policy(self): policy = super().module_policy() policy = self.add_lm_head_policy(policy) policy = self.add_lm_prediction_policy(policy) from transformers.models.bert.modeling_bert import BertForPreTraining if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertForPreTraining, new_forward=BertPipelineForwards.bert_for_pretraining_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage""" held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.cls) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: model = self.model if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if id(model.bert.embeddings.word_embeddings.weight) == id(model.cls.predictions.decoder.weight): # tie weights return [ { 0: model.bert.embeddings.word_embeddings.weight, self.pipeline_stage_manager.num_stages - 1: model.cls.predictions.decoder.weight, } ] return [] # BertLMHeadModel class BertLMHeadModelPolicy(BertPolicy): def module_policy(self): policy = super().module_policy() policy = self.add_lm_head_policy(policy) policy = 
self.add_lm_prediction_policy(policy) from transformers.models.bert.modeling_bert import BertLMHeadModel if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertLMHeadModel, new_forward=BertPipelineForwards.bert_lm_head_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.cls) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: bert_model = self.model.bert if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if id(bert_model.embeddings.word_embeddings.weight) == id(self.model.cls.predictions.decoder.weight): # tie weights return [ { 0: bert_model.embeddings.word_embeddings.weight, self.pipeline_stage_manager.num_stages - 1: self.model.cls.predictions.decoder.weight, } ] return [] # BertForMaskedLM class BertForMaskedLMPolicy(BertPolicy): def module_policy(self): policy = super().module_policy() policy = self.add_lm_head_policy(policy) policy = self.add_lm_prediction_policy(policy) from transformers.models.bert.modeling_bert import BertForMaskedLM if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertForMaskedLM, new_forward=BertPipelineForwards.bert_for_masked_lm_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.cls) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: bert_model = self.model.bert if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if id(bert_model.embeddings.word_embeddings.weight) == id(self.model.cls.predictions.decoder.weight): # 
tie weights return [ { 0: bert_model.embeddings.word_embeddings.weight, self.pipeline_stage_manager.num_stages - 1: self.model.cls.predictions.decoder.weight, } ] return [] # BertForSequenceClassification class BertForSequenceClassificationPolicy(BertPolicy): def module_policy(self): from transformers.models.bert.modeling_bert import BertForSequenceClassification policy = super().module_policy() if self.shard_config.enable_tensor_parallelism: addon_module = { BertForSequenceClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForParallelInput, ) ] ) } policy.update(addon_module) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertForSequenceClassification, new_forward=BertPipelineForwards.bert_for_sequence_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: # no shared params for sequence classification model return [] # BertForTokenClassification class BertForTokenClassificationPolicy(BertPolicy): def module_policy(self): from transformers.models.bert.modeling_bert import BertForTokenClassification policy = super().module_policy() if self.shard_config.enable_tensor_parallelism: addon_module = { BertForTokenClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForParallelInput, ) ] ) } policy.update(addon_module) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertForTokenClassification, new_forward=BertPipelineForwards.bert_for_token_classification_forward, 
policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: # no shared params for sequence classification model return [] # BertForNextSentencePrediction class BertForNextSentencePredictionPolicy(BertPolicy): def module_policy(self): policy = super().module_policy() from transformers.models.bert.modeling_bert import BertForNextSentencePrediction if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertForNextSentencePrediction, new_forward=BertPipelineForwards.bert_for_next_sentence_prediction_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.cls) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: # no shared params for sequence classification model return [] # BertForMultipleChoice class BertForMultipleChoicePolicy(BertPolicy): def module_policy(self): from transformers.models.bert.modeling_bert import BertForMultipleChoice policy = super().module_policy() if self.shard_config.enable_tensor_parallelism: addon_module = { BertForMultipleChoice: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForParallelInput, ) ] ) } policy.update(addon_module) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertForMultipleChoice, new_forward=BertPipelineForwards.bert_for_multiple_choice_forward, policy=policy, ) return policy def get_held_layers(self) -> 
List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: # no shared params for sequence classification model return [] class BertForQuestionAnsweringPolicy(BertPolicy): def module_policy(self): from transformers.models.bert.modeling_bert import BertForQuestionAnswering policy = super().module_policy() if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BertForQuestionAnswering, new_forward=BertPipelineForwards.bert_for_question_answering_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.qa_outputs) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: # no shared params for sequence classification model return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/t5.py
colossalai/shardformer/policies/t5.py
from __future__ import annotations import warnings from functools import partial from typing import Callable, Dict, List, Tuple import numpy as np from torch import Tensor, nn from colossalai.shardformer.layer import ( DropoutForParallelInput, Embedding1D, FusedRMSNorm, Linear1D_Col, Linear1D_Row, LinearWithGradAccum, PaddingEmbedding, PaddingLMHead, RMSNorm, VocabParallelEmbedding1D, VocabParallelLMHead1D, ) from colossalai.shardformer.policies.base_policy import ModulePolicyDescription from ..modeling.jit import get_jit_fused_dropout_add_func from ..modeling.t5 import ( T5PipelineForwards, get_jit_fused_T5_layer_ff_forward, get_t5_flash_attention_forward, get_T5_layer_cross_attention_forward, get_T5_layer_self_attention_forward, ) from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = [ "distribute_t5_layers", "T5ModelPolicy", "T5ForConditionalGenerationPolicy", "T5EncoderPolicy", "T5ForTokenClassificationPolicy", ] class T5BasePolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() return self.model def module_policy(self): from transformers.models.t5.modeling_t5 import ( T5Attention, T5DenseActDense, T5DenseGatedActDense, T5LayerCrossAttention, T5LayerFF, T5LayerSelfAttention, T5Stack, ) policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding if self.shard_config.enable_fused_normalization: norm_cls = FusedRMSNorm else: norm_cls = RMSNorm if self.shard_config.enable_sequence_parallelism: self.shard_config.enable_sequence_parallelism = False warnings.warn("T5 doesn't support sequence parallelism now, will ignore the sequence parallelism flag.") use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_heads % 
self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." policy[T5Stack] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) policy[T5LayerSelfAttention] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) policy[T5LayerCrossAttention] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ) ] ) policy[T5Attention] = ModulePolicyDescription( attribute_replacement={ "d_model": self.model.config.d_model // self.shard_config.tensor_parallel_size, "n_heads": self.model.config.num_heads // self.shard_config.tensor_parallel_size, "inner_dim": self.model.config.num_heads * self.model.config.d_kv // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="q", target_module=Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="k", target_module=Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="v", target_module=Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="o", target_module=Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="relative_attention_bias", target_module=Embedding1D, kwargs=dict( gather_output=False, fp8_communication=self.shard_config.fp8_communication, ), ignore_if_not_exist=True, ), ], ) policy[T5LayerFF] = ModulePolicyDescription( sub_module_replacement=[ 
SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) policy[T5DenseGatedActDense] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="wi_0 ", target_module=Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="wi_1", target_module=Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="wo", target_module=Linear1D_Col, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) policy[T5DenseActDense] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="wi", target_module=Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="wo", target_module=Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) elif use_zbv: policy[T5Stack] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) policy[T5LayerSelfAttention] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) policy[T5LayerCrossAttention] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ) ] ) policy[T5Attention] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="q", target_module=LinearWithGradAccum, 
kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="k", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="v", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="o", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="relative_attention_bias", target_module=Embedding1D, kwargs=dict( gather_output=False, fp8_communication=self.shard_config.fp8_communication, ), ignore_if_not_exist=True, ), ], ) policy[T5LayerFF] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) policy[T5DenseGatedActDense] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="wi_0 ", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="wi_1", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="wo", target_module=LinearWithGradAccum, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) policy[T5DenseActDense] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="wi", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), 
SubModuleReplacementDescription( suffix="wo", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForParallelInput, ), ] ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="embed_tokens", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, target_key=T5Stack, ) # optimization configuration self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="layer_norm", target_module=norm_cls, ), policy=policy, target_key=T5LayerFF, ) self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription(suffix="layer_norm", target_module=norm_cls), policy=policy, target_key=T5LayerSelfAttention, ) self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription(suffix="layer_norm", target_module=norm_cls), policy=policy, target_key=T5LayerCrossAttention, ) self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription(suffix="final_layer_norm", target_module=norm_cls), policy=policy, target_key=T5Stack, ) # use flash attention if self.shard_config.enable_flash_attention: self.append_or_create_method_replacement( description={ "forward": get_t5_flash_attention_forward(), }, policy=policy, target_key=T5Attention, ) # use jit operator if self.shard_config.enable_jit_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_T5_layer_ff_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=T5LayerFF, ) 
self.append_or_create_method_replacement( description={ "forward": get_T5_layer_self_attention_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=T5LayerSelfAttention, ) self.append_or_create_method_replacement( description={ "forward": get_T5_layer_cross_attention_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=T5LayerCrossAttention, ) return policy def postprocess(self): return self.model def distribute_t5_layers( self, num_encoder_layers: int, num_decoder_layers: int, num_stages: int ) -> Tuple[List[int], int]: """ Distribute t5 layers into stages when pipeline parallel is used. Return the layer distribution as a list and the starting stage of decoder. If decoder doesn't exist, returned decoder starting stage is set to num_encoder_layers. """ stage_manager = self.pipeline_stage_manager assert stage_manager is not None, "Pipeline stage manager is not set." # number of encoder layers must be a positive integer if num_encoder_layers <= 0: raise ValueError("The number of encoder layers for T5 must be a positive integer.") # number of layers should be large enough to fill in every stage if num_encoder_layers + num_decoder_layers < num_stages: raise ValueError("The total number of layers can't be smaller than number of stages.") # in the case of T5EncoderModel, set decoder starting stage to num_stages since it doesn't exist if num_decoder_layers == 0: return stage_manager.distribute_layers(num_encoder_layers, num_stages), num_stages # the number of stages distributed between encoder and decoder is optimized in this way: # num_encoder_stages = argmin(abs(num_encoder_layers / encoder_stages - num_decoder_layers / decoder_stages)) # s.t. 
num_encoder_stages + num_decoder_stages = num_stages, num_encoder_stages >= 1, num_decoder_stages >= 1 def objective(num_encoder_stages): return abs(num_encoder_layers / num_encoder_stages - num_decoder_layers / (num_stages - num_encoder_stages)) num_encoder_stages = np.argmin([objective(i) for i in range(1, num_stages)]) + 1 num_decoder_stages = num_stages - num_encoder_stages encoder_distribution = stage_manager.distribute_layers(num_encoder_layers, num_encoder_stages) decoder_distribution = stage_manager.distribute_layers(num_decoder_layers, num_decoder_stages) return encoder_distribution + decoder_distribution, num_encoder_stages def get_t5_stage_index( self, layers_per_stage: List[int], stage: int, decoder_starting_stage: int ) -> Tuple[int, int]: """ Input the distribution of layers among stages, the current stage and the first stage of decoder. Return the starting/ending idx of layers in encoder/decoder """ stage_manager = self.pipeline_stage_manager assert stage_manager is not None, "Pipeline stage manager is not set." 
if stage < decoder_starting_stage: return stage_manager.get_stage_index(layers_per_stage[:decoder_starting_stage], stage) else: return stage_manager.get_stage_index( layers_per_stage[decoder_starting_stage:], stage - decoder_starting_stage ) def get_held_layers(self) -> List[nn.Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "T5ForTokenClassification": model = self.model.transformer else: model = self.model encoder = model.encoder decoder = getattr(model, "decoder", None) num_encoder_layers = len(encoder.block) num_decoder_layers = len(decoder.block) if decoder else 0 held_layers = [] if stage_manager.is_interleave: layers_per_stage, decoder_starting_stage = self.distribute_t5_layers( num_encoder_layers, num_decoder_layers, stage_manager.num_stages ) stage_indices = self.get_t5_stage_index(layers_per_stage, stage_manager.stage, decoder_starting_stage) if stage_manager.stage < decoder_starting_stage: # current stage is in t5's encoder if stage_manager.is_first_stage(): held_layers.append(model.shared) held_layers.append(encoder.embed_tokens) held_layers.append(encoder.dropout) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(encoder.final_layer_norm) held_layers.append(encoder.dropout) for start_idx, end_idx in stage_indices: held_layers.extend(encoder.block[start_idx:end_idx]) else: # current stage is in t5's decoder if stage_manager.stage == decoder_starting_stage: held_layers.append(decoder.embed_tokens) held_layers.append(decoder.dropout) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(decoder.final_layer_norm) held_layers.append(decoder.dropout) for start_idx, 
end_idx in stage_indices: held_layers.extend(decoder.block[start_idx:end_idx]) else: layers_per_stage, decoder_starting_stage = self.distribute_t5_layers( num_encoder_layers, num_decoder_layers, stage_manager.num_stages ) start_idx, end_idx = self.get_t5_stage_index(layers_per_stage, stage_manager.stage, decoder_starting_stage) if stage_manager.stage < decoder_starting_stage: # current stage is in t5's encoder if stage_manager.is_first_stage(): held_layers.append(model.shared) held_layers.append(encoder.embed_tokens) held_layers.append(encoder.dropout) if stage_manager.stage == decoder_starting_stage - 1: held_layers.append(encoder.final_layer_norm) held_layers.append(encoder.dropout) held_layers.extend(encoder.block[start_idx:end_idx]) else: # current stage is in t5's decoder if stage_manager.stage == decoder_starting_stage: held_layers.append(decoder.embed_tokens) held_layers.append(decoder.dropout) if stage_manager.is_last_stage(): held_layers.append(decoder.final_layer_norm) held_layers.append(decoder.dropout) held_layers.extend(decoder.block[start_idx:end_idx]) return held_layers def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if not self.pipeline_stage_manager: raise ValueError("set_pipeline_forward method can only be called when pipeline parallel is enabled.") stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "T5ForTokenClassification": encoder = self.model.transformer.encoder else: encoder = self.model.encoder decoder = getattr(self.model, "decoder", None) num_encoder_layers = len(encoder.block) num_decoder_layers = len(decoder.block) if decoder else 0 layers_per_stage, decoder_starting_stage = self.distribute_t5_layers( num_encoder_layers, num_decoder_layers, stage_manager.num_stages ) stage_index = 
self.get_t5_stage_index(layers_per_stage, stage_manager.stage, decoder_starting_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, decoder_starting_stage=decoder_starting_stage, ) } self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) class T5ModelPolicy(T5BasePolicy): def module_policy(self): from transformers import T5Model policy = super().module_policy() embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="shared", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, target_key=T5Model, ) if self.pipeline_stage_manager is not None: self.set_pipeline_forward(model_cls=T5Model, new_forward=T5PipelineForwards.t5_model_forward, policy=policy) return policy def get_held_layers(self) -> List[nn.Module]: return super().get_held_layers() def get_shared_params(self) -> List[Dict[int, Tensor]]: module = self.model stage_manager = self.pipeline_stage_manager if stage_manager is not None and stage_manager.num_stages > 1: _, decoder_starting_stage = self.distribute_t5_layers( len(module.encoder.block), len(module.decoder.block), stage_manager.num_stages ) if id(module.decoder.embed_tokens.weight) == id(module.shared.weight): return [{0: module.shared.weight, decoder_starting_stage: module.decoder.embed_tokens.weight}] return [] class T5ForConditionalGenerationPolicy(T5BasePolicy): def module_policy(self): from transformers import 
T5ForConditionalGeneration policy = super().module_policy() embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="shared", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, target_key=T5ForConditionalGeneration, ) if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="lm_head", target_module=VocabParallelLMHead1D, kwargs={ "gather_output": True, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ), policy=policy, target_key=T5ForConditionalGeneration, ) else: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="lm_head", target_module=PaddingLMHead, kwargs={"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}, ), policy=policy, target_key=T5ForConditionalGeneration, ) if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=T5ForConditionalGeneration, new_forward=T5PipelineForwards.t5_for_conditional_generation_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): 
held_layers.append(self.model.lm_head) else: if stage_manager.is_last_stage(): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: module = self.model stage_manager = self.pipeline_stage_manager if stage_manager is not None and stage_manager.num_stages > 1: _, decoder_starting_stage = self.distribute_t5_layers( len(module.encoder.block), len(module.decoder.block), stage_manager.num_stages ) shared_params = [] shared_embedding = {} if id(module.decoder.embed_tokens.weight) == id(module.shared.weight): shared_embedding[0] = module.shared.weight shared_embedding[decoder_starting_stage] = module.decoder.embed_tokens.weight if id(module.lm_head.weight) == id(module.shared.weight): shared_embedding[0] = module.shared.weight shared_embedding[stage_manager.num_stages - 1] = module.lm_head.weight if len(shared_embedding) > 0: shared_params.append(shared_embedding) return shared_params return [] class T5EncoderPolicy(T5BasePolicy): def module_policy(self): from transformers import T5EncoderModel
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/whisper.py
colossalai/shardformer/policies/whisper.py
import warnings from functools import partial from typing import Callable, Dict, List, Tuple import numpy as np import torch.nn as nn from torch import Tensor import colossalai.shardformer.layer as col_nn from ..modeling.jit import get_jit_fused_dropout_add_func from ..modeling.whisper import ( WhisperPipelineForwards, get_jit_fused_whisper_decoder_layer_forward, get_jit_fused_whisper_encoder_layer_forward, get_whisper_decoder_forward_for_flash_attention, get_whisper_flash_attention_forward, ) from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = [ "WhisperPolicy", "WhisperModelPolicy", "WhisperForConditionalGenerationPolicy", "WhisperForAudioClassificationPolicy", ] class WhisperPolicy(Policy): def __init__(self) -> None: super().__init__() def config_sanity_check(self): pass def preprocess(self): # reshape the embedding layer r""" Reshape the Embedding layer to make the embedding dimension divisible by world_size """ self.tie_weight = self.tie_weight_check() return self.model def module_policy(self): from transformers.models.whisper.modeling_whisper import ( WhisperAttention, WhisperDecoder, WhisperDecoderLayer, WhisperEncoder, WhisperEncoderLayer, WhisperFlashAttention2, WhisperSdpaAttention, ) policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = col_nn.VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = col_nn.PaddingEmbedding if self.shard_config.enable_fused_normalization: norm_cls = col_nn.FusedLayerNorm else: norm_cls = col_nn.LayerNorm if self.shard_config.enable_sequence_parallelism: self.shard_config.enable_sequence_parallelism = False warnings.warn( "Whisper doesn't support sequence parallelism now, will ignore the sequence parallelism flag." 
) use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # TODO using the jit fused add_and_dropout affect the accuracy if self.shard_config.enable_jit_fused: self.shard_config.enable_jit_fused = False warnings.warn("Whisper doesn't support jit fused operator now, will ignore the jit fused operator flag.") if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.encoder_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." policy[WhisperEncoderLayer] = ModulePolicyDescription( attribute_replacement={ "self_attn.embed_dim": self.model.config.d_model // self.shard_config.tensor_parallel_size, "self_attn.num_heads": self.model.config.encoder_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc1", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc2", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) 
policy[WhisperDecoderLayer] = ModulePolicyDescription( attribute_replacement={ "self_attn.embed_dim": self.model.config.d_model // self.shard_config.tensor_parallel_size, "self_attn.num_heads": self.model.config.decoder_attention_heads // self.shard_config.tensor_parallel_size, "encoder_attn.embed_dim": self.model.config.d_model // self.shard_config.tensor_parallel_size, "encoder_attn.num_heads": self.model.config.encoder_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="encoder_attn.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="encoder_attn.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="encoder_attn.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="encoder_attn.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": 
self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc1", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc2", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) elif use_zbv: policy[WhisperEncoderLayer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc1", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc2", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[WhisperDecoderLayer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", 
target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="encoder_attn.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="encoder_attn.k_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="encoder_attn.v_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="encoder_attn.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc1", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc2", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="embed_tokens", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": 
self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), ], policy=policy, target_key=WhisperDecoder, ) # optimization configuration # Handle encoder layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="self_attn_layer_norm", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="final_layer_norm", target_module=norm_cls, ), ], policy=policy, target_key=WhisperEncoderLayer, ) # Handle decoder layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="self_attn_layer_norm", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="final_layer_norm", target_module=norm_cls, ), ], policy=policy, target_key=WhisperDecoderLayer, ) # handle encoder layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="layer_norm", target_module=norm_cls, ) ], policy=policy, target_key=WhisperEncoder, ) # handle decoder layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="layer_norm", target_module=norm_cls, ) ], policy=policy, target_key=WhisperDecoder, ) # enable flash attention if self.shard_config.enable_flash_attention: self.append_or_create_method_replacement( description={ "forward": get_whisper_flash_attention_forward(), }, policy=policy, target_key=WhisperAttention, ) self.append_or_create_method_replacement( description={ "forward": get_whisper_flash_attention_forward(), }, policy=policy, target_key=WhisperFlashAttention2, ) self.append_or_create_method_replacement( description={ "forward": get_whisper_flash_attention_forward(), }, policy=policy, target_key=WhisperSdpaAttention, ) if not self.shard_config.pipeline_stage_manager: self.append_or_create_method_replacement( description={ "forward": 
get_whisper_decoder_forward_for_flash_attention(self.shard_config), }, policy=policy, target_key=WhisperDecoder, ) # use jit fused operator if self.shard_config.enable_jit_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_whisper_decoder_layer_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=WhisperDecoderLayer, ) self.append_or_create_method_replacement( description={ "forward": get_jit_fused_whisper_encoder_layer_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=WhisperEncoderLayer, ) return policy def add_lm_head_policy(self, base_policy): from transformers.models.whisper.modeling_whisper import WhisperForConditionalGeneration # optimize for tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="proj_out", target_module=col_nn.VocabParallelLMHead1D, kwargs={ "gather_output": True, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ), policy=base_policy, target_key=WhisperForConditionalGeneration, ) else: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="proj_out", target_module=col_nn.PaddingLMHead, kwargs={"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}, ), policy=base_policy, target_key=WhisperForConditionalGeneration, ) return base_policy def postprocess(self): return self.model def distribute_whisper_layers( self, num_encoder_layers: int, num_decoder_layers: int, num_stages: int ) -> Tuple[List[int], int]: """ Distribute whisper layers into stages when pipeline parallel is used. Return the layer distribution as a list and the starting stage of decoder. If decoder doesn't exist, returned decoder starting stage is set to num_encoder_layers. 
""" stage_manager = self.pipeline_stage_manager assert stage_manager is not None, "pipeline_stage_manager is None" # number of encoder layers must be a positive integer if num_encoder_layers <= 0: raise ValueError("The number of encoder layers for whisper must be a positive integer.") # number of layers should be large enough to fill in every stage if num_encoder_layers + num_decoder_layers < num_stages: raise ValueError("The total number of layers can't be smaller than number of stages.") # in the case of whisperEncoderModel, set decoder starting stage to num_stages since it doesn't exist if num_decoder_layers == 0: return stage_manager.distribute_layers(num_encoder_layers, num_stages), num_stages # the number of stages distributed between encoder and decoder is optimized in this way: # num_encoder_stages = argmin(abs(num_encoder_layers / encoder_stages - num_decoder_layers / decoder_stages)) # s.t. num_encoder_stages + num_decoder_stages = num_stages, num_encoder_stages >= 1, num_decoder_stages >= 1 def objective(num_encoder_stages): return abs(num_encoder_layers / num_encoder_stages - num_decoder_layers / (num_stages - num_encoder_stages)) num_encoder_stages = np.argmin([objective(i) for i in range(1, num_stages)]) + 1 num_decoder_stages = num_stages - num_encoder_stages encoder_distribution = stage_manager.distribute_layers(num_encoder_layers, num_encoder_stages) decoder_distribution = stage_manager.distribute_layers(num_decoder_layers, num_decoder_stages) return encoder_distribution + decoder_distribution, num_encoder_stages def get_whisper_stage_index( self, layers_per_stage: List[int], stage: int, decoder_starting_stage: int ) -> Tuple[int, int]: """ Input the distribution of layers among stages, the current stage and the first stage of decoder. 
Return the starting/ending idx of layers in encoder/decoder """ stage_manager = self.pipeline_stage_manager assert stage_manager is not None, "pipeline_stage_manager is None" if stage < decoder_starting_stage: return stage_manager.get_stage_index(layers_per_stage[:decoder_starting_stage], stage) else: return stage_manager.get_stage_index( layers_per_stage[decoder_starting_stage:], stage - decoder_starting_stage, ) def get_held_layers(self) -> List[nn.Module]: assert self.pipeline_stage_manager is not None, "pipeline_stage_manager is None" stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "WhisperModel": model = self.model elif self.model.__class__.__name__ == "WhisperForConditionalGeneration": model = self.model.model else: model = None if model: encoder = self.model.get_encoder() decoder = self.model.get_decoder() else: # whisper for audio classification holds encoder only encoder = self.model.encoder decoder = None num_encoder_layers = len(encoder.layers) if decoder: num_decoder_layers = len(decoder.layers) else: num_decoder_layers = 0 held_layers = [] if stage_manager.is_interleave: layers_per_stage, decoder_starting_stage = self.distribute_whisper_layers( num_encoder_layers, num_decoder_layers, stage_manager.num_stages ) stage_indices = self.get_whisper_stage_index(layers_per_stage, stage_manager.stage, decoder_starting_stage) if stage_manager.stage < decoder_starting_stage: # current stage is in whisper's encoder if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(encoder.embed_positions) held_layers.append(encoder.conv1) held_layers.append(encoder.conv2) # interleaved: not use_zbv & stage_manager.stage == decoder_starting_stage - 1 # zbv: use_zbv & stage_manager.stage == first stage if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and decoder_starting_stage - 1 ): held_layers.append(encoder.layer_norm) for start_idx, end_idx in stage_indices: 
held_layers.extend(encoder.layers[start_idx:end_idx]) else: # current stage is in whisper's decoder # TODO:(Jianghai) We divide encoder and decoder layers into different parts here, # the case encoder and decoder put in same stage should be add in the future. if stage_manager.stage == decoder_starting_stage: held_layers.append(decoder.embed_tokens) held_layers.append(decoder.embed_positions) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(decoder.layer_norm) for start_idx, end_idx in stage_indices: held_layers.extend(encoder.layers[start_idx:end_idx]) else: layers_per_stage, decoder_starting_stage = self.distribute_whisper_layers( num_encoder_layers, num_decoder_layers, stage_manager.num_stages ) start_idx, end_idx = self.get_whisper_stage_index( layers_per_stage, stage_manager.stage, decoder_starting_stage ) if stage_manager.stage < decoder_starting_stage: # current stage is in whisper's encoder if stage_manager.is_first_stage(): held_layers.append(encoder.embed_positions) held_layers.append(encoder.conv1) held_layers.append(encoder.conv2) if stage_manager.stage == decoder_starting_stage - 1: held_layers.append(encoder.layer_norm) held_layers.extend(encoder.layers[start_idx:end_idx]) else: # current stage is in whisper's decoder # TODO:(Jianghai) We divide encoder and decoder layers into different parts here, # the case encoder and decoder put in same stage should be add in the future. 
if stage_manager.stage == decoder_starting_stage: held_layers.append(decoder.embed_tokens) held_layers.append(decoder.embed_positions) if stage_manager.is_last_stage(): held_layers.append(decoder.layer_norm) held_layers.extend(decoder.layers[start_idx:end_idx]) return held_layers def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if not self.pipeline_stage_manager: raise ValueError("set_pipeline_forward method can only be called when pipeline parallel is enabled.") stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "WhisperModel": model = self.model elif self.model.__class__.__name__ == "WhisperForConditionalGeneration": model = self.model.model else: model = None if model: encoder = self.model.get_encoder() decoder = self.model.get_decoder() else: encoder = self.model.encoder decoder = None num_encoder_layers = len(encoder.layers) if decoder: num_decoder_layers = len(decoder.layers) else: num_decoder_layers = 0 layers_per_stage, decoder_starting_stage = self.distribute_whisper_layers( num_encoder_layers, num_decoder_layers, stage_manager.num_stages ) stage_index = self.get_whisper_stage_index(layers_per_stage, stage_manager.stage, decoder_starting_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, decoder_starting_stage=decoder_starting_stage, shard_config=self.shard_config, ) } self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) # WhisperModel class WhisperModelPolicy(WhisperPolicy): def module_policy(self): from transformers import WhisperModel policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=WhisperModel, 
new_forward=WhisperPipelineForwards.whisper_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: return super().get_held_layers() def get_shared_params(self) -> List[Dict[int, Tensor]]:
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/llama.py
colossalai/shardformer/policies/llama.py
from functools import partial from typing import Callable, Dict, List, Union import torch.nn as nn from torch import Tensor from torch.nn import Module from colossalai.shardformer.layer import ( FusedRMSNorm, Linear1D_Col, Linear1D_Row, LinearWithGradAccum, PaddingEmbedding, PaddingLMHead, RMSNorm, VocabParallelEmbedding1D, VocabParallelLMHead1D, ) from ..modeling.llama import LlamaPipelineForwards, get_llama_flash_attention_forward from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["LlamaPolicy", "LlamaForCausalLMPolicy", "LlamaForSequenceClassificationPolicy"] class LlamaPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.origin_attn_implement = self.model.config._attn_implementation return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: from transformers.models.llama.modeling_llama import LlamaAttention, LlamaDecoderLayer, LlamaModel policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding if self.shard_config.enable_fused_normalization: norm_cls = FusedRMSNorm else: norm_cls = RMSNorm use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv sp_mode = self.shard_config.sequence_parallelism_mode or None sp_size = self.shard_config.sequence_parallel_size or None sp_group = self.shard_config.sequence_parallel_process_group or None sp_partial_derived = sp_mode in ["split_gather", "ring"] if sp_mode == "ring_attn" and not self.is_causal: raise ValueError("Ring attention is only meant for causal language modeling.") tp_size = self.shard_config.tensor_parallel_size # Modified by SP and TP num_q_heads = self.model.config.num_attention_heads num_kv_heads = getattr(self.model.config, "num_key_value_heads", None) if sp_mode == "all_to_all": num_q_heads //= 
sp_size decoder_attribute_replacement = {"num_heads": num_q_heads} if num_kv_heads: num_kv_heads //= sp_size decoder_attribute_replacement["num_key_value_heads"] = num_kv_heads policy[LlamaAttention] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, ) if self.shard_config.enable_flash_attention or self.shard_config.enable_sequence_parallelism: self.append_or_create_method_replacement( description={ "forward": get_llama_flash_attention_forward(self.shard_config, sp_mode, sp_size, sp_group), }, policy=policy, target_key=LlamaAttention, ) if self.pipeline_stage_manager is None: self.append_or_create_method_replacement( description={ "forward": partial( LlamaPipelineForwards.llama_model_forward, shard_config=self.shard_config, ), }, policy=policy, target_key=LlamaModel, ) # enable tp, replace layer to tp Linear1D_Col,Linear1D_Row, if self.shard_config.enable_tensor_parallelism: assert ( num_q_heads % tp_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." if hasattr(self.model.config, "num_key_value_heads"): assert ( num_kv_heads >= tp_size and num_kv_heads % tp_size == 0 ), f"The number of key_value heads must be divisible by, and must not be less than tensor parallel size." 
num_q_heads //= tp_size decoder_attribute_replacement = { "self_attn.hidden_size": self.model.config.hidden_size // tp_size, "self_attn.num_heads": num_q_heads, } if getattr(self.model.config, "num_key_value_heads", False): num_kv_heads //= tp_size decoder_attribute_replacement["self_attn.num_key_value_heads"] = num_kv_heads policy[LlamaDecoderLayer] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=Linear1D_Row, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.gate_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.up_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.down_proj", target_module=Linear1D_Row, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), ], ) # not enable tp, replace layer to LinearWithGradAccum elif use_zbv: policy[LlamaDecoderLayer] = 
ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.gate_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.up_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.down_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="embed_tokens", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), 
policy=policy, target_key=LlamaModel, ) # optimization configuration self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="input_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key=LlamaDecoderLayer, ) self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="norm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), policy=policy, target_key=LlamaModel, ) return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager is None: return stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "LlamaModel": module = self.model else: module = self.model.model if stage_manager.is_interleave: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_manager.stage_indices = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial(new_forward, stage_manager=stage_manager, shard_config=self.shard_config) } else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config ) } self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert 
self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "LlamaModel": module = self.model else: module = self.model.model stage_manager = self.pipeline_stage_manager held_layers = [] held_layers.append(module.rotary_emb) if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embed_tokens) for start_idx, end_idx in stage_indices: held_layers.extend(module.layers[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.norm) else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) if stage_manager.is_first_stage(): held_layers.append(module.embed_tokens) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.layers[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.norm) return held_layers class LlamaModelPolicy(LlamaPolicy): def module_policy(self): policy = super().module_policy() from transformers.models.llama.modeling_llama import LlamaModel if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=LlamaModel, new_forward=LlamaPipelineForwards.llama_model_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in llama model""" return [] class LlamaForCausalLMPolicy(LlamaPolicy): def module_policy(self): from transformers import LlamaForCausalLM self.is_causal = True policy = super().module_policy() if 
self.shard_config.enable_tensor_parallelism: # add a new item for causal lm new_item = { LlamaForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=VocabParallelLMHead1D, kwargs={ "gather_output": not self.shard_config.parallel_output, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ) ], ) } else: new_item = { LlamaForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=PaddingLMHead, kwargs={"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}, ) ], ) } policy.update(new_item) if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=LlamaForCausalLM, new_forward=LlamaPipelineForwards.llama_for_causal_lm_forward, policy=policy ) elif self.shard_config.enable_tensor_parallelism or self.shard_config.enable_sequence_parallelism: # Compute loss distributedly along the sequence dimension new_item[LlamaForCausalLM].method_replacement = { # "forward": get_lm_forward_with_dist_cross_entropy(self.shard_config) "forward": partial(LlamaPipelineForwards.llama_for_causal_lm_forward, shard_config=self.shard_config) } return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: if self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv: return [] llama_model = self.model.model if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if ( 
id(llama_model.embed_tokens.weight) == id(self.model.lm_head.weight) and self.pipeline_stage_manager.num_stages > 1 ): # tie weights return [ { 0: llama_model.embed_tokens.weight, self.pipeline_stage_manager.num_stages - 1: self.model.lm_head.weight, } ] return [] class LlamaForSequenceClassificationPolicy(LlamaPolicy): def module_policy(self): from transformers import LlamaForSequenceClassification policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # enable tp, replace layer to tp Linear1D_Col,Linear1D_Row, if self.shard_config.enable_tensor_parallelism: # add a new item for sequence classification new_item = { LlamaForSequenceClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="score", target_module=Linear1D_Col, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) # enable tp, replace layer to LinearWithGradAccum elif use_zbv: # add a new item for sequence classification new_item = { LlamaForSequenceClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="score", target_module=LinearWithGradAccum, kwargs=dict( fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) # to be confirmed if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=LlamaForSequenceClassification, new_forward=LlamaPipelineForwards.llama_for_sequence_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): 
held_layers.append(self.model.score) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in llama for sequence classification model""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/vit.py
colossalai/shardformer/policies/vit.py
import warnings from typing import Callable, Dict, List, Union import torch.nn as nn import colossalai.shardformer.layer as col_nn from colossalai.shardformer.layer import DropoutForReplicatedInput, Linear1D_Col from ..modeling.jit import get_jit_fused_dropout_add_func from ..modeling.vit import ( ViTForImageClassification_pipeline_forward, ViTForMaskedImageModeling_pipeline_forward, ViTModel_pipeline_forward, get_jit_fused_vit_intermediate_forward, get_jit_fused_vit_output_forward, get_vit_flash_self_attention_forward, ) from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["ViTPolicy", "ViTModelPolicy", "ViTForImageClassificationPolicy", "ViTForMaskedImageModelingPolicy"] class ViTPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.enable_bias_gelu_fused = self.shard_config.enable_jit_fused and self.model.config.hidden_act == "gelu" return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: from transformers.models.vit.modeling_vit import ( ViTEmbeddings, ViTIntermediate, ViTLayer, ViTOutput, ViTSelfAttention, ) policy = {} if self.shard_config.enable_sequence_parallelism: self.shard_config.enable_sequence_parallelism = False warnings.warn("Vit doesn't support sequence parallelism now, will ignore the sequence parallelism flag.") use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." 
policy[ViTEmbeddings] = ModulePolicyDescription( attribute_replacement={}, param_replacement=[], sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForReplicatedInput, ) ], ) policy[ViTLayer] = ModulePolicyDescription( attribute_replacement={ "attention.attention.num_attention_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, "attention.attention.all_head_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, }, param_replacement=[], sub_module_replacement=[ SubModuleReplacementDescription( suffix="attention.attention.query", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.key", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.value", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.output.dense", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.output.dropout", target_module=col_nn.DropoutForReplicatedInput, ), SubModuleReplacementDescription( suffix="intermediate.dense", target_module=col_nn.Linear1D_Col, kwargs={ "skip_bias_add": self.enable_bias_gelu_fused, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output.dense", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output.dropout", 
target_module=col_nn.DropoutForReplicatedInput, ), ], ) if self.enable_bias_gelu_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_vit_intermediate_forward(), }, policy=policy, target_key=ViTIntermediate, ) elif use_zbv: policy[ViTEmbeddings] = ModulePolicyDescription( attribute_replacement={}, param_replacement=[], sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=DropoutForReplicatedInput, ) ], ) policy[ViTLayer] = ModulePolicyDescription( param_replacement=[], sub_module_replacement=[ SubModuleReplacementDescription( suffix="attention.attention.query", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.key", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.value", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="attention.output.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.output.dropout", target_module=col_nn.DropoutForReplicatedInput, ), SubModuleReplacementDescription( suffix="intermediate.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "skip_bias_add": self.enable_bias_gelu_fused, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": 
self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output.dropout", target_module=col_nn.DropoutForReplicatedInput, ), ], ) if self.enable_bias_gelu_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_vit_intermediate_forward(), }, policy=policy, target_key=ViTIntermediate, ) # use flash attention if self.shard_config.enable_flash_attention: self.append_or_create_method_replacement( description={ "forward": get_vit_flash_self_attention_forward(), }, policy=policy, target_key=ViTSelfAttention, ) # use jit fused operator if self.shard_config.enable_jit_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_vit_output_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=ViTOutput, ) return policy def new_model_class(self): return None def postprocess(self): return self.model def get_held_layers(self) -> List[nn.Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None, "pipeline_stage_manager is None" if self.model.__class__.__name__ == "ViTModel": module = self.model else: module = self.model.vit stage_manager = self.pipeline_stage_manager held_layers = [] if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.encoder.layer)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embeddings) for start_idx, end_idx in stage_indices: held_layers.extend(module.encoder.layer[start_idx:end_idx]) else: layers_per_stage = stage_manager.distribute_layers(len(module.encoder.layer)) if stage_manager.is_first_stage(): held_layers.append(module.embeddings) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.encoder.layer[start_idx:end_idx]) return held_layers def 
set_pipeline_forward(self, model_cls: nn.Module, pipeline_forward: Callable, policy: Dict): if self.pipeline_stage_manager: stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "ViTModel": module = self.model else: module = self.model.vit layers_per_stage = stage_manager.distribute_layers(len(module.encoder.layer)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = {"forward": pipeline_forward(stage_manager=stage_manager, stage_index=stage_index)} self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=model_cls ) # ViTModel class ViTModelPolicy(ViTPolicy): def module_policy(self): from transformers.models.vit.modeling_vit import ViTModel policy = super().module_policy() if self.shard_config.pipeline_stage_manager is not None: self.set_pipeline_forward(model_cls=ViTModel, pipeline_forward=ViTModel_pipeline_forward, policy=policy) return policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() assert self.pipeline_stage_manager is not None, "pipeline_stage_manager is None" module = self.model stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.layernorm) held_layers.append(module.pooler) else: if stage_manager.is_last_stage(): held_layers.append(module.layernorm) held_layers.append(module.pooler) return held_layers # ViTForImageClassification class ViTForImageClassificationPolicy(ViTPolicy): def module_policy(self): from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: new_item = { ViTForImageClassification: 
ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="classifier", target_module=Linear1D_Col, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) elif use_zbv: new_item = { ViTForImageClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="classifier", target_module=col_nn.LinearWithGradAccum, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) if self.shard_config.pipeline_stage_manager is not None: self.set_pipeline_forward(model_cls=ViTModel, pipeline_forward=ViTModel_pipeline_forward, policy=policy) self.set_pipeline_forward( model_cls=ViTForImageClassification, pipeline_forward=ViTForImageClassification_pipeline_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() assert self.pipeline_stage_manager is not None, "pipeline_stage_manager is None" module = self.model.vit stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.layernorm) held_layers.append(self.model.classifier) else: if stage_manager.is_last_stage(): held_layers.append(module.layernorm) held_layers.append(self.model.classifier) return held_layers # ViTForMaskedImageModeling class ViTForMaskedImageModelingPolicy(ViTPolicy): def module_policy(self): from transformers.models.vit.modeling_vit import ViTForMaskedImageModeling, ViTModel policy = super().module_policy() if self.shard_config.pipeline_stage_manager is not None: self.set_pipeline_forward(model_cls=ViTModel, pipeline_forward=ViTModel_pipeline_forward, policy=policy) self.set_pipeline_forward( 
model_cls=ViTForMaskedImageModeling, pipeline_forward=ViTForMaskedImageModeling_pipeline_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() assert self.pipeline_stage_manager is not None, "pipeline_stage_manager is None" module = self.model.vit stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.layernorm) held_layers.append(self.model.decoder) else: if stage_manager.is_last_stage(): held_layers.append(module.layernorm) held_layers.append(self.model.decoder) return held_layers
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/falcon.py
colossalai/shardformer/policies/falcon.py
import warnings from functools import partial from typing import Callable, Dict, List from torch import Tensor, nn from torch.nn import Module import colossalai.shardformer.layer as col_nn from ..modeling.falcon import ( FalconPipelineForwards, build_falcon_alibi_tensor_fn, get_lm_forward_with_dist_cross_entropy, get_tp_falcon_decoder_layer_forward, ) from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["FalconPolicy"] class FalconPolicy(Policy): def __init__(self) -> None: super().__init__() def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() return self.model def module_policy(self): from transformers.models.falcon.modeling_falcon import FalconDecoderLayer, FalconModel if not self.model.config.new_decoder_architecture and self.model.config.multi_query: warnings.warn( "Falcon doesn't support tensor parallelism when (not new_decoder_architecture and multi_query) is True, will ignore the tensor parallelism flag." ) self.shard_config.enable_tensor_parallelism = False if self.shard_config.enable_sequence_parallelism: self.shard_config.enable_sequence_parallelism = False warnings.warn("Falcon doesn't support sequence parallelism now, will ignore the sequence parallelism flag.") policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = col_nn.VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = col_nn.PaddingEmbedding use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." assert ( self.model.config.num_kv_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of key_value heads must be divisible by tensor parallel size." 
attn_attribute_replacement = { "self_attention.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "self_attention.split_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "self_attention.num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, "self_attention.num_kv_heads": self.model.config.num_kv_heads // self.shard_config.tensor_parallel_size, } policy[FalconDecoderLayer] = ModulePolicyDescription( attribute_replacement=attn_attribute_replacement, method_replacement={"forward": get_tp_falcon_decoder_layer_forward()}, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attention.query_key_value", target_module=col_nn.Linear1D_Col, kwargs=dict( use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attention.dense", target_module=col_nn.Linear1D_Row, kwargs=dict( use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attention.attention_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="mlp.dense_h_to_4h", target_module=col_nn.Linear1D_Col, kwargs=dict( use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.dense_4h_to_h", target_module=col_nn.Linear1D_Row, kwargs=dict( use_zbv=use_zbv, ), ), ], ) policy[FalconModel] = ModulePolicyDescription( attribute_replacement={ "num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, }, method_replacement={ "build_alibi_tensor": build_falcon_alibi_tensor_fn(self.shard_config.tensor_parallel_process_group) }, ) elif use_zbv: policy[FalconDecoderLayer] = ModulePolicyDescription( method_replacement={"forward": get_tp_falcon_decoder_layer_forward()}, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attention.query_key_value", target_module=col_nn.LinearWithGradAccum, kwargs=dict( use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attention.dense", 
target_module=col_nn.LinearWithGradAccum, kwargs=dict( use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attention.attention_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="mlp.dense_h_to_4h", target_module=col_nn.LinearWithGradAccum, kwargs=dict( use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.dense_4h_to_h", target_module=col_nn.LinearWithGradAccum, kwargs=dict( use_zbv=use_zbv, ), ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="word_embeddings", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), ], policy=policy, target_key=FalconModel, ) # optimization configuration if self.shard_config.enable_fused_normalization: # handle falcon model self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="ln_f", target_module=col_nn.FusedLayerNorm, ), ], policy=policy, target_key=FalconModel, ) # handle falcon decoder layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="ln_attn", target_module=col_nn.FusedLayerNorm, ignore_if_not_exist=True ), SubModuleReplacementDescription( suffix="ln_mlp", target_module=col_nn.FusedLayerNorm, ignore_if_not_exist=True ), SubModuleReplacementDescription( suffix="input_layernorm", target_module=col_nn.FusedLayerNorm, ignore_if_not_exist=True ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=col_nn.FusedLayerNorm, ignore_if_not_exist=True ), ], policy=policy, target_key=FalconDecoderLayer, ) if self.shard_config.enable_flash_attention: warnings.warn("Falcon doesn't support 
flash attention now, fallback to transformers attention.") return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager: stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "FalconModel": module = self.model else: module = self.model.transformer layers_per_stage = stage_manager.distribute_layers(len(module.h)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config ) } self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=model_cls ) def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "FalconModel": module = self.model else: module = self.model.transformer stage_manager = self.pipeline_stage_manager held_layers = [] held_layers.append(module.rotary_emb) if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.h)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.word_embeddings) for start_idx, end_idx in stage_indices: held_layers.extend(module.h[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.ln_f) else: layers_per_stage = stage_manager.distribute_layers(len(module.h)) if stage_manager.is_first_stage(): 
held_layers.append(module.word_embeddings) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.h[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.ln_f) return held_layers class FalconModelPolicy(FalconPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): policy = super().module_policy() from transformers.models.falcon.modeling_falcon import FalconModel if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=FalconModel, new_forward=FalconPipelineForwards.falcon_model_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """no shared params in falcon model""" return [] class FalconForCausalLMPolicy(FalconPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): from transformers.models.falcon.modeling_falcon import FalconForCausalLM policy = super().module_policy() # handle tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.VocabParallelLMHead1D, kwargs=dict( gather_output=not self.shard_config.parallel_output, make_vocab_size_divisible_by=self.shard_config.make_vocab_size_divisible_by, ), ), policy=policy, target_key=FalconForCausalLM, ) if self.shard_config.parallel_output: method_replacement = {"forward": get_lm_forward_with_dist_cross_entropy(self.shard_config)} self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=FalconForCausalLM ) else: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.PaddingLMHead, 
kwargs=dict(make_vocab_size_divisible_by=self.shard_config.make_vocab_size_divisible_by), ), policy=policy, target_key=FalconForCausalLM, ) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=FalconForCausalLM, new_forward=FalconPipelineForwards.falcon_for_causal_lm_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) else: if stage_manager.is_last_stage(): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: falcon_model = self.model if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if id(falcon_model.transformer.word_embeddings.weight) == id(falcon_model.lm_head.weight): # tie weights return [ { 0: falcon_model.transformer.word_embeddings.weight, self.pipeline_stage_manager.num_stages - 1: falcon_model.lm_head.weight, } ] return [] class FalconForSequenceClassificationPolicy(FalconPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): from transformers.models.falcon.modeling_falcon import FalconForSequenceClassification policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # handle tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="score", target_module=col_nn.Linear1D_Col, kwargs=dict(gather_output=True, use_zbv=use_zbv) ), policy=policy, target_key=FalconForSequenceClassification, ) elif use_zbv: self.append_or_create_submodule_replacement( 
description=SubModuleReplacementDescription( suffix="score", target_module=col_nn.LinearWithGradAccum, kwargs=dict(gather_output=True, use_zbv=use_zbv), ), policy=policy, target_key=FalconForSequenceClassification, ) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=FalconForSequenceClassification, new_forward=FalconPipelineForwards.falcon_for_sequence_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.score) else: if stage_manager.is_last_stage(): held_layers.append(self.model.score) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in falcon for sequence classification model""" return [] class FalconForTokenClassificationPolicy(FalconPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): from transformers.models.falcon.modeling_falcon import FalconForTokenClassification policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # handle tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="classifier", target_module=col_nn.Linear1D_Col, kwargs=dict(gather_output=True, use_zbv=use_zbv), ), SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForReplicatedInput, ), ], policy=policy, target_key=FalconForTokenClassification, ) elif use_zbv: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="classifier", 
target_module=col_nn.LinearWithGradAccum, kwargs=dict(gather_output=True, use_zbv=use_zbv), ), SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForReplicatedInput, ), ], policy=policy, target_key=FalconForTokenClassification, ) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=FalconForTokenClassification, new_forward=FalconPipelineForwards.falcon_for_token_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) else: if stage_manager.is_last_stage(): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in falcon for token classification model""" return [] class FalconForQuestionAnsweringPolicy(FalconPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): from transformers.models.falcon.modeling_falcon import FalconForQuestionAnswering policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # handle tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="qa_outputs", target_module=col_nn.Linear1D_Col, kwargs=dict(gather_output=True, use_zbv=use_zbv), ), policy=policy, target_key=FalconForQuestionAnswering, ) elif use_zbv: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="qa_outputs", 
target_module=col_nn.Linear1D_Col, kwargs=dict(gather_output=True, use_zbv=use_zbv), ), policy=policy, target_key=FalconForQuestionAnswering, ) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=FalconForQuestionAnswering, new_forward=FalconPipelineForwards.falcon_for_question_answering_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.qa_outputs) else: if stage_manager.is_last_stage(): held_layers.append(self.model.qa_outputs) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in falcon for question answering model""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/chatglm2.py
colossalai/shardformer/policies/chatglm2.py
import warnings from functools import partial from typing import Callable, Dict, List, Union import torch.nn as nn from torch import Tensor import colossalai.shardformer.layer as col_nn from colossalai.shardformer.modeling.chatglm2 import ChatGLMPipelineForwards from ..modeling.chatglm2 import ( get_chatglm_sequence_parallel_attention_forward, get_chatglm_sequence_parallel_forward_fn, get_flash_attention_forward_for_chat_glm_model, get_flash_core_attention_forward, get_jit_fused_glm_block_forward, ) from ..modeling.jit import get_jit_fused_dropout_add_func from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = [ "ChatGLMPolicy", "ChatGLMModelPolicy", "ChatGLMForConditionalGenerationPolicy", ] class ChatGLMPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): if self.pipeline_stage_manager is not None: # the batch_size_dim is bounded to Model bsz_dim = 1 setattr(self.model, "batch_size_dim", bsz_dim) self.tie_weight = self.tie_weight_check() return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = col_nn.VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = col_nn.PaddingEmbedding if self.shard_config.enable_fused_normalization: if self.model.config.rmsnorm: norm_cls = col_nn.FusedRMSNorm else: norm_cls = col_nn.FusedLayerNorm else: if self.model.config.rmsnorm: norm_cls = col_nn.RMSNorm else: norm_cls = col_nn.LayerNorm sp_mode = self.shard_config.sequence_parallelism_mode or None sp_size = self.shard_config.sequence_parallel_size or None sp_group = self.shard_config.sequence_parallel_process_group or None if sp_mode == "ring": warnings.warn( f"For ChatGLM2, sequence parallelism doesn't support mode {sp_mode} yet, will set to be split_gather" ) sp_mode = "split_gather" sp_partial_derived = sp_mode in ["split_gather"] if sp_mode == "all_to_all": 
decoder_attribute_replacement = { "num_heads": self.model.config.num_attention_heads // sp_size, "hidden_size_per_partition": self.model.config.kv_channels * self.model.config.num_attention_heads // sp_size, } if getattr(self.model.config, "num_key_value_heads", False): decoder_attribute_replacement["num_key_value_heads"] = self.model.config.num_key_value_heads // sp_size policy["CoreAttention"] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, ) use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"num_attention_heads {self.model.config.num_attention_heads} should be divisible by tensor_parallel_size {self.shard_config.tensor_parallel_size}" attn_kwargs = { "self_attention.qkv_hidden_size": ( self.model.config.kv_channels * self.model.config.num_attention_heads * 3 ) // self.shard_config.tensor_parallel_size, } if self.model.config.multi_query_attention: assert ( self.model.config.multi_query_group_num % self.shard_config.tensor_parallel_size == 0 ), f"multi_query_group_num {self.model.config.multi_query_group_num} should be divisible by tensor_parallel_size {self.shard_config.tensor_parallel_size}" attn_kwargs["self_attention.num_multi_query_groups_per_partition"] = ( self.model.config.multi_query_group_num // self.shard_config.tensor_parallel_size ) attn_kwargs["self_attention.qkv_hidden_size"] = ( self.model.config.kv_channels * self.model.config.num_attention_heads + 2 * self.model.config.kv_channels * self.model.config.multi_query_group_num ) // self.shard_config.tensor_parallel_size policy["GLMBlock"] = ModulePolicyDescription( attribute_replacement={ "self_attention.num_attention_heads_per_partition": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, "self_attention.projection_size": ( self.model.config.kv_channels * 
self.model.config.num_attention_heads ) // self.shard_config.tensor_parallel_size, "self_attention.core_attention.num_attention_heads_per_partition": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, "self_attention.core_attention.hidden_size_per_partition": self.model.config.kv_channels * self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, **attn_kwargs, }, param_replacement=[], sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attention.query_key_value", target_module=col_nn.Linear1D_Col, kwargs={ "seq_parallel_mode": sp_mode, "seq_parallel_dim": 0, "fp8_communication": self.shard_config.fp8_communication, }, ), SubModuleReplacementDescription( suffix="self_attention.dense", target_module=col_nn.Linear1D_Row, kwargs={ "seq_parallel_mode": sp_mode, "seq_parallel_dim": 0, "fp8_communication": self.shard_config.fp8_communication, }, ), SubModuleReplacementDescription( suffix="self_attention.core_attention.attention_dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) elif use_zbv: policy["GLMBlock"] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attention.query_key_value", target_module=col_nn.Linear1D_Col, kwargs={ "seq_parallel_mode": sp_mode, "seq_parallel_dim": 0, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attention.dense", target_module=col_nn.Linear1D_Row, kwargs={ "seq_parallel_mode": sp_mode, "seq_parallel_dim": 0, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attention.core_attention.attention_dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="embedding.word_embeddings", target_module=embedding_cls, kwargs=( 
{ "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), ], policy=policy, target_key="ChatGLMModel", ) # optimization configuration self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="input_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key="GLMBlock", ) if self.model.config.post_layer_norm: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="encoder.final_layernorm", target_module=norm_cls, ) ], policy=policy, target_key="ChatGLMModel", ) # use flash attention if self.shard_config.enable_flash_attention: self.append_or_create_method_replacement( description={ "forward": get_flash_core_attention_forward(), }, policy=policy, target_key="CoreAttention", ) self.append_or_create_method_replacement( description={ "forward": get_flash_attention_forward_for_chat_glm_model(), }, policy=policy, target_key="ChatGLMModel", ) # use sequence parallel if self.shard_config.enable_sequence_parallelism: self.append_or_create_method_replacement( description={ "forward": get_chatglm_sequence_parallel_attention_forward( self.shard_config, sp_mode, sp_size, sp_group ), }, policy=policy, target_key="SelfAttention", ) if self.pipeline_stage_manager is None: self.append_or_create_method_replacement( description={ "forward": get_chatglm_sequence_parallel_forward_fn( self.shard_config, sp_mode, sp_size, sp_group ) }, policy=policy, target_key="ChatGLMModel", ) # use jit fused operator if self.shard_config.enable_jit_fused: self.append_or_create_method_replacement( 
description={ "forward": get_jit_fused_glm_block_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key="GLMBlock", ) return policy def postprocess(self): return self.model def get_held_layers(self) -> List[nn.Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "ChatGLMModel": module = self.model else: module = self.model.transformer stage_manager = self.pipeline_stage_manager held_layers = [] if stage_manager.is_interleave: layers_per_stage = stage_manager.distribute_layers(module.num_layers) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embed_tokens) for start_idx, end_idx in stage_indices: held_layers.extend(module.layers[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): if module.encoder.post_layer_norm: held_layers.append(module.encoder.final_layernorm) else: layers_per_stage = stage_manager.distribute_layers(module.num_layers) if stage_manager.is_first_stage(): held_layers.append(module.embedding) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.encoder.layers[start_idx:end_idx]) if stage_manager.is_last_stage(): if module.encoder.post_layer_norm: held_layers.append(module.encoder.final_layernorm) # rotary_pos_emb is needed for all stages held_layers.append(module.rotary_pos_emb) return held_layers def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if not self.pipeline_stage_manager: raise ValueError("set_pipeline_forward method can only be called when pipeline parallel 
is enabled.") stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "ChatGLMModel": module = self.model else: module = self.model.transformer layers_per_stage = stage_manager.distribute_layers(module.num_layers) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config, ) } self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) class ChatGLMModelPolicy(ChatGLMPolicy): def module_policy(self): pass policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls="ChatGLMModel", new_forward=ChatGLMPipelineForwards.chatglm_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: return super().get_held_layers() def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in ChatGLMModel.""" return [] class ChatGLMForConditionalGenerationPolicy(ChatGLMModelPolicy): def module_policy(self): policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls="ChatGLMForConditionalGeneration", new_forward=ChatGLMPipelineForwards.chatglm_for_conditional_generation_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.transformer.output_layer) else: if self.pipeline_stage_manager.is_last_stage(): held_layers.append(self.model.transformer.output_layer) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in 
ChatGLMForConditionalGenerationModel.""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/qwen3.py
colossalai/shardformer/policies/qwen3.py
# Modifed from qwen2 policy from functools import partial from typing import Callable, Dict, List, Union import torch.nn as nn from torch import Tensor from torch.nn import Module from transformers.models.qwen3.modeling_qwen3 import ( Qwen3Attention, Qwen3DecoderLayer, Qwen3ForCausalLM, Qwen3ForSequenceClassification, Qwen3Model, ) from colossalai.shardformer.layer import ( FusedRMSNorm, Linear1D_Col, Linear1D_Row, LinearWithGradAccum, PaddingEmbedding, RMSNorm, VocabParallelEmbedding1D, ) from ..modeling.qwen3 import ( Qwen3PipelineForwards, get_lm_forward_with_dist_cross_entropy, get_qwen3_flash_attention_forward, get_qwen3_model_forward_for_flash_attn, ) from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["Qwen3Policy", "Qwen3ForCausalLMPolicy", "Qwen3ForSequenceClassificationPolicy"] class Qwen3Policy(Policy): def __init__(self) -> None: super().__init__() import transformers from packaging.version import Version assert Version(transformers.__version__) >= Version( "4.51.0" ), "The Qwen3 model should run on a transformers version of 4.51.0 or higher." 
def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.origin_attn_implement = self.model.config._attn_implementation return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding norm_cls = FusedRMSNorm if self.shard_config.enable_fused_normalization else RMSNorm sp_mode = self.shard_config.sequence_parallelism_mode or None sp_size = self.shard_config.sequence_parallel_size or None sp_group = self.shard_config.sequence_parallel_process_group or None sp_partial_derived = sp_mode in ["split_gather", "ring"] if sp_mode == "all_to_all": decoder_attribute_replacement = { "num_heads": self.model.config.num_attention_heads // sp_size, } if getattr(self.model.config, "num_key_value_heads", False): decoder_attribute_replacement["num_key_value_heads"] = self.model.config.num_key_value_heads // sp_size policy[Qwen3Attention] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, ) use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." if hasattr(self.model.config, "num_key_value_heads"): assert ( self.model.config.num_key_value_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of key_value heads must be divisible by tensor parallel size." 
decoder_attribute_replacement = { "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "self_attn.num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, } if getattr(self.model.config, "num_key_value_heads", False): decoder_attribute_replacement["self_attn.num_key_value_heads"] = ( self.model.config.num_key_value_heads // self.shard_config.tensor_parallel_size ) policy[Qwen3DecoderLayer] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=Linear1D_Row, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.gate_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.up_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.down_proj", target_module=Linear1D_Row, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, 
use_zbv=use_zbv, ), ), ], ) elif use_zbv: policy[Qwen3DecoderLayer] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.gate_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.up_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.down_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="embed_tokens", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if 
self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, target_key=Qwen3Model, ) # optimization configuration self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="input_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key=Qwen3DecoderLayer, ) self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="norm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), policy=policy, target_key=Qwen3Model, ) if self.shard_config.enable_flash_attention or self.shard_config.enable_sequence_parallelism: self.append_or_create_method_replacement( description={ "forward": get_qwen3_flash_attention_forward(self.shard_config, sp_mode, sp_size, sp_group), }, policy=policy, target_key=Qwen3Attention, ) if self.pipeline_stage_manager is None: # replace qwen3 model forward method self.append_or_create_method_replacement( description={ "forward": get_qwen3_model_forward_for_flash_attn( self.shard_config, sp_mode, sp_size, sp_group ), }, policy=policy, target_key=Qwen3Model, ) return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager is None: return stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "Qwen3Model": module = self.model else: module = self.model.model if stage_manager.is_interleave: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) 
stage_manager.stage_indices = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial(new_forward, stage_manager=stage_manager, shard_config=self.shard_config) } else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config ) } self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=model_cls ) self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "Qwen3Model": module = self.model else: module = self.model.model stage_manager = self.pipeline_stage_manager held_layers = [] held_layers.append(module.rotary_emb) if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embed_tokens) for start_idx, end_idx in stage_indices: held_layers.extend(module.layers[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.norm) else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) if stage_manager.is_first_stage(): held_layers.append(module.embed_tokens) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.layers[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.norm) return held_layers 
class Qwen3ModelPolicy(Qwen3Policy): def module_policy(self): policy = super().module_policy() if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=Qwen3Model, new_forward=Qwen3PipelineForwards.qwen3_model_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in Qwen3 model""" return [] class Qwen3ForCausalLMPolicy(Qwen3Policy): def module_policy(self): policy = super().module_policy() setattr(self.shard_config, "causal_lm", True) use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: # add a new item for casual lm new_item = { Qwen3ForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=Linear1D_Col, kwargs=dict(fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv), ) ], method_replacement={"forward": get_lm_forward_with_dist_cross_entropy(self.shard_config)}, ) } policy.update(new_item) elif use_zbv: # add a new item for casual lm new_item = { Qwen3ForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=LinearWithGradAccum, kwargs=dict(fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv), ) ], method_replacement={"forward": get_lm_forward_with_dist_cross_entropy(self.shard_config)}, ) } policy.update(new_item) if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=Qwen3ForCausalLM, new_forward=Qwen3PipelineForwards.qwen3_for_causal_lm_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if 
stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) else: if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: qwen3_model = self.model.model if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if ( id(qwen3_model.embed_tokens.weight) == id(self.model.lm_head.weight) and self.pipeline_stage_manager.num_stages > 1 ): # tie weights return [ { 0: qwen3_model.embed_tokens.weight, self.pipeline_stage_manager.num_stages - 1: self.model.lm_head.weight, } ] return [] class Qwen3ForSequenceClassificationPolicy(Qwen3Policy): def module_policy(self): policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: # add a new item for sequence classification new_item = { Qwen3ForSequenceClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="score", target_module=Linear1D_Col, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) elif use_zbv: new_item = { Qwen3ForSequenceClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="score", target_module=LinearWithGradAccum, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) # to be confirmed if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=Qwen3ForSequenceClassification, new_forward=Qwen3PipelineForwards.qwen3_for_sequence_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: 
"""Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.score) else: if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.score) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in Qwen3 for sequence classification model""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/gptj.py
colossalai/shardformer/policies/gptj.py
import warnings from functools import partial from typing import Callable, Dict, List from torch import Tensor, nn import colossalai.shardformer.layer as col_nn from ..modeling.gptj import ( GPTJPipelineForwards, get_gptj_flash_attention_forward, gptj_model_forward_for_flash_attention, ) from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = [ "GPTJPolicy", "GPTJModelPolicy", "GPTJForCausalLMPolicy", "GPTJForSequenceClassificationPolicy", "GPTJForQuestionAnsweringPolicy", "FlaxGPTJPolicy", "FlaxGPTJForCausalLMPolicy", ] class GPTJPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.origin_attn_implement = self.model.config._attn_implementation return self.model def module_policy(self): from transformers.models.gptj.modeling_gptj import GPTJ_ATTENTION_CLASSES, GPTJBlock, GPTJModel policy = {} attn_cls = GPTJ_ATTENTION_CLASSES[self.origin_attn_implement] embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = col_nn.VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = col_nn.PaddingEmbedding if self.shard_config.enable_sequence_parallelism: self.shard_config.enable_sequence_parallelism = False warnings.warn("GPTJ doesn't support sequence parallelism now, will ignore the sequence parallelism flag.") use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." 
policy[GPTJModel] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="drop", target_module=col_nn.DropoutForParallelInput, ), ] ) policy[GPTJBlock] = ModulePolicyDescription( attribute_replacement={ "attn.embed_dim": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "attn.num_attention_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="attn.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.fc_in", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.fc_out", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.attn_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="attn.resid_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="mlp.dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) elif use_zbv: policy[GPTJBlock] = ModulePolicyDescription( sub_module_replacement=[ 
SubModuleReplacementDescription( suffix="attn.k_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.v_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.fc_in", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.fc_out", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.attn_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="attn.resid_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="mlp.dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="wte", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, target_key=GPTJModel, ) # optimization configuration if 
self.shard_config.enable_fused_normalization: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="ln_f", target_module=col_nn.FusedLayerNorm, ), policy=policy, target_key=GPTJModel, ) self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="ln_1", target_module=col_nn.FusedLayerNorm, ) ], policy=policy, target_key=GPTJBlock, ) if self.shard_config.enable_flash_attention: self.append_or_create_method_replacement( description={ "forward": get_gptj_flash_attention_forward(), }, policy=policy, target_key=attn_cls, ) if not self.shard_config.pipeline_stage_manager: self.append_or_create_method_replacement( description={"forward": gptj_model_forward_for_flash_attention(self.shard_config)}, policy=policy, target_key=GPTJModel, ) return policy def postprocess(self): return self.model def get_held_layers(self) -> List[nn.Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "GPTJModel": module = self.model else: module = self.model.transformer stage_manager = self.pipeline_stage_manager held_layers = [] layers_per_stage = stage_manager.distribute_layers(len(module.h)) if stage_manager.is_interleave: if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.wte) held_layers.append(module.drop) stage_indices = stage_manager.get_stage_index(layers_per_stage) for start_idx, end_idx in stage_indices: held_layers.extend(module.h[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.ln_f) else: if stage_manager.is_first_stage(): held_layers.append(module.wte) held_layers.append(module.drop) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.h[start_idx:end_idx]) if stage_manager.is_last_stage(): 
held_layers.append(module.ln_f) return held_layers def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if not self.pipeline_stage_manager: raise ValueError("set_pipeline_forward method can only be called when pipeline parallel is enabled.") stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "GPTJModel": module = self.model else: module = self.model.transformer layers_per_stage = stage_manager.distribute_layers(len(module.h)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config, ) } self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) # GPTJModel class GPTJModelPolicy(GPTJPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): from transformers.models.gptj.modeling_gptj import GPTJModel policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPTJModel, new_forward=GPTJPipelineForwards.gptj_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: return super().get_held_layers() def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in GPT2Model.""" return [] # GPTJForCausalLM class GPTJForCausalLMPolicy(GPTJPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): from transformers.models.gptj.modeling_gptj import GPTJForCausalLM policy = super().module_policy() if self.shard_config.enable_tensor_parallelism: addon_module = { GPTJForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", 
target_module=col_nn.VocabParallelLMHead1D, kwargs={ "gather_output": True, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ) ] ) } else: addon_module = { GPTJForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.PaddingLMHead, kwargs={"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}, ) ] ) } policy.update(addon_module) if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPTJForCausalLM, new_forward=GPTJPipelineForwards.gptj_causallm_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) else: if self.pipeline_stage_manager.is_last_stage(): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """The weights of wte and lm_head are shared.""" module = self.model stage_manager = self.pipeline_stage_manager if stage_manager is not None: if stage_manager.num_stages > 1 and id(module.transformer.wte.weight) == id(module.lm_head.weight): first_stage, last_stage = 0, stage_manager.num_stages - 1 return [ { first_stage: module.transformer.wte.weight, last_stage: module.lm_head.weight, } ] return [] # GPTJForSequenceClassification class GPTJForSequenceClassificationPolicy(GPTJPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): from transformers.models.gptj.modeling_gptj import GPTJForSequenceClassification policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( 
model_cls=GPTJForSequenceClassification, new_forward=GPTJPipelineForwards.gptj_for_sequence_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.score) else: if self.pipeline_stage_manager.is_last_stage(): held_layers.append(self.model.score) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in GPTJForSequenceClassification.""" return [] # GPTJForQuestionAnswering class GPTJForQuestionAnsweringPolicy(GPTJPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): from transformers.models.gptj.modeling_gptj import GPTJForQuestionAnswering policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPTJForQuestionAnswering, new_forward=GPTJPipelineForwards.gptj_for_question_answering_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.qa_outputs) else: if self.pipeline_stage_manager.is_last_stage(): held_layers.append(self.model.qa_outputs) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in GPT2ForQuestionAnswering.""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/bloom.py
colossalai/shardformer/policies/bloom.py
import warnings from functools import partial from typing import Callable, Dict, List import torch.nn as nn from torch import Tensor from torch.nn import Module import colossalai.shardformer.layer as col_nn from ..modeling.bloom import ( BloomPipelineForwards, build_bloom_alibi_tensor_fn, get_bloom_sequence_parallel_attention_forward, get_bloom_sequence_parallel_forward_fn, get_jit_fused_bloom_attention_forward, get_jit_fused_bloom_gelu_forward, get_jit_fused_bloom_mlp_forward, get_lm_forward_with_dist_cross_entropy, ) from ..modeling.jit import get_jit_fused_dropout_add_func, get_jit_fused_gelu_forward_func from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription class BloomPolicy(Policy): def __init__(self) -> None: super().__init__() def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() return self.model def module_policy(self): from transformers.models.bloom.modeling_bloom import BloomAttention, BloomBlock, BloomGelu, BloomMLP, BloomModel policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = col_nn.VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = col_nn.PaddingEmbedding if self.shard_config.enable_fused_normalization: norm_cls = col_nn.FusedLayerNorm else: norm_cls = col_nn.LayerNorm sp_mode = self.shard_config.sequence_parallelism_mode or None assert sp_mode != "all_to_all", "all_to_all sequence parallelism is not supported for BLOOM" if sp_mode == "ring": warnings.warn( f"For BLOOM, sequence parallelism is currently not support mode {sp_mode}, will set to be split_gather" ) sp_mode = "split_gather" sp_partial_derived = sp_mode == "split_gather" use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_sequence_parallelism: self.append_or_create_method_replacement( description={ "forward": get_bloom_sequence_parallel_attention_forward(self.shard_config), }, 
policy=policy, target_key=BloomAttention, ) if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.n_head % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." policy[BloomBlock] = ModulePolicyDescription( attribute_replacement={ "self_attention.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "self_attention.split_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "self_attention.num_heads": self.model.config.n_head // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attention.query_key_value", target_module=col_nn.Linear1D_Col, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attention.dense", target_module=col_nn.Linear1D_Row, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attention.attention_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="mlp.dense_h_to_4h", target_module=col_nn.Linear1D_Col, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.dense_4h_to_h", target_module=col_nn.Linear1D_Row, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[BloomModel] = ModulePolicyDescription( attribute_replacement={ "num_heads": self.model.config.n_head // self.shard_config.tensor_parallel_size, }, method_replacement={ "build_alibi_tensor": build_bloom_alibi_tensor_fn(self.shard_config.tensor_parallel_process_group) }, ) if use_zbv: policy[BloomBlock] = 
ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attention.query_key_value", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attention.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attention.attention_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="mlp.dense_h_to_4h", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.dense_4h_to_h", target_module=col_nn.LinearWithGradAccum, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="word_embeddings", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), ], policy=policy, target_key=BloomModel, ) # optimization configuration # handle bloom model self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="ln_f", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="word_embeddings_layernorm", target_module=norm_cls, ), ], policy=policy, target_key=BloomModel, ) # handle bloom block self.append_or_create_submodule_replacement( 
description=[ SubModuleReplacementDescription( suffix="input_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key=BloomBlock, ) if sp_mode == "split_gather": self.append_or_create_method_replacement( description={"forward": get_bloom_sequence_parallel_forward_fn(self.shard_config)}, policy=policy, target_key=BloomModel, ) # enable jit fused operator if self.shard_config.enable_jit_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_bloom_attention_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=BloomAttention, ) self.append_or_create_method_replacement( description={ "forward": get_jit_fused_bloom_mlp_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=BloomMLP, ) self.append_or_create_method_replacement( description={ "forward": get_jit_fused_bloom_gelu_forward(), "bloom_gelu_forward": get_jit_fused_gelu_forward_func(), }, policy=policy, target_key=BloomGelu, ) return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager: stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "BloomModel": module = self.model else: module = self.model.transformer layers_per_stage = stage_manager.distribute_layers(len(module.h)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config ) } 
self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=model_cls ) return def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "BloomModel": module = self.model else: module = self.model.transformer stage_manager = self.pipeline_stage_manager held_layers = [] if stage_manager.is_interleave: layers_per_stage = stage_manager.distribute_layers(len(module.h)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.word_embeddings) held_layers.append(module.word_embeddings_layernorm) for start_idx, end_idx in stage_indices: held_layers.extend(module.h[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.ln_f) else: layers_per_stage = stage_manager.distribute_layers(len(module.h)) if stage_manager.is_first_stage(): held_layers.append(module.word_embeddings) held_layers.append(module.word_embeddings_layernorm) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.h[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.ln_f) return held_layers class BloomModelPolicy(BloomPolicy): def module_policy(self): policy = super().module_policy() from transformers.models.bloom.modeling_bloom import BloomModel if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BloomModel, new_forward=BloomPipelineForwards.bloom_model_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """ get pipeline layers for current stage """ held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """no shared params in bloom model""" return [] 
class BloomForCausalLMPolicy(BloomPolicy): def module_policy(self): from transformers.models.bloom.modeling_bloom import BloomForCausalLM policy = super().module_policy() # handle tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.VocabParallelLMHead1D, kwargs=dict( gather_output=not self.shard_config.parallel_output, make_vocab_size_divisible_by=self.shard_config.make_vocab_size_divisible_by, fp8_communication=self.shard_config.fp8_communication, ), ), policy=policy, target_key=BloomForCausalLM, ) if self.shard_config.parallel_output: method_replacement = {"forward": get_lm_forward_with_dist_cross_entropy(self.shard_config)} self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=BloomForCausalLM ) else: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.PaddingLMHead, kwargs=dict(make_vocab_size_divisible_by=self.shard_config.make_vocab_size_divisible_by), ), policy=policy, target_key=BloomForCausalLM, ) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BloomForCausalLM, new_forward=BloomPipelineForwards.bloom_for_causal_lm_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) else: if stage_manager.is_last_stage(): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: bloom_model = self.model if self.pipeline_stage_manager and 
self.pipeline_stage_manager.num_stages > 1: if id(bloom_model.transformer.word_embeddings.weight) == id(bloom_model.lm_head.weight): # tie weights return [ { 0: bloom_model.transformer.word_embeddings.weight, self.pipeline_stage_manager.num_stages - 1: bloom_model.lm_head.weight, } ] return [] class BloomForSequenceClassificationPolicy(BloomPolicy): def module_policy(self): from transformers.models.bloom.modeling_bloom import BloomForSequenceClassification policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # handle tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="score", target_module=col_nn.Linear1D_Col, kwargs=dict(gather_output=True, fp8_communication=self.shard_config.fp8_communication), ), policy=policy, target_key=BloomForSequenceClassification, ) elif use_zbv: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="score", target_module=col_nn.LinearWithGradAccum, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv ), ), policy=policy, target_key=BloomForSequenceClassification, ) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BloomForSequenceClassification, new_forward=BloomPipelineForwards.bloom_for_sequence_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.score) else: if stage_manager.is_last_stage(): held_layers.append(self.model.score) return held_layers def 
get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in bloom for sequence classification model""" return [] class BloomForTokenClassificationPolicy(BloomPolicy): def module_policy(self): from transformers.models.bloom.modeling_bloom import BloomForTokenClassification policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # handle tensor parallelism if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="classifier", target_module=col_nn.Linear1D_Col, kwargs=dict(gather_output=True, fp8_communication=self.shard_config.fp8_communication), ), SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForReplicatedInput, ), ], policy=policy, target_key=BloomForTokenClassification, ) elif use_zbv: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="classifier", target_module=col_nn.LinearWithGradAccum, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv ), ), SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForReplicatedInput, ), ], policy=policy, target_key=BloomForTokenClassification, ) if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BloomForTokenClassification, new_forward=BloomPipelineForwards.bloom_for_token_classification_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) else: if 
stage_manager.is_last_stage(): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in bloom for token classification model""" return [] class BloomForQuestionAnsweringPolicy(BloomPolicy): # No head sharding as the output features is only 2 def module_policy(self): from transformers.models.bloom.modeling_bloom import BloomForQuestionAnswering policy = super().module_policy() if self.pipeline_stage_manager: self.set_pipeline_forward( model_cls=BloomForQuestionAnswering, new_forward=BloomPipelineForwards.bloom_for_question_answering_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.qa_outputs) else: if stage_manager.is_last_stage(): held_layers.append(self.model.qa_outputs) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in bloom for question answering model""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/command.py
colossalai/shardformer/policies/command.py
from functools import partial from typing import Callable, Dict, List, Union import torch.nn as nn from torch import Tensor from torch.nn import Module from colossalai.shardformer.layer import ( Linear1D_Col, Linear1D_Row, LinearWithGradAccum, PaddingEmbedding, PaddingLMHead, VocabParallelEmbedding1D, VocabParallelLMHead1D, ) from ..modeling.command import ( CommandPipelineForwards, get_command_flash_attention_forward, get_command_flash_attention_model_forward, get_lm_forward_with_dist_cross_entropy, ) from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["CommandPolicy", "CommandForCausalLMPolicy"] class CommandPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.origin_attn_implement = self.model.config._attn_implementation return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: from transformers.models.cohere.modeling_cohere import CohereAttention, CohereDecoderLayer, CohereModel # The eager, flash_attention_2, sdpa will all be passed to CohereAttention in v4.51.3 transformers. ATTN_IMPLEMENTATION = { "eager": CohereAttention, "flash_attention_2": CohereAttention, "sdpa": CohereAttention, } policy = {} attn_cls = ATTN_IMPLEMENTATION[self.origin_attn_implement] embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding # CohereLayerNorm has no bias in v4.51.3 transformers, so we don't replace it. 
sp_mode = self.shard_config.sequence_parallelism_mode or None sp_size = self.shard_config.sequence_parallel_size or None sp_group = self.shard_config.sequence_parallel_process_group or None if sp_mode == "ring_attn" and not self.is_causal: raise ValueError("Ring attention is only meant for causal language modeling.") tp_size = self.shard_config.tensor_parallel_size or None num_q_heads = self.model.config.num_attention_heads num_kv_heads = getattr(self.model.config, "num_key_value_heads", None) if sp_mode == "all_to_all": num_q_heads //= sp_size decoder_attribute_replacement = {"num_heads": num_q_heads} if num_kv_heads: num_kv_heads //= sp_size decoder_attribute_replacement["num_key_value_heads"] = num_kv_heads policy[attn_cls] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, ) self.append_or_create_method_replacement( description={ "forward": get_command_flash_attention_forward(self.shard_config, sp_mode, sp_size, sp_group), }, policy=policy, target_key=attn_cls, ) if self.shard_config.enable_flash_attention or self.shard_config.enable_sequence_parallelism: if self.pipeline_stage_manager is None: self.append_or_create_method_replacement( description={ "forward": get_command_flash_attention_model_forward( self.shard_config, sp_mode=sp_mode, sp_size=sp_size, sp_group=sp_group, ), }, policy=policy, target_key=CohereModel, ) use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( num_q_heads % tp_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." if hasattr(self.model.config, "num_key_value_heads"): assert ( num_kv_heads >= tp_size and num_kv_heads % tp_size == 0 ), f"The number of key_value heads must be divisible by, and must not be less than tensor parallel size." 
decoder_attribute_replacement = { "self_attn.hidden_size": self.model.config.hidden_size // tp_size, "self_attn.num_heads": num_q_heads // tp_size, } if getattr(self.model.config, "num_key_value_heads", False): decoder_attribute_replacement["self_attn.num_key_value_heads"] = num_kv_heads // tp_size policy[CohereDecoderLayer] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=Linear1D_Row, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.gate_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.up_proj", target_module=Linear1D_Col, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.down_proj", target_module=Linear1D_Row, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), ], ) elif use_zbv: policy[CohereDecoderLayer] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, 
sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.gate_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.up_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), SubModuleReplacementDescription( suffix="mlp.down_proj", target_module=LinearWithGradAccum, kwargs=dict( seq_parallel_mode=sp_mode, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="embed_tokens", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, 
target_key=CohereModel, ) return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager is None: return stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "CohereModel": module = self.model else: module = self.model.model if stage_manager.is_interleave: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_manager.stage_indices = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial(new_forward, stage_manager=stage_manager, shard_config=self.shard_config) } else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config ) } self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "CohereModel": module = self.model else: module = self.model.model stage_manager = self.pipeline_stage_manager held_layers = [] held_layers.append(module.rotary_emb) if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embed_tokens) for start_idx, end_idx in stage_indices: held_layers.extend(module.layers[start_idx:end_idx]) if (stage_manager.use_zbv and 
stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(module.norm) else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) if stage_manager.is_first_stage(): held_layers.append(module.embed_tokens) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.layers[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.norm) return held_layers class CommandModelPolicy(CommandPolicy): def module_policy(self): policy = super().module_policy() from transformers.models.cohere.modeling_cohere import CohereModel if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=CohereModel, new_forward=CommandPipelineForwards.command_model_forward, policy=policy ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in command model""" return [] class CommandForCausalLMPolicy(CommandPolicy): def module_policy(self): from transformers import CohereForCausalLM self.is_causal = True policy = super().module_policy() if self.shard_config.enable_tensor_parallelism: # add a new item for causal lm new_item = { CohereForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=VocabParallelLMHead1D, kwargs={ "gather_output": not self.shard_config.parallel_output, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ) ], ) } if self.shard_config.parallel_output: new_item[CohereForCausalLM].method_replacement = { "forward": get_lm_forward_with_dist_cross_entropy(self.shard_config) } else: new_item = { CohereForCausalLM: ModulePolicyDescription( 
sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=PaddingLMHead, kwargs={"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}, ) ], ) } policy.update(new_item) if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=CohereForCausalLM, new_forward=CommandPipelineForwards.command_for_causal_lm_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) else: if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: command_model = self.model.model if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if ( id(command_model.embed_tokens.weight) == id(self.model.lm_head.weight) and self.pipeline_stage_manager.num_stages > 1 ): # tie weights return [ { 0: command_model.embed_tokens.weight, self.pipeline_stage_manager.num_stages - 1: self.model.lm_head.weight, } ] return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/deepseek_v3.py
colossalai/shardformer/policies/deepseek_v3.py
from functools import partial from typing import Callable, Dict, List, Union import torch.nn as nn from colossalai.shardformer.layer import FusedRMSNorm from colossalai.shardformer.modeling.deepseek_v3 import ( EpDeepseekV3MoE, deepseek_v3_for_causal_lm_forward, deepseek_v3_model_forward, ) from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["DeepseekPolicy", "DeepseekForCausalLMPolicy"] class DeepseekV3Policy(Policy): def config_sanity_check(self): assert not self.shard_config.enable_tensor_parallelism, "DeepSeekV3 does not support tensor parallelism" assert not self.shard_config.enable_sequence_parallelism, "DeepSeekV3 does not support sequence parallelism" if self.shard_config.pipeline_stage_manager: assert not self.shard_config.pipeline_stage_manager.use_zbv, "DeepSeekV3 does not support ZBV" def preprocess(self): return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: policy = {} # support gradient checkpointing if self.shard_config.pipeline_stage_manager is None: policy["DeepseekV3Model"] = ModulePolicyDescription( method_replacement={"forward": deepseek_v3_model_forward} ) if self.shard_config.expert_parallel_size > 1: # expert parallel self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="mlp", target_module=EpDeepseekV3MoE, kwargs={ "ep_group": self.shard_config.ep_group, "moe_dp_group": self.shard_config.moe_dp_group, }, ) ], policy=policy, target_key="DeepseekV3DecoderLayer", ) # optimization configuration if self.shard_config.enable_fused_normalization: # TODO: prevent casting to fp32 self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="input_layernorm", target_module=FusedRMSNorm, ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=FusedRMSNorm, ), ], policy=policy, target_key="DeepseekV3DecoderLayer", ) 
self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="norm", target_module=FusedRMSNorm, ), policy=policy, target_key="DeepseekV3Model", ) return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: str, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager: num_layers = self.model.config.num_hidden_layers stage_manager = self.pipeline_stage_manager layers_per_stage = stage_manager.distribute_layers(num_layers) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = {"forward": partial(new_forward, stage_manager=stage_manager, stage_index=stage_index)} self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=model_cls ) return def get_held_layers(self) -> List[nn.Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None module = self.model if module.__class__.__name__.startswith("PeftModel"): module = module.get_base_model() if module.__class__.__name__ != "DeepseekV3Model": module = module.model stage_manager = self.pipeline_stage_manager held_layers = [] if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_indices = stage_manager.get_stage_index(layers_per_stage) stage_manager.stage_indices = stage_indices if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embed_tokens) for start_idx, end_idx in stage_indices: held_layers.extend(module.layers[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): # for zbv, when is_first_stage (last fwd), 
we append norm # for interleaved, when is_last_stage (last fwd), we also append norm held_layers.append(module.norm) else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) if stage_manager.is_first_stage(): held_layers.append(module.embed_tokens) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.layers[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.norm) return held_layers class DeepseekV3ModelPolicy(DeepseekV3Policy): def module_policy(self): policy = super().module_policy() if self.shard_config.pipeline_stage_manager: self.set_pipeline_forward("DeepseekV3Model", deepseek_v3_model_forward, policy) return policy class DeepseekV3ForCausalLMPolicy(DeepseekV3Policy): def module_policy(self): policy = super().module_policy() if self.shard_config.pipeline_stage_manager: self.set_pipeline_forward("DeepseekV3ForCausalLM", deepseek_v3_for_causal_lm_forward, policy) return policy def get_held_layers(self): stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(self.model.lm_head) elif stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.lm_head) return held_layers
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/blip2.py
colossalai/shardformer/policies/blip2.py
import colossalai.shardformer.layer as col_nn from ..modeling.blip2 import ( forward_fn, get_blip2_flash_attention_forward, get_jit_fused_blip2_mlp_forward, get_jit_fused_blip2_QFormer_output_forward, get_jit_fused_blip2_QFormer_self_output_forward, ) from ..modeling.jit import get_jit_fused_dropout_add_func from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["BlipPolicy", "BlipModelPolicy"] class BlipPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.enable_bias_gelu_fused = ( self.shard_config.enable_jit_fused and self.model.config.vision_config.hidden_act == "gelu" ) return self.model def module_policy(self): from transformers.models.blip_2.modeling_blip_2 import ( Blip2Attention, Blip2EncoderLayer, Blip2MLP, Blip2QFormerLayer, Blip2QFormerModel, Blip2QFormerOutput, Blip2QFormerSelfOutput, Blip2VisionModel, ) from transformers.models.opt.modeling_opt import OPTDecoderLayer, OPTForCausalLM policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = col_nn.VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = col_nn.PaddingEmbedding if self.shard_config.enable_fused_normalization: norm_cls = col_nn.FusedLayerNorm else: norm_cls = col_nn.LayerNorm use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.vision_config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." 
policy[Blip2EncoderLayer] = ModulePolicyDescription( attribute_replacement={ "self_attn.num_heads": self.model.config.vision_config.num_attention_heads // self.shard_config.tensor_parallel_size, "self_attn.embed_dim": self.model.config.vision_config.hidden_size // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="self_attn.qkv", target_module=col_nn.FusedLinear1D_Col, kwargs={ "split_sizes": [self.model.config.vision_config.hidden_size] * 3, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.projection", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.fc1", target_module=col_nn.Linear1D_Col, kwargs={ "skip_bias_add": self.enable_bias_gelu_fused, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.fc2", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[Blip2QFormerModel] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForParallelInput, ), ] ) policy[Blip2QFormerLayer] = ModulePolicyDescription( attribute_replacement={ "attention.attention.num_attention_heads": self.model.config.qformer_config.num_attention_heads // self.shard_config.tensor_parallel_size, "attention.attention.all_head_size": self.model.config.qformer_config.hidden_size // self.shard_config.tensor_parallel_size, "crossattention.attention.num_attention_heads": self.model.config.qformer_config.num_attention_heads // self.shard_config.tensor_parallel_size, 
"crossattention.attention.all_head_size": self.model.config.qformer_config.hidden_size // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="attention.attention.query", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.key", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.value", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="attention.output.dense", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.output.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="crossattention.attention.query", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="crossattention.attention.key", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="crossattention.attention.value", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="crossattention.attention.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="crossattention.output.dense", 
target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="crossattention.output.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="intermediate_query.dense", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output_query.dense", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output_query.dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) policy[OPTDecoderLayer] = ModulePolicyDescription( attribute_replacement={ "self_attn.embed_dim": self.model.config.text_config.hidden_size // self.shard_config.tensor_parallel_size, "self_attn.num_heads": self.model.config.text_config.num_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc1", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": 
use_zbv, }, ), SubModuleReplacementDescription( suffix="fc2", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[Blip2Attention] = ModulePolicyDescription(method_replacement={"forward": forward_fn()}) if self.enable_bias_gelu_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_blip2_mlp_forward(), }, policy=policy, target_key=Blip2MLP, ) elif use_zbv: policy[Blip2EncoderLayer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="self_attn.qkv", target_module=col_nn.FusedLinear, kwargs={ "split_sizes": [self.model.config.vision_config.hidden_size] * 3, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.projection", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.fc1", target_module=col_nn.LinearWithGradAccum, kwargs={ "skip_bias_add": self.enable_bias_gelu_fused, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.fc2", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[Blip2QFormerModel] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForParallelInput, ), ] ) policy[Blip2QFormerLayer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="attention.attention.query", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( 
suffix="attention.attention.key", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.value", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.attention.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="attention.output.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attention.output.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="crossattention.attention.query", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="crossattention.attention.key", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="crossattention.attention.value", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="crossattention.attention.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="crossattention.output.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="crossattention.output.dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="intermediate_query.dense", target_module=col_nn.LinearWithGradAccum, 
kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output_query.dense", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="output_query.dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) policy[OPTDecoderLayer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc1", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="fc2", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[Blip2Attention] = ModulePolicyDescription(method_replacement={"forward": forward_fn()}) if self.enable_bias_gelu_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_blip2_mlp_forward(), }, policy=policy, target_key=Blip2MLP, ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=[ 
SubModuleReplacementDescription( suffix="model.decoder.embed_tokens", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), ], policy=policy, target_key=OPTForCausalLM, ) if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.VocabParallelLMHead1D, kwargs={ "gather_output": True, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ), ], policy=policy, target_key=OPTForCausalLM, ) else: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.PaddingLMHead, kwargs={"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}, ), ], policy=policy, target_key=OPTForCausalLM, ) # optimization configuration # Handle Blip2EncoderLayer layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="layer_norm1", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="layer_norm2", target_module=norm_cls, ), ], policy=policy, target_key=Blip2EncoderLayer, ) # handle Blip2VisionModel layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="post_layernorm", target_module=norm_cls, ) ], policy=policy, target_key=Blip2VisionModel, ) # handle Blip2VisionModel layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="layernorm", target_module=norm_cls, ) ], policy=policy, target_key=Blip2QFormerModel, ) # handle Blip2QFormerLayer layer 
self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="attention.output.LayerNorm", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="crossattention.output.LayerNorm", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="output_query.LayerNorm", target_module=norm_cls, ), ], policy=policy, target_key=Blip2QFormerLayer, ) # handle OPTForCausalLM layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="model.decoder.final_layer_norm", target_module=norm_cls, ) ], policy=policy, target_key=OPTForCausalLM, ) # handle OPTDecoderLayer layer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="self_attn_layer_norm", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="final_layer_norm", target_module=norm_cls, ), ], policy=policy, target_key=OPTDecoderLayer, ) # use flash attention if self.shard_config.enable_flash_attention: self.append_or_create_method_replacement( description={ "forward": get_blip2_flash_attention_forward(), }, policy=policy, target_key=Blip2Attention, ) # use jit operator if self.shard_config.enable_jit_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_blip2_QFormer_self_output_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=Blip2QFormerSelfOutput, ) self.append_or_create_method_replacement( description={ "forward": get_jit_fused_blip2_QFormer_output_forward(), "dropout_add": get_jit_fused_dropout_add_func(), }, policy=policy, target_key=Blip2QFormerOutput, ) return policy def postprocess(self): return self.model # Blip2Model class Blip2ModelPolicy(BlipPolicy): def __init__(self) -> None: super().__init__() # Blip2ForConditionalGeneration class Blip2ForConditionalGenerationPolicy(BlipPolicy): def __init__(self) -> None: super().__init__()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/gpt2.py
colossalai/shardformer/policies/gpt2.py
import warnings from functools import partial from typing import Callable, Dict, List from torch import Tensor, nn import colossalai.shardformer.layer as col_nn from ..modeling.gpt2 import GPT2PipelineForwards, get_gpt2_flash_attention_forward, get_jit_fused_gpt2_mlp_forward from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = [ "GPT2Policy", "GPT2ModelPolicy", "GPT2LMHeadModelPolicy", "GPT2DoubleHeadsModelPolicy", "GPT2ForTokenClassificationPolicy", "GPT2ForSequenceClassificationPolicy", ] class GPT2Policy(Policy): def config_sanity_check(self): pass def preprocess(self): # reshape the embedding layer r""" Reshape the Embedding layer to make the embedding dimension divisible by world_size """ self.tie_weight = self.tie_weight_check() self.origin_attn_implement = self.model.config._attn_implementation self.enable_bias_gelu_fused = ( self.shard_config.enable_jit_fused and self.model.config.activation_function == "gelu" ) return self.model def module_policy(self): from transformers.models.gpt2.modeling_gpt2 import GPT2MLP, GPT2Attention, GPT2Block, GPT2Model policy = {} embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = col_nn.VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = col_nn.PaddingEmbedding if self.shard_config.enable_fused_normalization: norm_cls = col_nn.FusedLayerNorm else: norm_cls = col_nn.LayerNorm sp_mode = self.shard_config.sequence_parallelism_mode or None assert sp_mode != "all_to_all", "all_to_all sequence parallelism is not supported for GPT2" if sp_mode == "ring": warnings.warn( f"For GPT2, sequence parallelism is currently not support mode {sp_mode}, will set to be split_gather" ) self.shard_config.sequence_parallelism_mode = sp_mode = "split_gather" sp_partial_derived = sp_mode in ["split_gather", "ring"] use_flash_attention = self.shard_config.enable_flash_attention use_zbv = self.pipeline_stage_manager is not None and 
self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." policy[GPT2Model] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="drop", target_module=col_nn.DropoutForParallelInput, ), ] ) policy[GPT2Block] = ModulePolicyDescription( attribute_replacement={ "attn.embed_dim": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "attn.split_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "attn.num_heads": self.model.config.num_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="attn.c_attn", target_module=col_nn.GPT2FusedLinearConv1D_Col, kwargs={ "split_sizes": [self.model.config.hidden_size] * 3, "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.c_proj", target_module=col_nn.GPT2FusedLinearConv1D_Row, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.c_fc", target_module=col_nn.GPT2FusedLinearConv1D_Col, kwargs={ "split_sizes": [self.model.config.n_inner or 4 * self.model.config.hidden_size], "seq_parallel_mode": sp_mode, "skip_bias_add": self.enable_bias_gelu_fused, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.c_proj", target_module=col_nn.GPT2FusedLinearConv1D_Row, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.attn_dropout", target_module=col_nn.DropoutForParallelInput, ), 
SubModuleReplacementDescription( suffix="attn.resid_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="mlp.dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) if self.enable_bias_gelu_fused: self.append_or_create_method_replacement( description={ "forward": get_jit_fused_gpt2_mlp_forward(), }, policy=policy, target_key=GPT2MLP, ) elif use_zbv: policy[GPT2Model] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="drop", target_module=col_nn.DropoutForParallelInput, ), ] ) policy[GPT2Block] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="attn.c_attn", target_module=col_nn.GPT2FusedLinearConv, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.c_proj", target_module=col_nn.GPT2FusedLinearConv, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.c_fc", target_module=col_nn.GPT2FusedLinearConv, kwargs={ "seq_parallel_mode": sp_mode, "skip_bias_add": self.enable_bias_gelu_fused, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.c_proj", target_module=col_nn.GPT2FusedLinearConv, kwargs={ "seq_parallel_mode": sp_mode, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.attn_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="attn.resid_dropout", target_module=col_nn.DropoutForParallelInput, ), SubModuleReplacementDescription( suffix="mlp.dropout", target_module=col_nn.DropoutForParallelInput, ), ], ) if self.enable_bias_gelu_fused: self.append_or_create_method_replacement( description={ "forward": 
get_jit_fused_gpt2_mlp_forward(), }, policy=policy, target_key=GPT2MLP, ) if embedding_cls is not None: # padding vocabulary size when using pp to make it divisible by shard_config.make_vocab_size_divisible_by self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="wte", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, target_key=GPT2Model, ) # optimization configuration self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="ln_f", target_module=norm_cls, ), policy=policy, target_key=GPT2Model, ) self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="ln_1", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="ln_2", target_module=norm_cls, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="ln_cross_attn", target_module=norm_cls, ignore_if_not_exist=True, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key=GPT2Block, ) if use_flash_attention: self.append_or_create_method_replacement( description={ "forward": get_gpt2_flash_attention_forward(shard_config=self.shard_config), }, policy=policy, target_key=GPT2Attention, ) if not self.shard_config.pipeline_stage_manager and self.shard_config.enable_sequence_parallelism: policy[GPT2Model].method_replacement = { "forward": partial(GPT2PipelineForwards.gpt2_model_forward, shard_config=self.shard_config) } return policy def postprocess(self): return self.model def get_held_layers(self) -> List[nn.Module]: """Get pipeline layers for current stage.""" assert 
self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "GPT2Model": module = self.model else: module = self.model.transformer stage_manager = self.pipeline_stage_manager held_layers = [] if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = stage_manager.distribute_layers(len(module.h)) stage_indices = stage_manager.get_stage_index(layers_per_stage) if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.wte) held_layers.append(module.wpe) held_layers.append(module.drop) for start_idx, end_idx in stage_indices: held_layers.extend(module.h[start_idx:end_idx]) if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(module.ln_f) else: layers_per_stage = stage_manager.distribute_layers(len(module.h)) if stage_manager.is_first_stage(): held_layers.append(module.wte) held_layers.append(module.wpe) held_layers.append(module.drop) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.h[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.ln_f) return held_layers def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if not self.pipeline_stage_manager: raise ValueError("set_pipeline_forward method can only be called when pipeline parallel is enabled.") stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "GPT2Model": module = self.model else: module = self.model.transformer if stage_manager.is_interleave: layers_per_stage = stage_manager.distribute_layers(len(module.h)) stage_manager.stage_indices = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, shard_config=self.shard_config, ) } else: 
layers_per_stage = stage_manager.distribute_layers(len(module.h)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = { "forward": partial( new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config, ) } self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls) # GPT2Model class GPT2ModelPolicy(GPT2Policy): def module_policy(self): from transformers.models.gpt2.modeling_gpt2 import GPT2Model policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPT2Model, new_forward=GPT2PipelineForwards.gpt2_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[nn.Module]: return super().get_held_layers() def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in GPT2Model.""" return [] # GPT2LMHeadModel class GPT2LMHeadModelPolicy(GPT2Policy): def module_policy(self): from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel module_policy = super().module_policy() module_policy[GPT2LMHeadModel] = ModulePolicyDescription() if self.shard_config.enable_tensor_parallelism: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.VocabParallelLMHead1D, kwargs={ "gather_output": False, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, }, ), policy=module_policy, target_key=GPT2LMHeadModel, ) else: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.PaddingLMHead, kwargs={"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}, ), policy=module_policy, target_key=GPT2LMHeadModel, ) if self.shard_config.parallel_output: self.append_or_create_method_replacement( description={ "forward": partial(GPT2PipelineForwards.gpt2_lmhead_model_forward, 
shard_config=self.shard_config) }, policy=module_policy, target_key=GPT2LMHeadModel, ) if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPT2LMHeadModel, new_forward=GPT2PipelineForwards.gpt2_lmhead_model_forward, policy=module_policy, ) return module_policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) else: if self.pipeline_stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.lm_head) # if self.pipeline_stage_manager.is_last_stage(ignore_chunk=True): # held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """The weights of wte and lm_head are shared.""" module = self.model stage_manager = self.pipeline_stage_manager if stage_manager is not None: if stage_manager.num_stages > 1 and id(module.transformer.wte.weight) == id(module.lm_head.weight): first_stage, last_stage = 0, stage_manager.num_stages - 1 return [ { first_stage: module.transformer.wte.weight, last_stage: module.lm_head.weight, } ] return [] # GPT2DoubleHeadsModel class GPT2DoubleHeadsModelPolicy(GPT2Policy): def module_policy(self): from transformers.models.gpt2.modeling_gpt2 import GPT2DoubleHeadsModel module_policy = super().module_policy() if self.shard_config.enable_tensor_parallelism: addon_module = { GPT2DoubleHeadsModel: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.VocabParallelLMHead1D, kwargs={ "gather_output": True, "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, }, ) ] ) } else: addon_module = { 
GPT2DoubleHeadsModel: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=col_nn.PaddingLMHead, kwargs={"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}, ) ] ) } module_policy.update(addon_module) if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPT2DoubleHeadsModel, new_forward=GPT2PipelineForwards.gpt2_double_heads_model_forward, policy=module_policy, ) return module_policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.lm_head) held_layers.append(multiple_choice_head.summary) held_layers.append(multiple_choice_head.activation) held_layers.append(multiple_choice_head.first_dropout) held_layers.append(multiple_choice_head.last_dropout) else: if self.pipeline_stage_manager.is_last_stage(): multiple_choice_head = self.model.multiple_choice_head held_layers.append(self.model.lm_head) held_layers.append(multiple_choice_head.summary) held_layers.append(multiple_choice_head.activation) held_layers.append(multiple_choice_head.first_dropout) held_layers.append(multiple_choice_head.last_dropout) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """The weights of wte and lm_head are shared.""" module = self.model stage_manager = self.pipeline_stage_manager if stage_manager is not None: if stage_manager.num_stages > 1 and id(module.transformer.wte.weight) == id(module.lm_head.weight): first_stage, last_stage = 0, stage_manager.num_stages - 1 return [ { first_stage: module.transformer.wte.weight, last_stage: module.lm_head.weight, } ] return [] # GPT2ForQuestionAnswering class GPT2ForQuestionAnsweringPolicy(GPT2Policy): def 
module_policy(self): from transformers.models.gpt2.modeling_gpt2 import GPT2ForQuestionAnswering module_policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPT2ForQuestionAnswering, new_forward=GPT2PipelineForwards.gpt2_for_question_answering_forward, policy=module_policy, ) return module_policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.qa_outputs) else: if self.pipeline_stage_manager.is_last_stage(): held_layers.append(self.model.qa_outputs) # if self.pipeline_stage_manager.is_last_stage(): # held_layers.append(self.model.qa_outputs) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared_params in gpt2 for QA.""" return [] # GPT2ForTokenClassification class GPT2ForTokenClassificationPolicy(GPT2Policy): def module_policy(self): from transformers.models.gpt2.modeling_gpt2 import GPT2ForTokenClassification module_policy = super().module_policy() if self.shard_config.enable_tensor_parallelism: addon_module = { GPT2ForTokenClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="dropout", target_module=col_nn.DropoutForParallelInput, ) ] ) } module_policy.update(addon_module) if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPT2ForTokenClassification, new_forward=GPT2PipelineForwards.gpt2_for_token_classification_forward, policy=module_policy, ) return module_policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and 
stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) else: if self.pipeline_stage_manager.is_last_stage(): held_layers.append(self.model.dropout) held_layers.append(self.model.classifier) # if self.pipeline_stage_manager.is_last_stage(): # held_layers.append(self.model.dropout) # held_layers.append(self.model.classifier) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in GPT2ForTokenClassification.""" return [] # GPT2ForSequenceClassification class GPT2ForSequenceClassificationPolicy(GPT2Policy): def module_policy(self): from transformers.models.gpt2.modeling_gpt2 import GPT2ForSequenceClassification module_policy = super().module_policy() if self.pipeline_stage_manager is not None: self.set_pipeline_forward( model_cls=GPT2ForSequenceClassification, new_forward=GPT2PipelineForwards.gpt2_for_sequence_classification_forward, policy=module_policy, ) return module_policy def get_held_layers(self) -> List[nn.Module]: held_layers = super().get_held_layers() stage_manager = self.pipeline_stage_manager if stage_manager.is_interleave: if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): held_layers.append(self.model.score) else: if self.pipeline_stage_manager.is_last_stage(): held_layers.append(self.model.score) # if self.pipeline_stage_manager.is_last_stage(): # held_layers.append(self.model.score) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in GPT2ForTokenClassification.""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/sam.py
colossalai/shardformer/policies/sam.py
import colossalai.shardformer.layer as col_nn from ..modeling.sam import forward_fn from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["SamPolicy", "SamModelPolicy"] class SamPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): return self.model def module_policy(self): from transformers.models.sam.modeling_sam import ( SamTwoWayAttentionBlock, SamTwoWayTransformer, SamVisionAttention, SamVisionLayer, ) policy = {} if self.shard_config.enable_fused_normalization: norm_cls = col_nn.FusedLayerNorm else: norm_cls = col_nn.LayerNorm use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: assert ( self.model.config.vision_config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." policy[SamVisionLayer] = ModulePolicyDescription( attribute_replacement={ "attn.num_attention_heads": self.model.config.vision_config.num_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="attn.qkv", target_module=col_nn.FusedLinear1D_Col, kwargs={ "split_sizes": [self.model.config.vision_config.hidden_size] * 3, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.lin1", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.lin2", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[SamTwoWayAttentionBlock] = 
ModulePolicyDescription( attribute_replacement={ "self_attn.num_attention_heads": self.model.config.mask_decoder_config.num_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_token_to_image.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_token_to_image.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_token_to_image.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_token_to_image.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.lin1", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( 
suffix="mlp.lin2", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_image_to_token.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_image_to_token.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_image_to_token.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_image_to_token.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[SamTwoWayTransformer] = ModulePolicyDescription( attribute_replacement={ "final_attn_token_to_image.num_attention_heads": self.model.config.mask_decoder_config.num_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="final_attn_token_to_image.q_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="final_attn_token_to_image.k_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="final_attn_token_to_image.v_proj", target_module=col_nn.Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="final_attn_token_to_image.out_proj", target_module=col_nn.Linear1D_Row, kwargs={ "fp8_communication": 
self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) # add `DropoutForParallelInput` layer to replace the useage of `nn.functional.dropout` policy[SamVisionAttention] = ModulePolicyDescription( attribute_replacement={ "dropout_layer": col_nn.DropoutForParallelInput(self.model.config.vision_config.attention_dropout) }, method_replacement={"forward": forward_fn()}, sub_module_replacement=[], ) elif use_zbv: policy[SamVisionLayer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="attn.qkv", target_module=col_nn.FusedLinear, kwargs={ "split_sizes": [self.model.config.vision_config.hidden_size] * 3, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="attn.proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.lin1", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.lin2", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[SamTwoWayAttentionBlock] = ModulePolicyDescription( attribute_replacement={ "self_attn.num_attention_heads": self.model.config.mask_decoder_config.num_attention_heads // self.shard_config.tensor_parallel_size, }, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ 
"fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_token_to_image.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_token_to_image.k_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_token_to_image.v_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_token_to_image.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.lin1", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="mlp.lin2", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_image_to_token.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_image_to_token.k_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_image_to_token.v_proj", 
target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="cross_attn_image_to_token.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) policy[SamTwoWayTransformer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="final_attn_token_to_image.q_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="final_attn_token_to_image.k_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="final_attn_token_to_image.v_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="final_attn_token_to_image.out_proj", target_module=col_nn.LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) # add `DropoutForParallelInput` layer to replace the useage of `nn.functional.dropout` policy[SamVisionAttention] = ModulePolicyDescription( attribute_replacement={ "dropout_layer": col_nn.DropoutForParallelInput(self.model.config.vision_config.attention_dropout) }, method_replacement={"forward": forward_fn()}, sub_module_replacement=[], ) # optimization configuration # Handle SamVisionLayer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="layer_norm1", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="layer_norm2", target_module=norm_cls, ), ], policy=policy, target_key=SamVisionLayer, ) # Handle SamTwoWayAttentionBlock 
self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="layer_norm1", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="layer_norm2", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="layer_norm3", target_module=norm_cls, ), SubModuleReplacementDescription( suffix="layer_norm4", target_module=norm_cls, ), ], policy=policy, target_key=SamTwoWayAttentionBlock, ) # Handle SamTwoWayTransformer self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="layer_norm_final_attn", target_module=norm_cls, ) ], policy=policy, target_key=SamTwoWayTransformer, ) return policy def postprocess(self): return self.model # SamModel class SamModelPolicy(SamPolicy): def __init__(self) -> None: super().__init__()
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/__init__.py
colossalai/shardformer/policies/__init__.py
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/mixtral.py
colossalai/shardformer/policies/mixtral.py
import warnings from functools import partial from typing import Callable, Dict, List, Union import torch.nn as nn from torch import Tensor from torch.nn import Module from transformers.models.mixtral.modeling_mixtral import MixtralForCausalLM, MixtralModel from colossalai.shardformer.layer import ( FusedRMSNorm, Linear1D_Col, Linear1D_Row, LinearWithGradAccum, PaddingEmbedding, VocabParallelEmbedding1D, ) # from colossalai.shardformer.layer import FusedRMSNorm, Linear1D_Col # from colossalai.shardformer.layer.embedding import PaddingEmbedding, VocabParallelEmbedding1D # from colossalai.shardformer.layer.linear import Linear1D_Row from colossalai.shardformer.modeling.mixtral import ( EPMixtralSparseMoeBlock, MixtralPipelineForwards, get_mixtral_flash_attention_forward, get_mixtral_flash_attention_model_forward, ) from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription __all__ = ["MixtralPolicy", "MixtralForCausalLMPolicy"] class MixtralPolicy(Policy): def config_sanity_check(self): pass def preprocess(self): self.tie_weight = self.tie_weight_check() self.origin_attn_implement = self.model.config._attn_implementation return self.model def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]: from transformers.models.mixtral.modeling_mixtral import MixtralAttention, MixtralDecoderLayer, MixtralModel policy = {} sp_mode = self.shard_config.sequence_parallelism_mode or None sp_size = self.shard_config.sequence_parallel_size or None sp_group = self.shard_config.sequence_parallel_process_group or None sp_partial_derived = sp_mode in ["split_gather", "ring"] tp_size = self.shard_config.tensor_parallel_size use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # modified for both SP and TP num_q_heads = self.model.config.num_attention_heads num_kv_heads = getattr(self.model.config, "num_key_value_heads", None) if sp_mode == "all_to_all": num_q_heads 
//= sp_size decoder_attribute_replacement = { "num_heads": num_q_heads, } if getattr(self.model.config, "num_key_value_heads", False): num_kv_heads //= sp_size decoder_attribute_replacement["num_key_value_heads"] = num_kv_heads policy[MixtralAttention] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, ) if self.shard_config.enable_sequence_parallelism: if self.pipeline_stage_manager is not None: # NOTE: we are replacing model forward for both sequence parallelism and pipeline parallelism # if both are enabled, one of them will be ignored raise NotImplementedError("Sequence parallelism is not supported with pipeline parallelism.") self.append_or_create_method_replacement( description={ "forward": get_mixtral_flash_attention_forward(self.shard_config, sp_mode, sp_size, sp_group), }, policy=policy, target_key=MixtralAttention, ) self.append_or_create_method_replacement( description={ "forward": get_mixtral_flash_attention_model_forward( self.shard_config, sp_mode=sp_mode, sp_size=sp_size, sp_group=sp_group, ), }, policy=policy, target_key=MixtralModel, ) embedding_cls = None if self.shard_config.enable_tensor_parallelism: embedding_cls = VocabParallelEmbedding1D else: if self.tie_weight: embedding_cls = PaddingEmbedding if self.shard_config.enable_tensor_parallelism: # tensor parallelism for non-moe params assert ( self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of attention heads must be divisible by tensor parallel size." assert ( self.model.config.num_key_value_heads % self.shard_config.tensor_parallel_size == 0 ), f"The number of key_value heads must be divisible by tensor parallel size." 
num_q_heads //= tp_size decoder_attribute_replacement = { "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size, "self_attn.num_heads": num_q_heads, } if num_kv_heads: num_kv_heads //= tp_size decoder_attribute_replacement["self_attn.num_key_value_heads"] = num_kv_heads policy[MixtralDecoderLayer] = ModulePolicyDescription( attribute_replacement=decoder_attribute_replacement, sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=Linear1D_Col, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=Linear1D_Row, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="block_sparse_moe.gate", target_module=Linear1D_Col, kwargs={ "gather_output": True, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) elif use_zbv: policy[MixtralDecoderLayer] = ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="self_attn.q_proj", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.k_proj", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.v_proj", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": 
self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="self_attn.o_proj", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), SubModuleReplacementDescription( suffix="block_sparse_moe.gate", target_module=LinearWithGradAccum, kwargs={ "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ), ], ) if embedding_cls is not None: self.append_or_create_submodule_replacement( description=SubModuleReplacementDescription( suffix="embed_tokens", target_module=embedding_cls, kwargs=( { "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by, "fp8_communication": self.shard_config.fp8_communication, } if self.shard_config.enable_tensor_parallelism else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by} ), ), policy=policy, target_key=MixtralModel, ) if self.shard_config.ep_group: # expert parallel self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="block_sparse_moe", target_module=EPMixtralSparseMoeBlock, kwargs={ "ep_group": self.shard_config.ep_group, "tp_group": self.shard_config.tensor_parallel_process_group, "moe_dp_group": self.shard_config.moe_dp_group, "fp8_communication": self.shard_config.fp8_communication, "use_zbv": use_zbv, }, ) ], policy=policy, target_key=MixtralDecoderLayer, ) # optimization configuration if self.shard_config.enable_fused_normalization: self.append_or_create_submodule_replacement( description=[ SubModuleReplacementDescription( suffix="input_layernorm", target_module=FusedRMSNorm, kwargs={"sp_partial_derived": sp_partial_derived}, ), SubModuleReplacementDescription( suffix="post_attention_layernorm", target_module=FusedRMSNorm, kwargs={"sp_partial_derived": sp_partial_derived}, ), ], policy=policy, target_key=MixtralDecoderLayer, ) self.append_or_create_submodule_replacement( 
description=SubModuleReplacementDescription( suffix="norm", target_module=FusedRMSNorm, kwargs={"sp_partial_derived": sp_partial_derived}, ), policy=policy, target_key=MixtralModel, ) if self.shard_config.enable_flash_attention: warnings.warn("Flash attention is natively supported in transformers, will ignore the flag.") self.shard_config.enable_flash_attention = False return policy def postprocess(self): return self.model def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None: """If under pipeline parallel setting, replacing the original forward method of huggingface to customized forward method, and add this changing to policy.""" if self.pipeline_stage_manager: if self.shard_config.enable_sequence_parallelism: # NOTE: we are replacing model forward for both sequence parallelism and pipeline parallelism # if both are enabled, one of them will be ignored raise NotImplementedError("Pipeline parallelism is not supported with sequence parallelism.") stage_manager = self.pipeline_stage_manager if self.model.__class__.__name__ == "MixtralModel": module = self.model else: module = self.model.model layers_per_stage = stage_manager.distribute_layers(len(module.layers)) stage_index = stage_manager.get_stage_index(layers_per_stage) method_replacement = {"forward": partial(new_forward, stage_manager=stage_manager, stage_index=stage_index)} self.append_or_create_method_replacement( description=method_replacement, policy=policy, target_key=model_cls ) return def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" assert self.pipeline_stage_manager is not None if self.model.__class__.__name__ == "MixtralModel": module = self.model else: module = self.model.model stage_manager = self.pipeline_stage_manager held_layers = [] held_layers.append(module.rotary_emb) if stage_manager.is_interleave: assert stage_manager.num_model_chunks is not None layers_per_stage = 
stage_manager.distribute_layers(len(module.layers)) stage_indices = stage_manager.get_stage_index(layers_per_stage) stage_manager.stage_indices = stage_indices if stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(module.embed_tokens) for start_idx, end_idx in stage_indices: held_layers.extend(module.layers[start_idx:end_idx]) if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or ( not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True) ): # for zbv, when is_first_stage (last fwd), we append norm # for interleaved, when is_last_stage (last fwd), we also append norm held_layers.append(module.norm) else: layers_per_stage = stage_manager.distribute_layers(len(module.layers)) if stage_manager.is_first_stage(): held_layers.append(module.embed_tokens) start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage) held_layers.extend(module.layers[start_idx:end_idx]) if stage_manager.is_last_stage(): held_layers.append(module.norm) return held_layers class MixtralModelPolicy(MixtralPolicy): def __init__(self) -> None: super().__init__() def module_policy(self): policy = super().module_policy() if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=MixtralModel, new_forward=MixtralPipelineForwards.mixtral_model_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" held_layers = super().get_held_layers() return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in mixtral model""" return [] class MixtralForCausalLMPolicy(MixtralPolicy): def module_policy(self): policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv # TODO: assign pg mesh from plugin to all modules if self.shard_config.enable_tensor_parallelism: # add a new item for causal lm new_item = { MixtralForCausalLM: 
ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=Linear1D_Col, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ], ) } policy.update(new_item) elif use_zbv: new_item = { MixtralForCausalLM: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="lm_head", target_module=LinearWithGradAccum, kwargs=dict( fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ], ) } policy.update(new_item) if self.pipeline_stage_manager: # set None as default self.set_pipeline_forward( model_cls=MixtralForCausalLM, new_forward=MixtralPipelineForwards.mixtral_for_causal_lm_forward, policy=policy, ) return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True): held_layers.append(self.model.lm_head) elif stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.lm_head) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: mixtral_model = self.model.model if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1: if ( id(mixtral_model.embed_tokens.weight) == id(self.model.lm_head.weight) and self.pipeline_stage_manager.num_stages > 1 ): # tie weights return [ { 0: mixtral_model.embed_tokens.weight, self.pipeline_stage_manager.num_stages - 1: self.model.lm_head.weight, } ] return [] class MixtralForSequenceClassificationPolicy(MixtralPolicy): def module_policy(self): from transformers import MixtralForSequenceClassification policy = super().module_policy() use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv if self.shard_config.enable_tensor_parallelism: # add a new item for sequence classification new_item = { 
MixtralForSequenceClassification: ModulePolicyDescription( sub_module_replacement=[ SubModuleReplacementDescription( suffix="score", target_module=Linear1D_Col, kwargs=dict( gather_output=True, fp8_communication=self.shard_config.fp8_communication, use_zbv=use_zbv, ), ) ] ) } policy.update(new_item) if self.pipeline_stage_manager: raise NotImplementedError return policy def get_held_layers(self) -> List[Module]: """Get pipeline layers for current stage.""" stage_manager = self.pipeline_stage_manager held_layers = super().get_held_layers() if stage_manager.is_last_stage(ignore_chunk=True): held_layers.append(self.model.score) return held_layers def get_shared_params(self) -> List[Dict[int, Tensor]]: """No shared params in mixtral for sequence classification model""" return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/policies/mistral.py
colossalai/shardformer/policies/mistral.py
"""ShardFormer sharding policies for HuggingFace Mistral models.

Each policy object describes how to rewrite a Mistral model for distributed
execution: which submodules to replace for tensor parallelism, which forward
methods to swap in for pipeline parallelism / flash attention, and which
layers each pipeline stage holds.
"""

import warnings
from functools import partial
from typing import Callable, Dict, List, Union

import torch.nn as nn
from torch import Tensor
from torch.nn import Module

from colossalai.shardformer.layer import (
    FusedRMSNorm,
    Linear1D_Col,
    Linear1D_Row,
    LinearWithGradAccum,
    PaddingEmbedding,
    PaddingLMHead,
    VocabParallelEmbedding1D,
    VocabParallelLMHead1D,
)

from ..modeling.mistral import (
    MistralForwards,
    get_lm_forward_with_dist_cross_entropy,
    get_mistral_flash_attention_forward,
    get_mistral_model_forward_for_flash_attn,
)
from .base_policy import ModulePolicyDescription, Policy, SubModuleReplacementDescription

__all__ = ["MistralPolicy", "MistralModelPolicy", "MistralForCausalLMPolicy", "MistralForSequenceClassificationPolicy"]


class MistralPolicy(Policy):
    """Base sharding policy shared by every Mistral variant.

    Builds the submodule/method replacement map for the decoder stack and
    provides pipeline-parallel helpers (forward replacement and per-stage
    layer assignment) reused by the Model / CausalLM / SequenceClassification
    subclasses.
    """

    def config_sanity_check(self):
        # No Mistral-specific config constraints to validate here.
        pass

    def preprocess(self):
        """Record tied-embedding status and the original attention implementation.

        Returns:
            The (unmodified) model.
        """
        self.tie_weight = self.tie_weight_check()
        self.origin_attn_implement = self.model.config._attn_implementation
        return self.model

    def module_policy(self) -> Dict[Union[str, nn.Module], ModulePolicyDescription]:
        """Build the replacement policy for the Mistral decoder stack.

        Returns:
            Mapping from HF module classes to their ModulePolicyDescription,
            covering tensor parallelism (or ZBV grad-accum linears), embedding
            replacement, fused RMSNorm, and flash attention.
        """
        from transformers.models.mistral.modeling_mistral import MistralAttention, MistralDecoderLayer, MistralModel

        policy = {}

        # Choose the embedding replacement: vocab-parallel under TP, otherwise
        # a padded embedding only when weights are tied (so lm_head padding matches).
        embedding_cls = None
        if self.shard_config.enable_tensor_parallelism:
            embedding_cls = VocabParallelEmbedding1D
        else:
            if self.tie_weight:
                embedding_cls = PaddingEmbedding

        # ZBV = zero-bubble-V pipeline schedule; requires grad-accum-aware linears.
        use_zbv = self.pipeline_stage_manager is not None and self.pipeline_stage_manager.use_zbv

        if self.shard_config.enable_sequence_parallelism:
            # Sequence parallelism is unsupported for Mistral; silently disable it.
            self.shard_config.enable_sequence_parallelism = False
            warnings.warn(
                "Mistral doesn't support sequence parallelism now, will ignore the sequence parallelism flag."
            )

        if self.shard_config.enable_tensor_parallelism:
            # Head counts must split evenly across TP ranks.
            assert (
                self.model.config.num_attention_heads % self.shard_config.tensor_parallel_size == 0
            ), f"The number of attention heads must be divisible by tensor parallel size."
            assert (
                self.model.config.num_key_value_heads % self.shard_config.tensor_parallel_size == 0
            ), f"The number of key_value heads must be divisible by tensor parallel size."
            # Per-rank attention sizes after the TP split.
            decoder_attribute_replacement = {
                "self_attn.hidden_size": self.model.config.hidden_size // self.shard_config.tensor_parallel_size,
                "self_attn.num_heads": self.model.config.num_attention_heads
                // self.shard_config.tensor_parallel_size,
                "self_attn.num_key_value_heads": self.model.config.num_key_value_heads
                // self.shard_config.tensor_parallel_size,
            }

            # Column-parallel for q/k/v and MLP up/gate projections,
            # row-parallel for the output projections (o_proj, down_proj).
            policy[MistralDecoderLayer] = ModulePolicyDescription(
                attribute_replacement=decoder_attribute_replacement,
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="self_attn.q_proj",
                        target_module=Linear1D_Col,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.k_proj",
                        target_module=Linear1D_Col,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.v_proj",
                        target_module=Linear1D_Col,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.o_proj",
                        target_module=Linear1D_Row,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.gate_proj",
                        target_module=Linear1D_Col,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.up_proj",
                        target_module=Linear1D_Col,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.down_proj",
                        target_module=Linear1D_Row,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                ],
            )
        elif use_zbv:
            # No TP, but ZBV scheduling still needs grad-accumulating linears.
            policy[MistralDecoderLayer] = ModulePolicyDescription(
                sub_module_replacement=[
                    SubModuleReplacementDescription(
                        suffix="self_attn.q_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.k_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.v_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="self_attn.o_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.gate_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.up_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                    SubModuleReplacementDescription(
                        suffix="mlp.down_proj",
                        target_module=LinearWithGradAccum,
                        kwargs={
                            "fp8_communication": self.shard_config.fp8_communication,
                            "use_zbv": use_zbv,
                        },
                    ),
                ],
            )

        if embedding_cls is not None:
            # fp8 communication only applies to the vocab-parallel (TP) embedding.
            self.append_or_create_submodule_replacement(
                description=SubModuleReplacementDescription(
                    suffix="embed_tokens",
                    target_module=embedding_cls,
                    kwargs=(
                        {
                            "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by,
                            "fp8_communication": self.shard_config.fp8_communication,
                        }
                        if self.shard_config.enable_tensor_parallelism
                        else {"make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by}
                    ),
                ),
                policy=policy,
                target_key=MistralModel,
            )

        # optimization configuration: swap RMSNorms for the fused kernel.
        if self.shard_config.enable_fused_normalization:
            self.append_or_create_submodule_replacement(
                description=[
                    SubModuleReplacementDescription(
                        suffix="input_layernorm",
                        target_module=FusedRMSNorm,
                    ),
                    SubModuleReplacementDescription(
                        suffix="post_attention_layernorm",
                        target_module=FusedRMSNorm,
                    ),
                ],
                policy=policy,
                target_key=MistralDecoderLayer,
            )

            self.append_or_create_submodule_replacement(
                description=SubModuleReplacementDescription(
                    suffix="norm",
                    target_module=FusedRMSNorm,
                ),
                policy=policy,
                target_key=MistralModel,
            )

        if self.shard_config.enable_flash_attention:
            self.append_or_create_method_replacement(
                description={
                    "forward": get_mistral_flash_attention_forward(self.shard_config),
                },
                policy=policy,
                target_key=MistralAttention,
            )
            if self.pipeline_stage_manager is None:
                # replace the Mistral model forward method (only safe without
                # pipeline parallelism, which installs its own forward below)
                self.append_or_create_method_replacement(
                    description={
                        "forward": get_mistral_model_forward_for_flash_attn(self.shard_config),
                    },
                    policy=policy,
                    target_key=MistralModel,
                )

        return policy

    def postprocess(self):
        # Nothing to undo/finalize after sharding.
        return self.model

    def set_pipeline_forward(self, model_cls: nn.Module, new_forward: Callable, policy: Dict) -> None:
        """If under pipeline parallel setting, replacing the original forward method of huggingface
        to customized forward method, and add this changing to policy."""
        if self.pipeline_stage_manager is None:
            return

        stage_manager = self.pipeline_stage_manager
        # A bare MistralModel owns its layers directly; wrapped variants
        # (CausalLM / SequenceClassification) keep them under `.model`.
        if self.model.__class__.__name__ == "MistralModel":
            module = self.model
        else:
            module = self.model.model

        if stage_manager.is_interleave:
            # Interleaved schedule: a stage may own several layer chunks, so the
            # (start, end) pairs are stored on the stage manager for the forward.
            layers_per_stage = stage_manager.distribute_layers(len(module.layers))
            stage_manager.stage_indices = stage_manager.get_stage_index(layers_per_stage)
            method_replacement = {
                "forward": partial(new_forward, stage_manager=stage_manager, shard_config=self.shard_config)
            }
        else:
            # 1F1B schedule: one contiguous layer slice per stage, passed explicitly.
            layers_per_stage = stage_manager.distribute_layers(len(module.layers))
            stage_index = stage_manager.get_stage_index(layers_per_stage)
            method_replacement = {
                "forward": partial(
                    new_forward, stage_manager=stage_manager, stage_index=stage_index, shard_config=self.shard_config
                )
            }

        self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=model_cls)

    def get_held_layers(self) -> List[Module]:
        """Get pipeline layers for current stage."""
        assert self.pipeline_stage_manager is not None

        if self.model.__class__.__name__ == "MistralModel":
            module = self.model
        else:
            module = self.model.model
        stage_manager = self.pipeline_stage_manager

        held_layers = []
        # rotary_emb is parameter-free and needed by every stage's attention.
        held_layers.append(module.rotary_emb)
        if stage_manager.is_interleave:
            assert stage_manager.num_model_chunks is not None
            layers_per_stage = stage_manager.distribute_layers(len(module.layers))
            stage_indices = stage_manager.get_stage_index(layers_per_stage)
            if stage_manager.is_first_stage(ignore_chunk=True):
                held_layers.append(module.embed_tokens)
            for start_idx, end_idx in stage_indices:
                held_layers.extend(module.layers[start_idx:end_idx])
            # Under ZBV the V-shaped schedule places the final norm on the first
            # stage; otherwise it lives on the last stage as usual.
            if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
                not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
            ):
                held_layers.append(module.norm)
        else:
            layers_per_stage = stage_manager.distribute_layers(len(module.layers))
            if stage_manager.is_first_stage():
                held_layers.append(module.embed_tokens)
            start_idx, end_idx = stage_manager.get_stage_index(layers_per_stage)
            held_layers.extend(module.layers[start_idx:end_idx])
            if stage_manager.is_last_stage():
                held_layers.append(module.norm)

        return held_layers


class MistralModelPolicy(MistralPolicy):
    """Policy for the bare MistralModel (no task head)."""

    def __init__(self) -> None:
        super().__init__()

    def module_policy(self):
        policy = super().module_policy()
        from transformers.models.mistral.modeling_mistral import MistralModel

        if self.pipeline_stage_manager:
            self.set_pipeline_forward(
                model_cls=MistralModel, new_forward=MistralForwards.mistral_model_forward, policy=policy
            )

        return policy

    def get_held_layers(self) -> List[Module]:
        """Get pipeline layers for current stage."""
        held_layers = super().get_held_layers()
        return held_layers

    def get_shared_params(self) -> List[Dict[int, Tensor]]:
        """No shared params in mistral model"""
        return []


class MistralForCausalLMPolicy(MistralPolicy):
    """Policy for MistralForCausalLM: shards the lm_head and wires the CLM forward."""

    def module_policy(self):
        from transformers import MistralForCausalLM

        policy = super().module_policy()

        if self.shard_config.enable_tensor_parallelism:
            # add a new item for causal lm: vocab-parallel lm_head; gather the
            # output only when the caller wants the full (non-parallel) logits.
            new_item = {
                MistralForCausalLM: ModulePolicyDescription(
                    sub_module_replacement=[
                        SubModuleReplacementDescription(
                            suffix="lm_head",
                            target_module=VocabParallelLMHead1D,
                            kwargs={
                                "gather_output": not self.shard_config.parallel_output,
                                "make_vocab_size_divisible_by": self.shard_config.make_vocab_size_divisible_by,
                                "fp8_communication": self.shard_config.fp8_communication,
                            },
                        )
                    ]
                )
            }
            if self.shard_config.parallel_output:
                # With parallel output the loss must be computed distributed
                # across the vocab shards.
                new_item[MistralForCausalLM].method_replacement = {
                    "forward": get_lm_forward_with_dist_cross_entropy(self.shard_config)
                }
        else:
            # No TP: only pad the vocab dimension for divisibility.
            new_item = {
                MistralForCausalLM: ModulePolicyDescription(
                    sub_module_replacement=[
                        SubModuleReplacementDescription(
                            suffix="lm_head",
                            target_module=PaddingLMHead,
                            kwargs=dict(
                                make_vocab_size_divisible_by=self.shard_config.make_vocab_size_divisible_by,
                            ),
                        )
                    ]
                )
            }
        policy.update(new_item)

        if self.pipeline_stage_manager:
            # set None as default
            self.set_pipeline_forward(
                model_cls=MistralForCausalLM, new_forward=MistralForwards.mistral_for_causal_lm_forward, policy=policy
            )
        return policy

    def get_held_layers(self) -> List[Module]:
        """Get pipeline layers for current stage."""
        stage_manager = self.pipeline_stage_manager
        held_layers = super().get_held_layers()
        if stage_manager.is_interleave:
            # lm_head follows the final norm's placement (first stage under ZBV,
            # last stage otherwise).
            if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
                not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
            ):
                held_layers.append(self.model.lm_head)
        else:
            if stage_manager.is_last_stage():
                held_layers.append(self.model.lm_head)

        return held_layers

    def get_shared_params(self) -> List[Dict[int, Tensor]]:
        """Return the embed_tokens/lm_head pair when weights are tied across pipeline stages."""
        mistral_model = self.model.model
        if self.pipeline_stage_manager and self.pipeline_stage_manager.num_stages > 1:
            if (
                id(mistral_model.embed_tokens.weight) == id(self.model.lm_head.weight)
                and self.pipeline_stage_manager.num_stages > 1
            ):
                # tie weights: first stage holds embed_tokens, last stage lm_head.
                return [
                    {
                        0: mistral_model.embed_tokens.weight,
                        self.pipeline_stage_manager.num_stages - 1: self.model.lm_head.weight,
                    }
                ]
        return []


class MistralForSequenceClassificationPolicy(MistralPolicy):
    """Policy for MistralForSequenceClassification: shards the score head."""

    def module_policy(self):
        from transformers import MistralForSequenceClassification

        policy = super().module_policy()

        if self.shard_config.enable_tensor_parallelism:
            # add a new item for sequence classification; gather so every rank
            # sees the full class logits.
            new_item = {
                MistralForSequenceClassification: ModulePolicyDescription(
                    sub_module_replacement=[
                        SubModuleReplacementDescription(
                            suffix="score",
                            target_module=Linear1D_Col,
                            kwargs=dict(gather_output=True, fp8_communication=self.shard_config.fp8_communication),
                        )
                    ]
                )
            }
            policy.update(new_item)

        if self.pipeline_stage_manager:
            # set None as default
            self.set_pipeline_forward(
                model_cls=MistralForSequenceClassification,
                new_forward=MistralForwards.mistral_for_sequence_classification_forward,
                policy=policy,
            )
        return policy

    def get_held_layers(self) -> List[Module]:
        """Get pipeline layers for current stage."""
        stage_manager = self.pipeline_stage_manager
        held_layers = super().get_held_layers()
        if stage_manager.is_interleave:
            # score head follows the final norm's placement (see base policy).
            if (stage_manager.use_zbv and stage_manager.is_first_stage(ignore_chunk=True)) or (
                not stage_manager.use_zbv and stage_manager.is_last_stage(ignore_chunk=True)
            ):
                held_layers.append(self.model.score)
        else:
            if stage_manager.is_last_stage():
                held_layers.append(self.model.score)
        return held_layers

    def get_shared_params(self) -> List[Dict[int, Tensor]]:
        """No shared params in mistral for sequence classification model"""
        return []
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/attn.py
colossalai/shardformer/layer/attn.py
from enum import Enum from typing import Callable, Dict, Optional, Tuple import torch import torch.distributed import torch.distributed as dist import torch.nn.functional as F from einops import rearrange from packaging import version from colossalai.kernel.kernel_loader import ( FlashAttentionDaoLoader, FlashAttentionForFloatAndCustomMaskLoader, FlashAttentionLoader, FlashAttentionWithCustomMaskLoader, KernelLoader, ) from colossalai.logging import get_dist_logger from .utils import RingComm, get_half_index, split_varlen_zigzag MEMORY_BOUND = 10 * 1e9 __all__ = [ "AttnMaskType", "ColoAttention", ] _flash_attn_forward = _flash_attn_backward = None _unpad_input = _pad_input = None class AttnMaskType(Enum): CUSTOM = 0 PADDED = 1 CAUSAL = 2 PADDED_CAUSAL = 3 def invert_mask(mask: torch.Tensor) -> torch.Tensor: """Invert the mask tensor. Args: mask (torch.Tensor): Mask tensor. Shape should be [B, 1, Sq, Skv] Returns: torch.Tensor: Inverted mask tensor. """ inverted_mask = 1.0 - mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(mask.dtype).min) # adapted from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/bert_padding.py def get_pad_info( padding_mask: torch.Tensor, invert: Optional[bool] = False, return_indices: Optional[bool] = True ) -> Tuple[int, torch.Tensor, torch.Tensor]: """Get padding information from padding mask. Args: padding_mask (torch.Tensor): Padding mask tensor. Shape should be [B, Skv] invert (Optional[bool], optional): Whether to reverse the padding mask. return_indices (Optional[bool], optional): Whether to return the indices of non-masked tokens. Returns: max_seqlen_in_batch (int): Maximum sequence length in the batch. cu_seqlens (torch.Tensor): Shape [B+1]. Cumulative sequence lengths of the sequences in the batch. indices (torch.Tensor): Shape [total_nonzero]. The indices of non-masked tokens from the flattened input sequence. 
""" if invert: padding_mask = padding_mask.logical_not() seqlens_in_batch = padding_mask.sum(dim=-1, dtype=torch.int32) if return_indices: indices = torch.nonzero(padding_mask.flatten(), as_tuple=False).flatten() max_seqlen_in_batch = seqlens_in_batch.max().item() cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) if return_indices: return max_seqlen_in_batch, cu_seqlens, indices return max_seqlen_in_batch, cu_seqlens class ColoAttention: _kernel_dispatch_map: Optional[Dict[torch.dtype, Dict[Optional[AttnMaskType], Callable]]] = None _flash_kernel_dispatch: Optional[Dict[torch.dtype, Dict[Optional[AttnMaskType], Callable]]] = None @staticmethod def _init_kernels_dispatch(): if ColoAttention._kernel_dispatch_map is None: # fp16/bf16 half_dispatch_map = { None: FlashAttentionLoader(), AttnMaskType.CUSTOM: FlashAttentionWithCustomMaskLoader(), AttnMaskType.PADDED: FlashAttentionLoader(), AttnMaskType.CAUSAL: FlashAttentionLoader(), AttnMaskType.PADDED_CAUSAL: FlashAttentionLoader(), } # fp32 float_dispatch_map = { None: FlashAttentionForFloatAndCustomMaskLoader(), AttnMaskType.CUSTOM: FlashAttentionForFloatAndCustomMaskLoader(), AttnMaskType.PADDED: FlashAttentionForFloatAndCustomMaskLoader(), AttnMaskType.CAUSAL: FlashAttentionForFloatAndCustomMaskLoader(), AttnMaskType.PADDED_CAUSAL: FlashAttentionForFloatAndCustomMaskLoader(), } ColoAttention._kernel_dispatch_map = { torch.float16: half_dispatch_map, torch.bfloat16: half_dispatch_map, torch.float32: float_dispatch_map, } if ColoAttention._flash_kernel_dispatch is None: ColoAttention._flash_kernel_dispatch = FlashAttentionDaoLoader() @staticmethod def _dispatch_kernel(dtype: torch.dtype, mask_type: Optional[AttnMaskType], size) -> Callable: ColoAttention._init_kernels_dispatch() if ( dtype not in ColoAttention._kernel_dispatch_map or mask_type not in ColoAttention._kernel_dispatch_map[dtype] ): raise ValueError( "FlashAttention kernel is not available for dtype {} and mask_type 
{}".format(dtype, mask_type) ) if size >= MEMORY_BOUND: if isinstance(ColoAttention._flash_kernel_dispatch, KernelLoader): ColoAttention._flash_kernel_dispatch = ColoAttention._flash_kernel_dispatch.load() # lazy load if isinstance(ColoAttention._kernel_dispatch_map[dtype][mask_type], KernelLoader): ColoAttention._kernel_dispatch_map[dtype][mask_type] = ColoAttention._kernel_dispatch_map[dtype][ mask_type ].load() if size >= MEMORY_BOUND and mask_type in (AttnMaskType.PADDED_CAUSAL, AttnMaskType.CAUSAL): return ColoAttention._flash_kernel_dispatch else: return ColoAttention._kernel_dispatch_map[dtype][mask_type] @staticmethod def prepare_attn_kwargs( shape_4d: Tuple[int], dtype: torch.dtype, device: torch.device, q_padding_mask: Optional[torch.Tensor] = None, kv_padding_mask: Optional[torch.Tensor] = None, is_causal: bool = False, invert: bool = True, ) -> Dict[str, torch.Tensor]: """Return a dictionary of keyword arguments for attention function. It supports 4 mask type. 1. custom mask: no padding mask and is_causal=False, return {}, users should handle attention mask by themselves. 2. padded mask: recv padding mask and is_causal=False, return {attention_mask, attention_mask_type, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, q_indices, kv_indices}. 3. causal mask: no padding mask and is_causal=True, return {attention_mask, attention_mask_type}. 4. padded causal mask: recv padding mask and is_causal=True, return {attention_mask, attention_mask_type, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, q_indices, kv_indices}. Args: shape_4d (Tuple[int]): Should be (B, 1, Sq, Skv) dtype (torch.dtype): Dtype of attention mask, generally should be ``hidden_states.dtype`` device (torch.device): Device of attention mask, generally should be ``hidden_states.device`` q_padding_mask (Optional[torch.Tensor], optional): Padding mask of query. It should be a long tensor or int tensor. The shape should be [B, Sq]. 
``1`` means valid token, and ``0`` means padding token. Defaults to None. kv_padding_mask (Optional[torch.Tensor], optional): Padding mask of key and value. It should be a long tensor or int tensor. The shape should be [B, Skv]. ``1`` means valid token, and ``0`` means padding token. If it's None and ``q_padding_mask`` is not None, it will be set to ``q_padding_mask``. Defaults to None. is_causal (bool, optional): Whether to use causal attention mask. Defaults to False. invert_mask (bool, optional): Whether to invert the mask. Defaults to True. Returns: Dict[str, torch.Tensor]: Dictionary of keyword arguments for attention function. """ if q_padding_mask is None and not is_causal: return {} assert len(shape_4d) == 4 and shape_4d[1] == 1 b, _, s_q, s_kv = shape_4d element_size = torch.tensor([], dtype=dtype).element_size() memory_size = s_q * s_kv * element_size outputs = {} if (q_padding_mask is None or q_padding_mask.bool().all()) and ( kv_padding_mask is None or kv_padding_mask.bool().all() ): # no padding assert is_causal outputs["attention_mask_type"] = AttnMaskType.CAUSAL if memory_size < MEMORY_BOUND: attention_mask = torch.ones(s_q, s_kv, dtype=dtype, device=device) if s_q != 1: attention_mask.tril_(diagonal=0) attention_mask = attention_mask.expand(b, s_q, s_kv) else: attention_mask = torch.empty((0,), dtype=dtype, device=device) else: max_seqlen_q, cu_seqlens_q, q_indices = get_pad_info(q_padding_mask) if kv_padding_mask is None: # self attention kv_padding_mask = q_padding_mask max_seqlen_kv, cu_seqlens_kv, kv_indices = max_seqlen_q, cu_seqlens_q, q_indices else: max_seqlen_kv, cu_seqlens_kv, kv_indices = get_pad_info(kv_padding_mask) assert kv_padding_mask.shape == ( b, s_kv, ), f"Padding mask shape {kv_padding_mask.shape} should align with shape 4d ({b}, {s_kv})" outputs.update( { "cu_seqlens_q": cu_seqlens_q, "cu_seqlens_kv": cu_seqlens_kv, "max_seqlen_q": max_seqlen_q, "max_seqlen_kv": max_seqlen_kv, "q_indices": q_indices, "kv_indices": kv_indices, } 
) if is_causal: outputs["attention_mask_type"] = AttnMaskType.PADDED_CAUSAL if memory_size < MEMORY_BOUND: if s_q != 1: attention_mask = kv_padding_mask[:, None, :].expand(b, s_q, s_kv).to(dtype=dtype, device=device) attention_mask = attention_mask * attention_mask.new_ones(s_q, s_kv).tril(diagonal=0) else: attention_mask = torch.empty((0,), dtype=dtype, device=device) else: outputs["attention_mask_type"] = AttnMaskType.PADDED if memory_size < MEMORY_BOUND: attention_mask = kv_padding_mask[:, None, :].expand(b, s_q, s_kv).to(dtype=dtype, device=device) if invert: attention_mask = invert_mask(attention_mask).unsqueeze(1) outputs["attention_mask"] = attention_mask return outputs @staticmethod def attention( q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, attention_mask_type: AttnMaskType = AttnMaskType.CUSTOM, cu_seqlens_q: Optional[torch.Tensor] = None, cu_seqlens_kv: Optional[torch.Tensor] = None, max_seqlen_q: Optional[int] = None, max_seqlen_kv: Optional[int] = None, q_indices: Optional[torch.Tensor] = None, kv_indices: Optional[torch.Tensor] = None, dropout_p: float = 0.0, scale: Optional[float] = None, **kwargs, ) -> torch.Tensor: """Flash Attention function. It supports 4 mask type. 1. custom mask: recv attention_mask 2. padded mask: recv attention_mask, attention_mask_type, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, indices 3. causal mask: recv attention_mask, attention_mask_type 4. padded causal mask: recv attention_mask, attention_mask_type, cu_seqlens_q, cu_seqlens_kv, max_seqlen_q, max_seqlen_kv, indices Args: q (torch.Tensor): Query tensor. Shape should be [B, nHeads, Sq, D] k (torch.Tensor): Key tensor. Shape should be [B, nHeads, Skv, D] v (torch.Tensor): Value tensor. Shape should be [B, nHeads, Skv, D] attention_mask (Optional[torch.Tensor], optional): Attention mask tensor. Shape should be [B, 1, Sq, Skv]. Defaults to None. 
attention_mask_type (AttnMaskType, optional): Attention mask type. Defaults to AttnMaskType.CUSTOM. cu_seqlens_q (Optional[torch.Tensor], optional): The cumulative sequence lengths of the sequences in the batch, used to index into q. Shape should be [B+1]. Defaults to None. cu_seqlens_kv (Optional[torch.Tensor], optional): The cumulative sequence lengths of the sequences in the batch, used to index into kv. Shape should be [B+1]. Defaults to None. max_seqlen_q (Optional[int], optional): Maximum query sequence length in the batch. Defaults to None. max_seqlen_kv (Optional[int], optional): Maximum key/value sequence length in the batch. Defaults to None. indices (Optional[torch.Tensor], optional): The indices of non-masked tokens from the flattened input sequence. Shape should be [NUM_TOKENS]. Defaults to None. dropout_p (float, optional): Dropout probability. Defaults to 0.0. scale (Optional[float], optional): Scaling factor applied prior to softmax. Defaults to None. Returns: torch.Tensor: Output tensor. Shape should be [B, nHeads, Sq, D] """ # known issue: sdpa does not support attention mask which contains whole row of masked tokens, which leads to nan # this case is usaul when padding mask is used and self attention is performed # thus, we don't use sdpa when padding mask is used # sanity check if attention_mask is not None: assert torch.is_floating_point(attention_mask), "attention_mask should be a floating point tensor." 
if attention_mask_type in (AttnMaskType.CUSTOM, AttnMaskType.CAUSAL): assert ( cu_seqlens_q is None and cu_seqlens_kv is None and max_seqlen_q is None and max_seqlen_kv is None and q_indices is None and kv_indices is None ) if attention_mask_type == AttnMaskType.CUSTOM: assert not torch.all(attention_mask != 0, dim=-1).any() elif attention_mask_type in ( AttnMaskType.PADDED, AttnMaskType.PADDED_CAUSAL, ): assert ( cu_seqlens_q is not None and cu_seqlens_kv is not None and max_seqlen_q is not None and max_seqlen_kv is not None and q_indices is not None and kv_indices is not None ) else: # if attention_mask is None, attention_mask_type should be the default value assert attention_mask_type == AttnMaskType.CUSTOM # kernel dispatch b, _, s_q, _ = q.shape b, _, s_kv, _ = v.shape element_size = torch.tensor([], dtype=q.dtype).element_size() memory_size = s_q * s_kv * element_size mask_type = attention_mask_type if attention_mask is not None else None attn_func = ColoAttention._dispatch_kernel(q.dtype, mask_type, memory_size) is_causal = attention_mask is not None and attention_mask_type in ( AttnMaskType.CAUSAL, AttnMaskType.PADDED_CAUSAL, ) return attn_func( q, k, v, dropout_p=dropout_p, scale=scale, attention_mask=attention_mask, is_causal=is_causal, cu_seqlens_q=cu_seqlens_q, cu_seqlens_kv=cu_seqlens_kv, max_seqlen_q=max_seqlen_q, max_seqlen_kv=max_seqlen_kv, q_indices=q_indices, kv_indices=kv_indices, ) def _load_varlen_helpers(): """Helper to load functions for padding and unpadding packed sequences. Use only when flash attn is installed """ global _pad_input, _unpad_input # Flash attn claims this is more efficient than torch's bool indexing due to avoiding # broadcast if _pad_input is None or _unpad_input is None: try: from flash_attn.bert_padding import index_first_axis, pad_input def unpad_input(hidden_states: torch.Tensor, indices: torch.Tensor): return index_first_axis(rearrange(hidden_states, "b s ... 
-> (b s) ..."), indices) _pad_input = pad_input _unpad_input = unpad_input except ImportError as e: raise RuntimeError( f"Flash Attention is not installed. You can install it via 'pip install flash-attn --no-build-isolation'" ) from e def _load_flash_attn(): """A light-weight loader to check whether flash-attn is installed. Can't use ColoAttention._dispatch_kernel because we mutate the backward pass """ global _flash_attn_forward, _flash_attn_backward if _flash_attn_forward is None or _flash_attn_backward is None: try: from flash_attn.flash_attn_interface import _flash_attn_varlen_backward as _flash_attn_backward from flash_attn.flash_attn_interface import _flash_attn_varlen_forward as _flash_attn_forward except ImportError as e: raise RuntimeError( f"Flash Attention is not installed. You can install it via 'pip install flash-attn --no-build-isolation'" ) from e _load_varlen_helpers() # NOTE: This can cause spawned processes to hang on exit # with python 3.9 @torch.compile() def _rescale_out_lse(out, block_out, lse, block_lse): """ Compute the new attention denominator: exp(lse) + exp(block_lse) = exp(max_scale) * (exp(min_scale - max_scale) + 1) Args: out: (T, H, D) block_out: (T, H, D) lse: (H, T, 1) block_lse: (H, T, 1) """ # min_scale = torch.min(lse, block_lse) # max_scale = torch.max(lse, block_lse) # new_lse = max_scale + torch.log(1 + torch.exp(min_scale - max_scale)) # NOTE: directly assigning to .data here is buggy # probably due to casting dtypes/strides new_lse = lse + torch.log(1 + torch.exp(block_lse - lse)) new_block_lse = torch.exp(block_lse - new_lse) out = (torch.exp(lse - new_lse) * out + new_block_lse * block_out).to(out) lse = new_lse # Equivalent to the above # See https://github.com/zhuzilin/ring-flash-attention/pull/34#issuecomment-2076126795 # out = (out - F.sigmoid(block_lse - lse) * (out - block_out)) # lse = (lse - F.logsigmoid(lse - block_lse)) return out, lse class RingAttention(torch.autograd.Function): """Implements the Ring 
Attention from `Ring Attention with Blockwise Transformers for Near-Infinite Context` (https://arxiv.org/abs/2310.01889). For load-balancing, we adopted the "zigzag" dataloading scheme from ring-flash-attention. We also adopt the double ring topology from LoongTrain to fully utilize available NICs on each node, by computing attention within a inner ring first and then sending all KVs to the next ring at once. Our implementation references code from - ring-flash-attention: https://github.com/zhuzilin/ring-flash-attention/tree/main - Megatron Context Parallel: https://github.com/NVIDIA/TransformerEngine/pull/726 References: - Ring Attention with Blockwise Transformers for Near-Infinite Context https://arxiv.org/abs/2310.01889 - LoongTrain: Efficient Training of Long-Sequence LLMs with Head-Context Parallelism https://arxiv.org/abs/2406.18485 """ # Globle cache to avoid recomputation for same-lengthed sequences CU_SEQLENS: torch.Tensor = None # [B+1] TOTAL_SEQLEN: int = None HALF_INDICES: Tuple = None SUPPORTED_MASK_TYPES = (AttnMaskType.CAUSAL, AttnMaskType.PADDED_CAUSAL) ATTN_DONE: torch.cuda.Event = None SP_STREAM: torch.cuda.Stream = None SP_GROUP: dist.ProcessGroup = None # NOTE: Duplicating PGs for concurrent NCCL streams is a risky hack -- while it may increase throughput, # both PyTorch and NCCL warn against this. (https://github.com/pytorch/pytorch/commit/2dbe5cb979f674f0052a8eea1f7b6c3c0ba441d7) # LoongTrain's original double ring impl. uses concurrent PGs # (https://github.com/InternLM/InternEvo/blob/e52f2ffc9acf818e8f2b1f97dfc69ceb2f06e154/internlm/model/ops/ring_flash_attn/zigzag_ring_flash_attn_with_sliding_window.py#L192) # but I confirmed with Pytorch developers this can cause obscure "Software caused connection abort" errors. # (https://github.com/pytorch/pytorch/issues/132852) # NOTE: In general, a smarter idea is put as many P2P calls as possible into one `batch_isend_irecv`. 
INNER_RING_GROUP: dist.ProcessGroup = None # INNER_RING_GROUP_COPY: dist.ProcessGroup = None INTER_RING_GROUP: dist.ProcessGroup = None # INTER_RING_GROUP_COPY: dist.ProcessGroup = None @staticmethod def get_double_ring_groups(sp_axis, pg_mesh, inner_ring_size=None): """ Get 2D ring groups for the given process group. Generally, to avoid congestion, the inner ring size shouldn't be larger than the number of NICs on each node. Args: sp_group (dist.ProcessGroup): Process group for sequence parallelism inner_ring_size (Optional[int], optional): Inner ring size. Defaults to None. Returns: Tuple[dist.ProcessGroup, dist.ProcessGroup]: Inner-ring process group and inter-ring process group. """ assert pg_mesh is not None, f"Error: The pg mesh is None! please check the process group initialization." sp_group = pg_mesh.get_group_along_axis(sp_axis) sp_size = dist.get_world_size(sp_group) sp_rank = dist.get_rank(sp_group) assert inner_ring_size is not None assert ( inner_ring_size <= sp_size and sp_size % inner_ring_size == 0 ), f"Error: sp_size {sp_size} should be divisible by inner_ring_size {inner_ring_size}" if inner_ring_size == sp_size: return sp_group, sp_group assert ( sp_size % inner_ring_size == 0 ), f"sp_size {sp_size} should be divisible by inner_ring_size {inner_ring_size}" logger = get_dist_logger() logger.info( f"Using 2D Ring Attention with inner ring size {inner_ring_size} to maximze NIC util for inter-node comm. 
Cross your fingers for speed-ups!", ranks=[0], ) num_rings = sp_size // inner_ring_size inner_ring_group = None inter_ring_group = None # Create inner ring groups for i in range(inner_ring_size): ranks = list(range(i * inner_ring_size, (i + 1) * inner_ring_size)) group = pg_mesh.get_group_along_axis(sp_axis, ranks) if sp_rank in ranks: inner_ring_group = group # Create inter ring groups for i in range(num_rings): ranks = list(range(i, sp_size, num_rings)) group = pg_mesh.get_group_along_axis(sp_axis, ranks) if sp_rank in ranks: inter_ring_group = group return inner_ring_group, inter_ring_group @staticmethod def attention( q, # (B, H, Sq, D) k, v, sp_axis, attention_mask_type, cu_seqlens=None, max_seqlen=None, valid_indices=None, dropout_p=0.0, softmax_scale=None, deterministic=False, return_softmax=False, inner_ring_size=None, pg_mesh=None, **kwargs, ): """ Ring Attention forward pass supporting variable-length sequences. When using varlen mode, each sequence in the batch should have length divisible by sp_size * 2. Args: q (torch.Tensor): Query tensor. Shape should be [B, nHeads, Sq, D] k (torch.Tensor): Key tensor. Shape should be [B, nHeads, Sq, Sq, D] v (torch.Tensor): Value tensor. Shape should be [B, nHeads, Sq, Sq, D] sp_axis (Optional[int]): Sp axis for the global pg mesh. sp_tream (torch.cuda.Stream): An different stream for output correction. cu_seqlens (Optional[torch.Tensor], optional): The cumulative sequence lengths of the sequences in the batch, used to index into q. Shape should be [B+1]. max_seqlen (Optional[int], optional): Maximum query sequence length in the batch. valid_indices (Optional[torch.Tensor], optional): The indices of non-masked tokens from get_pad_info. Shape should be [t]. dropout_p (float, optional): Dropout probability. Defaults to 0.0. softmax_scale (Optional[float], optional): Scaling factor applied prior to softmax. deterministic (bool, optional): Whether to force deterministic backward pass. 
See https://github.com/Dao-AILab/flash-attention/issues/349 return_softmax (bool, optional): Whether to return the softmax denominator (logsumexp). inner_ring_size (Optional[int], optional): Inner ring size of the 2D ring. By default use a heuristic to decide. Returns: out: Output tensor of shape [B, nHeads, Sq, D] or [T, nHeads, D] if pad_output is False. softmax_lse: (if return_softmax is True) Softmax denominator (logsumexp). Shape should be [total_q_seqlen, nHeads] """ # Check input args _load_flash_attn() if RingAttention.ATTN_DONE is None: RingAttention.ATTN_DONE = torch.cuda.Event() if RingAttention.SP_STREAM is None: RingAttention.SP_STREAM = torch.cuda.Stream() assert ( q.shape[2] == k.shape[2] ), "Q, K and V having different sequence lengths (inference or cross-attn)\ is not supported yet in training." assert ( attention_mask_type in RingAttention.SUPPORTED_MASK_TYPES ), f"Mask type {attention_mask_type} is not supported yet." assert pg_mesh is not None, f"Error: The pg mesh is None! please check the process group initialization." 
clone_pg = lambda pg: dist.new_group(dist.get_process_group_ranks(pg)) sp_group = pg_mesh.get_group_along_axis(sp_axis) if inner_ring_size != None: RingAttention.SP_GROUP = sp_group inner_ring_group, inter_ring_group = RingAttention.get_double_ring_groups(sp_axis, pg_mesh, inner_ring_size) RingAttention.INNER_RING_GROUP = inner_ring_group RingAttention.INTER_RING_GROUP = inter_ring_group else: inner_ring_group = RingAttention.INNER_RING_GROUP inter_ring_group = RingAttention.INTER_RING_GROUP # (B, H, Sq, D) -> (B, Sq, H, D) q, k, v = [x.transpose(1, 2).contiguous() for x in (q, k, v)] pad_output = q.dim() == 4 # Get sequence length info for varlen forward if attention_mask_type == AttnMaskType.CAUSAL: # All sequences share the same length b, sq, h, d = q.shape max_seqlen = sq # Cache to avoid recreation for a single sequence if sq * b == RingAttention.TOTAL_SEQLEN: cu_seqlens = RingAttention.CU_SEQLENS else: cu_seqlens = torch.arange(0, b * sq + 1, sq, device=q.device, dtype=torch.int32) RingAttention.TOTAL_SEQLEN = b * sq # "Packed" mode where sequences of different lengths are packed into [total_q_seqlen, H, D] elif attention_mask_type == AttnMaskType.PADDED_CAUSAL: assert ( cu_seqlens is not None and max_seqlen is not None and valid_indices is not None ), "Packed mode requires pre-computed cu_seqlens and max_seq_len." if pad_output: b, sq, h, d = q.shape q, k, v = [_unpad_input(x, valid_indices) for x in (q, k, v)] out, softmax_lse = RingAttention.apply( q, k, v, sp_group, RingAttention.SP_STREAM, cu_seqlens, max_seqlen, dropout_p, softmax_scale, deterministic, return_softmax, attention_mask_type == AttnMaskType.PADDED_CAUSAL, inner_ring_group, inter_ring_group, ) if attention_mask_type == AttnMaskType.PADDED_CAUSAL: if pad_output: out = _pad_input(out, valid_indices, b, sq) # (T, ...) -> (B, Sq, ...) 
out = out.transpose(1, 2) # (B, Sq, H, D) -> (B, H, Sq, D) else: out = out.transpose(1, 2) if return_softmax: return out, softmax_lse return out @staticmethod def forward( ctx, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, sp_group: dist.ProcessGroup, sp_stream: torch.cuda.Stream, cu_seqlens: torch.Tensor, max_seqlen: int, dropout_p: float = 0.0, softmax_scale: Optional[float] = None, deterministic: Optional[bool] = False, return_softmax: Optional[bool] = False, is_packed: Optional[bool] = False, inner_ring_group: Optional[dist.ProcessGroup] = None, inter_ring_group: Optional[dist.ProcessGroup] = None, ): """ Forward supporting both packed (varlen) and batched(fixed length, no padding) sequences. No separate version for batched seq (hard to maintain), which incurs some overhead in sequence splitting due to python for loops. Uses two CUDA streams to overlap softmax denominator correction with next flash attn (see comments below). """ cu_seqlens_q = cu_seqlens_kv = cu_seqlens max_seqlen_q = max_seqlen_kv = max_seqlen cu_seqlens_half = cu_seqlens // 2 max_seqlen_half = max_seqlen // 2 misc_kwargs = { "alibi_slopes": None, "softmax_scale": q.shape[-1] ** -0.5 if softmax_scale is None else softmax_scale, "dropout_p": dropout_p, "block_table": None, "softcap": 0.0, "return_softmax": False, } import flash_attn if version.parse(flash_attn.__version__) > version.parse("2.6.3"): misc_kwargs["window_size_left"] = -1 misc_kwargs["window_size_right"] = -1 else: misc_kwargs["window_size"] = (-1, -1) if ( RingAttention.HALF_INDICES is not None and cu_seqlens.shape == RingAttention.CU_SEQLENS.shape and (cu_seqlens == RingAttention.CU_SEQLENS).all() ): half_idx_front, half_idx_back = RingAttention.HALF_INDICES else: half_idx_front = get_half_index(cu_seqlens, front=True) half_idx_back = get_half_index(cu_seqlens, front=False) RingAttention.HALF_INDICES = (half_idx_front, half_idx_back) RingAttention.CU_SEQLENS = cu_seqlens if is_packed: t, h, d = q.shape else: b, sq, h, d = 
q.shape t = b * sq # Be careful about GQA/MQA in reshape q, k, v = [x.view(t, *x.shape[-2:]) for x in (q, k, v)] if inner_ring_group is None or inter_ring_group is None: # Use one ring if not specified inner_ring_group = inter_ring_group = sp_group sp_size = dist.get_world_size(sp_group) sp_rank = dist.get_rank(sp_group) # Create communicators corresponding to two CUDA streams local_kv_comms = [RingComm(inner_ring_group) for _ in range(2)] inter_ring_comm = RingComm(inter_ring_group) local_sp_size = dist.get_world_size(inner_ring_group) local_sp_rank = dist.get_rank(inner_ring_group) inter_ring_rank = dist.get_rank(inter_ring_group) if inter_ring_group is not sp_group else 0 num_rings = dist.get_world_size(inter_ring_group) if inter_ring_group is not sp_group else 1 # Any type of indexing(but not slicing) copies to a new contiguous tensor, # so only do it once if sp_rank != sp_size - 1: q1 = q[half_idx_back] # Pre-allocate double buffer for overlapping and receiving next step's inputs kv_buffers = [torch.stack((k, v))] # (2, B, Sq, H, D) kv_buffers.append(torch.empty_like(kv_buffers[0])) # outputs out = None block_out = [None, None] softmax_lse = [None, None] block_softmax_lse = [None, None] # log sum exp, the denominator of softmax in attention
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/_operation.py
colossalai/shardformer/layer/_operation.py
import functools import torch import torch.distributed as dist import torch.nn.functional as F from colossalai.pipeline.weight_grad_store import WeightGradStore from .utils import ( execute_conv1d_w_pass, execute_conv1d_w_pass_grad_accum, execute_w_pass, execute_w_pass_grad_accum, is_share_sp_tp, ) try: import fused_mix_prec_layer_norm_cuda except: fused_mix_prec_layer_norm_cuda = None try: import fused_weight_gradient_mlp_cuda _grad_accum_fusion_available = True except ImportError: _grad_accum_fusion_available = False from colossalai.quantization.fp8 import ( all_gather_fp8, all_reduce_fp8, all_to_all_fp8, all_to_all_single_fp8, reduce_scatter_fp8, ) class FusedLayerNormAffineFunction1D(torch.autograd.Function): r"""Layernorm Args: input: input matrix. weight: weight matrix. bias: bias matrix. normalized_shape: input shape from an expected input of size. :math:`[* \times \text{normalized_shape}[0] \times \text{normalized_shape}[1] \times \ldots \times \text{normalized_shape}[-1]]` If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. 
eps: a value added to the denominator for numerical stability """ @staticmethod def forward(ctx, input, weight, bias, normalized_shape, eps): ctx.normalized_shape = normalized_shape ctx.eps = eps input_ = input.contiguous() weight_ = weight.contiguous() bias_ = bias.contiguous() output, mean, invvar = fused_mix_prec_layer_norm_cuda.forward_affine( input_, ctx.normalized_shape, weight_, bias_, ctx.eps ) ctx.save_for_backward(input_, weight_, bias_, mean, invvar) return output @staticmethod def backward(ctx, grad_output): input_, weight_, bias_, mean, invvar = ctx.saved_tensors grad_input = grad_weight = grad_bias = None grad_input, grad_weight, grad_bias = fused_mix_prec_layer_norm_cuda.backward_affine( grad_output.contiguous(), mean, invvar, input_, ctx.normalized_shape, weight_, bias_, ctx.eps ) return grad_input, grad_weight, grad_bias, None, None class MatmulWithAsyncCommunication(torch.autograd.Function): """ Linear layer execution with asynchronous communication in backprop. """ @staticmethod def forward(ctx, input_, weight, bias, process_group, async_grad_allreduce, fp8_communication=False, use_zbv=False): ctx.save_for_backward(input_, weight, bias) ctx.use_bias = bias is not None ctx.process_group = process_group ctx.async_grad_allreduce = async_grad_allreduce ctx.fp8_communication = fp8_communication ctx.use_zbv = use_zbv output = torch.matmul(input_, weight) if bias is not None: output = output + bias return output @staticmethod def backward(ctx, grad_output): input, weight, bias = ctx.saved_tensors use_bias = ctx.use_bias fp8_communication = ctx.fp8_communication use_zbv = ctx.use_zbv # In order to be hooked into Gemini's '__torch_function__', adding a view operation to weight and bias. 
weight_origin = weight weight = weight.view(weight.shape) if bias is not None: bias = bias.view(bias.shape) total_input = input grad_input = grad_output.matmul(weight.T) grad_output = grad_output.contiguous() # Convert the tensor shapes to 2D for execution compatibility if len(grad_output.shape) > 2: grad_output = grad_output.view(-1, grad_output.shape[-1]) total_input = total_input.view(-1, total_input.shape[-1]) if fp8_communication or not ctx.async_grad_allreduce: _reduce(grad_input, group=ctx.process_group, fp8_communication=fp8_communication, fp8_format="e5m2") elif ctx.async_grad_allreduce: # Asynchronous all-reduce handle = dist.all_reduce(grad_input, group=ctx.process_group, async_op=True) # Rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to have # all-reduce scheduled first and have GPU resources allocated, CUDA_DEVICE_MAX_CONNECTIONS=1 is set in shardformer.py # split dx & dw if _grad_accum_fusion_available and weight.grad is not None: grad = weight.grad if use_zbv: WeightGradStore.put( total_input, grad_output, (weight, weight_origin), functools.partial( execute_conv1d_w_pass_grad_accum, ), ) grad_weight = None else: if grad.dtype == torch.float32: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, grad) grad_weight = None elif grad.dtype == torch.float16: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, grad) grad_weight = None else: grad_weight = total_input.t().matmul(grad_output) else: if use_zbv: WeightGradStore.put( total_input, grad_output, (weight, weight_origin), functools.partial( execute_conv1d_w_pass, wgrad_gemm_func=torch.matmul, ), ) grad_weight = None else: grad_weight = total_input.t().matmul(grad_output) grad_bias = grad_output.sum(dim=0) if use_bias else None if ctx.async_grad_allreduce and not fp8_communication: handle.wait() return grad_input, grad_weight, grad_bias, None, None, None, None class MatmulWithGradAccum(torch.autograd.Function): """ Linear layer execution with grad accum in 
backprop. (no tp version) """ @staticmethod def forward(ctx, input_, weight, bias, async_grad_allreduce, use_zbv=False): ctx.save_for_backward(input_, weight, bias) ctx.use_bias = bias is not None ctx.async_grad_allreduce = async_grad_allreduce ctx.use_zbv = use_zbv output = torch.matmul(input_, weight) if bias is not None: output = output + bias return output @staticmethod def backward(ctx, grad_output): input, weight, bias = ctx.saved_tensors use_bias = ctx.use_bias use_zbv = ctx.use_zbv # In order to be hooked into Gemini's '__torch_function__', adding a view operation to weight and bias. weight_origin = weight weight = weight.view(weight.shape) if bias is not None: bias = bias.view(bias.shape) total_input = input grad_input = grad_output.matmul(weight.T) grad_output = grad_output.contiguous() # Convert the tensor shapes to 2D for execution compatibility if len(grad_output.shape) > 2: grad_output = grad_output.view(-1, grad_output.shape[-1]) total_input = total_input.view(-1, total_input.shape[-1]) # split dx & dw if _grad_accum_fusion_available and weight.grad is not None: grad = weight.grad if use_zbv: WeightGradStore.put( total_input, grad_output, (weight, weight_origin), functools.partial( execute_conv1d_w_pass_grad_accum, ), ) grad_weight = None else: if grad.dtype == torch.float32: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, grad) grad_weight = None elif grad.dtype == torch.float16: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, grad) grad_weight = None else: grad_weight = total_input.t().matmul(grad_output) else: if use_zbv: WeightGradStore.put( total_input, grad_output, (weight, weight_origin), functools.partial( execute_conv1d_w_pass, wgrad_gemm_func=torch.matmul, ), ) grad_weight = None else: grad_weight = total_input.t().matmul(grad_output) grad_bias = grad_output.sum(dim=0) if use_bias else None return grad_input, grad_weight, grad_bias, None, None, None, None class 
LinearWithAsyncCommunication(torch.autograd.Function): """ Linear layer execution with asynchronous communication in backprop. """ @staticmethod def forward(ctx, input_, weight, bias, process_group, async_grad_allreduce, fp8_communication=False, use_zbv=False): ctx.save_for_backward(input_, weight, bias) ctx.use_bias = bias is not None ctx.process_group = process_group ctx.async_grad_allreduce = async_grad_allreduce ctx.fp8_communication = fp8_communication ctx.use_zbv = use_zbv if bias is not None: output = F.linear(input_, weight, bias) else: output = F.linear(input_, weight) return output @staticmethod def backward(ctx, grad_output): input, weight, bias = ctx.saved_tensors use_bias = ctx.use_bias fp8_communication = ctx.fp8_communication use_zbv = ctx.use_zbv # In order to be hooked into Gemini's '__torch_function__', adding a view operation to bias. if use_bias: bias.view(bias.shape) total_input = input.contiguous() grad_input = grad_output.matmul(weight) grad_output = grad_output.contiguous() # Convert the tensor shapes to 2D for execution compatibility if len(grad_output.shape) > 2: grad_output = grad_output.view(-1, grad_output.shape[-1]) total_input = total_input.view(-1, total_input.shape[-1]) if ctx.async_grad_allreduce: # Asynchronous all-reduce if fp8_communication: all_reduce_fp8(grad_input, group=ctx.process_group) else: handle = dist.all_reduce(grad_input, group=ctx.process_group, async_op=True) # Relay on CUDA_DEVICE_MAX_CONNECTIONS=1 to have # all-reduce scheduled first and have GPU resources allocated, CUDA_DEVICE_MAX_CONNECTIONS=1 is set in shardformer.py if _grad_accum_fusion_available and weight.grad is not None: grad = weight.grad if use_zbv: WeightGradStore.put( total_input, grad_output, weight, functools.partial( execute_w_pass_grad_accum, ), ) grad_weight = None else: if grad.dtype == torch.float32: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, grad) grad_weight = None elif grad.dtype == torch.float16: 
fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, grad) grad_weight = None else: grad_weight = grad_output.t().matmul(total_input) else: if use_zbv: WeightGradStore.put( total_input, grad_output, weight, functools.partial( execute_w_pass, wgrad_gemm_func=torch.matmul, ), ) grad_weight = None else: grad_weight = grad_output.t().matmul(total_input) grad_bias = grad_output.sum(dim=0) if use_bias else None if ctx.async_grad_allreduce and not fp8_communication: handle.wait() return grad_input, grad_weight, grad_bias, None, None, None, None class LinearWithGradAccum(torch.autograd.Function): """ Linear layer baseline (no tensor parallel version). """ @staticmethod def forward(ctx, input_, weight, bias, async_grad_allreduce, use_zbv=False): ctx.save_for_backward(input_, weight, bias) ctx.use_bias = bias is not None ctx.async_grad_allreduce = async_grad_allreduce ctx.use_zbv = use_zbv if bias is not None: output = F.linear(input_, weight, bias) else: output = F.linear(input_, weight) return output @staticmethod def backward(ctx, grad_output): input, weight, bias = ctx.saved_tensors use_bias = ctx.use_bias use_zbv = ctx.use_zbv # In order to be hooked into Gemini's '__torch_function__', adding a view operation to bias. 
if use_bias: bias.view(bias.shape) total_input = input.contiguous() grad_input = grad_output.matmul(weight) grad_output = grad_output.contiguous() # Convert the tensor shapes to 2D for execution compatibility if len(grad_output.shape) > 2: grad_output = grad_output.view(-1, grad_output.shape[-1]) total_input = total_input.view(-1, total_input.shape[-1]) if _grad_accum_fusion_available and weight.grad is not None: grad = weight.grad if use_zbv: WeightGradStore.put( total_input, grad_output, weight, functools.partial( execute_w_pass_grad_accum, ), ) grad_weight = None else: if grad.dtype == torch.float32: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, grad) grad_weight = None elif grad.dtype == torch.float16: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, grad) grad_weight = None else: grad_weight = grad_output.t().matmul(total_input) else: if use_zbv: WeightGradStore.put( total_input, grad_output, weight, functools.partial( execute_w_pass, wgrad_gemm_func=torch.matmul, ), ) grad_weight = None else: grad_weight = grad_output.t().matmul(total_input) grad_bias = grad_output.sum(dim=0) if use_bias else None return grad_input, grad_weight, grad_bias, None, None, None, None def _ring_as_gather(func, input_to_gather=None, input_local=None, process_group=None, gather_dim=1, keep_item=False): # currently only support one single tensor as output group_size = dist.get_world_size(process_group) cur_rank = dist.get_rank(process_group) # output_tensors = [torch.empty((input_shape[0], input_shape[1], weight_shape[0])) for _ in range(group_size)] # initialization of ring communication recv_rank = cur_rank + 1 if cur_rank + 1 < group_size else 0 send_rank = cur_rank - 1 if cur_rank > 0 else group_size - 1 rank_map = list(dist.get_process_group_ranks(process_group)) recv_rank = rank_map[recv_rank] send_rank = rank_map[send_rank] recv_tensors = {} send_tensors = {} for k, v in input_to_gather.items(): recv_tensors[k] = 
torch.empty_like(v) send_tensors[k] = v.clone() def communicate_step(): comm_ops = [] for k in recv_tensors: comm_ops.append(dist.P2POp(dist.irecv, recv_tensors[k], recv_rank, group=process_group)) comm_ops.append(dist.P2POp(dist.isend, send_tensors[k], send_rank, group=process_group)) return dist.batch_isend_irecv(comm_ops) def switch_step(): for k in recv_tensors: send_tensors[k], recv_tensors[k] = recv_tensors[k], send_tensors[k] input_tensors = [] output_tensors = [] handles = communicate_step() # first round: special case, retrive from local tensor input_tensors.append(input_to_gather) output_tensors.append(func(**input_to_gather, **input_local)) for i in range(group_size - 2): for handle in handles: handle.wait() switch_step() handles = communicate_step() # actual computation input_tensors.append(send_tensors) output_tensors.append(func(**send_tensors, **input_local)) # final round: special case, no need to send/recv again for handle in handles: handle.wait() input_tensors.append(send_tensors) output_tensors.append(func(**recv_tensors, **input_local)) gathered_input = {} for k in input_to_gather: input_shards = [d[k] for d in input_tensors[group_size - cur_rank :] + input_tensors[: group_size - cur_rank]] gathered_input[k] = torch.cat(input_shards, dim=gather_dim) gathered_output = torch.cat( output_tensors[group_size - cur_rank :] + output_tensors[: group_size - cur_rank], dim=gather_dim ) return gathered_output, gathered_input class _GatherForwardReduceScatterBackward(torch.autograd.Function): """Gather input from sequence parallel in forward and reduce-scatter gradient in backward Args: input_ (`torch.Tensor`): The input tensor from sequence parallel region. process_group (`torch.distributed.ProcessGroup`): The process group used for collective communication. overlap (`bool`): Whther to overlap the all_gather op and gradient calculate in backward. 
""" @staticmethod def forward(ctx, input_, process_group, dim, fp8_communication=False): ctx.process_group = process_group ctx.dim = dim ctx.fp8_communication = fp8_communication return _gather(input_, dim, process_group, fp8_communication, fp8_format="e4m3") @staticmethod def backward(ctx, grad_output): dim = ctx.dim process_group = ctx.process_group fp8_communication = ctx.fp8_communication # do reduce-scatter new_shape = list(grad_output.shape) assert ( new_shape[dim] % dist.get_world_size(process_group) == 0 ), f"The dimension to split ({new_shape[dim]}) is not a multiple of tensor parallel size ({dist.get_world_size(process_group)}). " new_shape[dim] = new_shape[dim] // dist.get_world_size(process_group) grad_list = [ item.contiguous() for item in torch.chunk(grad_output, dist.get_world_size(process_group), dim=dim) ] output = torch.empty(new_shape, dtype=grad_output.dtype, device=grad_output.device) if fp8_communication: reduce_scatter_fp8(output, grad_list, group=process_group, fp8_format="e5m2") else: dist.reduce_scatter(output, grad_list, group=process_group) return output, None, None, None class _LinearWithGatherForwardReduceScatterBackward(torch.autograd.Function): """Gather input from sequence parallel in forward and reduce-scatter gradient in backward Args: input_ (`torch.Tensor`): The input tensor from sequence parallel region. process_group (`torch.distributed.ProcessGroup`): The process group used for collective communication. overlap (`bool`): Whether to overlap the all_gather op and gradient calculate in backward. 
""" @staticmethod def forward(ctx, input_, weight, bias, process_group, async_grad_reduce_scatter, dim, ring=False, use_zbv=False): ctx.save_for_backward(input_, weight, bias) ctx.use_bias = bias is not None ctx.process_group = process_group ctx.async_grad_reduce_scatter = async_grad_reduce_scatter ctx.dim = dim ctx.use_zbv = use_zbv if ring is True: input_to_gather = {"input": input_} input_local = {"weight": weight} output, input_dict = _ring_as_gather( F.linear, input_to_gather=input_to_gather, input_local=input_local, process_group=process_group, ) ctx.gathered_input = input_dict["input"] if bias is not None: output += bias else: input_parallel = _gather(input_, dim, process_group) ctx.gathered_input = input_parallel if bias is not None: output = F.linear(input_parallel, weight, bias) else: output = F.linear(input_parallel, weight) return output @staticmethod def backward(ctx, grad_output): input_, weight, bias = ctx.saved_tensors use_bias = ctx.use_bias dim = ctx.dim process_group = ctx.process_group use_zbv = ctx.use_zbv # In order to be hooked into Gemini's '__torch_function__', adding a view operation to weight and bias. 
Used in FusedLayerNorm if use_bias: bias = bias.view(bias.shape) input_parallel = ctx.gathered_input total_input = input_parallel grad_input = grad_output.matmul(weight) grad_output = grad_output.contiguous() # Convert the tensor shapes to 2D for execution compatibility if len(grad_output.shape) > 2: grad_output = grad_output.view(-1, grad_output.shape[-1]) total_input = total_input.view(-1, total_input.shape[-1]) if ctx.async_grad_reduce_scatter: # Asynchronous reduce-scatter input_list = [ item.contiguous() for item in torch.chunk(grad_input, dist.get_world_size(process_group), dim=dim) ] output = torch.empty(input_.shape, dtype=input_parallel.dtype, device=input_parallel.device).contiguous() handle = dist.reduce_scatter(output, input_list, group=process_group, async_op=True) # Rely on CUDA_DEVICE_MAX_CONNECTIONS=1 to have # all-reduce scheduled first and have GPU resources allocated, CUDA_DEVICE_MAX_CONNECTIONS=1 is set in shardformer.py if _grad_accum_fusion_available and weight.grad is not None: grad = weight.grad if use_zbv: WeightGradStore.put( total_input, grad_output, weight, functools.partial( execute_w_pass_grad_accum, ), ) grad_weight = None else: if grad.dtype == torch.float32: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, grad) grad_weight = None elif grad.dtype == torch.float16: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, grad) grad_weight = None else: grad_weight = grad_output.t().matmul(total_input) else: if use_zbv: WeightGradStore.put( total_input, grad_output, weight, functools.partial( execute_w_pass, wgrad_gemm_func=torch.matmul, ), ) grad_weight = None else: grad_weight = grad_output.t().matmul(total_input) grad_bias = grad_output.sum(dim=0) if use_bias else None if ctx.async_grad_reduce_scatter: handle.wait() return output, grad_weight, grad_bias, None, None, None, None, None def _ring_as_reducescatter( func, input_to_reducescatter=None, input_local=None, process_group=None, 
reducescatter_dim=1 ): # currently only support one single tensor as output group_size = dist.get_world_size(process_group) cur_rank = dist.get_rank(process_group) # initialization of ring communication recv_rank = cur_rank - 1 if cur_rank > 0 else group_size - 1 send_rank = cur_rank + 1 if cur_rank + 1 < group_size else 0 rank_map = list(dist.get_process_group_ranks(process_group)) recv_rank = rank_map[recv_rank] send_rank = rank_map[send_rank] input_tensors = [] for _ in range(group_size): input_tensors.append({}) for k, v in input_to_reducescatter.items(): input_shape = v.shape assert input_shape[reducescatter_dim] % group_size == 0 _input_tensors = list(torch.split(v, input_shape[reducescatter_dim] // group_size, dim=reducescatter_dim)) for i in range(group_size): input_tensors[i][k] = _input_tensors[i] input_tensors = input_tensors[cur_rank:] + input_tensors[:cur_rank] input_tensors.reverse() output_tensor = func(**input_tensors[0], **input_local) recv_tensor = torch.empty_like(output_tensor) send_tensor = output_tensor.clone() def communicate_step(): recv_op = dist.P2POp(dist.irecv, recv_tensor, recv_rank, group=process_group) send_op = dist.P2POp(dist.isend, send_tensor, send_rank, group=process_group) return dist.batch_isend_irecv([recv_op, send_op]) handles = communicate_step() # first round: special case, retrive from local tensor for i in range(group_size - 2): # actual computation output_tensor = func(**input_tensors[i + 1], **input_local) for handle in handles: handle.wait() output_tensor += recv_tensor tmp_tensor = send_tensor send_tensor = output_tensor output_tensor = tmp_tensor handles = communicate_step() # final round: special case, no need to send/recv again output_tensor = func(**input_tensors[-1], **input_local) for handle in handles: handle.wait() output_tensor += recv_tensor return output_tensor class _LinearWithReduceScatterForwardGatherBackward(torch.autograd.Function): """Reduce-scatter input from sequence parallel in forward and gather 
gradient in backward with ring Args: input_ (`torch.Tensor`): The input tensor from sequence parallel region. process_group (`torch.distributed.ProcessGroup`): The process group used for collective communication. overlap (`bool`): Whther to overlap the all_gather op and gradient calculate in backward. """ @staticmethod def forward(ctx, input_, weight, bias, process_group, dim, ring, use_zbv=False): ctx.save_for_backward(input_, weight, bias) ctx.use_bias = bias is not None ctx.process_group = process_group ctx.dim = dim ctx.use_zbv = use_zbv if ring is True: input_to_reducescatter = {"input": input_} input_local = {"weight": weight} if bias is not None: input_to_reducescatter["bias"] = bias output = _ring_as_reducescatter( F.linear, input_to_reducescatter=input_to_reducescatter, input_local=input_local, process_group=process_group, ) else: if bias is not None: partial_output = F.linear(input_, weight, bias) else: partial_output = F.linear(input_, weight) output_shape = list(partial_output.shape) assert ( output_shape[dim] % dist.get_world_size(process_group) == 0 ), f"The dimension to split ({output_shape[dim]}) is not a multiple of tensor parallel size ({dist.get_world_size(process_group)}). " output_shape[dim] = output_shape[dim] // dist.get_world_size(process_group) output_list = [ item.contiguous() for item in torch.chunk(partial_output, dist.get_world_size(process_group), dim=dim) ] output = torch.empty(output_shape, dtype=partial_output.dtype, device=partial_output.device).contiguous() dist.reduce_scatter(output, output_list, group=process_group) return output @staticmethod def backward(ctx, grad_output): input_, weight, bias = ctx.saved_tensors use_bias = ctx.use_bias dim = ctx.dim process_group = ctx.process_group use_zbv = ctx.use_zbv # In order to be hooked into Gemini's '__torch_function__', adding a view operation to weight and bias. 
Used in FusedLayerNorm if use_bias: bias = bias.view(bias.shape) grad_output = _gather(grad_output, dim, process_group) # TODO Need to fully optimize total_input = input_ grad_input = grad_output.matmul(weight) grad_output = grad_output.contiguous() # Convert the tensor shapes to 2D for execution compatibility if len(grad_output.shape) > 2: grad_output = grad_output.view(-1, grad_output.shape[-1]) total_input = total_input.reshape(-1, total_input.shape[-1]) if _grad_accum_fusion_available and weight.grad is not None: grad = weight.grad if use_zbv: WeightGradStore.put( total_input, grad_output, weight, functools.partial( execute_w_pass_grad_accum, ), ) grad_weight = None else: if grad.dtype == torch.float32: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(total_input, grad_output, grad) grad_weight = None elif grad.dtype == torch.float16: fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(total_input, grad_output, grad) grad_weight = None else: grad_weight = grad_output.t().matmul(total_input) else: if use_zbv: WeightGradStore.put( total_input, grad_output, weight, functools.partial( execute_w_pass, wgrad_gemm_func=torch.matmul, ), ) grad_weight = None else: grad_weight = grad_output.t().matmul(total_input) # grad_weight = grad_output.t().matmul(total_input) grad_bias = grad_output.sum(dim=0) if use_bias else None return grad_input, grad_weight, grad_bias, None, None, None, None class _ReduceScatterForwardGatherBackward(torch.autograd.Function): """Reduce-scatter input from sequence parallel in forward and gather gradient in backward Args: input_ (`torch.Tensor`): The input tensor from sequence parallel region. process_group (`torch.distributed.ProcessGroup`): The process group used for collective communication. 
""" @staticmethod def forward(ctx, input_, process_group, dim, fp8_communication=False): ctx.dim = dim ctx.process_group = process_group ctx.fp8_communication = fp8_communication # do reduce-scatter new_shape = list(input_.shape) assert ( new_shape[dim] % dist.get_world_size(process_group) == 0 ), f"The dimension to split ({new_shape[dim]}) is not a multiple of tensor parallel size ({dist.get_world_size(process_group)}). " new_shape[dim] = new_shape[dim] // dist.get_world_size(process_group) input_list = [item.contiguous() for item in torch.chunk(input_, dist.get_world_size(process_group), dim=dim)] output = torch.empty(new_shape, dtype=input_.dtype, device=input_.device) if fp8_communication: reduce_scatter_fp8(output, input_list, group=process_group, fp8_format="e4m3") else: dist.reduce_scatter(output, input_list, group=process_group) return output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/dropout.py
colossalai/shardformer/layer/dropout.py
from typing import List, Union import torch import torch.nn as nn from torch.distributed import ProcessGroup from .parallel_module import ParallelModule from .utils import create_randomizer_with_offset __all__ = ["DropoutForParallelInput", "DropoutForReplicatedInput"] class DropoutForParallelInput(ParallelModule, nn.Dropout): """ The Dropout Layer will apply dropout mask to the input tensor. The dropout mask is generated with randomness on different ranks of the given process group. This can avoid the same dropout mask is generated and applied on the same position of different ranks, leading to poor convergence performance. Args: p (float): probability of an element to be zeroed. Defaults to 0.5. inplace (bool): If set to True, will do this operation in-place. Defaults to False. process_group (ProcessGroup): the process group to be used for generating randomness. Defaults to None. """ def __init__(self, p: float = 0.5, inplace: bool = False, process_group: ProcessGroup = None): # init with nn.Dropout super(nn.Dropout, self).__init__(p=p, inplace=inplace) # offset the seed with randomizer index and rank seed = torch.random.initial_seed() self.randomizer = create_randomizer_with_offset(seed, process_group=process_group) @staticmethod def from_native_module( module: nn.Dropout, process_group: Union[ProcessGroup, List[ProcessGroup]] = None ) -> "DropoutForParallelInput": """ Create a DropoutForParallelInput layer from a native dropout layer. """ p = module.p inplace = module.inplace return DropoutForParallelInput(p=p, inplace=inplace, process_group=process_group) def forward(self, input): with self.randomizer.fork_rng(): input = super().forward(input) return input class DropoutForReplicatedInput(ParallelModule, nn.Dropout): """ The Dropout Layer will apply dropout mask to the input tensor. The dropout mask is generated with randomness on different ranks of the given process group. 
This can avoid the same dropout mask is generated and applied on the same position of different ranks, leading to poor convergence performance. Args: p (float): probability of an element to be zeroed. Defaults to 0.5. inplace (bool): If set to True, will do this operation in-place. Defaults to False. process_group (ProcessGroup): the process group to be used for generating randomness. Defaults to None. """ def __init__(self, p: float = 0.5, inplace: bool = False, process_group: ProcessGroup = None): # init with nn.Dropout super(nn.Dropout, self).__init__(p=p, inplace=inplace) # offset the seed with randomizer index only seed = torch.random.initial_seed() self.randomizer = create_randomizer_with_offset(seed, process_group=process_group, offset_by_rank=False) @staticmethod def from_native_module( module: nn.Dropout, process_group: Union[ProcessGroup, List[ProcessGroup]] = None ) -> "DropoutForReplicatedInput": """ Create a Dropout1D layer from a native dropout layer. """ p = module.p inplace = module.inplace return DropoutForReplicatedInput(p=p, inplace=inplace, process_group=process_group) def forward(self, input): with self.randomizer.fork_rng(): input = super().forward(input) return input
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/qkv_fused_linear.py
colossalai/shardformer/layer/qkv_fused_linear.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import math from typing import Callable, List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.nn as nn from torch import Tensor from torch.distributed import ProcessGroup from torch.nn.parameter import Parameter from colossalai.lazy import LazyInitContext from colossalai.nn import init as init from colossalai.nn.layer.utils import divide from colossalai.tensor.d_tensor.api import ( customized_distributed_tensor_to_existing_param, distribute_tensor_with_customization, is_customized_distributed_tensor, is_distributed_tensor, shard_rowwise, sharded_tensor_to_existing_param, ) from ._operation import ( linear_gather_forward_reducescatter_backward, linear_reducescatter_forward_gather_backward, linear_with_async_comm, linear_with_grad_accum, matmul_gather_forward_reducescatter_backward, matmul_with_async_comm, matmul_with_grad_comm, reduce_forward, reducescatter_forward_gather_backward, split_forward_gather_backward, ) from .parallel_module import ParallelModule from .utils import create_randomizer_with_offset, is_share_sp_tp __all__ = [ "FusedLinear1D_Col", "FusedLinear1D_Row", "FusedLinear", "GPT2FusedLinearConv1D_Col", "GPT2FusedLinearConv1D_Row", "GPT2FusedLinearConv", ] # ==================================== # For GPT Only # ==================================== def split_fused_qkv_in_gpt2_style( qkv: torch.Tensor, split_sizes: List[int], process_group: ProcessGroup, is_transposed: bool = False ): """ The fused qkv tensor looks like [Q1, Q2, K1, K2, V1, V2], this function will split them into [Q1, K1, V1] and [Q2, K2, V2]. Args: qkv (torch.Tensor): The fused qkv tensor. split_sizes (List[int]): The sizes of the split tensor. process_group (ProcessGroup): The process group for distributed communication. is_transposed (bool): generally the tensor is the shape of (out_features, in_features). Set this to True if the tensor is in the shape (in_features, out_features). 
""" # get the number of slice for the fused qkv rank = dist.get_rank(group=process_group) world_size = dist.get_world_size(group=process_group) order = torch.arange(world_size * len(split_sizes)) new_split_sizes = [] for sz in split_sizes: assert sz % world_size == 0, f"size {sz} is not divisible by world_size {world_size}" new_split_sizes.extend([sz // world_size] * world_size) # split the fused qkv # from # [Q, K, V] # to # [Q1, Q2, K1, K2, V1, V2] if is_transposed: weight_chunks = torch.split(qkv, new_split_sizes, dim=-1) else: weight_chunks = torch.split(qkv, new_split_sizes, dim=0) # rearrange the slice into the final order # from # [Q1, Q2, K1, K2, V1, V2] # to # [Q1, K1, V1], [Q2, K2, V2] weight_chunks_of_current_rank = [weight_chunks[i] for i in order[rank::world_size]] if is_transposed: weight_of_current_rank = torch.cat(weight_chunks_of_current_rank, dim=-1) else: weight_of_current_rank = torch.cat(weight_chunks_of_current_rank, dim=0) return weight_of_current_rank def gather_fused_qkv_in_gpt2_style( qkv: torch.Tensor, split_sizes: List[int], process_group: ProcessGroup, is_transposed: bool = False ): """ The splitted qkv tensor looks like [Q1, K1, V1] and [Q2, K2, V2], this function will gather them into [Q1, Q2, K1, K2, V1, V2]. Args: qkv (torch.Tensor): The fused qkv tensor. split_sizes (List[int]): The sizes of the split tensor. process_group (ProcessGroup): The process group for distributed communication. is_transposed (bool): generally the tensor is the shape of (out_features, in_features). Set this to True if the tensor is in the shape (in_features, out_features). 
""" world_size = dist.get_world_size(group=process_group) new_split_sizes = [] for sz in split_sizes: assert sz % world_size == 0, f"size {sz} is not divisible by world_size {world_size}" new_split_sizes.append(sz // world_size) new_split_sizes = new_split_sizes * world_size # gather the tensors # from # [Q1, K1, V1], [Q2, K2, V2] # to # [Q1, K1, V1, Q2, K2, V2] origin_device = qkv.device qkv = qkv.cuda() gather_list = [torch.zeros_like(qkv) for _ in range(world_size)] dist.all_gather(gather_list, qkv, group=process_group) if is_transposed: gather_weight = torch.cat(gather_list, dim=-1) else: gather_weight = torch.cat(gather_list, dim=0) gather_weight = gather_weight.to(origin_device) qkv = qkv.to(origin_device) # rearrange the tensor slices # from # [Q1, K1, V1, Q2, K2, V2] # to # [Q1, Q2, K1, K2, V1, V2] if is_transposed: weight_chunks = torch.split(gather_weight, new_split_sizes, dim=-1) else: weight_chunks = torch.split(gather_weight, new_split_sizes, dim=0) reordered_chunk_list = [] for i in range(len(split_sizes)): reordered_chunk_list.extend(weight_chunks[i :: len(split_sizes)]) if is_transposed: reordered_gather_weight = torch.cat(reordered_chunk_list, dim=-1) else: reordered_gather_weight = torch.cat(reordered_chunk_list, dim=0) return reordered_gather_weight class _SplitForwardGatherBackwardFusedQKV(torch.autograd.Function): @staticmethod def forward(ctx, qkv: torch.Tensor, split_sizes: List[int], process_group: ProcessGroup): ctx.split_sizes = split_sizes ctx.process_group = process_group return split_fused_qkv_in_gpt2_style(qkv, split_sizes, process_group, is_transposed=True) @staticmethod def backward(ctx, grad_output): grad_output = gather_fused_qkv_in_gpt2_style( grad_output, ctx.split_sizes, ctx.process_group, is_transposed=True ) return grad_output, None, None def split_forward_gather_backward_fused_qkv(qkv: torch.Tensor, split_sizes: List[int], process_group: ProcessGroup): return _SplitForwardGatherBackwardFusedQKV.apply(qkv, split_sizes, 
process_group) class _GatherForwardSplitBackwardFusedQKV(torch.autograd.Function): @staticmethod def forward(ctx, qkv: torch.Tensor, split_sizes: List[int], process_group: ProcessGroup): ctx.split_sizes = split_sizes ctx.process_group = process_group return gather_fused_qkv_in_gpt2_style(qkv, split_sizes, process_group, is_transposed=True) @staticmethod def backward(ctx, grad_output): grad_output = split_fused_qkv_in_gpt2_style(grad_output, ctx.split_sizes, ctx.process_group, is_transposed=True) return grad_output, None, None def gather_forward_split_backward_fused_qkv(qkv: torch.Tensor, split_sizes: List[int], process_group: ProcessGroup): return _GatherForwardSplitBackwardFusedQKV.apply(qkv, split_sizes, process_group) class GPT2FusedLinearConv1D_Col(ParallelModule): r"""Linear layer with column parallelism. The linear layer is defined as :math:`Y = XA + b`. A is parallelized along its second dimension as :math:`A = [A_1, ..., A_p]`. This layer is used to fit `Conv1D` layer (Fused QKV) in gpt2 of huggingface. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. split_sizes (List[int]): The sizes of the split tensor. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (`torch.dtype`): The dtype of parameters, defaults to None. device (`torch.device`): The device of parameters, defaults to None. process_group (`torch.distributed.ProcessGroup`): The process group to be used for weight sharding and communication, defaults to None. seq_parallel_mode (str): If set to ``None``, it will not use sequence parallel, otherwise will use corresponding mode of sequence parallel, defaults to None. 
gather_output (bool, optional): If true, call all-gather on output and make Y available to all GPUs, otherwise, every GPU will have its output which is :math:`Y_i = XA_i`, defaults to False skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer, which is preserved for kernel fusion, defaults to False weight_initializer (`typing.Callable`): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (`typing.Callable`): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, out_features: int, split_sizes: List[int], bias: bool = True, dtype: torch.dtype = None, device: torch.device = None, process_group: ProcessGroup = None, gather_output: bool = False, seq_parallel_mode: str = None, skip_bias_add: bool = False, weight: Optional[Parameter] = None, bias_: Optional[Parameter] = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), fp8_communication: bool = False, use_zbv: bool = False, ): super().__init__() # Keep input parameters self.in_features = in_features self.out_features = out_features self.gather_output = gather_output self.seq_parallel_mode = seq_parallel_mode self.skip_bias_add = skip_bias_add self.device = device self.split_sizes = split_sizes self.process_group = process_group self.fp8_communication = fp8_communication self.use_zbv = use_zbv assert ( sum(split_sizes) == out_features ), f"The sum of split_sizes({sum(split_sizes)}) should be equal to out_features({out_features})." 
if skip_bias_add and not bias: raise ValueError("cannot skip bias addition if bias is None") # offset the seed with randomizer index and rank seed = torch.random.initial_seed() self.randomizer = create_randomizer_with_offset(seed, process_group=self.process_group) # sanity check if weight is not None: assert not bias or bias_ is not None, "bias_ must be provided if bias is True when weight is not None" else: assert bias_ is None, "bias_ must be None if weight is None" # Parameters. if weight is None: # Initialize weight. factory_kwargs = {"device": device, "dtype": dtype} self.weight = Parameter(torch.empty(self.in_features, self.out_features, **factory_kwargs)) else: weight.data = weight.data.to(device=device, dtype=dtype) self.weight = weight def shard_fn(tensor): return split_fused_qkv_in_gpt2_style(tensor, self.split_sizes, self.process_group, True) def gather_fn(tensor): return gather_fused_qkv_in_gpt2_style(tensor, self.split_sizes, self.process_group, True) if not is_customized_distributed_tensor(self.weight): with torch.no_grad(): sharded_weight = distribute_tensor_with_customization(self.weight.data, shard_fn, gather_fn) customized_distributed_tensor_to_existing_param(sharded_weight, self.weight) if bias: if bias_ is None: self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs)) else: bias_.data = bias_.data.to(device=device, dtype=dtype) self.bias = bias_ if not is_customized_distributed_tensor(self.bias): with torch.no_grad(): sharded_bias = distribute_tensor_with_customization(self.bias.data, shard_fn, gather_fn) customized_distributed_tensor_to_existing_param(sharded_bias, self.bias) else: self.bias = None if weight is None: # init weights self.reset_parameters(weight_initializer, bias_initializer) @staticmethod def from_native_module( module: nn.Module, process_group: Union[ProcessGroup, List[ProcessGroup]], split_sizes: List[int], *args, **kwargs, ) -> ParallelModule: r""" Convert a huggingface layer `Conv1D` in gpt2 to a parallelized 
linear layer. Args: module (`nn.Linear`): The module to be converted. process_group (`Union[ProcessGroup, List[ProcessGroup]]`): The process group to be used for weight sharding and communication. split_sizes (List[int]): The sizes of the split tensor. In GPT2, Q,K,V are fused in one weight. """ LazyInitContext.materialize(module) # get the attributes in_features = module.weight.shape[0] out_features = module.weight.shape[1] bias = module.bias is not None device = module.weight.device # ensure only one process group is passed if isinstance(process_group, (list, tuple)): assert len(process_group) == 1, f"Expected only one process group, got {len(process_group)}." process_group = process_group[0] tp_size = dist.get_world_size(process_group) if out_features < tp_size: return module if out_features % tp_size != 0: raise ValueError( f"The size of out_features:{out_features} is not integer multiples of tensor parallel size: {tp_size}!" ) linear_1d = GPT2FusedLinearConv1D_Col( in_features=in_features, out_features=out_features, bias=bias, device=device, process_group=process_group, weight=module.weight, bias_=module.bias, split_sizes=split_sizes, *args, **kwargs, ) return linear_1d def reset_parameters(self, weight_initializer, bias_initializer) -> None: with self.randomizer.fork_rng(enable_cpu=True): fan_in, fan_out = self.in_features, self.out_features weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]: assert ( input_.shape[-1] == self.weight.shape[0] ), "Invalid shapes in Linear1D_Col forward: input={}, weight={}. Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[-1] ) # Matrix multiply. 
bias = self.bias if not self.skip_bias_add else None if is_share_sp_tp(self.seq_parallel_mode): input_parallel = input_ output_parallel = matmul_gather_forward_reducescatter_backward( input_parallel, self.weight, bias, self.process_group, True, 1, ring=self.seq_parallel_mode == "ring", fp8_communication=self.fp8_communication, use_zbv=self.use_zbv, ) elif self.seq_parallel_mode is None or self.seq_parallel_mode == "ring_attn": # Set up backprop all-reduce. input_parallel = input_ output_parallel = matmul_with_async_comm( input_parallel, self.weight, bias, self.process_group, True, fp8_communication=self.fp8_communication, use_zbv=self.use_zbv, ) else: raise NotImplementedError(f"seq_parallel_mode={self.seq_parallel_mode} is not supported!") if self.gather_output: # All-gather across the partitions. output = gather_forward_split_backward_fused_qkv(output_parallel, self.split_sizes, self.process_group) else: output = output_parallel if self.skip_bias_add: return output, self.bias else: return output class GPT2FusedLinearConv1D_Row(ParallelModule): r"""Linear layer with row parallelism. This layer is used to fit `Conv1D` layer (Fused QKV) in gpt2 of huggingface. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (`torch.dtype`): The dtype of parameters, defaults to None. parallel_input (bool): If set to ``True``, it's assumed that the input is split, defaults to False. skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer, seq_parallel_mode (str): If set to ``None``, it will not use sequence parallel, otherwise will use corresponding mode of sequence parallel, defaults to None. which is preserved for kernel fusion, defaults to False weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. 
bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, device: torch.device = None, process_group: ProcessGroup = None, seq_parallel_mode: str = None, parallel_input: bool = True, skip_bias_add: bool = False, weight: Optional[Parameter] = None, bias_: Optional[Parameter] = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), stream_chunk_num: int = 1, fp8_communication: bool = False, use_zbv: bool = False, ): super().__init__() self.stream_chunk_num = stream_chunk_num # Keep input parameters self.in_features = in_features self.out_features = out_features self.parallel_input = parallel_input self.skip_bias_add = skip_bias_add self.process_group = process_group self.seq_parallel_mode = seq_parallel_mode self.num_partitions = dist.get_world_size(self.process_group) self.fp8_communication = fp8_communication self.use_zbv = use_zbv if skip_bias_add and not bias: raise ValueError("cannot skip bias addition if bias is None") # offset the seed with randomizer index and rank seed = torch.random.initial_seed() self.randomizer = create_randomizer_with_offset(seed, process_group=self.process_group) # Divide the weight matrix along the last dimension. self.input_size_per_partition = divide(in_features, self.num_partitions) # sanity check if weight is not None: assert not bias or bias_ is not None, "bias_ must be provided if bias is True when weight is not None" else: assert bias_ is None, "bias_ must be None if weight is None" # Parameters. if weight is None: # Initialize weight. 
factory_kwargs = {"device": device, "dtype": dtype} self.weight = Parameter(torch.empty(self.in_features, self.out_features, **factory_kwargs)) else: weight.data = weight.data.to(device=device, dtype=dtype) self.weight = weight if not is_distributed_tensor(self.weight): sharded_weight = shard_rowwise(self.weight.data, self.process_group) sharded_tensor_to_existing_param(sharded_weight, self.weight) if self.stream_chunk_num > 1: # TODO() work for inference only self.chunk_weight() if bias: if bias_ is None: self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs)) else: bias_.data = bias_.data.to(device=device, dtype=dtype) self.bias = bias_ else: self.bias = None if weight is None: # init weights self.reset_parameters(weight_initializer, bias_initializer) @staticmethod def from_native_module( module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs ) -> ParallelModule: r""" Convert a native PyTorch linear layer to a parallelized linear layer. """ LazyInitContext.materialize(module) # get the attributes in_features = module.weight.shape[0] out_features = module.weight.shape[1] bias = module.bias is not None device = module.weight.device # ensure only one process group is passed if isinstance(process_group, (list, tuple)): assert len(process_group) == 1, f"Expected only one process group, got {len(process_group)}." process_group = process_group[0] tp_size = dist.get_world_size(process_group) if in_features < tp_size: return module if in_features % tp_size != 0: raise ValueError( f"The size of in_features:{in_features} is not integer multiples of tensor parallel size: {tp_size}!" 
) linear_1d = GPT2FusedLinearConv1D_Row( in_features=in_features, out_features=out_features, bias=bias, device=device, process_group=process_group, weight=module.weight, bias_=module.bias, *args, **kwargs, ) return linear_1d def chunk_weight(self): self.weight_list = torch.chunk(self.weight, self.stream_chunk_num, dim=0) def reset_parameters(self, weight_initializer, bias_initializer) -> None: with self.randomizer.fork_rng(enable_cpu=True): fan_in, fan_out = self.in_features, self.out_features weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) if self.process_group is None: src_rank = 0 else: src_rank = dist.distributed_c10d._get_global_rank(self.process_group, 0) origin_device = self.bias.device self.bias.data = self.bias.cuda() dist.broadcast(self.bias, src=src_rank, group=self.process_group) self.bias.data = self.bias.to(origin_device) def forward(self, input_: Tensor) -> Tensor: # Set up backprop all-reduce. if self.parallel_input: assert ( input_.shape[-1] == self.weight.shape[0] ), "Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[0] ) input_ = input_ else: assert ( divide(input_.shape[-1], self.num_partitions) == self.weight.shape[0] ), "Invalid shapes in Linear1D_Row forward: input={}, weight={}. 
Expected last dim of input {}.".format( input_.shape, self.weight.shape, self.weight.shape[0] * self.num_partitions ) input_ = split_forward_gather_backward( input_, dim=-1, process_group=self.process_group, fp8_communication=self.fp8_communication ) if self.stream_chunk_num > 1: if self.training: raise RuntimeError("use stream_chunk_num=1 in Linear1D_Row for training!") with torch.no_grad(): output_parallel_list = [None for i in range(self.stream_chunk_num)] handle_list = [] for i in range(self.stream_chunk_num): output_parallel_list[i] = torch.matmul(input_, self.weight_list[i]) handle = torch.distributed.all_reduce( output_parallel_list[i], group=self.process_group, async_op=True ) handle_list.append(handle) # output_parallel_list[i] = reduce_input(output_parallel_list[i], ParallelMode.PARALLEL_1D) for handle in handle_list: handle.wait() output = torch.cat(output_parallel_list, dim=-1) else: if self.seq_parallel_mode is None or self.seq_parallel_mode == "ring_attn": output_parallel = torch.matmul(input_, self.weight) output = reduce_forward(output_parallel, self.process_group, fp8_communication=self.fp8_communication) elif is_share_sp_tp(self.seq_parallel_mode): output_parallel = torch.matmul(input_, self.weight) output = reducescatter_forward_gather_backward( output_parallel, self.process_group, 1, self.fp8_communication, ) else: raise NotImplementedError(f"seq_parallel_mode={self.seq_parallel_mode} is not supported!") if not self.skip_bias_add: if self.bias is not None: output = output + self.bias return output else: return output, self.bias class GPT2FusedLinearConv(ParallelModule): r"""Linear layer without parallelism. This layer is used to fit `Conv1D` layer (Fused QKV) in gpt2 of huggingface. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. 
dtype (`torch.dtype`): The dtype of parameters, defaults to None. skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer, seq_parallel_mode (str): If set to ``None``, it will not use sequence parallel, otherwise will use corresponding mode of sequence parallel, defaults to None. which is preserved for kernel fusion, defaults to False weight_initializer (:class:`typing.Callable`, optional): The initializer of weight, defaults to kaiming uniform initializer. bias_initializer (:class:`typing.Callable`, optional): The initializer of bias, defaults to xavier uniform initializer. More details about ``initializer`` please refer to `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_. """ def __init__( self, in_features: int, out_features: int, bias: bool = True, dtype: torch.dtype = None, device: torch.device = None, seq_parallel_mode: str = None, seq_parallel_dim: int = 1, skip_bias_add: bool = False, weight: Optional[Parameter] = None, bias_: Optional[Parameter] = None, weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)), bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1), use_zbv: bool = False, ): super().__init__() # Keep input parameters self.in_features = in_features self.out_features = out_features self.seq_parallel_mode = seq_parallel_mode self.seq_parallel_dim = seq_parallel_dim self.skip_bias_add = skip_bias_add self.device = device self.use_zbv = use_zbv if skip_bias_add and not bias: raise ValueError("cannot skip bias addition if bias is None") # offset the seed with randomizer index and rank seed = torch.random.initial_seed() self.randomizer = create_randomizer_with_offset(seed, None) # sanity check if weight is not None: assert not bias or bias_ is not None, "bias_ must be provided if bias is True when weight is not None" else: assert bias_ is None, "bias_ must be None if weight is None" # Parameters. if weight is None: # Initialize weight. 
factory_kwargs = {"device": device, "dtype": dtype} self.weight = Parameter(torch.empty(self.out_features, self.in_features, **factory_kwargs)) else: weight.data = weight.data.to(device=device, dtype=dtype) self.weight = weight if bias: if bias_ is None: self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs)) else: bias_.data = bias_.data.to(device=device, dtype=dtype) self.bias = bias_ else: self.bias = None if weight is None: # init weights self.reset_parameters(weight_initializer, bias_initializer) @staticmethod def from_native_module( module: nn.Module, *args, **kwargs, ) -> ParallelModule: r""" Convert a huggingface layer `Conv1D` in gpt2 to a parallelized linear layer. Args: module (`nn.Linear`): The module to be converted. split_sizes (List[int]): The sizes of the split tensor. In GPT2, Q,K,V are fused in one weight. """ LazyInitContext.materialize(module) # get the attributes in_features = module.weight.shape[0] out_features = module.weight.shape[1] bias = module.bias is not None device = module.weight.device linear_1d = GPT2FusedLinearConv( in_features=in_features, out_features=out_features, bias=bias, device=device, weight=module.weight, bias_=module.bias, *args, **kwargs, ) return linear_1d def reset_parameters(self, weight_initializer, bias_initializer) -> None: with self.randomizer.fork_rng(enable_cpu=True): fan_in, fan_out = self.in_features, self.out_features weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out) if self.bias is not None: bias_initializer(self.bias, fan_in=fan_in) def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]: # Matrix multiply. bias = self.bias if not self.skip_bias_add else None if self.seq_parallel_mode is None or self.seq_parallel_mode == "ring_attn": # Set up backprop all-reduce. 
input_parallel = input_ output_parallel = matmul_with_grad_comm( input_parallel, self.weight, bias, False, self.use_zbv, ) else: raise NotImplementedError(f"seq_parallel_mode={self.seq_parallel_mode} is not supported!") output = output_parallel if self.skip_bias_add: return output, self.bias else: return output # ==================================== # For Fused torch.nn.Linear # ==================================== class FusedLinear1D_Col(ParallelModule): r"""Fused Linear layer with column parallelism. The linear layer is defined as :math:`Y = XA + b`. A is parallelized along its second dimension as :math:`A = [A_1, ..., A_p]`. This layer is used to fit `torch.nn.Linear` layer (Fused QKV) in normal torch layer of huggingface, like SAM. Args: in_features (int): size of each input sample. out_features (int): size of each output sample. split_sizes (List[int]): The sizes of the split tensor. bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``. dtype (`torch.dtype`): The dtype of parameters, defaults to None. device (`torch.device`): The device of parameters, defaults to None.
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/utils.py
colossalai/shardformer/layer/utils.py
from contextlib import contextmanager from typing import List, Optional, Union import torch import torch.distributed as dist from torch import nn from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from torch.distributed import ProcessGroup, get_world_size from colossalai.accelerator import get_accelerator try: import fused_weight_gradient_mlp_cuda _grad_accum_fusion_available = True except ImportError: _grad_accum_fusion_available = False # execute_w_pass_grad_accum & execute_conv1d_w_pass for GPT2FusedLinearConv1D def execute_conv1d_w_pass_grad_accum(_input_, _grad_output_, _weight_main_grad_): if _input_.dtype == torch.float32: wgrad_gemm_accum_func = fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32 elif _input_.dtype in (torch.float16, torch.bfloat16): wgrad_gemm_accum_func = fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16 else: raise RuntimeError("Unsupported gradient type for gradient accumulation fusion") wgrad_gemm_accum_func(_grad_output_, _input_, _weight_main_grad_) def execute_conv1d_w_pass(_input_, _grad_output_, _weight_main_grad_=None, wgrad_gemm_func=None): return wgrad_gemm_func(_input_.t(), _grad_output_) # execute_w_pass_grad_accum & execute_w_pass for Linear (except GPT2FusedLinearConv1D) def execute_w_pass_grad_accum(_input_, _grad_output_, _weight_main_grad_): if _input_.dtype == torch.float32: wgrad_gemm_accum_func = fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32 elif _input_.dtype in (torch.float16, torch.bfloat16): wgrad_gemm_accum_func = fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16 else: raise RuntimeError("Unsupported gradient type for gradient accumulation fusion") wgrad_gemm_accum_func(_input_, _grad_output_, _weight_main_grad_) def execute_w_pass(_input_, _grad_output_, _weight_main_grad_=None, wgrad_gemm_func=None): return wgrad_gemm_func(_grad_output_.t(), _input_) class SeqParallelUtils: @staticmethod def marked_as_sp_partial_derived_param(param): """ Mark a parameter as partially derived 
in sequence parallelism. Args: param: The parameter to mark as partially derived. """ setattr(param, "partial_derived", True) @staticmethod def is_sp_partial_derived_param(param): """ Check if a parameter is marked as partially derived in sequence parallelism. Args: param: The parameter to check. Returns: bool: True if the parameter is marked as partially derived, False otherwise. """ return getattr(param, "partial_derived", False) @staticmethod def allreduce_partial_data_grad( process_group: ProcessGroup, model: nn.Module = None, grads: List[torch.Tensor] = None, ): """ Allreduce partial derived gradients across the specified process group. This function performs gradient synchronization for parameters that are marked as partially derived in sequence parallelism. Args: process_group (ProcessGroup): The process group for gradient synchronization. model (nn.Module): The model from which gradients will be synchronized. grads (List[torch.Tensor]): The list of gradients to be synchronized. only_sp_partial (bool): Whether handle all the parameters or only parameters marked as partial derived. Raises: AssertionError: If both `model` and `grads` are provided or neither is provided. """ # Ensure that exactly one of `model` and `grads` is provided for gradient synchronization. assert (model is not None) ^ (grads is not None), "Exactly one of model and grads must be not None." # Get the size of the process group, which determines whether synchronization is needed. group_size = get_world_size(process_group) if process_group is not None else 1 if group_size == 1: # If the process group size is 1, no synchronization is required. return if model is not None: # If `model` is provided, extract partial derived gradients from the model's parameters. grads = [] for p in model.parameters(): if p.grad is not None: if SeqParallelUtils.is_sp_partial_derived_param(p): grads.append(p.grad.data) # Flatten and reduce the gradients using the specified process group. 
if len(grads) == 0: return coalesced = _flatten_dense_tensors(grads) dist.all_reduce(coalesced, op=dist.ReduceOp.SUM, group=process_group) # Unflatten the synchronized gradients and update the model's gradients. for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)): buf.copy_(synced) else: # If `grads` are provided explicitly, synchronize those gradients directly. coalesced = _flatten_dense_tensors(grads) dist.all_reduce(coalesced, op=dist.ReduceOp.SUM, group=process_group) for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)): buf.copy_(synced) class Randomizer: """ Randomizer enables the program to be executed under a different seed within the context. Example: ```python randomizer = Randomizer(seed=1024) with randomizer.fork(): # do something here with seed 1024 do_something() ``` Args: seed (int): The random seed to set. enable_cpu (bool): fork the CPU RNG state as well. with_index (bool): whether to use the index of the randomizer. """ _INDEX = 0 def __init__(self, seed: int): self.seed = seed # Handle device rng state # 1. get the current rng state # 2. set the seed and store the rng state # 3. 
recover the original rng state device_original_rng_state = get_accelerator().get_rng_state() get_accelerator().manual_seed(seed) self.device_rng_state = get_accelerator().get_rng_state() get_accelerator().set_rng_state(device_original_rng_state) # to the same for cpu rng state cpu_original_rng_state = torch.get_rng_state() torch.manual_seed(seed) self.cpu_rng_state = torch.get_rng_state() torch.set_rng_state(cpu_original_rng_state) def _set_device_rng_state(self, rng_state): get_accelerator().set_rng_state(rng_state) def _get_device_rng_state(self): current_state = get_accelerator().get_rng_state() return current_state def _set_cpu_rng_state(self, rng_state): torch.set_rng_state(rng_state) def _get_cpu_rng_state(self): current_state = torch.get_rng_state() return current_state @contextmanager def fork_rng(self, enable_cpu: bool = False): """ This is a context manager to change the dropout state and recover the original state. Usage: :: >>> with _seed_manager.dropout_mode(): >>> input = super().forward(input) """ try: current_device_rng_state = self._get_device_rng_state() self._set_device_rng_state(self.device_rng_state) if enable_cpu: current_cpu_rng_state = self._get_cpu_rng_state() self._set_cpu_rng_state(self.cpu_rng_state) yield finally: self.device_rng_state = self._get_device_rng_state() self._set_device_rng_state(current_device_rng_state) if enable_cpu: self.cpu_rng_state = self._get_cpu_rng_state() self._set_cpu_rng_state(current_cpu_rng_state) @staticmethod def index(): """ Return the index of the randomizer. The index is useful when the user wants to introduce some randomness in the program. Note: The index will increment by one each time this method is called. 
Example: ```python # assume we need a randomizer to init the weight of different layers # we can use the index of the randomizer to do so that # each layer has its own randomizer with a different seed base_seed = torch.random.initial_seed() seed = base_seed + Randomizer.index() randomizer = Randomizer(seed) with randomizer.fork(): init_weights() ``` """ idx = Randomizer._INDEX return idx @staticmethod def increment_index(): """ Increment the index of the randomizer by one. """ Randomizer._INDEX += 1 @staticmethod def reset_index(): """ Reset the index to zero. """ Randomizer._INDEX = 0 @staticmethod def is_randomizer_index_synchronized(process_group: ProcessGroup = None): """ Return whether the randomizer index is synchronized across processes. """ index = Randomizer.index() if dist.is_initialized(): # convert the index to tensor index_tensor = torch.tensor(index, dtype=torch.int32, device=get_accelerator().get_current_device()) # all gather the index gathered_index = [torch.zeros_like(index_tensor) for _ in range(dist.get_world_size(process_group))] dist.all_gather(gathered_index, index_tensor, process_group) # make sure all the gathered index are the same for i in range(1, dist.get_world_size(process_group)): if gathered_index[i] != gathered_index[0]: return False return True @staticmethod def synchronize_index(process_group: ProcessGroup = None): """ All gather the index and pick the largest value. 
""" index = Randomizer.index() if dist.is_initialized(): # convert the index to tensor index_tensor = torch.tensor(index, dtype=torch.int32, device=get_accelerator().get_current_device()) # all gather the index gathered_index = [torch.zeros_like(index_tensor) for _ in range(dist.get_world_size(process_group))] dist.all_gather(gathered_index, index_tensor, process_group) # pick the largest index for i in range(1, dist.get_world_size(process_group)): if gathered_index[i] > index_tensor: index_tensor = gathered_index[i] # set the index Randomizer._INDEX = index_tensor.item() def create_randomizer_with_offset( seed: int, process_group: ProcessGroup = None, offset_by_rank: bool = True, offset_by_index: bool = True ): """ Create a randomizer with an offset. The offset is equal to the rank of the process and the index of the randomizer. Args: seed (int): The base random seed to set. process_group (ProcessGroup): the process group to get the rank from. offset_by_rank (bool): whether to offset by the rank of the process, i.e., the rank of the process will be added to the seed. Default: True. offset_by_index (bool): whether to offset by the index of the randomizer, i.e., the index of the randomizer will be added to the seed. Default: True. Returns: Randomizer: the randomizer with offset. """ base_seed = seed if offset_by_rank and dist.is_initialized(): rank = dist.get_rank(process_group) base_seed += rank if offset_by_index: # check if the randomizer index is synchronized is_synchronized = Randomizer.is_randomizer_index_synchronized(process_group) assert is_synchronized, ( "We detect that the randomizer index is not synchronized across processes." "This is not allowed when we want to create a randomizer with offset by index." "Please call Randomizer.synchronize_index() first." 
) base_seed += Randomizer.index() Randomizer.increment_index() return Randomizer(seed=base_seed) def split_batch_zigzag( batch: Union[torch.Tensor, List[torch.Tensor]], sp_group: ProcessGroup, seq_dim: int = 1, is_label: bool = False ) -> Union[torch.Tensor, List[torch.Tensor]]: """ Split the input sequence batch . Naively spliting the attention mask in the causal setting will result in the preceding ranks having much less workload. We split after "folding" the 2D attention mask in half (https://github.com/zhuzilin/ring-flash-attention/issues/2). For example, for sp_size = 4 and seq_len = 8, we get | s0, s7 | s1, s6 | s2, s5 | s3, s4 |. Args: batch (List[torch.Tensor] or Tensor): The input tensor(s) to split. sp_group (ProcessGroup): The process group for sequence parallelism. seq_dim (int): The sequence dimension to split. is_label (bool): If True, mask and shift the tensor for next token prediction. """ sp_size = dist.get_world_size(sp_group) sp_rank = dist.get_rank(sp_group) if sp_size == 1: return batch if isinstance(batch, torch.Tensor): batch = [batch] seq_dim = seq_dim if seq_dim != -1 else batch[0].dim() - 1 if sp_size > 1: for idx, tensor in enumerate(batch): assert ( tensor.shape[seq_dim] // (sp_size * 2) > 1 and tensor.shape[seq_dim] % (sp_size * 2) == 0 ), f"Bro, the seq length {tensor.shape[seq_dim]} for tensor {idx} can't be split by {sp_size * 2}!" if is_label: assert tensor.dim() == 2, "Label shape should be (B, Seqlen)" tensor = torch.cat([tensor[:, 1:], torch.full_like(tensor[:, :1], -100)], dim=1) tensor = tensor.view( *tensor.shape[:seq_dim], 2 * sp_size, tensor.shape[seq_dim] // (2 * sp_size), *tensor.shape[seq_dim + 1 :], ) indices = torch.tensor([sp_rank, 2 * sp_size - 1 - sp_rank], device=tensor.device) tensor = tensor.index_select(seq_dim, indices).contiguous() # (B, 2, Sq // (2 * sp_size), ...) -> (B, Sq // sp_size, ...) 
batch[idx] = tensor.view(*tensor.shape[:seq_dim], -1, *tensor.shape[seq_dim + 2 :]) if len(batch) == 1: return batch[0] return batch def split_varlen_zigzag( batch: Union[List[torch.Tensor], torch.Tensor], cu_seqlens: torch.Tensor, sp_group: ProcessGroup, max_seqlen: int = 0, is_batched_seq: bool = False, is_label: bool = False, ) -> Union[List[torch.Tensor], torch.Tensor]: """Split a packed seq/batch of padded sequences in a Zigzag fashion. Different from split_batch_zigzag, inputs here have variable sequence lengths. Args: batch (List[torch.Tensor]): Packed sequences of shape (T, ...), or (B, Sq, ...) if is_batched_seq, where T is the total number of tokens. cu_seqlens (torch.Tensor): Cumulative sequence lengths of shape (B + 1) before splitting. sp_group (ProcessGroup): The process group for sequence parallelism. max_seqlen (int): The maximum sequence length in the batch before splitting. is_batched_seq (bool): If True, then the input is a batch of sequences padded to the same len. is_label (bool): If True, mask out the first token in each sequence (<Start of Sentence>). Returns: batch (List[torch.Tensor]): Packed sequences of shape (T, ..) or (B, max_seqlen // sp_size, ...) if is_batched_seq """ sp_size = dist.get_world_size(sp_group) sp_rank = dist.get_rank(sp_group) if sp_size == 1: return batch if is_batched_seq: assert max_seqlen > 0, "max_seqlen must be provided for 2D input" if isinstance(batch, torch.Tensor): batch = [batch] # seq: (B, Sq, h, n) # seq = seq[:, :rank * (seqlen // sp_size), ...] 
for i, packed_seq in enumerate(batch): device = packed_seq.device dtype = packed_seq.dtype if is_batched_seq: assert max_seqlen % (sp_size * 2) == 0 # Recreate a padded tensor with the new max seqlen shape = (packed_seq.shape[0], max_seqlen // sp_size, *packed_seq.shape[2:]) local_seq = torch.zeros(shape, dtype=dtype, device=device) else: total_seqlen = cu_seqlens[-1] assert ( total_seqlen % (2 * sp_size) == 0 ), f"total_seqlen {total_seqlen} must be divisible by 2 * sp_size = {2 * sp_size}" local_seq = [] for j in range(len(cu_seqlens) - 1): start, end = cu_seqlens[j], cu_seqlens[j + 1] seqlen = end - start assert ( seqlen % (2 * sp_size) == 0 ), f"batch {i} seq {j}'s length ({seqlen}) must be divisible by 2 * sp_size = {2 * sp_size} for splitting" if is_batched_seq: seq = packed_seq[j][:seqlen] if is_label: # Shift one position to the right for next token prediction seq = torch.cat([seq[1:], torch.tensor([-100], dtype=dtype, device=device)]) seq = seq.chunk(2 * sp_size, dim=0) half = seqlen // sp_size // 2 local_seq[j][:half] = seq[sp_rank] local_seq[j][half : seqlen // sp_size] = seq[2 * sp_size - 1 - sp_rank] else: seq = packed_seq[start:end] if is_label: seq = torch.cat(seq[1:], torch.tensor([-100], dtype=dtype, device=device)) seq = seq.chunk(sp_size * 2) local_seq.extend([seq[sp_rank], seq[2 * sp_size - 1 - sp_rank]]) if is_batched_seq: batch[i] = local_seq.contiguous() else: batch[i] = torch.cat(local_seq, dim=0) if len(batch) == 1: batch = batch[0] return batch def is_share_sp_tp(sp_mode: str): """sp_mode "ring" and "split_gather" use the TP group as SP group to split both the vocab and sequence, so we must gather the sequence to correctly get logits at each positions. 
""" return sp_mode in ["ring", "split_gather"] class RingComm: def __init__(self, process_group: dist.ProcessGroup): self._process_group = process_group self._ops = [] self.rank = dist.get_rank(self._process_group) self.world_size = dist.get_world_size(self._process_group) self._reqs = [] self.send_rank = (self.rank + 1) % self.world_size self.recv_rank = (self.rank - 1) % self.world_size self.send_rank = dist.get_global_rank(self._process_group, self.send_rank) self.recv_rank = dist.get_global_rank(self._process_group, self.recv_rank) def send_recv( self, send_tensor: torch.Tensor, recv_tensor: Optional[torch.Tensor] = None, commit: bool = True, ) -> torch.Tensor: if recv_tensor is None: res = torch.empty_like(send_tensor) else: res = recv_tensor # looks like batch_isend_irecv doesn't deadlock even # when we don't swap send recv ops based on rank send_op = dist.P2POp(dist.isend, send_tensor, self.send_rank, group=self._process_group) recv_op = dist.P2POp(dist.irecv, res, self.recv_rank, group=self._process_group) self._ops.extend([send_op, recv_op]) if commit: self._reqs = dist.batch_isend_irecv(self._ops) return res def commit(self): assert len(self._ops) > 0, "No ops to commit" self._reqs = dist.batch_isend_irecv(self._ops) def wait(self): assert len(self._reqs) > 0, "No requests to wait for" for req in self._reqs: req.wait() self._reqs = [] self._ops = [] @torch.jit.script def get_half_index(cu_seqlens, *, front: bool): index = torch.zeros(cu_seqlens[-1], dtype=torch.bool, device=cu_seqlens.device) for i in range(len(cu_seqlens) - 1): start, end = cu_seqlens[i], cu_seqlens[i + 1] if front: end = (start + end) // 2 else: start = (start + end) // 2 index[start:end] = True return index
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/parallel_module.py
colossalai/shardformer/layer/parallel_module.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- import itertools from abc import ABC, abstractmethod from typing import List, Optional, Union import torch import torch.nn as nn from torch.distributed import ProcessGroup from torch.nn.modules.module import _EXTRA_STATE_KEY_SUFFIX, Module from colossalai.checkpoint_io.utils import gather_distributed_param from colossalai.tensor.d_tensor import ( distribute_tensor, distribute_tensor_with_customization, get_device_mesh, get_sharding_spec, is_customized_distributed_tensor, is_distributed_tensor, sharded_tensor_to_param, ) from colossalai.tensor.padded_tensor import is_padded_tensor, to_padded_tensor, to_unpadded_tensor __all__ = ["ParallelModule"] class ParallelModule(nn.Module, ABC): def __init__(self, **kwargs): super().__init__() @abstractmethod def from_native_module( module: nn.Module, process_group: Union[ProcessGroup, List[ProcessGroup]] = None ) -> "ParallelModule": """ Convert a native PyTorch module to a parallelized module. Args: module (nn.Module): the module to be converted. process_group (ProcessGroup or list[ProcessGroup]): the process group(s) to be used for communication. If this is a list, the process group at the ith index of the list will correspond to the process group in the ith axis of the device mesh. Defaults to None, which means the global process group. """ def _save_to_state_dict(self, destination, prefix, keep_vars): r"""Saves module state to `destination` dictionary, containing a state of the module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.state_dict`. In rare cases, subclasses can achieve class-specific behavior by overriding this method with custom logic. 
Args: destination (dict): a dict where state will be stored prefix (str): the prefix for parameters and buffers used in this module """ for name, param in self._parameters.items(): if param is not None: destination[prefix + name] = gather_distributed_param(param, keep_vars=keep_vars).data for name, buf in self._buffers.items(): if buf is not None and name not in self._non_persistent_buffers_set: destination[prefix + name] = buf if keep_vars else buf.detach() extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if getattr(self.__class__, "get_extra_state", Module.get_extra_state) is not Module.get_extra_state: destination[extra_state_key] = self.get_extra_state() def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): r"""Copies parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this module in input :attr:`state_dict` is provided as :attr:`local_metadata`. For state dicts without metadata, :attr:`local_metadata` is empty. Subclasses can achieve class-specific backward compatible loading using the version number at `local_metadata.get("version", None)`. .. note:: :attr:`state_dict` is not the same object as the input :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So it can be modified. Args: state_dict (dict): a dict containing parameters and persistent buffers. prefix (str): the prefix for parameters and buffers used in this module local_metadata (dict): a dict containing the metadata for this module. 
See strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` with :attr:`prefix` match the names of parameters and buffers in this module missing_keys (list of str): if ``strict=True``, add missing keys to this list unexpected_keys (list of str): if ``strict=True``, add unexpected keys to this list error_msgs (list of str): error messages should be added to this list, and will be reported together in :meth:`~torch.nn.Module.load_state_dict` """ for hook in self._load_state_dict_pre_hooks.values(): hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items()) local_state = {k: v for k, v in local_name_params if v is not None} for name, param in local_state.items(): key = prefix + name if key in state_dict: input_param = state_dict[key] if not torch.overrides.is_tensor_like(input_param): error_msgs.append( 'While copying the parameter named "{}", ' "expected torch.Tensor or Tensor-like object from checkpoint but " "received {}".format(key, type(input_param)) ) continue if is_distributed_tensor(param): # shard the input param device_mesh = get_device_mesh(param) sharding_spec = get_sharding_spec(param) sharded_tensor = distribute_tensor(input_param, device_mesh, sharding_spec) input_param = sharded_tensor_to_param(sharded_tensor) elif is_customized_distributed_tensor(param): input_param = distribute_tensor_with_customization(input_param, param.shard_fn, param.gather_fn) # This is used to avoid copying uninitialized parameters into # non-lazy modules, since they dont have the hook to do the checks # in such case, it will error when accessing the .shape attribute. 
is_param_lazy = torch.nn.parameter.is_lazy(param) # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+ if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1: input_param = input_param[0] if not is_param_lazy and input_param.shape != param.shape: # local shape should match the one in checkpoint error_msgs.append( "size mismatch for {}: copying a param with shape {} from checkpoint, " "the shape in current model is {}.".format(key, input_param.shape, param.shape) ) continue try: with torch.no_grad(): param.copy_(input_param) except Exception as ex: error_msgs.append( 'While copying the parameter named "{}", ' "whose dimensions in the model are {} and " "whose dimensions in the checkpoint are {}, " "an exception occurred : {}.".format(key, param.size(), input_param.size(), ex.args) ) elif strict: missing_keys.append(key) extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state: if extra_state_key in state_dict: self.set_extra_state(state_dict[extra_state_key]) elif strict: missing_keys.append(extra_state_key) elif strict and (extra_state_key in state_dict): unexpected_keys.append(extra_state_key) if strict: for key in state_dict.keys(): if key.startswith(prefix) and key != extra_state_key: input_name = key[len(prefix) :] input_name = input_name.split(".", 1)[0] # get the name of param/buffer/child if input_name not in self._modules and input_name not in local_state: unexpected_keys.append(key) class PaddingParallelModule(ParallelModule): def __init__( self, new_num_embeddings: int, old_num_embeddings: int, weight: Optional[nn.Parameter], bias_: Optional[nn.Parameter] = None, **kwargs, ) -> None: super().__init__(**kwargs) self.new_num_embeddings = new_num_embeddings self.old_num_embeddings = old_num_embeddings self.weight = weight self.bias = bias_ if not (is_distributed_tensor(self.weight) or self.weight.shape[0] == 
self.new_num_embeddings): self.resize_embedding_weight() if self.bias is not None and not ( is_distributed_tensor(self.bias) or self.bias.shape[0] == self.new_num_embeddings ): self.resize_embedding_bias() @abstractmethod def from_native_module( module: nn.Module, process_group: Union[ProcessGroup, List[ProcessGroup]] = None ) -> "PaddingParallelModule": """ Convert a native PyTorch module to a parallelized module. Args: module (nn.Module): the module to be converted. process_group (ProcessGroup or list[ProcessGroup]): the process group(s) to be used for communication. If this is a list, the process group at the ith index of the list will correspond to the process group in the ith axis of the device mesh. Defaults to None, which means the global process group. """ raise NotImplementedError def _save_to_state_dict(self, destination, prefix, keep_vars): r"""Saves module state to `destination` dictionary, containing a state of the module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.state_dict`. In rare cases, subclasses can achieve class-specific behavior by overriding this method with custom logic. 
Args: destination (dict): a dict where state will be stored prefix (str): the prefix for parameters and buffers used in this module """ for name, param in self._parameters.items(): if param is not None: param = gather_distributed_param(param, keep_vars=keep_vars) if is_padded_tensor(param): param = to_unpadded_tensor(param) destination[prefix + name] = param.data for name, buf in self._buffers.items(): if buf is not None and name not in self._non_persistent_buffers_set: destination[prefix + name] = buf if keep_vars else buf.detach() extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if getattr(self.__class__, "get_extra_state", Module.get_extra_state) is not Module.get_extra_state: destination[extra_state_key] = self.get_extra_state() def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): r"""Copies parameters and buffers from :attr:`state_dict` into only this module, but not its descendants. This is called on every submodule in :meth:`~torch.nn.Module.load_state_dict`. Metadata saved for this module in input :attr:`state_dict` is provided as :attr:`local_metadata`. For state dicts without metadata, :attr:`local_metadata` is empty. Subclasses can achieve class-specific backward compatible loading using the version number at `local_metadata.get("version", None)`. .. note:: :attr:`state_dict` is not the same object as the input :attr:`state_dict` to :meth:`~torch.nn.Module.load_state_dict`. So it can be modified. Args: state_dict (dict): a dict containing parameters and persistent buffers. prefix (str): the prefix for parameters and buffers used in this module local_metadata (dict): a dict containing the metadata for this module. 
See strict (bool): whether to strictly enforce that the keys in :attr:`state_dict` with :attr:`prefix` match the names of parameters and buffers in this module missing_keys (list of str): if ``strict=True``, add missing keys to this list unexpected_keys (list of str): if ``strict=True``, add unexpected keys to this list error_msgs (list of str): error messages should be added to this list, and will be reported together in :meth:`~torch.nn.Module.load_state_dict` """ for hook in self._load_state_dict_pre_hooks.values(): hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs) persistent_buffers = {k: v for k, v in self._buffers.items() if k not in self._non_persistent_buffers_set} local_name_params = itertools.chain(self._parameters.items(), persistent_buffers.items()) local_state = {k: v for k, v in local_name_params if v is not None} for name, param in local_state.items(): key = prefix + name if key in state_dict: input_param = state_dict[key] if not torch.overrides.is_tensor_like(input_param): error_msgs.append( 'While copying the parameter named "{}", ' "expected torch.Tensor or Tensor-like object from checkpoint but " "received {}".format(key, type(input_param)) ) continue if is_padded_tensor(param): input_param = to_padded_tensor(input_param, param._current_length, param._padding_dim) if is_distributed_tensor(param): # shard the input param device_mesh = get_device_mesh(param) sharding_spec = get_sharding_spec(param) sharded_tensor = distribute_tensor(input_param, device_mesh, sharding_spec) input_param = sharded_tensor_to_param(sharded_tensor) elif is_customized_distributed_tensor(param): input_param = distribute_tensor_with_customization(input_param, param.shard_fn, param.gather_fn) # This is used to avoid copying uninitialized parameters into # non-lazy modules, since they dont have the hook to do the checks # in such case, it will error when accessing the .shape attribute. 
is_param_lazy = torch.nn.parameter.is_lazy(param) # Backward compatibility: loading 1-dim tensor from 0.3.* to version 0.4+ if not is_param_lazy and len(param.shape) == 0 and len(input_param.shape) == 1: input_param = input_param[0] if not is_param_lazy and input_param.shape != param.shape: # local shape should match the one in checkpoint error_msgs.append( "size mismatch for {}: copying a param with shape {} from checkpoint, " "the shape in current model is {}.".format(key, input_param.shape, param.shape) ) continue try: with torch.no_grad(): param.copy_(input_param) except Exception as ex: error_msgs.append( 'While copying the parameter named "{}", ' "whose dimensions in the model are {} and " "whose dimensions in the checkpoint are {}, " "an exception occurred : {}.".format(key, param.size(), input_param.size(), ex.args) ) elif strict: missing_keys.append(key) extra_state_key = prefix + _EXTRA_STATE_KEY_SUFFIX if getattr(self.__class__, "set_extra_state", Module.set_extra_state) is not Module.set_extra_state: if extra_state_key in state_dict: self.set_extra_state(state_dict[extra_state_key]) elif strict: missing_keys.append(extra_state_key) elif strict and (extra_state_key in state_dict): unexpected_keys.append(extra_state_key) if strict: for key in state_dict.keys(): if key.startswith(prefix) and key != extra_state_key: input_name = key[len(prefix) :] input_name = input_name.split(".", 1)[0] # get the name of param/buffer/child if input_name not in self._modules and input_name not in local_state: unexpected_keys.append(key) def resize_embedding_weight(self): self.weight = to_padded_tensor(self.weight, self.new_num_embeddings, 0) def resize_embedding_bias(self): self.bias = to_padded_tensor(self.bias, self.new_num_embeddings, 0)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/loss.py
colossalai/shardformer/layer/loss.py
import torch
import torch.distributed as dist
from torch.autograd import Function
from torch.distributed import ProcessGroup
from torch.nn import CrossEntropyLoss
from torch.nn.functional import log_softmax

from colossalai.shardformer.layer._operation import reduce_forward
from colossalai.shardformer.shard import ShardConfig

from .utils import is_share_sp_tp

__all__ = [
    "DistCrossEntropy",
    "cross_entropy_1d",
    "dist_cross_entropy",
    "DistLogProb",
    "dist_log_prob_1d",
    "dist_log_prob",
]

_IGNORE_IDX = -100


class DistCrossEntropy(Function):
    r"""
    Overwrite the forward and backward function to calculate the cross entropy loss before gather

    Args:
        Function (:class:`torch.autograd.Function`): default
    """

    @staticmethod
    def forward(
        ctx,
        vocab_logits: torch.Tensor,
        target: torch.Tensor,
        ignore_index: int,
        process_group: ProcessGroup,
        vocab_size: int,
        dtype=torch.float32,
        mode="mean",
    ):
        r"""
        Calculate the cross entropy loss before gather, the origin loss function is as follows:
        loss = -log(exp(x[class])/sum(exp(x[i]))
        and can be rewritten as:
        loss = log(sum(exp(x[i])) - x[class]
        To avoid the `nan` of log(sum(exp(x[i]))), we minus the max of x[i]

        Args:
            vocab_logits (:class:`torch.Tensor`): The logits of the vocabulary, shape is
              [batch_size, seq_len, vocab_size]
            target (:class:`torch.Tensor`): The labels of the vocabulary, shape is
              [batch_size, seq_len]

        Returns:
            :class:`torch.Tensor`: The cross entropy loss
        """
        assert mode in ["mean", "sum"]
        # get the max; overlap the all-reduce with the local masking work below
        logits_max = torch.max(vocab_logits, dim=-1)[0]
        handle = dist.all_reduce(logits_max, op=dist.ReduceOp.MAX, group=process_group, async_op=True)

        # mask the target in the local device
        rank = dist.get_rank(group=process_group)
        world_size = dist.get_world_size(group=process_group)
        # NOTE(fix): was `vocab_size == None`; `is None` is the correct identity test
        # (and matches DistLogProb.forward below).
        if vocab_size is None:
            partition_vocab_size = vocab_logits.size()[-1]
            global_vocab_size = partition_vocab_size * world_size
        else:
            global_vocab_size = vocab_size
            partition_vocab_size = global_vocab_size // world_size

        # [down, up) => false, other device and -100 => true
        delta = (global_vocab_size + world_size - 1) // world_size
        down_threshold = rank * delta
        up_threshold = down_threshold + delta
        if up_threshold > global_vocab_size:
            up_threshold = global_vocab_size
        mask = (target < down_threshold) | (target >= up_threshold)
        masked_target = target.clone() - down_threshold
        masked_target[mask] = 0
        masked_target_1d = masked_target.view(-1).contiguous()

        # minus the max to avoid the result of sum of exp being too large and the log being nan
        handle.wait()
        vocab_logits = vocab_logits - logits_max.unsqueeze(dim=-1)
        # reshape the logits and target
        # reshape the vocab_logits to [batch_size * seq_len, vocab_size]
        # reshape the labels to [batch_size * seq_len]
        self_vocab_size = vocab_logits.size()[-1]
        logits_2d = vocab_logits.view(-1, self_vocab_size)

        # extract the x[class] and set the x[other device] to zero
        idx = torch.arange(start=0, end=logits_2d.shape[0], device=logits_2d.device)
        pred_logits_1d = logits_2d[idx, masked_target_1d].contiguous()
        pred_logits = pred_logits_1d.view_as(target)
        pred_logits[mask] = 0.0

        # all-reduce to get full x[i, y]
        handle = dist.all_reduce(pred_logits, op=dist.ReduceOp.SUM, group=process_group, async_op=True)
        # reuse vocab_logits' storage for exp to avoid an extra allocation
        exp_logits = vocab_logits
        torch.exp(vocab_logits, out=exp_logits)
        sum_exp_logits = torch.sum(exp_logits, dim=-1, dtype=torch.float32)
        dist.all_reduce(sum_exp_logits, op=dist.ReduceOp.SUM, group=process_group)

        # calculate the loss
        # loss = log(sum(exp(x[i]))) - x[class]
        handle.wait()
        loss = torch.where(target == ignore_index, 0.0, torch.log(sum_exp_logits) - pred_logits)
        if mode == "mean":
            num_non_zero = torch.sum(loss != 0.0)
            ctx.inv_num_non_zero = 1.0 / num_non_zero
            loss = torch.sum(loss).div_(num_non_zero)
        else:
            loss = torch.sum(loss)

        # calculate the softmax (saved for the backward pass)
        exp_logits = exp_logits.div(sum_exp_logits.unsqueeze(dim=-1)).to(dtype)
        exp_logits[target == ignore_index] = 0.0
        ctx.save_for_backward(exp_logits, mask, masked_target_1d)
        ctx.dtype = dtype
        ctx.mode = mode

        return loss

    @staticmethod
    def backward(ctx, grad_output):
        # retrieve the saved tensors
        if ctx.mode == "mean":
            grad_output = grad_output * ctx.inv_num_non_zero
        exp_logits, mask, masked_target_1d = ctx.saved_tensors

        # use exp logits as the input grad
        grad_logits = exp_logits
        partion_vocab_size = grad_logits.shape[-1]
        grad_logits_2d = grad_logits.view(-1, partion_vocab_size)

        # subtract 1 at the true-class position, but only on the rank that owns it
        update = 1.0 - mask.view(-1).float().to(ctx.dtype)
        grad_logits_2d[torch.arange(0, grad_logits_2d.shape[0]), masked_target_1d] -= update

        grad_logits.mul_(grad_output.unsqueeze(dim=-1))
        return grad_logits, None, None, None, None, None, None


class DistLogProb(Function):
    r"""
    Overwrite the forward and backward function to calculate the log prob before gather

    Args:
        Function (:class:`torch.autograd.Function`): default
    """

    @staticmethod
    def forward(
        ctx,
        vocab_logits: torch.Tensor,
        target: torch.Tensor,
        process_group: ProcessGroup,
        vocab_size: int,
        dtype=torch.float32,
    ):
        ##################
        # Step1:Find the global maximum value of logits
        ##################
        logits_max = torch.max(vocab_logits, dim=-1)[0]
        handle = dist.all_reduce(logits_max, op=dist.ReduceOp.MAX, group=process_group, async_op=True)

        ##################
        # Step2:Find the local mask. local mask will be used to select log_probs value in Step 4.
        # For acceleration, we overlap Step 2 and Step 3
        ##################
        rank = dist.get_rank(group=process_group)
        world_size = dist.get_world_size(group=process_group)
        if vocab_size is None:
            partition_vocab_size = vocab_logits.size()[-1]
            global_vocab_size = partition_vocab_size * world_size
        else:
            global_vocab_size = vocab_size
            partition_vocab_size = global_vocab_size // world_size
        # down and up threshold for local logits
        delta = (global_vocab_size + world_size - 1) // world_size
        down_threshold = rank * delta
        up_threshold = down_threshold + delta
        if up_threshold > global_vocab_size:
            up_threshold = global_vocab_size
        # mask
        mask = (target < down_threshold) | (target >= up_threshold)
        masked_target = target.clone() - down_threshold
        masked_target[mask] = 0
        masked_target_1d = masked_target.view(-1).contiguous()
        handle.wait()

        ##################
        # Step3:Calculate global summation exp logits
        ##################
        vocab_logits = vocab_logits - logits_max.unsqueeze(dim=-1)
        exp_logits = torch.exp(vocab_logits)
        sum_exp_logits = torch.sum(exp_logits, dim=-1, dtype=torch.float32)  # local summation exp logits
        dist.all_reduce(sum_exp_logits, op=dist.ReduceOp.SUM, group=process_group)

        ##################
        # Step4:Calculate local prob. We first cal log_softmax, then select log probs via local mask
        ##################
        log_probs = vocab_logits - torch.log(sum_exp_logits.unsqueeze(dim=-1))  # cal log_softmax
        log_probs = log_probs.gather(dim=-1, index=masked_target.unsqueeze(-1))
        log_probs[mask.unsqueeze(-1)] = 0  # set masked val to zero
        dist.all_reduce(log_probs, op=dist.ReduceOp.SUM, group=process_group)

        ctx.save_for_backward(exp_logits, mask, masked_target_1d, sum_exp_logits)
        ctx.dtype = dtype
        return log_probs

    @staticmethod
    def backward(ctx, grad_output):
        exp_logits, mask, masked_target_1d, sum_exp_logits = ctx.saved_tensors
        ##################
        # Step1:Find the global softmax value
        ##################
        softmax_logits = exp_logits / sum_exp_logits.unsqueeze(dim=-1)

        ##################
        # Step2:Update softmax value based on local target index
        ##################
        partion_vocab_size = softmax_logits.shape[-1]
        softmax_logits_2d = softmax_logits.view(-1, partion_vocab_size)
        update = 1.0 - mask.view(-1).float().to(ctx.dtype)
        softmax_logits_2d[torch.arange(0, softmax_logits_2d.shape[0]), masked_target_1d] -= update

        ##################
        # Step3:Calculate grad_output, which is the gradient of the loss function with respect to the output of logsoftmax
        ##################
        grad_logits = -softmax_logits.mul_(grad_output)
        return grad_logits, None, None, None, None, None, None


def cross_entropy_1d(
    vocab_logits: torch.Tensor,
    labels: torch.Tensor,
    ignore_index: int = _IGNORE_IDX,
    process_group: ProcessGroup = None,
    vocab_size: int = None,
    dtype: torch.dtype = None,
    mode: str = "mean",
) -> torch.Tensor:
    """Thin wrapper around :class:`DistCrossEntropy` (see its forward for semantics)."""
    return DistCrossEntropy.apply(vocab_logits, labels, ignore_index, process_group, vocab_size, dtype, mode)


def dist_log_prob_1d(
    vocab_logits: torch.Tensor,
    labels: torch.Tensor,
    process_group: ProcessGroup = None,
    vocab_size: int = None,
    dtype: torch.dtype = None,
) -> torch.Tensor:
    """Thin wrapper around :class:`DistLogProb` (see its forward for semantics)."""
    return DistLogProb.apply(vocab_logits, labels, process_group, vocab_size, dtype)


def dist_cross_entropy(
    labels: torch.Tensor,  # [B, S] or [B, S, Vocab_size]
    logits: torch.Tensor,  # [B, S, Vocab_size]
    shard_config: ShardConfig,
    vocab_size: int,
    dtype: torch.dtype,
    seq_dim: int = 1,
) -> torch.Tensor:
    """
    Helper to compute cross entropy loss for most shardformer models supporting PP, TP and SP.
    """
    # Split labels if not gather output
    sp_group = shard_config.sequence_parallel_process_group
    sp_rank = dist.get_rank(sp_group)
    sp_size = shard_config.sequence_parallel_size
    sp_mode = shard_config.sequence_parallelism_mode
    parallel_output = shard_config.parallel_output
    is_tp = shard_config.enable_tensor_parallelism
    is_packed = labels.dim() == 2
    if is_packed:
        bs, seq_len = labels.shape
    else:
        # padded sequence
        seq_len = labels.shape[-1]
        logits = logits.reshape(-1, *logits.shape[2:])
        seq_dim = 0

    # Shift labels to predict the next token, and remove the tail logit predicting <EOS>
    is_sp = sp_size > 1 and (not is_share_sp_tp(sp_mode))
    split_labels_here = seq_len // sp_size == logits.size(seq_dim)  # ring attn splits labels before forward
    if sp_mode == "ring_attn":
        # For Zigzag Ring Attention, labels should've been split and
        # shifted by RingAttention.prepare_varlen_batch()
        if sp_rank == 0:
            logits = logits[..., :-1, :]
            logits = torch.cat([logits, torch.full_like(logits[:, :1, :], _IGNORE_IDX)], dim=seq_dim)
    elif is_sp:
        # Shift only once: either before splitting or in the last rank without splitting
        if split_labels_here or (sp_rank == sp_size - 1):
            labels = labels[..., 1:]
        if split_labels_here:
            labels = labels.split(seq_len // sp_size, dim=-1)[sp_rank]

        if sp_rank == sp_size - 1:
            logits = logits[..., :-1, :]
            # Pad logits and labels to the same shape across all ranks for TP all_reduce
            if is_tp and parallel_output:
                # If is packed sequence (label dim is 1), then each seq already has the end label token padded.
                # torch.cat is faster than F.pad...
                pad_shape = (logits.shape[0], 1, *logits.shape[2:]) if is_packed else (1, *logits.shape[1:])
                padding = torch.full(pad_shape, _IGNORE_IDX, dtype=logits.dtype, device=logits.device)
                logits = torch.cat([logits, padding], dim=seq_dim)
                pad_shape = (labels.shape[0], 1) if is_packed else (1,)
                padding = torch.full(pad_shape, _IGNORE_IDX, dtype=labels.dtype, device=labels.device)
                labels = torch.cat([labels, padding], dim=seq_dim)
    else:
        labels = labels[..., 1:]
        logits = logits[..., :-1, :]
    labels = labels.contiguous()
    logits = logits.contiguous()
    num_nonzero = (labels != _IGNORE_IDX).sum()
    assert labels.shape == logits.shape[:-1], f"label shape {labels.shape} does not match logit shape {logits.shape}"

    # Flatten the tokens
    loss_fct = CrossEntropyLoss(ignore_index=_IGNORE_IDX, reduction="sum")
    labels = labels.view(-1)

    if is_tp and parallel_output:
        # Cross entropy with all-reduce for TP
        new_vocab_size = logits.shape[-1]
        logits = logits.view(-1, new_vocab_size)
        loss = cross_entropy_1d(
            logits,
            labels,
            process_group=shard_config.tensor_parallel_process_group,
            vocab_size=vocab_size,
            dtype=dtype,
            mode="sum",
        )
    else:
        # NOTE if use TP and not parallel_output, the output is gathered in VocabParallelLMHead1D
        logits = logits.view(-1, logits.size(-1))
        loss = loss_fct(logits, labels)

    # Reduce loss instead of gathering logits over seq dim for savings
    if split_labels_here or sp_mode == "ring_attn":
        # Get the global non-zero count
        loss = torch.stack((loss, num_nonzero))
        # Rescale to offset the grad / (DP * SP) in HybridParallelPlugin
        loss = reduce_forward(loss, sp_group, grad_scale=sp_size)
        loss, num_nonzero = loss[0], loss[1].detach()
    loss = (loss / num_nonzero).squeeze()
    return loss


def dist_log_prob(
    labels: torch.Tensor,  # [B, S] or [B, S, Vocab_size]
    logits: torch.Tensor,  # [B, S, Vocab_size]
    shard_config: ShardConfig,
    vocab_size: int,
    dtype: torch.dtype,
    seq_dim: int = 1,
) -> torch.Tensor:
    """
    Helper to compute log prob for most shardformer models supporting PP, TP.
    """
    # Split labels if not gather output
    parallel_output = shard_config.parallel_output
    is_tp = shard_config.enable_tensor_parallelism

    # TODO:support sp
    labels = labels[..., 1:]
    logits = logits[..., :-1, :]
    labels = labels.contiguous()
    logits = logits.contiguous()
    assert labels.shape == logits.shape[:-1], f"label shape {labels.shape} does not match logit shape {logits.shape}"

    # Flatten the tokens
    if is_tp and parallel_output:
        log_prob = dist_log_prob_1d(
            logits,
            labels,
            process_group=shard_config.tensor_parallel_process_group,
            vocab_size=vocab_size,
            dtype=dtype,
        )
    else:
        log_prob = log_softmax(logits, dim=-1)
        log_prob = log_prob.gather(dim=-1, index=labels.unsqueeze(-1))

    return log_prob
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/__init__.py
colossalai/shardformer/layer/__init__.py
from ._operation import all_to_all_comm
from .attn import AttnMaskType, ColoAttention, RingAttention, get_pad_info
from .dropout import DropoutForParallelInput, DropoutForReplicatedInput
from .embedding import Embedding1D, PaddingEmbedding, VocabParallelEmbedding1D
from .linear import Linear1D_Col, Linear1D_Row, LinearWithGradAccum, PaddingLMHead, VocabParallelLMHead1D
from .loss import cross_entropy_1d, dist_cross_entropy, dist_log_prob, dist_log_prob_1d

# NOTE(fix): BaseLayerNorm was listed in __all__ but never imported, so
# `from colossalai.shardformer.layer import *` (or importing BaseLayerNorm from
# this package) raised an error. normalization.py exports it in its own __all__.
from .normalization import BaseLayerNorm, FusedLayerNorm, FusedRMSNorm, LayerNorm, RMSNorm
from .parallel_module import ParallelModule
from .qkv_fused_linear import (
    FusedLinear,
    FusedLinear1D_Col,
    FusedLinear1D_Row,
    GPT2FusedLinearConv,
    GPT2FusedLinearConv1D_Col,
    GPT2FusedLinearConv1D_Row,
)

__all__ = [
    "Embedding1D",
    "VocabParallelEmbedding1D",
    "LinearWithGradAccum",
    "Linear1D_Col",
    "Linear1D_Row",
    "GPT2FusedLinearConv",
    "GPT2FusedLinearConv1D_Row",
    "GPT2FusedLinearConv1D_Col",
    "DropoutForParallelInput",
    "DropoutForReplicatedInput",
    "cross_entropy_1d",
    "dist_cross_entropy",
    "dist_log_prob_1d",
    "dist_log_prob",
    "BaseLayerNorm",
    "LayerNorm",
    "RMSNorm",
    "FusedLayerNorm",
    "FusedRMSNorm",
    "FusedLinear1D_Col",
    "FusedLinear",
    "ParallelModule",
    "PaddingEmbedding",
    "PaddingLMHead",
    "VocabParallelLMHead1D",
    "AttnMaskType",
    "ColoAttention",
    "RingAttention",
    "get_pad_info",
    "all_to_all_comm",
    "FusedLinear1D_Row",
]
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/embedding.py
colossalai/shardformer/layer/embedding.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from typing import Callable, List, Optional, Union

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.distributed import ProcessGroup

from colossalai.lazy import LazyInitContext
from colossalai.nn import init as init
from colossalai.nn.layer.utils import divide
from colossalai.tensor.d_tensor.api import (
    is_distributed_tensor,
    shard_colwise,
    shard_rowwise,
    sharded_tensor_to_existing_param,
)

from ._operation import gather_forward_split_backward, reduce_forward
from .parallel_module import PaddingParallelModule, ParallelModule
from .utils import create_randomizer_with_offset

__all__ = ["Embedding1D", "VocabParallelEmbedding1D", "PaddingEmbedding"]


class Embedding1D(ParallelModule):
    r"""Embedding for 1D parallelism, sharded along the embedding (feature) dimension.

    Args:
        num_embeddings (int): number of embeddings.
        embedding_dim (int): dimension of embedding.
        padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient;
            therefore, the embedding vector at padding_idx is not updated during training,
            i.e. it remains as a fixed "pad", defaults to None.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional): the initializer of weight,
            defaults to normal initializer.

    The ``args`` and ``kwargs`` used in :class:`torch.nn.functional.embedding` should contain:
    ::

        max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is
                    renormalized to have norm max_norm. Note: this will modify weight in-place.
        norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2.
        scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse
                    of frequency of the words in the mini-batch. Default False.
        sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False.

    More details about ``args`` and ``kwargs`` could be found in
    `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        process_group: ProcessGroup = None,
        gather_output: bool = True,
        weight: Optional[nn.Parameter] = None,
        weight_initializer: Callable = init.normal_(),
        fp8_communication: bool = False,
        *args,
        **kwargs,
    ):
        super().__init__()

        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.process_group = process_group
        self.padding_idx = padding_idx
        self.embed_args = args
        self.embed_kwargs = kwargs
        self.gather_output = gather_output
        self.fp8_communication = fp8_communication

        # offset the seed with randomizer index and rank
        seed = torch.random.initial_seed()
        self.randomizer = create_randomizer_with_offset(seed, process_group=self.process_group)

        # Parameters. When an existing weight is passed in, it is reused (and moved
        # to the requested device/dtype) instead of allocating a new one.
        if weight is None:
            factory_kwargs = {"device": device, "dtype": dtype}
            self.weight = nn.Parameter(torch.empty((num_embeddings, self.embedding_dim), **factory_kwargs))
        else:
            weight.data = weight.data.to(device=device, dtype=dtype)
            self.weight = weight
        # Shard the weight column-wise (along embedding_dim) in place, unless it is
        # already a distributed tensor (e.g. pre-sharded by an outer context).
        if not is_distributed_tensor(self.weight):
            sharded_weight = shard_colwise(self.weight.data, process_group)
            sharded_tensor_to_existing_param(sharded_weight, self.weight)

        # Only freshly-allocated weights are (re)initialized; a borrowed weight keeps its values.
        if weight is None:
            with self.randomizer.fork_rng(enable_cpu=True):
                self.reset_parameters(weight_initializer)

    @staticmethod
    def from_native_module(
        module: nn.Embedding, process_group: Union[ProcessGroup, List[ProcessGroup]] = None, *args, **kwargs
    ) -> "Embedding1D":
        r"""
        Build a 1D parallelized Embedding from a native nn.Embedding module.
        """
        LazyInitContext.materialize(module)
        # get the attributes
        num_embedding = module.num_embeddings
        embedding_dim = module.embedding_dim
        padding_idx = module.padding_idx
        max_norm = module.max_norm
        norm_type = module.norm_type
        scale_grad_by_freq = module.scale_grad_by_freq
        sparse = module.sparse
        dtype = module.weight.dtype
        device = module.weight.device

        # sparse is not support yet
        if sparse:
            raise NotImplementedError("The Embedding1D module does not support sparse embedding yet.")

        embedding = Embedding1D(
            num_embeddings=num_embedding,
            embedding_dim=embedding_dim,
            padding_idx=padding_idx,
            process_group=process_group,
            dtype=dtype,
            device=device,
            max_norm=max_norm,
            norm_type=norm_type,
            scale_grad_by_freq=scale_grad_by_freq,
            sparse=sparse,
            weight=module.weight,
            *args,
            **kwargs,
        )

        return embedding

    def reset_parameters(self, weight_initializer) -> None:
        # Re-initialize the (sharded) weight and re-zero the padding row.
        fan_in, fan_out = self.num_embeddings, self.embedding_dim
        weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # Zero the embedding vector at padding_idx so it stays a fixed "pad".
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input_: Tensor) -> Tensor:
        # Each rank embeds with its column shard; optionally gather shards along
        # the last (feature) dim so callers see the full embedding.
        output_parallel = F.embedding(input_, self.weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)

        if self.gather_output:
            output = gather_forward_split_backward(
                output_parallel, dim=-1, process_group=self.process_group, fp8_communication=self.fp8_communication
            )
            return output
        else:
            return output_parallel


class PaddingEmbedding(PaddingParallelModule):
    """Embedding whose vocabulary is padded up to a multiple of ``make_vocab_size_divisible_by``.

    Not tensor-parallel by itself; the padding only rounds the vocab size
    (the padded rows are managed by PaddingParallelModule).
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        weight: Optional[nn.Parameter] = None,
        make_vocab_size_divisible_by: int = 64,
        *args,
        **kwargs,
    ):
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.embed_args = args
        self.embed_kwargs = kwargs
        self.padding_idx = padding_idx
        # round self.num_embeddings up to the next multiple of make_vocab_size_divisible_by
        if num_embeddings % make_vocab_size_divisible_by != 0:
            self.num_embeddings = (
                num_embeddings + make_vocab_size_divisible_by - (num_embeddings % make_vocab_size_divisible_by)
            )
        # create weight and bias
        # NOTE: the weight is deliberately allocated with the ORIGINAL num_embeddings;
        # PaddingParallelModule (new size = self.num_embeddings, old size = num_embeddings)
        # is responsible for padding it to the rounded size.
        if weight is None:
            factory_kwargs = {"device": device, "dtype": dtype}
            weight = nn.Parameter(torch.empty((num_embeddings, self.embedding_dim), **factory_kwargs))
        else:
            weight.data = weight.data.to(device=device, dtype=dtype)

        super().__init__(self.num_embeddings, num_embeddings, weight)

        if weight is None:
            self.reset_parameters()

    def reset_parameters(self) -> None:
        init.normal_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # Zero the embedding vector at padding_idx so it stays a fixed "pad".
        if self.padding_idx is not None:
            with torch.no_grad():
                self.weight[self.padding_idx].fill_(0)

    def forward(self, input: Tensor) -> Tensor:
        return F.embedding(input, self.weight, self.padding_idx, *self.embed_args, **self.embed_kwargs)

    @staticmethod
    def from_native_module(
        module: nn.Embedding, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs
    ) -> PaddingParallelModule:
        r"""
        Convert a native pytorch embedding module to a parallel module.
        """
        LazyInitContext.materialize(module)
        # get the origin attributes
        num_embeddings = module.num_embeddings
        embedding_dim = module.embedding_dim
        padding_idx = module.padding_idx
        device = module.weight.device
        # create the parallel module
        padding_embedding = PaddingEmbedding(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
            padding_idx=padding_idx,
            device=device,
            weight=module.weight,
            *args,
            **kwargs,
        )

        return padding_embedding


class VocabParallelEmbedding1D(PaddingParallelModule):
    r"""Embedding parallelized in the vocabulary dimension.

    Args:
        num_embeddings (int): number of embeddings.
        embedding_dim (int): dimension of embedding.
        padding_idx (int, optional): If specified, the entries at padding_idx do not contribute to the gradient;
            therefore, the embedding vector at padding_idx is not updated during training,
            i.e. it remains as a fixed "pad", defaults to None.
        dtype (:class:`torch.dtype`, optional): The dtype of parameters, defaults to None.
        weight_initializer (:class:`typing.Callable`, optional): the initializer of weight,
            defaults to normal initializer.

    The ``args`` and ``kwargs`` used in :class:``torch.nn.functional.embedding`` should contain:
    ::

        max_norm (float, optional): If given, each embedding vector with norm larger than max_norm is
                    renormalized to have norm max_norm. Note: this will modify weight in-place.
        norm_type (float, optional): The p of the p-norm to compute for the max_norm option. Default 2.
        scale_grad_by_freq (bool, optional): If given, this will scale gradients by the inverse
                    of frequency of the words in the mini-batch. Default False.
        sparse (bool, optional): If True, gradient w.r.t. weight will be a sparse tensor. Default False.

    More details about ``args`` and ``kwargs`` could be found in
    `Embedding <https://pytorch.org/docs/stable/generated/torch.nn.functional.embedding.html#torch.nn.functional.embedding>`_.

    More details about initializer please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None,
        process_group: ProcessGroup = None,
        weight: Optional[nn.Parameter] = None,
        weight_initializer: Callable = init.normal_(),
        make_vocab_size_divisible_by: int = 64,
        fp8_communication: bool = False,
        *args,
        **kwargs,
    ):
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.embed_args = args
        self.embed_kwargs = kwargs
        self.process_group = process_group
        self.fp8_communication = fp8_communication

        tensor_parallel_size = dist.get_world_size(group=process_group)
        tensor_parallel_rank = dist.get_rank(group=process_group)

        # generate weight and bias
        # NOTE: the weight is allocated with the ORIGINAL num_embeddings; the padded
        # size below is handed to PaddingParallelModule, which resizes the vocab.
        if weight is None:
            factory_kwargs = {"device": device, "dtype": dtype}
            weight = nn.Parameter(torch.empty((num_embeddings, self.embedding_dim), **factory_kwargs))
        else:
            weight.data = weight.data.to(device=device, dtype=dtype)

        # calculate new padding size: a multiple of (divisor * tp size) so every
        # rank gets an equal vocab partition
        multiple = make_vocab_size_divisible_by * tensor_parallel_size
        if num_embeddings % multiple != 0:
            self.num_embeddings = num_embeddings + multiple - (num_embeddings % multiple)

        # resize vocabulary size
        super().__init__(self.num_embeddings, num_embeddings, weight)

        # deal with tensor parallelism: this rank owns vocab ids
        # [vocab_start_index, vocab_end_index)
        self.num_embeddings_per_partition = divide(self.num_embeddings, tensor_parallel_size)
        self.vocab_start_index = tensor_parallel_rank * self.num_embeddings_per_partition
        self.vocab_end_index = self.vocab_start_index + self.num_embeddings_per_partition

        # padding index (remapped into this rank's local range, or None if not owned here)
        self.padding_idx = self._select_padding_idx(padding_idx)

        # offset the seed with randomizer index and rank
        seed = torch.random.initial_seed()
        self.randomizer = create_randomizer_with_offset(seed, process_group=self.process_group)

        # Shard the weight row-wise (along the vocab dim) in place, unless already distributed.
        if not is_distributed_tensor(self.weight):
            sharded_weight = shard_rowwise(self.weight.data, process_group)
            sharded_tensor_to_existing_param(sharded_weight, self.weight)

        if weight is None:
            self.reset_parameters(weight_initializer)

    @staticmethod
    def from_native_module(
        module: nn.Embedding, process_group: Union[ProcessGroup, List[ProcessGroup]], *args, **kwargs
    ) -> PaddingParallelModule:
        r"""
        Convert a native pytorch embedding module to a parallel module.
        """
        LazyInitContext.materialize(module)
        # get the origin attributes
        num_embeddings = module.num_embeddings
        embedding_dim = module.embedding_dim
        padding_idx = module.padding_idx
        device = module.weight.device

        # ensure only one process group is used
        if isinstance(process_group, (list, tuple)):
            assert len(process_group) == 1, f"Expected only one process group, got {len(process_group)}."
            process_group = process_group[0]

        # create the parallel module
        vocab_embedding_1d = VocabParallelEmbedding1D(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
            padding_idx=padding_idx,
            device=device,
            process_group=process_group,
            weight=module.weight,
            *args,
            **kwargs,
        )

        return vocab_embedding_1d

    def reset_parameters(self, weight_initializer) -> None:
        with self.randomizer.fork_rng(enable_cpu=True):
            fan_in, fan_out = self.num_embeddings, self.embedding_dim
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        # Only the rank that owns padding_idx zeroes its (local) row.
        if (
            self.padding_idx is not None
            and self.padding_idx >= self.vocab_start_index
            and self.padding_idx < self.vocab_end_index
        ):
            with torch.no_grad():
                self.weight[self.padding_idx - self.vocab_start_index].fill_(0)

    def _select_padding_idx(self, padding_idx: Optional[int]) -> Optional[int]:
        # select padding index according to the rank: map the global padding_idx
        # into this rank's local range, or None if another rank owns it
        if padding_idx is None:
            return None
        elif padding_idx < self.vocab_end_index and padding_idx >= self.vocab_start_index:
            return padding_idx - self.vocab_start_index
        else:
            return None

    def forward(self, input_: Tensor) -> Tensor:
        # Build the mask: True for token ids this rank does NOT own.
        input_mask = (input_ < self.vocab_start_index) | (input_ >= self.vocab_end_index)
        # Mask the input: shift owned ids into local coordinates; out-of-range ids go to row 0.
        masked_input = input_.clone() - self.vocab_start_index
        masked_input[input_mask] = 0

        output_parallel = F.embedding(
            masked_input, self.weight, self.padding_idx, *self.embed_args, **self.embed_kwargs
        )

        # Mask the output embedding: zero the rows looked up for ids owned elsewhere,
        # then sum-reduce across ranks so each id contributes exactly once.
        embedding_output = output_parallel.clone()
        embedding_output[input_mask, :] = 0.0
        # Reduce across all the model parallel GPUs.
        output = reduce_forward(embedding_output, self.process_group, fp8_communication=self.fp8_communication)
        return output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/normalization.py
colossalai/shardformer/layer/normalization.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Normalization-layer wrappers for shardformer.

Provides ``from_native_module`` converters that swap native PyTorch
``LayerNorm``/RMSNorm modules for fused implementations (apex fast/fused
layernorm, apex fused RMSNorm, or the Ascend NPU RMSNorm kernel) and, for
sequence parallelism, mark affine parameters as partially derived so their
gradients get aggregated in backward.
"""

import numbers
import warnings
from abc import ABC, abstractmethod

import torch
import torch.nn as nn
from torch.nn import init
from torch.nn.parameter import Parameter

from colossalai.lazy import LazyInitContext

from ._operation import hook_parameter_in_backward
from .utils import SeqParallelUtils

# Detect Ascend NPU support; torch_npu import can fail in many ways, hence the
# broad except.
SUPPORT_NPU = False
try:
    import torch_npu

    SUPPORT_NPU = True
except Exception:
    pass

__all__ = ["FusedLayerNorm", "FusedRMSNorm", "LayerNorm", "RMSNorm", "BaseLayerNorm"]

try:
    from apex.contrib.layer_norm.layer_norm import FastLayerNorm

    EnableFastLayerNorm = True
except ImportError:
    EnableFastLayerNorm = False

try:
    from apex.normalization import FusedLayerNorm as ApexFusedLayerNorm

    class FusedLayerNormWithHook(ApexFusedLayerNorm):
        """Apex fused layernorm with a backward hook on weight/bias."""

        def __init__(self, normalized_shape, eps=0.00001, elementwise_affine=True):
            super().__init__(normalized_shape, eps, elementwise_affine)

        def forward(self, input):
            output = super().forward(input)
            output = hook_parameter_in_backward(output, self.weight, self.bias)
            return output

except ImportError:
    # BUGFIX: this fallback belongs to the fused *layernorm* import above; the
    # previous message wrongly referred to the RMSNorm kernel.
    warnings.warn("Please install apex from source (https://github.com/NVIDIA/apex) to use the fused layernorm kernel")

# Resolved to the platform-appropriate fused RMSNorm class below; stays None
# when neither the NPU kernel nor apex is available.
FusedRMSNormWithHook = None
if SUPPORT_NPU:

    class NPUFusedRMSNormWithHook(nn.Module):
        """RMSNorm backed by the Ascend ``npu_rms_norm`` kernel, with a backward hook on weight."""

        def __init__(self, normalized_shape, eps=0.00001, elementwise_affine=True):
            super().__init__()
            if isinstance(normalized_shape, numbers.Integral):
                normalized_shape = (normalized_shape,)
            self.normalized_shape = torch.Size(normalized_shape)
            self.eps = eps
            self.elementwise_affine = elementwise_affine
            if self.elementwise_affine:
                self.weight = Parameter(torch.empty(*normalized_shape))
            else:
                self.register_parameter("weight", None)
            self.reset_parameters()

        def reset_parameters(self):
            if self.elementwise_affine:
                init.ones_(self.weight)

        def forward(self, input):
            # npu_rms_norm returns (output, rstd); only the output is needed.
            output, _ = torch_npu.npu_rms_norm(input, self.weight, self.eps)
            output = hook_parameter_in_backward(output, self.weight)
            return output

    FusedRMSNormWithHook = NPUFusedRMSNormWithHook
else:
    try:
        from apex.normalization import FusedRMSNorm as ApexFusedRMSNorm

        class CUDAFusedRMSNormWithHook(ApexFusedRMSNorm):
            """Apex fused RMSNorm with a backward hook on weight."""

            def __init__(self, normalized_shape, eps=0.00001, elementwise_affine=True):
                super().__init__(normalized_shape, eps, elementwise_affine)

            def forward(self, input):
                output = super().forward(input)
                output = hook_parameter_in_backward(output, self.weight)
                return output

        FusedRMSNormWithHook = CUDAFusedRMSNormWithHook
    except ImportError:
        warnings.warn(
            "Please install apex from source (https://github.com/NVIDIA/apex) to use the fused RMSNorm kernel"
        )

# Hidden sizes for which apex's FastLayerNorm kernel is compiled.
FAST_LAYERNORM_SUPPORTED_SIZE = [
    1024,
    1536,
    2048,
    2304,
    3072,
    3840,
    4096,
    5120,
    6144,
    8192,
    10240,
    12288,
    12800,
    15360,
    16384,
    18432,
    20480,
    24576,
    25600,
    30720,
    32768,
    40960,
    49152,
    65536,
]

if EnableFastLayerNorm:

    class FastLayerNormWithHook(FastLayerNorm):
        """Apex FastLayerNorm with a backward hook on weight/bias."""

        def __init__(self, hidden_size, eps=0.00001):
            super().__init__(hidden_size, eps)

        def forward(self, input):
            output = super().forward(input)
            output = hook_parameter_in_backward(output, self.weight, self.bias)
            return output


class BaseLayerNorm(ABC):
    # NOTE(review): declared without @staticmethod/self; concrete subclasses
    # override it as a @staticmethod, so it is never bound as written.
    @abstractmethod
    def from_native_module(module: nn.Module, sp_partial_derived: bool = False):
        """
        Convert a native PyTorch layer normalization module to a specific layer normalization module,
        and optionally mark parameters for gradient aggregation.

        Args:
            module (nn.Module): The native PyTorch layer normalization module to be converted.
            sp_partial_derived (bool): Whether this module's gradients are partially derived in sequence parallelism.

        Returns:
            nn.Module: The specific layer normalization module.

        Raises:
            AssertionError: If the provided module is not an instance of the supported layer normalization type.
        """


class RMSNorm(BaseLayerNorm):
    r"""
    This is a wrapper around the RMSNorm. It is meant to be used only with the from_native_module interface.
    """

    def __init__(self) -> None:
        raise NotImplementedError(
            "FusedLayerNorm is not implemented as a physical class. "
            "It is meant to be used only with the from_native_module interface to convert a native RMSNorm module to colossalai layer norm module."
        )

    @staticmethod
    def from_native_module(module: nn.Module, sp_partial_derived: bool = False, *args, **kwargs) -> nn.Module:
        """
        Convert a native RMSNorm module to colossalai layer norm module,
        and optionally mark parameters for gradient aggregation.

        Args:
            module (nn.Module): The native RMSNorm module to be converted.
            sp_partial_derived (bool): Whether this module's gradients are partially derived in sequence parallelism.

        Returns:
            nn.Module: The RMSNorm module.
        """
        LazyInitContext.materialize(module)

        if sp_partial_derived:
            # Since gradients are computed using only a subset of the data,
            # aggregation of these gradients is necessary during backpropagation.
            # Therefore, we annotate these parameters in advance to indicate the need for gradient aggregation.
            SeqParallelUtils.marked_as_sp_partial_derived_param(module.weight)

        return module


class LayerNorm(BaseLayerNorm):
    r"""
    This is a wrapper around native LayerNorm. It is meant to be used only with the from_native_module interface.
    """

    def __init__(self) -> None:
        raise NotImplementedError(
            "LayerNorm is not implemented as a physical class. "
            "It is meant to be used only with the from_native_module interface to convert a native LayerNorm module to colossalai layer norm module."
        )

    @staticmethod
    def from_native_module(module: nn.Module, sp_partial_derived: bool = False, *args, **kwargs) -> nn.Module:
        r"""
        Convert a native LayerNorm module to colossalai layer norm module,
        and optionally marking parameters for gradient aggregation.

        Args:
            module (nn.Module): The native LayerNorm module to be converted.
            sp_partial_derived (bool): Whether this module's gradients are partially derived in sequence parallelism.

        Returns:
            nn.Module: The colossalai LayerNorm module.
        """
        LazyInitContext.materialize(module)

        if sp_partial_derived:
            # Since gradients are computed using only a subset of the data,
            # aggregation of these gradients is necessary during backpropagation.
            # Therefore, we annotate these parameters in advance to indicate the need for gradient aggregation.
            SeqParallelUtils.marked_as_sp_partial_derived_param(module.weight)
            if module.bias is not None:
                SeqParallelUtils.marked_as_sp_partial_derived_param(module.bias)

        return module


class FusedLayerNorm(BaseLayerNorm):
    r"""
    This is a wrapper around the apex fused layernorm implementation. It is meant to be used only with the from_native_module interface.
    """

    def __init__(self) -> None:
        raise NotImplementedError(
            "FusedLayerNorm is not implemented as a physical class. "
            "It is meant to be used only with the from_native_module interface convert a native LayerNorm module to FusedLayerNorm module provided by apex."
        )

    @staticmethod
    def from_native_module(module: nn.LayerNorm, sp_partial_derived: bool = False, *args, **kwargs) -> nn.Module:
        r"""
        Convert a native LayerNorm module to FusedLayerNorm module provided by apex,
        and optionally marking parameters for gradient aggregation.

        Args:
            module (nn.Module): The native LayerNorm module to be converted.
            sp_partial_derived (bool): Whether this module's gradients are partially derived in sequence parallelism.

        Returns:
            nn.Module: Union[FastLayerNorm, FusedLayerNorm].
        """

        LazyInitContext.materialize(module)
        # get the attributes of the module
        normalized_shape = getattr(module, "normalized_shape", module.weight.shape[0])
        eps = module.variance_epsilon if hasattr(module, "variance_epsilon") else module.eps
        elementwise_affine = getattr(module, "elementwise_affine", True)
        dtype = module.weight.dtype
        device = module.weight.device

        # pick the suitable layernorm implementation
        use_fast_ln = normalized_shape in FAST_LAYERNORM_SUPPORTED_SIZE

        if use_fast_ln:
            if EnableFastLayerNorm:
                ApexFusedLayerNorm = FastLayerNormWithHook
            else:
                # fall back to the normal fused layernorm if FastLayerNorm is not built
                # NOTE(review): if apex is missing entirely this raises NameError
                # here (only the branch below guards against it) — confirm intended.
                ApexFusedLayerNorm = FusedLayerNormWithHook
        else:
            try:
                ApexFusedLayerNorm = FusedLayerNormWithHook
            except NameError:
                warnings.warn(
                    "Please install Apex from source to use fused kernels, or set self.enable_fused_normalization = False. Using native layernorm instead."
                )
                return module

        layernorm = (
            ApexFusedLayerNorm(normalized_shape, eps=eps, elementwise_affine=elementwise_affine).to(dtype).to(device)
        )
        # Reuse the original parameters so existing optimizer/state references stay valid.
        layernorm.weight = module.weight
        if module.bias is not None:
            layernorm.bias = module.bias

        if sp_partial_derived:
            # Since gradients are computed using only a subset of the data,
            # aggregation of these gradients is necessary during backpropagation.
            # Therefore, we annotate these parameters in advance to indicate the need for gradient aggregation.
            SeqParallelUtils.marked_as_sp_partial_derived_param(layernorm.weight)
            SeqParallelUtils.marked_as_sp_partial_derived_param(layernorm.bias)

        return layernorm


class FusedRMSNorm(BaseLayerNorm):
    """
    This is a wrapper around the apex fused rms norm implementation. It is meant to be used only with the from_native_module interface.
    """

    def __init__(self) -> None:
        raise NotImplementedError(
            "FusedRMSNorm is not implemented as a physical class. "
            "It is meant to be used only with the from_native_module interface to Convert a native RMSNorm module to FusedRMSNorm module provided by apex."
        )

    @staticmethod
    def from_native_module(module: nn.Module, sp_partial_derived: bool = False, *args, **kwargs) -> nn.Module:
        r"""
        Convert a native RMSNorm module module to FusedRMSNorm module provided by apex,
        and optionally marking parameters for gradient aggregation.

        Args:
            module (nn.LayerNorm): The native PyTorch LayerNorm module to be converted.
            sp_partial_derived (bool): Whether this module's gradients are partially derived in sequence parallelism.

        Returns:
            nn.Module: FusedRMSNorm module.
        """

        LazyInitContext.materialize(module)

        # try to get normalized_shape, eps, elementwise_affine from the module
        normalized_shape = getattr(module, "normalized_shape", module.weight.shape[0])
        eps = module.variance_epsilon if hasattr(module, "variance_epsilon") else module.eps
        elementwise_affine = getattr(module, "elementwise_affine", True)

        # BUGFIX: when neither the NPU kernel nor apex is available,
        # FusedRMSNormWithHook is None; calling it raised TypeError, which the
        # `except ImportError` below could never catch. Check explicitly and
        # fall back to the native module instead of crashing.
        if FusedRMSNormWithHook is None:
            warnings.warn(
                "Module replacement failed.\
                Please install apex from source (https://github.com/NVIDIA/apex) to use the fused RMS normalization kernel"
            )
            return module

        try:
            rmsnorm = FusedRMSNormWithHook(
                normalized_shape=normalized_shape,
                eps=eps,
                elementwise_affine=elementwise_affine,
            )
        except ImportError:
            warnings.warn(
                "Module replacement failed.\
                Please install apex from source (https://github.com/NVIDIA/apex) to use the fused RMS normalization kernel"
            )
            return module

        # Reuse the original weight so existing references stay valid.
        rmsnorm.weight = module.weight

        if sp_partial_derived:
            # Since gradients are computed using only a subset of the data,
            # aggregation of these gradients is necessary during backpropagation.
            # Therefore, we annotate these parameters in advance to indicate the need for gradient aggregation.
            SeqParallelUtils.marked_as_sp_partial_derived_param(rmsnorm.weight)

        return rmsnorm
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/layer/linear.py
colossalai/shardformer/layer/linear.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import math
from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.distributed import ProcessGroup
from torch.nn.parameter import Parameter

from colossalai.lazy import LazyInitContext
from colossalai.nn import init as init
from colossalai.nn.layer.utils import divide
from colossalai.tensor.d_tensor.api import (
    is_distributed_tensor,
    shard_colwise,
    shard_rowwise,
    sharded_tensor_to_existing_param,
)

from ._operation import (
    gather_forward_split_backward,
    linear_gather_forward_reducescatter_backward,
    linear_reducescatter_forward_gather_backward,
    linear_with_async_comm,
    linear_with_grad_accum,
    reduce_forward,
    split_forward_gather_backward,
)
from .parallel_module import PaddingParallelModule, ParallelModule
from .utils import create_randomizer_with_offset, is_share_sp_tp

__all__ = ["LinearWithGradAccum", "Linear1D_Col", "Linear1D_Row"]


class LinearWithGradAccum(ParallelModule):
    r"""Linear layer with no parallelism.

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (`torch.dtype`): The dtype of parameters, defaults to None.
        device (`torch.device`): The device of parameters, defaults to None.
        gather_output (bool, optional): If true, call all-gather on output and make Y available
                    to all GPUs, otherwise, every GPU will have its output
                    which is :math:`Y_i = XA_i`, defaults to False
        seq_parallel (`bool`): If set to ``True``, it will use sequence parallel, defaults to False.
        overlap (`bool`): If set to ``True``, it will overlap input all-gather with gradient computation during backward, defaults to False.
        skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
        weight_initializer (`typing.Callable`):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (`typing.Callable`):
            The initializer of bias, defaults to xavier uniform initializer.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        dtype: torch.dtype = None,
        device: torch.device = None,
        skip_bias_add: bool = False,
        weight: Optional[Parameter] = None,
        bias_: Optional[Parameter] = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
        use_zbv: bool = False,
        **kwargs,
    ):
        super().__init__(weight=weight, bias_=bias_, **kwargs)

        # Keep input parameters
        self.in_features = in_features
        self.out_features = out_features
        self.skip_bias_add = skip_bias_add
        self.device = device
        self.use_zbv = use_zbv

        if skip_bias_add and not bias:
            raise ValueError("cannot skip bias addition if bias is None")

        # offset the seed with randomizer index and rank
        seed = torch.random.initial_seed()
        self.randomizer = create_randomizer_with_offset(seed, process_group=None)

        # sanity check: the asserts below guarantee that when `weight` is given and
        # `bias` is True, `bias_` is also given — so `factory_kwargs` (defined only
        # in the weight-is-None branch) is never read undefined further down.
        if weight is not None:
            assert not bias or bias_ is not None, "bias_ must be provided if bias is True when weight is not None"
        else:
            assert bias_ is None, "bias_ must be None if weight is None"

        # Parameters.
        if weight is None:
            factory_kwargs = {"device": device, "dtype": dtype}
            self.weight = Parameter(torch.empty(self.out_features, self.in_features, **factory_kwargs))
        else:
            weight.data = weight.data.to(device=device, dtype=dtype)
            self.weight = weight

        if bias:
            if bias_ is None:
                self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs))
            else:
                bias_.data = bias_.data.to(device=device, dtype=dtype)
                self.bias = bias_
        else:
            self.bias = None

        if weight is None:
            # init weights
            self.reset_parameters(weight_initializer, bias_initializer)

    @staticmethod
    def from_native_module(module: nn.Linear, **kwargs) -> ParallelModule:
        r"""
        Convert a native PyTorch linear layer to a parallelized linear layer.
        """
        LazyInitContext.materialize(module)
        # get the attributes
        in_features = module.in_features
        out_features = module.out_features
        bias = module.bias is not None
        device = module.weight.device

        linear_1d = LinearWithGradAccum(
            in_features=in_features,
            out_features=out_features,
            bias=bias,
            device=device,
            weight=module.weight,
            bias_=module.bias,
            **kwargs,
        )

        return linear_1d

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        # Fork the RNG so initialization is reproducible per-rank.
        with self.randomizer.fork_rng(enable_cpu=True):
            fan_in, fan_out = self.in_features, self.out_features
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            if self.bias is not None:
                bias_initializer(self.bias, fan_in=fan_in)

    def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:
        assert (
            input_.shape[-1] == self.weight.shape[-1]
        ), "Invalid shapes in Linear1D_Col forward: input={}, weight={}. Expected last dim of input {}.".format(
            input_.shape, self.weight.shape, self.weight.shape[-1]
        )

        # Set up backprop all-reduce.
        input_parallel = input_

        # Matrix multiply.
        bias = self.bias if not self.skip_bias_add else None

        output_parallel = linear_with_grad_accum(
            input_parallel,
            self.weight,
            bias,
            False,
            use_zbv=self.use_zbv,
        )

        output = output_parallel

        if self.skip_bias_add:
            return output, self.bias
        else:
            return output


class Linear1D_Col(ParallelModule):
    r"""Linear layer with column parallelism.

    The linear layer is defined as :math:`Y = XA + b`. A is parallelized along
    its second dimension as :math:`A = [A_1, ..., A_p]`.

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (`torch.dtype`): The dtype of parameters, defaults to None.
        device (`torch.device`): The device of parameters, defaults to None.
        process_group (`torch.distributed.ProcessGroup`): The process group to be used for weight sharding and communication, defaults to None.
        gather_output (bool, optional): If true, call all-gather on output and make Y available
                    to all GPUs, otherwise, every GPU will have its output
                    which is :math:`Y_i = XA_i`, defaults to False
        seq_parallel (`bool`): If set to ``True``, it will use sequence parallel, defaults to False.
        skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
        weight_initializer (`typing.Callable`):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (`typing.Callable`):
            The initializer of bias, defaults to xavier uniform initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        dtype: torch.dtype = None,
        device: torch.device = None,
        process_group: ProcessGroup = None,
        gather_output: bool = False,
        seq_parallel_mode: str = None,
        seq_parallel_dim: int = 1,
        skip_bias_add: bool = False,
        weight: Optional[Parameter] = None,
        bias_: Optional[Parameter] = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
        fp8_communication: bool = False,
        use_zbv: bool = False,
        **kwargs,
    ):
        super().__init__(weight=weight, bias_=bias_, **kwargs)

        # Keep input parameters
        self.in_features = in_features
        self.out_features = out_features
        self.gather_output = gather_output
        self.seq_parallel_mode = seq_parallel_mode
        self.seq_parallel_dim = seq_parallel_dim
        self.skip_bias_add = skip_bias_add
        self.device = device
        self.process_group = process_group
        self.fp8_communication = fp8_communication
        self.use_zbv = use_zbv

        if skip_bias_add and not bias:
            raise ValueError("cannot skip bias addition if bias is None")

        # offset the seed with randomizer index and rank
        seed = torch.random.initial_seed()
        self.randomizer = create_randomizer_with_offset(seed, process_group=self.process_group)

        # sanity check: guarantees `factory_kwargs` is defined whenever it is used below.
        if weight is not None:
            assert not bias or bias_ is not None, "bias_ must be provided if bias is True when weight is not None"
        else:
            assert bias_ is None, "bias_ must be None if weight is None"

        # Parameters.
        if weight is None:
            factory_kwargs = {"device": device, "dtype": dtype}
            self.weight = Parameter(torch.empty(self.out_features, self.in_features, **factory_kwargs))
        else:
            weight.data = weight.data.to(device=device, dtype=dtype)
            self.weight = weight

        # Column parallelism shards the weight along dim 0 (its output rows).
        if not is_distributed_tensor(self.weight):
            sharded_weight = shard_rowwise(self.weight.data, self.process_group)
            sharded_tensor_to_existing_param(sharded_weight, self.weight)

        if bias:
            if bias_ is None:
                self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs))
            else:
                bias_.data = bias_.data.to(device=device, dtype=dtype)
                self.bias = bias_
            if not is_distributed_tensor(self.bias):
                sharded_bias = shard_colwise(self.bias.data, self.process_group)
                sharded_tensor_to_existing_param(sharded_bias, self.bias)
        else:
            self.bias = None

        if weight is None:
            # init weights
            self.reset_parameters(weight_initializer, bias_initializer)

    @staticmethod
    def from_native_module(
        module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]], **kwargs
    ) -> ParallelModule:
        r"""
        Convert a native PyTorch linear layer to a parallelized linear layer.
        """
        LazyInitContext.materialize(module)
        # get the attributes
        in_features = module.in_features
        out_features = module.out_features
        bias = module.bias is not None
        device = module.weight.device

        # ensure only one process group is passed
        if isinstance(process_group, (list, tuple)):
            assert len(process_group) == 1, f"Expected only one process group, got {len(process_group)}."
            process_group = process_group[0]

        tp_size = dist.get_world_size(process_group)
        # Too small to shard: keep the native module unchanged.
        if out_features < tp_size:
            return module

        if out_features % tp_size != 0:
            raise ValueError(
                f"The size of out_features:{out_features} is not integer multiples of tensor parallel size: {tp_size}!"
            )

        linear_1d = Linear1D_Col(
            in_features=in_features,
            out_features=out_features,
            bias=bias,
            device=device,
            process_group=process_group,
            weight=module.weight,
            bias_=module.bias,
            **kwargs,
        )

        return linear_1d

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        with self.randomizer.fork_rng(enable_cpu=True):
            fan_in, fan_out = self.in_features, self.out_features
            weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
            if self.bias is not None:
                bias_initializer(self.bias, fan_in=fan_in)

    def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:
        assert (
            input_.shape[-1] == self.weight.shape[-1]
        ), "Invalid shapes in Linear1D_Col forward: input={}, weight={}. Expected last dim of input {}.".format(
            input_.shape, self.weight.shape, self.weight.shape[-1]
        )

        # Set up backprop all-reduce.
        input_parallel = input_

        # Matrix multiply.
        bias = self.bias if not self.skip_bias_add else None

        if is_share_sp_tp(self.seq_parallel_mode):
            # Sequence-parallel path: all-gather the input in forward,
            # reduce-scatter the gradient in backward.
            output_parallel = linear_gather_forward_reducescatter_backward(
                input_parallel,
                self.weight,
                bias,
                self.process_group,
                True,
                self.seq_parallel_dim,
                ring=self.seq_parallel_mode == "ring",
                use_zbv=self.use_zbv,
            )
        else:
            output_parallel = linear_with_async_comm(
                input_parallel,
                self.weight,
                bias,
                self.process_group,
                True,
                fp8_communication=self.fp8_communication,
                use_zbv=self.use_zbv,
            )

        if self.gather_output:
            # All-gather across the partitions.
            output = gather_forward_split_backward(
                output_parallel, dim=-1, process_group=self.process_group, fp8_communication=self.fp8_communication
            )
        else:
            output = output_parallel

        if self.skip_bias_add:
            return output, self.bias
        else:
            return output


class Linear1D_Row(ParallelModule):
    r"""Linear layer with row parallelism

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (`torch.dtype`): The dtype of parameters, defaults to None.
        parallel_input (bool): If set to ``True``, it's assumed that the input is already split/copied across each rank, defaults to False.
        process_group (`torch.distributed.ProcessGroup`): The process group to be used for weight sharding and communication, defaults to None.
        seq_parallel_mode (`str`): The type of sp mode, it will use sequence parallel when `seq_parallel_mode` is not None. Defaults to None.
        seq_parallel_dim (`int`): Which dim will sequence parallelism split and gather the sequence.
        skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
        weight_initializer (:class:`typing.Callable`, optional):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (:class:`typing.Callable`, optional):
            The initializer of bias, defaults to xavier uniform initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        dtype: torch.dtype = None,
        device: torch.device = None,
        process_group: ProcessGroup = None,
        seq_parallel_mode: str = None,
        seq_parallel_dim: int = 1,
        parallel_input: bool = True,
        skip_bias_add: bool = False,
        weight: Optional[Parameter] = None,
        bias_: Optional[Parameter] = None,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
        stream_chunk_num: int = 1,
        fp8_communication: bool = False,
        use_zbv: bool = False,
    ):
        super().__init__()

        # stream_chunk_num > 1 enables the inference-only chunked all-reduce path.
        self.stream_chunk_num = stream_chunk_num

        # Keep input parameters
        self.in_features = in_features
        self.out_features = out_features
        self.parallel_input = parallel_input
        self.skip_bias_add = skip_bias_add
        self.process_group = process_group
        self.seq_parallel_mode = seq_parallel_mode
        self.seq_parallel_dim = seq_parallel_dim
        self.num_partitions = dist.get_world_size(self.process_group)
        self.fp8_communication = fp8_communication
        self.use_zbv = use_zbv

        if skip_bias_add and not bias:
            raise ValueError("cannot skip bias addition if bias is None")

        # offset the seed with randomizer index and rank
        seed = torch.random.initial_seed()
        self.randomizer = create_randomizer_with_offset(seed, process_group=self.process_group)

        # sanity check: guarantees `factory_kwargs` is defined whenever it is used below.
        if weight is not None:
            assert not bias or bias_ is not None, "bias_ must be provided if bias is True when weight is not None"
        else:
            assert bias_ is None, "bias_ must be None if weight is None"

        # Parameters.
        if weight is None:
            # Initialize weight.
            factory_kwargs = {"device": device, "dtype": dtype}
            self.weight = Parameter(torch.empty(self.out_features, self.in_features, **factory_kwargs))
        else:
            weight.data = weight.data.to(device=device, dtype=dtype)
            self.weight = weight

        # Row parallelism shards the weight along dim 1 (its input columns);
        # the bias is kept replicated.
        if not is_distributed_tensor(self.weight):
            sharded_weight = shard_colwise(self.weight.data, self.process_group)
            sharded_tensor_to_existing_param(sharded_weight, self.weight)

        if self.stream_chunk_num > 1:
            # TODO() work for inference only
            self.chunk_weight()
        if bias:
            if bias_ is None:
                self.bias = Parameter(torch.empty(self.out_features, **factory_kwargs))
            else:
                bias_.data = bias_.data.to(device=device, dtype=dtype)
                self.bias = bias_
        else:
            self.bias = None

        if weight is None:
            with self.randomizer.fork_rng(enable_cpu=True):
                self.reset_parameters(weight_initializer, bias_initializer)

    @staticmethod
    def from_native_module(
        module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]], **kwargs
    ) -> ParallelModule:
        r"""
        Convert a native PyTorch linear layer to a parallelized linear layer.
        """
        LazyInitContext.materialize(module)
        # get the attributes
        in_features = module.in_features
        out_features = module.out_features
        bias = module.bias is not None
        device = module.weight.device

        # ensure only one process group is passed
        if isinstance(process_group, (list, tuple)):
            assert len(process_group) == 1, f"Expected only one process group, got {len(process_group)}."
            process_group = process_group[0]

        tp_size = dist.get_world_size(process_group)
        # Too small to shard: keep the native module unchanged.
        if in_features < tp_size:
            return module

        if in_features % tp_size != 0:
            raise ValueError(
                f"The size of in_features:{in_features} is not integer multiples of tensor parallel size: {tp_size}!"
            )

        linear_1d = Linear1D_Row(
            in_features=in_features,
            out_features=out_features,
            bias=bias,
            device=device,
            process_group=process_group,
            weight=module.weight,
            bias_=module.bias,
            **kwargs,
        )

        return linear_1d

    def chunk_weight(self):
        # Split the sharded weight into row chunks for the streamed inference path.
        self.weight_list = torch.chunk(self.weight, self.stream_chunk_num, dim=0)

    @torch.no_grad()
    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        fan_in, fan_out = self.in_features, self.out_features
        weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)

        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)
            if self.process_group is None:
                src_rank = 0
            else:
                src_rank = dist.distributed_c10d._get_global_rank(self.process_group, 0)

            # The bias is replicated across ranks: broadcast rank 0's values so
            # all ranks start identical. Done on CUDA, then moved back.
            origin_device = self.bias.device
            bias = self.bias.cuda()
            dist.broadcast(bias, src=src_rank, group=self.process_group)
            bias = bias.to(origin_device)
            self.bias.copy_(bias)

    def forward(self, input_: Tensor) -> Tensor:
        # Set up backprop all-reduce.
        if self.parallel_input:
            assert (
                input_.shape[-1] == self.weight.shape[-1]
            ), "Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected feature dim of input {}.".format(
                input_.shape, self.weight.shape, self.weight.shape[-1]
            )
            input_ = input_
        else:
            assert (
                divide(input_.shape[-1], self.num_partitions) == self.weight.shape[-1]
            ), "Invalid shapes in Linear1D_Row forward: input={}, weight={}. Expected feature dim of input {}.".format(
                input_.shape, self.weight.shape, self.weight.shape[-1] * self.num_partitions
            )
            input_ = split_forward_gather_backward(
                input_, dim=-1, process_group=self.process_group, fp8_communication=self.fp8_communication
            )

        if self.stream_chunk_num > 1:
            # Inference-only path: overlap per-chunk matmuls with async all-reduce.
            if self.training:
                raise RuntimeError("use stream_chunk_num=1 in Linear1D_Row for training!")
            with torch.no_grad():
                output_parallel_list = [None for i in range(self.stream_chunk_num)]
                handle_list = []
                for i in range(self.stream_chunk_num):
                    output_parallel_list[i] = F.linear(input_, self.weight_list[i])
                    handle = torch.distributed.all_reduce(
                        output_parallel_list[i], group=self.process_group, async_op=True
                    )
                    handle_list.append(handle)
                for handle in handle_list:
                    handle.wait()
                output = torch.cat(output_parallel_list, dim=-1)
        else:
            if is_share_sp_tp(self.seq_parallel_mode):
                # Sequence-parallel path: reduce-scatter in forward, all-gather in backward.
                output = linear_reducescatter_forward_gather_backward(
                    input_,
                    self.weight,
                    process_group=self.process_group,
                    dim=self.seq_parallel_dim,
                    ring=self.seq_parallel_mode == "ring",
                    use_zbv=self.use_zbv,
                )
            else:
                output_parallel = F.linear(input_, self.weight)
                output = reduce_forward(output_parallel, self.process_group, fp8_communication=self.fp8_communication)

        if not self.skip_bias_add:
            if self.bias is not None:
                output = output + self.bias
            return output
        else:
            return output, self.bias


class PaddingLMHead(PaddingParallelModule):
    # LM head whose vocabulary dimension is padded up to a multiple of
    # make_vocab_size_divisible_by; forward slices the padding back off.

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        dtype: torch.dtype = None,
        device: torch.device = None,
        weight: Optional[Parameter] = None,
        bias_: Optional[Parameter] = None,
        make_vocab_size_divisible_by: int = 64,
        weight_initializer: Callable = init.kaiming_uniform_(a=math.sqrt(5)),
        bias_initializer: Callable = init.xavier_uniform_(a=1, scale=1),
    ):
        # Keep input parameters
        self.in_features = in_features
        self.out_features = out_features

        # Round the vocab size up to the next multiple of make_vocab_size_divisible_by.
        if out_features % make_vocab_size_divisible_by != 0:
            self.out_features = (
                out_features + make_vocab_size_divisible_by - (out_features % make_vocab_size_divisible_by)
            )

        # NOTE(review): `factory_kwargs` is only defined when weight is None; the
        # bias branch below reads it, so passing weight with bias_=None relies on
        # callers always providing both (as from_native_module does) — confirm.
        if weight is None:
            factory_kwargs = {"device": device, "dtype": dtype}
            weight = Parameter(torch.empty(out_features, self.in_features, **factory_kwargs))
        else:
            weight.data = weight.data.to(device=device, dtype=dtype)

        if bias:
            if bias_ is None:
                # NOTE(review): this sets self.bias but leaves bias_ as None for
                # the super().__init__ call below — verify this asymmetry is intended.
                self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
            else:
                bias_.data = bias_.data.to(device=device, dtype=dtype)
        else:
            bias_ = None

        # resize embeddings
        super().__init__(self.out_features, out_features, weight, bias_)

        # NOTE(review): weight is reassigned above when it was None, so this
        # branch can never run and reset_parameters is effectively dead — confirm.
        if weight is None:
            self.reset_parameters(weight_initializer, bias_initializer)

    def reset_parameters(self, weight_initializer, bias_initializer) -> None:
        fan_in, fan_out = self.in_features, self.out_features
        weight_initializer(self.weight, fan_in=fan_in, fan_out=fan_out)
        if self.bias is not None:
            bias_initializer(self.bias, fan_in=fan_in)

    @staticmethod
    def from_native_module(
        module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]], **kwargs
    ) -> PaddingParallelModule:
        r"""
        Convert a native PyTorch linear layer to a parallelized linear layer.
        """
        LazyInitContext.materialize(module)
        # get the attributes
        in_features = module.in_features
        out_features = module.out_features
        bias = module.bias is not None
        device = module.weight.device

        # ensure only one process group is passed

        lm_head_linear = PaddingLMHead(
            in_features=in_features,
            out_features=out_features,
            bias=bias,
            device=device,
            weight=module.weight,
            bias_=module.bias,
            **kwargs,
        )

        return lm_head_linear

    def forward(self, input: Tensor) -> Tensor:
        output = F.linear(input, self.weight, self.bias)
        # Drop logits for the padded (non-existent) vocabulary entries.
        output = output[..., : self.old_num_embeddings]
        return output


class VocabParallelLMHead1D(Linear1D_Col, PaddingParallelModule):
    r"""Linear layer with column parallelism.

    The linear layer is defined as :math:`Y = XA + b`. A is parallelized along
    its second dimension as :math:`A = [A_1, ..., A_p]`.

    Args:
        in_features (int): size of each input sample.
        out_features (int): size of each output sample.
        bias (bool, optional): If set to ``False``, the layer will not learn an additive bias, defaults to ``True``.
        dtype (`torch.dtype`): The dtype of parameters, defaults to None.
        device (`torch.device`): The device of parameters, defaults to None.
        process_group (`torch.distributed.ProcessGroup`): The process group to be used for weight sharding and communication, defaults to None.
        gather_output (bool, optional): If true, call all-gather on output and make Y available
                    to all GPUs, otherwise, every GPU will have its output
                    which is :math:`Y_i = XA_i`, defaults to False
        seq_parallel (`bool`): If set to ``True``, it will use sequence parallel, defaults to False.
        skip_bias_add (bool): If set to ``True``, it will skip bias add for linear layer,
            which is preserved for kernel fusion, defaults to False
        weight_initializer (`typing.Callable`):
            The initializer of weight, defaults to kaiming uniform initializer.
        bias_initializer (`typing.Callable`):
            The initializer of bias, defaults to xavier uniform initializer.

    More details about ``initializer`` please refer to
    `init <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/nn/init.py>`_.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        dtype: torch.dtype = None,
        device: torch.device = None,
        process_group: ProcessGroup = None,
        weight: Optional[Parameter] = None,
        bias_: Optional[Parameter] = None,
        make_vocab_size_divisible_by: int = 64,
        fp8_communication: bool = False,
        **kwargs,
    ):
        # create weight and bias
        # NOTE(review): when weight is None this reads `self.in_features` before
        # any __init__ has set it (AttributeError); in practice weight is always
        # supplied via from_native_module — confirm this path is unreachable.
        if weight is None:
            factory_kwargs = {"device": device, "dtype": dtype}
            weight = Parameter(torch.empty(out_features, self.in_features, **factory_kwargs))
        if bias:
            if bias_ is None:
                bias_ = Parameter(torch.empty(out_features, **factory_kwargs))
        else:
            bias_ = None

        # calculate new vocab size: padded so each TP rank gets an equal,
        # make_vocab_size_divisible_by-aligned partition.
        self.tensor_parallel_size = dist.get_world_size(group=process_group)
        new_out_features = out_features
        multiple = make_vocab_size_divisible_by * self.tensor_parallel_size
        if out_features % multiple != 0:
            new_out_features = out_features + multiple - (out_features % multiple)

        super().__init__(
            in_features=in_features,
            out_features=new_out_features,
            bias=bias,
            device=device,
            process_group=process_group,
            weight=weight,
            bias_=bias_,
            **kwargs,
            new_num_embeddings=new_out_features,
            old_num_embeddings=out_features,
            fp8_communication=fp8_communication,
        )
        # get the length of valid embeddings: how many of this rank's vocab rows
        # correspond to real (unpadded) tokens.
        tp_rank = dist.get_rank(process_group)
        partition_size = self.new_num_embeddings // dist.get_world_size(process_group)
        if self.old_num_embeddings >= (tp_rank + 1) * partition_size:
            self.num_valid_embeddings_local = partition_size
        elif self.old_num_embeddings >= tp_rank * partition_size:
            self.num_valid_embeddings_local = self.old_num_embeddings - tp_rank * partition_size
        else:
            self.num_valid_embeddings_local = 0

    @staticmethod
    def from_native_module(
        module: nn.Linear, process_group: Union[ProcessGroup, List[ProcessGroup]], **kwargs
    ) -> PaddingParallelModule:
        r"""
        Convert a native PyTorch linear layer to a parallelized linear layer.
        """
        LazyInitContext.materialize(module)
        # get the attributes
        in_features = module.in_features
        out_features = module.out_features
        bias = module.bias is not None
        device = module.weight.device

        lm_head_linear = VocabParallelLMHead1D(
            in_features=in_features,
            out_features=out_features,
            bias=bias,
            device=device,
            process_group=process_group,
            weight=module.weight,
            bias_=module.bias,
            **kwargs,
        )

        return lm_head_linear

    def forward(self, input_: Tensor) -> Tuple[Tensor, Tensor]:
        # get forward output
        if self.skip_bias_add:
            output, bias = super().forward(input_)
        else:
            output = super().forward(input_)

        # delete the padding of output
        if self.gather_output:
            output = output[..., : self.old_num_embeddings]
        else:
            output = output[..., : self.num_valid_embeddings_local]

        # return
        if self.skip_bias_add:
            return output, bias
        return output
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/opt.py
colossalai/shardformer/modeling/opt.py
import random from typing import List, Optional, Tuple, Union import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, ) from transformers.models.opt.modeling_opt import ( OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer import ColoAttention from colossalai.shardformer.shard import ShardConfig from ..layer import dist_cross_entropy logger = logging.get_logger(__name__) def _get_attention_mask( self: OPTModel, shard_config: ShardConfig, hidden_states: torch.Tensor, past_key_values_length: int, attention_mask: Optional[torch.FloatTensor], ): batch_size, seq_length = hidden_states.shape[:2] mask_seq_length = past_key_values_length + seq_length if shard_config.enable_flash_attention: attention_mask = ColoAttention.prepare_attn_kwargs( (batch_size, 1, seq_length, mask_seq_length), hidden_states.dtype, hidden_states.device, attention_mask, is_causal=True, ) else: attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length, ) return attention_mask class OPTPipelineForwards: """ This class serves as a micro library for forward function substitution of OPT models under pipeline setting. 
""" @staticmethod def opt_model_forward( self: OPTModel, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: Optional[ShardConfig] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: """ This forward method is modified based on transformers.models.opt.modeling_opt.OPTModel.forward """ from transformers.modeling_outputs import BaseModelOutputWithPast from transformers.utils import logging logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict decoder = self.decoder if stage_manager.is_first_stage(): # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") batch_size, seq_length = input_shape if inputs_embeds is None: inputs_embeds = decoder.embed_tokens(input_ids) if decoder.project_in is not None: 
inputs_embeds = decoder.project_in(inputs_embeds) device = input_ids.device if input_ids is not None else inputs_embeds.device inputs_embeds.dtype hidden_states = inputs_embeds else: if hidden_states is None: raise ValueError("hidden_states shouldn't be None for intermediate stages.") input_shape = hidden_states.size()[:-1] batch_size, seq_length = input_shape[0], input_shape[1] device = hidden_states.device hidden_states.dtype past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 # required mask seq length can be calculated via length of past mask_seq_length = past_key_values_length + seq_length # embed positions if self.decoder.config._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers causal_attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None attention_mask = ( torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) if attention_mask is None else attention_mask ) else: # 4d mask is passed through the layers if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) elif attention_mask.shape[1] != mask_seq_length: raise ValueError( f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be " f"{mask_seq_length} (sum of the lengths of current and past inputs)" ) causal_attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, hidden_states, past_key_values_length ) if stage_manager.is_first_stage(): causal_attention_mask = _get_attention_mask( self, shard_config, inputs_embeds, past_key_values_length, attention_mask, ) pos_embeds = decoder.embed_positions(attention_mask, past_key_values_length) hidden_states = inputs_embeds + pos_embeds else: causal_attention_mask = _get_attention_mask( self, shard_config, hidden_states, past_key_values_length, attention_mask, ) if decoder.gradient_checkpointing and decoder.training: if 
use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # TODO(baizhou): left the recording kv-value tensors as () or None type, this feature may be added in the future. if past_key_values: logger.warning_once("Non-empty past_key_values is not supported for pipeline models at the moment.") past_key_values = None if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask], ["head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(decoder.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(decoder.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) start_idx, end_idx = stage_index[0], stage_index[1] torch.cuda.set_device(device) for idx in range(start_idx, end_idx): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) decoder_layer = decoder.layers[idx] if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if decoder.training and (dropout_probability < decoder.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if decoder.gradient_checkpointing and decoder.training: layer_outputs = self.decoder._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, causal_attention_mask, head_mask[idx] if head_mask is not None else None, None, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if stage_manager.is_last_stage(): if decoder.final_layer_norm is not None: hidden_states = decoder.final_layer_norm(hidden_states) if decoder.project_out is not None: hidden_states = decoder.project_out(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if stage_manager.is_last_stage(): if not return_dict: return tuple( v for v in [ hidden_states, next_cache, all_hidden_states, all_self_attns, ] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) else: return {"hidden_states": hidden_states} @staticmethod def opt_for_causal_lm_forward( self: OPTForCausalLM, 
input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: Optional[ShardConfig] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" This function is modified on the basis of transformers.models.opt.modeling_opt.OPTForCausalLM.forward. Please refer to original code of transformers for more details. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = OPTPipelineForwards.opt_model_forward( self.model, input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) if stage_manager.is_last_stage(): logits = self.lm_head(outputs[0]).contiguous() loss = None if labels is not None: loss = dist_cross_entropy( labels, logits, shard_config, self.lm_head.out_features, self.model.decoder.dtype, ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None 
else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: hidden_states = outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def opt_for_sequence_classification_forward( self: OPTForSequenceClassification, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: Optional[ShardConfig] = None, ) -> Union[Tuple, SequenceClassifierOutputWithPast]: r""" This function is modified on the basis of transformers.models.opt.modeling_opt.OPTForSequenceClassification.forward. Please refer to original code of transformers for more details. 
""" logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = OPTPipelineForwards.opt_model_forward( self.model, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] logits = self.score(hidden_states) batch_size = input_ids.shape[0] if input_ids is not None else hidden_states.shape[0] if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def opt_for_question_answering_forward( self: OPTForQuestionAnswering, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, 
output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: Optional[ShardConfig] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" This function is modified on the basis of transformers.models.opt.modeling_opt.OPTForQuestionAnswering.forward. Please refer to original code of transformers for more details. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = OPTPipelineForwards.opt_model_forward( self.model, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + 
end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + transformer_outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} def get_opt_flash_attention_forward(shard_config: ShardConfig): from transformers.models.opt.modeling_opt import OPTAttention def _shape(tensor: torch.Tensor, seq_len: int, bsz: int, num_heads: int, head_dim: int): return tensor.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).contiguous() def forward( self: OPTAttention, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[dict] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" assert layer_head_mask is None, "layer_head_mask is not supported for FlashAttention" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = _shape(self.k_proj(key_value_states), -1, bsz, self.num_heads, self.head_dim) value_states = _shape(self.v_proj(key_value_states), -1, bsz, self.num_heads, self.head_dim) elif past_key_value is not None: # reuse k, v, self_attention key_states = 
_shape(self.k_proj(hidden_states), -1, bsz, self.num_heads, self.head_dim) value_states = _shape(self.v_proj(hidden_states), -1, bsz, self.num_heads, self.head_dim) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = _shape(self.k_proj(hidden_states), -1, bsz, self.num_heads, self.head_dim) value_states = _shape(self.v_proj(hidden_states), -1, bsz, self.num_heads, self.head_dim) query_states = _shape(query_states, tgt_len, bsz, self.num_heads, self.head_dim) dropout_p = self.dropout if self.training else 0.0 attn_output = ColoAttention.attention( query_states, key_states, value_states, **attention_mask, dropout_p=dropout_p, scale=self.scaling, ) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned aross GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, None, past_key_value return forward def get_opt_decoder_forward_for_flash_attention(shard_config: ShardConfig): from transformers.models.opt.modeling_opt import OPTDecoder def forward( self: OPTDecoder, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, position_ids: Optional[torch.LongTensor] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if 
output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 # required mask seq length can be calculated via length of past mask_seq_length = past_key_values_length + seq_length # embed positions if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device) elif attention_mask.shape[1] != mask_seq_length: raise ValueError( f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be " f"{mask_seq_length} (sum of the lengths of current and past inputs)" ) causal_attention_mask = _get_attention_mask( self, shard_config, inputs_embeds, past_key_values_length, attention_mask ) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask], ["head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, None) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, causal_attention_mask, head_mask[idx] if head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=causal_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += 
(hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict:
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/qwen2.py
colossalai/shardformer/modeling/qwen2.py
import math from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.modeling_attn_mask_utils import ( _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast, ) from transformers.models.qwen2.modeling_qwen2 import ( Qwen2Attention, Qwen2ForCausalLM, Qwen2ForSequenceClassification, Qwen2Model, apply_rotary_pos_emb, repeat_kv, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer._operation import all_to_all_comm, split_forward_gather_backward from colossalai.shardformer.shard import ShardConfig from ..layer import ColoAttention, dist_cross_entropy from ..layer._operation import gather_sp_output from ..layer.utils import is_share_sp_tp class Qwen2PipelineForwards: """ This class serves as a micro library for forward function substitution of Qwen2 models under pipeline setting. 
""" @staticmethod def qwen2_model_forward( self: Qwen2Model, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, force_sp_output_gather: bool = True, ) -> Union[Tuple, BaseModelOutputWithPast]: logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if stage_manager.is_first_stage(): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds else: input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape device = hidden_states.device seq_length_with_past = 
seq_length past_key_values_length = 0 # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False # assert past_key_values is None, "past_key_values is not supported for Qwen2 models at the moment." if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length # Support SP + PP sp_size = shard_config.sequence_parallel_size sp_group = shard_config.sequence_parallel_process_group sp_mode = shard_config.sequence_parallelism_mode # For generating full positions ids (the states will be gathered along the seq dim before attention fwd). 
if sp_mode != "ring_attn" and not stage_manager.is_first_stage(): seq_length *= sp_size if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() # embed positions, for the first stage, hidden_states is the input embeddings, # for the other stages, hidden_states is the output of the previous stage if shard_config.enable_flash_attention: # in this case, attention_mask is a dict rather than a tensor mask_shape = (batch_size, 1, seq_length, seq_length_with_past) attention_mask = ColoAttention.prepare_attn_kwargs( mask_shape, hidden_states.dtype, hidden_states.device, q_padding_mask=attention_mask, is_causal=True, ) else: if self.config._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self.config._attn_implementation == "sdpa" and not output_attentions: # output_attentions=True can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. 
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length, ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length, sliding_window=self.config.sliding_window, ) if stage_manager.is_first_stage(): if shard_config.enable_sequence_parallelism: if is_share_sp_tp(sp_mode): hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=sp_group, ) elif sp_mode == "all_to_all": hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=sp_group, grad_scale=1 / sp_size, ) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None position_embeddings = self.rotary_emb(hidden_states, position_ids) start_idx, end_idx = stage_index[0], stage_index[1] num_ckpt_layers = 0 if self.gradient_checkpointing and self.training: num_ckpt_layers = end_idx - start_idx # TODO: We can replace `gradient_checkpointing_enable` fn and initialize a gradient_checkpointing (List[bool]) for each layer if shard_config.gradient_checkpoint_config is not None: num_ckpt_layers = shard_config.gradient_checkpoint_config.get_num_ckpt_layers( stage=stage_manager.stage, num_stages=stage_manager.num_stages, num_layers=end_idx - start_idx, model_chunk_id=(stage_manager.model_chunk_id if stage_manager.is_interleave else 0), num_model_chunks=stage_manager.num_model_chunks, ) assert num_ckpt_layers <= end_idx - start_idx for idx, decoder_layer in enumerate(self.layers[start_idx:end_idx], start=start_idx): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_values[idx] if past_key_values is not None else None if idx - start_idx < num_ckpt_layers: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, 
position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if stage_manager.is_last_stage(): hidden_states = self.norm(hidden_states) if shard_config.enable_sequence_parallelism: if (not shard_config.parallel_output) or force_sp_output_gather or is_share_sp_tp(sp_mode): hidden_states = gather_sp_output(hidden_states, shard_config) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if stage_manager.is_last_stage(): if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) # always return dict for imediate stage return {"hidden_states": hidden_states} @staticmethod def qwen2_for_causal_lm_forward( self: Qwen2ForCausalLM, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, **kwargs, ): r""" Args: 
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, Qwen2ForCausalLM >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you consciours? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." ```""" logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = Qwen2PipelineForwards.qwen2_model_forward( self.model, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, force_sp_output_gather=False, ) past_key_values = None if stage_manager.is_last_stage(): hidden_states = outputs[0] if hidden_states.shape[1] == 2: pass logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = dist_cross_entropy(labels, logits, shard_config, self.lm_head.out_features, logits.dtype) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: hidden_states = outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def qwen2_for_sequence_classification_forward( self: Qwen2ForSequenceClassification, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: 
Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = Qwen2PipelineForwards.qwen2_model_forward( self.model, input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) if input_ids is not None: batch_size = input_ids.shape[0] elif inputs_embeds is not None: batch_size = inputs_embeds.shape[0] else: batch_size = hidden_states.shape[0] if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if 
self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} def get_qwen2_flash_attention_forward(shard_config: ShardConfig, sp_mode=None, sp_size=None, sp_group=None): def forward( self: Qwen2Attention, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, cache_position: 
Optional[torch.LongTensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if sp_mode is not None: assert sp_mode in ["all_to_all", "split_gather", "ring"], "Invalid sp_mode" assert (sp_size is not None) and ( sp_group is not None ), "Must specify sp_size and sp_group for sequence parallel" bsz, q_len, _ = hidden_states.size() # sp: modify sp_len when sequence parallel mode is ring if sp_mode in ["split_gather", "ring"]: q_len *= sp_size query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": query_states = all_to_all_comm(query_states, sp_group, fp8_communication=shard_config.fp8_communication) key_states = all_to_all_comm(key_states, sp_group, fp8_communication=shard_config.fp8_communication) value_states = all_to_all_comm(value_states, sp_group, fp8_communication=shard_config.fp8_communication) bsz, q_len, _ = query_states.size() query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." ) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) # Because the input can be padded, the absolute sequence length depends on the max position id. 
cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # Activate slicing cache only if the config has a value `sliding_windows` attribute cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 if ( getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window and cache_has_contents ): slicing_tokens = 1 - self.config.sliding_window past_key = past_key_value[self.layer_idx][0] past_value = past_key_value[self.layer_idx][1] past_key = past_key[:, :, slicing_tokens:, :].contiguous() past_value = past_value[:, :, slicing_tokens:, :].contiguous() if past_key.shape[-2] != self.config.sliding_window - 1: raise ValueError( f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" f" {past_key.shape}" ) if attention_mask is not None: attention_mask = attention_mask[:, slicing_tokens:] attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if shard_config.enable_flash_attention: assert isinstance(attention_mask, dict), "Flash Attention Error: attention_mask should be a dict." 
attn_output = ColoAttention.attention(query_states, key_states, value_states, **attention_mask) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() if sp_mode == "all_to_all": attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) attn_output = all_to_all_comm( attn_output, sp_group, scatter_dim=1, gather_dim=2, fp8_communication=shard_config.fp8_communication ) else: attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, None return forward def get_qwen2_model_forward_for_flash_attn(shard_config: ShardConfig, sp_mode=None, sp_size=None, sp_group=None): logger = logging.get_logger(__name__) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] 
= None, cache_position: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, force_sp_output_gather: bool = True, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() # embed positions hidden_states = inputs_embeds if shard_config.enable_flash_attention: # in this case, attention_mask is a dict rather than a tensor mask_shape = (batch_size, 1, seq_length, seq_length_with_past) attention_mask = ColoAttention.prepare_attn_kwargs( mask_shape, hidden_states.dtype, hidden_states.device, q_padding_mask=attention_mask, 
is_causal=True, ) else: attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window=self.config.sliding_window, ) if (self.gradient_checkpointing or sp_mode in ["ring", "all_to_all"]) and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None position_embeddings = self.rotary_emb(hidden_states, position_ids) if sp_mode in ["ring", "split_gather"]: hidden_states = split_forward_gather_backward( hidden_states, 1, sp_group, fp8_communication=shard_config.fp8_communication ) elif sp_mode == "all_to_all": hidden_states = split_forward_gather_backward( hidden_states, 1, sp_group, 1 / sp_size, fp8_communication=shard_config.fp8_communication ) for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training:
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/deepseek.py
colossalai/shardformer/modeling/deepseek.py
import warnings from typing import List, Optional, Tuple, Union import torch import torch.distributed as dist import torch.functional as F from torch.distributed import ProcessGroup from torch.nn import CrossEntropyLoss from transformers.cache_utils import Cache, DynamicCache from transformers.modeling_attn_mask_utils import ( _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.models.llama.modeling_llama import apply_rotary_pos_emb from transformers.utils import is_flash_attn_2_available, logging from colossalai.lazy import LazyInitContext from colossalai.moe._operation import ( DPGradScalerIn, DPGradScalerOut, EPGradScalerIn, EPGradScalerOut, all_to_all_uneven, ) from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.quantization.fp8 import all_reduce_fp8 from colossalai.shardformer.layer._operation import ( all_to_all_comm, gather_forward_split_backward, linear_with_async_comm, split_forward_gather_backward, ) from colossalai.shardformer.layer.linear import Linear1D_Col, Linear1D_Row, ParallelModule from colossalai.shardformer.shard import ShardConfig from colossalai.shardformer.shard.utils import set_tensors_to_none from colossalai.tensor.d_tensor.api import shard_rowwise, sharded_tensor_to_existing_param from colossalai.tensor.moe_tensor.api import set_moe_tensor_ep_group # copied from modeling_deepseek.py class AddAuxiliaryLoss(torch.autograd.Function): """ The trick function of adding auxiliary (aux) loss, which includes the gradient of the aux loss during backpropagation. 
""" @staticmethod def forward(ctx, x, loss): assert loss.numel() == 1 ctx.dtype = loss.dtype ctx.required_aux_loss = loss.requires_grad return x @staticmethod def backward(ctx, grad_output): grad_loss = None if ctx.required_aux_loss: grad_loss = torch.ones(1, dtype=ctx.dtype, device=grad_output.device) return grad_output, grad_loss class EPDeepseekMoE(ParallelModule): def __init__(self): raise RuntimeError(f"Please use `from_native_module` to create an instance of {self.__class__.__name__}") def setup_process_groups( self, tp_group: ProcessGroup, moe_dp_group: ProcessGroup, ep_group: ProcessGroup, fp8_communication: bool = False, ): assert tp_group is not None assert moe_dp_group is not None assert ep_group is not None self.ep_size = dist.get_world_size(ep_group) self.ep_rank = dist.get_rank(ep_group) self.num_experts = self.config.n_routed_experts assert self.num_experts % self.ep_size == 0 self.fp8_communication = fp8_communication self.ep_group = ep_group self.num_experts_per_ep = self.num_experts // self.ep_size self.expert_start_idx = self.ep_rank * self.num_experts_per_ep held_experts = self.experts[self.expert_start_idx : self.expert_start_idx + self.num_experts_per_ep] set_tensors_to_none(self.experts, exclude=set(held_experts)) # setup moe_dp group self.moe_dp_group = moe_dp_group self.moe_dp_size = moe_dp_group.size() # setup tp group self.tp_group = tp_group if self.tp_group.size() > 1: for expert in held_experts: expert.gate_proj = Linear1D_Col.from_native_module( expert.gate_proj, self.tp_group, fp8_communication=self.fp8_communication ) expert.up_proj = Linear1D_Col.from_native_module( expert.up_proj, self.tp_group, fp8_communication=self.fp8_communication ) expert.down_proj = Linear1D_Row.from_native_module( expert.down_proj, self.tp_group, fp8_communication=self.fp8_communication ) for p in self.experts.parameters(): set_moe_tensor_ep_group(p, ep_group) if self.config.n_shared_experts is not None: self.shared_experts.gate_proj = 
Linear1D_Col.from_native_module( self.shared_experts.gate_proj, self.tp_group, fp8_communication=self.fp8_communication ) self.shared_experts.up_proj = Linear1D_Col.from_native_module( self.shared_experts.up_proj, self.tp_group, fp8_communication=self.fp8_communication ) self.shared_experts.down_proj = Linear1D_Row.from_native_module( self.shared_experts.down_proj, self.tp_group, fp8_communication=self.fp8_communication ) @staticmethod def from_native_module( module, tp_group: ProcessGroup, moe_dp_group: ProcessGroup, ep_group: ProcessGroup, *args, **kwargs, ) -> "EPDeepseekMoE": LazyInitContext.materialize(module) if module.__class__.__name__ == "DeepseekMLP": return module module.__class__ = EPDeepseekMoE fp8_communication = kwargs.get("fp8_communication", False) module.setup_process_groups(tp_group, moe_dp_group, ep_group, fp8_communication=fp8_communication) return module def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: identity = hidden_states orig_shape = hidden_states.shape topk_experts_idx, topk_experts_weight, aux_loss = self.gate(hidden_states) hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) # [t0, t1, t2 ...] hidden_states = hidden_states.repeat_interleave( self.num_experts_per_tok, dim=0 ) # after repeat_interleave: [t0 t0 t1 t1 t2 t2 ... ] flat_topk_experts_idx = topk_experts_idx.view(-1) # [e0 e1 e2 ...] # The elements of flat_topk_token_idx are token ids, which are arranged in ascending order of expert ids. 
flat_topk_token_idx = flat_topk_experts_idx.argsort() # Now we adjust the order of the hidden states, also in ascending order of expert id dispatch_states = hidden_states[flat_topk_token_idx] input_split_sizes = flat_topk_experts_idx.bincount(minlength=self.num_experts) # [n0, n1, n2, n3] output_split_sizes = torch.zeros_like(input_split_sizes) # [n0, n1, n2, n3] [m0, m1, m2, m3] -> [n0, n1, m0, m1] [n2, n3, m2, m3] dist.all_to_all_single( output_split_sizes, input_split_sizes, group=self.ep_group, ) with torch.no_grad(): activate_experts = output_split_sizes[: self.num_experts_per_ep].clone() for i in range(1, self.ep_size): activate_experts += output_split_sizes[i * self.num_experts_per_ep : (i + 1) * self.num_experts_per_ep] activate_experts = (activate_experts > 0).float() if self.fp8_communication: all_reduce_fp8(activate_experts, group=self.moe_dp_group) else: dist.all_reduce(activate_experts, group=self.moe_dp_group) input_split_list = input_split_sizes.view(self.ep_size, self.num_experts_per_ep).sum(dim=-1).tolist() output_split_list = output_split_sizes.view(self.ep_size, self.num_experts_per_ep).sum(dim=-1).tolist() output_states, _ = all_to_all_uneven( dispatch_states, input_split_list, output_split_list, self.ep_group, fp8_communication=self.fp8_communication, ) output_states = EPGradScalerIn.apply(output_states, self.ep_size) if output_states.size(0) > 0: if self.num_experts_per_ep == 1: expert = self.experts[self.expert_start_idx] output_states = DPGradScalerIn.apply(output_states, self.moe_dp_size, activate_experts[0]) output_states = expert(output_states) output_states = DPGradScalerOut.apply(output_states, self.moe_dp_size, activate_experts[0]) else: output_states_splits = output_states.split(output_split_sizes.tolist()) output_states_list = [] for i, split_states in enumerate(output_states_splits): if split_states.size(0) == 0: # no token routed to this experts continue expert = self.experts[self.expert_start_idx + i % self.num_experts_per_ep] 
split_states = DPGradScalerIn.apply( split_states, self.moe_dp_size, activate_experts[i % self.num_experts_per_ep] ) split_states = expert(split_states) split_states = DPGradScalerOut.apply( split_states, self.moe_dp_size, activate_experts[i % self.num_experts_per_ep] ) output_states_list.append(split_states) output_states = torch.cat(output_states_list) output_states = EPGradScalerOut.apply(output_states, self.ep_size) dispatch_states, _ = all_to_all_uneven( output_states, output_split_list, input_split_list, self.ep_group, fp8_communication=self.fp8_communication ) recover_token_idx = torch.empty_like(flat_topk_token_idx) recover_token_idx[flat_topk_token_idx] = torch.arange( flat_topk_token_idx.size(0), device=flat_topk_token_idx.device ) output_hidden_states = dispatch_states[recover_token_idx] # t0 t0 t1 t1 t2 t2 output_hidden_states = output_hidden_states.view(-1, self.num_experts_per_tok, orig_shape[-1]) output_hidden_states = (output_hidden_states * topk_experts_weight[:, :, None]).sum(dim=-2) # (B*S, h) output_hidden_states = output_hidden_states.view(*orig_shape) output_hidden_states = AddAuxiliaryLoss.apply(output_hidden_states, aux_loss) if self.config.n_shared_experts is not None: output_hidden_states = output_hidden_states + self.shared_experts(identity) return output_hidden_states class DeepseekMoEGate_Col(ParallelModule): def parallel_linear(self, hidden_states): assert ( hidden_states.shape[-1] == self.weight.shape[-1] ), "Invalid shapes in Linear1D_Col forward: input={}, weight={}. Expected last dim of input {}.".format( hidden_states.shape, self.weight.shape, self.weight.shape[-1] ) output = linear_with_async_comm( hidden_states, self.weight, None, self.process_group, True, fp8_communication=self.fp8_communication ) # All-gather across the partitions. 
output = gather_forward_split_backward( output, dim=-1, process_group=self.process_group, fp8_communication=self.fp8_communication ) return output def forward(self, hidden_states): bsz, seq_len, h = hidden_states.shape ### compute gating score hidden_states = hidden_states.view(-1, h) logits = self.parallel_linear(hidden_states) if self.scoring_func == "softmax": scores = logits.softmax(dim=-1) else: raise NotImplementedError(f"insupportable scoring function for MoE gating: {self.scoring_func}") ### select top-k experts topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False) ### norm gate to sum 1 if self.top_k > 1 and self.norm_topk_prob: denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20 topk_weight = topk_weight / denominator ### expert-level computation auxiliary loss if self.training and self.alpha > 0.0: scores_for_aux = scores aux_topk = self.top_k # always compute aux loss based on the naive greedy topk method topk_idx_for_aux_loss = topk_idx.view(bsz, -1) if self.seq_aux: scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1) ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device) ce.scatter_add_( 1, topk_idx_for_aux_loss, torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device) ).div_(seq_len * aux_topk / self.n_routed_experts) aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha else: mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts) ce = mask_ce.float().mean(0) Pi = scores_for_aux.mean(0) fi = ce * self.n_routed_experts aux_loss = (Pi * fi).sum() * self.alpha else: aux_loss = None return topk_idx, topk_weight, aux_loss @staticmethod def from_native_module( module, process_group: ProcessGroup, config, gather_output, fp8_communication ) -> "DeepseekMoEGate_Col": LazyInitContext.materialize(module) module.process_group = process_group module.fp8_communication = fp8_communication sharded_weight = shard_rowwise(module.weight.data, 
process_group) sharded_tensor_to_existing_param(sharded_weight, module.weight) module.__class__ = DeepseekMoEGate_Col return module class DeepseekPipelineForwards: """ This class serves as a micro library for forward function substitution of Llama models under pipeline setting. """ @staticmethod def deepseek_model_forward( self: "DeepseekModel", input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, AutoModelForCausalLM >>> model = AutoModelForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if stage_manager.is_first_stage(): # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds else: input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape device = hidden_states.device seq_length_with_past = seq_length past_key_values_length = 0 # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() # embed positions, for the first stage, hidden_states is the input embeddings, # for the other stages, hidden_states is the output of the previous stage if is_flash_attn_2_available(): # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length, sliding_window=self.config.sliding_window, ) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None start_idx, end_idx = stage_index[0], stage_index[1] for idx, decoder_layer in enumerate(self.layers[start_idx:end_idx], start=start_idx): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, position_ids, None, output_attentions, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask, position_ids, past_key_value, output_attentions, use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if stage_manager.is_last_stage(): hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if stage_manager.is_last_stage(): if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) # always return dict for imediate stage return { "hidden_states": hidden_states, } @staticmethod def deepseek_for_causal_lm_forward( self: "DeepseekForCausalLM", input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: 
Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, MixtralForCausalLM >>> model = DeepseekForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = DeepseekPipelineForwards.deepseek_model_forward( self.model, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, ) past_key_values = None if stage_manager.is_last_stage(): hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits.float() loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=None, hidden_states=outputs[0], attentions=None, ) else: out = {} hidden_states = outputs.get("hidden_states") out["hidden_states"] = hidden_states return out def get_deepseek_flash_attention_forward(shard_config, sp_mode=None, sp_size=None, sp_group=None): logger = logging.get_logger(__name__) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.LongTensor] = None, position_ids: 
Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if sp_mode is not None: assert sp_mode in ["all_to_all", "split_gather", "ring"], "Invalid sp_mode" assert (sp_size is not None) and ( sp_group is not None ), "Must specify sp_size and sp_group for sequence parallel" # DeepseekFlashAttention2 attention does not support output_attentions if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) # overwrite attention_mask with padding_mask attention_mask = kwargs.pop("padding_mask") output_attentions = False bsz, q_len, _ = hidden_states.size() # sp: modify sp_len when sequence parallel mode is ring if sp_mode in ["split_gather", "ring"]: q_len *= sp_size query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": query_states = all_to_all_comm(query_states, sp_group, fp8_communication=shard_config.fp8_communication) key_states = all_to_all_comm(key_states, sp_group, fp8_communication=shard_config.fp8_communication) value_states = all_to_all_comm(value_states, sp_group, fp8_communication=shard_config.fp8_communication) bsz, q_len, _ = query_states.size() # Flash attention requires the input to have the shape # batch_size x seq_length x head_dim x hidden_dim # therefore we just need to keep the original shape query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if 
past_key_value is not None: kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb( query_states, key_states, cos, sin, position_ids, unsqueeze_dim=0 ) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache # to be able to avoid many of these transpose/reshape/view. query_states = query_states.transpose(1, 2) key_states = key_states.transpose(1, 2) value_states = value_states.transpose(1, 2) dropout_rate = self.attention_dropout if self.training else 0.0 # In PEFT, usually we cast the layer norms in float32 for training stability reasons # therefore the input hidden states gets silently casted in float32. Hence, we need # cast them back in the correct dtype just to be sure everything works as expected. # This might slowdown training & inference so it is recommended to not cast the LayerNorms # in fp32. (DeepseekRMSNorm handles it correctly) input_dtype = query_states.dtype if input_dtype == torch.float32: # Handle the case where the model is quantized if hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype elif torch.is_autocast_enabled(): target_dtype = torch.get_autocast_gpu_dtype() else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." 
) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = self._flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate ) # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim).contiguous() # (1, 8, 128) attn_output = all_to_all_comm( attn_output, sp_group, scatter_dim=1, gather_dim=2, fp8_communication=shard_config.fp8_communication ) # (1, 4, 256) else: attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/bert.py
colossalai/shardformer/modeling/bert.py
import warnings from typing import List, Optional, Tuple, Union import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.modeling_outputs import ( BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.models.bert.modeling_bert import ( BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForPreTrainingOutput, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer import ShardConfig from colossalai.shardformer.layer._operation import gather_forward_split_backward, split_forward_gather_backward class BertPipelineForwards: """ This class serves as a micro library for forward function substitution of Bert models under pipeline setting. 
""" @staticmethod def bert_model_forward( self: BertModel, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, # this is from the previous stage stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: # TODO(jianghai): add explaination of the output here. r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if stage_manager.is_first_stage(): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) else: input_shape = hidden_states.size()[:-1] batch_size, seq_length = input_shape device = hidden_states.device # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added 
in the future. if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) attention_mask = extended_attention_mask # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = 
hidden_states if hidden_states is not None else None if stage_manager.is_first_stage(): hidden_states = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) # inherit from bert_layer,this should be changed when we add the feature to record hidden_states all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.encoder.gradient_checkpointing and self.encoder.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None start_idx, end_idx = stage_index[0], stage_index[1] # layer_outputs layer_outputs = hidden_states if hidden_states is not None else None # split the input tensor along sequence dimension # [batch_size, seq_len, hidden_size] -> [batch_size, seq_len/TP_size, hidden_size] if shard_config is not None and shard_config.enable_sequence_parallelism: if shard_config.sequence_parallelism_mode == "split_gather": hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, fp8_communication=shard_config.fp8_communication, ) if encoder_hidden_states is not None: encoder_hidden_states = split_forward_gather_backward( encoder_hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, fp8_communication=shard_config.fp8_communication, ) for idx, encoder_layer in enumerate(self.encoder.layer[start_idx:end_idx], start=start_idx): if stage_manager.is_first_stage() and idx == 0: encoder_attention_mask = encoder_extended_attention_mask if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[idx] if head_mask is not None 
else None past_key_value = past_key_values[idx] if past_key_values is not None else None if self.encoder.gradient_checkpointing and self.encoder.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) # When sequence parallelism done, gather the output tensor in forward and split it in backward if shard_config is not None and shard_config.enable_sequence_parallelism: if shard_config.sequence_parallelism_mode == "split_gather": hidden_states = gather_forward_split_backward( hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, fp8_communication=shard_config.fp8_communication, ) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # end of a stage loop sequence_output = hidden_states if hidden_states is not None else None if stage_manager.is_last_stage(): pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + layer_outputs[1:] # return dict is not supported at this moment else: return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, 
cross_attentions=all_cross_attentions, ) # output of non-first and non-last stages: must be a dict else: # intermediate stage always return dict return { "hidden_states": hidden_states, } @staticmethod def bert_for_pretraining_forward( self: BertForPreTraining, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, next_sentence_label: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_manager: Optional[PipelineStageManager] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai) left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False outputs = BertPipelineForwards.bert_model_forward( self.bert, input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states if hidden_states is not None else None, stage_index=stage_index, shard_config=shard_config, ) if stage_manager.is_last_stage(): sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) # the last stage for pretraining model total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: hidden_states = outputs.get("hidden_states") # intermediate stage always return dict return { "hidden_states": hidden_states, } @staticmethod def bert_lm_head_model_forward( self: BertLMHeadModel, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: 
Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_manager: Optional[PipelineStageManager] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False outputs = BertPipelineForwards.bert_model_forward( self.bert, input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states if hidden_states is not None else None, 
stage_index=stage_index, shard_config=shard_config, ) past_key_values = None if stage_manager.is_last_stage(): sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) else: hidden_states = outputs.get("hidden_states") # intermediate stage always return dict return {"hidden_states": hidden_states} @staticmethod def bert_for_masked_lm_forward( self: BertForMaskedLM, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, hidden_states: Optional[torch.Tensor] = None, stage_manager: Optional[PipelineStageManager] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False outputs = BertPipelineForwards.bert_model_forward( self.bert, input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, hidden_states=hidden_states, stage_manager=stage_manager, stage_index=stage_index, shard_config=shard_config, ) if stage_manager.is_last_stage(): sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: hidden_states = outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def bert_for_next_sentence_prediction_forward( self: BertForNextSentencePrediction, input_ids: Optional[torch.Tensor] = None, 
attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, hidden_states: Optional[torch.Tensor] = None, stage_manager: Optional[PipelineStageManager] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, **kwargs, ): # -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Returns: Example: ```python >>> from transformers import AutoTokenizer, BertForNextSentencePrediction >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") >>> model = BertForNextSentencePrediction.from_pretrained("bert-base-uncased") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") >>> outputs = model(**encoding, labels=torch.LongTensor([1])) >>> logits = outputs.logits >>> assert logits[0, 0] < logits[0, 1] # next sentence was random ``` """ logger = logging.get_logger(__name__) if "next_sentence_label" in kwargs: warnings.warn( "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" " `labels` instead.", FutureWarning, ) labels = kwargs.pop("next_sentence_label") return_dict = return_dict if return_dict is not None else self.config.use_return_dict if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False outputs = BertPipelineForwards.bert_model_forward( self.bert, input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, hidden_states=hidden_states, stage_manager=stage_manager, stage_index=stage_index, shard_config=shard_config, ) if stage_manager.is_last_stage(): pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return NextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: hidden_states = outputs.get("hidden_states") # intermediate stage always return dict 
return {"hidden_states": hidden_states} @staticmethod def bert_for_sequence_classification_forward( self: BertForSequenceClassification, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, hidden_states: Optional[torch.Tensor] = None, stage_manager: Optional[PipelineStageManager] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/t5.py
colossalai/shardformer/modeling/t5.py
import warnings from typing import Dict, List, Optional, Tuple, Union import torch from torch.nn import CrossEntropyLoss from transformers.modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, TokenClassifierOutput, ) from transformers.models.t5.modeling_t5 import ( T5EncoderModel, T5ForConditionalGeneration, T5ForTokenClassification, T5Model, T5Stack, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager class T5PipelineForwards: """ This class serves as a micro library for forward function substitution of T5 models under pipeline setting. """ @staticmethod def t5_stack_forward( self: T5Stack, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = None, cache_position=None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, position_bias: Optional[torch.Tensor] = None, encoder_decoder_position_bias: Optional[torch.Tensor] = None, stage_index: Optional[List[int]] = None, decoder_starting_stage: Optional[int] = None, ) -> Union[Dict, Tuple, BaseModelOutputWithPastAndCrossAttentions]: # This function is modified on the basis of transformers.models.t5.modeling_t5.T5Stack.forward. # Please refer to original code of transformers for more details. 
logger = logging.get_logger(__name__) # TODO(baizhou): left the recording kv-value tensors as () or None type, this feature may be added in the future. if past_key_values: logger.warning_once("Non-empty past_key_values is not supported for pipeline models at the moment.") past_key_values = None if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False stage = stage_manager.stage in_decoder = self.is_decoder if in_decoder != (stage >= decoder_starting_stage): raise ValueError("Config in T5Stack is not aligned with pipeline setting.") # at_first_stage: current stage is the first stage of encoder/decoder, taking input_ids/input_embeds # at_last_stage: current stage is the last stage of encoder/decoder, making outputs the same form as huggingface at_first_stage = (stage == 0) or (stage == decoder_starting_stage) at_last_stage = (stage == decoder_starting_stage - 1) or (stage == stage_manager.num_stages - 1) # Process inputs if at the first stage of encoder/decoder. 
if at_first_stage: if input_ids is not None and inputs_embeds is not None: err_msg_prefix = "decoder_" if in_decoder else "" raise ValueError( f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: err_msg_prefix = "decoder_" if in_decoder else "" raise ValueError( f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds" ) if inputs_embeds is None: if self.embed_tokens is None: raise ValueError("You have to initialize the model with valid token embeddings") inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape device = inputs_embeds.device hidden_states = self.dropout(inputs_embeds) else: if hidden_states is None: raise ValueError( "hidden_states shouldn't be None for stages other than the first stage of encoder/decoder." ) input_shape = hidden_states.size()[:-1] batch_size, seq_length = input_shape[0], input_shape[1] device = hidden_states.device # required mask seq length can be calculated via length of past mask_seq_length = seq_length # initialize past_key_values with `None` if past does not exist if past_key_values is None: past_key_values = [None] * len(self.block) past_key_values_length = 0 if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=hidden_states.device ) if attention_mask is None: attention_mask = torch.ones(batch_size, mask_seq_length, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=inputs_embeds.device, dtype=torch.long) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None if self.config.is_decoder: causal_mask = self._update_causal_mask( attention_mask, inputs_embeds, cache_position, None, output_attentions, ) elif attention_mask is not None: causal_mask = attention_mask[:, None, None, :] causal_mask = causal_mask.to(dtype=hidden_states.dtype) causal_mask = (1.0 - causal_mask) * torch.finfo(hidden_states.dtype).min else: causal_mask = None # Prepare head mask if needed head_mask = self.get_head_mask(head_mask, self.config.num_layers) cross_attn_head_mask = self.get_head_mask(cross_attn_head_mask, self.config.num_layers) present_key_value_states = () if use_cache else None all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None all_cross_attentions = () if (output_attentions and self.is_decoder) else None # Going through held blocks. 
start_idx, end_idx = stage_index[0], stage_index[1] for i in range(start_idx, end_idx): layer_module = self.block[i] layer_head_mask = head_mask[i] cross_attn_layer_head_mask = cross_attn_head_mask[i] torch.cuda.set_device(hidden_states.device) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.forward, hidden_states, causal_mask, position_bias, encoder_hidden_states, encoder_extended_attention_mask, encoder_decoder_position_bias, layer_head_mask, cross_attn_layer_head_mask, None, # past_key_value is always None with gradient checkpointing use_cache, output_attentions, return_dict, cache_position, ) else: layer_outputs = layer_module( hidden_states, attention_mask=causal_mask, position_bias=position_bias, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, encoder_decoder_position_bias=encoder_decoder_position_bias, layer_head_mask=layer_head_mask, cross_attn_layer_head_mask=cross_attn_layer_head_mask, past_key_value=None, use_cache=use_cache, output_attentions=output_attentions, return_dict=return_dict, cache_position=cache_position, ) # layer_outputs is a tuple with: # hidden-states, key-value-states, (self-attention position bias), (self-attention weights), (cross-attention position bias), (cross-attention weights) if use_cache is False or use_cache is None: layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:] hidden_states, present_key_value_state = layer_outputs[:2] # We share the position biases between the layers - the first layer store them # layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights), # (cross-attention position bias), (cross-attention weights) position_bias = layer_outputs[2] if in_decoder and encoder_hidden_states is not None: encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3] # append next layer key value states if use_cache: 
present_key_value_states = present_key_value_states + (present_key_value_state,) # last layer if at_last_stage: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) if not return_dict: return tuple( v for v in [ hidden_states, present_key_value_states, all_hidden_states, all_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_value_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) else: return { "hidden_states": hidden_states, "position_bias": position_bias, "encoder_decoder_position_bias": encoder_decoder_position_bias, "backward_tensor_keys": ["hidden_states"], } @staticmethod def t5_model_forward( self: T5Model, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, decoder_inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, position_bias: Optional[torch.Tensor] = None, encoder_decoder_position_bias: Optional[torch.Tensor] = None, backward_tensor_keys: Optional[List[str]] = None, stage_index: Optional[List[int]] = None, decoder_starting_stage: Optional[int] = 
None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]: # This function is modified on the basis of transformers.models.t5.modeling_t5.T5Model.forward. # Please refer to original code of transformers for more details. __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict logger = logging.get_logger(__name__) # TODO(baizhou): left the recording kv-value tensors as () or None type, this feature may be added in the future. if past_key_values: logger.warning_once("Non-empty past_key_values is not supported for pipeline models at the moment.") past_key_values = None if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask in_decoder = stage_manager.stage >= decoder_starting_stage # Stage is in encoder, directly return the output of t5_stack_forward if not in_decoder: encoder_outputs = T5PipelineForwards.t5_stack_forward( self.encoder, 
input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, position_bias=position_bias, encoder_decoder_position_bias=encoder_decoder_position_bias, stage_index=stage_index, decoder_starting_stage=decoder_starting_stage, ) if stage_manager.stage == decoder_starting_stage - 1: # last stage of encoder return {"encoder_hidden_states": encoder_outputs[0]} else: return encoder_outputs at_last_decoder_stage = stage_manager.is_last_stage() at_first_decoder_stage = stage_manager.stage == decoder_starting_stage if encoder_outputs is not None: encoder_hidden_states = encoder_outputs[0] elif encoder_hidden_states is None: raise ValueError("Non-empty encoder_hidden_states should be passed in at decoder stages.") if not at_first_decoder_stage and hidden_states is None: raise ValueError("If not at the first layer of decoder, non-empty hidden_states must be provided.") # Decode decoder_outputs = T5PipelineForwards.t5_stack_forward( self.decoder, input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, position_bias=position_bias, encoder_decoder_position_bias=encoder_decoder_position_bias, stage_index=stage_index, decoder_starting_stage=decoder_starting_stage, ) # Directly return outputs of overloaded T5Stack forward if not at last stage. 
if not at_last_decoder_stage: # encoder_hidden_states should be passed to the next stage decoder_outputs["encoder_hidden_states"] = encoder_hidden_states return decoder_outputs if not return_dict: return decoder_outputs + encoder_hidden_states else: return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_hidden_states, ) @staticmethod def t5_for_conditional_generation_forward( self: T5ForConditionalGeneration, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.FloatTensor] = None, decoder_head_mask: Optional[torch.FloatTensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, position_bias: Optional[torch.Tensor] = None, encoder_decoder_position_bias: Optional[torch.Tensor] = None, backward_tensor_keys: Optional[List[str]] = None, stage_index: Optional[List[int]] = None, decoder_starting_stage: Optional[int] = None, ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]: # This function is modified on the basis of 
transformers.models.t5.modeling_t5.T5ForConditionalGeneration.forward. # Please refer to original code of transformers for more details. __HEAD_MASK_WARNING_MSG = """ The input argument `head_mask` was split into two arguments `head_mask` and `decoder_head_mask`. Currently, `decoder_head_mask` is set to copy `head_mask`, but this feature is deprecated and will be removed in future versions. If you do not want to use any `decoder_head_mask` now, please set `decoder_head_mask = torch.ones(num_layers, num_heads)`. """ use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict logger = logging.get_logger(__name__) # TODO(baizhou): left the recording kv-value tensors as () or None type, this feature may be added in the future. if past_key_values: logger.warning_once("Non-empty past_key_values is not supported for pipeline models at the moment.") past_key_values = None if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask if head_mask is not None and decoder_head_mask is None: if self.config.num_layers == self.config.num_decoder_layers: warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning) decoder_head_mask = head_mask in_decoder = stage_manager.stage >= decoder_starting_stage # Stage is in encoder, directly return the output of t5_stack_forward if not in_decoder: encoder_outputs = T5PipelineForwards.t5_stack_forward( self.encoder, input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, 
head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, position_bias=position_bias, encoder_decoder_position_bias=encoder_decoder_position_bias, stage_index=stage_index, decoder_starting_stage=decoder_starting_stage, ) if stage_manager.stage == decoder_starting_stage - 1: # last stage of encoder return {"encoder_hidden_states": encoder_outputs[0]} else: return encoder_outputs at_last_decoder_stage = stage_manager.is_last_stage() at_first_decoder_stage = stage_manager.stage == decoder_starting_stage if encoder_outputs is not None: encoder_hidden_states = encoder_outputs[0] elif encoder_hidden_states is None: raise ValueError("Non-empty encoder_hidden_states should be passed in at decoder stages.") if not at_first_decoder_stage and hidden_states is None: raise ValueError("If not at the first layer of decoder, non-empty hidden_states must be provided.") if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: # get decoder inputs from shifting lm labels to the right decoder_input_ids = self._shift_right(labels) # Decode decoder_outputs = T5PipelineForwards.t5_stack_forward( self.decoder, input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, position_bias=position_bias, encoder_decoder_position_bias=encoder_decoder_position_bias, stage_index=stage_index, decoder_starting_stage=decoder_starting_stage, ) # Directly return outputs of overloaded T5Stack forward if not at last stage. 
if not at_last_decoder_stage: # encoder_hidden_states should be passed to the next stage decoder_outputs["encoder_hidden_states"] = encoder_hidden_states return decoder_outputs sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: # Rescale output before projecting on vocab # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 sequence_output = sequence_output * (self.model_dim**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) # move labels to correct device to enable PP labels = labels.to(lm_logits.device) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[1:] + encoder_hidden_states return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_hidden_states, ) @staticmethod def t5_encoder_model_forward( self: T5EncoderModel, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, position_bias: Optional[torch.Tensor] = None, encoder_decoder_position_bias: Optional[torch.Tensor] = None, backward_tensor_keys: Optional[List[str]] = None, stage_index: Optional[List[int]] = None, decoder_starting_stage: Optional[int] = None, ) -> Union[Tuple[torch.FloatTensor], 
BaseModelOutput]: r""" This function is modified on the basis of transformers.models.t5.modeling_gpt2.T5EncoderModel.forward. Please refer to original code of transformers for more details. ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = T5PipelineForwards.t5_stack_forward( self.encoder, input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, position_bias=position_bias, encoder_decoder_position_bias=encoder_decoder_position_bias, stage_index=stage_index, decoder_starting_stage=decoder_starting_stage, ) return outputs @staticmethod def t5_for_token_classification_forward( self: T5ForTokenClassification, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, position_bias: Optional[torch.Tensor] = None, encoder_decoder_position_bias: Optional[torch.Tensor] = None, labels: Optional[torch.LongTensor] = None, backward_tensor_keys: Optional[List[str]] = None, stage_index: Optional[List[int]] = None, decoder_starting_stage: Optional[int] = None, ) -> Union[Tuple[torch.FloatTensor], BaseModelOutput]: r""" This function is modified on the basis of transformers.models.t5.modeling_t5.T5ForTokenClassification.forward. Please refer to original code of transformers for more details. 
```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = T5PipelineForwards.t5_stack_forward( self.transformer.encoder, input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, position_bias=position_bias, encoder_decoder_position_bias=encoder_decoder_position_bias, stage_index=stage_index, decoder_starting_stage=decoder_starting_stage, ) if stage_manager.is_last_stage(): sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) return outputs def get_t5_flash_attention_forward(): from transformers.models.t5.modeling_t5 import T5Attention def forward( self: T5Attention, hidden_states: torch.Tensor, mask: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, position_bias: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, layer_head_mask: Optional[torch.Tensor] = None, query_length: Optional[int] = None, use_cache: bool = False, output_attentions: bool = False, cache_position=None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]: """
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/whisper.py
colossalai/shardformer/modeling/whisper.py
import logging import random from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from transformers.modeling_attn_mask_utils import ( _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from transformers.modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, SequenceClassifierOutput, ) from transformers.models.whisper.modeling_whisper import ( _HIDDEN_STATES_START_POSITION, WhisperDecoder, WhisperEncoder, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, shift_tokens_right, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer import ColoAttention from colossalai.shardformer.shard import ShardConfig logger = logging.get_logger(__name__) def _get_attention_mask( self: WhisperDecoder, shard_config: ShardConfig, hidden_states: torch.Tensor, past_key_values_length: int, attention_mask: Optional[torch.FloatTensor], head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ): batch_size, seq_length = hidden_states.shape[:2] mask_seq_length = past_key_values_length + seq_length if shard_config.enable_flash_attention: attention_mask = ColoAttention.prepare_attn_kwargs( (batch_size, 1, seq_length, mask_seq_length), hidden_states.dtype, hidden_states.device, attention_mask, is_causal=True, ) else: input_shape = (batch_size, seq_length) if self._use_flash_attention_2: # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self._use_sdpa and head_mask is None and not output_attentions: # output_attentions=True & head_mask can not be supported when using SDPA. 
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, input_shape, hidden_states, past_key_values_length ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, input_shape, hidden_states, past_key_values_length ) return attention_mask def get_whisper_flash_attention_forward(): from transformers.models.whisper.modeling_whisper import WhisperAttention def forward( self: WhisperAttention, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[dict] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" assert layer_head_mask is None, "layer_head_mask is not supported for FlashAttention" # for encoder, attention_mask is None if attention_mask is None: attention_mask = {} # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = 
self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) query_states = self._shape(query_states, tgt_len, bsz) dropout_p = self.dropout if self.training else 0.0 attn_output = ColoAttention.attention( query_states, key_states, value_states, **attention_mask, dropout_p=dropout_p, scale=self.scaling, ) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, None, past_key_value return forward def get_whisper_decoder_forward_for_flash_attention(shard_config: ShardConfig): def forward( self: WhisperDecoder, input_ids=None, attention_mask=None, encoder_hidden_states=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, position_ids=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, cache_position=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) attention_mask = _get_attention_mask(self, shard_config, inputs_embeds, past_key_values_length, attention_mask) # embed positions if input_ids is not None: positions = self.embed_positions( input_ids, past_key_values_length=past_key_values_length, position_ids=position_ids ) else: positions = self.embed_positions( inputs_embeds, past_key_values_length=past_key_values_length, 
position_ids=position_ids ) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == (len(self.layers)), ( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, use_cache) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, encoder_hidden_states, None, # encoder attention mask head_mask[idx] if head_mask is not None else None, (cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), None, # past_key_value ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [ hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( 
last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) return forward def get_jit_fused_whisper_encoder_layer_forward(): from transformers.models.whisper.modeling_whisper import WhisperEncoderLayer def forward( self: WhisperEncoderLayer, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = self.dropout_add(hidden_states, residual, self.dropout, self.training) residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_add(hidden_states, residual, self.dropout, self.training) if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs return forward def get_jit_fused_whisper_decoder_layer_forward(): from transformers.models.whisper.modeling_whisper import WhisperDecoderLayer def forward( self: WhisperDecoderLayer, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. 
encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = self.dropout_add(hidden_states, residual, self.dropout, self.training) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, 
key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = self.dropout_add(hidden_states, residual, self.dropout, self.training) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout_add(hidden_states, residual, self.dropout, self.training) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs return forward class WhisperPipelineForwards: """ This class serves as a micro library for forward function substitution of Llama models under pipeline setting. """ @staticmethod def whisper_encoder_forward( self: WhisperEncoder, input_features, attention_mask=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, encoder_states=None, all_attentions=None, stage_index: Optional[List[int]] = None, decoder_starting_stage: Optional[int] = None, shard_config: Optional[ShardConfig] = None, ): r""" Args: input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`): Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). 
To prepare the array into `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] attention_mask (`torch.Tensor`)`, *optional*): Whisper does not support masking of the `input_features`, this argument is preserved for compatibility, but it is not used. By default the silence in the input log mel spectrogram are ignored. head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ logging.get_logger(__name__) stage = stage_manager.stage at_first_stage = stage == 0 at_last_stage = stage == decoder_starting_stage - 1 output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # Process inputs if at the first stage of encoder. 
if at_first_stage: inputs_embeds = nn.functional.gelu(self.conv1(input_features)) inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds)) inputs_embeds = inputs_embeds.permute(0, 2, 1) embed_pos = self.embed_positions.weight hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." else: if hidden_states is None: raise ValueError( "hidden_states shouldn't be None for stages other than the first stage of encoder/decoder." ) start_idx, end_idx = stage_index[0], stage_index[1] for idx in range(start_idx, end_idx): encoder_layer = self.layers[idx] if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, None, (head_mask[idx] if head_mask is not None else None), output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, None, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if at_last_stage: hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, 
encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions, ) else: return {"hidden_states": hidden_states, "head_mask": head_mask} @staticmethod def whisper_decoder_forward( self: WhisperDecoder, input_ids=None, attention_mask=None, encoder_hidden_states=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, inputs_embeds=None, position_ids=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, decoder_starting_stage: Optional[int] = None, shard_config: Optional[ShardConfig] = None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ logger = logging.get_logger(__name__) stage = stage_manager.stage at_first_stage = stage == decoder_starting_stage at_last_stage = stage == stage_manager.num_stages - 1 output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: assert attn_mask.size()[0] == (len(self.layers)), (
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/llama.py
colossalai/shardformer/modeling/llama.py
import math import warnings from typing import Dict, List, Optional, Tuple, Union import torch import torch.distributed import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.cache_utils import Cache, DynamicCache from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast, ) from transformers.models.llama.modeling_llama import ( LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, StaticCache, apply_rotary_pos_emb, repeat_kv, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer._operation import all_to_all_comm, gather_sp_output, split_forward_gather_backward from colossalai.shardformer.layer.utils import is_share_sp_tp, split_batch_zigzag from colossalai.shardformer.shard import ShardConfig from ..layer import ColoAttention, RingAttention, dist_cross_entropy _SUPPORTED_SP_MODE = ["all_to_all", "split_gather", "ring", "ring_attn"] class LlamaPipelineForwards: """ This class serves as a micro library for forward function substitution of Llama models under pipeline setting. 
""" @staticmethod def llama_model_forward( self: LlamaModel, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, force_sp_gather: bool = True, # Set to false only when computing cross entropy ): logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if use_cache: logger.warning_once( "`use_cache=True` is incompatible with pipeline parallelism. Setting `use_cache=False`..." 
) use_cache = False return_dict = return_dict if return_dict is not None else self.config.use_return_dict disable_pp = stage_manager is None # retrieve input_ids and inputs_embeds if disable_pp or stage_manager.is_first_stage(): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape[:2] elif inputs_embeds is not None: batch_size, seq_length = inputs_embeds.shape[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds device = hidden_states.device else: input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape device = hidden_states.device # Support SP + PP sp_mode = shard_config.sequence_parallelism_mode sp_group = shard_config.sequence_parallel_process_group sp_size = shard_config.sequence_parallel_size # Generating full positions ids for modes that gather sequence before attn if stage_manager and (sp_mode != "ring_attn" and not stage_manager.is_first_stage()): seq_length *= sp_size past_seen_tokens = 0 if use_cache: # kept for BC (cache positions) if not isinstance(past_key_values, StaticCache): past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_seen_tokens = past_key_values.get_seq_length() if cache_position is None: if isinstance(past_key_values, StaticCache): raise ValueError("cache_position is a required argument when using StaticCache.") cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=device) seq_length_with_past = seq_length + past_seen_tokens if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the 
moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False if position_ids is None: position_ids = cache_position.unsqueeze(0) no_split_input = disable_pp or not stage_manager.is_first_stage() if no_split_input and sp_mode == "ring_attn": _, attn_kwargs, _ = RingAttention.prepare_varlen_batch(attention_mask, sp_group) elif shard_config.enable_flash_attention: mask_shape = (batch_size, 1, seq_length_with_past, seq_length_with_past) attn_kwargs: dict = ColoAttention.prepare_attn_kwargs( mask_shape, hidden_states.dtype, hidden_states.device, q_padding_mask=attention_mask, is_causal=True, invert=(sp_mode != "ring_attn"), ) else: attn_kwargs: torch.Tensor = self._update_causal_mask( attention_mask, hidden_states, cache_position, past_key_values ) # Support SP + PP. Later stages have already received the split input. split_input = disable_pp or stage_manager.is_first_stage() if split_input: # Ring Attention zigzag batch processing if sp_mode == "ring_attn": assert shard_config.enable_flash_attention, "Ring Attention inherently requires Flash Attention." if not attention_mask.bool().all(): hidden_states, attn_kwargs, position_ids = RingAttention.prepare_varlen_batch( attention_mask, sp_group, hidden_states, position_ids ) else: hidden_states, position_ids = split_batch_zigzag([hidden_states, position_ids], sp_group) elif is_share_sp_tp(sp_mode): hidden_states = split_forward_gather_backward( hidden_states, 1, sp_group, fp8_communication=shard_config.fp8_communication ) elif sp_mode == "all_to_all": hidden_states = split_forward_gather_backward( hidden_states, 1, sp_group, 1 / sp_size, fp8_communication=shard_config.fp8_communication ) if self.gradient_checkpointing and self.training and use_cache: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None start_idx, end_idx = (0, len(self.layers)) if disable_pp else (stage_index[0], stage_index[1]) position_embeddings = self.rotary_emb(hidden_states, position_ids) num_ckpt_layers = 0 if self.gradient_checkpointing and self.training: num_ckpt_layers = end_idx - start_idx # TODO: We can replace `gradient_checkpointing_enable` fn and initialize a gradient_checkpointing (List[bool]) for each layer if shard_config.gradient_checkpoint_config is not None: num_ckpt_layers = shard_config.gradient_checkpoint_config.get_num_ckpt_layers( stage=stage_manager.stage, num_stages=stage_manager.num_stages, num_layers=end_idx - start_idx, model_chunk_id=(stage_manager.model_chunk_id if stage_manager.is_interleave else 0), num_model_chunks=stage_manager.num_model_chunks, ) assert num_ckpt_layers <= end_idx - start_idx for idx, decoder_layer in enumerate(self.layers[start_idx:end_idx], start=start_idx): if output_hidden_states: all_hidden_states += (hidden_states,) if idx - start_idx < num_ckpt_layers: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attn_kwargs, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attn_kwargs, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) if disable_pp or stage_manager.is_last_stage(): hidden_states = self.norm(hidden_states) if (not shard_config.parallel_output) or force_sp_gather or is_share_sp_tp(sp_mode): # noqa 
hidden_states = gather_sp_output(hidden_states, shard_config) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if disable_pp or stage_manager.is_last_stage(): if not return_dict: return tuple( v for v in [ hidden_states, next_cache, all_hidden_states, all_self_attns, ] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) # always return dict for intermediate stage return {"hidden_states": hidden_states} @staticmethod def llama_for_causal_lm_forward( self: LlamaForCausalLM, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, **kwargs, ): r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Returns: Example: ```python >>> from transformers import AutoTokenizer, LlamaForCausalLM >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if shard_config.sequence_parallelism_mode == "ring_attn" and shard_config.parallel_output: # Split labels in a zigzag fashion too sp_group = shard_config.sequence_parallel_process_group if attention_mask.bool().all(): labels = split_batch_zigzag(labels, sp_group, seq_dim=1, is_label=True) else: # [B, max_seqlen // sp_size] labels, _, _ = RingAttention.prepare_varlen_batch(attention_mask, sp_group, labels, is_label=True) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = LlamaPipelineForwards.llama_model_forward( self.model, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, force_sp_gather=False, ) past_key_values = None disable_pp = stage_manager is None if disable_pp or stage_manager.is_last_stage(): hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = dist_cross_entropy(labels, logits, shard_config, self.lm_head.out_features, self.model.dtype) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: hidden_states = outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def 
llama_for_sequence_classification_forward( self: LlamaForSequenceClassification, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = LlamaPipelineForwards.llama_model_forward( self.model, input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) if input_ids is not None: batch_size = input_ids.shape[0] elif inputs_embeds is not None: batch_size = inputs_embeds.shape[0] else: batch_size = hidden_states.shape[0] if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device) else: sequence_lengths = -1 pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = 
loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} def get_llama_flash_attention_forward(shard_config: ShardConfig, sp_mode=None, sp_size=None, sp_group=None): def forward( self, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[Union[torch.Tensor, Dict]] = None, past_key_value: Optional[Cache] = None, output_attentions: bool = False, use_cache: bool = False, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Cache]]: if sp_mode is not None: assert sp_mode in _SUPPORTED_SP_MODE, f"SP mode {sp_mode} is not supported by {type(self)} yet" assert (sp_size is not None) and ( sp_group is not None ), "Must specify sp_size and sp_group for sequence parallel" if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. 
Please make sure use `attention_mask` instead.`" ) bsz, q_len, _ = hidden_states.size() input_shape = hidden_states.shape[:-1] # sp: modify sp_len when sequence parallel mode is ring if is_share_sp_tp(sp_mode): q_len *= sp_size query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": query_states = all_to_all_comm(query_states, sp_group, fp8_communication=shard_config.fp8_communication) key_states = all_to_all_comm(key_states, sp_group, fp8_communication=shard_config.fp8_communication) value_states = all_to_all_comm(value_states, sp_group, fp8_communication=shard_config.fp8_communication) bsz, q_len, _ = query_states.size() query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." 
) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if sp_mode == "ring_attn": attn_output = RingAttention.attention( query_states, key_states, value_states, sp_axis=shard_config.sp_axis, **attention_mask, inner_ring_size=shard_config.inner_ring_size, pg_mesh=shard_config.pg_mesh, ) elif shard_config.enable_flash_attention: assert isinstance(attention_mask, dict), "Flash Attention Error: attention_mask should be a dict." attn_output = ColoAttention.attention(query_states, key_states, value_states, **attention_mask) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) 
attn_output = attn_output.transpose(1, 2).contiguous() # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) attn_output = all_to_all_comm( attn_output, sp_group, scatter_dim=1, gather_dim=2, fp8_communication=shard_config.fp8_communication ) else: attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights return forward
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/vit.py
colossalai/shardformer/modeling/vit.py
from typing import List, Optional, Tuple, Union import torch from transformers.models.vit.modeling_vit import BaseModelOutput, ViTEncoder from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer import ColoAttention def _encoder_forward( encoder: ViTEncoder, start_idx: int, end_idx: int, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, stage_manager: PipelineStageManager = None, ) -> Union[tuple, BaseModelOutput]: for i in range(start_idx, end_idx): layer_module = encoder.layer[i] layer_head_mask = head_mask[i] if head_mask is not None else None if encoder.gradient_checkpointing and encoder.training: layer_outputs = encoder._gradient_checkpointing_func( layer_module.__call__, hidden_states, layer_head_mask, output_attentions, ) else: layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) hidden_states = layer_outputs[0] if not stage_manager.is_last_stage(): return hidden_states else: if not return_dict: return tuple(hidden_states) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=None, attentions=None, ) def ViTModel_pipeline_forward(stage_manager: PipelineStageManager, stage_index: List[int]): from transformers.models.vit.modeling_vit import BaseModelOutputWithPooling def pp_forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, hidden_states: Optional[torch.FloatTensor] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*): Boolean masked positions. 
Indicates which patches are masked (1) and which aren't (0). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict logger = logging.get_logger(__name__) # Preprocess passed in arguments if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if stage_manager.is_first_stage(): if pixel_values is None: raise ValueError("You have to specify pixel_values") # TODO(FoolPlayer): maybe have a cleaner way to cast the input (from `ImageProcessor` side?) 
expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype if pixel_values.dtype != expected_dtype: pixel_values = pixel_values.to(expected_dtype) embedding_output = self.embeddings( pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding, ) hidden_states = embedding_output else: assert ( hidden_states is not None ), f"Current stage is {stage_manager.stage}, hidden_states should not be None" encoder_outputs = _encoder_forward( encoder=self.encoder, start_idx=stage_index[0], end_idx=stage_index[1], hidden_states=hidden_states, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, ) if not stage_manager.is_last_stage(): return {"hidden_states": encoder_outputs} sequence_output = encoder_outputs[0] sequence_output = self.layernorm(sequence_output) pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,) return head_outputs + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) return pp_forward def ViTForImageClassification_pipeline_forward(stage_manager: PipelineStageManager, stage_index: List[int]): from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.models.vit.modeling_vit import ImageClassifierOutput def pp_forward( self, pixel_values: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, hidden_states: Optional[torch.FloatTensor] = None, ) -> Union[tuple, 
ImageClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the image classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not stage_manager.is_first_stage(): assert ( hidden_states is not None ), f"Current stage is {stage_manager.stage}, hidden_states should not be None" outputs = self.vit( pixel_values, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, hidden_states=hidden_states, ) # not last stage, return hidden_states if not stage_manager.is_last_stage(): return outputs else: sequence_output = outputs[0] # last stage logits = self.classifier(sequence_output[:, 0, :]) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + 
outputs[1:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) return pp_forward def ViTForMaskedImageModeling_pipeline_forward(stage_manager: PipelineStageManager, stage_index: List[int]): import math import torch.nn as nn from transformers.models.vit.modeling_vit import ImageClassifierOutput, MaskedImageModelingOutput def pp_forward( self, pixel_values: Optional[torch.Tensor] = None, bool_masked_pos: Optional[torch.BoolTensor] = None, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = None, return_dict: Optional[bool] = None, hidden_states: Optional[torch.FloatTensor] = None, ) -> Union[tuple, ImageClassifierOutput]: r""" bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`): Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). 
Returns: Examples: ```python >>> from transformers import AutoImageProcessor, ViTForMaskedImageModeling >>> import torch >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k") >>> model = ViTForMaskedImageModeling.from_pretrained("google/vit-base-patch16-224-in21k") >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2 >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values >>> # create random boolean mask of shape (batch_size, num_patches) >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool() >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction >>> list(reconstructed_pixel_values.shape) [1, 3, 224, 224] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride): raise ValueError( "When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that " "the reconstructed image has the same dimensions as the input." f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}." 
) if not stage_manager.is_first_stage(): assert ( hidden_states is not None ), f"Current stage is {stage_manager.stage}, hidden_states should not be None" outputs = self.vit( pixel_values, bool_masked_pos=bool_masked_pos, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, hidden_states=hidden_states, ) if not stage_manager.is_last_stage(): return outputs else: sequence_output = outputs[0] # Reshape to (batch_size, num_channels, height, width) sequence_output = sequence_output[:, 1:] batch_size, sequence_length, num_channels = sequence_output.shape height = width = math.floor(sequence_length**0.5) sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width) # Reconstruct pixel values reconstructed_pixel_values = self.decoder(sequence_output) masked_im_loss = None if bool_masked_pos is not None: size = self.config.image_size // self.config.patch_size bool_masked_pos = bool_masked_pos.reshape(-1, size, size) mask = ( bool_masked_pos.repeat_interleave(self.config.patch_size, 1) .repeat_interleave(self.config.patch_size, 2) .unsqueeze(1) .contiguous() ) reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none") masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels if not return_dict: output = (reconstructed_pixel_values,) + outputs[1:] return ((masked_im_loss,) + output) if masked_im_loss is not None else output return MaskedImageModelingOutput( loss=masked_im_loss, reconstruction=reconstructed_pixel_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) return pp_forward def get_vit_flash_self_attention_forward(): from transformers.models.vit.modeling_vit import ViTSelfAttention def forward( self: ViTSelfAttention, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, 
output_attentions: bool = False, ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: assert head_mask is None, "head_mask is not supported for FlashAttention" mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) dropout_p = self.dropout_prob if self.training else 0.0 context_layer = ColoAttention.attention(query_layer, key_layer, value_layer, dropout_p=dropout_p) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, None) if output_attentions else (context_layer,) return outputs return forward def get_jit_fused_vit_output_forward(): from transformers.models.vit.modeling_vit import ViTOutput def forward(self: ViTOutput, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout_add(hidden_states, input_tensor, self.dropout.p, self.dropout.training) return hidden_states return forward def get_jit_fused_vit_intermediate_forward(): from colossalai.kernel.jit.bias_gelu import GeLUFunction as JitGeLUFunction def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states, bias = self.dense(hidden_states) hidden_states = JitGeLUFunction.apply(hidden_states, bias) return hidden_states return forward
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/falcon.py
colossalai/shardformer/modeling/falcon.py
import warnings from typing import List, Optional, Tuple, Union import torch import torch.distributed as dist from torch.distributed import ProcessGroup from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from transformers.models.falcon.modeling_falcon import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, build_alibi_tensor, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.shard import ShardConfig from ..layer import cross_entropy_1d def build_falcon_alibi_tensor_fn(process_group: ProcessGroup) -> torch.Tensor: def build_falcon_alibi_tensor( self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype ) -> torch.Tensor: """ Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value `softmax(l+a) = softmax(l)`. Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742 TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly. Args: Returns tensor shaped (batch_size * num_heads, 1, max_seq_len) attention_mask (`torch.Tensor`): Token-wise attention mask, this should be of shape (batch_size, max_seq_len). 
num_heads (`int`, *required*): number of heads dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`): dtype of the output tensor """ import math if dist.is_initialized(): world_size = dist.get_world_size(process_group) num_heads = num_heads * world_size batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor( 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor( 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32, ) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange( 1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32 ) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) # Note: alibi will added to the attention bias that will be applied to the query, key product of attention # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length) # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) # => the query_length dimension will then be broadcasted correctly # This is more or less identical to T5's relative position bias: # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None] * arange_tensor if dist.is_initialized(): num_heads_per_rank = int(num_heads / dist.get_world_size(process_group)) offset = dist.get_rank(process_group) * num_heads_per_rank alibi = alibi.view(batch_size, num_heads, 1, seq_length) alibi = alibi[:, offset : 
num_heads_per_rank + offset, :, :] return alibi.reshape(batch_size * num_heads_per_rank, 1, seq_length).to(dtype) else: return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) return build_falcon_alibi_tensor def get_tp_falcon_decoder_layer_forward(): from transformers.models.falcon.modeling_falcon import FalconDecoderLayer, dropout_add def forward( self: FalconDecoderLayer, hidden_states: torch.Tensor, alibi: Optional[torch.Tensor], attention_mask: torch.Tensor, position_ids: Optional[torch.LongTensor] = None, layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, head_mask: Optional[torch.Tensor] = None, use_cache: bool = False, output_attentions: bool = False, cache_position: Optional[torch.LongTensor] = None, position_embeddings: Optional[ Tuple[torch.Tensor, torch.Tensor] ] = None, # Add cache_position and position_embeddings args for v4.51.3 transformers **kwargs, ): if "padding_mask" in kwargs: warnings.warn( "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" ) residual = hidden_states # same as v4.51.3 transformers if self.config.new_decoder_architecture and self.config.num_ln_in_parallel_attn == 2: attention_layernorm_out = self.ln_attn(hidden_states) mlp_layernorm_out = self.ln_mlp(hidden_states) else: attention_layernorm_out = self.input_layernorm(hidden_states) # Self attention. 
attn_outputs = self.self_attention( attention_layernorm_out, layer_past=layer_past, attention_mask=attention_mask, position_ids=position_ids, alibi=alibi, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, position_embeddings=position_embeddings, ) attention_output = attn_outputs[0] if not self.config.new_decoder_architecture: if self.config.parallel_attn: mlp_layernorm_out = attention_layernorm_out else: residual = dropout_add( attention_output, residual, self.config.attention_dropout, training=self.training ) mlp_layernorm_out = self.post_attention_layernorm(residual) # v4.51.3 transformers mlp if ( self.config.new_decoder_architecture and self.config.parallel_attn and self.config.num_ln_in_parallel_attn == 1 ): mlp_layernorm_out = attention_layernorm_out outputs = attn_outputs[1:] # MLP. mlp_output = self.mlp(mlp_layernorm_out) if self.config.new_decoder_architecture or self.config.parallel_attn: mlp_output = mlp_output + attention_output output = dropout_add(mlp_output, residual, self.config.hidden_dropout, training=self.training) if use_cache: outputs = (output,) + outputs else: outputs = (output,) + outputs[1:] return outputs # hidden_states, present, attentions return forward class FalconPipelineForwards: """ This class serves as a micro library for falcon pipeline forwards. 
""" @staticmethod def falcon_model_forward( self: FalconModel, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: # Add cache_position and position_embeddings args for v4.51.3 transformers logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False logger.warning_once("past_key_values is not supported for pipeline models at the moment.") past_key_values = None return_dict = return_dict if return_dict is not None else self.config.use_return_dict # case: First stage of training if stage_manager.is_first_stage(): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either 
input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) hidden_states = inputs_embeds else: input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None # Compute alibi tensor: check build_alibi_tensor documentation # alibi calculation is same as v4.51.3 transformers. alibi = None past_key_values_length = 0 batch_size, seq_length, _ = hidden_states.shape if self.use_alibi: mask = ( torch.ones( (batch_size, seq_length + past_key_values_length), device=inputs_embeds.device, dtype=torch.long ) if attention_mask is None else attention_mask ) alibi = build_alibi_tensor(mask, self.num_heads, dtype=hidden_states.dtype) if cache_position is None: cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=hidden_states.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) # use new version of causal mask construction. # In v4.51.3 version, sdpa, egaer and flash attention are merged into one class. 
causal_mask = self._update_causal_mask( attention_mask, hidden_states, cache_position, past_key_values, output_attentions, head_mask, alibi ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x N x N # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) # v4.51.3 create position embeddings to be shared across the decoder layers position_embeddings = self.rotary_emb(hidden_states, position_ids) start_idx, end_idx = stage_index[0], stage_index[1] # keep past_key_values arg same with v4.51.3 transformers for i, block in enumerate(self.h[start_idx:end_idx], start=start_idx): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, alibi, causal_mask, position_ids, head_mask[i], past_key_values, use_cache, output_attentions, cache_position, position_embeddings, ) else: outputs = block( hidden_states, layer_past=past_key_values, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = outputs[0] if use_cache is True: outputs[1] if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if stage_manager.is_last_stage(): # Add last hidden state hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if stage_manager.is_last_stage(): if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, 
hidden_states=all_hidden_states, attentions=all_self_attentions, ) else: # always return dict for imediate stage return {"hidden_states": hidden_states} @staticmethod def falcon_for_causal_lm_forward( self: FalconForCausalLM, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = FalconPipelineForwards.falcon_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) past_key_values = None if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() batch_size, seq_length, vocab_size = shift_logits.shape # Flatten the tokens loss_fct = CrossEntropyLoss() if shard_config.enable_tensor_parallelism and shard_config.parallel_output: new_vocab_size = shift_logits.shape[-1] shift_logits = shift_logits.view(-1, new_vocab_size) shift_labels = shift_labels.view(-1) loss = cross_entropy_1d( shift_logits, shift_labels, process_group=shard_config.tensor_parallel_process_group, vocab_size=self.lm_head.out_features, dtype=self.transformer.dtype, ) else: loss = loss_fct( shift_logits.view(batch_size * seq_length, vocab_size), 
shift_labels.view(batch_size * seq_length), ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def falcon_for_sequence_classification_forward( self: FalconForSequenceClassification, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = FalconPipelineForwards.falcon_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) past_key_values = None if stage_manager.is_last_stage(): batch_size = hidden_states.shape[0] hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def falcon_for_token_classification_forward( self: FalconForTokenClassification, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, 
stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = FalconPipelineForwards.falcon_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) past_key_values = None if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: batch_size, seq_length = labels.shape loss_fct = CrossEntropyLoss() loss = loss_fct( logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length) ) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return 
TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def falcon_for_question_answering_forward( self: FalconForQuestionAnswering, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False outputs = FalconPipelineForwards.falcon_model_forward( self.transformer, input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) if stage_manager.is_last_stage(): sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index)
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/chatglm2.py
colossalai/shardformer/modeling/chatglm2.py
""" PyTorch ChatGLM model. """ from typing import List, Optional, Tuple import torch import torch.utils.checkpoint from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer import ShardConfig from colossalai.shardformer.layer import ColoAttention from colossalai.shardformer.layer._operation import ( all_to_all_comm, gather_sp_output, is_share_sp_tp, split_forward_gather_backward, ) from ..layer import dist_cross_entropy def get_flash_core_attention_forward(): from .chatglm2_6b.modeling_chatglm import CoreAttention def forward(self: CoreAttention, query_layer, key_layer, value_layer, attention_mask): query_layer, key_layer, value_layer = [k.permute(1, 2, 0, 3) for k in [query_layer, key_layer, value_layer]] context_layer = ColoAttention.attention(query_layer, key_layer, value_layer, **attention_mask) context_layer = context_layer.permute(2, 0, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,) context_layer = context_layer.reshape(*new_context_layer_shape) return context_layer return forward def get_jit_fused_glm_block_forward(): from .chatglm2_6b.modeling_chatglm import GLMBlock def forward( self: GLMBlock, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True, ): # hidden_states: [s, b, h] # Layer norm at the beginning of the transformer layer. layernorm_output = self.input_layernorm(hidden_states) # Self attention. attention_output, kv_cache = self.self_attention( layernorm_output, attention_mask, rotary_pos_emb, kv_cache=kv_cache, use_cache=use_cache, ) # Residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = hidden_states layernorm_input = self.dropout_add(attention_output, residual, self.hidden_dropout, self.training) # Layer norm post the self attention. 
layernorm_output = self.post_attention_layernorm(layernorm_input) # MLP. mlp_output = self.mlp(layernorm_output) # Second residual connection. if self.apply_residual_connection_post_layernorm: residual = layernorm_output else: residual = layernorm_input output = self.dropout_add(mlp_output, residual, self.hidden_dropout, self.training) return output, kv_cache return forward class ChatGLMPipelineForwards: """ This class serves as a micro library for ChatGLM model forwards under pipeline parallelism. """ @staticmethod def chatglm_model_forward( self: "ChatGLMModel", input_ids, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.BoolTensor] = None, full_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, force_sp_output_gather: Optional[bool] = True, ): logger = logging.get_logger(__name__) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if past_key_values: logger.warning_once("Non-empty past_key_values is not supported for pipeline models at the moment.") past_key_values = None if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False if stage_manager.is_first_stage(): batch_size, seq_length = input_ids.shape if inputs_embeds is None: inputs_embeds = self.embedding(input_ids) hidden_states = inputs_embeds else: seq_length, batch_size = hidden_states.shape[:2] if self.pre_seq_len is not None: if past_key_values is None: past_key_values = self.get_prompt( batch_size=batch_size, device=input_ids.device, dtype=inputs_embeds.dtype, ) if attention_mask is not None: attention_mask = torch.cat( [ attention_mask.new_ones((batch_size, self.pre_seq_len)), attention_mask, ], dim=-1, ) if shard_config.enable_flash_attention: mask_shape = (batch_size, 1, seq_length, seq_length) full_attention_mask: dict = ColoAttention.prepare_attn_kwargs( mask_shape, hidden_states.dtype, hidden_states.device, q_padding_mask=attention_mask, is_causal=True, ) else: if full_attention_mask is None: if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1): full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask) # Support SP + PP sp_size = shard_config.sequence_parallel_size sp_mode = shard_config.sequence_parallelism_mode sp_group = shard_config.sequence_parallel_process_group # For generating full positions ids (the states will be gathered along the seq dim before attention fwd). 
if sp_mode != "ring_attn" and not stage_manager.is_first_stage(): seq_length *= sp_size # Rotary positional embeddings rotary_pos_emb = self.rotary_pos_emb(self.seq_length) if position_ids is not None: rotary_pos_emb = rotary_pos_emb[position_ids] else: rotary_pos_emb = rotary_pos_emb[None, :seq_length] rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous() if not past_key_values: past_key_values = [None for _ in range(self.num_layers)] presents = () if use_cache else None if self.encoder.gradient_checkpointing and self.encoder.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False all_self_attentions = None all_hidden_states = () if output_hidden_states else None start_idx, end_idx = stage_index[0], stage_index[1] # Keep the input split across all PP stages if stage_manager.is_first_stage(): if shard_config.enable_sequence_parallelism: if sp_mode == "split_gather": hidden_states = split_forward_gather_backward( hidden_states, dim=0, process_group=sp_group, ) elif shard_config.sequence_parallelism_mode == "all_to_all": hidden_states = split_forward_gather_backward( hidden_states, dim=0, process_group=shard_config.sequence_parallel_process_group, grad_scale=1 / shard_config.sequence_parallel_size, ) for idx in range(start_idx, end_idx): layer = self.encoder._get_layer(idx) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.encoder.gradient_checkpointing and self.encoder.training: layer_ret = torch.utils.checkpoint.checkpoint( layer, hidden_states, full_attention_mask, rotary_pos_emb, past_key_values[idx], use_cache, ) else: layer_ret = layer( hidden_states, full_attention_mask, rotary_pos_emb, kv_cache=past_key_values[idx], use_cache=use_cache, ) hidden_states, kv_cache = layer_ret if use_cache: presents = presents + (kv_cache,) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if 
stage_manager.is_last_stage(): # final layer_norm if self.encoder.post_layer_norm: hidden_states = self.encoder.final_layernorm(hidden_states) # Gather seq-wise in the final output stage if shard_config.enable_sequence_parallelism: sp_mode = shard_config.sequence_parallelism_mode if (not shard_config.parallel_output) or force_sp_output_gather or is_share_sp_tp(sp_mode): hidden_states = gather_sp_output(hidden_states, shard_config, sp_dim=0) if not return_dict: return tuple( v for v in [ hidden_states, presents, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) else: return {"hidden_states": hidden_states} @staticmethod def chatglm_for_conditional_generation_forward( self: "ChatGLMForConditionalGeneration", input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, return_last_logit: Optional[bool] = False, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): logging.get_logger(__name__) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = ChatGLMPipelineForwards.chatglm_model_forward( self.transformer, input_ids=input_ids, position_ids=position_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, 
output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, force_sp_output_gather=False, ) if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] if return_last_logit: hidden_states = hidden_states[-1:] lm_logits = self.transformer.output_layer(hidden_states) lm_logits = lm_logits.transpose(0, 1).contiguous() loss = None if labels is not None: # ChatGLM doesn't have lm_head split enable_tp = shard_config.enable_tensor_parallelism shard_config.enable_tensor_parallelism = False loss = dist_cross_entropy( labels, lm_logits, shard_config, self.transformer.output_layer.out_features, lm_logits.dtype, ) shard_config.enable_tensor_parallelism = enable_tp if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: return transformer_outputs def get_chatglm_sequence_parallel_forward_fn(shard_config: ShardConfig, sp_mode, sp_size, sp_group): logger = logging.get_logger(__name__) def forward( self, input_ids, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.BoolTensor] = None, full_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, force_sp_output_gather: Optional[bool] = True, ): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if 
return_dict is not None else self.config.use_return_dict batch_size, seq_length = input_ids.shape if inputs_embeds is None: inputs_embeds = self.embedding(input_ids) if self.pre_seq_len is not None: if past_key_values is None: past_key_values = self.get_prompt( batch_size=batch_size, device=input_ids.device, dtype=inputs_embeds.dtype, ) if attention_mask is not None: attention_mask = torch.cat( [ attention_mask.new_ones((batch_size, self.pre_seq_len)), attention_mask, ], dim=-1, ) if shard_config.enable_flash_attention: mask_shape = (batch_size, 1, seq_length, seq_length) full_attention_mask: dict = ColoAttention.prepare_attn_kwargs( mask_shape, hidden_states.dtype, hidden_states.device, q_padding_mask=attention_mask, is_causal=True, ) else: if full_attention_mask is None: if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1): full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask) # Rotary positional embeddings rotary_pos_emb = self.rotary_pos_emb(self.seq_length) if position_ids is not None: rotary_pos_emb = rotary_pos_emb[position_ids] else: rotary_pos_emb = rotary_pos_emb[None, :seq_length] rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous() if sp_mode in ["all_to_all"] and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with sp mode `{sp_mode}`. Setting `use_cache=False`..." ) use_cache = False if sp_mode in ["all_to_all"] and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with sp mode `{sp_mode}`. Setting `use_cache=False`..." ) use_cache = False # Run encoder. 
# [seq_len, batch_size, hidden_size] -> [seq_len/TP_size, batch_size, hidden_size] if sp_mode in ["split_gather"]: inputs_embeds = split_forward_gather_backward( inputs_embeds, dim=0, process_group=sp_group, fp8_communication=shard_config.fp8_communication, ) elif sp_mode == "all_to_all": inputs_embeds = split_forward_gather_backward( inputs_embeds, dim=0, process_group=sp_group, grad_scale=1 / sp_size, fp8_communication=shard_config.fp8_communication, ) hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb, kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states, ) if shard_config.enable_sequence_parallelism: if (not shard_config.parallel_output) or force_sp_output_gather or is_share_sp_tp(sp_mode): hidden_states = gather_sp_output(hidden_states, shard_config, sp_dim=0) if not return_dict: return tuple( v for v in [ hidden_states, presents, all_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) return forward def get_chatglm_sequence_parallel_attention_forward(shard_config: ShardConfig, sp_mode, sp_size, sp_group): from .chatglm2_6b.modeling_chatglm import apply_rotary_pos_emb, split_tensor_along_last_dim def forward( self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True, ): if sp_mode is not None: assert sp_mode in ["all_to_all", "split_gather"], "Invalid sp_mode" assert (sp_size is not None) and ( sp_group is not None ), "Must specify sp_size and sp_group for sequence parallel" mixed_x_layer = self.query_key_value(hidden_states) if self.multi_query_attention: (query_layer, key_layer, value_layer) = mixed_x_layer.split( [ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * 
self.hidden_size_per_attention_head, self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head, ], dim=-1, ) query_layer = query_layer.view( query_layer.size()[:-1] + ( self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ) ) key_layer = key_layer.view( key_layer.size()[:-1] + ( self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head, ) ) value_layer = value_layer.view( value_layer.size()[:-1] + ( self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head, ) ) else: new_tensor_shape = mixed_x_layer.size()[:-1] + ( self.num_attention_heads_per_partition, 3 * self.hidden_size_per_attention_head, ) mixed_x_layer = mixed_x_layer.view(*new_tensor_shape) # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn] (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3) # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": sq, bs, _, _ = value_layer.size() query_layer = query_layer.reshape(sq, bs, -1) key_layer = key_layer.reshape(sq, bs, -1) value_layer = value_layer.reshape(sq, bs, -1) query_layer = all_to_all_comm( query_layer, sp_group, gather_dim=0, fp8_communication=shard_config.fp8_communication, ) key_layer = all_to_all_comm( key_layer, sp_group, gather_dim=0, fp8_communication=shard_config.fp8_communication, ) value_layer = all_to_all_comm( value_layer, sp_group, gather_dim=0, fp8_communication=shard_config.fp8_communication, ) query_layer = query_layer.view( sq * sp_size, bs, self.num_attention_heads_per_partition // sp_size, self.hidden_size_per_attention_head, ).contiguous() key_layer = key_layer.view( sq * sp_size, bs, self.num_attention_heads_per_partition // sp_size, self.hidden_size_per_attention_head, ).contiguous() value_layer = value_layer.view( sq * sp_size, bs, self.num_attention_heads_per_partition // sp_size, self.hidden_size_per_attention_head, ).contiguous() # apply relative positional encoding (rotary 
embedding) if rotary_pos_emb is not None: query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb) key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb) # adjust key and value for inference if kv_cache is not None: cache_k, cache_v = kv_cache key_layer = torch.cat((cache_k, key_layer), dim=0) value_layer = torch.cat((cache_v, value_layer), dim=0) if use_cache: kv_cache = (key_layer, value_layer) else: kv_cache = None if self.multi_query_attention: key_layer = key_layer.unsqueeze(-2) key_layer = key_layer.expand( -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, ) key_layer = key_layer.contiguous().view( key_layer.size()[:2] + ( self.num_attention_heads_per_partition, self.hidden_size_per_attention_head, ) ) value_layer = value_layer.unsqueeze(-2) value_layer = value_layer.expand( -1, -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, ) value_layer = value_layer.contiguous().view( value_layer.size()[:2] + ( self.num_attention_heads_per_partition // sp_size, self.hidden_size_per_attention_head, ) ) # ================================== # core attention computation # ================================== context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask) if sp_mode == "all_to_all": context_layer = all_to_all_comm( context_layer, sp_group, gather_dim=2, scatter_dim=0, fp8_communication=shard_config.fp8_communication, ) # ================= # Output. 
[sq, b, h] # ================= output = self.dense(context_layer) return output, kv_cache return forward def get_flash_attention_forward_for_chat_glm_model(): from .chatglm2_6b.modeling_chatglm import ChatGLMModel def forward( self: ChatGLMModel, input_ids, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.BoolTensor] = None, full_attention_mask: Optional[torch.BoolTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, seq_length = input_ids.shape if inputs_embeds is None: inputs_embeds = self.embedding(input_ids) if self.pre_seq_len is not None: if past_key_values is None: past_key_values = self.get_prompt( batch_size=batch_size, device=input_ids.device, dtype=inputs_embeds.dtype ) if attention_mask is not None: attention_mask = torch.cat( [attention_mask.new_ones((batch_size, self.pre_seq_len)), attention_mask], dim=-1 ) mask_shape = (batch_size, 1, seq_length, seq_length) full_attention_mask: dict = ColoAttention.prepare_attn_kwargs( mask_shape, inputs_embeds.dtype, inputs_embeds.device, q_padding_mask=attention_mask, is_causal=True, ) # Rotary positional embeddings rotary_pos_emb = self.rotary_pos_emb(self.seq_length) if position_ids is not None: rotary_pos_emb = rotary_pos_emb[position_ids] else: rotary_pos_emb = rotary_pos_emb[None, :seq_length] rotary_pos_emb = rotary_pos_emb.transpose(0, 1).contiguous() # Run encoder. 
hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder( inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb, kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states, ) if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) return forward
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/qwen3.py
colossalai/shardformer/modeling/qwen3.py
# Modifed from qwen2 modeling import math from typing import List, Optional, Tuple, Union import torch from torch import nn from transformers.modeling_attn_mask_utils import ( _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa, ) from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast, ) from transformers.models.qwen3.modeling_qwen3 import ( Qwen3Attention, Qwen3ForCausalLM, Qwen3ForSequenceClassification, Qwen3Model, apply_rotary_pos_emb, repeat_kv, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer._operation import all_to_all_comm, split_forward_gather_backward from colossalai.shardformer.shard import ShardConfig from ..layer import ColoAttention, dist_cross_entropy from ..layer._operation import gather_sp_output from ..layer.utils import is_share_sp_tp class Qwen3PipelineForwards: """ This class serves as a micro library for forward function substitution of Qwen3 models under pipeline setting. 
""" @staticmethod def qwen3_model_forward( self: Qwen3Model, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, force_sp_output_gather: bool = True, ) -> Union[Tuple, BaseModelOutputWithPast]: logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if stage_manager.is_first_stage(): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds else: input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape device = hidden_states.device seq_length_with_past = 
seq_length past_key_values_length = 0 # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length # Support SP + PP sp_size = shard_config.sequence_parallel_size sp_group = shard_config.sequence_parallel_process_group sp_mode = shard_config.sequence_parallelism_mode # For generating full positions ids (the states will be gathered along the seq dim before attention fwd). 
if sp_mode != "ring_attn" and not stage_manager.is_first_stage(): seq_length *= sp_size if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() # embed positions, for the first stage, hidden_states is the input embeddings, # for the other stages, hidden_states is the output of the previous stage if shard_config.enable_flash_attention: # in this case, attention_mask is a dict rather than a tensor mask_shape = (batch_size, 1, seq_length, seq_length_with_past) attention_mask = ColoAttention.prepare_attn_kwargs( mask_shape, hidden_states.dtype, hidden_states.device, q_padding_mask=attention_mask, is_causal=True, ) else: if self.config._attn_implementation == "flash_attention_2": # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None elif self.config._attn_implementation == "sdpa" and not output_attentions: # output_attentions=True can not be supported when using SDPA, and we fall back on # the manual implementation that requires a 4D causal mask in all cases. 
attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length, ) else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), hidden_states, past_key_values_length, sliding_window=self.config.sliding_window, ) if stage_manager.is_first_stage(): if shard_config.enable_sequence_parallelism: if is_share_sp_tp(sp_mode): hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=sp_group, ) elif sp_mode == "all_to_all": hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=sp_group, grad_scale=1 / sp_size, ) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None position_embeddings = self.rotary_emb(hidden_states, position_ids) start_idx, end_idx = stage_index[0], stage_index[1] num_ckpt_layers = 0 if self.gradient_checkpointing and self.training: num_ckpt_layers = end_idx - start_idx # TODO: We can replace `gradient_checkpointing_enable` fn and initialize a gradient_checkpointing (List[bool]) for each layer if shard_config.gradient_checkpoint_config is not None: num_ckpt_layers = shard_config.gradient_checkpoint_config.get_num_ckpt_layers( stage=stage_manager.stage, num_stages=stage_manager.num_stages, num_layers=end_idx - start_idx, model_chunk_id=(stage_manager.model_chunk_id if stage_manager.is_interleave else 0), num_model_chunks=stage_manager.num_model_chunks, ) assert num_ckpt_layers <= end_idx - start_idx for idx, decoder_layer in enumerate(self.layers[start_idx:end_idx], start=start_idx): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_values[idx] if past_key_values is not None else None if idx - start_idx < num_ckpt_layers: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, 
position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if stage_manager.is_last_stage(): hidden_states = self.norm(hidden_states) if shard_config.enable_sequence_parallelism: if (not shard_config.parallel_output) or force_sp_output_gather or is_share_sp_tp(sp_mode): hidden_states = gather_sp_output(hidden_states, shard_config) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if stage_manager.is_last_stage(): if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) # always return dict for imediate stage return {"hidden_states": hidden_states} @staticmethod def qwen3_for_causal_lm_forward( self: Qwen3ForCausalLM, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, **kwargs, ): r""" Args: 
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, Qwen2ForCausalLM >>> model = Qwen2ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you consciours? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." ```""" logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = Qwen3PipelineForwards.qwen3_model_forward( self.model, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, force_sp_output_gather=False, ) past_key_values = None if stage_manager.is_last_stage(): hidden_states = outputs[0] if hidden_states.shape[1] == 2: pass logits = self.lm_head(hidden_states) loss = None if labels is not None: loss = dist_cross_entropy(labels, logits, shard_config, self.lm_head.out_features, logits.dtype) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: hidden_states = outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def qwen3_for_sequence_classification_forward( self: Qwen3ForSequenceClassification, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: 
Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = Qwen3PipelineForwards.qwen3_model_forward( self.model, input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) if input_ids is not None: batch_size = input_ids.shape[0] elif inputs_embeds is not None: batch_size = inputs_embeds.shape[0] else: batch_size = hidden_states.shape[0] if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if 
self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} def get_qwen3_flash_attention_forward(shard_config: ShardConfig, sp_mode=None, sp_size=None, sp_group=None): def forward( self: Qwen3Attention, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if sp_mode is not None: assert sp_mode in ["all_to_all", "split_gather", "ring"], "Invalid sp_mode" assert (sp_size is not None) and ( sp_group is not None ), "Must specify sp_size and 
sp_group for sequence parallel" bsz, q_len, _ = hidden_states.size() # sp: modify sp_len when sequence parallel mode is ring if sp_mode in ["split_gather", "ring"]: q_len *= sp_size query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": query_states = all_to_all_comm(query_states, sp_group, fp8_communication=shard_config.fp8_communication) key_states = all_to_all_comm(key_states, sp_group, fp8_communication=shard_config.fp8_communication) value_states = all_to_all_comm(value_states, sp_group, fp8_communication=shard_config.fp8_communication) bsz, q_len, _ = query_states.size() query_states = self.q_norm(query_states.view(bsz, q_len, -1, self.head_dim)).transpose(1, 2) key_states = self.k_norm(key_states.view(bsz, q_len, -1, self.head_dim)).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: if self.layer_idx is None: raise ValueError( f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} " "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class " "with a layer index." ) kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx) # Because the input can be padded, the absolute sequence length depends on the max position id. 
cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) if past_key_value is not None: # Activate slicing cache only if the config has a value `sliding_windows` attribute cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0 if ( getattr(self.config, "sliding_window", None) is not None and kv_seq_len > self.config.sliding_window and cache_has_contents ): slicing_tokens = 1 - self.config.sliding_window past_key = past_key_value[self.layer_idx][0] past_value = past_key_value[self.layer_idx][1] past_key = past_key[:, :, slicing_tokens:, :].contiguous() past_value = past_value[:, :, slicing_tokens:, :].contiguous() if past_key.shape[-2] != self.config.sliding_window - 1: raise ValueError( f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got" f" {past_key.shape}" ) if attention_mask is not None: attention_mask = attention_mask[:, slicing_tokens:] attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1) cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) if shard_config.enable_flash_attention: assert isinstance(attention_mask, dict), "Flash Attention Error: attention_mask should be a dict." 
attn_output = ColoAttention.attention( query_states, key_states, value_states, dropout_p=0.0 if not self.training else self.attention_dropout, **attention_mask, ) else: attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2).contiguous() if sp_mode == "all_to_all": attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) attn_output = all_to_all_comm( attn_output, sp_group, scatter_dim=1, gather_dim=2, fp8_communication=shard_config.fp8_communication ) else: attn_output = attn_output.reshape(bsz, q_len, -1) attn_output = self.o_proj(attn_output) return attn_output, None return forward def get_qwen3_model_forward_for_flash_attn(shard_config: ShardConfig, sp_mode=None, sp_size=None, sp_group=None): logger = logging.get_logger(__name__) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, 
output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, force_sp_output_gather: bool = True, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() # embed positions hidden_states = inputs_embeds if shard_config.enable_flash_attention: # in this case, attention_mask is a dict rather than a tensor mask_shape = (batch_size, 1, seq_length, seq_length_with_past) attention_mask = ColoAttention.prepare_attn_kwargs( 
mask_shape, hidden_states.dtype, hidden_states.device, q_padding_mask=attention_mask, is_causal=True, ) else: attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, sliding_window=self.config.sliding_window, ) if (self.gradient_checkpointing or sp_mode in ["ring", "all_to_all"]) and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None position_embeddings = self.rotary_emb(hidden_states, position_ids) if sp_mode in ["ring", "split_gather"]: hidden_states = split_forward_gather_backward( hidden_states, 1, sp_group, fp8_communication=shard_config.fp8_communication ) elif sp_mode == "all_to_all": hidden_states = split_forward_gather_backward( hidden_states, 1, sp_group, 1 / sp_size, fp8_communication=shard_config.fp8_communication ) for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states,
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/gptj.py
colossalai/shardformer/modeling/gptj.py
from typing import Dict, List, Optional, Tuple, Union import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.cache_utils import Cache from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, ) from transformers.models.gptj.modeling_gptj import ( GPTJForCausalLM, GPTJForQuestionAnswering, GPTJForSequenceClassification, GPTJModel, apply_rotary_pos_emb, get_embed_positions, ) from transformers.utils import is_torch_fx_proxy, logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer import ColoAttention from colossalai.shardformer.layer._operation import gather_forward_split_backward, split_forward_gather_backward from colossalai.shardformer.shard import ShardConfig logger = logging.get_logger(__name__) def _get_attention_mask( self: GPTJModel, shard_config: ShardConfig, hidden_states: torch.Tensor, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]], attention_mask: Optional[torch.FloatTensor], use_flash_attention_2: bool = False, ) -> Optional[Union[torch.Tensor, dict]]: batch_size, seq_len = hidden_states.shape[:2] past_key_values_length = 0 if past_key_values is not None and past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[2] if shard_config.enable_flash_attention: if attention_mask is not None: attention_mask = attention_mask.view(batch_size, -1) attention_mask = ColoAttention.prepare_attn_kwargs( (batch_size, 1, seq_len, seq_len + past_key_values_length), hidden_states.dtype, hidden_states.device, attention_mask, is_causal=True, ) elif use_flash_attention_2 and attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = attention_mask.view(batch_size, -1) # We create a 3D attention mask from a 2D tensor mask. 
# Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. attention_mask = attention_mask[:, None, None, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min return attention_mask class GPTJPipelineForwards: """ This class serves as a micro library for forward function substitution of GPTJ models under pipeline setting. """ @staticmethod def gptj_model_forward( self: GPTJModel, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Dict, Tuple, BaseModelOutputWithPast]: # This function is modified on the v4.51.3 transformers.models.gptj.modeling_gptj.GPTJModel.forward. 
# Please refer to original code of transformers for more details. # GPTJ has no cross attention in comparison to GPT2 return_dict = return_dict if return_dict is not None else self.config.use_return_dict logger = logging.get_logger(__name__) # Preprocess passed in arguments # TODO(baizhou): left the recording kv-value tensors as () or None type, this feature may be added in the future. if past_key_values: logger.warning_once("Non-empty past_key_values is not supported for pipeline models at the moment.") past_key_values = None if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False if stage_manager.is_first_stage(): if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") elif input_ids is not None: batch_size, seq_length = input_ids.shape input_shape = input_ids.size() input_ids = input_ids.view(-1, seq_length) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") else: if hidden_states is None: raise ValueError("hidden_states shouldn't be None for stages other than the first stage.") input_shape = hidden_states.size()[:-1] batch_size, seq_length = input_shape[0], input_shape[1] if stage_manager.is_first_stage(): if inputs_embeds is None: inputs_embeds = self.wte(input_ids) hidden_states = inputs_embeds if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, seq_length) token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = 
self.drop(hidden_states) seq_length = hidden_states.shape[1] if cache_position is None: past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_key_values_length, past_key_values_length + seq_length, device=hidden_states.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) causal_mask = self._update_causal_mask( attention_mask, hidden_states, cache_position, past_key_values, output_attentions ) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x num_attention_heads x N x N # head_mask has shape n_layer x batch x num_attention_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) output_shape = (-1, seq_length, hidden_states.size(-1)) next_decoder_cache = None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None # split the input tensor along sequence dimension # [batch_size, seq_len, hidden_size] -> [batch_size, seq_len/TP_size, hidden_size] if shard_config.enable_sequence_parallelism: hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, fp8_communication=shard_config.fp8_communication, ) # Going through held blocks. 
start_idx, end_idx = stage_index[0], stage_index[1] for i in range(start_idx, end_idx): block = self.h[i] torch.cuda.set_device(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if attention_mask is not None: attention_mask = attention_mask.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, None, causal_mask, position_ids, head_mask[i], use_cache, output_attentions, cache_position, ) else: outputs = block( hidden_states=hidden_states, layer_past=past_key_values, attention_mask=causal_mask, position_ids=position_ids, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, cache_position=cache_position, ) hidden_states = outputs[0] # When sequence parallelism done, gather the output tensor in forward and split it in backward if shard_config.enable_sequence_parallelism: hidden_states = gather_forward_split_backward( hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, fp8_communication=shard_config.fp8_communication, ) if stage_manager.is_last_stage(): hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = next_decoder_cache if use_cache else None if stage_manager.is_last_stage(): if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, ) else: # always return dict for intermediate stage return {"hidden_states": hidden_states} 
@staticmethod def gptj_causallm_model_forward( self: GPTJForCausalLM, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Dict, Tuple, CausalLMOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` # This function is modified on the basis of transformers.models.gptj.modeling_gptj.GPTJForCausalLM.forward. # Please refer to original code of transformers for more details. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = GPTJPipelineForwards.gptj_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) # If not at the last stage, return hidden_states as in GPTJModel if not stage_manager.is_last_stage(): return {"hidden_states": transformer_outputs["hidden_states"]} hidden_states = transformer_outputs[0] # Set device for model parallelism if self.model_parallel: torch.cuda.set_device(self.transformer.first_device) hidden_states = hidden_states.to(self.lm_head.weight.device) # v4.51.3 tranformers loss calculation # make sure sampling in fp16 works correctly and # compute loss in fp32 to match with mesh-tf version # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179 lm_logits = self.lm_head(hidden_states).to(torch.float32) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(lm_logits.device) # Flatten the tokens loss = self.loss_function( lm_logits, labels, vocab_size=self.config.vocab_size, ) loss = loss.to(hidden_states.dtype) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def gptj_for_sequence_classification_forward( self: 
GPTJForSequenceClassification, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor]]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Dict, Tuple, SequenceClassifierOutputWithPast]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). # This function is modified on the v4.51.3 transformers.models.gptj.modeling_gptj.GPTJForSequenceClassification.forward. # Please refer to original code of transformers for more details. """ logger = logging.get_logger(__name__) if input_ids is not None: batch_size, _ = input_ids.shape[:2] else: batch_size, _ = hidden_states.shape[:2] assert ( self.config.pad_token_id is not None or batch_size == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." 
return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = GPTJPipelineForwards.gptj_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) # If not at the last stage, return hidden_states as in GPTJModel if not stage_manager.is_last_stage(): return {"hidden_states": transformer_outputs["hidden_states"]} hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: labels = labels.to(pooled_logits.device) if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def gptj_for_question_answering_forward( self: GPTJForQuestionAnswering, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: 
Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Dict, Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. # This function is modified on the basis of transformers.models.gptj.modeling_gptj.GPTJForQuestionAnswering.forward. # Please refer to original code of transformers for more details. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = GPTJPipelineForwards.gptj_model_forward( self.transformer, input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) # If not at the last stage, return hidden_states as in GPTJModel if not stage_manager.is_last_stage(): return {"hidden_states": outputs["hidden_states"]} sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def 
get_gptj_flash_attention_forward(): from transformers.models.gptj.modeling_gptj import GPTJAttention def forward( self: GPTJAttention, hidden_states: torch.FloatTensor, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[dict] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ) -> Union[ Tuple[torch.Tensor, Tuple[torch.Tensor]], Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]], ]: # This function is modified on the v4.51.3 transformers.models.gptj.modeling_gptj.GPTJAttention.forward. # Please refer to original code of transformers for more details. assert head_mask is None, "head_mask is not supported for FlashAttention" query = self.q_proj(hidden_states) key = self.k_proj(hidden_states) value = self.v_proj(hidden_states) query = self._split_heads(query, self.num_attention_heads, self.head_dim, True) key = self._split_heads(key, self.num_attention_heads, self.head_dim, True) value = self._split_heads(value, self.num_attention_heads, self.head_dim, False) if is_torch_fx_proxy(position_ids) or torch.jit.is_tracing(): # The logic to conditionally copy to GPU could not be traced, so we do this # every time in the torch.fx case embed_positions = get_embed_positions(self.embed_positions, position_ids) else: embed_positions = self._get_embed_positions(position_ids) repeated_position_ids = position_ids.unsqueeze(-1).repeat(1, 1, embed_positions.shape[-1]) sincos = torch.gather(embed_positions, 1, repeated_position_ids) sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1) if self.rotary_dim is not None: k_rot = key[:, :, :, : self.rotary_dim] k_pass = key[:, :, :, self.rotary_dim :] q_rot = query[:, :, :, : self.rotary_dim] q_pass = query[:, :, :, self.rotary_dim :] k_rot = apply_rotary_pos_emb(k_rot, sin, cos) q_rot = apply_rotary_pos_emb(q_rot, sin, cos) key = torch.cat([k_rot, 
k_pass], dim=-1) query = torch.cat([q_rot, q_pass], dim=-1) else: key = apply_rotary_pos_emb(key, sin, cos) query = apply_rotary_pos_emb(query, sin, cos) key = key.permute(0, 2, 1, 3) query = query.permute(0, 2, 1, 3) if layer_past is not None: past_key = layer_past[0] past_value = layer_past[1] key = torch.cat((past_key, key), dim=-2) value = torch.cat((past_value, value), dim=-2) if use_cache is True: # Note that this cast is quite ugly, but is not implemented before ROPE as the original codebase keeps the key in float32 all along the computation. # Reference: https://github.com/kingoflolz/mesh-transformer-jax/blob/f8315e3003033b23f21d78361b288953064e0e76/mesh_transformer/layers.py#L128 present = (key.to(hidden_states.dtype), value) else: present = None dropout_p = self.attn_dropout.p if self.training else 0.0 attn_output = ColoAttention.attention(query, key, value, **attention_mask, dropout_p=dropout_p) attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim) attn_output = self.out_proj(attn_output) attn_output = self.resid_dropout(attn_output) outputs = (attn_output, present, None) return outputs # a, present, (attentions) return forward def gptj_model_forward_for_flash_attention(shard_config: ShardConfig): def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if 
output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if position_ids is not None: position_ids = position_ids.view(-1, input_shape[-1]).long() if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange( past_length, input_shape[-1] + past_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x num_attention_heads x N x N # head_mask has shape n_layer x batch x num_attention_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None:
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/bloom.py
colossalai/shardformer/modeling/bloom.py
import warnings from typing import List, Optional, Tuple, Union import torch import torch.distributed as dist from torch.distributed import ProcessGroup from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from torch.nn import functional as F from transformers.cache_utils import Cache, DynamicCache from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, CausalLMOutputWithPast, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from transformers.models.bloom.modeling_bloom import ( BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, dropout_add, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer._operation import gather_forward_split_backward, split_forward_gather_backward from colossalai.shardformer.shard import ShardConfig from ..layer import dist_cross_entropy logger = logging.get_logger(__name__) def build_bloom_alibi_tensor_fn(process_group: ProcessGroup) -> torch.Tensor: def build_bloom_alibi_tensor( self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype ) -> torch.Tensor: """ Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value `softmax(l+a) = softmax(l)`. Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742 TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly. Args: Returns tensor shaped (batch_size * num_heads, 1, max_seq_len) attention_mask (`torch.Tensor`): Token-wise attention mask, this should be of shape (batch_size, max_seq_len). 
num_heads (`int`, *required*): number of heads dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`): dtype of the output tensor """ import math if dist.is_initialized(): world_size = dist.get_world_size(process_group) num_heads = num_heads * world_size batch_size, seq_length = attention_mask.shape closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) base = torch.tensor( 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor( 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32, ) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange( 1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32 ) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) # Note: alibi will added to the attention bias that will be applied to the query, key product of attention # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length) # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) # => the query_length dimension will then be broadcasted correctly # This is more or less identical to T5's relative position bias: # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] alibi = slopes[..., None] * arange_tensor if dist.is_initialized(): num_heads_per_rank = int(num_heads / dist.get_world_size(process_group)) offset = dist.get_rank(process_group) * num_heads_per_rank alibi = alibi.view(batch_size, num_heads, 1, seq_length) alibi = alibi[:, offset : 
num_heads_per_rank + offset, :, :] return alibi.reshape(batch_size * num_heads_per_rank, 1, seq_length).to(dtype) else: return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) return build_bloom_alibi_tensor class BloomPipelineForwards: """ This class serves as a micro library for bloom pipeline forwards. """ @staticmethod def bloom_model_forward( self: BloomModel, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, Tuple[Tuple[torch.Tensor, torch.Tensor], ...]]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, **deprecated_arguments, ) -> Union[Tuple[torch.Tensor, ...], "BaseModelOutputWithPastAndCrossAttentions"]: logger = logging.get_logger(__name__) if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. 
You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # add warnings here if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False past_key_values = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x N x N # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) # case: First stage of training if stage_manager.is_first_stage(): # check input_ids and inputs_embeds if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) hidden_states = self.word_embeddings_layernorm(inputs_embeds) batch_size, seq_length, _ = inputs_embeds.shape past_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_length, past_length + seq_length, device=inputs_embeds.device) # initialize in the first stage and then pass to the next stage else: input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape past_length = past_key_values.get_seq_length() if past_key_values is not None else 0 if cache_position is None: cache_position = torch.arange(past_length, past_length + seq_length, device=hidden_states.device) # extra recording tensor should be generated in the first stage all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None if self.gradient_checkpointing and self.training and use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # kept for BC (non `Cache` `past_key_values` inputs) return_legacy_cache = False if use_cache and not isinstance(past_key_values, Cache): return_legacy_cache = True if past_key_values is None: past_key_values = DynamicCache() else: past_key_values = DynamicCache.from_legacy_cache(past_key_values) logger.warning_once( "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and " "will be removed in v4.47. 
Please convert your cache or use an appropriate `Cache` class " "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)" ) # Compute alibi tensor: check build_alibi_tensor documentation,build for every stage past_length = 0 seq_length_with_past = seq_length + past_length if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) alibi = self.build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype) # causal_mask is constructed every stage and its input is passed through different stages causal_mask = self._update_causal_mask( attention_mask, hidden_states, cache_position, past_key_values, output_attentions ) # split the input tensor along sequence dimension # [batch_size, seq_len, hidden_size] -> [batch_size, seq_len/TP_size, hidden_size] if shard_config and shard_config.enable_sequence_parallelism: if shard_config.sequence_parallelism_mode == "split_gather": hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, fp8_communication=shard_config.fp8_communication, ) start_idx, end_idx = stage_index[0], stage_index[1] for i, block in enumerate(self.h[start_idx:end_idx], start=start_idx): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, alibi, causal_mask, past_key_values, head_mask[i], use_cache, output_attentions, cache_position, ) else: outputs = block( hidden_states, layer_past=past_key_values, attention_mask=causal_mask, head_mask=head_mask[i], use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, cache_position=cache_position, ) hidden_states = outputs[0] if use_cache: next_decoder_cache = outputs[1] if output_attentions: all_self_attentions = 
all_self_attentions + (outputs[2 if use_cache else 1],) # When sequence parallelism done, gather the output tensor in forward and split it in backward if shard_config and shard_config.enable_sequence_parallelism: if shard_config.sequence_parallelism_mode == "split_gather": hidden_states = gather_forward_split_backward( hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, fp8_communication=shard_config.fp8_communication, ) if stage_manager.is_last_stage(): # Add last hidden state hidden_states = self.ln_f(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) next_cache = next_decoder_cache if use_cache else None if return_legacy_cache: next_cache = next_cache.to_legacy_cache() if stage_manager.is_last_stage(): if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attentions] if v is not None ) # attention_mask is not returned ; presents = past_key_values return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, ) else: # always return dict for imediate stage return {"hidden_states": hidden_states} @staticmethod def bloom_for_causal_lm_forward( self: BloomForCausalLM, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, **deprecated_arguments, ): r""" labels 
(`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ logger = logging.get_logger(__name__) if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = BloomPipelineForwards.bloom_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) past_key_values = None if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states).contiguous() loss = None if labels is not None: loss = dist_cross_entropy( labels, lm_logits, shard_config, self.lm_head.out_features, self.transformer.dtype, ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def bloom_for_sequence_classification_forward( self: BloomForSequenceClassification, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: 
Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, **deprecated_arguments, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ logger = logging.get_logger(__name__) if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = BloomPipelineForwards.bloom_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) past_key_values = None if stage_manager.is_last_stage(): batch_size = hidden_states.shape[0] # update batch size hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 sequence_lengths = sequence_lengths % input_ids.shape[-1] sequence_lengths = sequence_lengths.to(logits.device) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def bloom_for_token_classification_forward( self: BloomForTokenClassification, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, 
stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, **deprecated_arguments, ): r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ logger = logging.get_logger(__name__) if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore" " passing `position_ids`.", FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError(f"Got unexpected arguments: {deprecated_arguments}") return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False transformer_outputs = BloomPipelineForwards.bloom_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) past_key_values = None if stage_manager.is_last_stage(): hidden_states = transformer_outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: # move labels to correct device to enable model parallelism labels = labels.to(logits.device) batch_size, seq_length = labels.shape loss_fct = CrossEntropyLoss() loss = loss_fct( logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length) ) if not return_dict: output = (logits,) + transformer_outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: hidden_states = transformer_outputs.get("hidden_states") return {"hidden_states": hidden_states} @staticmethod def bloom_for_question_answering_forward( self: BloomForQuestionAnswering, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, 
end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ logger = logging.get_logger(__name__) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False outputs = BloomPipelineForwards.bloom_model_forward( self.transformer, input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager,
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/command.py
colossalai/shardformer/modeling/command.py
from typing import List, Optional, Tuple, Union import torch from torch import nn from transformers.cache_utils import Cache, DynamicCache from transformers.modeling_flash_attention_utils import FlashAttentionKwargs from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from transformers.models.cohere.modeling_cohere import ( CohereAttention, CohereForCausalLM, CohereModel, StaticCache, apply_rotary_pos_emb, repeat_kv, ) from transformers.processing_utils import Unpack from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer._operation import all_to_all_comm, split_forward_gather_backward from colossalai.shardformer.shard import ShardConfig from ..layer import ColoAttention, dist_cross_entropy from ..layer._operation import gather_sp_output, is_share_sp_tp _SUPPORTED_SP_MODE = ["all_to_all", "split_gather", "ring"] _SUPPORTED_SP_MODE = ["all_to_all", "split_gather", "ring", "ring_attn"] logger = logging.get_logger(__name__) class CommandPipelineForwards: """ This class serves as a micro library for forward function substitution of Command models under pipeline setting. 
""" @staticmethod def command_model_forward( self: CohereModel, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, force_sp_output_gather: bool = True, **flash_attn_kwargs: Unpack[FlashAttentionKwargs], ): logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache if use_cache: logger.warning_once( "`use_cache=True` is incompatible with pipeline parallelism. Setting `use_cache=False`..." 
) use_cache = False # retrieve input_ids and inputs_embeds if stage_manager.is_first_stage(): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape[:2] elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds else: input_shape = hidden_states.shape[:-1] batch_size, seq_length = input_shape device = hidden_states.device past_seen_tokens = 0 if use_cache: # kept for BC (cache positions) if not isinstance(past_key_values, StaticCache): past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_seen_tokens = past_key_values.get_seq_length() # NOTE: For generating full positions ids # (the states will be gathered along the seq dim before attention fwd). if shard_config.sequence_parallelism_mode != "ring_attn" and not stage_manager.is_first_stage(): seq_length *= shard_config.sequence_parallel_size if cache_position is None: if isinstance(past_key_values, StaticCache): raise ValueError("cache_position is a required argument when using StaticCache.") cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_length, device=device) seq_length_with_past = seq_length + past_seen_tokens # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False if position_ids is None: position_ids = cache_position.unsqueeze(0) # embed positions, for the first stage, hidden_states is the input embeddings, # for the other stages, hidden_states is the output of the previous stage shard_config.enable_flash_attention = True if shard_config.enable_flash_attention: # in this case, attention_mask is a dict rather than a tensor mask_shape = (batch_size, 1, seq_length, seq_length_with_past) attention_mask = ColoAttention.prepare_attn_kwargs( mask_shape, hidden_states.dtype, hidden_states.device, q_padding_mask=attention_mask, is_causal=True, ) else: # v4.51.3 transformers attention_mask calculation attention_mask = self._update_causal_mask(attention_mask, hidden_states, cache_position, past_key_values) if self.gradient_checkpointing and self.training and use_cache: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False if stage_manager.is_first_stage() and shard_config.enable_sequence_parallelism: if shard_config.sequence_parallelism_mode in ["split_gather", "ring"]: hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, fp8_communication=shard_config.fp8_communication, ) elif shard_config.sequence_parallelism_mode == "all_to_all": hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=shard_config.sequence_parallel_process_group, grad_scale=1 / shard_config.sequence_parallel_size, fp8_communication=shard_config.fp8_communication, ) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None # v4.51.3 transformers position_embeddings calculation position_embeddings = self.rotary_emb(hidden_states, position_ids) start_idx, end_idx = stage_index[0], stage_index[1] num_ckpt_layers = 0 if self.gradient_checkpointing and self.training: num_ckpt_layers = end_idx - start_idx # TODO: We can replace `gradient_checkpointing_enable` fn and initialize a gradient_checkpointing (List[bool]) for each layer if shard_config.gradient_checkpoint_config is not None: num_ckpt_layers = shard_config.gradient_checkpoint_config.get_num_ckpt_layers( stage=stage_manager.stage, num_stages=stage_manager.num_stages, num_layers=end_idx - start_idx, model_chunk_id=(stage_manager.model_chunk_id if stage_manager.is_interleave else 0), num_model_chunks=stage_manager.num_model_chunks, ) assert num_ckpt_layers <= end_idx - start_idx for idx, decoder_layer in enumerate(self.layers[start_idx:end_idx], start=start_idx): if output_hidden_states: all_hidden_states += (hidden_states,) if idx - start_idx < num_ckpt_layers: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, 
position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) if stage_manager.is_last_stage(): hidden_states = self.norm(hidden_states) sp_mode = shard_config.sequence_parallelism_mode if shard_config.enable_sequence_parallelism: if (not shard_config.parallel_output) or force_sp_output_gather or is_share_sp_tp(sp_mode): hidden_states = gather_sp_output(hidden_states, shard_config) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if stage_manager.is_last_stage(): return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) # always return dict for imediate stage return {"hidden_states": hidden_states} @staticmethod def command_for_causal_lm_forward( self: CohereForCausalLM, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ): r""" Args: labels 
(`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, CohereForCausalLM >>> model = CohereForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." ```""" logger = logging.get_logger(__name__) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # TODO(jianghai): left the recording kv-value tensors as () or None type, this feature may be added in the future. 
if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = CommandPipelineForwards.command_model_forward( self.model, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, force_sp_output_gather=False, ) past_key_values = None if stage_manager.is_last_stage(): hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits * self.logit_scale logits = logits.float() loss = None if labels is not None: loss = dist_cross_entropy(labels, logits, shard_config, self.lm_head.out_features, self.model.dtype) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) else: hidden_states = outputs.get("hidden_states") return {"hidden_states": hidden_states} def get_command_flash_attention_forward(shard_config: ShardConfig, sp_mode=None, sp_size=None, sp_group=None): def forward( self: CohereAttention, hidden_states: torch.Tensor, position_embeddings: Tuple[torch.Tensor, torch.Tensor], attention_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Cache] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[FlashAttentionKwargs], ) -> 
Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: if sp_mode is not None: assert sp_mode in _SUPPORTED_SP_MODE, f"SP mode {sp_mode} is not supported by {type(self)} yet" assert (sp_size is not None) and ( sp_group is not None ), "Must specify sp_size and sp_group for sequence parallel" bsz, q_len, _ = hidden_states.size() # sp: modify sp_len when sequence parallel mode is ring if sp_mode in ["split_gather", "ring"]: q_len *= sp_size query_states = self.q_proj(hidden_states) key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": query_states = all_to_all_comm(query_states, sp_group, fp8_communication=shard_config.fp8_communication) key_states = all_to_all_comm(key_states, sp_group, fp8_communication=shard_config.fp8_communication) value_states = all_to_all_comm(value_states, sp_group, fp8_communication=shard_config.fp8_communication) bsz, q_len, _ = query_states.size() query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2) cos, sin = position_embeddings query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) # repeat k/v heads if n_kv_heads < n_heads key_states = repeat_kv(key_states, self.num_key_value_groups) value_states = repeat_kv(value_states, self.num_key_value_groups) attn_weights = None shard_config.enable_flash_attention = True if shard_config.enable_flash_attention: assert isinstance(attention_mask, dict), "Flash Attention Error: attention_mask should be a dict." attn_output = ColoAttention.attention(query_states, key_states, value_states, **attention_mask) else: # attn_weights and attn_output calculation is modified on the v4.51.3 of transformers.models.cohere.modeling_cohere.CohereAttention.forward. 
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling if attention_mask is not None: causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] attn_weights = attn_weights + causal_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) dropout = 0.0 if not self.training else self.attention_dropout attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=self.training) attn_output = torch.matmul(attn_weights, value_states) attn_output = attn_output.transpose(1, 2).contiguous() attn_output = attn_output.transpose(1, 2).contiguous() # sp: all-to-all comminucation when introducing sequence parallel if sp_mode == "all_to_all": attn_output = attn_output.reshape(bsz, q_len, self.num_heads * self.head_dim) attn_output = all_to_all_comm( attn_output, sp_group, scatter_dim=1, gather_dim=2, fp8_communication=shard_config.fp8_communication ) else: attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) return attn_output, attn_weights return forward def get_command_flash_attention_model_forward(shard_config: ShardConfig, sp_mode=None, sp_size=None, sp_group=None): logger = logging.get_logger(__name__) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, force_sp_output_gather: bool = True, **flash_attn_kwargs: Unpack[FlashAttentionKwargs], ) -> BaseModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else 
self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache # retrieve input_ids and inputs_embeds if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" ) if (self.gradient_checkpointing or sp_mode in ["ring", "all_to_all"]) and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) past_seen_tokens = 0 seq_len = inputs_embeds.shape[1] if use_cache: # kept for BC (cache positions) if not isinstance(past_key_values, StaticCache): past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_seen_tokens = past_key_values.get_seq_length() if cache_position is None: if isinstance(past_key_values, StaticCache): raise ValueError("cache_position is a required argument when using StaticCache.") cache_position = torch.arange(past_seen_tokens, past_seen_tokens + seq_len, device=inputs_embeds.device) if position_ids is None: position_ids = cache_position.unsqueeze(0) shard_config.enable_flash_attention = True # in this case, attention_mask is a dict rather than a tensor if shard_config.enable_flash_attention: mask_shape = (inputs_embeds.shape[0], 1, past_seen_tokens + seq_len, past_seen_tokens + seq_len) attention_mask = ColoAttention.prepare_attn_kwargs( mask_shape, inputs_embeds.dtype, inputs_embeds.device, q_padding_mask=attention_mask, is_causal=True, ) else: # v4.51.3 transformers attention_mask calculation attention_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_key_values) if sp_mode in ["ring", "split_gather"]: inputs_embeds = split_forward_gather_backward( inputs_embeds, 1, sp_group, fp8_communication=shard_config.fp8_communication ) elif sp_mode == "all_to_all": inputs_embeds = 
split_forward_gather_backward( inputs_embeds, 1, sp_group, 1 / sp_size, fp8_communication=shard_config.fp8_communication ) hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None # v4.51.3 transformers position_embeddings calculation position_embeddings = self.rotary_emb(hidden_states, position_ids) for decoder_layer in self.layers: if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, position_ids, past_key_values, output_attentions, use_cache, cache_position, position_embeddings, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, ) hidden_states = layer_outputs[0] hidden_states = self.norm(hidden_states) # Cases that don't support parallelizing cross entropy computation along sequence if shard_config.enable_sequence_parallelism: if (not shard_config.parallel_output) or is_share_sp_tp(sp_mode) or force_sp_output_gather: hidden_states = gather_sp_output(hidden_states, shard_config) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = None if use_cache: next_cache = ( next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) return forward def get_lm_forward_with_dist_cross_entropy(shard_config: ShardConfig): from transformers import CohereForCausalLM def forward( self: CohereForCausalLM, 
input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, CohereForCausalLM >>> model = CohereForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, force_sp_output_gather=False, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits * self.logit_scale logits = logits.float() loss = None if labels is not None: loss = dist_cross_entropy( labels, logits, shard_config, self.lm_head.out_features, self.model.dtype, ) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) return forward
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/deepseek_v3.py
colossalai/shardformer/modeling/deepseek_v3.py
from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.distributed as dist from torch.distributed import ProcessGroup from torch.nn import CrossEntropyLoss from transformers.cache_utils import Cache, DynamicCache from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast from colossalai.lazy import LazyInitContext from colossalai.moe._operation import ( DPGradScalerIn, DPGradScalerOut, EPGradScalerIn, EPGradScalerOut, all_to_all_uneven, ) from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer.linear import ParallelModule from colossalai.shardformer.shard.utils import set_tensors_to_none from colossalai.tensor.moe_tensor.api import set_moe_tensor_ep_group class EpDeepseekV3MoE(ParallelModule): """ A mixed expert module containing shared experts. """ def __init__(self, config): raise RuntimeError(f"Please use `from_native_module` to create an instance of {self.__class__.__name__}") def setup_process_groups( self, moe_dp_group: ProcessGroup, ep_group: ProcessGroup, ): assert moe_dp_group is not None assert ep_group is not None self.ep_size = dist.get_world_size(ep_group) self.ep_rank = dist.get_rank(ep_group) self.num_experts = self.config.n_routed_experts assert self.num_experts % self.ep_size == 0 self.ep_group = ep_group self.num_experts_per_ep = self.num_experts // self.ep_size self.experts_per_rank = self.num_experts_per_ep self.expert_start_idx = self.ep_rank * self.num_experts_per_ep held_experts = self.experts[self.expert_start_idx : self.expert_start_idx + self.num_experts_per_ep] set_tensors_to_none(self.experts, exclude=set(held_experts)) # setup moe_dp group self.moe_dp_group = moe_dp_group self.moe_dp_size = dist.get_world_size(moe_dp_group) for p in self.experts.parameters(): set_moe_tensor_ep_group(p, ep_group) @staticmethod def from_native_module( module, 
moe_dp_group: ProcessGroup, ep_group: ProcessGroup, *args, **kwargs, ) -> "EpDeepseekV3MoE": if module.__class__.__name__ != "DeepseekV3MLP": module.__class__ = EpDeepseekV3MoE module.setup_process_groups(moe_dp_group, ep_group) LazyInitContext.materialize(module) return module def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: identity = hidden_states orig_shape = hidden_states.shape topk_idx, topk_weight = self.gate(hidden_states) hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) y = self.moe_forward(hidden_states, topk_idx, topk_weight).view(*orig_shape) if self.config.n_shared_experts is not None: y = y + self.shared_experts(identity) return y def moe_forward(self, x: torch.Tensor, topk_ids: torch.Tensor, topk_weight: torch.Tensor) -> torch.Tensor: cnts = topk_ids.new_zeros((topk_ids.shape[0], len(self.experts))) cnts.scatter_(1, topk_ids, 1) tokens_per_expert = cnts.sum(dim=0) idxs = topk_ids.view(-1).argsort() sorted_tokens = x[idxs // topk_ids.shape[1]] if self.ep_size > 1: tokens_per_ep_rank = tokens_per_expert.view(self.ep_size, -1).sum(dim=1) tokens_per_expert_group = tokens_per_expert.new_empty(tokens_per_expert.shape[0]) dist.all_to_all_single(tokens_per_expert_group, tokens_per_expert, group=self.ep_group) output_splits = tokens_per_expert_group.view(self.ep_size, -1).sum(1).tolist() input_split_sizes = tokens_per_ep_rank.tolist() gathered_tokens, _ = all_to_all_uneven(sorted_tokens, input_split_sizes, output_splits, self.ep_group) tokens_per_expert_post_gather = tokens_per_expert_group.view(self.ep_size, self.experts_per_rank).sum(dim=0) gatherd_idxs = np.zeros(shape=(gathered_tokens.shape[0],), dtype=np.int32) s = 0 for i, k in enumerate(tokens_per_expert_group.cpu().numpy()): gatherd_idxs[s : s + k] = i % self.experts_per_rank s += k gatherd_idxs = gatherd_idxs.argsort() sorted_tokens = gathered_tokens[gatherd_idxs] tokens_per_expert = tokens_per_expert_post_gather # moe-dp related code activate_experts = 
tokens_per_expert_post_gather > 0 activate_experts = activate_experts.int() dist.all_reduce(activate_experts, group=self.moe_dp_group) # ep related code sorted_tokens = EPGradScalerIn.apply(sorted_tokens, self.ep_size) tokens_per_expert = tokens_per_expert.cpu().numpy() outputs = [] start_idx = 0 for i, num_tokens in enumerate(tokens_per_expert): end_idx = start_idx + num_tokens if num_tokens == 0: continue expert = self.experts[i + self.ep_rank * self.experts_per_rank] tokens_for_this_expert = sorted_tokens[start_idx:end_idx] # moe-dp related code tokens_for_this_expert = DPGradScalerIn.apply(tokens_for_this_expert, self.moe_dp_size, activate_experts[i]) expert_out = expert(tokens_for_this_expert) # moe-dp related code expert_out = DPGradScalerOut.apply(expert_out, self.moe_dp_size, activate_experts[i]) outputs.append(expert_out) start_idx = end_idx if len(outputs) > 0: outs = torch.cat(outputs, dim=0) else: assert sorted_tokens.numel() == 0, f"sorted_tokens: should be empty, but got {sorted_tokens.shape}" outs = sorted_tokens if self.ep_size > 1: outs = EPGradScalerOut.apply(outs, self.ep_size) new_x = torch.empty_like(outs) new_x[gatherd_idxs] = outs gathered_tokens, _ = all_to_all_uneven(new_x, output_splits, input_split_sizes, self.ep_group) outs = gathered_tokens new_x = torch.empty_like(outs) new_x[idxs] = outs final_out = ( (new_x.view(*topk_ids.shape, -1).type(topk_weight.dtype) * topk_weight.unsqueeze(dim=-1)) .sum(dim=1) .type(new_x.dtype) ) return final_out def deepseek_v3_model_forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, stage_index: 
Optional[List[int]] = None, hidden_states_internal: Optional[torch.Tensor] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape[:2] elif inputs_embeds is not None: batch_size, seq_length = inputs_embeds.shape[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") past_key_values_length = 0 if use_cache: use_legacy_cache = not isinstance(past_key_values, Cache) if use_legacy_cache: past_key_values = DynamicCache.from_legacy_cache(past_key_values) past_key_values_length = past_key_values.get_usable_length(seq_length) if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0) if stage_manager is None or stage_manager.is_first_stage(): if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) else: inputs_embeds = hidden_states_internal if self._use_flash_attention_2: # 2d mask is passed through the layers attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None else: # 4d mask is passed through the layers attention_mask = _prepare_4d_causal_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length, ) # embed 
positions hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = None if stage_index is not None: start_idx, end_idx = stage_index else: start_idx, end_idx = 0, len(self.layers) for i, decoder_layer in enumerate(self.layers[start_idx:end_idx], start=start_idx): if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and i > 0: layer_outputs = self._gradient_checkpointing_func( decoder_layer.__call__, hidden_states, attention_mask, position_ids, past_key_values, output_attentions, use_cache, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_values, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache = layer_outputs[2 if output_attentions else 1] if output_attentions: all_self_attns += (layer_outputs[1],) if stage_manager is None or stage_manager.is_last_stage(): hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = None if use_cache: next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache if stage_manager is not None and not stage_manager.is_last_stage(): return { "hidden_states_internal": hidden_states, } if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) def deepseek_v3_for_causal_lm_forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = 
None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, stage_index: Optional[List[int]] = None, hidden_states_internal: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, transformers., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, transformers., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, DeepseekV3ForCausalLM >>> model = DeepseekV3ForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you conscious? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = deepseek_v3_model_forward( self.model, input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, stage_index=stage_index, hidden_states_internal=hidden_states_internal, ) if stage_manager is not None and not stage_manager.is_last_stage(): return outputs hidden_states = outputs[0] logits = self.lm_head(hidden_states) logits = logits.float() loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/jit.py
colossalai/shardformer/modeling/jit.py
import torch def get_dropout_add_func(): from transformers.models.bloom.modeling_bloom import dropout_add def self_dropout_add(self, x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: return dropout_add(x, residual, prob, training) return self_dropout_add def get_jit_fused_dropout_add_func(): from colossalai.kernel.jit import bias_dropout_add_fused_inference, bias_dropout_add_fused_train def self_dropout_add(self, x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: bias = torch.zeros_like(x) if training: return bias_dropout_add_fused_train(x, bias, residual, prob) return bias_dropout_add_fused_inference(x, bias, residual, prob) return self_dropout_add def get_jit_fused_gelu_forward_func(): from colossalai.kernel.jit.bias_gelu import bias_gelu def bloom_gelu_forward(x: torch.Tensor, bias: torch.Tensor) -> torch.Tensor: return bias_gelu(bias, x) return bloom_gelu_forward
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/blip2.py
colossalai/shardformer/modeling/blip2.py
from typing import Optional, Tuple import torch import torch.nn as nn from colossalai.shardformer.layer import ColoAttention def forward_fn(): def forward( self, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() mixed_qkv = self.qkv(hidden_states) # modified from original code, which is: # mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads).permute( # 2, 0, 3, 1, 4 # ) # to: mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) query_states, key_states, value_states = ( mixed_qkv[0], mixed_qkv[1], mixed_qkv[2], ) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) attention_scores = attention_scores * self.scale # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3) new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,) context_layer = context_layer.reshape(new_context_layer_shape) output = self.projection(context_layer) outputs = (output, attention_probs) if output_attentions else (output, None) return outputs return forward def get_blip2_flash_attention_forward(): from transformers.models.blip_2.modeling_blip_2 import Blip2Attention def forward( self: Blip2Attention, hidden_states: torch.Tensor, head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" assert head_mask is None, "head_mask is not supported in FlashAttention" bsz, tgt_len, embed_dim = hidden_states.size() mixed_qkv = self.qkv(hidden_states) mixed_qkv = mixed_qkv.reshape(bsz, tgt_len, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) query_states, key_states, value_states = ( mixed_qkv[0], mixed_qkv[1], mixed_qkv[2], ) dropout_p = self.dropout.p if self.training else 0.0 context_layer = ColoAttention.attention( query_states, key_states, value_states, dropout_p=dropout_p, scale=self.scale, ) context_layer = context_layer.permute(0, 2, 1, 3).reshape(bsz, tgt_len, self.embed_dim) output = self.projection(context_layer) outputs = (output, None) return outputs return forward def get_jit_fused_blip2_QFormer_self_output_forward(): from transformers.models.blip_2.modeling_blip_2 import Blip2QFormerSelfOutput def forward( self: Blip2QFormerSelfOutput, hidden_states: torch.Tensor, input_tensor: torch.Tensor, ) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout_add(hidden_states, input_tensor, self.dropout.p, self.dropout.training) 
hidden_states = self.LayerNorm(hidden_states) return hidden_states return forward def get_jit_fused_blip2_QFormer_output_forward(): from transformers.models.blip_2.modeling_blip_2 import Blip2QFormerOutput def forward( self: Blip2QFormerOutput, hidden_states: torch.Tensor, input_tensor: torch.Tensor, ) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout_add(hidden_states, input_tensor, self.dropout.p, self.dropout.training) hidden_states = self.LayerNorm(hidden_states) return hidden_states return forward def get_jit_fused_blip2_mlp_forward(): from transformers.models.blip_2.modeling_blip_2 import Blip2MLP from colossalai.kernel.jit.bias_gelu import GeLUFunction as JitGeLUFunction def forward(self: Blip2MLP, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states, bias = self.fc1(hidden_states) hidden_states = JitGeLUFunction.apply(hidden_states, bias) hidden_states = self.fc2(hidden_states) return hidden_states return forward
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/gpt2.py
colossalai/shardformer/modeling/gpt2.py
from typing import Dict, List, Optional, Tuple, Union import torch from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) from transformers.models.gpt2.modeling_gpt2 import ( GPT2DoubleHeadsModel, GPT2DoubleHeadsModelOutput, GPT2ForQuestionAnswering, GPT2ForSequenceClassification, GPT2ForTokenClassification, GPT2LMHeadModel, GPT2Model, ) from transformers.utils import logging from colossalai.pipeline.stage_manager import PipelineStageManager from colossalai.shardformer.layer import ColoAttention, RingAttention from colossalai.shardformer.layer._operation import gather_sp_output, split_forward_gather_backward from colossalai.shardformer.layer.utils import is_share_sp_tp, split_batch_zigzag from colossalai.shardformer.shard import ShardConfig from ..layer import dist_cross_entropy logger = logging.get_logger(__name__) def _get_attention_mask( self: GPT2Model, shard_config: ShardConfig, hidden_states: torch.Tensor, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]], attention_mask: Optional[torch.FloatTensor], encoder_hidden_states: Optional[torch.Tensor], encoder_attention_mask: Optional[torch.FloatTensor], ) -> Tuple[Optional[Union[torch.Tensor, dict]], Optional[Union[torch.Tensor, dict]]]: # Received input is already split for non-first pipeline stages, # but attn mask isn't batch_size = hidden_states.size(0) seq_len = attention_mask.size(-1) sp_mode = shard_config.sequence_parallelism_mode # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.add_cross_attention and encoder_hidden_states is not None: assert not sp_mode == "ring_attn", "Ring Attention only supports decoder-only." 
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() if shard_config.enable_flash_attention: encoder_attention_mask = ColoAttention.prepare_attn_kwargs( (encoder_batch_size, 1, seq_len, encoder_sequence_length), dtype=hidden_states.dtype, device=encoder_hidden_states.device, q_padding_mask=attention_mask, kv_padding_mask=encoder_attention_mask, ) else: encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=encoder_hidden_states.device) encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: if shard_config.enable_flash_attention: encoder_attention_mask = {"attention_mask": None} else: encoder_attention_mask = None # GPT2Attention mask. past_key_values_length = 0 if past_key_values is not None and past_key_values[0] is not None: past_key_values_length = past_key_values[0][0].shape[2] if shard_config.enable_flash_attention: if attention_mask is not None: attention_mask = attention_mask.view(batch_size, -1) attention_mask = ColoAttention.prepare_attn_kwargs( (batch_size, 1, seq_len, seq_len + past_key_values_length), hidden_states.dtype, hidden_states.device, attention_mask, is_causal=True, ) elif attention_mask is not None: if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = attention_mask.view(batch_size, -1) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
attention_mask = attention_mask[:, None, None, :] # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min return attention_mask, encoder_attention_mask class GPT2PipelineForwards: """ This class serves as a micro library for forward function substitution of GPT2 models under pipeline setting. """ @staticmethod def gpt2_model_forward( self: GPT2Model, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, force_sp_gather: Optional[bool] = True, ) -> Union[Dict, Tuple, BaseModelOutputWithPastAndCrossAttentions]: # This function is modified on the basis of transformers.models.gpt2.modeling_gpt2.GPT2Model.forward. # Please refer to original code of transformers for more details. 
return_dict = return_dict if return_dict is not None else self.config.use_return_dict logger = logging.get_logger(__name__) # Preprocess passed in arguments # TODO(baizhou): left the recording kv-value tensors as () or None type, this feature may be added in the future. if past_key_values: logger.warning_once("Non-empty past_key_values is not supported for pipeline models at the moment.") past_key_values = None if output_attentions: logger.warning_once("output_attentions=True is not supported for pipeline models at the moment.") output_attentions = False if output_hidden_states: logger.warning_once("output_hidden_states=True is not supported for pipeline models at the moment.") output_hidden_states = False if use_cache: logger.warning_once("use_cache=True is not supported for pipeline models at the moment.") use_cache = False disable_pp = stage_manager is None if disable_pp or stage_manager.is_first_stage(): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) else: if hidden_states is None: raise ValueError("hidden_states shouldn't be None for stages other than the first stage.") input_shape = hidden_states.size()[:-1] device = hidden_states.device hidden_states = hidden_states.view((-1,) + hidden_states.shape[-2:]) hidden_states.shape[0] # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # head_mask has shape n_layer x batch x n_heads x N x N head_mask = self.get_head_mask(head_mask, 
self.config.n_layer) if disable_pp or stage_manager.is_first_stage(): if position_ids is None: position_ids = torch.arange(0, input_shape[-1], dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) position_embeds = self.wpe(position_ids) hidden_states = inputs_embeds + position_embeds if token_type_ids is not None: token_type_embeds = self.wte(token_type_ids) hidden_states = hidden_states + token_type_embeds hidden_states = self.drop(hidden_states) attn_kwargs, encoder_attention_mask = _get_attention_mask( self, shard_config, hidden_states, past_key_values, attention_mask, encoder_hidden_states, encoder_attention_mask, ) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None # split the input tensor along sequence dimension # [batch_size, seq_len, hidden_size] -> [batch_size, seq_len/TP_size, hidden_size] sp_mode = shard_config.sequence_parallelism_mode sp_group = shard_config.sequence_parallel_process_group if disable_pp or stage_manager.is_first_stage(): # Ring Attention's special zigzag batch processing if sp_mode == "ring_attn": assert shard_config.enable_flash_attention, "Ring Attention inherently requires Flash Attention." 
if not attention_mask.bool().all(): hidden_states, attn_kwargs, position_ids = RingAttention.prepare_varlen_batch( attention_mask, sp_group, hidden_states, position_ids ) else: hidden_states, position_ids = split_batch_zigzag([hidden_states, position_ids], sp_group) # Other sp modes else: if sp_mode == "split_gather": hidden_states = split_forward_gather_backward( hidden_states, dim=1, process_group=shard_config.tensor_parallel_process_group, ) elif sp_mode == "ring_attn": # Later stages already received split hidden states _, attn_kwargs, _ = RingAttention.prepare_varlen_batch(attention_mask, sp_group) del attention_mask # Going through held blocks. if disable_pp: start_idx, end_idx = 0, len(self.h) else: start_idx, end_idx = stage_index[0], stage_index[1] for i in range(start_idx, end_idx): block = self.h[i] torch.cuda.set_device(hidden_states.device) # Ensure that attention_mask is always on the same device as hidden_states if torch.is_tensor(attn_kwargs): attn_kwargs = attn_kwargs.to(hidden_states.device) if isinstance(head_mask, torch.Tensor): head_mask = head_mask.to(hidden_states.device) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: outputs = self._gradient_checkpointing_func( block.__call__, hidden_states, None, attn_kwargs, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, ) else: outputs = block( hidden_states, layer_past=None, attention_mask=attn_kwargs, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],) # When 
sequence parallelism is done, gather the output tensor in forward and split it in backward gather_output = (not shard_config.parallel_output) or force_sp_gather or is_share_sp_tp(sp_mode) if disable_pp or stage_manager.is_last_stage(): if gather_output: hidden_states = gather_sp_output(hidden_states, shard_config) # gather_sp_output could've changed seq length. input_shape = (*input_shape[:-1], hidden_states.size(-2)) output_shape = input_shape + (hidden_states.size(-1),) if disable_pp or stage_manager.is_last_stage(): hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if disable_pp or stage_manager.is_last_stage(): if not return_dict: return tuple( v for v in [ hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) else: # always return dict for intermediate stage return {"hidden_states": hidden_states} @staticmethod def gpt2_lmhead_model_forward( self: GPT2LMHeadModel, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: 
Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Dict, Tuple, CausalLMOutputWithCrossAttentions]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` This function is modified on the basis of transformers.models.gpt2.modeling_gpt2.GPT2LMHeadModel.forward. Please refer to original code of transformers for more details. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = GPT2PipelineForwards.gpt2_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, force_sp_gather=False, ) # If not at the last stage, return hidden_states as in GPT2Model disable_pp = stage_manager is None if (not disable_pp) and (not stage_manager.is_last_stage()): return {"hidden_states": outputs["hidden_states"]} hidden_states = outputs[0] lm_logits = self.lm_head(hidden_states) if shard_config.sequence_parallelism_mode == "ring_attn": # Split labels in a zigzag fashion too sp_group = shard_config.sequence_parallel_process_group if not attention_mask.bool().all(): # [B, max_seqlen // sp_size] labels, _, _ = 
RingAttention.prepare_varlen_batch(attention_mask, sp_group, labels, is_label=True) else: labels = split_batch_zigzag(labels, sp_group, seq_dim=1, is_label=True) if labels is not None: loss = dist_cross_entropy( labels, lm_logits, shard_config, self.lm_head.out_features, self.transformer.dtype ) if not return_dict: output = (lm_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @staticmethod def gpt2_double_heads_model_forward( self: GPT2DoubleHeadsModel, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, mc_token_ids: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, mc_labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Dict, Tuple, GPT2DoubleHeadsModelOutput]: r""" mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. 
Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]` mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where *num_choices* is the size of the second dimension of the input tensors. (see *input_ids* above) This function is modified on the basis of transformers.models.gpt2.modeling_gpt2.GPT2DoubleHeadsModel.forward. Please refer to original code of transformers for more details. ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = GPT2PipelineForwards.gpt2_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) # If not at the last stage, return hidden_states as in GPT2Model if not stage_manager.is_last_stage(): return {"hidden_states": outputs["hidden_states"]} hidden_states = outputs[0] lm_logits = self.lm_head(hidden_states) mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) mc_loss = None if mc_labels is not None: loss_fct = CrossEntropyLoss() mc_loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)) lm_loss = None if labels is not None: labels = labels.to(lm_logits.device) shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = 
loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) if not return_dict: output = (lm_logits, mc_logits) + outputs[1:] if mc_loss is not None: output = (mc_loss,) + output return ((lm_loss,) + output) if lm_loss is not None else output return GPT2DoubleHeadsModelOutput( loss=lm_loss, mc_loss=mc_loss, logits=lm_logits, mc_logits=mc_logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @staticmethod def gpt2_for_question_answering_forward( self: GPT2ForQuestionAnswering, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Dict, Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
# This function is modified on the basis of transformers.models.gpt2.modeling_gpt2.GPT2ForQuestionAnswering.forward. # Please refer to original code of transformers for more details. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = GPT2PipelineForwards.gpt2_model_forward( self.transformer, input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) # If not at the last stage, return hidden_states as in GPT2Model if not stage_manager.is_last_stage(): return {"hidden_states": outputs["hidden_states"]} sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1).to(start_logits.device) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1).to(end_logits.device) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return 
QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @staticmethod def gpt2_for_token_classification_forward( self: GPT2ForTokenClassification, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, stage_manager: Optional[PipelineStageManager] = None, hidden_states: Optional[torch.FloatTensor] = None, stage_index: Optional[List[int]] = None, shard_config: ShardConfig = None, ) -> Union[Dict, Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). # This function is modified on the basis of transformers.models.gpt2.modeling_gpt2.GPT2ForTokenClassification.forward. # Please refer to original code of transformers for more details. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = GPT2PipelineForwards.gpt2_model_forward( self.transformer, input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, stage_manager=stage_manager, hidden_states=hidden_states, stage_index=stage_index, shard_config=shard_config, ) # If not at the last stage, return hidden_states as in GPT2Model if not stage_manager.is_last_stage(): return {"hidden_states": outputs["hidden_states"]} hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.classifier(hidden_states) loss = None if labels is not None: labels = labels.to(logits.device) loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @staticmethod def gpt2_for_sequence_classification_forward( self: GPT2ForSequenceClassification, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None,
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
true
hpcaitech/ColossalAI
https://github.com/hpcaitech/ColossalAI/blob/b1915d2889543949eb5b610241f1515c73df5059/colossalai/shardformer/modeling/sam.py
colossalai/shardformer/modeling/sam.py
import torch
from torch import nn


# Same as the SamVisionAttention forward method in the v4.51.3 transformers
def forward_fn():
    def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor:
        """Multi-head self-attention over a (batch, height, width, channel) feature map.

        Mirrors ``SamVisionAttention.forward``: fused QKV projection, optional
        decomposed relative position bias, softmax computed in float32, dropout on
        the attention weights, and a final output projection.
        """
        batch_size, height, width, _ = hidden_states.shape
        tokens = height * width

        # Fused projection, laid out as (3, batch, heads, tokens, head_dim).
        projected = self.qkv(hidden_states)
        projected = projected.reshape(batch_size, tokens, 3, self.num_attention_heads, -1)
        projected = projected.permute(2, 0, 3, 1, 4)

        # Collapse batch and head axes: q/k/v are each (batch * heads, tokens, head_dim).
        query, key, value = projected.reshape(
            3, batch_size * self.num_attention_heads, tokens, -1
        ).unbind(0)

        scores = torch.matmul(query * self.scale, key.transpose(-2, -1))

        if self.use_rel_pos:
            # Add the decomposed relative positional bias (query and key share H/W here).
            rel_bias = self.get_decomposed_rel_pos(
                query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
            )
            scores = scores + rel_bias.reshape_as(scores)

        # Softmax in float32 for numerical stability, then cast back to the query dtype.
        attn_weights = torch.nn.functional.softmax(scores, dtype=torch.float32, dim=-1).to(query.dtype)
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        # Weighted sum of values, then restore the (batch, height, width, channel) layout.
        attn_output = torch.matmul(attn_probs, value)
        attn_output = attn_output.reshape(batch_size, self.num_attention_heads, height, width, -1)
        attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1)
        attn_output = self.proj(attn_output)

        return (attn_output, attn_weights) if output_attentions else (attn_output, None)

    return forward
python
Apache-2.0
b1915d2889543949eb5b610241f1515c73df5059
2026-01-04T14:40:19.002665Z
false