from __future__ import annotations

import math
import warnings
from typing import List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers.activations import ACT2FN
from transformers.modeling_outputs import (BaseModelOutputWithPast,
                                            CausalLMOutputWithPast)
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging

from fla.layers.abc import ABCAttention
from fla.models.abc.configuration_abc import ABCConfig
from fla.models.utils import RecurrentCache
from fla.modules import FusedCrossEntropyLoss, RMSNorm
from fla.modules.activations import swiglu_linear

logger = logging.get_logger(__name__)


class ABCMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        hidden_ratio: Optional[int] = None,
        intermediate_size: Optional[int] = None,
        hidden_act: str = 'swish'
    ) -> ABCMLP:
        super().__init__()

        self.hidden_size = hidden_size
        # `intermediate_size` defaults to 2/3 * hidden_size * hidden_ratio,
        # rounded up to the nearest multiple of 256
        if hidden_ratio is None:
            hidden_ratio = 4
        if intermediate_size is None:
            intermediate_size = int(hidden_size * hidden_ratio * 2 / 3)
            intermediate_size = 256 * ((intermediate_size + 256 - 1) // 256)
        self.hidden_ratio = hidden_ratio
        self.intermediate_size = intermediate_size

        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[hidden_act]

    def forward(self, x):
        # a single projection produces both the gate and the value,
        # which are combined by the fused SwiGLU + down-projection kernel
        y = self.gate_proj(x)
        gate, y = y.chunk(2, -1)
        return swiglu_linear(gate, y, self.down_proj.weight, self.down_proj.bias)


class ABCBlock(nn.Module):
    def __init__(self, config: ABCConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.attn_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.attn = ABCAttention(
            hidden_size=config.hidden_size,
            expand_k=config.expand_k,
            expand_v=config.expand_v,
            num_heads=config.num_heads,
            num_slots=config.num_slots,
            use_short_conv=config.use_short_conv,
            conv_size=config.conv_size,
            share_conv_kernel=config.share_conv_kernel,
            gate_fn=config.hidden_act,
            elementwise_affine=config.elementwise_affine,
            norm_eps=config.norm_eps,
            clamp_min=config.clamp_min,
            clamp_max=config.clamp_max,
            fuse_norm=config.fuse_norm,
            layer_idx=layer_idx
        )
        self.mlp_norm = RMSNorm(hidden_size=config.hidden_size, eps=config.norm_eps)
        self.mlp = ABCMLP(
            hidden_size=config.hidden_size,
            hidden_ratio=config.hidden_ratio,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = False,
        output_attentions: Optional[bool] = False,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states

        hidden_states = self.attn_norm(hidden_states)
        hidden_states, attentions, past_key_values = self.attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions
        )
        hidden_states, residual = self.mlp_norm(hidden_states, residual, True)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states, attentions, past_key_values)

        return outputs


class ABCPreTrainedModel(PreTrainedModel):

    config_class = ABCConfig
    supports_gradient_checkpointing = True
    _no_split_modules = ['ABCBlock']

    def __init__(self, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)

    def _init_weights(
        self,
        module: nn.Module,
        rescale_prenorm_residual: bool = True,
        num_residuals_per_layer: int = 2,
    ):
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

        if rescale_prenorm_residual:
            # Rescale the output projections that feed the residual stream
            # (`o_proj`, `down_proj`) by 1/sqrt(N), where N is the total number of
            # residual additions (`num_residuals_per_layer` per layer), following
            # the GPT-2-style initialization scheme.
            for name, p in module.named_parameters():
                if name in ["o_proj.weight", "down_proj.weight"]:
                    with torch.no_grad():
                        p /= math.sqrt(num_residuals_per_layer * self.config.num_hidden_layers)


class ABCModel(ABCPreTrainedModel):

    def __init__(self, config: ABCConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([ABCBlock(config, layer_idx) for layer_idx in range(config.num_hidden_layers)])
        self.norm = RMSNorm(config.hidden_size, eps=config.norm_eps)

        self.gradient_checkpointing = False

        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings = value

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        if output_attentions:
            warnings.warn("`ABCModel` does not support `output_attentions` for now; setting it to `False`.")
            output_attentions = False
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve the batch size from either `input_ids` or `inputs_embeds`
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embeddings(input_ids)
        hidden_states = inputs_embeds

        if use_cache:
            if past_key_values is None:
                past_key_values = [layer.attn.init_state(batch_size) for layer in self.layers]
            if not isinstance(past_key_values, RecurrentCache):
                past_key_values = RecurrentCache.from_legacy_cache(past_key_values)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_attns = () if output_attentions else None
        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                hidden_states, attentions, past_key_values = self._gradient_checkpointing_func(
                    layer.__call__,
                    hidden_states,
                    attention_mask,
                    past_key_values,
                    use_cache,
                    output_attentions
                )
            else:
                hidden_states, attentions, past_key_values = layer(
                    hidden_states,
                    attention_mask,
                    past_key_values=past_key_values,
                    use_cache=use_cache,
                    output_attentions=output_attentions
                )

            if output_attentions:
                all_attns += (attentions,)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = past_key_values.to_legacy_cache()
        if not return_dict:
            return tuple(x for x in [hidden_states, next_cache, all_hidden_states, all_attns] if x is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_attns
        )


class ABCForCausalLM(ABCPreTrainedModel):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = ABCModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embeddings

    def set_input_embeddings(self, value):
        self.model.embeddings = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    def generate(self, *args, **kwargs):
        try:
            return super().generate(*args, **kwargs)
        except AttributeError as exception:
            if 'past_key_values' in str(exception):
                raise AttributeError(
                    f"You tried to call `generate` with a decoding strategy that manipulates `past_key_values`, "
                    f"which is not supported for {self.__class__.__name__}. "
                    f"Try another generation strategy instead. "
                    f"For the available generation strategies, check this doc: "
                    f"https://huggingface.co/docs/transformers/en/generation_strategies#decoding-strategies"
                )
            else:
                raise exception

    def prepare_inputs_for_generation(
        self,
        input_ids: torch.LongTensor = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        **kwargs
    ):
        # only the last token is needed once a recurrent cache has been built
        if past_key_values is not None:
            if not isinstance(past_key_values, RecurrentCache):
                past_key_values = RecurrentCache.from_legacy_cache(past_key_values, input_ids.shape[1] - 1)
            input_ids = input_ids[:, -1:]

        # if `inputs_embeds` are passed, use them only in the first generation step
        if inputs_embeds is not None and past_key_values is None:
            model_inputs = {'inputs_embeds': inputs_embeds}
        else:
            model_inputs = {'input_ids': input_ids}
        model_inputs['past_key_values'] = past_key_values
        return model_inputs

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[List[torch.Tensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict
        )

        hidden_states = outputs[0]
        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            if self.config.fuse_cross_entropy:
                loss_fct = FusedCrossEntropyLoss(inplace_backward=True)
            else:
                loss_fct = nn.CrossEntropyLoss()
            # shift labels left by one for next-token prediction, pad the last
            # position with `ignore_index`, and keep them on the logits' device
            labels = labels.to(logits.device)
            labels = torch.cat((labels[..., 1:], torch.full_like(labels[:, :1], loss_fct.ignore_index)), 1)
            loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
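

if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the original module): it instantiates a
    # tiny ABCForCausalLM and runs one forward pass with labels to check shapes and
    # the loss path. The config keyword overrides below are assumptions made for
    # illustration; adjust them to the fields actually exposed by `ABCConfig`.
    # Running it also requires the fused kernels used by `ABCAttention` and
    # `FusedCrossEntropyLoss`, so treat this purely as a usage sketch.
    config = ABCConfig(
        vocab_size=128,
        hidden_size=64,
        num_hidden_layers=2,
        num_heads=2,
    )
    model = ABCForCausalLM(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 16))
    outputs = model(input_ids=input_ids, labels=input_ids)
    print(outputs.loss, outputs.logits.shape)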