""" PyTorch DeciLM model."""
from .version_check import check_transformers_version

check_transformers_version()

from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging

from .configuration_decilm import DeciLMConfig
from .transformers_v4_35_2__modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from .transformers_v4_35_2__modeling_llama import LlamaMLP, LlamaRMSNorm, LlamaAttention, apply_rotary_pos_emb, \
    repeat_kv, LlamaPreTrainedModel, LLAMA_START_DOCSTRING, LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, \
    BaseModelOutputWithPast, LLAMA_INPUTS_DOCSTRING

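# Register the "deci" model type in the transformers Auto mapping so that configs with
# model_type == "deci" resolve to DeciLMForCausalLM when loaded through AutoModelForCausalLM.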
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES["deci"] = "DeciLMForCausalLM"
_CONFIG_FOR_DOC = "DeciLMConfig"
logger = logging.get_logger(__name__)


class DeciLMAttention(LlamaAttention):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

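    # DeciLM differs from the reference Llama attention in that the number of key/value heads is
    # read per layer from `config.num_key_value_heads_per_layer`, so every decoder layer can use a
    # different grouped-query-attention ratio (and therefore a different KV-cache footprint).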
    def __init__(self, config: DeciLMConfig, layer_idx: int):
        nn.Module.__init__(self)
        self.config = config
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.layer_idx = layer_idx
        self.num_key_value_heads = config.num_key_value_heads_per_layer[layer_idx]
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.pretraining_tp = config.pretraining_tp
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = getattr(config, 'rope_theta', None)

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)

        self._init_rope()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()
        is_decode = past_key_value is not None
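
        # A non-None `past_key_value` means this call is an incremental decoding step (q_len is the
        # number of new tokens, typically 1); the flag selects the fused-attention branch further below.
        # The `pretraining_tp > 1` branch applies the projections in weight slices to reproduce the
        # numerics of tensor-parallel pretraining, mirroring the reference Llama implementation.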
        if self.pretraining_tp > 1:
            key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.pretraining_tp
            query_slices = self.q_proj.weight.split((self.num_heads * self.head_dim) // self.pretraining_tp, dim=0)
            key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
            value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)

            query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.pretraining_tp)]
            query_states = torch.cat(query_states, dim=-1)

            key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.pretraining_tp)]
            key_states = torch.cat(key_states, dim=-1)

            value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.pretraining_tp)]
            value_states = torch.cat(value_states, dim=-1)

        else:
            query_states = self.q_proj(hidden_states)
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]
        cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)

        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)

        if past_key_value is not None:
            # reuse k, v, self_attention
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)

        past_key_value = (key_states, value_states) if use_cache else None

        # repeat k/v heads if n_kv_heads < n_heads
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
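        # Both branches below call F.scaled_dot_product_attention; the sdp_kernel context only
        # restricts which backends PyTorch may pick. The decode path allows the flash/mem-efficient
        # kernels (mem-efficient only when no mask is given), while the prefill path forces the math
        # backend and relies on `is_causal` when no padding mask is present. On the decode path the
        # transpose before the final view is skipped because q_len == 1, so the head and token axes
        # can be collapsed directly.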
        if is_decode:
            with torch.backends.cuda.sdp_kernel(enable_math=True, enable_flash=True,
                                                enable_mem_efficient=attention_mask is None):
                attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states,
                                                             is_causal=False,
                                                             attn_mask=attention_mask)
            attn_output = attn_output.contiguous().view(bsz, q_len, self.hidden_size)

        else:
            with torch.backends.cuda.sdp_kernel(enable_math=True, enable_flash=False, enable_mem_efficient=False):
                attn_output = F.scaled_dot_product_attention(query_states, key_states, value_states,
                                                             is_causal=attention_mask is None,
                                                             attn_mask=attention_mask)

            if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
                raise ValueError(
                    f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                    f" {attn_output.size()}"
                )

            attn_output = attn_output.transpose(1, 2).contiguous().view(bsz, q_len, self.hidden_size)

        if self.pretraining_tp > 1:
            attn_output = attn_output.split(self.hidden_size // self.pretraining_tp, dim=2)
            o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.pretraining_tp, dim=1)
            attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.pretraining_tp)])
        else:
            attn_output = self.o_proj(attn_output)

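        # scaled_dot_product_attention does not expose attention probabilities, so this module always
        # returns `attn_weights = None`, even if the caller requested `output_attentions=True`.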
        attn_weights = None

        return attn_output, attn_weights, past_key_value


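# The decoder layer keeps LlamaDecoderLayer's forward pass and only swaps in DeciLMAttention,
# so the per-layer key/value-head count is the only structural difference from Llama.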
class DeciLMDecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: DeciLMConfig, layer_idx: int):
        nn.Module.__init__(self)
        self.hidden_size = config.hidden_size
        self.layer_idx = layer_idx
        self.self_attn = DeciLMAttention(config=config, layer_idx=layer_idx)
        self.mlp = LlamaMLP(config)
        self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)


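# `_no_split_modules` tells accelerate not to shard a single DeciLMDecoderLayer across devices
# when loading with `device_map="auto"`; missing rotary-embedding buffers are ignored on load.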
@add_start_docstrings(
    "The bare DeciLM Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class DeciLMPreTrainedModel(LlamaPreTrainedModel):
    config_class = DeciLMConfig
    _no_split_modules = ["DeciLMDecoderLayer"]
    _keys_to_ignore_on_load_missing = ["self_attn.rotary_emb.inv_freq"]


@add_start_docstrings(
    "The bare DeciLM Model outputting raw hidden-states without any specific head on top.",
    LLAMA_START_DOCSTRING,
)
class DeciLMModel(LlamaModel, DeciLMPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`DeciLMDecoderLayer`].

    Args:
        config: DeciLMConfig
    """

    def __init__(self, config: DeciLMConfig):
        DeciLMPreTrainedModel.__init__(self, config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList([DeciLMDecoderLayer(config, layer_idx) for layer_idx
                                     in range(config.num_hidden_layers)])
        self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

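        # `past_key_values` uses the legacy tuple cache layout: one (key, value) pair per layer, each
        # of shape [batch, num_kv_heads, cached_seq_len, head_dim], so the cached length is dim 2.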
        past_key_values_length = 0
        if past_key_values is not None:
            past_key_values_length = past_key_values[0][0].shape[2]

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0)

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

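        # If the mask contains no zeros (i.e. no padding), drop it entirely so the attention layers
        # can take the `is_causal` fast path; otherwise expand it to the 4D causal mask they expect.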
        attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
        if attention_mask is not None:
            # 4d mask is passed through the layers
            attention_mask = _prepare_4d_causal_attention_mask(
                attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
            )

        # embed positions
        hidden_states = inputs_embeds

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    position_ids,
                    past_key_value,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )


class DeciLMForCausalLM(LlamaForCausalLM, DeciLMPreTrainedModel):
    def __init__(self, config):
        DeciLMPreTrainedModel.__init__(self, config)
        self.model = DeciLMModel(config)
        self.pretraining_tp = config.pretraining_tp
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

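    # Minimal usage sketch (not part of the model code): DeciLM checkpoints are normally loaded via
    # the Auto classes with `trust_remote_code=True`; the checkpoint name below is only an example.
    #
    #     from transformers import AutoModelForCausalLM, AutoTokenizer
    #     tokenizer = AutoTokenizer.from_pretrained("Deci/DeciLM-7B")
    #     model = AutoModelForCausalLM.from_pretrained("Deci/DeciLM-7B", trust_remote_code=True)
    #     inputs = tokenizer("DeciLM is", return_tensors="pt")
    #     outputs = model.generate(**inputs, max_new_tokens=32)
    #     print(tokenizer.decode(outputs[0], skip_special_tokens=True))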