from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss

from transformers import AutoConfig, AutoModel
from transformers.cache_utils import Cache, DynamicCache
from transformers.modeling_attn_mask_utils import (
    _prepare_4d_attention_mask,
    _prepare_4d_attention_mask_for_sdpa,
)
from transformers.modeling_outputs import BaseModelOutputWithPast
from transformers.models.mistral.configuration_mistral import MistralConfig
from transformers.models.mistral.modeling_mistral import MistralModel
from transformers.utils import logging

logger = logging.get_logger(__name__)


# A distinct `model_type` key lets this variant be registered with and resolved
# by the Auto* factories independently of the stock causal Mistral.
class MistralBiDirectionalConfig(MistralConfig):
    model_type = "mistralbidirectional"


class MistralBiDirectionalModel(MistralModel):
    config_class = MistralBiDirectionalConfig

    def __init__(self, config: MistralConfig):
        super().__init__(config)
        # Disable the causal flag on every attention layer so each token can
        # attend to positions on both sides of it.
        for layer in self.layers:
            layer.self_attn.is_causal = False
        # Pin the attention implementation to the eager path; forward() below
        # then builds the bidirectional mask with `_prepare_4d_attention_mask`.
        self._attn_implementation = "eager"

    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape
        elif inputs_embeds is not None:
            batch_size, seq_length, _ = inputs_embeds.shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        past_key_values_length = 0

        if use_cache:
            use_legacy_cache = not isinstance(past_key_values, Cache)
            if use_legacy_cache:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
            past_key_values_length = past_key_values.get_usable_length(seq_length)

        if position_ids is None:
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            position_ids = torch.arange(
                past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
        else:
            position_ids = position_ids.view(-1, seq_length).long()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if attention_mask is not None and self._attn_implementation == "flash_attention_2" and use_cache:
            is_padding_right = attention_mask[:, -1].sum().item() != batch_size
            if is_padding_right:
                raise ValueError(
                    "You are attempting to perform batched generation with padding_side='right', which"
                    " may lead to unexpected behaviour with the Flash Attention version of Mistral. Make sure to"
                    " call `tokenizer.padding_side = 'left'` before tokenizing the input."
                )

        # Build a bidirectional 4D mask: padding positions are excluded, but no
        # causal triangle is applied, so attention flows in both directions.
        if self._attn_implementation == "flash_attention_2":
            raise NotImplementedError("bi-directional mask is not implemented for flash attention 2")
        elif attention_mask is None:
            # No padding information was given: every token may attend everywhere.
            bidirectional_attention_mask = None
        elif self._attn_implementation == "sdpa" and not output_attentions:
            bidirectional_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                attention_mask, inputs_embeds.dtype
            )
        else:
            bidirectional_attention_mask = _prepare_4d_attention_mask(
                attention_mask, inputs_embeds.dtype
            )

        hidden_states = inputs_embeds

        # Containers for optional per-layer outputs.
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    bidirectional_attention_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=bidirectional_attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = None
        if use_cache:
            next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
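

# The AutoConfig/AutoModel imports above suggest this variant is meant to be
# wired into the Auto* factories; a minimal sketch of that registration follows.
# The checkpoint name in the usage note below is illustrative, not part of this
# module. Loading a stock Mistral checkpoint should work (transformers warns
# about the `model_type` mismatch but proceeds), since this class adds no
# parameters beyond those of MistralModel.
AutoConfig.register("mistralbidirectional", MistralBiDirectionalConfig)
AutoModel.register(MistralBiDirectionalConfig, MistralBiDirectionalModel)

# Usage sketch:
#
#     model = MistralBiDirectionalModel.from_pretrained("mistralai/Mistral-7B-v0.1")
#     out = model(input_ids=input_ids, attention_mask=attention_mask)
#     token_embeddings = out.last_hidden_state  # (batch, seq_len, hidden_size)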