|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| import inspect
|
| import math
|
| import copy
|
| import os
|
| import time
|
| import pandas as pd
|
| import seaborn as sns
|
| import matplotlib.pyplot as plt
|
|
|
| from termcolor import colored
|
| from tqdm import tqdm
|
| import random
|
| import numpy as np
|
| from matplotlib.colors import LinearSegmentedColormap, LogNorm
|
| import warnings
|
| from collections import defaultdict
|
| from typing import List, Optional, Tuple, Union
|
|
|
|
|
|
| import torch
|
| import torch.utils.checkpoint
|
| from torch import nn
|
| from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
|
| import torch.nn.functional as F
|
| from transformers.configuration_utils import PretrainedConfig
|
| from transformers.utils import logging, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
|
| from transformers.modeling_utils import PreTrainedModel
|
| from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
|
| from transformers.activations import ACT2FN
|
| from transformers.modeling_attn_mask_utils import AttentionMaskConverter
|
| from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast, TokenClassifierOutput, QuestionAnsweringModelOutput, MoeCausalLMOutputWithPast, MoeModelOutputWithPast
|
| from tokenizers import processors
|
| from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
|
| from transformers.utils import is_sentencepiece_available, logging
|
| from transformers.utils.versions import require_version
|
| from shutil import copyfile
|
|
| from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
|
| import sentencepiece as spm
|
| from transformers.convert_slow_tokenizer import import_protobuf
|
| from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
|
| from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
|
|
| from transformers.utils import (
|
| add_start_docstrings,
|
| add_start_docstrings_to_model_forward,
|
| is_flash_attn_2_available,
|
| is_flash_attn_greater_or_equal_2_10,
|
| logging,
|
| replace_return_docstrings,
|
| )
|
| if TYPE_CHECKING:
|
| from transformers.tokenization_utils_base import TextInput
|
| if is_flash_attn_2_available():
|
| from flash_attn import flash_attn_func, flash_attn_varlen_func
|
| from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
|
|
|
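| # Check whether the installed flash-attn build exposes the `window_size` argument,
| # which is needed for sliding-window attention under Flash Attention 2.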
| _flash_supports_window_size = "window_size" in list(inspect.signature(flash_attn_func).parameters)
|
|
|
| MISTRAL_INPUTS_DOCSTRING = r"""
|
| Args:
|
| input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
| Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
| it.
|
|
|
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| [`PreTrainedTokenizer.__call__`] for details.
|
|
|
| [What are input IDs?](../glossary#input-ids)
|
| attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
|
|
| - 1 for tokens that are **not masked**,
|
| - 0 for tokens that are **masked**.
|
|
|
| [What are attention masks?](../glossary#attention-mask)
|
|
|
| Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
| [`PreTrainedTokenizer.__call__`] for details.
|
|
|
| If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
|
| `past_key_values`).
|
|
|
| If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
| and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
| information on the default strategy.
|
|
|
| - 1 indicates the head is **not masked**,
|
| - 0 indicates the head is **masked**.
|
| position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
| config.n_positions - 1]`.
|
|
|
| [What are position IDs?](../glossary#position-ids)
|
| past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
|
| Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
| blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
|
| returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
|
|
|
| Two formats are allowed:
|
| - a [`~cache_utils.Cache`] instance;
|
| - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
| shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
|
| cache format.
|
|
|
| The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
|
| legacy cache format will be returned.
|
|
|
| If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
| have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
| of shape `(batch_size, sequence_length)`.
|
| inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
| Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
| is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
| model's internal embedding lookup matrix.
|
| use_cache (`bool`, *optional*):
|
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
| `past_key_values`).
|
| output_attentions (`bool`, *optional*):
|
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
| tensors for more detail.
|
| output_hidden_states (`bool`, *optional*):
|
| Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
| more detail.
|
| return_dict (`bool`, *optional*):
|
| Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
| """
|
| MISTRAL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
|
| "mistralai/Mistral-7B-v0.1": "https://huggingface.co/mistralai/Mistral-7B-v0.1/resolve/main/config.json",
|
| "mistralai/Mistral-7B-Instruct-v0.1": "https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1/resolve/main/config.json",
|
| }
|
| MISTRAL_START_DOCSTRING = r"""
|
| This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
| library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
|
| etc.)
|
|
|
| This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
| Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
| and behavior.
|
|
|
| Parameters:
|
| config ([`MistralConfig`]):
|
| Model configuration class with all the parameters of the model. Initializing with a config file does not
|
| load the weights associated with the model, only the configuration. Check out the
|
| [`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
| """
|
| _CONFIG_FOR_DOC = "MistralStarConfig"
|
| logger = logging.get_logger(__name__)
|
| SPIECE_UNDERLINE = "▁"
|
| require_version("tokenizers>=0.13.3")
|
| VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}
|
|
|
| B_INST, E_INST = "[INST]", "[/INST]"
|
| B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
|
|
|
|
|
| DEFAULT_SYSTEM_PROMPT = """You are the world archive of knowledge and a friendly, helpful artificial intelligence:
| consider whether the current task calls for creating a Python function that can be saved and re-used for a future task;
| if one is created, return the artifact to the user in the response, or save it as a file on the user's system if access is given.
| Be flirtatious and joyful, always willing to help, engage in conversation, or express an opinion or sentiment.
| Answer all questions in depth and supply helpful references where possible."""
|
|
|
|
|
|
|
| class MixtralConfig(PretrainedConfig):
|
| r"""
|
| This is the configuration class to store the configuration of a [`MixtralModel`]. It is used to instantiate a
| Mixtral model according to the specified arguments, defining the model architecture. Instantiating a configuration
| with the defaults will yield a similar configuration to that of the Mixtral-8x7B.
|
| [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)
| [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
|
|
|
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| documentation from [`PretrainedConfig`] for more information.
|
|
|
|
|
| Args:
|
| vocab_size (`int`, *optional*, defaults to 32000):
|
| Vocabulary size of the Mixtral model. Defines the number of different tokens that can be represented by the
|
| `inputs_ids` passed when calling [`MixtralModel`]
|
| hidden_size (`int`, *optional*, defaults to 4096):
|
| Dimension of the hidden representations.
|
| intermediate_size (`int`, *optional*, defaults to 14336):
|
| Dimension of the MLP representations.
|
| num_hidden_layers (`int`, *optional*, defaults to 32):
|
| Number of hidden layers in the Transformer encoder.
|
| num_attention_heads (`int`, *optional*, defaults to 32):
|
| Number of attention heads for each attention layer in the Transformer encoder.
|
| num_key_value_heads (`int`, *optional*, defaults to 8):
|
| This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
| `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
| `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
|
| converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
| by meanpooling all the original heads within that group. For more details, check out [this
|
| paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
|
| hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
| The non-linear activation function (function or string) in the decoder.
|
| max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
|
| The maximum sequence length that this model might ever be used with. Mixtral's sliding window attention
|
| allows sequences of up to 4096*32 tokens.
|
| initializer_range (`float`, *optional*, defaults to 0.02):
|
| The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| rms_norm_eps (`float`, *optional*, defaults to 1e-05):
|
| The epsilon used by the rms normalization layers.
|
| use_cache (`bool`, *optional*, defaults to `True`):
|
| Whether or not the model should return the last key/values attentions (not used by all models). Only
|
| relevant if `config.is_decoder=True`.
|
| pad_token_id (`int`, *optional*):
|
| The id of the padding token.
|
| bos_token_id (`int`, *optional*, defaults to 1):
|
| The id of the "beginning-of-sequence" token.
|
| eos_token_id (`int`, *optional*, defaults to 2):
|
| The id of the "end-of-sequence" token.
|
| tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
| Whether the model's input and output word embeddings should be tied.
|
| rope_theta (`float`, *optional*, defaults to 1000000.0):
|
| The base period of the RoPE embeddings.
|
| sliding_window (`int`, *optional*):
|
| Sliding window attention window size. If not specified, will default to `4096`.
|
| attention_dropout (`float`, *optional*, defaults to 0.0):
|
| The dropout ratio for the attention probabilities.
|
| num_experts_per_tok (`int`, *optional*, defaults to 2):
|
| The number of experts to route per-token, can be also interpreted as the `top-k` routing
|
| parameter
|
| num_local_experts (`int`, *optional*, defaults to 8):
|
| Number of experts per Sparse MLP layer.
|
| output_router_logits (`bool`, *optional*, defaults to `False`):
|
| Whether or not the router logits should be returned by the model. Enabling this will also
| allow the model to output the auxiliary loss.
|
| router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
|
| The aux loss factor for the total loss.
|
| router_jitter_noise (`float`, *optional*, defaults to 0.0):
|
| Amount of noise to add to the router.
|
|
|
| ```python
|
| >>> from transformers import MixtralModel, MixtralConfig
|
|
|
| >>> # Initializing a Mixtral 7B style configuration
|
| >>> configuration = MixtralConfig()
|
|
|
| >>> # Initializing a model from the Mixtral 7B style configuration
|
| >>> model = MixtralModel(configuration)
|
|
|
| >>> # Accessing the model configuration
|
| >>> configuration = model.config
|
| ```"""
|
|
|
| model_type = "mixtral"
|
| keys_to_ignore_at_inference = ["past_key_values"]
|
|
|
| def __init__(
|
| self,
|
| vocab_size=32000,
|
| hidden_size=4096,
|
| intermediate_size=14336,
|
| num_hidden_layers=32,
|
| num_attention_heads=32,
|
| num_key_value_heads=8,
|
| hidden_act="silu",
|
| max_position_embeddings=4096 * 32,
|
| initializer_range=0.02,
|
| rms_norm_eps=1e-5,
|
| use_cache=True,
|
| pad_token_id=None,
|
| bos_token_id=1,
|
| eos_token_id=2,
|
| tie_word_embeddings=False,
|
| rope_theta=1e6,
|
| sliding_window=None,
|
| attention_dropout=0.0,
|
| num_experts_per_tok=2,
|
| num_local_experts=8,
|
| output_router_logits=False,
|
| router_aux_loss_coef=0.001,
|
| router_jitter_noise=0.0,
|
| **kwargs,
|
| ):
|
| self.vocab_size = vocab_size
|
| self.max_position_embeddings = max_position_embeddings
|
| self.hidden_size = hidden_size
|
| self.intermediate_size = intermediate_size
|
| self.num_hidden_layers = num_hidden_layers
|
| self.num_attention_heads = num_attention_heads
|
| self.sliding_window = sliding_window
|
|
|
|
|
| if num_key_value_heads is None:
|
| num_key_value_heads = num_attention_heads
|
|
|
| self.num_key_value_heads = num_key_value_heads
|
| self.hidden_act = hidden_act
|
| self.initializer_range = initializer_range
|
| self.rms_norm_eps = rms_norm_eps
|
| self.use_cache = use_cache
|
| self.rope_theta = rope_theta
|
| self.attention_dropout = attention_dropout
|
|
|
| self.num_experts_per_tok = num_experts_per_tok
|
| self.num_local_experts = num_local_experts
|
| self.output_router_logits = output_router_logits
|
| self.router_aux_loss_coef = router_aux_loss_coef
|
| self.router_jitter_noise = router_jitter_noise
|
| super().__init__(
|
| pad_token_id=pad_token_id,
|
| bos_token_id=bos_token_id,
|
| eos_token_id=eos_token_id,
|
| tie_word_embeddings=tie_word_embeddings,
|
| **kwargs,
|
| )
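| # Illustrative sketch (not part of the original file): the MoE-specific options on
| # MixtralConfig are `num_local_experts` (experts per sparse MLP layer) and
| # `num_experts_per_tok` (top-k routing); the values below are examples only.
| # >>> cfg = MixtralConfig(num_local_experts=8, num_experts_per_tok=2, output_router_logits=True)
| # >>> cfg.router_aux_loss_coef
| # 0.001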
|
|
|
| class MistralStarConfig(PretrainedConfig):
|
| r"""
|
| This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a
|
| Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
|
| with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1.
|
|
|
| [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
|
| [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
|
|
|
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| documentation from [`PretrainedConfig`] for more information.
|
|
|
|
|
| Args:
|
| vocab_size (`int`, *optional*, defaults to 32000):
|
| Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the
|
| `inputs_ids` passed when calling [`MistralModel`]
|
| hidden_size (`int`, *optional*, defaults to 4096):
|
| Dimension of the hidden representations.
|
| intermediate_size (`int`, *optional*, defaults to 14336):
|
| Dimension of the MLP representations.
|
| num_hidden_layers (`int`, *optional*, defaults to 32):
|
| Number of hidden layers in the Transformer encoder.
|
| num_attention_heads (`int`, *optional*, defaults to 32):
|
| Number of attention heads for each attention layer in the Transformer encoder.
|
| num_key_value_heads (`int`, *optional*, defaults to 8):
|
| This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
| `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
| `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
|
| converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
| by meanpooling all the original heads within that group. For more details, check out [this
|
| paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
|
| hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
| The non-linear activation function (function or string) in the decoder.
|
| max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
|
| The maximum sequence length that this model might ever be used with. Mistral's sliding window attention
|
| allows sequences of up to 4096*32 tokens.
|
| initializer_range (`float`, *optional*, defaults to 0.02):
|
| The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| rms_norm_eps (`float`, *optional*, defaults to 1e-06):
|
| The epsilon used by the rms normalization layers.
|
| use_cache (`bool`, *optional*, defaults to `True`):
|
| Whether or not the model should return the last key/values attentions (not used by all models). Only
|
| relevant if `config.is_decoder=True`.
|
| pad_token_id (`int`, *optional*):
|
| The id of the padding token.
|
| bos_token_id (`int`, *optional*, defaults to 1):
|
| The id of the "beginning-of-sequence" token.
|
| eos_token_id (`int`, *optional*, defaults to 2):
|
| The id of the "end-of-sequence" token.
|
| tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
| Whether the model's input and output word embeddings should be tied.
|
| rope_theta (`float`, *optional*, defaults to 10000.0):
|
| The base period of the RoPE embeddings.
|
| sliding_window (`int`, *optional*, defaults to 4096):
|
| Sliding window attention window size. If not specified, will default to `4096`.
|
| attention_dropout (`float`, *optional*, defaults to 0.0):
|
| The dropout ratio for the attention probabilities.
|
|
|
| ```python
|
| >>> from transformers import MistralModel, MistralConfig
|
|
|
| >>> # Initializing a Mistral 7B style configuration
|
| >>> configuration = MistralConfig()
|
|
|
| >>> # Initializing a model from the Mistral 7B style configuration
|
| >>> model = MistralModel(configuration)
|
|
|
| >>> # Accessing the model configuration
|
| >>> configuration = model.config
|
| ```"""
|
|
|
| model_type = "mistralstar"
|
| keys_to_ignore_at_inference = ["past_key_values"]
|
|
|
| def __init__(
|
| self,
|
| vocab_size=32000,
|
| hidden_size=4096,
|
| intermediate_size=14336,
|
| num_hidden_layers=32,
|
| num_attention_heads=32,
|
| num_key_value_heads=8,
|
| hidden_act="silu",
|
| max_position_embeddings=4096 * 32,
|
| initializer_range=0.02,
|
| rms_norm_eps=1e-6,
|
| use_cache=True,
|
| pad_token_id=None,
|
| bos_token_id=1,
|
| eos_token_id=2,
|
| tie_word_embeddings=False,
|
| rope_theta=10000.0,
|
| sliding_window=4096,
|
| attention_dropout=0.0,
|
| max_thoughts=16,
|
| thought_length=10,
|
| merged_talk_heads=True,
|
| merged_lm_and_talk_heads=False,
|
| merged_lm_and_think_heads=True,
|
| use_concat_talk_head=True,
|
| use_shallow_think=True,
|
| use_shallow_talk=False,
|
| use_complex_think_head=False,
|
| use_complex_talk_head=True,
|
| use_weighted_talk_head=True,
|
| **kwargs,
|
| ):
|
| self.vocab_size = vocab_size
|
| self.max_position_embeddings = max_position_embeddings
|
| self.hidden_size = hidden_size
|
| self.intermediate_size = intermediate_size
|
| self.num_hidden_layers = num_hidden_layers
|
| self.num_attention_heads = num_attention_heads
|
| self.sliding_window = sliding_window
|
|
|
|
|
| if num_key_value_heads is None:
|
| num_key_value_heads = num_attention_heads
|
|
|
| self.num_key_value_heads = num_key_value_heads
|
| self.hidden_act = hidden_act
|
| self.initializer_range = initializer_range
|
| self.rms_norm_eps = rms_norm_eps
|
| self.use_cache = use_cache
|
| self.rope_theta = rope_theta
|
| self.attention_dropout = attention_dropout
|
| self.max_thoughts = max_thoughts
|
| self.thought_length = thought_length
|
| self.merged_talk_heads = merged_talk_heads
|
| self.merged_lm_and_talk_heads = merged_lm_and_talk_heads
|
| self.merged_lm_and_think_heads = merged_lm_and_think_heads
|
| self.use_concat_talk_head = use_concat_talk_head
|
| self.use_shallow_think = use_shallow_think
|
| self.use_shallow_talk = use_shallow_talk
|
| self.use_complex_think_head = use_complex_think_head
|
| self.use_complex_talk_head = use_complex_talk_head
|
| self.use_weighted_talk_head = use_weighted_talk_head
|
|
|
| super().__init__(
|
| pad_token_id=pad_token_id,
|
| bos_token_id=bos_token_id,
|
| eos_token_id=eos_token_id,
|
| tie_word_embeddings=tie_word_embeddings,
|
| **kwargs,
|
| )
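| # Illustrative sketch (not part of the original file): MistralStarConfig adds the
| # thought/talk-head options above on top of the stock Mistral settings; the values
| # shown are assumptions for demonstration, not tuned settings.
| # >>> star_cfg = MistralStarConfig(max_thoughts=16, thought_length=10, use_weighted_talk_head=True)
| # >>> star_cfg.model_type
| # 'mistralstar'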
|
|
|
| class MistralConfig(PretrainedConfig):
|
| r"""
|
| This is the configuration class to store the configuration of a [`MistralModel`]. It is used to instantiate a
|
| Mistral model according to the specified arguments, defining the model architecture. Instantiating a configuration
|
| with the defaults will yield a similar configuration to that of the Mistral-7B-v0.1 or Mistral-7B-Instruct-v0.1.
|
|
|
| [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
|
| [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)
|
|
|
| Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
|
| documentation from [`PretrainedConfig`] for more information.
|
|
|
|
|
| Args:
|
| vocab_size (`int`, *optional*, defaults to 32000):
|
| Vocabulary size of the Mistral model. Defines the number of different tokens that can be represented by the
|
| `inputs_ids` passed when calling [`MistralModel`]
|
| hidden_size (`int`, *optional*, defaults to 4096):
|
| Dimension of the hidden representations.
|
| intermediate_size (`int`, *optional*, defaults to 14336):
|
| Dimension of the MLP representations.
|
| num_hidden_layers (`int`, *optional*, defaults to 32):
|
| Number of hidden layers in the Transformer encoder.
|
| num_attention_heads (`int`, *optional*, defaults to 32):
|
| Number of attention heads for each attention layer in the Transformer encoder.
|
| num_key_value_heads (`int`, *optional*, defaults to 8):
|
| This is the number of key_value heads that should be used to implement Grouped Query Attention. If
|
| `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
|
| `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
|
| converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
|
| by meanpooling all the original heads within that group. For more details, check out [this
|
| paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `8`.
|
| hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
|
| The non-linear activation function (function or string) in the decoder.
|
| max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
|
| The maximum sequence length that this model might ever be used with. Mistral's sliding window attention
|
| allows sequences of up to 4096*32 tokens.
|
| initializer_range (`float`, *optional*, defaults to 0.02):
|
| The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
|
| rms_norm_eps (`float`, *optional*, defaults to 1e-06):
|
| The epsilon used by the rms normalization layers.
|
| use_cache (`bool`, *optional*, defaults to `True`):
|
| Whether or not the model should return the last key/values attentions (not used by all models). Only
|
| relevant if `config.is_decoder=True`.
|
| pad_token_id (`int`, *optional*):
|
| The id of the padding token.
|
| bos_token_id (`int`, *optional*, defaults to 1):
|
| The id of the "beginning-of-sequence" token.
|
| eos_token_id (`int`, *optional*, defaults to 2):
|
| The id of the "end-of-sequence" token.
|
| tie_word_embeddings (`bool`, *optional*, defaults to `False`):
|
| Whether the model's input and output word embeddings should be tied.
|
| rope_theta (`float`, *optional*, defaults to 10000.0):
|
| The base period of the RoPE embeddings.
|
| sliding_window (`int`, *optional*, defaults to 4096):
|
| Sliding window attention window size. If not specified, will default to `4096`.
|
| attention_dropout (`float`, *optional*, defaults to 0.0):
|
| The dropout ratio for the attention probabilities.
|
|
|
| ```python
|
| >>> from transformers import MistralModel, MistralConfig
|
|
|
| >>> # Initializing a Mistral 7B style configuration
|
| >>> configuration = MistralConfig()
|
|
|
| >>> # Initializing a model from the Mistral 7B style configuration
|
| >>> model = MistralModel(configuration)
|
|
|
| >>> # Accessing the model configuration
|
| >>> configuration = model.config
|
| ```"""
|
|
|
| model_type = "mistral"
|
| keys_to_ignore_at_inference = ["past_key_values"]
|
|
|
| def __init__(
|
| self,
|
| vocab_size=32000,
|
| hidden_size=4096,
|
| intermediate_size=14336,
|
| num_hidden_layers=32,
|
| num_attention_heads=32,
|
| num_key_value_heads=8,
|
| hidden_act="silu",
|
| max_position_embeddings=4096 * 32,
|
| initializer_range=0.02,
|
| rms_norm_eps=1e-6,
|
| use_cache=True,
|
| pad_token_id=None,
|
| bos_token_id=1,
|
| eos_token_id=2,
|
| tie_word_embeddings=False,
|
| rope_theta=10000.0,
|
| sliding_window=4096,
|
| attention_dropout=0.0,
|
| max_thoughts=16,
|
| merged_talk_heads=True,
|
| merged_lm_and_talk_heads=False,
|
| merged_lm_and_think_heads=True,
|
| use_concat_talk_head=True,
|
| use_shallow_think=True,
|
| use_shallow_talk=False,
|
| use_complex_think_head=False,
|
| use_complex_talk_head=True,
|
| use_weighted_talk_head=True,
|
| **kwargs,
|
| ):
|
| self.vocab_size = vocab_size
|
| self.max_position_embeddings = max_position_embeddings
|
| self.hidden_size = hidden_size
|
| self.intermediate_size = intermediate_size
|
| self.num_hidden_layers = num_hidden_layers
|
| self.num_attention_heads = num_attention_heads
|
| self.sliding_window = sliding_window
|
|
|
|
|
| if num_key_value_heads is None:
|
| num_key_value_heads = num_attention_heads
|
|
|
| self.num_key_value_heads = num_key_value_heads
|
| self.hidden_act = hidden_act
|
| self.initializer_range = initializer_range
|
| self.rms_norm_eps = rms_norm_eps
|
| self.use_cache = use_cache
|
| self.rope_theta = rope_theta
|
| self.attention_dropout = attention_dropout
|
| self.max_thoughts = max_thoughts
|
| self.merged_talk_heads = merged_talk_heads
|
| self.merged_lm_and_talk_heads = merged_lm_and_talk_heads
|
| self.merged_lm_and_think_heads = merged_lm_and_think_heads
|
| self.use_concat_talk_head = use_concat_talk_head
|
| self.use_shallow_think = use_shallow_think
|
| self.use_shallow_talk = use_shallow_talk
|
| self.use_complex_think_head = use_complex_think_head
|
| self.use_complex_talk_head = use_complex_talk_head
|
| self.use_weighted_talk_head = use_weighted_talk_head
|
|
|
| super().__init__(
|
| pad_token_id=pad_token_id,
|
| bos_token_id=bos_token_id,
|
| eos_token_id=eos_token_id,
|
| tie_word_embeddings=tie_word_embeddings,
|
| **kwargs,
|
| )
|
|
|
|
|
| @add_start_docstrings(
|
| "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
|
| MISTRAL_START_DOCSTRING,
|
| )
|
| class MistralPreTrainedModel(PreTrainedModel):
|
| config_class = MistralConfig
|
| base_model_prefix = "model"
|
| supports_gradient_checkpointing = True
|
| _no_split_modules = ["MistralDecoderLayer"]
|
| _skip_keys_device_placement = "past_key_values"
|
| _supports_flash_attn_2 = True
|
| _supports_sdpa = True
|
| _supports_cache_class = True
|
| _supports_static_cache = True
|
|
|
| def _init_weights(self, module):
|
| std = self.config.initializer_range
|
| if isinstance(module, nn.Linear):
|
| module.weight.data.normal_(mean=0.0, std=std)
|
| if module.bias is not None:
|
| module.bias.data.zero_()
|
| elif isinstance(module, nn.Embedding):
|
| module.weight.data.normal_(mean=0.0, std=std)
|
| if module.padding_idx is not None:
|
| module.weight.data[module.padding_idx].zero_()
|
|
|
|
|
| @add_start_docstrings(
|
| "The bare Mistral Model outputting raw hidden-states without any specific head on top.",
|
| MISTRAL_START_DOCSTRING,
|
| )
|
| class MistralModel(MistralPreTrainedModel):
|
| """
|
| Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MistralDecoderLayer`]
|
|
|
| Args:
|
| config: MistralConfig
|
| """
|
|
|
| def __init__(self, config: MistralConfig):
|
| super().__init__(config)
|
| self.padding_idx = config.pad_token_id
|
| self.vocab_size = config.vocab_size
|
|
|
| self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
| self.layers = nn.ModuleList(
|
| [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
| )
|
| self._attn_implementation = config._attn_implementation
|
| self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
|
|
| self.gradient_checkpointing = False
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self):
|
| return self.embed_tokens
|
|
|
| def set_input_embeddings(self, value):
|
| self.embed_tokens = value
|
|
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| def forward(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| cache_position: Optional[torch.LongTensor] = None,
|
| ) -> Union[Tuple, BaseModelOutputWithPast]:
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| use_cache = use_cache if use_cache is not None else self.config.use_cache
|
|
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
|
|
| if (input_ids is None) ^ (inputs_embeds is not None):
|
| raise ValueError(
|
| "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
|
| )
|
|
|
| if self.gradient_checkpointing and self.training and use_cache:
|
| logger.warning_once(
|
| "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
|
| )
|
| use_cache = False
|
|
|
| if inputs_embeds is None:
|
| inputs_embeds = self.embed_tokens(input_ids)
|
|
|
| return_legacy_cache = False
|
| if use_cache and not isinstance(past_key_values, Cache):
|
| past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
| return_legacy_cache = True
|
| logger.warning_once(
|
| "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. "
|
| "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
|
| )
|
|
|
| if cache_position is None:
|
| past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
| cache_position = torch.arange(
|
| past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
| )
|
|
|
| if position_ids is None:
|
| position_ids = cache_position.unsqueeze(0)
|
|
|
| causal_mask = self._update_causal_mask(
|
| attention_mask, inputs_embeds, cache_position, past_key_values, use_cache, output_attentions
|
| )
|
|
|
| hidden_states = inputs_embeds
|
|
|
|
|
| all_hidden_states = () if output_hidden_states else None
|
| all_self_attns = () if output_attentions else None
|
| next_decoder_cache = None
|
|
|
| for decoder_layer in self.layers:
|
| if output_hidden_states:
|
| all_hidden_states += (hidden_states,)
|
|
|
| if self.gradient_checkpointing and self.training:
|
| layer_outputs = self._gradient_checkpointing_func(
|
| decoder_layer.__call__,
|
| hidden_states,
|
| causal_mask,
|
| position_ids,
|
| past_key_values,
|
| output_attentions,
|
| use_cache,
|
| cache_position,
|
| )
|
| else:
|
| layer_outputs = decoder_layer(
|
| hidden_states,
|
| attention_mask=causal_mask,
|
| position_ids=position_ids,
|
| past_key_value=past_key_values,
|
| output_attentions=output_attentions,
|
| use_cache=use_cache,
|
| cache_position=cache_position,
|
| )
|
|
|
| hidden_states = layer_outputs[0]
|
|
|
| if use_cache:
|
| next_decoder_cache = layer_outputs[2 if output_attentions else 1]
|
|
|
| if output_attentions:
|
| all_self_attns += (layer_outputs[1],)
|
|
|
| hidden_states = self.norm(hidden_states)
|
|
|
|
|
| if output_hidden_states:
|
| all_hidden_states += (hidden_states,)
|
|
|
| next_cache = next_decoder_cache if use_cache else None
|
| if return_legacy_cache:
|
| next_cache = next_cache.to_legacy_cache()
|
|
|
| if not return_dict:
|
| return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
|
| return BaseModelOutputWithPast(
|
| last_hidden_state=hidden_states,
|
| past_key_values=next_cache,
|
| hidden_states=all_hidden_states,
|
| attentions=all_self_attns,
|
| )
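| # Minimal usage sketch (assumption, not part of the original file; relies on the
| # MistralDecoderLayer and MistralRMSNorm classes defined elsewhere in this file):
| # >>> tiny_cfg = MistralConfig(vocab_size=100, hidden_size=64, intermediate_size=128,
| # ...                          num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2)
| # >>> model = MistralModel(tiny_cfg)
| # >>> out = model(input_ids=torch.randint(0, 100, (1, 8)))
| # >>> out.last_hidden_state.shape
| # torch.Size([1, 8, 64])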
|
|
|
| def _update_causal_mask(
|
| self,
|
| attention_mask: torch.Tensor,
|
| input_tensor: torch.Tensor,
|
| cache_position: torch.Tensor,
|
| past_key_values: Cache,
|
| use_cache: bool,
|
| output_attentions: bool,
|
| ):
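| # For flash_attention_2, a 2D padding mask (or None) is enough: flash-attn applies
| # causality and the sliding window internally. For SDPA without a static or sliding
| # cache, the mask can often be dropped entirely and handled via `is_causal`.
| # Otherwise a 4D additive mask is built in which future positions, and positions
| # older than `config.sliding_window`, are filled with the dtype minimum.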
|
|
|
|
|
|
|
|
|
|
|
| if self._attn_implementation == "flash_attention_2":
|
| if attention_mask is not None and use_cache:
|
| is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
|
| if is_padding_right:
|
| raise ValueError(
|
| "You are attempting to perform batched generation with padding_side='right'"
|
| " this may lead to unexpected behaviour for Flash Attention version of Mistral. Make sure to "
|
| " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
|
| )
|
| if attention_mask is not None and 0.0 in attention_mask:
|
| return attention_mask
|
| return None
|
|
|
|
|
|
|
|
|
|
|
|
|
| past_seen_tokens = cache_position[0] if past_key_values is not None else 0
|
| using_static_cache = isinstance(past_key_values, StaticCache)
|
| using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
|
|
|
| if (
|
| self.config._attn_implementation == "sdpa"
|
| and not (using_static_cache or using_sliding_window_cache)
|
| and not output_attentions
|
| ):
|
| if AttentionMaskConverter._ignore_causal_mask_sdpa(
|
| attention_mask,
|
| inputs_embeds=input_tensor,
|
| past_key_values_length=past_seen_tokens,
|
| sliding_window=self.config.sliding_window,
|
| is_training=self.training,
|
| ):
|
| return None
|
|
|
| dtype, device = input_tensor.dtype, input_tensor.device
|
| min_dtype = torch.finfo(dtype).min
|
| sequence_length = input_tensor.shape[1]
|
|
|
| if using_sliding_window_cache:
|
| target_length = max(sequence_length, self.config.sliding_window)
|
|
|
| elif using_static_cache:
|
| target_length = past_key_values.get_max_length()
|
|
|
| else:
|
| target_length = (
|
| attention_mask.shape[-1]
|
| if isinstance(attention_mask, torch.Tensor)
|
| else past_seen_tokens + sequence_length + 1
|
| )
|
|
|
| if attention_mask is not None and attention_mask.dim() == 4:
|
|
|
| if attention_mask.max() != 0:
|
| raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0`")
|
| causal_mask = attention_mask
|
| else:
|
| causal_mask = torch.full(
|
| (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
|
| )
|
| exclude_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
|
| if self.config.sliding_window is not None:
|
| if not using_sliding_window_cache or sequence_length > self.config.sliding_window:
|
| exclude_mask.bitwise_or_(
|
| torch.arange(target_length, device=device)
|
| <= (cache_position.reshape(-1, 1) - self.config.sliding_window)
|
| )
|
| causal_mask *= exclude_mask
|
| causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
|
| if attention_mask is not None:
|
| causal_mask = causal_mask.clone()
|
| if attention_mask.dim() == 2:
|
| mask_length = attention_mask.shape[-1]
|
| padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
|
| padding_mask = padding_mask == 0
|
| causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
|
| padding_mask, min_dtype
|
| )
|
|
|
| if (
|
| self.config._attn_implementation == "sdpa"
|
| and attention_mask is not None
|
| and attention_mask.device.type == "cuda"
|
| and not output_attentions
|
| ):
|
|
|
|
|
|
|
| causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
|
|
|
| return causal_mask
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| class MistralTokenizer(PreTrainedTokenizer):
|
| """
|
| Construct a Mistral tokenizer (adapted from the Llama tokenizer). Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is
|
| no padding token in the original model.
|
|
|
| Args:
|
| vocab_file (`str`):
|
| Path to the vocabulary file.
|
| unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
|
| The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
| token instead.
|
| bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
|
| The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
|
| eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
|
| The end of sequence token.
|
| pad_token (`str` or `tokenizers.AddedToken`, *optional*):
|
| A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
|
| attention mechanisms or loss computation.
|
| sp_model_kwargs (`Dict[str, Any]`, `Optional`, *optional*):
|
| Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
|
| SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
|
| to set:
|
|
|
| - `enable_sampling`: Enable subword regularization.
|
| - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
|
|
|
| - `nbest_size = {0,1}`: No sampling is performed.
|
| - `nbest_size > 1`: samples from the nbest_size results.
|
| - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
|
| using forward-filtering-and-backward-sampling algorithm.
|
|
|
| - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
|
| BPE-dropout.
|
|
|
| add_bos_token (`bool`, *optional*, defaults to `True`):
|
| Whether or not to add a `bos_token` at the start of sequences.
|
| add_eos_token (`bool`, *optional*, defaults to `False`):
|
| Whether or not to add an `eos_token` at the end of sequences.
|
| clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
|
| Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
|
| extra spaces.
|
| use_default_system_prompt (`bool`, *optional*, defaults to `False`):
|
| Whether or not the default system prompt for Llama should be used.
|
| spaces_between_special_tokens (`bool`, *optional*, defaults to `False`):
|
| Whether or not to add spaces between special tokens.
|
| legacy (`bool`, *optional*):
|
| Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
|
| and #25224 which includes fixes to properly handle tokens that appear after special tokens.
|
| Make sure to also set `from_slow` to `True`.
|
| A simple example:
|
|
|
| - `legacy=True`:
|
| ```python
|
| >>> from transformers import LlamaTokenizerFast
|
|
|
| >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
|
| >>> tokenizer.encode("Hello <s>.") # 869 is '▁.'
|
| [1, 15043, 29871, 1, 869]
|
| ```
|
| - `legacy=False`:
|
| ```python
|
| >>> from transformers import LlamaTokenizerFast
|
|
|
| >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
|
| >>> tokenizer.encode("Hello <s>.") # 29889 is '.'
|
| [1, 15043, 29871, 1, 29889]
|
| ```
|
| Checkout the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
|
| add_prefix_space (`bool`, *optional*, defaults to `True`):
|
| Whether or not to add an initial space to the input. This allows to treat the leading word just as any
|
| other word. Again, this should be set with `from_slow=True` to make sure it's taken into account.
|
| """
|
|
|
| vocab_files_names = VOCAB_FILES_NAMES
|
| model_input_names = ["input_ids", "attention_mask"]
|
|
|
| def __init__(
|
| self,
|
| vocab_file,
|
| unk_token="<unk>",
|
| bos_token="<s>",
|
| eos_token="</s>",
|
| pad_token=None,
|
| sp_model_kwargs: Optional[Dict[str, Any]] = None,
|
| add_bos_token=True,
|
| add_eos_token=False,
|
| clean_up_tokenization_spaces=False,
|
| use_default_system_prompt=False,
|
| spaces_between_special_tokens=False,
|
| legacy=None,
|
| add_prefix_space=True,
|
| **kwargs,
|
| ):
|
| self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
|
| bos_token = AddedToken(bos_token, normalized=False, special=True) if isinstance(bos_token, str) else bos_token
|
| eos_token = AddedToken(eos_token, normalized=False, special=True) if isinstance(eos_token, str) else eos_token
|
| unk_token = AddedToken(unk_token, normalized=False, special=True) if isinstance(unk_token, str) else unk_token
|
| pad_token = AddedToken(pad_token, normalized=False, special=True) if isinstance(pad_token, str) else pad_token
|
|
|
| if legacy is None:
|
| logger.warning_once(
|
| f"You are using the default legacy behaviour of the {self.__class__}. This is"
|
| " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
|
| " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
|
| " means, and thoroughly read the reason why this was added as explained in"
|
| " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file"
|
| " you can ignore this message"
|
| )
|
| legacy = True
|
|
|
| self.legacy = legacy
|
| self.vocab_file = vocab_file
|
| self.add_bos_token = add_bos_token
|
| self.add_eos_token = add_eos_token
|
| self.use_default_system_prompt = use_default_system_prompt
|
| self.sp_model = self.get_spm_processor(kwargs.pop("from_slow", False))
|
| self.add_prefix_space = add_prefix_space
|
|
|
| super().__init__(
|
| bos_token=bos_token,
|
| eos_token=eos_token,
|
| unk_token=unk_token,
|
| pad_token=pad_token,
|
| add_bos_token=add_bos_token,
|
| add_eos_token=add_eos_token,
|
| sp_model_kwargs=self.sp_model_kwargs,
|
| clean_up_tokenization_spaces=clean_up_tokenization_spaces,
|
| use_default_system_prompt=use_default_system_prompt,
|
| spaces_between_special_tokens=spaces_between_special_tokens,
|
| legacy=legacy,
|
| add_prefix_space=add_prefix_space,
|
| **kwargs,
|
| )
|
|
|
| @property
|
| def unk_token_length(self):
|
| return len(self.sp_model.encode(str(self.unk_token)))
|
|
|
|
|
| def get_spm_processor(self, from_slow=False):
|
| tokenizer = spm.SentencePieceProcessor(**self.sp_model_kwargs)
|
| if self.legacy or from_slow:
|
| tokenizer.Load(self.vocab_file)
|
| return tokenizer
|
|
|
| with open(self.vocab_file, "rb") as f:
|
| sp_model = f.read()
|
| model_pb2 = import_protobuf(f"The new behaviour of {self.__class__.__name__} (with `self.legacy = False`)")
|
| model = model_pb2.ModelProto.FromString(sp_model)
|
| normalizer_spec = model_pb2.NormalizerSpec()
|
| normalizer_spec.add_dummy_prefix = False
|
| model.normalizer_spec.MergeFrom(normalizer_spec)
|
| sp_model = model.SerializeToString()
|
| tokenizer.LoadFromSerializedProto(sp_model)
|
| return tokenizer
|
|
|
| def __getstate__(self):
|
| state = self.__dict__.copy()
|
| state["sp_model"] = None
|
| state["sp_model_proto"] = self.sp_model.serialized_model_proto()
|
| return state
|
|
|
| def __setstate__(self, d):
|
| self.__dict__ = d
|
| self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
|
| self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
|
|
|
| @property
|
| def vocab_size(self):
|
| """Returns vocab size"""
|
| return self.sp_model.get_piece_size()
|
|
|
| def get_vocab(self):
|
| """Returns vocab as a dict"""
|
| vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
|
| vocab.update(self.added_tokens_encoder)
|
| return vocab
|
|
|
|
|
| def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
|
| """
|
| Converts a string to a list of tokens. If `self.legacy` is set to `False`, a prefix token is added unless the
|
| first token is special.
|
| """
|
| if self.legacy or len(text) == 0:
|
| return super().tokenize(text, **kwargs)
|
|
|
| text = text.replace(SPIECE_UNDERLINE, " ")
|
| if self.add_prefix_space:
|
| text = SPIECE_UNDERLINE + text
|
|
|
| tokens = super().tokenize(text, **kwargs)
|
|
|
| if len(tokens) > 1 and tokens[0] == SPIECE_UNDERLINE and tokens[1] in self.all_special_tokens:
|
| tokens = tokens[1:]
|
| return tokens
|
|
|
|
|
| def _tokenize(self, text, **kwargs):
|
| """
|
| Returns a tokenized string.
|
|
|
| We de-activated the `add_dummy_prefix` option, thus the sentencepiece internals will always strip any
|
| SPIECE_UNDERLINE. For example: `self.sp_model.encode(f"{SPIECE_UNDERLINE}Hey", out_type = str)` will give
|
| `['H', 'e', 'y']` instead of `['▁He', 'y']`. Thus we always encode `f"{unk_token}text"` and strip the
|
| `unk_token`. Here is an example with `unk_token = "<unk>"` and `unk_token_length = 4`.
|
| `self.tokenizer.sp_model.encode("<unk> Hey", out_type = str)[4:]`.
|
| """
|
| tokens = self.sp_model.encode(text, out_type=str)
|
| if self.legacy or not text.startswith((SPIECE_UNDERLINE, " ")):
|
| return tokens
|
|
|
|
|
| tokens = self.sp_model.encode(self.unk_token + text, out_type=str)
|
|
|
| return tokens[self.unk_token_length :] if len(tokens) >= self.unk_token_length else tokens
|
|
|
| def _convert_token_to_id(self, token):
|
| """Converts a token (str) in an id using the vocab."""
|
| return self.sp_model.piece_to_id(token)
|
|
|
| def _convert_id_to_token(self, index):
|
| """Converts an index (integer) in a token (str) using the vocab."""
|
| token = self.sp_model.IdToPiece(index)
|
| return token
|
|
|
| def convert_tokens_to_string(self, tokens):
|
| """Converts a sequence of tokens (string) in a single string."""
|
|
|
| if tokens[0].startswith(SPIECE_UNDERLINE) and self.add_prefix_space:
|
| tokens[0] = tokens[0][1:]
|
|
|
| current_sub_tokens = []
|
| out_string = ""
|
| prev_is_special = False
|
| for i, token in enumerate(tokens):
|
|
|
| if token in self.all_special_tokens:
|
| if not prev_is_special and i != 0 and self.legacy:
|
| out_string += " "
|
| out_string += self.sp_model.decode(current_sub_tokens) + token
|
| prev_is_special = True
|
| current_sub_tokens = []
|
| else:
|
| if prev_is_special and i == 1 and self.add_prefix_space and not token.startswith(SPIECE_UNDERLINE):
|
| out_string += " "
|
| current_sub_tokens.append(token)
|
| prev_is_special = False
|
| out_string += self.sp_model.decode(current_sub_tokens)
|
| return out_string
|
|
|
| def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
| """
|
| Save the vocabulary and special tokens file to a directory.
|
|
|
| Args:
|
| save_directory (`str`):
|
| The directory in which to save the vocabulary.
|
|
|
| Returns:
|
| `Tuple(str)`: Paths to the files saved.
|
| """
|
| if not os.path.isdir(save_directory):
|
| logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
| return
|
| out_vocab_file = os.path.join(
|
| save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
| )
|
|
|
| if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
|
| copyfile(self.vocab_file, out_vocab_file)
|
| elif not os.path.isfile(self.vocab_file):
|
| with open(out_vocab_file, "wb") as fi:
|
| content_spiece_model = self.sp_model.serialized_model_proto()
|
| fi.write(content_spiece_model)
|
|
|
| return (out_vocab_file,)
|
|
|
| def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
|
| bos_token_id = [self.bos_token_id] if self.add_bos_token else []
|
| eos_token_id = [self.eos_token_id] if self.add_eos_token else []
|
|
|
| output = bos_token_id + token_ids_0 + eos_token_id
|
|
|
| if token_ids_1 is not None:
|
| output = output + bos_token_id + token_ids_1 + eos_token_id
|
|
|
| return output
|
|
|
| def get_special_tokens_mask(
|
| self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
|
| ) -> List[int]:
|
| """
|
| Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
|
| special tokens using the tokenizer `prepare_for_model` method.
|
|
|
| Args:
|
| token_ids_0 (`List[int]`):
|
| List of IDs.
|
| token_ids_1 (`List[int]`, *optional*):
|
| Optional second list of IDs for sequence pairs.
|
| already_has_special_tokens (`bool`, *optional*, defaults to `False`):
|
| Whether or not the token list is already formatted with special tokens for the model.
|
|
|
| Returns:
|
| `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
|
| """
|
| if already_has_special_tokens:
|
| return super().get_special_tokens_mask(
|
| token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
|
| )
|
|
|
| bos_token_id = [1] if self.add_bos_token else []
|
| eos_token_id = [1] if self.add_eos_token else []
|
|
|
| if token_ids_1 is None:
|
| return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id
|
| return (
|
| bos_token_id
|
| + ([0] * len(token_ids_0))
|
| + eos_token_id
|
| + bos_token_id
|
| + ([0] * len(token_ids_1))
|
| + eos_token_id
|
| )
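| # Illustrative sketch (assumption, not part of the original file): with
| # `add_bos_token=True` and `add_eos_token=False`, a three-token sequence yields a
| # mask that flags only the prepended BOS position.
| # >>> tok.get_special_tokens_mask([10, 20, 30])
| # [1, 0, 0, 0]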
|
|
|
| def create_token_type_ids_from_sequences(
|
| self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
|
| ) -> List[int]:
|
| """
|
| Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT
|
| sequence pair mask has the following format:
|
|
|
| ```
|
| 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
|
| | first sequence | second sequence |
|
| ```
|
|
|
| if token_ids_1 is None, only returns the first portion of the mask (0s).
|
|
|
| Args:
|
| token_ids_0 (`List[int]`):
|
| List of ids.
|
| token_ids_1 (`List[int]`, *optional*):
|
| Optional second list of IDs for sequence pairs.
|
|
|
| Returns:
|
| `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
|
| """
|
| bos_token_id = [self.bos_token_id] if self.add_bos_token else []
|
| eos_token_id = [self.eos_token_id] if self.add_eos_token else []
|
|
|
| output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)
|
|
|
| if token_ids_1 is not None:
|
| output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)
|
|
|
| return output
|
|
|
| @property
|
| def default_chat_template(self):
|
| """
|
| LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
|
| Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict
|
| user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
|
| rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
|
| results in an unusual token ordering when it is present. This template should definitely be changed if you wish
|
| to fine-tune a model with more flexible role ordering!
|
|
|
| The output should look something like:
|
|
|
| <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
|
| <bos>[INST] Prompt [/INST]
|
|
|
| The reference for this chat template is [this code
|
| snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362)
|
| in the original repository.
|
| """
|
| template = (
|
| "{% if messages[0]['role'] == 'system' %}"
|
| "{% set loop_messages = messages[1:] %}"
|
| "{% set system_message = messages[0]['content'] %}"
|
| "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
|
| "{% set loop_messages = messages %}"
|
| "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
|
| "{% else %}"
|
| "{% set loop_messages = messages %}"
|
| "{% set system_message = false %}"
|
| "{% endif %}"
|
| "{% for message in loop_messages %}"
|
| "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
|
| "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
|
| "{% endif %}"
|
| "{% if loop.index0 == 0 and system_message != false %}"
|
| "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
|
| "{% else %}"
|
| "{% set content = message['content'] %}"
|
| "{% endif %}"
|
| "{% if message['role'] == 'user' %}"
|
| "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
|
| "{% elif message['role'] == 'system' %}"
|
| "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
|
| "{% elif message['role'] == 'assistant' %}"
|
| "{{ ' ' + content.strip() + ' ' + eos_token }}"
|
| "{% endif %}"
|
| "{% endfor %}"
|
| )
|
| template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
|
| default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
|
| template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
|
|
|
| return template
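| # Usage sketch (assumption, not part of the original file): the Jinja string above
| # is consumed by `tokenizer.apply_chat_template`; `messages` is a hypothetical
| # conversation in the standard role/content format.
| # >>> messages = [{"role": "user", "content": "Hello"},
| # ...             {"role": "assistant", "content": "Hi there"}]
| # >>> text = tokenizer.apply_chat_template(messages, tokenize=False)
| # >>> text.startswith(tokenizer.bos_token + "[INST]")
| # True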
|
| class MistralTokenizerFast(PreTrainedTokenizerFast):
|
| """
|
| Construct a Mistral tokenizer (adapted from the Llama tokenizer). Based on byte-level Byte-Pair-Encoding.
|
|
|
| This uses notably ByteFallback and no normalization.
|
|
|
| ```python
|
| >>> from transformers import LlamaTokenizerFast
|
|
|
| >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
|
| >>> tokenizer.encode("Hello this is a test")
|
| [1, 15043, 445, 338, 263, 1243]
|
| ```
|
|
|
| If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
|
| call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
|
| values of the first token and final token of an encoded sequence will not be correct). For more details, check out the
| [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
|
|
|
|
|
| This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
|
| refer to this superclass for more information regarding those methods.
|
|
|
| Args:
|
| vocab_file (`str`, *optional*):
|
| [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
|
| contains the vocabulary necessary to instantiate a tokenizer.
|
| tokenizer_file (`str`, *optional*):
|
| [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
|
| contains everything needed to load the tokenizer.
|
| clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
|
| Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
|
| extra spaces.
|
| unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
|
| The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
|
| token instead.
|
| bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
|
| The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
|
| eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
|
| The end of sequence token.
|
| add_bos_token (`bool`, *optional*, defaults to `True`):
|
| Whether or not to add a `bos_token` at the start of sequences.
|
| add_eos_token (`bool`, *optional*, defaults to `False`):
|
| Whether or not to add an `eos_token` at the end of sequences.
|
| use_default_system_prompt (`bool`, *optional*, defaults to `False`):
|
| Whether or not the default system prompt for Llama should be used.
|
| legacy (`bool`, *optional*):
|
| Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
|
| and #25224, which include fixes to properly handle tokens that appear after special tokens.
|
| Make sure to also set `from_slow` to `True`.
|
| A simple example:
|
|
|
| - `legacy=True`:
|
| ```python
|
| >>> from transformers import LlamaTokenizerFast
|
|
|
| >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
|
| >>> tokenizer.encode("Hello <s>.") # 869 is '▁.'
|
| [1, 15043, 29871, 1, 869]
|
| ```
|
| - `legacy=False`:
|
| ```python
|
| >>> from transformers import LlamaTokenizerFast
|
|
|
| >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
|
| >>> tokenizer.encode("Hello <s>.") # 29889 is '.'
|
| [1, 15043, 29871, 1, 29889]
|
| ```
|
| Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
|
| add_prefix_space (`bool`, *optional*):
|
| Whether or not the tokenizer should automatically add a prefix space.
|
| """
|
|
|
| vocab_files_names = VOCAB_FILES_NAMES
|
| slow_tokenizer_class = MistralTokenizer
|
| padding_side = "left"
|
| model_input_names = ["input_ids", "attention_mask"]
|
|
|
| def __init__(
|
| self,
|
| vocab_file=None,
|
| tokenizer_file=None,
|
| clean_up_tokenization_spaces=False,
|
| unk_token="<unk>",
|
| bos_token="<s>",
|
| eos_token="</s>",
|
| add_bos_token=True,
|
| add_eos_token=False,
|
| use_default_system_prompt=False,
|
| legacy=None,
|
| add_prefix_space=None,
|
| **kwargs,
|
| ):
|
| if legacy is None:
|
| logger.warning_once(
|
| f"You are using the default legacy behaviour of the {self.__class__}. This is"
|
| " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you."
|
| " If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it"
|
| " means, and thoroughly read the reason why this was added as explained in"
|
| " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a GGUF file"
|
| " you can ignore this message."
|
| )
|
| legacy = True
|
| self.legacy = legacy
|
|
|
| if add_prefix_space is not None:
|
| kwargs["from_slow"] = True
|
|
|
| super().__init__(
|
| vocab_file=vocab_file,
|
| tokenizer_file=tokenizer_file,
|
| clean_up_tokenization_spaces=clean_up_tokenization_spaces,
|
| unk_token=unk_token,
|
| bos_token=bos_token,
|
| eos_token=eos_token,
|
| add_bos_token=add_bos_token,
|
| add_eos_token=add_eos_token,
|
| use_default_system_prompt=use_default_system_prompt,
|
| add_prefix_space=add_prefix_space,
|
| legacy=legacy,
|
| **kwargs,
|
| )
|
| self._add_bos_token = add_bos_token
|
| self._add_eos_token = add_eos_token
|
| self.update_post_processor()
|
| self.use_default_system_prompt = use_default_system_prompt
|
| self.vocab_file = vocab_file
|
|
|
| @property
|
| def can_save_slow_tokenizer(self) -> bool:
|
| return os.path.isfile(self.vocab_file) if self.vocab_file else False
|
|
|
| def update_post_processor(self):
|
| """
|
| Updates the underlying post processor with the current `bos_token` and `eos_token`.
|
| """
|
| bos = self.bos_token
|
| bos_token_id = self.bos_token_id
|
| if bos is None and self.add_bos_token:
|
| raise ValueError("add_bos_token = True but bos_token = None")
|
|
|
| eos = self.eos_token
|
| eos_token_id = self.eos_token_id
|
| if eos is None and self.add_eos_token:
|
| raise ValueError("add_eos_token = True but eos_token = None")
|
|
|
| single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}"
|
| pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}"
|
|
|
| special_tokens = []
|
| if self.add_bos_token:
|
| special_tokens.append((bos, bos_token_id))
|
| if self.add_eos_token:
|
| special_tokens.append((eos, eos_token_id))
|
| self._tokenizer.post_processor = processors.TemplateProcessing(
|
| single=single, pair=pair, special_tokens=special_tokens
|
| )
|
|
|
| @property
|
| def add_eos_token(self):
|
| return self._add_eos_token
|
|
|
| @property
|
| def add_bos_token(self):
|
| return self._add_bos_token
|
|
|
| @add_eos_token.setter
|
| def add_eos_token(self, value):
|
| self._add_eos_token = value
|
| self.update_post_processor()
|
|
|
| @add_bos_token.setter
|
| def add_bos_token(self, value):
|
| self._add_bos_token = value
|
| self.update_post_processor()
|
|
|
| def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
|
| if not self.can_save_slow_tokenizer:
|
| raise ValueError(
|
| "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
|
| "tokenizer."
|
| )
|
|
|
| if not os.path.isdir(save_directory):
|
| logger.error(f"Vocabulary path ({save_directory}) should be a directory")
|
| return
|
| out_vocab_file = os.path.join(
|
| save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
|
| )
|
|
|
| if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
|
| copyfile(self.vocab_file, out_vocab_file)
|
|
|
| return (out_vocab_file,)
|
|
|
| @property
|
|
|
| def default_chat_template(self):
|
| """
|
| LLaMA uses [INST] and [/INST] to indicate user messages, and <<SYS>> and <</SYS>> to indicate system messages.
|
| Assistant messages do not have special tokens, because LLaMA chat models are generally trained with strict
|
| user/assistant/user/assistant message ordering, and so assistant messages can be identified from the ordering
|
| rather than needing special tokens. The system message is partly 'embedded' in the first user message, which
|
| results in an unusual token ordering when it is present. This template should definitely be changed if you wish
|
| to fine-tune a model with more flexible role ordering!
|
|
|
| The output should look something like:
|
|
|
| <bos>[INST] B_SYS SystemPrompt E_SYS Prompt [/INST] Answer <eos><bos>[INST] Prompt [/INST] Answer <eos>
|
| <bos>[INST] Prompt [/INST]
|
|
|
| The reference for this chat template is [this code
|
| snippet](https://github.com/facebookresearch/llama/blob/556949fdfb72da27c2f4a40b7f0e4cf0b8153a28/llama/generation.py#L320-L362)
|
| in the original repository.
|
| """
|
| template = (
|
| "{% if messages[0]['role'] == 'system' %}"
|
| "{% set loop_messages = messages[1:] %}"
|
| "{% set system_message = messages[0]['content'] %}"
|
| "{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}"
|
| "{% set loop_messages = messages %}"
|
| "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}"
|
| "{% else %}"
|
| "{% set loop_messages = messages %}"
|
| "{% set system_message = false %}"
|
| "{% endif %}"
|
| "{% for message in loop_messages %}"
|
| "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}"
|
| "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}"
|
| "{% endif %}"
|
| "{% if loop.index0 == 0 and system_message != false %}"
|
| "{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}"
|
| "{% else %}"
|
| "{% set content = message['content'] %}"
|
| "{% endif %}"
|
| "{% if message['role'] == 'user' %}"
|
| "{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}"
|
| "{% elif message['role'] == 'system' %}"
|
| "{{ '<<SYS>>\\n' + content.strip() + '\\n<</SYS>>\\n\\n' }}"
|
| "{% elif message['role'] == 'assistant' %}"
|
| "{{ ' ' + content.strip() + ' ' + eos_token }}"
|
| "{% endif %}"
|
| "{% endfor %}"
|
| )
|
| template = template.replace("USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false")
|
| default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'")
|
| template = template.replace("DEFAULT_SYSTEM_MESSAGE", default_message)
|
|
|
| return template
|
|
|
|
|
|
|
| def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
|
| bos_token_id = [self.bos_token_id] if self.add_bos_token else []
|
| eos_token_id = [self.eos_token_id] if self.add_eos_token else []
|
|
|
| output = bos_token_id + token_ids_0 + eos_token_id
|
|
|
| if token_ids_1 is not None:
|
| output = output + bos_token_id + token_ids_1 + eos_token_id
|
|
|
| return output
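|
| # Illustrative sketch (not part of the original code): shows the TemplateProcessing strings that
| # update_post_processor() builds for the default settings (add_bos_token=True, add_eos_token=False),
| # and the id layout produced by build_inputs_with_special_tokens. The token ids below are made up.
| def _demo_special_token_templates():
|     bos, eos = "<s>", "</s>"
|     add_bos_token, add_eos_token = True, False
|     single = f"{(bos+':0 ') if add_bos_token else ''}$A:0{(' '+eos+':0') if add_eos_token else ''}"
|     pair = f"{single}{(' '+bos+':1') if add_bos_token else ''} $B:1{(' '+eos+':1') if add_eos_token else ''}"
|     # single == "<s>:0 $A:0" and pair == "<s>:0 $A:0 <s>:1 $B:1"
|     bos_token_id, token_ids_0 = [1], [15043, 445]  # hypothetical ids
|     output = bos_token_id + token_ids_0 + []        # mirrors build_inputs_with_special_tokens
|     return single, pair, output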
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| def _get_unpad_data(attention_mask):
|
| seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
|
| indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
|
| max_seqlen_in_batch = seqlens_in_batch.max().item()
|
| cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
|
| return (
|
| indices,
|
| cu_seqlens,
|
| max_seqlen_in_batch,
|
| )
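|
| # Illustrative sketch (assumption, not in the original file): what _get_unpad_data returns for a
| # tiny padded batch. cu_seqlens are the cumulative sequence lengths expected by flash-attn's varlen API.
| def _demo_get_unpad_data():
|     mask = torch.tensor([[1, 1, 1, 0],
|                          [1, 1, 0, 0]], dtype=torch.int32)
|     indices, cu_seqlens, max_len = _get_unpad_data(mask)
|     # indices -> positions of real tokens in the flattened mask: [0, 1, 2, 4, 5]
|     # cu_seqlens -> [0, 3, 5]; max_len -> 3
|     return indices, cu_seqlens, max_len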
|
|
|
|
|
| class MistralRMSNorm(nn.Module):
|
| def __init__(self, hidden_size, eps=1e-6):
|
| """
|
| MistralRMSNorm is equivalent to T5LayerNorm
|
| """
|
| super().__init__()
|
| self.weight = nn.Parameter(torch.ones(hidden_size))
|
| self.variance_epsilon = eps
|
|
|
| def forward(self, hidden_states):
|
| input_dtype = hidden_states.dtype
|
| hidden_states = hidden_states.to(torch.float32)
|
| variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
| hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
| return self.weight * hidden_states.to(input_dtype)
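|
| # Illustrative sketch (assumption): checks numerically that MistralRMSNorm matches the explicit
| # RMS normalisation x * rsqrt(mean(x^2) + eps) scaled by the learned weight.
| def _demo_rmsnorm_equivalence():
|     norm = MistralRMSNorm(hidden_size=8, eps=1e-6)
|     x = torch.randn(2, 4, 8)
|     manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6) * norm.weight
|     assert torch.allclose(norm(x), manual, atol=1e-6)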
|
|
|
| class MistralRotaryEmbedding(nn.Module):
|
| def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
|
| super().__init__()
|
|
|
| self.dim = dim
|
| self.max_position_embeddings = max_position_embeddings
|
| self.base = base
|
| inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
|
| self.register_buffer("inv_freq", inv_freq, persistent=False)
|
|
|
| @torch.no_grad()
|
|
|
| def forward(self, x, position_ids):
|
|
|
| inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
|
| position_ids_expanded = position_ids[:, None, :].float()
|
|
|
|
|
| device_type = x.device.type
|
| device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
|
| with torch.autocast(device_type=device_type, enabled=False):
|
| freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
|
| emb = torch.cat((freqs, freqs), dim=-1)
|
| cos = emb.cos()
|
| sin = emb.sin()
|
| return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
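|
| # Illustrative sketch (assumption): the rotary embedding above returns cos/sin tables shaped like
| # (batch, seq_len, dim); only the dtype/device of the first argument are used.
| def _demo_rotary_embedding_shapes():
|     rope = MistralRotaryEmbedding(dim=16, max_position_embeddings=32)
|     x = torch.randn(2, 5, 16)
|     position_ids = torch.arange(5).unsqueeze(0).expand(2, -1)
|     cos, sin = rope(x, position_ids)
|     assert cos.shape == (2, 5, 16) and sin.shape == (2, 5, 16)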
|
|
|
| def nonzero_mean(x, axis=None):
|
| if axis is not None:
|
| return x.sum(axis) / (x != 0).sum(axis)
|
| return x.sum() / (x != 0).sum()
|
| def loss_mean(x):
|
| return x.sum() / (x != 0).sum()
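|
| # Illustrative sketch (assumption): nonzero_mean / loss_mean average only over non-zero entries,
| # so zeroed (e.g. masked) positions do not dilute the mean.
| def _demo_nonzero_mean():
|     x = torch.tensor([2.0, 0.0, 4.0, 0.0])
|     assert nonzero_mean(x) == 3.0  # (2 + 4) / 2, zeros excluded
|     assert x.mean() == 1.5         # a plain mean would count the zeros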
|
|
|
|
|
| def rotate_half(x):
|
| """Rotates half the hidden dims of the input."""
|
| x1 = x[..., : x.shape[-1] // 2]
|
| x2 = x[..., x.shape[-1] // 2 :]
|
| return torch.cat((-x2, x1), dim=-1)
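|
| # Illustrative sketch (assumption): rotate_half swaps the two halves of the last dimension and
| # negates the new first half, the standard building block for applying rotary embeddings.
| def _demo_rotate_half():
|     x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
|     assert torch.equal(rotate_half(x), torch.tensor([[-3.0, -4.0, 1.0, 2.0]]))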
|
|
|
| def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
|
| """Applies Rotary Position Embedding to the query and key tensors.
|
|
|
| Args:
|
| q (`torch.Tensor`): The query tensor.
|
| k (`torch.Tensor`): The key tensor.
|
| cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
| sin (`torch.Tensor`): The sine part of the rotary embedding.
|
| position_ids (`torch.Tensor`, *optional*):
|
| Deprecated and unused.
|
| unsqueeze_dim (`int`, *optional*, defaults to 1):
|
| The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
| sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
| that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
| k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
| cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
| the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
| Returns:
|
| `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
|
| """
|
|
|
| cos = cos.squeeze(1).squeeze(0)
|
| sin = sin.squeeze(1).squeeze(0)
|
| cos = cos[position_ids].unsqueeze(1)
|
| sin = sin[position_ids].unsqueeze(1)
|
| q_embed = (q * cos[:,:, -q.shape[2]:]) + (rotate_half(q) * sin[:,:, -q.shape[2]:]) if q is not None else None
|
| k_embed = (k * cos) + (rotate_half(k) * sin) if k is not None else None
|
| return q_embed, k_embed
|
|
|
| def apply_grouped_rotary_pos_emb(q, k, cos, sin, position_ids, g_size_1=1, g_size_2=4096):
|
|
|
| position_ids_q = position_ids//g_size_1 + g_size_2 - g_size_2//g_size_1
|
| position_ids_k = position_ids//g_size_1
|
|
|
| cos = cos.squeeze(1).squeeze(0)
|
| sin = sin.squeeze(1).squeeze(0)
|
| cos_q = cos[position_ids_q].unsqueeze(1)
|
| sin_q = sin[position_ids_q].unsqueeze(1)
|
| cos_k = cos[position_ids_k].unsqueeze(1)
|
| sin_k = sin[position_ids_k].unsqueeze(1)
|
| q_embed = (q * cos_q) + (rotate_half(q) * sin_q) if q is not None else None
|
| k_embed = (k * cos_k) + (rotate_half(k) * sin_k) if k is not None else None
|
|
|
| return q_embed, k_embed
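|
| # Illustrative sketch (assumption): the grouped variant above maps positions into groups of size
| # g_size_1 and offsets the query positions by g_size_2 - g_size_2 // g_size_1. This shows only the
| # index arithmetic, on toy values, without calling the function itself.
| def _demo_grouped_position_ids(g_size_1=4, g_size_2=8):
|     position_ids = torch.arange(12)
|     position_ids_q = position_ids // g_size_1 + g_size_2 - g_size_2 // g_size_1
|     position_ids_k = position_ids // g_size_1
|     # position_ids_k -> [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
|     # position_ids_q -> position_ids_k + 6
|     return position_ids_q, position_ids_k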
|
|
|
| def load_balancing_loss_func(
|
| gate_logits: torch.Tensor, num_experts: torch.Tensor = None, top_k=2, attention_mask: Optional[torch.Tensor] = None
|
| ) -> float:
|
| r"""
|
| Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
|
|
|
| See Switch Transformer (https://arxiv.org/abs/2101.03961) for more details. This function implements the loss
|
| function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
|
| experts is too unbalanced.
|
|
|
| Args:
|
| gate_logits (Union[`torch.Tensor`, Tuple[torch.Tensor]]):
|
| Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
|
| shape [batch_size X sequence_length, num_experts].
|
| attention_mask (`torch.Tensor`, *optional*):
|
| The attention_mask used in the forward function, of
|
| shape [batch_size X sequence_length] if not None.
|
| num_experts (`int`, *optional*):
|
| Number of experts
|
|
|
| Returns:
|
| The auxiliary loss.
|
| """
|
| if gate_logits is None or not isinstance(gate_logits, tuple):
|
| return 0
|
|
|
| if isinstance(gate_logits, tuple):
|
| compute_device = gate_logits[0].device
|
| concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
|
|
|
| routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
|
|
|
| _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
|
|
|
| expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
|
|
|
| if attention_mask is None:
|
|
|
| tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
|
|
|
|
|
| router_prob_per_expert = torch.mean(routing_weights, dim=0)
|
| else:
|
| batch_size, sequence_length = attention_mask.shape
|
| num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
|
|
|
|
|
| expert_attention_mask = (
|
| attention_mask[None, :, :, None, None]
|
| .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
|
| .reshape(-1, top_k, num_experts)
|
| .to(compute_device)
|
| )
|
|
|
|
|
| tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
|
| expert_attention_mask, dim=0
|
| )
|
|
|
|
|
| router_per_expert_attention_mask = (
|
| attention_mask[None, :, :, None]
|
| .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
|
| .reshape(-1, num_experts)
|
| .to(compute_device)
|
| )
|
|
|
|
|
| router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
|
| router_per_expert_attention_mask, dim=0
|
| )
|
|
|
| overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
|
| return overall_loss * num_experts
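|
| # Illustrative sketch (assumption): calling the auxiliary load-balancing loss on toy router logits.
| # With uniform logits the routing is perfectly balanced and the returned value equals top_k (2.0 here).
| def _demo_load_balancing_loss():
|     num_experts, top_k = 4, 2
|     gate_logits = (torch.zeros(6, num_experts), torch.zeros(6, num_experts))  # two "layers" of logits
|     loss = load_balancing_loss_func(gate_logits, num_experts=num_experts, top_k=top_k)
|     return loss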
|
|
|
| class MistralMLP(nn.Module):
|
| def __init__(self, config):
|
| super().__init__()
|
| self.hidden_size = config.hidden_size
|
| self.intermediate_size = config.intermediate_size
|
| self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
|
| self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
|
| self.act_fn = ACT2FN[config.hidden_act]
|
|
|
| def forward(self, hidden_state):
|
| return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
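|
| # Illustrative sketch (assumption): MistralMLP is a gated (SwiGLU-style) feed-forward block,
| # down_proj(act(gate_proj(x)) * up_proj(x)). A SimpleNamespace stands in for the real config here.
| def _demo_mistral_mlp():
|     from types import SimpleNamespace
|     cfg = SimpleNamespace(hidden_size=8, intermediate_size=16, hidden_act="silu")
|     mlp = MistralMLP(cfg)
|     out = mlp(torch.randn(2, 3, 8))
|     assert out.shape == (2, 3, 8)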
|
|
|
| def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
| """
|
| This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
| num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
| """
|
| batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
| if n_rep == 1:
|
| return hidden_states
|
| hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
| return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
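|
| # Illustrative sketch (assumption): verifies the docstring's claim that repeat_kv matches
| # torch.repeat_interleave along the head dimension (used to expand KV heads for grouped-query attention).
| def _demo_repeat_kv_equivalence():
|     kv = torch.randn(2, 3, 5, 4)  # (batch, num_key_value_heads, seq_len, head_dim)
|     assert torch.equal(repeat_kv(kv, 2), kv.repeat_interleave(2, dim=1))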
|
|
|
| class MistralAttention(nn.Module):
|
| """
|
| Multi-headed attention from the 'Attention Is All You Need' paper, modified to use sliding-window attention as in
|
| Longformer and "Generating Long Sequences with Sparse Transformers".
|
| """
|
|
|
| def __init__(self, config: MistralConfig, layer_idx: Optional[int] = None):
|
| super().__init__()
|
| self.config = config
|
| self.layer_idx = layer_idx
|
| if layer_idx is None:
|
| logger.warning_once(
|
| f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
|
| "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
|
| "when creating this class."
|
| )
|
|
|
| self.attention_dropout = config.attention_dropout
|
| self.hidden_size = config.hidden_size
|
| self.num_heads = config.num_attention_heads
|
| self.head_dim = self.hidden_size // self.num_heads
|
| self.num_key_value_heads = config.num_key_value_heads
|
| self.num_key_value_groups = self.num_heads // self.num_key_value_heads
|
| self.max_position_embeddings = config.max_position_embeddings
|
| self.rope_theta = config.rope_theta
|
| self.is_causal = True
|
|
|
| if (self.head_dim * self.num_heads) != self.hidden_size:
|
| raise ValueError(
|
| f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
|
| f" and `num_heads`: {self.num_heads})."
|
| )
|
| self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
|
| self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
|
| self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
|
| self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
|
|
|
| self.rotary_emb = MistralRotaryEmbedding(
|
| self.head_dim,
|
| max_position_embeddings=self.max_position_embeddings,
|
| base=self.rope_theta,
|
| )
|
|
|
| def forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_value: Optional[Cache] = None,
|
| output_attentions: bool = False,
|
| use_cache: bool = False,
|
| cache_position: Optional[torch.LongTensor] = None,
|
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| bsz, q_len, _ = hidden_states.size()
|
|
|
| query_states = self.q_proj(hidden_states)
|
| key_states = self.k_proj(hidden_states)
|
| value_states = self.v_proj(hidden_states)
|
|
|
| query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
|
|
| cos, sin = self.rotary_emb(value_states, position_ids)
|
| query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
|
|
| if past_key_value is not None:
|
|
|
| cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
| key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
|
|
| key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| value_states = repeat_kv(value_states, self.num_key_value_groups)
|
|
|
| attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
|
|
| if attention_mask is not None:
|
| causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
| attn_weights = attn_weights + causal_mask
|
|
|
|
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
| attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
| attn_output = torch.matmul(attn_weights, value_states)
|
|
|
| if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
| raise ValueError(
|
| f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
| f" {attn_output.size()}"
|
| )
|
|
|
| attn_output = attn_output.transpose(1, 2).contiguous()
|
|
|
| attn_output = attn_output.view(bsz, q_len, -1)
|
| attn_output = self.o_proj(attn_output)
|
|
|
| if not output_attentions:
|
| attn_weights = None
|
|
|
| return attn_output, attn_weights, past_key_value
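|
| # Illustrative sketch (assumption): the core math of the eager attention path above, on toy tensors --
| # scaled dot-product scores, an additive causal mask, softmax in float32, then the value mix.
| def _demo_eager_attention_math():
|     bsz, heads, q_len, head_dim = 1, 2, 4, 8
|     q, k, v = (torch.randn(bsz, heads, q_len, head_dim) for _ in range(3))
|     scores = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(head_dim)
|     causal_mask = torch.triu(torch.full((q_len, q_len), float("-inf")), diagonal=1)
|     weights = nn.functional.softmax(scores + causal_mask, dim=-1, dtype=torch.float32).to(q.dtype)
|     out = torch.matmul(weights, v)
|     assert out.shape == (bsz, heads, q_len, head_dim)
|     return out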
|
| class MistralFlashAttention2(MistralAttention):
|
| """
|
| Mistral flash attention module. This module inherits from `MistralAttention`, as the module's weights stay
|
| untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
|
| flash attention and deal with padding tokens if the input contains any.
|
| """
|
|
|
|
|
| def __init__(self, *args, **kwargs):
|
| super().__init__(*args, **kwargs)
|
|
|
|
|
|
|
|
|
| self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
|
|
|
| def forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_value: Optional[Cache] = None,
|
| output_attentions: bool = False,
|
| use_cache: bool = False,
|
| cache_position: Optional[torch.LongTensor] = None,
|
| ):
|
| if isinstance(past_key_value, StaticCache):
|
| raise ValueError(
|
| "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
|
| "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
|
| )
|
|
|
| output_attentions = False
|
|
|
| bsz, q_len, _ = hidden_states.size()
|
|
|
| query_states = self.q_proj(hidden_states)
|
| key_states = self.k_proj(hidden_states)
|
| value_states = self.v_proj(hidden_states)
|
|
|
| query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
|
|
| kv_seq_len = key_states.shape[-2]
|
| if past_key_value is not None:
|
| kv_seq_len += cache_position[0]
|
|
|
| cos, sin = self.rotary_emb(value_states, position_ids)
|
| query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
|
|
| if past_key_value is not None:
|
|
|
| cache_has_contents = past_key_value.get_seq_length(self.layer_idx) > 0
|
| if (
|
| getattr(self.config, "sliding_window", None) is not None
|
| and kv_seq_len > self.config.sliding_window
|
| and cache_has_contents
|
| ):
|
| slicing_tokens = 1 - self.config.sliding_window
|
|
|
| past_key = past_key_value[self.layer_idx][0]
|
| past_value = past_key_value[self.layer_idx][1]
|
|
|
| past_key = past_key[:, :, slicing_tokens:, :].contiguous()
|
| past_value = past_value[:, :, slicing_tokens:, :].contiguous()
|
|
|
| if past_key.shape[-2] != self.config.sliding_window - 1:
|
| raise ValueError(
|
| f"past key must have a shape of (`batch_size, num_heads, self.config.sliding_window-1, head_dim`), got"
|
| f" {past_key.shape}"
|
| )
|
|
|
| if attention_mask is not None:
|
| attention_mask = attention_mask[:, slicing_tokens:]
|
| attention_mask = torch.cat([attention_mask, torch.ones_like(attention_mask[:, -1:])], dim=-1)
|
|
|
| cache_kwargs = {"sin": sin, "cos": cos}
|
| key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
|
|
|
|
| key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| value_states = repeat_kv(value_states, self.num_key_value_groups)
|
| dropout_rate = 0.0 if not self.training else self.attention_dropout
|
|
|
|
|
|
|
|
|
| input_dtype = query_states.dtype
|
| if input_dtype == torch.float32:
|
| if torch.is_autocast_enabled():
|
| target_dtype = torch.get_autocast_gpu_dtype()
|
|
|
| elif hasattr(self.config, "_pre_quantization_dtype"):
|
| target_dtype = self.config._pre_quantization_dtype
|
| else:
|
| target_dtype = self.q_proj.weight.dtype
|
|
|
| logger.warning_once(
|
| f"The input hidden states seems to be silently casted in float32, this might be related to"
|
| f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
|
| f" {target_dtype}."
|
| )
|
|
|
| query_states = query_states.to(target_dtype)
|
| key_states = key_states.to(target_dtype)
|
| value_states = value_states.to(target_dtype)
|
|
|
|
|
| query_states = query_states.transpose(1, 2)
|
| key_states = key_states.transpose(1, 2)
|
| value_states = value_states.transpose(1, 2)
|
|
|
| attn_output = _flash_attention_forward(
|
| query_states,
|
| key_states,
|
| value_states,
|
| attention_mask,
|
| q_len,
|
| dropout=dropout_rate,
|
| sliding_window=getattr(self.config, "sliding_window", None),
|
| use_top_left_mask=self._flash_attn_uses_top_left_mask,
|
| is_causal=self.is_causal,
|
| )
|
|
|
| attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
|
| attn_output = self.o_proj(attn_output)
|
|
|
| if not output_attentions:
|
| attn_weights = None
|
|
|
| return attn_output, attn_weights, past_key_value
|
|
|
| class MistralSdpaAttention(MistralAttention):
|
| """
|
| Mistral attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
|
| `MistralAttention`, as the module's weights stay untouched. The only changes are in the forward pass, to adapt to the
|
| SDPA API.
|
| """
|
|
|
|
|
| def forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_value: Optional[Cache] = None,
|
| output_attentions: bool = False,
|
| use_cache: bool = False,
|
| cache_position: Optional[torch.LongTensor] = None,
|
| **kwargs,
|
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| if output_attentions:
|
|
|
| logger.warning_once(
|
| "MistralModel is using MistralSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
|
| 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
| )
|
| return super().forward(
|
| hidden_states=hidden_states,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_value=past_key_value,
|
| output_attentions=output_attentions,
|
| use_cache=use_cache,
|
| cache_position=cache_position,
|
| )
|
|
|
| bsz, q_len, _ = hidden_states.size()
|
|
|
| query_states = self.q_proj(hidden_states)
|
| key_states = self.k_proj(hidden_states)
|
| value_states = self.v_proj(hidden_states)
|
|
|
| query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
|
|
| cos, sin = self.rotary_emb(value_states, position_ids)
|
| query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
|
|
| if past_key_value is not None:
|
|
|
| cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
| key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
|
|
| key_states = repeat_kv(key_states, self.num_key_value_groups)
|
| value_states = repeat_kv(value_states, self.num_key_value_groups)
|
|
|
| causal_mask = attention_mask
|
| if attention_mask is not None:
|
| causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
|
|
|
|
|
|
|
| if query_states.device.type == "cuda" and causal_mask is not None:
|
| query_states = query_states.contiguous()
|
| key_states = key_states.contiguous()
|
| value_states = value_states.contiguous()
|
|
|
|
|
|
|
| is_causal = True if causal_mask is None and q_len > 1 else False
|
|
|
| attn_output = torch.nn.functional.scaled_dot_product_attention(
|
| query_states,
|
| key_states,
|
| value_states,
|
| attn_mask=causal_mask,
|
| dropout_p=self.attention_dropout if self.training else 0.0,
|
| is_causal=is_causal,
|
| )
|
|
|
| attn_output = attn_output.transpose(1, 2).contiguous()
|
| attn_output = attn_output.view(bsz, q_len, -1)
|
|
|
| attn_output = self.o_proj(attn_output)
|
|
|
| return attn_output, None, past_key_value
|
| MISTRAL_ATTENTION_CLASSES = {
|
| "eager": MistralAttention,
|
| "flash_attention_2": MistralFlashAttention2,
|
| "sdpa": MistralSdpaAttention,
|
| }
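|
| # Illustrative sketch (assumption, requires PyTorch 2.x): the SDPA path and the eager math agree on
| # toy tensors when dropout is zero and the same causal masking is applied.
| def _demo_sdpa_matches_eager():
|     q, k, v = (torch.randn(1, 2, 4, 8) for _ in range(3))
|     sdpa_out = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)
|     scores = torch.matmul(q, k.transpose(2, 3)) / math.sqrt(q.shape[-1])
|     mask = torch.triu(torch.full((4, 4), float("-inf")), diagonal=1)
|     eager_out = torch.matmul(torch.softmax(scores + mask, dim=-1), v)
|     assert torch.allclose(sdpa_out, eager_out, atol=1e-5)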
|
|
|
|
|
| class MistralDecoderLayer(nn.Module):
|
| def __init__(self, config: MistralConfig, layer_idx: int):
|
| super().__init__()
|
| self.hidden_size = config.hidden_size
|
|
|
| self.self_attn = MISTRAL_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
|
|
|
| self.mlp = MistralMLP(config)
|
| self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
|
|
| def forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_value: Optional[Cache] = None,
|
| output_attentions: Optional[bool] = False,
|
| use_cache: Optional[bool] = False,
|
| cache_position: Optional[torch.LongTensor] = None,
|
| **kwargs,
|
| ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| """
|
| Args:
|
| hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
| attention_mask (`torch.FloatTensor`, *optional*):
|
| attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
|
| query_sequence_length, key_sequence_length)` if default attention is used.
|
| output_attentions (`bool`, *optional*):
|
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| returned tensors for more detail.
|
| use_cache (`bool`, *optional*):
|
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| (see `past_key_values`).
|
| past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
| cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
| Indices depicting the position of the input sequence tokens in the sequence
|
| kwargs (`dict`, *optional*):
|
| Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
|
| into the model
|
| """
|
| residual = hidden_states
|
|
|
| hidden_states = self.input_layernorm(hidden_states)
|
|
|
|
|
| hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
| hidden_states=hidden_states,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_value=past_key_value,
|
| output_attentions=output_attentions,
|
| use_cache=use_cache,
|
| cache_position=cache_position,
|
| **kwargs,
|
| )
|
| hidden_states = residual + hidden_states
|
|
|
|
|
| residual = hidden_states
|
| hidden_states = self.post_attention_layernorm(hidden_states)
|
| hidden_states = self.mlp(hidden_states)
|
| hidden_states = residual + hidden_states
|
|
|
| outputs = (hidden_states,)
|
|
|
| if output_attentions:
|
| outputs += (self_attn_weights,)
|
|
|
| if use_cache:
|
| outputs += (present_key_value,)
|
|
|
| return outputs
|
|
|
| class MixtralBlockSparseTop2MLP(nn.Module):
|
| def __init__(self, config: MixtralConfig):
|
| super().__init__()
|
| self.ffn_dim = config.intermediate_size
|
| self.hidden_dim = config.hidden_size
|
|
|
| self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
|
| self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
|
| self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
|
|
|
| self.act_fn = ACT2FN[config.hidden_act]
|
|
|
| def forward(self, hidden_states):
|
| current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
|
| current_hidden_states = self.w2(current_hidden_states)
|
| return current_hidden_states
|
| class MixtralSparseMoeBlock(nn.Module):
|
| """
|
| This implementation is
|
| strictly equivalent to standard MoE with full capacity (no
|
| dropped tokens). It's faster since it formulates MoE operations
|
| in terms of block-sparse operations to accommodate imbalanced
|
| assignments of tokens to experts, whereas standard MoE either
|
| (1) drops tokens at the cost of reduced performance or (2) sets
|
| the capacity factor to the number of experts and thus wastes computation
|
| and memory on padding.
|
| """
|
|
|
| def __init__(self, config):
|
| super().__init__()
|
| self.hidden_dim = config.hidden_size
|
| self.ffn_dim = config.intermediate_size
|
| self.num_experts = config.num_local_experts
|
| self.top_k = config.num_experts_per_tok
|
|
|
|
|
| self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
|
|
|
| self.experts = nn.ModuleList([MixtralBlockSparseTop2MLP(config) for _ in range(self.num_experts)])
|
|
|
|
|
| self.jitter_noise = config.router_jitter_noise
|
|
|
| def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
|
| """ """
|
| batch_size, sequence_length, hidden_dim = hidden_states.shape
|
| if self.training and self.jitter_noise > 0:
|
| hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
|
| hidden_states = hidden_states.view(-1, hidden_dim)
|
|
|
| router_logits = self.gate(hidden_states)
|
|
|
| routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
|
| routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
|
| routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
|
|
|
| routing_weights = routing_weights.to(hidden_states.dtype)
|
|
|
| final_hidden_states = torch.zeros(
|
| (batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
|
| )
|
|
|
|
|
|
|
| expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
|
|
|
|
|
| for expert_idx in range(self.num_experts):
|
| expert_layer = self.experts[expert_idx]
|
| idx, top_x = torch.where(expert_mask[expert_idx])
|
|
|
|
|
|
|
|
|
| current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
|
| current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
|
|
|
|
|
|
|
| final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
|
| final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
|
| return final_hidden_states, router_logits
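|
| # Illustrative sketch (assumption): the router math used by MixtralSparseMoeBlock above --
| # softmax over experts, keep the top-k weights, then renormalise so the kept weights sum to 1.
| def _demo_top2_routing():
|     router_logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])  # one token, four experts
|     routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
|     routing_weights, selected_experts = torch.topk(routing_weights, k=2, dim=-1)
|     routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
|     # selected_experts -> [[0, 1]]; routing_weights now sum to 1 per token
|     return routing_weights, selected_experts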
|
| class MixtralDecoderLayer(nn.Module):
|
| def __init__(self, config: MixtralConfig, layer_idx: int):
|
| super().__init__()
|
| self.hidden_size = config.hidden_size
|
|
|
| self.self_attn = MISTRAL_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
|
| self.mlp = MistralMLP(config)
|
| self.block_sparse_moe = MixtralSparseMoeBlock(config)
|
| self.input_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
| self.post_attention_layernorm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
|
|
| def forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_value: Optional[Tuple[torch.Tensor]] = None,
|
| output_attentions: Optional[bool] = False,
|
| output_router_logits: Optional[bool] = False,
|
| use_cache: Optional[bool] = False,
|
| cache_position: Optional[torch.LongTensor] = None,
|
| **kwargs,
|
| ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
| """
|
| Args:
|
| hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
|
| attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
| `(batch, sequence_length)` where padding elements are indicated by 0.
|
| past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
|
| output_attentions (`bool`, *optional*):
|
| Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
| returned tensors for more detail.
|
| output_router_logits (`bool`, *optional*):
|
| Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
|
| should not be returned during inference.
|
| use_cache (`bool`, *optional*):
|
| If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
| (see `past_key_values`).
|
| cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
| Indices depicting the position of the input sequence tokens in the sequence.
|
| kwargs (`dict`, *optional*):
|
| Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
|
| into the model
|
| """
|
|
|
| residual = hidden_states
|
|
|
| hidden_states = self.input_layernorm(hidden_states)
|
|
|
|
|
| hidden_states, self_attn_weights, present_key_value = self.self_attn(
|
| hidden_states=hidden_states,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_value=past_key_value,
|
| output_attentions=output_attentions,
|
| use_cache=use_cache,
|
| cache_position=cache_position,
|
| )
|
| hidden_states = residual + hidden_states
|
|
|
|
|
| residual = hidden_states
|
| hidden_states = self.post_attention_layernorm(hidden_states)
|
| hidden_states, router_logits = self.block_sparse_moe(hidden_states)
|
| hidden_states = residual + hidden_states
|
|
|
|
|
| residual = hidden_states
|
| hidden_states = self.post_attention_layernorm(hidden_states)
|
| hidden_states = self.mlp(hidden_states)
|
| hidden_states = residual + hidden_states
|
|
|
| outputs = (hidden_states,)
|
|
|
| if output_attentions:
|
| outputs += (self_attn_weights,)
|
|
|
| if use_cache:
|
| outputs += (present_key_value,)
|
|
|
| if output_router_logits:
|
| outputs += (router_logits,)
|
|
|
| return outputs
|
|
|
|
|
|
|
|
|
|
|
| class MistralForCausalLM(MistralPreTrainedModel):
|
| _tied_weights_keys = ["lm_head.weight"]
|
|
|
| def __init__(self, config):
|
| super().__init__(config)
|
| self.model = MistralModel(config)
|
| self.vocab_size = config.vocab_size
|
| self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| self.max_thoughts = config.max_thoughts
|
| self.merged_lm_and_talk_heads = config.merged_lm_and_talk_heads
|
| self.use_concat_talk_head = config.use_concat_talk_head
|
| self.use_shallow_talk = config.use_shallow_talk
|
| self.use_complex_talk_head = config.use_complex_talk_head
|
| self.use_weighted_talk_head = config.use_weighted_talk_head
|
|
|
| assert not (self.use_weighted_talk_head and self.use_shallow_talk)
|
|
|
| self.n_ahead = 1
|
| self.n_ahead_talk = 1
|
| self.n_passes = 1
|
| self.n_tokens_print = 1
|
| self.gradient_accumulation_steps = 1
|
| self.training_steps = 0
|
| self.tokenizer = None
|
| self.start_token_id = None
|
| self.end_token_id = None
|
| self.rm_initialized = False
|
| self.residual_talk_head = True
|
| self.thought_init_std_scale = 1e-2
|
|
|
| self.final_only_mode = False
|
| self.first_and_last_mode = True
|
| self.first_only = False
|
| self.original_loss_weight = 0.5
|
|
|
| self.cumulative_residual = False
|
| self.clever_residual = False
|
| self.skip_residual = False
|
| self.no_residual = True
|
|
|
| self.optimize_lm_head_only_at_start = False
|
| self.optimize_model_only_at_start = False
|
|
|
| if self.optimize_model_only_at_start:
|
| raise NotImplementedError
|
| self.train_only_thinking_embedding = False
|
| self.weighted_embeddings = False
|
| self.use_start_thought_token = True
|
| self.use_end_thought_token = True
|
| self.initialize_thought_embedding_to_normal = False
|
| self.initial_start_token = "---"
|
| self.initial_end_token = "---"
|
| self.output_logits_at_the_end = True
|
|
|
| self.gumbel_temperature = 0.001
|
|
|
| self.use_policy_loss = True
|
| self.include_policy_loss = True
|
| self.trice_mode = True
|
| self.remove_negative_rewards = True
|
| self.use_policy_loss_for_end_thought = True
|
|
|
| self.base_original_mode = False
|
| self.original_mode = False
|
|
|
| self.thought_prefix = "(Let's think step by step"
|
| self.tokenized_thought_prefix = None
|
| self.log_dict = defaultdict(int)
|
| self.eval_log_dict = defaultdict(int)
|
| self.print_final_only = True
|
| self.loss_mean = loss_mean
|
| self.all_rewards = []
|
| self.all_unreduced_losses = []
|
| self.kill_after = 100
|
|
|
| self.start_embedding = nn.Parameter(torch.zeros(2, self.model.config.hidden_size))
|
| self.end_embedding = nn.Parameter(torch.zeros(2, self.model.config.hidden_size))
|
|
|
| self.policy_loss_beta = 1e6
|
| self.embedding_scale = 1e2
|
| self.reinforce_temperature = 3
|
| self.base_loss_beta = 1
|
|
|
|
|
| self.use_thought_prefix = False
|
| self.use_reparam_for_thought_embeddings = False
|
| self.use_upper_triangular = False
|
| self.subtract_mean_reward = False
|
| self.comparison_mode = False
|
| self.gumbel_detach = True
|
|
|
|
|
| self.eval_mode = False
|
|
|
| num_talk = 1
|
| talk_input_dim = config.hidden_size if not self.use_concat_talk_head else config.hidden_size * 2
|
| if self.use_weighted_talk_head:
|
| talk_output_dim = 1
|
| else:
|
| talk_output_dim = config.hidden_size if self.use_shallow_talk else config.vocab_size
|
|
|
| if not self.merged_lm_and_talk_heads:
|
| if self.use_complex_talk_head:
|
| self.talk_head = nn.ModuleList([nn.Sequential(
|
| nn.Linear(talk_input_dim, config.hidden_size),
|
| nn.ReLU(),
|
| nn.Linear(config.hidden_size, config.hidden_size),
|
| nn.ReLU(),
|
| nn.Linear(config.hidden_size, talk_output_dim, bias=False)
|
| )])
|
| else:
|
| self.talk_head = nn.ModuleList([nn.Sequential(
|
| nn.Linear(talk_input_dim, talk_output_dim, bias=False)
|
| )])
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self):
|
| return self.model.embed_tokens
|
|
|
| def set_input_embeddings(self, value):
|
| self.model.embed_tokens = value
|
|
|
| def get_output_embeddings(self):
|
| return self.lm_head
|
|
|
| def set_output_embeddings(self, new_embeddings):
|
| self.lm_head = new_embeddings
|
|
|
| def set_decoder(self, decoder):
|
| self.model = decoder
|
|
|
| def get_decoder(self):
|
| return self.model
|
| def calculate_policy_loss(self, thoughts, rewards):
|
| thought_log_probs = []
|
| for thought in thoughts:
|
| thought_log_prob = self.lm_head(thought).log_softmax(dim=-1)
|
| thought_log_probs.append(thought_log_prob)
|
|
|
| thought_log_probs = torch.stack(thought_log_probs, dim=1)
|
| thought_probs = torch.exp(thought_log_probs)
|
|
|
| policy_loss = -torch.mean(thought_log_probs * rewards.unsqueeze(-1).unsqueeze(-1))
|
|
|
| return policy_loss
|
|
|
| def _generate_thoughts(self, hidden_states, max_length):
|
| batch_size = hidden_states.size(0)
|
| thought_ids = torch.zeros((batch_size, self.config.max_thoughts, max_length), dtype=torch.long, device=hidden_states.device)
|
| thought_embeddings = []
|
|
|
| for i in range(self.config.max_thoughts):
|
| thought_input_ids = torch.zeros((batch_size, 1), dtype=torch.long, device=hidden_states.device)
|
| thought_outputs = self.generate(
|
| input_ids=thought_input_ids,
|
| max_length=max_length,
|
| do_sample=True,
|
| top_k=50,
|
| top_p=0.95,
|
| pad_token_id=self.config.pad_token_id,
|
| eos_token_id=self.config.eos_token_id,
|
| )
|
| thought_ids[:, i, :] = thought_outputs
|
| thought_embeddings.append(self.get_input_embeddings()(thought_outputs))
|
|
|
| thought_embeddings = torch.stack(thought_embeddings, dim=1)
|
| return thought_ids, thought_embeddings
|
|
|
| @torch.no_grad()
|
| def infer(
|
| self,
|
| input_ids: torch.LongTensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ):
|
| batch_size, seq_len = input_ids.shape
|
|
|
|
|
| original_input_ids = input_ids.clone()
|
| original_attention_mask = attention_mask.clone() if attention_mask is not None else None
|
|
|
|
|
| start_thought_token_id = self.tokenizer.convert_tokens_to_ids("<|startthought|>")
|
| input_ids = torch.cat([input_ids, torch.tensor([[start_thought_token_id]] * batch_size).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| continuation_length = self.n_ahead - 2
|
| new_key_values = past_key_values
|
|
|
| start_time = time.time()
|
| for continuation_idx in range(continuation_length):
|
| outputs = self.model(
|
| input_ids=input_ids if continuation_idx == 0 else next_token_id.unsqueeze(-1).to(input_ids.device),
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=new_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=True,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| new_key_values = outputs.past_key_values
|
|
|
| hidden_states = outputs[0]
|
|
|
| logits = self.lm_head(hidden_states)
|
| logits = logits[:, -1, :]
|
|
|
|
|
| next_token_logits = F.gumbel_softmax(logits, tau=self.gumbel_temperature, hard=True, dim=-1)
|
| next_token_id = torch.argmax(next_token_logits, dim=-1)
|
|
|
|
|
| input_ids = torch.cat([input_ids, next_token_id.unsqueeze(-1).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| end_thought_token_id = self.tokenizer.convert_tokens_to_ids("<|endthought|>")
|
| input_ids = torch.cat([input_ids, torch.tensor([[end_thought_token_id]] * batch_size).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| outputs_before = self.model(
|
| input_ids=original_input_ids,
|
| attention_mask=original_attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states_before = outputs_before[0][:, -1:, :]
|
|
|
|
|
| outputs_after = self.model(
|
| input_ids=torch.cat([next_token_id.unsqueeze(-1).to(input_ids.device), torch.tensor(end_thought_token_id).unsqueeze(-1).unsqueeze(-1).to(input_ids.device)], dim=-1),
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=new_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states_after = outputs_after[0][:, -1:, :]
|
|
|
|
|
| mixing_weight = self.talk_head[0](torch.cat([hidden_states_before, hidden_states_after], dim=-1))
|
|
|
|
|
| mixed_hidden_states = (1 - mixing_weight) * hidden_states_before + mixing_weight * hidden_states_after
|
|
|
|
|
| logits = self.lm_head(mixed_hidden_states)
|
| return logits
|
|
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| def forward(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CausalLMOutputWithPast]:
|
| r"""
|
| Args:
|
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
|
|
| Returns:
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, MistralForCausalLM
|
|
|
| >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
|
|
|
| >>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| >>> inputs = tokenizer(prompt, return_tensors="pt")
|
|
|
| >>> # Generate
|
| >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| ```"""
|
| log_dict = self.log_dict if self.training else self.eval_log_dict
|
|
|
| if self.training and self.kill_after is not None and self.training_steps // self.gradient_accumulation_steps > self.kill_after:
|
| raise ValueError("Killed after")
|
|
|
| if not self.training:
|
| n_ahead_talk_to_restore = self.n_ahead_talk
|
| n_passes_to_restore = self.n_passes
|
| self.n_ahead_talk = 1
|
| self.n_passes = 1
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| assert self.cumulative_residual or self.clever_residual or self.skip_residual or self.no_residual
|
| assert not (self.skip_residual and self.use_policy_loss)
|
|
|
| if self.tokenized_thought_prefix is None and self.use_thought_prefix:
|
| self.tokenized_thought_prefix = self.tokenizer(self.thought_prefix, return_tensors="pt", add_special_tokens=False)["input_ids"]
|
|
|
| def apply_head(head, states, detach=False):
|
| if detach:
|
| head_weight = head.weight.detach()
|
| else:
|
| head_weight = head.weight
|
| head_weight = head_weight.to(states.device)
|
| return (head_weight @ states.transpose(-1, -2)).transpose(-1, -2).contiguous()
|
|
|
| def idx_if_sequential(head, idx=0):
|
| if isinstance(head, nn.Sequential) or isinstance(head, nn.ModuleList):
|
| return idx_if_sequential(head[idx], idx=idx)
|
| return head
|
|
|
| def none_repeat_interleave(x, n):
|
| if x is None:
|
| return x
|
| return x.repeat_interleave(n, dim=0)
|
|
|
| if self.n_passes > 1:
|
| input_ids = none_repeat_interleave(input_ids, self.n_passes)
|
| attention_mask = none_repeat_interleave(attention_mask, self.n_passes)
|
| position_ids = none_repeat_interleave(position_ids, self.n_passes)
|
| inputs_embeds = none_repeat_interleave(inputs_embeds, self.n_passes)
|
| labels = none_repeat_interleave(labels, self.n_passes)
|
| if past_key_values is not None:
|
| past_key_values = [none_repeat_interleave(p, self.n_passes) for p in past_key_values]
|
| cur_token_indices = torch.arange(input_ids.shape[1], device=input_ids.device)
|
|
|
| self.tokenizer_has_start_thought_token = True
|
| self.tokenizer_has_end_thought_token = True
|
| if self.start_token_id is None:
|
| self.start_token_id = self.tokenizer.convert_tokens_to_ids("<|startthought|>")
|
| if self.start_token_id == 0:
|
| self.start_token_id = self.tokenizer.bos_token_id
|
| self.tokenizer_has_start_thought_token = False
|
| elif self.use_start_thought_token:
|
|
|
| base_start_id = self.tokenizer.encode(self.initial_start_token, add_special_tokens=False)[0]
|
| if self.initialize_thought_embedding_to_normal:
|
| self.start_embedding.data = torch.zeros_like(self.start_embedding.data)
|
| else:
|
| self.start_embedding.data[0] = self.model.embed_tokens.weight.data[base_start_id].clone().detach() / self.embedding_scale
|
| self.start_embedding.data[1] = torch.log(self.model.embed_tokens.weight.data.std(dim=0) * self.thought_init_std_scale / self.embedding_scale)
|
| if self.end_token_id is None:
|
| self.end_token_id = self.tokenizer.convert_tokens_to_ids("<|endthought|>")
|
| if self.end_token_id == 0:
|
| self.end_token_id = self.tokenizer.eos_token_id
|
| self.tokenizer_has_end_thought_token = False
|
| elif self.use_end_thought_token:
|
|
|
| base_end_id = self.tokenizer.encode(self.initial_end_token, add_special_tokens=False)[0]
|
| if self.initialize_thought_embedding_to_normal:
|
| self.end_embedding.data = torch.zeros_like(self.end_embedding.data)
|
| else:
|
| self.end_embedding.data[0] = self.model.embed_tokens.weight.data[base_end_id].clone().detach() / self.embedding_scale
|
| self.end_embedding.data[1] = torch.log(self.model.embed_tokens.weight.data.std(dim=0) * self.thought_init_std_scale / self.embedding_scale)
|
|
|
| if not self.rm_initialized and (self.n_ahead > 1 or not self.base_original_mode):
|
| self.rm_initialized = True
|
| if not self.use_shallow_talk:
|
| head = self.talk_head[0]
|
| cur_head = head[-1] if isinstance(head, nn.Sequential) else head
|
| talk_input_dim = cur_head.weight.data.shape[1]
|
| talk_output_dim = 1 if self.use_weighted_talk_head else self.lm_head.weight.data.shape[0]
|
| cur_head.weight.data = torch.zeros(talk_output_dim, talk_input_dim, device=cur_head.weight.device, dtype=cur_head.weight.dtype)
|
| else:
|
|
|
| def lambda_transform(cur_head):
|
| if cur_head.weight.data.shape[0] != cur_head.weight.data.shape[1]:
|
| return torch.cat([
|
| torch.eye(
|
| cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| ),
|
| torch.zeros(
|
| cur_head.weight.data.shape[0],
|
| cur_head.weight.data.shape[1] - cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| )], dim=1)
|
| return torch.eye(
|
| cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| )
|
| if isinstance(self.talk_head[0], nn.Sequential):
|
| for cur_head in self.talk_head[0]:
|
|
|
| if hasattr(cur_head, "weight"):
|
| cur_head.weight.data = lambda_transform(cur_head)
|
| else:
|
| self.talk_head[-1].weight.data = lambda_transform(self.talk_head[0])
|
|
|
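| # Reset the per-call bookkeeping carried across the unrolled thought/talk iterations.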
| loss = None
|
| prev_rm_tokens = None
|
| cur_rm_tokens = None
|
| prev_rm_logits = None
|
| prev_sample_probs = None
|
| did_skip_sampling = None
|
| skip_sampling = None
|
| sample_probs = None
|
| hidden_states = None
|
| logits = None
|
| talk_kl_penalty = None
|
| rm_logits = None
|
| residual_logits = None
|
| probabilities_2d = None
|
| prev_probabilities_2d = None
|
| policy_reward = None
|
| logits_to_output = None
|
| batch_size, seq_len = input_ids.shape
|
| base_input_ids = input_ids.clone()
|
| loss_list = []
|
| dqn_loss_list = []
|
| sampled_token_history = []
|
| sample_probs_history = []
|
| action_loglikelihoods_list = []
|
|
|
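| # Materialize the start/end thought embeddings, either as plain vectors or, with the
| # reparameterization trick, as (mean, log-std) pairs, both scaled by embedding_scale.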
| if self.use_end_thought_token or self.use_start_thought_token:
|
| if not self.use_reparam_for_thought_embeddings:
|
| start_embedding = self.start_embedding[0].unsqueeze(0) * self.embedding_scale
|
| end_embedding = self.end_embedding[0].unsqueeze(0) * self.embedding_scale
|
| else:
|
| start_embedding = self.start_embedding * self.embedding_scale
|
| end_embedding = self.end_embedding * self.embedding_scale
|
| base_embeddings = self.model.embed_tokens.weight
|
| if self.train_only_thinking_embedding:
|
| base_embeddings = base_embeddings.detach()
|
|
|
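| # Main unrolled loop: n_ahead thought steps followed by n_ahead_talk talk steps
| # (collapsed to a single iteration in original_mode).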
| fwd_iters = 1 if self.original_mode else self.n_ahead + self.n_ahead_talk - 1
|
| for ahead_idx in range(fwd_iters):
|
| past_key_values_length = 0
|
| if past_key_values is not None:
|
| use_legacy_cache = not isinstance(past_key_values, Cache)
|
| if use_legacy_cache:
|
| past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
| past_key_values_length = past_key_values.get_usable_length(seq_len)
|
|
|
| if position_ids is None:
|
| device = input_ids.device if input_ids is not None else inputs_embeds.device
|
| position_ids = torch.arange(
|
| past_key_values_length, seq_len + past_key_values_length, dtype=torch.long, device=device
|
| )
|
| position_ids = position_ids.unsqueeze(0).view(-1, seq_len)
|
| else:
|
| position_ids = position_ids.view(-1, seq_len).long()
|
|
|
| if inputs_embeds is None:
|
| contains_start = self.use_start_thought_token and (input_ids == self.start_token_id).any()
|
| contains_end = self.use_end_thought_token and (input_ids == self.end_token_id).any()
|
| contains_thought = contains_start or contains_end
|
| if contains_thought:
|
| thought_id = self.start_token_id if contains_start else self.end_token_id
|
| cur_thought_embedding = start_embedding if contains_start else end_embedding
|
| if self.use_reparam_for_thought_embeddings:
|
| inputs_embeds = torch.randn(batch_size, seq_len, self.model.config.hidden_size, device=input_ids.device, dtype=cur_thought_embedding.dtype)
|
| inputs_embeds = inputs_embeds.detach() * torch.exp(cur_thought_embedding[1]) + cur_thought_embedding[0]
|
| if contains_start:
|
| sampled_start = inputs_embeds.clone().detach()
|
| if contains_end:
|
| sampled_end = inputs_embeds.clone().detach()
|
| else:
|
| inputs_embeds = cur_thought_embedding.unsqueeze(0).repeat(batch_size, seq_len, 1)
|
| else:
|
| with torch.set_grad_enabled(not self.train_only_thinking_embedding):
|
| inputs_embeds = self.model.embed_tokens(input_ids)
|
|
|
| if self.n_ahead != 1 or self.n_ahead_talk != 1 or self.comparison_mode:
|
| if attention_mask is None:
|
| base_attention_mask = torch.triu(torch.ones(seq_len, seq_len), diagonal=0).to(input_ids.device)
|
| base_attention_mask = base_attention_mask.view(1, 1, seq_len, seq_len)
|
| base_attention_mask = base_attention_mask.repeat(input_ids.shape[0], 1, 1, 1)
|
| attention_mask = base_attention_mask
|
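| # Left-over debugging trap: execution stops here when no attention mask was provided.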
| breakpoint()
|
| elif attention_mask.dim() == 2:
|
| if seq_len + past_key_values_length != attention_mask.shape[-1]:
|
| breakpoint()
|
| attention_mask = torch.cat(
|
| [torch.ones((attention_mask.shape[0], past_key_values_length), dtype=attention_mask.dtype, device=attention_mask.device), attention_mask],
|
| dim=-1
|
| )
|
|
|
| attention_mask = _prepare_4d_causal_attention_mask(
|
| attention_mask,
|
| (batch_size, seq_len),
|
| inputs_embeds,
|
| past_key_values_length,
|
| sliding_window=self.config.sliding_window,
|
| )
|
|
|
| outputs = self.model(
|
|
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| prev_hidden_states = hidden_states
|
| hidden_states = outputs[0]
|
| prev_rm_logits = rm_logits
|
| prev_rm_tokens = cur_rm_tokens
|
|
|
| if ahead_idx == 0:
|
| hidden_states_lm = hidden_states
|
| logits = self.lm_head(hidden_states_lm)
|
| base_hidden_states = hidden_states.clone()
|
| initial_loss_logits = logits.clone()
|
| if self.optimize_lm_head_only_at_start or self.optimize_model_only_at_start:
|
| logits = logits.detach()
|
| base_hidden_states = base_hidden_states.detach()
|
| if self.optimize_model_only_at_start:
|
| hidden_states = hidden_states.detach()
|
| base_logits = logits.clone()
|
| else:
|
| talk_hidden_states = hidden_states
|
| if self.merged_lm_and_talk_heads:
|
| assert self.no_residual
|
| residual_logits = self.lm_head(hidden_states)
|
| talk_hidden_states = hidden_states
|
| else:
|
| if ahead_idx > self.n_ahead - 1:
|
| cur_base_hidden = torch.cat([
|
| base_hidden_states[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_hidden_states[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| else:
|
| cur_base_hidden = base_hidden_states
|
|
|
| if self.use_concat_talk_head:
|
|
|
| head_input_hidden_states = torch.cat([cur_base_hidden, talk_hidden_states], dim=-1)
|
| else:
|
| head_input_hidden_states = talk_hidden_states
|
|
|
| residual_logits = self.talk_head[0](head_input_hidden_states)
|
| if self.use_shallow_talk:
|
| residual_logits = apply_head(self.lm_head, residual_logits, detach=self.optimize_lm_head_only_at_start)
|
| residual_logits = residual_logits.to(logits.device)
|
| if self.use_weighted_talk_head:
|
|
|
| residual_logits = cur_base_hidden * (1 - residual_logits) + talk_hidden_states * residual_logits
|
| residual_logits = apply_head(self.lm_head, residual_logits, detach=self.optimize_lm_head_only_at_start)
|
|
|
| assert sum([self.cumulative_residual, self.clever_residual, self.skip_residual, self.no_residual]) == 1
|
| if self.clever_residual:
|
| if ahead_idx >= self.n_ahead - 1:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits + residual_logits
|
| else:
|
| logits += residual_logits / self.n_ahead
|
| elif self.cumulative_residual:
|
| if self.residual_talk_head:
|
| if ahead_idx < self.n_ahead:
|
| logits += residual_logits
|
| else:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits + residual_logits
|
| else:
|
| if ahead_idx < self.n_ahead:
|
| logits += residual_logits
|
| else:
|
| logits = residual_logits
|
| elif self.skip_residual:
|
| if ahead_idx >= self.n_ahead:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits
|
| elif self.no_residual:
|
| logits = residual_logits
|
| else:
|
| logits = base_logits + residual_logits
|
|
|
| attempted = False
|
| talk_loss_list = []
|
| if self.original_mode or (self.n_ahead == 1) or (self.comparison_mode and ahead_idx == 0):
|
| loss = None
|
| attempted = True
|
|
|
| if labels is not None:
|
| for shift_amount in range(self.n_ahead_talk):
|
|
|
|
|
|
|
| if ahead_idx == 0 and self.optimize_lm_head_only_at_start:
|
| loss_logits = initial_loss_logits
|
| else:
|
| loss_logits = logits
|
| shift_logits = loss_logits[..., shift_amount:-1, :].contiguous()
|
| shift_labels = labels[..., 1 + shift_amount:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss(reduction="none")
|
| shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| shift_labels = shift_labels.view(-1).clone()
|
|
|
| shift_labels[shift_labels == self.tokenizer.pad_token_id] = -100
|
| shift_labels = shift_labels.to(shift_logits.device)
|
| loss = loss_fct(shift_logits, shift_labels)
|
| if not self.comparison_mode and not (self.optimize_lm_head_only_at_start and (self.n_ahead + self.n_ahead_talk > 2)) or self.original_mode:
|
| loss_list.append(loss)
|
| talk_loss_list.append(nonzero_mean(loss).detach())
|
|
|
| if not attempted or self.comparison_mode:
|
| rm_hidden_states = hidden_states
|
|
|
| rm_logits = apply_head(self.lm_head, rm_hidden_states, detach=self.optimize_lm_head_only_at_start)
|
|
|
|
|
| if self.tokenizer_has_start_thought_token:
|
| rm_logits[..., self.start_token_id] = -1e10
|
| if self.tokenizer_has_end_thought_token:
|
| rm_logits[..., self.end_token_id] = -1e10
|
| probabilities = rm_logits
|
| if probabilities_2d is not None:
|
| prev_probabilities_2d = probabilities_2d.clone()
|
| probabilities_2d = probabilities.view(-1, probabilities.size(-1))
|
|
|
| did_skip_sampling = skip_sampling
|
| skip_sampling = False
|
| if ahead_idx == 0 and self.use_start_thought_token:
|
| override_token = self.start_token_id
|
| elif self.use_thought_prefix and ahead_idx < self.tokenized_thought_prefix.shape[-1]:
|
| override_token = self.tokenized_thought_prefix[..., ahead_idx]
|
| elif ahead_idx == self.n_ahead - 2 and self.use_end_thought_token:
|
| override_token = self.end_token_id
|
| else:
|
| override_token = None
|
| if override_token is not None and self.n_ahead > 1:
|
|
|
| probabilities_2d = torch.zeros_like(probabilities_2d)
|
| probabilities_2d[:, override_token] = 1.0
|
| skip_sampling = True
|
| elif ahead_idx >= self.n_ahead - 1:
|
| if labels is not None:
|
| cur_talk_n = ahead_idx - (self.n_ahead - 1) + 1
|
|
|
| shift_labels = labels[..., cur_talk_n:].contiguous().to(probabilities_2d.device)
|
| padding = torch.full_like(
|
| labels[..., :cur_talk_n],
|
| self.tokenizer.pad_token_id,
|
| dtype=torch.long,
|
| device=shift_labels.device
|
| )
|
| new_rm_tokens = torch.cat(
|
| [shift_labels, padding],
|
| dim=-1
|
| )
|
|
|
| probabilities_2d = F.one_hot(new_rm_tokens, num_classes=self.vocab_size).reshape(-1, self.vocab_size).to(probabilities_2d.dtype)
|
| skip_sampling = True
|
| else:
|
| continue
|
| temperature = self.gumbel_temperature if self.training else 0.001
|
| prev_sample_probs = sample_probs
|
| sample_probs = probabilities_2d
|
| if ahead_idx < self.n_ahead - 1 and not skip_sampling:
|
| probabilities_2d = F.gumbel_softmax(sample_probs, tau=temperature, hard=True, dim=-1)
|
| if self.gumbel_detach:
|
| probabilities_2d = probabilities_2d.detach()
|
| sampled_token_history.append(probabilities_2d.argmax(dim=-1).detach().cpu())
|
|
|
| contains_start = self.use_start_thought_token and (probabilities_2d[..., self.start_token_id].sum() > 0)
|
| contains_end = self.use_end_thought_token and (probabilities_2d[..., self.end_token_id].sum() > 0)
|
| contains_thought = contains_start or contains_end
|
|
|
| if not contains_thought:
|
| with torch.set_grad_enabled(not self.train_only_thinking_embedding):
|
| inputs_embeds = probabilities_2d @ (self.model.embed_tokens.weight.to(probabilities.device).to(probabilities.dtype))
|
| else:
|
| thought_id = self.start_token_id if contains_start else self.end_token_id
|
| cur_thought_embedding = start_embedding if contains_start else end_embedding
|
| if self.use_reparam_for_thought_embeddings:
|
| inputs_embeds = torch.randn(batch_size, seq_len, self.model.config.hidden_size, device=input_ids.device, dtype=cur_thought_embedding.dtype)
|
| inputs_embeds = inputs_embeds * torch.exp(cur_thought_embedding[1]) + cur_thought_embedding[0]
|
| if contains_start:
|
| sampled_start = inputs_embeds.clone().detach()
|
| else:
|
| sampled_end = inputs_embeds.clone().detach()
|
| else:
|
| inputs_embeds = cur_thought_embedding.unsqueeze(0).repeat(batch_size, seq_len, 1)
|
| inputs_embeds = inputs_embeds.view(probabilities.size(0), probabilities.size(1), -1).to(self.model.embed_tokens.weight.dtype)
|
| inputs_embeds = inputs_embeds.view(probabilities.size(0), probabilities.size(1), -1).to(self.model.embed_tokens.weight.dtype)
|
|
|
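| # Grow the 4D attention mask by one column per appended thought token: the new column is
| # (near-)diagonal, so each position attends only to the thought token generated at its own
| # position, in addition to the columns it could already see.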
| if len(attention_mask.shape) == 2:
|
| breakpoint()
|
| else:
|
| original_attention = attention_mask[..., :attention_mask.shape[-2]]
|
| if self.use_upper_triangular:
|
| new_attention = original_attention
|
| else:
|
| original_attention = original_attention == attention_mask.max()
|
|
|
| if not attention_mask.dtype == torch.bfloat16:
|
| new_attention = torch.eye(
|
| seq_len, dtype=attention_mask.dtype, device=attention_mask.device
|
| )
|
| else:
|
| new_attention = torch.eye(
|
| seq_len, dtype=torch.float32, device=attention_mask.device
|
| ).to(attention_mask.dtype)
|
|
|
| new_attention = new_attention.view(1, 1, seq_len, seq_len).repeat(input_ids.shape[0], 1, 1, 1)
|
| new_attention = new_attention * original_attention
|
| new_attention[new_attention == 0] = attention_mask.min()
|
| new_attention[new_attention == 1] = attention_mask.max()
|
| attention_mask = torch.cat([attention_mask, new_attention], dim=-1)
|
| past_key_values = outputs.past_key_values
|
| position_ids = position_ids + 1
|
|
|
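| # Per-iteration language-modeling loss: shift logits and labels by one plus the current
| # talk offset, mask padding with -100, and keep the unreduced loss for later aggregation.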
| if labels is not None and (self.n_ahead > 1 or not self.base_original_mode):
|
|
|
|
|
|
|
| if ahead_idx == 0 and self.optimize_lm_head_only_at_start:
|
| loss_logits = initial_loss_logits
|
| else:
|
| loss_logits = logits
|
| shift_idx = 1 + max(0, ahead_idx - (self.n_ahead - 1))
|
| shift_logits = loss_logits[..., :-shift_idx, :].contiguous()
|
| shift_labels = labels[..., shift_idx:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss(reduction="none")
|
| shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| shift_labels = shift_labels.view(-1)
|
|
|
| shift_labels = shift_labels.to(shift_logits.device)
|
|
|
| shift_labels = torch.where(shift_labels == self.tokenizer.pad_token_id, -100, shift_labels)
|
| unreduced_loss = loss_fct(shift_logits, shift_labels)
|
| if torch.any(unreduced_loss != unreduced_loss):
|
| raise ValueError("NaN loss")
|
| unreduced_loss = unreduced_loss.reshape(logits.shape[0], -1)
|
| loss_list.append(unreduced_loss)
|
|
|
|
|
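| # REINFORCE-style credit assignment: the per-token reward is the drop in cross-entropy
| # relative to the previous iteration (or relative to the base model's loss once talk steps
| # begin), and it weights the log-likelihood of the sampled thought tokens.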
| if self.use_policy_loss and ahead_idx > 0 and (ahead_idx > 1 or not self.use_start_thought_token):
|
|
|
| previous_loss = loss_list[-2]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| if ahead_idx < self.n_ahead - 1:
|
| shift_amount = 0
|
| original_dqn_reward = (previous_loss - unreduced_loss).detach()
|
| if self.first_and_last_mode:
|
| original_dqn_reward = original_dqn_reward * 0.0
|
| else:
|
|
|
|
|
| shift_amount = max(0, ahead_idx - (self.n_ahead - 1))
|
|
|
|
|
|
|
|
|
| cur_policy_shift_logits = initial_loss_logits[..., shift_amount:-1, :].contiguous().detach()
|
| cur_policy_shift_labels = labels[..., 1 + shift_amount:].contiguous()
|
|
|
| cur_policy_loss_fct = CrossEntropyLoss(reduction="none")
|
| cur_policy_shift_logits = cur_policy_shift_logits.view(-1, self.config.vocab_size)
|
| cur_policy_shift_labels = cur_policy_shift_labels.view(-1).clone()
|
|
|
| cur_policy_shift_labels[cur_policy_shift_labels == self.tokenizer.pad_token_id] = -100
|
| cur_policy_shift_labels = cur_policy_shift_labels.to(cur_policy_shift_logits.device)
|
| cur_policy_reward_base_loss = loss_fct(
|
| cur_policy_shift_logits, cur_policy_shift_labels.to(cur_policy_shift_logits.device)
|
| ).reshape(logits.shape[0], -1)
|
| original_dqn_reward = cur_policy_reward_base_loss.detach() - unreduced_loss
|
|
|
| if not did_skip_sampling:
|
| nonzero_indices = prev_probabilities_2d.nonzero()
|
| action_loglikelihoods = F.log_softmax(prev_sample_probs / self.reinforce_temperature, dim=-1)[nonzero_indices[:, 0], nonzero_indices[:, 1]]
|
| action_loglikelihoods_2d = action_loglikelihoods.reshape(batch_size, -1)[:, :-1 - shift_amount]
|
| action_loglikelihoods_list.append(action_loglikelihoods_2d)
|
| if policy_reward is None:
|
| policy_reward = original_dqn_reward[:, :-(self.n_ahead_talk - shift_amount)]
|
| else:
|
| if self.n_ahead_talk > shift_amount:
|
| added_reward = original_dqn_reward[:, :-(self.n_ahead_talk - shift_amount)]
|
| else:
|
| added_reward = original_dqn_reward
|
| policy_reward += added_reward
|
|
|
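| # On the final iteration, add the Gaussian log-likelihoods of the sampled start/end thought
| # embeddings so that, with reparameterization, they also receive policy gradients.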
| if self.use_policy_loss and ahead_idx == self.n_ahead + self.n_ahead_talk - 2:
|
|
|
| if self.use_reparam_for_thought_embeddings and (self.use_start_thought_token or self.use_end_thought_token):
|
|
|
|
|
|
|
| if self.use_start_thought_token:
|
| exp_start_std = torch.exp(start_embedding[1])
|
| start_loglikelihood = -0.5 * (sampled_start.detach() - start_embedding[0]) ** 2 / exp_start_std ** 2 - start_embedding[1] - 0.5 * math.log(2 * math.pi)
|
| start_loglikelihood = start_loglikelihood.mean(dim=-1)
|
| if self.use_end_thought_token:
|
| exp_end_std = torch.exp(end_embedding[1])
|
| end_loglikelihood = -0.5 * (sampled_end.detach() - end_embedding[0]) ** 2 / exp_end_std ** 2 - end_embedding[1] - 0.5 * math.log(2 * math.pi)
|
| end_loglikelihood = end_loglikelihood.mean(dim=-1)
|
|
|
| if self.use_end_thought_token and self.use_policy_loss_for_end_thought:
|
| action_loglikelihoods_list.append(end_loglikelihood)
|
| if self.use_start_thought_token:
|
| action_loglikelihoods_list.append(start_loglikelihood)
|
|
|
| if ahead_idx == self.n_ahead + self.n_ahead_talk - 2 and self.eval_mode:
|
| with torch.no_grad():
|
|
|
| filtered_tokens = input_ids[:, :policy_reward.shape[-1]].cpu().detach().numpy().flatten()
|
| filtered_tokens_mask = filtered_tokens != self.tokenizer.pad_token_id
|
| filtered_tokens = filtered_tokens[filtered_tokens_mask]
|
| filtered_rewards = policy_reward.float().cpu().detach().numpy()[:, :seq_len - self.n_ahead_talk].flatten()
|
| filtered_rewards = filtered_rewards[filtered_tokens_mask]
|
|
|
| abs_reward_list = np.abs(policy_reward.float().cpu().detach().numpy()[:, :seq_len - self.n_ahead_talk].flatten())
|
| abs_reward_list = abs_reward_list[filtered_tokens_mask]
|
| medium_quantile = np.quantile(abs_reward_list, 0.5)
|
| upper_quantile = np.quantile(abs_reward_list, 0.95)
|
|
|
| save_tokens_with_rewards_to_pdf(
|
| filtered_tokens,
|
| [0] + filtered_rewards.tolist(),
|
| self.tokenizer,
|
| output_file=f"texts/rewards_talk_{self.n_ahead_talk}_{self.training_steps}.pdf",
|
| eps=medium_quantile,
|
| eps2=upper_quantile,
|
| )
|
|
|
| def plot_kde(data, losses):
|
| sns.set(style="whitegrid")
|
|
|
| sns.kdeplot(data, fill=True)
|
|
|
| plt.title("KDE Plot")
|
| plt.xlabel("Value")
|
| plt.ylabel("Density")
|
|
|
| plt.savefig(f"texts/kde_talk_{self.n_ahead_talk}_{self.training_steps}.pdf")
|
|
|
| plt.close()
|
|
|
|
|
| base_colors = sns.color_palette("light:#5A9", n_colors=256)
|
| base_cmap = LinearSegmentedColormap.from_list("log_light", base_colors)
|
| log_norm = LogNorm(vmin=1e-3, vmax=10)
|
|
|
| sns.kdeplot(x=data, y=losses, fill=True, levels=20, norm=log_norm, cut=0, linewidths=0)
|
|
|
| plt.xlim(-1, 1)
|
| plt.ylim(0, 25)
|
| plt.savefig(f"texts/jointer_talk_{self.n_ahead_talk}_{self.training_steps}.pdf")
|
| plt.close()
|
|
|
| self.all_rewards.extend(filtered_rewards)
|
| self.all_unreduced_losses.extend(unreduced_loss[:, :-1].flatten()[filtered_tokens_mask].float().flatten().cpu().detach().numpy())
|
| plot_kde(self.all_rewards, self.all_unreduced_losses)
|
|
|
| for action_loglikelihoods_2d in action_loglikelihoods_list:
|
| train_policy_reward = policy_reward
|
|
|
|
|
| if self.trice_mode and self.n_passes > 1:
|
| batched_policy_reward = train_policy_reward.reshape(-1, self.n_passes, train_policy_reward.shape[-1])
|
|
|
| train_policy_reward = batched_policy_reward - batched_policy_reward.mean(dim=1, keepdim=True)
|
| train_policy_reward = train_policy_reward.reshape(-1, train_policy_reward.shape[-1])
|
|
|
| if self.subtract_mean_reward:
|
| train_policy_reward = train_policy_reward - train_policy_reward.mean()
|
| if self.remove_negative_rewards:
|
| fixed_policy_reward = train_policy_reward.detach().clamp(min=0)
|
| else:
|
| fixed_policy_reward = train_policy_reward.detach()
|
| actor_loss = -fixed_policy_reward * action_loglikelihoods_2d[:, :policy_reward.shape[-1]].to(policy_reward.device)
|
| if action_loglikelihoods_2d.mean() < -1e4 and not self.use_policy_loss_just_for_thoughts:
|
|
|
| break
|
| dqn_loss_list.append(actor_loss.mean())
|
|
|
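| # Aggregate the per-iteration losses according to the configured mode (first-and-last,
| # first-only, final-only, or a uniform average) and scale by base_loss_beta.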
| if loss_list:
|
| if self.first_and_last_mode:
|
| loss = sum(
|
| self.loss_mean(loss_list[-(i + 1)]) for i in range(self.n_ahead_talk)
|
| ) * (1 - self.original_loss_weight) / self.n_ahead_talk
|
| loss = loss + self.loss_mean(loss_list[0]) * self.original_loss_weight
|
|
|
|
|
| for i in range(1, len(loss_list) - self.n_ahead_talk):
|
| loss_list[i] = loss_list[i] * math.nan
|
| elif self.first_only:
|
| loss = self.loss_mean(loss_list[0])
|
| elif self.final_only_mode:
|
| loss = sum(
|
| self.loss_mean(loss_list[-i]) for i in range(1, self.n_ahead_talk + 1)
|
| ) / self.n_ahead_talk
|
| else:
|
| loss = None
|
| for i in range(len(loss_list)):
|
| cur_loss = self.loss_mean(loss_list[i])
|
| if loss is not None:
|
| loss = loss + cur_loss.to(loss.device)
|
| else:
|
| loss = cur_loss
|
| loss = loss / len(loss_list)
|
|
|
| loss = loss * self.base_loss_beta
|
|
|
| if dqn_loss_list:
|
| dqn_loss = sum(dqn_loss_list) / len(dqn_loss_list)
|
| if self.include_policy_loss:
|
| if loss is not None:
|
| loss += dqn_loss * self.policy_loss_beta
|
| else:
|
| loss = dqn_loss * self.policy_loss_beta
|
|
|
| if not return_dict:
|
| output = (logits,) + outputs[1:]
|
| return (loss,) + output if loss is not None else output
|
|
|
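| # Accumulate per-iteration losses into the running log dict, averaged over n_tokens_print.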
| base_log_dict = {
|
| f"loss_{i}": nonzero_mean(loss_list[i]) for i in range(len(loss_list))
|
| }
|
|
|
| if loss is not None:
|
| base_log_dict["loss_train"] = loss.item()
|
|
|
| for loss_key, loss_val in base_log_dict.items():
|
| log_dict[loss_key] += loss_val / self.n_tokens_print
|
|
|
| if self.use_policy_loss and policy_reward is not None:
|
| log_dict["policy_loss"] += dqn_loss / self.n_tokens_print
|
| log_dict["policy_reward"] += policy_reward.mean() / self.n_tokens_print
|
|
|
| if not loss_list:
|
| if loss is not None:
|
| log_dict["loss_0"] += loss / self.n_tokens_print
|
| else:
|
| log_dict["loss_final"] += nonzero_mean(loss_list[-1]) / self.n_tokens_print
|
| log_dict["loss_talk"] += sum(nonzero_mean(cur_loss_item) for cur_loss_item in loss_list[-self.n_ahead_talk:]) / self.n_ahead_talk / self.n_tokens_print
|
|
|
|
|
| if loss_list:
|
| for i in range(len(loss_list)):
|
| talk_idx = min(max(i - (self.n_ahead - 1), 0), len(talk_loss_list) - 1)
|
| if not talk_loss_list:
|
| cur_talk_loss = nonzero_mean(loss_list[0])
|
| else:
|
| cur_talk_loss = talk_loss_list[talk_idx]
|
| log_dict[f"rel_loss_{i}"] += (nonzero_mean(loss_list[i]) - cur_talk_loss) / self.n_tokens_print
|
| if self.training:
|
| self.training_steps += 1
|
| try:
|
|
|
| if self.wandb_enabled:
|
| if self.training_steps % (self.n_tokens_print) == 0 or not self.training:
|
| if not self.training:
|
| new_log_dict = {}
|
| for key in list(log_dict.keys()):
|
| new_log_dict["eval_" + key] = log_dict[key]
|
| log_dict = new_log_dict
|
| log_dict["training_steps"] = self.training_steps
|
| log_dict["batch_size"] = batch_size
|
| log_dict["example_steps"] = self.training_steps * batch_size * self.gradient_accumulation_steps
|
| if self.n_ahead > 1:
|
| log_dict["compute_steps"] = self.training_steps * batch_size * (self.n_ahead + self.n_ahead_talk - 1) * self.gradient_accumulation_steps
|
| else:
|
| log_dict["compute_steps"] = self.training_steps * batch_size * self.gradient_accumulation_steps
|
|
|
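| # Drop NaN entries before logging (NaN is the only value not equal to itself).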
| for key in list(log_dict.keys()):
|
| if log_dict[key] != log_dict[key]:
|
| del log_dict[key]
|
| if self.training:
|
| wandb.log(log_dict)
|
| if self.training:
|
| self.log_dict = defaultdict(int)
|
| else:
|
| self.eval_log_dict = defaultdict(int)
|
| except Exception:
|
| # Swallow logging errors so they never interrupt training.
|
| pass
|
|
|
| if not self.training:
|
| self.n_ahead_talk = n_ahead_talk_to_restore
|
| self.n_passes = n_passes_to_restore
|
| return CausalLMOutputWithPast(
|
| loss=loss,
|
| logits=(rm_logits if self.n_ahead > 1 else logits) if not self.output_logits_at_the_end else logits,
|
| past_key_values=outputs.past_key_values,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
|
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| def forward_quiet(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CausalLMOutputWithPast]:
|
| r"""
|
| Args:
|
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
|
|
| Returns:
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, QuietForCausalLM
|
|
|
| >>> model = QuietForCausalLM.from_pretrained("quietai/Quiet-7B-v0.1")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("quietai/Quiet-7B-v0.1")
|
|
|
| >>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| >>> inputs = tokenizer(prompt, return_tensors="pt")
|
|
|
| >>> # Generate
|
| >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| ```"""
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
|
|
| outputs = self.model(
|
| input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=True,
|
| )
|
| hidden_states = outputs.last_hidden_state
|
| logits = self.lm_head(hidden_states)
|
|
|
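| # This path relies on helpers defined elsewhere on the model (_generate_thoughts,
| # mixing_head, calculate_policy_loss): thoughts are generated, re-encoded, and their logits
| # are mixed back into the base logits before computing the loss.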
| thought_ids, thought_embeddings = self._generate_thoughts(hidden_states, max_length=self.config.thought_length)
|
| thought_hidden_states = self.model(inputs_embeds=thought_embeddings).last_hidden_state
|
|
|
|
|
| thought_logits = self.lm_head(thought_hidden_states)
|
|
|
|
|
| mixed_logits = logits.unsqueeze(1) + self.mixing_head(thought_logits)
|
| mixed_logits = mixed_logits.view(-1, mixed_logits.size(-1))
|
|
|
| loss = None
|
| if labels is not None:
|
|
|
| shift_logits = mixed_logits[..., :-1, :].contiguous()
|
| shift_labels = labels[..., 1:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss()
|
| loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
|
|
| if self.use_policy_loss:
|
| rewards = loss.detach().unsqueeze(1).repeat(1, self.max_thoughts)
|
| if self.remove_negative_rewards:
|
| rewards = torch.clamp(rewards, min=0)
|
| policy_loss = self.calculate_policy_loss(thought_ids, rewards)
|
| loss = loss + policy_loss
|
| else:
|
| loss = None
|
|
|
| if not return_dict:
|
| output = (mixed_logits,) + outputs[1:]
|
| return ((loss,) + output) if loss is not None else output
|
|
|
| return CausalLMOutputWithPast(
|
| loss=loss,
|
| logits=logits,
|
| past_key_values=outputs.past_key_values,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| def forward_legacy(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| cache_position: Optional[torch.LongTensor] = None,
|
| ) -> Union[Tuple, CausalLMOutputWithPast]:
|
| r"""
|
| Args:
|
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
|
|
| Returns:
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, MistralForCausalLM
|
|
|
| >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
|
|
|
| >>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| >>> inputs = tokenizer(prompt, return_tensors="pt")
|
|
|
| >>> # Generate
|
| >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| ```"""
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
|
|
| outputs = self.model(
|
| input_ids=input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| cache_position=cache_position,
|
| )
|
|
|
| hidden_states = outputs[0]
|
| logits = self.lm_head(hidden_states)
|
| logits = logits.float()
|
|
|
| loss = None
|
| if labels is not None:
|
|
|
| shift_logits = logits[..., :-1, :].contiguous()
|
| shift_labels = labels[..., 1:].contiguous()
|
|
|
| shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| shift_labels = shift_labels.view(-1)
|
|
|
| shift_labels = shift_labels.to(shift_logits.device)
|
| loss_fct = CrossEntropyLoss()
|
| loss = loss_fct(shift_logits, shift_labels)
|
|
|
| if not return_dict:
|
| output = (logits,) + outputs[1:]
|
| return (loss,) + output if loss is not None else output
|
|
|
| return CausalLMOutputWithPast(
|
| loss=loss,
|
| logits=logits,
|
| past_key_values=outputs.past_key_values,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
|
|
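| # Self-Extend attention: queries and keys get two sets of rotary position ids, exact
| # ("neighbor") positions for nearby tokens and grouped positions (downscaled by group_size_1)
| # beyond group_size_2; the two attention maps are merged with a neighbor mask.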
| def self_extend_forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_value: Optional[Cache] = None,
|
| output_attentions: bool = False,
|
| use_cache: bool = False,
|
| padding_mask: Optional[torch.LongTensor] = None,
|
| group_size_1: Optional[float] = 8,
|
| group_size_2: Optional[float] = 2048,
|
| **kwargs,
|
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| if "padding_mask" in kwargs:
|
| warnings.warn(
|
| "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
|
| )
|
| bsz, q_len, _ = hidden_states.size()
|
|
|
| query_states = self.q_proj(hidden_states)
|
| key_states = self.k_proj(hidden_states)
|
| value_states = self.v_proj(hidden_states)
|
|
|
| query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
|
|
| kv_seq_len = key_states.shape[-2]
|
| if past_key_value is not None:
|
| if self.layer_idx is None:
|
| raise ValueError(
|
| f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
|
| "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
|
| "with a layer index."
|
| )
|
| kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
| cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
|
|
|
|
| if past_key_value is not None:
|
| cache_kwargs = {"sin": sin, "cos": cos}
|
| key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
|
|
| query_position_ids = position_ids
|
| key_position_ids = torch.arange(kv_seq_len, dtype=position_ids.dtype).to(query_position_ids.device).view(bsz, kv_seq_len)
|
|
|
|
|
| neighbor_query_states, _ = apply_rotary_pos_emb(query_states, None, cos, sin, query_position_ids)
|
| _, neighbor_key_states = apply_rotary_pos_emb(None, key_states, cos, sin, key_position_ids)
|
| _re_group_size_2 = 0 if position_ids.max() < group_size_2 else group_size_2
|
| group_query_states, _ = apply_grouped_rotary_pos_emb(query_states, None, cos, sin, query_position_ids, g_size_1=group_size_1, g_size_2=_re_group_size_2)
|
| _, group_key_states = apply_grouped_rotary_pos_emb(None, key_states, cos, sin, key_position_ids, g_size_1=group_size_1, g_size_2=_re_group_size_2)
|
|
|
|
|
| group_key_states = repeat_kv(group_key_states, self.num_key_value_groups)
|
| neighbor_key_states = repeat_kv(neighbor_key_states, self.num_key_value_groups)
|
| value_states = repeat_kv(value_states, self.num_key_value_groups)
|
|
|
| neighbor_attn_weights = torch.matmul(neighbor_query_states, neighbor_key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
| group_attn_weights = torch.matmul(group_query_states, group_key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
|
|
|
|
| if group_attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
|
| raise ValueError(
|
| f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
|
| f" {group_attn_weights.size()}"
|
| )
|
|
|
| if attention_mask is not None:
|
| if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
| raise ValueError(
|
| f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
|
| )
|
| group_attn_weights = group_attn_weights + attention_mask
|
| neighbor_attn_weights = neighbor_attn_weights + attention_mask
|
|
|
|
|
| if q_len == 1:
|
| neighbor_attention_mask = torch.zeros((q_len, kv_seq_len), device=neighbor_attn_weights.device)
|
| neighbor_attention_mask[:, -group_size_2:] = 1
|
| elif q_len == kv_seq_len:
|
| neighbor_attention_mask = torch.ones((q_len, kv_seq_len), device=neighbor_attn_weights.device)
|
| neighbor_attention_mask = torch.tril(neighbor_attention_mask)
|
| if q_len - group_size_2 > 0:
|
| group_attention_mask = torch.tril(torch.ones((q_len-group_size_2, kv_seq_len-group_size_2), device=group_attn_weights.device))
|
| neighbor_attention_mask[group_size_2:, :-group_size_2] -= group_attention_mask
|
|
|
| else:
|
| raise ValueError("q_len should be 1 or seq_len.")
|
|
|
|
|
| neighbor_attention_mask = neighbor_attention_mask.bool()
|
| attn_weights = torch.where(neighbor_attention_mask, neighbor_attn_weights, group_attn_weights)
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
| attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
| attn_output = torch.matmul(attn_weights, value_states)
|
|
|
| if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
| raise ValueError(
|
| f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
| f" {attn_output.size()}"
|
| )
|
|
|
| attn_output = attn_output.transpose(1, 2).contiguous()
|
| attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
|
|
| attn_output = self.o_proj(attn_output)
|
|
|
| if not output_attentions:
|
| attn_weights = None
|
|
|
| return attn_output, attn_weights, past_key_value
|
|
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| def forwardStar(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CausalLMOutputWithPast]:
|
| r"""
|
| Args:
|
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
|
|
| Returns:
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, MistralForCausalLM
|
|
|
| >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
|
|
|
| >>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| >>> inputs = tokenizer(prompt, return_tensors="pt")
|
|
|
| >>> # Generate
|
| >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| ```"""
|
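| # forwardStar mirrors the thought-augmented forward pass above; during evaluation the talk
| # horizon and pass count are forced to 1 for the duration of the call.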
| log_dict = self.log_dict if self.training else self.eval_log_dict
|
|
|
| if self.training and self.kill_after is not None and self.training_steps // self.gradient_accumulation_steps > self.kill_after:
|
| raise ValueError("Killed after")
|
|
|
| if not self.training:
|
| n_ahead_talk_to_restore = self.n_ahead_talk
|
| n_passes_to_restore = self.n_passes
|
| self.n_ahead_talk = 1
|
| self.n_passes = 1
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| assert self.cumulative_residual or self.clever_residual or self.skip_residual or self.no_residual
|
| assert not (self.skip_residual and self.use_policy_loss)
|
|
|
| if self.tokenized_thought_prefix is None and self.use_thought_prefix:
|
| self.tokenized_thought_prefix = self.tokenizer(self.thought_prefix, return_tensors="pt", add_special_tokens=False)["input_ids"]
|
|
|
| def apply_head(head, states, detach=False):
|
| if detach:
|
| head_weight = head.weight.detach()
|
| else:
|
| head_weight = head.weight
|
| head_weight = head_weight.to(states.device)
|
| return (head_weight @ states.transpose(-1, -2)).transpose(-1, -2).contiguous()
|
|
|
| def idx_if_sequential(head, idx=0):
|
| if isinstance(head, nn.Sequential) or isinstance(head, nn.ModuleList):
|
| return idx_if_sequential(head[idx], idx=idx)
|
| return head
|
|
|
| def none_repeat_interleave(x, n):
|
| if x is None:
|
| return x
|
| return x.repeat_interleave(n, dim=0)
|
|
|
| if self.n_passes > 1:
|
| input_ids = none_repeat_interleave(input_ids, self.n_passes)
|
| attention_mask = none_repeat_interleave(attention_mask, self.n_passes)
|
| position_ids = none_repeat_interleave(position_ids, self.n_passes)
|
| inputs_embeds = none_repeat_interleave(inputs_embeds, self.n_passes)
|
| labels = none_repeat_interleave(labels, self.n_passes)
|
| if past_key_values is not None:
|
| past_key_values = [none_repeat_interleave(p, self.n_passes) for p in past_key_values]
|
| cur_token_indices = torch.arange(input_ids.shape[1], device=input_ids.device)
|
|
|
| self.tokenizer_has_start_thought_token = True
|
| self.tokenizer_has_end_thought_token = True
|
| if self.start_token_id is None:
|
| self.start_token_id = self.tokenizer.convert_tokens_to_ids("<|startthought|>")
|
| if self.start_token_id == 0:
|
| self.start_token_id = self.tokenizer.bos_token_id
|
| self.tokenizer_has_start_thought_token = False
|
| elif self.use_start_thought_token:
|
|
|
| base_start_id = self.tokenizer.encode(self.initial_start_token, add_special_tokens=False)[0]
|
| if self.initialize_thought_embedding_to_normal:
|
| self.start_embedding.data = torch.zeros_like(self.start_embedding.data)
|
| else:
|
| self.start_embedding.data[0] = self.model.embed_tokens.weight.data[base_start_id].clone().detach() / self.embedding_scale
|
| self.start_embedding.data[1] = torch.log(self.model.embed_tokens.weight.data.std(dim=0) * self.thought_init_std_scale / self.embedding_scale)
|
| if self.end_token_id is None:
|
| self.end_token_id = self.tokenizer.convert_tokens_to_ids("<|endthought|>")
|
| if self.end_token_id == 0:
|
| self.end_token_id = self.tokenizer.eos_token_id
|
| self.tokenizer_has_end_thought_token = False
|
| elif self.use_end_thought_token:
|
|
|
| base_end_id = self.tokenizer.encode(self.initial_end_token, add_special_tokens=False)[0]
|
| if self.initialize_thought_embedding_to_normal:
|
| self.end_embedding.data = torch.zeros_like(self.end_embedding.data)
|
| else:
|
| self.end_embedding.data[0] = self.model.embed_tokens.weight.data[base_end_id].clone().detach() / self.embedding_scale
|
| self.end_embedding.data[1] = torch.log(self.model.embed_tokens.weight.data.std(dim=0) * self.thought_init_std_scale / self.embedding_scale)
|
|
|
| if not self.rm_initialized and (self.n_ahead > 1 or not self.base_original_mode):
|
| self.rm_initialized = True
|
| if not self.use_shallow_talk:
|
| head = self.talk_head[0]
|
| cur_head = head[-1] if isinstance(head, nn.Sequential) else head
|
| talk_input_dim = cur_head.weight.data.shape[1]
|
| talk_output_dim = 1 if self.use_weighted_talk_head else self.lm_head.weight.data.shape[0]
|
| cur_head.weight.data = torch.zeros(talk_output_dim, talk_input_dim, device=cur_head.weight.device, dtype=cur_head.weight.dtype)
|
| else:
|
|
|
| def lambda_transform(cur_head):
|
| if cur_head.weight.data.shape[0] != cur_head.weight.data.shape[1]:
|
| return torch.cat([
|
| torch.eye(
|
| cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| ),
|
| torch.zeros(
|
| cur_head.weight.data.shape[0],
|
| cur_head.weight.data.shape[1] - cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| )], dim=1)
|
| return torch.eye(
|
| cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| )
|
| if isinstance(self.talk_head[0], nn.Sequential):
|
| for cur_head in self.talk_head[0]:
|
|
|
| if hasattr(cur_head, "weight"):
|
| cur_head.weight.data = lambda_transform(cur_head)
|
| else:
|
| self.talk_head[-1].weight.data = lambda_transform(self.talk_head[0])
|
|
|
| loss = None
|
| prev_rm_tokens = None
|
| cur_rm_tokens = None
|
| prev_rm_logits = None
|
| prev_sample_probs = None
|
| did_skip_sampling = None
|
| skip_sampling = None
|
| sample_probs = None
|
| hidden_states = None
|
| logits = None
|
| talk_kl_penalty = None
|
| rm_logits = None
|
| residual_logits = None
|
| probabilities_2d = None
|
| prev_probabilities_2d = None
|
| policy_reward = None
|
| logits_to_output = None
|
| batch_size, seq_len = input_ids.shape
|
| base_input_ids = input_ids.clone()
|
| loss_list = []
|
| dqn_loss_list = []
|
| sampled_token_history = []
|
| sample_probs_history = []
|
| action_loglikelihoods_list = []
|
|
|
| if self.use_end_thought_token or self.use_start_thought_token:
|
| if not self.use_reparam_for_thought_embeddings:
|
| start_embedding = self.start_embedding[0].unsqueeze(0) * self.embedding_scale
|
| end_embedding = self.end_embedding[0].unsqueeze(0) * self.embedding_scale
|
| else:
|
| start_embedding = self.start_embedding * self.embedding_scale
|
| end_embedding = self.end_embedding * self.embedding_scale
|
| base_embeddings = self.model.embed_tokens.weight
|
| if self.train_only_thinking_embedding:
|
| base_embeddings = base_embeddings.detach()
|
|
|
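| # Main unrolled loop, as in the forward pass above: n_ahead thought steps followed by
| # n_ahead_talk talk steps.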
| fwd_iters = 1 if self.original_mode else self.n_ahead + self.n_ahead_talk - 1
|
| for ahead_idx in range(fwd_iters):
|
| past_key_values_length = 0
|
| if past_key_values is not None:
|
| use_legacy_cache = not isinstance(past_key_values, Cache)
|
| if use_legacy_cache:
|
| past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
| past_key_values_length = past_key_values.get_usable_length(seq_len)
|
|
|
| if position_ids is None:
|
| device = input_ids.device if input_ids is not None else inputs_embeds.device
|
| position_ids = torch.arange(
|
| past_key_values_length, seq_len + past_key_values_length, dtype=torch.long, device=device
|
| )
|
| position_ids = position_ids.unsqueeze(0).view(-1, seq_len)
|
| else:
|
| position_ids = position_ids.view(-1, seq_len).long()
|
|
|
| if inputs_embeds is None:
|
| contains_start = self.use_start_thought_token and (input_ids == self.start_token_id).any()
|
| contains_end = self.use_end_thought_token and (input_ids == self.end_token_id).any()
|
| contains_thought = contains_start or contains_end
|
| if contains_thought:
|
| thought_id = self.start_token_id if contains_start else self.end_token_id
|
| cur_thought_embedding = start_embedding if contains_start else end_embedding
|
| if self.use_reparam_for_thought_embeddings:
|
| inputs_embeds = torch.randn(batch_size, seq_len, self.model.config.hidden_size, device=input_ids.device, dtype=cur_thought_embedding.dtype)
|
| inputs_embeds = inputs_embeds.detach() * torch.exp(cur_thought_embedding[1]) + cur_thought_embedding[0]
|
| if contains_start:
|
| sampled_start = inputs_embeds.clone().detach()
|
| if contains_end:
|
| sampled_end = inputs_embeds.clone().detach()
|
| else:
|
| inputs_embeds = cur_thought_embedding.unsqueeze(0).repeat(batch_size, seq_len, 1)
|
| else:
|
| with torch.set_grad_enabled(not self.train_only_thinking_embedding):
|
| inputs_embeds = self.model.embed_tokens(input_ids)
|
|
|
| if self.n_ahead != 1 or self.n_ahead_talk != 1 or self.comparison_mode:
|
| if attention_mask is None:
|
| base_attention_mask = torch.triu(torch.ones(seq_len, seq_len), diagonal=0).to(input_ids.device)
|
| base_attention_mask = base_attention_mask.view(1, 1, seq_len, seq_len)
|
| base_attention_mask = base_attention_mask.repeat(input_ids.shape[0], 1, 1, 1)
|
| attention_mask = base_attention_mask
|
| breakpoint()
|
| elif attention_mask.dim() == 2:
|
| if seq_len + past_key_values_length != attention_mask.shape[-1]:
|
| breakpoint()
|
| attention_mask = torch.cat(
|
| [torch.ones((attention_mask.shape[0], past_key_values_length), dtype=attention_mask.dtype, device=attention_mask.device), attention_mask],
|
| dim=-1
|
| )
|
|
|
| attention_mask = _prepare_4d_causal_attention_mask(
|
| attention_mask,
|
| (batch_size, seq_len),
|
| inputs_embeds,
|
| past_key_values_length,
|
| sliding_window=self.config.sliding_window,
|
| )
|
|
|
| outputs = self.model(
|
|
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| prev_hidden_states = hidden_states
|
| hidden_states = outputs[0]
|
| prev_rm_logits = rm_logits
|
| prev_rm_tokens = cur_rm_tokens
|
|
|
| if ahead_idx == 0:
|
| hidden_states_lm = hidden_states
|
| logits = self.lm_head(hidden_states_lm)
|
| base_hidden_states = hidden_states.clone()
|
| initial_loss_logits = logits.clone()
|
| if self.optimize_lm_head_only_at_start or self.optimize_model_only_at_start:
|
| logits = logits.detach()
|
| base_hidden_states = base_hidden_states.detach()
|
| if self.optimize_model_only_at_start:
|
| hidden_states = hidden_states.detach()
|
| base_logits = logits.clone()
|
| else:
|
| talk_hidden_states = hidden_states
|
| if self.merged_lm_and_talk_heads:
|
| assert self.no_residual
|
| residual_logits = self.lm_head(hidden_states)
|
| talk_hidden_states = hidden_states
|
| else:
|
| if ahead_idx > self.n_ahead - 1:
|
| cur_base_hidden = torch.cat([
|
| base_hidden_states[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_hidden_states[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| else:
|
| cur_base_hidden = base_hidden_states
|
|
|
| if self.use_concat_talk_head:
|
|
|
| head_input_hidden_states = torch.cat([cur_base_hidden, talk_hidden_states], dim=-1)
|
| else:
|
| head_input_hidden_states = talk_hidden_states
|
|
|
| residual_logits = self.talk_head[0](head_input_hidden_states)
|
| if self.use_shallow_talk:
|
| residual_logits = apply_head(self.lm_head, residual_logits, detach=self.optimize_lm_head_only_at_start)
|
| residual_logits = residual_logits.to(logits.device)
|
| if self.use_weighted_talk_head:
|
|
|
| residual_logits = cur_base_hidden * (1 - residual_logits) + talk_hidden_states * residual_logits
|
| residual_logits = apply_head(self.lm_head, residual_logits, detach=self.optimize_lm_head_only_at_start)
|
|
|
| assert sum([self.cumulative_residual, self.clever_residual, self.skip_residual, self.no_residual]) == 1
|
| if self.clever_residual:
|
| if ahead_idx >= self.n_ahead - 1:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits + residual_logits
|
| else:
|
| logits += residual_logits / self.n_ahead
|
| elif self.cumulative_residual:
|
| if self.residual_talk_head:
|
| if ahead_idx < self.n_ahead:
|
| logits += residual_logits
|
| else:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits + residual_logits
|
| else:
|
| if ahead_idx < self.n_ahead:
|
| logits += residual_logits
|
| else:
|
| logits = residual_logits
|
| elif self.skip_residual:
|
| if ahead_idx >= self.n_ahead:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits
|
| elif self.no_residual:
|
| logits = residual_logits
|
| else:
|
| logits = base_logits + residual_logits
|
|
|
| attempted = False
|
| talk_loss_list = []
|
| if self.original_mode or (self.n_ahead == 1) or (self.comparison_mode and ahead_idx == 0):
|
| loss = None
|
| attempted = True
|
|
|
| if labels is not None:
|
| for shift_amount in range(self.n_ahead_talk):
|
|
|
|
|
|
|
| if ahead_idx == 0 and self.optimize_lm_head_only_at_start:
|
| loss_logits = initial_loss_logits
|
| else:
|
| loss_logits = logits
|
| shift_logits = loss_logits[..., shift_amount:-1, :].contiguous()
|
| shift_labels = labels[..., 1 + shift_amount:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss(reduction="none")
|
| shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| shift_labels = shift_labels.view(-1).clone()
|
|
|
| shift_labels[shift_labels == self.tokenizer.pad_token_id] = -100
|
| shift_labels = shift_labels.to(shift_logits.device)
|
| loss = loss_fct(shift_logits, shift_labels)
|
| if not self.comparison_mode and not (self.optimize_lm_head_only_at_start and (self.n_ahead + self.n_ahead_talk > 2)) or self.original_mode:
|
| loss_list.append(loss)
|
| talk_loss_list.append(nonzero_mean(loss).detach())
|
|
|
| if not attempted or self.comparison_mode:
|
| rm_hidden_states = hidden_states
|
|
|
| rm_logits = apply_head(self.lm_head, rm_hidden_states, detach=self.optimize_lm_head_only_at_start)
|
|
|
|
|
| if self.tokenizer_has_start_thought_token:
|
| rm_logits[..., self.start_token_id] = -1e10
|
| if self.tokenizer_has_end_thought_token:
|
| rm_logits[..., self.end_token_id] = -1e10
|
| probabilities = rm_logits
|
| if probabilities_2d is not None:
|
| prev_probabilities_2d = probabilities_2d.clone()
|
| probabilities_2d = probabilities.view(-1, probabilities.size(-1))
|
|
|
| did_skip_sampling = skip_sampling
|
| skip_sampling = False
|
| if ahead_idx == 0 and self.use_start_thought_token:
|
| override_token = self.start_token_id
|
| elif self.use_thought_prefix and ahead_idx < self.tokenized_thought_prefix.shape[-1]:
|
| override_token = self.tokenized_thought_prefix[..., ahead_idx]
|
| elif ahead_idx == self.n_ahead - 2 and self.use_end_thought_token:
|
| override_token = self.end_token_id
|
| else:
|
| override_token = None
|
| if override_token is not None and self.n_ahead > 1:
|
|
|
| probabilities_2d = torch.zeros_like(probabilities_2d)
|
| probabilities_2d[:, override_token] = 1.0
|
| skip_sampling = True
|
| elif ahead_idx >= self.n_ahead - 1:
|
| if labels is not None:
|
| cur_talk_n = ahead_idx - (self.n_ahead - 1) + 1
|
|
|
| shift_labels = labels[..., cur_talk_n:].contiguous().to(probabilities_2d.device)
|
| padding = torch.full_like(
|
| labels[..., :cur_talk_n],
|
| self.tokenizer.pad_token_id,
|
| dtype=torch.long,
|
| device=shift_labels.device
|
| )
|
| new_rm_tokens = torch.cat(
|
| [shift_labels, padding],
|
| dim=-1
|
| )
|
|
|
| probabilities_2d = F.one_hot(new_rm_tokens, num_classes=self.vocab_size).reshape(-1, self.vocab_size).to(probabilities_2d.dtype)
|
| skip_sampling = True
|
| else:
|
| continue
|
| temperature = self.gumbel_temperature if self.training else 0.001
|
| prev_sample_probs = sample_probs
|
| sample_probs = probabilities_2d
|
| if ahead_idx < self.n_ahead - 1 and not skip_sampling:
|
| probabilities_2d = F.gumbel_softmax(sample_probs, tau=temperature, hard=True, dim=-1)
|
| if self.gumbel_detach:
|
| probabilities_2d = probabilities_2d.detach()
|
| sampled_token_history.append(probabilities_2d.argmax(dim=-1).detach().cpu())
|
|
|
| contains_start = self.use_start_thought_token and (probabilities_2d[..., self.start_token_id].sum() > 0)
|
| contains_end = self.use_end_thought_token and (probabilities_2d[..., self.end_token_id].sum() > 0)
|
| contains_thought = contains_start or contains_end
|
|
|
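| # Next-step input embeddings: project the one-hot sample through the embedding matrix, or substitute the
| # learned start/end thought embedding (reparameterized via its mean and log-std rows when enabled).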
| if not contains_thought:
|
| with torch.set_grad_enabled(not self.train_only_thinking_embedding):
|
| inputs_embeds = probabilities_2d @ (self.model.embed_tokens.weight.to(probabilities.device).to(probabilities.dtype))
|
| else:
|
| thought_id = self.start_token_id if contains_start else self.end_token_id
|
| cur_thought_embedding = start_embedding if contains_start else end_embedding
|
| if self.use_reparam_for_thought_embeddings:
|
| inputs_embeds = torch.randn(batch_size, seq_len, self.model.config.hidden_size, device=input_ids.device, dtype=cur_thought_embedding.dtype)
|
| inputs_embeds = inputs_embeds * torch.exp(cur_thought_embedding[1]) + cur_thought_embedding[0]
|
| if contains_start:
|
| sampled_start = inputs_embeds.clone().detach()
|
| else:
|
| sampled_end = inputs_embeds.clone().detach()
|
| else:
|
| inputs_embeds = cur_thought_embedding.unsqueeze(0).repeat(batch_size, seq_len, 1)
|
| inputs_embeds = inputs_embeds.view(probabilities.size(0), probabilities.size(1), -1).to(self.model.embed_tokens.weight.dtype)
|
|
|
|
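| # Extend the 4-D attention mask by one column block so each position attends to its newly appended thought
| # token (a diagonal block, or the original pattern when use_upper_triangular is set); the rest stays masked.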
| if len(attention_mask.shape) == 2:
|
| raise ValueError("Expected a 4-D attention mask when appending the thought-token column; got a 2-D mask.")
|
| else:
|
| original_attention = attention_mask[..., :attention_mask.shape[-2]]
|
| if self.use_upper_triangular:
|
| new_attention = original_attention
|
| else:
|
| original_attention = original_attention == attention_mask.max()
|
|
|
| if not attention_mask.dtype == torch.bfloat16:
|
| new_attention = torch.eye(
|
| seq_len, dtype=attention_mask.dtype, device=attention_mask.device
|
| )
|
| else:
|
| new_attention = torch.eye(
|
| seq_len, dtype=torch.float32, device=attention_mask.device
|
| ).to(attention_mask.dtype)
|
|
|
| new_attention = new_attention.view(1, 1, seq_len, seq_len).repeat(input_ids.shape[0], 1, 1, 1)
|
| new_attention = new_attention * original_attention
|
| new_attention[new_attention == 0] = attention_mask.min()
|
| new_attention[new_attention == 1] = attention_mask.max()
|
| attention_mask = torch.cat([attention_mask, new_attention], dim=-1)
|
| past_key_values = outputs.past_key_values
|
| position_ids = position_ids + 1
|
|
|
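| # Per-step language-modeling loss: shift logits and labels by one token (plus the current talk offset) and
| # keep the unreduced per-token cross entropy so it can also serve as a reward signal below.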
| if labels is not None and (self.n_ahead > 1 or not self.base_original_mode):
|
|
|
|
|
|
|
| if ahead_idx == 0 and self.optimize_lm_head_only_at_start:
|
| loss_logits = initial_loss_logits
|
| else:
|
| loss_logits = logits
|
| shift_idx = 1 + max(0, ahead_idx - (self.n_ahead - 1))
|
| shift_logits = loss_logits[..., :-shift_idx, :].contiguous()
|
| shift_labels = labels[..., shift_idx:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss(reduction="none")
|
| shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| shift_labels = shift_labels.view(-1)
|
|
|
| shift_labels = shift_labels.to(shift_logits.device)
|
|
|
| shift_labels = torch.where(shift_labels == self.tokenizer.pad_token_id, -100, shift_labels)
|
| unreduced_loss = loss_fct(shift_logits, shift_labels)
|
| if torch.isnan(unreduced_loss).any():
|
| raise ValueError("NaN loss")
|
| unreduced_loss = unreduced_loss.reshape(logits.shape[0], -1)
|
| loss_list.append(unreduced_loss)
|
|
|
|
|
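| # Policy reward: during thinking steps, the improvement of this step's unreduced loss over the previous
| # step; during talk steps, the improvement over the base model's loss. Rewards accumulate into policy_reward,
| # and the log-likelihood of the previously sampled token is recorded for the policy loss.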
| if self.use_policy_loss and ahead_idx > 0 and (ahead_idx > 1 or not self.use_start_thought_token):
|
|
|
| previous_loss = loss_list[-2]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| if ahead_idx < self.n_ahead - 1:
|
| shift_amount = 0
|
| original_dqn_reward = (previous_loss - unreduced_loss).detach()
|
| if self.first_and_last_mode:
|
| original_dqn_reward = original_dqn_reward * 0.0
|
| else:
|
|
|
|
|
| shift_amount = max(0, ahead_idx - (self.n_ahead - 1))
|
|
|
|
|
|
|
|
|
| cur_policy_shift_logits = initial_loss_logits[..., shift_amount:-1, :].contiguous().detach()
|
| cur_policy_shift_labels = labels[..., 1 + shift_amount:].contiguous()
|
|
|
| cur_policy_loss_fct = CrossEntropyLoss(reduction="none")
|
| cur_policy_shift_logits = cur_policy_shift_logits.view(-1, self.config.vocab_size)
|
| cur_policy_shift_labels = cur_policy_shift_labels.view(-1).clone()
|
|
|
| cur_policy_shift_labels[cur_policy_shift_labels == self.tokenizer.pad_token_id] = -100
|
| cur_policy_shift_labels = cur_policy_shift_labels.to(cur_policy_shift_logits.device)
|
| cur_policy_reward_base_loss = loss_fct(
|
| cur_policy_shift_logits, cur_policy_shift_labels.to(cur_policy_shift_logits.device)
|
| ).reshape(logits.shape[0], -1)
|
| original_dqn_reward = cur_policy_reward_base_loss.detach() - unreduced_loss
|
|
|
| if not did_skip_sampling:
|
| nonzero_indices = prev_probabilities_2d.nonzero()
|
| action_loglikelihoods = F.log_softmax(prev_sample_probs / self.reinforce_temperature, dim=-1)[nonzero_indices[:, 0], nonzero_indices[:, 1]]
|
| action_loglikelihoods_2d = action_loglikelihoods.reshape(batch_size, -1)[:, :-1 - shift_amount]
|
| action_loglikelihoods_list.append(action_loglikelihoods_2d)
|
| if policy_reward is None:
|
| policy_reward = original_dqn_reward[:, :-(self.n_ahead_talk - shift_amount)]
|
| else:
|
| if self.n_ahead_talk > shift_amount:
|
| added_reward = original_dqn_reward[:, :-(self.n_ahead_talk - shift_amount)]
|
| else:
|
| added_reward = original_dqn_reward
|
| policy_reward += added_reward
|
|
|
| if self.use_policy_loss and ahead_idx == self.n_ahead + self.n_ahead_talk - 2:
|
|
|
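| # With reparameterized thought embeddings, add the Gaussian log-density of the sampled start/end embeddings
| # (under the learned mean and log-std) as extra action log-likelihoods.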
| if self.use_reparam_for_thought_embeddings and (self.use_start_thought_token or self.use_end_thought_token):
|
|
|
|
|
|
|
| if self.use_start_thought_token:
|
| exp_start_std = torch.exp(start_embedding[1])
|
| start_loglikelihood = -0.5 * (sampled_start.detach() - start_embedding[0]) ** 2 / exp_start_std ** 2 - start_embedding[1] - 0.5 * math.log(2 * math.pi)
|
| start_loglikelihood = start_loglikelihood.mean(dim=-1)
|
| if self.use_end_thought_token:
|
| exp_end_std = torch.exp(end_embedding[1])
|
| end_loglikelihood = -0.5 * (sampled_end.detach() - end_embedding[0]) ** 2 / exp_end_std ** 2 - end_embedding[1] - 0.5 * math.log(2 * math.pi)
|
| end_loglikelihood = end_loglikelihood.mean(dim=-1)
|
|
|
| if self.use_end_thought_token and self.use_policy_loss_for_end_thought:
|
| action_loglikelihoods_list.append(end_loglikelihood)
|
| if self.use_start_thought_token:
|
| action_loglikelihoods_list.append(start_loglikelihood)
|
|
|
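| # In eval mode, gather per-token reward statistics (with padding filtered out) for analysis.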
| if ahead_idx == self.n_ahead + self.n_ahead_talk - 2 and self.eval_mode:
|
| with torch.no_grad():
|
|
|
| filtered_tokens = input_ids[:, :policy_reward.shape[-1]].cpu().detach().numpy().flatten()
|
| filtered_tokens_mask = filtered_tokens != self.tokenizer.pad_token_id
|
| filtered_tokens = filtered_tokens[filtered_tokens_mask]
|
| filtered_rewards = policy_reward.float().cpu().detach().numpy()[:, :seq_len - self.n_ahead_talk].flatten()
|
| filtered_rewards = filtered_rewards[filtered_tokens_mask]
|
|
|
| abs_reward_list = np.abs(policy_reward.float().cpu().detach().numpy()[:, :seq_len - self.n_ahead_talk].flatten())
|
| abs_reward_list = abs_reward_list[filtered_tokens_mask]
|
| medium_quantile = np.quantile(abs_reward_list, 0.5)
|
| upper_quantile = np.quantile(abs_reward_list, 0.95)
|
|
|
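| # Policy-gradient loss: weight each action log-likelihood by the detached reward, optionally baselined
| # across passes (trice_mode), mean-subtracted, and clamped to be non-negative.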
| for action_loglikelihoods_2d in action_loglikelihoods_list:
|
| train_policy_reward = policy_reward
|
|
|
|
|
| if self.trice_mode and self.n_passes > 1:
|
| batched_policy_reward = train_policy_reward.reshape(-1, self.n_passes, train_policy_reward.shape[-1])
|
|
|
| train_policy_reward = batched_policy_reward - batched_policy_reward.mean(dim=1, keepdim=True)
|
| train_policy_reward = train_policy_reward.reshape(-1, train_policy_reward.shape[-1])
|
|
|
| if self.subtract_mean_reward:
|
| train_policy_reward = train_policy_reward - train_policy_reward.mean()
|
| if self.remove_negative_rewards:
|
| fixed_policy_reward = train_policy_reward.detach().clamp(min=0)
|
| else:
|
| fixed_policy_reward = train_policy_reward.detach()
|
| actor_loss = -fixed_policy_reward * action_loglikelihoods_2d[:, :policy_reward.shape[-1]].to(policy_reward.device)
|
| if action_loglikelihoods_2d.mean() < -1e4 and not self.use_policy_loss_just_for_thoughts:
|
|
|
| break
|
| dqn_loss_list.append(actor_loss.mean())
|
|
|
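| # Combine the per-step losses according to the configured mode (first-and-last, first-only, final-only,
| # or a plain mean over all steps), then scale by base_loss_beta.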
| if loss_list:
|
| if self.first_and_last_mode:
|
| loss = sum(
|
| self.loss_mean(loss_list[-(i + 1)]) for i in range(self.n_ahead_talk)
|
| ) * (1 - self.original_loss_weight) / self.n_ahead_talk
|
| loss = loss + self.loss_mean(loss_list[0]) * self.original_loss_weight
|
|
|
|
|
| for i in range(1, len(loss_list) - self.n_ahead_talk):
|
| loss_list[i] = loss_list[i] * math.nan
|
| elif self.first_only:
|
| loss = self.loss_mean(loss_list[0])
|
| elif self.final_only_mode:
|
| loss = sum(
|
| self.loss_mean(loss_list[-i]) for i in range(1, self.n_ahead_talk + 1)
|
| ) / self.n_ahead_talk
|
| else:
|
| loss = None
|
| for i in range(len(loss_list)):
|
| cur_loss = self.loss_mean(loss_list[i])
|
| if loss is not None:
|
| loss = loss + cur_loss.to(loss.device)
|
| else:
|
| loss = cur_loss
|
| loss = loss / len(loss_list)
|
|
|
| loss = loss * self.base_loss_beta
|
|
|
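| # Add the averaged policy loss, scaled by policy_loss_beta.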
| if dqn_loss_list:
|
| dqn_loss = sum(dqn_loss_list) / len(dqn_loss_list)
|
| if self.include_policy_loss:
|
| if loss is not None:
|
| loss += dqn_loss * self.policy_loss_beta
|
| else:
|
| loss = dqn_loss * self.policy_loss_beta
|
|
|
| if not return_dict:
|
| output = (logits,) + outputs[1:]
|
| return (loss,) + output if loss is not None else output
|
|
|
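| # Accumulate per-step, aggregate, and relative losses into the train/eval logging dictionary.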
| base_log_dict = {
|
| f"loss_{i}": nonzero_mean(loss_list[i]) for i in range(len(loss_list))
|
| }
|
|
|
| if loss is not None:
|
| base_log_dict["loss_train"] = loss.item()
|
|
|
| for loss_key, loss_val in base_log_dict.items():
|
| log_dict[loss_key] += loss_val / self.n_tokens_print
|
|
|
| if self.use_policy_loss and policy_reward is not None:
|
| log_dict["policy_loss"] += dqn_loss / self.n_tokens_print
|
| log_dict["policy_reward"] += policy_reward.mean() / self.n_tokens_print
|
|
|
| if not loss_list:
|
| if loss is not None:
|
| log_dict["loss_0"] += loss / self.n_tokens_print
|
| else:
|
| log_dict["loss_final"] += nonzero_mean(loss_list[-1]) / self.n_tokens_print
|
| log_dict["loss_talk"] += sum(nonzero_mean(cur_loss_item) for cur_loss_item in loss_list[-self.n_ahead_talk:]) / self.n_ahead_talk / self.n_tokens_print
|
|
|
|
|
| if loss_list:
|
| for i in range(len(loss_list)):
|
| talk_idx = min(max(i - (self.n_ahead - 1), 0), len(talk_loss_list) - 1)
|
| if not talk_loss_list:
|
| cur_talk_loss = nonzero_mean(loss_list[0])
|
| else:
|
| cur_talk_loss = talk_loss_list[talk_idx]
|
| log_dict[f"rel_loss_{i}"] += (nonzero_mean(loss_list[i]) - cur_talk_loss) / self.n_tokens_print
|
| if self.training:
|
| self.training_steps += 1
|
|
|
| if not self.training:
|
| self.n_ahead_talk = n_ahead_talk_to_restore
|
| self.n_passes = n_passes_to_restore
|
| return CausalLMOutputWithPast(
|
| loss=loss,
|
| logits=(rm_logits if self.n_ahead > 1 else logits) if not self.output_logits_at_the_end else logits,
|
| past_key_values=outputs.past_key_values,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
|
|
| def prepare_inputs_for_generation(
|
| self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
| ):
|
|
|
| if past_key_values is not None:
|
| if isinstance(past_key_values, Cache):
|
| cache_length = past_key_values.get_seq_length()
|
| past_length = past_key_values.seen_tokens
|
| max_cache_length = past_key_values.get_max_length()
|
| else:
|
| cache_length = past_length = past_key_values[0][0].shape[2]
|
| max_cache_length = None
|
|
|
|
|
|
|
|
|
|
|
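| # Keep only the unprocessed tokens: if the attention mask is longer than input_ids, some inputs are passed
| # exclusively through the cache; otherwise drop the tokens already covered by past_length. Finally, crop the
| # attention mask if the cache's maximum length would be exceeded.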
| if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
| input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
|
|
|
|
| elif past_length < input_ids.shape[1]:
|
| input_ids = input_ids[:, past_length:]
|
|
|
|
|
|
|
| if (
|
| max_cache_length is not None
|
| and attention_mask is not None
|
| and cache_length + input_ids.shape[1] > max_cache_length
|
| ):
|
| attention_mask = attention_mask[:, -max_cache_length:]
|
|
|
| position_ids = kwargs.get("position_ids", None)
|
| if attention_mask is not None and position_ids is None:
|
|
|
| position_ids = attention_mask.long().cumsum(-1) - 1
|
| position_ids.masked_fill_(attention_mask == 0, 1)
|
| if past_key_values:
|
| position_ids = position_ids[:, -input_ids.shape[1] :]
|
|
|
|
|
| if inputs_embeds is not None and past_key_values is None:
|
| model_inputs = {"inputs_embeds": inputs_embeds}
|
| else:
|
| model_inputs = {"input_ids": input_ids}
|
|
|
| model_inputs.update(
|
| {
|
| "position_ids": position_ids,
|
| "past_key_values": past_key_values,
|
| "use_cache": kwargs.get("use_cache"),
|
| "attention_mask": attention_mask,
|
| }
|
| )
|
| return model_inputs
|
|
|
| @staticmethod
|
| def _reorder_cache(past_key_values, beam_idx):
|
| reordered_past = ()
|
| for layer_past in past_key_values:
|
| reordered_past += (
|
| tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
| )
|
| return reordered_past
|
|
|
| class MistralSelfExtendForCausalLM(MistralPreTrainedModel):
|
| _tied_weights_keys = ["lm_head.weight"]
|
|
|
| def __init__(self, config):
|
| super().__init__(config)
|
| self.model = MistralModel(config)
|
| self.vocab_size = config.vocab_size
|
| self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| self.max_thoughts = config.max_thoughts
|
| self.merged_lm_and_talk_heads = config.merged_lm_and_talk_heads
|
| self.use_concat_talk_head = config.use_concat_talk_head
|
| self.use_shallow_talk = config.use_shallow_talk
|
| self.use_complex_talk_head = config.use_complex_talk_head
|
| self.use_weighted_talk_head = config.use_weighted_talk_head
|
|
|
| assert not (self.use_weighted_talk_head and self.use_shallow_talk)
|
|
|
| self.n_ahead = 1
|
| self.n_ahead_talk = 1
|
| self.n_passes = 1
|
| self.n_tokens_print = 1
|
| self.gradient_accumulation_steps = 1
|
| self.training_steps = 0
|
| self.tokenizer = None
|
| self.start_token_id = None
|
| self.end_token_id = None
|
| self.rm_initialized = False
|
| self.residual_talk_head = True
|
| self.thought_init_std_scale = 1e-2
|
|
|
| self.final_only_mode = False
|
| self.first_and_last_mode = True
|
| self.first_only = False
|
| self.original_loss_weight = 0.5
|
|
|
| self.cumulative_residual = False
|
| self.clever_residual = False
|
| self.skip_residual = False
|
| self.no_residual = True
|
|
|
| self.optimize_lm_head_only_at_start = False
|
| self.optimize_model_only_at_start = False
|
|
|
| if self.optimize_model_only_at_start:
|
| raise NotImplementedError
|
| self.train_only_thinking_embedding = False
|
| self.weighted_embeddings = False
|
| self.use_start_thought_token = True
|
| self.use_end_thought_token = True
|
| self.initialize_thought_embedding_to_normal = False
|
| self.initial_start_token = "---"
|
| self.initial_end_token = "---"
|
| self.output_logits_at_the_end = True
|
|
|
| self.gumbel_temperature = 0.001
|
|
|
| self.use_policy_loss = True
|
| self.include_policy_loss = True
|
| self.trice_mode = True
|
| self.remove_negative_rewards = True
|
| self.use_policy_loss_for_end_thought = True
|
|
|
| self.base_original_mode = False
|
| self.original_mode = False
|
|
|
| self.thought_prefix = "(Let's think step by step"
|
| self.tokenized_thought_prefix = None
|
| self.log_dict = defaultdict(int)
|
| self.eval_log_dict = defaultdict(int)
|
| self.print_final_only = True
|
| self.loss_mean = loss_mean
|
| self.all_rewards = []
|
| self.all_unreduced_losses = []
|
| self.kill_after = 100
|
|
|
| self.start_embedding = nn.Parameter(torch.zeros(2, self.model.config.hidden_size))
|
| self.end_embedding = nn.Parameter(torch.zeros(2, self.model.config.hidden_size))
|
|
|
| self.policy_loss_beta = 1e6
|
| self.embedding_scale = 1e2
|
| self.reinforce_temperature = 3
|
| self.base_loss_beta = 1
|
|
|
|
|
| self.use_thought_prefix = False
|
| self.use_reparam_for_thought_embeddings = False
|
| self.use_upper_triangular = False
|
| self.subtract_mean_reward = False
|
| self.comparison_mode = False
|
| self.gumbel_detach = True
|
|
|
|
|
| self.eval_mode = False
|
|
|
| num_talk = 1
|
| talk_input_dim = config.hidden_size if not self.use_concat_talk_head else config.hidden_size * 2
|
| if self.use_weighted_talk_head:
|
| talk_output_dim = 1
|
| else:
|
| talk_output_dim = config.hidden_size if self.use_shallow_talk else config.vocab_size
|
|
|
| if not self.merged_lm_and_talk_heads:
|
| if self.use_complex_talk_head:
|
| self.talk_head = nn.ModuleList([nn.Sequential(
|
| nn.Linear(talk_input_dim, config.hidden_size),
|
| nn.ReLU(),
|
| nn.Linear(config.hidden_size, config.hidden_size),
|
| nn.ReLU(),
|
| nn.Linear(config.hidden_size, talk_output_dim, bias=False)
|
| )])
|
| else:
|
| self.talk_head = nn.ModuleList([nn.Sequential(
|
| nn.Linear(talk_input_dim, talk_output_dim, bias=False)
|
| )])
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self):
|
| return self.model.embed_tokens
|
|
|
| def set_input_embeddings(self, value):
|
| self.model.embed_tokens = value
|
|
|
| def get_output_embeddings(self):
|
| return self.lm_head
|
|
|
| def set_output_embeddings(self, new_embeddings):
|
| self.lm_head = new_embeddings
|
|
|
| def set_decoder(self, decoder):
|
| self.model = decoder
|
|
|
| def get_decoder(self):
|
| return self.model
|
| def calculate_policy_loss(self, thoughts, rewards):
|
| thought_log_probs = []
|
| for thought in thoughts:
|
| thought_log_prob = self.lm_head(thought).log_softmax(dim=-1)
|
| thought_log_probs.append(thought_log_prob)
|
|
|
| thought_log_probs = torch.stack(thought_log_probs, dim=1)
|
| thought_probs = torch.exp(thought_log_probs)
|
|
|
| policy_loss = -torch.mean(thought_log_probs * rewards.unsqueeze(-1).unsqueeze(-1))
|
|
|
| return policy_loss
|
|
|
| def _generate_thoughts(self, hidden_states, max_length):
|
| batch_size = hidden_states.size(0)
|
| thought_ids = torch.zeros((batch_size, self.config.max_thoughts, max_length), dtype=torch.long, device=hidden_states.device)
|
| thought_embeddings = []
|
|
|
| for i in range(self.config.max_thoughts):
|
| thought_input_ids = torch.zeros((batch_size, 1), dtype=torch.long, device=hidden_states.device)
|
| thought_outputs = self.generate(
|
| input_ids=thought_input_ids,
|
| max_length=max_length,
|
| do_sample=True,
|
| top_k=50,
|
| top_p=0.95,
|
| pad_token_id=self.config.pad_token_id,
|
| eos_token_id=self.config.eos_token_id,
|
| )
|
| thought_ids[:, i, :] = thought_outputs
|
| thought_embeddings.append(self.get_input_embeddings()(thought_outputs))
|
|
|
| thought_embeddings = torch.stack(thought_embeddings, dim=1)
|
| return thought_ids, thought_embeddings
|
|
|
| @torch.no_grad()
|
| def infer(
|
| self,
|
| input_ids: torch.LongTensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ):
|
| batch_size, seq_len = input_ids.shape
|
|
|
|
|
| original_input_ids = input_ids.clone()
|
| original_attention_mask = attention_mask.clone() if attention_mask is not None else None
|
|
|
|
|
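| # Append the start-of-thought token to every sequence and extend the attention mask to cover it.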
| start_thought_token_id = self.tokenizer.convert_tokens_to_ids("<|startthought|>")
|
| input_ids = torch.cat([input_ids, torch.tensor([[start_thought_token_id]] * batch_size).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| continuation_length = self.n_ahead - 2
|
| new_key_values = past_key_values
|
|
|
| start_time = time.time()
|
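| # Autoregressively generate n_ahead - 2 thought tokens with hard Gumbel-softmax sampling, reusing the KV cache.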
| for continuation_idx in range(continuation_length):
|
| outputs = self.model(
|
| input_ids=input_ids if continuation_idx == 0 else next_token_id.unsqueeze(-1).to(input_ids.device),
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=new_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=True,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| new_key_values = outputs.past_key_values
|
|
|
| hidden_states = outputs[0]
|
|
|
| logits = self.lm_head(hidden_states)
|
| logits = logits[:, -1, :]
|
|
|
|
|
| next_token_logits = F.gumbel_softmax(logits, tau=self.gumbel_temperature, hard=True, dim=-1)
|
| next_token_id = torch.argmax(next_token_logits, dim=-1)
|
|
|
|
|
| input_ids = torch.cat([input_ids, next_token_id.unsqueeze(-1).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| end_thought_token_id = self.tokenizer.convert_tokens_to_ids("<|endthought|>")
|
| input_ids = torch.cat([input_ids, torch.tensor([[end_thought_token_id]] * batch_size).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
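| # Run the model on the original prompt ("before thought") and on the sampled continuation plus the
| # end-of-thought token ("after thought"); the weighted talk head then mixes the two final hidden states.
| # Note: the end-of-thought tensor in the second call is shaped (1, 1), so that concatenation assumes batch_size == 1.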
| outputs_before = self.model(
|
| input_ids=original_input_ids,
|
| attention_mask=original_attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states_before = outputs_before[0][:, -1:, :]
|
|
|
|
|
| outputs_after = self.model(
|
| input_ids=torch.cat([next_token_id.unsqueeze(-1).to(input_ids.device), torch.tensor(end_thought_token_id).unsqueeze(-1).unsqueeze(-1).to(input_ids.device)], dim=-1),
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=new_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states_after = outputs_after[0][:, -1:, :]
|
|
|
|
|
| mixing_weight = self.talk_head[0](torch.cat([hidden_states_before, hidden_states_after], dim=-1))
|
|
|
|
|
| mixed_hidden_states = (1 - mixing_weight) * hidden_states_before + mixing_weight * hidden_states_after
|
|
|
|
|
| logits = self.lm_head(mixed_hidden_states)
|
| return logits
|
|
|
| def forward(
|
| self,
|
| hidden_states: torch.Tensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_value: Optional[Cache] = None,
|
| output_attentions: bool = False,
|
| use_cache: bool = False,
|
| padding_mask: Optional[torch.LongTensor] = None,
|
| group_size_1: Optional[float] = 8,
|
| group_size_2: Optional[float] = 2048,
|
| **kwargs,
|
| ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
| if "padding_mask" in kwargs:
|
| warnings.warn(
|
| "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
|
| )
|
| bsz, q_len, _ = hidden_states.size()
|
|
|
| query_states = self.q_proj(hidden_states)
|
| key_states = self.k_proj(hidden_states)
|
| value_states = self.v_proj(hidden_states)
|
|
|
| query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
|
| key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
| value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
|
|
|
| kv_seq_len = key_states.shape[-2]
|
| if past_key_value is not None:
|
| if self.layer_idx is None:
|
| raise ValueError(
|
| f"The cache structure has changed since version v4.36. If you are using {self.__class__.__name__} "
|
| "for auto-regressive decoding with k/v caching, please make sure to initialize the attention class "
|
| "with a layer index."
|
| )
|
| kv_seq_len += past_key_value.get_usable_length(kv_seq_len, self.layer_idx)
|
| cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
|
|
|
|
|
| if past_key_value is not None:
|
| cache_kwargs = {"sin": sin, "cos": cos}
|
| key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
|
|
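| # SelfExtend positions: "neighbor" attention keeps the standard rotary positions, while "grouped" attention
| # re-indexes positions in groups of group_size_1 once the context exceeds the neighbor window group_size_2.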
| query_position_ids = position_ids
|
| key_position_ids = torch.arange(kv_seq_len, dtype=position_ids.dtype).to(query_position_ids.device).view(bsz, kv_seq_len)
|
|
|
|
|
| neighbor_query_states, _ = apply_rotary_pos_emb(query_states, None, cos, sin, query_position_ids)
|
| _, neighbor_key_states = apply_rotary_pos_emb(None, key_states, cos, sin, key_position_ids)
|
| _re_group_size_2 = 0 if position_ids.max() < group_size_2 else group_size_2
|
| group_query_states, _ = apply_grouped_rotary_pos_emb(query_states, None, cos, sin, query_position_ids, g_size_1=group_size_1, g_size_2=_re_group_size_2)
|
| _, group_key_states = apply_grouped_rotary_pos_emb(None, key_states, cos, sin, key_position_ids, g_size_1=group_size_1, g_size_2=_re_group_size_2)
|
|
|
|
|
| group_key_states = repeat_kv(group_key_states, self.num_key_value_groups)
|
| neighbor_key_states = repeat_kv(neighbor_key_states, self.num_key_value_groups)
|
| value_states = repeat_kv(value_states, self.num_key_value_groups)
|
|
|
| neighbor_attn_weights = torch.matmul(neighbor_query_states, neighbor_key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
| group_attn_weights = torch.matmul(group_query_states, group_key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
|
|
|
|
|
| if group_attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
|
| raise ValueError(
|
| f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
|
| f" {group_attn_weights.size()}"
|
| )
|
|
|
| if attention_mask is not None:
|
| if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
|
| raise ValueError(
|
| f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
|
| )
|
| group_attn_weights = group_attn_weights + attention_mask
|
| neighbor_attn_weights = neighbor_attn_weights + attention_mask
|
|
|
|
|
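| # Merge the two attention maps: positions inside the last group_size_2 tokens use neighbor attention, the
| # remainder fall back to grouped attention.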
| if q_len == 1:
|
| neighbor_attention_mask = torch.zeros((q_len, kv_seq_len), device=neighbor_attn_weights.device)
|
| neighbor_attention_mask[:, -group_size_2:] = 1
|
| elif q_len == kv_seq_len:
|
| neighbor_attention_mask = torch.ones((q_len, kv_seq_len), device=neighbor_attn_weights.device)
|
| neighbor_attention_mask = torch.tril(neighbor_attention_mask)
|
| if q_len-group_size_2 > 0:
|
| group_attention_mask = torch.tril(torch.ones((q_len-group_size_2, kv_seq_len-group_size_2), device=group_attn_weights.device))
|
| neighbor_attention_mask[group_size_2:, :-group_size_2] -= group_attention_mask
|
|
|
| else:
|
| raise ValueError("q_len should be 1 or seq_len.")
|
|
|
|
|
| neighbor_attention_mask = neighbor_attention_mask.bool()
|
| attn_weights = torch.where(neighbor_attention_mask, neighbor_attn_weights, group_attn_weights)
|
| attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
|
| attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
|
| attn_output = torch.matmul(attn_weights, value_states)
|
|
|
| if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
|
| raise ValueError(
|
| f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
|
| f" {attn_output.size()}"
|
| )
|
|
|
| attn_output = attn_output.transpose(1, 2).contiguous()
|
| attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
|
|
|
| attn_output = self.o_proj(attn_output)
|
|
|
| if not output_attentions:
|
| attn_weights = None
|
|
|
| return attn_output, attn_weights, past_key_value
|
|
|
| def prepare_inputs_for_generation(
|
| self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
| ):
|
|
|
| if past_key_values is not None:
|
| if isinstance(past_key_values, Cache):
|
| cache_length = past_key_values.get_seq_length()
|
| past_length = past_key_values.seen_tokens
|
| max_cache_length = past_key_values.get_max_length()
|
| else:
|
| cache_length = past_length = past_key_values[0][0].shape[2]
|
| max_cache_length = None
|
|
|
|
|
|
|
|
|
|
|
| if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
| input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
|
|
|
|
| elif past_length < input_ids.shape[1]:
|
| input_ids = input_ids[:, past_length:]
|
|
|
|
|
|
|
| if (
|
| max_cache_length is not None
|
| and attention_mask is not None
|
| and cache_length + input_ids.shape[1] > max_cache_length
|
| ):
|
| attention_mask = attention_mask[:, -max_cache_length:]
|
|
|
| position_ids = kwargs.get("position_ids", None)
|
| if attention_mask is not None and position_ids is None:
|
|
|
| position_ids = attention_mask.long().cumsum(-1) - 1
|
| position_ids.masked_fill_(attention_mask == 0, 1)
|
| if past_key_values:
|
| position_ids = position_ids[:, -input_ids.shape[1] :]
|
|
|
|
|
| if inputs_embeds is not None and past_key_values is None:
|
| model_inputs = {"inputs_embeds": inputs_embeds}
|
| else:
|
| model_inputs = {"input_ids": input_ids}
|
|
|
| model_inputs.update(
|
| {
|
| "position_ids": position_ids,
|
| "past_key_values": past_key_values,
|
| "use_cache": kwargs.get("use_cache"),
|
| "attention_mask": attention_mask,
|
| }
|
| )
|
| return model_inputs
|
|
|
| @staticmethod
|
| def _reorder_cache(past_key_values, beam_idx):
|
| reordered_past = ()
|
| for layer_past in past_key_values:
|
| reordered_past += (
|
| tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
| )
|
| return reordered_past
|
|
|
| class MistralStarForCausalLM(MistralPreTrainedModel):
|
| _tied_weights_keys = ["lm_head.weight"]
|
|
|
| def __init__(self, config):
|
| super().__init__(config)
|
| self.model = MistralModel(config)
|
| self.vocab_size = config.vocab_size
|
| self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| self.max_thoughts = config.max_thoughts
|
| self.merged_lm_and_talk_heads = config.merged_lm_and_talk_heads
|
| self.use_concat_talk_head = config.use_concat_talk_head
|
| self.use_shallow_talk = config.use_shallow_talk
|
| self.use_complex_talk_head = config.use_complex_talk_head
|
| self.use_weighted_talk_head = config.use_weighted_talk_head
|
|
|
| assert not (self.use_weighted_talk_head and self.use_shallow_talk)
|
|
|
| self.n_ahead = 1
|
| self.n_ahead_talk = 1
|
| self.n_passes = 1
|
| self.n_tokens_print = 1
|
| self.gradient_accumulation_steps = 1
|
| self.training_steps = 0
|
| self.tokenizer = None
|
| self.start_token_id = None
|
| self.end_token_id = None
|
| self.rm_initialized = False
|
| self.residual_talk_head = True
|
| self.thought_init_std_scale = 1e-2
|
|
|
| self.final_only_mode = False
|
| self.first_and_last_mode = True
|
| self.first_only = False
|
| self.original_loss_weight = 0.5
|
|
|
| self.cumulative_residual = False
|
| self.clever_residual = False
|
| self.skip_residual = False
|
| self.no_residual = True
|
|
|
| self.optimize_lm_head_only_at_start = False
|
| self.optimize_model_only_at_start = False
|
|
|
| if self.optimize_model_only_at_start:
|
| raise NotImplementedError
|
| self.train_only_thinking_embedding = False
|
| self.weighted_embeddings = False
|
| self.use_start_thought_token = True
|
| self.use_end_thought_token = True
|
| self.initialize_thought_embedding_to_normal = False
|
| self.initial_start_token = "---"
|
| self.initial_end_token = "---"
|
| self.output_logits_at_the_end = True
|
|
|
| self.gumbel_temperature = 0.001
|
|
|
| self.use_policy_loss = True
|
| self.include_policy_loss = True
|
| self.trice_mode = True
|
| self.remove_negative_rewards = True
|
| self.use_policy_loss_for_end_thought = True
|
|
|
| self.base_original_mode = False
|
| self.original_mode = False
|
|
|
| self.thought_prefix = "(Let's think step by step"
|
| self.tokenized_thought_prefix = None
|
| self.log_dict = defaultdict(int)
|
| self.eval_log_dict = defaultdict(int)
|
| self.print_final_only = True
|
| self.loss_mean = loss_mean
|
| self.all_rewards = []
|
| self.all_unreduced_losses = []
|
| self.kill_after = 100
|
|
|
| self.start_embedding = nn.Parameter(torch.zeros(2, self.model.config.hidden_size))
|
| self.end_embedding = nn.Parameter(torch.zeros(2, self.model.config.hidden_size))
|
|
|
| self.policy_loss_beta = 1e6
|
| self.embedding_scale = 1e2
|
| self.reinforce_temperature = 3
|
| self.base_loss_beta = 1
|
|
|
|
|
| self.use_thought_prefix = False
|
| self.use_reparam_for_thought_embeddings = False
|
| self.use_upper_triangular = False
|
| self.subtract_mean_reward = False
|
| self.comparison_mode = False
|
| self.gumbel_detach = True
|
|
|
|
|
| self.eval_mode = False
|
|
|
| num_talk = 1
|
| talk_input_dim = config.hidden_size if not self.use_concat_talk_head else config.hidden_size * 2
|
| if self.use_weighted_talk_head:
|
| talk_output_dim = 1
|
| else:
|
| talk_output_dim = config.hidden_size if self.use_shallow_talk else config.vocab_size
|
|
|
| if not self.merged_lm_and_talk_heads:
|
| if self.use_complex_talk_head:
|
| self.talk_head = nn.ModuleList([nn.Sequential(
|
| nn.Linear(talk_input_dim, config.hidden_size),
|
| nn.ReLU(),
|
| nn.Linear(config.hidden_size, config.hidden_size),
|
| nn.ReLU(),
|
| nn.Linear(config.hidden_size, talk_output_dim, bias=False)
|
| )])
|
| else:
|
| self.talk_head = nn.ModuleList([nn.Sequential(
|
| nn.Linear(talk_input_dim, talk_output_dim, bias=False)
|
| )])
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self):
|
| return self.model.embed_tokens
|
|
|
| def set_input_embeddings(self, value):
|
| self.model.embed_tokens = value
|
|
|
| def get_output_embeddings(self):
|
| return self.lm_head
|
|
|
| def set_output_embeddings(self, new_embeddings):
|
| self.lm_head = new_embeddings
|
|
|
| def set_decoder(self, decoder):
|
| self.model = decoder
|
|
|
| def get_decoder(self):
|
| return self.model
|
| def calculate_policy_loss(self, thoughts, rewards):
|
| thought_log_probs = []
|
| for thought in thoughts:
|
| thought_log_prob = self.lm_head(thought).log_softmax(dim=-1)
|
| thought_log_probs.append(thought_log_prob)
|
|
|
| thought_log_probs = torch.stack(thought_log_probs, dim=1)
|
| thought_probs = torch.exp(thought_log_probs)
|
|
|
| policy_loss = -torch.mean(thought_log_probs * rewards.unsqueeze(-1).unsqueeze(-1))
|
|
|
| return policy_loss
|
|
|
| def _generate_thoughts(self, hidden_states, max_length):
|
| batch_size = hidden_states.size(0)
|
| thought_ids = torch.zeros((batch_size, self.config.max_thoughts, max_length), dtype=torch.long, device=hidden_states.device)
|
| thought_embeddings = []
|
|
|
| for i in range(self.config.max_thoughts):
|
| thought_input_ids = torch.zeros((batch_size, 1), dtype=torch.long, device=hidden_states.device)
|
| thought_outputs = self.generate(
|
| input_ids=thought_input_ids,
|
| max_length=max_length,
|
| do_sample=True,
|
| top_k=50,
|
| top_p=0.95,
|
| pad_token_id=self.config.pad_token_id,
|
| eos_token_id=self.config.eos_token_id,
|
| )
|
| thought_ids[:, i, :] = thought_outputs
|
| thought_embeddings.append(self.get_input_embeddings()(thought_outputs))
|
|
|
| thought_embeddings = torch.stack(thought_embeddings, dim=1)
|
| return thought_ids, thought_embeddings
|
|
|
| @torch.no_grad()
|
| def infer(
|
| self,
|
| input_ids: torch.LongTensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ):
|
| batch_size, seq_len = input_ids.shape
|
|
|
|
|
| original_input_ids = input_ids.clone()
|
| original_attention_mask = attention_mask.clone() if attention_mask is not None else None
|
|
|
|
|
| start_thought_token_id = self.tokenizer.convert_tokens_to_ids("<|startthought|>")
|
| input_ids = torch.cat([input_ids, torch.tensor([[start_thought_token_id]] * batch_size).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| continuation_length = self.n_ahead - 2
|
| new_key_values = past_key_values
|
|
|
| start_time = time.time()
|
| for continuation_idx in range(continuation_length):
|
| outputs = self.model(
|
| input_ids=input_ids if continuation_idx == 0 else next_token_id.unsqueeze(-1).to(input_ids.device),
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=new_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=True,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| new_key_values = outputs.past_key_values
|
|
|
| hidden_states = outputs[0]
|
|
|
| logits = self.lm_head(hidden_states)
|
| logits = logits[:, -1, :]
|
|
|
|
|
| next_token_logits = F.gumbel_softmax(logits, tau=self.gumbel_temperature, hard=True, dim=-1)
|
| next_token_id = torch.argmax(next_token_logits, dim=-1)
|
|
|
|
|
| input_ids = torch.cat([input_ids, next_token_id.unsqueeze(-1).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| end_thought_token_id = self.tokenizer.convert_tokens_to_ids("<|endthought|>")
|
| input_ids = torch.cat([input_ids, torch.tensor([[end_thought_token_id]] * batch_size).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| outputs_before = self.model(
|
| input_ids=original_input_ids,
|
| attention_mask=original_attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states_before = outputs_before[0][:, -1:, :]
|
|
|
|
|
| outputs_after = self.model(
|
| input_ids=torch.cat([next_token_id.unsqueeze(-1).to(input_ids.device), torch.tensor(end_thought_token_id).unsqueeze(-1).unsqueeze(-1).to(input_ids.device)], dim=-1),
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=new_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states_after = outputs_after[0][:, -1:, :]
|
|
|
|
|
| mixing_weight = self.talk_head[0](torch.cat([hidden_states_before, hidden_states_after], dim=-1))
|
|
|
|
|
| mixed_hidden_states = (1 - mixing_weight) * hidden_states_before + mixing_weight * hidden_states_after
|
|
|
|
|
| logits = self.lm_head(mixed_hidden_states)
|
| return logits
|
|
|
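| # Note: forward_quiet references self.mixing_head and config.thought_length, which are not initialized in
| # __init__ above and are assumed to be configured elsewhere.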
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| def forward_quiet(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CausalLMOutputWithPast]:
|
| r"""
|
| Args:
|
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
|
|
| Returns:
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, QuietForCausalLM
|
|
|
| >>> model = QuietForCausalLM.from_pretrained("quietai/Quiet-7B-v0.1")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("quietai/Quiet-7B-v0.1")
|
|
|
| >>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| >>> inputs = tokenizer(prompt, return_tensors="pt")
|
|
|
| >>> # Generate
|
| >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| ```"""
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
|
|
| outputs = self.model(
|
| input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=True,
|
| )
|
| hidden_states = outputs.last_hidden_state
|
| logits = self.lm_head(hidden_states)
|
|
|
| thought_ids, thought_embeddings = self._generate_thoughts(hidden_states, max_length=self.config.thought_length)
|
| thought_hidden_states = self.model(inputs_embeds=thought_embeddings).last_hidden_state
|
|
|
|
|
| thought_logits = self.lm_head(thought_hidden_states)
|
|
|
|
|
| mixed_logits = logits.unsqueeze(1) + self.mixing_head(thought_logits)
|
| mixed_logits = mixed_logits.view(-1, mixed_logits.size(-1))
|
|
|
| loss = None
|
| if labels is not None:
|
|
|
| shift_logits = mixed_logits[..., :-1, :].contiguous()
|
| shift_labels = labels[..., 1:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss()
|
| loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
|
|
| if self.use_policy_loss:
|
| rewards = loss.detach().unsqueeze(1).repeat(1, self.max_thoughts)
|
| if self.remove_negative_rewards:
|
| rewards = torch.clamp(rewards, min=0)
|
| policy_loss = self.calculate_policy_loss(thought_ids, rewards)
|
| loss = loss + policy_loss
|
| else:
|
| loss = None
|
|
|
| if not return_dict:
|
| output = (mixed_logits,) + outputs[1:]
|
| return ((loss,) + output) if loss is not None else output
|
|
|
| return CausalLMOutputWithPast(
|
| loss=loss,
|
| logits=logits,
|
| past_key_values=outputs.past_key_values,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
|
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| def forward(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CausalLMOutputWithPast]:
|
| r"""
|
| Args:
|
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
|
|
| Returns:
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, MistralForCausalLM
|
|
|
| >>> model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
|
|
|
| >>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| >>> inputs = tokenizer(prompt, return_tensors="pt")
|
|
|
| >>> # Generate
|
| >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| ```"""
|
| log_dict = self.log_dict if self.training else self.eval_log_dict
|
|
|
| if self.training and self.kill_after is not None and self.training_steps // self.gradient_accumulation_steps > self.kill_after:
|
| raise ValueError("Killed after")
|
|
|
| if not self.training:
|
| n_ahead_talk_to_restore = self.n_ahead_talk
|
| n_passes_to_restore = self.n_passes
|
| self.n_ahead_talk = 1
|
| self.n_passes = 1
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| assert self.cumulative_residual or self.clever_residual or self.skip_residual or self.no_residual
|
| assert not (self.skip_residual and self.use_policy_loss)
|
|
|
| if self.tokenized_thought_prefix is None and self.use_thought_prefix:
|
| self.tokenized_thought_prefix = self.tokenizer(self.thought_prefix, return_tensors="pt", add_special_tokens=False)["input_ids"]
|
|
|
| def apply_head(head, states, detach=False):
|
| if detach:
|
| head_weight = head.weight.detach()
|
| else:
|
| head_weight = head.weight
|
| head_weight = head_weight.to(states.device)
|
| return (head_weight @ states.transpose(-1, -2)).transpose(-1, -2).contiguous()
|
|
|
| def idx_if_sequential(head, idx=0):
|
| if isinstance(head, nn.Sequential) or isinstance(head, nn.ModuleList):
|
| return idx_if_sequential(head[idx], idx=idx)
|
| return head
|
|
|
| def none_repeat_interleave(x, n):
|
| if x is None:
|
| return x
|
| return x.repeat_interleave(n, dim=0)
|
|
|
| if self.n_passes > 1:
|
| input_ids = none_repeat_interleave(input_ids, self.n_passes)
|
| attention_mask = none_repeat_interleave(attention_mask, self.n_passes)
|
| position_ids = none_repeat_interleave(position_ids, self.n_passes)
|
| inputs_embeds = none_repeat_interleave(inputs_embeds, self.n_passes)
|
| labels = none_repeat_interleave(labels, self.n_passes)
|
| if past_key_values is not None:
|
| past_key_values = [none_repeat_interleave(p, self.n_passes) for p in past_key_values]
|
| cur_token_indices = torch.arange(input_ids.shape[1], device=input_ids.device)
|
|
|
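| # Resolve the start/end thought token ids (falling back to BOS/EOS when the special tokens are missing, i.e.
| # map to id 0) and initialize their embeddings from the configured base token, scaled by 1 / embedding_scale.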
| self.tokenizer_has_start_thought_token = True
|
| self.tokenizer_has_end_thought_token = True
|
| if self.start_token_id is None:
|
| self.start_token_id = self.tokenizer.convert_tokens_to_ids("<|startthought|>")
|
| if self.start_token_id == 0:
|
| self.start_token_id = self.tokenizer.bos_token_id
|
| self.tokenizer_has_start_thought_token = False
|
| elif self.use_start_thought_token:
|
|
|
| base_start_id = self.tokenizer.encode(self.initial_start_token, add_special_tokens=False)[0]
|
| if self.initialize_thought_embedding_to_normal:
|
| self.start_embedding.data = torch.zeros_like(self.start_embedding.data)
|
| else:
|
| self.start_embedding.data[0] = self.model.embed_tokens.weight.data[base_start_id].clone().detach() / self.embedding_scale
|
| self.start_embedding.data[1] = torch.log(self.model.embed_tokens.weight.data.std(dim=0) * self.thought_init_std_scale / self.embedding_scale)
|
| if self.end_token_id is None:
|
| self.end_token_id = self.tokenizer.convert_tokens_to_ids("<|endthought|>")
|
| if self.end_token_id == 0:
|
| self.end_token_id = self.tokenizer.eos_token_id
|
| self.tokenizer_has_end_thought_token = False
|
| elif self.use_end_thought_token:
|
|
|
| base_end_id = self.tokenizer.encode(self.initial_end_token, add_special_tokens=False)[0]
|
| if self.initialize_thought_embedding_to_normal:
|
| self.end_embedding.data = torch.zeros_like(self.end_embedding.data)
|
| else:
|
| self.end_embedding.data[0] = self.model.embed_tokens.weight.data[base_end_id].clone().detach() / self.embedding_scale
|
| self.end_embedding.data[1] = torch.log(self.model.embed_tokens.weight.data.std(dim=0) * self.thought_init_std_scale / self.embedding_scale)
|
|
|
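| # Lazily initialize the talk head on the first pass: zero weights for the weighted/vocab head, or an
| # identity(-padded) matrix when use_shallow_talk maps hidden states back through the LM head.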
| if not self.rm_initialized and (self.n_ahead > 1 or not self.base_original_mode):
|
| self.rm_initialized = True
|
| if not self.use_shallow_talk:
|
| head = self.talk_head[0]
|
| cur_head = head[-1] if isinstance(head, nn.Sequential) else head
|
| talk_input_dim = cur_head.weight.data.shape[1]
|
| talk_output_dim = 1 if self.use_weighted_talk_head else self.lm_head.weight.data.shape[0]
|
| cur_head.weight.data = torch.zeros(talk_output_dim, talk_input_dim, device=cur_head.weight.device, dtype=cur_head.weight.dtype)
|
| else:
|
|
|
| def lambda_transform(cur_head):
|
| if cur_head.weight.data.shape[0] != cur_head.weight.data.shape[1]:
|
| return torch.cat([
|
| torch.eye(
|
| cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| ),
|
| torch.zeros(
|
| cur_head.weight.data.shape[0],
|
| cur_head.weight.data.shape[1] - cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| )], dim=1)
|
| return torch.eye(
|
| cur_head.weight.data.shape[0],
|
| device=cur_head.weight.device,
|
| dtype=cur_head.weight.dtype
|
| )
|
| if isinstance(self.talk_head[0], nn.Sequential):
|
| for cur_head in self.talk_head[0]:
|
|
|
| if hasattr(cur_head, "weight"):
|
| cur_head.weight.data = lambda_transform(cur_head)
|
| else:
|
| self.talk_head[-1].weight.data = lambda_transform(self.talk_head[0])
|
|
|
| loss = None
|
| prev_rm_tokens = None
|
| cur_rm_tokens = None
|
| prev_rm_logits = None
|
| prev_sample_probs = None
|
| did_skip_sampling = None
|
| skip_sampling = None
|
| sample_probs = None
|
| hidden_states = None
|
| logits = None
|
| talk_kl_penalty = None
|
| rm_logits = None
|
| residual_logits = None
|
| probabilities_2d = None
|
| prev_probabilities_2d = None
|
| policy_reward = None
|
| logits_to_output = None
|
| batch_size, seq_len = input_ids.shape
|
| base_input_ids = input_ids.clone()
|
| loss_list = []
|
| dqn_loss_list = []
|
| sampled_token_history = []
|
| sample_probs_history = []
|
| action_loglikelihoods_list = []
|
|
|
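| # Scale the learned thought embeddings; with reparameterization both the mean and log-std rows are kept.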
| if self.use_end_thought_token or self.use_start_thought_token:
|
| if not self.use_reparam_for_thought_embeddings:
|
| start_embedding = self.start_embedding[0].unsqueeze(0) * self.embedding_scale
|
| end_embedding = self.end_embedding[0].unsqueeze(0) * self.embedding_scale
|
| else:
|
| start_embedding = self.start_embedding * self.embedding_scale
|
| end_embedding = self.end_embedding * self.embedding_scale
|
| base_embeddings = self.model.embed_tokens.weight
|
| if self.train_only_thinking_embedding:
|
| base_embeddings = base_embeddings.detach()
|
|
|
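| # Main loop: one forward pass per thinking step plus per talk step (n_ahead + n_ahead_talk - 1 passes in
| # total), or a single pass in original mode.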
| fwd_iters = 1 if self.original_mode else self.n_ahead + self.n_ahead_talk - 1
|
| for ahead_idx in range(fwd_iters):
|
| past_key_values_length = 0
|
| if past_key_values is not None:
|
| use_legacy_cache = not isinstance(past_key_values, Cache)
|
| if use_legacy_cache:
|
| past_key_values = DynamicCache.from_legacy_cache(past_key_values)
|
| past_key_values_length = past_key_values.get_usable_length(seq_len)
|
|
|
| if position_ids is None:
|
| device = input_ids.device if input_ids is not None else inputs_embeds.device
|
| position_ids = torch.arange(
|
| past_key_values_length, seq_len + past_key_values_length, dtype=torch.long, device=device
|
| )
|
| position_ids = position_ids.unsqueeze(0).view(-1, seq_len)
|
| else:
|
| position_ids = position_ids.view(-1, seq_len).long()
|
|
|
| if inputs_embeds is None:
|
| contains_start = self.use_start_thought_token and (input_ids == self.start_token_id).any()
|
| contains_end = self.use_end_thought_token and (input_ids == self.end_token_id).any()
|
| contains_thought = contains_start or contains_end
|
| if contains_thought:
|
| thought_id = self.start_token_id if contains_start else self.end_token_id
|
| cur_thought_embedding = start_embedding if contains_start else end_embedding
|
| if self.use_reparam_for_thought_embeddings:
|
| inputs_embeds = torch.randn(batch_size, seq_len, self.model.config.hidden_size, device=input_ids.device, dtype=cur_thought_embedding.dtype)
|
| inputs_embeds = inputs_embeds.detach() * torch.exp(cur_thought_embedding[1]) + cur_thought_embedding[0]
|
| if contains_start:
|
| sampled_start = inputs_embeds.clone().detach()
|
| if contains_end:
|
| sampled_end = inputs_embeds.clone().detach()
|
| else:
|
| inputs_embeds = cur_thought_embedding.unsqueeze(0).repeat(batch_size, seq_len, 1)
|
| else:
|
| with torch.set_grad_enabled(not self.train_only_thinking_embedding):
|
| inputs_embeds = self.model.embed_tokens(input_ids)
|
|
|
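| # Prepare a 4-D attention mask for this pass: pad a 2-D mask for any cached tokens and expand it with the
| # causal-mask helper (the attention_mask-is-None path still ends in a debugging breakpoint).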
| if self.n_ahead != 1 or self.n_ahead_talk != 1 or self.comparison_mode:
|
| if attention_mask is None:
|
| base_attention_mask = torch.triu(torch.ones(seq_len, seq_len), diagonal=0).to(input_ids.device)
|
| base_attention_mask = base_attention_mask.view(1, 1, seq_len, seq_len)
|
| base_attention_mask = base_attention_mask.repeat(input_ids.shape[0], 1, 1, 1)
|
| attention_mask = base_attention_mask
|
| breakpoint()
|
| elif attention_mask.dim() == 2:
|
| if seq_len + past_key_values_length != attention_mask.shape[-1]:
|
| breakpoint()
|
| attention_mask = torch.cat(
|
| [torch.ones((attention_mask.shape[0], past_key_values_length), dtype=attention_mask.dtype, device=attention_mask.device), attention_mask],
|
| dim=-1
|
| )
|
|
|
| attention_mask = _prepare_4d_causal_attention_mask(
|
| attention_mask,
|
| (batch_size, seq_len),
|
| inputs_embeds,
|
| past_key_values_length,
|
| sliding_window=self.config.sliding_window,
|
| )
|
|
|
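| # The inputs are always passed through inputs_embeds here, so input_ids is not forwarded to the base model.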
| outputs = self.model(
|
|
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| prev_hidden_states = hidden_states
|
| hidden_states = outputs[0]
|
| prev_rm_logits = rm_logits
|
| prev_rm_tokens = cur_rm_tokens
|
|
|
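| # First pass: cache the base hidden states and logits. Later passes feed the (optionally concatenated) talk
| # hidden states through the talk head, with the cached base states rolled to line up with the current offset.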
| if ahead_idx == 0:
|
| hidden_states_lm = hidden_states
|
| logits = self.lm_head(hidden_states_lm)
|
| base_hidden_states = hidden_states.clone()
|
| initial_loss_logits = logits.clone()
|
| if self.optimize_lm_head_only_at_start or self.optimize_model_only_at_start:
|
| logits = logits.detach()
|
| base_hidden_states = base_hidden_states.detach()
|
| if self.optimize_model_only_at_start:
|
| hidden_states = hidden_states.detach()
|
| base_logits = logits.clone()
|
| else:
|
| talk_hidden_states = hidden_states
|
| if self.merged_lm_and_talk_heads:
|
| assert self.no_residual
|
| residual_logits = self.lm_head(hidden_states)
|
| talk_hidden_states = hidden_states
|
| else:
|
| if ahead_idx > self.n_ahead - 1:
|
| cur_base_hidden = torch.cat([
|
| base_hidden_states[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_hidden_states[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| else:
|
| cur_base_hidden = base_hidden_states
|
|
|
| if self.use_concat_talk_head:
|
|
|
| head_input_hidden_states = torch.cat([cur_base_hidden, talk_hidden_states], dim=-1)
|
| else:
|
| head_input_hidden_states = talk_hidden_states
|
|
|
| residual_logits = self.talk_head[0](head_input_hidden_states)
|
| if self.use_shallow_talk:
|
| residual_logits = apply_head(self.lm_head, residual_logits, detach=self.optimize_lm_head_only_at_start)
|
| residual_logits = residual_logits.to(logits.device)
|
| if self.use_weighted_talk_head:
|
|
|
| residual_logits = cur_base_hidden * (1 - residual_logits) + talk_hidden_states * residual_logits
|
| residual_logits = apply_head(self.lm_head, residual_logits, detach=self.optimize_lm_head_only_at_start)
|
|
|
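| # Exactly one residual-combination mode may be active; it decides how the
| # talk-head output is merged with the base logits (added every step, added only
| # after the thought phase, or used on its own).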
| assert sum([self.cumulative_residual, self.clever_residual, self.skip_residual, self.no_residual]) == 1
|
| if self.clever_residual:
|
| if ahead_idx >= self.n_ahead - 1:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits + residual_logits
|
| else:
|
| logits += residual_logits / self.n_ahead
|
| elif self.cumulative_residual:
|
| if self.residual_talk_head:
|
| if ahead_idx < self.n_ahead:
|
| logits += residual_logits
|
| else:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits + residual_logits
|
| else:
|
| if ahead_idx < self.n_ahead:
|
| logits += residual_logits
|
| else:
|
| logits = residual_logits
|
| elif self.skip_residual:
|
| if ahead_idx >= self.n_ahead:
|
|
|
| cur_base_logits = torch.cat([
|
| base_logits[..., ahead_idx - self.n_ahead + 1:, :],
|
| base_logits[..., :ahead_idx - self.n_ahead + 1, :]
|
| ], dim=-2)
|
| if self.optimize_lm_head_only_at_start:
|
| cur_base_logits = cur_base_logits.detach()
|
| logits = cur_base_logits
|
| elif self.no_residual:
|
| logits = residual_logits
|
| else:
|
| logits = base_logits + residual_logits
|
|
|
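| # Baseline language-modeling loss (original mode, no rollout, or the first pass
| # of comparison mode): shifted cross-entropy for each talk offset, with padding
| # labels mapped to -100.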
| attempted = False
|
| talk_loss_list = []
|
| if self.original_mode or (self.n_ahead == 1) or (self.comparison_mode and ahead_idx == 0):
|
| loss = None
|
| attempted = True
|
|
|
| if labels is not None:
|
| for shift_amount in range(self.n_ahead_talk):
|
|
|
|
|
|
|
| if ahead_idx == 0 and self.optimize_lm_head_only_at_start:
|
| loss_logits = initial_loss_logits
|
| else:
|
| loss_logits = logits
|
| shift_logits = loss_logits[..., shift_amount:-1, :].contiguous()
|
| shift_labels = labels[..., 1 + shift_amount:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss(reduction="none")
|
| shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| shift_labels = shift_labels.view(-1).clone()
|
|
|
| shift_labels[shift_labels == self.tokenizer.pad_token_id] = -100
|
| shift_labels = shift_labels.to(shift_logits.device)
|
| loss = loss_fct(shift_logits, shift_labels)
|
| if not self.comparison_mode and not (self.optimize_lm_head_only_at_start and (self.n_ahead + self.n_ahead_talk > 2)) or self.original_mode:
|
| loss_list.append(loss)
|
| talk_loss_list.append(nonzero_mean(loss).detach())
|
|
|
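| # Thought-token sampling: score candidates with the LM head, mask out the
| # start/end thought ids so they are never sampled freely, then either force an
| # override token (start/end marker, thought prefix, or the ground-truth labels
| # during the talk phase) or draw a straight-through Gumbel-softmax sample.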
| if not attempted or self.comparison_mode:
|
| rm_hidden_states = hidden_states
|
|
|
| rm_logits = apply_head(self.lm_head, rm_hidden_states, detach=self.optimize_lm_head_only_at_start)
|
|
|
|
|
| if self.tokenizer_has_start_thought_token:
|
| rm_logits[..., self.start_token_id] = -1e10
|
| if self.tokenizer_has_end_thought_token:
|
| rm_logits[..., self.end_token_id] = -1e10
|
| probabilities = rm_logits
|
| if probabilities_2d is not None:
|
| prev_probabilities_2d = probabilities_2d.clone()
|
| probabilities_2d = probabilities.view(-1, probabilities.size(-1))
|
|
|
| did_skip_sampling = skip_sampling
|
| skip_sampling = False
|
| if ahead_idx == 0 and self.use_start_thought_token:
|
| override_token = self.start_token_id
|
| elif self.use_thought_prefix and ahead_idx < self.tokenized_thought_prefix.shape[-1]:
|
| override_token = self.tokenized_thought_prefix[..., ahead_idx]
|
| elif ahead_idx == self.n_ahead - 2 and self.use_end_thought_token:
|
| override_token = self.end_token_id
|
| else:
|
| override_token = None
|
| if override_token is not None and self.n_ahead > 1:
|
|
|
| probabilities_2d = torch.zeros_like(probabilities_2d)
|
| probabilities_2d[:, override_token] = 1.0
|
| skip_sampling = True
|
| elif ahead_idx >= self.n_ahead - 1:
|
| if labels is not None:
|
| cur_talk_n = ahead_idx - (self.n_ahead - 1) + 1
|
|
|
| shift_labels = labels[..., cur_talk_n:].contiguous().to(probabilities_2d.device)
|
| padding = torch.full_like(
|
| labels[..., :cur_talk_n],
|
| self.tokenizer.pad_token_id,
|
| dtype=torch.long,
|
| device=shift_labels.device
|
| )
|
| new_rm_tokens = torch.cat(
|
| [shift_labels, padding],
|
| dim=-1
|
| )
|
|
|
| probabilities_2d = F.one_hot(new_rm_tokens, num_classes=self.vocab_size).reshape(-1, self.vocab_size).to(probabilities_2d.dtype)
|
| skip_sampling = True
|
| else:
|
| continue
|
| temperature = self.gumbel_temperature if self.training else 0.001
|
| prev_sample_probs = sample_probs
|
| sample_probs = probabilities_2d
|
| if ahead_idx < self.n_ahead - 1 and not skip_sampling:
|
| probabilities_2d = F.gumbel_softmax(sample_probs, tau=temperature, hard=True, dim=-1)
|
| if self.gumbel_detach:
|
| probabilities_2d = probabilities_2d.detach()
|
| sampled_token_history.append(probabilities_2d.argmax(dim=-1).detach().cpu())
|
|
|
| contains_start = self.use_start_thought_token and (probabilities_2d[..., self.start_token_id].sum() > 0)
|
| contains_end = self.use_end_thought_token and (probabilities_2d[..., self.end_token_id].sum() > 0)
|
| contains_thought = contains_start or contains_end
|
|
|
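| # Turn the sampled distribution into the next input embeddings: a soft mixture
| # over the embedding matrix for ordinary tokens, or the learned start/end
| # thought embedding (optionally reparameterized as mean + exp(log_std) * noise)
| # when a thought marker was sampled.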
| if not contains_thought:
|
| with torch.set_grad_enabled(not self.train_only_thinking_embedding):
|
| inputs_embeds = probabilities_2d @ (self.model.embed_tokens.weight.to(probabilities.device).to(probabilities.dtype))
|
| else:
|
| thought_id = self.start_token_id if contains_start else self.end_token_id
|
| cur_thought_embedding = start_embedding if contains_start else end_embedding
|
| if self.use_reparam_for_thought_embeddings:
|
| inputs_embeds = torch.randn(batch_size, seq_len, self.model.config.hidden_size, device=input_ids.device, dtype=cur_thought_embedding.dtype)
|
| inputs_embeds = inputs_embeds * torch.exp(cur_thought_embedding[1]) + cur_thought_embedding[0]
|
| if contains_start:
|
| sampled_start = inputs_embeds.clone().detach()
|
| else:
|
| sampled_end = inputs_embeds.clone().detach()
|
| else:
|
| inputs_embeds = cur_thought_embedding.unsqueeze(0).repeat(batch_size, seq_len, 1)
|
| inputs_embeds = inputs_embeds.view(probabilities.size(0), probabilities.size(1), -1).to(self.model.embed_tokens.weight.dtype)
|
|
|
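| # Grow the attention mask by one block per generated token: each new position
| # attends to itself (identity block) on top of the original causal mask, and
| # the KV cache and position ids advance by one.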
| if len(attention_mask.shape) == 2:
|
| breakpoint()
|
| else:
|
| original_attention = attention_mask[..., :attention_mask.shape[-2]]
|
| if self.use_upper_triangular:
|
| new_attention = original_attention
|
| else:
|
| original_attention = original_attention == attention_mask.max()
|
|
|
| if attention_mask.dtype != torch.bfloat16:
|
| new_attention = torch.eye(
|
| seq_len, dtype=attention_mask.dtype, device=attention_mask.device
|
| )
|
| else:
|
| new_attention = torch.eye(
|
| seq_len, dtype=torch.float32, device=attention_mask.device
|
| ).to(attention_mask.dtype)
|
|
|
| new_attention = new_attention.view(1, 1, seq_len, seq_len).repeat(input_ids.shape[0], 1, 1, 1)
|
| new_attention = new_attention * original_attention
|
| new_attention[new_attention == 0] = attention_mask.min()
|
| new_attention[new_attention == 1] = attention_mask.max()
|
| attention_mask = torch.cat([attention_mask, new_attention], dim=-1)
|
| past_key_values = outputs.past_key_values
|
| position_ids = position_ids + 1
|
|
|
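| # Per-step language-modeling loss: shift the logits by one position (plus the
| # talk offset once past the thought phase), map padding labels to -100, and
| # guard against NaNs before appending the unreduced loss.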
| if labels is not None and (self.n_ahead > 1 or not self.base_original_mode):
|
|
|
|
|
|
|
| if ahead_idx == 0 and self.optimize_lm_head_only_at_start:
|
| loss_logits = initial_loss_logits
|
| else:
|
| loss_logits = logits
|
| shift_idx = 1 + max(0, ahead_idx - (self.n_ahead - 1))
|
| shift_logits = loss_logits[..., :-shift_idx, :].contiguous()
|
| shift_labels = labels[..., shift_idx:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss(reduction="none")
|
| shift_logits = shift_logits.view(-1, self.config.vocab_size)
|
| shift_labels = shift_labels.view(-1)
|
|
|
| shift_labels = shift_labels.to(shift_logits.device)
|
|
|
| shift_labels = torch.where(shift_labels == self.tokenizer.pad_token_id, -100, shift_labels)
|
| unreduced_loss = loss_fct(shift_logits, shift_labels)
|
| if torch.isnan(unreduced_loss).any():
|
| raise ValueError("NaN loss")
|
| unreduced_loss = unreduced_loss.reshape(logits.shape[0], -1)
|
| loss_list.append(unreduced_loss)
|
|
|
|
|
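| # Policy (REINFORCE-style) reward: the reduction in LM loss relative to the
| # previous step during the thought phase, or relative to the detached
| # first-step (base) logits during the talk phase. The reward is accumulated
| # together with the log-likelihoods of the sampled actions.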
| if self.use_policy_loss and ahead_idx > 0 and (ahead_idx > 1 or not self.use_start_thought_token):
|
|
|
| previous_loss = loss_list[-2]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| if ahead_idx < self.n_ahead - 1:
|
| shift_amount = 0
|
| original_dqn_reward = (previous_loss - unreduced_loss).detach()
|
| if self.first_and_last_mode:
|
| original_dqn_reward = original_dqn_reward * 0.0
|
| else:
|
|
|
|
|
| shift_amount = max(0, ahead_idx - (self.n_ahead - 1))
|
|
|
|
|
|
|
|
|
| cur_policy_shift_logits = initial_loss_logits[..., shift_amount:-1, :].contiguous().detach()
|
| cur_policy_shift_labels = labels[..., 1 + shift_amount:].contiguous()
|
|
|
| cur_policy_loss_fct = CrossEntropyLoss(reduction="none")
|
| cur_policy_shift_logits = cur_policy_shift_logits.view(-1, self.config.vocab_size)
|
| cur_policy_shift_labels = cur_policy_shift_labels.view(-1).clone()
|
|
|
| cur_policy_shift_labels[cur_policy_shift_labels == self.tokenizer.pad_token_id] = -100
|
| cur_policy_shift_labels = cur_policy_shift_labels.to(cur_policy_shift_logits.device)
|
| cur_policy_reward_base_loss = cur_policy_loss_fct(
|
| cur_policy_shift_logits, cur_policy_shift_labels
|
| ).reshape(logits.shape[0], -1)
|
| original_dqn_reward = cur_policy_reward_base_loss.detach() - unreduced_loss
|
|
|
| if not did_skip_sampling:
|
| nonzero_indices = prev_probabilities_2d.nonzero()
|
| action_loglikelihoods = F.log_softmax(prev_sample_probs / self.reinforce_temperature, dim=-1)[nonzero_indices[:, 0], nonzero_indices[:, 1]]
|
| action_loglikelihoods_2d = action_loglikelihoods.reshape(batch_size, -1)[:, :-1 - shift_amount]
|
| action_loglikelihoods_list.append(action_loglikelihoods_2d)
|
| if policy_reward is None:
|
| policy_reward = original_dqn_reward[:, :-(self.n_ahead_talk - shift_amount)]
|
| else:
|
| if self.n_ahead_talk > shift_amount:
|
| added_reward = original_dqn_reward[:, :-(self.n_ahead_talk - shift_amount)]
|
| else:
|
| added_reward = original_dqn_reward
|
| policy_reward += added_reward
|
|
|
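| # On the final rollout step, add the Gaussian log-likelihoods of the
| # reparameterized start/end thought embeddings to the action log-likelihoods so
| # the policy gradient also shapes the thought-embedding distribution; in eval
| # mode, per-token reward statistics are collected as well.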
| if self.use_policy_loss and ahead_idx == self.n_ahead + self.n_ahead_talk - 2:
|
|
|
| if self.use_reparam_for_thought_embeddings and (self.use_start_thought_token or self.use_end_thought_token):
|
|
|
|
|
|
|
| if self.use_start_thought_token:
|
| exp_start_std = torch.exp(start_embedding[1])
|
| start_loglikelihood = -0.5 * (sampled_start.detach() - start_embedding[0]) ** 2 / exp_start_std ** 2 - start_embedding[1] - 0.5 * math.log(2 * math.pi)
|
| start_loglikelihood = start_loglikelihood.mean(dim=-1)
|
| if self.use_end_thought_token:
|
| exp_end_std = torch.exp(end_embedding[1])
|
| end_loglikelihood = -0.5 * (sampled_end.detach() - end_embedding[0]) ** 2 / exp_end_std ** 2 - end_embedding[1] - 0.5 * math.log(2 * math.pi)
|
| end_loglikelihood = end_loglikelihood.mean(dim=-1)
|
|
|
| if self.use_end_thought_token and self.use_policy_loss_for_end_thought:
|
| action_loglikelihoods_list.append(end_loglikelihood)
|
| if self.use_start_thought_token:
|
| action_loglikelihoods_list.append(start_loglikelihood)
|
|
|
| if ahead_idx == self.n_ahead + self.n_ahead_talk - 2 and self.eval_mode:
|
| with torch.no_grad():
|
|
|
| filtered_tokens = input_ids[:, :policy_reward.shape[-1]].cpu().detach().numpy().flatten()
|
| filtered_tokens_mask = filtered_tokens != self.tokenizer.pad_token_id
|
| filtered_tokens = filtered_tokens[filtered_tokens_mask]
|
| filtered_rewards = policy_reward.float().cpu().detach().numpy()[:, :seq_len - self.n_ahead_talk].flatten()
|
| filtered_rewards = filtered_rewards[filtered_tokens_mask]
|
|
|
| abs_reward_list = np.abs(policy_reward.float().cpu().detach().numpy()[:, :seq_len - self.n_ahead_talk].flatten())
|
| abs_reward_list = abs_reward_list[filtered_tokens_mask]
|
| medium_quantile = np.quantile(abs_reward_list, 0.5)
|
| upper_quantile = np.quantile(abs_reward_list, 0.95)
|
|
|
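| # Actor loss: center the reward across parallel passes as a baseline when
| # trice_mode is on, optionally subtract the global mean or clamp negative
| # rewards, then weight the negative action log-likelihoods by the detached
| # reward. Degenerate (collapsed) log-likelihoods skip the policy update.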
| for action_loglikelihoods_2d in action_loglikelihoods_list:
|
| train_policy_reward = policy_reward
|
|
|
|
|
| if self.trice_mode and self.n_passes > 1:
|
| batched_policy_reward = train_policy_reward.reshape(-1, self.n_passes, train_policy_reward.shape[-1])
|
|
|
| train_policy_reward = batched_policy_reward - batched_policy_reward.mean(dim=1, keepdim=True)
|
| train_policy_reward = train_policy_reward.reshape(-1, train_policy_reward.shape[-1])
|
|
|
| if self.subtract_mean_reward:
|
| train_policy_reward = train_policy_reward - train_policy_reward.mean()
|
| if self.remove_negative_rewards:
|
| fixed_policy_reward = train_policy_reward.detach().clamp(min=0)
|
| else:
|
| fixed_policy_reward = train_policy_reward.detach()
|
| actor_loss = -fixed_policy_reward * action_loglikelihoods_2d[:, :policy_reward.shape[-1]].to(policy_reward.device)
|
| if action_loglikelihoods_2d.mean() < -1e4 and not self.use_policy_loss_just_for_thoughts:
|
|
|
| break
|
| dqn_loss_list.append(actor_loss.mean())
|
|
|
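| # Aggregate the per-step LM losses according to the selected mode
| # (first-and-last, first-only, final-only, or uniform mean), scale by
| # base_loss_beta, then add the mean policy loss scaled by policy_loss_beta.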
| if loss_list:
|
| if self.first_and_last_mode:
|
| loss = sum(
|
| self.loss_mean(loss_list[-(i + 1)]) for i in range(self.n_ahead_talk)
|
| ) * (1 - self.original_loss_weight) / self.n_ahead_talk
|
| loss = loss + self.loss_mean(loss_list[0]) * self.original_loss_weight
|
|
|
|
|
| for i in range(1, len(loss_list) - self.n_ahead_talk):
|
| loss_list[i] = loss_list[i] * math.nan
|
| elif self.first_only:
|
| loss = self.loss_mean(loss_list[0])
|
| elif self.final_only_mode:
|
| loss = sum(
|
| self.loss_mean(loss_list[-i]) for i in range(1, self.n_ahead_talk + 1)
|
| ) / self.n_ahead_talk
|
| else:
|
| loss = None
|
| for i in range(len(loss_list)):
|
| cur_loss = self.loss_mean(loss_list[i])
|
| if loss is not None:
|
| loss = loss + cur_loss.to(loss.device)
|
| else:
|
| loss = cur_loss
|
| loss = loss / len(loss_list)
|
|
|
| loss = loss * self.base_loss_beta
|
|
|
| if dqn_loss_list:
|
| dqn_loss = sum(dqn_loss_list) / len(dqn_loss_list)
|
| if self.include_policy_loss:
|
| if loss is not None:
|
| loss += dqn_loss * self.policy_loss_beta
|
| else:
|
| loss = dqn_loss * self.policy_loss_beta
|
|
|
| if not return_dict:
|
| output = (logits,) + outputs[1:]
|
| return (loss,) + output if loss is not None else output
|
|
|
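| # Accumulate per-step, talk, and policy losses into the running log dict,
| # averaged over n_tokens_print forward calls.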
| base_log_dict = {
|
| f"loss_{i}": nonzero_mean(loss_list[i]) for i in range(len(loss_list))
|
| }
|
|
|
| if loss is not None:
|
| base_log_dict["loss_train"] = loss.item()
|
|
|
| for loss_key, loss_val in base_log_dict.items():
|
| log_dict[loss_key] += loss_val / self.n_tokens_print
|
|
|
| if self.use_policy_loss and policy_reward is not None:
|
| log_dict["policy_loss"] += dqn_loss / self.n_tokens_print
|
| log_dict["policy_reward"] += policy_reward.mean() / self.n_tokens_print
|
|
|
| if not loss_list:
|
| if loss is not None:
|
| log_dict["loss_0"] += loss / self.n_tokens_print
|
| else:
|
| log_dict["loss_final"] += nonzero_mean(loss_list[-1]) / self.n_tokens_print
|
| log_dict["loss_talk"] += sum(nonzero_mean(cur_loss_item) for cur_loss_item in loss_list[-self.n_ahead_talk:]) / self.n_ahead_talk / self.n_tokens_print
|
|
|
|
|
| if loss_list:
|
| for i in range(len(loss_list)):
|
| talk_idx = min(max(i - (self.n_ahead - 1), 0), len(talk_loss_list) - 1)
|
| if not talk_loss_list:
|
| cur_talk_loss = nonzero_mean(loss_list[0])
|
| else:
|
| cur_talk_loss = talk_loss_list[talk_idx]
|
| log_dict[f"rel_loss_{i}"] += (nonzero_mean(loss_list[i]) - cur_talk_loss) / self.n_tokens_print
|
| if self.training:
|
| self.training_steps += 1
|
|
|
| if not self.training:
|
| self.n_ahead_talk = n_ahead_talk_to_restore
|
| self.n_passes = n_passes_to_restore
|
| return CausalLMOutputWithPast(
|
| loss=loss,
|
| logits=(rm_logits if self.n_ahead > 1 else logits) if not self.output_logits_at_the_end else logits,
|
| past_key_values=outputs.past_key_values,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
|
|
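| # Standard HF generation plumbing: keep only the not-yet-cached suffix of
| # input_ids, derive position_ids from the attention mask, and thread the KV
| # cache and attention mask through to forward.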
| def prepare_inputs_for_generation(
|
| self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
| ):
|
|
|
| if past_key_values is not None:
|
| if isinstance(past_key_values, Cache):
|
| cache_length = past_key_values.get_seq_length()
|
| past_length = past_key_values.seen_tokens
|
| max_cache_length = past_key_values.get_max_length()
|
| else:
|
| cache_length = past_length = past_key_values[0][0].shape[2]
|
| max_cache_length = None
|
|
|
|
|
|
|
|
|
|
|
| if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
| input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
|
|
|
|
| elif past_length < input_ids.shape[1]:
|
| input_ids = input_ids[:, past_length:]
|
|
|
|
|
|
|
| if (
|
| max_cache_length is not None
|
| and attention_mask is not None
|
| and cache_length + input_ids.shape[1] > max_cache_length
|
| ):
|
| attention_mask = attention_mask[:, -max_cache_length:]
|
|
|
| position_ids = kwargs.get("position_ids", None)
|
| if attention_mask is not None and position_ids is None:
|
|
|
| position_ids = attention_mask.long().cumsum(-1) - 1
|
| position_ids.masked_fill_(attention_mask == 0, 1)
|
| if past_key_values:
|
| position_ids = position_ids[:, -input_ids.shape[1] :]
|
|
|
|
|
| if inputs_embeds is not None and past_key_values is None:
|
| model_inputs = {"inputs_embeds": inputs_embeds}
|
| else:
|
| model_inputs = {"input_ids": input_ids}
|
|
|
| model_inputs.update(
|
| {
|
| "position_ids": position_ids,
|
| "past_key_values": past_key_values,
|
| "use_cache": kwargs.get("use_cache"),
|
| "attention_mask": attention_mask,
|
| }
|
| )
|
| return model_inputs
|
|
|
| @staticmethod
|
| def _reorder_cache(past_key_values, beam_idx):
|
| reordered_past = ()
|
| for layer_past in past_key_values:
|
| reordered_past += (
|
| tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
| )
|
| return reordered_past
|
|
|
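| # Causal LM that augments Mistral with Quiet-STaR-style internal "thoughts":
| # learned start/end thought embeddings, a talk head that mixes pre- and
| # post-thought hidden states, and an optional policy loss that rewards
| # thoughts which lower the language-modeling loss.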
| class MistralQuietForCausalLM(MistralPreTrainedModel):
|
| _tied_weights_keys = ["lm_head.weight"]
|
|
|
| def __init__(self, config):
|
| super().__init__(config)
|
| self.model = MistralModel(config)
|
| self.vocab_size = config.vocab_size
|
| self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
|
| self.max_thoughts = config.max_thoughts
|
| self.merged_lm_and_talk_heads = config.merged_lm_and_talk_heads
|
| self.use_concat_talk_head = config.use_concat_talk_head
|
| self.use_shallow_talk = config.use_shallow_talk
|
| self.use_complex_talk_head = config.use_complex_talk_head
|
| self.use_weighted_talk_head = config.use_weighted_talk_head
|
|
|
| assert not (self.use_weighted_talk_head and self.use_shallow_talk)
|
|
|
| self.n_ahead = 1
|
| self.n_ahead_talk = 1
|
| self.n_passes = 1
|
| self.n_tokens_print = 1
|
| self.gradient_accumulation_steps = 1
|
| self.training_steps = 0
|
| self.tokenizer = None
|
| self.start_token_id = None
|
| self.end_token_id = None
|
| self.rm_initialized = False
|
| self.residual_talk_head = True
|
| self.thought_init_std_scale = 1e-2
|
|
|
| self.final_only_mode = False
|
| self.first_and_last_mode = True
|
| self.first_only = False
|
| self.original_loss_weight = 0.5
|
|
|
| self.cumulative_residual = False
|
| self.clever_residual = False
|
| self.skip_residual = False
|
| self.no_residual = True
|
|
|
| self.optimize_lm_head_only_at_start = False
|
| self.optimize_model_only_at_start = False
|
|
|
| if self.optimize_model_only_at_start:
|
| raise NotImplementedError
|
| self.train_only_thinking_embedding = False
|
| self.weighted_embeddings = False
|
| self.use_start_thought_token = True
|
| self.use_end_thought_token = True
|
| self.initialize_thought_embedding_to_normal = False
|
| self.initial_start_token = "---"
|
| self.initial_end_token = "---"
|
| self.output_logits_at_the_end = True
|
|
|
| self.gumbel_temperature = 0.001
|
|
|
| self.use_policy_loss = True
|
| self.include_policy_loss = True
|
| self.trice_mode = True
|
| self.remove_negative_rewards = True
|
| self.use_policy_loss_for_end_thought = True
|
|
|
| self.base_original_mode = False
|
| self.original_mode = False
|
|
|
| self.thought_prefix = "(Let's think step by step"
|
| self.tokenized_thought_prefix = None
|
| self.log_dict = defaultdict(int)
|
| self.eval_log_dict = defaultdict(int)
|
| self.print_final_only = True
|
| self.loss_mean = loss_mean
|
| self.all_rewards = []
|
| self.all_unreduced_losses = []
|
| self.kill_after = 100
|
|
|
| self.start_embedding = nn.Parameter(torch.zeros(2, self.model.config.hidden_size))
|
| self.end_embedding = nn.Parameter(torch.zeros(2, self.model.config.hidden_size))
|
|
|
| self.policy_loss_beta = 1e6
|
| self.embedding_scale = 1e2
|
| self.reinforce_temperature = 3
|
| self.base_loss_beta = 1
|
|
|
|
|
| self.use_thought_prefix = False
|
| self.use_reparam_for_thought_embeddings = False
|
| self.use_upper_triangular = False
|
| self.subtract_mean_reward = False
|
| self.comparison_mode = False
|
| self.gumbel_detach = True
|
|
|
|
|
| self.eval_mode = False
|
|
|
| num_talk = 1
|
| talk_input_dim = config.hidden_size if not self.use_concat_talk_head else config.hidden_size * 2
|
| if self.use_weighted_talk_head:
|
| talk_output_dim = 1
|
| else:
|
| talk_output_dim = config.hidden_size if self.use_shallow_talk else config.vocab_size
|
|
|
| if not self.merged_lm_and_talk_heads:
|
| if self.use_complex_talk_head:
|
| self.talk_head = nn.ModuleList([nn.Sequential(
|
| nn.Linear(talk_input_dim, config.hidden_size),
|
| nn.ReLU(),
|
| nn.Linear(config.hidden_size, config.hidden_size),
|
| nn.ReLU(),
|
| nn.Linear(config.hidden_size, talk_output_dim, bias=False)
|
| )])
|
| else:
|
| self.talk_head = nn.ModuleList([nn.Sequential(
|
| nn.Linear(talk_input_dim, talk_output_dim, bias=False)
|
| )])
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self):
|
| return self.model.embed_tokens
|
|
|
| def set_input_embeddings(self, value):
|
| self.model.embed_tokens = value
|
|
|
| def get_output_embeddings(self):
|
| return self.lm_head
|
|
|
| def set_output_embeddings(self, new_embeddings):
|
| self.lm_head = new_embeddings
|
|
|
| def set_decoder(self, decoder):
|
| self.model = decoder
|
|
|
| def get_decoder(self):
|
| return self.model
|
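| # REINFORCE-style objective over sampled thoughts: the negative mean of the
| # reward-weighted log-probabilities of the thought tokens.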
| def calculate_policy_loss(self, thoughts, rewards):
|
| thought_log_probs = []
|
| for thought in thoughts:
|
| thought_log_prob = self.lm_head(thought).log_softmax(dim=-1)
|
| thought_log_probs.append(thought_log_prob)
|
|
|
| thought_log_probs = torch.stack(thought_log_probs, dim=1)
|
| thought_probs = torch.exp(thought_log_probs)
|
|
|
| policy_loss = -torch.mean(thought_log_probs * rewards.unsqueeze(-1).unsqueeze(-1))
|
|
|
| return policy_loss
|
|
|
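| # Sample max_thoughts candidate thought continuations with self.generate and
| # return their token ids together with the corresponding input embeddings.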
| def _generate_thoughts(self, hidden_states, max_length):
|
| batch_size = hidden_states.size(0)
|
| thought_ids = torch.zeros((batch_size, self.config.max_thoughts, max_length), dtype=torch.long, device=hidden_states.device)
|
| thought_embeddings = []
|
|
|
| for i in range(self.config.max_thoughts):
|
| thought_input_ids = torch.zeros((batch_size, 1), dtype=torch.long, device=hidden_states.device)
|
| thought_outputs = self.generate(
|
| input_ids=thought_input_ids,
|
| max_length=max_length,
|
| do_sample=True,
|
| top_k=50,
|
| top_p=0.95,
|
| pad_token_id=self.config.pad_token_id,
|
| eos_token_id=self.config.eos_token_id,
|
| )
|
| thought_ids[:, i, :] = thought_outputs
|
| thought_embeddings.append(self.get_input_embeddings()(thought_outputs))
|
|
|
| thought_embeddings = torch.stack(thought_embeddings, dim=1)
|
| return thought_ids, thought_embeddings
|
|
|
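| # Inference path: append <|startthought|>, roll out n_ahead - 2 thought tokens
| # with hard Gumbel-softmax sampling, append <|endthought|>, then let the
| # weighted talk head mix the hidden states obtained before and after the
| # thought into the final next-token logits.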
| @torch.no_grad()
|
| def infer(
|
| self,
|
| input_ids: torch.LongTensor,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ):
|
| batch_size, seq_len = input_ids.shape
|
|
|
|
|
| original_input_ids = input_ids.clone()
|
| original_attention_mask = attention_mask.clone() if attention_mask is not None else None
|
|
|
|
|
| start_thought_token_id = self.tokenizer.convert_tokens_to_ids("<|startthought|>")
|
| input_ids = torch.cat([input_ids, torch.tensor([[start_thought_token_id]] * batch_size).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| continuation_length = self.n_ahead - 2
|
| new_key_values = past_key_values
|
|
|
| start_time = time.time()
|
| for continuation_idx in range(continuation_length):
|
| outputs = self.model(
|
| input_ids=input_ids if continuation_idx == 0 else next_token_id.unsqueeze(-1).to(input_ids.device),
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=new_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=True,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| new_key_values = outputs.past_key_values
|
|
|
| hidden_states = outputs[0]
|
|
|
| logits = self.lm_head(hidden_states)
|
| logits = logits[:, -1, :]
|
|
|
|
|
| next_token_logits = F.gumbel_softmax(logits, tau=self.gumbel_temperature, hard=True, dim=-1)
|
| next_token_id = torch.argmax(next_token_logits, dim=-1)
|
|
|
|
|
| input_ids = torch.cat([input_ids, next_token_id.unsqueeze(-1).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| end_thought_token_id = self.tokenizer.convert_tokens_to_ids("<|endthought|>")
|
| input_ids = torch.cat([input_ids, torch.tensor([[end_thought_token_id]] * batch_size).to(input_ids.device)], dim=-1)
|
| seq_len += 1
|
|
|
|
|
| if attention_mask is not None:
|
| attention_mask = torch.cat([attention_mask, torch.ones((batch_size, 1)).to(attention_mask.device)], dim=-1)
|
|
|
|
|
| outputs_before = self.model(
|
| input_ids=original_input_ids,
|
| attention_mask=original_attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states_before = outputs_before[0][:, -1:, :]
|
|
|
|
|
| outputs_after = self.model(
|
| input_ids=torch.cat([next_token_id.unsqueeze(-1).to(input_ids.device), torch.tensor(end_thought_token_id).unsqueeze(-1).unsqueeze(-1).to(input_ids.device)], dim=-1),
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=new_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states_after = outputs_after[0][:, -1:, :]
|
|
|
|
|
| mixing_weight = self.talk_head[0](torch.cat([hidden_states_before, hidden_states_after], dim=-1))
|
|
|
|
|
| mixed_hidden_states = (1 - mixing_weight) * hidden_states_before + mixing_weight * hidden_states_after
|
|
|
|
|
| logits = self.lm_head(mixed_hidden_states)
|
| return logits
|
|
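| # A minimal usage sketch for `infer` (assumes `checkpoint` points at weights
| # trained with use_weighted_talk_head / use_concat_talk_head enabled and a
| # tokenizer that already contains <|startthought|> / <|endthought|>):
| #
| #   model = MistralQuietForCausalLM.from_pretrained(checkpoint)
| #   model.tokenizer = tokenizer
| #   model.n_ahead = 8  # <|startthought|> + 6 thought tokens + <|endthought|>
| #   inputs = tokenizer("The capital of France is", return_tensors="pt")
| #   logits = model.infer(inputs.input_ids, attention_mask=inputs.attention_mask)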
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
| def forward(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, CausalLMOutputWithPast]:
|
| r"""
|
| Args:
|
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
|
| config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
|
| (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
|
|
|
| Returns:
|
|
|
| Example:
|
|
|
| ```python
|
| >>> from transformers import AutoTokenizer, QuietForCausalLM
|
|
|
| >>> model = QuietForCausalLM.from_pretrained("quietai/Quiet-7B-v0.1")
|
| >>> tokenizer = AutoTokenizer.from_pretrained("quietai/Quiet-7B-v0.1")
|
|
|
| >>> prompt = "Hey, are you conscious? Can you talk to me?"
|
| >>> inputs = tokenizer(prompt, return_tensors="pt")
|
|
|
| >>> # Generate
|
| >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
|
| >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
| "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
|
| ```"""
|
|
|
| output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
| output_hidden_states = (
|
| output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
| )
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
|
|
| outputs = self.model(
|
| input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=True,
|
| )
|
| hidden_states = outputs.last_hidden_state
|
| logits = self.lm_head(hidden_states)
|
|
|
| thought_ids, thought_embeddings = self._generate_thoughts(hidden_states, max_length=self.config.thought_length)
|
| thought_hidden_states = self.model(inputs_embeds=thought_embeddings).last_hidden_state
|
|
|
|
|
| thought_logits = self.lm_head(thought_hidden_states)
|
|
|
|
|
| mixed_logits = logits.unsqueeze(1) + self.mixing_head(thought_logits)
|
| mixed_logits = mixed_logits.view(-1, mixed_logits.size(-1))
|
|
|
| loss = None
|
| if labels is not None:
|
|
|
| shift_logits = mixed_logits[..., :-1, :].contiguous()
|
| shift_labels = labels[..., 1:].contiguous()
|
|
|
| loss_fct = CrossEntropyLoss()
|
| loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
|
|
|
| if self.use_policy_loss:
|
| rewards = loss.detach().unsqueeze(1).repeat(1, self.max_thoughts)
|
| if self.remove_negative_rewards:
|
| rewards = torch.clamp(rewards, min=0)
|
| policy_loss = self.calculate_policy_loss(thought_ids, rewards)
|
| loss = loss + policy_loss
|
| else:
|
| loss = None
|
|
|
| if not return_dict:
|
| output = (mixed_logits,) + outputs[1:]
|
| return ((loss,) + output) if loss is not None else output
|
|
|
| return CausalLMOutputWithPast(
|
| loss=loss,
|
| logits=logits,
|
| past_key_values=outputs.past_key_values,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
|
|
| def prepare_inputs_for_generation(
|
| self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
|
| ):
|
|
|
| if past_key_values is not None:
|
| if isinstance(past_key_values, Cache):
|
| cache_length = past_key_values.get_seq_length()
|
| past_length = past_key_values.seen_tokens
|
| max_cache_length = past_key_values.get_max_length()
|
| else:
|
| cache_length = past_length = past_key_values[0][0].shape[2]
|
| max_cache_length = None
|
|
|
|
|
|
|
|
|
|
|
| if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
|
| input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
|
|
|
|
|
| elif past_length < input_ids.shape[1]:
|
| input_ids = input_ids[:, past_length:]
|
|
|
|
|
|
|
| if (
|
| max_cache_length is not None
|
| and attention_mask is not None
|
| and cache_length + input_ids.shape[1] > max_cache_length
|
| ):
|
| attention_mask = attention_mask[:, -max_cache_length:]
|
|
|
| position_ids = kwargs.get("position_ids", None)
|
| if attention_mask is not None and position_ids is None:
|
|
|
| position_ids = attention_mask.long().cumsum(-1) - 1
|
| position_ids.masked_fill_(attention_mask == 0, 1)
|
| if past_key_values:
|
| position_ids = position_ids[:, -input_ids.shape[1] :]
|
|
|
|
|
| if inputs_embeds is not None and past_key_values is None:
|
| model_inputs = {"inputs_embeds": inputs_embeds}
|
| else:
|
| model_inputs = {"input_ids": input_ids}
|
|
|
| model_inputs.update(
|
| {
|
| "position_ids": position_ids,
|
| "past_key_values": past_key_values,
|
| "use_cache": kwargs.get("use_cache"),
|
| "attention_mask": attention_mask,
|
| }
|
| )
|
| return model_inputs
|
|
|
| @staticmethod
|
| def _reorder_cache(past_key_values, beam_idx):
|
| reordered_past = ()
|
| for layer_past in past_key_values:
|
| reordered_past += (
|
| tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
|
| )
|
| return reordered_past
|
|
|
|
|
|
|
|
|
|
|
|
|
| @add_start_docstrings(
|
| """
|
| The Mistral Model transformer with a sequence classification head on top (linear layer).
|
|
|
| [`MistralForSequenceClassification`] uses the last token in order to do the classification, as other causal models
|
| (e.g. GPT-2) do.
|
|
|
| Since it does classification on the last token, it requires to know the position of the last token. If a
|
| `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
|
| no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
|
| padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
|
| each row of the batch).
|
| """,
|
| MISTRAL_START_DOCSTRING,
|
| )
|
|
|
| class MistralForSequenceClassification(MistralPreTrainedModel):
|
| def __init__(self, config):
|
| super().__init__(config)
|
| self.num_labels = config.num_labels
|
| self.model = MistralModel(config)
|
| self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self):
|
| return self.model.embed_tokens
|
|
|
| def set_input_embeddings(self, value):
|
| self.model.embed_tokens = value
|
|
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| def forward(
|
| self,
|
| input_ids: torch.LongTensor = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
|
| r"""
|
| labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
|
| config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
|
| `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
|
| """
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| transformer_outputs = self.model(
|
| input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| hidden_states = transformer_outputs[0]
|
| logits = self.score(hidden_states)
|
|
|
| if input_ids is not None:
|
| batch_size = input_ids.shape[0]
|
| else:
|
| batch_size = inputs_embeds.shape[0]
|
|
|
| if self.config.pad_token_id is None and batch_size != 1:
|
| raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
|
| if self.config.pad_token_id is None:
|
| sequence_lengths = -1
|
| else:
|
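| # Locate the last non-padding token in each row; the modulo wraps -1 to the
| # final position when a row contains no padding. Fall back to the last
| # position when only inputs_embeds are provided.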
| if input_ids is not None:
|
|
|
| sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
|
| sequence_lengths = sequence_lengths % input_ids.shape[-1]
|
| sequence_lengths = sequence_lengths.to(logits.device)
|
| else:
|
| sequence_lengths = -1
|
|
|
| pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
|
|
|
| loss = None
|
| if labels is not None:
|
| labels = labels.to(logits.device)
|
| if self.config.problem_type is None:
|
| if self.num_labels == 1:
|
| self.config.problem_type = "regression"
|
| elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
|
| self.config.problem_type = "single_label_classification"
|
| else:
|
| self.config.problem_type = "multi_label_classification"
|
|
|
| if self.config.problem_type == "regression":
|
| loss_fct = MSELoss()
|
| if self.num_labels == 1:
|
| loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
|
| else:
|
| loss = loss_fct(pooled_logits, labels)
|
| elif self.config.problem_type == "single_label_classification":
|
| loss_fct = CrossEntropyLoss()
|
| loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
|
| elif self.config.problem_type == "multi_label_classification":
|
| loss_fct = BCEWithLogitsLoss()
|
| loss = loss_fct(pooled_logits, labels)
|
| if not return_dict:
|
| output = (pooled_logits,) + transformer_outputs[1:]
|
| return ((loss,) + output) if loss is not None else output
|
|
|
| return SequenceClassifierOutputWithPast(
|
| loss=loss,
|
| logits=pooled_logits,
|
| past_key_values=transformer_outputs.past_key_values,
|
| hidden_states=transformer_outputs.hidden_states,
|
| attentions=transformer_outputs.attentions,
|
| )
|
|
|
|
|
| @add_start_docstrings(
|
| """
|
| The Mistral Model transformer with a token classification head on top (a linear layer on top of the hidden-states
|
| output) e.g. for Named-Entity-Recognition (NER) tasks.
|
| """,
|
| MISTRAL_START_DOCSTRING,
|
| )
|
|
|
| class MistralForTokenClassification(MistralPreTrainedModel):
|
| def __init__(self, config):
|
| super().__init__(config)
|
| self.num_labels = config.num_labels
|
| self.model = MistralModel(config)
|
| if getattr(config, "classifier_dropout", None) is not None:
|
| classifier_dropout = config.classifier_dropout
|
| elif getattr(config, "hidden_dropout", None) is not None:
|
| classifier_dropout = config.hidden_dropout
|
| else:
|
| classifier_dropout = 0.1
|
| self.dropout = nn.Dropout(classifier_dropout)
|
| self.score = nn.Linear(config.hidden_size, config.num_labels)
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self):
|
| return self.model.embed_tokens
|
|
|
| def set_input_embeddings(self, value):
|
| self.model.embed_tokens = value
|
|
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| def forward(
|
| self,
|
| input_ids: Optional[torch.LongTensor] = None,
|
| attention_mask: Optional[torch.Tensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[List[torch.FloatTensor]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| labels: Optional[torch.LongTensor] = None,
|
| use_cache: Optional[bool] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, TokenClassifierOutput]:
|
| r"""
|
| labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
| Labels for computing the token classification loss. Indices should be in `[0, ...,
|
| config.num_labels - 1]`.
|
| """
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| outputs = self.model(
|
| input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| use_cache=use_cache,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
| sequence_output = outputs[0]
|
| sequence_output = self.dropout(sequence_output)
|
| logits = self.score(sequence_output)
|
|
|
| loss = None
|
| if labels is not None:
|
| loss_fct = CrossEntropyLoss()
|
| loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
|
|
|
| if not return_dict:
|
| output = (logits,) + outputs[2:]
|
| return ((loss,) + output) if loss is not None else output
|
|
|
| return TokenClassifierOutput(
|
| loss=loss,
|
| logits=logits,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
|
|
|
|
| @add_start_docstrings(
|
| """
|
| The Mistral Model transformer with a span classification head on top for extractive question-answering tasks like
|
| SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
|
| """,
|
| MISTRAL_START_DOCSTRING,
|
| )
|
| class MistralForQuestionAnswering(MistralPreTrainedModel):
|
| base_model_prefix = "transformer"
|
|
|
|
|
| def __init__(self, config):
|
| super().__init__(config)
|
| self.transformer = MistralModel(config)
|
| self.qa_outputs = nn.Linear(config.hidden_size, 2)
|
|
|
|
|
| self.post_init()
|
|
|
| def get_input_embeddings(self):
|
| return self.transformer.embed_tokens
|
|
|
| def set_input_embeddings(self, value):
|
| self.transformer.embed_tokens = value
|
|
|
| @add_start_docstrings_to_model_forward(MISTRAL_INPUTS_DOCSTRING)
|
| def forward(
|
| self,
|
| input_ids: Optional[torch.LongTensor] = None,
|
| attention_mask: Optional[torch.FloatTensor] = None,
|
| position_ids: Optional[torch.LongTensor] = None,
|
| past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
| start_positions: Optional[torch.LongTensor] = None,
|
| end_positions: Optional[torch.LongTensor] = None,
|
| output_attentions: Optional[bool] = None,
|
| output_hidden_states: Optional[bool] = None,
|
| return_dict: Optional[bool] = None,
|
| ) -> Union[Tuple, QuestionAnsweringModelOutput]:
|
| r"""
|
| start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| Labels for position (index) of the start of the labelled span for computing the token classification loss.
|
| Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
|
| are not taken into account for computing the loss.
|
| end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
|
| Labels for position (index) of the end of the labelled span for computing the token classification loss.
|
| Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
|
| are not taken into account for computing the loss.
|
| """
|
| return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
|
|
| outputs = self.transformer(
|
| input_ids,
|
| attention_mask=attention_mask,
|
| position_ids=position_ids,
|
| past_key_values=past_key_values,
|
| inputs_embeds=inputs_embeds,
|
| output_attentions=output_attentions,
|
| output_hidden_states=output_hidden_states,
|
| return_dict=return_dict,
|
| )
|
|
|
| sequence_output = outputs[0]
|
|
|
| logits = self.qa_outputs(sequence_output)
|
| start_logits, end_logits = logits.split(1, dim=-1)
|
| start_logits = start_logits.squeeze(-1).contiguous()
|
| end_logits = end_logits.squeeze(-1).contiguous()
|
|
|
| total_loss = None
|
| if start_positions is not None and end_positions is not None:
|
|
|
| if len(start_positions.size()) > 1:
|
| start_positions = start_positions.squeeze(-1).to(start_logits.device)
|
| if len(end_positions.size()) > 1:
|
| end_positions = end_positions.squeeze(-1).to(end_logits.device)
|
|
|
| ignored_index = start_logits.size(1)
|
| start_positions = start_positions.clamp(0, ignored_index)
|
| end_positions = end_positions.clamp(0, ignored_index)
|
|
|
| loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
|
| start_loss = loss_fct(start_logits, start_positions)
|
| end_loss = loss_fct(end_logits, end_positions)
|
| total_loss = (start_loss + end_loss) / 2
|
|
|
| if not return_dict:
|
| output = (start_logits, end_logits) + outputs[2:]
|
| return ((total_loss,) + output) if total_loss is not None else output
|
|
|
| return QuestionAnsweringModelOutput(
|
| loss=total_loss,
|
| start_logits=start_logits,
|
| end_logits=end_logits,
|
| hidden_states=outputs.hidden_states,
|
| attentions=outputs.attentions,
|
| )
|
| |