import inspect
import random
import warnings
from collections import defaultdict
from copy import deepcopy
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from accelerate.utils import is_deepspeed_available
from datasets import Dataset
from torch.utils.data import DataLoader
from transformers import (
    AutoModelForCausalLM,
    DataCollator,
    PreTrainedModel,
    PreTrainedTokenizerBase,
    Trainer,
    TrainingArguments,
)
from transformers.trainer_callback import TrainerCallback
from transformers.trainer_utils import EvalLoopOutput

from ..import_utils import is_peft_available, is_wandb_available
from ..models import PreTrainedModelWrapper, create_reference_model
from .utils import DPODataCollatorWithPadding, disable_dropout_in_model, pad_to_length


if is_peft_available():
    from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training

if is_wandb_available():
    import wandb

if is_deepspeed_available():
    import deepspeed


class DPOTrainer(Trainer):
    r"""
    Initialize DPOTrainer.

    Args:
        model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForCausalLM`.
        ref_model (`PreTrainedModelWrapper`):
            Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation and loss. If no
            reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
        beta (`float`, defaults to 0.1):
            The beta factor in DPO loss. Higher beta means less divergence from the initial policy. For the IPO loss, beta is the regularization parameter denoted by tau in the paper.
        label_smoothing (`float`, defaults to 0):
            The robust DPO label smoothing parameter from the [cDPO](https://ericmitchell.ai/cdpo.pdf) report, which should be between 0 and 0.5.
        loss_type (`str`, defaults to `"sigmoid"`):
            The type of DPO loss to use. Either `"sigmoid"` (the default DPO loss), `"hinge"` loss from the [SLiC](https://arxiv.org/abs/2305.10425) paper, `"ipo"` from the [IPO](https://arxiv.org/abs/2310.12036) paper, or `"kto"` from the HALOs [report](https://github.com/ContextualAI/HALOs/blob/main/assets/report.pdf).
        args (`transformers.TrainingArguments`):
            The arguments to use for training.
        data_collator (`transformers.DataCollator`):
            The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used,
            which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
        label_pad_token_id (`int`, defaults to `-100`):
            The label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int`, defaults to `0`):
            The padding value. This argument is required if you want to use the default data collator.
        truncation_mode (`str`, defaults to `keep_end`):
            The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator.
        train_dataset (`datasets.Dataset`):
            The dataset to use for training.
        eval_dataset (`datasets.Dataset`):
            The dataset to use for evaluation.
        tokenizer (`transformers.PreTrainedTokenizerBase`):
            The tokenizer to use for training. This argument is required if you want to use the default data collator.
        model_init (`Callable[[], transformers.PreTrainedModel]`):
            The model initializer to use for training. If None is specified, the default model initializer will be used.
        callbacks (`List[transformers.TrainerCallback]`):
            The callbacks to use for training.
        optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
            The optimizer and scheduler to use for training.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
            The function to use to preprocess the logits before computing the metrics.
        max_length (`int`, defaults to `None`):
            The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.
        max_prompt_length (`int`, defaults to `None`):
            The maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_target_length (`int`, defaults to `None`):
            The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder.
        peft_config (`Dict`, defaults to `None`):
            The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
        is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`):
            If no model is provided, we need to know if the model_init returns an encoder-decoder.
        disable_dropout (`bool`, defaults to `True`):
            Whether or not to disable dropouts in `model` and `ref_model`.
        generate_during_eval (`bool`, defaults to `False`):
            Whether to sample and log generations during the evaluation step.
        compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
            The function to use to compute the metrics. Must take an `EvalPrediction` and return
            a dictionary mapping strings to metric values.
        model_init_kwargs (`Optional[Dict]`, *optional*):
            Dict of optional kwargs to pass when instantiating the model from a string.
        ref_model_init_kwargs (`Optional[Dict]`, *optional*):
            Dict of optional kwargs to pass when instantiating the ref model from a string.
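
    Example:

        A minimal usage sketch. The model id, dataset contents, and output directory below are
        illustrative placeholders, not part of this module:

        ```python
        from datasets import Dataset
        from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
        from trl import DPOTrainer

        model = AutoModelForCausalLM.from_pretrained("gpt2")
        tokenizer = AutoTokenizer.from_pretrained("gpt2")
        tokenizer.pad_token = tokenizer.eos_token

        # Each row pairs a prompt with a preferred ("chosen") and dispreferred ("rejected") completion.
        train_dataset = Dataset.from_dict(
            {
                "prompt": ["The capital of France is"],
                "chosen": [" Paris."],
                "rejected": [" London."],
            }
        )

        trainer = DPOTrainer(
            model=model,
            args=TrainingArguments(output_dir="dpo_output", remove_unused_columns=False),
            beta=0.1,
            train_dataset=train_dataset,
            tokenizer=tokenizer,
        )
        trainer.train()
        ```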
    """

    def __init__(
        self,
        model: Union[PreTrainedModel, nn.Module, str] = None,
        ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
        beta: float = 0.1,
        label_smoothing: float = 0,
        loss_type: Literal["sigmoid", "hinge", "ipo", "kto"] = "sigmoid",
        args: TrainingArguments = None,
        data_collator: Optional[DataCollator] = None,
        label_pad_token_id: int = -100,
        padding_value: int = 0,
        truncation_mode: str = "keep_end",
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
        tokenizer: Optional[PreTrainedTokenizerBase] = None,
        model_init: Optional[Callable[[], PreTrainedModel]] = None,
        callbacks: Optional[List[TrainerCallback]] = None,
        optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (
            None,
            None,
        ),
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
        max_length: Optional[int] = None,
        max_prompt_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        peft_config: Optional[Dict] = None,
        is_encoder_decoder: Optional[bool] = None,
        disable_dropout: bool = True,
        generate_during_eval: bool = False,
        compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]] = None,
        model_init_kwargs: Optional[Dict] = None,
        ref_model_init_kwargs: Optional[Dict] = None,
    ):
        if model_init_kwargs is None:
            model_init_kwargs = {}
        elif not isinstance(model, str):
            raise ValueError(
                "You passed `model_init_kwargs` to the DPOTrainer, but your model is already instantiated."
            )

        if ref_model_init_kwargs is None:
            ref_model_init_kwargs = {}
        elif not isinstance(ref_model, str):
            raise ValueError(
                "You passed `ref_model_init_kwargs` to the DPOTrainer, but your ref_model is already instantiated."
            )

        if isinstance(model, str):
            warnings.warn(
                "You passed a model_id to the DPOTrainer. This will automatically create an "
                "`AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you."
            )
            model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)

        if isinstance(ref_model, str):
            warnings.warn(
                "You passed a ref model_id to the DPOTrainer. This will automatically create an "
                "`AutoModelForCausalLM`."
            )
            ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)

        if not is_peft_available() and peft_config is not None:
            raise ValueError(
                "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs. Please install `peft` to use PEFT models."
            )
        elif is_peft_available() and peft_config is not None:
            # If the model is already a PeftModel, merge and unload the existing adapter
            # before wrapping it with the new config.
            if isinstance(model, PeftModel):
                model = model.merge_and_unload()

            if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
                # Newer versions of `peft` accept `gradient_checkpointing_kwargs`; only pass it when supported.
                _support_gc_kwargs = hasattr(
                    args, "gradient_checkpointing_kwargs"
                ) and "gradient_checkpointing_kwargs" in list(
                    inspect.signature(prepare_model_for_kbit_training).parameters
                )

                prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}

                if _support_gc_kwargs:
                    prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs

                model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
            elif getattr(args, "gradient_checkpointing", False):
                # For backward compatibility with older versions of transformers.
                if hasattr(model, "enable_input_require_grads"):
                    model.enable_input_require_grads()
                else:

                    def make_inputs_require_grad(module, input, output):
                        output.requires_grad_(True)

                    model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

            model = get_peft_model(model, peft_config)

        # When gradient checkpointing is enabled, the input embeddings must explicitly require
        # gradients, otherwise no gradient flows back through the checkpointed blocks.
        elif getattr(args, "gradient_checkpointing", False):
            # For backward compatibility with older versions of transformers.
            if hasattr(model, "enable_input_require_grads"):
                model.enable_input_require_grads()
            else:

                def make_inputs_require_grad(module, input, output):
                    output.requires_grad_(True)

                model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

        if generate_during_eval and not is_wandb_available():
            raise ValueError(
                "`generate_during_eval=True` requires Weights and Biases to be installed."
                " Please install `wandb` to resolve."
            )

        if model is not None:
            self.is_encoder_decoder = model.config.is_encoder_decoder
        elif is_encoder_decoder is None:
            raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
        else:
            self.is_encoder_decoder = is_encoder_decoder

        self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)

        if ref_model:
            self.ref_model = ref_model
        elif self.is_peft_model:
            # The `model` with its adapters disabled will act as the reference model.
            self.ref_model = None
        else:
            self.ref_model = create_reference_model(model)

        if data_collator is None:
            if tokenizer is None:
                raise ValueError(
                    "A tokenizer must be specified when using the default DPODataCollatorWithPadding."
                )
            if max_length is None:
                warnings.warn(
                    "When using DPODataCollatorWithPadding, you should set `max_length` in the DPOTrainer's init."
                    " It will be set to `512` by default, but you should do it yourself in the future.",
                    UserWarning,
                )
                max_length = 512
            if max_prompt_length is None:
                warnings.warn(
                    "When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the DPOTrainer's init."
                    " It will be set to `128` by default, but you should do it yourself in the future.",
                    UserWarning,
                )
                max_prompt_length = 128

            if max_target_length is None and self.is_encoder_decoder:
                warnings.warn(
                    "When using DPODataCollatorWithPadding with an encoder-decoder architecture, you should set `max_target_length` in the DPOTrainer's init."
                    " It will be set to `128` by default, but you should do it yourself in the future.",
                    UserWarning,
                )
                max_target_length = 128

            data_collator = DPODataCollatorWithPadding(
                tokenizer,
                max_length=max_length,
                max_prompt_length=max_prompt_length,
                label_pad_token_id=label_pad_token_id,
                padding_value=padding_value,
                truncation_mode=truncation_mode,
                is_encoder_decoder=self.is_encoder_decoder,
                max_target_length=max_target_length,
            )

            if args.remove_unused_columns:
                args.remove_unused_columns = False
                warnings.warn(
                    "When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments."
                    " We have set it for you, but you should do it yourself in the future.",
                    UserWarning,
                )

            self.use_dpo_data_collator = True
        else:
            self.use_dpo_data_collator = False

        if disable_dropout:
            disable_dropout_in_model(model)
            if self.ref_model is not None:
                disable_dropout_in_model(self.ref_model)

        self.max_length = max_length
        self.generate_during_eval = generate_during_eval
        self.label_pad_token_id = label_pad_token_id
        self.padding_value = padding_value

        if loss_type in ["hinge", "ipo", "kto"] and label_smoothing > 0:
            warnings.warn(
                "You are using a loss type that does not support label smoothing. Ignoring the label_smoothing parameter."
            )

        self.beta = beta
        self.label_smoothing = label_smoothing
        self.loss_type = loss_type

        # Per-batch metrics are accumulated here and averaged at the next `log` call.
        self._stored_metrics = defaultdict(lambda: defaultdict(list))

        super().__init__(
            model=model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            tokenizer=tokenizer,
            model_init=model_init,
            compute_metrics=compute_metrics,
            callbacks=callbacks,
            optimizers=optimizers,
            preprocess_logits_for_metrics=preprocess_logits_for_metrics,
        )

        if not hasattr(self, "accelerator"):
            raise AttributeError(
                "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
            )

        if self.ref_model is None:
            if not hasattr(self.accelerator.unwrap_model(self.model), "disable_adapter"):
                raise ValueError(
                    "You are using a `peft` version that does not support `disable_adapter`. Please update your `peft` version to the latest version."
                )
        else:
            if self.is_deepspeed_enabled:
                self.ref_model = self._prepare_deepspeed(self.ref_model)
            else:
                self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)

    def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
        # Adapted from the DeepSpeed ZeRO-3 inference setup used in `accelerate`.
        deepspeed_plugin = self.accelerator.state.deepspeed_plugin
        config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)

        if model is not None:
            if hasattr(model, "config"):
                hidden_size = (
                    max(model.config.hidden_sizes)
                    if getattr(model.config, "hidden_sizes", None)
                    else getattr(model.config, "hidden_size", None)
                )
                if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
                    # With ZeRO-3, size the communication buckets for the reference model from
                    # its hidden size, following the `accelerate` DeepSpeed ZeRO-3 guidance.
                    config_kwargs.update(
                        {
                            "zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
                            "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
                            "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
                        }
                    )

        # If ZeRO-3 is not used, keep the reference model out of ZeRO (stage 0): it only runs
        # inference and has no optimizer states to shard.
        if config_kwargs["zero_optimization"]["stage"] != 3:
            config_kwargs["zero_optimization"]["stage"] = 0
        model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
        model.eval()
        return model

    def concatenated_inputs(self, batch: Dict[str, Union[List, torch.LongTensor]]) -> Dict[str, torch.LongTensor]:
        """Concatenate the chosen and rejected inputs into a single tensor.

        Args:
            batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).

        Returns:
            A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
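
        Example:

            A shape-only sketch of the behavior (the tensors below are illustrative):

            ```python
            batch = {
                "chosen_input_ids": torch.ones(2, 5, dtype=torch.long),
                "rejected_input_ids": torch.ones(2, 3, dtype=torch.long),
            }
            # The rejected half is padded from length 3 to the shared maximum length 5, then
            # stacked under the chosen half along the batch axis:
            # concatenated_batch["concatenated_input_ids"].shape == (4, 5)
            ```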
| """ |
| concatenated_batch = {} |
|
|
| if self.is_encoder_decoder: |
| max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1]) |
| else: |
| max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1]) |
|
|
| for k in batch: |
| if k.startswith("chosen") and isinstance(batch[k], torch.Tensor): |
| pad_value = self.label_pad_token_id if "labels" in k or self.is_encoder_decoder else self.padding_value |
| concatenated_key = k.replace("chosen", "concatenated") |
| concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value) |
| for k in batch: |
| if k.startswith("rejected") and isinstance(batch[k], torch.Tensor): |
| pad_value = self.label_pad_token_id if "labels" in k or self.is_encoder_decoder else self.padding_value |
| concatenated_key = k.replace("rejected", "concatenated") |
| concatenated_batch[concatenated_key] = torch.cat( |
| ( |
| concatenated_batch[concatenated_key], |
| pad_to_length(batch[k], max_length, pad_value=pad_value), |
| ), |
| dim=0, |
| ).to(self.accelerator.device) |
|
|
| if self.is_encoder_decoder: |
| concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1) |
| concatenated_batch["concatenated_attention_mask"] = batch["prompt_attention_mask"].repeat(2, 1) |
|
|
| return concatenated_batch |
|
|

    def dpo_loss(
        self,
        policy_chosen_logps: torch.FloatTensor,
        policy_rejected_logps: torch.FloatTensor,
        reference_chosen_logps: torch.FloatTensor,
        reference_rejected_logps: torch.FloatTensor,
        reference_free: bool = False,
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Compute the DPO loss for a batch of policy and reference model log probabilities.

        Args:
            policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
            policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
            reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)
            reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)
            reference_free: If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses.

        Returns:
            A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).
            The losses tensor contains the DPO loss for each example in the batch.
            The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
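
        Example:

            A toy numeric sketch of the default `"sigmoid"` loss; the log probabilities below are
            illustrative, not outputs of a real model:

            ```python
            policy_chosen = torch.tensor([-1.0])
            policy_rejected = torch.tensor([-2.0])
            ref_chosen = torch.tensor([-1.5])
            ref_rejected = torch.tensor([-1.5])
            # logits = (policy_chosen - policy_rejected) - (ref_chosen - ref_rejected) = 1.0
            # With beta=0.1 and label_smoothing=0:
            #   loss = -logsigmoid(0.1 * 1.0) ~= 0.644
            #   chosen_reward = 0.1 * (policy_chosen - ref_chosen) = 0.05
            #   rejected_reward = 0.1 * (policy_rejected - ref_rejected) = -0.05
            ```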
| """ |
| pi_logratios = policy_chosen_logps - policy_rejected_logps |
| if reference_free: |
| ref_logratios = 0 |
| else: |
| ref_logratios = reference_chosen_logps - reference_rejected_logps |
|
|
| logits = pi_logratios - ref_logratios |
|
|
| |
| |
| |
| if self.loss_type == "sigmoid": |
| losses = ( |
| -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) |
| - F.logsigmoid(-self.beta * logits) * self.label_smoothing |
| ) |
| elif self.loss_type == "hinge": |
| losses = torch.relu(1 - self.beta * logits) |
| elif self.loss_type == "ipo": |
| |
| losses = (logits - 1 / (2 * self.beta)) ** 2 |
| elif self.loss_type == "kto": |
| |
| chosen_KL = (policy_chosen_logps - reference_chosen_logps).mean().clamp(min=0) |
| rejected_KL = (policy_rejected_logps - reference_rejected_logps).mean().clamp(min=0) |
|
|
| chosen_logratios = policy_chosen_logps - reference_chosen_logps |
| rejected_logratios = policy_rejected_logps - reference_rejected_logps |
| |
| losses = torch.cat( |
| ( |
| 1 - F.sigmoid(self.beta * (chosen_logratios - rejected_KL)), |
| 1 - F.sigmoid(self.beta * (chosen_KL - rejected_logratios)), |
| ), |
| 0, |
| ) |
| else: |
| raise ValueError( |
| f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'kto']" |
| ) |
|
|
| chosen_rewards = self.beta * (policy_chosen_logps - reference_chosen_logps).detach() |
| rejected_rewards = self.beta * (policy_rejected_logps - reference_rejected_logps).detach() |
|
|
| return losses, chosen_rewards, rejected_rewards |
|
|

    def _get_batch_logps(
        self,
        logits: torch.FloatTensor,
        labels: torch.LongTensor,
        average_log_prob: bool = False,
    ) -> torch.FloatTensor:
        """Compute the log probabilities of the given labels under the given logits.

        Args:
            logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
            labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
            average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.

        Returns:
            A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
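
        Example:

            A shape-only sketch (the tensors are illustrative):

            ```python
            logits = torch.randn(2, 6, 32000)  # (batch, seq_len, vocab)
            labels = torch.randint(0, 32000, (2, 6))  # (batch, seq_len)
            # For a decoder-only model, the labels are shifted one position right, the
            # log-softmax is gathered at each label id, pad positions are masked out, and a
            # (2,)-shaped tensor of summed (or averaged) log probabilities is returned.
            ```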
| """ |
| if logits.shape[:-1] != labels.shape: |
| raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.") |
|
|
| if not self.is_encoder_decoder: |
| labels = labels[:, 1:].clone() |
| logits = logits[:, :-1, :] |
| loss_mask = labels != self.label_pad_token_id |
|
|
| |
| labels[labels == self.label_pad_token_id] = 0 |
|
|
| per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2) |
|
|
| if average_log_prob: |
| return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) |
| else: |
| return (per_token_logps * loss_mask).sum(-1) |
|
|

    def concatenated_forward(
        self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]
    ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.

        We do this to avoid doing two forward passes, because it's faster for FSDP.
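
        Example:

            A shape-only sketch (illustrative; `trainer` stands for a constructed `DPOTrainer`):

            ```python
            # 2 chosen + 2 rejected sequences of length 7 -> one forward pass on a (4, 7) batch.
            chosen_logps, rejected_logps, chosen_logits, rejected_logits = trainer.concatenated_forward(model, batch)
            # chosen_logps.shape == rejected_logps.shape == (2,)
            # chosen_logits.shape == rejected_logits.shape == (2, 7, vocab_size)
            ```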
| """ |
| concatenated_batch = self.concatenated_inputs(batch) |
| len_chosen = batch["chosen_labels"].shape[0] |
|
|
| model_kwargs = ( |
| { |
| "labels": concatenated_batch["concatenated_labels"], |
| "decoder_input_ids": concatenated_batch.pop("concatenated_decoder_input_ids", None), |
| } |
| if self.is_encoder_decoder |
| else {} |
| ) |
| all_logits = model( |
| concatenated_batch["concatenated_input_ids"], |
| attention_mask=concatenated_batch["concatenated_attention_mask"], |
| **model_kwargs, |
| ).logits.to(torch.float32) |
|
|
| all_logps = self._get_batch_logps( |
| all_logits, |
| concatenated_batch["concatenated_labels"], |
| average_log_prob=False, |
| ) |
|
|
| chosen_logps = all_logps[:len_chosen] |
| rejected_logps = all_logps[len_chosen:] |
|
|
| chosen_logits = all_logits[:len_chosen] |
| rejected_logits = all_logits[len_chosen:] |
|
|
| return (chosen_logps, rejected_logps, chosen_logits, rejected_logits) |
|
|

    def get_batch_metrics(
        self,
        model,
        batch: Dict[str, Union[List, torch.LongTensor]],
        train_eval: Literal["train", "eval"] = "train",
    ):
        """Compute the DPO loss and other metrics for the given batch of inputs for train or test."""
        metrics = {}

        (
            policy_chosen_logps,
            policy_rejected_logps,
            policy_chosen_logits,
            policy_rejected_logits,
        ) = self.concatenated_forward(model, batch)
        with torch.no_grad():
            if self.ref_model is None:
                # With a PEFT model and no explicit reference model, disabling the adapters
                # recovers the frozen base model, which serves as the reference.
                with self.accelerator.unwrap_model(self.model).disable_adapter():
                    (
                        reference_chosen_logps,
                        reference_rejected_logps,
                        _,
                        _,
                    ) = self.concatenated_forward(self.model, batch)
            else:
                (
                    reference_chosen_logps,
                    reference_rejected_logps,
                    _,
                    _,
                ) = self.concatenated_forward(self.ref_model, batch)

        losses, chosen_rewards, rejected_rewards = self.dpo_loss(
            policy_chosen_logps,
            policy_rejected_logps,
            reference_chosen_logps,
            reference_rejected_logps,
        )
        reward_accuracies = (chosen_rewards > rejected_rewards).float()

        prefix = "eval_" if train_eval == "eval" else ""
        metrics[f"{prefix}rewards/chosen"] = chosen_rewards.cpu().mean()
        metrics[f"{prefix}rewards/rejected"] = rejected_rewards.cpu().mean()
        metrics[f"{prefix}rewards/accuracies"] = reward_accuracies.cpu().mean()
        metrics[f"{prefix}rewards/margins"] = (chosen_rewards - rejected_rewards).cpu().mean()
        metrics[f"{prefix}logps/rejected"] = policy_rejected_logps.detach().cpu().mean()
        metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.detach().cpu().mean()
        metrics[f"{prefix}logits/rejected"] = policy_rejected_logits.detach().cpu().mean()
        metrics[f"{prefix}logits/chosen"] = policy_chosen_logits.detach().cpu().mean()

        return losses.mean(), metrics

    def compute_loss(
        self,
        model: Union[PreTrainedModel, nn.Module],
        inputs: Dict[str, Union[torch.Tensor, Any]],
        return_outputs=False,
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
        if not self.use_dpo_data_collator:
            warnings.warn(
                "compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than "
                "DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator"
            )
        loss, metrics = self.get_batch_metrics(model, inputs, train_eval="train")

        # Store the metrics on the main process only.
        if self.accelerator.is_main_process:
            self.store_metrics(metrics, train_eval="train")

        if return_outputs:
            return (loss, metrics)
        return loss

    def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]:
        """Generate samples from the model and reference model for the given batch of inputs."""
        policy_output = model.generate(
            input_ids=batch["prompt_input_ids"],
            attention_mask=batch["prompt_attention_mask"],
            max_length=self.max_length,
            do_sample=True,
            pad_token_id=self.tokenizer.pad_token_id,
        )

        # When no explicit reference model exists, sample from the base model with adapters disabled.
        if self.ref_model is None:
            with self.accelerator.unwrap_model(self.model).disable_adapter():
                reference_output = self.model.generate(
                    input_ids=batch["prompt_input_ids"],
                    attention_mask=batch["prompt_attention_mask"],
                    max_length=self.max_length,
                    do_sample=True,
                    pad_token_id=self.tokenizer.pad_token_id,
                )
        else:
            reference_output = self.ref_model.generate(
                input_ids=batch["prompt_input_ids"],
                attention_mask=batch["prompt_attention_mask"],
                max_length=self.max_length,
                do_sample=True,
                pad_token_id=self.tokenizer.pad_token_id,
            )

        policy_output = pad_to_length(policy_output, self.max_length, self.tokenizer.pad_token_id)
        policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True)

        reference_output = pad_to_length(reference_output, self.max_length, self.tokenizer.pad_token_id)
        reference_output_decoded = self.tokenizer.batch_decode(reference_output, skip_special_tokens=True)

        return policy_output_decoded, reference_output_decoded

    def prediction_step(
        self,
        model: Union[PreTrainedModel, nn.Module],
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        if not self.use_dpo_data_collator:
            warnings.warn(
                "prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than "
                "DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator"
            )
        if ignore_keys is None:
            if hasattr(model, "config"):
                ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []

        with torch.no_grad():
            loss, metrics = self.get_batch_metrics(model, inputs, train_eval="eval")

        # Store the metrics on the main process only.
        if self.accelerator.is_main_process:
            self.store_metrics(metrics, train_eval="eval")

        if prediction_loss_only:
            return (loss.detach(), None, None)

        # Logits for the chosen and rejected samples from the model.
        logits_dict = {
            "eval_logits/chosen": metrics["eval_logits/chosen"],
            "eval_logits/rejected": metrics["eval_logits/rejected"],
        }
        logits = tuple(v.unsqueeze(dim=0) for k, v in logits_dict.items() if k not in ignore_keys)
        logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device)
        labels = torch.zeros(logits.shape[0], device=self.accelerator.device)

        return (loss.detach(), logits, labels)
| def store_metrics(self, metrics: Dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None: |
| for key, value in metrics.items(): |
| self._stored_metrics[train_eval][key].append(value) |
|
|

    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Overriding built-in evaluation loop to store metrics for each batch.
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works both with or without labels.
        """
        # Sample and log generations from a single random batch if requested (one batch to save time).
        if self.generate_during_eval:
            # Pick `eval_batch_size` random indices from the evaluation dataset.
            num_samples = len(dataloader.dataset)
            random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)

            # Select the random batch directly from the dataset instead of iterating over the DataLoader.
            random_batch_dataset = dataloader.dataset.select(random_indices)
            random_batch = self.data_collator(random_batch_dataset)
            random_batch = self._prepare_inputs(random_batch)

            policy_output_decoded, ref_output_decoded = self.get_batch_samples(self.model, random_batch)

            self.log(
                {
                    "game_log": wandb.Table(
                        columns=["Prompt", "Policy", "Ref Model"],
                        rows=[
                            [prompt, pol[len(prompt) :], ref[len(prompt) :]]
                            for prompt, pol, ref in zip(
                                random_batch["prompt"], policy_output_decoded, ref_output_decoded
                            )
                        ],
                    )
                }
            )
            self.state.log_history.pop()

        # Run the standard evaluation loop.
        initial_output = super().evaluation_loop(
            dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
        )

        return initial_output

    def log(self, logs: Dict[str, float]) -> None:
        """
        Log `logs` on the various objects watching training, including stored metrics.

        Args:
            logs (`Dict[str, float]`):
                The values to log.
        """
        # `logs` contains either 'loss' (train) or 'eval_loss' (eval).
        train_eval = "train" if "loss" in logs else "eval"
        # Add the averaged stored metrics to the logs, then clear the store for this split.
        for key, metrics in self._stored_metrics[train_eval].items():
            logs[key] = torch.tensor(metrics).mean().item()
        del self._stored_metrics[train_eval]
        return super().log(logs)