| """ | |
| 2026.3.2 | |
| 2026.3.4 | |
| 5.3.0 | |
| 0.24.0 | |
| __UNSLOTH_VERSIONING__ | |
| """ | |
# Unsloth auto generated code
# Copyright 2023-present Daniel Han-Chen, Michael Han-Chen & the Unsloth team. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from torch import Tensor
import torch
import torch.nn as nn
from torch.nn import functional as F
from unsloth_zoo.temporary_patches.common import torch_compile
from typing import Any, List, Optional, Tuple, Union, Dict, Set, Callable
from trl.trainer.orpo_trainer import (
    Any, AutoModelForCausalLM, BaseImageProcessor, BaseTrainer, Callable,
    DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput,
    F, FeatureExtractionMixin, Literal, ORPOConfig, ORPOTrainer, Optional,
    PartialState, Path, PeftModel, PreTrainedModel, PreTrainedTokenizerBase,
    ProcessorMixin, TrainerCallback, Union, add_bos_token_if_needed,
    add_eos_token_if_needed, autocast, defaultdict, disable_dropout_in_model,
    inspect, is_comet_available, is_peft_available, is_torch_fx_proxy,
    is_torch_xla_available, is_wandb_available, log_table_to_comet_experiment,
    logger, logging, maybe_apply_chat_template, maybe_extract_prompt, nn, np,
    nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16,
    prepare_model_for_kbit_training, random, selective_log_softmax, textwrap,
    torch, wandb, warnings,
)
import os
import math
import logging
from typing import *
from dataclasses import dataclass, field
from packaging.version import Version
import torch
import numpy as np
from contextlib import nullcontext
from torch.nn import functional as F
import inspect
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling
from transformers.training_args import ParallelMode
from unsloth_zoo.device_type import DEVICE_TYPE, device_synchronize
# Wrap trainer with padding to right and enable training mode
# Also patches W&B since multiple runs must use wandb.finish()
import functools
from types import MethodType
try:
    from unsloth_zoo.gradient_checkpointing import reset_unsloth_gradient_checkpointing_buffers
except Exception:
    def reset_unsloth_gradient_checkpointing_buffers(): pass

def prepare_for_training_mode(f):
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        # Enable training mode
        _was_training = None
        # Get the gradient checkpointing setting from the training arguments
        use_gc = getattr(self.args, 'gradient_checkpointing', True)
        if hasattr(self, 'model') and hasattr(self.model, "training"):
            _was_training = self.model.training
        if hasattr(self, 'model') and hasattr(self.model, "for_training"):
            self.model.for_training(use_gradient_checkpointing=use_gc)
        output = f(self, *args, **kwargs)
        # Restore the previous mode when possible
        if hasattr(self, 'model') and hasattr(self.model, "for_inference"):
            if _was_training is False:
                self.model.for_inference()
            elif _was_training is True and hasattr(self.model, "for_training"):
                self.model.for_training(use_gradient_checkpointing=use_gc)
        # Reset gradient checkpointing buffers to free memory while staying ready for the next run
        try:
            reset_unsloth_gradient_checkpointing_buffers()
        except Exception:
            pass
        # Patch W&B to enable logging on future runs, otherwise it'll overwrite the first run
        try:
            import wandb
            wandb.finish()
        except Exception:
            pass
        return output
    return wrapper
pass
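# Illustrative sketch (hypothetical trainer, not part of the generated code): the decorator
# above is meant to wrap `Trainer.train`-style methods, flipping the model into training
# mode before the call and restoring inference mode (plus buffer and W&B cleanup) after it.
def _example_prepare_for_training_mode():
    class _TinyTrainer:
        class _Args:  # stands in for transformers.TrainingArguments
            gradient_checkpointing = True
        args = _Args()
        model = None  # no Unsloth model attached; the wrapper degrades gracefully
        @prepare_for_training_mode
        def train(self):
            return "ran"
    assert _TinyTrainer().train() == "ran"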
torch_compile_options = {
    "epilogue_fusion"   : True,
    "max_autotune"      : False,
    "shape_padding"     : True,
    "trace.enabled"     : False,
    "triton.cudagraphs" : False,
}
def chunked_hidden_states_selective_log_softmax(
    hidden_states: torch.Tensor,
    lm_head: torch.Tensor,
    index: torch.Tensor,
    chunks: int = 4,
    logit_scale_multiply: float = 0.0,
    logit_scale_divide: float = 0.0,
    logit_softcapping: float = 0.0,
    temperature: float = 1.0,
) -> torch.Tensor:
    # All Unsloth Zoo code licensed under AGPL3
    flat_hidden_states = hidden_states.reshape(-1, hidden_states.shape[-1])
    flat_index = index.reshape(-1)
    chunked_hidden_states = torch.chunk(flat_hidden_states, chunks=chunks, dim=0)
    chunked_index = torch.chunk(flat_index, chunks=chunks, dim=0)
    all_per_token_logps = []
    for chunk_hidden_states, chunk_index in zip(chunked_hidden_states, chunked_index):
        chunk_logits = chunk_hidden_states.to(lm_head.dtype) @ lm_head.t()
        if logit_scale_multiply != 0.0:
            chunk_logits = chunk_logits * logit_scale_multiply
        if logit_scale_divide != 0.0:
            chunk_logits = chunk_logits / logit_scale_divide
        if logit_softcapping != 0.0:
            chunk_logits = chunk_logits * torch.tanh(chunk_logits / logit_softcapping)
        chunk_logits = chunk_logits.to(torch.float32)
        if temperature != 1.0:
            chunk_logits = chunk_logits / temperature
        selected_logits = torch.gather(chunk_logits, dim=-1, index=chunk_index.unsqueeze(-1)).squeeze(-1)
        logsumexp_values = torch.logsumexp(chunk_logits, dim=-1)
        per_token_logps = selected_logits - logsumexp_values
        all_per_token_logps.append(per_token_logps)
    all_per_token_logps = torch.concat(all_per_token_logps)
    all_per_token_logps = all_per_token_logps.reshape((hidden_states.shape[0], hidden_states.shape[1]))
    return all_per_token_logps
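# Illustrative sketch (toy shapes, not part of the generated code): per-token log-probs are
# computed from hidden states chunk by chunk, without materializing the full
# [batch, seq, vocab] logits tensor at once.
def _example_chunked_hidden_states_selective_log_softmax():
    batch, seq, hidden, vocab = 2, 8, 16, 32
    hidden_states = torch.randn(batch, seq, hidden)
    lm_head = torch.randn(vocab, hidden)           # projection weight, used as `@ lm_head.t()`
    index = torch.randint(0, vocab, (batch, seq))  # token ids whose log-probs we want
    logps = chunked_hidden_states_selective_log_softmax(hidden_states, lm_head, index, chunks=4)
    assert logps.shape == (batch, seq)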
def chunked_selective_log_softmax(logits, index):
    # Split into 4 chunks only
    chunked_logits = torch.chunk(logits.reshape(-1, logits.shape[-1]), chunks = 4, dim = 0)
    chunked_index = torch.chunk(index.reshape(-1), chunks = 4, dim = 0)
    all_per_token_logps = []
    # The loop below does the same as selective_log_softmax(chunk_logits, chunk_index)
    for chunk_logits, chunk_index in zip(chunked_logits, chunked_index):
        chunk_logits = chunk_logits.to(torch.float32)
        selected_logits = torch.gather(chunk_logits, dim = -1, index = chunk_index.unsqueeze(-1)).squeeze(-1)
        logsumexp_values = torch.logsumexp(chunk_logits, dim = -1)
        per_token_logps = selected_logits - logsumexp_values
        all_per_token_logps.append(per_token_logps)
    pass
    all_per_token_logps = torch.concat(all_per_token_logps)
    all_per_token_logps = all_per_token_logps.reshape((logits.shape[0], logits.shape[1]))
    return all_per_token_logps
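# Illustrative sketch: the chunked computation should match a direct log_softmax + gather.
def _example_chunked_selective_log_softmax():
    logits = torch.randn(2, 8, 32)
    index = torch.randint(0, 32, (2, 8))
    chunked = chunked_selective_log_softmax(logits, index)
    direct = torch.log_softmax(logits.float(), dim=-1).gather(-1, index.unsqueeze(-1)).squeeze(-1)
    assert torch.allclose(chunked, direct, atol=1e-5)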
def calculate_pad_tokens_in_prompt(
    input_ids: torch.Tensor,
    logits_to_keep: int,
    pad_token_id: int
) -> torch.Tensor:
    """
    Given a prompt tensor, return the number of left-pad tokens in each sequence,
    e.g. [pad, pad, pad, cat] -> 3 tokens.
    """
    if logits_to_keep >= input_ids.shape[1]:
        raise ValueError("logits_to_keep must be smaller than the sequence length.")
    prompt_section = input_ids[:, :-logits_to_keep]
    padding_mask = (prompt_section == pad_token_id)
    pad_token_counts = padding_mask.sum(dim=1)
    return pad_token_counts
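# Illustrative sketch: with pad_token_id = 0 and logits_to_keep = 2 (the completion length),
# only the prompt slice `input_ids[:, :-2]` is scanned for left padding.
def _example_calculate_pad_tokens_in_prompt():
    input_ids = torch.tensor([[0, 0, 5, 6, 7, 8],
                              [0, 3, 4, 5, 6, 7]])
    counts = calculate_pad_tokens_in_prompt(input_ids, logits_to_keep=2, pad_token_id=0)
    assert counts.tolist() == [2, 1]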
def create_completion_attention_mask(
    completion_input_ids: torch.Tensor,
    left_pad_tokens_per_prompt: torch.Tensor,
    max_left_pad: int,
    pad_token_id: int
) -> torch.Tensor:
    """
    Given a sequence [p, p, p, c, c, c, pad, pad, pad], where p are extra prompt tokens
    left over from slicing the tensor, c are completion tokens, and pad are pad tokens,
    build a completion mask that zeroes out the pad and p tokens. For this example the
    mask is [0, 0, 0, 1, 1, 1, 0, 0, 0].
    """
    batch_size, completion_len = completion_input_ids.shape
    device = completion_input_ids.device
    num_tokens_to_mask = max_left_pad - left_pad_tokens_per_prompt
    indices = torch.arange(completion_len, device=device).unsqueeze(0)
    shift_mask = indices >= num_tokens_to_mask.unsqueeze(1)
    non_padding_mask = (completion_input_ids != pad_token_id)
    final_mask = shift_mask & non_padding_mask
    return final_mask
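# Illustrative sketch mirroring the docstring: leftover prompt tokens (from slicing) and
# right padding are both zeroed out of the completion mask.
def _example_create_completion_attention_mask():
    completion = torch.tensor([[11, 12, 13, 0],
                               [21, 22, 0, 0]])
    left_pads = torch.tensor([1, 2])  # per-prompt left-pad counts
    mask = create_completion_attention_mask(completion, left_pads, max_left_pad=2, pad_token_id=0)
    assert mask.tolist() == [[False, True, True, False],
                             [True, True, False, False]]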
def left_pack_padding(tensor: torch.Tensor, pad_id: int) -> torch.Tensor:
    """
    Packs the non-padding tokens of each sequence in a batch to the left, moving all
    padding tokens to the right.
    """
    mask = (tensor != pad_id)
    # Must use stable=True since the binary mask is otherwise unordered
    sorted_indices = torch.argsort(mask, dim=1, descending=True, stable=True)
    packed_tensor = torch.gather(tensor, 1, sorted_indices)
    return packed_tensor
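# Illustrative sketch: non-pad tokens are packed to the left in their original order
# (hence the stable sort), padding is pushed to the right.
def _example_left_pack_padding():
    x = torch.tensor([[0, 5, 0, 6],
                      [7, 0, 8, 9]])
    assert left_pack_padding(x, pad_id=0).tolist() == [[5, 6, 0, 0], [7, 8, 9, 0]]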
def align_logprobs_with_mask(
    logprob_tensor: torch.Tensor,
    attention_mask: torch.Tensor,
    pad_value: float = 0.0
) -> torch.Tensor:
    """
    Aligns a log probability tensor with a given attention mask.
    """
    device = logprob_tensor.device
    batch_size, logprob_seq_len = logprob_tensor.shape
    mask_seq_len = attention_mask.shape[1]
    padded_logprobs = torch.full(
        attention_mask.shape,
        fill_value=pad_value,
        dtype=logprob_tensor.dtype,
        device=device
    )
    left_pad_counts = torch.argmax(attention_mask, dim=1)
    cols = torch.arange(logprob_seq_len, device=device)
    dest_indices = left_pad_counts.unsqueeze(1) + cols
    # Create destination row indices
    # Shape: [batch_size, logprob_seq_len]
    row_indices = torch.arange(batch_size, device=device).unsqueeze(1).expand_as(dest_indices)
    # Filter out-of-bounds indices and perform the assignment.
    # Create a mask to identify only the indices that are within the bounds
    # of the target tensor's sequence length.
    valid_mask = dest_indices < mask_seq_len
    # Use this mask to select only the valid row indices, column indices,
    # and the corresponding values from the logprob tensor.
    # This flattens the selected elements into 1D tensors.
    valid_rows = row_indices[valid_mask]
    valid_cols = dest_indices[valid_mask]
    valid_vals = logprob_tensor[valid_mask]
    # Place the valid values into their correct positions in the padded tensor
    # using a single, efficient advanced indexing operation.
    padded_logprobs[valid_rows, valid_cols] = valid_vals
    return padded_logprobs
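# Illustrative sketch: a packed logprob row is scattered into the layout described by a
# left-padded attention mask (`argmax` finds the first 1 in each mask row).
def _example_align_logprobs_with_mask():
    logprobs = torch.tensor([[-0.5, -1.0, -1.5]])
    attention_mask = torch.tensor([[0, 0, 1, 1, 1]])
    aligned = align_logprobs_with_mask(logprobs, attention_mask)
    assert aligned.tolist() == [[0.0, 0.0, -0.5, -1.0, -1.5]]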
def autotune_batch_and_chunks(
    total_input_rows,
    seq_len,
    hidden_size,
    vocab_size,
    dtype_bytes=16,
    multiplier=None
):
    if multiplier is None:
        final_m = max(4, seq_len // 4096)
    else:
        final_m = multiplier
    if torch.cuda.is_available():
        free_bytes, _ = torch.cuda.mem_get_info()
        limit_gb = (free_bytes / (1024**3)) * 0.80
    elif hasattr(torch, "xpu") and torch.xpu.is_available():
        # For XPU: estimate free memory from total - reserved
        total_mem = torch.xpu.get_device_properties(0).total_memory
        reserved_mem = torch.xpu.memory_reserved()
        free_bytes = total_mem - reserved_mem
        limit_gb = (free_bytes / (1024**3)) * 0.80
    else:
        # Fallback: assume 8GB available
        limit_gb = 8.0
    bytes_to_gb = 1024**3
    b_vals = torch.arange(total_input_rows, 0, -1, device='cpu', dtype=torch.float32)
    hidden_gb = (b_vals * seq_len * hidden_size * dtype_bytes) / bytes_to_gb
    base_logits = ((b_vals / total_input_rows) * b_vals * seq_len * vocab_size * dtype_bytes) / bytes_to_gb
    logits_gb = base_logits / final_m
    total_mem_gb = hidden_gb + logits_gb
    valid_mask = total_mem_gb <= limit_gb
    valid_indices = torch.nonzero(valid_mask, as_tuple=False)
    if valid_indices.shape[0] == 0:
        # This means your GPU will OOM
        return 4, final_m
    best_idx = valid_indices[0].item()
    final_b = int(b_vals[best_idx].item())
    return final_b, final_m
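# Illustrative sketch (placeholder sizes): pick the largest row count whose estimated
# hidden-state plus chunked-logit memory fits the detected budget.
def _example_autotune_batch_and_chunks():
    rows, multiplier = autotune_batch_and_chunks(
        total_input_rows=8, seq_len=4096, hidden_size=4096, vocab_size=128256,
    )
    # `rows` = how many input rows to process at once; `multiplier` scales logit chunking.
    assert 1 <= rows <= 8 and multiplier >= 4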
def sanitize_logprob(logprob):
    """Local port of trl.scripts.vllm_serve.sanitize_logprob.
    Filters NaN logprobs from vLLM outputs."""
    value = logprob.logprob
    if math.isnan(value):
        logging.getLogger(__name__).warning(
            f"Generated NaN logprob, token logprob '{logprob}' will be ignored"
        )
        return None
    return value
class UnslothORPOConfig(ORPOConfig):
    """
    Configuration class for the [`ORPOTrainer`].

    This class includes only the parameters that are specific to ORPO training. For a full list of training arguments,
    please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may
    differ from those in [`~transformers.TrainingArguments`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int`, *optional*):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the relative ratio loss weight in the ORPO loss. In the
            [paper](https://huggingface.co/papers/2403.07691), it is denoted by λ. In the
            [code](https://github.com/xfactlab/orpo), it is denoted by `alpha`.
        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int`, *optional*):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from the model to W&B or Comet during evaluation.
        is_encoder_decoder (`bool`, *optional*):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        model_init_kwargs (`dict[str, Any]`, *optional*):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        dataset_num_proc (`int`, *optional*):
            Number of processes to use for processing the dataset.
    """

    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    unsloth_logit_chunk_multiplier : Optional[int] = field(
        default = None,
        metadata = {'help': 'Multiplier for chunked logit computations.'},
    )
    unsloth_grpo_mini_batch : Optional[int] = field(
        default = None,
        metadata = {'help': 'Mini batch size for GRPO hidden state accumulation. Default is None unless user defines it.'},
    )
    max_seq_length : Optional[int] = field(
        default = None,
        metadata = {'help': 'Maximum sequence length to truncate to.'},
    )
    def __init__(
        self,
        output_dir = None,
        per_device_train_batch_size = 4,
        num_train_epochs = 3.0,
        max_steps = -1,
        learning_rate = 5e-05,
        lr_scheduler_type = 'linear',
        lr_scheduler_kwargs = None,
        warmup_steps = 0.1,
        optim = 'adamw_8bit',
        optim_args = None,
        weight_decay = 0.01,
        adam_beta1 = 0.9,
        adam_beta2 = 0.999,
        adam_epsilon = 1e-08,
        optim_target_modules = None,
        gradient_accumulation_steps = 2,
        average_tokens_across_devices = True,
        max_grad_norm = 1.0,
        label_smoothing_factor = 0.0,
        bf16 = False,
        fp16 = False,
        bf16_full_eval = False,
        fp16_full_eval = False,
        tf32 = None,
        gradient_checkpointing = True,
        gradient_checkpointing_kwargs = None,
        torch_compile = False,
        torch_compile_backend = None,
        torch_compile_mode = None,
        use_liger_kernel = False,
        liger_kernel_config = None,
        use_cache = False,
        neftune_noise_alpha = None,
        torch_empty_cache_steps = 250,
        auto_find_batch_size = False,
        logging_strategy = 'steps',
        logging_steps = 1,
        logging_first_step = False,
        log_on_each_node = True,
        logging_nan_inf_filter = False,
        include_num_input_tokens_seen = False,
        log_level = 'passive',
        log_level_replica = 'warning',
        disable_tqdm = None,
        report_to = 'none',
        run_name = None,
        project = 'huggingface',
        trackio_space_id = 'trackio',
        eval_strategy = 'no',
        eval_steps = None,
        eval_delay = 0,
        per_device_eval_batch_size = 4,
        prediction_loss_only = False,
        eval_on_start = False,
        eval_do_concat_batches = True,
        eval_use_gather_object = False,
        eval_accumulation_steps = 2,
        batch_eval_metrics = False,
        save_only_model = False,
        save_strategy = 'steps',
        save_steps = 500,
        save_on_each_node = False,
        save_total_limit = None,
        enable_jit_checkpoint = False,
        push_to_hub = False,
        hub_token = None,
        hub_private_repo = None,
        hub_model_id = None,
        hub_strategy = 'every_save',
        hub_always_push = False,
        hub_revision = None,
        load_best_model_at_end = False,
        metric_for_best_model = None,
        greater_is_better = None,
        ignore_data_skip = False,
        restore_callback_states_from_checkpoint = False,
        full_determinism = False,
        seed = 3407,
        data_seed = 3407,
        use_cpu = False,
        accelerator_config = None,
        parallelism_config = None,
        dataloader_drop_last = False,
        dataloader_num_workers = 0,
        dataloader_pin_memory = True,
        dataloader_persistent_workers = False,
        dataloader_prefetch_factor = None,
        remove_unused_columns = True,
        label_names = None,
        train_sampling_strategy = 'random',
        length_column_name = 'length',
        ddp_find_unused_parameters = None,
        ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None,
        ddp_backend = None,
        ddp_timeout = 1800,
        fsdp = None,
        fsdp_config = None,
        deepspeed = None,
        debug = '',
        skip_memory_metrics = True,
        do_train = False,
        do_eval = False,
        do_predict = False,
        resume_from_checkpoint = None,
        warmup_ratio = None,
        logging_dir = None,
        local_rank = -1,
        max_length = 1024,
        max_prompt_length = 512,
        max_completion_length = None,
        beta = 0.1,
        disable_dropout = True,
        label_pad_token_id = -100,
        padding_value = None,
        truncation_mode = 'keep_end',
        generate_during_eval = False,
        is_encoder_decoder = None,
        model_init_kwargs = None,
        dataset_num_proc = None,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        unsloth_logit_chunk_multiplier = None,
        unsloth_grpo_mini_batch = None,
        max_seq_length = None,
        **kwargs,
    ):
        if learning_rate < 1e-7: print(f'Unsloth: Your learning rate of `{learning_rate}` is too small (less than 1e-7)! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: print(f'Unsloth: Your learning rate of `{learning_rate}` is far too large (greater than 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        if num_train_epochs is None:
            num_train_epochs = 3.0 # Default to 3 epochs if None; max_steps will override
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        import multiprocessing as _mp
        if _mp.get_start_method() != 'fork':
            dataset_num_proc = None
        elif dataset_num_proc is None:
            import psutil
            dataset_num_proc = min(max((psutil.cpu_count() or 1) + 4, 2), 64)
            memory_gb_left = psutil.virtual_memory().available / (1024**3)
            if memory_gb_left <= 2: dataset_num_proc = 1
            else: dataset_num_proc = min(dataset_num_proc, int(memory_gb_left))
        super().__init__(
            output_dir = output_dir,
            per_device_train_batch_size = per_device_train_batch_size,
            num_train_epochs = num_train_epochs,
            max_steps = max_steps,
            learning_rate = learning_rate,
            lr_scheduler_type = lr_scheduler_type,
            lr_scheduler_kwargs = lr_scheduler_kwargs,
            warmup_steps = warmup_steps,
            optim = optim,
            optim_args = optim_args,
            weight_decay = weight_decay,
            adam_beta1 = adam_beta1,
            adam_beta2 = adam_beta2,
            adam_epsilon = adam_epsilon,
            optim_target_modules = optim_target_modules,
            gradient_accumulation_steps = gradient_accumulation_steps,
            average_tokens_across_devices = average_tokens_across_devices,
            max_grad_norm = max_grad_norm,
            label_smoothing_factor = label_smoothing_factor,
            bf16 = bf16,
            fp16 = fp16,
            bf16_full_eval = bf16_full_eval,
            fp16_full_eval = fp16_full_eval,
            tf32 = tf32,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            torch_compile = torch_compile,
            torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            use_liger_kernel = use_liger_kernel,
            liger_kernel_config = liger_kernel_config,
            use_cache = use_cache,
            neftune_noise_alpha = neftune_noise_alpha,
            torch_empty_cache_steps = torch_empty_cache_steps,
            auto_find_batch_size = auto_find_batch_size,
            logging_strategy = logging_strategy,
            logging_steps = logging_steps,
            logging_first_step = logging_first_step,
            log_on_each_node = log_on_each_node,
            logging_nan_inf_filter = logging_nan_inf_filter,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            log_level = log_level,
            log_level_replica = log_level_replica,
            disable_tqdm = disable_tqdm,
            report_to = report_to,
            run_name = run_name,
            project = project,
            trackio_space_id = trackio_space_id,
            eval_strategy = eval_strategy,
            eval_steps = eval_steps,
            eval_delay = eval_delay,
            per_device_eval_batch_size = per_device_eval_batch_size,
            prediction_loss_only = prediction_loss_only,
            eval_on_start = eval_on_start,
            eval_do_concat_batches = eval_do_concat_batches,
            eval_use_gather_object = eval_use_gather_object,
            eval_accumulation_steps = eval_accumulation_steps,
            batch_eval_metrics = batch_eval_metrics,
            save_only_model = save_only_model,
            save_strategy = save_strategy,
            save_steps = save_steps,
            save_on_each_node = save_on_each_node,
            save_total_limit = save_total_limit,
            enable_jit_checkpoint = enable_jit_checkpoint,
            push_to_hub = push_to_hub,
            hub_token = hub_token,
            hub_private_repo = hub_private_repo,
            hub_model_id = hub_model_id,
            hub_strategy = hub_strategy,
            hub_always_push = hub_always_push,
            hub_revision = hub_revision,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model,
            greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            full_determinism = full_determinism,
            seed = seed,
            data_seed = data_seed,
            use_cpu = use_cpu,
            accelerator_config = accelerator_config,
            parallelism_config = parallelism_config,
            dataloader_drop_last = dataloader_drop_last,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            remove_unused_columns = remove_unused_columns,
            label_names = label_names,
            train_sampling_strategy = train_sampling_strategy,
            length_column_name = length_column_name,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb,
            ddp_broadcast_buffers = ddp_broadcast_buffers,
            ddp_backend = ddp_backend,
            ddp_timeout = ddp_timeout,
            fsdp = fsdp,
            fsdp_config = fsdp_config,
            deepspeed = deepspeed,
            debug = debug,
            skip_memory_metrics = skip_memory_metrics,
            do_train = do_train,
            do_eval = do_eval,
            do_predict = do_predict,
            resume_from_checkpoint = resume_from_checkpoint,
            warmup_ratio = warmup_ratio,
            logging_dir = logging_dir,
            local_rank = local_rank,
            max_length = max_length,
            max_prompt_length = max_prompt_length,
            max_completion_length = max_completion_length,
            beta = beta,
            disable_dropout = disable_dropout,
            label_pad_token_id = label_pad_token_id,
            padding_value = padding_value,
            truncation_mode = truncation_mode,
            generate_during_eval = generate_during_eval,
            is_encoder_decoder = is_encoder_decoder,
            model_init_kwargs = model_init_kwargs,
            dataset_num_proc = dataset_num_proc,
            **kwargs,
        )
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
        if unsloth_grpo_mini_batch is not None:
            if self.generation_batch_size >= unsloth_grpo_mini_batch:
                self.unsloth_grpo_mini_batch = unsloth_grpo_mini_batch
            else:
                raise ValueError(
                    "Unsloth GRPO mini batch size must be less than or equal to the effective generation batch size, "
                    "which is `per_device_train_batch_size * gradient_accumulation_steps`."
                )
        self.unsloth_logit_chunk_multiplier = unsloth_logit_chunk_multiplier
        self.max_seq_length = max_seq_length
    pass
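# Illustrative usage sketch (model/tokenizer/dataset are placeholders; `UnslothORPOTrainer`
# is the concrete trainer this generated file builds on top of `_UnslothORPOTrainer` below):
#
#   config = UnslothORPOConfig(
#       output_dir = "outputs",
#       per_device_train_batch_size = 2,
#       gradient_accumulation_steps = 4,
#       max_length = 1024,
#       max_prompt_length = 512,
#       beta = 0.1,
#   )
#   trainer = UnslothORPOTrainer(model = model, args = config,
#                                train_dataset = dataset, processing_class = tokenizer)
#   trainer.train()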
class _UnslothORPOTrainer(BaseTrainer):
    r""""""
    _tag_names = ["trl", "orpo"]
    _name = "ORPO"
    _paper = {
        "title": "ORPO: Monolithic Preference Optimization without Reference Model",
        "id": "2403.07691",
        # docstyle-ignore
        "citation": textwrap.dedent("""\
            @article{hong2024orpo,
                title = {{ORPO: Monolithic Preference Optimization without Reference Model}},
                author = {Jiwoo Hong and Noah Lee and James Thorne},
                year = 2024,
                eprint = {arXiv:2403.07691}
            }"""),
    }
    def __init__(
        self,
        model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
        args: Optional[ORPOConfig] = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
        processing_class: Optional[
            Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
        ] = None,
        model_init: Optional[Callable[[], PreTrainedModel]] = None,
        callbacks: Optional[list[TrainerCallback]] = None,
        optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
        peft_config: Optional[dict] = None,
        compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
    ):
        if not os.environ.get("TRL_EXPERIMENTAL_SILENCE"):
            warnings.warn(
                "This trainer will soon be moved to trl.experimental and is a candidate for removal. If you rely on "
                "it and want it to remain, please share your comments here: "
                "https://github.com/huggingface/trl/issues/4223. Silence this warning by setting environment variable "
                "TRL_EXPERIMENTAL_SILENCE=1."
            )
        if args.model_init_kwargs is None:
            model_init_kwargs = {}
        elif not isinstance(model, str):
            raise ValueError("You passed model_init_kwargs to the ORPOTrainer, but your model is already instantiated.")
        else:
            model_init_kwargs = args.model_init_kwargs
            dtype = model_init_kwargs.get("dtype")
            if dtype is not None:
                # Convert to `torch.dtype` if a string is passed
                if isinstance(dtype, str) and dtype != "auto":
                    dtype = getattr(torch, dtype)
                if dtype != "auto" and not isinstance(dtype, torch.dtype):
                    raise ValueError(
                        f"Invalid `dtype` passed to the ORPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {dtype}."
                    )
                model_init_kwargs["dtype"] = dtype
        if isinstance(model, str):
            model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
        # Initialize this variable to False. This helps track whether `peft_module_casting_to_bf16`
        # has been called, so that autocast can be applied properly if needed.
        self._peft_has_been_casted_to_bf16 = False
        if not is_peft_available() and peft_config is not None:
            raise ValueError(
                "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
            )
        elif is_peft_available() and peft_config is not None:
            # if model is a peft model and we have a peft_config, we merge and unload it first
            if isinstance(model, PeftModel):
                model = model.merge_and_unload()
            if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
                _support_gc_kwargs = hasattr(
                    args, "gradient_checkpointing_kwargs"
                ) and "gradient_checkpointing_kwargs" in list(
                    inspect.signature(prepare_model_for_kbit_training).parameters
                )
                prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}
                if _support_gc_kwargs:
                    prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs
                model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
            elif args.gradient_checkpointing:
                # For backward compatibility with older versions of transformers
                if hasattr(model, "enable_input_require_grads"):
                    model.enable_input_require_grads()
                else:
                    def make_inputs_require_grad(module, input, output):
                        output.requires_grad_(True)
                    model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
            # get peft model with the given config
            model = model
            if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
                peft_module_casting_to_bf16(model)
                # If args.bf16 we need to explicitly call `generate` with the torch amp autocast context manager
                self._peft_has_been_casted_to_bf16 = True
        # For models that use gradient_checkpointing, we need to attach a hook that forces the
        # input to have `requires_grad=True`, otherwise training will either silently fail or
        # fail completely.
        elif args.gradient_checkpointing:
            # For backward compatibility with older versions of transformers
            if hasattr(model, "enable_input_require_grads"):
                model.enable_input_require_grads()
            else:
                def make_inputs_require_grad(module, input, output):
                    output.requires_grad_(True)
                model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
        if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
            raise ValueError(
                "`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
                " Please install `wandb` or `comet-ml` to resolve."
            )
        if model is not None:
            self.is_encoder_decoder = model.config.is_encoder_decoder
        elif args.is_encoder_decoder is None:
            raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
        else:
            self.is_encoder_decoder = args.is_encoder_decoder
        if self.is_encoder_decoder:
            self.decoder_start_token_id = model.config.decoder_start_token_id
            self.pad_token_id = model.config.pad_token_id
        if processing_class is None:
            raise ValueError("processing_class must be specified to tokenize an ORPO dataset.")
        if args.max_length is None:
            logger.warning(
                "`max_length` is not set in the ORPOConfig's init; it will default to `512`,"
                " but you should set it yourself in the future.",
            )
            max_length = 512
        else:
            max_length = args.max_length
        if args.max_prompt_length is None:
            logger.warning(
                "`max_prompt_length` is not set in the ORPOConfig's init; it will default to `128`,"
                " but you should set it yourself in the future.",
            )
            max_prompt_length = 128
        else:
            max_prompt_length = args.max_prompt_length
        if args.max_completion_length is None and self.is_encoder_decoder:
            logger.warning(
                "When using an encoder-decoder architecture, you should set `max_completion_length` in the"
                " ORPOConfig's init; it will default to `128`, but you should set it yourself in the future.",
            )
            self.max_completion_length = 128
        else:
            self.max_completion_length = args.max_completion_length
        if data_collator is None:
            data_collator = DPODataCollatorWithPadding(
                pad_token_id=processing_class.pad_token_id,
                label_pad_token_id=args.label_pad_token_id,
                is_encoder_decoder=self.is_encoder_decoder,
            )
            if args.remove_unused_columns:
                args.remove_unused_columns = False
                # warn users
                logger.warning(
                    "When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your"
                    " TrainingArguments; we have set it for you, but you should do it yourself in the future.",
                )
            self.use_dpo_data_collator = True
        else:
            self.use_dpo_data_collator = False
        # Disable dropout in the model and reference model
        if args.disable_dropout:
            disable_dropout_in_model(model)
        self.max_length = max_length
        self.generate_during_eval = args.generate_during_eval
        self.label_pad_token_id = args.label_pad_token_id
        self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
        self.max_prompt_length = max_prompt_length
        self.truncation_mode = args.truncation_mode
        self.processing_class = processing_class
        self.beta = args.beta
        self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
        self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
        if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
            logger.warning(
                "You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
                "`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
                "greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
                "loss.",
            )
        self._stored_metrics = defaultdict(lambda: defaultdict(list))
        # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
        # input tensor associated with the key "input_ids". However, in ORPO, the sampled data does not include the
        # "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and
        # "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens
        # of the input, floating-point operations will not be computed." To suppress this warning, we set the
        # "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
        # that the warning has already been issued.
        model.warnings_issued["estimate_tokens"] = True
        # Compute that only on the main process for faster data processing.
        # see: https://github.com/huggingface/trl/pull/1255
        with PartialState().main_process_first():
            # Extract the prompt if needed, and apply the chat template if needed
            train_dataset = train_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
            train_dataset = train_dataset.map(
                maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc
            )
            train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
            if eval_dataset is not None:
                eval_dataset = eval_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
                eval_dataset = eval_dataset.map(
                    maybe_apply_chat_template,
                    fn_kwargs={"tokenizer": processing_class},
                    num_proc=args.dataset_num_proc,
                )
                eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
        super().__init__(
            model=model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            model_init=model_init,
            compute_metrics=compute_metrics,
            callbacks=callbacks,
            optimizers=optimizers,
            preprocess_logits_for_metrics=preprocess_logits_for_metrics,
        )
        # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
        # model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
        # self.model_accepts_loss_kwargs to False to enable scaling.
        self.model_accepts_loss_kwargs = False
        # Add tags for models that have been loaded with the correct transformers version
        if hasattr(self.model, "add_model_tags"):
            self.model.add_model_tags(self._tag_names)
        if not hasattr(self, "accelerator"):
            raise AttributeError(
                "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
            )
    def build_tokenized_answer(self, prompt, answer):
        """
        Llama tokenizer does not satisfy `enc(a + b) = enc(a) + enc(b)`. It does ensure
        `enc(a + b) = enc(a) + enc(a + b)[len(enc(a)):]`. Reference:
        https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
        """
        full_tokenized = self.processing_class(prompt + answer, add_special_tokens=False)
        prompt_input_ids = self.processing_class(prompt, add_special_tokens=False)["input_ids"]
        answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :]
        answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :]
        # Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
        full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids])
        # Prepare input tokens for token-by-token comparison
        full_input_ids = np.array(full_tokenized["input_ids"])
        if len(full_input_ids) != len(full_concat_input_ids):
            raise ValueError("Prompt input ids and answer input ids should have the same length.")
        # On some tokenizers, like the Llama-2 tokenizer, there are occasions where tokens
        # can be merged together when tokenizing prompt+answer. This could result
        # in the last token of the prompt being different when tokenized on its own
        # vs when done as prompt+answer.
        response_token_ids_start_idx = len(prompt_input_ids)
        # If the tokenized prompt differs from the prompt portion of prompt+answer, the
        # last token has changed due to merging.
        if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]:
            response_token_ids_start_idx -= 1
        prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx]
        prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx]
        if len(prompt_input_ids) != len(prompt_attention_mask):
            raise ValueError("Prompt input ids and attention mask should have the same length.")
        answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:]
        answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:]
        return dict(
            prompt_input_ids=prompt_input_ids,
            prompt_attention_mask=prompt_attention_mask,
            input_ids=answer_input_ids,
            attention_mask=answer_attention_mask,
        )
    def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]] = None) -> dict:
        """Tokenize a single row from an ORPO-specific dataset.

        At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation in case the prompt +
        chosen or prompt + rejected responses is/are too long. First we truncate the prompt; if we're still too long,
        we truncate the chosen/rejected.

        We also create the labels for the chosen/rejected responses, which are of length equal to the sum of the
        length of the prompt and the chosen/rejected response, with label_pad_token_id for the prompt tokens.
        """
        batch = {}
        prompt = feature["prompt"]
        chosen = feature["chosen"]
        rejected = feature["rejected"]
        if not self.is_encoder_decoder:
            # Check the issues below for more details
            # 1. https://github.com/huggingface/trl/issues/907
            # 2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
            # 3. https://github.com/LianjiaTech/BELLE/issues/337
            if not isinstance(prompt, str):
                raise ValueError(f"prompt should be a str but got {type(prompt)}")
            prompt_tokens = self.processing_class(prompt, add_special_tokens=False)
            prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()}
            if not isinstance(chosen, str):
                raise ValueError(f"chosen should be a str but got {type(chosen)}")
            chosen_tokens = self.build_tokenized_answer(prompt, chosen)
            if not isinstance(rejected, str):
                raise ValueError(f"rejected should be a str but got {type(rejected)}")
            rejected_tokens = self.build_tokenized_answer(prompt, rejected)
            # The last prompt token might get merged by the tokenizer, and
            # it should not be included for generation if that happens
            prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"])
            chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"])
            rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"])
            prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids)
            for k, v in prompt_tokens.items():
                prompt_tokens[k] = v[:prompt_len_input_ids]
            # Make sure the prompts differ by at most one token,
            # and that their lengths differ by at most 1
            num_diff_tokens = sum(
                a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])
            )
            num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids)
            if num_diff_tokens > 1 or num_diff_len > 1:
                raise ValueError(
                    "Chosen and rejected prompt_input_ids might only differ on the "
                    "last token due to tokenizer merge ops."
                )
            # add BOS token to head of prompt. Avoid adding if it's already there
            prompt_tokens, chosen_tokens, rejected_tokens = add_bos_token_if_needed(
                self.processing_class.bos_token_id,
                prompt_len_input_ids,
                prompt_tokens,
                chosen_prompt_len_input_ids,
                chosen_tokens,
                rejected_prompt_len_input_ids,
                rejected_tokens,
            )
            # add EOS token to end of answer. Avoid adding if it's already there
            chosen_tokens, rejected_tokens = add_eos_token_if_needed(
                self.processing_class.eos_token_id, chosen_tokens, rejected_tokens
            )
            longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))
            # if the combined sequence is too long, truncate the prompt
            for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
                if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
                    if self.truncation_mode == "keep_start":
                        for k in ["prompt_input_ids", "prompt_attention_mask"]:
                            answer_tokens[k] = answer_tokens[k][: self.max_prompt_length]
                    elif self.truncation_mode == "keep_end":
                        for k in ["prompt_input_ids", "prompt_attention_mask"]:
                            answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :]
                    else:
                        raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")
            # if that's still too long, truncate the response
            for answer_tokens in [chosen_tokens, rejected_tokens]:
                if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
                    for k in ["input_ids", "attention_mask"]:
                        answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length]
            # Create labels
            chosen_sequence_tokens = {
                k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"]
            }
            rejected_sequence_tokens = {
                k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"]
            }
            chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
            chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [
                self.label_pad_token_id
            ] * len(chosen_tokens["prompt_input_ids"])
            rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
            rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [
                self.label_pad_token_id
            ] * len(rejected_tokens["prompt_input_ids"])
            for k, toks in {
                "chosen_": chosen_sequence_tokens,
                "rejected_": rejected_sequence_tokens,
                "": prompt_tokens,
            }.items():
                for type_key, tokens in toks.items():
                    if type_key == "token_type_ids":
                        continue
                    batch[f"{k}{type_key}"] = tokens
        else:
            chosen_tokens = self.processing_class(
                chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
            )
            rejected_tokens = self.processing_class(
                rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
            )
            prompt_tokens = self.processing_class(
                prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True
            )
            batch["chosen_labels"] = chosen_tokens["input_ids"]
            batch["rejected_labels"] = rejected_tokens["input_ids"]
            batch["prompt_input_ids"] = prompt_tokens["input_ids"]
            batch["prompt_attention_mask"] = prompt_tokens["attention_mask"]
            if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
                batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
                    labels=torch.tensor(batch["rejected_labels"])
                )
                batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
                    labels=torch.tensor(batch["chosen_labels"])
                )
        if is_torch_xla_available():
            # Pad the sequences to global max_length to avoid TorchXLA recompilation
            for k in batch:
                if "labels" in k or self.is_encoder_decoder:
                    pad_value = self.label_pad_token_id
                elif k.endswith("_input_ids"):
                    pad_value = self.padding_value
                elif k.endswith("_attention_mask"):
                    pad_value = 0
                batch[k] = batch[k] + [pad_value] * (self.max_length - len(batch[k]))
        return batch
    @staticmethod
    def concatenated_inputs(
        batch: dict[str, Union[list, torch.LongTensor]],
        is_encoder_decoder: bool = False,
        label_pad_token_id: int = -100,
        padding_value: int = 0,
        device: Optional[torch.device] = None,
    ) -> dict[str, torch.LongTensor]:
        """Concatenate the chosen and rejected inputs into a single tensor.

        Args:
            batch:
                A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors
                of shape (batch_size, sequence_length).
            is_encoder_decoder:
                Whether the model is an encoder-decoder model.
            label_pad_token_id:
                The label pad token id.
            padding_value:
                The padding value to use for the concatenated input_ids.
            device:
                The device for the concatenated inputs.

        Returns:
            A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
        """
        concatenated_batch = {}
        if is_encoder_decoder:
            max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1])
        else:
            max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])
        for k in batch:
            if k.startswith("chosen") and isinstance(batch[k], torch.Tensor):
                if "labels" in k or is_encoder_decoder:
                    pad_value = label_pad_token_id
                elif k.endswith("_input_ids"):
                    pad_value = padding_value
                elif k.endswith("_attention_mask"):
                    pad_value = 0
                concatenated_key = k.replace("chosen", "concatenated")
                concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
        for k in batch:
            if k.startswith("rejected") and isinstance(batch[k], torch.Tensor):
                if "labels" in k or is_encoder_decoder:
                    pad_value = label_pad_token_id
                elif k.endswith("_input_ids"):
                    pad_value = padding_value
                elif k.endswith("_attention_mask"):
                    pad_value = 0
                concatenated_key = k.replace("rejected", "concatenated")
                concatenated_batch[concatenated_key] = torch.cat(
                    (
                        concatenated_batch[concatenated_key],
                        pad_to_length(batch[k], max_length, pad_value=pad_value),
                    ),
                    dim=0,
                ).to(device=device)
        if is_encoder_decoder:
            concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device)
            concatenated_batch["concatenated_attention_mask"] = (
                batch["prompt_attention_mask"].repeat(2, 1).to(device=device)
            )
        return concatenated_batch
    def odds_ratio_loss(
        self,
        policy_chosen_logps: torch.FloatTensor,
        policy_rejected_logps: torch.FloatTensor,
    ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Compute ORPO's odds ratio (OR) loss for a batch of policy and reference model log probabilities.

        Args:
            policy_chosen_logps:
                Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
            policy_rejected_logps:
                Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)

        Returns:
            A tuple of five tensors: (losses, chosen_rewards, rejected_rewards, log_odds_ratio, log_odds_chosen). The
            losses tensor contains the ORPO loss for each example in the batch. The chosen_rewards and
            rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
            log_odds_ratio is the mean `log(sigmoid(log_odds))` and log_odds_chosen is the mean log odds of the chosen
            over the rejected responses, both for logging purposes.
        """
        # Derived from Eqs. (4) and (7) from https://huggingface.co/papers/2403.07691 by using log identities and exp(log(P(y|x))) = P(y|x)
        log_odds = (policy_chosen_logps - policy_rejected_logps) - (
            torch.log1p(-torch.exp(policy_chosen_logps)) - torch.log1p(-torch.exp(policy_rejected_logps))
        )
        ratio = F.logsigmoid(log_odds)
        losses = self.beta * ratio
        chosen_rewards = self.beta * (policy_chosen_logps.to(self.accelerator.device)).detach()
        rejected_rewards = self.beta * (policy_rejected_logps.to(self.accelerator.device)).detach()
        return losses, chosen_rewards, rejected_rewards, torch.mean(ratio), torch.mean(log_odds)
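    # Worked numeric sketch of the odds-ratio term above (Eqs. (4) and (7) of the paper):
    #   odds(y|x) = P(y|x) / (1 - P(y|x)), so with P(chosen) = 0.6 and P(rejected) = 0.2,
    #   log_odds  = log(1.5) - log(0.25) = log(6) ≈ 1.79,
    #   ratio     = logsigmoid(1.79) ≈ -0.154, so losses ≈ -0.154 * beta,
    # and get_batch_loss_metrics subtracts losses.mean() from the NLL loss, making the
    # ORPO penalty a positive addition to the objective.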
    @staticmethod
    def get_batch_logps(
        logits: torch.FloatTensor,
        labels: torch.LongTensor,
        average_log_prob: bool = False,
        label_pad_token_id: int = -100,
        is_encoder_decoder: bool = False,
    ) -> torch.FloatTensor:
        """Compute the log probabilities of the given labels under the given logits.

        Args:
            logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
            labels:
                Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are
                ignored. Shape: (batch_size, sequence_length)
            average_log_prob:
                If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the
                log probabilities of the (non-masked) tokens.
            label_pad_token_id: The label pad token id.
            is_encoder_decoder: Whether the model is an encoder-decoder model.

        Returns:
            A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the
            given logits.
        """
        if logits.shape[:-1] != labels.shape:
            raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
        if not is_encoder_decoder:
            labels = labels[:, 1:].clone()
            logits = logits[:, :-1, :]
        loss_mask = labels != label_pad_token_id
        # dummy token; we'll ignore the losses on these tokens later
        labels = torch.where(labels == label_pad_token_id, 0, labels)
        per_token_logps = selective_log_softmax(logits, labels)
        if average_log_prob:
            return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
        else:
            return (per_token_logps * loss_mask).sum(-1)
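    # Shape sketch: for labels = [[-100, 7, 9]] (prompt token masked out), the causal shift
    # keeps only the positions predicting tokens 7 and 9; with average_log_prob=True the
    # result is their mean log-prob, which is the per-response logp consumed by odds_ratio_loss.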
| def concatenated_forward( | |
| self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]] | |
| ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: | |
| """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together. | |
| We do this to avoid doing two forward passes, because it's faster for FSDP. | |
| """ | |
| concatenated_batch = self.concatenated_inputs( | |
| batch, | |
| is_encoder_decoder=self.is_encoder_decoder, | |
| label_pad_token_id=self.label_pad_token_id, | |
| padding_value=self.padding_value, | |
| device=self.accelerator.device, | |
| ) | |
| len_chosen = batch["chosen_labels"].shape[0] | |
| model_kwargs = ( | |
| { | |
| "decoder_input_ids": self._shift_right(concatenated_batch["concatenated_labels"]), | |
| } | |
| if self.is_encoder_decoder | |
| else {} | |
| ) | |
| if self.aux_loss_enabled: | |
| model_kwargs["output_router_logits"] = True | |
| outputs = model( | |
| concatenated_batch["concatenated_input_ids"], | |
| attention_mask=concatenated_batch["concatenated_attention_mask"], | |
| use_cache=False, | |
| **model_kwargs, | |
| ) | |
| all_logits = outputs.logits | |
| def cross_entropy_loss(logits, labels): | |
| if not self.is_encoder_decoder: | |
| # Shift so that tokens < n predict n | |
| logits = logits[..., :-1, :].contiguous() | |
| labels = labels[..., 1:].contiguous() | |
| # Flatten the tokens | |
| loss_fct = nn.CrossEntropyLoss() | |
| logits = logits.view(-1, logits.shape[-1]) | |
| labels = labels.view(-1) | |
| # Enable model parallelism | |
| labels = labels.to(logits.device) | |
| loss = loss_fct(logits, labels) | |
| return loss | |
| if self.is_encoder_decoder: | |
| labels = concatenated_batch["concatenated_labels"].clone() | |
| else: | |
| labels = concatenated_batch["concatenated_input_ids"].clone() | |
| attention_mask = concatenated_batch["concatenated_attention_mask"] | |
| labels = torch.where(attention_mask == 1, labels, self.label_pad_token_id) | |
| # The ORPO chosen NLL loss is computed over the full prompt and response | |
| chosen_nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen]) | |
| all_logps = self.get_batch_logps( | |
| all_logits, | |
| concatenated_batch["concatenated_labels"], | |
| average_log_prob=True, | |
| is_encoder_decoder=self.is_encoder_decoder, | |
| label_pad_token_id=self.label_pad_token_id, | |
| ) | |
| chosen_logps = all_logps[:len_chosen] | |
| rejected_logps = all_logps[len_chosen:] | |
| if not self.is_encoder_decoder: | |
| chosen_logits = all_logits[:len_chosen, :-1, :] | |
| rejected_logits = all_logits[len_chosen:, :-1, :] | |
| else: | |
| chosen_logits = all_logits[:len_chosen] | |
| rejected_logits = all_logits[len_chosen:] | |
| if self.aux_loss_enabled: | |
| return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_nll_loss, outputs.aux_loss) | |
| return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_nll_loss) | |
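| # Layout sketch (hedged): `concatenated_inputs` stacks all chosen rows first, | |
| # then all rejected rows, so a single forward pass serves both halves. With | |
| # batch size B, rows [:B] are chosen and rows [B:] are rejected, which is | |
| # exactly the `len_chosen` split used above. `_demo_concat_split` is a | |
| # hypothetical toy restatement of that indexing. | |
| def _demo_concat_split(chosen, rejected): | |
| both = torch.cat([chosen, rejected], dim=0)  # (2B, ...) single forward input | |
| len_chosen = chosen.shape[0] | |
| return both[:len_chosen], both[len_chosen:]  # recover the two halves | |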
| def get_batch_loss_metrics( | |
| self, | |
| model, | |
| batch: dict[str, Union[list, torch.LongTensor]], | |
| train_eval: Literal["train", "eval"] = "train", | |
| ): | |
| """Compute the ORPO loss and other metrics for the given batch of inputs for train or test.""" | |
| metrics = {} | |
| forward_output = self.concatenated_forward(model, batch) | |
| ( | |
| policy_chosen_logps, | |
| policy_rejected_logps, | |
| policy_chosen_logits, | |
| policy_rejected_logits, | |
| policy_nll_loss, | |
| ) = forward_output[:5] | |
| if self.aux_loss_enabled: | |
| aux_loss = forward_output[5] | |
| losses, chosen_rewards, rejected_rewards, log_odds_ratio, log_odds_chosen = self.odds_ratio_loss( | |
| policy_chosen_logps, policy_rejected_logps | |
| ) | |
| # Full ORPO loss: chosen-sequence NLL plus the odds-ratio penalty, i.e. NLL - mean(beta * logsigmoid(log_odds)) | |
| loss = policy_nll_loss - losses.mean() | |
| reward_accuracies = (chosen_rewards > rejected_rewards).float() | |
| prefix = "eval_" if train_eval == "eval" else "" | |
| metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean() | |
| metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean() | |
| metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean() | |
| metrics[f"{prefix}rewards/margins"] = self.accelerator.gather_for_metrics( | |
| chosen_rewards - rejected_rewards | |
| ).mean() | |
| metrics[f"{prefix}logps/rejected"] = self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean() | |
| metrics[f"{prefix}logps/chosen"] = self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean() | |
| metrics[f"{prefix}logits/rejected"] = self.accelerator.gather_for_metrics( | |
| policy_rejected_logits.detach().mean() | |
| ).mean() | |
| metrics[f"{prefix}logits/chosen"] = self.accelerator.gather_for_metrics( | |
| policy_chosen_logits.detach().mean() | |
| ).mean() | |
| metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean() | |
| metrics[f"{prefix}log_odds_ratio"] = self.accelerator.gather_for_metrics(log_odds_ratio).detach().mean() | |
| metrics[f"{prefix}log_odds_chosen"] = self.accelerator.gather_for_metrics(log_odds_chosen).detach().mean() | |
| if is_torch_xla_available(): | |
| import torch_xla.core.xla_model as xm  # not among the imports above; load it here to avoid a NameError | |
| xm.mark_step()  # flush pending XLA ops before the .item() calls below | |
| for k, v in metrics.items(): | |
| metrics[k] = v.item() | |
| if self.aux_loss_enabled: | |
| loss += self.aux_loss_coef * aux_loss | |
| return loss, metrics | |
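| # Objective sketch (hedged): combining the pieces above, the scalar loss | |
| # returned here is loss = NLL(chosen) - mean(beta * logsigmoid(log_odds)), | |
| # i.e. a standard SFT term plus the odds-ratio penalty from the ORPO paper. | |
| # `_demo_orpo_loss` is a hypothetical standalone restatement, not the | |
| # trainer's actual code path. | |
| def _demo_orpo_loss(nll_loss, chosen_logps, rejected_logps, beta=0.1): | |
| log_odds = (chosen_logps - rejected_logps) - ( | |
| torch.log1p(-torch.exp(chosen_logps)) - torch.log1p(-torch.exp(rejected_logps)) | |
| ) | |
| return nll_loss - (beta * F.logsigmoid(log_odds)).mean() | |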
| def compute_loss( | |
| self, | |
| model: Union[PreTrainedModel, nn.Module], | |
| inputs: dict[str, Union[torch.Tensor, Any]], | |
| return_outputs=False, | |
| num_items_in_batch=None, | |
| ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]: | |
| compute_loss_context_manager = ( | |
| autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() | |
| ) | |
| with compute_loss_context_manager: | |
| loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train") | |
| # Move the loss back to the device on which the `Trainer` class accumulates the running loss: | |
| loss = loss.to(self.args.device) | |
| # force log the metrics | |
| self.store_metrics(metrics, train_eval="train") | |
| if return_outputs: | |
| return (loss, metrics) | |
| return loss | |
| def generate_from_model(self, model, batch: dict[str, torch.LongTensor]) -> str: | |
| """Generate samples from the model and reference model for the given batch of inputs.""" | |
| # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with | |
| # the torch amp context manager, as some hidden states are silently cast to full precision. | |
| generate_context_manager = ( | |
| autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() | |
| ) | |
| with generate_context_manager: | |
| policy_output = model.generate( | |
| input_ids=batch["prompt_input_ids"], | |
| attention_mask=batch["prompt_attention_mask"], | |
| max_length=self.max_length, | |
| do_sample=True, | |
| pad_token_id=self.processing_class.pad_token_id, | |
| ) | |
| policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id) | |
| policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True) | |
| return policy_output_decoded | |
| def prediction_step( | |
| self, | |
| model: Union[PreTrainedModel, nn.Module], | |
| inputs: dict[str, Union[torch.Tensor, Any]], | |
| prediction_loss_only: bool, | |
| ignore_keys: Optional[list[str]] = None, | |
| ): | |
| if not self.use_dpo_data_collator: | |
| logger.warning( | |
| "prediction_step is only implemented for DPODataCollatorWithPadding; you passed a data collator that is different from " | |
| "DPODataCollatorWithPadding, so you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator." | |
| ) | |
| if ignore_keys is None: | |
| if hasattr(model, "config"): | |
| ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", []) | |
| else: | |
| ignore_keys = [] | |
| prediction_context_manager = ( | |
| autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() | |
| ) | |
| with torch.no_grad(), prediction_context_manager: | |
| loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval") | |
| # force log the metrics | |
| self.store_metrics(metrics, train_eval="eval") | |
| if prediction_loss_only: | |
| return (loss.detach(), None, None) | |
| # logits for the chosen and rejected samples from model | |
| logits_dict = { | |
| "eval_logits/chosen": metrics["eval_logits/chosen"], | |
| "eval_logits/rejected": metrics["eval_logits/rejected"], | |
| } | |
| logits = [v for k, v in logits_dict.items() if k not in ignore_keys] | |
| logits = torch.tensor(logits, device=self.accelerator.device) | |
| labels = torch.zeros(logits.shape[0], device=self.accelerator.device) | |
| return (loss.detach(), logits, labels) | |
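| # Note (hedged): the zero-valued `labels` returned above are dummies so the | |
| # generic `Trainer` evaluation loop has a labels tensor to aggregate; only the | |
| # batch-mean chosen/rejected logits are surfaced as "logits" here. | |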
| def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None: | |
| for key, value in metrics.items(): | |
| self._stored_metrics[train_eval][key].append(value) | |
| def evaluation_loop( | |
| self, | |
| dataloader: DataLoader, | |
| description: str, | |
| prediction_loss_only: Optional[bool] = None, | |
| ignore_keys: Optional[list[str]] = None, | |
| metric_key_prefix: str = "eval", | |
| ) -> EvalLoopOutput: | |
| """ | |
| Overriding built-in evaluation loop to store metrics for each batch. Prediction/evaluation loop, shared by | |
| `Trainer.evaluate()` and `Trainer.predict()`. | |
| Works both with or without labels. | |
| """ | |
| # Sample and save to the game log if requested (only one batch, to save time) | |
| if self.generate_during_eval: | |
| # Generate random indices within the range of the total number of samples | |
| num_samples = len(dataloader.dataset) | |
| random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) | |
| # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader | |
| random_batch_dataset = dataloader.dataset.select(random_indices) | |
| random_batch = self.data_collator(random_batch_dataset) | |
| random_batch = self._prepare_inputs(random_batch) | |
| policy_output_decoded = self.generate_from_model(self.model, random_batch) | |
| table = pd.DataFrame( | |
| columns=["Prompt", "Policy"], | |
| data=[ | |
| [prompt, pol[len(prompt) :]] for prompt, pol in zip(random_batch["prompt"], policy_output_decoded) | |
| ], | |
| ) | |
| if "wandb" in self.args.report_to: | |
| wandb.log({"game_log": wandb.Table(data=table)}) | |
| if "comet_ml" in self.args.report_to: | |
| log_table_to_comet_experiment( | |
| name="game_log.csv", | |
| table=table, | |
| ) | |
| # Base evaluation | |
| initial_output = super().evaluation_loop( | |
| dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix | |
| ) | |
| return initial_output | |
| def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None: | |
| """ | |
| Log `logs` on the various objects watching training, including stored metrics. | |
| Args: | |
| logs (`dict[str, float]`): | |
| The values to log. | |
| start_time (`float`, *optional*): | |
| Start time of the training. | |
| """ | |
| # logs either has 'loss' or 'eval_loss' | |
| train_eval = "train" if "loss" in logs else "eval" | |
| # Add averaged stored metrics to logs | |
| for key, metrics in self._stored_metrics[train_eval].items(): | |
| logs[key] = torch.tensor(metrics).mean().item() | |
| del self._stored_metrics[train_eval] | |
| return super().log(logs, start_time) | |
| def _shift_right(self, input_ids): | |
| if self.decoder_start_token_id is None: | |
| raise ValueError( | |
| "model.config.decoder_start_token_id has to be defined. It is usually set to the pad_token_id." | |
| ) | |
| # shift inputs to the right | |
| if is_torch_fx_proxy(input_ids): | |
| # Item assignment is not supported natively for proxies. | |
| shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id) | |
| shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1) | |
| else: | |
| shifted_input_ids = input_ids.new_zeros(input_ids.shape) | |
| shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() | |
| shifted_input_ids[..., 0] = self.decoder_start_token_id | |
| if self.pad_token_id is None: | |
| raise ValueError("model.config.pad_token_id has to be defined.") | |
| # replace possible -100 values in labels by `pad_token_id` | |
| shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id) | |
| return shifted_input_ids | |
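| # Behaviour sketch (hedged): with decoder_start_token_id = 0, `_shift_right` | |
| # maps e.g. [[5, 6, 7]] -> [[0, 5, 6]]: it prepends the start token, drops the | |
| # final position, and replaces any -100 label padding with pad_token_id. | |
| # `_demo_shift_right` is a hypothetical restatement of the non-fx branch. | |
| def _demo_shift_right(input_ids, start_id, pad_id): | |
| shifted = input_ids.new_zeros(input_ids.shape) | |
| shifted[..., 1:] = input_ids[..., :-1].clone() | |
| shifted[..., 0] = start_id | |
| return shifted.masked_fill(shifted == -100, pad_id)  # -100 labels -> pad | |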
| # Ensure the model card is saved along with the checkpoint | |
| def _save_checkpoint(self, model, trial): | |
| if self.args.hub_model_id is None: | |
| model_name = Path(self.args.output_dir).name | |
| else: | |
| model_name = self.args.hub_model_id.split("/")[-1] | |
| self.create_model_card(model_name=model_name) | |
| super()._save_checkpoint(model, trial) | |
| class UnslothORPOTrainer(_UnslothORPOTrainer): | |
| """ | |
| Initialize ORPOTrainer. | |
| Args: | |
| model ([`~transformers.PreTrainedModel`]): | |
| The model to train, preferably an [`~transformers.AutoModelForCausalLM`]. | |
| args ([`ORPOConfig`]): | |
| The ORPO config arguments to use for training. | |
| data_collator ([`~transformers.DataCollator`]): | |
| The data collator to use for training. If None is specified, the default data collator | |
| ([`DPODataCollatorWithPadding`]) will be used, which will pad the sequences to the maximum length of the | |
| sequences in the batch, given a dataset of paired sequences. | |
| train_dataset ([`~datasets.Dataset`]): | |
| The dataset to use for training. | |
| eval_dataset ([`~datasets.Dataset`]): | |
| The dataset to use for evaluation. | |
| processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*): | |
| Processing class used to process the data. If provided, will be used to automatically process the inputs | |
| for the model, and it will be saved along the model to make it easier to rerun an interrupted training or | |
| reuse the fine-tuned model. | |
| model_init (`Callable[[], transformers.PreTrainedModel]`): | |
| The model initializer to use for training. If None is specified, the default model initializer will be | |
| used. | |
| callbacks (`list[transformers.TrainerCallback]`): | |
| The callbacks to use for training. | |
| optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): | |
| The optimizer and scheduler to use for training. | |
| preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): | |
| The function to use to preprocess the logits before computing the metrics. | |
| peft_config (`dict`, defaults to `None`): | |
| The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in | |
| a PEFT model. | |
| compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*): | |
| The function to use to compute the metrics. Must take an `EvalPrediction` and return a dictionary | |
| mapping metric names (strings) to metric values. | |
| """ | |
| def __init__( | |
| self, | |
| model = None, | |
| args = None, | |
| data_collator = None, | |
| train_dataset = None, | |
| eval_dataset = None, | |
| processing_class = None, | |
| model_init = None, | |
| callbacks = None, | |
| preprocess_logits_for_metrics = None, | |
| peft_config = None, | |
| compute_metrics = None, | |
| **kwargs | |
| ): | |
| if args is None: args = UnslothORPOConfig() | |
| use_bf16 = getattr(args, 'bf16', False) | |
| if type(use_bf16) is not bool: use_bf16 = False | |
| use_fp16 = getattr(args, 'fp16', False) | |
| if type(use_fp16) is not bool: use_fp16 = False | |
| force_float32 = False | |
| full_finetuning = os.environ.get('UNSLOTH_ENABLE_FULL_FINETUNING', '0') == '1' | |
| if not full_finetuning and (os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1'): | |
| print('Unsloth: Switching to float32 training since model cannot work with float16') | |
| force_float32 = True | |
| mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') | |
| dtype = getattr(model.config, 'dtype', None) or getattr(model.config, 'torch_dtype', None) | |
| if dtype is None: dtype = model.get_input_embeddings().weight.dtype | |
| from unsloth_zoo.utils import _get_dtype | |
| dtype = _get_dtype(dtype) | |
| float16 = dtype == torch.float16 | |
| if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`') | |
| if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`') | |
| if force_float32: | |
| # Forced float32 training | |
| args.fp16 = False | |
| args.bf16 = False | |
| os.environ['ACCELERATE_MIXED_PRECISION'] = 'no' | |
| if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no' | |
| # args.mixed_precision is a new argument which needs to be set now | |
| elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32': | |
| # Mixed precision training | |
| args.fp16 = float16 | |
| args.bf16 = not float16 | |
| os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16' | |
| if hasattr(args, 'mixed_precision'): args.mixed_precision = 'fp16' if float16 else 'bf16' | |
| # args.mixed_precision is a new argument which needs to be set now | |
| elif mixed_precision_dtype == 'bfloat16': | |
| # Both False since bfloat16 full finetuning doesn't do any autocasting. | |
| args.fp16 = False | |
| args.bf16 = False | |
| os.environ['ACCELERATE_MIXED_PRECISION'] = 'no' | |
| if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no' | |
| # args.mixed_precision is a new argument which needs to be set now | |
| if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no': | |
| args.eval_strategy = 'steps' | |
| if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1 | |
| ga_steps = getattr(args, 'gradient_accumulation_steps', None) | |
| if ga_steps is not None and ga_steps > 1: | |
| from transformers import __version__ as transformers_version | |
| if Version(transformers_version) <= Version('4.45.2'): | |
| print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n' | |
| '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`') | |
| if getattr(args, 'eval_strategy', 'no') != 'no': | |
| eval_bsz = getattr(args, 'per_device_eval_batch_size', 8) | |
| if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size | |
| if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps | |
| fp16_full_eval = getattr(args, 'fp16_full_eval', False) | |
| if type(fp16_full_eval) is not bool: fp16_full_eval = False | |
| bf16_full_eval = getattr(args, 'bf16_full_eval', False) | |
| if type(bf16_full_eval) is not bool: bf16_full_eval = False | |
| if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True | |
| if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False | |
| if force_float32: | |
| args.bf16_full_eval = False | |
| args.fp16_full_eval = False | |
| elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16': | |
| args.bf16_full_eval = True | |
| args.fp16_full_eval = False | |
| elif not bf16_full_eval and not fp16_full_eval: | |
| args.bf16_full_eval = args.bf16 | |
| args.fp16_full_eval = args.fp16 | |
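| # Precedence sketch (hedged): the full-eval dtype resolution above is, in | |
| # order: forced float32 disables both eval flags; UNSLOTH_MIXED_PRECISION == | |
| # 'bfloat16' selects bf16_full_eval; otherwise, if the user set neither flag, | |
| # the eval dtypes simply inherit args.bf16 / args.fp16. | |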
| _output_logits = False | |
| if locals().get('compute_metrics', None) is not None: _output_logits = True | |
| if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True | |
| if _output_logits: | |
| os.environ['UNSLOTH_RETURN_LOGITS'] = '1' | |
| if model is not None: | |
| _warnings_issued = getattr(model, 'warnings_issued', None) | |
| if _warnings_issued is None: | |
| model.warnings_issued = {} | |
| elif not isinstance(_warnings_issued, dict): | |
| try: | |
| model.warnings_issued = dict(_warnings_issued) | |
| except Exception: | |
| model.warnings_issued = {} | |
| if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'): | |
| pass | |
| else: | |
| model_max_seq_length = getattr(model, 'max_seq_length', None) | |
| args_max_seq_length = getattr(args, 'max_seq_length', None) | |
| if args_max_seq_length is None and model_max_seq_length is not None: | |
| max_seq_length = model.max_seq_length | |
| if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length | |
| elif args_max_seq_length is not None and model_max_seq_length is not None: | |
| if args_max_seq_length > model_max_seq_length: | |
| print('Unsloth: You set `max_seq_length` as ' + str(args_max_seq_length) + ' but ' | |
| 'the maximum the model supports is ' + str(model_max_seq_length) + '. We shall reduce it.') | |
| args.max_seq_length = model_max_seq_length | |
| if model is not None and hasattr(model, 'for_training'): | |
| model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True)) | |
| if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right' | |
| if 'processing_class' in locals(): | |
| if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right' | |
| if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right' | |
| __tokenizer = processing_class if 'processing_class' in locals() else tokenizer | |
| from unsloth_zoo.vision_utils import UnslothVisionDataCollator | |
| if not isinstance(data_collator, UnslothVisionDataCollator): | |
| if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names: | |
| data_collator = TransformersDataCollatorForLanguageModeling( | |
| __tokenizer, | |
| mlm = False, | |
| mlm_probability = 0.0, | |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), | |
| ) | |
| elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names: | |
| data_collator = DataCollatorForSeq2Seq( | |
| __tokenizer, | |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), | |
| ) | |
| else: | |
| if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False | |
| if hasattr(args, 'dataset_text_field'): args.dataset_text_field = '' | |
| if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True} | |
| if not isinstance(data_collator, UnslothVisionDataCollator): | |
| if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'): | |
| if isinstance(data_collator, DataCollatorForSeq2Seq): | |
| data_collator = DataCollatorForSeq2Seq( | |
| __tokenizer.tokenizer, | |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), | |
| ) | |
| else: | |
| data_collator = TransformersDataCollatorForLanguageModeling( | |
| __tokenizer.tokenizer, | |
| mlm = False, | |
| mlm_probability = 0.0, | |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), | |
| ) | |
| other_metrics = [] | |
| from unsloth_zoo.logging_utils import PatchRLStatistics | |
| PatchRLStatistics('orpo_trainer', other_metrics) | |
| # [TODO] Fix up DataParallel multiplying batch sizes | |
| # [TODO] DDP works, but DP seems not to work | |
| if getattr(args, "parallel_mode", None) == ParallelMode.NOT_DISTRIBUTED and args.n_gpu > 1: | |
| if getattr(args, "_n_gpu", 1) != 1: | |
| args._n_gpu = 1 | |
| if "model" in locals() and hasattr(model, "for_training"): | |
| model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True)) | |
| super().__init__( | |
| model = model, | |
| args = args, | |
| data_collator = data_collator, | |
| train_dataset = train_dataset, | |
| eval_dataset = eval_dataset, | |
| processing_class = processing_class, | |
| model_init = model_init, | |
| callbacks = callbacks, | |
| preprocess_logits_for_metrics = preprocess_logits_for_metrics, | |
| peft_config = peft_config, | |
| compute_metrics = compute_metrics,**kwargs) | |
| if "model" in locals() and hasattr(model, "for_inference"): | |
| model.for_inference() | |
| if hasattr(self, 'neftune_hook_handle'): | |
| self.neftune_hook_handle.remove() | |
| if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle | |
| if getattr(args, 'neftune_noise_alpha', None) is not None: | |
| model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha | |
| pass | |
| if hasattr(self, 'accelerator'): | |
| scaler = self.accelerator.scaler | |
| current_model = model | |
| while hasattr(current_model, 'model'): | |
| current_model.accelerator_scaler = scaler | |
| current_model = current_model.model | |
| current_model.accelerator_scaler = scaler | |
| pass | |
| if hasattr(self, 'train'): | |
| self.train = MethodType(prepare_for_training_mode(self.__class__.train), self) | |
| pass | |
| if hasattr(self, 'llm') and self.llm is not None and hasattr(self.llm, 'get_tokenizer'): | |
| _vllm_tok = self.llm.get_tokenizer() | |
| _pc = getattr(self, 'processing_class', None) or getattr(self, 'tokenizer', None) | |
| if _vllm_tok is not None and _pc is not None and getattr(_pc, 'chat_template', None) is not None and getattr(_vllm_tok, 'chat_template', None) is None: | |
| _vllm_tok.chat_template = _pc.chat_template | |
| pass | |
| pass | |
| if hasattr(logger, "addFilter"): | |
| import logging | |
| class HideLoggingMessage(logging.Filter): | |
| def __init__(self, text): super().__init__(); self.text = text | |
| def filter(self, x): return self.text not in x.getMessage() | |
| pass | |
| logger.addFilter(HideLoggingMessage("`use_cache=True`")) | |
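| # Filter sketch (hedged): `HideLoggingMessage` drops any log record whose | |
| # message contains the given substring; the line above silences repeated | |
| # transformers notices mentioning `use_cache=True` (presumably the gradient | |
| # checkpointing incompatibility warning). A hypothetical reuse on another logger: | |
| def _demo_hide_filter(target_logger): | |
| target_logger.addFilter(HideLoggingMessage("`use_cache=True`")) | |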