| """ |
| 2026.2.1 |
| 2026.2.1 |
| 5.2.0 |
| 0.24.0 |
| __UNSLOTH_VERSIONING__ |
| """ |
|
|
| from torch import Tensor |
| import torch |
| import torch.nn as nn |
| from torch.nn import functional as F |
| from unsloth_zoo.temporary_patches.common import torch_compile |
| from typing import Any, List, Optional, Tuple, Union, Dict, Set, Callable |
from trl.trainer.bco_trainer import (
    Any, AutoModelForCausalLM, BCOConfig, BCOTrainer, BaseImageProcessor,
    BaseTrainer, CLF_NAME, Callable, DPODataCollatorWithPadding, DataCollator,
    DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, Literal,
    Optional, PartialState, Path, PeftModel, PreTrainedModel,
    PreTrainedTokenizerBase, ProcessorMixin, RUNNING_NAME, RunningMoments,
    SequentialSampler, TrainerCallback, TrainingArguments, Union,
    _process_tokens, _tokenize, autocast, contextmanager,
    create_reference_model, defaultdict, disable_dropout_in_model, has_length,
    inspect, is_comet_available, is_joblib_available, is_peft_available,
    is_sklearn_available, is_wandb_available, itemgetter, joblib,
    log_table_to_comet_experiment, logger, logging, maybe_apply_chat_template,
    maybe_extract_prompt, maybe_unpair_preference_dataset, nn, np, nullcontext,
    os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_deepspeed,
    prepare_model_for_kbit_training, random, selective_log_softmax, textwrap,
    torch, tqdm, warnings,
)

# `LogisticRegression` is used by the UDM density-ratio classifier below but
# was missing from the imports; import it only when scikit-learn is present.
if is_sklearn_available():
    from sklearn.linear_model import LogisticRegression
|
|
|
|
| import os |
| from typing import * |
| from dataclasses import dataclass, field |
| from packaging.version import Version |
| import torch |
| import numpy as np |
| from contextlib import nullcontext |
| from torch.nn import functional as F |
| import inspect |
| from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling |
| from transformers.training_args import ParallelMode |
| from unsloth_zoo.device_type import DEVICE_TYPE, device_synchronize |
|
|
| |
| |
| import functools |
| from types import MethodType |
try:
    from unsloth_zoo.gradient_checkpointing import reset_unsloth_gradient_checkpointing_buffers
except Exception:
    # Older unsloth_zoo versions lack this helper; fall back to a no-op.
    def reset_unsloth_gradient_checkpointing_buffers(): pass

def prepare_for_training_mode(f):
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        # Remember the model's current mode so it can be restored afterwards.
        _was_training = None
        use_gc = getattr(self.args, 'gradient_checkpointing', True)
        if hasattr(self, 'model') and hasattr(self.model, "training"):
            _was_training = self.model.training
        # Put Unsloth models into training mode before running the wrapped method.
        if hasattr(self, 'model') and hasattr(self.model, "for_training"):
            self.model.for_training(use_gradient_checkpointing=use_gc)
        output = f(self, *args, **kwargs)
        # Restore whichever mode the model was in before the call.
        if hasattr(self, 'model') and hasattr(self.model, "for_inference"):
            if _was_training is False:
                self.model.for_inference()
            elif _was_training is True and hasattr(self.model, "for_training"):
                self.model.for_training(use_gradient_checkpointing=use_gc)
        # Free any leftover gradient checkpointing buffers.
        try:
            reset_unsloth_gradient_checkpointing_buffers()
        except Exception:
            pass
        # Close any active Weights & Biases run so logging does not leak into
        # subsequent runs; a no-op when wandb is not installed or not running.
        try:
            import wandb
            wandb.finish()
        except Exception:
            pass
        return output
    return wrapper
pass
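

# Usage sketch (an illustrative addition, not part of the generated trainer):
# wrapping a trainer method so the Unsloth model is switched into training
# mode around the call. `MyTrainer` is a hypothetical subclass used only for
# illustration.
#
#     class MyTrainer(BCOTrainer):
#         @prepare_for_training_mode
#         def train(self, *args, **kwargs):
#             return super().train(*args, **kwargs)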
|
|
| torch_compile_options = { |
| "epilogue_fusion" : True, |
| "max_autotune" : False, |
| "shape_padding" : True, |
| "trace.enabled" : False, |
| "triton.cudagraphs" : False, |
| } |
|
|
| @torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,) |
| def chunked_hidden_states_selective_log_softmax( |
| hidden_states: torch.Tensor, |
| lm_head: torch.Tensor, |
| index: torch.Tensor, |
| chunks: int = 4, |
| logit_scale_multiply: float = 0.0, |
| logit_scale_divide: float = 0.0, |
| logit_softcapping: float = 0.0, |
| temperature: float = 1.0, |
) -> torch.Tensor:
    """Computes per-token log probabilities for `index` from `hidden_states`,
    materializing the logits in `chunks` pieces so the full (tokens, vocab)
    logit matrix never exists at once."""
    flat_hidden_states = hidden_states.reshape(-1, hidden_states.shape[-1])
    flat_index = index.reshape(-1)
|
|
| chunked_hidden_states = torch.chunk(flat_hidden_states, chunks=chunks, dim=0) |
| chunked_index = torch.chunk(flat_index, chunks=chunks, dim=0) |
|
|
| all_per_token_logps = [] |
|
|
| for chunk_hidden_states, chunk_index in zip(chunked_hidden_states, chunked_index): |
| chunk_logits = chunk_hidden_states.to(lm_head.dtype) @ lm_head.t() |
|
|
| if logit_scale_multiply != 0.0: |
| chunk_logits = chunk_logits * logit_scale_multiply |
| if logit_scale_divide != 0.0: |
| chunk_logits = chunk_logits / logit_scale_divide |
| if logit_softcapping != 0.0: |
| chunk_logits = chunk_logits * torch.tanh(chunk_logits / logit_softcapping) |
|
|
| chunk_logits = chunk_logits.to(torch.float32) |
|
|
| if temperature != 1.0: |
| chunk_logits = chunk_logits / temperature |
|
|
| selected_logits = torch.gather(chunk_logits, dim=-1, index=chunk_index.unsqueeze(-1)).squeeze(-1) |
| logsumexp_values = torch.logsumexp(chunk_logits, dim=-1) |
| per_token_logps = selected_logits - logsumexp_values |
| all_per_token_logps.append(per_token_logps) |
|
|
| all_per_token_logps = torch.concat(all_per_token_logps) |
|
|
| all_per_token_logps = all_per_token_logps.reshape((hidden_states.shape[0], hidden_states.shape[1])) |
| return all_per_token_logps |
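

# Usage sketch (an illustrative addition): shows the shapes
# chunked_hidden_states_selective_log_softmax expects. The `_demo_*` name and
# all sizes below are hypothetical and exist only for documentation.
def _demo_chunked_hidden_states_selective_log_softmax():
    batch, seq, hidden, vocab = 2, 8, 16, 32
    hidden_states = torch.randn(batch, seq, hidden)
    lm_head = torch.randn(vocab, hidden)            # LM head weight: (vocab, hidden)
    index = torch.randint(0, vocab, (batch, seq))   # token ids to score
    logps = chunked_hidden_states_selective_log_softmax(
        hidden_states, lm_head, index, chunks=2,
    )
    assert logps.shape == (batch, seq)              # one log-probability per token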
|
|
| @torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,) |
def chunked_selective_log_softmax(logits, index):
    """Like `selective_log_softmax`, but processes the flattened logits in 4
    chunks, upcasting each chunk to float32, to reduce peak memory."""
| chunked_logits = torch.chunk(logits.reshape(-1, logits.shape[-1]), chunks = 4, dim = 0) |
| chunked_index = torch.chunk(index.reshape(-1), chunks = 4, dim = 0) |
| all_per_token_logps = [] |
| |
| for chunk_logits, chunk_index in zip(chunked_logits, chunked_index): |
| chunk_logits = chunk_logits.to(torch.float32) |
| selected_logits = torch.gather(chunk_logits, dim = -1, index = chunk_index.unsqueeze(-1)).squeeze(-1) |
| logsumexp_values = torch.logsumexp(chunk_logits, dim = -1) |
| per_token_logps = selected_logits - logsumexp_values |
| all_per_token_logps.append(per_token_logps) |
| pass |
| all_per_token_logps = torch.concat(all_per_token_logps) |
| all_per_token_logps = all_per_token_logps.reshape((logits.shape[0], logits.shape[1])) |
| return all_per_token_logps |
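

# Usage sketch (an illustrative addition): unlike the variant above, this
# function takes pre-computed logits rather than hidden states. `_demo_*` is
# a hypothetical name used only for documentation.
def _demo_chunked_selective_log_softmax():
    batch, seq, vocab = 2, 8, 32
    logits = torch.randn(batch, seq, vocab)
    index = torch.randint(0, vocab, (batch, seq))
    logps = chunked_selective_log_softmax(logits, index)
    assert logps.shape == (batch, seq)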
|
|
| def calculate_pad_tokens_in_prompt( |
| input_ids: torch.Tensor, |
| logits_to_keep: int, |
| pad_token_id: int |
| ) -> torch.Tensor: |
| """ |
| Given prompt tensor, it returns all the left padded tokens in that sequence. so [pad, pad, pad, cat] = 3 tokens |
| """ |
| if logits_to_keep >= input_ids.shape[1]: |
| raise ValueError("logits_to_keep must be smaller than the sequence length.") |
|
|
| prompt_section = input_ids[:, :-logits_to_keep] |
|
|
| padding_mask = (prompt_section == pad_token_id) |
|
|
| pad_token_counts = padding_mask.sum(dim=1) |
|
|
| return pad_token_counts |
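

# Worked example (an illustrative addition): with pad_token_id = 0 and
# logits_to_keep = 2, the prompt section of each row is everything except the
# final 2 tokens, so [0, 0, 5, 9, 9] has prompt [0, 0, 5] with 2 pads.
def _demo_calculate_pad_tokens_in_prompt():
    input_ids = torch.tensor([[0, 0, 5, 9, 9],
                              [5, 6, 7, 9, 9]])
    counts = calculate_pad_tokens_in_prompt(input_ids, logits_to_keep=2, pad_token_id=0)
    assert counts.tolist() == [2, 0]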
|
|
| def create_completion_attention_mask( |
| completion_input_ids: torch.Tensor, |
| left_pad_tokens_per_prompt: torch.Tensor, |
| max_left_pad: int, |
| pad_token_id: int |
| ) -> torch.Tensor: |
| """ |
| Given that we have a sequence, [p,p,p,c,c,c,pad,pad,pad] |
| |
| Where p are extra prompt tokens we got from slicing the torch tensor, c is completion tokens |
| and pad are pad tokens, this function would make a completion mask that would 0 out the pad |
| and p tokens. so in this example [0,0,0,1,1,1,0,0,0] |
| """ |
| batch_size, completion_len = completion_input_ids.shape |
| device = completion_input_ids.device |
|
|
| num_tokens_to_mask = max_left_pad - left_pad_tokens_per_prompt |
|
|
| indices = torch.arange(completion_len, device=device).unsqueeze(0) |
| shift_mask = indices >= num_tokens_to_mask.unsqueeze(1) |
|
|
| non_padding_mask = (completion_input_ids != pad_token_id) |
|
|
| final_mask = shift_mask & non_padding_mask |
|
|
| return final_mask |
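

# Worked example (an illustrative addition) mirroring the docstring above:
# with 1 left pad in this row's prompt and a batch max of 3 left pads, the
# first 3 - 1 = 2 positions are leaked prompt tokens and get masked, as do pads.
def _demo_create_completion_attention_mask():
    completion_ids = torch.tensor([[7, 7, 5, 6, 0, 0]])  # 0 is the pad id
    left_pads = torch.tensor([1])                        # pads in this row's prompt
    mask = create_completion_attention_mask(completion_ids, left_pads, max_left_pad=3, pad_token_id=0)
    assert mask.tolist() == [[False, False, True, True, False, False]]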
|
|
| def left_pack_padding(tensor: torch.Tensor, pad_id: int) -> torch.Tensor: |
| """ |
| Moves all padding tokens in each sequence of a batch to the right. |
| """ |
| mask = (tensor != pad_id) |
| |
| sorted_indices = torch.argsort(mask, dim=1, descending=True, stable=True) |
| packed_tensor = torch.gather(tensor, 1, sorted_indices) |
| return packed_tensor |
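

# Worked example (an illustrative addition): non-pad tokens are packed to the
# left and all padding moves to the right, preserving relative token order.
def _demo_left_pack_padding():
    x = torch.tensor([[0, 5, 0, 6],
                      [7, 0, 8, 9]])
    packed = left_pack_padding(x, pad_id=0)
    assert packed.tolist() == [[5, 6, 0, 0], [7, 8, 9, 0]]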
|
|
| def align_logprobs_with_mask( |
| logprob_tensor: torch.Tensor, |
| attention_mask: torch.Tensor, |
| pad_value: float = 0.0 |
| ) -> torch.Tensor: |
| """ |
| Aligns a log probability tensor with a given attention mask. |
| """ |
|
|
| device = logprob_tensor.device |
| batch_size, logprob_seq_len = logprob_tensor.shape |
| mask_seq_len = attention_mask.shape[1] |
|
|
| padded_logprobs = torch.full( |
| attention_mask.shape, |
| fill_value=pad_value, |
| dtype=logprob_tensor.dtype, |
| device=device |
| ) |
|
|
| left_pad_counts = torch.argmax(attention_mask, dim=1) |
|
|
| cols = torch.arange(logprob_seq_len, device=device) |
| dest_indices = left_pad_counts.unsqueeze(1) + cols |
|
|
| |
| |
| row_indices = torch.arange(batch_size, device=device).unsqueeze(1).expand_as(dest_indices) |
|
|
| |
| |
| |
| valid_mask = dest_indices < mask_seq_len |
|
|
| |
| |
| |
| valid_rows = row_indices[valid_mask] |
| valid_cols = dest_indices[valid_mask] |
| valid_vals = logprob_tensor[valid_mask] |
|
|
| |
| |
| padded_logprobs[valid_rows, valid_cols] = valid_vals |
|
|
| return padded_logprobs |
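

# Worked example (an illustrative addition): a 2-logprob row is scattered into
# a 4-wide layout whose content starts after 1 left pad (mask = [0, 1, 1, 1]).
def _demo_align_logprobs_with_mask():
    logps = torch.tensor([[-1.0, -2.0]])
    attention_mask = torch.tensor([[0, 1, 1, 1]])
    out = align_logprobs_with_mask(logps, attention_mask, pad_value=0.0)
    assert out.tolist() == [[0.0, -1.0, -2.0, 0.0]]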
|
|
| def autotune_batch_and_chunks( |
| total_input_rows, |
| seq_len, |
| hidden_size, |
| vocab_size, |
| dtype_bytes=16, |
| multiplier=None |
| ): |
| if multiplier is None: |
| final_m = max(4, seq_len // 4096) |
| else: |
| final_m = multiplier |
|
|
| if torch.cuda.is_available(): |
| free_bytes, _ = torch.cuda.mem_get_info() |
| limit_gb = (free_bytes / (1024**3))*.80 |
| elif hasattr(torch, "xpu") and torch.xpu.is_available(): |
| |
| total_mem = torch.xpu.get_device_properties(0).total_memory |
| reserved_mem = torch.xpu.memory_reserved() |
| free_bytes = total_mem - reserved_mem |
| limit_gb = (free_bytes / (1024**3)) * 0.80 |
| else: |
| |
| limit_gb = 8.0 |
|
|
| bytes_to_gb = 1024**3 |
|
|
| b_vals = torch.arange(total_input_rows, 0, -1, device='cpu', dtype=torch.float32) |
|
|
| hidden_gb = (b_vals * seq_len * hidden_size * dtype_bytes) / bytes_to_gb |
|
|
| base_logits = ((b_vals/total_input_rows) * b_vals * seq_len * vocab_size * dtype_bytes) / bytes_to_gb |
| logits_gb = base_logits / final_m |
|
|
| total_mem_gb = hidden_gb + logits_gb |
|
|
| valid_mask = total_mem_gb <= limit_gb |
| valid_indices = torch.nonzero(valid_mask, as_tuple=False) |
|
|
| if valid_indices.shape[0] == 0: |
| |
| return 4, final_m |
|
|
| best_idx = valid_indices[0].item() |
| final_b = int(b_vals[best_idx].item()) |
|
|
| return final_b, final_m |
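

# Usage sketch (an illustrative addition): pick a sub-batch row count and
# logit-chunk multiplier for a hypothetical workload. All argument values
# below are assumptions; the result depends on the memory free at call time.
def _demo_autotune_batch_and_chunks():
    rows, multiplier = autotune_batch_and_chunks(
        total_input_rows=32, seq_len=4096,
        hidden_size=4096, vocab_size=128256,
    )
    assert 1 <= rows <= 32 and multiplier >= 4

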
| @dataclass |
| class UnslothBCOConfig(BCOConfig): |
| """ |
| |
| Configuration class for the [`BCOTrainer`]. |
| |
| This class includes only the parameters that are specific to BCO training. For a full list of training arguments, |
| please refer to the [`~transformers.TrainingArguments`] documentation. Note that default values in this class may |
| differ from those in [`~transformers.TrainingArguments`]. |
| |
| Using [`~transformers.HfArgumentParser`] we can turn this class into |
| [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the |
| command line. |
| |
| Parameters: |
| max_length (`int` or `None`, *optional*, defaults to `1024`): |
| Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want |
| to use the default data collator. |
| max_prompt_length (`int` or `None`, *optional*, defaults to `512`): |
| Maximum length of the prompt. This argument is required if you want to use the default data collator. |
| max_completion_length (`int`, *optional*): |
| Maximum length of the completion. This argument is required if you want to use the default data collator |
| and your model is an encoder-decoder. |
| beta (`float`, *optional*, defaults to `0.1`): |
| Parameter controlling the deviation from the reference model. Higher β means less deviation from the |
| reference model. |
| label_pad_token_id (`int`, *optional*, defaults to `-100`): |
| Label pad token id. This argument is required if you want to use the default data collator. |
| padding_value (`int`, *optional*): |
| Padding value to use. If `None`, the padding value of the tokenizer is used. |
| truncation_mode (`str`, *optional*, defaults to `"keep_end"`): |
| Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`. |
| This argument is required if you want to use the default data collator. |
| disable_dropout (`bool`, *optional*, defaults to `True`): |
| Whether to disable dropout in the model and reference model. |
| generate_during_eval (`bool`, *optional*, defaults to `False`): |
| If `True`, generates and logs completions from both the model and the reference model to W&B or Comet |
| during evaluation. |
| is_encoder_decoder (`bool`, *optional*): |
| When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument, |
| you need to specify if the model returned by the callable is an encoder-decoder model. |
| precompute_ref_log_probs (`bool`, *optional*, defaults to `False`): |
| Whether to precompute reference model log probabilities for training and evaluation datasets. This is |
| useful when training without the reference model to reduce the total GPU memory needed. |
| model_init_kwargs (`dict[str, Any]`, *optional*): |
| Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a |
| string. |
| ref_model_init_kwargs (`dict[str, Any]`, *optional*): |
| Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the reference model |
| from a string. |
| dataset_num_proc (`int`, *optional*): |
| Number of processes to use for processing the dataset. |
| prompt_sample_size (`int`, *optional*, defaults to `1024`): |
| Number of prompts that are fed to density ratio classifier. |
| min_density_ratio (`float`, *optional*, defaults to `0.5`): |
| Minimum value of the density ratio. The estimated density ratio is clamped to this value. |
| max_density_ratio (`float`, *optional*, defaults to `10.0`): |
| Maximum value of the density ratio. The estimated density ratio is clamped to this value. |
| |
| """ |
| vllm_sampling_params: Optional[Any] = field( |
| default = None, |
| metadata = {'help': 'vLLM SamplingParams'}, |
| ) |
| unsloth_num_chunks : Optional[int] = field( |
| default = -1, |
| metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'}, |
| ) |
| unsloth_logit_chunk_multiplier : Optional[int] = field( |
| default = None, |
| metadata = {'help': 'Multiplier for chunked logit computations.'}, |
| ) |
| unsloth_grpo_mini_batch : Optional[int] = field( |
| default = None, |
| metadata = {'help': 'Mini batch size for GRPO hidden state accumulation. Default is None unless user defines it.'}, |
| ) |
| max_seq_length : Optional[int] = field( |
| default = None, |
| metadata = {'help': 'Maximum sequence length to truncate to.'}, |
| ) |
| def __init__( |
| self, |
| output_dir = None, |
| per_device_train_batch_size = 4, |
| num_train_epochs = 3.0, |
| max_steps = -1, |
| learning_rate = 5e-05, |
| lr_scheduler_type = 'linear', |
| lr_scheduler_kwargs = None, |
| warmup_steps = 0.1, |
| optim = 'adamw_8bit', |
| optim_args = None, |
| weight_decay = 0.01, |
| adam_beta1 = 0.9, |
| adam_beta2 = 0.999, |
| adam_epsilon = 1e-08, |
| optim_target_modules = None, |
| gradient_accumulation_steps = 2, |
| average_tokens_across_devices = True, |
| max_grad_norm = 1.0, |
| label_smoothing_factor = 0.0, |
| bf16 = False, |
| fp16 = False, |
| bf16_full_eval = False, |
| fp16_full_eval = False, |
| tf32 = None, |
| gradient_checkpointing = True, |
| gradient_checkpointing_kwargs = None, |
| torch_compile = False, |
| torch_compile_backend = None, |
| torch_compile_mode = None, |
| use_liger_kernel = False, |
| liger_kernel_config = None, |
| use_cache = False, |
| neftune_noise_alpha = None, |
| torch_empty_cache_steps = 250, |
| auto_find_batch_size = False, |
| logging_strategy = 'steps', |
| logging_steps = 1, |
| logging_first_step = False, |
| log_on_each_node = True, |
| logging_nan_inf_filter = False, |
| include_num_input_tokens_seen = False, |
| log_level = 'passive', |
| log_level_replica = 'warning', |
| disable_tqdm = None, |
| report_to = 'none', |
| run_name = None, |
| project = 'huggingface', |
| trackio_space_id = 'trackio', |
| eval_strategy = 'no', |
| eval_steps = None, |
| eval_delay = 0, |
| per_device_eval_batch_size = 4, |
| prediction_loss_only = False, |
| eval_on_start = False, |
| eval_do_concat_batches = True, |
| eval_use_gather_object = False, |
| eval_accumulation_steps = 2, |
| batch_eval_metrics = False, |
| save_only_model = False, |
| save_strategy = 'steps', |
| save_steps = 500, |
| save_on_each_node = False, |
| save_total_limit = None, |
| enable_jit_checkpoint = False, |
| push_to_hub = False, |
| hub_token = None, |
| hub_private_repo = None, |
| hub_model_id = None, |
| hub_strategy = 'every_save', |
| hub_always_push = False, |
| hub_revision = None, |
| load_best_model_at_end = False, |
| metric_for_best_model = None, |
| greater_is_better = None, |
| ignore_data_skip = False, |
| restore_callback_states_from_checkpoint = False, |
| full_determinism = False, |
| seed = 3407, |
| data_seed = 3407, |
| use_cpu = False, |
| accelerator_config = None, |
| parallelism_config = None, |
| dataloader_drop_last = False, |
| dataloader_num_workers = 0, |
| dataloader_pin_memory = True, |
| dataloader_persistent_workers = False, |
| dataloader_prefetch_factor = None, |
| remove_unused_columns = True, |
| label_names = None, |
| train_sampling_strategy = 'random', |
| length_column_name = 'length', |
| ddp_find_unused_parameters = None, |
| ddp_bucket_cap_mb = None, |
| ddp_broadcast_buffers = None, |
| ddp_backend = None, |
| ddp_timeout = 1800, |
| fsdp = None, |
| fsdp_config = None, |
| deepspeed = None, |
| debug = '', |
| skip_memory_metrics = True, |
| do_train = False, |
| do_eval = False, |
| do_predict = False, |
| resume_from_checkpoint = None, |
| warmup_ratio = None, |
| logging_dir = None, |
| local_rank = -1, |
| max_length = 1024, |
| max_prompt_length = 512, |
| max_completion_length = None, |
| beta = 0.1, |
| label_pad_token_id = -100, |
| padding_value = None, |
| truncation_mode = 'keep_end', |
| disable_dropout = True, |
| generate_during_eval = False, |
| is_encoder_decoder = None, |
| precompute_ref_log_probs = False, |
| model_init_kwargs = None, |
| ref_model_init_kwargs = None, |
| dataset_num_proc = None, |
| prompt_sample_size = 1024, |
| min_density_ratio = 0.5, |
| max_density_ratio = 10.0, |
| vllm_sampling_params = None, |
| unsloth_num_chunks = -1, |
| unsloth_logit_chunk_multiplier = None, |
| unsloth_grpo_mini_batch = None, |
| max_seq_length = None, |
| **kwargs, |
| ): |
        if learning_rate < 1e-7: print(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: print(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
| if num_train_epochs is None: |
| num_train_epochs = 3.0 |
| if output_dir is None and save_strategy == 'steps' and save_steps == 500: |
| output_dir = 'unsloth_training_checkpoints' |
| save_strategy = 'no' |
| import multiprocessing as _mp |
| if _mp.get_start_method() != 'fork': |
| dataset_num_proc = None |
| elif dataset_num_proc is None: |
| import psutil |
| dataset_num_proc = min(max((psutil.cpu_count() or 1)+4, 2), 64) |
| memory_gb_left = psutil.virtual_memory().available / (1024**3) |
| if memory_gb_left <= 2: dataset_num_proc = 1 |
| else: dataset_num_proc = min(dataset_num_proc, int(memory_gb_left)) |
| |
| super().__init__( |
| output_dir = output_dir, |
| per_device_train_batch_size = per_device_train_batch_size, |
| num_train_epochs = num_train_epochs, |
| max_steps = max_steps, |
| learning_rate = learning_rate, |
| lr_scheduler_type = lr_scheduler_type, |
| lr_scheduler_kwargs = lr_scheduler_kwargs, |
| warmup_steps = warmup_steps, |
| optim = optim, |
| optim_args = optim_args, |
| weight_decay = weight_decay, |
| adam_beta1 = adam_beta1, |
| adam_beta2 = adam_beta2, |
| adam_epsilon = adam_epsilon, |
| optim_target_modules = optim_target_modules, |
| gradient_accumulation_steps = gradient_accumulation_steps, |
| average_tokens_across_devices = average_tokens_across_devices, |
| max_grad_norm = max_grad_norm, |
| label_smoothing_factor = label_smoothing_factor, |
| bf16 = bf16, |
| fp16 = fp16, |
| bf16_full_eval = bf16_full_eval, |
| fp16_full_eval = fp16_full_eval, |
| tf32 = tf32, |
| gradient_checkpointing = gradient_checkpointing, |
| gradient_checkpointing_kwargs = gradient_checkpointing_kwargs, |
| torch_compile = torch_compile, |
| torch_compile_backend = torch_compile_backend, |
| torch_compile_mode = torch_compile_mode, |
| use_liger_kernel = use_liger_kernel, |
| liger_kernel_config = liger_kernel_config, |
| use_cache = use_cache, |
| neftune_noise_alpha = neftune_noise_alpha, |
| torch_empty_cache_steps = torch_empty_cache_steps, |
| auto_find_batch_size = auto_find_batch_size, |
| logging_strategy = logging_strategy, |
| logging_steps = logging_steps, |
| logging_first_step = logging_first_step, |
| log_on_each_node = log_on_each_node, |
| logging_nan_inf_filter = logging_nan_inf_filter, |
| include_num_input_tokens_seen = include_num_input_tokens_seen, |
| log_level = log_level, |
| log_level_replica = log_level_replica, |
| disable_tqdm = disable_tqdm, |
| report_to = report_to, |
| run_name = run_name, |
| project = project, |
| trackio_space_id = trackio_space_id, |
| eval_strategy = eval_strategy, |
| eval_steps = eval_steps, |
| eval_delay = eval_delay, |
| per_device_eval_batch_size = per_device_eval_batch_size, |
| prediction_loss_only = prediction_loss_only, |
| eval_on_start = eval_on_start, |
| eval_do_concat_batches = eval_do_concat_batches, |
| eval_use_gather_object = eval_use_gather_object, |
| eval_accumulation_steps = eval_accumulation_steps, |
| batch_eval_metrics = batch_eval_metrics, |
| save_only_model = save_only_model, |
| save_strategy = save_strategy, |
| save_steps = save_steps, |
| save_on_each_node = save_on_each_node, |
| save_total_limit = save_total_limit, |
| enable_jit_checkpoint = enable_jit_checkpoint, |
| push_to_hub = push_to_hub, |
| hub_token = hub_token, |
| hub_private_repo = hub_private_repo, |
| hub_model_id = hub_model_id, |
| hub_strategy = hub_strategy, |
| hub_always_push = hub_always_push, |
| hub_revision = hub_revision, |
| load_best_model_at_end = load_best_model_at_end, |
| metric_for_best_model = metric_for_best_model, |
| greater_is_better = greater_is_better, |
| ignore_data_skip = ignore_data_skip, |
| restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint, |
| full_determinism = full_determinism, |
| seed = seed, |
| data_seed = data_seed, |
| use_cpu = use_cpu, |
| accelerator_config = accelerator_config, |
| parallelism_config = parallelism_config, |
| dataloader_drop_last = dataloader_drop_last, |
| dataloader_num_workers = dataloader_num_workers, |
| dataloader_pin_memory = dataloader_pin_memory, |
| dataloader_persistent_workers = dataloader_persistent_workers, |
| dataloader_prefetch_factor = dataloader_prefetch_factor, |
| remove_unused_columns = remove_unused_columns, |
| label_names = label_names, |
| train_sampling_strategy = train_sampling_strategy, |
| length_column_name = length_column_name, |
| ddp_find_unused_parameters = ddp_find_unused_parameters, |
| ddp_bucket_cap_mb = ddp_bucket_cap_mb, |
| ddp_broadcast_buffers = ddp_broadcast_buffers, |
| ddp_backend = ddp_backend, |
| ddp_timeout = ddp_timeout, |
| fsdp = fsdp, |
| fsdp_config = fsdp_config, |
| deepspeed = deepspeed, |
| debug = debug, |
| skip_memory_metrics = skip_memory_metrics, |
| do_train = do_train, |
| do_eval = do_eval, |
| do_predict = do_predict, |
| resume_from_checkpoint = resume_from_checkpoint, |
| warmup_ratio = warmup_ratio, |
| logging_dir = logging_dir, |
| local_rank = local_rank, |
| max_length = max_length, |
| max_prompt_length = max_prompt_length, |
| max_completion_length = max_completion_length, |
| beta = beta, |
| label_pad_token_id = label_pad_token_id, |
| padding_value = padding_value, |
| truncation_mode = truncation_mode, |
| disable_dropout = disable_dropout, |
| generate_during_eval = generate_during_eval, |
| is_encoder_decoder = is_encoder_decoder, |
| precompute_ref_log_probs = precompute_ref_log_probs, |
| model_init_kwargs = model_init_kwargs, |
| ref_model_init_kwargs = ref_model_init_kwargs, |
| dataset_num_proc = dataset_num_proc, |
| prompt_sample_size = prompt_sample_size, |
| min_density_ratio = min_density_ratio, |
| max_density_ratio = max_density_ratio,**kwargs) |
| self.vllm_sampling_params = vllm_sampling_params |
| self.unsloth_num_chunks = unsloth_num_chunks |
| if unsloth_grpo_mini_batch is not None: |
| if self.generation_batch_size >= unsloth_grpo_mini_batch: |
| self.unsloth_grpo_mini_batch = unsloth_grpo_mini_batch |
| else: |
                raise ValueError(
                    "Unsloth GRPO mini batch size needs to be less than or equal to the effective "
                    "generation batch size (per_device_train_batch_size * gradient_accumulation_steps)."
                )
| self.unsloth_logit_chunk_multiplier = unsloth_logit_chunk_multiplier |
| self.max_seq_length = max_seq_length |
|
|
| pass |
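
# Usage sketch (an illustrative addition): constructing the config with a few
# common overrides. All argument values here are assumptions for the example.
#
#     training_args = UnslothBCOConfig(
#         output_dir = "outputs",
#         per_device_train_batch_size = 2,
#         gradient_accumulation_steps = 4,
#         max_length = 1024,
#         beta = 0.1,
#     )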
|
|
| class _UnslothBCOTrainer(BaseTrainer): |
| r"""""" |
|
|
| _tag_names = ["trl", "bco"] |
| _name = "BCO" |
| _paper = { |
| "title": "Binary Classifier Optimization for Large Language Model Alignment", |
| "id": "2404.04656", |
| |
| "citation": textwrap.dedent("""\ |
| @article{jung2024binary, |
| title = {{Binary Classifier Optimization for Large Language Model Alignment}}, |
| author = {Seungjae Jung and Gunsoo Han and Daniel Wontae Nam and Kyoung{-}Woon On}, |
| year = 2024, |
| eprint = {arXiv:2404.04656} |
| }"""), |
| } |
|
|
| def __init__( |
| self, |
| model: Union[PreTrainedModel, nn.Module, str] = None, |
| ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None, |
| args: BCOConfig = None, |
| train_dataset: Optional[Dataset] = None, |
| eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, |
| processing_class: Optional[ |
| Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] |
| ] = None, |
| data_collator: Optional[DataCollator] = None, |
| model_init: Optional[Callable[[], PreTrainedModel]] = None, |
| callbacks: Optional[list[TrainerCallback]] = None, |
| optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), |
| preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, |
| peft_config: Optional[dict] = None, |
| compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None, |
| model_adapter_name: Optional[str] = None, |
| ref_adapter_name: Optional[str] = None, |
| embedding_func: Optional[Callable] = None, |
| embedding_tokenizer: Optional[PreTrainedTokenizerBase] = None, |
| ): |
| if not os.environ.get("TRL_EXPERIMENTAL_SILENCE"): |
| warnings.warn( |
| "This trainer will soon be moved to trl.experimental and is a candidate for removal. If you rely on " |
| "it and want it to remain, please share your comments here: " |
| "https://github.com/huggingface/trl/issues/4223. Silence this warning by setting environment variable " |
| "TRL_EXPERIMENTAL_SILENCE=1." |
| ) |
| if embedding_func is not None and not (is_sklearn_available() and is_joblib_available()): |
            raise ImportError(
                "BCOTrainer with UDM requires the scikit-learn and joblib libraries. Please install them with `pip install scikit-learn joblib`."
            )
|
|
| if type(args) is TrainingArguments: |
| raise ValueError("Please use `BCOConfig` instead `TrainingArguments`.") |
|
|
| if not isinstance(model, str) and model is not None and ref_model is model: |
            raise ValueError(
                "`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
                "same as `model`, you must pass a copy of it, or `None` if you use PEFT."
            )
|
|
| if args.model_init_kwargs is None: |
| model_init_kwargs = {} |
| elif not isinstance(model, str): |
| raise ValueError("You passed model_kwargs to the BCOTrainer. But your model is already instantiated.") |
| else: |
| model_init_kwargs = args.model_init_kwargs |
| dtype = model_init_kwargs.get("dtype") |
| if dtype is not None: |
| |
| if isinstance(dtype, str) and dtype != "auto": |
| dtype = getattr(torch, dtype) |
| if dtype != "auto" and not isinstance(dtype, torch.dtype): |
                    raise ValueError(
                        f"Invalid `dtype` passed to the BCOConfig. Expected either 'auto' or a string representing a `torch.dtype` (e.g. 'float32'), but got {dtype}."
                    )
| model_init_kwargs["dtype"] = dtype |
|
|
| if args.ref_model_init_kwargs is None: |
| ref_model_init_kwargs = {} |
| elif not isinstance(ref_model, str): |
            raise ValueError(
                "You passed `ref_model_init_kwargs` to the BCOTrainer, but your ref_model is already instantiated."
            )
| else: |
| ref_model_init_kwargs = args.ref_model_init_kwargs |
| dtype = ref_model_init_kwargs.get("dtype") |
| if dtype is not None: |
| |
| if isinstance(dtype, str) and dtype != "auto": |
| dtype = getattr(torch, dtype) |
| if dtype != "auto" and not isinstance(dtype, torch.dtype): |
                    raise ValueError(
                        f"Invalid `dtype` passed to the BCOConfig. Expected either 'auto' or a string representing a `torch.dtype` (e.g. 'float32'), but got {dtype}."
                    )
| ref_model_init_kwargs["dtype"] = dtype |
|
|
| if isinstance(model, str): |
| model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs) |
|
|
| if isinstance(ref_model, str): |
| ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs) |
|
|
| |
| |
| self._peft_has_been_casted_to_bf16 = False |
|
|
| if not is_peft_available() and peft_config is not None: |
            raise ValueError(
                "PEFT is not installed, but you passed a `peft_config` in the trainer's kwargs. Please install it with `pip install peft` to use PEFT models."
            )
| elif is_peft_available() and peft_config is not None: |
| |
| if isinstance(model, PeftModel): |
| model = model.merge_and_unload() |
|
|
| if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False): |
| _support_gc_kwargs = hasattr( |
| args, "gradient_checkpointing_kwargs" |
| ) and "gradient_checkpointing_kwargs" in list( |
| inspect.signature(prepare_model_for_kbit_training).parameters |
| ) |
|
|
| prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing} |
|
|
| if _support_gc_kwargs: |
| prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs |
|
|
| model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) |
| elif args.gradient_checkpointing: |
| |
| if hasattr(model, "enable_input_require_grads"): |
| model.enable_input_require_grads() |
| else: |
|
|
| def make_inputs_require_grad(module, input, output): |
| output.requires_grad_(True) |
|
|
| model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) |
|
|
| |
| model = model |
| if args.bf16 and getattr(model, "is_loaded_in_4bit", False): |
| peft_module_casting_to_bf16(model) |
| |
| self._peft_has_been_casted_to_bf16 = True |
|
|
| |
| |
| |
| elif args.gradient_checkpointing: |
| |
| if hasattr(model, "enable_input_require_grads"): |
| model.enable_input_require_grads() |
| else: |
|
|
| def make_inputs_require_grad(module, input, output): |
| output.requires_grad_(True) |
|
|
| model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) |
|
|
| if args.generate_during_eval and not (is_wandb_available() or is_comet_available()): |
| raise ValueError( |
| "`generate_during_eval=True` requires Weights and Biases or Comet to be installed." |
| " Please install `wandb` or `comet-ml` to resolve." |
| ) |
|
|
| if model is not None: |
| self.is_encoder_decoder = model.config.is_encoder_decoder |
| elif args.is_encoder_decoder is None: |
| raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.") |
| else: |
| self.is_encoder_decoder = args.is_encoder_decoder |
|
|
| self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) |
| self.model_adapter_name = model_adapter_name |
| self.ref_adapter_name = ref_adapter_name |
|
|
| if ref_model: |
| self.ref_model = ref_model |
| elif self.is_peft_model or args.precompute_ref_log_probs: |
| |
| self.ref_model = None |
| else: |
| self.ref_model = create_reference_model(model) |
|
|
| if processing_class is None: |
| raise ValueError( |
| "max_length or a processing_class must be specified when using the default DPODataCollatorWithPadding" |
| ) |
| if args.max_length is None: |
| logger.warning( |
| "When using DPODataCollatorWithPadding, you should set `max_length` in the `BCOConfig`. " |
| "It will be set to `512` by default, but you should do it yourself in the future.", |
| ) |
| max_length = 512 |
| if args.max_length is not None: |
| max_length = args.max_length |
|
|
| if args.max_prompt_length is None: |
| logger.warning( |
| "When using DPODataCollatorWithPadding, you should set `max_prompt_length` in the `BCOConfig`. " |
| "It will be set to `128` by default, but you should do it yourself in the future.", |
| ) |
| max_prompt_length = 128 |
| if args.max_prompt_length is not None: |
| max_prompt_length = args.max_prompt_length |
|
|
| max_completion_length = None |
| if args.max_completion_length is None and self.is_encoder_decoder: |
            logger.warning(
                "When using DPODataCollatorWithPadding with an encoder-decoder architecture, you should set "
                "`max_completion_length` in the BCOConfig. It will be set to `128` by default, but you should "
                "do it yourself in the future.",
            )
| max_completion_length = 128 |
| if args.max_completion_length is not None and self.is_encoder_decoder: |
| max_completion_length = args.max_completion_length |
|
|
| if data_collator is None: |
| data_collator = DPODataCollatorWithPadding( |
| pad_token_id=processing_class.pad_token_id, |
| label_pad_token_id=args.label_pad_token_id, |
| is_encoder_decoder=self.is_encoder_decoder, |
| ) |
|
|
| if args.remove_unused_columns: |
| args.remove_unused_columns = False |
| |
            logger.warning(
                "When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your "
                "BCOConfig. We have set it for you, but you should do it yourself in the future.",
            )
|
|
| self.use_dpo_data_collator = True |
| else: |
| self.use_dpo_data_collator = False |
|
|
| |
| if args.disable_dropout: |
| disable_dropout_in_model(model) |
| if self.ref_model is not None: |
| disable_dropout_in_model(self.ref_model) |
|
|
| self.max_length = max_length |
| self.generate_during_eval = args.generate_during_eval |
| self.label_pad_token_id = args.label_pad_token_id |
| self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id |
| self.max_prompt_length = max_prompt_length |
| self.truncation_mode = args.truncation_mode |
| self.max_completion_length = max_completion_length |
| self.precompute_ref_log_probs = args.precompute_ref_log_probs |
|
|
| |
| |
| self._precomputed_train_ref_log_probs = False |
| self._precomputed_eval_ref_log_probs = False |
|
|
| |
| self._stored_metrics = defaultdict(lambda: defaultdict(list)) |
|
|
| |
| self.beta = args.beta |
| self.aux_loss_enabled = getattr(model.config, "output_router_logits", False) |
| self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0) |
| if self.aux_loss_enabled and self.aux_loss_coef == 0.0: |
| logger.warning( |
| "You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to " |
| "`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value " |
| "greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary " |
| "loss.", |
| ) |
|
|
| |
| self.embedding_func = embedding_func |
| self.embedding_tokenizer = embedding_tokenizer |
|
|
| |
| |
| |
| |
| |
| |
| |
| model.warnings_issued["estimate_tokens"] = True |
|
|
| with PartialState().main_process_first(): |
| |
| train_dataset = train_dataset.map( |
| maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from train dataset" |
| ) |
| |
| train_dataset = maybe_unpair_preference_dataset( |
| train_dataset, args.dataset_num_proc, desc="Unpairing train dataset" |
| ) |
| |
| train_dataset = train_dataset.map( |
| maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc |
| ) |
| if eval_dataset is not None: |
| |
| eval_dataset = eval_dataset.map( |
| maybe_extract_prompt, num_proc=args.dataset_num_proc, desc="Extracting prompt from eval dataset" |
| ) |
| |
| eval_dataset = maybe_unpair_preference_dataset( |
| eval_dataset, args.dataset_num_proc, desc="Unpairing eval dataset" |
| ) |
| eval_dataset = eval_dataset.map( |
| maybe_apply_chat_template, |
| fn_kwargs={"tokenizer": processing_class}, |
| num_proc=args.dataset_num_proc, |
| ) |
|
|
| |
| train_dataset = train_dataset.map( |
| _tokenize, |
| batched=True, |
| fn_kwargs={"tokenizer": processing_class, "embedding_tokenizer": self.embedding_tokenizer}, |
| num_proc=args.dataset_num_proc, |
| desc="Tokenizing train dataset", |
| ) |
|
|
| |
| fn_kwargs = { |
| "prefix": "", |
| "is_encoder_decoder": self.is_encoder_decoder, |
| "tokenizer": processing_class, |
| "max_length": self.max_length, |
| "truncation_mode": self.truncation_mode, |
| "label_pad_token_id": self.label_pad_token_id, |
| "max_prompt_length": self.max_prompt_length, |
| "max_completion_length": self.max_completion_length, |
| } |
| train_dataset = train_dataset.map( |
| _process_tokens, |
| fn_kwargs=fn_kwargs, |
| num_proc=args.dataset_num_proc, |
| desc="Processing tokenized train dataset", |
| ) |
|
|
| if eval_dataset is not None: |
| |
| eval_dataset = eval_dataset.map( |
| _tokenize, |
| fn_kwargs={"tokenizer": processing_class, "embedding_tokenizer": self.embedding_tokenizer}, |
| batched=True, |
| num_proc=args.dataset_num_proc, |
| desc="Tokenizing eval dataset", |
| ) |
|
|
| |
| fn_kwargs = { |
| "prefix": "", |
| "is_encoder_decoder": self.is_encoder_decoder, |
| "tokenizer": processing_class, |
| "max_length": self.max_length, |
| "truncation_mode": self.truncation_mode, |
| "label_pad_token_id": self.label_pad_token_id, |
| "max_prompt_length": self.max_prompt_length, |
| "max_completion_length": self.max_completion_length, |
| } |
| eval_dataset = eval_dataset.map( |
| _process_tokens, |
| fn_kwargs=fn_kwargs, |
| num_proc=args.dataset_num_proc, |
| desc="Processing tokenized eval dataset", |
| ) |
|
|
| desirable = train_dataset.filter( |
| lambda x: x["label"], num_proc=args.dataset_num_proc, desc="Filtering desirable examples" |
| ) |
| undesirable = train_dataset.filter( |
| lambda x: not x["label"], num_proc=args.dataset_num_proc, desc="Filtering undesirable examples" |
| ) |
|
|
| super().__init__( |
| model=model, |
| args=args, |
| data_collator=data_collator, |
| train_dataset=train_dataset, |
| eval_dataset=eval_dataset, |
| processing_class=processing_class, |
| model_init=model_init, |
| compute_metrics=compute_metrics, |
| callbacks=callbacks, |
| optimizers=optimizers, |
| preprocess_logits_for_metrics=preprocess_logits_for_metrics, |
| ) |
|
|
| |
| |
| |
| self.model_accepts_loss_kwargs = False |
|
|
| |
| if hasattr(self.model, "add_model_tags"): |
| self.model.add_model_tags(self._tag_names) |
|
|
| if not hasattr(self, "accelerator"): |
| raise AttributeError( |
| "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`." |
| ) |
|
|
| |
| if self.is_deepspeed_enabled: |
| if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs: |
| raise ValueError( |
| "You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`." |
| ) |
|
|
| if self.ref_model is None: |
| if not (self.is_peft_model or self.precompute_ref_log_probs): |
                raise ValueError(
                    "No reference model was provided and the model is not a PEFT model. Try setting `precompute_ref_log_probs=True`."
                )
| else: |
| if self.is_deepspeed_enabled: |
| self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator) |
| else: |
| self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) |
|
|
| self.running = RunningMoments(accelerator=self.accelerator) |
|
|
| if self.embedding_func is None or args.resume_from_checkpoint: |
| return |
|
|
| chosen_embeddings = self._get_sample_prompt_embeddings(desirable, sample_size=self.args.prompt_sample_size) |
| rejected_embeddings = self._get_sample_prompt_embeddings(undesirable, sample_size=self.args.prompt_sample_size) |
|
|
| embeddings = torch.cat((chosen_embeddings, rejected_embeddings), dim=0) |
| labels = torch.cat( |
| (torch.ones_like(chosen_embeddings[:, 0]), torch.zeros_like(rejected_embeddings[:, 0])), dim=0 |
| ) |
|
|
| self.clf = LogisticRegression(class_weight="balanced").fit( |
| embeddings.cpu().float().numpy(), labels.cpu().numpy() |
| ) |
| chosen_mean = self.clf.score( |
| chosen_embeddings.cpu().float().numpy(), torch.ones_like(chosen_embeddings[:, 0]).cpu().numpy() |
| ) |
| rejected_mean = self.clf.score( |
| rejected_embeddings.cpu().float().numpy(), torch.zeros_like(rejected_embeddings[:, 0]).cpu().numpy() |
| ) |
| logger.info(f"UDM classifier training scores: chosen: {chosen_mean}, rejected: {rejected_mean}") |
|
|
| @property |
| def match_underlying_distribution(self): |
| return self.embedding_func is not None and self.embedding_tokenizer is not None |
|
|
| def _get_chosen_prob(self, prompt_embeddings: torch.FloatTensor) -> torch.FloatTensor: |
| """ |
| Calculates the probability if the given prompt embedding is from desirable dataset. This function calculates |
| the probability in the process and ensemble across processes. |
| """ |
| dtype = prompt_embeddings.dtype |
| device = prompt_embeddings.device |
| rank = self.accelerator.process_index |
|
|
| padded_prompt_embeddings = self.accelerator.pad_across_processes( |
| prompt_embeddings, pad_index=self.embedding_tokenizer.pad_token_id |
| ) |
| sample_size = padded_prompt_embeddings.shape[0] |
| nonzero = padded_prompt_embeddings.mean(dim=1) != self.embedding_tokenizer.pad_token_id |
| prompt_embeddings = self.accelerator.gather(padded_prompt_embeddings) |
|
|
| |
| if prompt_embeddings.shape[0] == 0: |
| return torch.tensor([], device=device, dtype=dtype) |
|
|
| prob = self.clf.predict_proba(prompt_embeddings.cpu().float().numpy())[:, 1] |
| prob = torch.as_tensor(prob, dtype=dtype, device=device) |
| prob = self.accelerator.reduce(prob, reduction="mean") |
|
|
| prob = prob[sample_size * rank : sample_size * (rank + 1)] |
| prob = prob[nonzero] |
|
|
| return prob |
|
|
| def _vectorize_prompt(self, input_ids: torch.LongTensor, attention_mask: torch.LongTensor) -> torch.FloatTensor: |
| """ |
| Replaces processing_class.pad_token_id to embedding_tokenizer.pad_token_id and applies self.embedding_func |
| """ |
| input_ids = torch.where( |
| input_ids == self.processing_class.pad_token_id, |
| self.embedding_tokenizer.pad_token_id, |
| input_ids, |
| ) |
|
|
| with torch.no_grad(): |
| embeddings = self.embedding_func( |
| input_ids=input_ids, |
| attention_mask=attention_mask, |
| ) |
|
|
| return embeddings |
|
|
| def _get_prompt_embeddings( |
| self, batch: dict[str, Union[list, torch.LongTensor]] |
| ) -> tuple[torch.FloatTensor, torch.FloatTensor]: |
| """Extract embeddings from frozen embedding model""" |
|
|
| if not self.match_underlying_distribution: |
| return None, None |
|
|
| embeddings = self._vectorize_prompt( |
| input_ids=batch["embedding_input_ids"], |
| attention_mask=batch["embedding_attention_mask"], |
| ) |
|
|
| labels = torch.tensor(batch["label"], dtype=torch.bool, device=embeddings.device) |
| chosen_idx = torch.where(labels)[0] |
| rejected_idx = torch.where(~labels)[0] |
|
|
| chosen_embeddings = embeddings[chosen_idx, ...] |
| rejected_embeddings = embeddings[rejected_idx, ...] |
|
|
| return (chosen_embeddings, rejected_embeddings) |
|
|
| def _get_sample_prompt_embeddings(self, dataset: Dataset, sample_size: int = 512) -> torch.FloatTensor: |
| """ |
| Sample instances from dataset and get prompt embeddings. Used for density ratio classifier training. |
| """ |
| n_samples = min(len(dataset), sample_size) |
| rand_indices = np.random.choice(len(dataset), size=(n_samples,)) |
|
|
| embedding_dataset = dataset.select(rand_indices) |
|
|
| dataloader_params = { |
| "batch_size": self.args.per_device_train_batch_size, |
| "collate_fn": self.data_collator, |
| "num_workers": self.args.dataloader_num_workers, |
| "pin_memory": self.args.dataloader_pin_memory, |
| "shuffle": False, |
| } |
|
|
| |
| data_loader = self.accelerator.prepare(DataLoader(embedding_dataset, **dataloader_params)) |
|
|
| with torch.no_grad(): |
| all_embeddings = torch.empty(0) |
| for padded_batch in tqdm(iterable=data_loader, desc="Building sample prompt embeddings"): |
| embeddings = self._vectorize_prompt( |
| input_ids=padded_batch["embedding_input_ids"], |
| attention_mask=padded_batch["embedding_attention_mask"], |
| ) |
| embeddings = self.accelerator.gather_for_metrics(embeddings) |
| all_embeddings = torch.cat((all_embeddings, embeddings.cpu())) |
|
|
| return all_embeddings |
|
|
| def _save_optimizer_and_scheduler(self, output_dir): |
| output_dir = output_dir if output_dir is not None else self.args.output_dir |
| super()._save_optimizer_and_scheduler(output_dir) |
|
|
| if self.accelerator.is_main_process: |
| |
| self.running.save_to_json(os.path.join(output_dir, RUNNING_NAME)) |
|
|
| if self.match_underlying_distribution: |
| joblib.dump(self.clf, os.path.join(output_dir, CLF_NAME), compress=True) |
|
|
| def _load_optimizer_and_scheduler(self, checkpoint): |
| if checkpoint is None: |
| logger.warning_once(f"Missing Checkpoint {checkpoint}") |
| return |
|
|
| super()._load_optimizer_and_scheduler(checkpoint) |
|
|
| |
| running_file = os.path.join(checkpoint, RUNNING_NAME) |
| if os.path.isfile(running_file): |
| self.running = RunningMoments.load_from_json(self.accelerator, running_file) |
|
|
| if self.match_underlying_distribution: |
| clf_file = os.path.join(checkpoint, CLF_NAME) |
| if os.path.isfile(clf_file): |
| self.clf = joblib.load(clf_file) |
|
|
| @contextmanager |
| def null_ref_context(self): |
| """Context manager for handling null reference model (that is, peft adapter manipulation).""" |
| with ( |
| self.accelerator.unwrap_model(self.model).disable_adapter() |
| if self.is_peft_model and not self.ref_adapter_name |
| else nullcontext() |
| ): |
| if self.ref_adapter_name: |
| self.model.set_adapter(self.ref_adapter_name) |
| yield |
| if self.ref_adapter_name: |
| self.model.set_adapter(self.model_adapter_name or "default") |
|
|
| def get_train_dataloader(self) -> DataLoader: |
| """ |
| Returns the training [`~torch.utils.data.DataLoader`]. |
| |
| Subclass of transformers.src.transformers.trainer.get_train_dataloader to precompute `ref_log_probs`. |
| """ |
|
|
| if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs: |
| dataloader_params = { |
| "batch_size": self.args.per_device_train_batch_size, |
| "collate_fn": self.data_collator, |
| "num_workers": self.args.dataloader_num_workers, |
| "pin_memory": self.args.dataloader_pin_memory, |
| "shuffle": False, |
| } |
|
|
| |
| data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params)) |
| reference_completion_logps = [] |
|
|
| for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"): |
| reference_completion_logp = self.compute_reference_log_probs(padded_batch) |
|
|
| reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp) |
| reference_completion_logps.append(reference_completion_logp.cpu()) |
|
|
| self.train_dataset = self.train_dataset.add_column( |
| name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy() |
| ) |
|
|
| self._precomputed_train_ref_log_probs = True |
|
|
| return super().get_train_dataloader() |
|
|
| def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: |
| """ |
| Returns the evaluation [`~torch.utils.data.DataLoader`]. |
| |
        Overrides `transformers.Trainer.get_eval_dataloader` to precompute `ref_log_probs`.
| |
| Args: |
| eval_dataset (`torch.utils.data.Dataset`, *optional*): |
| If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted |
| by the `model.forward()` method are automatically removed. It must implement `__len__`. |
| """ |
| if eval_dataset is None and self.eval_dataset is None: |
| raise ValueError("Trainer: evaluation requires an eval_dataset.") |
| eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset |
|
|
| if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs: |
| dataloader_params = { |
| "batch_size": self.args.per_device_eval_batch_size, |
| "collate_fn": self.data_collator, |
| "num_workers": self.args.dataloader_num_workers, |
| "pin_memory": self.args.dataloader_pin_memory, |
| "shuffle": False, |
| } |
|
|
| |
| data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params)) |
|
|
| reference_completion_logps = [] |
|
|
| for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"): |
| reference_completion_logp = self.compute_reference_log_probs(padded_batch) |
|
|
| reference_completion_logp = self.accelerator.gather_for_metrics(reference_completion_logp) |
| reference_completion_logps.append(reference_completion_logp.cpu()) |
|
|
| eval_dataset = eval_dataset.add_column( |
| name="reference_logps", column=torch.cat(reference_completion_logps).float().numpy() |
| ) |
|
|
| |
| if self.eval_dataset is not None: |
| self.eval_dataset = eval_dataset |
| self._precomputed_eval_ref_log_probs = True |
|
|
| return super().get_eval_dataloader(eval_dataset=eval_dataset) |
|
|
| def compute_reference_log_probs(self, padded_batch: dict) -> dict: |
| """Computes log probabilities of the reference model for a single padded batch of a BCO specific dataset.""" |
| with torch.no_grad(): |
| if self.ref_model is None: |
| with self.null_ref_context(): |
| if self.is_encoder_decoder: |
| completion_logits = self.model( |
| padded_batch["prompt_input_ids"], |
| attention_mask=padded_batch["prompt_attention_mask"], |
| decoder_input_ids=padded_batch.get("completion_decoder_input_ids"), |
| labels=padded_batch["completion_labels"], |
| ).logits |
|
|
| else: |
| completion_logits = self.model( |
| padded_batch["completion_input_ids"], |
| attention_mask=padded_batch["completion_attention_mask"], |
| ).logits |
|
|
| else: |
| if self.is_encoder_decoder: |
| completion_logits = self.ref_model( |
| padded_batch["prompt_input_ids"], |
| attention_mask=padded_batch["prompt_attention_mask"], |
| decoder_input_ids=padded_batch.get("completion_decoder_input_ids"), |
| labels=padded_batch["completion_labels"], |
| ).logits |
|
|
| else: |
| completion_logits = self.ref_model( |
| padded_batch["completion_input_ids"], attention_mask=padded_batch["completion_attention_mask"] |
| ).logits |
|
|
| completion_logps = self.get_batch_logps( |
| completion_logits, |
| padded_batch["completion_labels"], |
| average_log_prob=False, |
| is_encoder_decoder=self.is_encoder_decoder, |
| label_pad_token_id=self.label_pad_token_id, |
| ) |
|
|
| return completion_logps |
|
|
| @staticmethod |
| def get_batch_logps( |
| logits: torch.FloatTensor, |
| labels: torch.LongTensor, |
| average_log_prob: bool = False, |
| label_pad_token_id: int = -100, |
| is_encoder_decoder: bool = False, |
| ) -> torch.FloatTensor: |
| """Compute the log probabilities of the given labels under the given logits. |
| |
| Args: |
| logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size) |
| labels: |
| Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are |
| ignored. Shape: (batch_size, sequence_length) |
| average_log_prob: |
| If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the |
| log probabilities of the (non-masked) tokens. |
| label_pad_token_id: |
| The label value to ignore when computing log probabilities. |
            is_encoder_decoder:
                Whether the model is an encoder-decoder model. If True, the labels are not shifted and the logits
                are assumed to already be aligned with the labels. If False (decoder-only), the labels are shifted
                one position relative to the logits, so that the logits at position t score the label at t + 1.
| |
| Returns: |
| A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the |
| given logits. |
| """ |
| if logits.shape[:-1] != labels.shape: |
| raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.") |
|
|
| if not is_encoder_decoder: |
| labels = labels[:, 1:].clone() |
| logits = logits[:, :-1, :] |
| else: |
| |
| labels = labels.clone() |
|
|
| loss_mask = labels != label_pad_token_id |
|
|
| |
| labels[labels == label_pad_token_id] = 0 |
|
|
| per_token_logps = selective_log_softmax(logits, labels) |
|
|
| if average_log_prob: |
| return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1) |
| else: |
| return (per_token_logps * loss_mask).sum(-1) |
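
    # Shape illustration (a comment added for clarity): for decoder-only
    # models, logits (B, T, V) and labels (B, T) are realigned above so that
    # logits[:, t] scores labels[:, t + 1]; the per-sequence result is (B,).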
|
|
| def forward( |
| self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]] |
| ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: |
| model_kwargs = ( |
| { |
| "labels": batch["completion_labels"], |
| "decoder_input_ids": batch.get("completion_decoder_input_ids"), |
| } |
| if self.is_encoder_decoder |
| else {} |
| ) |
| if self.aux_loss_enabled: |
| model_kwargs["output_router_logits"] = True |
|
|
| outputs = model( |
| batch["completion_input_ids"], |
| attention_mask=batch["completion_attention_mask"], |
| **model_kwargs, |
| ) |
| completion_logits = outputs.logits |
|
|
| completion_logps = self.get_batch_logps( |
| completion_logits, |
| batch["completion_labels"], |
| average_log_prob=False, |
| is_encoder_decoder=self.is_encoder_decoder, |
| label_pad_token_id=self.label_pad_token_id, |
| ) |
|
|
| if completion_logps.shape[0] != len(batch["label"]): |
| raise ValueError( |
| "There is a mismatch between the number of examples in this batch and the number of " |
| "examples for which an output sequence was predicted." |
| ) |
|
|
| chosen_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is True] |
| rejected_idx = [i for i in range(completion_logps.shape[0]) if batch["label"][i] is False] |
|
|
| chosen_logps = completion_logps[chosen_idx, ...] |
| rejected_logps = completion_logps[rejected_idx, ...] |
|
|
| chosen_logits = completion_logits[chosen_idx, ...] |
| rejected_logits = completion_logits[rejected_idx, ...] |
|
|
| if self.aux_loss_enabled: |
| return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, outputs.aux_loss) |
| else: |
| return (chosen_logps, rejected_logps, chosen_logits, rejected_logits) |
|
|
| def _get_udm_weight(self, rejected_embeddings: torch.FloatTensor) -> torch.FloatTensor: |
| prob_desirable = self._get_chosen_prob(rejected_embeddings) |
| min_ratio = self.args.min_density_ratio |
| max_ratio = self.args.max_density_ratio |
|
|
| weight = (prob_desirable / (1 - prob_desirable + 1e-8)).clamp(min=min_ratio, max=max_ratio) |
|
|
| return weight |
|
|
| def bco_loss( |
| self, |
| policy_chosen_logps: torch.FloatTensor, |
| policy_rejected_logps: torch.FloatTensor, |
| reference_chosen_logps: torch.FloatTensor, |
| reference_rejected_logps: torch.FloatTensor, |
| chosen_embeddings: Optional[torch.FloatTensor], |
| rejected_embeddings: Optional[torch.FloatTensor], |
| do_train: bool = True, |
| ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: |
| """Compute the BCO loss for a batch of policy and reference model log probabilities. |
| |
| Args: |
| policy_chosen_logps: |
| Log probabilities of the policy model for the chosen responses. Shape: (num(chosen) in batch_size,) |
| policy_rejected_logps: |
| Log probabilities of the policy model for the rejected responses. Shape: (num(rejected) in batch_size,) |
| reference_chosen_logps: |
| Log probabilities of the reference model for the chosen responses. Shape: (num(chosen) in batch_size,) |
| reference_rejected_logps: |
| Log probabilities of the reference model for the rejected responses. Shape: (num(rejected) in |
| batch_size,) |
| chosen_embeddings: embeddings of desirable prompts |
| rejected_embeddings: embeddings of undesirable prompts |
| do_train: whether to update the running delta value. Default is True. |
| |
| Returns: |
| A tuple of four tensors: (losses, chosen_rewards, rejected_rewards, delta). The losses tensor contains the |
| BCO loss for each example in the batch. The chosen_rewards and rejected_rewards tensors contain the rewards |
| for the chosen and rejected responses, respectively. The delta value contains the moving average of all |
| implicit rewards. |
| """ |
|
|
| chosen_logratios = policy_chosen_logps - reference_chosen_logps |
| chosen_rewards = self.beta * chosen_logratios |
|
|
| rejected_logratios = policy_rejected_logps - reference_rejected_logps |
| rejected_rewards = self.beta * rejected_logratios |
|
|
| if do_train: |
| self.running.update(torch.cat((chosen_rewards, rejected_rewards), 0).detach()) |
| delta = torch.as_tensor(self.running.mean, device=chosen_rewards.device) |
|
|
| chosen_losses = -F.logsigmoid(chosen_rewards - delta) |
| rejected_losses = -F.logsigmoid(-(rejected_rewards - delta)) |
|
|
| if self.match_underlying_distribution: |
| chosen_weight = torch.ones_like(chosen_losses) |
| rejected_weight = self._get_udm_weight(rejected_embeddings) |
|
|
| losses = torch.cat((chosen_weight * chosen_losses, rejected_weight * rejected_losses), dim=0) |
| else: |
| losses = torch.cat((chosen_losses, rejected_losses), dim=0) |
|
|
| return losses, chosen_rewards, rejected_rewards, delta |
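|
| # Hedged recap of the loss above (notation only, no new behavior): with implicit reward |
| #   r = beta * (log pi(y|x) - log pi_ref(y|x)) and delta = running mean of all rewards, |
| # each chosen example contributes -log sigmoid(r - delta) and each rejected example |
| # contributes -log sigmoid(delta - r). E.g. beta=0.1, log-ratio=2.0, delta=0.05 gives r=0.2 |
| # and a chosen loss of -log sigmoid(0.15) ≈ 0.62. |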
|
|
| def get_batch_loss_metrics( |
| self, |
| model, |
| batch: dict[str, Union[list, torch.LongTensor]], |
| do_train: bool = True, |
| ): |
| """Compute the BCO loss and other metrics for the given batch of inputs for train or test.""" |
| metrics = {} |
| batch = {k: (v.to(self.accelerator.device) if isinstance(v, torch.Tensor) else v) for k, v in batch.items()} |
|
|
| forward_output = self.forward(model, batch) |
| ( |
| policy_chosen_logps, |
| policy_rejected_logps, |
| policy_chosen_logits, |
| policy_rejected_logits, |
| ) = forward_output[:4] |
| if self.aux_loss_enabled: |
| aux_loss = forward_output[4] |
|
|
| # Use precomputed reference log-probs when the dataset provides them; otherwise run the |
| # reference model (or the policy with adapters disabled, when no separate ref model exists) |
| if "reference_logps" in batch: |
| chosen_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is True] |
| rejected_idx = [i for i in range(batch["reference_logps"].shape[0]) if batch["label"][i] is False] |
|
|
| reference_chosen_logps = batch["reference_logps"][chosen_idx, ...] |
| reference_rejected_logps = batch["reference_logps"][rejected_idx, ...] |
| else: |
| with torch.no_grad(): |
| if self.ref_model is None: |
| with self.null_ref_context(): |
| ( |
| reference_chosen_logps, |
| reference_rejected_logps, |
| _, |
| _, |
| ) = self.forward(self.model, batch)[:4] |
| else: |
| ( |
| reference_chosen_logps, |
| reference_rejected_logps, |
| _, |
| _, |
| ) = self.forward(self.ref_model, batch)[:4] |
|
|
| chosen_embeddings, rejected_embeddings = self._get_prompt_embeddings(batch) |
|
|
| losses, chosen_rewards, rejected_rewards, delta = self.bco_loss( |
| policy_chosen_logps, |
| policy_rejected_logps, |
| reference_chosen_logps, |
| reference_rejected_logps, |
| chosen_embeddings, |
| rejected_embeddings, |
| do_train=do_train, |
| ) |
| metrics["delta"] = self.accelerator.gather_for_metrics(delta).mean().item() |
|
|
| num_chosen = torch.Tensor([len(chosen_rewards)]).to(self.accelerator.device) |
| num_rejected = torch.Tensor([len(rejected_rewards)]).to(self.accelerator.device) |
|
|
| all_num_chosen = self.accelerator.gather_for_metrics(num_chosen).sum().item() |
| all_num_rejected = self.accelerator.gather_for_metrics(num_rejected).sum().item() |
|
|
| if all_num_chosen > 0: |
| metrics["rewards/chosen_sum"] = ( |
| self.accelerator.gather_for_metrics(chosen_rewards.nansum()).nansum().item() |
| ) |
| metrics["logps/chosen_sum"] = ( |
| self.accelerator.gather_for_metrics(policy_chosen_logps.nansum()).nansum().item() |
| ) |
| metrics["logits/chosen_sum"] = ( |
| self.accelerator.gather_for_metrics(policy_chosen_logits.nansum()).nansum().item() |
| ) |
| metrics["count/chosen"] = all_num_chosen |
|
|
| if all_num_rejected > 0: |
| metrics["rewards/rejected_sum"] = ( |
| self.accelerator.gather_for_metrics(rejected_rewards.nansum()).nansum().item() |
| ) |
| metrics["logps/rejected_sum"] = ( |
| self.accelerator.gather_for_metrics(policy_rejected_logps.nansum()).nansum().item() |
| ) |
| metrics["logits/rejected_sum"] = ( |
| self.accelerator.gather_for_metrics(policy_rejected_logits.nansum()).nansum().item() |
| ) |
| metrics["count/rejected"] = all_num_rejected |
|
|
| loss = losses.nanmean() |
| if self.aux_loss_enabled: |
| loss += self.aux_loss_coef * aux_loss |
|
|
| return loss, metrics |
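|
| # Hedged note on the metrics dict built above: per batch it can hold "delta", per-side counts |
| # ("count/chosen", "count/rejected") and per-side sums ("rewards/*_sum", "logps/*_sum", |
| # "logits/*_sum"); log() later divides the gathered sums by the gathered counts to report |
| # per-example averages. |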
|
|
| def compute_loss( |
| self, |
| model: Union[PreTrainedModel, nn.Module], |
| inputs: dict[str, Union[torch.Tensor, Any]], |
| return_outputs=False, |
| num_items_in_batch=None, |
| ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]: |
| compute_loss_context_manager = ( |
| autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() |
| ) |
|
|
| with compute_loss_context_manager: |
| loss, metrics = self.get_batch_loss_metrics(model, inputs) |
|
|
| # Move the loss back to args.device so the Trainer accumulates losses on one device |
| loss = loss.to(self.args.device) |
| # Force-log the metrics from the main process only |
| if self.accelerator.is_main_process: |
| self.store_metrics(metrics, train_eval="train") |
|
|
| if return_outputs: |
| return (loss, metrics) |
| return loss |
|
|
| def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None: |
| for key, value in metrics.items(): |
| self._stored_metrics[train_eval][key].append(value) |
|
|
| def _get_train_sampler(self, dataset: Optional[Dataset] = None) -> Optional[torch.utils.data.Sampler]: |
| if dataset is None: |
| dataset = self.train_dataset |
| if dataset is None or not has_length(dataset): |
| return None |
| # Keep dataset order stable (BCO precomputes per-example reference log-probs in order) |
| return SequentialSampler(dataset) |
|
|
| def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[str, str]: |
| """Generate samples from the model and reference model for the given batch of inputs.""" |
|
|
| # If `generate_during_eval` is used with PEFT + bf16, generation must run under the same |
| # autocast context as training so the cast adapter weights are handled consistently |
| generate_context_manager = ( |
| autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() |
| ) |
| with generate_context_manager: |
| policy_output = model.generate( |
| input_ids=batch["prompt_input_ids"], |
| attention_mask=batch["prompt_attention_mask"], |
| max_length=self.max_length, |
| do_sample=True, |
| pad_token_id=self.processing_class.pad_token_id, |
| ) |
|
|
| # Prefer a precomputed reference output if the batch provides one; otherwise generate it |
| if "reference_output" in batch: |
| reference_output = batch["reference_output"] |
| else: |
| if self.ref_model is None: |
| with self.null_ref_context(): |
| reference_output = self.model.generate( |
| input_ids=batch["prompt_input_ids"], |
| attention_mask=batch["prompt_attention_mask"], |
| max_length=self.max_length, |
| do_sample=True, |
| pad_token_id=self.processing_class.pad_token_id, |
| ) |
| else: |
| reference_output = self.ref_model.generate( |
| input_ids=batch["prompt_input_ids"], |
| attention_mask=batch["prompt_attention_mask"], |
| max_length=self.max_length, |
| do_sample=True, |
| pad_token_id=self.processing_class.pad_token_id, |
| ) |
|
|
| policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id) |
| policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True) |
|
|
| reference_output = pad_to_length(reference_output, self.max_length, self.processing_class.pad_token_id) |
| reference_output_decoded = self.processing_class.batch_decode(reference_output, skip_special_tokens=True) |
|
|
| return policy_output_decoded, reference_output_decoded |
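|
| # Hedged note: both outputs are sampled (do_sample=True) and padded to self.max_length so the |
| # decoded policy and reference completions can be compared row-by-row in the eval "game log". |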
|
|
| def prediction_step( |
| self, |
| model: Union[PreTrainedModel, nn.Module], |
| inputs: dict[str, Union[torch.Tensor, Any]], |
| prediction_loss_only: bool, |
| ignore_keys: Optional[list[str]] = None, |
| ): |
| if ignore_keys is None: |
| if hasattr(model, "config"): |
| ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", []) |
| else: |
| ignore_keys = [] |
|
|
| prediction_context_manager = ( |
| autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() |
| ) |
| with torch.no_grad(), prediction_context_manager: |
| loss, metrics = self.get_batch_loss_metrics(model, inputs, do_train=False) |
|
|
| # Force-log the eval metrics from the main process only |
| if self.accelerator.is_main_process: |
| self.store_metrics(metrics, train_eval="eval") |
|
|
| if prediction_loss_only: |
| return (loss.detach(), None, None) |
|
|
| # Expose the summed chosen/rejected logits so the generic evaluation loop can gather them |
| logits_dict = {} |
| if "logits/chosen_sum" in metrics: |
| logits_dict["eval_logits/chosen"] = metrics["logits/chosen_sum"] |
| if "logits/rejected_sum" in metrics: |
| logits_dict["eval_logits/rejected"] = metrics["logits/rejected_sum"] |
| logits = [v for k, v in logits_dict.items() if k not in ignore_keys] |
| logits = torch.tensor(logits, device=self.accelerator.device) |
| labels = torch.zeros(logits.shape[0], device=self.accelerator.device) |
|
|
| return (loss.detach(), logits, labels) |
|
|
| def evaluation_loop( |
| self, |
| dataloader: DataLoader, |
| description: str, |
| prediction_loss_only: Optional[bool] = None, |
| ignore_keys: Optional[list[str]] = None, |
| metric_key_prefix: str = "eval", |
| ) -> EvalLoopOutput: |
| """ |
| Overriding built-in evaluation loop to store metrics for each batch. Prediction/evaluation loop, shared by |
| `Trainer.evaluate()` and `Trainer.predict()`. |
| |
| Works with or without labels. |
| """ |
|
|
| # Sample and log one batch of generations (a "game log"), kept to a single batch to save time |
| if self.generate_during_eval: |
| # Draw random indices across the whole eval dataset |
| num_samples = len(dataloader.dataset) |
| random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) |
|
|
| # Select the random batch directly from the dataset rather than iterating the DataLoader |
| random_batch_dataset = dataloader.dataset.select(random_indices) |
| random_batch = self.data_collator(random_batch_dataset) |
| random_batch = self._prepare_inputs(random_batch) |
|
| # Generate only for the undesirable (label == False) prompts |
| target_labels = torch.tensor(random_batch["label"], dtype=torch.bool, device=self.accelerator.device) |
| target_indices = torch.where(~target_labels)[0] |
| target_batch = { |
| "prompt_input_ids": random_batch["prompt_input_ids"][target_indices], |
| "prompt_attention_mask": random_batch["prompt_attention_mask"][target_indices], |
| "prompt": itemgetter(*target_indices)(random_batch["prompt"]), |
| } |
| policy_output_decoded, ref_output_decoded = self.generate_from_model_and_ref(self.model, target_batch) |
|
|
| table = pd.DataFrame( |
| columns=["Prompt", "Policy", "Ref Model"], |
| data=[ |
| [prompt, pol[len(prompt) :], ref[len(prompt) :]] |
| for prompt, pol, ref in zip(target_batch["prompt"], policy_output_decoded, ref_output_decoded) |
| ], |
| ) |
| if "wandb" in self.args.report_to: |
| wandb.log({"game_log": wandb.Table(data=table)}) |
|
|
| if "comet_ml" in self.args.report_to: |
| log_table_to_comet_experiment( |
| name="game_log.csv", |
| table=table, |
| ) |
|
|
| # Run the standard Trainer evaluation loop |
| initial_output = super().evaluation_loop( |
| dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix |
| ) |
|
|
| return initial_output |
|
|
| def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None: |
| """ |
| Log `logs` on the various objects watching training, including stored metrics. |
| |
| Args: |
| logs (`dict[str, float]`): |
| The values to log. |
| start_time (`float`, *optional*): |
| Start time of the training. |
| """ |
| # logs carries either "loss" (train) or "eval_loss" (eval) |
| train_eval = "train" if "loss" in logs else "eval" |
| # train metrics get no prefix; eval metrics are prefixed with "eval_" |
| prefix = "eval_" if train_eval == "eval" else "" |
| # Reduce the accumulated per-batch sums and counts into per-example averages |
| for split in ["chosen", "rejected"]: |
| if f"count/{split}" in self._stored_metrics[train_eval]: |
| count_sum = torch.Tensor(self._stored_metrics[train_eval][f"count/{split}"]).sum().item() |
| for metric in ["rewards", "logps", "logits"]: |
| logs[f"{prefix}{metric}/{split}"] = ( |
| torch.Tensor(self._stored_metrics[train_eval][f"{metric}/{split}_sum"]).sum().item() |
| / count_sum |
| ) |
| # Drop the consumed sum/count entries |
| del self._stored_metrics[train_eval][f"{metric}/{split}_sum"] |
| del self._stored_metrics[train_eval][f"count/{split}"] |
| # Reward margin = chosen reward - rejected reward |
| if f"{prefix}rewards/chosen" in logs and f"{prefix}rewards/rejected" in logs: |
| logs[f"{prefix}rewards/margins"] = logs[f"{prefix}rewards/chosen"] - logs[f"{prefix}rewards/rejected"] |
| # Average whatever stored metrics remain into the logs |
| for key, metrics in self._stored_metrics[train_eval].items(): |
| logs[f"{prefix}{key}"] = torch.Tensor(metrics).mean().item() |
| del self._stored_metrics[train_eval] |
| return super().log(logs, start_time) |
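|
| # Hedged worked example (made-up numbers): two stored batches with rewards/chosen_sum = [3.0, 5.0] |
| # and count/chosen = [4, 4] reduce above to rewards/chosen = (3.0 + 5.0) / 8 = 1.0, after which |
| # the per-batch sums and counts are deleted. |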
|
|
| # Ensure a model card accompanies each saved checkpoint |
| def _save_checkpoint(self, model, trial): |
| if self.args.hub_model_id is None: |
| model_name = Path(self.args.output_dir).name |
| else: |
| model_name = self.args.hub_model_id.split("/")[-1] |
| self.create_model_card(model_name=model_name) |
| super()._save_checkpoint(model, trial) |
|
|
| class UnslothBCOTrainer(_UnslothBCOTrainer): |
| """ |
| |
| Initialize BCOTrainer from the [BCO](https://huggingface.co/papers/2404.04656) paper. |
| |
| Args: |
| model ([`~transformers.PreTrainedModel`]): |
| The model to train, preferably an [`~transformers.AutoModelForCausalLM`]. |
| ref_model ([`PreTrainedModelWrapper`]): |
| Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation |
| and loss. If no reference model is provided, the trainer will create a reference model with the same |
| architecture as the model to be optimized. |
| args ([`BCOConfig`]): |
| The arguments to use for training. |
| train_dataset ([`~datasets.Dataset`]): |
| The dataset to use for training. |
| eval_dataset ([`~datasets.Dataset`]): |
| The dataset to use for evaluation. |
| processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*): |
| Processing class used to process the data. If provided, will be used to automatically process the inputs |
| for the model, and it will be saved along the model to make it easier to rerun an interrupted training or |
| reuse the fine-tuned model. |
| data_collator ([`~transformers.DataCollator`], *optional*): |
| The data collator to use for training. If `None`, the default data collator |
| ([`DPODataCollatorWithPadding`]) is used; it pads the sequences to the maximum sequence length |
| in the batch, given a dataset of paired sequences. |
| model_init (`Callable[[], transformers.PreTrainedModel]`): |
| The model initializer to use for training. If None is specified, the default model initializer will be |
| used. |
| callbacks (`list[transformers.TrainerCallback]`): |
| The callbacks to use for training. |
| optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): |
| The optimizer and scheduler to use for training. |
| preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): |
| The function to use to preprocess the logits before computing the metrics. |
| peft_config (`dict`, defaults to `None`): |
| The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in |
| a PEFT model. |
| compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*): |
| The function to use to compute the metrics. Must take an `EvalPrediction` and return a dictionary mapping |
| metric names (strings) to metric values. |
| model_adapter_name (`str`, defaults to `None`): |
| Name of the train target PEFT adapter, when using LoRA with multiple adapters. |
| ref_adapter_name (`str`, defaults to `None`): |
| Name of the reference PEFT adapter, when using LoRA with multiple adapters. |
| |
| """ |
| def __init__( |
| self, |
| model = None, |
| ref_model = None, |
| args = None, |
| train_dataset = None, |
| eval_dataset = None, |
| processing_class = None, |
| data_collator = None, |
| model_init = None, |
| callbacks = None, |
| preprocess_logits_for_metrics = None, |
| peft_config = None, |
| compute_metrics = None, |
| model_adapter_name = None, |
| ref_adapter_name = None, |
| embedding_func = None, |
| embedding_tokenizer = None, |
| **kwargs |
| ): |
| if args is None: args = UnslothBCOConfig() |
| use_bf16 = getattr(args, 'bf16', False) |
| if type(use_bf16) is not bool: use_bf16 = False |
| use_fp16 = getattr(args, 'fp16', False) |
| if type(use_fp16) is not bool: use_fp16 = False |
| force_float32 = False |
| full_finetuning = os.environ.get('UNSLOTH_ENABLE_FULL_FINETUNING', '0') == '1' |
| if not full_finetuning and (os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1'): |
| print('Unsloth: Switching to float32 training since model cannot work with float16') |
| force_float32 = True |
| mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') |
| dtype = getattr(model.config, 'dtype', None) or getattr(model.config, 'torch_dtype', None) |
| if dtype is None: dtype = model.get_input_embeddings().weight.dtype |
| from unsloth_zoo.utils import _get_dtype |
| dtype = _get_dtype(dtype) |
| float16 = dtype == torch.float16 |
| if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`') |
| if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`') |
| if force_float32: |
| |
| args.fp16 = False |
| args.bf16 = False |
| os.environ['ACCELERATE_MIXED_PRECISION'] = 'no' |
| if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no' |
| |
| elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32': |
| |
| args.fp16 = float16 |
| args.bf16 = not float16 |
| os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16' |
| if hasattr(args, 'mixed_precision'): args.mixed_precision = 'fp16' if float16 else 'bf16' |
| |
| elif mixed_precision_dtype == 'bfloat16': |
| |
| args.fp16 = False |
| args.bf16 = False |
| os.environ['ACCELERATE_MIXED_PRECISION'] = 'no' |
| if hasattr(args, 'mixed_precision'): args.mixed_precision = 'no' |
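|
| # Hedged recap of the precision selection above: force_float32 turns mixed-precision autocast off |
| # entirely; when the user set neither bf16 nor fp16 and UNSLOTH_MIXED_PRECISION is 'float32', the |
| # autocast dtype is derived from the model weights (fp16 weights -> fp16, otherwise bf16); and a |
| # 'bfloat16' UNSLOTH_MIXED_PRECISION disables autocast, presumably because weights are already bf16. |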
| |
| |
| if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no': |
| args.eval_strategy = 'steps' |
| if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1 |
| ga_steps = getattr(args, 'gradient_accumulation_steps', None) |
| if ga_steps is not None and ga_steps > 1: |
| from transformers import __version__ as transformers_version |
| if Version(transformers_version) <= Version('4.45.2'): |
| print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n' |
| '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`') |
| if getattr(args, 'eval_strategy', 'no') != 'no': |
| eval_bsz = getattr(args, 'per_device_eval_batch_size', 8) |
| if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size |
| if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps |
| fp16_full_eval = getattr(args, 'fp16_full_eval', False) |
| if type(fp16_full_eval) is not bool: fp16_full_eval = False |
| bf16_full_eval = getattr(args, 'bf16_full_eval', False) |
| if type(bf16_full_eval) is not bool: bf16_full_eval = False |
| if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True |
| if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False |
| if force_float32: |
| args.bf16_full_eval = False |
| args.fp16_full_eval = False |
| elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16': |
| args.bf16_full_eval = True |
| args.fp16_full_eval = False |
| elif not bf16_full_eval and not fp16_full_eval: |
| args.bf16_full_eval = args.bf16 |
| args.fp16_full_eval = args.fp16 |
| _output_logits = False |
| if locals().get('compute_metrics', None) is not None: _output_logits = True |
| if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True |
| if _output_logits: |
| os.environ['UNSLOTH_RETURN_LOGITS'] = '1' |
| if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'): |
| pass |
| else: |
| model_max_seq_length = getattr(model, 'max_seq_length', None) |
| args_max_seq_length = getattr(args, 'max_seq_length', None) |
| if args_max_seq_length is None and model_max_seq_length is not None: |
| max_seq_length = model.max_seq_length |
| if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length |
| elif args_max_seq_length is not None and model_max_seq_length is not None: |
| if args_max_seq_length > model_max_seq_length: |
| print('Unsloth: You set `max_seq_length` as ' + str(args_max_seq_length) + ' but ' |
| 'the maximum the model supports is ' + str(model_max_seq_length) + '. We shall reduce it.') |
| args.max_seq_length = model_max_seq_length |
| if model is not None and hasattr(model, 'for_training'): |
| model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True)) |
| if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right' |
| if 'processing_class' in locals(): |
| if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right' |
| if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right' |
| __tokenizer = processing_class if 'processing_class' in locals() else tokenizer |
| from unsloth_zoo.vision_utils import UnslothVisionDataCollator |
| if not isinstance(data_collator, UnslothVisionDataCollator): |
| if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names: |
| data_collator = TransformersDataCollatorForLanguageModeling( |
| __tokenizer, |
| mlm = False, |
| mlm_probability = 0.0, |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), |
| ) |
| elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names: |
| data_collator = DataCollatorForSeq2Seq( |
| __tokenizer, |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), |
| ) |
| else: |
| if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False |
| if hasattr(args, 'dataset_text_field'): args.dataset_text_field = '' |
| if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True} |
| if not isinstance(data_collator, UnslothVisionDataCollator): |
| if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'): |
| if isinstance(data_collator, DataCollatorForSeq2Seq): |
| data_collator = DataCollatorForSeq2Seq( |
| __tokenizer.tokenizer, |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), |
| ) |
| else: |
| data_collator = TransformersDataCollatorForLanguageModeling( |
| __tokenizer.tokenizer, |
| mlm = False, |
| mlm_probability = 0.0, |
| pad_to_multiple_of = getattr(args, 'pad_to_multiple_of', None), |
| ) |
| other_metrics = [] |
| |
| from unsloth_zoo.logging_utils import PatchRLStatistics |
| PatchRLStatistics('bco_trainer', other_metrics) |
| |
| |
| |
| if getattr(args, "parallel_mode", None) == ParallelMode.NOT_DISTRIBUTED and args.n_gpu > 1: |
| if getattr(args, "_n_gpu", 1) != 1: |
| args._n_gpu = 1 |
| if "model" in locals() and hasattr(model, "for_training"): |
| model.for_training(use_gradient_checkpointing=getattr(args, 'gradient_checkpointing', True)) |
| super().__init__( |
| model = model, |
| ref_model = ref_model, |
| args = args, |
| train_dataset = train_dataset, |
| eval_dataset = eval_dataset, |
| processing_class = processing_class, |
| data_collator = data_collator, |
| model_init = model_init, |
| callbacks = callbacks, |
| preprocess_logits_for_metrics = preprocess_logits_for_metrics, |
| peft_config = peft_config, |
| compute_metrics = compute_metrics, |
| model_adapter_name = model_adapter_name, |
| ref_adapter_name = ref_adapter_name, |
| embedding_func = embedding_func, |
| embedding_tokenizer = embedding_tokenizer, **kwargs) |
| if "model" in locals() and hasattr(model, "for_inference"): |
| model.for_inference() |
| if hasattr(self, 'neftune_hook_handle'): |
| self.neftune_hook_handle.remove() |
| if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle |
| if getattr(args, 'neftune_noise_alpha', None) is not None: |
| model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha |
| pass |
| if hasattr(self, 'accelerator'): |
| scaler = self.accelerator.scaler |
| current_model = model |
| while hasattr(current_model, 'model'): |
| current_model.accelerator_scaler = scaler |
| current_model = current_model.model |
| current_model.accelerator_scaler = scaler |
| pass |
| if hasattr(self, 'train'): |
| self.train = MethodType(prepare_for_training_mode(self.__class__.train), self) |
| pass |
| if hasattr(self, 'llm') and self.llm is not None and hasattr(self.llm, 'get_tokenizer'): |
| _vllm_tok = self.llm.get_tokenizer() |
| _pc = getattr(self, 'processing_class', None) or getattr(self, 'tokenizer', None) |
| if _vllm_tok is not None and _pc is not None and getattr(_pc, 'chat_template', None) is not None and getattr(_vllm_tok, 'chat_template', None) is None: |
| _vllm_tok.chat_template = _pc.chat_template |
| pass |
| |
| pass |
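|
|
| # Hedged usage sketch (everything except UnslothBCOTrainer/UnslothBCOConfig is an illustrative |
| # assumption, not defined in this file): |
| # |
| #   from unsloth import FastLanguageModel |
| #   model, tokenizer = FastLanguageModel.from_pretrained("unsloth/llama-3-8b-bnb-4bit") |
| #   trainer = UnslothBCOTrainer( |
| #       model = model, |
| #       args = UnslothBCOConfig(output_dir = "outputs", beta = 0.1), |
| #       processing_class = tokenizer, |
| #       train_dataset = dataset,  # unpaired prompt/completion/label rows |
| #   ) |
| #   trainer.train() |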
|
|
|
|
| if hasattr(logger, "addFilter"): |
| import logging |
| class HideLoggingMessage(logging.Filter): |
| def __init__(self, text): self.text = text |
| def filter(self, x): return not (self.text in x.getMessage()) |
| pass |
| logger.addFilter(HideLoggingMessage("`use_cache=True`")) |
|
|
|
|