import json
import logging
import os
import signal
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Dict, Optional

import torch
import transformers
from peft import PeftModel
from transformers import PreTrainedModel, ProcessorMixin, TrainerCallback
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, has_length
from transformers.utils import (
    SAFE_WEIGHTS_NAME,
    WEIGHTS_NAME,
    is_safetensors_available,
)

from llamafactory.extras.constants import TRAINER_LOG, V_HEAD_SAFE_WEIGHTS_NAME, V_HEAD_WEIGHTS_NAME
from llamafactory.extras.logging import LoggerHandler, get_logger


if is_safetensors_available():
    from safetensors import safe_open
    from safetensors.torch import save_file


if TYPE_CHECKING:
    from transformers import TrainerControl, TrainerState, TrainingArguments
    from trl import AutoModelForCausalLMWithValueHead


logger = get_logger(__name__)


def fix_valuehead_checkpoint(
    model: "AutoModelForCausalLMWithValueHead", output_dir: str, safe_serialization: bool
) -> None:
    r"""
    Fixes the value-head checkpoint by splitting it into decoder weights and
    `v_head` weights. The model is already unwrapped.

    There are three cases:
    1. full tuning without ds_zero3: state_dict = {"model.layers.*": ..., "v_head.summary.*": ...}
    2. lora tuning without ds_zero3: state_dict = {"v_head.summary.*": ...}
    3. under deepspeed zero3: state_dict = {"pretrained_model.model.layers.*": ..., "v_head.summary.*": ...}

    We assume `stage3_gather_16bit_weights_on_model_save=true`.
    """
    if not isinstance(model.pretrained_model, (PreTrainedModel, PeftModel)):
        return

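    # Load the merged state dict that the Trainer has just written to disk.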
    if safe_serialization:
        path_to_checkpoint = os.path.join(output_dir, SAFE_WEIGHTS_NAME)
        with safe_open(path_to_checkpoint, framework="pt", device="cpu") as f:
            state_dict: Dict[str, torch.Tensor] = {key: f.get_tensor(key) for key in f.keys()}
    else:
        path_to_checkpoint = os.path.join(output_dir, WEIGHTS_NAME)
        state_dict: Dict[str, torch.Tensor] = torch.load(path_to_checkpoint, map_location="cpu")

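    # Split the state dict: `v_head.*` tensors go to the value head file; everything
    # else (with any DeepSpeed ZeRO-3 "pretrained_model." prefix stripped) is decoder weight.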
    decoder_state_dict = {}
    v_head_state_dict = {}
    for name, param in state_dict.items():
        if name.startswith("v_head."):
            v_head_state_dict[name] = param
        else:
            decoder_state_dict[name.replace("pretrained_model.", "")] = param

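    # Replace the merged checkpoint with a decoder-only one; for LoRA tuning the
    # decoder dict is empty, so `None` lets save_pretrained use the model's own weights.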
    os.remove(path_to_checkpoint)
    model.pretrained_model.save_pretrained(
        output_dir, state_dict=decoder_state_dict or None, safe_serialization=safe_serialization
    )

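    # Save the value head weights to a separate file so they can be reloaded later.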
    if safe_serialization:
        save_file(v_head_state_dict, os.path.join(output_dir, V_HEAD_SAFE_WEIGHTS_NAME), metadata={"format": "pt"})
    else:
        torch.save(v_head_state_dict, os.path.join(output_dir, V_HEAD_WEIGHTS_NAME))

    logger.info("Value head model saved at: {}".format(output_dir))
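
# A minimal usage sketch (paths are hypothetical): after the Trainer writes a
# checkpoint for an `AutoModelForCausalLMWithValueHead`, split it in place:
#
#     fix_valuehead_checkpoint(unwrapped_model, "outputs/checkpoint-100", safe_serialization=True)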


class FixValueHeadModelCallback(TrainerCallback):
    def on_save(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
        r"""
        Event called after a checkpoint save.
        """
        if args.should_save:
            # Only the saving rank runs the fix: the local main process when saving
            # on each node, otherwise the global main process.
            is_saving_rank = args.local_process_index == 0 if args.save_on_each_node else args.process_index == 0
            if is_saving_rank:
                fix_valuehead_checkpoint(
                    model=kwargs.pop("model"),
                    output_dir=os.path.join(args.output_dir, "{}-{}".format(PREFIX_CHECKPOINT_DIR, state.global_step)),
                    safe_serialization=args.save_safetensors,
                )
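
# A hedged usage sketch (assuming a `trainer` built elsewhere): register the callback
# so every checkpoint save is post-processed:
#
#     trainer.add_callback(FixValueHeadModelCallback())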