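"""Reward-model ("rm" stage) training workflow: pairwise preference data, a value-head model, and a ranking trainer."""
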
from typing import TYPE_CHECKING, Optional

from ...data import PairwiseDataCollatorWithPadding, get_dataset, get_template_and_fix_tokenizer
from ...extras.ploting import plot_loss
from ...model import load_model, load_tokenizer
from ..callbacks import fix_valuehead_checkpoint
from ..trainer_utils import create_modelcard_and_push
from .metric import ComputeAccuracy
from .trainer import PairwiseTrainer


if TYPE_CHECKING:
    from transformers import Seq2SeqTrainingArguments, TrainerCallback

    from ...hparams import DataArguments, FinetuningArguments, ModelArguments


def run_rm(
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
    callbacks: Optional[list["TrainerCallback"]] = None,
):
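    """Train a pairwise reward model: score chosen vs. rejected responses and optimize their ranking."""
    # Load the tokenizer, resolve the chat template, and preprocess the preference dataset for the "rm" stage.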
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    template = get_template_and_fix_tokenizer(tokenizer, data_args)
    dataset_module = get_dataset(template, model_args, data_args, training_args, stage="rm", **tokenizer_module)
    model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train, add_valuehead=True)
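    # Batch chosen/rejected pairs together; pad_to_multiple_of=8 keeps sequence lengths tensor-core friendly.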
    data_collator = PairwiseDataCollatorWithPadding(
        template=template, model=model, pad_to_multiple_of=8, **tokenizer_module
    )
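
    # PairwiseTrainer applies a ranking loss over the paired scores; ComputeAccuracy reports how often
    # the chosen response outscores the rejected one.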
    trainer = PairwiseTrainer(
        model=model,
        args=training_args,
        finetuning_args=finetuning_args,
        data_collator=data_collator,
        callbacks=callbacks,
        compute_metrics=ComputeAccuracy(),
        **dataset_module,
        **tokenizer_module,
    )
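
    # Training: optionally resume from a checkpoint; fix_valuehead_checkpoint repairs the saved
    # value-head state dict so the checkpoint can be reloaded.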
    if training_args.do_train:
        train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
        trainer.save_model()
        if training_args.should_save:
            fix_valuehead_checkpoint(model, training_args.output_dir, training_args.save_safetensors)

        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
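        # Plot curves on the main process only; a dict of eval datasets yields per-dataset loss/accuracy keys.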
        if trainer.is_world_process_zero() and finetuning_args.plot_loss:
            keys = ["loss"]
            if isinstance(dataset_module.get("eval_dataset"), dict):
                keys += sum(
                    [[f"eval_{key}_loss", f"eval_{key}_accuracy"] for key in dataset_module["eval_dataset"].keys()], []
                )
            else:
                keys += ["eval_loss", "eval_accuracy"]

            plot_loss(training_args.output_dir, keys=keys)
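
    # Evaluation: report the eval loss and pairwise accuracy on the held-out preference data.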
    if training_args.do_eval:
        metrics = trainer.evaluate(metric_key_prefix="eval")
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
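
    # Prediction: score the eval split and save the predicted rewards alongside the metrics.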
    if training_args.do_predict:
        predict_results = trainer.predict(dataset_module["eval_dataset"], metric_key_prefix="predict")
        trainer.log_metrics("predict", predict_results.metrics)
        trainer.save_metrics("predict", predict_results.metrics)
        trainer.save_predictions(predict_results)
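
    # Write a model card and push the artifacts to the Hub when push_to_hub is enabled.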
    create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args)