"""
Run the KTO training script with the commands below. In general, the optimal configuration for KTO will be similar to that of DPO.

# Full training:
python examples/scripts/kto.py \
    --dataset_name trl-lib/kto-mix-14k \
    --model_name_or_path=trl-lib/qwen1.5-1.8b-sft \
    --per_device_train_batch_size 16 \
    --num_train_epochs 1 \
    --learning_rate 5e-7 \
    --lr_scheduler_type=cosine \
    --gradient_accumulation_steps 1 \
    --logging_steps 10 \
    --eval_steps 500 \
    --output_dir=kto-aligned-model \
    --warmup_ratio 0.1 \
    --report_to wandb \
    --bf16 \
    --logging_first_step

# QLoRA:
python examples/scripts/kto.py \
    --dataset_name trl-lib/kto-mix-14k \
    --model_name_or_path=trl-lib/qwen1.5-1.8b-sft \
    --per_device_train_batch_size 8 \
    --num_train_epochs 1 \
    --learning_rate 5e-7 \
    --lr_scheduler_type=cosine \
    --gradient_accumulation_steps 1 \
    --logging_steps 10 \
    --eval_steps 500 \
    --output_dir=kto-aligned-model-lora \
    --warmup_ratio 0.1 \
    --report_to wandb \
    --bf16 \
    --logging_first_step \
    --use_peft \
    --load_in_4bit \
    --lora_target_modules=all-linear \
    --lora_r=16 \
    --lora_alpha=16
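
# Dataset format note: kto-mix-14k follows TRL's unpaired preference format, so
# each row has a prompt, one completion, and a boolean desirability label. A
# rough sketch of a single conversational record (illustrative, not an actual
# row from the dataset):
#   {"prompt": [{"role": "user", "content": "..."}],
#    "completion": [{"role": "assistant", "content": "..."}],
#    "label": True}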
"""

from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser

from trl import (
    KTOConfig,
    KTOTrainer,
    ModelConfig,
    ScriptArguments,
    get_peft_config,
    setup_chat_format,
)


if __name__ == "__main__":
    parser = HfArgumentParser((ScriptArguments, KTOConfig, ModelConfig))
    script_args, training_args, model_args = parser.parse_args_into_dataclasses()

    # Load the policy model and an identical frozen copy to act as the KTO reference model
    model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )
    ref_model = AutoModelForCausalLM.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code
    )
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # If the tokenizer has no chat template (i.e. we are aligning a base model),
    # fall back to ChatML via setup_chat_format
    if tokenizer.chat_template is None:
        model, tokenizer = setup_chat_format(model, tokenizer)
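
    # For reference, ChatML (the template setup_chat_format configures) renders
    # a turn roughly as:
    #   <|im_start|>user\nHello!<|im_end|>\n<|im_start|>assistant\n...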

    # Load the dataset (expected to be in TRL's unpaired preference format)
    dataset = load_dataset(script_args.dataset_name)

    # Initialize the KTO trainer
    trainer = KTOTrainer(
        model,
        ref_model,
        args=training_args,
        train_dataset=dataset[script_args.dataset_train_split],
        eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None,
        processing_class=tokenizer,
        peft_config=get_peft_config(model_args),
    )

    # Train the model
    trainer.train()

    # Save the final model and optionally push it to the Hub
    trainer.save_model(training_args.output_dir)
    if training_args.push_to_hub:
        trainer.push_to_hub(dataset_name=script_args.dataset_name)
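
    # A minimal post-training sanity check, left here as a comment sketch rather
    # than executable code (assumes the default output_dir above and a
    # transformers version with chat-template support):
    #
    #   from transformers import AutoModelForCausalLM, AutoTokenizer
    #   model = AutoModelForCausalLM.from_pretrained("kto-aligned-model")
    #   tokenizer = AutoTokenizer.from_pretrained("kto-aligned-model")
    #   input_ids = tokenizer.apply_chat_template(
    #       [{"role": "user", "content": "Hello!"}],
    #       add_generation_prompt=True,
    #       return_tensors="pt",
    #   )
    #   print(tokenizer.decode(model.generate(input_ids, max_new_tokens=64)[0]))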