import shutil

import torch
from accelerate import PartialState
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    HfArgumentParser,
)

from trl import (
    ModelConfig,
    PPOConfig,
    PPOTrainer,
    ScriptArguments,
    get_kbit_device_map,
    get_peft_config,
    get_quantization_config,
)
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE


"""
python -i examples/scripts/ppo/ppo.py \
    --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \
    --dataset_train_split descriptiveness \
    --learning_rate 3e-6 \
    --output_dir models/minimal/ppo \
    --per_device_train_batch_size 64 \
    --gradient_accumulation_steps 1 \
    --total_episodes 10000 \
    --model_name_or_path EleutherAI/pythia-1b-deduped \
    --missing_eos_penalty 1.0

accelerate launch --config_file examples/accelerate_configs/deepspeed_zero3.yaml \
    examples/scripts/ppo/ppo.py \
    --dataset_name trl-internal-testing/descriptiveness-sentiment-trl-style \
    --dataset_train_split descriptiveness \
    --output_dir models/minimal/ppo \
    --num_ppo_epochs 1 \
    --num_mini_batches 1 \
    --learning_rate 3e-6 \
    --per_device_train_batch_size 1 \
    --gradient_accumulation_steps 16 \
    --total_episodes 10000 \
    --model_name_or_path EleutherAI/pythia-1b-deduped \
    --sft_model_path EleutherAI/pythia-1b-deduped \
    --reward_model_path EleutherAI/pythia-1b-deduped \
    --local_rollout_forward_batch_size 1 \
    --missing_eos_penalty 1.0
"""


if __name__ == "__main__":
    parser = HfArgumentParser((ScriptArguments, PPOConfig, ModelConfig))
    script_args, training_args, model_config = parser.parse_args_into_dataclasses()
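    # remove any stale output directory so each run starts clean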
    shutil.rmtree(training_args.output_dir, ignore_errors=True)

    ################
    # Model & Tokenizer
    ################
    torch_dtype = (
        model_config.torch_dtype
        if model_config.torch_dtype in ["auto", None]
        else getattr(torch, model_config.torch_dtype)
    )
    quantization_config = get_quantization_config(model_config)
    model_kwargs = dict(
        revision=model_config.model_revision,
        attn_implementation=model_config.attn_implementation,
        torch_dtype=torch_dtype,
        device_map=get_kbit_device_map() if quantization_config is not None else None,
        quantization_config=quantization_config,
    )
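    # note: model_kwargs is assembled here but not forwarded to the
    # from_pretrained calls below; pass **model_kwargs explicitly if you want
    # the dtype/quantization settings applied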

    tokenizer = AutoTokenizer.from_pretrained(
        model_config.model_name_or_path,
        padding_side="left",
        trust_remote_code=model_config.trust_remote_code,
    )
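    # prompts are left-padded for generation, so the tokenizer needs a pad token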
    tokenizer.add_special_tokens({"pad_token": "[PAD]"})
    if tokenizer.chat_template is None:
        tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE
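    # the value model is warm-started from the reward model checkpoint; both use
    # a scalar head (num_labels=1)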
    value_model = AutoModelForSequenceClassification.from_pretrained(
        training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1
    )
    reward_model = AutoModelForSequenceClassification.from_pretrained(
        training_args.reward_model_path, trust_remote_code=model_config.trust_remote_code, num_labels=1
    )
    policy = AutoModelForCausalLM.from_pretrained(
        training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code
    )
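
    # under PEFT, the frozen base weights double as the reference policy for the
    # KL penalty, so no separate reference copy is loaded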
    peft_config = get_peft_config(model_config)
    if peft_config is None:
        ref_policy = AutoModelForCausalLM.from_pretrained(
            training_args.sft_model_path, trust_remote_code=model_config.trust_remote_code
        )
    else:
        ref_policy = None

    ################
    # Dataset
    ################
    dataset = load_dataset(script_args.dataset_name, split=script_args.dataset_train_split)
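    # hold out the final 100 examples as a small evaluation split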
    eval_samples = 100
    train_dataset = dataset.select(range(len(dataset) - eval_samples))
    eval_dataset = dataset.select(range(len(dataset) - eval_samples, len(dataset)))
    dataset_text_field = "prompt"

    def prepare_dataset(dataset, tokenizer):
        """pre-tokenize the dataset before training; only collate during training"""

        def tokenize(element):
            outputs = tokenizer(
                element[dataset_text_field],
                padding=False,
            )
            return {"input_ids": outputs["input_ids"]}

        return dataset.map(
            tokenize,
            batched=True,
            remove_columns=dataset.column_names,
            num_proc=training_args.dataset_num_proc,
        )
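
    # tokenize on the local main process first so the cached result is reused
    # by the other ranks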
    with PartialState().local_main_process_first():
        train_dataset = prepare_dataset(train_dataset, tokenizer)
        eval_dataset = prepare_dataset(eval_dataset, tokenizer)

    ################
    # Training
    ################
    trainer = PPOTrainer(
        config=training_args,
        processing_class=tokenizer,
        policy=policy,
        ref_policy=ref_policy,
        reward_model=reward_model,
        value_model=value_model,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        peft_config=peft_config,
    )
    trainer.train()
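
    # save the final policy and optionally push it to the Hub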
    trainer.save_model(training_args.output_dir)
    if training_args.push_to_hub:
        trainer.push_to_hub(dataset_name=script_args.dataset_name)
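
    # sample a batch of completions from the trained policy for quick inspection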
    trainer.generate_completions()