import torch
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import SFTTrainer

# Load the Dolly dataset, already converted to the OpenAI messages (ChatML) format
dataset = load_dataset("philschmid/dolly-15k-oai-style", split="train")

# Hugging Face model id and a Gemma tokenizer prepared with the ChatML chat template
model_id = "google/gemma-7b"
tokenizer_id = "philschmid/gemma-tokenizer-chatml"

# Load the model in bfloat16 with Flash Attention 2 (requires an Ampere or newer
# GPU and the flash-attn package; drop attn_implementation to use the default)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    attn_implementation="flash_attention_2",
    torch_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained(tokenizer_id)
tokenizer.padding_side = "right"  # pad on the right to prevent warnings during training
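
# Optional sanity check (a quick sketch, not part of the original script): render
# one training example with the ChatML template. Assumes the dataset stores each
# conversation in a "messages" column of {"role", "content"} dicts.
print(tokenizer.apply_chat_template(dataset[0]["messages"], tokenize=False))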

# LoRA config: low-rank adapters on every linear layer
peft_config = LoraConfig(
    lora_alpha=8,
    lora_dropout=0.05,
    r=16,
    bias="none",
    target_modules="all-linear",  # attach adapters to all linear layers
    task_type="CAUSAL_LM",
)

args = TrainingArguments(
    output_dir="gemma-7b-dolly-chatml",  # local directory and Hub repository id
    num_train_epochs=3,                  # number of training epochs
    per_device_train_batch_size=8,       # batch size per device during training
    gradient_checkpointing=True,         # recompute activations to save memory
    optim="adamw_torch_fused",           # fused AdamW optimizer
    logging_steps=10,                    # log metrics every 10 steps
    save_strategy="epoch",               # save a checkpoint every epoch
    bf16=True,                           # train in bfloat16 precision
    tf32=True,                           # enable tf32 matmuls on Ampere GPUs
    learning_rate=2e-4,                  # learning rate, based on the QLoRA paper
    max_grad_norm=0.3,                   # gradient clipping, based on the QLoRA paper
    warmup_ratio=0.03,                   # warmup ratio, based on the QLoRA paper
    lr_scheduler_type="constant",        # keep the learning rate constant after warmup
    report_to="tensorboard",             # log metrics to TensorBoard
    push_to_hub=True,                    # push checkpoints to the Hugging Face Hub
)

# Maximum sequence length for the model and for packing the dataset
max_seq_length = 1512

trainer = SFTTrainer(
    model=model,
    args=args,
    train_dataset=dataset,
    peft_config=peft_config,
    max_seq_length=max_seq_length,
    tokenizer=tokenizer,
    packing=True,                      # pack several short examples into one sequence
    dataset_kwargs={
        "add_special_tokens": False,   # the chat template already adds special tokens
        "append_concat_token": False,  # no extra separator token needed when packing
    },
)
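
# Optional (a sketch): once SFTTrainer has applied the LoRA config, trainer.model
# is a PeftModel, so we can report how few parameters are actually trained
trainer.model.print_trainable_parameters()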

# Start training; checkpoints land in output_dir and are pushed to the Hub each epoch
trainer.train()

# Save the final LoRA adapter
trainer.save_model()
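
# Optional inference sanity check (a minimal sketch, not part of the original
# script; best run in a fresh process so the training model no longer holds GPU
# memory): reload the saved adapter with PEFT and generate from a ChatML prompt.
from peft import AutoPeftModelForCausalLM
from transformers import pipeline

ft_model = AutoPeftModelForCausalLM.from_pretrained(
    "gemma-7b-dolly-chatml", device_map="auto", torch_dtype=torch.bfloat16
)
pipe = pipeline("text-generation", model=ft_model, tokenizer=tokenizer)

prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "What is the capital of Germany?"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(pipe(prompt, max_new_tokens=128, do_sample=False)[0]["generated_text"])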