# /// script
# dependencies = ["trl>=0.12.0", "peft>=0.7.0", "datasets>=2.18.0", "trackio"]
# ///
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
import trackio

model_id = "Qwen/Qwen2.5-0.5B"

# Small subset for a quick test run
dataset = load_dataset("trl-lib/Capybara", split="train")
dataset = dataset.shuffle(seed=42).select(range(500))

# Train/eval split for monitoring
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)

peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    model=model_id,
    train_dataset=dataset_split["train"],
    eval_dataset=dataset_split["test"],
    peft_config=peft_config,
    args=SFTConfig(
        output_dir="qwen2.5-0.5b-sft-demo",
        push_to_hub=True,
        hub_model_id="davidsmts/qwen2.5-0.5b-sft-demo",
        hub_strategy="every_save",
        num_train_epochs=1,
        per_device_train_batch_size=1,
        gradient_accumulation_steps=8,
        max_length=512,
        eval_strategy="steps",
        eval_steps=50,
        save_strategy="steps",
        save_steps=100,
        logging_steps=10,
        report_to="trackio",
        project="qwen2.5-sft-demo",
        run_name="qwen2.5-0.5b-capybara",
    ),
)

trainer.train()
trainer.push_to_hub()
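
# --- Usage sketch (not executed as part of the training run) ---
# The "# /// script" header above is PEP 723 inline metadata, so uv can
# resolve the listed dependencies and run this file directly. Assuming the
# file is saved as sft_demo.py (a hypothetical filename):
#
#   uv run sft_demo.py
#
# Once training finishes, the pushed LoRA adapter can be loaded back for a
# quick smoke test. A minimal sketch, assuming the hub_model_id above is
# unchanged and the tokenizer was pushed alongside the adapter:
#
#   from peft import AutoPeftModelForCausalLM
#   from transformers import AutoTokenizer
#
#   model = AutoPeftModelForCausalLM.from_pretrained("davidsmts/qwen2.5-0.5b-sft-demo")
#   tokenizer = AutoTokenizer.from_pretrained("davidsmts/qwen2.5-0.5b-sft-demo")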