# /// script
# dependencies = [
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.36.0",
#     "accelerate>=0.24.0",
#     "datasets>=2.16.0",
#     "trackio",
# ]
# ///
"""Supervised fine-tuning (SFT) of Qwen2.5-0.5B with LoRA, tracked via Trackio."""

import os

import trackio
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig


def main() -> None:
    base_model = "Qwen/Qwen2.5-0.5B"
    # Defaults below can be overridden via environment variables.
    hub_model_id = os.environ.get("HUB_MODEL_ID", "davidsmts/qwen25-0_5b-sft-demo")
    project = os.environ.get("TRACKIO_PROJECT", "qwen25_sft_demo")
    run_name = os.environ.get("TRACKIO_RUN", "qwen25-0_5b-sft-lora")

    print("Loading dataset...")
    dataset = load_dataset("trl-lib/Capybara", split="train")
    print(f"Loaded {len(dataset)} examples")

    # Hold out 10% of the data for periodic evaluation during training.
    print("Creating train/eval split...")
    dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
    train_ds = dataset_split["train"]
    eval_ds = dataset_split["test"]
    print(f"Train {len(train_ds)}, Eval {len(eval_ds)}")

    # Start the Trackio run up front so the config is logged before training.
    # Trackio mirrors the wandb API: the run is named via `name`.
    trackio.init(
        project=project,
        name=run_name,
        config={"model": base_model, "dataset": "trl-lib/Capybara"},
    )

    # LoRA: train low-rank adapters on the attention query/value projections
    # instead of updating all 0.5B base-model parameters.
    peft_config = LoraConfig(
        r=16,
        lora_alpha=32,
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
        target_modules=["q_proj", "v_proj"],
    )

    training_args = SFTConfig(
        output_dir="qwen25-0_5b-sft-demo",
        push_to_hub=True,
        hub_model_id=hub_model_id,
        hub_strategy="every_save",  # upload each saved checkpoint to the Hub
        num_train_epochs=1,
        per_device_train_batch_size=4,
        gradient_accumulation_steps=4,  # effective batch size of 16 per device
        learning_rate=2e-5,
        logging_steps=10,
        save_strategy="steps",
        save_steps=50,
        save_total_limit=2,  # keep only the two most recent checkpoints
        eval_strategy="steps",
        eval_steps=50,
        warmup_ratio=0.1,
        lr_scheduler_type="cosine",
        gradient_checkpointing=True,  # trade compute for activation memory
        fp16=True,
        report_to="trackio",
        project=project,
        run_name=run_name,
    )

    print("Initializing trainer...")
    # Passing the model id as a string lets SFTTrainer load the model and
    # tokenizer itself; peft_config makes it wrap the model with LoRA adapters.
    trainer = SFTTrainer(
        model=base_model,
        args=training_args,
        train_dataset=train_ds,
        eval_dataset=eval_ds,
        peft_config=peft_config,
    )

    print("Starting training...")
    trainer.train()

    print("Pushing to Hub...")
    trainer.push_to_hub()
    print(f"Complete! Model available at https://huggingface.co/{hub_model_id}")


if __name__ == "__main__":
    main()
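

# How to run (a sketch, assuming `uv` is installed; the PEP 723 header above
# lets it resolve the dependencies — the filename here is illustrative):
#
#   uv run sft_demo.py
#   HUB_MODEL_ID=you/your-repo TRACKIO_PROJECT=my_project uv run sft_demo.py
#
# Post-training usage: a minimal sketch (not called above) of how the pushed
# LoRA adapter could be loaded for inference. It assumes the adapter repo id
# matches HUB_MODEL_ID and that the tokenizer was pushed alongside it, as
# trainer.push_to_hub() does; the function name and default are illustrative.
def load_finetuned(adapter_id: str = "davidsmts/qwen25-0_5b-sft-demo"):
    from peft import PeftModel
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained(adapter_id)
    base = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B")
    # Attach the trained LoRA weights on top of the frozen base model.
    model = PeftModel.from_pretrained(base, adapter_id)
    model.eval()
    return tokenizer, model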