# /// script
# dependencies = [
# "trl>=0.12.0",
# "peft>=0.7.0",
# "transformers>=4.44.0",
# "accelerate>=0.34.0",
# "datasets>=2.19.0",
# "trackio",
# ]
# ///
"""
SFT demo for Qwen/Qwen2.5-0.5B on the Capybara dataset with LoRA and Trackio.
Designed for Hugging Face Jobs (uv) with Hub push enabled.
"""
import os
import random
from datasets import load_dataset
from transformers import AutoTokenizer
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer
import trackio
MODEL_ID = os.environ.get("MODEL_ID", "Qwen/Qwen2.5-0.5B")
DATASET_ID = os.environ.get("DATASET_ID", "trl-lib/Capybara")
HUB_MODEL_ID = os.environ.get("HUB_MODEL_ID", "davidsmts/qwen2_5-0.5b-capybara-sft")
RUN_NAME = os.environ.get("RUN_NAME", "qwen25-0.5b-capybara-demo")
PROJECT = os.environ.get("TRACKIO_PROJECT", "qwen-sft-demo")
SPACE_ID = os.environ.get("TRACKIO_SPACE", "davidsmts/trackio")
MAX_TRAIN_SAMPLES = int(os.environ.get("MAX_TRAIN_SAMPLES", "200"))
SEED = int(os.environ.get("SEED", "42"))
random.seed(SEED)
print("Loading dataset...")
dataset = load_dataset(DATASET_ID, split="train")
print(f"Loaded {len(dataset)} examples")
if MAX_TRAIN_SAMPLES and len(dataset) > MAX_TRAIN_SAMPLES:
    dataset = dataset.shuffle(seed=SEED).select(range(MAX_TRAIN_SAMPLES))
    print(f"Subsampled to {len(dataset)} examples for quick demo")
print("Creating train/test split...")
split = dataset.train_test_split(test_size=0.1, seed=SEED)
train_dataset = split["train"]
eval_dataset = split["test"]
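# With the default 200-sample subset, the 0.1 test fraction yields roughly
# 180 train / 20 eval examples.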
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False)
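# Some tokenizers ship without a pad token; fall back to EOS so batch padding
# works during training.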
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
def formatting_func(example):
    # Capybara provides conversation-style "messages"; apply the chat template.
    return tokenizer.apply_chat_template(example["messages"], tokenize=False, add_generation_prompt=False)
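# For Qwen2.5 this renders ChatML-style text, roughly (illustrative):
#   <|im_start|>user\n...<|im_end|>\n<|im_start|>assistant\n...<|im_end|>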
print("Initializing Trackio...")
trackio.init(
    project=PROJECT,
    name=RUN_NAME,
    space_id=SPACE_ID,
    config={
        "model": MODEL_ID,
        "dataset": DATASET_ID,
        "lr": 2e-5,
        "epochs": 1,
        "max_train_samples": MAX_TRAIN_SAMPLES,
    },
)
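# Beyond the metrics the trainer reports automatically, ad-hoc values can be
# logged with the wandb-style API, e.g. trackio.log({"debug/metric": 1.0}).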
print("Building LoRA config...")
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],
)
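# Adapting only the attention q/v projections with alpha = 2*r is a common
# lightweight LoRA recipe; adding "k_proj"/"o_proj" or the MLP projections to
# target_modules buys more capacity at a higher memory cost.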
print("Setting trainer args...")
training_args = SFTConfig(
    output_dir="./outputs",
    push_to_hub=True,
    hub_model_id=HUB_MODEL_ID,
    hub_strategy="every_save",
    num_train_epochs=1,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=8,
    learning_rate=2e-5,
    max_length=1024,
    logging_steps=5,
    save_strategy="steps",
    save_steps=50,
    save_total_limit=2,
    eval_strategy="steps",
    eval_steps=50,
    warmup_ratio=0.03,
    lr_scheduler_type="cosine",
    gradient_checkpointing=True,
    fp16=True,
    report_to="trackio",
    project=PROJECT,
    run_name=RUN_NAME,
)
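# Effective batch size is 2 * 8 = 16 sequences per optimizer step, so the
# default ~180-example train split gives only about a dozen steps per epoch;
# the step-50 eval/save triggers only fire if you raise MAX_TRAIN_SAMPLES.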
print("Initializing trainer...")
trainer = SFTTrainer(
    model=MODEL_ID,
    processing_class=tokenizer,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    formatting_func=formatting_func,
    peft_config=peft_config,
    args=training_args,
)
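# Optional sanity check: the PEFT-wrapped model exposes a trainable-parameter
# summary (guarded in case the wrapping changes across trl/peft versions).
if hasattr(trainer.model, "print_trainable_parameters"):
    trainer.model.print_trainable_parameters()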
print("Starting training...")
trainer.train()
print("Saving and pushing to hub...")
trainer.push_to_hub()
trackio.finish()
print(f"Done! Model pushed to https://huggingface.co/{HUB_MODEL_ID}")
print(f"Track metrics at https://huggingface.co/spaces/{SPACE_ID}")