# training-scripts/train.py
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.36.0",
#     "accelerate>=0.24.0",
#     "datasets",
#     "trackio",
# ]
# ///
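#
# The block above is PEP 723 inline script metadata: a compatible runner such
# as uv resolves the dependencies automatically, e.g. `uv run train.py`.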
import trackio
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
print("πŸ“¦ Loading dataset...")
dataset = load_dataset("open-r1/codeforces-cots", "solutions_w_editorials", split="train")
print(f"βœ… Dataset loaded: {len(dataset)} examples")
print("πŸ”€ Creating train/eval split...")
dataset_split = dataset.train_test_split(test_size=0.05, seed=42)
train_dataset = dataset_split["train"].select_columns(["messages"])
eval_dataset = dataset_split["test"].select_columns(["messages"])
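# Quick sanity check on the resulting split sizes.
print(f"   train: {len(train_dataset)} examples | eval: {len(eval_dataset)} examples")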
config = SFTConfig(
    # Output and Hub checkpointing: every saved checkpoint is pushed to the repo.
    output_dir="qwen3-0.6b-codeforces-cots",
    push_to_hub=True,
    hub_model_id="gengxin-zhang/qwen3-0.6b-codeforces-cots",
    hub_strategy="every_save",
    # Optimization schedule.
    num_train_epochs=3,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-5,
    warmup_steps=100,
    lr_scheduler_type="cosine",
    # Logging, checkpointing, and periodic evaluation.
    logging_steps=10,
    save_strategy="steps",
    save_steps=100,
    save_total_limit=2,
    eval_strategy="steps",
    eval_steps=100,
    # Experiment tracking via trackio.
    report_to="trackio",
    project="qwen3_codeforces",
    run_name="qwen3-0.6b-cots-sft",
    # Sequences longer than 2048 tokens are truncated.
    max_length=2048,
)
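# Effective batch size: 4 sequences per device × 4 gradient-accumulation steps
# = 16 sequences per optimizer step on each device (multiplied by the number of
# processes when launched with accelerate).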
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)
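# A note on the adapter shape (standard PEFT semantics): LoRA updates are
# scaled by lora_alpha / r = 32 / 16 = 2.0, and targeting q/k/v/o plus
# gate/up/down places adapters on every attention and MLP projection in each
# transformer block, not just the attention layers.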
print("🎯 Initializing trainer...")
trainer = SFTTrainer(
model="Qwen/Qwen3-0.6B",
train_dataset=train_dataset,
eval_dataset=eval_dataset,
args=config,
peft_config=peft_config,
)
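# Optional sanity check (a sketch: with a peft_config, SFTTrainer wraps the
# base model in a PEFT model, which exposes print_trainable_parameters()).
if hasattr(trainer.model, "print_trainable_parameters"):
    trainer.model.print_trainable_parameters()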
print("πŸš€ Starting training...")
trainer.train()
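# Optional (a sketch): run one final evaluation pass after training;
# Trainer.evaluate() returns a metrics dict (e.g. eval_loss) on eval_dataset.
metrics = trainer.evaluate()
print(f"📊 Final eval metrics: {metrics}")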
print("πŸ’Ύ Pushing to Hub...")
trainer.push_to_hub()
trackio.finish()