# /// script
# dependencies = ["trl>=0.12.0", "peft>=0.7.0", "trackio", "transformers>=4.51.0", "datasets", "bitsandbytes"]
# ///
import sys
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
import trackio
print("="*60)
print("πŸš€ STARTING TRAINING JOB - VERBOSE MODE")
print("="*60)
# Step 1: Load dataset
print("\nπŸ“₯ Step 1/5: Loading dataset...")
try:
    dataset = load_dataset(
        "open-r1/codeforces-cots",
        name="solutions_w_editorials_decontaminated",
        split="train[:500]",  # Small subset for quick testing
    )
    print(f"βœ… Dataset loaded: {len(dataset)} examples")
    print(f"   Columns: {dataset.column_names}")
    print(f"   First example keys: {list(dataset[0].keys())}")
except Exception as e:
    print(f"❌ FAILED to load dataset: {e}")
    sys.exit(1)
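# Optional sanity check on the data shape: assumes the "messages" column holds
# chat-format dicts with "role"/"content" keys, which is the layout SFTTrainer
# expects for conversational data.
if "messages" in dataset.column_names:
    first_message = dataset[0]["messages"][0]
    print(f"   First message role: {first_message.get('role')}")
    print(f"   First message preview: {str(first_message.get('content'))[:80]}")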
# Step 2: Create train/eval split
print("\nπŸ“Š Step 2/5: Creating train/eval split...")
try:
    dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
    print("βœ… Split created:")
    print(f"   Train: {len(dataset_split['train'])} examples")
    print(f"   Eval: {len(dataset_split['test'])} examples")
except Exception as e:
    print(f"❌ FAILED to create split: {e}")
    sys.exit(1)
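# test_size=0.1 of the 500-example subset gives 450 train / 50 eval examples,
# and seed=42 keeps the split reproducible across runs.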
# Step 3: Configure LoRA
print("\nπŸ”§ Step 3/5: Configuring LoRA...")
try:
    peft_config = LoraConfig(
        r=16,
        lora_alpha=32,
        lora_dropout=0.05,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
        task_type="CAUSAL_LM",
    )
    print(f"βœ… LoRA configured: r={peft_config.r}, alpha={peft_config.lora_alpha}")
except Exception as e:
    print(f"❌ FAILED to configure LoRA: {e}")
    sys.exit(1)
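# lora_alpha / r = 32 / 16 gives an effective adapter scaling of 2.0, and the
# target_modules list covers every attention and MLP projection in the
# transformer blocks (the usual "all linear layers" LoRA setup).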
# Step 4: Configure training
print("\nβš™οΈ Step 4/5: Configuring training...")
try:
    training_args = SFTConfig(
        output_dir="qwen3-0.6b-codeforces-test",
        # Quick training for testing
        num_train_epochs=1,  # Just 1 epoch for quick test
        per_device_train_batch_size=2,
        per_device_eval_batch_size=2,
        gradient_accumulation_steps=2,
        gradient_checkpointing=True,
        # Learning rate
        learning_rate=2e-4,
        lr_scheduler_type="cosine",
        warmup_ratio=0.1,
        optim="paged_adamw_8bit",  # 8-bit paged AdamW; needs bitsandbytes (listed in the deps above)
        # Frequent logging for visibility
        eval_strategy="steps",
        eval_steps=20,
        logging_steps=5,  # Log every 5 steps
        save_strategy="steps",
        save_steps=50,
        save_total_limit=2,
        # Hub integration
        push_to_hub=True,
        hub_model_id="kneeraj/qwen3-0.6b-codeforces-test",
        hub_strategy="every_save",
        hub_private_repo=False,
        # Trackio monitoring
        report_to="trackio",
        project="codeforces-finetuning-test",
        run_name="qwen3-quick-test",
        # Performance
        bf16=True,
        max_grad_norm=1.0,
        # Data processing
        max_seq_length=1024,  # Shorter for faster processing
        dataset_text_field="messages",
        packing=False,
    )
print(f"βœ… Training config created")
print(f" Epochs: {training_args.num_train_epochs}")
print(f" Batch size: {training_args.per_device_train_batch_size}")
print(f" Output: {training_args.hub_model_id}")
except Exception as e:
print(f"❌ FAILED to configure training: {e}")
sys.exit(1)
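# Effective batch size = per_device_train_batch_size * gradient_accumulation_steps
# = 2 * 2 = 4 sequences per optimizer step, so the 450 training examples give
# roughly 112 optimizer steps in this one-epoch test run.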
# Step 5: Initialize trainer and train
print("\nπŸ‹οΈ Step 5/5: Initializing trainer and starting training...")
try:
    print("   Loading model: Qwen/Qwen3-0.6B...")
    trainer = SFTTrainer(
        model="Qwen/Qwen3-0.6B",
        train_dataset=dataset_split["train"],
        eval_dataset=dataset_split["test"],
        peft_config=peft_config,
        args=training_args,
    )
    print("βœ… Trainer initialized")
    print(f"   Training samples: {len(dataset_split['train'])}")
    print(f"   Evaluation samples: {len(dataset_split['test'])}")
print("\n" + "="*60)
print("🎯 STARTING TRAINING...")
print("="*60 + "\n")
trainer.train()
print("\n" + "="*60)
print("πŸ’Ύ Pushing final model to Hub...")
trainer.push_to_hub()
print("\n" + "="*60)
print("βœ… TRAINING COMPLETE!")
print("="*60)
print(f"Model saved to: kneeraj/qwen3-0.6b-codeforces-test")
print(f"View at: https://huggingface.co/kneeraj/qwen3-0.6b-codeforces-test")
except Exception as e:
    print(f"\n❌ TRAINING FAILED: {e}")
    import traceback
    traceback.print_exc()
    sys.exit(1)
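
# Once the adapter is on the Hub, it can be tried out with a minimal sketch like
# the one below (run separately from this training script; assumes the push above
# succeeded and the tokenizer was uploaded alongside the adapter):
#
#   from peft import AutoPeftModelForCausalLM
#   from transformers import AutoTokenizer
#
#   model = AutoPeftModelForCausalLM.from_pretrained("kneeraj/qwen3-0.6b-codeforces-test")
#   tokenizer = AutoTokenizer.from_pretrained("kneeraj/qwen3-0.6b-codeforces-test")
#   prompt = tokenizer.apply_chat_template(
#       [{"role": "user", "content": "Given n, print the sum 1 + 2 + ... + n."}],
#       tokenize=False,
#       add_generation_prompt=True,
#   )
#   inputs = tokenizer(prompt, return_tensors="pt")
#   print(tokenizer.decode(model.generate(**inputs, max_new_tokens=256)[0]))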