# /// script
# dependencies = [
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.45.0",
#     "accelerate>=0.24.0",
#     "trackio",
#     "datasets",
# ]
# ///
"""
Fine-tune Qwen3-0.6B on open-r1/codeforces-cots for competitive programming.
"""

import trackio
from datasets import load_dataset
from peft import LoraConfig
from transformers import AutoTokenizer
from trl import SFTConfig, SFTTrainer

# Load dataset - the "solutions" config provides a 'messages' column in chat format
print("Loading dataset...")
dataset = load_dataset("open-r1/codeforces-cots", "solutions", split="train")
print(f"Dataset loaded: {len(dataset)} examples")

# Keep only the 'messages' column - SFT training needs nothing else
print("Preparing dataset - keeping only messages column...")
dataset = dataset.select_columns(["messages"])

# Create train/eval split (5% held out for evaluation)
print("Creating train/eval split...")
dataset_split = dataset.train_test_split(test_size=0.05, seed=42)
train_dataset = dataset_split["train"]
eval_dataset = dataset_split["test"]
print(f"Train: {len(train_dataset)} examples")
print(f"Eval: {len(eval_dataset)} examples")

# Load tokenizer for the chat template
print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B")
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token


# Processing function: convert each list of chat messages to a single
# training string using the model's chat template
def formatting_func(example):
    return tokenizer.apply_chat_template(example["messages"], tokenize=False)


# Training configuration
config = SFTConfig(
    # Hub settings - CRITICAL for saving results
    output_dir="qwen3-codeforces-sft",
    push_to_hub=True,
    hub_model_id="kintopp/qwen3-0.6b-codeforces-cots",
    hub_strategy="every_save",
    # Training parameters
    num_train_epochs=1,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=8,  # effective batch size: 2 x 8 = 16 per device
    learning_rate=2e-5,
    max_length=2048,
    # Logging & checkpointing
    logging_steps=25,
    save_strategy="steps",
    save_steps=500,
    save_total_limit=2,
    # Evaluation
    eval_strategy="steps",
    eval_steps=500,
    # Optimization
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    gradient_checkpointing=True,
    bf16=True,
    # Monitoring with Trackio
    report_to="trackio",
    project="qwen3-codeforces",
    run_name="qwen3-0.6b-codeforces-sft",
)

# LoRA configuration for parameter-efficient training
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
)

# Initialize trainer - passing the model name as a string lets SFTTrainer
# load the base model itself
print("Initializing trainer...")
trainer = SFTTrainer(
    model="Qwen/Qwen3-0.6B",
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    args=config,
    peft_config=peft_config,
    formatting_func=formatting_func,
)

print("Starting training...")
trainer.train()

print("Pushing final model to Hub...")
trainer.push_to_hub()
print("Complete! Model at: https://huggingface.co/kintopp/qwen3-0.6b-codeforces-cots")
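
# ---------------------------------------------------------------------------
# Optional: inference sketch (not part of the training run above).
# A minimal, hedged example of loading the pushed LoRA adapter for generation;
# it assumes the adapter at the hub_model_id above loads cleanly with
# peft.AutoPeftModelForCausalLM and reuses the tokenizer already in scope.
# Uncomment to try after training completes.
# ---------------------------------------------------------------------------
# from peft import AutoPeftModelForCausalLM
#
# model = AutoPeftModelForCausalLM.from_pretrained(
#     "kintopp/qwen3-0.6b-codeforces-cots", torch_dtype="auto", device_map="auto"
# )
# # Build a prompt with the same chat template used for training
# prompt = tokenizer.apply_chat_template(
#     [{"role": "user", "content": "Given n integers, print their sum."}],
#     tokenize=False,
#     add_generation_prompt=True,
# )
# inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
# outputs = model.generate(**inputs, max_new_tokens=512)
# # Decode only the newly generated tokens, skipping the prompt
# print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))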