# train_qwen3_hf.py
# /// script
# dependencies = ["trl>=0.12.0", "peft>=0.7.0", "transformers>=4.45.0", "datasets", "accelerate", "torch"]
# ///
"""Fine-tune Qwen3-0.6B on CodeForces-CoTS (100 examples)"""
import os
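# Disable HF tokenizers parallelism before any DataLoader forks, silencing the fork warning.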
os.environ["TOKENIZERS_PARALLELISM"] = "false"
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
import torch
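# Report the compute device up front; the bf16 and fused-optimizer settings below assume a CUDA GPU.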
print(f"CUDA available: {torch.cuda.is_available()}")
if torch.cuda.is_available():
    print(f"GPU: {torch.cuda.get_device_name(0)}")
dataset = load_dataset("open-r1/codeforces-cots", "solutions", split="train").select(range(100))
print(f"Dataset: {len(dataset)} examples")
peft_config = LoraConfig(
    r=16, lora_alpha=32, lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    bias="none", task_type="CAUSAL_LM",
)
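# One epoch with an effective batch size of 8 (1 per device x 8 accumulation steps);
# the adapter is uploaded to the Hub repo below when training finishes.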
training_args = SFTConfig(
    output_dir="./qwen3-0.6b-codeforces-cots",
    num_train_epochs=1,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    learning_rate=2e-4,
    warmup_ratio=0.1,
    logging_steps=5,
    save_strategy="no",
    eval_strategy="no",
    max_length=2048,
    push_to_hub=True,
    hub_model_id="gilbaes/qwen3-0.6b-codeforces-cots",
    report_to="none",
    bf16=True,
    gradient_checkpointing=True,
    optim="adamw_torch_fused",
)
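# Passing the model id as a string lets SFTTrainer load the base model itself and wrap it with the LoRA config above.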
trainer = SFTTrainer(
    model="Qwen/Qwen3-0.6B",
    train_dataset=dataset,
    peft_config=peft_config,
    args=training_args,
)
print(f"Trainable params: {trainer.model.num_parameters(only_trainable=True):,}")
trainer.train()
trainer.push_to_hub()
print("Done! Model at gilbaes/qwen3-0.6b-codeforces-cots")