from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
import trackio  # Trackio logging backend, used via report_to="trackio" below
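
# Assumed environment (not pinned in the original): pip install trl peft
# datasets transformers trackio, plus a CUDA-enabled torch build.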
|
|
# Load a small slice of the CodeForces chain-of-thought dataset
print("📦 Loading dataset...")
dataset = load_dataset(
    "open-r1/codeforces-cots",
    "solutions_w_editorials_py_decontaminated",
    split="train[:1000]",
)
print(f"📊 Training on {len(dataset)} examples for 3 epochs")
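
# Optional sanity check. Assumption (not verified here): this config exposes
# a chat-style "messages" column that SFTTrainer can consume directly.
print(f"Columns: {dataset.column_names}")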
|
|
# LoRA adapter: rank-8 updates on the attention projections only
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
)
|
|
# Training configuration
config = SFTConfig(
    # Hub upload: push once at the end of training
    output_dir="qwen-codeforces-finetuned",
    push_to_hub=True,
    hub_model_id="papebaba/qwen-codeforces-finetuned",
    hub_strategy="end",
    hub_private_repo=False,

    # Core hyperparameters (effective batch size: 1 x 8 = 8)
    num_train_epochs=3,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=8,
    learning_rate=2e-4,
    max_length=512,

    # Logging and checkpointing
    logging_steps=10,
    save_strategy="epoch",
    save_total_limit=1,

    # Memory and optimizer settings for a single T4
    gradient_checkpointing=True,
    fp16=True,  # T4 GPUs (compute capability 7.5) lack bf16 support; bf16=True would raise
    max_grad_norm=1.0,
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    optim="adamw_torch",

    # Experiment tracking
    report_to="trackio",
    run_name="qwen-codeforces-sft-1k",
)
|
|
# Build the trainer; TRL loads the base model from the Hub ID and applies LoRA
print("🎯 Initializing trainer...")
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",
    train_dataset=dataset,
    args=config,
    peft_config=peft_config,
)
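
# With a peft_config attached, trainer.model is a PeftModel; this reports how
# few parameters LoRA actually trains (exact output varies by peft version).
trainer.model.print_trainable_parameters()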
|
|
# Run supervised fine-tuning
print("🚀 Starting training on T4 small...")
trainer.train()
|
|
# Upload the final model to the Hub
print("🤗 Pushing final model to Hub...")
trainer.push_to_hub()
|
|
print("✅ Training complete!")
print("📊 View metrics at: https://huggingface.co/spaces/papebaba/trackio")
print("🤗 Model at: https://huggingface.co/papebaba/qwen-codeforces-finetuned")
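
# Hedged usage sketch: load the pushed adapter for inference. Assumptions:
# the push above succeeded and the tokenizer was uploaded alongside the model.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

ft_model = AutoPeftModelForCausalLM.from_pretrained("papebaba/qwen-codeforces-finetuned")
ft_tokenizer = AutoTokenizer.from_pretrained("papebaba/qwen-codeforces-finetuned")
prompt = "Write a Python solution: given n integers, print their sum."
inputs = ft_tokenizer(prompt, return_tensors="pt")
outputs = ft_model.generate(**inputs, max_new_tokens=128)
print(ft_tokenizer.decode(outputs[0], skip_special_tokens=True))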
|
|