from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
import trackio  # Trackio experiment tracking, enabled below via report_to="trackio"

print("Loading dataset...")
dataset = load_dataset("mattPearce/wordpress-blocks-sft", split="train")
# Hold out 41 examples for evaluation; fix the seed for a reproducible split.
dataset_dict = dataset.train_test_split(test_size=41, seed=42)

print("Configuring LoRA for 14B model...")
# Rank-16 adapters on every attention and MLP projection, so only a small
# fraction of the 14B parameters is actually trained.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
)

print("Setting up trainer...")
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-Coder-14B-Instruct",
    train_dataset=dataset_dict["train"],
    eval_dataset=dataset_dict["test"],
    peft_config=peft_config,
    args=SFTConfig(
        output_dir="qwen-wordpress-coder",
        num_train_epochs=3,
        per_device_train_batch_size=1,
        per_device_eval_batch_size=1,
        gradient_accumulation_steps=16,  # effective batch size of 1 * 16 = 16
        gradient_checkpointing=True,  # trade compute for memory on the 14B model
        learning_rate=2e-4,
        lr_scheduler_type="cosine",
        warmup_ratio=0.03,
        eval_strategy="steps",
        eval_steps=25,
        logging_steps=5,
        save_strategy="steps",
        save_steps=50,
        save_total_limit=3,  # keep only the three most recent checkpoints
        push_to_hub=True,
        hub_model_id="mattPearce/qwen-wordpress-coder",
        hub_strategy="every_save",  # every saved checkpoint is also pushed to the Hub
        hub_private_repo=False,
        bf16=True,
        optim="adamw_8bit",  # 8-bit AdamW cuts optimizer-state memory
        max_grad_norm=1.0,
        report_to="trackio",
        run_name="qwen-wordpress-14b-v2",
        project="wordpress-coder",
    ),
)

print("Starting training...")
trainer.train()

print("Pushing final model to Hub...")
trainer.push_to_hub()

print("✓ Training complete!")
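
# ---------------------------------------------------------------------------
# Optional follow-up (a minimal sketch, not part of the training run above):
# load the pushed adapter back from the Hub and generate a quick sample.
# The model IDs come from the script; the prompt and generation settings
# below are illustrative assumptions.
# ---------------------------------------------------------------------------
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-Coder-14B-Instruct", torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "mattPearce/qwen-wordpress-coder")
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-14B-Instruct")

messages = [{"role": "user", "content": "Create a WordPress block for a pricing table."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output = model.generate(input_ids=inputs, max_new_tokens=512)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output[0][inputs.shape[-1]:], skip_special_tokens=True))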