"""Fine-tune Qwen 2.5 1.5B for Supabase/GitHub/Shell command adapter."""
import json
import torch
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments
from peft import LoraConfig, get_peft_model
from trl import SFTTrainer
MODEL_ID = "Qwen/Qwen2.5-1.5B-Instruct"
OUTPUT_DIR = "./adapter-model"
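# dataset_v3.jsonl is expected to hold one JSON object per line with "input" and
# "output" keys, e.g. (illustrative values only, not taken from the actual file):
#   {"input": "list my supabase tables", "output": "{\"tool\": \"supabase\", ...}"}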
# Load dataset
print("Loading dataset...")
examples = []
with open("dataset_v3.jsonl") as f:
    for line in f:
        d = json.loads(line)
        # Format each record as a Qwen ChatML conversation (system / user / assistant)
        text = f"<|im_start|>system\nYou are a command adapter. Output ONLY valid JSON. No explanation.<|im_end|>\n<|im_start|>user\n{d['input']}<|im_end|>\n<|im_start|>assistant\n{d['output']}<|im_end|>"
        examples.append({"text": text})
# Repeat the dataset 4x for more training signal on a small corpus
examples = examples * 4
dataset = Dataset.from_list(examples)
print(f"Dataset: {len(examples)} examples")
# Load model
print("Loading model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
# LoRA config
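# LoRA keeps the base weights frozen and trains small low-rank update matrices
# on the listed projection layers; only those updates are saved as the adapter.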
lora_config = LoraConfig(
    r=32,                          # LoRA rank
    lora_alpha=64,                 # scaling factor (alpha / r = 2)
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],  # attention + MLP projections
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
# Training
print("Starting training...")
training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,
    num_train_epochs=7,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=2,  # effective batch size of 8
    learning_rate=2e-4,
    fp16=True,
    logging_steps=10,
    save_strategy="epoch",
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    report_to="none",
)
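# Note: SFTTrainer tokenizes the "text" column of the dataset; depending on the
# installed trl version you may need to pass an SFTConfig with
# dataset_text_field="text" instead of plain TrainingArguments.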
trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    args=training_args,
    processing_class=tokenizer,
)
trainer.train()
# Save
print("Saving adapter...")
model.save_pretrained(OUTPUT_DIR)
tokenizer.save_pretrained(OUTPUT_DIR)
print(f"Done! Adapter saved to {OUTPUT_DIR}")
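# Quick smoke test (a minimal sketch, not part of the training run itself): send one
# hand-written prompt through the fine-tuned model to check that it emits JSON.
# The example user request below is illustrative, not taken from dataset_v3.jsonl.
model.eval()
prompt = (
    "<|im_start|>system\nYou are a command adapter. Output ONLY valid JSON. No explanation.<|im_end|>\n"
    "<|im_start|>user\nlist open pull requests<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))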