#!/usr/bin/env python3
"""Fine-tune TAIDE 12B on Audrey Tang transcripts using SFT.
On DGX Spark (128GB unified memory), this runs full-parameter SFT
on a 12B model. For memory safety, we use gradient checkpointing
and bf16 mixed precision.
Expected time: ~4-8 hours for 2 epochs on 56K training examples.
"""
import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM
from trl import SFTTrainer, SFTConfig
# Model and data paths
MODEL_PATH = "./models/taide-12b"
TRAIN_PATH = "./training_data/train.jsonl"
EVAL_PATH = "./training_data/eval.jsonl"
OUTPUT_DIR = "./models/taide-12b-audrey"
# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
# Load model in bf16; full-parameter weights of a 12B model fit in DGX Spark's 128GB unified memory
model = AutoModelForCausalLM.from_pretrained(
MODEL_PATH,
torch_dtype=torch.bfloat16,
device_map="auto",
    attn_implementation="sdpa",  # PyTorch scaled dot-product attention (uses flash kernels when available)
)
model.config.use_cache = False # Required for gradient checkpointing
# Load datasets
train_dataset = load_dataset("json", data_files=TRAIN_PATH, split="train")
eval_dataset = load_dataset("json", data_files=EVAL_PATH, split="train")
print(f"Train: {len(train_dataset)}, Eval: {len(eval_dataset)}")
def format_chat(example):
"""Format messages into the Gemma chat template."""
return tokenizer.apply_chat_template(
example["messages"],
tokenize=False,
add_generation_prompt=False,
)
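# Quick usage check of format_chat on the first raw record (assumes the bundled tokenizer
# ships a chat template; this is the same call the mapping step below performs).
print("Sample formatted example:", format_chat(train_dataset[0])[:300])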
# Training config
training_args = SFTConfig(
output_dir=OUTPUT_DIR,
num_train_epochs=2,
per_device_train_batch_size=2,
per_device_eval_batch_size=2,
gradient_accumulation_steps=8, # effective batch size = 16
gradient_checkpointing=True,
gradient_checkpointing_kwargs={"use_reentrant": False},
learning_rate=2e-5,
lr_scheduler_type="cosine",
warmup_ratio=0.05,
weight_decay=0.01,
bf16=True,
logging_steps=10,
eval_strategy="steps",
eval_steps=500,
save_strategy="steps",
save_steps=500,
save_total_limit=3,
max_seq_length=4096,
packing=True, # Pack short examples together for efficiency
dataset_text_field="text",
report_to="none",
)
# Preprocess: apply chat template
train_dataset = train_dataset.map(
lambda x: {"text": format_chat(x)}, remove_columns=train_dataset.column_names
)
eval_dataset = eval_dataset.map(
lambda x: {"text": format_chat(x)}, remove_columns=eval_dataset.column_names
)
trainer = SFTTrainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
processing_class=tokenizer,
)
print("Starting training...")
trainer.train()
# Save final model
trainer.save_model(OUTPUT_DIR)
tokenizer.save_pretrained(OUTPUT_DIR)
print(f"Model saved to {OUTPUT_DIR}")