"""Fine-tune TAIDE 12B on Audrey Tang transcripts using SFT.

On DGX Spark (128GB unified memory), this runs full-parameter SFT on a 12B
model. To keep memory usage in check, we use gradient checkpointing and
bf16 precision.

Expected time: ~4-8 hours for 2 epochs on 56K training examples.
"""

import torch
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM
from trl import SFTTrainer, SFTConfig

MODEL_PATH = "./models/taide-12b"
TRAIN_PATH = "./training_data/train.jsonl"
EVAL_PATH = "./training_data/eval.jsonl"
OUTPUT_DIR = "./models/taide-12b-audrey"

# Tokenizer: reuse the EOS token for padding if no pad token is defined.
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# Load the full model in bf16. The KV cache is not needed during training
# and conflicts with gradient checkpointing, so turn it off.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    attn_implementation="sdpa",
)
model.config.use_cache = False

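# Optional sanity check before a multi-hour run: confirm the parameter count
# and dtype that actually loaded (informational only; safe to remove).
n_params = sum(p.numel() for p in model.parameters())
print(f"Loaded {n_params / 1e9:.1f}B parameters in {model.dtype}")
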
# Load the train/eval splits; each JSONL row carries a "messages" list.
train_dataset = load_dataset("json", data_files=TRAIN_PATH, split="train")
eval_dataset = load_dataset("json", data_files=EVAL_PATH, split="train")

print(f"Train: {len(train_dataset)}, Eval: {len(eval_dataset)}")

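# Each JSONL line is expected to hold one full conversation, e.g.
#   {"messages": [{"role": "user", "content": "..."},
#                 {"role": "assistant", "content": "..."}]}
# (schema inferred from format_chat; adjust if your files differ).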
def format_chat(example):
    """Format messages into the Gemma chat template."""
    return tokenizer.apply_chat_template(
        example["messages"],
        tokenize=False,
        add_generation_prompt=False,
    )

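# Optional: spot-check the chat template on one raw example before mapping
# the full dataset (output truncated for readability).
print(format_chat(train_dataset[0])[:500])
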
training_args = SFTConfig(
    output_dir=OUTPUT_DIR,
    num_train_epochs=2,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=8,
    gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
    learning_rate=2e-5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.05,
    weight_decay=0.01,
    bf16=True,
    logging_steps=10,
    eval_strategy="steps",
    eval_steps=500,
    save_strategy="steps",
    save_steps=500,
    save_total_limit=3,
    max_seq_length=4096,
    packing=True,
    dataset_text_field="text",
    report_to="none",
)

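# With these settings the effective batch is per_device_train_batch_size *
# gradient_accumulation_steps = 2 * 8 = 16 packed sequences per optimizer
# step (roughly 65K tokens at max_seq_length=4096); adjust
# gradient_accumulation_steps if you change either knob.
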
# Render every example into a single "text" field for the trainer.
train_dataset = train_dataset.map(
    lambda x: {"text": format_chat(x)}, remove_columns=train_dataset.column_names
)
eval_dataset = eval_dataset.map(
    lambda x: {"text": format_chat(x)}, remove_columns=eval_dataset.column_names
)

trainer = SFTTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    processing_class=tokenizer,
)

print("Starting training...")
trainer.train()  # pass resume_from_checkpoint=True to continue an interrupted run

trainer.save_model(OUTPUT_DIR)
tokenizer.save_pretrained(OUTPUT_DIR)
print(f"Model saved to {OUTPUT_DIR}")
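
# Optional smoke test of the fine-tuned weights (the prompt and generation
# settings below are illustrative; adjust as needed).
model.config.use_cache = True  # re-enable the KV cache for generation
model.eval()
prompt = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Briefly introduce yourself."}],
    tokenize=False,
    add_generation_prompt=True,
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))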