#!/usr/bin/env python3
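"""Phase 2: triplet-margin fine-tuning of the Stage 1 (SimCSE) LoRA adapters with mixed-precision (AMP) training."""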
import os
import csv
import sys
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    get_linear_schedule_with_warmup,
)
from peft import PeftModel
from torch.cuda.amp import autocast, GradScaler
from tqdm.auto import tqdm
from multiprocessing import freeze_support
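
# Triplets are pre-tokenized: each CSV row stores space-separated token ids and attention masks
# for the anchor (a_*), positive (p_*), and negative (n_*) sequences.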
class TripletDataset(Dataset):
    def __init__(self, path):
        self.samples = []
        with open(path, newline="") as f:
            reader = csv.DictReader(f)
            for row in reader:
                a_ids = torch.tensor(list(map(int, row["a_ids"].split())), dtype=torch.long)
                a_mask = torch.tensor(list(map(int, row["a_mask"].split())), dtype=torch.long)
                p_ids = torch.tensor(list(map(int, row["p_ids"].split())), dtype=torch.long)
                p_mask = torch.tensor(list(map(int, row["p_mask"].split())), dtype=torch.long)
                n_ids = torch.tensor(list(map(int, row["n_ids"].split())), dtype=torch.long)
                n_mask = torch.tensor(list(map(int, row["n_mask"].split())), dtype=torch.long)
                self.samples.append((a_ids, a_mask, p_ids, p_mask, n_ids, n_mask))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx]
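
# Stack each field across the batch; torch.stack requires every example to share the same padded length.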
def collate_fn(batch):
    return tuple(torch.stack(x) for x in zip(*batch))

def main():
    # Config
    MODEL_NAME = "google/gemma-3-1b-pt"
    STAGE1_DIR = "stage1_simcse/final"
    TRAIN_FILE = "train.csv"
    VAL_FILE = "val.csv"
    BATCH_SIZE = 12
    LR = 1e-5
    WEIGHT_DECAY = 0.01
    NUM_EPOCHS = 3
    MARGIN = 0.2
    OUTPUT_DIR = "phase2_triplet_amp"
    SEED = 42

    os.makedirs(OUTPUT_DIR, exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    torch.manual_seed(SEED)

    # Tokenizer & PEFT Model (load Stage 1)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)
    base = AutoModelForCausalLM.from_pretrained(MODEL_NAME, attn_implementation="eager")
    peft_model = PeftModel.from_pretrained(base, STAGE1_DIR, is_trainable=True).to(device)  # keep the Stage 1 LoRA weights trainable in this phase

    # Embed + Projector (now outputs hidden_size)
    class GemmaTripletModel(nn.Module):
        def __init__(self, peft_model):
            super().__init__()
            self.peft = peft_model
            H = peft_model.config.hidden_size
            self.proj = nn.Sequential(
                nn.Linear(H, 512),
                nn.ReLU(),
                nn.Linear(512, H),
            )

        def forward(self, ids, mask):
            out = self.peft.base_model(
                input_ids=ids,
                attention_mask=mask,
                output_hidden_states=True,
                return_dict=True,
            )
            last = out.hidden_states[-1]  # (B, T, H)
            pooled = last.mean(dim=1)     # mean pooling
            z = self.proj(pooled)         # now (B, H)
            norm = z.norm(p=2, dim=1, keepdim=True).clamp_min(1e-6)
            return z / norm

    model = GemmaTripletModel(peft_model).to(device)

    # Datasets & Loaders
    train_ds = TripletDataset(TRAIN_FILE)
    val_ds = TripletDataset(VAL_FILE)
    train_loader = DataLoader(train_ds, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_fn)
    val_loader = DataLoader(val_ds, batch_size=BATCH_SIZE, shuffle=False, collate_fn=collate_fn)

    # Optimizer, Scheduler, AMP
    optimizer = torch.optim.AdamW(model.parameters(), lr=LR, weight_decay=WEIGHT_DECAY)
    total_steps = len(train_loader) * NUM_EPOCHS
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=int(0.1 * total_steps),
        num_training_steps=total_steps,
    )
    scaler = GradScaler()
    triplet_loss = nn.TripletMarginLoss(margin=MARGIN, p=2)
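
    # AMP: forward passes run under autocast; GradScaler scales the loss for backward and
    # unscales the gradients inside scaler.step() before the optimizer update.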
    # Training Loop
    for epoch in range(1, NUM_EPOCHS + 1):
        model.train()
        running_loss = 0.0
        for a_ids, a_mask, p_ids, p_mask, n_ids, n_mask in tqdm(train_loader, desc=f"Train {epoch}", unit="batch"):
            a_ids, a_mask = a_ids.to(device), a_mask.to(device)
            p_ids, p_mask = p_ids.to(device), p_mask.to(device)
            n_ids, n_mask = n_ids.to(device), n_mask.to(device)
            optimizer.zero_grad()
            with autocast():
                emb_a = model(a_ids, a_mask)
                emb_p = model(p_ids, p_mask)
                emb_n = model(n_ids, n_mask)
                loss = triplet_loss(emb_a, emb_p, emb_n)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            scheduler.step()
            running_loss += loss.item()
        print(f"Epoch {epoch} Train Loss: {running_loss/len(train_loader):.6f}")

        # Validation
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for a_ids, a_mask, p_ids, p_mask, n_ids, n_mask in tqdm(val_loader, desc=f"Val {epoch}", unit="batch"):
                a_ids, a_mask = a_ids.to(device), a_mask.to(device)
                p_ids, p_mask = p_ids.to(device), p_mask.to(device)
                n_ids, n_mask = n_ids.to(device), n_mask.to(device)
                with autocast():
                    emb_a = model(a_ids, a_mask)
                    emb_p = model(p_ids, p_mask)
                    emb_n = model(n_ids, n_mask)
                val_loss += triplet_loss(emb_a, emb_p, emb_n).item()
        print(f"Epoch {epoch} Val Loss: {val_loss/len(val_loader):.6f}")

        # Checkpoint LoRA only
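        # (only the adapter weights and tokenizer are saved; the projection head model.proj is not persisted)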
        ckpt_dir = os.path.join(OUTPUT_DIR, f"epoch{epoch}")
        peft_model.save_pretrained(ckpt_dir)
        tokenizer.save_pretrained(ckpt_dir)

    # Final Save
    final_dir = os.path.join(OUTPUT_DIR, "final")
    os.makedirs(final_dir, exist_ok=True)
    peft_model.save_pretrained(final_dir)
    tokenizer.save_pretrained(final_dir)
    print("Phase 2 complete. Checkpoints in", OUTPUT_DIR)

if __name__ == "__main__":
    freeze_support()
    main()