#!/usr/bin/env python3
"""
Joint AR+SAT training - what AGILLM-3 actually does
Test which attention mechanism works best for BOTH modes simultaneously
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
import math
import argparse
DEV = torch.device("cuda" if torch.cuda.is_available() else "cpu")
VOCAB = 128256  # Llama-3-sized tokenizer vocabulary
def get_mask(n, mode, block_size=4):
    """Additive attention mask: None (NAR), causal (AR), or block-causal (SAT)."""
    if mode == "nar":
        return None  # fully bidirectional: no mask
    elif mode == "ar":
        # Strictly causal: -inf above the diagonal, so position i sees keys 0..i.
        return torch.triu(torch.full((n, n), float("-inf"), device=DEV), 1)
    elif mode == "sat":
        # Block-causal: a token attends to its whole block plus all earlier blocks.
        idx = torch.arange(n, device=DEV)
        block_idx = idx // block_size
        # Rows index queries, columns index keys: visible iff key block <= query block.
        return torch.where(
            block_idx.unsqueeze(0) <= block_idx.unsqueeze(1),
            torch.tensor(0.0, device=DEV),
            torch.tensor(float("-inf"), device=DEV),
        )
    raise ValueError(f"unknown mask mode: {mode!r}")
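# Illustration (a sketch, not captured output): for n=8, block_size=4, row 5 of
# each mask shows the difference between the two causal modes:
#
#   get_mask(8, "ar")[5]                 # -> [0, 0, 0, 0, 0, 0, -inf, -inf]
#   get_mask(8, "sat", block_size=4)[5]  # -> [0, 0, 0, 0, 0, 0, 0, 0]
#
# In SAT mode token 5 sees its entire block (positions 4..7) plus the earlier
# block, so a block can be decoded in parallel while staying causal across blocks.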
def alibi_bias(h, n):
    """ALiBi: each head gets a linear attention penalty by distance to past keys."""
    def slopes(n_heads):
        # Geometric slope schedule from the ALiBi paper; exact for power-of-2 head counts.
        start = 2 ** (-2 ** -(math.log2(n_heads) - 3))
        return [start * (start ** i) for i in range(n_heads)]
    if h > 0 and math.log2(h).is_integer():
        s = slopes(h)
    else:
        # Standard ALiBi fallback for other head counts: interleave the next schedule.
        p = 2 ** math.floor(math.log2(max(1, h)))
        s = slopes(p) + slopes(2 * p)[0::2][: h - p]
    s = torch.tensor(s, device=DEV).view(1, h, 1, 1)
    i = torch.arange(n, device=DEV).view(1, 1, n, 1)  # query positions
    j = torch.arange(n, device=DEV).view(1, 1, 1, n)  # key positions
    # Penalty grows with distance to past keys; future keys are handled by the mode mask.
    return -s * (i - j).clamp_min(0).float()
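# Slope sanity check (a sketch, assuming the default h=8 from main()):
# start = 2 ** (-2 ** -(log2(8) - 3)) = 2 ** -1 = 0.5, so the per-head slopes
# come out to 1/2, 1/4, 1/8, ..., 1/256, matching the ALiBi paper for 8 heads.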
class StandardAttn(nn.Module):
    """Standard multi-head attention: h query heads, h key/value heads."""
    def __init__(self, d, h):
        super().__init__()
        self.h, self.dk = h, d // h
        self.qkv = nn.Linear(d, 3 * d, bias=False)
        self.proj = nn.Linear(d, d, bias=False)
    def forward(self, x, mask=None):
        B, N, _ = x.shape
        qkv = self.qkv(x).reshape(B, N, 3, self.h, self.dk).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # each (B, h, N, dk)
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk) + alibi_bias(self.h, N)
        if mask is not None:
            att = att + mask.unsqueeze(0).unsqueeze(0)  # broadcast over batch and heads
        return self.proj((att.softmax(-1) @ v).transpose(1, 2).reshape(B, N, -1))
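# Design note: attention is kept as an explicit matmul + softmax so the additive
# ALiBi bias and mode mask stay visible. On PyTorch 2.x one could presumably fold
# both into F.scaled_dot_product_attention via its attn_mask argument (which
# accepts additive float masks), trading this explicitness for fused-kernel speed.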
class MQAAttn(nn.Module):
    """Multi-query attention: h query heads share a single key/value head."""
    def __init__(self, d, h):
        super().__init__()
        self.h, self.dk = h, d // h
        self.q = nn.Linear(d, d, bias=False)
        self.k = nn.Linear(d, self.dk, bias=False)
        self.v = nn.Linear(d, self.dk, bias=False)
        self.proj = nn.Linear(d, d, bias=False)
    def forward(self, x, mask=None):
        B, N, _ = x.shape
        q = self.q(x).view(B, N, self.h, self.dk).transpose(1, 2)  # (B, h, N, dk)
        k = self.k(x).view(B, N, 1, self.dk).transpose(1, 2)       # (B, 1, N, dk), broadcast over heads
        v = self.v(x).view(B, N, 1, self.dk).transpose(1, 2)
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk) + alibi_bias(self.h, N)
        if mask is not None:
            att = att + mask.unsqueeze(0).unsqueeze(0)
        return self.proj((att.softmax(-1) @ v).transpose(1, 2).reshape(B, N, -1))
class GQAAttn(nn.Module):
    """Grouped-query attention: h query heads share kv_heads key/value heads."""
    def __init__(self, d, h, kv_heads=2):
        super().__init__()
        assert h % kv_heads == 0, "query heads must divide evenly into KV groups"
        self.h, self.dk, self.kv_heads = h, d // h, kv_heads
        self.q = nn.Linear(d, d, bias=False)
        self.k = nn.Linear(d, kv_heads * self.dk, bias=False)
        self.v = nn.Linear(d, kv_heads * self.dk, bias=False)
        self.proj = nn.Linear(d, d, bias=False)
    def forward(self, x, mask=None):
        B, N, _ = x.shape
        q = self.q(x).view(B, N, self.h, self.dk).transpose(1, 2)
        k = self.k(x).view(B, N, self.kv_heads, self.dk).transpose(1, 2)
        v = self.v(x).view(B, N, self.kv_heads, self.dk).transpose(1, 2)
        # Expand each KV head across its group of query heads.
        k = k.repeat_interleave(self.h // self.kv_heads, dim=1)
        v = v.repeat_interleave(self.h // self.kv_heads, dim=1)
        att = (q @ k.transpose(-1, -2)) / math.sqrt(self.dk) + alibi_bias(self.h, N)
        if mask is not None:
            att = att + mask.unsqueeze(0).unsqueeze(0)
        return self.proj((att.softmax(-1) @ v).transpose(1, 2).reshape(B, N, -1))
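# Why MQA/GQA exist: KV-cache size per token per layer is 2 * kv_heads * dk values.
# A rough sketch with the defaults in main() (d=256, h=8, so dk=32):
#   standard: 2 * 8 * 32 = 512 (1.00x)    mqa:  2 * 1 * 32 =  64 (0.125x)
#   gqa (2):  2 * 2 * 32 = 128 (0.25x)    gqa4: 2 * 4 * 32 = 256 (0.50x)
# These are the ratios reported in the "KV" column of the results table below.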
class Block(nn.Module):
    def __init__(self, d, h, attn_type):
        super().__init__()
        self.ln1, self.ln2 = nn.LayerNorm(d), nn.LayerNorm(d)
        if attn_type == "standard":
            self.attn = StandardAttn(d, h)
        elif attn_type == "mqa":
            self.attn = MQAAttn(d, h)
        elif attn_type == "gqa":
            self.attn = GQAAttn(d, h, kv_heads=2)
        elif attn_type == "gqa4":
            self.attn = GQAAttn(d, h, kv_heads=4)
        else:
            raise ValueError(f"unknown attn_type: {attn_type!r}")
        self.ff = nn.Sequential(nn.Linear(d, 4 * d), nn.GELU(), nn.Linear(4 * d, d))
    def forward(self, x, mask=None):
        # Pre-norm residual transformer block.
        x = x + self.attn(self.ln1(x), mask)
        return x + self.ff(self.ln2(x))
class Model(nn.Module):
def __init__(self, d, layers, h, attn_type):
super().__init__()
self.emb = nn.Embedding(VOCAB, d)
self.blocks = nn.ModuleList([Block(d, h, attn_type) for _ in range(layers)])
self.ln = nn.LayerNorm(d)
self.head = nn.Linear(d, VOCAB, bias=False)
        self.head.weight = self.emb.weight  # tie input and output embeddings
def forward(self, x, mask=None):
x = self.emb(x)
for b in self.blocks:
x = b(x, mask)
return self.head(self.ln(x))
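# Minimal usage sketch (hypothetical shapes, not part of the experiment):
# one set of weights serves both decoding modes just by swapping the mask.
#
#   model = Model(d=256, layers=4, h=8, attn_type="gqa").to(DEV)
#   ids = torch.randint(0, VOCAB, (2, 32), device=DEV)
#   ar_logits = model(ids, get_mask(32, "ar"))    # (2, 32, VOCAB), causal
#   sat_logits = model(ids, get_mask(32, "sat"))  # same weights, block-causal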
def train_joint(attn_type, d, layers, h, batch, seq, steps, ar_weight=0.5, block_size=4):
"""Train with mixed AR and SAT objectives"""
print(f"\n{'='*60}")
print(f"JOINT AR+SAT: {attn_type.upper()} (AR weight={ar_weight})")
print(f"{'='*60}")
model = Model(d, layers, h, attn_type).to(DEV)
params = sum(p.numel() for p in model.parameters())
print(f"Parameters: {params:,}")
opt = torch.optim.AdamW(model.parameters(), lr=1e-4)
ar_mask = get_mask(seq - 1, "ar")
sat_mask = get_mask(seq - 1, "sat", block_size)
ar_losses, sat_losses, times = [], [], []
for step in range(steps):
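        # Synthetic uniform-random tokens: the loss floor is ln(VOCAB) ~= 11.76, so
        # this run compares optimization behavior and throughput, not language quality.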
ids = torch.randint(0, VOCAB, (batch, seq), device=DEV)
target = ids[:, 1:]
input_ids = ids[:, :-1]
        if DEV.type == "cuda":
            torch.cuda.synchronize()  # drain pending kernels so timing is accurate
        start = time.time()
opt.zero_grad()
# AR forward
ar_logits = model(input_ids, ar_mask)
ar_loss = F.cross_entropy(ar_logits.view(-1, VOCAB), target.reshape(-1))
# SAT forward (same input, different mask)
sat_logits = model(input_ids, sat_mask)
sat_loss = F.cross_entropy(sat_logits.view(-1, VOCAB), target.reshape(-1))
        # Combined loss: convex mix of the two objectives (the default ar_weight=0.5 weights them equally)
        loss = ar_weight * ar_loss + (1 - ar_weight) * sat_loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
opt.step()
        if DEV.type == "cuda":
            torch.cuda.synchronize()
        elapsed = time.time() - start
ar_losses.append(ar_loss.item())
sat_losses.append(sat_loss.item())
times.append(elapsed)
if step % 50 == 0 or step == steps - 1:
tok_s = batch * seq / elapsed
print(f"Step {step:3d} | AR: {ar_loss.item():.2f} | SAT: {sat_loss.item():.2f} | {tok_s:.0f} tok/s")
    # Average over the last (up to) 20 steps; avoids a wrong divisor when steps < 20.
    window = min(20, steps)
    avg_ar = sum(ar_losses[-window:]) / window
    avg_sat = sum(sat_losses[-window:]) / window
    avg_tok = batch * seq / (sum(times[-window:]) / window)
return {"type": attn_type, "ar_loss": avg_ar, "sat_loss": avg_sat, "tok_s": avg_tok, "params": params}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--d", type=int, default=256)
parser.add_argument("--layers", type=int, default=4)
parser.add_argument("--heads", type=int, default=8)
parser.add_argument("--batch", type=int, default=16)
parser.add_argument("--seq", type=int, default=128)
parser.add_argument("--steps", type=int, default=200)
parser.add_argument("--block_size", type=int, default=4)
args = parser.parse_args()
print(f"Device: {DEV}")
if torch.cuda.is_available():
print(f"GPU: {torch.cuda.get_device_name()}")
print(f"\nJoint AR+SAT Training (block_size={args.block_size})")
results = []
for attn_type in ["standard", "mqa", "gqa", "gqa4"]:
r = train_joint(attn_type, args.d, args.layers, args.heads,
args.batch, args.seq, args.steps,
ar_weight=0.5, block_size=args.block_size)
results.append(r)
torch.cuda.empty_cache()
print(f"\n{'='*60}")
print("JOINT AR+SAT RESULTS")
print(f"{'='*60}")
    std = next(r for r in results if r["type"] == "standard")
    std_combined = std["ar_loss"] + std["sat_loss"]
    # KV-cache size relative to standard MHA, computed from the actual head counts
    # rather than hard-coded for heads=8.
    kv_heads_of = {"standard": args.heads, "mqa": 1, "gqa": 2, "gqa4": 4}
    for r in sorted(results, key=lambda x: x["ar_loss"] + x["sat_loss"]):
        combined = r["ar_loss"] + r["sat_loss"]
        diff = (std_combined - combined) / std_combined * 100
        kv_ratio = kv_heads_of[r["type"]] / args.heads
        print(f"{r['type']:10s} | AR: {r['ar_loss']:.2f} | SAT: {r['sat_loss']:.2f} | "
              f"Combined: {combined:.2f} ({diff:+.1f}%) | {r['tok_s']:.0f} tok/s | KV: {kv_ratio:.2f}x")
if __name__ == "__main__":
main()