# /// script
# dependencies = [
#     "accelerate",
#     "adam-atan2-pytorch>=0.1.18",
#     "setuptools",
#     "titans-pytorch",
#     "tqdm",
#     "wandb"
# ]
# ///
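# the block above is PEP 723 inline script metadata, so a runner such as `uv run`
# can resolve the dependencies before executing this file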
import random
import tqdm
import gzip
import numpy as np
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from adam_atan2_pytorch import AdoptAtan2
from titans_pytorch import (
    MemoryAsContextTransformer,
    MemoryMLP,
    MemoryAttention
)
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
PRIME_LENGTH = 100
GENERATE_LENGTH = 512
SHOULD_GENERATE = True
SEQ_LEN = 512
# neural memory related
NEURAL_MEMORY_DEPTH = 2
NUM_PERSIST_MEM = 4
NUM_LONGTERM_MEM = 4
NEURAL_MEM_LAYERS = (2, 4, 6) # layers 2, 4, 6 have neural memory, can add more
NEURAL_MEM_GATE_ATTN_OUTPUT = False
NEURAL_MEM_MOMENTUM = True
NEURAL_MEM_MOMENTUM_ORDER = 1
NEURAL_MEM_QK_NORM = True
NEURAL_MEM_MAX_LR = 1e-1
USE_MEM_ATTENTION_MODEL = False
WINDOW_SIZE = 32
NEURAL_MEM_SEGMENT_LEN = 4 # set smaller for more granularity for learning rate / momentum etc
NEURAL_MEM_BATCH_SIZE = 128 # set smaller to update the neural memory weights more often as it traverses the sequence
SLIDING_WINDOWS = True
STORE_ATTN_POOL_CHUNKS = True # whether to use attention pooling for chunk derived momentum, per-layer lr mod, decay
MEMORY_MODEL_PER_LAYER_LEARNED_LR = True
NEURAL_MEM_WEIGHT_RESIDUAL = True # learning to accept contributions from the weights of the previous neural memory layer brings significant improvements. improvised and not in the paper, but inspired by the value residual learning free lunch paper
NEURAL_MEM_QKV_RECEIVES_DIFF_VIEW = True # lets the neural memory choose which layers its queries / keys / values are derived from, so it can graft itself onto the transformer wherever is beneficial. addresses a phd student's observation that the memory network was otherwise learning nothing more than wk @ wv, and generalizes all the possible ways to connect a neural memory to a transformer, a sort of NAS
NEURAL_MEM_SPEC_NORM_SURPRISES = True # applying a lesson from the Muon optimizer to the surprise updates, spectral norming the surprises (see the sketch below)
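# a minimal sketch of the idea behind NEURAL_MEM_SPEC_NORM_SURPRISES, not the
# library's internal implementation: the per-chunk gradient on the memory weights
# (the "surprise") is rescaled so its spectral norm (top singular value) is 1,
# in the spirit of Muon (which goes further and orthogonalizes the update with
# newton-schulz iterations). power iteration is used here purely for illustration

def spectral_norm_surprise_sketch(surprise: Tensor, iters: int = 8, eps: float = 1e-7) -> Tensor:
    # estimate the top singular value by power iteration
    v = torch.randn(surprise.shape[-1], device = surprise.device)
    for _ in range(iters):
        u = F.normalize(surprise @ v, dim = -1, eps = eps)
        v = F.normalize(surprise.t() @ u, dim = -1, eps = eps)
    sigma = u @ surprise @ v
    # divide it out so the surprise has unit spectral norm
    return surprise / sigma.clamp(min = eps)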
# experiment related
PROJECT_NAME = 'titans-mac-transformer'
RUN_NAME = f'mac - {NUM_LONGTERM_MEM} longterm mems, layers {NEURAL_MEM_LAYERS}'
WANDB_ONLINE = False # turn this on to pipe experiment to cloud
# perf related
USE_ACCELERATED_SCAN = True
USE_FLEX_ATTN = True
USE_FAST_INFERENCE = False
# wandb experiment tracker
import wandb
wandb.init(project = PROJECT_NAME, mode = 'online' if WANDB_ONLINE else 'disabled')
wandb.run.name = RUN_NAME
wandb.run.save()
# helpers
def cycle(loader):
    # loop over the dataloader endlessly
    while True:
        for data in loader:
            yield data

def decode_token(token):
    # map a byte to a printable character, clamping control bytes to space
    return str(chr(max(32, token)))

def decode_tokens(tokens):
    return ''.join(list(map(decode_token, tokens)))
# memory model
if USE_MEM_ATTENTION_MODEL:
    neural_memory_model = MemoryAttention(
        dim = 64
    )
else:
    neural_memory_model = MemoryMLP(
        dim = 64,
        depth = NEURAL_MEMORY_DEPTH
    )
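# a conceptual sketch, not titans-pytorch internals: whichever memory model is
# chosen above is trained *online inside the forward pass*. for each chunk, keys
# and values are projected from the tokens, and the memory weights take a step
# against an associative recall loss ||M(k) - v||^2. the negative gradient is the
# "surprise", optionally smoothed with momentum and decayed, per the constants above

def online_surprise_step_sketch(memory: nn.Module, keys: Tensor, values: Tensor, lr: float = 1e-2):
    loss = F.mse_loss(memory(keys), values)                       # associative recall loss
    grads = torch.autograd.grad(loss, tuple(memory.parameters()))
    with torch.no_grad():
        for param, grad in zip(memory.parameters(), grads):
            param.add_(grad, alpha = -lr)                         # step along the surprise direction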
# instantiate memory-as-context transformer
model = MemoryAsContextTransformer(
    num_tokens = 256,
    dim = 384,
    depth = 8,
    segment_len = WINDOW_SIZE,
    num_persist_mem_tokens = NUM_PERSIST_MEM,
    num_longterm_mem_tokens = NUM_LONGTERM_MEM,
    neural_memory_layers = NEURAL_MEM_LAYERS,
    neural_memory_segment_len = NEURAL_MEM_SEGMENT_LEN,
    neural_memory_batch_size = NEURAL_MEM_BATCH_SIZE,
    neural_mem_gate_attn_output = NEURAL_MEM_GATE_ATTN_OUTPUT,
    neural_mem_weight_residual = NEURAL_MEM_WEIGHT_RESIDUAL,
    neural_memory_qkv_receives_diff_views = NEURAL_MEM_QKV_RECEIVES_DIFF_VIEW,
    use_flex_attn = USE_FLEX_ATTN,
    sliding_window_attn = SLIDING_WINDOWS,
    neural_memory_model = neural_memory_model,
    neural_memory_kwargs = dict(
        dim_head = 64,
        heads = 4,
        attn_pool_chunks = STORE_ATTN_POOL_CHUNKS,
        qk_rmsnorm = NEURAL_MEM_QK_NORM,
        momentum = NEURAL_MEM_MOMENTUM,
        momentum_order = NEURAL_MEM_MOMENTUM_ORDER,
        default_step_transform_max_lr = NEURAL_MEM_MAX_LR,
        use_accelerated_scan = USE_ACCELERATED_SCAN,
        per_parameter_lr_modulation = MEMORY_MODEL_PER_LAYER_LEARNED_LR,
        spectral_norm_surprises = NEURAL_MEM_SPEC_NORM_SURPRISES
    )
).cuda()
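# rough bookkeeping for this configuration: SEQ_LEN 512 with WINDOW_SIZE 32 gives
# the attention 16 segments, each augmented with NUM_LONGTERM_MEM = 4 long-term
# memory tokens and NUM_PERSIST_MEM = 4 persistent tokens as extra context. the
# neural memory weights update every NEURAL_MEM_BATCH_SIZE = 128 tokens, i.e. 4
# times per sequence, with lr / momentum modulated per NEURAL_MEM_SEGMENT_LEN = 4 chunk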
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    data = np.frombuffer(file.read(int(95e6)), dtype = np.uint8).copy()
    data_train, data_val = np.split(data, [int(90e6)])
    data_train, data_val = map(torch.from_numpy, (data_train, data_val))
class TextSamplerDataset(Dataset):
    def __init__(self, data, seq_len):
        super().__init__()
        self.data = data
        self.seq_len = seq_len

    def __getitem__(self, index):
        rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
        full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
        return full_seq.cuda()

    def __len__(self):
        return self.data.size(0) // self.seq_len
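# each sample is SEQ_LEN + 1 bytes: with return_loss = True the transformer follows
# the usual lucidrains convention of splitting it into inputs seq[:, :-1] and
# next-byte targets seq[:, 1:], so a single draw provides both sides of the objective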
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = AdoptAtan2(model.parameters(), lr = LEARNING_RATE)
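# AdoptAtan2 pairs the ADOPT variant of Adam with the atan2 trick, which swaps the
# division by sqrt(second moment) + eps for atan2 and so removes the eps hyperparameter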
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10., desc = 'training'):
    model.train()

    for __ in range(GRADIENT_ACCUMULATE_EVERY):
        loss = model(next(train_loader), return_loss = True)
        (loss / GRADIENT_ACCUMULATE_EVERY).backward()  # scale so the accumulated gradient averages the micro-batches

    print(f'training loss: {loss.item():.4f}')
    torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)

    optim.step()
    optim.zero_grad()

    wandb.log(dict(loss = loss.item()))
    if i % VALIDATE_EVERY == 0:
        model.eval()
        with torch.no_grad():
            loss = model(next(val_loader), return_loss = True)
            print(f'validation loss: {loss.item():.4f}')
    if SHOULD_GENERATE and i % GENERATE_EVERY == 0:
        model.eval()

        inp = random.choice(val_dataset)[:PRIME_LENGTH]
        prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')

        sample = model.sample(inp[None, ...], GENERATE_LENGTH, use_cache = USE_FAST_INFERENCE)
        output_str = decode_tokens(sample[0])
        print(output_str)