repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/performance_test.py | labml_nn/optimizers/performance_test.py | """
---
title: Test performance of Adam implementations
summary: This experiment compares performance of Adam implementations.
---
# Performance testing Adam
```text
TorchAdam warmup...[DONE] 222.59ms
TorchAdam...[DONE] 1,356.01ms
MyAdam warmup...[DONE] 119.15ms
MyAdam...[DONE] 1,192.89ms
```
[](https://colab.research.google.com/drive/1ngowaAsADj8VdZfBifu_6L6rtjGoEeoR?usp=sharing)
"""
import torch
import torch.nn as nn
from labml_nn.helpers.device import DeviceInfo
from torch.optim import Adam as TorchAdam
from labml import monit
from labml_nn.optimizers.adam import Adam as MyAdam
from labml_nn.optimizers.mnist_experiment import Model
def test():
device_info = DeviceInfo(use_cuda=True, cuda_device=0)
print(device_info)
inp = torch.randn((64, 1, 28, 28), device=device_info.device)
target = torch.ones(64, dtype=torch.long, device=device_info.device)
loss_func = nn.CrossEntropyLoss()
model = Model().to(device_info.device)
my_adam = MyAdam(model.parameters())
torch_adam = TorchAdam(model.parameters())
loss = loss_func(model(inp), target)
loss.backward()
with monit.section('MyAdam warmup'):
for i in range(100):
my_adam.step()
with monit.section('MyAdam'):
for i in range(1000):
my_adam.step()
with monit.section('TorchAdam warmup'):
for i in range(100):
torch_adam.step()
with monit.section('TorchAdam'):
for i in range(1000):
torch_adam.step()
if __name__ == '__main__':
test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/amsgrad.py | labml_nn/optimizers/amsgrad.py | """
---
title: AMSGrad Optimizer
summary: A simple PyTorch implementation/tutorial of AMSGrad optimizer.
---
# AMSGrad
This is a [PyTorch](https://pytorch.org) implementation of the paper
[On the Convergence of Adam and Beyond](https://arxiv.org/abs/1904.09237).
We implement this as an extension to our [Adam optimizer implementation](adam.html).
The implementation it self is really small since it's very similar to Adam.
We also have an implementation of the synthetic example described in the paper where Adam fails to converge.
"""
from typing import Dict
import torch
from torch import nn
from labml_nn.optimizers import WeightDecay
from labml_nn.optimizers.adam import Adam
class AMSGrad(Adam):
"""
## AMSGrad Optimizer
This class extends from Adam optimizer defined in [`adam.py`](adam.html).
Adam optimizer is extending the class `GenericAdaptiveOptimizer`
defined in [`__init__.py`](index.html).
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16,
weight_decay: WeightDecay = WeightDecay(),
optimized_update: bool = True,
amsgrad=True, defaults=None):
"""
### Initialize the optimizer
* `params` is the list of parameters
* `lr` is the learning rate $\alpha$
* `betas` is a tuple of ($\beta_1$, $\beta_2$)
* `eps` is $\hat{\epsilon}$ or $\epsilon$ based on `optimized_update`
* `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)
* 'optimized_update' is a flag whether to optimize the bias correction of the second moment
by doing it after adding $\epsilon$
* `amsgrad` is a flag indicating whether to use AMSGrad or fallback to plain Adam
* `defaults` is a dictionary of default for group values.
This is useful when you want to extend the class `Adam`.
"""
defaults = {} if defaults is None else defaults
defaults.update(dict(amsgrad=amsgrad))
super().__init__(params, lr, betas, eps, weight_decay, optimized_update, defaults)
def init_state(self, state: Dict[str, any], group: Dict[str, any], param: nn.Parameter):
"""
### Initialize a parameter state
* `state` is the optimizer state of the parameter (tensor)
* `group` stores optimizer attributes of the parameter group
* `param` is the parameter tensor $\theta_{t-1}$
"""
# Call `init_state` of Adam optimizer which we are extending
super().init_state(state, group, param)
# If `amsgrad` flag is `True` for this parameter group, we maintain the maximum of
# exponential moving average of squared gradient
if group['amsgrad']:
state['max_exp_avg_sq'] = torch.zeros_like(param, memory_format=torch.preserve_format)
def get_mv(self, state: Dict[str, any], group: Dict[str, any], grad: torch.Tensor):
"""
### Calculate $m_t$ and and $v_t$ or $\max(v_1, v_2, ..., v_{t-1}, v_t)$
* `state` is the optimizer state of the parameter (tensor)
* `group` stores optimizer attributes of the parameter group
* `grad` is the current gradient tensor $g_t$ for the parameter $\theta_{t-1}$
"""
# Get $m_t$ and $v_t$ from *Adam*
m, v = super().get_mv(state, group, grad)
# If this parameter group is using `amsgrad`
if group['amsgrad']:
# Get $\max(v_1, v_2, ..., v_{t-1})$.
#
# 🗒 The paper uses the notation $\hat{v}_t$ for this, which we don't use
# that here because it confuses with the Adam's usage of the same notation
# for bias corrected exponential moving average.
v_max = state['max_exp_avg_sq']
# Calculate $\max(v_1, v_2, ..., v_{t-1}, v_t)$.
#
# 🤔 I feel you should be taking / maintaining the max of the bias corrected
# second exponential average of squared gradient.
# But this is how it's
# [implemented in PyTorch also](https://github.com/pytorch/pytorch/blob/19f4c5110e8bcad5e7e75375194262fca0a6293a/torch/optim/functional.py#L90).
# I guess it doesn't really matter since bias correction only increases the value
# and it only makes an actual difference during the early few steps of the training.
torch.maximum(v_max, v, out=v_max)
return m, v_max
else:
# Fall back to *Adam* if the parameter group is not using `amsgrad`
return m, v
def _synthetic_experiment(is_adam: bool):
"""
## Synthetic Experiment
This is the synthetic experiment described in the paper,
that shows a scenario where *Adam* fails.
The paper (and Adam) formulates the problem of optimizing as
minimizing the expected value of a function, $\mathbb{E}[f(\theta)]$
with respect to the parameters $\theta$.
In the stochastic training setting we do not get hold of the function $f$
it self; that is,
when you are optimizing a NN $f$ would be the function on entire
batch of data.
What we actually evaluate is a mini-batch so the actual function is
realization of the stochastic $f$.
This is why we are talking about an expected value.
So let the function realizations be $f_1, f_2, ..., f_T$ for each time step
of training.
We measure the performance of the optimizer as the regret,
$$R(T) = \sum_{t=1}^T \big[ f_t(\theta_t) - f_t(\theta^*) \big]$$
where $\theta_t$ is the parameters at time step $t$, and $\theta^*$ is the
optimal parameters that minimize $\mathbb{E}[f(\theta)]$.
Now lets define the synthetic problem,
\begin{align}
f_t(x) =
\begin{cases}
1010 x, & \text{for } t \mod 101 = 1 \\
-10 x, & \text{otherwise}
\end{cases}
\end{align}
where $-1 \le x \le +1$.
The optimal solution is $x = -1$.
This code will try running *Adam* and *AMSGrad* on this problem.
"""
# Define $x$ parameter
x = nn.Parameter(torch.tensor([.0]))
# Optimal, $x^* = -1$
x_star = nn.Parameter(torch.tensor([-1]), requires_grad=False)
def func(t: int, x_: nn.Parameter):
"""
### $f_t(x)$
"""
if t % 101 == 1:
return (1010 * x_).sum()
else:
return (-10 * x_).sum()
# Initialize the relevant optimizer
if is_adam:
optimizer = Adam([x], lr=1e-2, betas=(0.9, 0.99))
else:
optimizer = AMSGrad([x], lr=1e-2, betas=(0.9, 0.99))
# $R(T)$
total_regret = 0
from labml import monit, tracker, experiment
# Create experiment to record results
with experiment.record(name='synthetic', comment='Adam' if is_adam else 'AMSGrad'):
# Run for $10^7$ steps
for step in monit.loop(10_000_000):
# $f_t(\theta_t) - f_t(\theta^*)$
regret = func(step, x) - func(step, x_star)
# $R(T) = \sum_{t=1}^T \big[ f_t(\theta_t) - f_t(\theta^*) \big]$
total_regret += regret.item()
# Track results every 1,000 steps
if (step + 1) % 1000 == 0:
tracker.save(loss=regret, x=x, regret=total_regret / (step + 1))
# Calculate gradients
regret.backward()
# Optimize
optimizer.step()
# Clear gradients
optimizer.zero_grad()
# Make sure $-1 \le x \le +1$
x.data.clamp_(-1., +1.)
if __name__ == '__main__':
# Run the synthetic experiment is *Adam*.
# You can see that Adam converges at $x = +1$
_synthetic_experiment(True)
# Run the synthetic experiment is *AMSGrad*
# You can see that AMSGrad converges to true optimal $x = -1$
_synthetic_experiment(False)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/mnist_experiment.py | labml_nn/optimizers/mnist_experiment.py | """
---
title: MNIST example to test the optimizers
summary: This is a simple MNIST example with a CNN model to test the optimizers.
---
# MNIST example to test the optimizers
"""
import torch.nn as nn
import torch.utils.data
from labml import experiment, tracker
from labml.configs import option
from labml_nn.helpers.datasets import MNISTConfigs
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.helpers.metrics import Accuracy
from labml_nn.helpers.trainer import TrainValidConfigs, BatchIndex
from labml_nn.optimizers.configs import OptimizerConfigs
class Model(nn.Module):
"""
## The model
"""
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.pool1 = nn.MaxPool2d(2)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.pool2 = nn.MaxPool2d(2)
self.fc1 = nn.Linear(16 * 50, 500)
self.fc2 = nn.Linear(500, 10)
self.activation = nn.ReLU()
def forward(self, x):
x = self.activation(self.conv1(x))
x = self.pool1(x)
x = self.activation(self.conv2(x))
x = self.pool2(x)
x = self.activation(self.fc1(x.view(-1, 16 * 50)))
return self.fc2(x)
class Configs(MNISTConfigs, TrainValidConfigs):
"""
## Configurable Experiment Definition
"""
optimizer: torch.optim.Adam
model: nn.Module
device: torch.device = DeviceConfigs()
epochs: int = 10
is_save_models = True
model: nn.Module
inner_iterations = 10
accuracy_func = Accuracy()
loss_func = nn.CrossEntropyLoss()
def init(self):
tracker.set_queue("loss.*", 20, True)
tracker.set_scalar("accuracy.*", True)
self.state_modules = [self.accuracy_func]
def step(self, batch: any, batch_idx: BatchIndex):
# Get the batch
data, target = batch[0].to(self.device), batch[1].to(self.device)
# Add global step if we are in training mode
if self.mode.is_train:
tracker.add_global_step(len(data))
# Run the model
output = self.model(data)
# Calculate the loss
loss = self.loss_func(output, target)
# Calculate the accuracy
self.accuracy_func(output, target)
# Log the loss
tracker.add("loss.", loss)
# Optimize if we are in training mode
if self.mode.is_train:
# Calculate the gradients
loss.backward()
# Take optimizer step
self.optimizer.step()
# Log the parameter and gradient L2 norms once per epoch
if batch_idx.is_last:
tracker.add('model', self.model)
tracker.add('optimizer', (self.optimizer, {'model': self.model}))
# Clear the gradients
self.optimizer.zero_grad()
# Save logs
tracker.save()
@option(Configs.model)
def model(c: Configs):
return Model().to(c.device)
@option(Configs.optimizer)
def _optimizer(c: Configs):
"""
Create a configurable optimizer.
We can change the optimizer type and hyper-parameters using configurations.
"""
opt_conf = OptimizerConfigs()
opt_conf.parameters = c.model.parameters()
return opt_conf
def main():
conf = Configs()
conf.inner_iterations = 10
experiment.create(name='mnist_ada_belief')
experiment.configs(conf, {'inner_iterations': 10,
# Specify the optimizer
'optimizer.optimizer': 'Adam',
'optimizer.learning_rate': 1.5e-4})
experiment.add_pytorch_models(dict(model=conf.model))
with experiment.start():
conf.run()
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/optimizers/noam.py | labml_nn/optimizers/noam.py | """
---
title: Noam optimizer from Attention is All You Need paper
summary: >
This is a tutorial/implementation of Noam optimizer.
Noam optimizer has a warm-up period and then an exponentially decaying learning rate.
---
# Noam Optimizer
This is the [PyTorch](https://pytorch.org) implementation of optimizer introduced in the paper
[Attention Is All You Need](https://arxiv.org/abs/1706.03762).
"""
from typing import Dict
from labml_nn.optimizers import WeightDecay
from labml_nn.optimizers.amsgrad import AMSGrad
class Noam(AMSGrad):
"""
## Noam Optimizer
This class extends from Adam optimizer defined in [`adam.py`](adam.html).
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16,
weight_decay: WeightDecay = WeightDecay(),
optimized_update: bool = True,
amsgrad=False,
warmup=0, d_model=512, defaults=None):
"""
### Initialize the optimizer
* `params` is the list of parameters
* `lr` is the learning rate $\alpha$
* `betas` is a tuple of ($\beta_1$, $\beta_2$)
* `eps` is $\hat{\epsilon}$ or $\epsilon$ based on `optimized_update`
* `weight_decay` is an instance of class `WeightDecay` defined in [`__init__.py`](index.html)
* 'optimized_update' is a flag whether to optimize the bias correction of the second moment
by doing it after adding $\epsilon$
* `amsgrad` is a flag indicating whether to use AMSGrad or fallback to plain Adam
* `warmup` number of warmup steps
* `d_model` model size; i.e. number of dimensions in the transformer
* `defaults` is a dictionary of default for group values.
This is useful when you want to extend the class `AdamWarmup`.
"""
defaults = {} if defaults is None else defaults
defaults.update(dict(warmup=warmup))
super().__init__(params, lr, betas, eps, weight_decay, optimized_update, amsgrad, defaults)
self.d_model = d_model
def get_lr(self, state: Dict[str, any], group: Dict[str, any]):
"""
### Get learning-rate
$$\alpha \frac{1}{\sqrt{d_{model}}} \min \bigg(\frac{1}{\sqrt{t}}, \frac{t}{w^{3/2}}\bigg)$$
where $w$ is the number of warmup steps.
"""
# $$\min \bigg(\frac{1}{\sqrt{t}}, \frac{t}{w^{3/2}}\bigg)$$
factor = min(state['step'] ** (-0.5), state['step'] * group['warmup'] ** (-1.5))
# $$\alpha \frac{1}{\sqrt{d_{model}}} \min \bigg(\frac{1}{\sqrt{t}}, \frac{t}{w^{3/2}}\bigg)$$
return group['lr'] * self.d_model ** (-0.5) * factor
def _test_noam_lr():
"""
### Plot learning rate for different warmups and model sizes

"""
import matplotlib.pyplot as plt
import numpy as np
from torch import nn
model = nn.Linear(10, 10)
opts = [Noam(model.parameters(), d_model=512, warmup=4000, lr=1),
Noam(model.parameters(), d_model=512, warmup=8000, lr=1),
Noam(model.parameters(), d_model=2048, warmup=2000, lr=1)]
plt.plot(np.arange(1, 20000), [[opt.get_lr({'step': i}, opt.defaults) for opt in opts] for i in range(1, 20000)])
plt.legend(["512:4000", "512:8000", "2048:2000"])
plt.title("Learning Rate")
plt.show()
if __name__ == '__main__':
_test_noam_lr()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/helpers/metrics.py | labml_nn/helpers/metrics.py | import dataclasses
from abc import ABC
import torch
from labml import tracker
class StateModule:
def __init__(self):
pass
# def __call__(self):
# raise NotImplementedError
def create_state(self) -> any:
raise NotImplementedError
def set_state(self, data: any):
raise NotImplementedError
def on_epoch_start(self):
raise NotImplementedError
def on_epoch_end(self):
raise NotImplementedError
class Metric(StateModule, ABC):
def track(self):
pass
@dataclasses.dataclass
class AccuracyState:
samples: int = 0
correct: int = 0
def reset(self):
self.samples = 0
self.correct = 0
class Accuracy(Metric):
data: AccuracyState
def __init__(self, ignore_index: int = -1):
super().__init__()
self.ignore_index = ignore_index
def __call__(self, output: torch.Tensor, target: torch.Tensor):
output = output.view(-1, output.shape[-1])
target = target.view(-1)
pred = output.argmax(dim=-1)
mask = target == self.ignore_index
pred.masked_fill_(mask, self.ignore_index)
n_masked = mask.sum().item()
self.data.correct += pred.eq(target).sum().item() - n_masked
self.data.samples += len(target) - n_masked
def create_state(self):
return AccuracyState()
def set_state(self, data: any):
self.data = data
def on_epoch_start(self):
self.data.reset()
def on_epoch_end(self):
self.track()
def track(self):
if self.data.samples == 0:
return
tracker.add("accuracy.", self.data.correct / self.data.samples)
class AccuracyDirect(Accuracy):
data: AccuracyState
def __call__(self, output: torch.Tensor, target: torch.Tensor):
output = output.view(-1)
target = target.view(-1)
self.data.correct += output.eq(target).sum().item()
self.data.samples += len(target)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/helpers/datasets.py | labml_nn/helpers/datasets.py | import random
from pathlib import PurePath, Path
from typing import List, Callable, Dict, Optional
from torchvision import datasets, transforms
import torch
from labml import lab
from labml import monit
from labml.configs import BaseConfigs
from labml.configs import aggregate, option
from labml.utils.download import download_file
from torch.utils.data import DataLoader
from torch.utils.data import IterableDataset, Dataset
def _mnist_dataset(is_train, transform):
return datasets.MNIST(str(lab.get_data_path()),
train=is_train,
download=True,
transform=transform)
class MNISTConfigs(BaseConfigs):
"""
Configurable MNIST data set.
Arguments:
dataset_name (str): name of the data set, ``MNIST``
dataset_transforms (torchvision.transforms.Compose): image transformations
train_dataset (torchvision.datasets.MNIST): training dataset
valid_dataset (torchvision.datasets.MNIST): validation dataset
train_loader (torch.utils.data.DataLoader): training data loader
valid_loader (torch.utils.data.DataLoader): validation data loader
train_batch_size (int): training batch size
valid_batch_size (int): validation batch size
train_loader_shuffle (bool): whether to shuffle training data
valid_loader_shuffle (bool): whether to shuffle validation data
"""
dataset_name: str = 'MNIST'
dataset_transforms: transforms.Compose
train_dataset: datasets.MNIST
valid_dataset: datasets.MNIST
train_loader: DataLoader
valid_loader: DataLoader
train_batch_size: int = 64
valid_batch_size: int = 1024
train_loader_shuffle: bool = True
valid_loader_shuffle: bool = False
@option(MNISTConfigs.dataset_transforms)
def mnist_transforms():
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
@option(MNISTConfigs.train_dataset)
def mnist_train_dataset(c: MNISTConfigs):
return _mnist_dataset(True, c.dataset_transforms)
@option(MNISTConfigs.valid_dataset)
def mnist_valid_dataset(c: MNISTConfigs):
return _mnist_dataset(False, c.dataset_transforms)
@option(MNISTConfigs.train_loader)
def mnist_train_loader(c: MNISTConfigs):
return DataLoader(c.train_dataset,
batch_size=c.train_batch_size,
shuffle=c.train_loader_shuffle)
@option(MNISTConfigs.valid_loader)
def mnist_valid_loader(c: MNISTConfigs):
return DataLoader(c.valid_dataset,
batch_size=c.valid_batch_size,
shuffle=c.valid_loader_shuffle)
aggregate(MNISTConfigs.dataset_name, 'MNIST',
(MNISTConfigs.dataset_transforms, 'mnist_transforms'),
(MNISTConfigs.train_dataset, 'mnist_train_dataset'),
(MNISTConfigs.valid_dataset, 'mnist_valid_dataset'),
(MNISTConfigs.train_loader, 'mnist_train_loader'),
(MNISTConfigs.valid_loader, 'mnist_valid_loader'))
def _cifar_dataset(is_train, transform):
return datasets.CIFAR10(str(lab.get_data_path()),
train=is_train,
download=True,
transform=transform)
class CIFAR10Configs(BaseConfigs):
"""
Configurable CIFAR 10 data set.
Arguments:
dataset_name (str): name of the data set, ``CIFAR10``
dataset_transforms (torchvision.transforms.Compose): image transformations
train_dataset (torchvision.datasets.CIFAR10): training dataset
valid_dataset (torchvision.datasets.CIFAR10): validation dataset
train_loader (torch.utils.data.DataLoader): training data loader
valid_loader (torch.utils.data.DataLoader): validation data loader
train_batch_size (int): training batch size
valid_batch_size (int): validation batch size
train_loader_shuffle (bool): whether to shuffle training data
valid_loader_shuffle (bool): whether to shuffle validation data
"""
dataset_name: str = 'CIFAR10'
dataset_transforms: transforms.Compose
train_dataset: datasets.CIFAR10
valid_dataset: datasets.CIFAR10
train_loader: DataLoader
valid_loader: DataLoader
train_batch_size: int = 64
valid_batch_size: int = 1024
train_loader_shuffle: bool = True
valid_loader_shuffle: bool = False
@CIFAR10Configs.calc(CIFAR10Configs.dataset_transforms)
def cifar10_transforms():
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
@CIFAR10Configs.calc(CIFAR10Configs.train_dataset)
def cifar10_train_dataset(c: CIFAR10Configs):
return _cifar_dataset(True, c.dataset_transforms)
@CIFAR10Configs.calc(CIFAR10Configs.valid_dataset)
def cifar10_valid_dataset(c: CIFAR10Configs):
return _cifar_dataset(False, c.dataset_transforms)
@CIFAR10Configs.calc(CIFAR10Configs.train_loader)
def cifar10_train_loader(c: CIFAR10Configs):
return DataLoader(c.train_dataset,
batch_size=c.train_batch_size,
shuffle=c.train_loader_shuffle)
@CIFAR10Configs.calc(CIFAR10Configs.valid_loader)
def cifar10_valid_loader(c: CIFAR10Configs):
return DataLoader(c.valid_dataset,
batch_size=c.valid_batch_size,
shuffle=c.valid_loader_shuffle)
CIFAR10Configs.aggregate(CIFAR10Configs.dataset_name, 'CIFAR10',
(CIFAR10Configs.dataset_transforms, 'cifar10_transforms'),
(CIFAR10Configs.train_dataset, 'cifar10_train_dataset'),
(CIFAR10Configs.valid_dataset, 'cifar10_valid_dataset'),
(CIFAR10Configs.train_loader, 'cifar10_train_loader'),
(CIFAR10Configs.valid_loader, 'cifar10_valid_loader'))
class TextDataset:
itos: List[str]
stoi: Dict[str, int]
n_tokens: int
train: str
valid: str
standard_tokens: List[str] = []
@staticmethod
def load(path: PurePath):
with open(str(path), 'r') as f:
return f.read()
def __init__(self, path: PurePath, tokenizer: Callable, train: str, valid: str, test: str, *,
n_tokens: Optional[int] = None,
stoi: Optional[Dict[str, int]] = None,
itos: Optional[List[str]] = None):
self.test = test
self.valid = valid
self.train = train
self.tokenizer = tokenizer
self.path = path
if n_tokens or stoi or itos:
assert stoi and itos and n_tokens
self.n_tokens = n_tokens
self.stoi = stoi
self.itos = itos
else:
self.n_tokens = len(self.standard_tokens)
self.stoi = {t: i for i, t in enumerate(self.standard_tokens)}
with monit.section("Tokenize"):
tokens = self.tokenizer(self.train) + self.tokenizer(self.valid)
tokens = sorted(list(set(tokens)))
for t in monit.iterate("Build vocabulary", tokens):
self.stoi[t] = self.n_tokens
self.n_tokens += 1
self.itos = [''] * self.n_tokens
for t, n in self.stoi.items():
self.itos[n] = t
def text_to_i(self, text: str) -> torch.Tensor:
tokens = self.tokenizer(text)
return torch.tensor([self.stoi[s] for s in tokens if s in self.stoi], dtype=torch.long)
def __repr__(self):
return f'{len(self.train) / 1_000_000 :,.2f}M, {len(self.valid) / 1_000_000 :,.2f}M - {str(self.path)}'
class SequentialDataLoader(IterableDataset):
def __init__(self, *, text: str, dataset: TextDataset,
batch_size: int, seq_len: int):
self.seq_len = seq_len
data = dataset.text_to_i(text)
n_batch = data.shape[0] // batch_size
data = data.narrow(0, 0, n_batch * batch_size)
data = data.view(batch_size, -1).t().contiguous()
self.data = data
def __len__(self):
return self.data.shape[0] // self.seq_len
def __iter__(self):
self.idx = 0
return self
def __next__(self):
if self.idx >= self.data.shape[0] - 1:
raise StopIteration()
seq_len = min(self.seq_len, self.data.shape[0] - 1 - self.idx)
i = self.idx + seq_len
data = self.data[self.idx: i]
target = self.data[self.idx + 1: i + 1]
self.idx = i
return data, target
def __getitem__(self, idx):
seq_len = min(self.seq_len, self.data.shape[0] - 1 - idx)
i = idx + seq_len
data = self.data[idx: i]
target = self.data[idx + 1: i + 1]
return data, target
class SequentialUnBatchedDataset(Dataset):
def __init__(self, *, text: str, dataset: TextDataset,
seq_len: int,
is_random_offset: bool = True):
self.is_random_offset = is_random_offset
self.seq_len = seq_len
self.data = dataset.text_to_i(text)
def __len__(self):
return (self.data.shape[0] - 1) // self.seq_len
def __getitem__(self, idx):
start = idx * self.seq_len
assert start + self.seq_len + 1 <= self.data.shape[0]
if self.is_random_offset:
start += random.randint(0, min(self.seq_len - 1, self.data.shape[0] - (start + self.seq_len + 1)))
end = start + self.seq_len
data = self.data[start: end]
target = self.data[start + 1: end + 1]
return data, target
class TextFileDataset(TextDataset):
standard_tokens = []
def __init__(self, path: PurePath, tokenizer: Callable, *,
url: Optional[str] = None,
filter_subset: Optional[int] = None):
path = Path(path)
if not path.exists():
if not url:
raise FileNotFoundError(str(path))
else:
download_file(url, path)
with monit.section("Load data"):
text = self.load(path)
if filter_subset:
text = text[:filter_subset]
split = int(len(text) * .9)
train = text[:split]
valid = text[split:]
super().__init__(path, tokenizer, train, valid, '')
def _test_tiny_shakespeare():
from labml import lab
_ = TextFileDataset(lab.get_data_path() / 'tiny_shakespeare.txt', lambda x: list(x),
url='https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt')
if __name__ == '__main__':
_test_tiny_shakespeare()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/helpers/schedule.py | labml_nn/helpers/schedule.py | from typing import Tuple, List
class Schedule:
def __call__(self, x):
raise NotImplementedError()
class Flat(Schedule):
def __init__(self, value):
self.__value = value
def __call__(self, x):
return self.__value
def __str__(self):
return f"Schedule({self.__value})"
class Dynamic(Schedule):
def __init__(self, value):
self.__value = value
def __call__(self, x):
return self.__value
def update(self, value):
self.__value = value
def __str__(self):
return "Dynamic"
class Piecewise(Schedule):
"""
## Piecewise schedule
"""
def __init__(self, endpoints: List[Tuple[float, float]], outside_value: float = None):
"""
### Initialize
`endpoints` is list of pairs `(x, y)`.
The values between endpoints are linearly interpolated.
`y` values outside the range covered by `x` are
`outside_value`.
"""
# `(x, y)` pairs should be sorted
indexes = [e[0] for e in endpoints]
assert indexes == sorted(indexes)
self._outside_value = outside_value
self._endpoints = endpoints
def __call__(self, x):
"""
### Find `y` for given `x`
"""
# iterate through each segment
for (x1, y1), (x2, y2) in zip(self._endpoints[:-1], self._endpoints[1:]):
# interpolate if `x` is within the segment
if x1 <= x < x2:
dx = float(x - x1) / (x2 - x1)
return y1 + dx * (y2 - y1)
# return outside value otherwise
return self._outside_value
def __str__(self):
endpoints = ", ".join([f"({e[0]}, {e[1]})" for e in self._endpoints])
return f"Schedule[{endpoints}, {self._outside_value}]"
class RelativePiecewise(Piecewise):
def __init__(self, relative_endpoits: List[Tuple[float, float]], total_steps: int):
endpoints = []
for e in relative_endpoits:
index = int(total_steps * e[0])
assert index >= 0
endpoints.append((index, e[1]))
super().__init__(endpoints, outside_value=relative_endpoits[-1][1])
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/helpers/optimizer.py | labml_nn/helpers/optimizer.py | from typing import Tuple
import torch
from labml import tracker
from labml.configs import BaseConfigs, option, meta_config
class OptimizerConfigs(BaseConfigs):
r"""
This creates a configurable optimizer.
Arguments:
learning_rate (float): Learning rate of the optimizer. Defaults to ``0.01``.
momentum (float): Momentum of the optimizer. Defaults to ``0.5``.
parameters: Model parameters to optimize.
d_model (int): Embedding size of the model (for Noam optimizer).
betas (Tuple[float, float]): Betas for Adam optimizer. Defaults to ``(0.9, 0.999)``.
eps (float): Epsilon for Adam/RMSProp optimizers. Defaults to ``1e-8``.
step_factor (int): Step factor for Noam optimizer. Defaults to ``1024``.
Also there is a better (more options) implementation in ``labml_nn``.
`We recommend using that <https://nn.labml.ai/optimizers/configs.html>`_.
"""
optimizer: torch.optim.Adam
learning_rate: float = 0.01
momentum: float = 0.5
parameters: any
d_model: int
betas: Tuple[float, float] = (0.9, 0.999)
eps: float = 1e-8
step_factor: int = 1024
def __init__(self):
super().__init__(_primary='optimizer')
meta_config(OptimizerConfigs.parameters)
@option(OptimizerConfigs.optimizer, 'SGD')
def sgd_optimizer(c: OptimizerConfigs):
return torch.optim.SGD(c.parameters, c.learning_rate, c.momentum)
@option(OptimizerConfigs.optimizer, 'Adam')
def adam_optimizer(c: OptimizerConfigs):
return torch.optim.Adam(c.parameters, lr=c.learning_rate,
betas=c.betas, eps=c.eps)
class NoamOpt:
def __init__(self, model_size: int, learning_rate: float, warmup: int, step_factor: int, optimizer):
self.step_factor = step_factor
self.optimizer = optimizer
self.warmup = warmup
self.learning_rate = learning_rate
self.model_size = model_size
self._rate = 0
def step(self):
rate = self.rate(tracker.get_global_step() / self.step_factor)
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step):
factor = self.model_size ** (-0.5) * min(step ** (-0.5), step * self.warmup ** (-1.5))
return self.learning_rate * factor
def zero_grad(self):
self.optimizer.zero_grad()
@option(OptimizerConfigs.optimizer, 'Noam')
def noam_optimizer(c: OptimizerConfigs):
optimizer = torch.optim.Adam(c.parameters, lr=0.0, betas=c.betas, eps=c.eps)
return NoamOpt(c.d_model, 1, 2000, c.step_factor, optimizer)
def _test_noam_optimizer():
import matplotlib.pyplot as plt
import numpy as np
opts = [NoamOpt(512, 1, 4000, None),
NoamOpt(512, 1, 8000, None),
NoamOpt(2048, 1, 2000, None)]
plt.plot(np.arange(1, 20000), [[opt.rate(i) for opt in opts] for i in range(1, 20000)])
plt.legend(["512:4000", "512:8000", "256:4000"])
plt.title("Optimizer")
plt.show()
if __name__ == '__main__':
_test_noam_optimizer()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/helpers/trainer.py | labml_nn/helpers/trainer.py | import signal
import typing
from typing import Dict, List, Callable
from typing import Optional, Tuple, Any, Collection
import torch.optim
import torch.optim
import torch.utils.data
import torch.utils.data
from labml import tracker, logger, monit
from labml.configs import BaseConfigs, meta_config, option
from labml.internal.monitor import Loop
from labml.logger import Text
from torch import nn
from .device import DeviceConfigs
from .metrics import StateModule
class TrainingLoopIterator(Collection):
    """
    Iterator over training-loop steps.

    With a fixed `step`, yields `start, start + step, ...` while below `total`.
    With `step is None`, counts iterations internally but yields the tracker's
    current global step each time.
    """

    def __init__(self, start: int, total: int, step: Optional[int]):
        self.step = step
        self.total = total
        self.start = start
        self.i = None

    def __iter__(self):
        # Restart the internal counter on each fresh iteration
        self.i = None
        return self

    def __next__(self):
        if self.i is None:
            # First call: begin at `start` (or 0 when counting iterations)
            self.i = self.start if self.step is not None else 0
        else:
            self.i += self.step if self.step is not None else 1
        if self.i >= self.total:
            raise StopIteration()
        # When `step` is None, the value yielded is the live global step
        return self.i if self.step is not None else tracker.get_global_step()

    def __len__(self) -> int:
        if self.step is None:
            return self.total
        return (self.total - self.start) // self.step

    def __contains__(self, x: object) -> bool:
        # Membership is never meaningful for this iterator
        return False
class TrainingLoop:
    """
    Iterable training loop that drives `monit.loop`, periodically writes
    tracker data / new lines, and intercepts SIGINT so an interrupted
    iteration can finish cleanly before stopping.
    """
    _iter: Optional[TrainingLoopIterator]
    __loop: Loop
    __signal_received: Optional[Tuple[Any, Any]]

    def __init__(self, *,
                 loop_count: int,
                 loop_step: Optional[int],
                 log_new_line_interval: int,
                 log_write_interval: int,
                 is_loop_on_interrupt: bool):
        self.__loop_count = loop_count
        self.__loop_step = loop_step
        self.__log_new_line_interval = log_new_line_interval
        self.__log_write_interval = log_write_interval
        self.__last_write_step = 0
        self.__last_new_line_step = 0
        self.__last_save_step = 0
        self.__signal_received = None
        self.__is_loop_on_interrupt = is_loop_on_interrupt
        self._iter = None

    def __iter__(self):
        """Start a loop from the current global step and install the SIGINT handler."""
        self._iter = TrainingLoopIterator(tracker.get_global_step(),
                                          self.__loop_count,
                                          self.__loop_step)
        self.__loop = monit.loop(typing.cast(Collection, self._iter))
        iter(self.__loop)
        try:
            # signal.signal only works on the main thread; ignore failure elsewhere
            self.old_handler = signal.signal(signal.SIGINT, self.__handler)
        except ValueError:
            pass
        return self

    @property
    def idx(self):
        """Loop index normalized by `loop_step` (0 before the first step)."""
        if not self._iter:
            return 0
        if not self._iter.i:
            return 0
        if self.__loop_step is None:
            return self._iter.i
        return self._iter.i / self.__loop_step

    def __finish(self):
        """Restore the previous SIGINT handler and flush the tracker."""
        try:
            signal.signal(signal.SIGINT, self.old_handler)
        except ValueError:
            pass
        tracker.save()
        tracker.new_line()

    def __next__(self):
        # A SIGINT received during the previous iteration stops the loop here
        if self.__signal_received is not None:
            logger.log('\nKilling Loop.', Text.danger)
            monit.finish_loop()
            self.__finish()
            raise StopIteration("SIGINT")
        try:
            global_step = next(self.__loop)
        except StopIteration as e:
            self.__finish()
            raise e
        tracker.set_global_step(global_step)
        # Periodic tracker flush / console new line
        if global_step - self.__last_write_step >= self.__log_write_interval:
            tracker.save()
            self.__last_write_step = global_step
        if global_step - self.__last_new_line_step >= self.__log_new_line_interval:
            tracker.new_line()
            self.__last_new_line_step = global_step
        return global_step

    def __handler(self, sig, frame):
        # Pass second interrupt without delaying
        if self.__signal_received is not None:
            logger.log('\nSIGINT received twice. Stopping...', Text.danger)
            self.old_handler(*self.__signal_received)
            return
        if self.__is_loop_on_interrupt:
            # Store the interrupt signal for later
            self.__signal_received = (sig, frame)
            logger.log('\nSIGINT received. Delaying KeyboardInterrupt.', Text.danger)
        else:
            self.__finish()
            logger.log('Killing loop...', Text.danger)
            self.old_handler(sig, frame)

    def __str__(self):
        return "LabTrainingLoop"
class TrainingLoopConfigs(BaseConfigs):
    r"""
    This is a configurable training loop. You can extend this class for your configurations
    if it involves a training loop.

    >>> for step in conf.training_loop:
    >>>     ...

    Arguments:
        loop_count (int): Total number of steps. Defaults to ``10``.
        loop_step (int): Number of steps to increment per iteration. Defaults to ``1``.
        log_new_line_interval (int): The interval (in steps) to print a new line to the screen.
            Defaults to ``1``.
        log_write_interval (int): The interval (in steps) to call :func:`labml.tracker.save`.
            Defaults to ``1``.
        is_loop_on_interrupt (bool): Whether to handle keyboard interrupts and wait until an iteration is complete.
            Defaults to ``False``.
    """
    loop_count: int = 10
    loop_step: int = 1
    log_new_line_interval: int = 1
    log_write_interval: int = 1
    is_loop_on_interrupt: bool = False
    # Built by the `_loop_configs` option from the values above
    training_loop: TrainingLoop
@option(TrainingLoopConfigs.training_loop)
def _loop_configs(c: TrainingLoopConfigs):
    """Build the default `TrainingLoop` from the configuration values."""
    loop_kwargs = dict(loop_count=c.loop_count,
                       loop_step=c.loop_step,
                       log_new_line_interval=c.log_new_line_interval,
                       log_write_interval=c.log_write_interval,
                       is_loop_on_interrupt=c.is_loop_on_interrupt)
    return TrainingLoop(**loop_kwargs)
# NOTE(review): `meta_config` presumably marks these options as meta
# (excluded from hyper-parameter tracking) — confirm against labml docs.
meta_config(TrainingLoopConfigs.loop_step,
            TrainingLoopConfigs.loop_count,
            TrainingLoopConfigs.log_new_line_interval,
            TrainingLoopConfigs.log_write_interval,
            TrainingLoopConfigs.is_loop_on_interrupt)
class ModeState:
    """
    Holds the current training/optimization flags, with a rollback stack so
    nested temporary mode changes can be undone in LIFO order.
    """

    def __init__(self):
        self._rollback_stack = []
        self.is_train = False
        self.is_optimize = False

    def _enter(self, mode: Dict[str, any]):
        """Apply the non-None flags in `mode`; return a token for `_exit`."""
        # Remember the previous value of every flag we are about to overwrite
        saved = {k: getattr(self, k) for k, v in mode.items() if v is not None}
        for k, v in mode.items():
            if v is not None:
                setattr(self, k, v)
        self._rollback_stack.append(saved)
        return len(self._rollback_stack)

    def _exit(self, n: int):
        """Undo the change identified by token `n` (must be the most recent)."""
        assert n == len(self._rollback_stack)
        saved = self._rollback_stack.pop()
        for k, v in saved.items():
            setattr(self, k, v)

    def update(self, *,
               is_train: Optional[bool] = None,
               is_optimize: Optional[bool] = None):
        """Return a context manager that temporarily applies the given flags."""
        return Mode(self, is_train=is_train, is_optimize=is_optimize)
class Mode:
    """Context manager applying a temporary flag change to a `ModeState`."""

    def __init__(self, mode: ModeState, **kwargs: any):
        self.mode = mode
        # Only flags explicitly given (non-None) take part in the update
        self.update = {k: v for k, v in kwargs.items() if v is not None}
        self.idx = -1

    def __enter__(self):
        self.idx = self.mode._enter(self.update)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.mode._exit(self.idx)
class Trainer:
    """
    Runs (a slice of) one epoch over a data loader, calling `step` per batch,
    and manages per-epoch state via `StateModule`s. An epoch is divided into
    `inner_iterations` partial passes so training and validation can be
    interleaved.
    """

    def __init__(self, *,
                 name: str,
                 mode: ModeState,
                 data_loader: torch.utils.data.DataLoader,
                 inner_iterations: int,
                 state_modules: List[StateModule],
                 is_track_time: bool,
                 step: Callable[[any, 'BatchIndex'], None]):
        self.is_track_time = is_track_time
        self.mode = mode
        self.name = name
        self.step = step
        self.state_modules = state_modules
        # Live iterator over the data loader; rebuilt at each epoch boundary
        self.__iterable = None
        self.__states = [sm.create_state() for sm in self.state_modules]
        self.inner_iterations = inner_iterations
        self.data_loader = data_loader
        self._batch_index = BatchIndex(len(self.data_loader), self.inner_iterations)

    def set_data_loader(self, data_loader: torch.utils.data.DataLoader):
        """Replace the data loader and restart iteration from a fresh epoch."""
        self.data_loader = data_loader
        self._batch_index = BatchIndex(len(data_loader), self.inner_iterations)
        self.__iterable = None

    def __call__(self):
        """Run one inner iteration; fires epoch start/end hooks at boundaries."""
        for sm, s in zip(self.state_modules, self.__states):
            sm.set_state(s)
        # Start a new epoch when there is no iterator yet or the last one finished
        if self.__iterable is None or self._batch_index.completed:
            self.__iterable = iter(self.data_loader)
            self._batch_index.reset(len(self.data_loader), self.inner_iterations)
            for sm in self.state_modules:
                sm.on_epoch_start()
        # Gradients are only needed in training mode
        with torch.set_grad_enabled(self.mode.is_train):
            self.__iterate()
        if self._batch_index.completed:
            for sm in self.state_modules:
                sm.on_epoch_end()

    def __iterate(self):
        """Consume batches until the current inner iteration's share is done."""
        with monit.section(self.name, is_partial=True, is_track=self.is_track_time):
            if self._batch_index.idx == 0:
                monit.progress(0)
            while not self._batch_index.iteration_completed:
                batch = next(self.__iterable)
                self.step(batch, self._batch_index)
                self._batch_index.step()
                monit.progress(self._batch_index.epoch_progress)
        self._batch_index.step_inner()
class BatchIndex:
    """
    Tracks the position within an epoch of `total` batches that is split
    into `total_iterations` inner iterations.

    Note: `idx` and `iteration` are only defined after `reset` is called.
    """
    idx: int
    total: int
    iteration: int
    total_iterations: int

    def __init__(self, total: int, total_iterations: int):
        self.total_iterations = total_iterations
        self.total = total

    def is_interval(self, interval: int):
        """Whether the current batch falls on the given interval; the last batch always does."""
        if interval <= 0:
            return False
        return self.idx + 1 == self.total or (self.idx + 1) % interval == 0

    @property
    def is_last(self):
        """Whether this is the last batch of the epoch."""
        return self.idx + 1 == self.total

    @property
    def completed(self):
        """Whether every inner iteration of the epoch has finished."""
        return self.iteration >= self.total_iterations

    @property
    def iteration_completed(self):
        # Integer division so the final batch lands exactly on the last iteration
        return self.idx >= (self.iteration + 1) * self.total // self.total_iterations

    @property
    def epoch_progress(self):
        """Fraction of the epoch consumed so far."""
        return self.idx / self.total

    def step(self):
        """Advance by one batch."""
        self.idx += 1

    def step_inner(self):
        """Advance by one inner iteration."""
        self.iteration += 1

    def reset(self, total: int, total_iterations: int):
        """Restart counting for a new epoch."""
        self.total = total
        self.total_iterations = total_iterations
        self.idx = 0
        self.iteration = 0
class TrainValidConfigs(TrainingLoopConfigs):
    r"""
    This is a configurable module that you can extend for experiments that involve
    training and validation datasets (i.e. most DL experiments).

    Arguments:
        epochs (int): Number of epochs to train on. Defaults to ``10``.
        train_loader (torch.utils.data.DataLoader): Training data loader.
        valid_loader (torch.utils.data.DataLoader): Validation data loader.
        inner_iterations (int): Number of times to switch between training and validation
            within an epoch. Defaults to ``1``.

    You can override ``init``, ``step`` functions. There is also a ``sample`` function
    that you can override to generate samples every time it switches between training and validation.
    """
    state_modules: List[StateModule]
    mode: ModeState
    epochs: int = 10
    trainer: Trainer
    validator: Trainer
    train_loader: torch.utils.data.DataLoader
    valid_loader: torch.utils.data.DataLoader
    # One loop step per epoch (see `_data_loop_count` option)
    loop_count = '_data_loop_count'
    loop_step = None
    inner_iterations: int = 1
    is_track_time: bool = False

    def init(self):
        """Hook called once before the training loop starts."""
        pass

    def step(self, batch: Any, batch_idx: BatchIndex):
        """Process one batch; must be implemented by subclasses."""
        raise NotImplementedError

    def run_step(self):
        """One epoch: alternate sampling, training and validation `inner_iterations` times."""
        for i in range(self.inner_iterations):
            with tracker.namespace('sample'):
                self.sample()
            with self.mode.update(is_train=True):
                with tracker.namespace('train'):
                    self.trainer()
            if self.validator:
                with tracker.namespace('valid'):
                    self.validator()
            tracker.save()

    def run(self):
        """Initialize, then run `run_step` for every step of the training loop."""
        with monit.section("Initialize"):
            self.init()
        # Touch the lazy config options so they are built before the loop starts
        _ = self.validator
        _ = self.trainer
        for _ in self.training_loop:
            self.run_step()

    def sample(self):
        """Hook for generating samples between training and validation; no-op by default."""
        pass
@option(TrainValidConfigs.trainer)
def _default_trainer(c: TrainValidConfigs):
    """Default training-pass runner built from the configurations."""
    trainer_kwargs = dict(name='Train',
                          mode=c.mode,
                          data_loader=c.train_loader,
                          inner_iterations=c.inner_iterations,
                          state_modules=c.state_modules,
                          is_track_time=c.is_track_time,
                          step=c.step)
    return Trainer(**trainer_kwargs)
@option(TrainValidConfigs.validator)
def _default_validator(c: TrainValidConfigs):
    """Default validation-pass runner built from the configurations."""
    validator_kwargs = dict(name='Valid',
                            mode=c.mode,
                            data_loader=c.valid_loader,
                            inner_iterations=c.inner_iterations,
                            state_modules=c.state_modules,
                            is_track_time=c.is_track_time,
                            step=c.step)
    return Trainer(**validator_kwargs)
@option(TrainValidConfigs.loop_count)
def _data_loop_count(c: TrainValidConfigs):
    """The training loop runs one step per epoch."""
    return c.epochs
class SimpleTrainValidConfigs(TrainValidConfigs):
    r"""
    This is a configurable module that works for many standard DL experiments.

    Arguments:
        model: A PyTorch model.
        optimizer: A PyTorch optimizer to update model.
        device: The device to train the model on. This defaults to a configurable device
        loss_func: A module to calculate the loss. This should accept ``model_output, target`` as
            arguments.
        update_batches (int): Number of batches to accumulate before taking an optimizer step.
            Defaults to ``1``.
        log_save_batches (int): How often to call :func:`labml.tracker.save`.
    """
    optimizer: torch.optim.Adam
    model: nn.Module
    device: torch.device = DeviceConfigs()
    loss_func: nn.Module
    update_batches: int = 1
    log_save_batches: int = 1
    state_modules: List[StateModule] = []

    def init(self):
        """Hook called once before the training loop starts."""
        pass

    def step(self, batch: Any, batch_idx: BatchIndex):
        """Forward, loss, and (in training mode) backward + optimizer update for one batch."""
        self.model.train(self.mode.is_train)
        data, target = batch[0].to(self.device), batch[1].to(self.device)
        # Count samples, not batches, as global steps
        if self.mode.is_train:
            tracker.add_global_step(len(data))
        with monit.section("model"):
            output = self.model(data)
        loss = self.loss_func(output, target)
        tracker.add("loss.", loss)
        if self.mode.is_train:
            with monit.section('backward'):
                loss.backward()
            # Gradients accumulate over `update_batches` batches before stepping
            if batch_idx.is_interval(self.update_batches):
                with monit.section('optimize'):
                    self.optimizer.step()
                self.optimizer.zero_grad()
            if batch_idx.is_interval(self.log_save_batches):
                tracker.save()
# NOTE(review): `meta_config` presumably marks this option as meta
# (excluded from hyper-parameter tracking) — confirm against labml docs.
meta_config(SimpleTrainValidConfigs.update_batches,
            )
@option(SimpleTrainValidConfigs.optimizer)
def _default_optimizer(c: SimpleTrainValidConfigs):
    """Default optimizer: an `OptimizerConfigs` bound to the model parameters."""
    from .optimizer import OptimizerConfigs
    conf = OptimizerConfigs()
    conf.parameters = c.model.parameters()
    return conf
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/helpers/__init__.py | labml_nn/helpers/__init__.py | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false | |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/helpers/device.py | labml_nn/helpers/device.py | import torch
from labml.configs import BaseConfigs, hyperparams, option
class DeviceInfo:
    """
    Resolves the torch device to use from a CUDA preference and device index.
    Falls back to CPU when CUDA is unavailable, and to the last CUDA device
    when the requested index is out of range.
    """

    def __init__(self, *,
                 use_cuda: bool,
                 cuda_device: int):
        self.use_cuda = use_cuda
        self.cuda_device = cuda_device
        self.cuda_count = torch.cuda.device_count()
        self.is_cuda = self.use_cuda and torch.cuda.is_available()
        if not self.is_cuda:
            self.device = torch.device('cpu')
        else:
            # Clamp the requested index to the available device range
            index = self.cuda_device if self.cuda_device < self.cuda_count else self.cuda_count - 1
            self.device = torch.device('cuda', index)

    def __str__(self):
        if not self.is_cuda:
            return "CPU"
        if self.cuda_device < self.cuda_count:
            return f"GPU:{self.cuda_device} - {torch.cuda.get_device_name(self.cuda_device)}"
        # Out-of-range request: show the clamped index with the original in parentheses
        return (f"GPU:{self.cuda_count - 1}({self.cuda_device}) "
                f"- {torch.cuda.get_device_name(self.cuda_count - 1)}")
class DeviceConfigs(BaseConfigs):
    r"""
    This is a configurable module to get a single device to train model on.
    It can pick up CUDA devices and it will fall back to CPU if they are not available.

    It has other small advantages such as being able to view the
    actual device name on configurations view of
    `labml app <https://github.com/labmlai/labml/tree/master/app>`_

    Arguments:
        cuda_device (int): The CUDA device number. Defaults to ``0``.
        use_cuda (bool): Whether to use CUDA devices. Defaults to ``True``.
    """
    cuda_device: int = 0
    use_cuda: bool = True
    # Computed device information (built by the `_device_info` option)
    device_info: DeviceInfo
    # The resolved torch device (built by the `_device` option)
    device: torch.device

    def __init__(self):
        # `device` is the primary value this config resolves to
        super().__init__(_primary='device')
@option(DeviceConfigs.device)
def _device(c: DeviceConfigs):
    """The torch device resolved from the computed `DeviceInfo`."""
    return c.device_info.device
# Mark `cuda_device` and `use_cuda` as non-hyper-parameters (`is_hyperparam=False`).
hyperparams(DeviceConfigs.cuda_device, DeviceConfigs.use_cuda,
            is_hyperparam=False)
@option(DeviceConfigs.device_info)
def _device_info(c: DeviceConfigs):
    """Build `DeviceInfo` from the configured CUDA preference and index."""
    return DeviceInfo(use_cuda=c.use_cuda,
                      cuda_device=c.cuda_device)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/capsule_networks/mnist.py | labml_nn/capsule_networks/mnist.py | """
---
title: Classify MNIST digits with Capsule Networks
summary: Code for training Capsule Networks on MNIST dataset
---
# Classify MNIST digits with Capsule Networks
This is an annotated PyTorch code to classify MNIST digits with PyTorch.
This paper implements the experiment described in paper
[Dynamic Routing Between Capsules](https://arxiv.org/abs/1710.09829).
"""
from typing import Any
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from labml import experiment, tracker
from labml.configs import option
from labml_nn.capsule_networks import Squash, Router, MarginLoss
from labml_nn.helpers.datasets import MNISTConfigs
from labml_nn.helpers.metrics import AccuracyDirect
from labml_nn.helpers.trainer import SimpleTrainValidConfigs, BatchIndex
class MNISTCapsuleNetworkModel(nn.Module):
    """
    ## Model for classifying MNIST digits
    """

    def __init__(self):
        super().__init__()
        # First convolution layer has $256$, $9 \times 9$ convolution kernels
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=256, kernel_size=9, stride=1)
        # The second layer (Primary Capsules) s a convolutional capsule layer with $32$ channels
        # of convolutional $8D$ capsules ($8$ features per capsule).
        # That is, each primary capsule contains 8 convolutional units with a 9 × 9 kernel and a stride of 2.
        # In order to implement this we create a convolutional layer with $32 \times 8$ channels and
        # reshape and permutate its output to get the capsules of $8$ features each.
        self.conv2 = nn.Conv2d(in_channels=256, out_channels=32 * 8, kernel_size=9, stride=2, padding=0)
        self.squash = Squash()
        # Routing layer gets the $32 \times 6 \times 6$ primary capsules and produces $10$ capsules.
        # Each of the primary capsules have $8$ features, while output capsules (Digit Capsules)
        # have $16$ features.
        # The routing algorithm iterates $3$ times.
        self.digit_capsules = Router(32 * 6 * 6, 10, 8, 16, 3)
        # This is the decoder mentioned in the paper.
        # It takes the outputs of the $10$ digit capsules, each with $16$ features to reproduce the
        # image. It goes through linear layers of sizes $512$ and $1024$ with $ReLU$ activations.
        self.decoder = nn.Sequential(
            nn.Linear(16 * 10, 512),
            nn.ReLU(),
            nn.Linear(512, 1024),
            nn.ReLU(),
            nn.Linear(1024, 784),
            nn.Sigmoid()
        )

    def forward(self, data: torch.Tensor):
        """
        `data` are the MNIST images, with shape `[batch_size, 1, 28, 28]`

        Returns a tuple `(caps, reconstructions, pred)`: digit capsules of
        shape `[batch_size, 10, 16]`, reconstructed images of shape
        `[batch_size, 1, 28, 28]`, and the predicted digit per sample.
        """
        # Pass through the first convolution layer.
        # Output of this layer has shape `[batch_size, 256, 20, 20]`
        x = F.relu(self.conv1(data))
        # Pass through the second convolution layer.
        # Output of this has shape `[batch_size, 32 * 8, 6, 6]`.
        # *Note that this layer has a stride length of $2$*.
        x = self.conv2(x)
        # Resize and permutate to get the capsules
        caps = x.view(x.shape[0], 8, 32 * 6 * 6).permute(0, 2, 1)
        # Squash the capsules
        caps = self.squash(caps)
        # Take them through the router to get digit capsules.
        # This has shape `[batch_size, 10, 16]`.
        caps = self.digit_capsules(caps)
        # Get masks for reconstruction
        with torch.no_grad():
            # The prediction by the capsule network is the capsule with longest length
            pred = (caps ** 2).sum(-1).argmax(-1)
            # Create a mask to maskout all the other capsules
            mask = torch.eye(10, device=data.device)[pred]
        # Mask the digit capsules to get only the capsule that made the prediction and
        # take it through decoder to get reconstruction
        reconstructions = self.decoder((caps * mask[:, :, None]).view(x.shape[0], -1))
        # Reshape the reconstruction to match the image dimensions
        reconstructions = reconstructions.view(-1, 1, 28, 28)
        return caps, reconstructions, pred
class Configs(MNISTConfigs, SimpleTrainValidConfigs):
    """
    Configurations with MNIST data and Train & Validation setup
    """
    epochs: int = 10
    model: nn.Module = 'capsule_network_model'
    # Reconstruction loss between decoded images and inputs
    reconstruction_loss = nn.MSELoss()
    # Margin loss over the 10 digit capsules
    margin_loss = MarginLoss(n_labels=10)
    accuracy = AccuracyDirect()

    def init(self):
        """Set up tracking and metrics before training starts."""
        # Print losses and accuracy to screen
        tracker.set_scalar('loss.*', True)
        tracker.set_scalar('accuracy.*', True)
        # We need to set the metrics to calculate them for the epoch for training and validation
        self.state_modules = [self.accuracy]

    def step(self, batch: Any, batch_idx: BatchIndex):
        """
        This method gets called by the trainer for each batch.
        """
        # Set the model mode
        self.model.train(self.mode.is_train)
        # Get the images and labels and move them to the model's device
        data, target = batch[0].to(self.device), batch[1].to(self.device)
        # Increment step in training mode
        if self.mode.is_train:
            tracker.add_global_step(len(data))
        # Run the model
        caps, reconstructions, pred = self.model(data)
        # Calculate the total loss
        loss = self.margin_loss(caps, target) + 0.0005 * self.reconstruction_loss(reconstructions, data)
        tracker.add("loss.", loss)
        # Call accuracy metric
        self.accuracy(pred, target)
        if self.mode.is_train:
            loss.backward()
            self.optimizer.step()
            # Log parameters and gradients
            if batch_idx.is_last:
                tracker.add('model', self.model)
            self.optimizer.zero_grad()
        tracker.save()
@option(Configs.model)
def capsule_network_model(c: Configs):
    """Create the capsule-network model on the configured device."""
    return MNISTCapsuleNetworkModel().to(c.device)
def main():
    """
    Create the experiment, configure it to use Adam (lr 1e-3), and run training.
    """
    experiment.create(name='capsule_network_mnist')
    conf = Configs()
    experiment.configs(conf, {'optimizer.optimizer': 'Adam',
                              'optimizer.learning_rate': 1e-3})
    # Register the model so checkpoints include its weights
    experiment.add_pytorch_models({'model': conf.model})
    with experiment.start():
        conf.run()
# Run the experiment when this module is executed as a script.
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/capsule_networks/__init__.py | labml_nn/capsule_networks/__init__.py | """
---
title: Capsule Networks
summary: >
PyTorch implementation and tutorial of Capsule Networks.
Capsule network is a neural network architecture that embeds features
as capsules and routes them with a voting mechanism to next layer of capsules.
---
# Capsule Networks
This is a [PyTorch](https://pytorch.org) implementation/tutorial of
[Dynamic Routing Between Capsules](https://arxiv.org/abs/1710.09829).
Capsule network is a neural network architecture that embeds features
as capsules and routes them with a voting mechanism to next layer of capsules.
Unlike in other implementations of models, we've included a sample, because
it is difficult to understand some concepts with just the modules.
[This is the annotated code for a model that uses capsules to classify MNIST dataset](mnist.html)
This file holds the implementations of the core modules of Capsule Networks.
I used [jindongwang/Pytorch-CapsuleNet](https://github.com/jindongwang/Pytorch-CapsuleNet) to clarify some
confusions I had with the paper.
Here's a notebook for training a Capsule Network on MNIST dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/capsule_networks/mnist.ipynb)
"""
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Squash(nn.Module):
    """
    ## Squash

    The **squashing** non-linearity from the paper (equation $(1)$):

    $$\mathbf{v}_j = \frac{{\lVert \mathbf{s}_j \rVert}^2}{1 + {\lVert \mathbf{s}_j \rVert}^2}
    \frac{\mathbf{s}_j}{\lVert \mathbf{s}_j \rVert}$$

    It normalizes each capsule to unit length and then shrinks capsules whose
    length is below one.
    """

    def __init__(self, epsilon=1e-8):
        super().__init__()
        # Added inside the square root to keep the division stable near zero
        self.epsilon = epsilon

    def forward(self, s: torch.Tensor):
        """
        `s` has shape `[batch_size, n_capsules, n_features]`.
        """
        # ${\lVert \mathbf{s}_j \rVert}^2$ per capsule
        squared_norm = (s ** 2).sum(dim=-1, keepdim=True)
        # Shrinking factor ${\lVert \mathbf{s}_j \rVert}^2 / (1 + {\lVert \mathbf{s}_j \rVert}^2)$
        shrink = squared_norm / (1 + squared_norm)
        # Unit direction; epsilon avoids `nan`s when the norm is zero
        unit = s / torch.sqrt(squared_norm + self.epsilon)
        return shrink * unit
class Router(nn.Module):
    """
    ## Routing Algorithm

    This is the routing mechanism described in the paper.
    You can use multiple routing layers in your models.

    This combines calculating $\mathbf{s}_j$ for this layer and
    the routing algorithm described in *Procedure 1*.
    """

    def __init__(self, in_caps: int, out_caps: int, in_d: int, out_d: int, iterations: int):
        """
        `in_caps` is the number of capsules, and `in_d` is the number of features per capsule from the layer below.
        `out_caps` and `out_d` are the same for this layer.

        `iterations` is the number of routing iterations, symbolized by $r$ in the paper.
        """
        super().__init__()
        self.in_caps = in_caps
        self.out_caps = out_caps
        self.iterations = iterations
        # Softmax over the output capsules for the routing coefficients
        self.softmax = nn.Softmax(dim=1)
        self.squash = Squash()
        # This is the weight matrix $\mathbf{W}_{ij}$. It maps each capsule in the
        # lower layer to each capsule in this layer
        self.weight = nn.Parameter(torch.randn(in_caps, out_caps, in_d, out_d), requires_grad=True)

    def forward(self, u: torch.Tensor):
        """
        The shape of `u` is `[batch_size, n_capsules, n_features]`.
        These are the capsules from the lower layer.

        Returns the routed output capsules `v` of shape `[batch_size, out_caps, out_d]`.
        """
        # $$\hat{\mathbf{u}}_{j|i} = \mathbf{W}_{ij} \mathbf{u}_i$$
        # Here $j$ is used to index capsules in this layer, whilst $i$ is
        # used to index capsules in the layer below (previous).
        u_hat = torch.einsum('ijnm,bin->bijm', self.weight, u)
        # Initial logits $b_{ij}$ are the log prior probabilities that capsule $i$
        # should be coupled with $j$.
        # We initialize these at zero
        b = u.new_zeros(u.shape[0], self.in_caps, self.out_caps)
        v = None
        # Iterate
        for i in range(self.iterations):
            # routing softmax $$c_{ij} = \frac{\exp({b_{ij}})}{\sum_k\exp({b_{ik}})}$$
            c = self.softmax(b)
            # $$\mathbf{s}_j = \sum_i{c_{ij} \hat{\mathbf{u}}_{j|i}}$$
            s = torch.einsum('bij,bijm->bjm', c, u_hat)
            # $$\mathbf{v}_j = squash(\mathbf{s}_j)$$
            v = self.squash(s)
            # $$a_{ij} = \mathbf{v}_j \cdot \hat{\mathbf{u}}_{j|i}$$
            a = torch.einsum('bjm,bijm->bij', v, u_hat)
            # $$b_{ij} \gets b_{ij} + \mathbf{v}_j \cdot \hat{\mathbf{u}}_{j|i}$$
            b = b + a
        return v
class MarginLoss(nn.Module):
    """
    ## Margin loss for class existence

    A separate margin loss is computed per output capsule and summed.
    The length of each output capsule is the probability that its class is
    present. The per-class loss is

    $$\mathcal{L}_k = T_k \max(0, m^{+} - \lVert\mathbf{v}_k\rVert)^2 +
    \lambda (1 - T_k) \max(0, \lVert\mathbf{v}_k\rVert - m^{-})^2$$

    where $T_k$ is $1$ iff class $k$ is present. The paper uses
    $m^{+} = 0.9$, $m^{-} = 0.1$, and the $\lambda$ down-weighting keeps
    capsule lengths from collapsing early in training.
    """

    def __init__(self, *, n_labels: int, lambda_: float = 0.5, m_positive: float = 0.9, m_negative: float = 0.1):
        super().__init__()
        self.m_negative = m_negative
        self.m_positive = m_positive
        self.lambda_ = lambda_
        self.n_labels = n_labels

    def forward(self, v: torch.Tensor, labels: torch.Tensor):
        """
        `v` are the squashed output capsules, shape `[batch_size, n_labels, n_features]`
        (one capsule per label). `labels` has shape `[batch_size]`.
        Returns the scalar mean-over-batch margin loss.
        """
        # Capsule lengths $\lVert \mathbf{v}_j \rVert$
        capsule_lengths = torch.sqrt((v ** 2).sum(dim=-1))
        # One-hot encode the labels, shape `[batch_size, n_labels]`
        one_hot = torch.eye(self.n_labels, device=labels.device)[labels]
        # Present-class term: penalize lengths below $m^{+}$
        present = one_hot * F.relu(self.m_positive - capsule_lengths)
        # Absent-class term: penalize lengths above $m^{-}$, down-weighted by $\lambda$
        absent = self.lambda_ * (1.0 - one_hot) * F.relu(capsule_lengths - self.m_negative)
        # Sum over labels, average over the batch
        return (present + absent).sum(dim=-1).mean()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/sketch_rnn/__init__.py | labml_nn/sketch_rnn/__init__.py | """
---
title: Sketch RNN
summary: >
This is an annotated PyTorch implementation of the Sketch RNN from paper A Neural Representation of Sketch Drawings.
Sketch RNN is a sequence-to-sequence model that generates sketches of objects such as bicycles, cats, etc.
---
# Sketch RNN
This is an annotated [PyTorch](https://pytorch.org) implementation of the paper
[A Neural Representation of Sketch Drawings](https://arxiv.org/abs/1704.03477).
Sketch RNN is a sequence-to-sequence variational auto-encoder.
Both encoder and decoder are recurrent neural network models.
It learns to reconstruct stroke based simple drawings, by predicting
a series of strokes.
Decoder predicts each stroke as a mixture of Gaussian's.
### Getting data
Download data from [Quick, Draw! Dataset](https://github.com/googlecreativelab/quickdraw-dataset).
There is a link to download `npz` files in *Sketch-RNN QuickDraw Dataset* section of the readme.
Place the downloaded `npz` file(s) in `data/sketch` folder.
This code is configured to use `bicycle` dataset.
You can change this in configurations.
### Acknowledgements
Took help from [PyTorch Sketch RNN](https://github.com/alexis-jacq/Pytorch-Sketch-RNN) project by
[Alexis David Jacq](https://github.com/alexis-jacq)
"""
import math
from typing import Optional, Tuple, Any
import einops
import numpy as np
from matplotlib import pyplot as plt
import torch
import torch.nn as nn
from labml import lab, experiment, tracker, monit
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.helpers.optimizer import OptimizerConfigs
from labml_nn.helpers.trainer import TrainValidConfigs, BatchIndex
from torch import optim
from torch.utils.data import Dataset, DataLoader
class StrokesDataset(Dataset):
    """
    ## Dataset

    Loads raw stroke sequences and converts them to padded 5-element
    stroke tensors suitable for the Sketch-RNN model.
    """

    def __init__(self, dataset: np.array, max_seq_length: int, scale: Optional[float] = None):
        """
        `dataset` is a list of numpy arrays of shape [seq_len, 3].
        Each stroke is $(\Delta x, \Delta y, pen)$ where `pen` is $1$ if the
        pen touches the paper and $0$ otherwise.
        """
        # Keep sequences whose length is in (10, max_seq_length], clamp
        # offsets to $[-1000, 1000]$ and convert to float32
        sequences = []
        for strokes in dataset:
            if not (10 < len(strokes) <= max_seq_length):
                continue
            clamped = np.clip(strokes, -1000, 1000)
            sequences.append(np.array(clamped, dtype=np.float32))

        # Scaling factor: std of all $(\Delta x, \Delta y)$ values combined.
        # The mean is left unadjusted (it is close to 0 anyway, per the paper).
        if scale is None:
            scale = np.std(np.concatenate([np.ravel(s[:, 0:2]) for s in sequences]))
        self.scale = scale

        longest = max(len(s) for s in sequences)

        # Each step is $(\Delta x, \Delta y, p_1, p_2, p_3)$ — exactly one of
        # $p_1$ (pen down), $p_2$ (pen up), $p_3$ (end of drawing) is 1.
        # Two extra rows hold the start-of-sequence and end-of-sequence steps.
        self.data = torch.zeros(len(sequences), longest + 2, 5, dtype=torch.float)
        # The mask covers decoder outputs, which predict the next step of
        # `data[:-1]`, hence only one extra row
        self.mask = torch.zeros(len(sequences), longest + 1)
        for i, s in enumerate(sequences):
            s = torch.from_numpy(s)
            n = len(s)
            # Scaled offsets
            self.data[i, 1:n + 1, :2] = s[:, :2] / scale
            # $p_1$: pen down next step
            self.data[i, 1:n + 1, 2] = 1 - s[:, 2]
            # $p_2$: pen up next step
            self.data[i, 1:n + 1, 3] = s[:, 2]
            # $p_3$: end of drawing, for all padding rows
            self.data[i, n + 1:, 4] = 1
            # Mask is on through the end-of-sequence step
            self.mask[i, :n + 1] = 1
        # Start-of-sequence token is $(0, 0, 1, 0, 0)$
        self.data[:, 0, 2] = 1

    def __len__(self):
        """Number of sequences in the dataset."""
        return len(self.data)

    def __getitem__(self, idx: int):
        """Return `(data, mask)` for one sequence."""
        return self.data[idx], self.mask[idx]
class BivariateGaussianMixture:
    """
    ## Bi-variate Gaussian mixture

    The mixture is represented by $\Pi$ and
    $\mathcal{N}(\mu_{x}, \mu_{y}, \sigma_{x}, \sigma_{y}, \rho_{xy})$.
    The raw parameters are stored so that the sampling temperature can be
    adjusted before the categorical and Gaussian distributions are built.
    """

    def __init__(self, pi_logits: torch.Tensor, mu_x: torch.Tensor, mu_y: torch.Tensor,
                 sigma_x: torch.Tensor, sigma_y: torch.Tensor, rho_xy: torch.Tensor):
        # Keep the raw mixture parameters; distributions are created lazily
        # in `get_distribution`.
        (self.pi_logits, self.mu_x, self.mu_y,
         self.sigma_x, self.sigma_y, self.rho_xy) = \
            pi_logits, mu_x, mu_y, sigma_x, sigma_y, rho_xy

    @property
    def n_distributions(self):
        """Number of distributions in the mixture, $M$"""
        return self.pi_logits.size(-1)

    def set_temperature(self, temperature: float):
        """
        Adjust the mixture by sampling temperature $\tau$ (in place).
        """
        # $$\hat{\Pi_k} \leftarrow \frac{\hat{\Pi_k}}{\tau}$$
        self.pi_logits /= temperature
        # Scale standard deviations so variances scale by $\tau$:
        # $$\sigma^2 \leftarrow \sigma^2 \tau$$
        root_tau = math.sqrt(temperature)
        self.sigma_x *= root_tau
        self.sigma_y *= root_tau

    def get_distribution(self):
        """Build and return the categorical and bi-variate normal distributions."""
        # Clamp $\sigma_x$, $\sigma_y$ and $\rho_{xy}$ so that the covariance
        # matrix stays positive-definite and we avoid `NaN`s
        sx = self.sigma_x.clamp(min=1e-5)
        sy = self.sigma_y.clamp(min=1e-5)
        rho = self.rho_xy.clamp(-1 + 1e-5, 1 - 1e-5)
        # Means, shape `[..., 2]`
        mean = torch.stack([self.mu_x, self.mu_y], -1)
        # Covariance matrix entries; the off-diagonal is shared
        off_diag = rho * sx * sy
        cov = torch.stack([sx * sx, off_diag, off_diag, sy * sy], -1)
        cov = cov.view(*sy.shape, 2, 2)
        # Create bi-variate normal distribution.
        #
        # 📝 It would be more efficient to pass a `scale_tril` matrix `[[a, 0], [b, c]]`
        # where
        # $$a = \sigma_x, b = \rho_{xy} \sigma_y, c = \sigma_y \sqrt{1 - \rho^2_{xy}}$$.
        # But for simplicity we use the co-variance matrix.
        # [This is a good resource](https://www2.stat.duke.edu/courses/Spring12/sta104.1/Lectures/Lec22.pdf)
        # on bi-variate distributions, their co-variance matrix,
        # and probability density function.
        multi_dist = torch.distributions.MultivariateNormal(mean, covariance_matrix=cov)
        # Categorical distribution $\Pi$ from logits
        cat_dist = torch.distributions.Categorical(logits=self.pi_logits)
        #
        return cat_dist, multi_dist
class EncoderRNN(nn.Module):
    """
    ## Encoder module

    A bidirectional LSTM that compresses a sequence of strokes into the
    parameters $\mu$ and $\hat{\sigma}$ of the latent distribution, and
    samples a latent vector $z$ from it.
    """

    def __init__(self, d_z: int, enc_hidden_size: int):
        super().__init__()
        # Bidirectional LSTM over stroke features $(\Delta x, \Delta y, p_1, p_2, p_3)$
        self.lstm = nn.LSTM(5, enc_hidden_size, bidirectional=True)
        # Linear heads mapping the concatenated (forward + backward) final
        # hidden state to $\mu$ and $\hat{\sigma}$
        self.mu_head = nn.Linear(2 * enc_hidden_size, d_z)
        self.sigma_head = nn.Linear(2 * enc_hidden_size, d_z)

    def forward(self, inputs: torch.Tensor, state=None):
        # Run the bidirectional LSTM; only the final hidden state is needed.
        # It is the concatenation of the forward pass over the sequence and
        # the backward pass over the reversed sequence:
        # $$h_{\rightarrow} = encode_{\rightarrow}(S),
        # h_{\leftarrow} = encode_{\leftarrow}(S_{reverse}),
        # h = [h_{\rightarrow}; h_{\leftarrow}]$$
        _, (hidden, cell) = self.lstm(inputs.float(), state)
        # `hidden` has shape `[2, batch_size, hidden_size]` where the first
        # dimension is the direction; flatten to `[batch_size, 2 * hidden_size]`
        # to get $h = [h_{\rightarrow}; h_{\leftarrow}]$
        hidden = einops.rearrange(hidden, 'fb b h -> b (fb h)')
        # Latent mean $\mu$
        mu = self.mu_head(hidden)
        # $\hat{\sigma}$
        sigma_hat = self.sigma_head(hidden)
        # Reparameterization trick: $z = \mu + \sigma \epsilon$ with
        # $\sigma = \exp(\frac{\hat{\sigma}}{2})$ and $\epsilon \sim \mathcal{N}(0, I)$
        sigma = torch.exp(sigma_hat / 2.)
        noise = torch.normal(mu.new_zeros(mu.shape), mu.new_ones(mu.shape))
        z = mu + sigma * noise
        #
        return z, mu, sigma_hat
class DecoderRNN(nn.Module):
    """
    ## Decoder module
    This consists of a LSTM that takes the latent vector $z$ concatenated with
    the previous stroke and outputs the parameters of a mixture of bi-variate
    Gaussians plus pen-state logits for the next stroke.
    """
    def __init__(self, d_z: int, dec_hidden_size: int, n_distributions: int):
        """
        * `d_z` is the number of features in the latent vector $z$
        * `dec_hidden_size` is the LSTM hidden state size
        * `n_distributions` is the number of mixture components, $M$
        """
        super().__init__()
        # LSTM takes $[(\Delta x, \Delta y, p_1, p_2, p_3); z]$ as input
        self.lstm = nn.LSTM(d_z + 5, dec_hidden_size)
        # Initial state of the LSTM is $[h_0; c_0] = \tanh(W_{z}z + b_z)$.
        # `init_state` is the linear transformation for this
        self.init_state = nn.Linear(d_z, 2 * dec_hidden_size)
        # This layer produces outputs for each of the `n_distributions`.
        # Each distribution needs six parameters
        # $(\hat{\Pi_i}, \mu_{x_i}, \mu_{y_i}, \hat{\sigma_{x_i}}, \hat{\sigma_{y_i}}, \hat{\rho_{xy_i}})$
        self.mixtures = nn.Linear(dec_hidden_size, 6 * n_distributions)
        # This head is for the logits $(\hat{q_1}, \hat{q_2}, \hat{q_3})$
        self.q_head = nn.Linear(dec_hidden_size, 3)
        # This is to calculate $\log(q_k)$ where
        # $$q_k = \operatorname{softmax}(\hat{q})_k = \frac{\exp(\hat{q_k})}{\sum_{j = 1}^3 \exp(\hat{q_j})}$$
        self.q_log_softmax = nn.LogSoftmax(-1)
        # These parameters are stored for future reference
        self.n_distributions = n_distributions
        self.dec_hidden_size = dec_hidden_size
    def forward(self, x: torch.Tensor, z: torch.Tensor, state: Optional[Tuple[torch.Tensor, torch.Tensor]]):
        """
        * `x` is the decoder input of shape `[seq_len, batch_size, d_z + 5]`
        * `z` is the latent vector of shape `[batch_size, d_z]`
        * `state` is the LSTM `(h, c)` state; pass `None` to initialize it from $z$
        """
        # Calculate the initial state
        if state is None:
            # $[h_0; c_0] = \tanh(W_{z}z + b_z)$
            h, c = torch.split(torch.tanh(self.init_state(z)), self.dec_hidden_size, 1)
            # `h` and `c` have shapes `[batch_size, lstm_size]`. We want to shape them
            # to `[1, batch_size, lstm_size]` because that's the shape used in LSTM.
            state = (h.unsqueeze(0).contiguous(), c.unsqueeze(0).contiguous())
        # Run the LSTM
        outputs, state = self.lstm(x, state)
        # Get $\log(q)$
        q_logits = self.q_log_softmax(self.q_head(outputs))
        # Get $(\hat{\Pi_i}, \mu_{x,i}, \mu_{y,i}, \hat{\sigma_{x,i}},
        # \hat{\sigma_{y,i}} \hat{\rho_{xy,i}})$.
        # `torch.split` splits the output into 6 tensors of size `self.n_distribution`
        # across dimension `2`.
        pi_logits, mu_x, mu_y, sigma_x, sigma_y, rho_xy = \
            torch.split(self.mixtures(outputs), self.n_distributions, 2)
        # Create a bi-variate Gaussian mixture
        # $\Pi$ and
        # $\mathcal{N}(\mu_{x}, \mu_{y}, \sigma_{x}, \sigma_{y}, \rho_{xy})$
        # where
        # $$\sigma_{x,i} = \exp(\hat{\sigma_{x,i}}), \sigma_{y,i} = \exp(\hat{\sigma_{y,i}}),
        # \rho_{xy,i} = \tanh(\hat{\rho_{xy,i}})$$
        # and
        # $$\Pi_i = \operatorname{softmax}(\hat{\Pi})_i = \frac{\exp(\hat{\Pi_i})}{\sum_{j = 1}^3 \exp(\hat{\Pi_j})}$$
        #
        # $\Pi$ is the categorical probabilities of choosing the distribution out of the mixture
        # $\mathcal{N}(\mu_{x}, \mu_{y}, \sigma_{x}, \sigma_{y}, \rho_{xy})$.
        dist = BivariateGaussianMixture(pi_logits, mu_x, mu_y,
                                        torch.exp(sigma_x), torch.exp(sigma_y), torch.tanh(rho_xy))
        #
        return dist, q_logits, state
class ReconstructionLoss(nn.Module):
    """
    ## Reconstruction Loss

    $L_R = L_s + L_p$: the stroke-position log-likelihood under the Gaussian
    mixture plus the cross-entropy of the pen-state logits.
    """

    def forward(self, mask: torch.Tensor, target: torch.Tensor,
                dist: 'BivariateGaussianMixture', q_logits: torch.Tensor):
        # Mixture weights $\Pi$ and component Gaussians
        # $\mathcal{N}(\mu_{x}, \mu_{y}, \sigma_{x}, \sigma_{y}, \rho_{xy})$
        cat, normal = dist.get_distribution()
        # `target` is `[seq_len, batch_size, 5]` with features
        # $(\Delta x, \Delta y, p_1, p_2, p_3)$.
        # Broadcast $(\Delta x, \Delta y)$ across the mixture components,
        # giving shape `[seq_len, batch_size, n_distributions, 2]`
        deltas = target[:, :, 0:2].unsqueeze(-2).expand(-1, -1, dist.n_distributions, -1)
        # Mixture density
        # $$p(\Delta x, \Delta y) =
        # \sum_{j=1}^M \Pi_j \mathcal{N} \big( \Delta x, \Delta y \vert
        # \mu_{x,j}, \mu_{y,j}, \sigma_{x,j}, \sigma_{y,j}, \rho_{xy,j}
        # \big)$$
        density = torch.sum(cat.probs * torch.exp(normal.log_prob(deltas)), 2)
        # $$L_s = - \frac{1}{N_{max}} \sum_{i=1}^{N_s} \log \big (p(\Delta x, \Delta y) \big)$$
        # Padded steps beyond $N_s$ are zeroed by `mask`, while `mean` divides
        # by $N_{max}$ (`longest_seq_len`) — this gives every individual
        # prediction equal weight regardless of sequence length.
        # The small constant avoids $\log 0$.
        loss_stroke = -torch.mean(mask * torch.log(1e-5 + density))
        # $$L_p = - \frac{1}{N_{max}} \sum_{i=1}^{N_{max}} \sum_{k=1}^{3} p_{k,i} \log(q_{k,i})$$
        loss_pen = -torch.mean(target[:, :, 2:] * q_logits)
        # $$L_R = L_s + L_p$$
        return loss_stroke + loss_pen
class KLDivLoss(nn.Module):
    """
    ## KL-Divergence loss

    KL divergence between the encoder's normal distribution
    $\mathcal{N}(\mu, \exp(\hat{\sigma}))$ and the standard normal
    $\mathcal{N}(0, 1)$, averaged over all elements.
    """

    def forward(self, sigma_hat: torch.Tensor, mu: torch.Tensor):
        # $$L_{KL} = - \frac{1}{2 N_z} \bigg( 1 + \hat{\sigma} - \mu^2 - \exp(\hat{\sigma}) \bigg)$$
        inner = 1 + sigma_hat - mu ** 2 - torch.exp(sigma_hat)
        return -0.5 * torch.mean(inner)
class Sampler:
    """
    ## Sampler
    This samples a sketch from the decoder and plots it.
    Sampling is stochastic (mixture and pen states are sampled), so repeated
    calls produce different sketches.
    """
    def __init__(self, encoder: EncoderRNN, decoder: DecoderRNN):
        # Trained encoder and decoder modules
        self.decoder = decoder
        self.encoder = encoder
    def sample(self, data: torch.Tensor, temperature: float):
        """
        * `data` is a single stroke sequence of shape `[seq_len, 1, 5]`
          (see `Configs.sample`, which adds the batch dimension)
        * `temperature` is the sampling temperature $\tau$
        """
        # $N_{max}$
        longest_seq_len = len(data)
        # Get $z$ from the encoder
        z, _, _ = self.encoder(data)
        # Start-of-sequence stroke is $(0, 0, 1, 0, 0)$
        s = data.new_tensor([0, 0, 1, 0, 0])
        seq = [s]
        # Initial decoder is `None`.
        # The decoder will initialize it to $[h_0; c_0] = \tanh(W_{z}z + b_z)$
        state = None
        # We don't need gradients
        with torch.no_grad():
            # Sample $N_{max}$ strokes
            for i in range(longest_seq_len):
                # $[(\Delta x, \Delta y, p_1, p_2, p_3); z]$ is the input to the decoder.
                # Note: `data` is reused here as the per-step decoder input.
                data = torch.cat([s.view(1, 1, -1), z.unsqueeze(0)], 2)
                # Get $\Pi$, $\mathcal{N}(\mu_{x}, \mu_{y}, \sigma_{x}, \sigma_{y}, \rho_{xy})$,
                # $q$ and the next state from the decoder
                dist, q_logits, state = self.decoder(data, z, state)
                # Sample a stroke
                s = self._sample_step(dist, q_logits, temperature)
                # Add the new stroke to the sequence of strokes
                seq.append(s)
                # Stop sampling if $p_3 = 1$. This indicates that sketching has stopped
                if s[4] == 1:
                    break
        # Create a PyTorch tensor of the sequence of strokes
        seq = torch.stack(seq)
        # Plot the sequence of strokes
        self.plot(seq)
    @staticmethod
    def _sample_step(dist: 'BivariateGaussianMixture', q_logits: torch.Tensor, temperature: float):
        """Sample a single stroke $(\Delta x, \Delta y, q_1, q_2, q_3)$ from the decoder outputs."""
        # Set temperature $\tau$ for sampling. This is implemented in class `BivariateGaussianMixture`.
        dist.set_temperature(temperature)
        # Get temperature adjusted $\Pi$ and $\mathcal{N}(\mu_{x}, \mu_{y}, \sigma_{x}, \sigma_{y}, \rho_{xy})$
        pi, mix = dist.get_distribution()
        # Sample from $\Pi$ the index of the distribution to use from the mixture.
        # The `[0, 0]` index selects the single step and single batch element.
        idx = pi.sample()[0, 0]
        # Create categorical distribution $q$ with log-probabilities `q_logits` or $\hat{q}$,
        # also adjusted by the temperature
        q = torch.distributions.Categorical(logits=q_logits / temperature)
        # Sample from $q$
        q_idx = q.sample()[0, 0]
        # Sample from the normal distributions in the mixture and pick the one indexed by `idx`
        xy = mix.sample()[0, 0, idx]
        # Create an empty stroke $(\Delta x, \Delta y, q_1, q_2, q_3)$
        stroke = q_logits.new_zeros(5)
        # Set $\Delta x, \Delta y$
        stroke[:2] = xy
        # Set $q_1, q_2, q_3$ (one-hot)
        stroke[q_idx + 2] = 1
        #
        return stroke
    @staticmethod
    def plot(seq: torch.Tensor):
        """Plot a sampled stroke sequence. Note: mutates `seq` in place."""
        # Take the cumulative sums of $(\Delta x, \Delta y)$ to get $(x, y)$
        seq[:, 0:2] = torch.cumsum(seq[:, 0:2], dim=0)
        # Create a new numpy array of the form $(x, y, q_2)$
        seq[:, 2] = seq[:, 3]
        seq = seq[:, 0:3].detach().cpu().numpy()
        # Split the array at points where $q_2$ is $1$.
        # i.e. split the array of strokes at the points where the pen is lifted from the paper.
        # This gives a list of sequence of strokes.
        strokes = np.split(seq, np.where(seq[:, 2] > 0)[0] + 1)
        # Plot each sequence of strokes (y is negated so the sketch isn't upside down)
        for s in strokes:
            plt.plot(s[:, 0], -s[:, 1])
        # Don't show axes
        plt.axis('off')
        # Show the plot
        plt.show()
class Configs(TrainValidConfigs):
    """
    ## Configurations
    These are default configurations which can later be adjusted by passing a `dict`.
    """
    # Device configurations to pick the device to run the experiment
    device: torch.device = DeviceConfigs()
    # Models, optimizer, sampler, and data; populated in `init`
    encoder: EncoderRNN
    decoder: DecoderRNN
    optimizer: optim.Adam
    sampler: Sampler
    dataset_name: str
    train_loader: DataLoader
    valid_loader: DataLoader
    train_dataset: StrokesDataset
    valid_dataset: StrokesDataset
    # Encoder and decoder sizes
    enc_hidden_size = 256
    dec_hidden_size = 512
    # Batch size
    batch_size = 100
    # Number of features in $z$
    d_z = 128
    # Number of distributions in the mixture, $M$
    n_distributions = 20
    # Weight of KL divergence loss, $w_{KL}$
    kl_div_loss_weight = 0.5
    # Gradient clipping
    grad_clip = 1.
    # Temperature $\tau$ for sampling
    temperature = 0.4
    # Filter out stroke sequences longer than $200$
    max_seq_length = 200
    # Number of training epochs
    epochs = 100
    # Loss modules
    kl_div_loss = KLDivLoss()
    reconstruction_loss = ReconstructionLoss()
    def init(self):
        """Initialize models, optimizer, sampler, datasets and data loaders."""
        # Initialize encoder & decoder
        self.encoder = EncoderRNN(self.d_z, self.enc_hidden_size).to(self.device)
        self.decoder = DecoderRNN(self.d_z, self.dec_hidden_size, self.n_distributions).to(self.device)
        # Set optimizer. Things like type of optimizer and learning rate are configurable
        optimizer = OptimizerConfigs()
        optimizer.parameters = list(self.encoder.parameters()) + list(self.decoder.parameters())
        self.optimizer = optimizer
        # Create sampler
        self.sampler = Sampler(self.encoder, self.decoder)
        # `npz` file path is `data/sketch/[DATASET NAME].npz`
        path = lab.get_data_path() / 'sketch' / f'{self.dataset_name}.npz'
        # Load the numpy file
        dataset = np.load(str(path), encoding='latin1', allow_pickle=True)
        # Create training dataset
        self.train_dataset = StrokesDataset(dataset['train'], self.max_seq_length)
        # Create validation dataset, reusing the training scale so both are
        # normalized consistently
        self.valid_dataset = StrokesDataset(dataset['valid'], self.max_seq_length, self.train_dataset.scale)
        # Create training data loader
        self.train_loader = DataLoader(self.train_dataset, self.batch_size, shuffle=True)
        # Create validation data loader
        self.valid_loader = DataLoader(self.valid_dataset, self.batch_size)
        # Configure the tracker to print the total train/validation loss
        tracker.set_scalar("loss.total.*", True)
        self.state_modules = []
    def step(self, batch: Any, batch_idx: BatchIndex):
        """Run one training/validation step on `batch` (a `(data, mask)` pair)."""
        # Put the models in train/eval mode depending on the trainer state
        self.encoder.train(self.mode.is_train)
        self.decoder.train(self.mode.is_train)
        # Move `data` and `mask` to device and swap the sequence and batch dimensions.
        # `data` will have shape `[seq_len, batch_size, 5]` and
        # `mask` will have shape `[seq_len, batch_size]`.
        data = batch[0].to(self.device).transpose(0, 1)
        mask = batch[1].to(self.device).transpose(0, 1)
        # Increment step in training mode
        if self.mode.is_train:
            tracker.add_global_step(len(data))
        # Encode the sequence of strokes
        with monit.section("encoder"):
            # Get $z$, $\mu$, and $\hat{\sigma}$
            z, mu, sigma_hat = self.encoder(data)
        # Decode the mixture of distributions and $\hat{q}$
        with monit.section("decoder"):
            # Concatenate $[(\Delta x, \Delta y, p_1, p_2, p_3); z]$
            z_stack = z.unsqueeze(0).expand(data.shape[0] - 1, -1, -1)
            inputs = torch.cat([data[:-1], z_stack], 2)
            # Get mixture of distributions and $\hat{q}$
            dist, q_logits, _ = self.decoder(inputs, z, None)
        # Compute the loss
        with monit.section('loss'):
            # $L_{KL}$
            kl_loss = self.kl_div_loss(sigma_hat, mu)
            # $L_R$; the decoder predicts `data[1:]` from `data[:-1]`
            reconstruction_loss = self.reconstruction_loss(mask, data[1:], dist, q_logits)
            # $Loss = L_R + w_{KL} L_{KL}$
            loss = reconstruction_loss + self.kl_div_loss_weight * kl_loss
            # Track losses
            tracker.add("loss.kl.", kl_loss)
            tracker.add("loss.reconstruction.", reconstruction_loss)
            tracker.add("loss.total.", loss)
        # Only if we are in training state
        if self.mode.is_train:
            # Run optimizer
            with monit.section('optimize'):
                # Set `grad` to zero
                self.optimizer.zero_grad()
                # Compute gradients
                loss.backward()
                # Log model parameters and gradients
                if batch_idx.is_last:
                    tracker.add(encoder=self.encoder, decoder=self.decoder)
                # Clip gradients
                nn.utils.clip_grad_norm_(self.encoder.parameters(), self.grad_clip)
                nn.utils.clip_grad_norm_(self.decoder.parameters(), self.grad_clip)
                # Optimize
                self.optimizer.step()
        tracker.save()
    def sample(self):
        """Generate and plot a sample sketch conditioned on a random validation sequence."""
        # Randomly pick a sample from validation dataset to encoder
        data, *_ = self.valid_dataset[np.random.choice(len(self.valid_dataset))]
        # Add batch dimension and move it to device
        data = data.unsqueeze(1).to(self.device)
        # Sample
        self.sampler.sample(data, self.temperature)
def main():
    """Create the `sketch_rnn` experiment and run training."""
    conf = Configs()
    experiment.create(name="sketch_rnn")
    # Configuration overrides
    overrides = {
        'optimizer.optimizer': 'Adam',
        # We use a learning rate of `1e-3` because we can see results faster.
        # Paper had suggested `1e-4`.
        'optimizer.learning_rate': 1e-3,
        # Name of the dataset
        'dataset_name': 'bicycle',
        # Number of inner iterations within an epoch to switch between training, validation and sampling.
        'inner_iterations': 10
    }
    experiment.configs(conf, overrides)
    # Run the training loop inside the experiment context
    with experiment.start():
        conf.run()


if __name__ == "__main__":
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/activations/__init__.py | labml_nn/activations/__init__.py | """
---
title: Neural Network Activation Functions
summary: >
A set of PyTorch implementations/tutorials related to neural network activations
---
# Neural Networks Activations
* [Fuzzy Tiling Activations](fta/index.html)
* 🚧 [Swish](swish/index.html)
"""
from .swish import Swish
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/activations/swish.py | labml_nn/activations/swish.py | import torch
from torch import nn
class Swish(nn.Module):
    """
    ### Swish activation

    Computes $x \cdot \sigma(x)$ element-wise.
    """

    def __init__(self):
        super().__init__()
        # Sigmoid sub-module used as the gating function
        self.sigmoid = nn.Sigmoid()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the swish activation."""
        gate = self.sigmoid(x)
        return x * gate
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/activations/fta/experiment.py | labml_nn/activations/fta/experiment.py | """
---
title: Fuzzy Tiling Activation Experiment
summary: >
Training a transformer with FTA in FFN on Tiny Shakespeare.
---
# [Fuzzy Tiling Activation](index.html) Experiment
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/activations/fta/experiment.ipynb)
Here we train a transformer that uses [Fuzzy Tiling Activation](index.html) in the
[Feed-Forward Network](../../transformers/feed_forward.html).
We use it for a language model and train it on Tiny Shakespeare dataset
for demonstration.
However, this is probably not the ideal task for FTA, and we
believe FTA is more suitable for modeling data with continuous variables.
"""
import copy
import torch
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_nn.activations.fta import FTA
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import MultiHeadAttention, TransformerLayer
from labml_nn.transformers.utils import subsequent_mask
class FeedForwardFTA(nn.Module):
    """
    ## FFN module with [FTA](index.html) activation
    """

    def __init__(self, d_model: int, d_ff: int,
                 activation: FTA,
                 dropout: float = 0.1):
        """
        * `d_model` is the number of features in a token embedding
        * `d_ff` is the number of features in the hidden layer of the FFN
        * `activation` is the FTA activation module
        * `dropout` is the dropout probability for the hidden layer
        """
        super().__init__()
        # First projection, parameterized by weight $W_1$ and bias $b_1$
        self.layer1 = nn.Linear(d_model, d_ff)
        # FTA expands each hidden feature by `expansion_factor`, so the second
        # projection maps the expanded width back down to `d_model`
        self.layer2 = nn.Linear(d_ff * activation.expansion_factor, d_model)
        # Dropout applied to the activated hidden layer
        self.dropout = nn.Dropout(dropout)
        # Activation function $f$
        self.activation = activation

    def forward(self, x: torch.Tensor):
        # Hidden representation $f(x W_1 + b_1)$
        hidden = self.activation(self.layer1(x))
        # Regularize, then project back to the embedding size
        return self.layer2(self.dropout(hidden))
class AutoregressiveTransformer(nn.Module):
    """
    ## Auto-Regressive model
    This is an autoregressive transformer model that uses Feed-Forward Networks with
    [Fuzzy Tiling Activations](index.html).
    """
    def __init__(self, n_tokens: int, d_model: int, n_layers: int, layer: TransformerLayer):
        """
        :param n_tokens: is the number of tokens in the vocabulary
        :param d_model: is the embedding size
        :param n_layers: is the number of transformer layers
        :param layer: is the layer. We use `n_layers` copies of this for the transformer.
        """
        super().__init__()
        # Transformer with `n_layers` layers (independent deep copies of `layer`)
        self.transformer_layers = nn.ModuleList([copy.deepcopy(layer) for _ in range(n_layers)])
        # Token embedding layer
        self.emb = nn.Embedding(n_tokens, d_model)
        # Readout layer
        self.readout = nn.Linear(d_model, n_tokens)
        # The mask will be initialized on the first call
        self.mask = None
    def forward(self, x: torch.Tensor):
        """
        :param x: are the input tokens of shape `[seq_len, batch_size]`
        """
        # Create auto-regressive mask; it is cached and only rebuilt when the
        # sequence length changes
        if self.mask is None or self.mask.size(0) != len(x):
            # Subsequent mask, will mask out tokens from seeing future tokens
            self.mask = subsequent_mask(len(x)).to(x.device)
        # Get the token embeddings
        x = self.emb(x)
        # Transformer encoder
        for layer in self.transformer_layers:
            x = layer(x=x, mask=self.mask)
        # Get logits
        x = self.readout(x)
        # Return results. The second element is `None` — presumably a state
        # placeholder expected by the training loop; confirm against the trainer.
        return x, None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations
    This inherits from
    [`NLPAutoRegressionConfigs`](../../experiments/nlp_autoregression.html#NLPAutoRegressionConfigs)
    """
    # Model
    model: AutoregressiveTransformer
    # Number of layers
    n_layers: int = 4
    # $\alpha$ and $\beta$ for DeepNorm
    # NOTE(review): declared but not referenced anywhere in this file — confirm
    # whether they are consumed by the config framework or can be removed
    deep_norm_alpha: float
    deep_norm_beta: float
    # Number of heads in the attention
    n_heads: int = 4
    # Embedding size
    d_model: int = 256
    # Size of each attention head
    d_k: int = 16
    # Feed forward layer size
    d_ff: int = 256
    # FTA parameters: input range $[l, u]$, bin size $\delta$, softness $\eta$
    fta_lower_limit: float = -1.
    fta_upper_limit: float = +1.
    fta_delta: float = 0.2
    fta_eta: float = 0.05
@option(Configs.model)
def _model(c: Configs):
    """
    #### Initialize the model
    """
    # FTA activation module
    fta = FTA(c.fta_lower_limit, c.fta_upper_limit, c.fta_delta, c.fta_eta)
    # FFN with FTA activation
    ffn = FeedForwardFTA(d_model=c.d_model,
                         d_ff=c.d_ff,
                         activation=fta,
                         dropout=0.1)
    # Self-attention module; we re-use the
    # [`MultiHeadAttention`](../../transformers/mha.html) implementation
    attn = MultiHeadAttention(c.n_heads, c.d_model, dropout_prob=0.0)
    # A single [`TransformerLayer`](../../transformers/models.html#TransformerLayer);
    # `AutoregressiveTransformer` deep-copies it for each layer
    layer = TransformerLayer(d_model=c.d_model,
                             feed_forward=ffn,
                             self_attn=attn,
                             dropout_prob=0.0)
    m = AutoregressiveTransformer(c.n_tokens, c.d_model, c.n_layers, layer)
    # Move to the device
    return m.to(c.device)
def main():
    """
    #### Create and run the experiment
    """
    # Create experiment
    experiment.create(name="fta", writers={'screen', 'labml'})
    # Create configs
    conf = Configs()
    # Configuration overrides
    overrides = {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Use a context size of $256$
        'seq_len': 256,
        # Train for 32 epochs
        'epochs': 32,
        # Batch size $16$
        'batch_size': 16,
        # Switch between training and validation for $10$ times per epoch
        'inner_iterations': 10,
        # Adam optimizer with no warmup
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 3e-4,
    }
    experiment.configs(conf, overrides)
    # Set model(s) for saving and loading (accessing `conf.model` builds the model)
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment and run training
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/activations/fta/__init__.py | labml_nn/activations/fta/__init__.py | """
---
title: Fuzzy Tiling Activations
summary: >
PyTorch implementation and tutorial of Fuzzy Tiling Activations from the
paper Fuzzy Tiling Activations: A Simple Approach to Learning Sparse Representations Online.
---
# Fuzzy Tiling Activations (FTA)
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/activations/fta/experiment.ipynb)
This is a [PyTorch](https://pytorch.org) implementation/tutorial of
[Fuzzy Tiling Activations: A Simple Approach to Learning Sparse Representations Online](https://arxiv.org/abs/1911.08068).
Fuzzy tiling activations are a form of sparse activations based on binning.
Binning is classification of a scalar value into a bin based on intervals.
One problem with binning is that it gives zero gradients for most values (except at the boundary of bins).
The other is that binning loses precision if the bin intervals are large.
FTA overcomes these disadvantages.
Instead of hard boundaries like in Tiling Activations, FTA uses soft boundaries
between bins.
This gives non-zero gradients for all or a wide range of values.
And also doesn't lose precision since it's captured in partial values.
#### Tiling Activations
$\mathbf{c}$ is the tiling vector,
$$\mathbf{c} = (l, l + \delta, l + 2 \delta, \dots, u - 2 \delta, u - \delta)$$
where $[l, u]$ is the input range, $\delta$ is the bin size, and $u - l$ is divisible by $\delta$.
Tiling activation is,
$$\phi(z) = 1 - I_+ \big( \max(\mathbf{c} - z, 0) + \max(z - \delta - \mathbf{c}, 0) \big)$$
where $I_+(\cdot)$ is the indicator function which gives $1$ if the input is positive and $0$ otherwise.
Note that tiling activation gives zero gradients because it has hard boundaries.
#### Fuzzy Tiling Activations
The fuzzy indicator function,
$$I_{\eta,+}(x) = I_+(\eta - x) x + I_+ (x - \eta)$$
which increases linearly from $0$ to $1$ when $0 \le x \lt \eta$
and is equal to $1$ for $\eta \le x$.
$\eta$ is a hyper-parameter.
FTA uses this to create soft boundaries between bins.
$$\phi_\eta(z) = 1 - I_{\eta,+} \big( \max(\mathbf{c} - z, 0) + \max(z - \delta - \mathbf{c}, 0) \big)$$
[Here's a simple experiment](experiment.html) that uses FTA in a transformer.
"""
import torch
from torch import nn
class FTA(nn.Module):
    """
    ### Fuzzy Tiling Activations (FTA)

    Expands each scalar input into a vector of soft bin activations over the
    range $[l, u)$ with bin size $\delta$ and fuzziness parameter $\eta$.
    """

    def __init__(self, lower_limit: float, upper_limit: float, delta: float, eta: float):
        """
        :param lower_limit: is the lower limit $l$
        :param upper_limit: is the upper limit $u$
        :param delta: is the bin size $\delta$
        :param eta: is the parameter $\eta$ that determines the softness of the boundaries.
        """
        super().__init__()
        # Tiling vector
        # $$\mathbf{c} = (l, l + \delta, l + 2 \delta, \dots, u - 2 \delta, u - \delta)$$
        # Stored as a non-trainable parameter so it follows the module's device/dtype
        self.c = nn.Parameter(torch.arange(lower_limit, upper_limit, delta), requires_grad=False)
        # Each input feature expands into this many bins, $\frac{u - l}{\delta}$
        self.expansion_factor = len(self.c)
        # Bin size $\delta$
        self.delta = delta
        # Softness $\eta$
        self.eta = eta

    def fuzzy_i_plus(self, x: torch.Tensor):
        """
        #### Fuzzy indicator function

        $$I_{\eta,+}(x) = I_+(\eta - x) x + I_+ (x - \eta)$$

        Rises linearly from $0$ to $1$ on $[0, \eta)$ and equals $1$ beyond $\eta$.
        """
        linear_part = (x <= self.eta) * x
        saturated_part = (x > self.eta)
        return linear_part + saturated_part

    def forward(self, z: torch.Tensor):
        # Add a trailing dimension of size $1$ so `z` broadcasts against the bins in `c`
        expanded = z.view(*z.shape, 1)
        # $$\phi_\eta(z) = 1 - I_{\eta,+} \big( \max(\mathbf{c} - z, 0) + \max(z - \delta - \mathbf{c}, 0) \big)$$
        distance = torch.clip(self.c - expanded, min=0.) + torch.clip(expanded - self.delta - self.c, min=0.)
        activations = 1. - self.fuzzy_i_plus(distance)
        # Fold the bins into the last dimension; its size grows by
        # `expansion_factor`, $\frac{u - l}{\delta}$
        return activations.view(*activations.shape[:-2], -1)
def _test():
    """
    #### Code to test the FTA module
    """
    from labml.logger import inspect

    # Create an FTA module over $[-10, 10)$ with $\delta = 2$ and $\eta = 0.5$
    fta = FTA(-10, 10, 2., 0.5)
    # Show the tiling vector $\mathbf{c}$
    inspect(fta.c)
    # Show the number of bins $\frac{u - l}{\delta}$
    inspect(fta.expansion_factor)
    # A sample input $z$
    z = torch.tensor([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9., 10., 11.])
    inspect(z)
    # Show the activations $\phi_\eta(z)$
    inspect(fta(z))


if __name__ == '__main__':
    _test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/utils/__init__.py | labml_nn/utils/__init__.py | """
---
title: Utilities
summary: A bunch of utility functions and classes
---
# Utilities
"""
import copy
from torch import nn
from torch.utils.data import Dataset, IterableDataset
def clone_module_list(module: nn.Module, n: int) -> nn.ModuleList:
    """
    ## Clone Module

    Return an `nn.ModuleList` containing `n` independent deep copies of `module`.
    """
    copies = [copy.deepcopy(module) for _ in range(n)]
    return nn.ModuleList(copies)
def cycle_dataloader(data_loader):
    """
    <a id="cycle_dataloader"></a>

    ## Cycle Data Loader

    Yield batches from `data_loader` forever, restarting it after each full pass.
    """
    while True:
        yield from data_loader
class MapStyleDataset(Dataset):
    """
    <a id="map_style_dataset"></a>

    ## Map Style Dataset

    Materializes an [`IterableDataset`](https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset)
    into a [map-style dataset](https://pytorch.org/docs/stable/data.html#map-style-datasets)
    so that samples can be accessed by index (which enables shuffling).

    *This only works when the dataset is small enough to be held in memory.*
    """

    def __init__(self, dataset: IterableDataset):
        # Pull every sample into memory
        self.data = list(dataset)

    def __getitem__(self, idx: int):
        """Get the sample at index `idx`"""
        return self.data[idx]

    def __iter__(self):
        """Iterate over the materialized samples"""
        return iter(self.data)

    def __len__(self):
        """Number of samples"""
        return len(self.data)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/utils/tokenizer.py | labml_nn/utils/tokenizer.py | from typing import Callable
from labml.configs import BaseConfigs, option
class TokenizerConfigs(BaseConfigs):
    """
    <a id="TokenizerConfigs"></a>

    ## Tokenizer Configurations
    """
    # Tokenizer callable; the string default is resolved to the `character`
    # option defined below by the config framework
    tokenizer: Callable = 'character'
    def __init__(self):
        # Make `tokenizer` the primary option so it can be set directly
        super().__init__(_primary='tokenizer')
@option(TokenizerConfigs.tokenizer)
def basic_english():
    """
    ### Basic english tokenizer

    We use a character level tokenizer in this experiment.
    You can switch to this one by setting,

    ```
    'tokenizer': 'basic_english'
    ```

    in the configurations dictionary when starting the experiment.
    """
    # Imported lazily so `torchtext` is only required when this option is used
    from torchtext.data import get_tokenizer
    tokenizer = get_tokenizer('basic_english')
    return tokenizer
def character_tokenizer(x: str):
    """
    ### Character level tokenizer

    Split a string into the list of its characters.
    """
    return [*x]
@option(TokenizerConfigs.tokenizer)
def character():
    """
    Character level tokenizer configuration
    """
    # Resolve the `character` option to the plain `character_tokenizer` function above
    return character_tokenizer
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/__init__.py | labml_nn/diffusion/__init__.py | """
---
title: Diffusion models
summary: >
A set of PyTorch implementations/tutorials of diffusion models.
---
# Diffusion models
* [Denoising Diffusion Probabilistic Models (DDPM)](ddpm/index.html)
* [Stable Diffusion](stable_diffusion/index.html)
* [Latent Diffusion Model](stable_diffusion/latent_diffusion.html)
* [Denoising Diffusion Implicit Models (DDIM) Sampling](stable_diffusion/sampler/ddim.html)
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/ddpm/unet.py | labml_nn/diffusion/ddpm/unet.py | """
---
title: U-Net model for Denoising Diffusion Probabilistic Models (DDPM)
summary: >
UNet model for Denoising Diffusion Probabilistic Models (DDPM)
---
# U-Net model for [Denoising Diffusion Probabilistic Models (DDPM)](index.html)
This is a [U-Net](../../unet/index.html) based model to predict noise
$\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$.
U-Net gets its name from the U shape in the model diagram.
It processes a given image by progressively lowering (halving) the feature map resolution and then
increasing the resolution.
There are pass-through connections at each resolution.

This implementation contains a bunch of modifications to original U-Net (residual blocks, multi-head attention)
and also adds time-step embeddings $t$.
"""
import math
from typing import Optional, Tuple, Union, List
import torch
from torch import nn
class Swish(nn.Module):
    """
    ### Swish activation function

    Computes $x \cdot \sigma(x)$ element-wise.
    """

    def forward(self, x):
        # Sigmoid-gated identity
        return torch.sigmoid(x) * x
class TimeEmbedding(nn.Module):
    """
    ### Embeddings for $t$

    Builds sinusoidal features for the time steps and transforms them with a
    small two-layer MLP.
    """

    def __init__(self, n_channels: int):
        """
        * `n_channels` is the number of dimensions in the embedding
        """
        super().__init__()
        self.n_channels = n_channels
        # Two-layer MLP applied to the `n_channels // 4` sinusoidal features
        self.lin1 = nn.Linear(self.n_channels // 4, self.n_channels)
        self.act = Swish()
        self.lin2 = nn.Linear(self.n_channels, self.n_channels)

    def forward(self, t: torch.Tensor):
        # Sinusoidal position embeddings, the same construction as the
        # transformer positional encodings:
        #
        # \begin{align}
        # PE^{(1)}_{t,i} &= sin\Bigg(\frac{t}{10000^{\frac{i}{d - 1}}}\Bigg) \\
        # PE^{(2)}_{t,i} &= cos\Bigg(\frac{t}{10000^{\frac{i}{d - 1}}}\Bigg)
        # \end{align}
        #
        # where $d$ is `half_dim`
        half_dim = self.n_channels // 8
        # Geometrically spaced frequencies
        step = math.log(10_000) / (half_dim - 1)
        freqs = torch.exp(torch.arange(half_dim, device=t.device) * -step)
        # Outer product of time steps and frequencies
        angles = t[:, None] * freqs[None, :]
        # Sine and cosine features side by side
        features = torch.cat((angles.sin(), angles.cos()), dim=1)
        # Transform with the MLP
        return self.lin2(self.act(self.lin1(features)))
class ResidualBlock(nn.Module):
    """
    ### Residual block

    Two group-normalized convolutions with a time-embedding conditioning term
    and a (possibly projected) skip connection. Each resolution of the U-Net
    is processed with two of these blocks.
    """

    def __init__(self, in_channels: int, out_channels: int, time_channels: int,
                 n_groups: int = 32, dropout: float = 0.1):
        """
        * `in_channels` is the number of input channels
        * `out_channels` is the number of output channels
        * `time_channels` is the number of channels in the time step ($t$) embeddings
        * `n_groups` is the number of groups for group normalization
        * `dropout` is the dropout rate
        """
        super().__init__()
        # First norm -> activation -> convolution
        self.norm1 = nn.GroupNorm(n_groups, in_channels)
        self.act1 = Swish()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=(3, 3), padding=(1, 1))
        # Second norm -> activation -> convolution
        self.norm2 = nn.GroupNorm(n_groups, out_channels)
        self.act2 = Swish()
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=(3, 3), padding=(1, 1))
        # Project the skip connection with a 1x1 convolution when the channel
        # counts differ; otherwise pass it through unchanged
        if in_channels != out_channels:
            self.shortcut = nn.Conv2d(in_channels, out_channels, kernel_size=(1, 1))
        else:
            self.shortcut = nn.Identity()
        # Projection of the time embedding onto the feature channels
        self.time_emb = nn.Linear(time_channels, out_channels)
        self.time_act = Swish()
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor, t: torch.Tensor):
        """
        * `x` has shape `[batch_size, in_channels, height, width]`
        * `t` has shape `[batch_size, time_channels]`
        """
        # First convolution path
        z = self.conv1(self.act1(self.norm1(x)))
        # Condition on the time embedding, broadcast over the spatial dims
        z += self.time_emb(self.time_act(t))[:, :, None, None]
        # Second convolution path with dropout
        z = self.conv2(self.dropout(self.act2(self.norm2(z))))
        # Residual connection
        return z + self.shortcut(x)
class AttentionBlock(nn.Module):
"""
### Attention block
This is similar to [transformer multi-head attention](../../transformers/mha.html).
"""
def __init__(self, n_channels: int, n_heads: int = 1, d_k: int = None, n_groups: int = 32):
"""
* `n_channels` is the number of channels in the input
* `n_heads` is the number of heads in multi-head attention
* `d_k` is the number of dimensions in each head
* `n_groups` is the number of groups for [group normalization](../../normalization/group_norm/index.html)
"""
super().__init__()
# Default `d_k`
if d_k is None:
d_k = n_channels
# Normalization layer
self.norm = nn.GroupNorm(n_groups, n_channels)
# Projections for query, key and values
self.projection = nn.Linear(n_channels, n_heads * d_k * 3)
# Linear layer for final transformation
self.output = nn.Linear(n_heads * d_k, n_channels)
# Scale for dot-product attention
self.scale = d_k ** -0.5
#
self.n_heads = n_heads
self.d_k = d_k
def forward(self, x: torch.Tensor, t: Optional[torch.Tensor] = None):
"""
* `x` has shape `[batch_size, in_channels, height, width]`
* `t` has shape `[batch_size, time_channels]`
"""
# `t` is not used, but it's kept in the arguments because for the attention layer function signature
# to match with `ResidualBlock`.
_ = t
# Get shape
batch_size, n_channels, height, width = x.shape
# Change `x` to shape `[batch_size, seq, n_channels]`
x = x.view(batch_size, n_channels, -1).permute(0, 2, 1)
# Get query, key, and values (concatenated) and shape it to `[batch_size, seq, n_heads, 3 * d_k]`
qkv = self.projection(x).view(batch_size, -1, self.n_heads, 3 * self.d_k)
# Split query, key, and values. Each of them will have shape `[batch_size, seq, n_heads, d_k]`
q, k, v = torch.chunk(qkv, 3, dim=-1)
# Calculate scaled dot-product $\frac{Q K^\top}{\sqrt{d_k}}$
attn = torch.einsum('bihd,bjhd->bijh', q, k) * self.scale
# Softmax along the sequence dimension $\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)$
attn = attn.softmax(dim=2)
# Multiply by values
res = torch.einsum('bijh,bjhd->bihd', attn, v)
# Reshape to `[batch_size, seq, n_heads * d_k]`
res = res.view(batch_size, -1, self.n_heads * self.d_k)
# Transform to `[batch_size, seq, n_channels]`
res = self.output(res)
# Add skip connection
res += x
# Change to shape `[batch_size, in_channels, height, width]`
res = res.permute(0, 2, 1).view(batch_size, n_channels, height, width)
#
return res
class DownBlock(nn.Module):
    """
    ### Down block

    A `ResidualBlock` followed by an optional `AttentionBlock`.
    These are used in the first half of the U-Net at each resolution.
    """

    def __init__(self, in_channels: int, out_channels: int, time_channels: int, has_attn: bool):
        super().__init__()
        self.res = ResidualBlock(in_channels, out_channels, time_channels)
        # `Identity` keeps the forward pass uniform when attention is disabled
        self.attn = AttentionBlock(out_channels) if has_attn else nn.Identity()

    def forward(self, x: torch.Tensor, t: torch.Tensor):
        return self.attn(self.res(x, t))
class UpBlock(nn.Module):
    """
    ### Up block

    A `ResidualBlock` followed by an optional `AttentionBlock`.
    These are used in the second half of the U-Net at each resolution.
    """

    def __init__(self, in_channels: int, out_channels: int, time_channels: int, has_attn: bool):
        super().__init__()
        # The residual block takes `in_channels + out_channels` because the
        # output of the same resolution from the first half of the U-Net is
        # concatenated to the input
        self.res = ResidualBlock(in_channels + out_channels, out_channels, time_channels)
        # `Identity` keeps the forward pass uniform when attention is disabled
        self.attn = AttentionBlock(out_channels) if has_attn else nn.Identity()

    def forward(self, x: torch.Tensor, t: torch.Tensor):
        return self.attn(self.res(x, t))
class MiddleBlock(nn.Module):
    """
    ### Middle block

    `ResidualBlock` -> `AttentionBlock` -> `ResidualBlock`, applied at the
    lowest resolution of the U-Net.
    """

    def __init__(self, n_channels: int, time_channels: int):
        super().__init__()
        self.res1 = ResidualBlock(n_channels, n_channels, time_channels)
        self.attn = AttentionBlock(n_channels)
        self.res2 = ResidualBlock(n_channels, n_channels, time_channels)

    def forward(self, x: torch.Tensor, t: torch.Tensor):
        return self.res2(self.attn(self.res1(x, t)), t)
class Upsample(nn.Module):
    """
    ### Scale up the feature map by $2 \times$
    """

    def __init__(self, n_channels):
        super().__init__()
        # A stride-2 transposed convolution doubles the spatial size
        self.conv = nn.ConvTranspose2d(n_channels, n_channels, (4, 4), (2, 2), (1, 1))

    def forward(self, x: torch.Tensor, t: torch.Tensor):
        # `t` is unused; it is accepted so the signature matches `ResidualBlock`
        _ = t
        return self.conv(x)
class Downsample(nn.Module):
    """
    ### Scale down the feature map by $\frac{1}{2} \times$
    """

    def __init__(self, n_channels):
        super().__init__()
        # A stride-2 $3 \times 3$ convolution halves the spatial size
        self.conv = nn.Conv2d(n_channels, n_channels, (3, 3), (2, 2), (1, 1))

    def forward(self, x: torch.Tensor, t: torch.Tensor):
        # `t` is unused; it is accepted so the signature matches `ResidualBlock`
        _ = t
        return self.conv(x)
class UNet(nn.Module):
    """
    ## U-Net

    Predicts the noise $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$ for a
    batch of noisy images `x` at time steps `t`.
    """

    def __init__(self, image_channels: int = 3, n_channels: int = 64,
                 ch_mults: Union[Tuple[int, ...], List[int]] = (1, 2, 2, 4),
                 is_attn: Union[Tuple[bool, ...], List[bool]] = (False, False, True, True),
                 n_blocks: int = 2):
        """
        * `image_channels` is the number of channels in the image. $3$ for RGB.
        * `n_channels` is number of channels in the initial feature map that we transform the image into
        * `ch_mults` is the list of channel multipliers at each resolution. The number of channels is `ch_mults[i] * n_channels`
        * `is_attn` is a list of booleans that indicate whether to use attention at each resolution
        * `n_blocks` is the number of `UpDownBlocks` at each resolution
        """
        super().__init__()

        # Number of resolutions
        n_resolutions = len(ch_mults)

        # Project image into feature map with `n_channels` channels
        self.image_proj = nn.Conv2d(image_channels, n_channels, kernel_size=(3, 3), padding=(1, 1))

        # Time embedding layer. Time embedding has `n_channels * 4` channels
        self.time_emb = TimeEmbedding(n_channels * 4)

        # #### First half of U-Net - decreasing resolution
        down = []
        # Number of channels; `in_channels` tracks the running channel count,
        # `out_channels` the target count at the current resolution
        out_channels = in_channels = n_channels
        # For each resolution
        for i in range(n_resolutions):
            # Number of output channels at this resolution
            out_channels = in_channels * ch_mults[i]
            # Add `n_blocks` down blocks; after the first one the channel count
            # has already grown to `out_channels`
            for _ in range(n_blocks):
                down.append(DownBlock(in_channels, out_channels, n_channels * 4, is_attn[i]))
                in_channels = out_channels
            # Down sample at all resolutions except the last
            if i < n_resolutions - 1:
                down.append(Downsample(in_channels))

        # Combine the set of modules
        self.down = nn.ModuleList(down)

        # Middle block, applied at the lowest resolution
        self.middle = MiddleBlock(out_channels, n_channels * 4, )

        # #### Second half of U-Net - increasing resolution
        up = []
        # Number of channels
        in_channels = out_channels
        # For each resolution, from the lowest back up to the highest
        for i in reversed(range(n_resolutions)):
            # `n_blocks` at the same resolution; these consume the skip
            # connections saved by the matching `DownBlock`s
            out_channels = in_channels
            for _ in range(n_blocks):
                up.append(UpBlock(in_channels, out_channels, n_channels * 4, is_attn[i]))
            # Final block to reduce the number of channels; it consumes the
            # skip connection saved before this resolution's first down block
            out_channels = in_channels // ch_mults[i]
            up.append(UpBlock(in_channels, out_channels, n_channels * 4, is_attn[i]))
            in_channels = out_channels
            # Up sample at all resolutions except last
            if i > 0:
                up.append(Upsample(in_channels))

        # Combine the set of modules
        self.up = nn.ModuleList(up)

        # Final normalization and convolution layer back to `image_channels`
        self.norm = nn.GroupNorm(8, n_channels)
        self.act = Swish()
        self.final = nn.Conv2d(in_channels, image_channels, kernel_size=(3, 3), padding=(1, 1))

    def forward(self, x: torch.Tensor, t: torch.Tensor):
        """
        * `x` has shape `[batch_size, in_channels, height, width]`
        * `t` has shape `[batch_size]`
        """
        # Get time-step embeddings
        t = self.time_emb(t)

        # Get image projection
        x = self.image_proj(x)

        # `h` will store outputs at each resolution for skip connection;
        # the initial projection is saved too, matching the final `UpBlock`
        h = [x]
        # First half of U-Net
        for m in self.down:
            x = m(x, t)
            h.append(x)

        # Middle (bottom)
        x = self.middle(x, t)

        # Second half of U-Net
        for m in self.up:
            if isinstance(m, Upsample):
                x = m(x, t)
            else:
                # Get the skip connection from first half of U-Net and concatenate
                s = h.pop()
                x = torch.cat((x, s), dim=1)
                #
                x = m(x, t)

        # Final normalization and convolution
        return self.final(self.act(self.norm(x)))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/ddpm/experiment.py | labml_nn/diffusion/ddpm/experiment.py | """
---
title: Denoising Diffusion Probabilistic Models (DDPM) training
summary: >
Training code for
Denoising Diffusion Probabilistic Model.
---
# [Denoising Diffusion Probabilistic Models (DDPM)](index.html) training
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/diffusion/ddpm/experiment.ipynb)
This trains a DDPM based model on CelebA HQ dataset. You can find the download instruction in this
[discussion on fast.ai](https://forums.fast.ai/t/download-celeba-hq-dataset/45873/3).
Save the images inside [`data/celebA` folder](#dataset_path).
The paper used an exponential moving average of the model with a decay of $0.9999$. We have skipped this for
simplicity.
"""
from typing import List
import torchvision
from PIL import Image
import torch
import torch.utils.data
from labml import lab, tracker, experiment, monit
from labml.configs import BaseConfigs, option
from labml_nn.diffusion.ddpm import DenoiseDiffusion
from labml_nn.diffusion.ddpm.unet import UNet
from labml_nn.helpers.device import DeviceConfigs
class Configs(BaseConfigs):
    """
    ## Configurations

    Holds the hyper-parameters, model, diffusion process, dataset and
    optimizer for DDPM training.
    """
    # Device to train the model on.
    # [`DeviceConfigs`](../../device.html)
    # picks up an available CUDA device or defaults to CPU.
    device: torch.device = DeviceConfigs()

    # U-Net model for $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$
    eps_model: UNet
    # [DDPM algorithm](index.html)
    diffusion: DenoiseDiffusion

    # Number of channels in the image. $3$ for RGB.
    image_channels: int = 3
    # Image size
    image_size: int = 32
    # Number of channels in the initial feature map
    n_channels: int = 64
    # The list of channel multipliers at each resolution.
    # The number of channels is `channel_multipliers[i] * n_channels`
    channel_multipliers: List[int] = [1, 2, 2, 4]
    # The list of booleans that indicate whether to use attention at each resolution
    is_attention: List[bool] = [False, False, False, True]

    # Number of time steps $T$
    n_steps: int = 1_000
    # Batch size
    batch_size: int = 64
    # Number of samples to generate
    n_samples: int = 16
    # Learning rate
    learning_rate: float = 2e-5

    # Number of training epochs
    epochs: int = 1_000

    # Dataset
    dataset: torch.utils.data.Dataset
    # Dataloader
    data_loader: torch.utils.data.DataLoader

    # Adam optimizer
    optimizer: torch.optim.Adam

    def init(self):
        """
        Initialize the model, diffusion process, dataloader and optimizer.
        """
        # Create $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$ model
        self.eps_model = UNet(
            image_channels=self.image_channels,
            n_channels=self.n_channels,
            ch_mults=self.channel_multipliers,
            is_attn=self.is_attention,
        ).to(self.device)

        # Create [DDPM class](index.html)
        self.diffusion = DenoiseDiffusion(
            eps_model=self.eps_model,
            n_steps=self.n_steps,
            device=self.device,
        )

        # Create dataloader
        self.data_loader = torch.utils.data.DataLoader(self.dataset, self.batch_size, shuffle=True, pin_memory=True)
        # Create optimizer
        self.optimizer = torch.optim.Adam(self.eps_model.parameters(), lr=self.learning_rate)

        # Image logging
        tracker.set_image("sample", True)

    def sample(self):
        """
        ### Sample images

        Generates `n_samples` images by running the reverse process for
        $T$ steps starting from pure noise.
        """
        with torch.no_grad():
            # $x_T \sim p(x_T) = \mathcal{N}(x_T; \mathbf{0}, \mathbf{I})$
            x = torch.randn([self.n_samples, self.image_channels, self.image_size, self.image_size],
                            device=self.device)

            # Remove noise for $T$ steps
            for t_ in monit.iterate('Sample', self.n_steps):
                # $t$ counts down from $T - 1$ to $0$
                t = self.n_steps - t_ - 1
                # Sample from $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$
                x = self.diffusion.p_sample(x, x.new_full((self.n_samples,), t, dtype=torch.long))

            # Log samples
            tracker.save('sample', x)

    def train(self):
        """
        ### Train

        Runs one epoch over the dataset.
        """
        # Iterate through the dataset
        for data in monit.iterate('Train', self.data_loader):
            # Increment global step
            tracker.add_global_step()
            # Move data to device
            data = data.to(self.device)

            # Make the gradients zero
            self.optimizer.zero_grad()
            # Calculate loss
            loss = self.diffusion.loss(data)
            # Compute gradients
            loss.backward()
            # Take an optimization step
            self.optimizer.step()
            # Track the loss
            tracker.save('loss', loss)

    def run(self):
        """
        ### Training loop

        Alternates training epochs and sampling for `epochs` iterations.
        """
        for _ in monit.loop(self.epochs):
            # Train the model
            self.train()
            # Sample some images
            self.sample()
            # New line in the console
            tracker.new_line()
class CelebADataset(torch.utils.data.Dataset):
    """
    ### CelebA HQ dataset

    Loads `*.jpg` images from the `data/celebA` folder, resizes them to
    `image_size` and converts them to tensors.
    """

    def __init__(self, image_size: int):
        """
        * `image_size` is the side length images are resized to
        """
        super().__init__()

        # CelebA images folder
        folder = lab.get_data_path() / 'celebA'
        # List of files.
        # Fixes: the original used an f-string with no placeholders and a
        # comprehension that only copied the generator. `glob` order is also
        # file-system dependent, so sort for deterministic indexing.
        self._files = sorted(folder.glob('**/*.jpg'))

        # Transformations to resize the image and convert to tensor
        self._transform = torchvision.transforms.Compose([
            torchvision.transforms.Resize(image_size),
            torchvision.transforms.ToTensor(),
        ])

    def __len__(self):
        """
        Size of the dataset
        """
        return len(self._files)

    def __getitem__(self, index: int):
        """
        Get the image at `index` as a tensor
        """
        img = Image.open(self._files[index])
        return self._transform(img)
@option(Configs.dataset, 'CelebA')
def celeb_dataset(c: Configs):
    """
    Create the CelebA HQ dataset, sized according to the configuration
    """
    return CelebADataset(image_size=c.image_size)
class MNISTDataset(torchvision.datasets.MNIST):
    """
    ### MNIST dataset

    Standard MNIST, resized to `image_size`; `__getitem__` returns only the
    image and discards the class label.
    """

    def __init__(self, image_size):
        # Resize and convert to tensor
        steps = [
            torchvision.transforms.Resize(image_size),
            torchvision.transforms.ToTensor(),
        ]
        super().__init__(str(lab.get_data_path()), train=True, download=True,
                         transform=torchvision.transforms.Compose(steps))

    def __getitem__(self, item):
        # Drop the label; only the image is needed for diffusion training
        return super().__getitem__(item)[0]
@option(Configs.dataset, 'MNIST')
def mnist_dataset(c: Configs):
    """
    Create the MNIST dataset, sized according to the configuration
    """
    return MNISTDataset(image_size=c.image_size)
def main():
    """
    Entry point: create the experiment, configure it and run DDPM training.
    """
    # Create experiment
    experiment.create(name='diffuse', writers={'screen', 'labml'})

    # Build the configuration object
    conf = Configs()

    # Override defaults; the commented values are suitable for MNIST
    overrides = {
        'dataset': 'CelebA',  # 'MNIST'
        'image_channels': 3,  # 1,
        'epochs': 100,  # 5,
    }
    experiment.configs(conf, overrides)

    # Instantiate the model, diffusion process, dataloader and optimizer
    conf.init()

    # Register the model for checkpoint saving and loading
    experiment.add_pytorch_models({'eps_model': conf.eps_model})

    # Start and run the training loop
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/ddpm/utils.py | labml_nn/diffusion/ddpm/utils.py | """
---
title: Utility functions for DDPM experiment
summary: >
Utility functions for DDPM experiment
---
# Utility functions for [DDPM](index.html) experiment
"""
import torch.utils.data
def gather(consts: torch.Tensor, t: torch.Tensor):
    """Gather `consts` at indices `t` and reshape to broadcast over a feature map"""
    # Select the constant for each sample's time step, then add three
    # singleton dimensions so it broadcasts over `[batch, channels, h, w]`
    selected = consts.gather(-1, t)
    return selected.reshape(-1, 1, 1, 1)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/ddpm/__init__.py | labml_nn/diffusion/ddpm/__init__.py | """
---
title: Denoising Diffusion Probabilistic Models (DDPM)
summary: >
PyTorch implementation and tutorial of the paper
Denoising Diffusion Probabilistic Models (DDPM).
---
# Denoising Diffusion Probabilistic Models (DDPM)
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/diffusion/ddpm/experiment.ipynb)
This is a [PyTorch](https://pytorch.org) implementation/tutorial of the paper
[Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239).
In simple terms, we get an image from data and add noise step by step.
Then we train a model to predict that noise at each step and use the model to
generate images.
The following definitions and derivations show how this works.
For details please refer to [the paper](https://arxiv.org/abs/2006.11239).
## Forward Process
The forward process adds noise to the data $x_0 \sim q(x_0)$, for $T$ timesteps.
\begin{align}
q(x_t | x_{t-1}) = \mathcal{N}\big(x_t; \sqrt{1- \beta_t} x_{t-1}, \beta_t \mathbf{I}\big) \\
q(x_{1:T} | x_0) = \prod_{t = 1}^{T} q(x_t | x_{t-1})
\end{align}
where $\beta_1, \dots, \beta_T$ is the variance schedule.
We can sample $x_t$ at any timestep $t$ with,
\begin{align}
q(x_t|x_0) &= \mathcal{N} \Big(x_t; \sqrt{\bar\alpha_t} x_0, (1-\bar\alpha_t) \mathbf{I} \Big)
\end{align}
where $\alpha_t = 1 - \beta_t$ and $\bar\alpha_t = \prod_{s=1}^t \alpha_s$
## Reverse Process
The reverse process removes noise starting at $p(x_T) = \mathcal{N}(x_T; \mathbf{0}, \mathbf{I})$
for $T$ time steps.
\begin{align}
\textcolor{lightgreen}{p_\theta}(x_{t-1} | x_t) &= \mathcal{N}\big(x_{t-1};
\textcolor{lightgreen}{\mu_\theta}(x_t, t), \textcolor{lightgreen}{\Sigma_\theta}(x_t, t)\big) \\
\textcolor{lightgreen}{p_\theta}(x_{0:T}) &= \textcolor{lightgreen}{p_\theta}(x_T) \prod_{t = 1}^{T} \textcolor{lightgreen}{p_\theta}(x_{t-1} | x_t) \\
\textcolor{lightgreen}{p_\theta}(x_0) &= \int \textcolor{lightgreen}{p_\theta}(x_{0:T}) dx_{1:T}
\end{align}
$\textcolor{lightgreen}\theta$ are the parameters we train.
## Loss
We optimize the ELBO (from Jenson's inequality) on the negative log likelihood.
\begin{align}
\mathbb{E}[-\log \textcolor{lightgreen}{p_\theta}(x_0)]
&\le \mathbb{E}_q [ -\log \frac{\textcolor{lightgreen}{p_\theta}(x_{0:T})}{q(x_{1:T}|x_0)} ] \\
&=L
\end{align}
The loss can be rewritten as follows.
\begin{align}
L
&= \mathbb{E}_q [ -\log \frac{\textcolor{lightgreen}{p_\theta}(x_{0:T})}{q(x_{1:T}|x_0)} ] \\
&= \mathbb{E}_q [ -\log p(x_T) - \sum_{t=1}^T \log \frac{\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)}{q(x_t|x_{t-1})} ] \\
&= \mathbb{E}_q [
-\log \frac{p(x_T)}{q(x_T|x_0)}
-\sum_{t=2}^T \log \frac{\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)}{q(x_{t-1}|x_t,x_0)}
-\log \textcolor{lightgreen}{p_\theta}(x_0|x_1)] \\
&= \mathbb{E}_q [
D_{KL}(q(x_T|x_0) \Vert p(x_T))
+\sum_{t=2}^T D_{KL}(q(x_{t-1}|x_t,x_0) \Vert \textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t))
-\log \textcolor{lightgreen}{p_\theta}(x_0|x_1)]
\end{align}
$D_{KL}(q(x_T|x_0) \Vert p(x_T))$ is constant since we keep $\beta_1, \dots, \beta_T$ constant.
### Computing $L_{t-1} = D_{KL}(q(x_{t-1}|x_t,x_0) \Vert \textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t))$
The forward process posterior conditioned by $x_0$ is,
\begin{align}
q(x_{t-1}|x_t, x_0) &= \mathcal{N} \Big(x_{t-1}; \tilde\mu_t(x_t, x_0), \tilde\beta_t \mathbf{I} \Big) \\
\tilde\mu_t(x_t, x_0) &= \frac{\sqrt{\bar\alpha_{t-1}}\beta_t}{1 - \bar\alpha_t}x_0
+ \frac{\sqrt{\alpha_t}(1 - \bar\alpha_{t-1})}{1-\bar\alpha_t}x_t \\
\tilde\beta_t &= \frac{1 - \bar\alpha_{t-1}}{1 - \bar\alpha_t} \beta_t
\end{align}
The paper sets $\textcolor{lightgreen}{\Sigma_\theta}(x_t, t) = \sigma_t^2 \mathbf{I}$ where $\sigma_t^2$ is set to constants
$\beta_t$ or $\tilde\beta_t$.
Then,
$$\textcolor{lightgreen}{p_\theta}(x_{t-1} | x_t) = \mathcal{N}\big(x_{t-1}; \textcolor{lightgreen}{\mu_\theta}(x_t, t), \sigma_t^2 \mathbf{I} \big)$$
For given noise $\epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$ using $q(x_t|x_0)$
\begin{align}
x_t(x_0, \epsilon) &= \sqrt{\bar\alpha_t} x_0 + \sqrt{1-\bar\alpha_t}\epsilon \\
x_0 &= \frac{1}{\sqrt{\bar\alpha_t}} \Big(x_t(x_0, \epsilon) - \sqrt{1-\bar\alpha_t}\epsilon\Big)
\end{align}
This gives,
\begin{align}
L_{t-1}
&= D_{KL}(q(x_{t-1}|x_t,x_0) \Vert \textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)) \\
&= \mathbb{E}_q \Bigg[ \frac{1}{2\sigma_t^2}
\Big \Vert \tilde\mu(x_t, x_0) - \textcolor{lightgreen}{\mu_\theta}(x_t, t) \Big \Vert^2 \Bigg] \\
&= \mathbb{E}_{x_0, \epsilon} \Bigg[ \frac{1}{2\sigma_t^2}
\bigg\Vert \frac{1}{\sqrt{\alpha_t}} \Big(
x_t(x_0, \epsilon) - \frac{\beta_t}{\sqrt{1 - \bar\alpha_t}} \epsilon
\Big) - \textcolor{lightgreen}{\mu_\theta}(x_t(x_0, \epsilon), t) \bigg\Vert^2 \Bigg] \\
\end{align}
Re-parameterizing with a model to predict noise
\begin{align}
\textcolor{lightgreen}{\mu_\theta}(x_t, t) &= \tilde\mu \bigg(x_t,
\frac{1}{\sqrt{\bar\alpha_t}} \Big(x_t -
\sqrt{1-\bar\alpha_t}\textcolor{lightgreen}{\epsilon_\theta}(x_t, t) \Big) \bigg) \\
&= \frac{1}{\sqrt{\alpha_t}} \Big(x_t -
\frac{\beta_t}{\sqrt{1-\bar\alpha_t}}\textcolor{lightgreen}{\epsilon_\theta}(x_t, t) \Big)
\end{align}
where $\epsilon_\theta$ is a learned function that predicts $\epsilon$ given $(x_t, t)$.
This gives,
\begin{align}
L_{t-1}
&= \mathbb{E}_{x_0, \epsilon} \Bigg[ \frac{\beta_t^2}{2\sigma_t^2 \alpha_t (1 - \bar\alpha_t)}
\Big\Vert
\epsilon - \textcolor{lightgreen}{\epsilon_\theta}(\sqrt{\bar\alpha_t} x_0 + \sqrt{1-\bar\alpha_t}\epsilon, t)
\Big\Vert^2 \Bigg]
\end{align}
That is, we are training to predict the noise.
### Simplified loss
$$L_{\text{simple}}(\theta) = \mathbb{E}_{t,x_0, \epsilon} \Bigg[ \bigg\Vert
\epsilon - \textcolor{lightgreen}{\epsilon_\theta}(\sqrt{\bar\alpha_t} x_0 + \sqrt{1-\bar\alpha_t}\epsilon, t)
\bigg\Vert^2 \Bigg]$$
This minimizes $-\log \textcolor{lightgreen}{p_\theta}(x_0|x_1)$ when $t=1$ and $L_{t-1}$ for $t\gt1$ discarding the
weighting in $L_{t-1}$. Discarding the weights $\frac{\beta_t^2}{2\sigma_t^2 \alpha_t (1 - \bar\alpha_t)}$
increases the weight given to higher $t$ (which have higher noise levels), therefore increasing the sample quality.
This file implements the loss calculation and a basic sampling method that we use to generate images during
training.
Here is the [UNet model](unet.html) that gives $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$ and
[training code](experiment.html).
[This file](evaluate.html) can generate samples and interpolations from a trained model.
"""
from typing import Tuple, Optional
import torch
import torch.nn.functional as F
import torch.utils.data
from torch import nn
from labml_nn.diffusion.ddpm.utils import gather
class DenoiseDiffusion:
    """
    ## Denoise Diffusion

    Implements the DDPM forward process $q(x_t|x_0)$, the reverse sampling
    step $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$ and the simplified
    training loss.
    """

    def __init__(self, eps_model: nn.Module, n_steps: int, device: torch.device):
        """
        * `eps_model` is the $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$ model
        * `n_steps` is the number of time steps $T$
        * `device` is the device to place constants on
        """
        super().__init__()
        self.eps_model = eps_model

        # Linearly increasing variance schedule $\beta_1, \dots, \beta_T$
        self.beta = torch.linspace(0.0001, 0.02, n_steps).to(device)

        # $\alpha_t = 1 - \beta_t$
        self.alpha = 1. - self.beta
        # $\bar\alpha_t = \prod_{s=1}^t \alpha_s$
        self.alpha_bar = torch.cumprod(self.alpha, dim=0)
        # $T$
        self.n_steps = n_steps
        # Sampling variance, fixed to $\sigma_t^2 = \beta_t$
        self.sigma2 = self.beta

    def q_xt_x0(self, x0: torch.Tensor, t: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        #### Get the $q(x_t|x_0)$ distribution

        $$q(x_t|x_0) = \mathcal{N} \Big(x_t; \sqrt{\bar\alpha_t} x_0, (1-\bar\alpha_t) \mathbf{I} \Big)$$

        Returns the mean and the variance of the distribution.
        """
        # [gather](utils.html) $\bar\alpha_t$ for each sample's time step
        alpha_bar_t = gather(self.alpha_bar, t)
        # $\sqrt{\bar\alpha_t} x_0$
        mean = alpha_bar_t ** 0.5 * x0
        # $(1-\bar\alpha_t) \mathbf{I}$
        var = 1 - alpha_bar_t
        #
        return mean, var

    def q_sample(self, x0: torch.Tensor, t: torch.Tensor, eps: Optional[torch.Tensor] = None):
        """
        #### Sample from $q(x_t|x_0)$

        Draws $x_t = \sqrt{\bar\alpha_t} x_0 + \sqrt{1-\bar\alpha_t}\epsilon$.

        * `eps` is the noise to use; sampled from
          $\mathcal{N}(\mathbf{0}, \mathbf{I})$ when omitted
        """
        if eps is None:
            eps = torch.randn_like(x0)

        # Get the distribution parameters and reparameterize
        mean, var = self.q_xt_x0(x0, t)
        return mean + (var ** 0.5) * eps

    def p_sample(self, xt: torch.Tensor, t: torch.Tensor):
        """
        #### Sample from $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$

        \begin{align}
        \textcolor{lightgreen}{\mu_\theta}(x_t, t)
          &= \frac{1}{\sqrt{\alpha_t}} \Big(x_t -
             \frac{\beta_t}{\sqrt{1-\bar\alpha_t}}\textcolor{lightgreen}{\epsilon_\theta}(x_t, t) \Big)
        \end{align}
        """
        # Predicted noise $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$
        eps_theta = self.eps_model(xt, t)
        # [gather](utils.html) $\bar\alpha_t$ and $\alpha_t$
        alpha_bar = gather(self.alpha_bar, t)
        alpha = gather(self.alpha, t)
        # $\frac{\beta_t}{\sqrt{1-\bar\alpha_t}}$, using $\beta_t = 1 - \alpha_t$
        eps_coef = (1 - alpha) / (1 - alpha_bar) ** .5
        # Mean of the reverse distribution
        mean = 1 / (alpha ** 0.5) * (xt - eps_coef * eps_theta)
        # Fixed variance $\sigma_t^2 = \beta_t$
        var = gather(self.sigma2, t)

        # Fresh noise $\epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$ for this step
        eps = torch.randn(xt.shape, device=xt.device)
        # Sample
        return mean + (var ** .5) * eps

    def loss(self, x0: torch.Tensor, noise: Optional[torch.Tensor] = None):
        """
        #### Simplified Loss

        $$L_{\text{simple}}(\theta) = \mathbb{E}_{t,x_0, \epsilon} \Bigg[ \bigg\Vert
        \epsilon - \textcolor{lightgreen}{\epsilon_\theta}(\sqrt{\bar\alpha_t} x_0 + \sqrt{1-\bar\alpha_t}\epsilon, t)
        \bigg\Vert^2 \Bigg]$$
        """
        # A random time step for each sample in the batch
        batch_size = x0.shape[0]
        t = torch.randint(0, self.n_steps, (batch_size,), device=x0.device, dtype=torch.long)

        # Noise $\epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$
        if noise is None:
            noise = torch.randn_like(x0)

        # Noisy sample $x_t \sim q(x_t|x_0)$
        xt = self.q_sample(x0, t, eps=noise)
        # Predicted noise
        eps_theta = self.eps_model(xt, t)

        # Mean squared error between actual and predicted noise
        return F.mse_loss(noise, eps_theta)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/ddpm/evaluate.py | labml_nn/diffusion/ddpm/evaluate.py | """
---
title: Denoising Diffusion Probabilistic Models (DDPM) evaluation/sampling
summary: >
Code to generate samples from a trained
Denoising Diffusion Probabilistic Model.
---
# [Denoising Diffusion Probabilistic Models (DDPM)](index.html) evaluation/sampling
This is the code to generate images and create interpolations between given images.
"""
import numpy as np
import torch
from matplotlib import pyplot as plt
from torchvision.transforms.functional import to_pil_image, resize
from labml import experiment, monit
from labml_nn.diffusion.ddpm import DenoiseDiffusion, gather
from labml_nn.diffusion.ddpm.experiment import Configs
class Sampler:
"""
## Sampler class
"""
def __init__(self, diffusion: DenoiseDiffusion, image_channels: int, image_size: int, device: torch.device):
    """
    * `diffusion` is the `DenoiseDiffusion` instance
    * `image_channels` is the number of channels in the image
    * `image_size` is the image size
    * `device` is the device of the model
    """
    self.device = device
    self.image_size = image_size
    self.image_channels = image_channels
    self.diffusion = diffusion

    # Copy the schedule constants from the diffusion instance
    # $T$
    self.n_steps = diffusion.n_steps
    # $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$
    self.eps_model = diffusion.eps_model
    # $\beta_t$, $\alpha_t$ and $\bar\alpha_t$
    self.beta = diffusion.beta
    self.alpha = diffusion.alpha
    self.alpha_bar = diffusion.alpha_bar
    # $\bar\alpha_{t-1}$, with $\bar\alpha_0 = 1$ prepended
    alpha_bar_tm1 = torch.cat([self.alpha_bar.new_ones((1,)), self.alpha_bar[:-1]])

    # Pre-compute the coefficients of the forward-process posterior
    #
    # \begin{align}
    # q(x_{t-1}|x_t, x_0) &= \mathcal{N} \Big(x_{t-1}; \tilde\mu_t(x_t, x_0), \tilde\beta_t \mathbf{I} \Big) \\
    # \tilde\mu_t(x_t, x_0) &= \frac{\sqrt{\bar\alpha_{t-1}}\beta_t}{1 - \bar\alpha_t}x_0
    #                        + \frac{\sqrt{\alpha_t}(1 - \bar\alpha_{t-1})}{1-\bar\alpha_t}x_t \\
    # \tilde\beta_t &= \frac{1 - \bar\alpha_{t-1}}{1 - \bar\alpha_t} \beta_t
    # \end{align}

    # $$\tilde\beta_t = \frac{1 - \bar\alpha_{t-1}}{1 - \bar\alpha_t} \beta_t$$
    self.beta_tilde = self.beta * (1 - alpha_bar_tm1) / (1 - self.alpha_bar)
    # $$\frac{\sqrt{\bar\alpha_{t-1}}\beta_t}{1 - \bar\alpha_t}$$
    self.mu_tilde_coef1 = self.beta * (alpha_bar_tm1 ** 0.5) / (1 - self.alpha_bar)
    # $$\frac{\sqrt{\alpha_t}(1 - \bar\alpha_{t-1})}{1-\bar\alpha_t}$$
    self.mu_tilde_coef2 = (self.alpha ** 0.5) * (1 - alpha_bar_tm1) / (1 - self.alpha_bar)
    # Sampling variance $\sigma_t^2 = \beta_t$
    self.sigma2 = self.beta
def show_image(self, img, title=""):
    """Display a single image tensor with matplotlib"""
    # Clip to $[0, 1]$ and move to CPU; matplotlib expects channels last
    pixels = img.clip(0, 1).cpu().numpy()
    plt.imshow(pixels.transpose(1, 2, 0))
    plt.title(title)
    plt.show()
def make_video(self, frames, path="video.mp4"):
"""Helper function to create a video"""
import imageio
# 20 second video
writer = imageio.get_writer(path, fps=len(frames) // 20)
# Add each image
for f in frames:
f = f.clip(0, 1)
f = to_pil_image(resize(f, [368, 368]))
writer.append_data(np.array(f))
#
writer.close()
def sample_animation(self, n_frames: int = 1000, create_video: bool = True):
"""
#### Sample an image step-by-step using $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$
We sample an image step-by-step using $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$ and at each step
show the estimate
$$x_0 \approx \hat{x}_0 = \frac{1}{\sqrt{\bar\alpha}}
\Big( x_t - \sqrt{1 - \bar\alpha_t} \textcolor{lightgreen}{\epsilon_\theta}(x_t, t) \Big)$$
"""
# $x_T \sim p(x_T) = \mathcal{N}(x_T; \mathbf{0}, \mathbf{I})$
xt = torch.randn([1, self.image_channels, self.image_size, self.image_size], device=self.device)
# Interval to log $\hat{x}_0$
interval = self.n_steps // n_frames
# Frames for video
frames = []
# Sample $T$ steps
for t_inv in monit.iterate('Denoise', self.n_steps):
# $t$
t_ = self.n_steps - t_inv - 1
# $t$ in a tensor
t = xt.new_full((1,), t_, dtype=torch.long)
# $\textcolor{lightgreen}{\epsilon_\theta}(x_t, t)$
eps_theta = self.eps_model(xt, t)
if t_ % interval == 0:
# Get $\hat{x}_0$ and add to frames
x0 = self.p_x0(xt, t, eps_theta)
frames.append(x0[0])
if not create_video:
self.show_image(x0[0], f"{t_}")
# Sample from $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$
xt = self.p_sample(xt, t, eps_theta)
# Make video
if create_video:
self.make_video(frames)
def interpolate(self, x1: torch.Tensor, x2: torch.Tensor, lambda_: float, t_: int = 100):
"""
#### Interpolate two images $x_0$ and $x'_0$
We get $x_t \sim q(x_t|x_0)$ and $x'_t \sim q(x'_t|x_0)$.
Then interpolate to
$$\bar{x}_t = (1 - \lambda)x_t + \lambda x'_0$$
Then get
$$\bar{x}_0 \sim \textcolor{lightgreen}{p_\theta}(x_0|\bar{x}_t)$$
* `x1` is $x_0$
* `x2` is $x'_0$
* `lambda_` is $\lambda$
* `t_` is $t$
"""
# Number of samples
n_samples = x1.shape[0]
# $t$ tensor
t = torch.full((n_samples,), t_, device=self.device)
# $$\bar{x}_t = (1 - \lambda)x_t + \lambda x'_0$$
xt = (1 - lambda_) * self.diffusion.q_sample(x1, t) + lambda_ * self.diffusion.q_sample(x2, t)
# $$\bar{x}_0 \sim \textcolor{lightgreen}{p_\theta}(x_0|\bar{x}_t)$$
return self._sample_x0(xt, t_)
def interpolate_animate(self, x1: torch.Tensor, x2: torch.Tensor, n_frames: int = 100, t_: int = 100,
create_video=True):
"""
#### Interpolate two images $x_0$ and $x'_0$ and make a video
* `x1` is $x_0$
* `x2` is $x'_0$
* `n_frames` is the number of frames for the image
* `t_` is $t$
* `create_video` specifies whether to make a video or to show each frame
"""
# Show original images
self.show_image(x1, "x1")
self.show_image(x2, "x2")
# Add batch dimension
x1 = x1[None, :, :, :]
x2 = x2[None, :, :, :]
# $t$ tensor
t = torch.full((1,), t_, device=self.device)
# $x_t \sim q(x_t|x_0)$
x1t = self.diffusion.q_sample(x1, t)
# $x'_t \sim q(x'_t|x_0)$
x2t = self.diffusion.q_sample(x2, t)
frames = []
# Get frames with different $\lambda$
for i in monit.iterate('Interpolate', n_frames + 1, is_children_silent=True):
# $\lambda$
lambda_ = i / n_frames
# $$\bar{x}_t = (1 - \lambda)x_t + \lambda x'_0$$
xt = (1 - lambda_) * x1t + lambda_ * x2t
# $$\bar{x}_0 \sim \textcolor{lightgreen}{p_\theta}(x_0|\bar{x}_t)$$
x0 = self._sample_x0(xt, t_)
# Add to frames
frames.append(x0[0])
# Show frame
if not create_video:
self.show_image(x0[0], f"{lambda_ :.2f}")
# Make video
if create_video:
self.make_video(frames)
def _sample_x0(self, xt: torch.Tensor, n_steps: int):
"""
#### Sample an image using $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$
* `xt` is $x_t$
* `n_steps` is $t$
"""
# Number of sampels
n_samples = xt.shape[0]
# Iterate until $t$ steps
for t_ in monit.iterate('Denoise', n_steps):
t = n_steps - t_ - 1
# Sample from $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$
xt = self.diffusion.p_sample(xt, xt.new_full((n_samples,), t, dtype=torch.long))
# Return $x_0$
return xt
def sample(self, n_samples: int = 16):
"""
#### Generate images
"""
# $x_T \sim p(x_T) = \mathcal{N}(x_T; \mathbf{0}, \mathbf{I})$
xt = torch.randn([n_samples, self.image_channels, self.image_size, self.image_size], device=self.device)
# $$x_0 \sim \textcolor{lightgreen}{p_\theta}(x_0|x_t)$$
x0 = self._sample_x0(xt, self.n_steps)
# Show images
for i in range(n_samples):
self.show_image(x0[i])
def p_sample(self, xt: torch.Tensor, t: torch.Tensor, eps_theta: torch.Tensor):
"""
#### Sample from $\textcolor{lightgreen}{p_\theta}(x_{t-1}|x_t)$
\begin{align}
\textcolor{lightgreen}{p_\theta}(x_{t-1} | x_t) &= \mathcal{N}\big(x_{t-1};
\textcolor{lightgreen}{\mu_\theta}(x_t, t), \sigma_t^2 \mathbf{I} \big) \\
\textcolor{lightgreen}{\mu_\theta}(x_t, t)
&= \frac{1}{\sqrt{\alpha_t}} \Big(x_t -
\frac{\beta_t}{\sqrt{1-\bar\alpha_t}}\textcolor{lightgreen}{\epsilon_\theta}(x_t, t) \Big)
\end{align}
"""
# [gather](utils.html) $\bar\alpha_t$
alpha_bar = gather(self.alpha_bar, t)
# $\alpha_t$
alpha = gather(self.alpha, t)
# $\frac{\beta}{\sqrt{1-\bar\alpha_t}}$
eps_coef = (1 - alpha) / (1 - alpha_bar) ** .5
# $$\frac{1}{\sqrt{\alpha_t}} \Big(x_t -
# \frac{\beta_t}{\sqrt{1-\bar\alpha_t}}\textcolor{lightgreen}{\epsilon_\theta}(x_t, t) \Big)$$
mean = 1 / (alpha ** 0.5) * (xt - eps_coef * eps_theta)
# $\sigma^2$
var = gather(self.sigma2, t)
# $\epsilon \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$
eps = torch.randn(xt.shape, device=xt.device)
# Sample
return mean + (var ** .5) * eps
def p_x0(self, xt: torch.Tensor, t: torch.Tensor, eps: torch.Tensor):
"""
#### Estimate $x_0$
$$x_0 \approx \hat{x}_0 = \frac{1}{\sqrt{\bar\alpha}}
\Big( x_t - \sqrt{1 - \bar\alpha_t} \textcolor{lightgreen}{\epsilon_\theta}(x_t, t) \Big)$$
"""
# [gather](utils.html) $\bar\alpha_t$
alpha_bar = gather(self.alpha_bar, t)
# $$x_0 \approx \hat{x}_0 = \frac{1}{\sqrt{\bar\alpha}}
# \Big( x_t - \sqrt{1 - \bar\alpha_t} \textcolor{lightgreen}{\epsilon_\theta}(x_t, t) \Big)$$
return (xt - (1 - alpha_bar) ** 0.5 * eps) / (alpha_bar ** 0.5)
def main():
    """Generate samples"""

    # Training experiment run UUID (the run whose weights we load)
    run_uuid = "a44333ea251411ec8007d1a1762ed686"

    # Start an evaluation
    experiment.evaluate()

    # Create configs
    configs = Configs()
    # Load custom configuration of the training run
    configs_dict = experiment.load_configs(run_uuid)
    # Set configurations
    experiment.configs(configs, configs_dict)

    # Initialize
    configs.init()
    # Set PyTorch modules for saving and loading
    experiment.add_pytorch_models({'eps_model': configs.eps_model})

    # Load training experiment
    experiment.load(run_uuid)

    # Create sampler
    sampler = Sampler(diffusion=configs.diffusion,
                      image_channels=configs.image_channels,
                      image_size=configs.image_size,
                      device=configs.device)

    # Start evaluation
    with experiment.start():
        # No gradients
        with torch.no_grad():
            # Sample an image with a denoising animation
            sampler.sample_animation()

            # Disabled demo: interpolation between two dataset images.
            # Flip the guard to `True` to run it.
            if False:
                # Get some images from data
                data = next(iter(configs.data_loader)).to(configs.device)

                # Create an interpolation animation
                sampler.interpolate_animate(data[0], data[1])


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/latent_diffusion.py | labml_nn/diffusion/stable_diffusion/latent_diffusion.py | """
---
title: Latent Diffusion Models
summary: >
Annotated PyTorch implementation/tutorial of latent diffusion models from paper
High-Resolution Image Synthesis with Latent Diffusion Models
---
# Latent Diffusion Models
Latent diffusion models use an auto-encoder to map between image space and
latent space. The diffusion model works on the latent space, which makes it
a lot easier to train.
It is based on paper
[High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752).
They use a pre-trained auto-encoder and train the diffusion U-Net on the latent
space of the pre-trained auto-encoder.
For a simpler diffusion implementation refer to our [DDPM implementation](../ddpm/index.html).
We use same notations for $\alpha_t$, $\beta_t$ schedules, etc.
"""
from typing import List
import torch
import torch.nn as nn
from labml_nn.diffusion.stable_diffusion.model.autoencoder import Autoencoder
from labml_nn.diffusion.stable_diffusion.model.clip_embedder import CLIPTextEmbedder
from labml_nn.diffusion.stable_diffusion.model.unet import UNetModel
class DiffusionWrapper(nn.Module):
    """
    *A pass-through wrapper around the [U-Net](model/unet.html).

    It exists only so that our module tree matches
    [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion),
    which lets the released checkpoint weights load without remapping keys*.
    """

    def __init__(self, diffusion_model: UNetModel):
        super().__init__()
        # The wrapped U-Net noise predictor
        self.diffusion_model = diffusion_model

    def forward(self, x: torch.Tensor, time_steps: torch.Tensor, context: torch.Tensor):
        # Delegate directly to the wrapped U-Net
        eps_model = self.diffusion_model
        return eps_model(x, time_steps, context)
class LatentDiffusion(nn.Module):
    """
    ## Latent diffusion model

    This contains following components:

    * [AutoEncoder](model/autoencoder.html)
    * [U-Net](model/unet.html) with [attention](model/unet_attention.html)
    * [CLIP embeddings generator](model/clip_embedder.html)
    """
    # U-Net wrapper (named `model` to match the CompVis checkpoint layout)
    model: DiffusionWrapper
    # Auto-encoder between image space and latent space
    first_stage_model: Autoencoder
    # CLIP text encoder for conditioning
    cond_stage_model: CLIPTextEmbedder

    def __init__(self,
                 unet_model: UNetModel,
                 autoencoder: Autoencoder,
                 clip_embedder: CLIPTextEmbedder,
                 latent_scaling_factor: float,
                 n_steps: int,
                 linear_start: float,
                 linear_end: float,
                 ):
        """
        :param unet_model: is the [U-Net](model/unet.html) that predicts noise
         $\epsilon_\text{cond}(x_t, c)$, in latent space
        :param autoencoder: is the [AutoEncoder](model/autoencoder.html)
        :param clip_embedder: is the [CLIP embeddings generator](model/clip_embedder.html)
        :param latent_scaling_factor: is the scaling factor for the latent space. The encodings of
         the autoencoder are scaled by this before feeding into the U-Net.
        :param n_steps: is the number of diffusion steps $T$.
        :param linear_start: is the start of the $\beta$ schedule.
        :param linear_end: is the end of the $\beta$ schedule.
        """
        super().__init__()
        # Wrap the [U-Net](model/unet.html) to keep the same model structure as
        # [CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion).
        self.model = DiffusionWrapper(unet_model)
        # Auto-encoder and scaling factor
        self.first_stage_model = autoencoder
        self.latent_scaling_factor = latent_scaling_factor
        # [CLIP embeddings generator](model/clip_embedder.html)
        self.cond_stage_model = clip_embedder

        # Number of steps $T$
        self.n_steps = n_steps

        # $\beta$ schedule — linear in $\sqrt{\beta}$ space (the "scaled linear"
        # schedule), computed in float64 for accuracy then stored as float32.
        # Registered as a non-trainable `nn.Parameter` so it moves with the model.
        beta = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_steps, dtype=torch.float64) ** 2
        self.beta = nn.Parameter(beta.to(torch.float32), requires_grad=False)
        # $\alpha_t = 1 - \beta_t$
        alpha = 1. - beta
        # $\bar\alpha_t = \prod_{s=1}^t \alpha_s$
        alpha_bar = torch.cumprod(alpha, dim=0)
        self.alpha_bar = nn.Parameter(alpha_bar.to(torch.float32), requires_grad=False)

    @property
    def device(self):
        """
        ### Get model device

        Inferred from the first parameter of the U-Net wrapper.
        """
        return next(iter(self.model.parameters())).device

    def get_text_conditioning(self, prompts: List[str]):
        """
        ### Get [CLIP embeddings](model/clip_embedder.html) for a list of text prompts
        """
        return self.cond_stage_model(prompts)

    def autoencoder_encode(self, image: torch.Tensor):
        """
        ### Get scaled latent space representation of the image

        The encoder output is a distribution.
        We sample from that and multiply by the scaling factor.
        """
        return self.latent_scaling_factor * self.first_stage_model.encode(image).sample()

    def autoencoder_decode(self, z: torch.Tensor):
        """
        ### Get image from the latent representation

        We scale down by the scaling factor and then decode.
        """
        return self.first_stage_model.decode(z / self.latent_scaling_factor)

    def forward(self, x: torch.Tensor, t: torch.Tensor, context: torch.Tensor):
        """
        ### Predict noise

        Predict noise given the latent representation $x_t$, time step $t$, and the
        conditioning context $c$.

        $$\epsilon_\text{cond}(x_t, c)$$
        """
        return self.model(x, t, context)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/util.py | labml_nn/diffusion/stable_diffusion/util.py | """
---
title: Utility functions for stable diffusion
summary: >
Utility functions for stable diffusion
---
# Utility functions for [stable diffusion](index.html)
"""
import os
import random
from pathlib import Path
import PIL
import numpy as np
import torch
from PIL import Image
from labml import monit
from labml.logger import inspect
from labml_nn.diffusion.stable_diffusion.latent_diffusion import LatentDiffusion
from labml_nn.diffusion.stable_diffusion.model.autoencoder import Encoder, Decoder, Autoencoder
from labml_nn.diffusion.stable_diffusion.model.clip_embedder import CLIPTextEmbedder
from labml_nn.diffusion.stable_diffusion.model.unet import UNetModel
def set_seed(seed: int):
    """
    ### Set random seeds

    Seeds every RNG the project touches — Python's `random`, NumPy, and
    PyTorch (CPU and all CUDA devices) — for reproducible runs.

    :param seed: the seed value applied to all generators
    """
    # Apply the same seed to each generator, in turn
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def load_model(path: Path = None) -> LatentDiffusion:
    """
    ### Load [`LatentDiffusion` model](latent_diffusion.html)

    Builds the full network (auto-encoder, CLIP embedder, U-Net, latent
    diffusion wrapper) and loads the weights from the checkpoint at `path`.

    :param path: is the checkpoint file path
    :return: the model in `eval` mode (weights loaded with `map_location="cpu"`;
        the caller is expected to move it to the target device)
    """

    # Initialize the autoencoder
    # NOTE(review): these architecture hyper-parameters match the released
    # stable-diffusion v1 checkpoints — confirm before loading other weights.
    with monit.section('Initialize autoencoder'):
        encoder = Encoder(z_channels=4,
                          in_channels=3,
                          channels=128,
                          channel_multipliers=[1, 2, 4, 4],
                          n_resnet_blocks=2)

        decoder = Decoder(out_channels=3,
                          z_channels=4,
                          channels=128,
                          channel_multipliers=[1, 2, 4, 4],
                          n_resnet_blocks=2)

        autoencoder = Autoencoder(emb_channels=4,
                                  encoder=encoder,
                                  decoder=decoder,
                                  z_channels=4)

    # Initialize the CLIP text embedder
    with monit.section('Initialize CLIP Embedder'):
        clip_text_embedder = CLIPTextEmbedder()

    # Initialize the U-Net
    with monit.section('Initialize U-Net'):
        unet_model = UNetModel(in_channels=4,
                               out_channels=4,
                               channels=320,
                               attention_levels=[0, 1, 2],
                               n_res_blocks=2,
                               channel_multipliers=[1, 2, 4, 4],
                               n_heads=8,
                               tf_layers=1,
                               d_cond=768)

    # Initialize the Latent Diffusion model
    with monit.section('Initialize Latent Diffusion model'):
        model = LatentDiffusion(linear_start=0.00085,
                                linear_end=0.0120,
                                n_steps=1000,
                                latent_scaling_factor=0.18215,
                                autoencoder=autoencoder,
                                clip_embedder=clip_text_embedder,
                                unet_model=unet_model)

    # Load the checkpoint onto CPU to avoid requiring a GPU at load time
    with monit.section(f"Loading model from {path}"):
        checkpoint = torch.load(path, map_location="cpu")

    # Set model state. `strict=False` tolerates keys present in only one of
    # the checkpoint or the model; mismatches are reported below.
    with monit.section('Load state'):
        missing_keys, extra_keys = model.load_state_dict(checkpoint["state_dict"], strict=False)

    # Debugging output: report which keys were missing/unexpected so a bad
    # checkpoint is visible rather than silently half-loaded
    inspect(global_step=checkpoint.get('global_step', -1), missing_keys=missing_keys, extra_keys=extra_keys,
            _expand=True)

    # Switch to evaluation mode (disables dropout etc.)
    model.eval()
    return model
def load_img(path: str):
    """
    ### Load an image

    This loads an image from a file and returns a PyTorch tensor of shape
    `[1, channels, height, width]` with values in `[-1, 1]`.

    :param path: is the path of the image
    """
    # Open the file and force three RGB channels
    image = Image.open(path).convert("RGB")
    # Round the resolution down to a multiple of 32 (required by the model)
    width, height = (s - s % 32 for s in image.size)
    image = image.resize((width, height), resample=PIL.Image.LANCZOS)
    # Convert to float32 numpy and map `[0, 255]` to `[-1, 1]`
    arr = np.array(image).astype(np.float32) * (2. / 255.0) - 1
    # `[height, width, channels]` -> `[1, channels, height, width]`
    arr = arr[None].transpose(0, 3, 1, 2)
    # Wrap as a torch tensor
    return torch.from_numpy(arr)
def save_images(images: torch.Tensor, dest_path: str, prefix: str = '', img_format: str = 'jpeg'):
    """
    ### Save images

    :param images: is the tensor with images of shape `[batch_size, channels, height, width]`
    :param dest_path: is the folder to save images in
    :param prefix: is the prefix to add to file names
    :param img_format: is the image format
    """
    # Make sure the destination folder exists
    os.makedirs(dest_path, exist_ok=True)

    # Map from `[-1, 1]` to `[0, 1]` and clamp out-of-range values
    batch = torch.clamp((images + 1.0) / 2.0, min=0.0, max=1.0)
    # Move to CPU and reorder to `[batch_size, height, width, channels]`
    batch = batch.cpu().permute(0, 2, 3, 1).numpy()

    # Write each image out as `<prefix><index>.<format>`
    for idx, frame in enumerate(batch):
        pil_img = Image.fromarray((255. * frame).astype(np.uint8))
        pil_img.save(os.path.join(dest_path, f"{prefix}{idx:05}.{img_format}"), format=img_format)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/__init__.py | labml_nn/diffusion/stable_diffusion/__init__.py | """
---
title: Stable Diffusion
summary: >
Annotated PyTorch implementation/tutorial of stable diffusion.
---
# Stable Diffusion
This is based on official stable diffusion repository
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion).
We have kept the model structure same so that open sourced weights could be directly loaded.
Our implementation does not contain training code.
### [PromptArt](https://promptart.labml.ai)

We have deployed a stable diffusion based image generation service
at [promptart.labml.ai](https://promptart.labml.ai)
### [Latent Diffusion Model](latent_diffusion.html)
The core is the [Latent Diffusion Model](latent_diffusion.html).
It consists of:
* [AutoEncoder](model/autoencoder.html)
* [U-Net](model/unet.html) with [attention](model/unet_attention.html)
We have also (optionally) integrated [Flash Attention](https://github.com/HazyResearch/flash-attention)
into our [U-Net attention](model/unet_attention.html) which lets you speed up
the performance by close to 50% on an RTX A6000 GPU.
The diffusion is conditioned based on [CLIP embeddings](model/clip_embedder.html).
### [Sampling Algorithms](sampler/index.html)
We have implemented the following [sampling algorithms](sampler/index.html):
* [Denoising Diffusion Probabilistic Models (DDPM) Sampling](sampler/ddpm.html)
* [Denoising Diffusion Implicit Models (DDIM) Sampling](sampler/ddim.html)
### [Example Scripts](scripts/index.html)
Here are the image generation scripts:
* [Generate images from text prompts](scripts/text_to_image.html)
* [Generate images based on a given image, guided by a prompt](scripts/image_to_image.html)
* [Modify parts of a given image based on a text prompt](scripts/in_paint.html)
#### [Utilities](util.html)
[`util.py`](util.html) defines the utility functions.
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/scripts/in_paint.py | labml_nn/diffusion/stable_diffusion/scripts/in_paint.py | """
---
title: In-paint images using stable diffusion with a prompt
summary: >
In-paint images using stable diffusion with a prompt
---
# In-paint images using [stable diffusion](../index.html) with a prompt
"""
import argparse
from pathlib import Path
from typing import Optional
import torch
from labml import lab, monit
from labml_nn.diffusion.stable_diffusion.latent_diffusion import LatentDiffusion
from labml_nn.diffusion.stable_diffusion.sampler import DiffusionSampler
from labml_nn.diffusion.stable_diffusion.sampler.ddim import DDIMSampler
from labml_nn.diffusion.stable_diffusion.util import load_model, save_images, load_img, set_seed
class InPaint:
    """
    ### Image in-painting class
    """
    model: LatentDiffusion
    sampler: DiffusionSampler

    def __init__(self, *, checkpoint_path: Path,
                 ddim_steps: int = 50,
                 ddim_eta: float = 0.0):
        """
        :param checkpoint_path: is the path of the checkpoint
        :param ddim_steps: is the number of sampling steps
        :param ddim_eta: is the [DDIM sampling](../sampler/ddim.html) $\eta$ constant
        """
        self.ddim_steps = ddim_steps

        # Load [latent diffusion model](../latent_diffusion.html)
        self.model = load_model(checkpoint_path)
        # Get device
        self.device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
        # Move the model to device
        self.model.to(self.device)

        # Initialize [DDIM sampler](../sampler/ddim.html)
        self.sampler = DDIMSampler(self.model,
                                   n_steps=ddim_steps,
                                   ddim_eta=ddim_eta)

    @torch.no_grad()
    def __call__(self, *,
                 dest_path: str,
                 orig_img: str,
                 strength: float,
                 batch_size: int = 3,
                 prompt: str,
                 uncond_scale: float = 5.0,
                 mask: Optional[torch.Tensor] = None,
                 ):
        """
        :param dest_path: is the path to store the generated images
        :param orig_img: is the image to transform
        :param strength: specifies how much of the original image should not be preserved
        :param batch_size: is the number of images to generate in a batch
        :param prompt: is the prompt to generate images with
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) - (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param mask: is an optional latent-space mask with the same shape as the
            encoded image; if omitted, a default mask covering the bottom half is
            used (presumably marking the region to keep painted — confirm against
            the sampler's `paint` semantics)
        """
        # Make a batch of prompts
        prompts = batch_size * [prompt]
        # Load image
        orig_image = load_img(orig_img).to(self.device)
        # Encode the image in the latent space and make `batch_size` copies of it
        orig = self.model.autoencoder_encode(orig_image).repeat(batch_size, 1, 1, 1)

        # If `mask` is not provided,
        # we set a sample mask to preserve the bottom half of the image
        if mask is None:
            mask = torch.zeros_like(orig, device=self.device)
            mask[:, :, mask.shape[2] // 2:, :] = 1.
        else:
            mask = mask.to(self.device)

        # Noise to diffuse the original image with (fixed, so `paint` can re-noise
        # the preserved region consistently)
        orig_noise = torch.randn(orig.shape, device=self.device)

        # Get the number of steps to diffuse the original
        assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]'
        t_index = int(strength * self.ddim_steps)

        # AMP auto casting
        with torch.cuda.amp.autocast():
            # If the unconditional guidance scale is not $1$, get the embeddings
            # for empty prompts (no conditioning).
            if uncond_scale != 1.0:
                un_cond = self.model.get_text_conditioning(batch_size * [""])
            else:
                un_cond = None
            # Get the prompt embeddings
            cond = self.model.get_text_conditioning(prompts)
            # Add noise to the original image
            x = self.sampler.q_sample(orig, t_index, noise=orig_noise)
            # Reconstruct from the noisy image, while preserving the masked area
            x = self.sampler.paint(x, cond, t_index,
                                   orig=orig,
                                   mask=mask,
                                   orig_noise=orig_noise,
                                   uncond_scale=uncond_scale,
                                   uncond_cond=un_cond)
            # Decode the image from the [autoencoder](../model/autoencoder.html)
            images = self.model.autoencoder_decode(x)

        # Save images
        save_images(images, dest_path, 'paint_')
def main():
    """
    ### CLI

    Parses command-line arguments, builds an `InPaint` instance with the
    stable-diffusion v1-4 checkpoint, and runs one in-painting pass.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a cute monkey playing guitar",
        help="the prompt to render"
    )

    parser.add_argument(
        "--orig-img",
        type=str,
        nargs="?",
        help="path to the input image"
    )

    parser.add_argument("--batch_size", type=int, default=4, help="batch size", )
    parser.add_argument("--steps", type=int, default=50, help="number of sampling steps")

    parser.add_argument("--scale", type=float, default=5.0,
                        help="unconditional guidance scale: "
                             "eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))")

    parser.add_argument("--strength", type=float, default=0.75,
                        help="strength for noise: "
                             " 1.0 corresponds to full destruction of information in init image")

    opt = parser.parse_args()
    # Fixed seed so repeated runs are reproducible
    set_seed(42)

    # Build the in-painter; the checkpoint is expected under the lab data path
    in_paint = InPaint(checkpoint_path=lab.get_data_path() / 'stable-diffusion' / 'sd-v1-4.ckpt',
                       ddim_steps=opt.steps)

    # Run one in-painting pass and write the results to `outputs/`
    with monit.section('Generate'):
        in_paint(dest_path='outputs',
                 orig_img=opt.orig_img,
                 strength=opt.strength,
                 batch_size=opt.batch_size,
                 prompt=opt.prompt,
                 uncond_scale=opt.scale)


#
if __name__ == "__main__":
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/scripts/text_to_image.py | labml_nn/diffusion/stable_diffusion/scripts/text_to_image.py | """
---
title: Generate images using stable diffusion with a prompt
summary: >
Generate images using stable diffusion with a prompt
---
# Generate images using [stable diffusion](../index.html) with a prompt
"""
import argparse
import os
from pathlib import Path
import torch
from labml import lab, monit
from labml_nn.diffusion.stable_diffusion.latent_diffusion import LatentDiffusion
from labml_nn.diffusion.stable_diffusion.sampler.ddim import DDIMSampler
from labml_nn.diffusion.stable_diffusion.sampler.ddpm import DDPMSampler
from labml_nn.diffusion.stable_diffusion.util import load_model, save_images, set_seed
class Txt2Img:
    """
    ### Text to image class
    """
    model: LatentDiffusion

    def __init__(self, *,
                 checkpoint_path: Path,
                 sampler_name: str,
                 n_steps: int = 50,
                 ddim_eta: float = 0.0,
                 ):
        """
        :param checkpoint_path: is the path of the checkpoint
        :param sampler_name: is the name of the [sampler](../sampler/index.html);
            either `'ddim'` or `'ddpm'`
        :param n_steps: is the number of sampling steps
        :param ddim_eta: is the [DDIM sampling](../sampler/ddim.html) $\eta$ constant
        :raises ValueError: if `sampler_name` is not a known sampler
        """
        # Load [latent diffusion model](../latent_diffusion.html)
        self.model = load_model(checkpoint_path)
        # Get device
        self.device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
        # Move the model to device
        self.model.to(self.device)

        # Initialize [sampler](../sampler/index.html)
        if sampler_name == 'ddim':
            self.sampler = DDIMSampler(self.model,
                                       n_steps=n_steps,
                                       ddim_eta=ddim_eta)
        elif sampler_name == 'ddpm':
            self.sampler = DDPMSampler(self.model)
        else:
            # Fail fast instead of leaving `self.sampler` unset, which would
            # otherwise surface later as an `AttributeError` in `__call__`
            raise ValueError(f"Unknown sampler: {sampler_name!r}; expected 'ddim' or 'ddpm'")

    @torch.no_grad()
    def __call__(self, *,
                 dest_path: str,
                 batch_size: int = 3,
                 prompt: str,
                 h: int = 512, w: int = 512,
                 uncond_scale: float = 7.5,
                 ):
        """
        :param dest_path: is the path to store the generated images
        :param batch_size: is the number of images to generate in a batch
        :param prompt: is the prompt to generate images with
        :param h: is the height of the image
        :param w: is the width of the image
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) - (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        """
        # Number of channels in the image
        c = 4
        # Image to latent space resolution reduction
        f = 8

        # Make a batch of prompts
        prompts = batch_size * [prompt]

        # AMP auto casting
        with torch.cuda.amp.autocast():
            # If the unconditional guidance scale is not $1$, get the embeddings
            # for empty prompts (no conditioning).
            if uncond_scale != 1.0:
                un_cond = self.model.get_text_conditioning(batch_size * [""])
            else:
                un_cond = None
            # Get the prompt embeddings
            cond = self.model.get_text_conditioning(prompts)
            # [Sample in the latent space](../sampler/index.html).
            # `x` will be of shape `[batch_size, c, h / f, w / f]`
            x = self.sampler.sample(cond=cond,
                                    shape=[batch_size, c, h // f, w // f],
                                    uncond_scale=uncond_scale,
                                    uncond_cond=un_cond)
            # Decode the image from the [autoencoder](../model/autoencoder.html)
            images = self.model.autoencoder_decode(x)

        # Save images
        save_images(images, dest_path, 'txt_')
def main():
    """
    ### CLI

    Parses command-line arguments, builds a `Txt2Img` instance with the
    stable-diffusion v1-4 checkpoint, and generates images for a prompt.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a virus monster playing guitar",
        help="the prompt to render"
    )

    parser.add_argument("--batch_size", type=int, default=4, help="batch size")

    parser.add_argument(
        '--sampler',
        dest='sampler_name',
        choices=['ddim', 'ddpm'],
        default='ddim',
        help=f'Set the sampler.',
    )

    parser.add_argument("--flash", action='store_true', help="whether to use flash attention")

    parser.add_argument("--steps", type=int, default=50, help="number of sampling steps")

    parser.add_argument("--scale", type=float, default=7.5,
                        help="unconditional guidance scale: "
                             "eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))")

    opt = parser.parse_args()

    # Fixed seed so repeated runs are reproducible
    set_seed(42)

    # Set flash attention. Imported here (not at module top) so the flag is
    # applied before any attention layers run.
    from labml_nn.diffusion.stable_diffusion.model.unet_attention import CrossAttention
    CrossAttention.use_flash_attention = opt.flash

    # Build the generator; the checkpoint is expected under the lab data path
    txt2img = Txt2Img(checkpoint_path=lab.get_data_path() / 'stable-diffusion' / 'sd-v1-4.ckpt',
                      sampler_name=opt.sampler_name,
                      n_steps=opt.steps)

    # Generate and write the results to `outputs/`
    with monit.section('Generate'):
        txt2img(dest_path='outputs',
                batch_size=opt.batch_size,
                prompt=opt.prompt,
                uncond_scale=opt.scale)


#
if __name__ == "__main__":
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/scripts/image_to_image.py | labml_nn/diffusion/stable_diffusion/scripts/image_to_image.py | """
---
title: Generate images using stable diffusion with a prompt from a given image
summary: >
Generate images using stable diffusion with a prompt from a given image
---
# Generate images using [stable diffusion](../index.html) with a prompt from a given image
"""
import argparse
from pathlib import Path
import torch
from labml import lab, monit
from labml_nn.diffusion.stable_diffusion.sampler.ddim import DDIMSampler
from labml_nn.diffusion.stable_diffusion.util import load_model, load_img, save_images, set_seed
class Img2Img:
    """
    ### Image to image class
    """

    def __init__(self, *, checkpoint_path: Path,
                 ddim_steps: int = 50,
                 ddim_eta: float = 0.0):
        """
        :param checkpoint_path: is the path of the checkpoint
        :param ddim_steps: is the number of sampling steps
        :param ddim_eta: is the [DDIM sampling](../sampler/ddim.html) $\eta$ constant
        """
        self.ddim_steps = ddim_steps

        # Load [latent diffusion model](../latent_diffusion.html)
        self.model = load_model(checkpoint_path)
        # Get device
        self.device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
        # Move the model to device
        self.model.to(self.device)

        # Initialize [DDIM sampler](../sampler/ddim.html)
        self.sampler = DDIMSampler(self.model,
                                   n_steps=ddim_steps,
                                   ddim_eta=ddim_eta)

    @torch.no_grad()
    def __call__(self, *,
                 dest_path: str,
                 orig_img: str,
                 strength: float,
                 batch_size: int = 3,
                 prompt: str,
                 uncond_scale: float = 5.0,
                 ):
        """
        :param dest_path: is the path to store the generated images
        :param orig_img: is the image to transform
        :param strength: specifies how much of the original image should not be preserved
        :param batch_size: is the number of images to generate in a batch
        :param prompt: is the prompt to generate images with
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) - (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        """
        # Make a batch of prompts
        prompts = batch_size * [prompt]
        # Load image
        orig_image = load_img(orig_img).to(self.device)
        # Encode the image in the latent space and make `batch_size` copies of it
        orig = self.model.autoencoder_encode(orig_image).repeat(batch_size, 1, 1, 1)

        # Get the number of steps to diffuse the original
        assert 0. <= strength <= 1., 'can only work with strength in [0.0, 1.0]'
        t_index = int(strength * self.ddim_steps)

        # AMP auto casting
        with torch.cuda.amp.autocast():
            # If the unconditional guidance scale is not $1$, get the embeddings
            # for empty prompts (no conditioning).
            if uncond_scale != 1.0:
                un_cond = self.model.get_text_conditioning(batch_size * [""])
            else:
                un_cond = None
            # Get the prompt embeddings
            cond = self.model.get_text_conditioning(prompts)
            # Add noise to the original image. Unlike `InPaint`, no explicit
            # noise tensor is passed — presumably `q_sample` draws fresh noise
            # internally; confirm against the sampler implementation.
            x = self.sampler.q_sample(orig, t_index)
            # Reconstruct from the noisy image
            x = self.sampler.paint(x, cond, t_index,
                                   uncond_scale=uncond_scale,
                                   uncond_cond=un_cond)
            # Decode the image from the [autoencoder](../model/autoencoder.html)
            images = self.model.autoencoder_decode(x)

        # Save images
        save_images(images, dest_path, 'img_')
def main():
    """
    ### CLI

    Parse command line options and run image-to-image generation.
    """
    cli = argparse.ArgumentParser()
    # Prompt and input image
    cli.add_argument(
        "--prompt",
        type=str,
        nargs="?",
        default="a painting of a cute monkey playing guitar",
        help="the prompt to render"
    )
    cli.add_argument(
        "--orig-img",
        type=str,
        nargs="?",
        help="path to the input image"
    )
    # Sampling options
    cli.add_argument("--batch_size", type=int, default=4, help="batch size", )
    cli.add_argument("--steps", type=int, default=50, help="number of ddim sampling steps")
    cli.add_argument("--scale", type=float, default=5.0,
                     help="unconditional guidance scale: "
                          "eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))")
    cli.add_argument("--strength", type=float, default=0.75,
                     help="strength for noise: "
                          " 1.0 corresponds to full destruction of information in init image")
    args = cli.parse_args()

    # Fix the random seed for reproducibility
    set_seed(42)

    # Load the stable diffusion checkpoint and build the sampler
    img2img = Img2Img(checkpoint_path=lab.get_data_path() / 'stable-diffusion' / 'sd-v1-4.ckpt',
                      ddim_steps=args.steps)

    # Run generation
    with monit.section('Generate'):
        img2img(
            dest_path='outputs',
            orig_img=args.orig_img,
            strength=args.strength,
            batch_size=args.batch_size,
            prompt=args.prompt,
            uncond_scale=args.scale)


#
if __name__ == "__main__":
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/scripts/__init__.py | labml_nn/diffusion/stable_diffusion/scripts/__init__.py | """
---
title: Scripts to show example usages of stable diffusion
summary: >
Annotated PyTorch implementation/tutorial of example usages of stable diffusion
---
# Scripts to show example usages of [stable diffusion](../index.html)
* [Prompt to image diffusion](text_to_image.html)
* [Image to image diffusion](image_to_image.html)
* [In-painting](in_paint.html)
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/model/unet.py | labml_nn/diffusion/stable_diffusion/model/unet.py | """
---
title: U-Net for Stable Diffusion
summary: >
Annotated PyTorch implementation/tutorial of the U-Net in stable diffusion.
---
# U-Net for [Stable Diffusion](../index.html)
This implements the U-Net that
gives $\epsilon_\text{cond}(x_t, c)$
We have kept to the model definition and naming unchanged from
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion)
so that we can load the checkpoints directly.
"""
import math
from typing import List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from labml_nn.diffusion.stable_diffusion.model.unet_attention import SpatialTransformer
class UNetModel(nn.Module):
    """
    ## U-Net model

    Predicts the noise $\epsilon_\text{cond}(x_t, c)$ given the noisy input `x`,
    the diffusion time steps, and the conditional embeddings `cond`.
    """

    def __init__(
            self, *,
            in_channels: int,
            out_channels: int,
            channels: int,
            n_res_blocks: int,
            attention_levels: List[int],
            channel_multipliers: List[int],
            n_heads: int,
            tf_layers: int = 1,
            d_cond: int = 768):
        """
        :param in_channels: is the number of channels in the input feature map
        :param out_channels: is the number of channels in the output feature map
        :param channels: is the base channel count for the model
        :param n_res_blocks: number of residual blocks at each level
        :param attention_levels: are the levels at which attention should be performed
        :param channel_multipliers: are the multiplicative factors for number of channels for each level
        :param n_heads: is the number of attention heads in the transformers
        :param tf_layers: is the number of transformer layers in the transformers
        :param d_cond: is the size of the conditional embedding in the transformers
        """
        super().__init__()
        self.channels = channels

        # Number of levels
        levels = len(channel_multipliers)
        # Size of the time step embeddings
        d_time_emb = channels * 4
        # MLP that maps the sinusoidal embedding (of size `channels`) to `d_time_emb`
        self.time_embed = nn.Sequential(
            nn.Linear(channels, d_time_emb),
            nn.SiLU(),
            nn.Linear(d_time_emb, d_time_emb),
        )

        # Input half of the U-Net
        self.input_blocks = nn.ModuleList()
        # Initial $3 \times 3$ convolution that maps the input to `channels`.
        # The blocks are wrapped in `TimestepEmbedSequential` module because
        # different modules have different forward function signatures;
        # for example, convolution only accepts the feature map and
        # residual blocks accept the feature map and time embedding.
        # `TimestepEmbedSequential` calls them accordingly.
        self.input_blocks.append(TimestepEmbedSequential(
            nn.Conv2d(in_channels, channels, 3, padding=1)))
        # Number of channels at each block in the input half of U-Net
        input_block_channels = [channels]
        # Number of channels at each level
        channels_list = [channels * m for m in channel_multipliers]
        # Prepare levels
        for i in range(levels):
            # Add the residual blocks and attentions
            for _ in range(n_res_blocks):
                # Residual block maps from previous number of channels to the number of
                # channels in the current level
                layers = [ResBlock(channels, d_time_emb, out_channels=channels_list[i])]
                channels = channels_list[i]
                # Add transformer
                if i in attention_levels:
                    layers.append(SpatialTransformer(channels, n_heads, tf_layers, d_cond))
                # Add them to the input half of the U-Net and keep track of the number of channels of
                # its output
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                input_block_channels.append(channels)
            # Down sample at all levels except last
            if i != levels - 1:
                self.input_blocks.append(TimestepEmbedSequential(DownSample(channels)))
                input_block_channels.append(channels)

        # The middle of the U-Net
        self.middle_block = TimestepEmbedSequential(
            ResBlock(channels, d_time_emb),
            SpatialTransformer(channels, n_heads, tf_layers, d_cond),
            ResBlock(channels, d_time_emb),
        )

        # Second half of the U-Net
        self.output_blocks = nn.ModuleList([])
        # Prepare levels in reverse order
        for i in reversed(range(levels)):
            # Add the residual blocks and attentions
            for j in range(n_res_blocks + 1):
                # Residual block maps from previous number of channels plus the
                # skip connections from the input half of U-Net to the number of
                # channels in the current level.
                layers = [ResBlock(channels + input_block_channels.pop(), d_time_emb, out_channels=channels_list[i])]
                channels = channels_list[i]
                # Add transformer
                if i in attention_levels:
                    layers.append(SpatialTransformer(channels, n_heads, tf_layers, d_cond))
                # Up-sample at every level after last residual block
                # except the last one.
                # Note that we are iterating in reverse; i.e. `i == 0` is the last.
                if i != 0 and j == n_res_blocks:
                    layers.append(UpSample(channels))
                # Add to the output half of the U-Net
                self.output_blocks.append(TimestepEmbedSequential(*layers))

        # Final normalization and $3 \times 3$ convolution
        self.out = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv2d(channels, out_channels, 3, padding=1),
        )

    def time_step_embedding(self, time_steps: torch.Tensor, max_period: int = 10000) -> torch.Tensor:
        """
        ## Create sinusoidal time step embeddings

        :param time_steps: are the time steps of shape `[batch_size]`
        :param max_period: controls the minimum frequency of the embeddings.
        :return: embeddings of shape `[batch_size, self.channels]`
        """
        # $\frac{c}{2}$; half the channels are sin and the other half is cos,
        half = self.channels // 2
        # $\frac{1}{10000^{\frac{2i}{c}}}$
        frequencies = torch.exp(
            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
        ).to(device=time_steps.device)
        # $\frac{t}{10000^{\frac{2i}{c}}}$
        args = time_steps[:, None].float() * frequencies[None]
        # $\cos\Bigg(\frac{t}{10000^{\frac{2i}{c}}}\Bigg)$ and $\sin\Bigg(\frac{t}{10000^{\frac{2i}{c}}}\Bigg)$;
        # `cos` comes first, kept this way to stay compatible with the original checkpoints
        return torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

    def forward(self, x: torch.Tensor, time_steps: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        """
        :param x: is the input feature map of shape `[batch_size, channels, width, height]`
        :param time_steps: are the time steps of shape `[batch_size]`
        :param cond: conditioning of shape `[batch_size, n_cond, d_cond]`
        :return: the predicted noise of shape `[batch_size, out_channels, width, height]`
        """
        # To store the input half outputs for skip connections
        x_input_block = []

        # Get time step embeddings
        t_emb = self.time_step_embedding(time_steps)
        t_emb = self.time_embed(t_emb)

        # Input half of the U-Net
        for module in self.input_blocks:
            x = module(x, t_emb, cond)
            x_input_block.append(x)
        # Middle of the U-Net
        x = self.middle_block(x, t_emb, cond)
        # Output half of the U-Net; the skip connections are consumed in reverse order
        for module in self.output_blocks:
            x = torch.cat([x, x_input_block.pop()], dim=1)
            x = module(x, t_emb, cond)

        # Final normalization and $3 \times 3$ convolution
        return self.out(x)
class TimestepEmbedSequential(nn.Sequential):
    """
    ### Sequential block for modules with different inputs

    Composes heterogeneous modules (`ResBlock`, `SpatialTransformer`,
    plain modules such as convolutions) and calls each one with the
    arguments its signature expects.
    """

    def forward(self, x, t_emb, cond=None):
        # Dispatch on module type to pick the matching call signature
        for module in self:
            if isinstance(module, ResBlock):
                # Residual blocks also take the time step embedding
                x = module(x, t_emb)
            elif isinstance(module, SpatialTransformer):
                # Transformers also take the conditional embeddings
                x = module(x, cond)
            else:
                # Everything else only takes the feature map
                x = module(x)
        return x
class UpSample(nn.Module):
    """
    ### Up-sampling layer

    Doubles the spatial resolution with nearest-neighbor interpolation
    followed by a $3 \times 3$ convolution.
    """

    def __init__(self, channels: int):
        """
        :param channels: is the number of channels
        """
        super().__init__()
        # $3 \times 3$ convolution; keeps the channel count unchanged
        self.conv = nn.Conv2d(channels, channels, 3, padding=1)

    def forward(self, x: torch.Tensor):
        """
        :param x: is the input feature map with shape `[batch_size, channels, height, width]`
        """
        # Nearest-neighbor up-sampling by a factor of $2$, then convolve
        upsampled = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(upsampled)
class DownSample(nn.Module):
    """
    ## Down-sampling layer

    Halves the spatial resolution with a strided $3 \times 3$ convolution.
    """

    def __init__(self, channels: int):
        """
        :param channels: is the number of channels
        """
        super().__init__()
        # Stride $2$ halves the height and width
        self.op = nn.Conv2d(channels, channels, 3, stride=2, padding=1)

    def forward(self, x: torch.Tensor):
        """
        :param x: is the input feature map with shape `[batch_size, channels, height, width]`
        """
        return self.op(x)
class ResBlock(nn.Module):
    """
    ## ResNet Block

    A residual block conditioned on the diffusion time step embedding.
    """

    def __init__(self, channels: int, d_t_emb: int, *, out_channels=None):
        """
        :param channels: the number of input channels
        :param d_t_emb: the size of timestep embeddings
        :param out_channels: is the number of out channels. defaults to `channels`.
        """
        super().__init__()
        # Fall back to the input channel count when not given
        out_channels = channels if out_channels is None else out_channels

        # First normalization, activation and convolution
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv2d(channels, out_channels, 3, padding=1),
        )
        # Projection of the time step embedding to `out_channels`
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(d_t_emb, out_channels),
        )
        # Output normalization, activation, dropout and convolution
        self.out_layers = nn.Sequential(
            normalization(out_channels),
            nn.SiLU(),
            nn.Dropout(0.),
            nn.Conv2d(out_channels, out_channels, 3, padding=1)
        )
        # Residual path; a $1 \times 1$ convolution when the channel counts differ
        self.skip_connection = (nn.Identity() if out_channels == channels
                                else nn.Conv2d(channels, out_channels, 1))

    def forward(self, x: torch.Tensor, t_emb: torch.Tensor):
        """
        :param x: is the input feature map with shape `[batch_size, channels, height, width]`
        :param t_emb: is the time step embeddings of shape `[batch_size, d_t_emb]`
        """
        # First half of the block
        out = self.in_layers(x)
        # Project the time step embedding and add it per channel
        emb = self.emb_layers(t_emb).type(out.dtype)
        out = out + emb[:, :, None, None]
        # Second half of the block
        out = self.out_layers(out)
        # Residual connection
        return self.skip_connection(x) + out
class GroupNorm32(nn.GroupNorm):
    """
    ### Group normalization with float32 casting

    Runs the normalization in `float32` and casts the result back to the
    input dtype.
    """

    def forward(self, x):
        normalized = super().forward(x.float())
        return normalized.type(x.dtype)
def normalization(channels):
    """
    ### Group normalization

    This is a helper function that creates a [`GroupNorm32`](#GroupNorm32)
    with a fixed group count.
    """
    # All normalization layers in this U-Net use 32 groups
    n_groups = 32
    return GroupNorm32(n_groups, channels)
def _test_time_embeddings():
    """
    Visualize a few dimensions of the sinusoidal time step embeddings.
    """
    import matplotlib.pyplot as plt

    # Embedding dimensions to plot
    dims = [50, 100, 190, 260]

    plt.figure(figsize=(15, 5))
    # A minimal model; only `channels` matters for the embedding
    model = UNetModel(in_channels=1, out_channels=1, channels=320, n_res_blocks=1, attention_levels=[],
                      channel_multipliers=[],
                      n_heads=1, tf_layers=1, d_cond=1)
    # Embeddings for time steps $0 \dots 999$
    emb = model.time_step_embedding(torch.arange(0, 1000))
    plt.plot(np.arange(1000), emb[:, dims].numpy())
    plt.legend(["dim %d" % p for p in dims])
    plt.title("Time embeddings")
    plt.show()


#
if __name__ == '__main__':
    _test_time_embeddings()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/model/autoencoder.py | labml_nn/diffusion/stable_diffusion/model/autoencoder.py | """
---
title: Autoencoder for Stable Diffusion
summary: >
Annotated PyTorch implementation/tutorial of the autoencoder
for stable diffusion.
---
# Autoencoder for [Stable Diffusion](../index.html)
This implements the auto-encoder model used to map between image space and latent space.
We have kept to the model definition and naming unchanged from
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion)
so that we can load the checkpoints directly.
"""
from typing import List
import torch
import torch.nn.functional as F
from torch import nn
class Autoencoder(nn.Module):
    """
    ## Autoencoder

    Maps images to a Gaussian latent distribution with the encoder, and maps
    latents back to images with the decoder.
    """

    def __init__(self, encoder: 'Encoder', decoder: 'Decoder', emb_channels: int, z_channels: int):
        """
        :param encoder: is the encoder
        :param decoder: is the decoder
        :param emb_channels: is the number of dimensions in the quantized embedding space
        :param z_channels: is the number of channels in the embedding space
        """
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        # Maps encoder output to the moments (mean and log variance)
        # of the quantized embedding space
        self.quant_conv = nn.Conv2d(2 * z_channels, 2 * emb_channels, 1)
        # Maps a quantized embedding back to the decoder's embedding space
        self.post_quant_conv = nn.Conv2d(emb_channels, z_channels, 1)

    def encode(self, img: torch.Tensor) -> 'GaussianDistribution':
        """
        ### Encode images to latent representation

        :param img: is the image tensor with shape `[batch_size, img_channels, img_height, img_width]`
        """
        # Embeddings of shape `[batch_size, z_channels * 2, z_height, z_height]`,
        # mapped to the quantized embedding space moments
        moments = self.quant_conv(self.encoder(img))
        # Wrap the moments in a Gaussian distribution
        return GaussianDistribution(moments)

    def decode(self, z: torch.Tensor):
        """
        ### Decode images from latent representation

        :param z: is the latent representation with shape `[batch_size, emb_channels, z_height, z_height]`
        """
        # Map from the quantized representation to the embedding space,
        # then decode to an image of shape `[batch_size, channels, height, width]`
        return self.decoder(self.post_quant_conv(z))
class Encoder(nn.Module):
    """
    ## Encoder module

    Maps an image to the moments (mean and log variance) of its latent
    representation.
    """

    def __init__(self, *, channels: int, channel_multipliers: List[int], n_resnet_blocks: int,
                 in_channels: int, z_channels: int):
        """
        :param channels: is the number of channels in the first convolution layer
        :param channel_multipliers: are the multiplicative factors for the number of channels in the
            subsequent blocks
        :param n_resnet_blocks: is the number of resnet layers at each resolution
        :param in_channels: is the number of channels in the image
        :param z_channels: is the number of channels in the embedding space
        """
        super().__init__()

        # Number of blocks of different resolutions.
        # The resolution is halved at the end of each top level block
        n_resolutions = len(channel_multipliers)

        # Initial $3 \times 3$ convolution layer that maps the image to `channels`
        self.conv_in = nn.Conv2d(in_channels, channels, 3, stride=1, padding=1)

        # Number of channels in each top level block
        channels_list = [m * channels for m in [1] + channel_multipliers]

        # List of top-level blocks
        self.down = nn.ModuleList()
        # Create top-level blocks
        for i in range(n_resolutions):
            # Each top level block consists of multiple ResNet Blocks and down-sampling
            resnet_blocks = nn.ModuleList()
            # Add ResNet Blocks; `channels` is updated as blocks change the channel count
            for _ in range(n_resnet_blocks):
                resnet_blocks.append(ResnetBlock(channels, channels_list[i + 1]))
                channels = channels_list[i + 1]
            # Top-level block; attribute names (`block`, `downsample`) match the checkpoint keys
            down = nn.Module()
            down.block = resnet_blocks
            # Down-sampling at the end of each top level block except the last
            if i != n_resolutions - 1:
                down.downsample = DownSample(channels)
            else:
                down.downsample = nn.Identity()
            #
            self.down.append(down)

        # Final ResNet blocks with attention
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(channels, channels)
        self.mid.attn_1 = AttnBlock(channels)
        self.mid.block_2 = ResnetBlock(channels, channels)

        # Map to embedding space with a $3 \times 3$ convolution;
        # the output has `2 * z_channels` channels (mean and log variance)
        self.norm_out = normalization(channels)
        self.conv_out = nn.Conv2d(channels, 2 * z_channels, 3, stride=1, padding=1)

    def forward(self, img: torch.Tensor) -> torch.Tensor:
        """
        :param img: is the image tensor with shape `[batch_size, img_channels, img_height, img_width]`
        :return: the moments tensor of shape `[batch_size, 2 * z_channels, z_height, z_width]`
        """
        # Map to `channels` with the initial convolution
        x = self.conv_in(img)

        # Top-level blocks
        for down in self.down:
            # ResNet Blocks
            for block in down.block:
                x = block(x)
            # Down-sampling
            x = down.downsample(x)

        # Final ResNet blocks with attention
        x = self.mid.block_1(x)
        x = self.mid.attn_1(x)
        x = self.mid.block_2(x)

        # Normalize and map to embedding space
        x = self.norm_out(x)
        x = swish(x)
        x = self.conv_out(x)

        #
        return x
class Decoder(nn.Module):
    """
    ## Decoder module

    Maps a latent representation back to image space.
    """

    def __init__(self, *, channels: int, channel_multipliers: List[int], n_resnet_blocks: int,
                 out_channels: int, z_channels: int):
        """
        :param channels: is the number of channels in the final convolution layer
        :param channel_multipliers: are the multiplicative factors for the number of channels in the
            previous blocks, in reverse order
        :param n_resnet_blocks: is the number of resnet layers at each resolution
        :param out_channels: is the number of channels in the image
        :param z_channels: is the number of channels in the embedding space
        """
        super().__init__()

        # Number of blocks of different resolutions.
        # The resolution is halved at the end of each top level block
        num_resolutions = len(channel_multipliers)

        # Number of channels in each top level block, in the reverse order
        channels_list = [m * channels for m in channel_multipliers]

        # Number of channels in the top-level block
        channels = channels_list[-1]

        # Initial $3 \times 3$ convolution layer that maps the embedding space to `channels`
        self.conv_in = nn.Conv2d(z_channels, channels, 3, stride=1, padding=1)

        # ResNet blocks with attention
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(channels, channels)
        self.mid.attn_1 = AttnBlock(channels)
        self.mid.block_2 = ResnetBlock(channels, channels)

        # List of top-level blocks
        self.up = nn.ModuleList()
        # Create top-level blocks, from the lowest resolution upwards
        for i in reversed(range(num_resolutions)):
            # Each top level block consists of multiple ResNet Blocks and up-sampling
            resnet_blocks = nn.ModuleList()
            # Add ResNet Blocks; note there is one extra block per level compared to the encoder
            for _ in range(n_resnet_blocks + 1):
                resnet_blocks.append(ResnetBlock(channels, channels_list[i]))
                channels = channels_list[i]
            # Top-level block; attribute names (`block`, `upsample`) match the checkpoint keys
            up = nn.Module()
            up.block = resnet_blocks
            # Up-sampling at the end of each top level block except the first
            if i != 0:
                up.upsample = UpSample(channels)
            else:
                up.upsample = nn.Identity()
            # Prepend to be consistent with the checkpoint
            self.up.insert(0, up)

        # Map to image space with a $3 \times 3$ convolution
        self.norm_out = normalization(channels)
        self.conv_out = nn.Conv2d(channels, out_channels, 3, stride=1, padding=1)

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        """
        :param z: is the embedding tensor with shape `[batch_size, z_channels, z_height, z_height]`
        :return: the decoded image of shape `[batch_size, out_channels, height, width]`
        """
        # Map to `channels` with the initial convolution
        h = self.conv_in(z)

        # ResNet blocks with attention
        h = self.mid.block_1(h)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h)

        # Top-level blocks, iterated in reverse since `self.up` is stored in
        # forward order to match the checkpoint
        for up in reversed(self.up):
            # ResNet Blocks
            for block in up.block:
                h = block(h)
            # Up-sampling
            h = up.upsample(h)

        # Normalize and map to image space
        h = self.norm_out(h)
        h = swish(h)
        img = self.conv_out(h)

        #
        return img
class GaussianDistribution:
    """
    ## Gaussian Distribution

    A diagonal Gaussian parameterized by concatenated means and log variances.
    """

    def __init__(self, parameters: torch.Tensor):
        """
        :param parameters: are the means and log of variances of the embedding of shape
            `[batch_size, z_channels * 2, z_height, z_height]`
        """
        # First half of the channels are the means, second half the log variances
        mean, log_var = torch.chunk(parameters, 2, dim=1)
        self.mean = mean
        # Clamp the log variances for numerical stability
        self.log_var = torch.clamp(log_var, -30.0, 20.0)
        # $\sigma = e^{\frac{1}{2} \log \sigma^2}$
        self.std = torch.exp(0.5 * self.log_var)

    def sample(self):
        # Draw $\mu + \sigma \epsilon$ with $\epsilon \sim \mathcal{N}(0, I)$
        noise = torch.randn_like(self.std)
        return self.mean + self.std * noise
class AttnBlock(nn.Module):
    """
    ## Attention block

    Single-head self-attention over spatial positions with a residual
    connection.
    """

    def __init__(self, channels: int):
        """
        :param channels: is the number of channels
        """
        super().__init__()
        # Group normalization applied before attention
        self.norm = normalization(channels)
        # $1 \times 1$ convolutions producing queries, keys and values
        self.q = nn.Conv2d(channels, channels, 1)
        self.k = nn.Conv2d(channels, channels, 1)
        self.v = nn.Conv2d(channels, channels, 1)
        # Output projection
        self.proj_out = nn.Conv2d(channels, channels, 1)
        # $\frac{1}{\sqrt{d_{key}}}$ scaling for the dot products
        self.scale = channels ** -0.5

    def forward(self, x: torch.Tensor):
        """
        :param x: is the tensor of shape `[batch_size, channels, height, width]`
        """
        # Normalize the input
        x_norm = self.norm(x)
        # Project to queries, keys and values
        q = self.q(x_norm)
        k = self.k(x_norm)
        v = self.v(x_norm)

        # Flatten the spatial dimensions:
        # `[batch_size, channels, height, width]` -> `[batch_size, channels, height * width]`
        b, c, height, width = q.shape
        seq = height * width
        q = q.view(b, c, seq)
        k = k.view(b, c, seq)
        v = v.view(b, c, seq)

        # $\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_{key}}}\Bigg)$
        attn = F.softmax(torch.einsum('bci,bcj->bij', q, k) * self.scale, dim=2)
        # Weighted sum of values,
        # $\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_{key}}}\Bigg)V$
        out = torch.einsum('bij,bcj->bci', attn, v)

        # Restore the spatial shape and apply the output projection
        out = self.proj_out(out.view(b, c, height, width))
        # Residual connection
        return x + out
class UpSample(nn.Module):
    """
    ## Up-sampling layer

    Doubles the spatial resolution with nearest-neighbor interpolation
    followed by a $3 \times 3$ convolution.
    """

    def __init__(self, channels: int):
        """
        :param channels: is the number of channels
        """
        super().__init__()
        # $3 \times 3$ convolution; keeps the channel count unchanged
        self.conv = nn.Conv2d(channels, channels, 3, padding=1)

    def forward(self, x: torch.Tensor):
        """
        :param x: is the input feature map with shape `[batch_size, channels, height, width]`
        """
        # Nearest-neighbor up-sampling by a factor of $2$, then convolve
        upsampled = F.interpolate(x, scale_factor=2.0, mode="nearest")
        return self.conv(upsampled)
class DownSample(nn.Module):
    """
    ## Down-sampling layer

    Halves the spatial resolution using asymmetric (right/bottom only)
    zero-padding followed by a strided $3 \times 3$ convolution.
    """

    def __init__(self, channels: int):
        """
        :param channels: is the number of channels
        """
        super().__init__()
        # Strided convolution without built-in padding;
        # the padding is added explicitly in `forward`
        self.conv = nn.Conv2d(channels, channels, 3, stride=2, padding=0)

    def forward(self, x: torch.Tensor):
        """
        :param x: is the input feature map with shape `[batch_size, channels, height, width]`
        """
        # Zero-pad one pixel on the right and at the bottom only
        padded = F.pad(x, (0, 1, 0, 1), mode="constant", value=0)
        # Down-sample by a factor of $2$
        return self.conv(padded)
class ResnetBlock(nn.Module):
    """
    ## ResNet Block

    Two norm-swish-conv stages with a residual connection.
    """

    def __init__(self, in_channels: int, out_channels: int):
        """
        :param in_channels: is the number of channels in the input
        :param out_channels: is the number of channels in the output
        """
        super().__init__()
        # First normalization and convolution pair
        self.norm1 = normalization(in_channels)
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1)
        # Second normalization and convolution pair
        self.norm2 = normalization(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3, stride=1, padding=1)
        # Residual path; a $1 \times 1$ convolution when the channel count changes
        self.nin_shortcut = (nn.Conv2d(in_channels, out_channels, 1, stride=1, padding=0)
                             if in_channels != out_channels else nn.Identity())

    def forward(self, x: torch.Tensor):
        """
        :param x: is the input feature map with shape `[batch_size, channels, height, width]`
        """
        # norm -> swish -> conv, twice
        out = self.conv1(swish(self.norm1(x)))
        out = self.conv2(swish(self.norm2(out)))
        # Map the residual if needed and add it
        return self.nin_shortcut(x) + out
def swish(x: torch.Tensor):
    """
    ### Swish activation

    $$x \cdot \sigma(x)$$

    Uses PyTorch's fused `F.silu`, which is defined as exactly
    $x \cdot \sigma(x)$ (SiLU and Swish are the same function), instead of
    a separate multiply and sigmoid.
    """
    return F.silu(x)
def normalization(channels: int):
    """
    ### Group normalization

    This is a helper function that creates a `nn.GroupNorm` with a fixed
    group count and `eps`.
    """
    # All normalization layers in this autoencoder share these settings
    n_groups, eps = 32, 1e-6
    return nn.GroupNorm(num_groups=n_groups, num_channels=channels, eps=eps)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/model/unet_attention.py | labml_nn/diffusion/stable_diffusion/model/unet_attention.py | """
---
title: Transformer for Stable Diffusion U-Net
summary: >
Annotated PyTorch implementation/tutorial of the transformer
for U-Net in stable diffusion.
---
# Transformer for Stable Diffusion [U-Net](unet.html)
This implements the transformer module used in [U-Net](unet.html) that
gives $\epsilon_\text{cond}(x_t, c)$
We have kept to the model definition and naming unchanged from
[CompVis/stable-diffusion](https://github.com/CompVis/stable-diffusion)
so that we can load the checkpoints directly.
"""
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
class SpatialTransformer(nn.Module):
    """
    ## Spatial Transformer

    Applies transformer layers over the spatial positions of a feature map,
    conditioned on external embeddings.
    """

    def __init__(self, channels: int, n_heads: int, n_layers: int, d_cond: int):
        """
        :param channels: is the number of channels in the feature map
        :param n_heads: is the number of attention heads
        :param n_layers: is the number of transformer layers
        :param d_cond: is the size of the conditional embedding
        """
        super().__init__()
        # Group normalization before projecting into the transformer
        self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=channels, eps=1e-6, affine=True)
        # Input $1 \times 1$ convolution
        self.proj_in = nn.Conv2d(channels, channels, kernel_size=1, stride=1, padding=0)

        # Stack of transformer layers; each head gets `channels // n_heads` dimensions
        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(channels, n_heads, channels // n_heads, d_cond=d_cond) for _ in range(n_layers)]
        )

        # Output $1 \times 1$ convolution
        self.proj_out = nn.Conv2d(channels, channels, kernel_size=1, stride=1, padding=0)

    def forward(self, x: torch.Tensor, cond: torch.Tensor):
        """
        :param x: is the feature map of shape `[batch_size, channels, height, width]`
        :param cond: is the conditional embeddings of shape `[batch_size, n_cond, d_cond]`
        """
        batch_size, channels, height, width = x.shape
        # Keep the input for the residual connection
        residual = x
        # Normalize and project in
        x = self.proj_in(self.norm(x))
        # Flatten the spatial dimensions into a sequence:
        # `[batch_size, channels, height, width]` -> `[batch_size, height * width, channels]`
        x = x.permute(0, 2, 3, 1).view(batch_size, height * width, channels)
        # Apply the transformer layers
        for layer in self.transformer_blocks:
            x = layer(x, cond)
        # Restore the spatial layout:
        # `[batch_size, height * width, channels]` -> `[batch_size, channels, height, width]`
        x = x.view(batch_size, height, width, channels).permute(0, 3, 1, 2)
        # Project out and add the residual
        return self.proj_out(x) + residual
class BasicTransformerBlock(nn.Module):
    """
    ### Transformer Layer

    Pre-norm transformer layer: self-attention, cross-attention on the
    conditioning, and a feed-forward network, each with a residual connection.
    """

    def __init__(self, d_model: int, n_heads: int, d_head: int, d_cond: int):
        """
        :param d_model: is the input embedding size
        :param n_heads: is the number of attention heads
        :param d_head: is the size of a attention head
        :param d_cond: is the size of the conditional embeddings
        """
        super().__init__()
        # Self-attention (queries, keys and values all come from `x`)
        self.attn1 = CrossAttention(d_model, d_model, n_heads, d_head)
        self.norm1 = nn.LayerNorm(d_model)
        # Cross-attention on the conditional embeddings
        self.attn2 = CrossAttention(d_model, d_cond, n_heads, d_head)
        self.norm2 = nn.LayerNorm(d_model)
        # Position-wise feed-forward network
        self.ff = FeedForward(d_model)
        self.norm3 = nn.LayerNorm(d_model)

    def forward(self, x: torch.Tensor, cond: torch.Tensor):
        """
        :param x: are the input embeddings of shape `[batch_size, height * width, d_model]`
        :param cond: is the conditional embeddings of shape `[batch_size, n_cond, d_cond]`
        """
        # Pre-norm self-attention with a residual connection
        x = x + self.attn1(self.norm1(x))
        # Pre-norm cross-attention on the conditioning, with a residual connection
        x = x + self.attn2(self.norm2(x), cond=cond)
        # Pre-norm feed-forward network with a residual connection
        x = x + self.ff(self.norm3(x))
        #
        return x
class CrossAttention(nn.Module):
"""
### Cross Attention Layer
This falls-back to self-attention when conditional embeddings are not specified.
"""
use_flash_attention: bool = False
    def __init__(self, d_model: int, d_cond: int, n_heads: int, d_head: int, is_inplace: bool = True):
        """
        :param d_model: is the input embedding size
        :param d_cond: is the size of the conditional embeddings
        :param n_heads: is the number of attention heads
        :param d_head: is the size of a attention head
        :param is_inplace: specifies whether to perform the attention softmax computation inplace to
            save memory
        """
        super().__init__()

        self.is_inplace = is_inplace
        self.n_heads = n_heads
        self.d_head = d_head

        # Attention scaling factor, $\frac{1}{\sqrt{d_{key}}}$
        self.scale = d_head ** -0.5

        # Query, key and value mappings; keys and values are computed from the
        # conditional embeddings (or from `x` itself for self-attention)
        d_attn = d_head * n_heads
        self.to_q = nn.Linear(d_model, d_attn, bias=False)
        self.to_k = nn.Linear(d_cond, d_attn, bias=False)
        self.to_v = nn.Linear(d_cond, d_attn, bias=False)

        # Final linear layer (wrapped in `nn.Sequential` so the parameter names
        # match the original checkpoints)
        self.to_out = nn.Sequential(nn.Linear(d_attn, d_model))

        # Setup [flash attention](https://github.com/HazyResearch/flash-attention).
        # Flash attention is only used if it's installed
        # and `CrossAttention.use_flash_attention` is set to `True`.
        try:
            # You can install flash attention by cloning their Github repo,
            # [https://github.com/HazyResearch/flash-attention](https://github.com/HazyResearch/flash-attention)
            # and then running `python setup.py install`
            from flash_attn.flash_attention import FlashAttention
            self.flash = FlashAttention()
            # Set the scale for scaled dot-product attention.
            self.flash.softmax_scale = self.scale
        # Set to `None` if it's not installed
        except ImportError:
            self.flash = None
    def forward(self, x: torch.Tensor, cond: Optional[torch.Tensor] = None):
        """
        :param x: are the input embeddings of shape `[batch_size, height * width, d_model]`
        :param cond: is the conditional embeddings of shape `[batch_size, n_cond, d_cond]`;
            when `None`, the layer performs self-attention over `x`
        """
        # If `cond` is `None` we perform self attention
        has_cond = cond is not None
        if not has_cond:
            cond = x

        # Get query, key and value vectors
        q = self.to_q(x)
        k = self.to_k(cond)
        v = self.to_v(cond)

        # Use flash attention if it's available and the head size is less than or equal to `128`.
        # NOTE(review): flash attention is only taken on the self-attention path
        # (`not has_cond`) — presumably because `flash_attention` stacks `q`, `k`, `v`
        # into one tensor, which requires equal sequence lengths; confirm.
        if CrossAttention.use_flash_attention and self.flash is not None and not has_cond and self.d_head <= 128:
            return self.flash_attention(q, k, v)
        # Otherwise, fallback to normal attention
        else:
            return self.normal_attention(q, k, v)
def flash_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
"""
#### Flash Attention
:param q: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]`
:param k: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]`
:param v: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]`
"""
# Get batch size and number of elements along sequence axis (`width * height`)
batch_size, seq_len, _ = q.shape
# Stack `q`, `k`, `v` vectors for flash attention, to get a single tensor of
# shape `[batch_size, seq_len, 3, n_heads * d_head]`
qkv = torch.stack((q, k, v), dim=2)
# Split the heads
qkv = qkv.view(batch_size, seq_len, 3, self.n_heads, self.d_head)
# Flash attention works for head sizes `32`, `64` and `128`, so we have to pad the heads to
# fit this size.
if self.d_head <= 32:
pad = 32 - self.d_head
elif self.d_head <= 64:
pad = 64 - self.d_head
elif self.d_head <= 128:
pad = 128 - self.d_head
else:
raise ValueError(f'Head size ${self.d_head} too large for Flash Attention')
# Pad the heads
if pad:
qkv = torch.cat((qkv, qkv.new_zeros(batch_size, seq_len, 3, self.n_heads, pad)), dim=-1)
# Compute attention
# $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_{key}}}\Bigg)V$$
# This gives a tensor of shape `[batch_size, seq_len, n_heads, d_padded]`
out, _ = self.flash(qkv)
# Truncate the extra head size
out = out[:, :, :, :self.d_head]
# Reshape to `[batch_size, seq_len, n_heads * d_head]`
out = out.reshape(batch_size, seq_len, self.n_heads * self.d_head)
# Map to `[batch_size, height * width, d_model]` with a linear layer
return self.to_out(out)
def normal_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
"""
#### Normal Attention
:param q: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]`
:param k: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]`
:param v: are the query vectors before splitting heads, of shape `[batch_size, seq, d_attn]`
"""
# Split them to heads of shape `[batch_size, seq_len, n_heads, d_head]`
q = q.view(*q.shape[:2], self.n_heads, -1)
k = k.view(*k.shape[:2], self.n_heads, -1)
v = v.view(*v.shape[:2], self.n_heads, -1)
# Calculate attention $\frac{Q K^\top}{\sqrt{d_{key}}}$
attn = torch.einsum('bihd,bjhd->bhij', q, k) * self.scale
# Compute softmax
# $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_{key}}}\Bigg)$$
if self.is_inplace:
half = attn.shape[0] // 2
attn[half:] = attn[half:].softmax(dim=-1)
attn[:half] = attn[:half].softmax(dim=-1)
else:
attn = attn.softmax(dim=-1)
# Compute attention output
# $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_{key}}}\Bigg)V$$
out = torch.einsum('bhij,bjhd->bihd', attn, v)
# Reshape to `[batch_size, height * width, n_heads * d_head]`
out = out.reshape(*out.shape[:2], -1)
# Map to `[batch_size, height * width, d_model]` with a linear layer
return self.to_out(out)
class FeedForward(nn.Module):
    """
    ### Feed-Forward Network
    """
    def __init__(self, d_model: int, d_mult: int = 4):
        """
        :param d_model: is the input embedding size
        :param d_mult: is multiplicative factor for the hidden layer size
        """
        super().__init__()
        # Hidden layer size
        d_hidden = d_model * d_mult
        # GeGLU expansion, a (disabled) dropout, and a projection back to `d_model`
        self.net = nn.Sequential(
            GeGLU(d_model, d_hidden),
            nn.Dropout(0.),
            nn.Linear(d_hidden, d_model),
        )
    def forward(self, x: torch.Tensor):
        # Apply the feed-forward network
        return self.net(x)
class GeGLU(nn.Module):
    """
    ### GeGLU Activation
    $$\text{GeGLU}(x) = (xW + b) * \text{GELU}(xV + c)$$
    """
    def __init__(self, d_in: int, d_out: int):
        super().__init__()
        # One linear layer produces both projections $xW + b$ and $xV + c$
        self.proj = nn.Linear(d_in, d_out * 2)
    def forward(self, x: torch.Tensor):
        # Split the combined projection into a value half and a gate half
        value, gate = self.proj(x).chunk(2, dim=-1)
        # Gate the value: $\text{GeGLU}(x) = (xW + b) * \text{GELU}(xV + c)$
        return value * F.gelu(gate)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/model/__init__.py | labml_nn/diffusion/stable_diffusion/model/__init__.py | """
---
title: Modules used in stable diffusion
summary: >
Models and components for stable diffusion.
---
# [Stable Diffusion](../index.html) Models
* [AutoEncoder](autoencoder.html)
* [U-Net](unet.html) with [attention](unet_attention.html)
* [CLIP embedder](clip_embedder.html).
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/model/clip_embedder.py | labml_nn/diffusion/stable_diffusion/model/clip_embedder.py | """
---
title: CLIP Text Embedder
summary: >
CLIP embedder to get prompt embeddings for stable diffusion
---
# CLIP Text Embedder
This is used to get prompt embeddings for [stable diffusion](../index.html).
It uses HuggingFace Transformers CLIP model.
"""
from typing import List
from torch import nn
from transformers import CLIPTokenizer, CLIPTextModel
class CLIPTextEmbedder(nn.Module):
    """
    ## CLIP Text Embedder
    """
    def __init__(self, version: str = "openai/clip-vit-large-patch14", device="cuda:0", max_length: int = 77):
        """
        :param version: is the model version
        :param device: is the device
        :param max_length: is the max length of the tokenized prompt
        """
        super().__init__()
        # Tokenizer that maps prompt text to token ids
        self.tokenizer = CLIPTokenizer.from_pretrained(version)
        # Frozen CLIP text transformer (kept in eval mode)
        self.transformer = CLIPTextModel.from_pretrained(version).eval()
        self.device = device
        self.max_length = max_length
    def forward(self, prompts: List[str]):
        """
        :param prompts: are the list of prompts to embed
        """
        # Tokenize, padding/truncating every prompt to `max_length`
        encoding = self.tokenizer(prompts, truncation=True, max_length=self.max_length, return_length=True,
                                  return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
        # Move the token ids to the target device
        token_ids = encoding["input_ids"].to(self.device)
        # Run the transformer and return the final hidden states
        return self.transformer(input_ids=token_ids).last_hidden_state
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/sampler/ddim.py | labml_nn/diffusion/stable_diffusion/sampler/ddim.py | """
---
title: Denoising Diffusion Implicit Models (DDIM) Sampling
summary: >
Annotated PyTorch implementation/tutorial of
Denoising Diffusion Implicit Models (DDIM) Sampling
for stable diffusion model.
---
# Denoising Diffusion Implicit Models (DDIM) Sampling
This implements DDIM sampling from the paper
[Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502)
"""
from typing import Optional, List
import numpy as np
import torch
from labml import monit
from labml_nn.diffusion.stable_diffusion.latent_diffusion import LatentDiffusion
from labml_nn.diffusion.stable_diffusion.sampler import DiffusionSampler
class DDIMSampler(DiffusionSampler):
    """
    ## DDIM Sampler
    This extends the [`DiffusionSampler` base class](index.html).
    DDIM samples images by repeatedly removing noise by sampling step by step using,
    \begin{align}
    x_{\tau_{i-1}} &= \sqrt{\alpha_{\tau_{i-1}}}\Bigg(
    \frac{x_{\tau_i} - \sqrt{1 - \alpha_{\tau_i}}\epsilon_\theta(x_{\tau_i})}{\sqrt{\alpha_{\tau_i}}}
    \Bigg) \\
    &+ \sqrt{1 - \alpha_{\tau_{i- 1}} - \sigma_{\tau_i}^2} \cdot \epsilon_\theta(x_{\tau_i}) \\
    &+ \sigma_{\tau_i} \epsilon_{\tau_i}
    \end{align}
    where $\epsilon_{\tau_i}$ is random noise,
    $\tau$ is a subsequence of $[1,2,\dots,T]$ of length $S$,
    and
    $$\sigma_{\tau_i} =
    \eta \sqrt{\frac{1 - \alpha_{\tau_{i-1}}}{1 - \alpha_{\tau_i}}}
    \sqrt{1 - \frac{\alpha_{\tau_i}}{\alpha_{\tau_{i-1}}}}$$
    Note that, $\alpha_t$ in DDIM paper refers to ${\color{lightgreen}\bar\alpha_t}$ from [DDPM](ddpm.html).
    """
    # The underlying latent-diffusion model used to predict noise
    model: LatentDiffusion
    def __init__(self, model: LatentDiffusion, n_steps: int, ddim_discretize: str = "uniform", ddim_eta: float = 0.):
        """
        :param model: is the model to predict noise $\epsilon_\text{cond}(x_t, c)$
        :param n_steps: is the number of DDIM sampling steps, $S$
        :param ddim_discretize: specifies how to extract $\tau$ from $[1,2,\dots,T]$.
            It can be either `uniform` or `quad`.
        :param ddim_eta: is $\eta$ used to calculate $\sigma_{\tau_i}$. $\eta = 0$ makes the
            sampling process deterministic.
        """
        super().__init__(model)
        # Number of steps, $T$
        self.n_steps = model.n_steps
        # Calculate $\tau$ to be uniformly distributed across $[1,2,\dots,T]$
        if ddim_discretize == 'uniform':
            c = self.n_steps // n_steps
            self.time_steps = np.asarray(list(range(0, self.n_steps, c))) + 1
        # Calculate $\tau$ to be quadratically distributed across $[1,2,\dots,T]$
        elif ddim_discretize == 'quad':
            self.time_steps = ((np.linspace(0, np.sqrt(self.n_steps * .8), n_steps)) ** 2).astype(int) + 1
        else:
            raise NotImplementedError(ddim_discretize)
        with torch.no_grad():
            # Get ${\color{lightgreen}\bar\alpha_t}$
            alpha_bar = self.model.alpha_bar
            # $\alpha_{\tau_i}$
            self.ddim_alpha = alpha_bar[self.time_steps].clone().to(torch.float32)
            # $\sqrt{\alpha_{\tau_i}}$
            self.ddim_alpha_sqrt = torch.sqrt(self.ddim_alpha)
            # $\alpha_{\tau_{i-1}}$
            self.ddim_alpha_prev = torch.cat([alpha_bar[0:1], alpha_bar[self.time_steps[:-1]]])
            # $$\sigma_{\tau_i} =
            # \eta \sqrt{\frac{1 - \alpha_{\tau_{i-1}}}{1 - \alpha_{\tau_i}}}
            # \sqrt{1 - \frac{\alpha_{\tau_i}}{\alpha_{\tau_{i-1}}}}$$
            self.ddim_sigma = (ddim_eta *
                               ((1 - self.ddim_alpha_prev) / (1 - self.ddim_alpha) *
                                (1 - self.ddim_alpha / self.ddim_alpha_prev)) ** .5)
            # $\sqrt{1 - \alpha_{\tau_i}}$
            self.ddim_sqrt_one_minus_alpha = (1. - self.ddim_alpha) ** .5
    @torch.no_grad()
    def sample(self,
               shape: List[int],
               cond: torch.Tensor,
               repeat_noise: bool = False,
               temperature: float = 1.,
               x_last: Optional[torch.Tensor] = None,
               uncond_scale: float = 1.,
               uncond_cond: Optional[torch.Tensor] = None,
               skip_steps: int = 0,
               ):
        """
        ### Sampling Loop
        :param shape: is the shape of the generated images in the
            form `[batch_size, channels, height, width]`
        :param cond: is the conditional embeddings $c$
        :param repeat_noise: specifies whether the noise should be the same for all samples in the batch
        :param temperature: is the noise temperature (random noise gets multiplied by this)
        :param x_last: is $x_{\tau_S}$. If not provided random noise will be used.
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param uncond_cond: is the conditional embedding for empty prompt $c_u$
        :param skip_steps: is the number of time steps to skip $i'$. We start sampling from $S - i'$.
            And `x_last` is then $x_{\tau_{S - i'}}$.
        """
        # Get device and batch size
        device = self.model.device
        bs = shape[0]
        # Get $x_{\tau_S}$
        x = x_last if x_last is not None else torch.randn(shape, device=device)
        # Time steps to sample at $\tau_{S - i'}, \tau_{S - i' - 1}, \dots, \tau_1$
        time_steps = np.flip(self.time_steps)[skip_steps:]
        for i, step in monit.enum('Sample', time_steps):
            # Index $i$ in the list $[\tau_1, \tau_2, \dots, \tau_S]$
            index = len(time_steps) - i - 1
            # Time step $\tau_i$
            ts = x.new_full((bs,), step, dtype=torch.long)
            # Sample $x_{\tau_{i-1}}$
            x, pred_x0, e_t = self.p_sample(x, cond, ts, step, index=index,
                                            repeat_noise=repeat_noise,
                                            temperature=temperature,
                                            uncond_scale=uncond_scale,
                                            uncond_cond=uncond_cond)
        # Return $x_0$
        return x
    @torch.no_grad()
    def p_sample(self, x: torch.Tensor, c: torch.Tensor, t: torch.Tensor, step: int, index: int, *,
                 repeat_noise: bool = False,
                 temperature: float = 1.,
                 uncond_scale: float = 1.,
                 uncond_cond: Optional[torch.Tensor] = None):
        """
        ### Sample $x_{\tau_{i-1}}$
        :param x: is $x_{\tau_i}$ of shape `[batch_size, channels, height, width]`
        :param c: is the conditional embeddings $c$ of shape `[batch_size, emb_size]`
        :param t: is $\tau_i$ of shape `[batch_size]`
        :param step: is the step $\tau_i$ as an integer
        :param index: is index $i$ in the list $[\tau_1, \tau_2, \dots, \tau_S]$
        :param repeat_noise: specifies whether the noise should be the same for all samples in the batch
        :param temperature: is the noise temperature (random noise gets multiplied by this)
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param uncond_cond: is the conditional embedding for empty prompt $c_u$
        """
        # Get $\epsilon_\theta(x_{\tau_i})$
        e_t = self.get_eps(x, t, c,
                           uncond_scale=uncond_scale,
                           uncond_cond=uncond_cond)
        # Calculate $x_{\tau_{i - 1}}$ and predicted $x_0$
        x_prev, pred_x0 = self.get_x_prev_and_pred_x0(e_t, index, x,
                                                      temperature=temperature,
                                                      repeat_noise=repeat_noise)
        #
        return x_prev, pred_x0, e_t
    def get_x_prev_and_pred_x0(self, e_t: torch.Tensor, index: int, x: torch.Tensor, *,
                               temperature: float,
                               repeat_noise: bool):
        """
        ### Sample $x_{\tau_{i-1}}$ given $\epsilon_\theta(x_{\tau_i})$
        """
        # $\alpha_{\tau_i}$
        alpha = self.ddim_alpha[index]
        # $\alpha_{\tau_{i-1}}$
        alpha_prev = self.ddim_alpha_prev[index]
        # $\sigma_{\tau_i}$
        sigma = self.ddim_sigma[index]
        # $\sqrt{1 - \alpha_{\tau_i}}$
        sqrt_one_minus_alpha = self.ddim_sqrt_one_minus_alpha[index]
        # Current prediction for $x_0$,
        # $$\frac{x_{\tau_i} - \sqrt{1 - \alpha_{\tau_i}}\epsilon_\theta(x_{\tau_i})}{\sqrt{\alpha_{\tau_i}}}$$
        pred_x0 = (x - sqrt_one_minus_alpha * e_t) / (alpha ** 0.5)
        # Direction pointing to $x_t$
        # $$\sqrt{1 - \alpha_{\tau_{i- 1}} - \sigma_{\tau_i}^2} \cdot \epsilon_\theta(x_{\tau_i})$$
        dir_xt = (1. - alpha_prev - sigma ** 2).sqrt() * e_t
        # No noise is added, when $\eta = 0$
        if sigma == 0.:
            noise = 0.
        # If same noise is used for all samples in the batch
        elif repeat_noise:
            noise = torch.randn((1, *x.shape[1:]), device=x.device)
        # Different noise for each sample
        else:
            noise = torch.randn(x.shape, device=x.device)
        # Multiply noise by the temperature
        noise = noise * temperature
        # \begin{align}
        # x_{\tau_{i-1}} &= \sqrt{\alpha_{\tau_{i-1}}}\Bigg(
        # \frac{x_{\tau_i} - \sqrt{1 - \alpha_{\tau_i}}\epsilon_\theta(x_{\tau_i})}{\sqrt{\alpha_{\tau_i}}}
        # \Bigg) \\
        # &+ \sqrt{1 - \alpha_{\tau_{i- 1}} - \sigma_{\tau_i}^2} \cdot \epsilon_\theta(x_{\tau_i}) \\
        # &+ \sigma_{\tau_i} \epsilon_{\tau_i}
        # \end{align}
        x_prev = (alpha_prev ** 0.5) * pred_x0 + dir_xt + sigma * noise
        #
        return x_prev, pred_x0
    @torch.no_grad()
    def q_sample(self, x0: torch.Tensor, index: int, noise: Optional[torch.Tensor] = None):
        """
        ### Sample from $q_{\sigma,\tau}(x_{\tau_i}|x_0)$
        $$q_{\sigma,\tau}(x_t|x_0) =
        \mathcal{N} \Big(x_t; \sqrt{\alpha_{\tau_i}} x_0, (1-\alpha_{\tau_i}) \mathbf{I} \Big)$$
        :param x0: is $x_0$ of shape `[batch_size, channels, height, width]`
        :param index: is the time step $\tau_i$ index $i$
        :param noise: is the noise, $\epsilon$
        """
        # Random noise, if noise is not specified
        if noise is None:
            noise = torch.randn_like(x0)
        # Sample from
        # $$q_{\sigma,\tau}(x_t|x_0) =
        # \mathcal{N} \Big(x_t; \sqrt{\alpha_{\tau_i}} x_0, (1-\alpha_{\tau_i}) \mathbf{I} \Big)$$
        return self.ddim_alpha_sqrt[index] * x0 + self.ddim_sqrt_one_minus_alpha[index] * noise
    @torch.no_grad()
    def paint(self, x: torch.Tensor, cond: torch.Tensor, t_start: int, *,
              orig: Optional[torch.Tensor] = None,
              mask: Optional[torch.Tensor] = None, orig_noise: Optional[torch.Tensor] = None,
              uncond_scale: float = 1.,
              uncond_cond: Optional[torch.Tensor] = None,
              ):
        """
        ### Painting Loop
        :param x: is $x_{S'}$ of shape `[batch_size, channels, height, width]`
        :param cond: is the conditional embeddings $c$
        :param t_start: is the sampling step to start from, $S'$
        :param orig: is the original image in latent space which we are inpainting.
            If this is not provided, it'll be an image to image transformation.
        :param mask: is the mask to keep the original image.
        :param orig_noise: is fixed noise to be added to the original image.
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param uncond_cond: is the conditional embedding for empty prompt $c_u$
        """
        # Get batch size
        bs = x.shape[0]
        # Time steps to sample at $\tau_{S`}, \tau_{S' - 1}, \dots, \tau_1$
        time_steps = np.flip(self.time_steps[:t_start])
        for i, step in monit.enum('Paint', time_steps):
            # Index $i$ in the list $[\tau_1, \tau_2, \dots, \tau_S]$
            index = len(time_steps) - i - 1
            # Time step $\tau_i$
            ts = x.new_full((bs,), step, dtype=torch.long)
            # Sample $x_{\tau_{i-1}}$
            x, _, _ = self.p_sample(x, cond, ts, step, index=index,
                                    uncond_scale=uncond_scale,
                                    uncond_cond=uncond_cond)
            # Replace the masked area with original image
            if orig is not None:
                # Get the $q_{\sigma,\tau}(x_{\tau_i}|x_0)$ for original image in latent space
                orig_t = self.q_sample(orig, index, noise=orig_noise)
                # Replace the masked area
                x = orig_t * mask + x * (1 - mask)
        #
        return x
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/sampler/ddpm.py | labml_nn/diffusion/stable_diffusion/sampler/ddpm.py | """
---
title: Denoising Diffusion Probabilistic Models (DDPM) Sampling
summary: >
Annotated PyTorch implementation/tutorial of
Denoising Diffusion Probabilistic Models (DDPM) Sampling
for stable diffusion model.
---
# Denoising Diffusion Probabilistic Models (DDPM) Sampling
For a simpler DDPM implementation refer to our [DDPM implementation](../../ddpm/index.html).
We use the same notation for the $\alpha_t$ and $\beta_t$ schedules, etc.
"""
from typing import Optional, List
import numpy as np
import torch
from labml import monit
from labml_nn.diffusion.stable_diffusion.latent_diffusion import LatentDiffusion
from labml_nn.diffusion.stable_diffusion.sampler import DiffusionSampler
class DDPMSampler(DiffusionSampler):
    """
    ## DDPM Sampler
    This extends the [`DiffusionSampler` base class](index.html).
    DDPM samples images by repeatedly removing noise by sampling step by step from
    $p_\theta(x_{t-1} | x_t)$,
    \begin{align}
    p_\theta(x_{t-1} | x_t) &= \mathcal{N}\big(x_{t-1}; \mu_\theta(x_t, t), \tilde\beta_t \mathbf{I} \big) \\
    \mu_t(x_t, t) &= \frac{\sqrt{\bar\alpha_{t-1}}\beta_t}{1 - \bar\alpha_t}x_0
    + \frac{\sqrt{\alpha_t}(1 - \bar\alpha_{t-1})}{1-\bar\alpha_t}x_t \\
    \tilde\beta_t &= \frac{1 - \bar\alpha_{t-1}}{1 - \bar\alpha_t} \beta_t \\
    x_0 &= \frac{1}{\sqrt{\bar\alpha_t}} x_t - \Big(\sqrt{\frac{1}{\bar\alpha_t} - 1}\Big)\epsilon_\theta \\
    \end{align}
    """
    # The underlying latent-diffusion model used to predict noise
    model: LatentDiffusion
    def __init__(self, model: LatentDiffusion):
        """
        :param model: is the model to predict noise $\epsilon_\text{cond}(x_t, c)$
        """
        super().__init__(model)
        # Sampling steps $1, 2, \dots, T$
        self.time_steps = np.asarray(list(range(self.n_steps)))
        with torch.no_grad():
            # $\bar\alpha_t$
            alpha_bar = self.model.alpha_bar
            # $\beta_t$ schedule
            beta = self.model.beta
            # $\bar\alpha_{t-1}$
            alpha_bar_prev = torch.cat([alpha_bar.new_tensor([1.]), alpha_bar[:-1]])
            # $\sqrt{\bar\alpha}$
            self.sqrt_alpha_bar = alpha_bar ** .5
            # $\sqrt{1 - \bar\alpha}$
            self.sqrt_1m_alpha_bar = (1. - alpha_bar) ** .5
            # $\frac{1}{\sqrt{\bar\alpha_t}}$
            self.sqrt_recip_alpha_bar = alpha_bar ** -.5
            # $\sqrt{\frac{1}{\bar\alpha_t} - 1}$
            self.sqrt_recip_m1_alpha_bar = (1 / alpha_bar - 1) ** .5
            # $\frac{1 - \bar\alpha_{t-1}}{1 - \bar\alpha_t} \beta_t$
            variance = beta * (1. - alpha_bar_prev) / (1. - alpha_bar)
            # Clamped log of $\tilde\beta_t$
            self.log_var = torch.log(torch.clamp(variance, min=1e-20))
            # $\frac{\sqrt{\bar\alpha_{t-1}}\beta_t}{1 - \bar\alpha_t}$
            self.mean_x0_coef = beta * (alpha_bar_prev ** .5) / (1. - alpha_bar)
            # $\frac{\sqrt{\alpha_t}(1 - \bar\alpha_{t-1})}{1-\bar\alpha_t}$
            self.mean_xt_coef = (1. - alpha_bar_prev) * ((1 - beta) ** 0.5) / (1. - alpha_bar)
    @torch.no_grad()
    def sample(self,
               shape: List[int],
               cond: torch.Tensor,
               repeat_noise: bool = False,
               temperature: float = 1.,
               x_last: Optional[torch.Tensor] = None,
               uncond_scale: float = 1.,
               uncond_cond: Optional[torch.Tensor] = None,
               skip_steps: int = 0,
               ):
        """
        ### Sampling Loop
        :param shape: is the shape of the generated images in the
            form `[batch_size, channels, height, width]`
        :param cond: is the conditional embeddings $c$
        :param repeat_noise: specifies whether the noise should be the same for all samples in the batch
        :param temperature: is the noise temperature (random noise gets multiplied by this)
        :param x_last: is $x_T$. If not provided random noise will be used.
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param uncond_cond: is the conditional embedding for empty prompt $c_u$
        :param skip_steps: is the number of time steps to skip $t'$. We start sampling from $T - t'$.
            And `x_last` is then $x_{T - t'}$.
        """
        # Get device and batch size
        device = self.model.device
        bs = shape[0]
        # Get $x_T$
        x = x_last if x_last is not None else torch.randn(shape, device=device)
        # Time steps to sample at $T - t', T - t' - 1, \dots, 1$
        time_steps = np.flip(self.time_steps)[skip_steps:]
        # Sampling loop
        for step in monit.iterate('Sample', time_steps):
            # Time step $t$
            ts = x.new_full((bs,), step, dtype=torch.long)
            # Sample $x_{t-1}$
            x, pred_x0, e_t = self.p_sample(x, cond, ts, step,
                                            repeat_noise=repeat_noise,
                                            temperature=temperature,
                                            uncond_scale=uncond_scale,
                                            uncond_cond=uncond_cond)
        # Return $x_0$
        return x
    @torch.no_grad()
    def p_sample(self, x: torch.Tensor, c: torch.Tensor, t: torch.Tensor, step: int,
                 repeat_noise: bool = False,
                 temperature: float = 1.,
                 uncond_scale: float = 1., uncond_cond: Optional[torch.Tensor] = None):
        """
        ### Sample $x_{t-1}$ from $p_\theta(x_{t-1} | x_t)$
        :param x: is $x_t$ of shape `[batch_size, channels, height, width]`
        :param c: is the conditional embeddings $c$ of shape `[batch_size, emb_size]`
        :param t: is $t$ of shape `[batch_size]`
        :param step: is the step $t$ as an integer
        :param repeat_noise: specifies whether the noise should be the same for all samples in the batch
        :param temperature: is the noise temperature (random noise gets multiplied by this)
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param uncond_cond: is the conditional embedding for empty prompt $c_u$
        """
        # Get $\epsilon_\theta$
        e_t = self.get_eps(x, t, c,
                           uncond_scale=uncond_scale,
                           uncond_cond=uncond_cond)
        # Get batch size
        bs = x.shape[0]
        # $\frac{1}{\sqrt{\bar\alpha_t}}$
        sqrt_recip_alpha_bar = x.new_full((bs, 1, 1, 1), self.sqrt_recip_alpha_bar[step])
        # $\sqrt{\frac{1}{\bar\alpha_t} - 1}$
        sqrt_recip_m1_alpha_bar = x.new_full((bs, 1, 1, 1), self.sqrt_recip_m1_alpha_bar[step])
        # Calculate $x_0$ with current $\epsilon_\theta$
        #
        # $$x_0 = \frac{1}{\sqrt{\bar\alpha_t}} x_t - \Big(\sqrt{\frac{1}{\bar\alpha_t} - 1}\Big)\epsilon_\theta$$
        x0 = sqrt_recip_alpha_bar * x - sqrt_recip_m1_alpha_bar * e_t
        # $\frac{\sqrt{\bar\alpha_{t-1}}\beta_t}{1 - \bar\alpha_t}$
        mean_x0_coef = x.new_full((bs, 1, 1, 1), self.mean_x0_coef[step])
        # $\frac{\sqrt{\alpha_t}(1 - \bar\alpha_{t-1})}{1-\bar\alpha_t}$
        mean_xt_coef = x.new_full((bs, 1, 1, 1), self.mean_xt_coef[step])
        # Calculate $\mu_t(x_t, t)$
        #
        # $$\mu_t(x_t, t) = \frac{\sqrt{\bar\alpha_{t-1}}\beta_t}{1 - \bar\alpha_t}x_0
        # + \frac{\sqrt{\alpha_t}(1 - \bar\alpha_{t-1})}{1-\bar\alpha_t}x_t$$
        mean = mean_x0_coef * x0 + mean_xt_coef * x
        # $\log \tilde\beta_t$
        log_var = x.new_full((bs, 1, 1, 1), self.log_var[step])
        # Do not add noise when $t = 1$ (final step sampling process).
        # Note that `step` is `0` when $t = 1$)
        if step == 0:
            noise = 0
        # If same noise is used for all samples in the batch.
        # Create the noise on the same device as `x`, consistent with the
        # [DDIM sampler](ddim.html), so sampling also works on CUDA tensors.
        elif repeat_noise:
            noise = torch.randn((1, *x.shape[1:]), device=x.device)
        # Different noise for each sample
        else:
            noise = torch.randn(x.shape, device=x.device)
        # Multiply noise by the temperature
        noise = noise * temperature
        # Sample from,
        #
        # $$p_\theta(x_{t-1} | x_t) = \mathcal{N}\big(x_{t-1}; \mu_\theta(x_t, t), \tilde\beta_t \mathbf{I} \big)$$
        x_prev = mean + (0.5 * log_var).exp() * noise
        #
        return x_prev, x0, e_t
    @torch.no_grad()
    def q_sample(self, x0: torch.Tensor, index: int, noise: Optional[torch.Tensor] = None):
        """
        ### Sample from $q(x_t|x_0)$
        $$q(x_t|x_0) = \mathcal{N} \Big(x_t; \sqrt{\bar\alpha_t} x_0, (1-\bar\alpha_t) \mathbf{I} \Big)$$
        :param x0: is $x_0$ of shape `[batch_size, channels, height, width]`
        :param index: is the time step $t$ index
        :param noise: is the noise, $\epsilon$
        """
        # Random noise, if noise is not specified
        if noise is None:
            noise = torch.randn_like(x0)
        # Sample from $\mathcal{N} \Big(x_t; \sqrt{\bar\alpha_t} x_0, (1-\bar\alpha_t) \mathbf{I} \Big)$
        return self.sqrt_alpha_bar[index] * x0 + self.sqrt_1m_alpha_bar[index] * noise
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/diffusion/stable_diffusion/sampler/__init__.py | labml_nn/diffusion/stable_diffusion/sampler/__init__.py | """
---
title: Sampling algorithms for stable diffusion
summary: >
Annotated PyTorch implementation/tutorial of
sampling algorithms
for stable diffusion model.
---
# Sampling algorithms for [stable diffusion](../index.html)
We have implemented the following [sampling algorithms](sampler/index.html):
* [Denoising Diffusion Probabilistic Models (DDPM) Sampling](ddpm.html)
* [Denoising Diffusion Implicit Models (DDIM) Sampling](ddim.html)
"""
from typing import Optional, List
import torch
from labml_nn.diffusion.stable_diffusion.latent_diffusion import LatentDiffusion
class DiffusionSampler:
    """
    ## Base class for sampling algorithms
    """
    # The noise-prediction model shared by all samplers
    model: LatentDiffusion
    def __init__(self, model: LatentDiffusion):
        """
        :param model: is the model to predict noise $\epsilon_\text{cond}(x_t, c)$
        """
        super().__init__()
        # The model $\epsilon_\text{cond}(x_t, c)$
        self.model = model
        # Number of time steps the model was trained with, $T$
        self.n_steps = model.n_steps
    def get_eps(self, x: torch.Tensor, t: torch.Tensor, c: torch.Tensor, *,
                uncond_scale: float, uncond_cond: Optional[torch.Tensor]):
        """
        ## Get $\epsilon(x_t, c)$
        :param x: is $x_t$ of shape `[batch_size, channels, height, width]`
        :param t: is $t$ of shape `[batch_size]`
        :param c: is the conditional embeddings $c$ of shape `[batch_size, emb_size]`
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param uncond_cond: is the conditional embedding for empty prompt $c_u$
        """
        # Without guidance ($s = 1$) or without an unconditional embedding,
        # $\epsilon_\theta(x_t, c) = \epsilon_\text{cond}(x_t, c)$
        if uncond_cond is None or uncond_scale == 1.:
            return self.model(x, t, c)
        # Batch the conditional and unconditional passes into a single model call
        x_in = torch.cat([x, x])
        t_in = torch.cat([t, t])
        c_in = torch.cat([uncond_cond, c])
        # One forward pass gives both $\epsilon_\text{cond}(x_t, c_u)$ and $\epsilon_\text{cond}(x_t, c)$
        e_t_uncond, e_t_cond = self.model(x_in, t_in, c_in).chunk(2)
        # Classifier-free guidance:
        # $$\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$$
        return e_t_uncond + uncond_scale * (e_t_cond - e_t_uncond)
    def sample(self,
               shape: List[int],
               cond: torch.Tensor,
               repeat_noise: bool = False,
               temperature: float = 1.,
               x_last: Optional[torch.Tensor] = None,
               uncond_scale: float = 1.,
               uncond_cond: Optional[torch.Tensor] = None,
               skip_steps: int = 0,
               ):
        """
        ### Sampling Loop
        Implemented by subclasses.
        :param shape: is the shape of the generated images in the
            form `[batch_size, channels, height, width]`
        :param cond: is the conditional embeddings $c$
        :param temperature: is the noise temperature (random noise gets multiplied by this)
        :param x_last: is $x_T$. If not provided random noise will be used.
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param uncond_cond: is the conditional embedding for empty prompt $c_u$
        :param skip_steps: is the number of time steps to skip.
        """
        raise NotImplementedError()
    def paint(self, x: torch.Tensor, cond: torch.Tensor, t_start: int, *,
              orig: Optional[torch.Tensor] = None,
              mask: Optional[torch.Tensor] = None, orig_noise: Optional[torch.Tensor] = None,
              uncond_scale: float = 1.,
              uncond_cond: Optional[torch.Tensor] = None,
              ):
        """
        ### Painting Loop
        Implemented by subclasses.
        :param x: is $x_{T'}$ of shape `[batch_size, channels, height, width]`
        :param cond: is the conditional embeddings $c$
        :param t_start: is the sampling step to start from, $T'$
        :param orig: is the original image in latent space which we are inpainting.
        :param mask: is the mask to keep the original image.
        :param orig_noise: is fixed noise to be added to the original image.
        :param uncond_scale: is the unconditional guidance scale $s$. This is used for
            $\epsilon_\theta(x_t, c) = s\epsilon_\text{cond}(x_t, c) + (s - 1)\epsilon_\text{cond}(x_t, c_u)$
        :param uncond_cond: is the conditional embedding for empty prompt $c_u$
        """
        raise NotImplementedError()
    def q_sample(self, x0: torch.Tensor, index: int, noise: Optional[torch.Tensor] = None):
        """
        ### Sample from $q(x_t|x_0)$
        Implemented by subclasses.
        :param x0: is $x_0$ of shape `[batch_size, channels, height, width]`
        :param index: is the time step $t$ index
        :param noise: is the noise, $\epsilon$
        """
        raise NotImplementedError()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/resnet/experiment.py | labml_nn/resnet/experiment.py | """
---
title: Train a ResNet on CIFAR 10
summary: >
Train a ResNet on CIFAR 10
---
# Train a [ResNet](index.html) on CIFAR 10
"""
from typing import List, Optional
from torch import nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.resnet import ResNetBase
class Configs(CIFAR10Configs):
    """
    ## Configurations
    We use [`CIFAR10Configs`](../experiments/cifar10.html) which defines all the
    dataset related configurations, optimizer, and a training loop.
    """
    # Number of blocks for each feature map size
    n_blocks: List[int] = [3, 3, 3]
    # Number of channels for each feature map size
    n_channels: List[int] = [16, 32, 64]
    # Bottleneck sizes (set to `None` to use plain residual blocks)
    bottlenecks: Optional[List[int]] = None
    # Kernel size of the initial convolution layer
    first_kernel_size: int = 3
@option(Configs.model)
def _resnet(c: Configs):
    """
    ### Create model
    """
    # Backbone [ResNet](index.html) that maps images to feature vectors
    backbone = ResNetBase(c.n_blocks, c.n_channels, c.bottlenecks, img_channels=3, first_kernel_size=c.first_kernel_size)
    # Classification head for the 10 CIFAR classes
    head = nn.Linear(c.n_channels[-1], 10)
    # Compose backbone and head, and move the model to the configured device
    return nn.Sequential(backbone, head).to(c.device)
def main():
    """
    Create and run the CIFAR-10 ResNet training experiment.
    """
    # Register the experiment run
    experiment.create(name='resnet', comment='cifar10')
    # Build the configuration object
    conf = Configs()
    # Overrides: a bottleneck ResNet with 6 blocks per feature-map size,
    # Adam optimizer, and augmented training data
    overrides = {
        'bottlenecks': [8, 16, 16],
        'n_blocks': [6, 6, 6],
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'epochs': 500,
        'train_batch_size': 256,
        'train_dataset': 'cifar10_train_augmented',
        'valid_dataset': 'cifar10_valid_no_augment',
    }
    experiment.configs(conf, overrides)
    # Register the model so checkpoints can save/load it
    experiment.add_pytorch_models({'model': conf.model})
    # Run the training loop inside the experiment context
    with experiment.start():
        conf.run()
# Script entry point
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/resnet/__init__.py | labml_nn/resnet/__init__.py | """
---
title: Deep Residual Learning for Image Recognition (ResNet)
summary: >
A PyTorch implementation/tutorial of Deep Residual Learning for Image Recognition (ResNet).
---
# Deep Residual Learning for Image Recognition (ResNet)
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385).
ResNets train layers as residual functions to overcome the
*degradation problem*.
The degradation problem is the accuracy of deep neural networks degrading when
the number of layers becomes very high.
The accuracy increases as the number of layers increase, then saturates,
and then starts to degrade.
The paper argues that deeper models should perform at least as well as shallower
models because the extra layers can just learn to perform an identity mapping.
## Residual Learning
If $\mathcal{H}(x)$ is the mapping that needs to be learned by a few layers,
they train the residual function
$$\mathcal{F}(x) = \mathcal{H}(x) - x$$
instead. And the original function becomes $\mathcal{F}(x) + x$.
In this case, learning identity mapping for $\mathcal{H}(x)$ is
equivalent to learning $\mathcal{F}(x)$ to be $0$, which is easier to
learn.
In the parameterized form this can be written as,
$$\mathcal{F}(x, \{W_i\}) + x$$
and when the feature map sizes of $\mathcal{F}(x, \{W_i\})$ and $x$ are different
the paper suggests doing a linear projection, with learned weights $W_s$.
$$\mathcal{F}(x, \{W_i\}) + W_s x$$
Paper experimented with zero padding instead of linear projections and found linear projections
to work better. Also when the feature map sizes match they found identity mapping
to be better than linear projections.
$\mathcal{F}$ should have more than one layer, otherwise the sum $\mathcal{F}(x, \{W_i\}) + W_s x$
also won't have non-linearities and will be like a linear layer.
Here is [the training code](experiment.html) for training a ResNet on CIFAR-10.
"""
from typing import List, Optional
import torch
from torch import nn
class ShortcutProjection(nn.Module):
    """
    ## Linear projections for shortcut connection

    Implements the $W_s x$ projection used when the residual branch changes
    the feature-map shape, so the shortcut can still be added to it.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int):
        """
        * `in_channels` is the number of channels in $x$
        * `out_channels` is the number of channels in $\mathcal{F}(x, \{W_i\})$
        * `stride` is the stride length in the convolution operation for $F$.
          The shortcut uses the same stride so the feature-map sizes match.
        """
        super().__init__()
        # $W_s$ as a $1 \times 1$ convolution
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
        # Batch normalization after the convolution, per the paper's convention
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x: torch.Tensor):
        # Project, then normalize
        projected = self.conv(x)
        return self.bn(projected)
class ResidualBlock(nn.Module):
    """
    <a id="residual_block"></a>

    ## Residual Block

    The residual block from the paper: two $3 \times 3$ convolutions, each
    followed by batch normalization.

    

    The first convolution maps `in_channels` to `out_channels` and may
    downsample with `stride`; the second keeps both the channel count and
    the resolution. The input is added back through a shortcut before the
    final activation.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int):
        """
        * `in_channels` is the number of channels in $x$
        * `out_channels` is the number of output channels
        * `stride` is the stride length in the convolution operation.
        """
        super().__init__()
        # First $3 \times 3$ convolution; may downsample and change channel count
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.act1 = nn.ReLU()
        # Second $3 \times 3$ convolution keeps resolution and channel count
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # A learned projection $W_s x$ is needed whenever the output shape
        # differs from the input; otherwise the shortcut is the identity
        needs_projection = stride != 1 or in_channels != out_channels
        self.shortcut = ShortcutProjection(in_channels, out_channels, stride) if needs_projection \
            else nn.Identity()
        # Activation applied after adding the shortcut
        self.act2 = nn.ReLU()

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input of shape `[batch_size, in_channels, height, width]`
        """
        # Residual path: conv → bn → relu → conv → bn
        residual = self.act1(self.bn1(self.conv1(x)))
        residual = self.bn2(self.conv2(residual))
        # Add the (possibly projected) input and apply the final activation
        return self.act2(residual + self.shortcut(x))
class BottleneckResidualBlock(nn.Module):
    """
    <a id="bottleneck_residual_block"></a>

    ## Bottleneck Residual Block

    The bottleneck block from the paper: a $1 \times 1$ → $3 \times 3$ →
    $1 \times 1$ convolution stack.

    

    The first $1 \times 1$ convolution shrinks the channel count to
    `bottleneck_channels` (lower than `in_channels`), the $3 \times 3$
    convolution works in that smaller space (optionally downsampling with
    `stride`), and the final $1 \times 1$ convolution expands back to
    `out_channels`. The two $1 \times 1$ convolutions thus decrease and
    increase the number of channels around the (cheap) $3 \times 3$ one.
    """

    def __init__(self, in_channels: int, bottleneck_channels: int, out_channels: int, stride: int):
        """
        * `in_channels` is the number of channels in $x$
        * `bottleneck_channels` is the number of channels for the $3 \times 3$ convolution
        * `out_channels` is the number of output channels
        * `stride` is the stride length in the $3 \times 3$ convolution operation.
        """
        super().__init__()
        # $1 \times 1$ convolution shrinking to the bottleneck width
        self.conv1 = nn.Conv2d(in_channels, bottleneck_channels, kernel_size=1, stride=1)
        self.bn1 = nn.BatchNorm2d(bottleneck_channels)
        self.act1 = nn.ReLU()
        # $3 \times 3$ convolution in the bottleneck space; downsampling happens here
        self.conv2 = nn.Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride, padding=1)
        self.bn2 = nn.BatchNorm2d(bottleneck_channels)
        self.act2 = nn.ReLU()
        # $1 \times 1$ convolution expanding back to `out_channels`
        self.conv3 = nn.Conv2d(bottleneck_channels, out_channels, kernel_size=1, stride=1)
        self.bn3 = nn.BatchNorm2d(out_channels)
        # A learned projection $W_s x$ is needed whenever the output shape
        # differs from the input; otherwise the shortcut is the identity
        needs_projection = stride != 1 or in_channels != out_channels
        self.shortcut = ShortcutProjection(in_channels, out_channels, stride) if needs_projection \
            else nn.Identity()
        # Activation applied after adding the shortcut
        self.act3 = nn.ReLU()

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input of shape `[batch_size, in_channels, height, width]`
        """
        # Bottleneck path: shrink, transform, expand
        out = self.act1(self.bn1(self.conv1(x)))
        out = self.act2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Add the (possibly projected) shortcut and apply the final activation
        return self.act3(out + self.shortcut(x))
class ResNetBase(nn.Module):
    """
    ## ResNet Model

    This is the base of the ResNet model, without the final linear layer
    and softmax for classification.

    The ResNet is made of stacked [residual blocks](#residual_block) or
    [bottleneck residual blocks](#bottleneck_residual_block).
    The feature map size is halved after a few blocks with a block of stride length $2$,
    and the number of channels is increased when the feature map size is reduced.
    Finally the feature map is average pooled to get a vector representation.
    """

    def __init__(self, n_blocks: List[int], n_channels: List[int],
                 bottlenecks: Optional[List[int]] = None,
                 img_channels: int = 3, first_kernel_size: int = 7):
        """
        * `n_blocks` is a list of the number of blocks for each feature map size.
        * `n_channels` is the number of channels for each feature map size.
        * `bottlenecks` is the number of channels in the bottlenecks.
          If this is `None`, [residual blocks](#residual_block) are used.
        * `img_channels` is the number of channels in the input.
        * `first_kernel_size` is the kernel size of the initial convolution layer.
        """
        super().__init__()
        # One block count and one channel count per feature map size
        assert len(n_blocks) == len(n_channels)
        # If bottleneck blocks are used, a bottleneck width must be given
        # for each feature map size
        assert bottlenecks is None or len(bottlenecks) == len(n_channels)
        # Initial convolution maps `img_channels` to `n_channels[0]` and
        # halves the resolution (stride $2$)
        self.conv = nn.Conv2d(img_channels, n_channels[0],
                              kernel_size=first_kernel_size, stride=2, padding=first_kernel_size // 2)
        # Batch norm after the initial convolution
        self.bn = nn.BatchNorm2d(n_channels[0])
        # List of residual blocks
        blocks = []
        # Number of channels produced by the previous layer (or block)
        prev_channels = n_channels[0]
        # Loop through each feature map size
        for i, channels in enumerate(n_channels):
            # The first block of each new feature map size halves the resolution
            # with a stride of $2$ — except for the very first block, which keeps
            # the resolution produced by the initial convolution.
            #
            # FIX: this condition was inverted (`2 if len(blocks) == 0 else 1`),
            # which downsampled only at the very first block and never between
            # feature map sizes — contradicting the comment above and the ResNet
            # architecture, where downsampling happens at each group transition.
            stride = 1 if len(blocks) == 0 else 2
            if bottlenecks is None:
                # [Residual block](#residual_block) mapping `prev_channels` to `channels`
                blocks.append(ResidualBlock(prev_channels, channels, stride=stride))
            else:
                # [Bottleneck residual block](#bottleneck_residual_block)
                # mapping `prev_channels` to `channels`
                blocks.append(BottleneckResidualBlock(prev_channels, bottlenecks[i], channels,
                                                      stride=stride))
            # Track the number of channels
            prev_channels = channels
            # Rest of the blocks keep the feature map size and channel count
            for _ in range(n_blocks[i] - 1):
                if bottlenecks is None:
                    # [Residual block](#residual_block)
                    blocks.append(ResidualBlock(channels, channels, stride=1))
                else:
                    # [Bottleneck residual block](#bottleneck_residual_block)
                    blocks.append(BottleneckResidualBlock(channels, bottlenecks[i], channels, stride=1))
        # Stack the blocks
        self.blocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        """
        * `x` has shape `[batch_size, img_channels, height, width]`
        """
        # Initial convolution and batch normalization
        x = self.bn(self.conv(x))
        # Residual (or bottleneck) blocks
        x = self.blocks(x)
        # Flatten the spatial dimensions: `[batch_size, channels, h * w]`
        x = x.view(x.shape[0], x.shape[1], -1)
        # Global average pooling over the spatial dimension
        return x.mean(dim=-1)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/lstm/__init__.py | labml_nn/lstm/__init__.py | """
---
title: Long Short-Term Memory (LSTM)
summary: A simple PyTorch implementation/tutorial of Long Short-Term Memory (LSTM) modules.
---
# Long Short-Term Memory (LSTM)
This is a [PyTorch](https://pytorch.org) implementation of Long Short-Term Memory.
"""
from typing import Optional, Tuple
import torch
from torch import nn
class LSTMCell(nn.Module):
    """
    ## Long Short-Term Memory Cell

    An LSTM cell computes $c$, the long-term memory, and $h$, the
    short-term memory. The input $x$ and hidden state $h$ drive the update
    of $c$: a forget gate $f$ clears some of its features, and an input
    gate $i$ adds new features through $g$. The new short-term memory is
    the $\tanh$ of the long-term memory scaled by the output gate $o$:

    \begin{align}
    c_t &= \sigma(f_t) \odot c_{t-1} + \sigma(i_t) \odot \tanh(g_t) \\
    h_t &= \sigma(o_t) \odot \tanh(c_t)
    \end{align}

    where $\odot$ is element-wise multiplication, and the gate
    pre-activations are linear transformations of the input and the
    hidden state:

    \begin{align}
    i_t &= lin_x^i(x_t) + lin_h^i(h_{t-1}) \\
    f_t &= lin_x^f(x_t) + lin_h^f(h_{t-1}) \\
    g_t &= lin_x^g(x_t) + lin_h^g(h_{t-1}) \\
    o_t &= lin_x^o(x_t) + lin_h^o(h_{t-1})
    \end{align}

    Note that $c$ is only gated and added to — it never goes through a
    linear transformation — which is what avoids vanishing and exploding
    gradients.
    """

    def __init__(self, input_size: int, hidden_size: int, layer_norm: bool = False):
        super().__init__()

        # Linear transformations of the hidden state and the input. Each
        # produces all four gate pre-activations at once (a `4 * hidden_size`
        # output split later into $i$, $f$, $g$, $o$). Only one of them needs
        # a bias, since the two transformations are summed.
        #
        # `hidden_lin` combines $lin_h^i$, $lin_h^f$, $lin_h^g$, and $lin_h^o$.
        self.hidden_lin = nn.Linear(hidden_size, 4 * hidden_size)
        # `input_lin` combines $lin_x^i$, $lin_x^f$, $lin_x^g$, and $lin_x^o$.
        self.input_lin = nn.Linear(input_size, 4 * hidden_size, bias=False)

        # Optional layer normalization; applying it gives better results.
        # The gate pre-activations are normalized individually, and $c_t$ is
        # normalized inside $h_t = o_t \odot \tanh(\mathop{LN}(c_t))$.
        if layer_norm:
            self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(4)])
            self.layer_norm_c = nn.LayerNorm(hidden_size)
        else:
            self.layer_norm = nn.ModuleList([nn.Identity() for _ in range(4)])
            self.layer_norm_c = nn.Identity()

    def forward(self, x: torch.Tensor, h: torch.Tensor, c: torch.Tensor):
        # Pre-activations for all four gates, computed in one shot
        gates = self.hidden_lin(h) + self.input_lin(x)
        # Split into $i_t$, $f_t$, $g_t$, $o_t$ and normalize each
        # (normalization is the identity when `layer_norm=False`)
        i, f, g, o = (norm(gate) for norm, gate in zip(self.layer_norm, gates.chunk(4, dim=-1)))
        # $$c_t = \sigma(f_t) \odot c_{t-1} + \sigma(i_t) \odot \tanh(g_t)$$
        c_next = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
        # $$h_t = \sigma(o_t) \odot \tanh(\mathop{LN}(c_t))$$
        h_next = torch.sigmoid(o) * torch.tanh(self.layer_norm_c(c_next))
        #
        return h_next, c_next
class LSTM(nn.Module):
    """
    ## Multilayer LSTM
    """

    def __init__(self, input_size: int, hidden_size: int, n_layers: int):
        """
        Create a stack of `n_layers` LSTM cells.
        """
        super().__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        # The first cell consumes the input directly; every other cell
        # consumes the hidden state of the cell below it.
        self.cells = nn.ModuleList([LSTMCell(input_size, hidden_size)] +
                                   [LSTMCell(hidden_size, hidden_size) for _ in range(n_layers - 1)])

    def forward(self, x: torch.Tensor, state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):
        """
        `x` has shape `[n_steps, batch_size, input_size]` and `state` is a
        tuple of the stacked per-layer states $h$ and $c$, each of shape
        `[n_layers, batch_size, hidden_size]`.

        Returns the top-layer outputs of shape
        `[n_steps, batch_size, hidden_size]` and the final `(h, c)` state
        stacked across layers.
        """
        n_steps, batch_size = x.shape[:2]

        # Start from zero states when none are given
        if state is None:
            h = [x.new_zeros(batch_size, self.hidden_size) for _ in range(self.n_layers)]
            c = [x.new_zeros(batch_size, self.hidden_size) for _ in range(self.n_layers)]
        else:
            (h, c) = state
            # Split the stacked states into per-layer tensors
            # (working with lists is easier to debug than indexing the stacked tensor)
            h, c = list(torch.unbind(h)), list(torch.unbind(c))

        # Top-layer output at each time step
        out = []
        for t in range(n_steps):
            # The first layer sees the input itself
            inp = x[t]
            for idx, cell in enumerate(self.cells):
                # Advance this layer's state; its new hidden state feeds the next layer
                h[idx], c[idx] = cell(inp, h[idx], c[idx])
                inp = h[idx]
            # Collect the hidden state of the final layer
            out.append(h[-1])

        # Re-stack the outputs and the per-layer states
        return torch.stack(out), (torch.stack(h), torch.stack(c))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/scaling/__init__.py | labml_nn/scaling/__init__.py | """
---
title: Large scale model training
summary: >
Large scale model training/inference implementations.
---
# Large scale model training
* [Zero-DP optimizer](zero3/index.html)
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/scaling/zero3/finetune_neox.py | labml_nn/scaling/zero3/finetune_neox.py | """
---
title: Finetune GPT-NeoX with Zero3 memory optimizer
summary: >
This script trains the bias parameters of the GPT-NeoX on multiple devices with Zero-DP Memory Optimization.
---
# Finetune [GPT-NeoX](../../neox/index.html) with [Zero3 memory optimizer](index.html)
This script trains the bias parameters of the [GPT-NeoX model](../../neox/model.html)
on multiple devices with Zero-DP Memory Optimization.
"""
import datetime
import torch
import torch.distributed
from labml import experiment, monit, tracker
from labml.configs import option
from labml.logger import inspect
from labml_nn.neox.samples.finetune import PipelineParallelTrainerConf
# Use the [Pipeline Parallel Trainer configurations](../../neox/samples/finetune.html) and adapt it for
# Zero3 memory optimizer.
class Configs(PipelineParallelTrainerConf):
    """
    ## Configurations

    Extends the pipeline parallel trainer configurations with the
    distributed rank and world size needed by the Zero3 wrappers.
    """
    # Rank of the current process/device
    rank: int
    # Total number of processes/devices the training is sharded across
    world_size: int
@option(Configs.optimizer, 'Zero3Adam')
def _optimizer(c: Configs):
    """
    #### Set the optimizer for the model

    Builds an `AdamFP16` optimizer over the sharded parameters returned by
    `get_trainable_chunk`, so each node only keeps optimizer state for its
    own shard.
    """
    from labml_nn.optimizers.adam_fp16 import AdamFP16

    # Only this node's shard of the trainable parameters is optimized here
    sharded_params = c.model.get_trainable_chunk()
    return AdamFP16(sharded_params, lr=c.learning_rate)
@option(Configs.model, 'Zero3')
def _model(c: Configs):
    """
    #### Create the model with Zero3 memory optimizer
    """
    from labml_nn.scaling.zero3 import Zero3Layer, Zero3Sequential

    # Touch the fine tuner first so it marks the trainable parameters
    # before the layers get wrapped
    _ = c.fine_tuner

    # Wrap every layer with `Zero3Layer` so its parameters are sharded
    wrapped = [Zero3Layer(layer.to(c.device), c.rank, c.world_size, c.device, c.dtype)
               for layer in monit.iterate('Zero3', c.layers)]

    # Chain the wrapped layers into a sequential model
    return Zero3Sequential(wrapped)
def main(rank: int, world_size: int, init_method: str = 'tcp://localhost:23456'):
    """
    #### Run the training on the node with rank `rank`.

    :param rank: rank of the current process (one process per GPU)
    :param world_size: total number of processes participating
    :param init_method: rendezvous address for `torch.distributed` initialization
    """
    # Initialize PyTorch distributed process group (NCCL backend)
    with monit.section('Distributed'):
        torch.distributed.init_process_group('nccl',
                                             timeout=datetime.timedelta(seconds=30),
                                             init_method=init_method,
                                             rank=rank,
                                             world_size=world_size)
    # Set current device so subsequent CUDA allocations land on this process's GPU
    device = torch.device(f'cuda:{rank}')
    torch.cuda.set_device(device)

    # Create the experiment; all processes report under the same distributed experiment
    experiment.create(name='zero3_neox', writers={'screen', 'labml'},
                      distributed_world_size=world_size,
                      distributed_rank=rank)

    # Create configurations
    conf = Configs()

    # Load configurations; select the Zero3-wrapped model and sharded Adam optimizer
    experiment.configs(conf, {
        'model': 'Zero3',
        'optimizer': 'Zero3Adam',
        'device': device,
        'rank': rank,
        'world_size': world_size,
        'learning_rate': 3e-4,
        'max_seq_len': 128,
        'batch_size': 16,
    })

    # Start the experiment
    with experiment.start():
        # Initialize the model. Do this before the loop for cleaner logs.
        _ = conf.model

        # Train the model
        for epoch in monit.loop(conf.epochs):
            conf.train_epoch()
            tracker.new_line()
# Script entry point
if __name__ == '__main__':
    # Log the GPUs available on this machine
    inspect([torch.cuda.get_device_name(i) for i in range(torch.cuda.device_count())])
    # Log distributed-backend availability
    inspect(
        n_gpus=torch.cuda.device_count(),
        mpi=torch.distributed.is_mpi_available(),
        nccl=torch.distributed.is_nccl_available(),
    )

    n_gpu = torch.cuda.device_count()

    # Start a process for each GPU. You will need a separate launcher if you are using multiple computers.
    torch.multiprocessing.spawn(main, args=(n_gpu,), nprocs=n_gpu, join=True)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/scaling/zero3/__init__.py | labml_nn/scaling/zero3/__init__.py | """
---
title: Zero-DP Memory Optimization
summary: >
This is an implementation of Zero-DP Memory Optimization written in PyTorch.
---
# Zero-DP Memory Optimization
This is an implementation of Zero-DP introduced in the paper
[ZeRO: Memory Optimization Towards Training A Trillion Parameter Models](https://arxiv.org/abs/1910.02054).
It keeps shards of the optimizer state, gradients, and parameters across multiple devices/nodes.
It reduces the memory consumption to $\frac{(2 + 2 + K)\Psi}{N_d}$ of the original model,
where $\Psi$ is the number of parameters, $N_d$ is the number of shards,
and $K$ is number of optimizer bytes per parameter.
$2 + 2$ are the parameter and gradient memory assuming 16-bit precision; i.e. 2 bytes per parameter and gradient.
$K = 12$ for Adam optimizer because it maintains a copy of parameters, and two moments per parameter in fp32.
The communication volume of Zero-DP is $\mathcal{O}(3\Psi)$. For comparison data-parallel training
has a communication volume of $\mathcal{O}(2\Psi)$.
Although this is named `Zero3`, we have only implemented the Zero-DP part of it and not the
Zero-R memory optimizations which target residual memory consumption.
Our implementation supports training only a subset of the parameters.
This implementation is inspired by [Fairscale FSDP](https://fairscale.readthedocs.io/en/stable/api/nn/fsdp.html).
[Here's a script to fine-tune](finetune_neox.html) GPT NeoX using Zero-DP memory optimization.
"""
import functools
from typing import List, Optional, Tuple
import torch
import torch.distributed as dist
from torch import nn
class Zero3Layer(nn.Module):
    """
    ## Zero3 Layer

    Each layer of the model (or a combination of a few consecutive layers) should be wrapped in
    this module.
    """
    # Each shard keeps parameters in the `chunk` list.
    # `chunk[0]` is for trainable parameters and `chunk[1]` is for fixed parameters.
    chunk: List[nn.Parameter]
    # Sizes (number of elements) of the chunks in the `chunk` list
    chunk_size: List[int]
    # Index of the chunk that holds the trainable parameters
    TRAINING_PARAMS_IDX = 0
    # The layer's parameters split into two lists: trainable and fixed
    param_refs: List[List[nn.Parameter]]
    # CUDA stream to fetch parameters
    fetch_stream: Optional[torch.cuda.Stream]
    # CUDA stream to backup/accumulate gradients
    backup_stream: Optional[torch.cuda.Stream]
    # List of layers right before this layer
    prev_layer: List['Zero3Layer']
    # List of layers right after this layer
    next_layer: List['Zero3Layer']
    # The position of the current layer; used for debugging logs
    layer_idx: int
    # Whether parameters have been fetched
    is_fetched: bool
    # Device of the layer
    device: torch.device
    # Data type of the layer
    dtype: torch.dtype
    # The module to be wrapped
    module: nn.Module
    # Number of nodes/devices the data is sharded across
    world_size: int
def __init__(self, module: nn.Module, rank: int, world_size: int, device: torch.device, dtype: torch.dtype):
    """
    :param module: The module to be wrapped.
    :param rank: The rank of the current node.
    :param world_size: The number of nodes/devices the data is sharded across.
    :param device: The device of the layer.
    :param dtype: The data type of the layer.
    """
    super().__init__()

    # Initialize the properties
    self.device = device
    self.dtype = dtype
    self.module = module
    self.prev_layer = []
    self.next_layer = []
    self.is_fetched = False
    self.world_size = world_size
    self.layer_idx = -1
    self.fetch_stream = None
    self.backup_stream = None

    with torch.no_grad():
        # Collect all the parameters of the layer
        all_param_refs = [p for p in self.parameters()]

        # Store the shape of the parameters because we need it later to reconstruct them
        for p in all_param_refs:
            p._orig_shape = p.shape

        # All parameters should have the same type
        for p in all_param_refs:
            assert p.dtype == dtype, "All parameters should have same dtype"

        # Separate parameters as trainable and fixed
        self.param_refs = [[p for p in all_param_refs if p.requires_grad],
                           [p for p in all_param_refs if not p.requires_grad]]
        del all_param_refs

        # The `rank = 0` node will calculate the size each device/node should store, and
        # distribute the parameters accordingly.
        if rank == 0:
            # Merge and pad trainable (`merged_params[0]`) and fixed (`merged_params[1]`) parameters
            merged_params = [self._merge_and_pad_params(ps) for ps in self.param_refs]
            # Calculate the chunk sizes of trainable and fixed params
            self.chunk_size = [(len(p) // world_size if p is not None else 0) for p in merged_params]
            # Broadcast the sizes to the other nodes
            dist.broadcast(torch.tensor(self.chunk_size, device=device), src=0)
        else:
            # Create an empty tensor to receive the sizes
            chunk_size = torch.tensor([0, 0], device=device)
            # Receive the sizes from rank 0
            dist.broadcast(chunk_size, src=0)
            self.chunk_size = chunk_size.tolist()

        # Create parameters for trainable (`self.chunk[0]`) and fixed (`self.chunk[1]`)
        # parameters to be stored in current device/node;
        # only the trainable chunk requires gradients
        self.chunk = [nn.Parameter(self._empty((s,)), requires_grad=i == self.TRAINING_PARAMS_IDX)
                      for i, s in enumerate(self.chunk_size)]

        # An empty tensor to receive this node's shard of the trainable and fixed parameters combined
        chunk = self._empty((sum(self.chunk_size),))

        if rank == 0:
            # Concatenate both trainable and fixed params
            all_params = torch.cat([p.view(world_size, -1) for p in merged_params], dim=-1).view(-1)
            del merged_params

            # Scatter them to all the nodes/devices
            dist.scatter(chunk, list(all_params.split(sum(self.chunk_size))))
            del all_params
        else:
            # Receive this node's shard of the parameters
            dist.scatter(chunk)

        # Split the received shard back into the trainable and fixed chunks and copy it in
        chunk = chunk.split(self.chunk_size)
        for i, c in enumerate(chunk):
            self.chunk[i].data[:] = c
        del chunk

        # Release the memory held by the original (unsharded) parameters
        self._cleanup_params()

        # Add a backward hook. This gets called when the gradients relative to the module are computed.
        self._backward_hook_ref = self.register_full_backward_hook(self._backward_hook)  # type: ignore
def _merge_and_pad_params(self, params: List[nn.Parameter]) -> torch.Tensor:
    """
    #### Merge all the parameters and pad the result so its length is divisible by `world_size`.
    """
    # Total number of elements across all parameters
    total = sum(p.shape.numel() for p in params)
    # Amount of padding needed; `-total % world_size` is zero when `total`
    # is already divisible by `world_size`
    pad = -total % self.world_size
    # Uninitialized padding tensor (padding values are never read back)
    padding = self._empty((pad,))
    # Flatten every parameter and concatenate them with the padding
    return torch.cat([p.view(-1) for p in params] + [padding], dim=0)
def get_trainable_chunk(self) -> List[nn.Parameter]:
    """
    ### Get the trainable chunk/shard of the parameters.

    This is what we pass on to the optimizer on the current node.
    """
    trainable = self.chunk[self.TRAINING_PARAMS_IDX]
    # Nothing to optimize on this shard
    if len(trainable) == 0:
        return []
    # Return the trainable chunk as a single-element list
    return [trainable]
def _empty(self, shape: Tuple[int, ...]) -> torch.Tensor:
    """
    #### Allocate an uninitialized tensor of `shape` on this layer's device and dtype.
    """
    return torch.empty(shape, dtype=self.dtype, device=self.device)
@torch.no_grad()
def _cleanup_params(self):
    """
    #### Cleanup the parameter data

    This will release all the memory used by the layer parameters.
    """
    # Set the flag to indicate that the parameters are not fetched
    self.is_fetched = False

    # Iterate through both the trainable and fixed parameter lists
    for ps in self.param_refs:
        for p in ps:
            # Wait for operations on the parameters to complete before any new operations
            p.data.record_stream(torch.cuda.current_stream())
            # Check to make sure the parameter is not sharing storage with anything else
            assert p.data.storage_offset() == 0, "The tensor is not the sole occupant of the storage."
            # Resize the storage to $0$. This will release the memory used by the parameter.
            #
            # **Setting `p.data` will not release the memory, since the autograd graph keeps a reference to it.**
            p.data.storage().resize_(0)  # This is what actually clears the memory
            # Make sure the parameter has no gradient data
            assert p.grad is None, 'Gradients should be None'
@torch.no_grad()
def fetch_params(self):
    """
    ### Fetch the parameters from all shards

    This will fetch all the parameter data from all the nodes and rebuild the parameters on each node.
    """
    # Skip if already fetched
    if self.is_fetched:
        return

    # Set the flag
    self.is_fetched = True

    # Skip if there's nothing to fetch or share.
    if sum(self.chunk_size) == 0:
        return

    # Use `fetch_stream` to fetch the parameters from all the shards
    with torch.cuda.stream(self.fetch_stream):
        # Create an empty tensor to receive the parameters
        buffer = self._empty((self.world_size * sum(self.chunk_size),))
        # Split the continuous buffer into the number of nodes. These splits are views of `buffer`.
        buffers = list(buffer.split(sum(self.chunk_size)))

        # Concatenate both trainable and fixed chunks
        chunk = torch.cat(self.chunk, dim=0)

        # Gather the parameters from all the nodes/devices
        dist.all_gather(buffers, chunk)

        # Wait for the gather operation to complete and then clear the references to the buffers.
        #
        # FIX: the original called `buffer.record_stream(self.fetch_stream)` twice
        # (before and after the `buffers` loop); one call is sufficient, so the
        # redundant second call has been removed.
        buffer.record_stream(self.fetch_stream)
        for b in buffers:
            b.record_stream(self.fetch_stream)

        # Split the gathered parameters into the trainable and fixed chunks
        params = buffer.view(-1, sum(self.chunk_size)).split(self.chunk_size, dim=1)
        del buffer
        del buffers

        # Reshape the trainable and fixed parameters to continuous tensors
        params = [p.reshape(-1) for p in params]

        # Collect the individual parameter tensors
        for cont, ps in zip(params, self.param_refs):
            # If there are no parameters, skip
            if not ps:
                continue

            # Offset of the continuous tensor
            offset = 0
            # Iterate through model parameters and assign the values from the continuous tensor
            for p in ps:
                # Original parameter shape
                shape = p._orig_shape  # type: ignore[attr-defined]
                # Change the storage size of the parameter. This was set to $0$ when we cleaned up the parameters.
                p.data.storage().resize_(shape.numel())
                # Assign the values from the continuous tensor
                p.data[:] = cont[offset: offset + shape.numel()].reshape(shape)
                # Wait for the operations to complete before other operations can be performed
                p.data.record_stream(self.fetch_stream)
                # Update the offset
                offset += shape.numel()

            # Wait for the operation to complete before other operations can be performed
            cont.record_stream(self.fetch_stream)

        #
        del params
def forward(self, *args, **kwargs):
    """
    ### Forward pass

    Fetches this layer's parameters (and pre-fetches the next layers'),
    runs the wrapped module, then frees the parameter storage again.
    """
    # Fetch all the parameters of the current layer.
    # This gets called by the previous layer so this call is just to make sure parameters are fetched.
    self.fetch_params()
    # Wait for parameter fetching to complete before computing on the parameters.
    torch.cuda.current_stream().wait_stream(self.fetch_stream)
    # Start fetching parameters of the subsequent layers, so that they get transferred
    # while the current layer does its computations.
    for layer in self.next_layer:
        layer.fetch_params()
    # Add backward hooks to the parameters of the current layer if autograd is enabled.
    if torch.is_grad_enabled():
        self._add_backward_hooks()
    # Compute the outputs of the current layer
    res = self.module(*args, **kwargs)
    # Cleanup the parameters of the layer.
    #
    # *Skip cleaning up if autograd is enabled and this is the last layer in the network,
    # because we will need to fetch the parameters again for the backward pass.*
    if not torch.is_grad_enabled() or self.next_layer:
        self._cleanup_params()
    return res
def _add_backward_hooks(self):
    """
    #### Add backward hooks to the parameters of the current layer.

    Registers a hook on the gradient-accumulation node of every trainable
    parameter so `_post_backward_hook` fires once that gradient is ready.
    """
    # Number of backward hooks added
    self._backward_hook_handles = 0
    # Loop through trainable parameters of the current layer
    for p in self.param_refs[self.TRAINING_PARAMS_IDX]:
        # Make sure a hook hasn't already been added
        assert not hasattr(p, "_hook_handle"), 'Parameter has already been hooked'
        # Use `expand_as` to create an autograd step which we can intercept
        p_tmp = p.expand_as(p)
        # Get a handle to add the backward hook.
        # [This blog discusses about `grad_acc`](https://amsword.medium.com/understanding-pytorchs-autograd-with-grad-fn-and-next-functions-b2c4836daa00).
        grad_acc = p_tmp.grad_fn.next_functions[0][0]
        # Add the backward hook
        handle = grad_acc.register_hook(
            functools.partial(self._post_backward_hook, p))
        # Keep a reference to the handle on the parameter itself
        p._hook_handle = handle
        # Increment the number of hooks added
        self._backward_hook_handles += 1
def _backward_event(self):
    """
    #### Handle a backward event

    Called once per parameter hook and once by the module backward hook.
    After the final event, gradients are backed up, parameter storage is
    released, and the preceding layers' parameters are pre-fetched.
    """
    # One fewer hook outstanding
    self._backward_hook_handles -= 1
    # The counter reaches `-1` only after every parameter hook *and* the
    # module hook have fired, i.e. this layer's backward pass is complete.
    if self._backward_hook_handles == -1:
        # Reduce the gradients across shards
        self._backup_grads()
        # Free the full parameter storage
        self._cleanup_params()
    # Autograd will process the preceding layers next, so start fetching their parameters now.
    for prev in self.prev_layer:
        prev.fetch_params()
def _post_backward_hook(self, p: nn.Parameter, *args):
    """
    #### Parameter backward hook

    Runs once the gradient of parameter `p` has been accumulated.
    """
    # Detach and discard the hook handle so the parameter can be hooked again later
    handle = p._hook_handle  # type: ignore[attr-defined]
    handle.remove()
    delattr(p, "_hook_handle")
    # Register that one more backward hook has fired
    self._backward_event()
def _backward_hook(self, *args, **kwargs):
    """
    #### Module backward hook

    NOTE(review): the registration site of this hook is outside this chunk;
    assumed to fire once per backward pass of the module — confirm.
    """
    # Handle a backward event
    self._backward_event()
    # The previous layer will start computing gradients. We need to make sure it has finished fetching params.
    torch.cuda.current_stream().wait_stream(self.fetch_stream)
    #
    return None
@torch.no_grad()
def _backup_grads(self):
    """
    ### Backup the gradients of the current layer

    Gathers the gradients of all trainable parameters into one continuous
    buffer and reduce-scatters it, so each node ends up with the accumulated
    gradients of its own shard (stored on the chunk's `.grad`).
    """
    # Skip if there are no trainable parameters
    if self.chunk_size[self.TRAINING_PARAMS_IDX] == 0:
        return
    # Use the backup stream to back up the gradients
    with torch.cuda.stream(self.backup_stream):
        # Buffer to store the gradients
        buffer = self._empty((self.world_size * self.chunk_size[self.TRAINING_PARAMS_IDX],))
        # Split the continuous buffer into number of nodes. These splits are views of `buffer`.
        buffers = list(buffer.split(self.chunk_size[self.TRAINING_PARAMS_IDX]))
        # Offset of the continuous buffer
        offset = 0
        # Iterate through trainable parameters
        for p in self.param_refs[self.TRAINING_PARAMS_IDX]:
            # Original parameter shape
            shape = p._orig_shape  # type: ignore[attr-defined]
            # Collect the gradients into the continuous buffer
            buffer[offset: offset + shape.numel()] = p.grad.view(-1)
            # Update the offset
            offset += shape.numel()
            # Clean the gradients
            p.grad = None
        # Empty tensor to accumulate the gradients of the current shard
        grad = self._empty((self.chunk_size[self.TRAINING_PARAMS_IDX],))
        # Accumulate the gradients of each shard. It scatters the buffers across the nodes,
        # and each node accumulates (reduces) the tensors it receives.
        dist.reduce_scatter(grad, buffers)
        # Wait for the operation to complete and then clear the references to the buffers.
        # *Fix: these tensors are used on `backup_stream` (not `fetch_stream`), so that is the
        # stream the caching allocator must wait on before their memory can be reused.*
        for b in buffers:
            b.record_stream(self.backup_stream)
        buffer.record_stream(self.backup_stream)
        del buffer
        del buffers
        # Set the chunk gradients. This is what the optimizer sees.
        self.chunk[self.TRAINING_PARAMS_IDX].grad = grad
        del grad
class Zero3Sequential(nn.Module):
    """
    ## Sequential module for `Zero3Layer` layers

    Wires up a stack of `Zero3Layer`s: gives them shared CUDA streams for
    parameter fetching and gradient backup, and links each layer to its
    neighbors so they can pre-fetch each other's parameters.
    """

    def __init__(self, modules: List[Zero3Layer]):
        """
        :param modules: List of `Zero3Layer` layers
        """
        super().__init__()
        # Shared CUDA stream on which parameters are fetched
        self.fetch_stream = torch.cuda.Stream()
        # Shared CUDA stream on which gradients are backed up (accumulated)
        self.backup_stream = torch.cuda.Stream()
        # Assign indexes and the shared streams to every layer
        for idx, layer in enumerate(modules):
            layer.layer_idx = idx
            layer.fetch_stream = self.fetch_stream
            layer.backup_stream = self.backup_stream
        # Link each pair of neighboring layers to each other
        for earlier, later in zip(modules[:-1], modules[1:]):
            earlier.next_layer.append(later)
            later.prev_layer.append(earlier)
        # Store list of modules
        self.module_list = nn.ModuleList(modules)

    def get_trainable_chunk(self):
        # Flatten the trainable chunks of every layer into a single list
        return [chunk for m in self.module_list for chunk in m.get_trainable_chunk()]

    def forward(self, x: torch.Tensor):
        # Make sure gradient backup from the previous step is complete
        torch.cuda.current_stream().wait_stream(self.backup_stream)
        # Run through each layer in order
        for layer in self.module_list:
            x = layer(x)
        #
        return x
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/models.py | labml_nn/transformers/models.py | """
---
title: Transformer Encoder and Decoder Models
summary: >
These are PyTorch implementations of Transformer based encoder and decoder models,
as well as other related modules.
---
# Transformer Encoder and Decoder Models
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/basic/autoregressive_experiment.ipynb)
"""
import math
import torch
import torch.nn as nn
from labml_nn.utils import clone_module_list
from .feed_forward import FeedForward
from .mha import MultiHeadAttention
from .positional_encoding import get_positional_encoding
class EmbeddingsWithPositionalEncoding(nn.Module):
    """
    <a id="EmbeddingsWithPositionalEncoding"></a>

    ## Embed tokens and add [fixed positional encoding](positional_encoding.html)

    Token embeddings are scaled by $\sqrt{d_{model}}$ before the fixed
    sinusoidal positional encodings (a non-trainable buffer) are added.
    """

    def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
        super().__init__()
        # Token embedding layer
        self.linear = nn.Embedding(n_vocab, d_model)
        self.d_model = d_model
        # Pre-computed sinusoidal encodings, registered as a buffer (not a parameter)
        self.register_buffer('positional_encodings', get_positional_encoding(d_model, max_len))

    def forward(self, x: torch.Tensor):
        # Scale the token embeddings
        scaled = self.linear(x) * math.sqrt(self.d_model)
        # Slice the encodings to the sequence length; they take no gradient
        pe = self.positional_encodings[:x.shape[0]].requires_grad_(False)
        return scaled + pe
class EmbeddingsWithLearnedPositionalEncoding(nn.Module):
    """
    <a id="EmbeddingsWithLearnedPositionalEncoding"></a>

    ## Embed tokens and add parameterized positional encodings

    Same contract as the fixed variant, but the positional encodings are a
    learnable parameter, initialized to zeros.
    """

    def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
        super().__init__()
        # Token embedding layer
        self.linear = nn.Embedding(n_vocab, d_model)
        self.d_model = d_model
        # Learnable positional encodings: one `d_model` vector per position
        self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1, d_model), requires_grad=True)

    def forward(self, x: torch.Tensor):
        # Embedding scale factor
        scale = math.sqrt(self.d_model)
        # Encodings for the first `seq_len` positions
        pe = self.positional_encodings[:x.shape[0]]
        return pe + self.linear(x) * scale
class TransformerLayer(nn.Module):
    """
    <a id="TransformerLayer"></a>

    ## Transformer Layer

    A single pre-norm transformer layer. It acts as an encoder layer when
    `src_attn` is `None`, and as a decoder layer otherwise.
    """

    def __init__(self, *,
                 d_model: int,
                 self_attn: MultiHeadAttention,
                 src_attn: MultiHeadAttention = None,
                 feed_forward: FeedForward,
                 dropout_prob: float):
        """
        * `d_model` is the token embedding size
        * `self_attn` is the self attention module
        * `src_attn` is the source attention module (when this is used in a decoder)
        * `feed_forward` is the feed forward module
        * `dropout_prob` is the probability of dropping out after self attention and FFN
        """
        super().__init__()
        self.size = d_model
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.dropout = nn.Dropout(dropout_prob)
        # Pre-norm layer norms, one per sub-block
        self.norm_self_attn = nn.LayerNorm([d_model])
        if self.src_attn is not None:
            self.norm_src_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])
        # Whether to save input to the feed forward layer (as `self.ff_input`)
        self.is_save_ff_input = False

    def forward(self, *,
                x: torch.Tensor,
                mask: torch.Tensor,
                src: torch.Tensor = None,
                src_mask: torch.Tensor = None):
        # --- Self-attention sub-block: pre-norm, attend, residual add ---
        normed = self.norm_self_attn(x)
        x = x + self.dropout(self.self_attn(query=normed, key=normed, value=normed, mask=mask))
        # --- Source (cross) attention sub-block, only when a memory `src` is given,
        # i.e. when this is a decoder layer attending to encoder outputs ---
        if src is not None:
            normed = self.norm_src_attn(x)
            x = x + self.dropout(self.src_attn(query=normed, key=src, value=src, mask=src_mask))
        # --- Feed-forward sub-block ---
        normed = self.norm_ff(x)
        # Optionally keep a copy of the FFN input for later inspection
        if self.is_save_ff_input:
            self.ff_input = normed.clone()
        x = x + self.dropout(self.feed_forward(normed))
        return x
class Encoder(nn.Module):
    """
    <a id="Encoder"></a>

    ## Transformer Encoder

    A stack of identical [transformer layers](#TransformerLayer) followed by
    a final layer normalization.
    """

    def __init__(self, layer: TransformerLayer, n_layers: int):
        super().__init__()
        # `n_layers` copies of the given transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, mask: torch.Tensor):
        # Feed the input through every layer in turn
        for enc_layer in self.layers:
            x = enc_layer(x=x, mask=mask)
        # Normalize the final output
        return self.norm(x)
class Decoder(nn.Module):
    """
    <a id="Decoder"></a>

    ## Transformer Decoder

    A stack of identical [transformer layers](#TransformerLayer) that attend
    to the encoder `memory`, followed by a final layer normalization.
    """

    def __init__(self, layer: TransformerLayer, n_layers: int):
        super().__init__()
        # `n_layers` copies of the given transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, memory: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
        # Feed the target through every layer, cross-attending to the encoder memory
        for dec_layer in self.layers:
            x = dec_layer(x=x, mask=tgt_mask, src=memory, src_mask=src_mask)
        # Normalize the final output
        return self.norm(x)
class Generator(nn.Module):
    """
    <a id="Generator"></a>

    ## Generator

    This predicts the tokens by projecting the decoder output to vocabulary
    logits. Note that no log-softmax is applied here; you don't need one if
    you are using `nn.CrossEntropyLoss` (which expects raw logits).
    """

    def __init__(self, n_vocab: int, d_model: int):
        super().__init__()
        # Linear projection from the embedding dimension to vocabulary size
        self.projection = nn.Linear(d_model, n_vocab)

    def forward(self, x):
        # Raw (unnormalized) token logits
        return self.projection(x)
class EncoderDecoder(nn.Module):
    """
    <a id="EncoderDecoder"></a>

    ## Combined Encoder-Decoder

    Bundles the embeddings, encoder, decoder, and generator into one module.
    """

    def __init__(self, encoder: Encoder, decoder: Decoder, src_embed: nn.Module, tgt_embed: nn.Module, generator: nn.Module):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator
        # This was important from their code.
        # Initialize parameters with Glorot / fan_avg.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src: torch.Tensor, tgt: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
        # Encode the source, then decode the target conditioned on the encodings
        memory = self.encode(src, src_mask)
        return self.decode(memory, src_mask, tgt, tgt_mask)

    def encode(self, src: torch.Tensor, src_mask: torch.Tensor):
        # Embed the source tokens and run the encoder
        return self.encoder(self.src_embed(src), src_mask)

    def decode(self, memory: torch.Tensor, src_mask: torch.Tensor, tgt: torch.Tensor, tgt_mask: torch.Tensor):
        # Embed the target tokens and run the decoder over the encoder memory
        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/label_smoothing_loss.py | labml_nn/transformers/label_smoothing_loss.py | """
---
title: Label Smoothing Loss
summary: >
This is an implementation of label smoothing loss, that can be used as
an alternative to cross entropy loss for improved accuracy.
---
# Label Smoothing Loss
"""
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
class LabelSmoothingLoss(nn.Module):
    """
    KL-divergence loss against a label-smoothed target distribution.

    The true class receives probability `1 - smoothing` and the remaining
    mass is spread uniformly over the other classes, excluding the padding
    class. Rows whose target *is* the padding index are zeroed out.
    """

    def __init__(self, size: int, padding_idx: int, smoothing: float = 0.0):
        super().__init__()
        # Summed (not averaged) KL divergence
        self.loss = nn.KLDivLoss(reduction='sum')
        self.padding_idx = padding_idx
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.size = size
        # Last computed target distribution, kept for inspection/plotting
        self.true_dist = None

    def forward(self, x: torch.Tensor, target: torch.Tensor):
        # `x` is expected to hold log-probabilities over `size` classes
        assert x.shape[1] == self.size
        # Uniform smoothing mass over the non-target, non-padding classes
        true_dist = torch.full_like(x, self.smoothing / (self.size - 2))
        # Put the confidence on the target class of each row
        true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
        # The padding class never receives probability mass
        true_dist[:, self.padding_idx] = 0
        # Zero out rows whose target is the padding index
        mask = (target == self.padding_idx).nonzero(as_tuple=False)
        if mask.dim() > 0:
            true_dist.index_fill_(0, mask.squeeze(), 0.0)
        self.true_dist = true_dist
        return self.loss(x, true_dist.detach())
def _test_label_smoothing():
    """
    Visual sanity check: plot the smoothed target distributions and the loss
    for varying prediction confidence.
    """
    smooth_loss = LabelSmoothingLoss(5, 0, 0.4)
    predict = torch.tensor([[0, 0.2, 0.7, 0.1, 0],
                            [0, 0.2, 0.7, 0.1, 0],
                            [0, 0.2, 0.7, 0.1, 0]], dtype=torch.float)
    _ = smooth_loss(predict.log(),
                    torch.tensor([2, 1, 0], dtype=torch.long))

    # Show the target distributions expected by the system.
    plt.imshow(smooth_loss.true_dist)
    plt.show()

    smooth_loss = LabelSmoothingLoss(5, 0, 0.1)

    def loss_sample(x):
        # Distribution putting mass x/d on class 1 and 1/d on each other class
        d = x + 3 * 1
        predict2 = torch.tensor([[0, x / d, 1 / d, 1 / d, 1 / d],
                                 ], dtype=torch.float)
        # print(predict)
        return smooth_loss(predict2.log(),
                           torch.tensor([1], dtype=torch.long)).item()

    # Plot the loss against increasing confidence in the correct class
    plt.plot(np.arange(1, 100), [loss_sample(x) for x in range(1, 100)])
    plt.show()


if __name__ == '__main__':
    _test_label_smoothing()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/configs.py | labml_nn/transformers/configs.py | """
---
title: Configurable Transformer Components
summary: These are configurable components that can be re-used quite easily.
---
# Configurable Transformer Components
"""
import copy
import torch.nn as nn
from labml.configs import BaseConfigs, option, calculate, aggregate
from .feed_forward import FeedForward
from .mha import MultiHeadAttention
from .models import EmbeddingsWithPositionalEncoding, EmbeddingsWithLearnedPositionalEncoding, TransformerLayer, \
Encoder, Decoder, Generator, EncoderDecoder
class FeedForwardConfigs(BaseConfigs):
    """
    <a id="FFN"></a>

    ## FFN Configurations

    Creates a Position-wise FeedForward Network defined in
    [`feed_forward.py`](feed_forward.html).
    """
    # Position-wise feedforward layer
    ffn: FeedForward
    # Number of features in the embedding
    d_model: int
    # Number of features in the hidden layer
    d_ff: int = 2048
    # Dropout probability
    dropout: float = 0.1
    # Activation in position-wise feedforward layer
    activation: nn.Module = 'ReLU'
    # Whether the FFN layer should be gated
    is_gated: bool = False
    # Whether the first fully connected layer should have a learnable bias
    bias1: bool = True
    # Whether the second fully connected layer should have a learnable bias
    bias2: bool = True
    # Whether the fully connected layer for the gate should have a learnable bias
    bias_gate: bool = False
    # Predefined GLU variants
    glu_variant: str = 'none'
@option(FeedForwardConfigs.activation, 'ReLU')
def _ffn_activation_relu():
    """
    ### ReLU activation

    $$\max(0, x)$$
    """
    return nn.ReLU()


@option(FeedForwardConfigs.activation, 'GELU')
def _ffn_activation_gelu():
    """
    ### GELU activation

    $$x \Phi(x)$$ where $\Phi(x) = P(X \le x), X \sim \mathcal{N}(0,1)$

    It was introduced in paper [Gaussian Error Linear Units](https://arxiv.org/abs/1606.08415).
    """
    return nn.GELU()


@option(FeedForwardConfigs.ffn, 'default')
def _feed_forward(c: FeedForwardConfigs):
    """
    Initialize a [feed forward network](feed_forward.html) from the configuration values
    """
    return FeedForward(c.d_model, c.d_ff,
                       dropout=c.dropout,
                       activation=c.activation,
                       is_gated=c.is_gated,
                       bias1=c.bias1,
                       bias2=c.bias2,
                       bias_gate=c.bias_gate)
# ## GLU Variants
# These are variants with gated hidden layers for the FFN
# as introduced in paper [GLU Variants Improve Transformer](https://arxiv.org/abs/2002.05202).
# We have omitted the bias terms as specified in the paper.

# ### FFN with Gated Linear Units
#
# $$FFN_{GLU}(x, W_1, V, W_2) = (\sigma(x W_1) \otimes x V) W_2$$
aggregate(FeedForwardConfigs.glu_variant, 'GLU',
          (FeedForwardConfigs.is_gated, True),
          (FeedForwardConfigs.bias1, False),
          (FeedForwardConfigs.bias2, False),
          (FeedForwardConfigs.bias_gate, False),
          (FeedForwardConfigs.activation, nn.Sigmoid()))

# ### FFN with Bilinear hidden layer
#
# $$FFN_{Bilinear}(x, W_1, V, W_2) = (x W_1 \otimes x V) W_2$$
aggregate(FeedForwardConfigs.glu_variant, 'Bilinear',
          (FeedForwardConfigs.is_gated, True),
          (FeedForwardConfigs.bias1, False),
          (FeedForwardConfigs.bias2, False),
          (FeedForwardConfigs.bias_gate, False),
          (FeedForwardConfigs.activation, nn.Identity()))

# ### FFN with ReLU gate
#
# $$FFN_{ReGLU}(x, W_1, V, W_2) = (\max(0, x W_1) \otimes x V) W_2$$
aggregate(FeedForwardConfigs.glu_variant, 'ReGLU',
          (FeedForwardConfigs.is_gated, True),
          (FeedForwardConfigs.bias1, False),
          (FeedForwardConfigs.bias2, False),
          (FeedForwardConfigs.bias_gate, False),
          (FeedForwardConfigs.activation, nn.ReLU()))

# ### FFN with GELU gate
#
# $$FFN_{GEGLU}(x, W_1, V, W_2) = (\text{GELU}(x W_1) \otimes x V) W_2$$
aggregate(FeedForwardConfigs.glu_variant, 'GEGLU',
          (FeedForwardConfigs.is_gated, True),
          (FeedForwardConfigs.bias1, False),
          (FeedForwardConfigs.bias2, False),
          (FeedForwardConfigs.bias_gate, False),
          (FeedForwardConfigs.activation, nn.GELU()))

# ### FFN with Swish gate
#
# $$FFN_{SwiGLU}(x, W_1, V, W_2) = (\text{Swish}_1(x W_1) \otimes x V) W_2$$
# where $\text{Swish}_\beta(x) = x \sigma(\beta x)$
aggregate(FeedForwardConfigs.glu_variant, 'SwiGLU',
          (FeedForwardConfigs.is_gated, True),
          (FeedForwardConfigs.bias1, False),
          (FeedForwardConfigs.bias2, False),
          (FeedForwardConfigs.bias_gate, False),
          (FeedForwardConfigs.activation, nn.SiLU()))
class TransformerConfigs(BaseConfigs):
    """
    <a id="TransformerConfigs"></a>

    ## Transformer Configurations

    This defines configurations for a transformer.
    The configurations are calculated using option functions.
    These are lazy loaded and therefore only the necessary modules
    are calculated.
    """
    # Number of attention heads
    n_heads: int = 8
    # Transformer embedding size
    d_model: int = 512
    # Number of layers
    n_layers: int = 6
    # Dropout probability
    dropout: float = 0.1
    # Number of tokens in the source vocabulary (for token embeddings)
    n_src_vocab: int
    # Number of tokens in the target vocabulary (to generate logits for prediction)
    n_tgt_vocab: int

    # The encoder self attention
    encoder_attn: MultiHeadAttention = 'mha'
    # The decoder self attention
    decoder_attn: MultiHeadAttention = 'mha'
    # The decoder memory attention
    decoder_mem_attn: MultiHeadAttention = 'mha'
    # Configurable Feedforward Layer
    ffn: FeedForwardConfigs

    # Encoder layer
    encoder_layer: TransformerLayer = 'default'
    # Decoder layer
    decoder_layer: TransformerLayer = 'default'

    # Encoder consisting of multiple encoder layers
    encoder: Encoder = 'default'
    # Decoder consisting of multiple decoder layers
    decoder: Decoder = 'default'

    # Embedding layer for source
    src_embed: nn.Module = 'fixed_pos'
    # Embedding layer for target (for decoder)
    tgt_embed: nn.Module = 'fixed_pos'

    # Logit generator for prediction
    generator: Generator = 'default'

    # Encoder-decoder
    encoder_decoder: EncoderDecoder
# ### Multi-head Attention
def _mha(c: TransformerConfigs):
    """Create a standard [multi-head attention](mha.html) module"""
    return MultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)


# Register 'mha' as an option for all three attention slots
calculate(TransformerConfigs.encoder_attn, 'mha', _mha)
calculate(TransformerConfigs.decoder_attn, 'mha', _mha)
calculate(TransformerConfigs.decoder_mem_attn, 'mha', _mha)


# ### Relative Multi-head Attention
def _relative_mha(c: TransformerConfigs):
    """Create a [relative multi-head attention](xl/relative_mha.html) module (Transformer-XL style)"""
    from labml_nn.transformers.xl.relative_mha import RelativeMultiHeadAttention
    return RelativeMultiHeadAttention(c.n_heads, c.d_model)


# Register 'relative' as an option for all three attention slots
calculate(TransformerConfigs.encoder_attn, 'relative', _relative_mha)
calculate(TransformerConfigs.decoder_attn, 'relative', _relative_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'relative', _relative_mha)
@option(TransformerConfigs.ffn, 'default')
def _feed_forward(c: TransformerConfigs):
    """
    Create feedforward layer configurations, defaulting `d_model` and
    `dropout` to the transformer's values
    """
    conf = FeedForwardConfigs()
    conf.set_default(FeedForwardConfigs.d_model, func=lambda: c.d_model)
    conf.set_default(FeedForwardConfigs.dropout, func=lambda: c.dropout)
    return conf
@option(TransformerConfigs.encoder_layer, 'default')
def _encoder_layer(c: TransformerConfigs):
    """
    Encoder layer (self attention only; the FFN is deep-copied so the layer gets its own instance)
    """
    return TransformerLayer(d_model=c.d_model, self_attn=c.encoder_attn,
                            src_attn=None, feed_forward=copy.deepcopy(c.ffn.ffn),
                            dropout_prob=c.dropout)


@option(TransformerConfigs.decoder_layer, 'default')
def _decoder_layer(c: TransformerConfigs):
    """
    Decoder layer (self attention plus memory/cross attention)
    """
    return TransformerLayer(d_model=c.d_model, self_attn=c.decoder_attn,
                            src_attn=c.decoder_mem_attn, feed_forward=copy.deepcopy(c.ffn.ffn),
                            dropout_prob=c.dropout)
@option(TransformerConfigs.encoder, 'default')
def _encoder(c: TransformerConfigs):
    """
    Encoder: `n_layers` copies of the encoder layer
    """
    return Encoder(c.encoder_layer, c.n_layers)


@option(TransformerConfigs.decoder, 'default')
def _decoder(c: TransformerConfigs):
    """
    Decoder: `n_layers` copies of the decoder layer
    """
    return Decoder(c.decoder_layer, c.n_layers)


@option(TransformerConfigs.generator, 'default')
def _generator(c: TransformerConfigs):
    """
    Logit generator projecting to the target vocabulary
    """
    return Generator(c.n_tgt_vocab, c.d_model)
# ### Fixed Positional Embeddings

@option(TransformerConfigs.src_embed, 'fixed_pos')
def _src_embed_with_positional(c: TransformerConfigs):
    """
    Source embedding with fixed (sinusoidal) positional encodings
    """
    return EmbeddingsWithPositionalEncoding(c.d_model, c.n_src_vocab)


@option(TransformerConfigs.tgt_embed, 'fixed_pos')
def _tgt_embed_with_positional(c: TransformerConfigs):
    """
    Target embedding with fixed (sinusoidal) positional encodings
    """
    return EmbeddingsWithPositionalEncoding(c.d_model, c.n_tgt_vocab)


# ### Learned Positional Embeddings

@option(TransformerConfigs.src_embed, 'learned_pos')
def _src_embed_with_learned_positional(c: TransformerConfigs):
    """
    Source embedding with learned positional encodings
    """
    return EmbeddingsWithLearnedPositionalEncoding(c.d_model, c.n_src_vocab)


@option(TransformerConfigs.tgt_embed, 'learned_pos')
def _tgt_embed_with_learned_positional(c: TransformerConfigs):
    """
    Target embedding with learned positional encodings
    """
    return EmbeddingsWithLearnedPositionalEncoding(c.d_model, c.n_tgt_vocab)
# ### No Positional Embeddings

@option(TransformerConfigs.src_embed, 'no_pos')
def _src_embed_without_positional(c: TransformerConfigs):
    """
    Source embedding without positional encodings
    """
    return nn.Embedding(c.n_src_vocab, c.d_model)


@option(TransformerConfigs.tgt_embed, 'no_pos')
def _tgt_embed_without_positional(c: TransformerConfigs):
    """
    Target embedding without positional encodings
    """
    return nn.Embedding(c.n_tgt_vocab, c.d_model)


@option(TransformerConfigs.encoder_decoder, 'default')
def _encoder_decoder(c: TransformerConfigs):
    """
    Assemble the full [encoder-decoder](models.html#EncoderDecoder) from the configured parts
    """
    return EncoderDecoder(c.encoder, c.decoder, c.src_embed, c.tgt_embed, c.generator)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/positional_encoding.py | labml_nn/transformers/positional_encoding.py | """
---
title: Fixed Positional Encodings
summary: >
Implementation with explanation of fixed positional encodings as
described in paper Attention is All You Need.
---
# Fixed Positional Encodings
The positional encoding encodes the position along the sequence into
a vector of size `d_model`.
\begin{align}
PE_{p,2i} &= sin\Bigg(\frac{p}{10000^{\frac{2i}{d_{model}}}}\Bigg) \\
PE_{p,2i + 1} &= cos\Bigg(\frac{p}{10000^{\frac{2i}{d_{model}}}}\Bigg)
\end{align}
Where $1 \leq 2i, 2i + 1 \leq d_{model}$
are the feature indexes in the encoding, and $p$ is the position.
"""
import math
import numpy as np
import torch
import torch.nn as nn
class PositionalEncoding(nn.Module):
    """
    Adds fixed sinusoidal positional encodings to its input and applies dropout.
    """

    def __init__(self, d_model: int, dropout_prob: float, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(dropout_prob)
        # Pre-computed encodings, registered as a non-persistent buffer
        self.register_buffer('positional_encodings', get_positional_encoding(d_model, max_len), False)

    def forward(self, x: torch.Tensor):
        # Encodings for the first `seq_len` positions; detached so no gradients flow
        pe = self.positional_encodings[:x.shape[0]].detach().requires_grad_(False)
        return self.dropout(x + pe)
def get_positional_encoding(d_model: int, max_len: int = 5000):
    """
    Build the sinusoidal positional-encoding table.

    Returns a `(max_len, 1, d_model)` tensor where even features hold
    $\sin(p / 10000^{2i/d_{model}})$ and odd features the matching cosine.
    """
    # Position indexes $p$, as a column vector
    position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
    # Even feature indexes $2i$
    two_i = torch.arange(0, d_model, 2, dtype=torch.float32)
    # $1 / 10000^{2i / d_{model}}$
    div_term = torch.exp(two_i * -(math.log(10000.0) / d_model))
    # Arguments to the sin/cos, one row per position
    angles = position * div_term
    # Interleave: sines on even features, cosines on odd features
    encodings = torch.zeros(max_len, d_model)
    encodings[:, 0::2] = torch.sin(angles)
    encodings[:, 1::2] = torch.cos(angles)
    # Add a singleton batch dimension and make sure no gradients flow
    return encodings.unsqueeze(1).requires_grad_(False)
def _test_positional_encoding():
    """
    Plot a few feature dimensions of the positional encodings for visual inspection
    """
    import matplotlib.pyplot as plt

    plt.figure(figsize=(15, 5))
    pe = get_positional_encoding(20, 100)
    plt.plot(np.arange(100), pe[:, 0, 4:8].numpy())
    plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
    plt.title("Positional encoding")
    plt.show()


if __name__ == '__main__':
    _test_positional_encoding()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/relative_mha.py | labml_nn/transformers/relative_mha.py | """
---
title: Relative Multi-Headed Attention
summary: Relative Multi-Headed Attention from paper Transformer-XL.
redirect: https://nn.labml.ai/transformers/xl/relative_mha.html
---
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/utils.py | labml_nn/transformers/utils.py | """
---
title: Utilities for Transformer
summary: A bunch of utility functions and classes for transformers.
---
# Utilities for Transformer
"""
import torch
def subsequent_mask(seq_len):
    """
    ## Subsequent mask to mask out data from future (subsequent) time steps

    Returns a `(seq_len, seq_len, 1)` boolean tensor where entry `[i, j, 0]`
    is `True` iff position `i` may attend to position `j` (i.e. `j <= i`).
    """
    # Lower-triangular matrix of ones: row `i` has ones up to column `i`
    lower_triangular = torch.tril(torch.ones(seq_len, seq_len))
    # Convert to boolean and append a trailing singleton dimension
    return lower_triangular.to(torch.bool).unsqueeze(-1)
def _subsequent_mask():
    # Quick visual check of the mask for a sequence of length 10
    from labml.logger import inspect
    inspect(subsequent_mask(10)[:, :, 0])


if __name__ == '__main__':
    _subsequent_mask()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/__init__.py | labml_nn/transformers/__init__.py | """
---
title: Transformers
summary: >
This is a collection of PyTorch implementations/tutorials of
transformers and related techniques.
---
# Transformers
This module contains [PyTorch](https://pytorch.org/)
implementations and explanations of original transformer
from paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762),
and derivatives and enhancements of it.
* [Multi-head attention](mha.html)
* [Transformer Encoder and Decoder Models](models.html)
* [Position-wise Feed Forward Network (FFN)](feed_forward.html)
* [Fixed positional encoding](positional_encoding.html)
## [Transformer XL](xl/index.html)
This implements Transformer XL model using
[relative multi-head attention](xl/relative_mha.html)
## [Rotary Positional Embeddings](rope/index.html)
This implements Rotary Positional Embeddings (RoPE)
## [Attention with Linear Biases](alibi/index.html)
This implements Attention with Linear Biases (ALiBi).
## [RETRO](retro/index.html)
This implements the Retrieval-Enhanced Transformer (RETRO).
## [Compressive Transformer](compressive/index.html)
This is an implementation of compressive transformer
that extends upon [Transformer XL](xl/index.html) by compressing
the oldest memories to give a longer attention span.
## [GPT Architecture](gpt/index.html)
This is an implementation of GPT-2 architecture.
## [GLU Variants](glu_variants/simple.html)
This is an implementation of the paper
[GLU Variants Improve Transformer](https://arxiv.org/abs/2002.05202).
## [kNN-LM](knn/index.html)
This is an implementation of the paper
[Generalization through Memorization: Nearest Neighbor Language Models](https://arxiv.org/abs/1911.00172).
## [Feedback Transformer](feedback/index.html)
This is an implementation of the paper
[Accessing Higher-level Representations in Sequential Transformers with Feedback Memory](https://arxiv.org/abs/2002.09402).
## [Switch Transformer](switch/index.html)
This is a miniature implementation of the paper
[Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961).
Our implementation only has a few million parameters and doesn't do model parallel distributed training.
It does single GPU training but we implement the concept of switching as described in the paper.
## [Fast Weights Transformer](fast_weights/index.html)
This is an implementation of the paper
[Linear Transformers Are Secretly Fast Weight Memory Systems in PyTorch](https://arxiv.org/abs/2102.11174).
## [FNet: Mixing Tokens with Fourier Transforms](fnet/index.html)
This is an implementation of the paper
[FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824).
## [Attention Free Transformer](aft/index.html)
This is an implementation of the paper
[An Attention Free Transformer](https://arxiv.org/abs/2105.14103).
## [Masked Language Model](mlm/index.html)
This is an implementation of Masked Language Model used for pre-training in paper
[BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).
## [MLP-Mixer: An all-MLP Architecture for Vision](mlp_mixer/index.html)
This is an implementation of the paper
[MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601).
## [Pay Attention to MLPs (gMLP)](gmlp/index.html)
This is an implementation of the paper
[Pay Attention to MLPs](https://arxiv.org/abs/2105.08050).
## [Vision Transformer (ViT)](vit/index.html)
This is an implementation of the paper
[An Image Is Worth 16x16 Words: Transformers For Image Recognition At Scale](https://arxiv.org/abs/2010.11929).
## [Primer EZ](primer_ez/index.html)
This is an implementation of the paper
[Primer: Searching for Efficient Transformers for Language Modeling](https://arxiv.org/abs/2109.08668).
## [Hourglass](hour_glass/index.html)
This is an implementation of the paper
[Hierarchical Transformers Are More Efficient Language Models](https://arxiv.org/abs/2110.13711)
"""
from .configs import TransformerConfigs
from .models import TransformerLayer, Encoder, Decoder, Generator, EncoderDecoder
from .mha import MultiHeadAttention
from labml_nn.transformers.xl.relative_mha import RelativeMultiHeadAttention
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/mha.py | labml_nn/transformers/mha.py | """
---
title: Multi-Headed Attention (MHA)
summary: >
This implements the Multi-Headed Attention used in transformers
using PyTorch with explanations.
---
# Multi-Headed Attention (MHA)
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/basic/autoregressive_experiment.ipynb)
This is a tutorial/implementation of multi-headed attention
from paper [Attention Is All You Need](https://arxiv.org/abs/1706.03762)
in [PyTorch](https://pytorch.org/).
The implementation is inspired from [Annotated Transformer](https://nlp.seas.harvard.edu/2018/04/03/attention.html).
Here is the [training code](basic/autoregressive_experiment.html) that uses a basic transformer
with MHA for NLP auto-regression.
[Here is an experiment implementation](basic/autoregressive_experiment.html) that trains a simple transformer.
"""
import math
from typing import Optional, List
import torch
from torch import nn
from labml import tracker
class PrepareForMultiHeadAttention(nn.Module):
    """
    <a id="PrepareMHA"></a>

    ## Prepare for multi-head attention

    Applies a learned linear projection and reshapes the result so that the
    last dimension is split into `heads` separate vectors of `d_k` features each.
    This is used to produce the per-head **key**, **query**, and **value** tensors.
    """

    def __init__(self, d_model: int, heads: int, d_k: int, bias: bool):
        super().__init__()
        # A single projection that covers all heads at once
        self.linear = nn.Linear(d_model, heads * d_k, bias=bias)
        # Number of attention heads
        self.heads = heads
        # Number of features per head
        self.d_k = d_k

    def forward(self, x: torch.Tensor):
        # `x` is `[seq_len, batch_size, d_model]` or `[batch_size, d_model]`;
        # all leading dimensions are preserved as-is.
        lead_dims = x.shape[:-1]

        # Project the feature dimension for all heads with one matmul
        projected = self.linear(x)

        # Split the projected features into per-head chunks.
        # Output is `[seq_len, batch_size, heads, d_k]` or `[batch_size, heads, d_k]`.
        return projected.view(*lead_dims, self.heads, self.d_k)
class MultiHeadAttention(nn.Module):
    r"""
    <a id="MHA"></a>

    ## Multi-Head Attention Module

    This computes scaled multi-headed attention for given `query`, `key` and `value` vectors.

    $$\mathop{Attention}(Q, K, V) = \underset{seq}{\mathop{softmax}}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)V$$

    In simple terms, it finds keys that match the query, and gets the values of
    those keys.

    It uses dot-product of query and key as the indicator of how matching they are.
    Before taking the $softmax$ the dot-products are scaled by $\frac{1}{\sqrt{d_k}}$.
    This is done to avoid large dot-product values causing softmax to
    give very small gradients when $d_k$ is large.

    Softmax is calculated along the axis of the sequence (or time).
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1, bias: bool = True):
        """
        * `heads` is the number of heads.
        * `d_model` is the number of features in the `query`, `key` and `value` vectors.
        * `dropout_prob` is the dropout probability applied to the attention weights.
        * `bias` controls whether the query and key projections have a learnable bias.
        """
        super().__init__()

        # Number of features per head
        self.d_k = d_model // heads
        # Number of heads
        self.heads = heads

        # These transform the `query`, `key` and `value` vectors for multi-headed attention.
        # Note that the value projection always uses a bias, regardless of `bias`.
        self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias)
        self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=bias)
        self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=True)

        # Softmax for attention along the time dimension of `key`.
        # `dim=1` is the key-sequence dimension of the
        # `[seq_len_q, seq_len_k, batch_size, heads]` scores tensor.
        self.softmax = nn.Softmax(dim=1)

        # Output layer
        self.output = nn.Linear(d_model, d_model)
        # Dropout applied to the attention probabilities
        self.dropout = nn.Dropout(dropout_prob)
        # Scaling factor $\frac{1}{\sqrt{d_k}}$ applied before the softmax
        self.scale = 1 / math.sqrt(self.d_k)

        # We store attentions so that it can be used for logging, or other computations if needed
        self.attn = None

    def get_scores(self, query: torch.Tensor, key: torch.Tensor):
        """
        ### Calculate scores between queries and keys

        This method can be overridden for other variations like relative attention.
        """

        # Calculate $Q K^\top$ or $S_{ijbh} = \sum_d Q_{ibhd} K_{jbhd}$
        return torch.einsum('ibhd,jbhd->ijbh', query, key)

    def prepare_mask(self, mask: torch.Tensor, query_shape: List[int], key_shape: List[int]):
        """
        `mask` has shape `[seq_len_q, seq_len_k, batch_size]`, where first dimension is the query dimension.
        If the query dimension is equal to $1$ it will be broadcasted.
        """
        # Query dimension must either match or be broadcastable (size 1)
        assert mask.shape[0] == 1 or mask.shape[0] == query_shape[0]
        # Key dimension must match exactly
        assert mask.shape[1] == key_shape[0]
        # Batch dimension must either match or be broadcastable (size 1)
        assert mask.shape[2] == 1 or mask.shape[2] == query_shape[1]

        # Same mask applied to all heads.
        mask = mask.unsqueeze(-1)

        # resulting mask has shape `[seq_len_q, seq_len_k, batch_size, heads]`
        return mask

    def forward(self, *,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: Optional[torch.Tensor] = None):
        """
        `query`, `key` and `value` are the tensors that store
        collection of *query*, *key* and *value* vectors.
        They have shape `[seq_len, batch_size, d_model]`.

        `mask` has shape `[seq_len, seq_len, batch_size]` and
        `mask[i, j, b]` indicates whether for batch `b`,
        query at position `i` has access to key-value at position `j`.
        """

        # `query`, `key` and `value` have shape `[seq_len, batch_size, d_model]`
        seq_len, batch_size, _ = query.shape

        if mask is not None:
            mask = self.prepare_mask(mask, query.shape, key.shape)

        # Prepare `query`, `key` and `value` for attention computation.
        # These will then have shape `[seq_len, batch_size, heads, d_k]`.
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)

        # Compute attention scores $Q K^\top$.
        # This gives a tensor of shape `[seq_len, seq_len, batch_size, heads]`.
        scores = self.get_scores(query, key)

        # Scale scores $\frac{Q K^\top}{\sqrt{d_k}}$
        scores *= self.scale

        # Masked-out positions get $-\infty$ so they become $0$ after softmax
        if mask is not None:
            scores = scores.masked_fill(mask == 0, float('-inf'))

        # $softmax$ attention along the key sequence dimension
        # $\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)$
        attn = self.softmax(scores)

        # Save attentions if debugging
        tracker.debug('attn', attn)

        # Apply dropout
        attn = self.dropout(attn)

        # Multiply by values
        # $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)V$$
        x = torch.einsum("ijbh,jbhd->ibhd", attn, value)

        # Save attentions for any other calculations
        self.attn = attn.detach()

        # Concatenate multiple heads
        x = x.reshape(seq_len, batch_size, -1)

        # Output layer
        return self.output(x)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/feed_forward.py | labml_nn/transformers/feed_forward.py | """
---
title: Position-wise Feed-Forward Network (FFN)
summary: Documented reusable implementation of the position wise feedforward network.
---
# Position-wise Feed-Forward Network (FFN)
This is a [PyTorch](https://pytorch.org) implementation
of position-wise feedforward network used in transformer.
FFN consists of two fully connected layers.
Number of dimensions in the hidden layer $d_{ff}$, is generally set to around
four times that of the token embedding $d_{model}$.
So it is sometimes also called the expand-and-contract network.
There is an activation at the hidden layer, which is
usually set to ReLU (Rectified Linear Unit) activation, $$\max(0, x)$$
That is, the FFN function is,
$$FFN(x, W_1, W_2, b_1, b_2) = \max(0, x W_1 + b_1) W_2 + b_2$$
where $W_1$, $W_2$, $b_1$ and $b_2$ are learnable parameters.
Sometimes the
GELU (Gaussian Error Linear Unit) activation is also used instead of ReLU.
$$x \Phi(x)$$ where $\Phi(x) = P(X \le x), X \sim \mathcal{N}(0,1)$
### Gated Linear Units
This is a generic implementation that supports different variants including
[Gated Linear Units](https://arxiv.org/abs/2002.05202) (GLU).
We have also implemented experiments on these:
* [experiment that uses `labml.configs`](glu_variants/experiment.html)
* [simpler version from scratch](glu_variants/simple.html)
"""
import torch
from torch import nn
class FeedForward(nn.Module):
    """
    ## FFN module

    Position-wise feed-forward network with optional gating (GLU variants).
    """

    def __init__(self, d_model: int, d_ff: int,
                 dropout: float = 0.1,
                 activation=nn.ReLU(),
                 is_gated: bool = False,
                 bias1: bool = True,
                 bias2: bool = True,
                 bias_gate: bool = True):
        """
        * `d_model` is the number of features in a token embedding
        * `d_ff` is the number of features in the hidden layer of the FFN
        * `dropout` is dropout probability for the hidden layer
        * `activation` is the hidden layer activation function $f$
        * `is_gated` specifies whether the hidden layer is gated
        * `bias1` specifies whether the first fully connected layer should have a learnable bias
        * `bias2` specifies whether the second fully connected layer should have a learnable bias
        * `bias_gate` specifies whether the fully connected layer for the gate should have a learnable bias
        """
        super().__init__()
        # Expansion layer, parameterized by weight $W_1$ and bias $b_1$
        self.layer1 = nn.Linear(d_model, d_ff, bias=bias1)
        # Contraction layer, parameterized by weight $W_2$ and bias $b_2$
        self.layer2 = nn.Linear(d_ff, d_model, bias=bias2)
        # Dropout applied to the hidden layer
        self.dropout = nn.Dropout(dropout)
        # Hidden layer activation function $f$
        self.activation = activation
        # Whether the hidden layer is gated
        self.is_gated = is_gated
        if is_gated:
            # Gate projection, parameterized by weight $V$ and bias $c$;
            # its output multiplies the activated hidden layer.
            self.linear_v = nn.Linear(d_model, d_ff, bias=bias_gate)

    def forward(self, x: torch.Tensor):
        # Hidden activation $f(x W_1 + b_1)$
        hidden = self.activation(self.layer1(x))
        # If gated, multiply by the gate projection: $f(x W_1 + b_1) \otimes (x V + c)$
        if self.is_gated:
            hidden = hidden * self.linear_v(x)
        # Dropout on the hidden representation
        hidden = self.dropout(hidden)
        # Contract back to `d_model` features:
        # $(\dots) W_2 + b_2$
        return self.layer2(hidden)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/primer_ez/experiment.py | labml_nn/transformers/primer_ez/experiment.py | """
---
title: Primer EZ experiment
summary: This experiment trains Primer EZ on Tiny Shakespeare dataset.
---
# [Primer EZ](index.html) Experiment
This is an annotated PyTorch experiment to train a [Primer EZ transformer](index.html).
This is based on our [vanilla transformer experiment](../basic/experiment.html).
We use the same experiment and add the Primer EZ modifications.
"""
from labml import experiment
from labml.configs import option
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.basic.autoregressive_experiment import Configs
from labml_nn.transformers.configs import FeedForwardConfigs
from labml_nn.transformers.primer_ez import SquaredReLU
@option(FeedForwardConfigs.activation, 'SquaredReLU')
def _squared_relu():
    """
    Register the [**squared ReLU**](index.html) activation as an
    [option](https://docs.labml.ai/api/configs.html#labml.configs.option)
    for the [configurable](../configs.html#FFN)
    [feed forward module](../feed_forward.html).
    """
    activation = SquaredReLU()
    return activation
@option(TransformerConfigs.encoder_attn, 'MultiDConvHeadAttention')
def _d_conv_mha(c: TransformerConfigs):
    """
    Register [**Multi-DConv-Head Attention**](index.html) as an
    [option](https://docs.labml.ai/api/configs.html#labml.configs.option)
    for the [configurable transformer](../configs.html#TransformerConfigs).
    """
    from labml_nn.transformers.primer_ez import MultiDConvHeadAttention

    attn = MultiDConvHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)
    return attn
@option(TransformerConfigs.encoder_attn, 'MultiDSharedConvHeadAttention')
def _d_shared_conv_mha(c: TransformerConfigs):
    """
    Register [**Multi Depth-wise Shared Conv Head Attention**](variations.html) as an
    [option](https://docs.labml.ai/api/configs.html#labml.configs.option)
    for the [configurable transformer](../configs.html#TransformerConfigs).

    📝 *This is a variation we tried*
    """
    from labml_nn.transformers.primer_ez.variations import MultiDSharedConvHeadAttention

    attn = MultiDSharedConvHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)
    return attn
@option(TransformerConfigs.encoder_attn, 'MultiDPHConvHeadAttention')
def _d_per_head_conv_mha(c: TransformerConfigs):
    """
    Register [**Multi Depth-wise Per Head Conv Head Attention**](variations.html) as an
    [option](https://docs.labml.ai/api/configs.html#labml.configs.option)
    for the [configurable transformer](../configs.html#TransformerConfigs).

    📝 *This is a variation we tried*
    """
    from labml_nn.transformers.primer_ez.variations import MultiDPHConvHeadAttention

    attn = MultiDPHConvHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)
    return attn
def main():
    """Create, configure, and run the Primer EZ training experiment on Tiny Shakespeare."""
    # Create experiment
    experiment.create(name="primer_ez")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',

        # Use a context size of $256$
        'seq_len': 256,
        # Train for $128$ epochs
        'epochs': 128,
        # Batch size $32$
        'batch_size': 32,
        # Switch between training and validation for $10$ times
        # per epoch
        'inner_iterations': 10,

        # Model size
        'd_model': 512,
        'transformer.ffn.d_ff': 2048,

        # Use Adam optimizer
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,

        # ⭐️ Use [**squared ReLU**](index.html) activation in the feed forward network.
        #
        # *Replace this with `ReLU` for $ReLU$.*
        'transformer.ffn.activation': 'SquaredReLU',

        # ⭐️ Use [**Multi-DConv-Head Attention**](index.html) for encoder attention.
        #
        # *Replace this with `mha` for original multi-head attention.*
        'transformer.encoder_attn': 'MultiDConvHeadAttention',
    })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/primer_ez/variations.py | labml_nn/transformers/primer_ez/variations.py | """
---
title: Primer EZ variations
summary: We tried some variations to Primer EZ.
---
# [Primer EZ](index.html) Variations
We tried some variations to see which changes in Primer EZ has most benefits.
"""
import torch
from torch import nn
from labml_nn.transformers import MultiHeadAttention
class SpatialDepthWiseSharedConvolution(nn.Module):
    """
    ## Spatial Depth Wise Shared Convolution

    A single 1D convolution kernel is shared across all channels of all heads.
    """

    def __init__(self, kernel_size: int = 3):
        """
        * `kernel_size` is the size of the shared 1D kernel
        """
        super().__init__()
        self.kernel_size = kernel_size
        # Single-channel `Conv1d`, so the same kernel is applied to every channel.
        # Both sides are padded; the right-most `kernel_size - 1` outputs
        # are cropped in `forward` so the output length equals the input length.
        self.conv = nn.Conv1d(in_channels=1, out_channels=1, kernel_size=(kernel_size,), padding=(kernel_size - 1,))

    def forward(self, x: torch.Tensor):
        """
        * `x` has shape `[seq_len, batch_size, heads, d_k]`
        """
        seq_len, batch_size, heads, d_k = x.shape

        # Move the sequence axis last: `[batch_size, heads, d_k, seq_len]`
        out = x.permute(1, 2, 3, 0)
        # Treat every (batch, head, channel) triple as an independent
        # single-channel sequence: `[batch_size * heads * d_k, 1, seq_len]`
        out = out.view(batch_size * heads * d_k, 1, seq_len)

        # `Conv1d` expects input of the form `[N, channels, sequence]`
        out = self.conv(out)
        # Crop the right-most `kernel_size - 1` outputs since both sides were padded
        out = out[:, :, :-(self.kernel_size - 1)]

        # Restore `[batch_size, heads, d_k, seq_len]`
        out = out.view(batch_size, heads, d_k, seq_len)
        # Put the sequence axis back first: `[seq_len, batch_size, heads, d_k]`
        return out.permute(3, 0, 1, 2)
class MultiDSharedConvHeadAttention(MultiHeadAttention):
    """
    ## Multi-Depth-wise-Shared-Conv-Head Attention

    Extends our original [Multi-Head Attention](../mha.html#MHA) by appending a
    spatial depth-wise *shared* convolution to each of the query, key and value
    projections.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):
        super().__init__(heads, d_model, dropout_prob)
        # The base class created the `self.query`, `self.key` and `self.value`
        # projection modules; wrap each of them with a shared depth-wise
        # convolution and assign the wrapped module back.
        for name in ('query', 'key', 'value'):
            projection = getattr(self, name)
            setattr(self, name, nn.Sequential(projection, SpatialDepthWiseSharedConvolution()))
class SpatialDepthWisePerHeadConvolution(nn.Module):
    """
    ## Spatial Depth Wise Per Head Convolution

    Every channel of every head gets its own 1D convolution kernel.
    """

    def __init__(self, heads: int, d_k: int, kernel_size: int = 3):
        """
        * `heads` is the number of heads
        * `d_k` is the number of channels in each head
        * `kernel_size` is the size of each 1D kernel
        """
        super().__init__()
        self.kernel_size = kernel_size
        # `Conv1d` with `groups` equal to the total channel count (`d_k * heads`),
        # so each channel of each head is convolved with its own kernel.
        # Both sides are padded; the right-most `kernel_size - 1` outputs
        # are cropped in `forward`.
        self.conv = nn.Conv1d(in_channels=d_k * heads, out_channels=d_k * heads,
                              kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=d_k * heads)

    def forward(self, x: torch.Tensor):
        """
        * `x` has shape `[seq_len, batch_size, heads, d_k]`
        """
        seq_len, batch_size, heads, d_k = x.shape

        # Move the sequence axis last: `[batch_size, heads, d_k, seq_len]`
        out = x.permute(1, 2, 3, 0)
        # Merge the head and channel axes: `[batch_size, heads * d_k, seq_len]`
        out = out.view(batch_size, heads * d_k, seq_len)

        # `Conv1d` expects input of the form `[N, channels, sequence]`
        out = self.conv(out)
        # Crop the right-most `kernel_size - 1` outputs since both sides were padded
        out = out[:, :, :-(self.kernel_size - 1)]

        # Split the channel axis back into heads: `[batch_size, heads, d_k, seq_len]`
        out = out.view(batch_size, heads, d_k, seq_len)
        # Put the sequence axis back first: `[seq_len, batch_size, heads, d_k]`
        return out.permute(3, 0, 1, 2)
class MultiDPHConvHeadAttention(MultiHeadAttention):
    """
    ## Multi-per-Head-Depth-wise-Conv-Head Attention

    Extends our original [Multi-Head Attention](../mha.html#MHA) by appending a
    spatial per-head depth-wise convolution to each of the query, key and value
    projections.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):
        super().__init__(heads, d_model, dropout_prob)
        # The base class created the `self.query`, `self.key` and `self.value`
        # projection modules; wrap each of them with a per-head depth-wise
        # convolution and assign the wrapped module back.
        for name in ('query', 'key', 'value'):
            projection = getattr(self, name)
            setattr(self, name, nn.Sequential(projection, SpatialDepthWisePerHeadConvolution(heads, self.d_k)))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/primer_ez/efficient.py | labml_nn/transformers/primer_ez/efficient.py | import math
import torch
from torch import nn
from labml_nn.transformers import MultiHeadAttention
class SpatialDepthWiseConvolution(nn.Module):
    """
    ## Spatial Depth Wise Convolution

    An alternative formulation that computes the causal depth-wise convolution
    with explicit shifted multiplications instead of `Conv1d`.
    (In practice this is actually slower.)
    """

    def __init__(self, d_k: int, kernel_size: int = 3):
        """
        * `d_k` is the number of channels in each head
        * `kernel_size` is the number of kernel taps per channel
        """
        super().__init__()
        self.kernel_size = kernel_size
        # One kernel weight per (tap, channel) pair, initialized uniformly in
        # $[-1/\sqrt{kernel\_size}, 1/\sqrt{kernel\_size}]$.
        # NOTE: unlike the `Conv1d`-based variant, this one has no bias term.
        rng = 1 / math.sqrt(kernel_size)
        self.kernels = nn.Parameter(torch.zeros((kernel_size, d_k)).uniform_(-rng, rng))

    def forward(self, x: torch.Tensor):
        """
        * `x` has shape `[seq_len, batch_size, heads, d_k]`
        """
        # Contribution of the current position (kernel tap $0$)
        res = x * self.kernels[0].view(1, 1, 1, -1)
        # Tap $t$ multiplies the input shifted $t$ steps back, so every output
        # position only depends on earlier (or current) positions — i.e. causal.
        for t in range(1, self.kernel_size):
            res[t:] += x[:-t] * self.kernels[t].view(1, 1, 1, -1)

        return res
class MultiDConvHeadAttention(MultiHeadAttention):
    """
    ## Multi-DConv-Head Attention (MDHA)

    Extends our original [Multi-Head Attention](../mha.html#MHA) by appending the
    (explicit, slower) spatial depth-wise convolution to the query, key and value
    projections.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):
        super().__init__(heads, d_model, dropout_prob)
        # The base class created the `self.query`, `self.key` and `self.value`
        # projection modules; wrap each of them with a depth-wise convolution
        # and assign the wrapped module back.
        for name in ('query', 'key', 'value'):
            projection = getattr(self, name)
            setattr(self, name, nn.Sequential(projection, SpatialDepthWiseConvolution(self.d_k)))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/primer_ez/__init__.py | labml_nn/transformers/primer_ez/__init__.py | """
---
title: "Primer: Searching for Efficient Transformers for Language Modeling"
summary: >
This is an annotated implementation/tutorial of
Primer: Searching for Efficient Transformers for Language Modeling in PyTorch.
---
# Primer: Searching for Efficient Transformers for Language Modeling
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Primer: Searching for Efficient Transformers for Language Modeling](https://arxiv.org/abs/2109.08668).
The authors do an evolutionary search for transformer architectures.
They name the architecture found using the search Primer (PRIMitives searched transformER).
**Primer EZ** is the architecture with the two most robust modifications in Primer compared to
the original transformer.
Primer EZ trains a lot faster than the vanilla transformer.
### Squared ReLU
The most effective modification found by the search is using a square ReLU instead of ReLU in
the [position-wise feedforward module](../feed_forward.html).
$$y = {\max(x, 0)}^2$$
### Multi-DConv-Head Attention (MDHA)
The next effective modification is a depth-wise $3 \times 1$ convolution after multi-head projection
for queries, keys, and values.
The convolution is along the sequence dimension and per channel (depth-wise).
To be clear, if the number of channels in each head is $d_k$ the convolution will have $1 \times 3$
kernels for each of the $d_k$ channels.
[Here is the experiment code](experiment.html), for Primer EZ.
"""
import torch
from torch import nn
from labml_nn.transformers import MultiHeadAttention
class SquaredReLU(nn.Module):
    """
    ## Squared ReLU activation

    $$y = {\max(x, 0)}^2$$

    Squared ReLU is used as the activation function in the
    [position wise feedforward module](../feed_forward.html).
    """

    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()

    def forward(self, x: torch.Tensor):
        # Clamp negatives to zero, then square the rectified values
        rectified = self.relu(x)
        return rectified * rectified
class SpatialDepthWiseConvolution(nn.Module):
    """
    ## Spatial Depth Wise Convolution
    """

    def __init__(self, d_k: int, kernel_size: int = 3):
        """
        * `d_k` is the number of channels in each head
        * `kernel_size` is the size of each 1D kernel
        """
        super().__init__()
        self.kernel_size = kernel_size
        # Depth-wise `Conv1d`: `groups` equals the channel count, so every
        # channel is convolved with its own kernel.
        # Both sides are padded; the right-most `kernel_size - 1` outputs
        # are cropped in `forward` so the output length equals the input length.
        self.conv = nn.Conv1d(in_channels=d_k, out_channels=d_k,
                              kernel_size=(kernel_size,), padding=(kernel_size - 1,), groups=d_k)

    def forward(self, x: torch.Tensor):
        """
        * `x` has shape `[seq_len, batch_size, heads, d_k]`
        """
        seq_len, batch_size, heads, d_k = x.shape

        # Move the sequence axis last: `[batch_size, heads, d_k, seq_len]`
        out = x.permute(1, 2, 3, 0)
        # Fold heads into the batch axis: `[batch_size * heads, d_k, seq_len]`
        out = out.view(batch_size * heads, d_k, seq_len)

        # `Conv1d` expects input of the form `[N, channels, sequence]`
        out = self.conv(out)
        # Crop the right-most `kernel_size - 1` outputs since both sides were padded
        out = out[:, :, :-(self.kernel_size - 1)]

        # Unfold the heads: `[batch_size, heads, d_k, seq_len]`
        out = out.view(batch_size, heads, d_k, seq_len)
        # Put the sequence axis back first: `[seq_len, batch_size, heads, d_k]`
        return out.permute(3, 0, 1, 2)
class MultiDConvHeadAttention(MultiHeadAttention):
    """
    ## Multi-DConv-Head Attention (MDHA)

    Extends our original [Multi-Head Attention](../mha.html#MHA) by appending a
    spatial depth-wise convolution to each of the query, key and value projections.

    📝 *We feel this cleaner implementation is easier to understand since it clearly shows the difference
    between this and vanilla transformer multi-head attention*.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):
        super().__init__(heads, d_model, dropout_prob)
        # The base class created the `self.query`, `self.key` and `self.value`
        # projection modules; wrap each of them with a depth-wise convolution
        # and assign the wrapped module back.
        for name in ('query', 'key', 'value'):
            projection = getattr(self, name)
            setattr(self, name, nn.Sequential(projection, SpatialDepthWiseConvolution(self.d_k)))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/vit/experiment.py | labml_nn/transformers/vit/experiment.py | """
---
title: Train a Vision Transformer (ViT) on CIFAR 10
summary: >
Train a Vision Transformer (ViT) on CIFAR 10
---
# Train a [Vision Transformer (ViT)](index.html) on CIFAR 10
"""
from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs
from labml_nn.transformers import TransformerConfigs
class Configs(CIFAR10Configs):
    """
    ## Configurations

    We use [`CIFAR10Configs`](../../experiments/cifar10.html) which defines all the
    dataset related configurations, optimizer, and a training loop.
    """

    # [Transformer configurations](../configs.html#TransformerConfigs)
    # to get [transformer layer](../models.html#TransformerLayer)
    transformer: TransformerConfigs

    # Size of a (square) patch that the image is split into
    patch_size: int = 4
    # Size of the hidden layer in the MLP classification head
    n_hidden_classification: int = 2048
    # Number of classes in the classification task ($10$ for CIFAR-10)
    n_classes: int = 10
@option(Configs.transformer)
def _transformer():
    """
    Create the default transformer configs for the experiment.
    """
    configs = TransformerConfigs()
    return configs
@option(Configs.model)
def _vit(c: Configs):
    """
    ### Create model

    Builds a [Vision Transformer](index.html) from the experiment configurations.
    """
    from labml_nn.transformers.vit import VisionTransformer, LearnedPositionalEmbeddings, ClassificationHead, \
        PatchEmbeddings

    # Embedding size from the [Transformer configurations](../configs.html#TransformerConfigs)
    embed_dim = c.transformer.d_model

    # Build the components of the vision transformer
    patch_emb = PatchEmbeddings(embed_dim, c.patch_size, 3)
    pos_emb = LearnedPositionalEmbeddings(embed_dim)
    head = ClassificationHead(embed_dim, c.n_hidden_classification, c.n_classes)

    # Assemble the vision transformer and move it to the configured device
    model = VisionTransformer(c.transformer.encoder_layer, c.transformer.n_layers,
                              patch_emb, pos_emb, head)
    return model.to(c.device)
def main():
    """Create, configure, and run the ViT CIFAR-10 training experiment."""
    # Create experiment
    experiment.create(name='ViT', comment='cifar10')
    # Create configurations
    conf = Configs()
    # Load configurations
    experiment.configs(conf, {
        # Optimizer
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,

        # Transformer embedding size
        'transformer.d_model': 512,

        # Training epochs and batch size
        'epochs': 32,
        'train_batch_size': 64,

        # Augment CIFAR 10 images for training
        'train_dataset': 'cifar10_train_augmented',
        # Do not augment CIFAR 10 images for validation
        'valid_dataset': 'cifar10_valid_no_augment',
    })
    # Set model for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/vit/__init__.py | labml_nn/transformers/vit/__init__.py | """
---
title: Vision Transformer (ViT)
summary: >
A PyTorch implementation/tutorial of the paper
"An Image Is Worth 16x16 Words: Transformers For Image Recognition At Scale"
---
# Vision Transformer (ViT)
This is a [PyTorch](https://pytorch.org) implementation of the paper
[An Image Is Worth 16x16 Words: Transformers For Image Recognition At Scale](https://arxiv.org/abs/2010.11929).
Vision transformer applies a pure transformer to images
without any convolution layers.
They split the image into patches and apply a transformer on patch embeddings.
[Patch embeddings](#PathEmbeddings) are generated by applying a simple linear transformation
to the flattened pixel values of the patch.
Then a standard transformer encoder is fed with the patch embeddings, along with a
classification token `[CLS]`.
The encoding on the `[CLS]` token is used to classify the image with an MLP.
When feeding the transformer with the patches, learned positional embeddings are
added to the patch embeddings, because the patch embeddings do not have any information
about where that patch is from.
The positional embeddings are a set of vectors for each patch location that get trained
with gradient descent along with other parameters.
ViTs perform well when they are pre-trained on large datasets.
The paper suggests pre-training them with an MLP classification head and
then using a single linear layer when fine-tuning.
The paper beats SOTA with a ViT pre-trained on a 300 million image dataset.
They also use higher resolution images during inference while keeping the
patch size the same.
The positional embeddings for new patch locations are calculated by interpolating
the learned positional embeddings.
Here's [an experiment](experiment.html) that trains ViT on CIFAR-10.
This doesn't do very well because it's trained on a small dataset.
It's a simple experiment that anyone can run and play with ViTs.
"""
import torch
from torch import nn
from labml_nn.transformers import TransformerLayer
from labml_nn.utils import clone_module_list
class PatchEmbeddings(nn.Module):
    """
    <a id="PatchEmbeddings"></a>

    ## Get patch embeddings

    The paper splits the image into patches of equal size and does a linear
    transformation on the flattened pixels of each patch.

    We implement the same thing with a convolution layer whose kernel size and
    stride both equal the patch size, because it's simpler to implement.
    """

    def __init__(self, d_model: int, patch_size: int, in_channels: int):
        """
        * `d_model` is the transformer embeddings size
        * `patch_size` is the size of the patch
        * `in_channels` is the number of channels in the input image (3 for rgb)
        """
        super().__init__()

        # Kernel size and stride are both `patch_size`, so every patch is
        # transformed independently — equivalent to a per-patch linear layer.
        self.conv = nn.Conv2d(in_channels, d_model, patch_size, stride=patch_size)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input image of shape `[batch_size, channels, height, width]`
        """
        # One embedding per patch: `[batch_size, d_model, h, w]`
        out = self.conv(x)
        # Get the resulting shape
        bs, c, h, w = out.shape
        # Rearrange to `[patches, batch_size, d_model]`
        out = out.permute(2, 3, 0, 1)
        return out.view(h * w, bs, c)
class LearnedPositionalEmbeddings(nn.Module):
    """
    <a id="LearnedPositionalEmbeddings"></a>

    ## Add parameterized positional encodings

    Adds a learned positional embedding vector to each patch embedding.
    """

    def __init__(self, d_model: int, max_len: int = 5_000):
        """
        * `d_model` is the transformer embeddings size
        * `max_len` is the maximum number of patches
        """
        super().__init__()
        # One learnable `d_model`-dimensional vector per position, initialized
        # to zeros and trained by gradient descent with the rest of the model.
        self.positional_encodings = nn.Parameter(torch.zeros(max_len, 1, d_model), requires_grad=True)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the patch embeddings of shape `[patches, batch_size, d_model]`
        """
        # Take one positional vector per patch and add it,
        # broadcasting over the batch dimension.
        n_patches = x.shape[0]
        return x + self.positional_encodings[:n_patches]
class ClassificationHead(nn.Module):
    """
    <a id="ClassificationHead"></a>

    ## MLP Classification Head

    A two layer MLP that maps the `[CLS]` token encoding to class logits.
    """

    def __init__(self, d_model: int, n_hidden: int, n_classes: int):
        """
        * `d_model` is the transformer embedding size
        * `n_hidden` is the size of the hidden layer
        * `n_classes` is the number of classes in the classification task
        """
        super().__init__()
        # Hidden layer, ReLU activation, and the output projection
        self.linear1 = nn.Linear(d_model, n_hidden)
        self.act = nn.ReLU()
        self.linear2 = nn.Linear(n_hidden, n_classes)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the transformer encoding for `[CLS]` token
        """
        # Hidden layer with activation, then project to class logits
        hidden = self.act(self.linear1(x))
        return self.linear2(hidden)
class VisionTransformer(nn.Module):
    """
    ## Vision Transformer

    This combines the [patch embeddings](#PatchEmbeddings),
    [positional embeddings](#LearnedPositionalEmbeddings),
    transformer and the [classification head](#ClassificationHead).
    """

    def __init__(self, transformer_layer: TransformerLayer, n_layers: int,
                 patch_emb: PatchEmbeddings, pos_emb: LearnedPositionalEmbeddings,
                 classification: ClassificationHead):
        """
        * `transformer_layer` is a copy of a single [transformer layer](../models.html#TransformerLayer).
         We make copies of it to make the transformer with `n_layers`.
        * `n_layers` is the number of [transformer layers](../models.html#TransformerLayer).
        * `patch_emb` is the [patch embeddings layer](#PatchEmbeddings).
        * `pos_emb` is the [positional embeddings layer](#LearnedPositionalEmbeddings).
        * `classification` is the [classification head](#ClassificationHead).
        """
        super().__init__()
        # Embedding layers and the classification head
        self.patch_emb = patch_emb
        self.pos_emb = pos_emb
        self.classification = classification
        # `n_layers` copies of the given transformer layer
        self.transformer_layers = clone_module_list(transformer_layer, n_layers)

        # Learned embedding for the `[CLS]` token
        self.cls_token_emb = nn.Parameter(torch.randn(1, 1, transformer_layer.size), requires_grad=True)
        # Final normalization layer
        self.ln = nn.LayerNorm([transformer_layer.size])

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input image of shape `[batch_size, channels, height, width]`
        """
        # Patch embeddings; shape `[patches, batch_size, d_model]`
        embeddings = self.patch_emb(x)

        # Prepend the `[CLS]` token embedding, repeated for every item in the batch
        batch_size = embeddings.shape[1]
        cls_tokens = self.cls_token_emb.expand(-1, batch_size, -1)
        embeddings = torch.cat([cls_tokens, embeddings])

        # Add the learned positional embeddings
        embeddings = self.pos_emb(embeddings)

        # Run through every transformer layer without any attention masking
        for transformer_layer in self.transformer_layers:
            embeddings = transformer_layer(x=embeddings, mask=None)

        # Take the encoding of the `[CLS]` token (first position in the sequence)
        cls_encoding = embeddings[0]

        # Normalize before the classification head
        cls_encoding = self.ln(cls_encoding)

        # Class logits
        return self.classification(cls_encoding)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/switch/experiment.py | labml_nn/transformers/switch/experiment.py | """
---
title: Switch Transformer Experiment
summary: This experiment trains a small switch transformer on tiny Shakespeare dataset.
---
# Switch Transformer Experiment
This is an annotated PyTorch experiment to train a switch transformer.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/switch/experiment.ipynb)
"""
import torch
import torch.nn as nn
from labml import experiment, tracker
from labml.configs import option
from labml_nn.helpers.trainer import BatchIndex
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model
    """

    def __init__(self, n_vocab: int, d_model: int, transformer: nn.Module):
        super().__init__()
        # Token embedding module
        self.src_embed = nn.Embedding(n_vocab, d_model)
        # Transformer
        self.transformer = transformer
        # Final layer that produces token logits
        self.generator = nn.Linear(d_model, n_vocab)
        # Subsequent mask, created lazily on the first call
        self.mask = None

    def forward(self, x: torch.Tensor):
        # (Re)build the subsequent mask when it doesn't exist yet
        # or the sequence length has changed
        if self.mask is None or self.mask.size(0) != len(x):
            from labml_nn.transformers.utils import subsequent_mask
            self.mask = subsequent_mask(len(x)).to(x.device)

        # Embed the tokens
        embeddings = self.src_embed(x)
        # Run the transformer; it also returns routing statistics
        res, counts, route_prob, n_dropped, route_prob_max = self.transformer(embeddings, self.mask)
        # Logits of the next token, plus the routing statistics
        return self.generator(res), counts, route_prob, n_dropped, route_prob_max
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    This extends [`NLPAutoRegressionConfigs`](../../experiments/nlp_autoregression.html).

    The default configs can and will be over-ridden when we start the experiment
    """

    # Autoregressive model wrapping the switch transformer
    model: AutoregressiveModel
    # The switch transformer module
    transformer: nn.Module

    # Token embedding size
    d_model: int = 128
    # Number of attention heads
    heads: int = 4
    # Dropout probability
    dropout: float = 0.0
    # Number of features in FFN hidden layer
    d_ff: int = 256
    # Number of transformer layers
    n_layers: int = 6
    # Number of experts
    n_experts: int = 4
    # Load balancing coefficient $\alpha$.
    # NOTE(review): the attribute name keeps the misspelling `ceof`;
    # renaming it would break existing configuration overrides.
    load_balancing_loss_ceof = 0.01
    # Whether to scale the chosen expert outputs by the routing probability
    is_scale_prob: bool = True
    # Whether to drop tokens that exceed an expert's capacity
    drop_tokens: bool = False
    # Capacity factor to determine capacity of each expert
    capacity_factor: float = 1.0

    def init(self):
        """
        Set up metric tracking on top of the base class initialization.
        """
        super().init()
        # Register the tracked scalars; `False` keeps them out of console output
        tracker.set_scalar("lb_loss.*", False)
        tracker.set_scalar("route.*", False)
        tracker.set_scalar("dropped.*", False)

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training or validation step

        Runs one forward (and, in training mode, backward) pass and tracks
        the cross entropy loss, the load balancing loss and routing statistics.
        """

        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Update global step (number of tokens processed) when in training mode
        if self.mode.is_train:
            tracker.add_global_step(data.shape[0] * data.shape[1])

        # Get model outputs: logits plus per-layer routing statistics
        output, counts, route_prob, n_dropped, route_prob_max = self.model(data)

        # Calculate the cross entropy loss
        cross_entropy_loss = self.loss_func(output, target)
        # Total number of tokens processed, $T$, in the current batch $\mathscr{B}$
        total = counts.sum(dim=-1, keepdims=True)
        # Fraction of tokens routed to each expert
        # $$f_i = \frac{1}{T} \sum_{x \in \mathscr{B}} \mathbf{1} \{ \mathop{argmax} p(x), i \}$$
        # $f_i$ is the count of tokens where the argmax of $p(x)$ is equal to $i$.
        route_frac = counts / total
        # Mean routing probability
        # $$P_i = \frac{1}{T} \sum_{x \in \mathscr{B}} p_i (x)$$
        route_prob = route_prob / total
        # Load balancing loss
        # $$\mathscr{L} = N \sum_{i=1}^N f_i \cdot P_i$$
        # $\mathscr{L}$ is the loss for a single layer and here we are
        # taking the sum of losses across all layers.
        load_balancing_loss = self.n_experts * (route_frac * route_prob).sum()

        # Track routing and loss statistics
        tracker.add('dropped.', total.new_tensor(n_dropped) / total)
        tracker.add('route.min.', route_frac.min())
        tracker.add('route.max.', route_frac.max())
        tracker.add('route.std.', route_frac.std())
        tracker.add('route.max_prob.', route_prob_max)
        tracker.add("loss.", cross_entropy_loss)
        tracker.add("lb_loss.", load_balancing_loss)

        # Combined loss.
        # The load balancing loss is multiplied by a coefficient $\alpha$ which is
        # set to something small like $\alpha = 0.01$.
        loss = cross_entropy_loss + self.load_balancing_loss_ceof * load_balancing_loss

        # Calculate and log accuracy
        self.accuracy(output, target)
        self.accuracy.track()

        # Train the model
        if self.mode.is_train:
            # Calculate gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
            # Take optimizer step
            self.optimizer.step()
            # Log the model parameters and gradients on last batch of every epoch
            if batch_idx.is_last:
                tracker.add('model', self.model)
            # Clear the gradients
            self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()
@option(Configs.model)
def autoregressive_model(c: Configs):
    """
    ### Initialize the auto-regressive model
    """
    # Build the model and move it to the configured device
    return AutoregressiveModel(c.n_tokens, c.d_model, c.transformer).to(c.device)
@option(Configs.transformer)
def switch_transformer(c: Configs):
    """
    ### Initialize the switch transformer
    """
    from labml_nn.transformers.switch import SwitchTransformer, SwitchTransformerLayer, SwitchFeedForward
    from labml_nn.transformers import MultiHeadAttention
    from labml_nn.transformers.feed_forward import FeedForward

    # The switching feed-forward module with `n_experts` expert FFNs
    feed_forward = SwitchFeedForward(capacity_factor=c.capacity_factor,
                                     drop_tokens=c.drop_tokens,
                                     is_scale_prob=c.is_scale_prob,
                                     n_experts=c.n_experts,
                                     expert=FeedForward(c.d_model, c.d_ff, c.dropout),
                                     d_model=c.d_model)
    # A single transformer layer; the transformer clones it `n_layers` times
    layer = SwitchTransformerLayer(d_model=c.d_model,
                                   attn=MultiHeadAttention(c.heads, c.d_model, c.dropout),
                                   feed_forward=feed_forward,
                                   dropout_prob=c.dropout)
    #
    return SwitchTransformer(layer, c.n_layers)
def main():
    """
    ### Run the experiment
    """
    # Create experiment
    experiment.create(name="switch_transformer", comment='')
    # Create configs
    conf = Configs()
    # Configuration values that override the defaults
    overrides = {'tokenizer': 'character',
                 'text': 'tiny_shakespeare',
                 'optimizer.learning_rate': 1.,
                 'optimizer.optimizer': 'Noam',
                 'prompt': 'It is',
                 'prompt_separator': '',

                 'transformer': 'switch_transformer',
                 'n_experts': 4,

                 'drop_tokens': True,
                 'capacity_factor': 1.2,

                 'train_loader': 'shuffled_train_loader',
                 'valid_loader': 'shuffled_valid_loader',

                 'seq_len': 64,
                 'epochs': 128,
                 'batch_size': 32,
                 'inner_iterations': 25,
                 }
    # Load configurations
    experiment.configs(conf, overrides)

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment and run the training loop
    with experiment.start():
        # `TrainValidConfigs.run`
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/switch/__init__.py | labml_nn/transformers/switch/__init__.py | """
---
title: Switch Transformer
summary: >
This is an annotated implementation/tutorial a miniature version of Switch Transformer in PyTorch.
---
# Switch Transformer
This is a miniature [PyTorch](https://pytorch.org) implementation of the paper
[Switch Transformers: Scaling to Trillion Parameter Models with Simple and Efficient Sparsity](https://arxiv.org/abs/2101.03961).
Our implementation only has a few million parameters and doesn't do model parallel distributed training.
It does single GPU training, but we implement the concept of switching as described in the paper.
The Switch Transformer uses different parameters for each token by switching among parameters
based on the token.
Therefore, only a fraction of parameters are chosen for each token.
So you can have more parameters but less computational cost.
The switching happens at the Position-wise Feedforward network (FFN) of each transformer block.
Position-wise feedforward network consists of two sequentially fully connected layers.
In switch transformer we have multiple FFNs (multiple experts),
and we choose which one to use based on a router.
The output is a set of probabilities for picking a FFN,
and we pick the one with the highest probability and only evaluate that.
So essentially the computational cost is the same as having a single FFN.
In our implementation this doesn't parallelize well when you have many or large FFNs since it's all
happening on a single GPU.
In a distributed setup you would have each FFN (each very large) on a different device.
The paper introduces another loss term to balance load among the experts (FFNs) and
discusses dropping tokens when routing is not balanced.
Here's [the training code](experiment.html) and a notebook for training a switch transformer on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/switch/experiment.ipynb)
"""
import torch
from torch import nn
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.mha import MultiHeadAttention
from labml_nn.utils import clone_module_list
class SwitchFeedForward(nn.Module):
    """
    ## Routing among multiple FFNs

    Routes each token to the expert FFN with the highest routing probability,
    optionally dropping tokens that exceed an expert's capacity.
    """

    def __init__(self, *,
                 capacity_factor: float,
                 drop_tokens: bool,
                 is_scale_prob: bool,
                 n_experts: int,
                 expert: FeedForward,
                 d_model: int):
        """
        * `capacity_factor` is the capacity of each expert as a factor relative to ideally balanced load
        * `drop_tokens` specifies whether to drop tokens if more tokens are routed to an expert than the capacity
        * `is_scale_prob` specifies whether to multiply the input to the FFN by the routing probability
        * `n_experts` is the number of experts
        * `expert` is the expert layer, a [FFN module](../feed_forward.html),
          which is cloned `n_experts` times
        * `d_model` is the number of features in a token embedding
        """
        super().__init__()

        self.capacity_factor = capacity_factor
        self.is_scale_prob = is_scale_prob
        self.n_experts = n_experts
        self.drop_tokens = drop_tokens

        # make copies of the FFNs
        self.experts = clone_module_list(expert, n_experts)
        # Routing layer and softmax
        self.switch = nn.Linear(d_model, n_experts)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input to the switching module with shape `[seq_len, batch_size, d_model]`

        Returns the output (same shape as `x`), the token counts per expert,
        the summed routing probabilities per expert, the number of dropped
        tokens, and the routing probabilities of the selected experts.
        """

        # Capture the shape to change shapes later
        seq_len, batch_size, d_model = x.shape
        # Flatten the sequence and batch dimensions
        x = x.view(-1, d_model)

        # Get routing probabilities for each of the tokens.
        # $$p_i(x) = \frac{e^{h(x)_i}}{\sum^N_j e^{h(x)_j}}$$
        # where $N$ is the number of experts `n_experts` and
        # $h(\cdot)$ is the linear transformation of token embeddings.
        route_prob = self.softmax(self.switch(x))

        # Get the maximum routing probabilities and the routes.
        # We route to the expert with highest probability
        route_prob_max, routes = torch.max(route_prob, dim=-1)

        # Get indexes of tokens going to each expert
        indexes_list = [torch.eq(routes, i).nonzero(as_tuple=True)[0] for i in range(self.n_experts)]

        # Initialize an empty tensor to store outputs
        final_output = x.new_zeros(x.shape)

        # Capacity of each expert.
        # $$\mathrm{expert\;capacity} =
        # \frac{\mathrm{tokens\;per\;batch}}{\mathrm{number\;of\;experts}}
        # \times \mathrm{capacity\;factor}$$
        capacity = int(self.capacity_factor * len(x) / self.n_experts)
        # Number of tokens routed to each expert.
        counts = x.new_tensor([len(indexes_list[i]) for i in range(self.n_experts)])

        # Initialize an empty list of dropped tokens
        dropped = []
        # Only drop tokens if `drop_tokens` is `True`.
        if self.drop_tokens:
            # Drop tokens in each of the experts
            for i in range(self.n_experts):
                # Ignore if the expert is not over capacity
                if len(indexes_list[i]) <= capacity:
                    continue
                # Shuffle indexes before dropping so the dropped tokens are random
                indexes_list[i] = indexes_list[i][torch.randperm(len(indexes_list[i]))]
                # Collect the tokens over capacity as dropped tokens
                dropped.append(indexes_list[i][capacity:])
                # Keep only the tokens up to the capacity of the expert
                indexes_list[i] = indexes_list[i][:capacity]

        # Get outputs of the expert FFNs
        expert_output = [self.experts[i](x[indexes_list[i], :]) for i in range(self.n_experts)]

        # Assign to final output
        for i in range(self.n_experts):
            final_output[indexes_list[i], :] = expert_output[i]

        # Pass through the dropped tokens unchanged (identity)
        if dropped:
            dropped = torch.cat(dropped)
            final_output[dropped, :] = x[dropped, :]

        if self.is_scale_prob:
            # Multiply the expert outputs by the probabilities $y = p_i(x) E_i(x)$
            final_output = final_output * route_prob_max.view(-1, 1)
        else:
            # Don't scale the values but multiply by $\frac{p}{\hat{p}} = 1$ so that the gradients flow
            # (this is something we experimented with).
            final_output = final_output * (route_prob_max / route_prob_max.detach()).view(-1, 1)

        # Change the shape of the final output back to `[seq_len, batch_size, d_model]`
        final_output = final_output.view(seq_len, batch_size, d_model)

        # Return
        #
        # * the final output
        # * number of tokens routed to each expert
        # * sum of probabilities for each expert
        # * number of tokens dropped.
        # * routing probabilities of the selected experts
        #
        # These are used for the load balancing loss and logging
        return final_output, counts, route_prob.sum(0), len(dropped), route_prob_max
class SwitchTransformerLayer(nn.Module):
    """
    # Switch Transformer Block

    This is the same as [normal transformer block](../models.html#TransformerLayer)
    with handling extra outputs of switch feedforward module.
    """

    def __init__(self, *,
                 d_model: int,
                 attn: MultiHeadAttention,
                 feed_forward: SwitchFeedForward,
                 dropout_prob: float):
        """
        * `d_model` is the token embedding size
        * `attn` is the attention module
        * `feed_forward` is the feed forward module (which is the switching module in this case)
        * `dropout_prob` is the probability of dropping out after self attention and FFN
        """
        super().__init__()
        self.size = d_model
        self.attn = attn
        self.feed_forward = feed_forward
        self.dropout = nn.Dropout(dropout_prob)
        # Pre-norm layers for the attention and feed-forward sub-blocks
        self.norm_self_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])

    def forward(self, *,
                x: torch.Tensor,
                mask: torch.Tensor):
        # Self attention sub-block: pre-norm, then a residual connection.
        # Keys and values come from the (normalized) input itself.
        normed = self.norm_self_attn(x)
        attn_out = self.attn(query=normed, key=normed, value=normed, mask=mask)
        x = x + self.dropout(attn_out)

        # Switching feed-forward sub-block, also pre-norm with a residual;
        # it returns routing statistics along with the output.
        normed = self.norm_ff(x)
        ff, counts, route_prob, n_dropped, route_prob_max = self.feed_forward(normed)
        x = x + self.dropout(ff)

        # Output plus the routing statistics for the load balancing loss
        return x, counts, route_prob, n_dropped, route_prob_max
class SwitchTransformer(nn.Module):
    """
    ## Switch Transformer
    """

    def __init__(self, layer: SwitchTransformerLayer, n_layers: int):
        super().__init__()
        # `n_layers` copies of the given transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, mask: torch.Tensor):
        # Collect routing statistics from every layer
        counts, route_prob, n_dropped, route_prob_max = [], [], [], []
        # Run through each transformer layer
        for layer in self.layers:
            x, layer_counts, layer_route_prob, layer_n_dropped, layer_route_prob_max = layer(x=x, mask=mask)
            counts.append(layer_counts)
            route_prob.append(layer_route_prob)
            n_dropped.append(layer_n_dropped)
            route_prob_max.append(layer_route_prob_max)
        # Finally, normalize the vectors
        x = self.norm(x)
        # Stack the per-layer statistics (`n_dropped` stays a plain list)
        return x, torch.stack(counts), torch.stack(route_prob), n_dropped, torch.stack(route_prob_max)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/aft/experiment.py | labml_nn/transformers/aft/experiment.py | """
---
title: Attention Free Transformer (AFT) Experiment
summary: This experiment trains an Attention Free Transformer (AFT) based model on Tiny Shakespeare dataset.
---
# [Attention Free Transformer (AFT)](index.html) Experiment
This is an annotated PyTorch experiment to train a [AFT model](index.html).
This is based on
[general training loop and configurations for auto-regressive NLP task](../../experiments/nlp_autoregression.html).
"""
import torch
from labml import experiment
from labml.configs import option
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import TransformerConfigs, Encoder
from labml_nn.transformers.utils import subsequent_mask
from torch import nn
class AutoregressiveTransformer(nn.Module):
    """
    ## Simple autoregressive model

    This consists of a token embedding layer, transformer encoder, and
    a final linear layer that gives token logits.
    """

    def __init__(self, encoder: Encoder, src_embed: nn.Module, generator: nn.Module):
        """
        * `encoder` is the transformer [Encoder](../models.html#Encoder)
        * `src_embed` is the token
        [embedding module (with positional encodings)](../models.html#EmbeddingsWithLearnedPositionalEncoding)
        * `generator` is the [final fully connected layer](../models.html#Generator) that gives the logits.
        """
        super().__init__()
        self.src_embed = src_embed
        self.encoder = encoder
        self.generator = generator

        # Subsequent mask, built lazily on the first forward pass
        self.mask = None

    def forward(self, x: torch.Tensor):
        # (Re)build the subsequent mask when it doesn't exist yet or the
        # sequence length has changed; it hides future tokens from attention.
        if self.mask is None or self.mask.size(0) != len(x):
            self.mask = subsequent_mask(len(x)).to(x.device)

        # Token embeddings with positional encodings
        embeddings = self.src_embed(x)
        # Transformer encoder
        encoding = self.encoder(embeddings, self.mask)
        # Logits for the next token.
        # The second return value is the state, kept `None` because the
        # same trainer is also used with RNNs.
        return self.generator(encoding), None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from
    [`NLPAutoRegressionConfigs`](../../experiments/nlp_autoregression.html#NLPAutoRegressionConfigs)
    """

    # Autoregressive AFT model
    model: AutoregressiveTransformer
    # Transformer
    transformer: TransformerConfigs

    # Local window size $s$ for [AFT Local](index.html)
    local_window_size: int = 32
@option(Configs.transformer, 'Transformer')
def _transformer_configs(c: Configs):
    """
    ### Transformer configurations
    """
    # Start from our
    # [configurable transformer implementation](../configs.html#TransformerConfigs)
    conf = TransformerConfigs()
    # Vocabulary sizes for the embeddings and the logit layer
    conf.n_src_vocab = c.n_tokens
    conf.n_tgt_vocab = c.n_tokens
    # Embedding size
    conf.d_model = c.d_model
    # Replace the self-attention module with an [AFT Local Module](index.html)
    from labml_nn.transformers.aft import AFTLocal
    conf.encoder_attn = AFTLocal(c.d_model, c.seq_len, c.local_window_size)

    #
    return conf
@option(Configs.model)
def _model(c: Configs):
    """
    Create an auto-regressive model
    """
    # Assemble the model from the transformer components and move it to the device
    return AutoregressiveTransformer(c.transformer.encoder,
                                     c.transformer.src_embed,
                                     c.transformer.generator).to(c.device)
def main():
    """
    ### Run the experiment
    """
    # Create experiment
    experiment.create(name="aft")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',

        # Use a context size of $256$
        'seq_len': 256,
        # Train for $128$ epochs
        'epochs': 128,
        # Batch size $32$
        'batch_size': 32,
        # Switch between training and validation for $10$ times
        # per epoch
        'inner_iterations': 10,

        # Embedding size
        'd_model': 128,
        # FFN hidden dimension size
        'transformer.ffn.d_ff': 256,

        # Optimizer
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,
    })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/aft/__init__.py | labml_nn/transformers/aft/__init__.py | """
---
title: An Attention Free Transformer
summary: >
This is an annotated implementation/tutorial of the AFT (Attention Free Transformer) in PyTorch.
---
# An Attention Free Transformer
This is a [PyTorch](https://pytorch.org) implementation of the paper
[An Attention Free Transformer](https://arxiv.org/abs/2105.14103).
This paper replaces the [self-attention layer](../mha.html) with a new efficient operation,
that has memory complexity of $\mathcal{O}(Td)$, where $T$ is the sequence length
and $d$ is the dimensionality of embeddings.
The paper introduces AFT along with AFT-local and AFT-conv.
Here we have implemented AFT-local which pays attention to closeby tokens
in an autoregressive model.
## Attention Free Transformer
AFT (similar to [MHA](../mha.html)) first transforms the embeddings $X$ into
query $Q = XW^Q$, key $K = XW^K$ and value $V = XW^V$ tensors with learned weights.
The output for each position $t \in [1, T]$ is calculated with the following operation.
$$Y_t = \sigma(Q_t) \odot
\frac{\sum_{t'=1}^T \exp(K_{t'} + w_{t,t'}) \odot V_{t'}}
{\sum_{t'=1}^T \exp(K_{t'} + w_{t,t'})}$$
, where $\odot$ is element-wise product, $\sigma$ is a non-linearity (sigmoid) and
$w \in \mathbb{R}^{T \times T}$ is a learned matrix of pair-wise position biases.
This means that we take the weighted average of values
and multiply them by the query. This eliminates the need to calculate the $T \times T$ attention
matrix that [MHA](../mha.html) requires, and therefore reduce the memory requirement.
## AFT Local
AFT Local only applies learned pair-wise position biases locally:
\begin{align}
w'_{t,t'} =
\begin{cases}
w_{t,t'}, & {\text{for } \lvert t-t' \rvert \lt s} \\
0, & \text{otherwise}
\end{cases}
\end{align}
, where $s \le T$ is the local window size.
Although $w'_{t,t'}$ is $0$ outside the local window the AFT operation still uses key-value pairs from
other areas. This is different from local transformers where embeddings outside the local window are
completely not visible.
Here is [the training code](experiment.html) for a AFT Local model.
"""
from typing import Optional
import torch
from torch import nn
class AFTLocal(nn.Module):
    """
    ### AFT Local Operation

    $$Y_t = \sigma(Q_t) \odot
     \frac{\sum_{t'=1}^T \exp(K_{t'} + w_{t,t'}) \odot V_{t'}}
          {\sum_{t'=1}^T \exp(K_{t'} + w_{t,t'})}$$

    where,

    \begin{align}
    w'_{t,t'} =
    \begin{cases}
    w_{t,t'},  & {\text{for } \lvert t-t' \rvert \lt s} \\
    0, & \text{otherwise}
    \end{cases}
    \end{align}
    """

    def __init__(self, d_model: int, seq_len: int, local_window_size: int, bias: bool = True):
        """
        * `d_model` is the number of features in the `query`, `key` and `value` vectors.
        * `seq_len` is $T$
        * `local_window_size` is the local window size $s$
        * `bias` is whether to have a bias parameter for transformations for $Q$, $K$ and $V$.
        """

        super().__init__()

        # Local window size $s$
        self.local_window_size = local_window_size
        # These transform the `query`, `key` and `value` vectors.
        self.query = nn.Linear(d_model, d_model, bias=bias)
        self.key = nn.Linear(d_model, d_model, bias=bias)
        self.value = nn.Linear(d_model, d_model, bias=bias)
        # Pair-wise positional biases $w \in \mathbb{R}^{T \times T}$
        self.pos_bias = nn.Parameter(torch.zeros(seq_len, seq_len), requires_grad=True)
        # Mask for $w_{t,t'}$, registered as a non-trainable parameter so it
        # moves with the module between devices
        self.local_mask = nn.Parameter(self.create_local_mask(seq_len, local_window_size), requires_grad=False)
        # Activation $\sigma$
        self.activation = nn.Sigmoid()
        # Output layer
        self.output = nn.Linear(d_model, d_model)

    @staticmethod
    def create_local_mask(seq_len, local_window_size):
        """
        #### Create local mask

        This creates a boolean mask for

        \begin{align}
        m_{t,t'} =
        \begin{cases}
        1,  & {\text{for } \lvert t-t' \rvert \lt s} \\
        0, & \text{otherwise}
        \end{cases}
        \end{align}
        """

        # Initialize to ones
        local_mask = torch.ones(seq_len, seq_len, dtype=torch.bool)
        # Make $t' - t \ge s$ zero
        local_mask = torch.tril(local_mask, local_window_size - 1)
        # Make $t - t' \ge s$ zero
        local_mask = torch.triu(local_mask, -(local_window_size - 1))

        #
        return local_mask

    def forward(self, *,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: Optional[torch.Tensor] = None):
        """
        `query`, `key` and `value` are the tensors that store
        collection of token embeddings for *query*, *key* and *value*.
        They have shape `[seq_len, batch_size, d_model]`.

        `mask` is optional; when given it has shape `[seq_len, seq_len, batch_size]`
        and `mask[i, j, b]` indicates whether for batch `b`,
        query at position `i` has access to key-value at position `j`.
        When `mask` is `None` no masking is applied beyond the local window.
        """

        # `query`, `key` and `value` have shape `[seq_len, batch_size, d_model]`
        seq_len, _, _ = query.shape

        if mask is not None:
            # `mask` has shape `[seq_len_q, seq_len_k, batch_size]`,
            # where first dimension is the query dimension.
            # If the query dimension is equal to $1$ it will be broadcasted.
            assert mask.shape[0] == 1 or mask.shape[0] == query.shape[0]
            assert mask.shape[1] == key.shape[0]
            assert mask.shape[2] == 1 or mask.shape[2] == query.shape[1]

        # Transform query, key and value embeddings
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)

        # Zero the pair-wise position biases outside the local window:
        #
        # \begin{align}
        # w'_{t,t'} =
        # \begin{cases}
        # w_{t,t'},  & {\text{for }\lvert t-t' \rvert \lt s} \\
        # 0, & \text{otherwise}
        # \end{cases}
        # \end{align}
        pos_bias = self.pos_bias[:seq_len, :seq_len] * self.local_mask[:seq_len, :seq_len]
        pos_bias = pos_bias.unsqueeze(-1)
        # Exclude masked-out pairs by setting their bias to $-\infty$ so they
        # vanish after the exponentiation below.
        # Bug fix: this was applied unconditionally before, raising a `TypeError`
        # (`~None`) whenever `mask` was `None` even though `mask` is optional.
        # The out-of-place `masked_fill` also broadcasts correctly when the
        # mask carries a batch dimension larger than one.
        if mask is not None:
            pos_bias = pos_bias.masked_fill(~mask, float('-inf'))

        # \begin{align}
        # Y_t &= \sigma(Q_t) \odot
        #        \frac{\sum_{t'=1}^T \exp(K_{t'} + w_{t,t'}) \odot V_{t'}}
        #             {\sum_{t'=1}^T \exp(K_{t'} + w_{t,t'})} \\
        #     &= \sigma(Q_t) \odot
        #            \frac{\sum_{t'=1}^T \exp(w_{t,t'}) \odot \exp(K_{t'}) \odot V_{t'}}
        #            {\sum_{t'=1}^T \exp(w_{t,t'}) \odot \exp(K_{t'})}
        # \end{align}
        #
        # We compute $\exp(w_{t,t'})$, $\exp(K_{t'}) \odot V_{t'}$ and $\exp(K_{t'})$
        # separately and do a matrix multiplication. We use einsum for clarity.

        # We subtract $\max_{t'}(K_{t'})$ and $\max_{t'}(w_{t,t'})$ before calculating the
        # exponents to stabilize the softmax-like calculation: if $x_i$ is large,
        # $\exp(x_i)$ overflows, while a constant subtracted from both numerator
        # and denominator cancels out.
        max_key = key.max(dim=0, keepdims=True)[0]
        max_pos_bias = pos_bias.max(dim=1, keepdims=True)[0]

        # $\exp \big(K_{t'}- \max_{t'}(K_{t'})\big)$
        exp_key = torch.exp(key - max_key)
        # $\exp \big(w_{t,t'} - \max_{t'}(w_{t,t'})\big)$
        exp_pos_bias = torch.exp(pos_bias - max_pos_bias)

        # The numerator part $\sum_{t'=1}^T \exp(w_{t,t'}) \odot \exp(K_{t'}) \odot V_{t'}$
        num = torch.einsum('ijb,jbd->ibd', exp_pos_bias, exp_key * value)
        # The denominator part $\sum_{t'=1}^T \exp(w_{t,t'}) \odot \exp(K_{t'})$
        den = torch.einsum('ijb,jbd->ibd', exp_pos_bias, exp_key)

        # Output $$Y_t = \sigma(Q_t) \odot
        #        \frac{\sum_{t'=1}^T \exp(w_{t,t'}) \odot \exp(K_{t'}) \odot V_{t'}}
        #             {\sum_{t'=1}^T \exp(w_{t,t'}) \odot \exp(K_{t'})}$$
        y = self.activation(query) * num / den

        # Output layer
        return self.output(y)
def _test_local_mask():
    """Build a small local-attention mask and print it for visual inspection."""
    from labml.logger import inspect

    # A sequence of 10 tokens with a local window size of 4
    mask = AFTLocal.create_local_mask(10, 4)
    inspect(mask)


#
if __name__ == '__main__':
    _test_local_mask()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/feedback/experiment.py | labml_nn/transformers/feedback/experiment.py | """
---
title: Train Feedback Transformer
summary: This is training code with notes for a feedback transformer.
---
# Train Feedback Transformer
This trains a [feedback transformer](index.html) model for auto-regression.
You can pick the original feedback transformer or the new version
where the keys and values are precalculated.
Here's a Colab notebook for training a feedback transformer on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/feedback/experiment.ipynb)
"""
import torch
from labml import experiment
from labml.configs import option
from labml.utils.pytorch import get_modules
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from torch import nn
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model

    Wraps a transformer with a token-embedding layer at the input and a
    linear read-out layer that produces next-token logits.
    """

    def __init__(self, n_vocab: int, d_model: int, transformer: nn.Module):
        """
        * `n_vocab` is the vocabulary size
        * `d_model` is the number of features in a token embedding
        * `transformer` is the transformer body that maps embeddings to representations
        """
        super().__init__()
        # Embedding layer mapping token ids to vectors
        self.src_embed = nn.Embedding(n_vocab, d_model)
        # The transformer body
        self.transformer = transformer
        # Read-out layer producing logits over the vocabulary
        self.generator = nn.Linear(d_model, n_vocab)

    def forward(self, x: torch.Tensor):
        """Return `(logits, state)`; `state` is always `None` for this model."""
        # Token ids -> embeddings -> transformer representations -> logits
        embeddings = self.src_embed(x)
        representations = self.transformer(embeddings)
        logits = self.generator(representations)
        # Second element keeps the interface compatible with stateful models
        return logits, None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    The default configs can and will be over-ridden when we start the experiment.
    """

    # Model to train; created by one of the `@option` factories below
    model: AutoregressiveModel

    # Number of features in a token embedding
    d_model: int = 512
    # Number of attention heads
    heads: int = 8
    # Dropout probability
    dropout: float = 0.0
    # Number of features in the feed-forward hidden layer
    d_ff: int = 2048
    # Number of transformer layers
    n_layers: int = 6
@option(Configs.model)
def feedback_transformer(c: Configs):
    """
    Create [original feedback transformer](index.html).
    """
    from labml_nn.transformers.feedback import FeedbackTransformer, FeedbackTransformerLayer, \
        FeedbackAttention, FeedForward

    # Build a single transformer layer; it is cloned `c.n_layers` times
    layer = FeedbackTransformerLayer(d_model=c.d_model,
                                     attn=FeedbackAttention(c.heads, c.d_model, c.dropout),
                                     feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
                                     dropout_prob=c.dropout)
    # Stack the layers into the feedback transformer
    transformer = FeedbackTransformer(layer, c.n_layers)
    # Wrap with embedding and read-out layers, and move to the configured device
    return AutoregressiveModel(c.n_tokens, c.d_model, transformer).to(c.device)
@option(Configs.model)
def feedback_transformer_kv(c: Configs):
    """
    Create [updated feedback transformer](index.html#kv_shared), with precalculated keys and values.
    """
    from labml_nn.transformers.feedback import FeedbackTransformerKV, FeedbackTransformerLayer, \
        FeedbackAttention, FeedForward

    # With `is_kv_precomputed=True` the attention skips its own key/value projections
    attn = FeedbackAttention(c.heads, c.d_model, c.dropout, is_kv_precomputed=True)
    # Build a single transformer layer; it is cloned `c.n_layers` times
    layer = FeedbackTransformerLayer(d_model=c.d_model,
                                     attn=attn,
                                     feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
                                     dropout_prob=c.dropout)
    # The KV variant needs `d_model` and `heads` to compute the shared keys/values
    transformer = FeedbackTransformerKV(layer, c.n_layers, c.d_model, c.heads)
    # Wrap with embedding and read-out layers, and move to the configured device
    return AutoregressiveModel(c.n_tokens, c.d_model, transformer).to(c.device)
def main():
    """Set up and run the feedback transformer training experiment."""
    # Create experiment
    experiment.create(name="feedback_transformer")
    # Create configs
    conf = Configs()
    # A dictionary of configurations to override
    overrides = {'tokenizer': 'character',
                 'text': 'tiny_shakespeare',
                 'optimizer.learning_rate': 1.0,
                 'optimizer.optimizer': 'Noam',
                 'prompt': 'It is',
                 'prompt_separator': '',
                 # Use `feedback_transformer` for original feedback transformer
                 'model': 'feedback_transformer_kv',
                 'train_loader': 'shuffled_train_loader',
                 'valid_loader': 'shuffled_valid_loader',
                 'seq_len': 128,
                 'epochs': 128,
                 'batch_size': 64,
                 'inner_iterations': 25}
    # Load configurations
    experiment.configs(conf, overrides)
    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/feedback/__init__.py | labml_nn/transformers/feedback/__init__.py | """
---
title: Feedback Transformer
summary: >
This is an annotated implementation/tutorial the Feedback Transformer in PyTorch.
---
# Feedback Transformer
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Accessing Higher-level Representations in Sequential Transformers with Feedback Memory](https://arxiv.org/abs/2002.09402).
Normal transformers process tokens in parallel. Each transformer layer pays attention
to the outputs of the previous layer.
Feedback transformer pays attention to the output of all layers in previous steps.
So this adds recurrence, and we need to process token-by-token.
This slows down the training significantly (about 5X - 10X depending on the sequence length).
However, when predicting Feedback Transformer is faster because you can predict the next token
if you cache the memory vectors.
In order to speed up the training, the paper discusses starting with a short sequence length and
gradually increasing it.
They also discuss using a pretrained parallel transformer as the starting point.
The original feedback transformer doesn't keep the outputs of all layers.
Instead it keeps weighted sum of the output of all layers.
This reduces the memory used for caching during prediction.
The first half of this file implements this.
The updated feedback transformer shares weights $W^l_k$ and $W^l_v$ used
to calculate keys and values among the layers.
We then calculate the keys and values for each step only once and keep
them cached.
The [second half](#shared_kv) of this file implements this.
We implemented a custom PyTorch function to improve performance.
Here's [the training code](experiment.html) and a notebook for training a feedback transformer on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/feedback/experiment.ipynb)
"""
import math
from typing import Optional
import torch
from torch import nn
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.mha import PrepareForMultiHeadAttention
from labml_nn.utils import clone_module_list
class FeedbackAttention(nn.Module):
    r"""
    ## Feedback Attention

    This module computes recurrent attention similar to attention from original transformers
    paper.

    $$\mathop{Attention}(Q, K, V) = \underset{seq}{\mathop{softmax}}\Bigg(\frac{Q^\top K}{\sqrt{d_k}}\Bigg)V$$
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1, *,
                 is_kv_precomputed: bool = False):
        """
        * `heads` is the number of attention heads
        * `d_model` is the number of features in the transformer
        * `dropout_prob` is the attention dropout probability
        * `is_kv_precomputed` is whether key, value tensors are already calculated
        """
        super().__init__()

        # Number of features per head
        self.d_k = d_model // heads
        #
        self.heads = heads

        # These transform the `query` for multi-headed attention.
        self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
        # These transform the `key` and `value` for multi-headed attention.
        if not is_kv_precomputed:
            self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
            self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=True)
        # Keys and values are already calculated, so no projections are needed
        else:
            self.key = None
            self.value = None

        # Output layer
        self.output = nn.Linear(d_model, d_model)
        # Dropout
        self.dropout = nn.Dropout(dropout_prob)
        # Scaling factor before the softmax
        self.scale = 1 / math.sqrt(self.d_k)

        # Softmax for attention along the time dimension of `key`
        self.softmax = nn.Softmax(dim=0)

        # Number of relative positions
        self.P = 2 ** 12

        # Relative positional embeddings for key relative to the query.
        self.key_pos_embeddings = nn.Parameter(torch.zeros((self.P, heads, self.d_k)), requires_grad=True)
        # Relative positional embedding bias for key relative to the query.
        self.key_pos_bias = nn.Parameter(torch.zeros((self.P, heads)), requires_grad=True)
        # Positional embeddings for the query is independent of the position of the query
        self.query_pos_bias = nn.Parameter(torch.zeros((heads, self.d_k)), requires_grad=True)

        # We store attentions so that it can be used for logging, or other computations if needed
        self.attn = None

    def get_scores(self, query: torch.Tensor, key: torch.Tensor):
        r"""
        ### Get attention scores

        We use relative positional encodings for attention, similar
        to [relative multi-head attention form Transformer-XL paper](../relative_mha.html).

        Attention from current step's query to key in step $j$ (relative to current step) is,

        \begin{align}
        A_{j} &= Q^\top K_j \\
        &= lin_q(X^q + P_q)^\top lin_k(X^k_j + P_j) \\
        &= (Q + U^Q)^\top(K_j + U^K_j) \\
        &= \underset{\textcolor{lightgreen}{A}}{Q^\top K_j} +
        \underset{\textcolor{lightgreen}{B}}{Q^\top U^K_j} +
        \underset{\textcolor{lightgreen}{C}}{{U^Q}^\top K_j} +
        \underset{\textcolor{lightgreen}{D}}{{U^Q}^\top U^K_j}
        \end{align}

        where $Q, K_j$, are linear transformations of
        original embeddings $X^q, X^k_j$
        and $U^Q, U^K_j$ are linear transformations of
        positional encodings $P_q, P_j$.

        We replace term $\textcolor{lightgreen}{D}$ with $S_j$.
        """
        # $U^K_j$
        key_pos_emb = self.key_pos_embeddings[-key.shape[0]:]
        # $U^Q$
        query_pos_bias = self.query_pos_bias[None, :, :]
        # $S_j$
        key_pos_bias = self.key_pos_bias[-key.shape[0]:]

        # $\underset{\textcolor{lightgreen}{A}}{Q^\top K_j} + \underset{\textcolor{lightgreen}{C}}{{U^Q}^\top K_j}$
        ac = torch.einsum('bhd,jbhd->jbh', query + query_pos_bias, key)
        # $\underset{\textcolor{lightgreen}{B}}{Q^\top U^K_j} + \underset{\textcolor{lightgreen}{D}}{S_j}$
        bd = torch.einsum('bhd,jhd->jbh', query, key_pos_emb) + key_pos_bias[:, None, :]

        # $A_j$
        return ac + bd

    def forward(self, *,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor):
        """
        * `query` has shape `[batch_size, d_model]`
        * `key` and `value` has shape `[seq_len, batch_size, d_model]`
        """
        # Prepare `query`, `key` and `value` for attention computation.
        # `key` and `value` will then have shape `[seq_len, batch_size, heads, d_k]`
        # and `query` will have shape `[batch_size, heads, d_k]`.
        query = self.query(query)
        # Project keys/values only when they are not precomputed.
        # Explicit `is not None` instead of relying on module truthiness,
        # since these attributes are deliberately set to `None` in `__init__`.
        if self.key is not None:
            key = self.key(key)
        if self.value is not None:
            value = self.value(value)

        # Compute attention scores.
        # Results in a tensor of shape `[seq_len, batch_size, heads]`
        scores = self.get_scores(query, key)
        # Scale scores $\frac{1}{\sqrt{d_k}}$
        scores *= self.scale
        # Softmax along the sequence dimension
        attn = self.softmax(scores)
        # Store attentions for logging/inspection, as documented in `__init__`
        # (previously `self.attn` was initialized but never assigned)
        self.attn = attn.detach()
        # Apply dropout
        attn = self.dropout(attn)
        # Multiply by the values
        x = torch.einsum("jbh,jbhd->bhd", attn, value)
        # Concatenate multiple heads
        x = x.reshape(x.shape[0], -1)
        # Output layer
        return self.output(x)
class FeedbackTransformerLayer(nn.Module):
    """
    ## Feedback Transformer Layer

    A single pre-norm transformer layer for the feedback transformer:
    self attention over the step memory, followed by a position-wise
    feed-forward network, each with a residual connection.
    """

    def __init__(self, *,
                 d_model: int,
                 attn: FeedbackAttention,
                 feed_forward: FeedForward,
                 dropout_prob: float):
        """
        * `d_model` is the number of features in the transformer
        * `attn` is the feedback attention module
        * `feed_forward` is the position-wise feed forward layer
        * `dropout_prob` is the dropout probability for dropout layers after attention and feed-forward
        """
        super().__init__()
        # Transformer size $d_{model}$
        self.size = d_model
        # Sub-modules
        self.attn = attn
        self.feed_forward = feed_forward
        self.dropout = nn.Dropout(dropout_prob)
        # Pre-attention and pre-feed-forward layer normalizations
        self.norm_self_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])

    def forward(self, *,
                x: torch.Tensor,
                key: Optional[torch.Tensor],
                value: Optional[torch.Tensor]):
        # Attend over the memory only when there is one
        # (at the first step there is no memory yet)
        if key is not None:
            normed = self.norm_self_attn(x)
            attended = self.attn(query=normed, key=key, value=value)
            # Residual connection around attention
            x = x + self.dropout(attended)

        # Position-wise feed-forward with a residual connection
        normed = self.norm_ff(x)
        x = x + self.dropout(self.feed_forward(normed))

        #
        return x
class FeedbackTransformer(nn.Module):
    """
    ## Feedback Transformer Module

    Processes the sequence one step at a time; at every step each layer
    attends to a memory that is a learned weighted combination of the
    layer outputs from all previous steps.
    """

    def __init__(self, layer: FeedbackTransformerLayer, n_layers: int):
        """
        * `layer` is the feedback transformer layer, which we clone for each layer
        * `n_layers` is the number of layers in the transformer
        """
        super().__init__()
        # Make copies of the transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])
        # Weights for combining the per-layer outputs into a single memory vector
        self.weights = nn.Parameter(torch.ones(n_layers + 1), requires_grad=True)
        # Softmax to normalize those weights
        self.softmax = nn.Softmax(0)

    def forward(self, x_seq: torch.Tensor):
        """
        * `x_seq` is the input with shape `[seq_len, batch_size, d_model]`
        """
        # Outputs, one per step
        outputs = []
        # Memory vectors from previous steps
        mem = []
        # Process the sequence step by step
        for x in torch.unbind(x_seq, dim=0):
            # Collect the step input and the output of every layer
            layer_outputs = [x]
            # Stack the memory; `None` at the first step when there is no memory yet
            mem_tensor = torch.stack(mem) if mem else None
            for layer in self.layers:
                x = layer(x=x, key=mem_tensor, value=mem_tensor)
                layer_outputs.append(x)
            # Memory vector for this step: softmax-weighted sum of layer outputs
            stacked = torch.stack(layer_outputs)
            mem.append(torch.einsum('lbd,l->bd', stacked, self.softmax(self.weights)))
            # The last layer's output is the step output
            outputs.append(x)
        # `[seq_len, batch_size, d_model]`, normalized
        return self.norm(torch.stack(outputs))
# <a id="shared_kv"></a>
#
# # Shared keys and values among layers
class StackFunction(torch.autograd.Function):
    """
    ### Stack Function implementation

    A custom autograd function that returns a slice of a pre-allocated shared
    buffer instead of `torch.stack`-ing a Python list at every step.

    `torch.stack` allocates a fresh tensor on each call, while this function
    and the accompanying `Stack` class reuse the same memory across steps,
    which greatly improves performance.
    """

    @staticmethod
    def forward(ctx, memory, memory_grad, last, n):
        """
        * `ctx` is the function context, used to cache values for `backward`
        * `memory` is the shared buffer where values of each step are stacked (keys & values)
        * `memory_grad` is the shared buffer that accumulates per-step gradients
        * `last` is the most recently pushed value
        * `n` is the number of steps (i.e. size of the stack)

        Returns the stacked tensor for steps up to `n`.
        """
        # Stash what `backward` needs
        ctx._mem_grad = memory_grad
        ctx._n = n
        # The first `n + 1` slots form the current stack
        return memory[:n + 1]

    @staticmethod
    def backward(ctx, grad_output):
        """
        * `grad_output` is the gradient with respect to the output of `forward`

        Accumulates the gradients into the shared buffer and returns the
        gradient with respect to the `last` value pushed onto the stack.
        """
        # Size of the stack when `forward` ran
        n = ctx._n
        # Shared gradient buffer
        grad_buffer = ctx._mem_grad
        # Accumulate the incoming gradients
        grad_buffer[:n + 1] += grad_output
        # Only `last` receives a gradient; `memory`, `memory_grad` and `n` do not
        return None, None, grad_buffer[n], None
class Stack:
    """
    ### Stack Module

    This uses the stack function defined above, and does the necessary initializations.

    It owns the shared buffers (`memory` for values, `memory_grad` for accumulated
    gradients) that `StackFunction` slices into, so values can be pushed without
    allocating a new tensor at each step.
    """

    def __init__(self, max_len: int):
        """
        * `max_len` is the maximum size of the stack
        """
        self.max_len = max_len
        # Shared value buffer of shape `[max_len, *value.shape]`; lazily allocated on first `append`
        self.memory = None
        # Shared buffer that accumulates gradients for each stack slot
        self.memory_grad = None
        # The last value pushed; kept so gradients can flow back through `StackFunction`
        self.last = None
        # Current top-of-stack index
        self.n = -1
        # Stack size at the time of the last `get` (used by the sanity check in `append`)
        self.last_get_n = -1

    def append(self, n: int, value: torch.Tensor):
        """
        * `n` is the size of the stack
        * `value` is the tensor that needs to be added to the stack
        """
        # You need to get (use) the stack after adding a value.
        # Otherwise this implementation fails
        assert n == 0 or self.last_get_n == n - 1, f"{n}, {self.last_get_n}"

        # Do this without gradients; `StackFunction` handles gradient flow explicitly
        with torch.no_grad():
            # Initialize the shared memory tensor to keep the stack
            if self.memory is None or self.memory.shape[1:] != value.shape:
                # This should only happen when the stack is empty
                assert n == 0
                # Create a tensor for the stack
                self.memory = value.new_zeros(self.max_len, *value.shape, requires_grad=False)
                # Create a tensor to accumulate the gradients
                self.memory_grad = value.new_zeros(self.memory.shape, requires_grad=False)
            # The memory is already initialized but we are resetting the stack.
            #
            # This could have been another function like `reset`, but
            # we found this easier to use.
            elif n == 0:
                # Reset accumulated gradients
                self.memory_grad.fill_(0.)

            # Set the value in the correct position of the stack
            self.memory.data[n] = value.detach()
            # Keep track of the stack (for debugging)
            self.n = n

        # Keep track of the last value added to the stack.
        # We need this to be passed on to `StackFunction` in order
        # to get the gradients propagated backwards.
        self.last = value

    def get(self):
        """
        Returns the stack
        """
        # Keep track of the size of the stack when it was used.
        # This is used for a sanity check in `append`.
        self.last_get_n = self.n
        # Take it all through `StackFunction` so that `StackFunction.backwards`
        # is called by PyTorch during backpropagation.
        return StackFunction.apply(self.memory, self.memory_grad, self.last, self.n)

    def free(self):
        """
        To release memory
        """
        self.memory = None
        self.memory_grad = None
        self.last = None
class FeedbackTransformerKV(nn.Module):
    """
    ## Updated Feedback Transformer Module

    This is the updated feedback transformer module that caches the keys and values.

    Key/value projection weights are shared among the layers, so the keys and
    values for each step are computed once and pushed onto `Stack`s instead of
    being recomputed by every layer.
    """

    def __init__(self, layer: FeedbackTransformerLayer, n_layers: int, d_model: int, heads: int):
        """
        * `layer` is the feedback transformer layer, which we clone for each layer
        * `n_layers` is the number of layers in the transformer
        * `d_model` is the number of features in the transformer
        * `heads` is the number of attention heads
        """
        super().__init__()
        # Make copies of the transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])
        # Memory vectors are computed as a weighted sum of representations of each layer.
        # This is the weights parameter for that.
        self.weights = nn.Parameter(torch.ones(n_layers + 1), requires_grad=True)
        # Softmax for weights before taking the weighted sum
        self.softmax = nn.Softmax(0)

        # Number of features in a head
        d_k = d_model // heads
        # Module to transform embeddings (memory) to get keys
        self.key = PrepareForMultiHeadAttention(d_model, heads, d_k, bias=False)
        # Module to transform embeddings (memory) to get values
        self.value = PrepareForMultiHeadAttention(d_model, heads, d_k, bias=False)

        # Memory for stacked keys
        self.mem_key = Stack(512)
        # Memory for stacked values
        self.mem_value = Stack(512)

    def forward(self, x_seq: torch.Tensor):
        """
        * `x_seq` is the input with shape `[seq_len, batch_size, d_model]`
        """
        # Split the input to a list along the sequence axis
        x_seq = torch.unbind(x_seq, dim=0)
        # List to store the outputs
        res = []
        # For each input step
        for step, x in enumerate(x_seq):
            # List to store layer outputs
            layer_outputs = [x]

            # Stack of keys and values
            key_tensor = None
            value_tensor = None
            # Get the keys and values tensors if we are beyond the initial step
            if step > 0:
                key_tensor = self.mem_key.get()
                value_tensor = self.mem_value.get()

            # Run through each layer
            for layer in self.layers:
                # Get layer output
                x = layer(x=x, key=key_tensor, value=value_tensor)
                # Append them to the list of layer outputs
                layer_outputs.append(x)

            # Stack the layer outputs to a tensor
            layer_outputs = torch.stack(layer_outputs)
            # Calculate the memory vector as a weighted sum of layer outputs
            mem = torch.einsum('lbd,l->bd', layer_outputs, self.softmax(self.weights))
            # Calculate the keys from memory and add it to the stack
            self.mem_key.append(step, self.key(mem))
            # Calculate the values from memory and add it to the stack
            self.mem_value.append(step, self.value(mem))
            # Append the output to results
            res.append(x)

        # Stack the output tensors
        res = torch.stack(res)
        # Normalize the output
        return self.norm(res)

    def free(self):
        """Release the cached key/value stacks."""
        self.mem_key.free()
        self.mem_value.free()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/rope/experiment.py | labml_nn/transformers/rope/experiment.py | """
---
title: Rotary Positional Embeddings (RoPE) Experiment
summary: This experiment trains a transformer model with Rotary Positional Embeddings (RoPE) on tiny Shakespeare dataset.
---
# Rotary Positional Embeddings (RoPE) Experiment
This is an annotated PyTorch experiment to train a transformer model with Rotary Positional Embeddings (RoPE).
"""
from labml import experiment
from labml.configs import option, calculate
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.basic.autoregressive_experiment import AutoregressiveTransformer, Configs
# ### Rotary PE attention
def _rotary_pe_mha(c: TransformerConfigs):
    """Create a multi-head attention module that applies RoPE to queries and keys."""
    from labml_nn.transformers.rope import RotaryPEMultiHeadAttention
    # `1.` is the RoPE fraction: rotary embeddings are applied to all features of each head
    return RotaryPEMultiHeadAttention(c.n_heads, c.d_model, 1.)


# Configuration options: register `'rotary'` as a choice for each attention slot
calculate(TransformerConfigs.encoder_attn, 'rotary', _rotary_pe_mha)
calculate(TransformerConfigs.decoder_attn, 'rotary', _rotary_pe_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'rotary', _rotary_pe_mha)
@option(Configs.model, 'rotary_pe_transformer')
def _model(c: Configs):
    """
    Create an autoregressive model and initialize weights
    """
    transformer = c.transformer
    # Assemble the model from the configured encoder, source embeddings and
    # generator, then move it to the configured device
    m = AutoregressiveTransformer(transformer.encoder,
                                  transformer.src_embed,
                                  transformer.generator)
    return m.to(c.device)
def main():
    """Train a transformer with rotary positional embeddings on Tiny Shakespeare."""
    # Create experiment
    experiment.create(name="rotary_pe_transformer", writers={'screen'})
    # Create configs
    conf = Configs()
    # Configuration overrides
    overrides = {
        # No fixed positional embeddings
        'transformer.src_embed': 'no_pos',
        'transformer.tgt_embed': 'no_pos',

        # Encoder with RoPE
        'transformer.encoder_attn': 'rotary',

        #
        'model': 'rotary_pe_transformer',

        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',

        # Use a context size of $512$
        'seq_len': 512,
        # Train for 32 epochs
        'epochs': 32,
        # Batch size $4$
        'batch_size': 4,
        # Switch between training and validation $10$ times per epoch
        'inner_iterations': 10,

        # Model size
        'd_model': 128,
        'transformer.ffn.d_ff': 512,
        'transformer.n_heads': 16,
        'transformer.dropout': 0.0,

        # Use [Noam optimizer](../../optimizers/noam.html)
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,

        'dataloader_shuffle_with_replacement': True
    }
    experiment.configs(conf, overrides)
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment and run training
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/rope/__init__.py | labml_nn/transformers/rope/__init__.py | """
---
title: Rotary Positional Embeddings (RoPE)
summary: >
Annotated implementation of RoPE from paper
RoFormer: Enhanced Transformer with Rotary Position Embedding
---
# Rotary Positional Embeddings (RoPE)
This is an implementation of
[Rotary Positional Embeddings (RoPE)](https://arxiv.org/abs/2104.09864)
in [PyTorch](https://pytorch.org).
Rotary Positional Embeddings (RoPE) encode position information of tokens
with a rotation matrix that naturally incorporates explicit relative position
dependency.
Here's [the training code](experiment.html) for training a transformer model with RoPE
on Tiny Shakespeare dataset.
"""
import torch
from torch import nn
from labml.logger import inspect
from labml_nn.transformers.mha import MultiHeadAttention
class RotaryPositionalEmbeddings(nn.Module):
    r"""
    ## RoPE module

    Rotary encoding transforms pairs of features by rotating in the 2D plane.
    That is, it organizes the $d$ features as $\frac{d}{2}$ pairs.
    Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it
    by an angle depending on the position of the token.

    ### For a pair of features

    Let $x^{(1)}_m$ and $x^{(2)}_m$ be two features of the
    key or query of any head at position $m$.
    Or for simplicity assume $x$ has only two features.
    Then the transformation is,

    \begin{align}
    RoPE\big(x^{(1)}_m, x^{(2)}_m, m\big) &=
    \begin{pmatrix}
    \cos m \theta & - \sin m \theta \\
    \sin m \theta & \cos m \theta
    \end{pmatrix}
    \begin{pmatrix}
    x^{(1)}_m \\
    x^{(2)}_m \\
    \end{pmatrix} \\
    &=
    \begin{pmatrix}
    x^{(1)}_m \cos m\theta - x^{(2)}_m \sin m \theta \\
    x^{(2)}_m \cos m\theta + x^{(1)}_m \sin m \theta \\
    \end{pmatrix} \\
    \end{align}

    where $\theta$ is a constant angle. The other pairs of features are transformed similarly.

    ### Attention is relative

    For a pair of features, dot-product attention score between two positions $m$ and $n$ would be

    \begin{align}
    \Big \langle RoPE\big(x^{(1)}_m, x^{(2)}_m, m\big), RoPE\big(x^{(1)}_n, x^{(2)}_n, n\big) \Big \rangle &= \\
    (x^{(1)}_m \cos m\theta - x^{(2)}_m \sin m \theta)(x^{(1)}_n \cos n\theta - x^{(2)}_n \sin n \theta) &+ \\
    (x^{(2)}_m \cos m\theta + x^{(1)}_m \sin m \theta)(x^{(2)}_n \cos n\theta + x^{(1)}_n \sin n \theta) &= \\
    x^{(1)}_m x^{(1)}_n (\cos m\theta \cos n\theta + \sin m \theta \sin n \theta) &+ \\
    x^{(1)}_m x^{(2)}_n (-\cos m\theta \sin n\theta + \sin m \theta \cos n \theta) &+ \\
    x^{(2)}_m x^{(1)}_n (-\sin m\theta \cos n\theta + \cos m \theta \sin n \theta) &+ \\
    x^{(2)}_m x^{(2)}_n (\sin m\theta \sin n\theta + \cos m \theta \cos n \theta) &= \\
    x^{(1)}_m x^{(1)}_n \cos (m - n) \theta +
    x^{(1)}_m x^{(2)}_n \sin(m - n) \theta &+ \\
    - x^{(2)}_m x^{(1)}_n \sin (m - n) \theta +
    x^{(2)}_m x^{(2)}_n \cos (m - n) \theta &= \\
    \big(x^{(1)}_m \cos (m - n)\theta - x^{(2)}_m \sin (m - n) \theta\big) x^{(1)}_n &+ \\
    \big(x^{(2)}_m \cos (m - n)\theta + x^{(1)}_m \sin (m - n) \theta\big) x^{(2)}_n &= \\
    \Big \langle RoPE\big(x^{(1)}_m, x^{(2)}_m, m - n\big), RoPE\big(x^{(1)}_n, x^{(2)}_n, 0\big) \Big \rangle
    \end{align}

    This shows that for dot-product attention the rotary encodings give relative attention.

    ### For all features

    The features are grouped into pairs and handled as above. They use a different $\theta$ for each pair.

    The paper suggests using $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
    for the $\frac{d}{2}$ pairs of features.

    We pair feature $i$ with feature $i + \frac{d}{2}$. So for position $m$ we transform

    \begin{align}
    \begin{pmatrix}
    x^{(i)}_m \\
    x^{(i + \frac{d}{2})}_m
    \end{pmatrix}
    \end{align}

    to

    \begin{align}
    \begin{pmatrix}
    x^{(i)}_m \cos m \theta_i - x^{(i + \frac{d}{2})}_m \sin m \theta_i \\
    x^{(i + \frac{d}{2})}_m \cos m\theta_i + x^{(i)}_m \sin m \theta_i \\
    \end{pmatrix} \\
    \end{align}
    """

    def __init__(self, d: int, base: int = 10_000):
        r"""
        * `d` is the number of features $d$
        * `base` is the constant used for calculating $\Theta$
        """
        super().__init__()

        self.base = base
        self.d = d
        # Cached $\cos m\theta_i$ / $\sin m\theta_i$ tables; built lazily on first use
        self.cos_cached = None
        self.sin_cached = None

    def _build_cache(self, x: torch.Tensor):
        r"""
        Cache $\cos$ and $\sin$ values
        """
        # Return if cache is already built and covers this sequence length.
        # NOTE(review): the cache is keyed only by length — if `x` later arrives on a
        # different device or dtype the cached tables are not rebuilt; assumes all
        # calls use the same device/dtype — confirm against callers.
        if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]:
            return

        # Get sequence length
        seq_len = x.shape[0]

        # $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
        theta = 1. / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device)

        # Create position indexes `[0, 1, ..., seq_len - 1]`
        seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device)

        # Calculate the product of position index and $\theta_i$
        idx_theta = torch.einsum('n,d->nd', seq_idx, theta)

        # Concatenate so that for row $m$ we have
        # $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$
        idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1)

        # Cache them with shape `[seq_len, 1, 1, d]` so they broadcast over batch and heads
        self.cos_cached = idx_theta2.cos()[:, None, None, :]
        self.sin_cached = idx_theta2.sin()[:, None, None, :]

    def _neg_half(self, x: torch.Tensor):
        # $\frac{d}{2}$
        d_2 = self.d // 2

        # Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
        return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1)

    def forward(self, x: torch.Tensor):
        r"""
        * `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]`
        """
        # Cache $\cos$ and $\sin$ values
        self._build_cache(x)

        # Sequence length
        seq_len = x.shape[0]

        # Split the features, we can choose to apply rotary embeddings only to a partial set of features.
        x_rope, x_pass = x[..., :self.d], x[..., self.d:]

        # Calculate
        # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
        neg_half_x = self._neg_half(x_rope)

        # Calculate
        #
        # \begin{align}
        # \begin{pmatrix}
        # x^{(i)}_m \cos m \theta_i - x^{(i + \frac{d}{2})}_m \sin m \theta_i \\
        # x^{(i + \frac{d}{2})}_m \cos m\theta_i + x^{(i)}_m \sin m \theta_i \\
        # \end{pmatrix} \\
        # \end{align}
        #
        # for $i \in {1, 2, ..., \frac{d}{2}}$
        x_rope = (x_rope * self.cos_cached[:seq_len]) + (neg_half_x * self.sin_cached[:seq_len])

        # Concatenate the rotated and pass-through features back together
        return torch.cat((x_rope, x_pass), dim=-1)
class RotaryPEMultiHeadAttention(MultiHeadAttention):
    """
    ## Multi-head attention with rotary positional embeddings

    We override [multi-head attention from original transformer](../mha.html),
    replacing the score computation with one that rotates the queries and keys
    before taking their dot-product.
    """

    def __init__(self, heads: int, d_model: int, rope_percentage: float = 0.5, dropout_prob: float = 0.0):
        super().__init__(heads, d_model, dropout_prob)

        # Number of features per head that get rotary embeddings applied
        d_rope = int(self.d_k * rope_percentage)
        # Separate rotary embedding modules for queries and keys
        self.query_rotary_pe = RotaryPositionalEmbeddings(d_rope)
        self.key_rotary_pe = RotaryPositionalEmbeddings(d_rope)

    def get_scores(self, query: torch.Tensor, key: torch.Tensor):
        """
        ### Calculate scores between queries and keys
        """
        # Rotate queries and keys, then take the dot-product
        rotated_query = self.query_rotary_pe(query)
        rotated_key = self.key_rotary_pe(key)
        return torch.einsum('ibhd,jbhd->ijbh', rotated_query, rotated_key)
def _test_rotary():
    """Apply RoPE to a tiny hand-written example and print the result."""
    # Three positions, a single batch and head, four features each:
    # shape `[seq_len, batch_size, n_heads, d]`
    x = torch.tensor([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]], dtype=torch.float)
    x = x.reshape(3, 1, 1, 4)
    inspect(x)

    rotary_pe = RotaryPositionalEmbeddings(4)
    inspect(rotary_pe(x))


if __name__ == '__main__':
    _test_rotary()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/rope/value_pe/arithmetic_experiment.py | labml_nn/transformers/rope/value_pe/arithmetic_experiment.py | """
---
title: Rotary Positional Embeddings with Relative distance (RoPER) Experiment
summary: This experiment trains a transformer model with Rotary Positional Embeddings with
Relative Distance (RoPER) on the arithmetic addition task.
---
# Rotary Positional Embeddings with Relative distance ([RoPER](index.html)) Experiment
"""
from labml import experiment
from labml.configs import calculate
from labml_nn.experiments.arithmetic_dataset import ArithmeticAutoregression
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.rope.experiment import Configs as RoPEConfigs
class Configs(RoPEConfigs, ArithmeticAutoregression):
    """
    We inherit [RoPE experiment](../experiment.html) and use it for
    [arithmetic addition task](../../experiments/arithmetic_dataset.html).

    We add the option to change attention to use Rotary Positional Embeddings with Relative distance (RoPER)
    below.
    """
    pass
def _rotary_value_pe_mha(c: TransformerConfigs):
    """
    Use Rotary Positional Embeddings with Relative distance ([RoPER](index.html)) in attention.
    """
    # Imported locally so the dependency is only loaded when this option is selected
    from labml_nn.transformers.rope.value_pe import RotaryValuePEMultiHeadAttention

    # Rotate all features of queries/keys and of values (both percentages are 1)
    return RotaryValuePEMultiHeadAttention(c.n_heads, c.d_model, 1., 1.)
# Configuration options: register 'rotary_value' as a selectable choice for
# each attention slot of the transformer configs
calculate(TransformerConfigs.encoder_attn, 'rotary_value', _rotary_value_pe_mha)
calculate(TransformerConfigs.decoder_attn, 'rotary_value', _rotary_value_pe_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'rotary_value', _rotary_value_pe_mha)
def main():
    """
    Create and run the RoPER arithmetic-addition experiment.
    """
    # Create experiment
    experiment.create(name="roper_addition", comment="rotary value 7", writers={'screen', 'labml'})
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Numbers in the addition problems have up to 7 digits
        'max_digits': 7,
        # No fixed positional embeddings
        'transformer.src_embed': 'no_pos',
        'transformer.tgt_embed': 'no_pos',
        # Encoder with RoPER attention
        'transformer.encoder_attn': 'rotary_value',
        # Encoder with RoPE attention
        # 'transformer.encoder_attn': 'rotary',
        #
        'model': 'rotary_pe_transformer',
        # Use a context size of $512$
        'seq_len': 512,
        # Train for 20 epochs
        'epochs': 20,
        # Batch size $16$
        'batch_size': 16,
        # Model size
        'd_model': 128,
        'transformer.ffn.d_ff': 512,
        'transformer.n_heads': 4,
        'transformer.dropout': 0.0,
        # Use [Adam optimizer](../../optimizers/adam.html)
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/rope/value_pe/experiment.py | labml_nn/transformers/rope/value_pe/experiment.py | """
---
title: Rotary Positional Embeddings (RoPE) Experiment
summary: This experiment trains a transformer model with Rotary Positional Embeddings (RoPE) on tiny Shakespeare dataset.
---
# Rotary Positional Embeddings (RoPE) Experiment
This is an annotated PyTorch experiment to train a transformer model with Rotary Positional Embeddings (RoPE).
"""
from labml import experiment
from labml.configs import calculate
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.rope.experiment import Configs as RoPEConfigs
# ### Rotary PE attention
class Configs(RoPEConfigs):  # , ArithmeticAutoregression):
    """
    Experiment configurations; inherits the [RoPE experiment](../experiment.html) setup unchanged.
    """
    pass
def _rotary_value_pe_mha(c: TransformerConfigs):
    """
    Create a RoPER (rotary value PE) attention module from the transformer configs.
    """
    # Local import so the module is only loaded when this option is used
    from labml_nn.transformers.rope.value_pe import RotaryValuePEMultiHeadAttention

    # Both rope percentages are 1: rotate every feature of queries/keys and values
    return RotaryValuePEMultiHeadAttention(c.n_heads, c.d_model, 1., 1.)
# Configuration options: register 'rotary_value' as a selectable choice for
# each attention slot of the transformer configs
calculate(TransformerConfigs.encoder_attn, 'rotary_value', _rotary_value_pe_mha)
calculate(TransformerConfigs.decoder_attn, 'rotary_value', _rotary_value_pe_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'rotary_value', _rotary_value_pe_mha)
def main():
    """
    Create and run the RoPER Tiny Shakespeare experiment.
    """
    # Create experiment
    experiment.create(name="rotary_shakespeare", comment="rotary value", writers={'screen', 'labml'})
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # No fixed positional embeddings
        'transformer.src_embed': 'no_pos',
        'transformer.tgt_embed': 'no_pos',
        # Encoder with RoPER (rotary value) attention
        'transformer.encoder_attn': 'rotary_value',
        # 'transformer.encoder_attn': 'rotary',
        #
        'model': 'rotary_pe_transformer',
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Use a context size of $512$
        'seq_len': 512,
        # Train for 24 epochs
        'epochs': 24,
        # Batch size $16$
        'batch_size': 16,
        # Switch between training and validation $4$ times
        # per epoch
        'inner_iterations': 4,
        # Model size
        'd_model': 128,
        'transformer.ffn.d_ff': 512,
        'transformer.n_heads': 4,
        'transformer.dropout': 0.0,
        # Use [Adam optimizer](../../optimizers/adam.html)
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'dataloader_shuffle_with_replacement': True
    })
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/rope/value_pe/__init__.py | labml_nn/transformers/rope/value_pe/__init__.py | """
---
title: Rotary Positional Embeddings with Relative distance (RoPER)
summary: >
This is an implementation of RoPER which adds relative distance information to embeddings on
top of RoPE introduced in RoFormer: Enhanced Transformer with Rotary Position Embedding
---
*RoPER is work by [Georges Harik (@gharik)](https://twitter.com/gharik),
and this implementation is based on his original code.*
# Rotary Positional Embeddings with Relative distance (RoPER)
[Rotary Positional Embeddings (RoPE)](https://arxiv.org/abs/2104.09864) includes
relative positions in attention score calculation.
However, the embeddings themselves do not get any positional information,
except what they can get implicitly from causal attention.
RoPER adds relative positional information explicitly to value embeddings.
Specifically, it adds the relative positions of the tokens it paid attention to.
We use same rotary positional embeddings to rotate the values in attention,
Then, after taking the weighted sum,
we rotate the final output in the opposite direction.
Which is equivalent to rotating each of the values (before attention) relative to the current position.
Here's [the training code](experiment.html) for training a transformer model with RoPER
on an arithmetic addition where we can see significant improvement over RoPE.
### Relative distances in embeddings
For any head, let $a_{n,i}$ be the attention from position $n$ to position $i$,
and $v_i$ be the value embeddings at position $i$. Let's denote individual features
as $v^{(1)}_i, v^{(2)}_i, \dots$.
Normally, we would take the weighted sum of value embeddings
$$o^{(j)}_n = \sum_i a_{n,i} v^{(j)}_i$$
This doesn't explicitly add any distance information about the positions $i$ to final
result $o^{(j)}_n$.
RoPER pairs features like RoPE and transform them.
For a pair $v^{(1)}_m$ and $v^{(2)}_m$ it transforms them by
$RoPE\big(v^{(1)}_m, v^{(2)}_m, m\big)$.
Let us donate the transformed features with $\hat{v}^{(1)}_m, \hat{v}^{(2)}_m$.
Then it rotates the weighted sum $\hat{o}^{(j)}_n$ in the reverse direction with
$RoPE\big(\hat{o}^{(1)}_n, \hat{o}^{(2)}_n, -n\big)$.
*Note the* $-n$.
Note that,
\begin{align}
RoPE\big(x^{(1)}_m, x^{(2)}_m, m\big) &=
\begin{pmatrix}
\cos m \theta & - \sin m \theta \\
\sin m \theta & \cos m \theta
\end{pmatrix}
\begin{pmatrix}
x^{(1)}_m \\
x^{(2)}_m \\
\end{pmatrix} \\
&=
\begin{pmatrix}
x^{(1)}_m \cos m\theta - x^{(2)}_m \sin m \theta \\
x^{(2)}_m \cos m\theta + x^{(1)}_m \sin m \theta \\
\end{pmatrix} \\
\end{align}
Final output after with the transformations is,
\begin{align}
RoPE\big(\hat{o}^{(1)}_n, \hat{o}^{(2)}_n, -n\big) &= \\
\begin{pmatrix}
\hat{o}^{(1)}_n \cos n\theta + \hat{o}^{(2)}_n \sin n \theta \\
\hat{o}^{(2)}_n \cos n\theta - \hat{o}^{(1)}_n \sin n \theta \\
\end{pmatrix} \\
\end{align}
*Note that* $\sin (-n \theta) = -\sin n \theta$.
Let's expand the first term $\hat{o}^{(1)}_n \cos n\theta + \hat{o}^{(2)}_n \sin n \theta$,
\begin{align}
\hat{o}^{(1)}_n \cos n\theta + \hat{o}^{(2)}_n \sin n \theta &= \\
\sum_i a_{n,i} \hat{v}^{(1)}_i \cos n\theta + \sum_i a_{n,i} \hat{v}^{(2)}_i \sin n \theta &= \\
\sum_i a_{n,i} \Big( v^{(1)}_i \cos i\theta - v^{(2)}_i \sin i \theta \Big) \cos n\theta &+ \\
\sum_i a_{n,i} \Big( v^{(2)}_i \cos i\theta + v^{(1)}_i \sin i \theta \Big) \sin n \theta &= \\
\sum_i a_{n,i} v^{(1)}_i \Big( \cos i\theta \cos n\theta + \sin i \theta \sin n \theta \Big) &+ \\
\sum_i a_{n,i} v^{(2)}_i \Big( \cos i\theta \sin n\theta - \sin i \theta \cos n \theta \Big) &= \\
\sum_i a_{n,i} v^{(1)}_i \cos (i - n) \theta - \sum_i a_{n,i} v^{(2)}_i \sin (i - n) \theta &= \\
\sum_i a_{n,i} v^{(1)}_i \cos (i - n) \theta - \sum_i a_{n,i} v^{(2)}_i \sin (i - n) \theta
\end{align}
Similarly we can show that the second term is equal to,
$$\sum_i a_{n,i} v^{(1)}_i \cos (i - n) \theta + \sum_i a_{n,i} v^{(2)}_i \sin (i - n) \theta$$
Which gives,
\begin{align}
RoPE\big(\hat{o}^{(1)}_n, \hat{o}^{(2)}_n, -n\big) &= \\
\begin{pmatrix}
\sum_i a_{n,i} v^{(1)}_i \cos (i - n) \theta - \sum_i a_{n,i} v^{(2)}_i \sin (i - n) \theta \\
\sum_i a_{n,i} v^{(1)}_i \cos (i - n) \theta + \sum_i a_{n,i} v^{(2)}_i \sin (i - n) \theta \\
\end{pmatrix} &= \\
\sum_i a_{n,i} RoPE \big (v^{(1)}_i, v^{(2)}_i, (i - n) \theta \big)
\end{align}
That is, the weighted average of values rotated relative to current position.
[Here's an experiment](arithmetic_experiment.html) that uses RoPER on an arithmetic addition task.
"""
from typing import Optional
import torch
from labml_nn.transformers.rope import RotaryPositionalEmbeddings, RotaryPEMultiHeadAttention
class ReverseRotaryPositionalEmbeddings(RotaryPositionalEmbeddings):
    """
    ## RoPE module that rotates in the opposite direction

    This inherits from [RoPE rotation implementation](../index.html) and changes the direction.
    The only difference from the parent's `forward` is the sign of the `sin` term,
    which rotates each position by $-m\theta_i$ instead of $m\theta_i$.
    """

    def forward(self, x: torch.Tensor):
        """
        * `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]`
        """
        # Cache $\cos$ and $\sin$ values
        self._build_cache(x)
        # Split the features, we can choose to apply rotary embeddings only to a partial set of features.
        x_rope, x_pass = x[..., :self.d], x[..., self.d:]
        # Calculate
        # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
        neg_half_x = self._neg_half(x_rope)
        # Calculate
        #
        # \begin{align}
        # \begin{pmatrix}
        # x^{(i)}_m \cos -m \theta_i - x^{(i + \frac{d}{2})}_m \sin -m \theta_i \\
        # x^{(i + \frac{d}{2})}_m \cos -m\theta_i + x^{(i)}_m \sin -m \theta_i \\
        # \end{pmatrix} = \\
        # \begin{pmatrix}
        # x^{(i)}_m \cos m \theta_i + x^{(i + \frac{d}{2})}_m \sin m \theta_i \\
        # x^{(i + \frac{d}{2})}_m \cos m\theta_i - x^{(i)}_m \sin m \theta_i \\
        # \end{pmatrix} \\
        # \end{align}
        #
        # for $i \in {1, 2, ..., \frac{d}{2}}$
        #
        # Note the `-` (instead of `+` in the parent class): this is the reverse rotation.
        # The caches are sliced to the sequence length `x.shape[0]`.
        x_rope = (x_rope * self.cos_cached[:x.shape[0]]) - (neg_half_x * self.sin_cached[:x.shape[0]])
        #
        return torch.cat((x_rope, x_pass), dim=-1)
class RotaryValuePEMultiHeadAttention(RotaryPEMultiHeadAttention):
    """
    ## Multi-head attention with rotary positional embeddings (RoPER)

    We override [multi-head attention from original transformer](../mha.html),
    adding forward/reverse rotations of the value embeddings on top of the
    parent's RoPE scores.
    """

    def __init__(self, heads: int, d_model: int,
                 rope_percentage: float = 0.5, rope_value_percentage: float = 0.5,
                 dropout_prob: float = 0.0):
        """
        * `heads` is the number of attention heads
        * `d_model` is the model embedding size
        * `rope_percentage` is the fraction of query/key features that get rotary embeddings
        * `rope_value_percentage` is the fraction of value features that get rotary embeddings
        * `dropout_prob` is the attention dropout probability
        """
        super().__init__(heads, d_model, rope_percentage, dropout_prob)
        # Rotary positional embedding layers for values: a forward rotation applied
        # before attention and a reverse rotation applied after the weighted sum
        d_rope_value = int(self.d_k * rope_value_percentage)
        self.value_rotary_pe = RotaryPositionalEmbeddings(d_rope_value)
        self.value_reverse_rotary_pe = ReverseRotaryPositionalEmbeddings(d_rope_value)

    def forward(self, *,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: Optional[torch.Tensor] = None):
        """
        `query`, `key` and `value` are the tensors that store
        collection of *query*, *key* and *value* vectors.
        They have shape `[seq_len, batch_size, d_model]`.

        `mask` has shape `[seq_len, seq_len, batch_size]` and
        `mask[i, j, b]` indicates whether for batch `b`,
        query at position `i` has access to key-value at position `j`.
        """
        # `query`, `key` and `value` have shape `[seq_len, batch_size, d_model]`
        seq_len, batch_size, _ = query.shape

        if mask is not None:
            mask = self.prepare_mask(mask, query.shape, key.shape)

        # Prepare `query`, `key` and `value` for attention computation.
        # These will then have shape `[seq_len, batch_size, heads, d_k]`.
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)

        # Compute attention scores $Q K^\top$.
        # This gives a tensor of shape `[seq_len, seq_len, batch_size, heads]`.
        scores = self.get_scores(query, key)

        # Scale scores $\frac{Q K^\top}{\sqrt{d_k}}$
        scores *= self.scale
        # Apply mask
        if mask is not None:
            scores = scores.masked_fill(mask == 0, float('-inf'))

        # $softmax$ attention along the key sequence dimension
        # $\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)$
        attn = self.softmax(scores)
        # Apply dropout
        attn = self.dropout(attn)

        # Rotate value embeddings before taking the weighted sum so that they contain positional information
        value = self.value_rotary_pe(value)
        # Multiply by values
        # $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)V$$
        x = torch.einsum("ijbh,jbhd->ibhd", attn, value)
        # Rotate in the opposite direction so that each embedding holds the relative positions
        x = self.value_reverse_rotary_pe(x)

        # Save attentions for any other calculations
        self.attn = attn.detach()
        # Concatenate multiple heads
        x = x.reshape(seq_len, batch_size, -1)
        # Output layer
        return self.output(x)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/basic/__init__.py | labml_nn/transformers/basic/__init__.py | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false | |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/basic/with_sophia.py | labml_nn/transformers/basic/with_sophia.py | """
---
title: Transformer Auto-Regression Experiment with [Sophia-G optimizer](../../optimizers/sophia.html)
summary: >
This trains a simple transformer model on NLP auto-regression with Sophia-G optimizer.
---
# Transformer Auto-Regression Experiment with [Sophia-G optimizer](../../optimizers/sophia.html)
This trains a simple transformer introduced in [Attention Is All You Need](https://arxiv.org/abs/1706.03762)
on an NLP auto-regression task (with Tiny Shakespeare dataset) with [Sophia-G optimizer](../../optimizers/sophia.html).
"""
import torch
from labml import experiment, tracker
from labml_nn.helpers.trainer import BatchIndex
from labml_nn.optimizers.sophia import Sophia
from labml_nn.transformers.basic.autoregressive_experiment import Configs as TransformerAutoRegressionConfigs
class Configs(TransformerAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from [`Configs`](autoregressive_experiment.html) and overrides the
    training step to maintain the Hessian diagonal estimate that
    [Sophia-G](../../optimizers/sophia.html) needs.
    """

    # Number of steps between Hessian diagonal estimates ($k$ in the Sophia paper)
    hess_interval: int = 10
    optimizer: Sophia

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training or validation step with Gauss-Newton-Bartlett (GNB) Hessian diagonal estimator

        Every `hess_interval` training steps, this updates the optimizer's EMA of the
        Hessian diagonal instead of taking an optimization step.
        """
        # Set training/eval mode
        self.model.train(self.mode.is_train)

        # Move data to the device (done once — the previous version moved it
        # again in the `else` branch, which was a redundant no-op)
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Estimate the Hessian diagonal every $k$ steps
        if isinstance(self.optimizer, Sophia) and self.mode.is_train and batch_idx.idx % self.hess_interval == 0:
            # Get model outputs
            output, *_ = self.model(data)
            # Create a categorical distribution from logits
            samp_dist = torch.distributions.Categorical(logits=output)
            # Sample $\hat{y}$
            y_sample = samp_dist.sample()
            # Calculate and log loss
            loss = self.loss_func(output, y_sample)
            tracker.add("loss.hess.", loss)
            # Calculate gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
            # Update EMA Hessian diagonal
            #
            # \begin{align}
            # \hat{h}_t &= B \cdot \nabla_\theta \hat{L} (\theta) \odot \nabla_\theta \hat{L} (\theta) \\
            # h_t &= \beta_2 h_{t-k} + (1 - \beta_2) \hat{h}_t
            # \end{align}
            self.optimizer.update_hessian(data.numel())
            # Clear the gradients
            self.optimizer.zero_grad()
        else:
            # Update global step (number of tokens processed) when in training mode
            if self.mode.is_train:
                tracker.add_global_step(data.shape[0] * data.shape[1])
            # Get model outputs.
            # It's returning a tuple for states when using RNNs.
            # This is not implemented yet. 😜
            output, *_ = self.model(data)
            # Calculate and log loss
            loss = self.loss_func(output, target)
            tracker.add("loss.", loss)
            # Calculate and log accuracy
            self.accuracy(output, target)
            self.accuracy.track()
            self.other_metrics(output, target)
            # Train the model
            if self.mode.is_train:
                # Calculate gradients
                loss.backward()
                # Clip gradients
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
                # Take optimizer step
                self.optimizer.step()
                # Log the model parameters and gradients on last batch of every epoch
                if batch_idx.is_last and self.is_log_model_params_grads:
                    tracker.add('model', self.model)
                # Clear the gradients
                self.optimizer.zero_grad()
            # Save the tracked metrics
            tracker.save()
def main():
    """
    Create and run the Sophia-G transformer experiment.
    """
    # Create experiment
    experiment.create(name="transformer")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Use a context size of $512$
        'seq_len': 512,
        # Train for 32 epochs
        'epochs': 32,
        # Batch size $16$
        'batch_size': 16,
        # Switch between training and validation for $10$ times
        # per epoch
        'inner_iterations': 10,
        # Model size
        'd_model': 256,
        'transformer.n_heads': 16,
        'transformer.ffn.d_ff': 1024,
        # Use [Sophia optimizer](../../optimizers/sophia.html)
        'optimizer.optimizer': 'Sophia',
        'optimizer.learning_rate': 3e-4,
        'optimizer.rho': 0.03,
    })
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/basic/autoregressive_experiment.py | labml_nn/transformers/basic/autoregressive_experiment.py | """
---
title: Transformer Auto-Regression Experiment
summary: >
This trains a simple transformer model on NLP auto-regression.
---
# Transformer Auto-Regression Experiment
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/basic/autoregressive_experiment.ipynb)
This trains a simple transformer introduced in [Attention Is All You Need](https://arxiv.org/abs/1706.03762)
on an NLP auto-regression task (with Tiny Shakespeare dataset).
"""
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import TransformerConfigs, Encoder
from labml_nn.transformers.utils import subsequent_mask
class AutoregressiveTransformer(nn.Module):
    """
    ## Auto-Regressive model
    """

    def __init__(self, encoder: Encoder, src_embed: nn.Module, generator: nn.Module):
        """
        * `encoder` is the transformer [Encoder](../models.html#Encoder)
        * `src_embed` is the token
        [embedding module (with positional encodings)](../models.html#EmbeddingsWithLearnedPositionalEncoding)
        * `generator` is the [final fully connected layer](../models.html#Generator) that gives the logits.
        """
        super().__init__()
        self.src_embed = src_embed
        self.encoder = encoder
        self.generator = generator
        # Causal mask, built lazily on the first call and cached between calls
        self.mask = None

    def forward(self, x: torch.Tensor):
        seq_len = len(x)
        # (Re)build the mask when it is missing or its size no longer matches the input
        if self.mask is None or self.mask.size(0) != seq_len:
            # Subsequent mask blocks attention to future tokens
            self.mask = subsequent_mask(seq_len).to(x.device)
        # Token embeddings with positional encodings
        embeddings = self.src_embed(x)
        # Run the transformer encoder with the causal mask
        encoded = self.encoder(embeddings, self.mask)
        # Project to vocabulary logits
        logits = self.generator(encoded)
        # Second element is the state, kept `None` for API compatibility with RNN trainers
        return logits, None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from
    [`NLPAutoRegressionConfigs`](../../experiments/nlp_autoregression.html#NLPAutoRegressionConfigs)
    """

    # GPT model
    model: AutoregressiveTransformer
    # Transformer
    transformer: TransformerConfigs
@option(Configs.transformer, 'Transformer')
def _transformer_configs(c: Configs):
    """
    ### Transformer configurations

    Build the [configurable transformer](../configs.html#TransformerConfigs)
    from the experiment configs.
    """
    cfg = TransformerConfigs()
    # Vocabulary sizes for the embedding layers and the logit projection
    cfg.n_src_vocab = c.n_tokens
    cfg.n_tgt_vocab = c.n_tokens
    # Match the experiment's model dimensionality
    cfg.d_model = c.d_model
    return cfg
@option(Configs.model)
def _model(c: Configs):
    """
    Create GPT model and initialize weights
    """
    # Assemble the autoregressive model from the transformer's encoder,
    # source embeddings and generator, then move it to the configured device
    m = AutoregressiveTransformer(c.transformer.encoder,
                                  c.transformer.src_embed,
                                  c.transformer.generator).to(c.device)
    return m
def main():
    """
    Create and run the basic transformer auto-regression experiment.
    """
    # Create experiment
    experiment.create(name="transformer")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Use a context size of $512$
        'seq_len': 512,
        # Train for 32 epochs
        'epochs': 32,
        # Batch size $16$
        'batch_size': 16,
        # Switch between training and validation for $10$ times
        # per epoch
        'inner_iterations': 10,
        # Model size
        'd_model': 256,
        'transformer.n_heads': 16,
        'transformer.ffn.d_ff': 1024,
        # Use [Noam optimizer](../../optimizers/noam.html)
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,
    })
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/retro/bert_embeddings.py | labml_nn/transformers/retro/bert_embeddings.py | """
---
title: BERT Embeddings of chunks of text
summary: >
Generate BERT embeddings for chunks using a frozen BERT model
---
# BERT Embeddings of chunks of text
This is the code to get BERT embeddings of chunks for [RETRO model](index.html).
"""
from typing import List
import torch
from transformers import BertTokenizer, BertModel
from labml import lab, monit
class BERTChunkEmbeddings:
    """
    ## BERT Embeddings

    For a given chunk of text $N$ this class generates BERT embeddings $\text{B\small{ERT}}(N)$.
    $\text{B\small{ERT}}(N)$ is the average of BERT embeddings of all the tokens in $N$.
    """

    def __init__(self, device: torch.device):
        """
        * `device` is the device to load and run the BERT model on
        """
        self.device = device

        # Load the BERT tokenizer from [HuggingFace](https://huggingface.co/bert-base-uncased)
        with monit.section('Load BERT tokenizer'):
            self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                                           cache_dir=str(
                                                               lab.get_data_path() / 'cache' / 'bert-tokenizer'))

        # Load the BERT model from [HuggingFace](https://huggingface.co/bert-base-uncased)
        with monit.section('Load BERT model'):
            self.model = BertModel.from_pretrained("bert-base-uncased",
                                                   cache_dir=str(lab.get_data_path() / 'cache' / 'bert-model'))

            # Move the model to `device`
            self.model.to(device)

    @staticmethod
    def _trim_chunk(chunk: str):
        """
        In this implementation, we do not make chunks with a fixed number of tokens.
        One of the reasons is that this implementation uses character-level tokens and BERT
        uses its sub-word tokenizer.

        So this method will truncate the text to make sure there are no partial tokens.

        For instance, a chunk could be like `s a popular programming la`, with partial
        words (partial sub-word tokens) on the ends.
        We strip them off to get better BERT embeddings.
        As mentioned earlier this is not necessary if we broke chunks after tokenizing.
        """
        # Strip whitespace
        stripped = chunk.strip()
        # Break into words
        parts = stripped.split()
        # Nothing to trim (empty or whitespace-only chunk) — return the chunk unchanged.
        # Previously `parts[0]` would raise an IndexError here.
        if not parts:
            return chunk
        # Remove first and last pieces
        stripped = stripped[len(parts[0]):-len(parts[-1])]
        # Remove whitespace
        stripped = stripped.strip()
        # If empty return original string
        if not stripped:
            return chunk
        # Otherwise, return the stripped string
        else:
            return stripped

    def __call__(self, chunks: List[str]):
        """
        ### Get $\text{B\small{ERT}}(N)$ for a list of chunks.
        """
        # We don't need to compute gradients
        with torch.no_grad():
            # Trim the chunks
            trimmed_chunks = [self._trim_chunk(c) for c in chunks]
            # Tokenize the chunks with BERT tokenizer
            tokens = self.tokenizer(trimmed_chunks, return_tensors='pt', add_special_tokens=False, padding=True)
            # Move token ids, attention mask and token types to the device
            input_ids = tokens['input_ids'].to(self.device)
            attention_mask = tokens['attention_mask'].to(self.device)
            token_type_ids = tokens['token_type_ids'].to(self.device)
            # Evaluate the model
            output = self.model(input_ids=input_ids,
                                attention_mask=attention_mask,
                                token_type_ids=token_type_ids)
            # Get the token embeddings
            state = output['last_hidden_state']
            # Calculate the average token embeddings.
            # Note that the attention mask is `0` if the token is empty padded.
            # We get empty tokens because the chunks are of different lengths.
            emb = (state * attention_mask[:, :, None]).sum(dim=1) / attention_mask[:, :, None].sum(dim=1)
            #
            return emb
def _test():
    """
    ### Code to test BERT embeddings

    Requires a CUDA device and downloads BERT weights on first run.
    """
    from labml.logger import inspect

    # Initialize
    device = torch.device('cuda:0')
    bert = BERTChunkEmbeddings(device)

    # Sample
    text = ["Replace me by any text you'd like.",
            "Second sentence"]

    # Check BERT tokenizer
    encoded_input = bert.tokenizer(text, return_tensors='pt', add_special_tokens=False, padding=True)
    inspect(encoded_input, _expand=True)

    # Check BERT model outputs
    output = bert.model(input_ids=encoded_input['input_ids'].to(device),
                        attention_mask=encoded_input['attention_mask'].to(device),
                        token_type_ids=encoded_input['token_type_ids'].to(device))
    inspect({'last_hidden_state': output['last_hidden_state'],
             'pooler_output': output['pooler_output']},
            _expand=True)

    # Check recreating text from token ids
    inspect(bert.tokenizer.convert_ids_to_tokens(encoded_input['input_ids'][0]), _n=-1)
    inspect(bert.tokenizer.convert_ids_to_tokens(encoded_input['input_ids'][1]), _n=-1)

    # Get chunk embeddings
    inspect(bert(text))
#
if __name__ == '__main__':
_test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/retro/train.py | labml_nn/transformers/retro/train.py | """
---
title: RETRO training
summary: >
Training RETRO model with Tiny Shakespeare dataset
---
# RETRO training
This is the training code for
[RETRO](index.html).
"""
import torch
from labml import monit, lab, tracker, experiment, logger
from labml.logger import Text
from labml_nn.helpers.datasets import TextFileDataset
from labml_nn.optimizers.noam import Noam
from labml_nn.transformers.retro import model as retro
from labml_nn.transformers.retro.dataset import Dataset, RetroIndex
from labml_nn.transformers.retro.model import RetroModel, NearestNeighborEncoder
from torch import nn
from torch.utils.data import DataLoader, RandomSampler
class Sampler:
    """
    ## Sampler

    This class greedily samples from a model.
    """

    def __init__(self, device: torch.device, model: retro.RetroModel, tds: TextFileDataset, chunk_len: int):
        """
        * `device` is the device of the model
        * `model` is the [Retro mode](retro.html)
        * `tds` is the text dataset (used to get neighbor chunks)
        * `chunk_len` is the length of a chunk
        """
        self.chunk_len = chunk_len
        self.tds = tds
        self.model = model
        self.device = device

        # [Retro index](database.html)
        self.index = RetroIndex()

    def retrieve_nearest_neighbours(self, chunk: str):
        """
        ### Retrieve nearest neighbors of a given chunk
        """
        # Retrieve the offsets of the nearest neighbors
        neighbor_offsets = self.index([chunk], None)

        # Get the neighbors (with neighbor length equal to `chunk_len * 2`)
        text = self.tds.train
        neighbors = [text[j: j + self.chunk_len * 2] for j in neighbor_offsets[0]]

        #
        return neighbors

    def sample(self, prompt: str, sample_len: int):
        """
        ### Sample text from the given prompt

        Greedy decoding: picks the arg-max token at each step.
        """
        # To store nearest neighbors as strings
        neighbors_str = []

        # Sampled text
        sampled = ''

        # Sample `sample_len` tokens
        for i in range(sample_len):
            # We need to retrieve neighbors,
            # if there are more sampled chunks than we have already retrieved for
            while len(neighbors_str) < len(prompt) // self.chunk_len:
                # Get the last chunk for which we haven't retrieved neighbors
                off = len(neighbors_str) * self.chunk_len
                chunk = prompt[off: off + self.chunk_len]
                # Retrieve nearest neighbors
                neighbors_str.append(self.retrieve_nearest_neighbours(chunk))

            # Tokenize the input
            src = self.tds.text_to_i(prompt)
            # Tokenize the retrieved neighbors
            neighbors = torch.stack([torch.stack([self.tds.text_to_i(n) for n in chunk]) for chunk in neighbors_str])

            # Move them to the same device as the model
            src = src.to(self.device)
            neighbors = neighbors.to(self.device)

            # Get model output
            res = self.model(src[None, :], neighbors[None, :, :, :])

            # Greedily sample the last token
            token = res[0, -1, :].argmax(dim=-1)

            # Add the sampled token text to the prompt and sample text
            prompt += self.tds.itos[token.item()]
            sampled += self.tds.itos[token.item()]

        #
        return sampled
class Trainer:
    """
    ## Retro trainer
    """

    def __init__(self, device: torch.device, model: retro.RetroModel,
                 dataloader: DataLoader, optimizer: torch.optim.Optimizer):
        """
        * `device` is the device of the model
        * `model` is the [Retro mode](retro.html)
        * `dataloader` is the dataloader for the [dataset with pre-retrieved neighbors](dataset.html)
        * `optimizer` is the optimizer
        """
        self.optimizer = optimizer
        self.device = device
        self.dataloader = dataloader
        self.model = model
        # Cross-entropy over the vocabulary for next-token prediction
        self.loss_func = nn.CrossEntropyLoss()

    def __call__(self):
        """
        ### Train the model for an epoch
        """
        # Iterate through training data
        for i, (src, tgt, neighbors) in monit.enum('Train', self.dataloader):
            # Move data to the device
            src, tgt, neighbors = src.to(self.device), tgt.to(self.device), neighbors.to(self.device)

            # Forward pass
            res = self.model(src, neighbors)
            # Calculate loss (flatten the sequence and batch dimensions for cross-entropy)
            loss = self.loss_func(res.view(-1, res.shape[-1]), tgt.view(-1))

            # Clear the gradients
            self.optimizer.zero_grad()
            # Backward pass
            loss.backward()
            # Optimize the model
            self.optimizer.step()

            # Save training statistics and increment the global step counter
            tracker.save({'loss.train': loss})
            tracker.add_global_step(len(src))
def train():
    """
    ## Create and train a small model

    Loads Tiny Shakespeare, builds the Retro model with a nearest-neighbor
    encoder, and trains for 32 epochs while sampling from a fixed prompt
    after each epoch to monitor progress.
    """
    # Create an experiment
    experiment.create(name='retro_small')
    # GPU device
    device = torch.device('cuda:0')
    # Load Tiny Shakespeare dataset
    tds = TextFileDataset(
        lab.get_data_path() / 'tiny_shakespeare.txt',
        list,
        url='https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt')
    # Load [Retro dataset](dataset.html) with pre-retrieved neighbors
    train_dataset = Dataset(lab.get_data_path() / 'retro_train_dataset.json', tds)
    # Create dataloader; sampling with replacement, so an epoch is not a strict permutation
    train_dl = DataLoader(train_dataset,
                          batch_size=4,
                          sampler=RandomSampler(train_dataset, replacement=True))
    # Hyper-parameters
    chunk_len = 16
    d_model = 128
    d_ff = 512
    n_heads = 16
    d_k = 16
    # Create the nearest neighbor encoder
    nearest_neighbor_encoder = NearestNeighborEncoder(chunk_len, 6, {3}, d_model, n_heads, d_k, d_ff)
    # Create the model
    model = RetroModel(tds.n_tokens, d_model, 6,
                       {3, 5},
                       chunk_len, n_heads, d_k, d_ff,
                       encoder=nearest_neighbor_encoder)
    # Move the model to the device
    model = model.to(device)
    # Create the optimizer
    optimizer = Noam(model.parameters(), lr=1., d_model=d_model, warmup=2_000)
    # Create the `Trainer`
    trainer = Trainer(device, model, train_dl, optimizer)
    # Create the `Sampler`
    sampler = Sampler(device, model, tds, chunk_len)
    # Prompt used to monitor sample quality during training
    prompt = '''Second Citizen:\nOne word, good citizens.\n\nFirst Citizen:'''
    # Set models for saving and loading
    experiment.add_pytorch_models(model=model)
    # Start the experiment
    with experiment.start():
        # Train for `32` epochs
        for epoch in monit.loop(32):
            # Train
            trainer()
            # Print a new line
            tracker.new_line()
            # Sample from the `prompt`
            logger.log([(prompt.replace('\n', '\\n\n'), Text.subtle),
                        (sampler.sample(prompt, 128).replace('\n', '\\n\n'), Text.none)])
            # NOTE(review): no explicit checkpoint call here; models registered via
            # `experiment.add_pytorch_models` are handled by the experiment framework
    #


if __name__ == '__main__':
    train()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/retro/model.py | labml_nn/transformers/retro/model.py | """
---
title: RETRO model
summary: >
RETRO model with encoder for neighbors and autoregressive decoder
---
# RETRO model
This is the model definition for
[RETRO](index.html).
"""
import math
from typing import Set
import torch
from torch import nn
from labml.logger import inspect
class RotaryPositionalEmbeddings(nn.Module):
    """
    ## [RoPE embeddings](../rope/index.html)

    Rotates each pair of features $(x^{(i)}, x^{(i + \frac{d}{2})})$ by an
    angle proportional to the token position, so relative positions are
    encoded in the query/key dot products.

    *We use rotary position embeddings in self-attention layers.
    We assume the positional information gets embedded in the token embeddings
    and therefore do not use them in causal attention; non-causal self-attention
    needs explicit positional information because it cannot infer it.*
    """

    def __init__(self, d: int, base: int = 10_000):
        """
        * `d` is the number of features $d$
        * `base` is the constant used for calculating $\Theta$
        """
        super().__init__()
        # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
        # Stored as a frozen parameter so it moves with the module's device/dtype.
        inv_freq = 1. / (base ** (torch.arange(0, d, 2).float() / d))
        self.theta = nn.Parameter(inv_freq, requires_grad=False)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the tensor at the head of a key or a query with shape
          `[batch_size, seq_len, n_heads, d]`
        """
        # Only the sequence length and feature count are needed
        _, seq_len, _, d = x.shape
        # Half of the feature dimension, $\frac{d}{2}$
        half = d // 2
        # Position indexes `[0, 1, ..., seq_len - 1]` on the same device/dtype as $\Theta$
        positions = torch.arange(seq_len, device=x.device).type_as(self.theta)
        # Outer product $m \theta_i$ of positions and frequencies
        angles = torch.einsum('n,d->nd', positions, self.theta)
        # Duplicate so row $m$ is
        # $[m \theta_0, ..., m \theta_{\frac{d}{2}}, m \theta_0, ..., m \theta_{\frac{d}{2}}]$
        angles = torch.cat((angles, angles), dim=1)
        # $[-x^{(\frac{d}{2}+1)}, ..., -x^{(d)}, x^{(1)}, ..., x^{(\frac{d}{2})}]$
        rotated = torch.cat((-x[..., half:], x[..., :half]), dim=-1)
        # Broadcast the rotation coefficients over batch and heads
        cos = angles.cos()[None, :, None, :]
        sin = angles.sin()[None, :, None, :]
        # Apply the 2D rotation to each feature pair:
        #
        # \begin{align}
        # \begin{pmatrix}
        # x^{(i)}_m \cos m \theta_i - x^{(i + \frac{d}{2})}_m \sin m \theta_i \\
        # x^{(i + \frac{d}{2})}_m \cos m\theta_i + x^{(i)}_m \sin m \theta_i \\
        # \end{pmatrix}
        # \end{align}
        return x * cos + rotated * sin
class SelfAttention(nn.Module):
    """
    ## Self-Attention Layer $\text{A\small{TTN}}$

    This applies causal and non-causal [multi-headed self-attention](../mha.html)
    with a pre-norm and a residual connection.
    """

    def __init__(self, d_model: int, n_heads: int, d_k: int, is_causal: bool):
        """
        * `d_model` is the number of features in transformer embeddings
        * `n_heads` is the number of attention heads
        * `d_k` is the number of features per head
        * `is_causal` indicates whether this is causal attention (masked)
        """
        super().__init__()
        self.is_causal = is_causal
        self.n_heads = n_heads
        self.d_k = d_k
        # To scale attentions before softmax by $\frac{1}{\sqrt{d_k}}$
        self.scale = 1 / math.sqrt(self.d_k)
        # Linear layers for query, key and value heads.
        self.query = nn.Linear(d_model, n_heads * d_k)
        self.key = nn.Linear(d_model, n_heads * d_k)
        self.value = nn.Linear(d_model, n_heads * d_k)
        # Pre-norm layer. The paper uses RMSNorm instead.
        self.norm = nn.LayerNorm(d_model)
        # Softmax for attention probabilities
        self.softmax = nn.Softmax(dim=-1)
        # Rotary positional embeddings, applied per-head to queries and keys
        self.rotary_pe = RotaryPositionalEmbeddings(self.d_k)
        # Final linear layer
        self.output = nn.Linear(n_heads * d_k, d_model)

    def mask_attention(self, attn: torch.Tensor):
        """
        ### Mask the attention layer for causal attention

        * `attn` is the attention matrix of shape `[batch_size, n_heads, seq_len, seq_len]`
        """
        # No masking for non-causal attention
        if not self.is_causal:
            return attn
        # Lower-triangular mask: position `i` may only attend to positions `j <= i`
        mask = torch.tril(attn.new_ones(attn.shape[-2:]))
        # Masked positions get `-inf`, i.e. zero probability after softmax
        return attn.masked_fill(mask == 0, float('-inf'))

    def forward(self, h: torch.Tensor):
        """
        * `h` is the transformer embeddings of shape `[batch_size, seq_len, d_model]`
        """
        # Residual connection
        h_res = h
        # Pre-normalization
        h = self.norm(h)
        # Get query, key, and values and split them in to heads.
        # These will have shapes `[batch_size, seq_len, n_heads, d_k]`
        mh_shape = (*h.shape[:-1], self.n_heads, self.d_k)
        q = self.query(h).view(mh_shape)
        k = self.key(h).view(mh_shape)
        v = self.value(h).view(mh_shape)
        # Apply rotary positional embeddings
        q = self.rotary_pe(q)
        k = self.rotary_pe(k)
        # Calculate attention scores; shape `[batch_size, n_heads, seq_len, seq_len]`
        attn = torch.einsum('bihd,bjhd->bhij', q, k)
        # Scale it by $\frac{1}{\sqrt{d_k}}$
        attn = attn * self.scale
        # Apply masks if it's causal attention
        attn = self.mask_attention(attn)
        # Calculate attention probabilities
        attn = self.softmax(attn)
        # Gather values weighted by attention probabilities
        h = torch.einsum("bhij,bjhd->bihd", attn, v)
        # Change from shape `[batch_size, seq_len, n_heads, d_k]`
        # to `[batch_size, seq_len, n_heads * d_k]`
        h = h.reshape(*h.shape[:-2], -1)
        # Apply final linear layer.
        # The result will have shape `[batch_size, seq_len, d_model]`
        h = self.output(h)
        # Add the residual connection
        return h + h_res
class CrossAttention(nn.Module):
    """
    ## Cross-Attention Layer $\text{C\small{A}}$

    This is similar to the self-attention layer defined above, except that
    it gets keys and values from a different set of embeddings than the queries.

    This is used in the encoder to encode the retrieved chunks based on the
    input chunks.

    *We do not use any explicit positional embeddings here.
    We assume that the model can represent positional information in the embeddings implicitly.*
    """

    def __init__(self, d_model: int, n_heads: int, d_k: int):
        """
        * `d_model` is the number of features in transformer embeddings
        * `n_heads` is the number of attention heads
        * `d_k` is the number of features per head
        """
        super().__init__()
        self.n_heads = n_heads
        self.d_k = d_k
        # To scale attentions before softmax by $\frac{1}{\sqrt{d_k}}$
        self.scale = 1 / math.sqrt(self.d_k)
        # Linear layers for query, key and value heads.
        self.query = nn.Linear(d_model, n_heads * d_k)
        self.key = nn.Linear(d_model, n_heads * d_k)
        self.value = nn.Linear(d_model, n_heads * d_k)
        # Pre-norm layer for the query embeddings. The paper uses RMSNorm instead.
        self.norm = nn.LayerNorm(d_model)
        # Softmax for attention probabilities
        self.softmax = nn.Softmax(dim=-1)
        # Final linear layer
        self.output = nn.Linear(n_heads * d_k, d_model)

    def forward(self, e: torch.Tensor, h: torch.Tensor):
        """
        * `e` are the retrieved nearest neighbor chunk embeddings with shape
          `[batch_size, chunks, neighbors, neighbor_len, d_model]`
        * `h` are the input chunks from which the nearest neighbors were retrieved with shape
          `[batch_size, chunks, chunk_len, d_model]`. This is already normalized.
        """
        # Residual connection
        e_res = e
        # Normalize retrieved chunks (the queries); `h` was normalized by the caller
        e = self.norm(e)
        # Get query from the retrieved chunks
        q = self.query(e).view(*e.shape[:-1], self.n_heads, self.d_k)
        # Get keys and values from the input chunks
        k = self.key(h).view(*h.shape[:-1], self.n_heads, self.d_k)
        v = self.value(h).view(*h.shape[:-1], self.n_heads, self.d_k)
        # Calculate attention scores for all chunks.
        # Each retrieved neighbor will pay attention to the original chunk that retrieved it.
        # This will have shape `[batch_size, chunks, neighbors, n_heads, neighbor_len, chunk_len]`
        attn = torch.einsum('bcnihd,bcjhd->bcnhij', q, k)
        # Scale attention scores
        attn = attn * self.scale
        # Calculate softmax across the last dimension (over the `chunk_len` key positions)
        attn = self.softmax(attn)
        # Gather values
        e = torch.einsum("bcnhij,bcjhd->bcnihd", attn, v)
        # Change from shape `[batch_size, chunks, neighbors, neighbor_len, n_heads, d_k]`
        # to `[batch_size, chunks, neighbors, neighbor_len, n_heads * d_k]`
        e = e.reshape(*e.shape[:-2], -1)
        # Apply final linear layer.
        # The result will have shape `[batch_size, chunks, neighbors, neighbor_len, d_model]`
        e = self.output(e)
        # Add residual connection
        return e + e_res
class ChunkedCrossAttention(nn.Module):
    """
    ## Chunked Cross-Attention Layer $\text{C\small{CA}}$

    This is similar to the cross-attention layer defined above.

    This is used in the decoder to pay attention to the retrieved neighbor chunks.

    *We do not use any explicit positional embeddings here.
    We assume that the model can represent positional information in the embeddings implicitly.*
    """

    def __init__(self, d_model: int, n_heads: int, d_k: int, chunk_len: int):
        """
        * `d_model` is the number of features in transformer embeddings
        * `n_heads` is the number of attention heads
        * `d_k` is the number of features per head
        * `chunk_len` is the length of a chunk
        """
        super().__init__()
        self.chunk_len = chunk_len
        self.n_heads = n_heads
        self.d_k = d_k
        # To scale attentions before softmax by $\frac{1}{\sqrt{d_k}}$
        self.scale = 1 / math.sqrt(self.d_k)
        # Linear layers for query, key and value heads.
        self.query = nn.Linear(d_model, n_heads * d_k)
        self.key = nn.Linear(d_model, n_heads * d_k)
        self.value = nn.Linear(d_model, n_heads * d_k)
        # Pre-norm layer for the query embeddings. The paper uses RMSNorm instead.
        self.norm = nn.LayerNorm(d_model)
        # Softmax for attention probabilities
        self.softmax = nn.Softmax(dim=-1)
        # Final linear layer
        self.output = nn.Linear(n_heads * d_k, d_model)

    def forward(self, h: torch.Tensor, e: torch.Tensor):
        """
        * `h` are the input embeddings of shape `[batch_size, seq_len, d_model]`
        * `e` are the retrieved nearest neighbors of shape
          `[batch_size, chunks, neighbors, neighbor_len, d_model]`
        """
        # Get shape
        batch_size, chunks, neighbors, neighbor_len, d_model = e.shape
        # No attention if there are no chunks (for short inputs when sampling)
        if chunks == 0:
            return h
        # Residual connection
        h_res = h
        # Remove the first `chunk_len - 1` embeddings.
        # The input pays attention to neighbors retrieved and encoded using the past tokens only;
        # so that there is no information leakage.
        # That is the retrieved neighbors from the first chunks will have information from the first chunk.
        # So by shifting the sequence to the left by `chunk_len - 1` we make sure that information only flows
        # to the right.
        h = h[:, self.chunk_len - 1:]
        # Pre-norm
        h = self.norm(h)
        # Append empty embeddings to the end to be able to split the input into chunks
        if h.shape[1] < chunks * self.chunk_len:
            h = torch.cat((h, h.new_zeros(batch_size, chunks * self.chunk_len - h.shape[1], d_model)), dim=1)
        # Reshape the input into chunks.
        h = h.reshape(batch_size, chunks, self.chunk_len, d_model)
        # Get query from the input
        q = self.query(h).view(*h.shape[:-1], self.n_heads, self.d_k)
        # Get keys and values from the retrieved neighbors
        k = self.key(e).view(*e.shape[:-1], self.n_heads, self.d_k)
        v = self.value(e).view(*e.shape[:-1], self.n_heads, self.d_k)
        # Calculate attention scores for input chunks.
        # Each chunk will pay attention to neighbors retrieved by the previous chunk.
        # This will have shape `[batch_size, chunks, heads, chunk_len, neighbors, neighbor_len]`
        attn = torch.einsum('bcihd,bcnjhd->bchinj', q, k)
        # Scale attention scores
        attn = attn * self.scale
        # Apply softmax over the last two dimensions `neighbors, neighbor_len`
        # (flatten them, softmax, then restore the shape)
        attn = self.softmax(attn.view(*attn.shape[:-2], -1)).view(attn.shape)
        # Gather values
        h = torch.einsum("bchinj,bcnjhd->bcihd", attn, v)
        # Change from shape `[batch_size, chunks, chunk_len, n_heads, d_k]`
        # to `[batch_size, chunks * chunk_len, n_heads * d_k]`
        h = h.reshape(batch_size, chunks * self.chunk_len, -1)
        # Apply final linear layer.
        # The result will have shape `[batch_size, chunks * chunk_len, d_model]`
        h = self.output(h)
        # Append `chunk_len - 1` zero embedding to the left; i.e. right shift it back
        h = torch.cat((h.new_zeros(batch_size, self.chunk_len - 1, d_model), h), dim=1)
        # Truncate (the right padding added above) and add the residual connection
        return h[:, :h_res.shape[1]] + h_res
class FeedForward(nn.Module):
    """
    ### Position-wise Feed Forward Layer $\text{F\small{FW}}$

    A pre-norm two-layer MLP with a ReLU in between, wrapped in a
    residual connection.
    """

    def __init__(self, d_model: int, d_ff: int):
        """
        * `d_model` is the number of features in transformer embeddings
        * `d_ff` is the number features in the hidden layer
        """
        super().__init__()
        # Expansion and projection layers
        # (registration order kept so parameter initialization is unchanged)
        self.lin1 = nn.Linear(d_model, d_ff)
        self.lin2 = nn.Linear(d_ff, d_model)
        # ReLU activation between the two linear layers
        self.act = nn.ReLU()
        # Pre-norm layer
        self.norm = nn.LayerNorm(d_model)

    def forward(self, h: torch.Tensor):
        """
        `h` are the embeddings of shape `[batch_size, seq_len, d_model]`
        """
        # Pre-norm -> expand -> ReLU -> project, then add the residual
        return self.lin2(self.act(self.lin1(self.norm(h)))) + h
class NearestNeighborEncoder(nn.Module):
    """
    ## Nearest Neighbor Encoder $\text{E\small{NCODER}}(\text{R\small{ET}}(C_u)_{1 \le u \le l}, H)$

    This module encodes the retrieved nearest neighbors
    """

    def __init__(self, chunk_len: int, n_layers: int, ca_layers: Set[int],
                 d_model: int, n_heads: int, d_k: int, d_ff: int):
        """
        * `chunk_len` is the length of a chunk
        * `n_layers` is the number of layers in the encoder $L_{\text{enc}}$
        * `ca_layers` are the layers with cross attention $P_{\text{enc}}$
        * `d_model` is the number of features in embeddings
        * `n_heads` is the number of heads in attention layers
        * `d_k` is the size of attention heads
        * `d_ff` is the size of the feed-forward networks hidden layers
        """
        super().__init__()
        self.ca_layers = ca_layers
        self.chunk_len = chunk_len
        # Cross-attention layers; one per entry in `ca_layers`
        self.ca = nn.ModuleList([CrossAttention(d_model, n_heads, d_k) for _ in range(len(ca_layers))])
        # Bi-directional self attention layers
        self.attn = nn.ModuleList([SelfAttention(d_model, n_heads, d_k, is_causal=False) for _ in range(n_layers)])
        # Feed forward layers
        self.ffw = nn.ModuleList([FeedForward(d_model, d_ff) for _ in range(n_layers)])
        # Pre-normalization layer for $H$
        self.norm_h = nn.LayerNorm(d_model)

    def forward(self, e: torch.Tensor, h: torch.Tensor):
        """
        * `e` are token embeddings of the retrieved nearest neighbors,
          $\text{E\small{MB}}\big(\text{R\small{ET}}(C_u)_{1 \le u \le l}\big)$
          of shape `[batch_size, chunks, neighbors, neighbor_len, d_model]`
        * `h` are the input token embeddings, $H$
          of shape `[batch_size, seq_len, d_model]`

        *The chunks $u \in [1, l]$ and neighbors $j \in [1, k]$ are processed in parallel.*
        """
        # Get shape
        batch_size, chunks, neighbors, neighbor_len, d_model = e.shape
        # $(H_u)_{u \in [1, l]} \leftarrow \text{S\small{PLIT}}(H)$
        h_split = h[:, :self.chunk_len * chunks, :].reshape(batch_size, chunks, self.chunk_len, d_model)
        # Pre-norm
        h_split = self.norm_h(h_split)
        # Keep the index of the cross attention layer
        p_ca = 0
        # For all layers $p' \in [1, L_{\text{enc}}]$
        for p in range(len(self.attn)):
            # Bi-directional self attention
            # $E^j_u \leftarrow \text{A\small{TTN}}_{\text{enc}}(E^j_u)$
            # (chunk and neighbor dimensions are folded into the batch dimension)
            e = self.attn[p](e.view(-1, neighbor_len, d_model)).view(e.shape)
            # Cross attention if $p' \in P_{\text{enc}}$
            if p in self.ca_layers:
                # $E^j_u \leftarrow \text{C\small{A}}_{\text{enc}}(E^j_u, H_u)$
                e = self.ca[p_ca](e, h_split)
                # Increment the cross attention index
                p_ca += 1
            # Feed forward layer $E^j_u \leftarrow \text{F\small{FW}}_{\text{enc}}(E^j_u)$
            e = self.ffw[p](e)
        # return $E$
        return e
class RetroModel(nn.Module):
    """
    ## Retro Model

    This is the Retro decoder
    """

    def __init__(self, n_vocab: int, d_model: int, n_layers: int, ca_layers: Set[int], chunk_len: int,
                 n_heads: int, d_k: int, d_ff: int, encoder: NearestNeighborEncoder):
        """
        * `n_vocab` is the number of tokens in the vocabulary
        * `d_model` is the number of features in embeddings
        * `n_layers` is the number of layers in the decoder $L$
        * `ca_layers` are the layers with cross attention $P$
        * `chunk_len` is the length of a chunk
        * `n_heads` is the number of heads in attention layers
        * `d_k` is the size of attention heads
        * `d_ff` is the size of the feed-forward networks hidden layers
        * `encoder` is the nearest neighbor encoder
        """
        super().__init__()
        self.ca_layers = ca_layers
        self.encoder = encoder
        # Token embedding layer (shared between the input and the retrieved neighbors)
        self.emb = nn.Embedding(n_vocab, d_model)
        # Chunked cross attention layers $\text{C\small{CA}}$; one per entry in `ca_layers`
        self.cca = nn.ModuleList(
            [ChunkedCrossAttention(d_model, n_heads, d_k, chunk_len) for _ in range(len(ca_layers))])
        # Attention layers $\text{A\small{TTN}}$
        self.attn = nn.ModuleList([SelfAttention(d_model, n_heads, d_k, is_causal=True) for _ in range(n_layers)])
        # Feed forward layers $\text{F\small{FW}}$
        self.ffw = nn.ModuleList([FeedForward(d_model, d_ff) for _ in range(n_layers)])
        # Readout layer $\text{R\small{EAD}}$
        self.read = nn.Linear(d_model, n_vocab)
        # Pre-normalization layer for nearest neighbor embeddings from
        # $\text{E\small{NCODER}}(\text{R\small{ET}}(C_u)_{1 \le u \le l}, H)$
        self.norm_e = nn.LayerNorm(d_model)

    def forward(self, x: torch.Tensor, ret: torch.Tensor):
        """
        * `x` is the input sequence, $X$ of shape `[batch_size, seq_len]`
        * `ret` are the retrieved neighbors
          $\text{R\small{ET}}(C_u)_{1 \le u \le l}$
          of shape `[batch_size, chunks, neighbors, neighbor_len]`

        Returns the token logits of shape `[batch_size, seq_len, n_vocab]`.
        """
        # Get input embeddings $H \leftarrow \text{E\small{MB}}(X)$
        h = self.emb(x)
        # Embeddings of the retrieved neighbors
        # $E^j_u = \text{E\small{MB}}_{\text{enc}}\big(\text{R\small{ET}}(C_u)^j\big)$.
        #
        # We use same embeddings for both input and neighbors
        ret_emb = self.emb(ret)
        # Keep index of the chunked cross attention layer
        p_ca = 0
        # For all layers $p \in [1, L]$
        for p in range(len(self.attn)):
            # Causal self attention $H \leftarrow \text{A\small{TTN}}(H)$
            h = self.attn[p](h)
            # Get encoder embeddings before the first $\text{C\small{CA}}$ layer,
            # when $p = \min(P)$
            if self.ca_layers and p == min(self.ca_layers):
                # $E = \text{E\small{NCODER}}(\text{R\small{ET}}(C_u)_{1 \le u \le l}, H)$
                #
                # We passed the embeddings of $\text{R\small{ET}}(C_u)_{1 \le u \le l}$ to encoder.
                e = self.encoder(ret_emb, h)
                # Normalize encoder embeddings
                e = self.norm_e(e)
            # Chunked-cross attention if $p \in P$
            if p in self.ca_layers:
                # $H \leftarrow \text{C\small{CA}}(H, E)$
                h = self.cca[p_ca](h, e)
                # Increment chunked cross-attention index
                p_ca += 1
            # $H \leftarrow \text{F\small{FW}}(H)$
            h = self.ffw[p](h)
        # $O \leftarrow \text{R\small{EAD}}(H)$
        return self.read(h)
def _test():
    """
    ### Test the model with fake data

    Builds a tiny Retro model and runs one forward pass on dummy token ids
    as a smoke test of the tensor shapes.
    """
    # Tiny hyper-parameters so the smoke test is fast
    chunk_len = 4
    d_model = 8
    d_ff = 32
    n_heads = 2
    d_k = 4
    # Use a GPU when available, otherwise fall back to CPU so the smoke test
    # also runs on machines without CUDA (previously hard-coded to `cuda:0`).
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # Decoder with cross attention at layers {2, 5}; encoder with
    # cross attention at layer {1}; vocabulary size 5
    m = RetroModel(5, d_model, 6, {2, 5}, chunk_len, n_heads, d_k, d_ff,
                   encoder=NearestNeighborEncoder(chunk_len, 2, {1}, d_model, n_heads, d_k, d_ff))
    m.to(device)
    # A 10-token input sequence
    x = [1, 2, 4, 4, 0, 1, 2, 3, 4, 3]
    # Two chunks, each with two retrieved neighbors of length 6
    ret = [
        [[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]],
        [[0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1]],
    ]
    # Forward a batch of 10 copies; result has shape `[batch, seq_len, n_vocab]`
    res = m(torch.tensor([x] * 10).to(device), torch.tensor([ret] * 10).to(device))
    # Print an output summary
    inspect(res)


#
if __name__ == '__main__':
    _test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/retro/dataset.py | labml_nn/transformers/retro/dataset.py | """
---
title: Training dataset for RETRO
summary: >
Create a dataset for RETRO model training
---
# RETRO training dataset
We pre-retrieve nearest neighbors from the [key-value database](database.html)
and create the dataset to train the [RETRO](index.html)
[model](model.html).
"""
import json
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import Dataset as PyTorchDataset
from labml import lab, monit
from labml_nn.helpers.datasets import TextFileDataset, TextDataset
from labml_nn.transformers.retro.database import RetroIndex
def build_dataset(chunk_len: int = 16, chunks_per_sample: int = 32, skip_range: int = 8):
    """
    ## Build the dataset

    * `chunk_len` is the chunk length
    * `chunks_per_sample` is the number of chunks per training sample
    * `skip_range` is the maximum number of characters to skip between two samples.
      We skip a few characters between samples to make sure the samples
      aren't aligned perfectly with the chunks in the [database](database.html)
    """
    # Load the text file
    dataset = TextFileDataset(
        lab.get_data_path() / 'tiny_shakespeare.txt',
        list,
        url='https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt')
    # Training portion of it
    text = dataset.train
    # Load the index for retrieving neighbors
    index = RetroIndex()
    # The input sample offsets
    sample_offsets = []
    # Cursor for the text
    i = 0
    while i < len(text):
        # Skip a few characters to make sure it's not aligned with the neighbors
        skip = np.random.randint(skip_range)
        i += skip
        # Stop if we've reached the end of the text
        if i + chunks_per_sample * chunk_len > len(text):
            break
        # Collect the offset
        sample_offsets.append(i)
        # Increment the cursor past this sample
        i += chunks_per_sample * chunk_len
    # For samples
    samples = []
    # Iterate through sample offsets
    for i in monit.iterate('Gather Neighbors', sample_offsets):
        # Get the sample including an extra character (for prediction)
        sample = text[i: i + chunks_per_sample * chunk_len + 1]
        # The input
        src = sample[:-1]
        # Break it into chunks
        chunks = [src[j:j + chunk_len] for j in range(0, len(src), chunk_len)]
        # The chunk offsets (absolute positions in `text`)
        chunk_offsets = [j + i for j in range(0, len(src), chunk_len)]
        # Retrieve nearest neighbors
        neighbor_offsets = index(chunks, chunk_offsets)
        # Get neighbor texts. The neighbor length is twice the `chunk_len`
        neighbors = [[text[j: j + chunk_len * 2] for j in n_off] for n_off in neighbor_offsets]
        # Add (input, shifted-by-one target, neighbors) to the list of samples
        samples.append((sample[:-1], sample[1:], neighbors))
    # Save the samples in JSON.
    # We don't need to use complex dataset storage mechanisms or pre-tokenize
    # since our dataset is small.
    with open(str(lab.get_data_path() / 'retro_train_dataset.json'), 'w') as f:
        f.write(json.dumps(samples))
class Dataset(PyTorchDataset):
    """
    ## Dataset

    PyTorch dataset over the pre-built JSON samples produced by `build_dataset`.
    """

    def __init__(self, file_path: Path, tds: TextDataset):
        """
        * `file_path` is the path of the saved JSON file
        * `tds` is the `TextDataset`
        """
        self.tds = tds
        # Load every (src, tgt, neighbors) sample into memory
        with open(str(file_path), 'r') as f:
            self.samples = json.loads(f.read())

    def __len__(self):
        """
        Number of samples
        """
        return len(self.samples)

    def __getitem__(self, idx: int):
        """
        Tokenize and return the sample at `idx`
        """
        src_text, tgt_text, neighbor_texts = self.samples[idx]
        # Tokenize the source and the shifted target
        src = self.tds.text_to_i(src_text)
        tgt = self.tds.text_to_i(tgt_text)
        # Tokenize and stack the retrieved neighbors per chunk
        neighbors = torch.stack(
            [torch.stack([self.tds.text_to_i(t) for t in per_chunk])
             for per_chunk in neighbor_texts])
        return src, tgt, neighbors
#
if __name__ == '__main__':
    # Pre-compute the training dataset when run as a script
    build_dataset()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/retro/database.py | labml_nn/transformers/retro/database.py | """
---
title: Database for nearest neighbor retrieval
summary: >
Nearest neighbor retrieval and creation of the database
---
# Database for nearest neighbor retrieval
This builds the database and retrieves nearest neighbors for the
[RETRO model](index.html).
We use [FAISS library](https://faiss.ai/) for the database whilst the paper had used the SCaNN library.
"""
from typing import List, Optional
import faiss
import numpy as np
import torch
from labml import lab, monit
from labml_nn.helpers.datasets import TextFileDataset
from labml_nn.transformers.retro.bert_embeddings import BERTChunkEmbeddings
def build_database(chunk_len: int = 16, batch_size: int = 64, d_emb: int = 768, n_centeroids: int = 256,
                   code_size: int = 64, n_probe: int = 8, n_train: int = 50_000):
    """
    ## Build Database

    * `chunk_len` is the length of a chunk (number of characters)
    * `batch_size` is the batch size to use when calculating $\text{B\small{ERT}}(N)$
    * `d_emb` is the number of features in $\text{B\small{ERT}}(N)$ embeddings
    * `n_centeroids` is the number of
      [lists in the FAISS index](https://faiss.ai/cpp_api/struct/structfaiss_1_1IndexIVFPQ.html)
    * `code_size` encoded vector size in the index
    * `n_probe` is the number of lists to probe
    * `n_train` is the number of keys to train the index on
    """
    # Load the dataset text file
    dataset = TextFileDataset(
        lab.get_data_path() / 'tiny_shakespeare.txt',
        list,
        url='https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt')
    # Get training data (a string)
    text = dataset.train
    # Split the text into chunks of `chunk_len`; drop trailing positions that cannot
    # supply a chunk plus an equal-length continuation
    chunks = [text[i:i + chunk_len] for i in range(0, len(text), chunk_len) if i + chunk_len * 2 < len(text)]
    # Get the offsets of each of the chunks
    chunk_offsets = np.array([i for i in range(0, len(text), chunk_len) if i + chunk_len * 2 < len(text)])
    # Number of chunks
    n_chunks = len(chunks)
    # Initialize BERT to get $\text{B\small{ERT}}(N)$
    bert = BERTChunkEmbeddings(torch.device('cuda:0'))
    # Get chunk embeddings by processing `batch_size` number of chunks on each iteration
    chunk_emb = []
    for i in monit.iterate('Get embeddings', range(0, n_chunks, batch_size)):
        chunk_emb.append(bert(chunks[i: i + batch_size]).cpu())
    # Merge them into a single tensor
    chunk_emb = torch.cat(chunk_emb, dim=0).numpy()
    # Create the [FAISS index](https://faiss.ai/cpp_api/struct/structfaiss_1_1IndexIVFPQ.html)
    quantizer = faiss.IndexFlatL2(d_emb)
    index = faiss.IndexIVFPQ(quantizer, d_emb, n_centeroids, code_size, 8)
    index.nprobe = n_probe
    # Get a random sample of the chunk indexes
    random_sample = np.random.choice(np.arange(n_chunks), size=[min(n_train, n_chunks)], replace=False)
    # Train the index to store the keys
    with monit.section('Train index'):
        index.train(chunk_emb[random_sample])
    # Add the chunks to the index in batches of size `1024`
    for s in monit.iterate('Index', range(0, n_chunks, 1024)):
        e = min(s + 1024, n_chunks)
        # Add to index, keyed by the chunk's character offset in the text
        index.add_with_ids(chunk_emb[s:e], chunk_offsets[s: e])
    # Save the index
    with monit.section('Save'):
        faiss.write_index(index, str(lab.get_data_path() / 'retro.index'))
class RetroIndex:
    """
    ## Index for retrieving nearest neighbors

    Wraps the FAISS index built by `build_database` together with the
    frozen BERT used to embed query chunks.
    """

    def __init__(self, chunk_len: int = 16, n_probe: int = 8,
                 n_neighbors: int = 2, n_extra: int = 2,
                 exclude_neighbor_span: int = 8):
        """
        * `chunk_len` is the chunk length
        * `n_probe` is the number of lists to probe
        * `n_neighbors` is the number of neighbors to retrieve
        * `n_extra` is the number of extra neighbors to retrieve since we will be
          removing neighbors overlapping with the query chunk
        * `exclude_neighbor_span` is the extra text length to avoid when checking for overlaps
        """
        self.chunk_len = chunk_len
        self.n_neighbors = n_neighbors
        self.n_extra = n_extra
        self.exclude_neighbor_span = exclude_neighbor_span
        # Frozen BERT used to embed query chunks, $\text{B\small{ERT}}(N)$
        self.bert = BERTChunkEmbeddings(torch.device('cuda:0'))
        # Load the FAISS index built by `build_database`
        with monit.section('Load index'):
            self.index = faiss.read_index(str(lab.get_data_path() / 'retro.index'))
            self.index.nprobe = n_probe

    def filter_neighbors(self, offset: int, neighbor_offsets: List[int]):
        """
        #### Filter neighbors that overlap with the query

        The positions of the neighbors are given by `neighbor_offsets` and the position
        of the query chunk is `offset`.
        """
        # A neighbor starting within this distance of the query chunk overlaps it
        margin = self.chunk_len + self.exclude_neighbor_span
        return [n for n in neighbor_offsets if n < offset - margin or n > offset + margin]

    def __call__(self, query_chunks: List[str], offsets: Optional[List[int]]):
        """
        ### Retrieve nearest neighbors
        """
        # $\text{B\small{ERT}}(N)$ embeddings of the query chunks
        emb = self.bert(query_chunks).cpu()
        # Ask FAISS for extra candidates so overlapping ones can be dropped
        _, neighbor_offsets = self.index.search(emb.numpy(), self.n_neighbors + self.n_extra)
        # If the query chunk offsets are given, drop candidates that overlap the query
        if offsets is not None:
            neighbor_offsets = [self.filter_neighbors(off, n_off)
                                for off, n_off in zip(offsets, neighbor_offsets)]
        # Keep only the closest `n_neighbors` of the surviving candidates
        return [n_off[:self.n_neighbors] for n_off in neighbor_offsets]
#
if __name__ == '__main__':
    # Build the FAISS index when run as a script
    build_database()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/retro/__init__.py | labml_nn/transformers/retro/__init__.py | """
---
title: Retrieval-Enhanced Transformer (Retro)
summary: >
This is a PyTorch implementation/tutorial of the paper
Improving language models by retrieving from trillions of tokens.
It builds a key-value database of chunks of text and retrieves and uses them when
making predictions.
---
# Retrieval-Enhanced Transformer (Retro)
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Improving language models by retrieving from trillions of tokens](https://arxiv.org/abs/2112.04426).
It builds a database of chunks of text.
It is a key-value database where the keys are indexed by the BERT embeddings of the chunks.
They use a frozen pre-trained BERT model to calculate these embeddings.
The values are the corresponding chunks and an equal length of text following that chunk.
Then the model retrieves text similar (nearest neighbors) to the input to the model from this database.
These retrieved texts are used to predict the output.
Since we use a frozen BERT model for retrieval we can pre-calculate all the nearest neighbors for the
training dataset.
This speeds up the training process.
Components:
* [BERT embeddings](bert_embeddings.html): Code to get BERT embeddings of chunks of text.
* [Key-value database](database.html): Build and retrieve chunks
* [Model](model.html)
* [Dataset](dataset.html): Pre-calculate the nearest neighbors
* [Training code](train.html)
""" | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/alibi/experiment.py | labml_nn/transformers/alibi/experiment.py | """
---
title: Attention with Linear Biases (ALiBi) Experiment
summary: This experiment trains an Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset.
---
# [Attention with Linear Biases (ALiBi)](index.html) Experiment
This is an annotated PyTorch experiment to train a [ALiBi model](index.html).
This is based on [our GPT model](../gpt/index.html).
"""
import torch
from torch.utils.data import DataLoader
from labml import experiment, tracker
from labml.configs import option, calculate
from labml_nn.helpers.datasets import SequentialUnBatchedDataset
from labml_nn.transformers.alibi import AlibiMultiHeadAttention
from labml_nn.experiments.nlp_autoregression import transpose_batch
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.gpt import Configs as GPTConfigs
class Configs(GPTConfigs):
    """
    ## Configurations

    We extend [GPT configurations](../gpt/index.html) and change the attention mechanism.
    """

    # ALiBi based transformer (the `'GPT_ALiBi'` option is registered below)
    transformer: TransformerConfigs = 'GPT_ALiBi'
    # Longer validation sequence length, so we can measure how well
    # ALiBi extrapolates beyond the training context
    valid_seq_len: int = 128
    # Validation data loader that uses `valid_seq_len` (registered below)
    valid_loader = 'shuffled_longer_valid_loader'

    def other_metrics(self, output: torch.Tensor, target: torch.Tensor):
        """
        Log losses at the initial and final tokens.

        `output` and `target` are indexed along the sequence dimension first
        (presumably `[seq_len, batch_size, ...]` — matches the rest of the file).
        """
        # If there are more tokens than the training sequence length (during validation),
        if self.seq_len < output.shape[0]:
            # Log the loss at training sequence length
            tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1]))
            # Log the loss at the first token.
            # (Plain string literal — there is nothing to interpolate.)
            tracker.add('loss.0.', self.loss_func(output[0], target[0]))
            # Log the loss at the final token
            tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1]))
def _alibi_mha(c: TransformerConfigs):
    """
    Build an [ALiBi multi-head attention](index.html) module from the
    transformer configurations.
    """
    # Read the relevant hyper-parameters off the configs object
    heads, d_model, dropout = c.n_heads, c.d_model, c.dropout
    # Construct the attention module
    return AlibiMultiHeadAttention(heads, d_model, dropout_prob=dropout)
# Set all attention mechanisms to ALiBi.
# These register the `'alibi_mha'` option for encoder self-attention,
# decoder self-attention and decoder cross-attention configurations,
# so `conf.encoder_attn = 'alibi_mha'` etc. resolve to `_alibi_mha`.
calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha)
@option(Configs.valid_loader)
def shuffled_longer_valid_loader(c: Configs):
    """
    Shuffled validation data loader with `valid_seq_len` sequence length
    """
    # Wrap the validation text in a sequential (un-batched) dataset that
    # yields sequences of `valid_seq_len` tokens
    dataset = SequentialUnBatchedDataset(text=c.text.valid,
                                         dataset=c.text,
                                         seq_len=c.valid_seq_len)
    # Build a shuffled data loader over it
    return DataLoader(dataset,
                      batch_size=c.batch_size,
                      collate_fn=transpose_batch,
                      shuffle=True)
@option(Configs.transformer, 'GPT_ALiBi')
def _transformer_configs(c: Configs):
    """
    ### ALiBi based Transformer configurations
    """
    # Start from our
    # [configurable transformer implementation](../configs.html#TransformerConfigs)
    conf = TransformerConfigs()

    # Embedding and logit-generation layers need the vocabulary size
    conf.n_src_vocab = c.n_tokens
    conf.n_tgt_vocab = c.n_tokens
    # GPT uses GELU activation for position wise feedforward
    conf.ffn.activation = 'GELU'

    # ALiBi replaces positional information, so use plain embeddings
    # without positional encodings
    conf.src_embed = 'no_pos'
    conf.tgt_embed = 'no_pos'

    # Use ALiBi attention everywhere
    for attn_option in ('encoder_attn', 'decoder_attn', 'decoder_mem_attn'):
        setattr(conf, attn_option, 'alibi_mha')

    #
    return conf
def main():
    # Create experiment
    experiment.create(name="gpt_alibi")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # 'text': 'tiny_shakespeare_no_split',

        # Use a training context size of $64$
        'seq_len': 64,
        # Use a longer context size of $80$ for validation,
        # to test how ALiBi extrapolates past the training length
        'valid_seq_len': 80,
        # Train for $128$ epochs
        'epochs': 128,
        # Batch size $128$
        'batch_size': 128,
        # Switch between training and validation for $10$ times
        # per epoch
        'inner_iterations': 10,

        # Transformer configurations
        'transformer.d_model': 128,
        'transformer.ffn.d_ff': 512,
        'transformer.n_heads': 8,
        'transformer.n_layers': 4,
        'transformer.dropout': 0.1,
    })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/alibi/__init__.py | labml_nn/transformers/alibi/__init__.py | """
---
title: Attention with Linear Biases (ALiBi)
summary: >
Documented implementation with explanations of Attention with Linear Biases (ALiBi)
---
# Attention with Linear Biases (ALiBi)
This is an implementation of Attention with Linear Biases (ALiBi) from the paper
[Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation](https://arxiv.org/abs/2108.12409).
This replaces positional encodings with biases added to attention scores (attention logits, before the softmax).
This is a relative scheme tested on autoregressive tasks, and the bias is higher for closeby tokens
and lower for far-away tokens.
The biases decrease linearly in the log scale (because it's before the softmax) and each head has a different slope.
Here's the attention formula for $i$-th token,
\begin{align}
\mathbf{a}_i
&= \text{softmax} \bigg( \mathbf{q}_i \mathbf{K}^\top + m \cdot \big[-(i-1), \dots, -1, 0 \big] \bigg) \\
&= \text{softmax} \bigg( \mathbf{q}_i \mathbf{K}^\top + m \cdot \big[0, 1, \dots, (i - 1) \big] \bigg)
\end{align}
where $\mathbf{q}_i \in \mathbb{R}^d$ is the query of the $i$-th token, $K \in \mathbb{R}^{i \times d}$ are the keys
up to $i$, and $d$ the number of features per head.
Note that the above equality holds because $\text{softmax}$ is invariant to translations
(you can add any constant to all elements without changing the result).
Here is [the training code](experiment.html) for a ALiBi model.
"""
import math
from typing import Optional
import torch
from torch import nn
from labml.logger import inspect
from labml_nn.transformers.mha import MultiHeadAttention
def get_slopes(n_heads: int):
    """
    ## Get head-specific slope $m$ for each head

    * `n_heads` is the number of heads in the attention layer $n$

    The slope for first head is

    $$\frac{1}{2^{\frac{8}{n}}} = 2^{-\frac{8}{n}}$$

    The slopes for the rest of the heads are in a geometric series with a ratio same as above.

    For instance when the number of heads is $8$ the slopes are
    $$\frac{1}{2^1}, \frac{1}{2^2}, \dots, \frac{1}{2^8}$$
    """

    # Largest power of two that does not exceed `n_heads`.
    # When `n_heads` is not a power of two we first compute slopes for this
    # many heads and then interleave extra slopes for the remainder.
    largest_pow2 = 2 ** math.floor(math.log2(n_heads))
    # Common ratio $2^{-\frac{8}{n}}$ of the geometric series
    ratio = 2.0 ** (-8.0 / largest_pow2)
    # Slopes $2^{-1\frac{8}{n}}, 2^{-2 \frac{8}{n}}, 2^{-3 \frac{8}{n}}, \dots$
    slopes = torch.pow(ratio, torch.arange(1, 1 + largest_pow2))

    # Extra slopes for the remaining heads come from the series for $2n$ heads,
    # taken at odd positions so they interleave with the slopes above.
    remaining = n_heads - largest_pow2
    if remaining > 0:
        # Ratio $2^{-\frac{8}{2n}}$ of the doubled series
        ratio_2n = 2.0 ** (-4.0 / largest_pow2)
        # Odd powers $1, 3, 5, \dots$, skipping the slopes we already have
        extra = torch.pow(ratio_2n, torch.arange(1, 1 + 2 * remaining, 2))
        # Append them
        slopes = torch.cat([slopes, extra])

    return slopes
@torch.no_grad()
def get_alibi_biases(n_heads: int, mask: torch.Tensor):
    """
    ## Calculate the attention biases matrix

    * `n_heads` is the number of heads in the attention layer
    * `mask` is the attention mask of shape `[seq_len_q, seq_len_k]`

    This returns a matrix of shape `[seq_len_q, seq_len_k, n_heads, ]` with ALiBi attention biases.
    """

    # Head-specific slopes $m$, placed on the same device as the mask
    slopes = get_slopes(n_heads).to(mask.device)

    # Positions derived from the mask: a running count of visible positions
    # along the key dimension.
    #
    # For a causal mask this is equivalent (up to a per-row constant, which
    # softmax ignores) to
    # `torch.arange(mask.shape[1], dtype=torch.long, device=mask.device)[None, :]`
    positions = mask.cumsum(dim=-1)

    # Pair-wise product of positions and slopes gives the
    # `[seq_len_q, seq_len_k, n_heads]` bias tensor
    return positions[:, :, None] * slopes[None, None, :]
class AlibiMultiHeadAttention(MultiHeadAttention):
    """
    ## Attention with Linear Biases (ALiBi)

    We override [Multi-Head Attention](../mha.html) to add the per-head
    linear biases to the attention scores before the softmax.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):
        """
        * `heads` is the number of attention heads
        * `d_model` is the embedding size
        * `dropout_prob` is the dropout probability applied to attention weights
        """
        super().__init__(heads, d_model, dropout_prob)

        # Cache for the ALiBi biases; built lazily on the first forward pass
        # and rebuilt when a longer sequence is seen
        self.alibi_biases = None

    def forward(self, *,
                query: torch.Tensor,
                key: torch.Tensor,
                value: torch.Tensor,
                mask: Optional[torch.Tensor] = None):
        """
        `query`, `key` and `value` are the tensors that store
        collection of *query*, *key* and *value* vectors.
        They have shape `[seq_len, batch_size, d_model]`.

        `mask` has shape `[seq_len, seq_len, batch_size]` and
        `mask[i, j, b]` indicates whether for batch `b`,
        query at position `i` has access to key-value at position `j`.
        """

        # ALiBi only works with causal masks: the mask must be given, square,
        # and shared across the batch.
        assert mask is not None
        assert mask.shape[0] == mask.shape[1] and mask.shape[2] == 1

        # `query`, `key` and `value` have shape `[seq_len, batch_size, d_model]`
        seq_len, batch_size, _ = query.shape

        # Add head dimension to mask and check its shape.
        mask = self.prepare_mask(mask, query.shape, key.shape)

        # Prepare `query`, `key` and `value` for attention computation.
        # These will then have shape `[seq_len, batch_size, heads, d_k]`.
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)

        # Compute attention scores $Q K^\top$.
        # This gives a tensor of shape `[seq_len, seq_len, batch_size, heads]`.
        scores = self.get_scores(query, key)

        # Scale scores $\frac{Q K^\top}{\sqrt{d_k}}$
        scores *= self.scale

        # Create ALiBi biases if they are not cached, or the cached biases are
        # shorter than the current sequence length
        if self.alibi_biases is None or self.alibi_biases.shape[1] < seq_len:
            # `mask` has shape `[seq_len, seq_len, 1, 1]` after `prepare_mask`
            self.alibi_biases = get_alibi_biases(scores.shape[-1], mask[:, :, 0, 0])

        # Add ALiBi biases to attention scores.
        # ALiBi biases has shape `[seq_len, seq_len, n_heads]`
        # and `scores` has shape `[seq_len, seq_len, batch_size, n_heads]`
        scores += self.alibi_biases[:seq_len, :seq_len, None, :]

        # Mask out disallowed positions before the softmax
        scores = scores.masked_fill(mask == 0, float('-inf'))

        # $softmax$ attention along the key sequence dimension
        # $\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)$
        attn = self.softmax(scores)

        # Apply dropout
        attn = self.dropout(attn)

        # Multiply by values
        # $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)V$$
        x = torch.einsum("ijbh,jbhd->ibhd", attn, value)

        # Concatenate multiple heads
        x = x.reshape(seq_len, batch_size, -1)

        # Output layer
        return self.output(x)
def _test_alibi():
    """
    Simple test function to see the slopes.
    """
    # Print the slopes for $12$ heads
    inspect(get_slopes(12).tolist(), _n=-1)

    # Build a causal mask for a sequence of $8$ tokens
    from labml_nn.transformers.utils import subsequent_mask
    causal_mask = subsequent_mask(8)[:, :, 0]
    inspect(causal_mask)

    # Print the ALiBi biases of the fourth head
    inspect(get_alibi_biases(12, causal_mask)[:, :, 3], _n=-1)


#
if __name__ == '__main__':
    _test_alibi()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/knn/build_index.py | labml_nn/transformers/knn/build_index.py | """
---
title: Build FAISS index for k-NN search
summary: This builds the FAISS index with the transformer embeddings.
---
# Build FAISS index for k-NN search
We want to build the index of $\big(f(c_i), w_i\big)$.
We store $f(c_i)$ and $w_i$ in memory mapped numpy arrays.
We find $f(c_i)$ nearest to $f(c_t)$ using [FAISS](https://github.com/facebookresearch/faiss).
FAISS indexes $\big(f(c_i), i\big)$ and we query it with $f(c_t)$.
"""
from typing import Optional
import faiss
import numpy as np
import torch
from labml import experiment, monit, lab
from labml.utils.pytorch import get_modules
from labml_nn.transformers.knn.train_model import Configs
def load_experiment(run_uuid: str, checkpoint: Optional[int] = None):
    """
    Load a saved experiment from [train model](train_model.html).

    * `run_uuid` is the run UUID of the training experiment
    * `checkpoint` is the checkpoint number; the latest checkpoint is loaded when `None`

    Returns the initialized `Configs` object with the trained model loaded.
    """
    # Create configurations object
    conf = Configs()
    # Load custom configurations used in the experiment
    conf_dict = experiment.load_configs(run_uuid)
    # We need to get inputs to the feed forward layer, $f(c_i)$,
    # so force the model to save them
    conf_dict['is_save_ff_input'] = True

    # This experiment is just an evaluation; i.e. nothing is tracked or saved
    experiment.evaluate()
    # Initialize configurations
    experiment.configs(conf, conf_dict)
    # Set models for saving/loading
    experiment.add_pytorch_models(get_modules(conf))
    # Specify the experiment to load from
    experiment.load(run_uuid, checkpoint)

    # Start the experiment; this is when it actually loads models
    experiment.start()

    return conf
def gather_keys(conf: Configs):
    """
    ## Gather $\big(f(c_i), w_i\big)$ and save them in numpy arrays

    *Note that these numpy arrays will take up a lot of space (even few hundred gigabytes)
    depending on the size of your dataset*.

    * `conf` is the loaded experiment configurations with a trained model
    """

    # Dimensions of $f(c_i)$
    d_model = conf.transformer.d_model
    # Training data loader
    data_loader = conf.trainer.data_loader
    # Number of contexts; i.e. number of tokens in the training data minus one.
    # $\big(f(c_i), w_i\big)$ for $i \in [2, T]$
    n_keys = data_loader.data.shape[0] * data_loader.data.shape[1] - 1
    # Numpy array for $f(c_i)$
    keys_store = np.memmap(str(lab.get_data_path() / 'keys.npy'), dtype=np.float32, mode='w+', shape=(n_keys, d_model))
    # Numpy array for $w_i$.
    # `np.int64` is used explicitly; the `np.int` alias was deprecated in
    # NumPy 1.20 and removed in 1.24 (it mapped to a 64-bit integer on
    # common platforms, so the on-disk layout is unchanged).
    vals_store = np.memmap(str(lab.get_data_path() / 'vals.npy'), dtype=np.int64, mode='w+', shape=(n_keys, 1))

    # Number of keys $f(c_i)$ collected
    added = 0
    with torch.no_grad():
        # Loop through data
        for i, batch in monit.enum("Collect data", data_loader, is_children_silent=True):
            # $w_i$ the target labels
            vals = batch[1].view(-1, 1)
            # Input data moved to the device of the model
            data = batch[0].to(conf.device)
            # Run the model; we only need the saved feed-forward inputs
            _ = conf.model(data)
            # Get $f(c_i)$
            keys = conf.model.ff_input.view(-1, d_model)
            # Save keys, $f(c_i)$ in the memory mapped numpy array
            keys_store[added: added + keys.shape[0]] = keys.cpu()
            # Save values, $w_i$ in the memory mapped numpy array
            vals_store[added: added + keys.shape[0]] = vals
            # Increment the number of collected keys
            added += keys.shape[0]

    # Make sure everything is written to disk before the index is built
    keys_store.flush()
    vals_store.flush()
def build_index(conf: Configs, n_centeroids: int = 2048, code_size: int = 64, n_probe: int = 8, n_train: int = 200_000):
    """
    ## Build FAISS index

    * `conf` is the experiment configurations
    * `n_centeroids` is the number of Voronoi cells (cluster centroids) of the IVF index
    * `code_size` is the number of bytes per compressed (product-quantized) vector
    * `n_probe` is the number of cells probed at search time
    * `n_train` is the maximum number of keys sampled to train the index

    [Getting started](https://github.com/facebookresearch/faiss/wiki/Getting-started),
    [faster search](https://github.com/facebookresearch/faiss/wiki/Faster-search),
    and [lower memory footprint](https://github.com/facebookresearch/faiss/wiki/Lower-memory-footprint)
    tutorials on FAISS will help you learn more about FAISS usage.
    """
    # Dimensions of $f(c_i)$
    d_model = conf.transformer.d_model
    # Training data loader
    data_loader = conf.trainer.data_loader
    # Number of contexts; i.e. number of tokens in the training data minus one.
    # $\big(f(c_i), w_i\big)$ for $i \in [2, T]$
    n_keys = data_loader.data.shape[0] * data_loader.data.shape[1] - 1

    # Build an index with Voronoi cell based faster search and product
    # quantization so full vectors are not stored.
    # The trailing `8` is the number of bits per product-quantizer code.
    quantizer = faiss.IndexFlatL2(d_model)
    index = faiss.IndexIVFPQ(quantizer, d_model, n_centeroids, code_size, 8)
    index.nprobe = n_probe

    # Load the memory mapped numpy array of keys
    keys_store = np.memmap(str(lab.get_data_path() / 'keys.npy'), dtype=np.float32, mode='r', shape=(n_keys, d_model))

    # Pick a random sample of keys to train the index with
    random_sample = np.random.choice(np.arange(n_keys), size=[min(n_train, n_keys)], replace=False)

    with monit.section('Train index'):
        # Train the index to store the keys
        index.train(keys_store[random_sample])

    # Add keys to the index in chunks of $1024$; $\big(f(c_i), i\big)$
    for s in monit.iterate('Index', range(0, n_keys, 1024)):
        e = min(s + 1024, n_keys)
        # $f(c_i)$
        keys = keys_store[s:e]
        # $i$
        idx = np.arange(s, e)
        # Add to index
        index.add_with_ids(keys, idx)

    with monit.section('Save'):
        # Save the index
        faiss.write_index(index, str(lab.get_data_path() / 'faiss.index'))
def main():
    # Load the experiment. Replace the run uuid with your run uuid from
    # [training the model](train_model.html).
    conf = load_experiment('4984b85c20bf11eb877a69c1a03717cd')
    # Set model to evaluation mode (disables dropout etc.)
    conf.model.eval()

    # Collect $\big(f(c_i), w_i\big)$
    gather_keys(conf)
    # Add them to the index for fast search
    build_index(conf)


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/knn/__init__.py | labml_nn/transformers/knn/__init__.py | """
---
title: k-Nearest Neighbor Language Models
summary: >
This is a simple PyTorch implementation/tutorial of the paper
Generalization through Memorization: Nearest Neighbor Language Models using FAISS.
It runs a kNN model on the final transformer layer embeddings to improve the
loss of transformer based language models.
It's also great for domain adaptation without pre-training.
---
# k-Nearest Neighbor Language Models
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Generalization through Memorization: Nearest Neighbor Language Models](https://arxiv.org/abs/1911.00172).
It uses k-nearest neighbors to improve perplexity of autoregressive transformer models.
An autoregressive language model estimates $p(w_t | \textcolor{yellowgreen}{c_t})$,
where $w_t$ is the token at step $t$
and $c_t$ is the context, $\textcolor{yellowgreen}{c_t} = (w_1, w_2, ..., w_{t-1})$.
This paper, improves $p(w_t | \textcolor{yellowgreen}{c_t})$ using a k-nearest neighbor search
on key-value pairs $\big(f(c_i), w_i\big)$, with search key $f(\textcolor{yellowgreen}{c_t})$.
Here $f(\textcolor{yellowgreen}{c_t})$ is an embedding of the context $\textcolor{yellowgreen}{c_t}$.
The paper (and this implementation) uses the **input to the feed-forward layer of the
final layer of the transformer** as $f(\textcolor{yellowgreen}{c_t})$.
We use [FAISS](https://github.com/facebookresearch/faiss) to index $f(c_i)$.
### Implementation
So to run $k$NN-LM we need to:
* [Train a transformer model](train_model.html)
* [Build an index](build_index.html) of $\big(f(c_i), w_i\big)$
* [Evaluate kNN-LM](eval_knn.html) using $k$NN search on $\big(f(c_i), w_i\big)$
with $f(\textcolor{yellowgreen}{c_t})$
This experiment uses a small dataset so that we can run this without using up a few hundred giga-bytes
of disk space for the index.
The official implementation of $k$NN-LM can be found [here](https://github.com/urvashik/knnlm).
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/knn/eval_knn.py | labml_nn/transformers/knn/eval_knn.py | """
---
title: Evaluate k-nearest neighbor language model
summary: >
This runs the kNN model and merges the kNN results with transformer output to
achieve better results than just using the transformer.
---
# Evaluate k-nearest neighbor language model
"""
from typing import Optional, List
import faiss
import numpy as np
import torch
from labml import monit, lab
from labml.logger import inspect
from labml_nn.transformers.knn.train_model import Configs
def knn(queries: torch.Tensor, index: faiss.IndexFlatL2, keys_store: np.ndarray, vals_store: np.ndarray, n_tokens: int):
    """
    ## $k$-NN to get $p(w_t, c_t)$

    Here we refer to $f(\textcolor{yellowgreen}{c_t})$ as queries,
    $f(c_i)$ as keys and $w_i$ as values.

    * `queries` are the context embeddings; the last dimension is the embedding size
    * `index` is the FAISS index over keys
    * `keys_store` / `vals_store` are the memory mapped arrays of $f(c_i)$ and $w_i$
    * `n_tokens` is the vocabulary size

    Returns token-wise logits of shape `[queries.shape[0], queries.shape[1], n_tokens]`.
    """
    # Save shape of queries to reshape results
    queries_shape = queries.shape

    # Flatten the `batch` and `sequence` dimensions of queries
    queries = queries.view(-1, queries_shape[-1])

    # Find 10 nearest neighbors of $f(\textcolor{yellowgreen}{c_t})$ among $f(c_i)$.
    # `distance` is the distance given by FAISS and `idx`, $i$ is the index of it in `keys_store`.
    distance, idx = index.search(queries.numpy(), 10)

    # Get $f(c_i)$
    keys_found = queries.new_tensor(keys_store[idx])
    # Get $w_i$
    vals_found = torch.tensor(vals_store[idx]).squeeze(-1)

    # We are going to calculate the cosine similarity between normalized vectors

    # Normalize $f(c_i)$
    keys_found_n = keys_found / torch.sqrt((keys_found ** 2).sum(-1, keepdims=True) + 1e-10)
    # Normalize $f(\textcolor{yellowgreen}{c_t})$
    queries_n = queries / torch.sqrt((queries ** 2).sum(-1, keepdims=True) + 1e-10)

    # Get the dot-product, or cosine similarity
    dot_prod = (keys_found_n * queries_n.unsqueeze(1)).sum(-1)

    # Token-wise logits
    logits_token = dot_prod.new_zeros(queries.shape[0], n_tokens)
    # Accumulate the neighbor similarities per token.
    # `scatter_add_` is the non-deprecated equivalent of
    # `scatter_(..., reduce='add')` used previously.
    logits_token.scatter_add_(dim=1, index=vals_found, src=dot_prod)

    # Reshape the logits
    logits_token = logits_token.reshape(queries_shape[0], queries_shape[1], -1)

    return logits_token
def validation_loss(knn_weights: List[float], last_n: Optional[int], conf: Configs, index: faiss.IndexFlatL2,
                    keys_store: np.ndarray, vals_store: np.ndarray):
    """
    ## Calculate validation loss

    We calculate the validation loss of the combined on $k$-NN prediction and transformer prediction.
    The weight given to the $k$-NN model is given by `knn_weight`.
    It's a list of weights and we calculate the validation loss for each.

    * `knn_weights` is the list of interpolation weights given to the $k$-NN logits
    * `last_n` restricts the loss to the last `last_n` sequence positions when given
    * `conf`, `index`, `keys_store`, `vals_store` come from the saved experiment and index

    Returns the per-batch weighted losses (one list per weight) and the
    per-batch sample counts needed to average them.
    """
    # List of losses for each `knn_weights`
    losses = [[] for _ in knn_weights]
    # Number of samples in each batch
    n_samples = []
    with torch.no_grad():
        # Iterate through validation data
        for i, batch in monit.enum("Validation", conf.validator.data_loader, is_children_silent=True):
            # Get data and target labels
            data, target = batch[0].to(conf.device), batch[1].to(conf.device)
            # Run the model and get predictions $p(w_t, c_t)$.
            # NOTE(review): `AutoregressiveModel.forward` in train_model.py
            # returns a `(logits, None)` tuple; presumably only the logits
            # are wanted here — verify against the model actually loaded.
            res = conf.model(data)
            # Get $k$-NN predictions
            res_knn = knn(conf.model.ff_input.cpu(), index, keys_store, vals_store, conf.n_tokens)
            res_knn = res_knn.to(conf.device)

            # This is to calculate only the loss for `last_n` tokens.
            # This is important because the first predictions (along the sequence)
            # of transformer model has very few past tokens to look at.
            if last_n:
                res = res[-last_n:]
                res_knn = res_knn[-last_n:]
                target = target[-last_n:]

            # Number of samples
            n_s = res.shape[0] * data.shape[1]
            n_samples.append(n_s)

            # Calculate scores for each of `knn_weights`.
            # (The loop variable `i` shadows the outer batch index; harmless
            # since the outer `i` is not used after this point.)
            for i, c in enumerate(knn_weights):
                # Calculate the loss for the interpolation
                # $c \cdot p_{knn} + (1 - c) \cdot p$
                loss = conf.loss_func(res_knn * c + (1 - c) * res, target)
                # Weight by the sample count so losses can be averaged later
                losses[i].append(loss * n_s)

    return losses, n_samples
def load_index(conf: Configs, n_probe: int = 8):
    """
    ## Load the index

    * `conf` is the experiment configurations
    * `n_probe` is the number of Voronoi cells FAISS probes during a search

    Returns the FAISS index and the memory mapped key/value arrays.
    """
    # Dimensions of $f(c_i)$
    d_model = conf.transformer.d_model
    # Training data loader
    data_loader = conf.trainer.data_loader
    # Number of contexts; i.e. number of tokens in the training data minus one.
    # $\big(f(c_i), w_i\big)$ for $i \in [2, T]$
    n_keys = data_loader.data.shape[0] * data_loader.data.shape[1] - 1
    # Load FAISS index
    with monit.section('Load index'):
        index = faiss.read_index(str(lab.get_data_path() / 'faiss.index'))
    # Set number of cells to probe
    index.nprobe = n_probe

    # Load memory mapped numpy arrays.
    # `np.int64` is used explicitly; the `np.int` alias was deprecated in
    # NumPy 1.20 and removed in 1.24. It matches the 64-bit layout written
    # by [build_index](build_index.html).
    keys_store = np.memmap(str(lab.get_data_path() / 'keys.npy'), dtype=np.float32, mode='r', shape=(n_keys, d_model))
    vals_store = np.memmap(str(lab.get_data_path() / 'vals.npy'), dtype=np.int64, mode='r', shape=(n_keys, 1))

    return index, keys_store, vals_store
def main():
    from labml_nn.transformers.knn.build_index import load_experiment
    # Load the experiment. Replace the run uuid with your run uuid from
    # [training the model](train_model.html).
    conf = load_experiment('4984b85c20bf11eb877a69c1a03717cd')

    # Set model to evaluation mode (disables dropout etc.)
    conf.model.eval()

    # Load index
    index, keys_store, vals_store = load_index(conf)
    # List of weights given to $k$-NN prediction: $0.00, 0.05, \dots, 0.45$.
    # We will evaluate the validation loss for each of the weights.
    knn_weights = [i / 20 for i in range(10)]
    # Evaluate validation loss over all positions (`last_n = None`)
    losses, n_samples = validation_loss(knn_weights, None, conf, index, keys_store, vals_store)
    # Output the average loss for each of `knn_weights`.
    inspect({c: np.sum(losses[i]) / np.sum(n_samples) for i, c in enumerate(knn_weights)})


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/knn/train_model.py | labml_nn/transformers/knn/train_model.py | """
---
title: Train Autoregressive Transformer
summary: This is training code with notes for a basic auto-regressive transformer.
---
# Train Autoregressive Transformer
This trains a simple [transformer](../../) model for auto-regression.
"""
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml.utils.pytorch import get_modules
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import Encoder, Generator, TransformerConfigs
from labml_nn.transformers.utils import subsequent_mask
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model

    A transformer encoder with a token embedding layer in front and a
    next-token generator behind, used as an autoregressive language model.
    """

    def __init__(self, src_embed: nn.Module, encoder: Encoder, generator: Generator, *,
                 is_save_ff_input: bool = False):
        """
        * `src_embed` is the token embedding module
        * `encoder` is the transformer encoder
        * `generator` maps final embeddings to next-token logits
        * `is_save_ff_input` makes the last encoder layer save its
          feed-forward input, which is used as $f(c_t)$ for the $k$-NN index
        """
        super().__init__()
        # Token embedding module
        self.src_embed = src_embed
        # Transformer based encoder
        self.encoder = encoder
        # Whether the last layer of the encoder should
        # save the input to the feed-forward layer.
        # This is our $f(c_t)$, the embedding of the context.
        self.encoder.layers[-1].is_save_ff_input = is_save_ff_input
        # Next token generation layer;
        # this gives logits of the next token
        self.generator = generator
        # Causal mask; lazily created on the first forward call
        self.src_mask = None

    @property
    def ff_input(self) -> torch.Tensor:
        """
        Retrieve saved $f(c_t)$
        """
        return self.encoder.layers[-1].ff_input

    def forward(self, src: torch.Tensor):
        """
        Returns `(logits, None)`; presumably the second element is an unused
        state slot expected by the trainer — confirm against the training loop.
        """
        # Create subsequent mask, so that the transformer can only pay attention to past tokens.
        if self.src_mask is None or self.src_mask.size(0) != len(src):
            self.src_mask = subsequent_mask(len(src)).to(src.device)
        # Embed the tokens (`src`) and run it through the transformer
        res = self.encoder(self.src_embed(src), self.src_mask)
        # Generate logits of the next token
        return self.generator(res), None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    The default configs can and will be over-ridden when we start the experiment
    """

    # Configurable transformer (initialized by the `transformer_c` option below)
    transformer: TransformerConfigs
    # The autoregressive model (initialized by the `autoregressive_model` option below)
    model: AutoregressiveModel
    # Whether the model should save $f(c_t)$; turned on when gathering keys for the index
    is_save_ff_input = False
@option(Configs.model)
def autoregressive_model(c: Configs):
    """
    Initialize the auto-regressive model
    """
    # Pull the token embedding layer, encoder and final token generator
    # from the configurable transformer
    model = AutoregressiveModel(
        src_embed=c.transformer.src_embed,
        encoder=c.transformer.encoder,
        generator=c.transformer.generator,
        # Whether to save $f(c_t)$, the input to the final feed-forward layer
        is_save_ff_input=c.is_save_ff_input)
    # Move the model to the configured device
    return model.to(c.device)
@option(Configs.transformer)
def transformer_c(c: Configs):
    """
    Initialize the configurable transformer encoder for our autoregressive model
    """
    # Create the transformer configurations
    conf = TransformerConfigs()
    # Source and target vocabularies are the same for a language model
    conf.n_src_vocab = c.n_tokens
    conf.n_tgt_vocab = c.n_tokens

    return conf
def main():
    # Create experiment
    experiment.create(name="knn_lm")
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(conf,
                       # A dictionary of configurations to override
                       {'tokenizer': 'character',
                        'prompt_separator': '',
                        'prompt': 'It is ',
                        'text': 'tiny_shakespeare',

                        # Use the Noam learning-rate schedule
                        'optimizer.optimizer': 'Noam',
                        'optimizer.learning_rate': 1.,
                        'optimizer.d_model': 256,

                        'seq_len': 1024,
                        'epochs': 128,
                        'batch_size': 6,
                        'inner_iterations': 10,

                        # Transformer configurations
                        'transformer.d_model': 256,
                        'transformer.ffn.d_ff': 1024,
                        'transformer.n_heads': 8,
                        'transformer.n_layers': 6})

    # This is needed to initialize models
    conf.n_tokens = conf.text.n_tokens

    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))

    # Start the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        conf.run()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/gmlp/experiment.py | labml_nn/transformers/gmlp/experiment.py | """
---
title: Pay Attention to MLPs (gMLP) Experiment
summary: This experiment trains a gMLP based model on Tiny Shakespeare dataset.
---
# [Pay Attention to MLPs (gMLP)](index.html) Experiment
This is an annotated PyTorch experiment to train a [gMLP model](index.html).
The paper also applies a Stochastic Depth regularization where some layers are removed randomly during training.
We have not implemented that here.
This is based on
[training loop and configurations for a simple transformer auto-regressive NLP task](../basic/autoregressive_experiment.html).
"""
from labml import experiment
from labml.configs import option
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.basic.autoregressive_experiment import Configs as BasicAutoRegressionConfigs
from labml_nn.transformers.gmlp import GMLPBlock
class Configs(BasicAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from
    [training loop and configurations for a simple transformer auto-regressive NLP task](../basic/autoregressive_experiment.html).
    """

    # Transformer configurations; the `'gMLP'` option is registered below
    transformer: TransformerConfigs = 'gMLP'
    # gMLP Block (created by the `_gmlp_configs` option below)
    gmlp: GMLPBlock
    # `d_ffn` for gMLP projection layer
    d_ffn: int = 2048
@option(Configs.gmlp, 'gMLP')
def _gmlp_configs(c: Configs):
    """
    ### Create a gMLP block
    """
    # The block needs the model dimensionality, the projection layer size,
    # and the sequence length
    return GMLPBlock(d_model=c.d_model, d_ffn=c.d_ffn, seq_len=c.seq_len)
@option(Configs.transformer, 'gMLP')
def _transformer_configs(c: Configs):
    """
    ### Transformer configurations with gMLP blocks
    """
    # Start from our
    # [configurable transformer implementation](../configs.html#TransformerConfigs)
    conf = TransformerConfigs()

    # Embedding and logit-generation layers need the vocabulary size
    conf.n_src_vocab = c.n_tokens
    conf.n_tgt_vocab = c.n_tokens
    # Match the model dimensionality
    conf.d_model = c.d_model
    # Use the gMLP block in place of the standard transformer encoder layer
    conf.encoder_layer = c.gmlp

    return conf
def main():
    # Create experiment
    experiment.create(name="gMLP")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',

        # Use a context size of $256$
        'seq_len': 256,
        # Train for $128$ epochs
        'epochs': 128,
        # Batch size $32$
        'batch_size': 32,
        # Switch between training and validation for $10$ times
        # per epoch
        'inner_iterations': 10,

        # Model size
        'd_model': 512,
        'd_ffn': 2048,

        # Use [Noam optimizer](../../optimizers/noam.html)
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,
    })

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/gmlp/__init__.py | labml_nn/transformers/gmlp/__init__.py | """
---
title: Pay Attention to MLPs (gMLP)
summary: >
This is an annotated implementation/tutorial of Pay Attention to MLPs (gMLP) in PyTorch.
---
# Pay Attention to MLPs (gMLP)
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Pay Attention to MLPs](https://arxiv.org/abs/2105.08050).
This paper introduces a Multilayer Perceptron (MLP) based architecture with gating,
which they name **gMLP**. It consists of a stack of $L$ *gMLP* blocks.
Here is [the training code](experiment.html) for a gMLP model based autoregressive model.
"""
from typing import Optional
import torch
from torch import nn
class GMLPBlock(nn.Module):
    r"""
    ## gMLP Block

    A single block of the gMLP stack. Given input embeddings
    $X \in \mathbb{R}^{n \times d}$ ($n$ tokens, $d$ channels) it computes

    \begin{align}
    Z &= \sigma(XU) \\
    \tilde{Z} &= s(Z) \\
    Y &= \tilde{Z}V \\
    \end{align}

    where $U$ and $V$ are learnable projection weights, $\sigma$ is an
    activation such as
    [GeLU](https://pytorch.org/docs/stable/generated/torch.nn.GELU.html),
    and $s(\cdot)$ is the Spacial Gating Unit defined below, whose output
    has half the channels of $Z$.
    """

    def __init__(self, d_model: int, d_ffn: int, seq_len: int):
        """
        * `d_model` is the dimensionality ($d$) of $X$
        * `d_ffn` is the dimensionality of $Z$
        * `seq_len` is the length of the token sequence ($n$)
        """
        super().__init__()
        # Normalization layer for Pre-Norm
        self.norm = nn.LayerNorm([d_model])
        # Activation function $\sigma$
        self.activation = nn.GELU()
        # Projection $U$ for $Z = \sigma(XU)$
        self.proj1 = nn.Linear(d_model, d_ffn)
        # Spacial Gating Unit $s(\cdot)$
        self.sgu = SpacialGatingUnit(d_ffn, seq_len)
        # Projection $V$ for $Y = \tilde{Z}V$; the SGU halves the channel count
        self.proj2 = nn.Linear(d_ffn // 2, d_model)
        # Embedding size, required by the [Encoder](../models.html#Encoder)
        # into which this block is plugged as a transformer-layer replacement
        self.size = d_model

    def forward(self, *, x: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """
        * `x` is the input embedding tensor $X$ of shape `[seq_len, batch_size, d_model]`
        * `mask` is a boolean mask of shape `[seq_len, seq_len, 1]` that controls
          which tokens are visible to each other
        """
        # Save the input for the shortcut connection
        residual = x
        # Pre-Norm
        h = self.norm(x)
        # $Z = \sigma(XU)$
        h = self.activation(self.proj1(h))
        # $\tilde{Z} = s(Z)$
        h = self.sgu(h, mask)
        # $Y = \tilde{Z}V$
        h = self.proj2(h)
        # Add the shortcut connection
        return h + residual
class SpacialGatingUnit(nn.Module):
    r"""
    ## Spatial Gating Unit

    $$s(Z) = Z_1 \odot f_{W,b}(Z_2)$$

    where $f_{W,b}(Z) = W Z + b$ is a linear transformation along the
    *sequence* dimension and $\odot$ is element-wise multiplication.
    $Z$ is split into two equal halves $Z_1$ and $Z_2$ along the channel
    (embedding) dimension.
    """

    def __init__(self, d_z: int, seq_len: int):
        """
        * `d_z` is the dimensionality of $Z$
        * `seq_len` is the sequence length
        """
        super().__init__()
        # Normalization applied to $Z_2$ before $f_{W,b}(\cdot)$
        self.norm = nn.LayerNorm([d_z // 2])
        # Weight $W$ in $f_{W,b}(\cdot)$, initialized near zero so that
        # $s(\cdot)$ starts out close to identity (apart from the split),
        # as the paper recommends
        self.weight = nn.Parameter(torch.zeros(seq_len, seq_len).uniform_(-0.01, 0.01), requires_grad=True)
        # Bias $b$ in $f_{W,b}(\cdot)$, initialized to $1$ per the paper
        self.bias = nn.Parameter(torch.ones(seq_len), requires_grad=True)

    def forward(self, z: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """
        * `z` is the input $Z$ of shape `[seq_len, batch_size, d_z]`
        * `mask` is a boolean mask of shape `[seq_len, seq_len, 1]` that controls
          token visibility. The trailing dimension of size `1` is the batch,
          kept for compatibility with other transformer implementations.
        """
        # Number of tokens in the input
        n_tokens = z.shape[0]

        # Split along the channel dimension: $Z_1$ gates, $Z_2$ gets transformed
        gate, path = torch.chunk(z, 2, dim=-1)

        # Validate the mask and squeeze away its batch dimension
        # (only one mask shared by the whole batch is supported)
        if mask is not None:
            assert mask.shape[0] in (1, n_tokens)
            assert mask.shape[1] == n_tokens
            assert mask.shape[2] == 1
            mask = mask[:, :, 0]

        # Normalize $Z_2$ before $f_{W,b}(\cdot)$
        path = self.norm(path)
        # Truncate $W$ to the current sequence length
        w = self.weight[:n_tokens, :n_tokens]
        # Zeroing $W_{i,j}$ stops $f_{W,b}(Z_2)_i$ from seeing token $j$
        if mask is not None:
            w = w * mask

        # $f_{W,b}(Z_2) = W Z_2 + b$ along the sequence dimension
        path = torch.einsum('ij,jbd->ibd', w, path) + self.bias[:n_tokens, None, None]

        # $Z_1 \odot f_{W,b}(Z_2)$
        return gate * path
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/fnet/experiment.py | labml_nn/transformers/fnet/experiment.py | """
---
title: FNet Experiment
summary: This experiment trains a FNet based model on AG News dataset.
---
# [FNet](index.html) Experiment
This is an annotated PyTorch experiment to train a [FNet model](index.html).
This is based on
[general training loop and configurations for AG News classification task](../../experiments/nlp_classification.html).
"""
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.nlp_classification import NLPClassificationConfigs
from labml_nn.transformers import Encoder
from labml_nn.transformers import TransformerConfigs
class TransformerClassifier(nn.Module):
    """
    # Transformer based classifier model
    """

    def __init__(self, encoder: Encoder, src_embed: nn.Module, generator: nn.Linear):
        """
        * `encoder` is the transformer [Encoder](../models.html#Encoder)
        * `src_embed` is the token
        [embedding module (with positional encodings)](../models.html#EmbeddingsWithLearnedPositionalEncoding)
        * `generator` is the [final fully connected layer](../models.html#Generator) that gives the logits.
        """
        super().__init__()
        self.src_embed = src_embed
        self.encoder = encoder
        self.generator = generator

    def forward(self, x: torch.Tensor):
        # Token embeddings with positional encodings
        embeddings = self.src_embed(x)
        # Run the transformer encoder without a mask
        encoded = self.encoder(embeddings, None)
        # The `[CLS]` token sits at the *last* position of the sequence, so
        # with `encoded` of shape `[seq_len, batch_size, d_model]` the
        # classification logits come from `encoded[-1]`
        logits = self.generator(encoded[-1])
        # Second value is the recurrent state slot (our trainer is shared
        # with RNNs), unused here
        return logits, None
class Configs(NLPClassificationConfigs):
    """
    ## Configurations

    This inherits from
    [`NLPClassificationConfigs`](../../experiments/nlp_classification.html)
    """

    # Classification model (built by the `_model` config option below)
    model: TransformerClassifier
    # Transformer configurations (built by `_transformer_configs` below)
    transformer: TransformerConfigs
@option(Configs.transformer)
def _transformer_configs(c: Configs):
    """
    ### Transformer configurations

    Build the default transformer configuration with vocabulary sizes
    taken from the dataset.
    """
    # Start from the reusable
    # [configurable transformer implementation](../configs.html#TransformerConfigs)
    configs = TransformerConfigs()
    # Both the embedding and the logit-generating vocabularies are the
    # dataset's token set
    configs.n_src_vocab = c.n_tokens
    configs.n_tgt_vocab = c.n_tokens
    #
    return configs
@option(TransformerConfigs.encoder_attn)
def fnet_mix():
    """
    Create a `FNetMix` module that can replace the self-attention in the
    [transformer encoder layer](../models.html#TransformerLayer).
    """
    # Imported locally so the FNet module is only loaded when this option is selected
    from labml_nn.transformers.fnet import FNetMix
    return FNetMix()
@option(Configs.model)
def _model(c: Configs):
    """
    Create classification model
    """
    # Pull the encoder and embedding modules from the transformer configs
    # (evaluated in this order so lazy config computation matches the defaults)
    encoder = c.transformer.encoder
    embedding = c.transformer.src_embed
    # Final classification head projecting $d_{model}$ features to class logits
    head = nn.Linear(c.d_model, c.n_classes)
    # Assemble the classifier and move it to the configured device
    model = TransformerClassifier(encoder, embedding, head).to(c.device)
    return model
def main():
    """
    Run the FNet classification experiment on AG News.
    """
    # Register the experiment
    experiment.create(name="fnet")
    # Default configurations
    conf = Configs()
    # Configuration overrides for this run
    overrides = {
        # Word level tokenizer
        'tokenizer': 'basic_english',
        # Number of training epochs
        'epochs': 32,
        # Alternate between training and validation $10$ times per epoch
        'inner_iterations': 10,

        # Transformer configurations (same as defaults)
        'transformer.d_model': 512,
        'transformer.ffn.d_ff': 2048,
        'transformer.n_heads': 8,
        'transformer.n_layers': 6,

        # Use [FNet](index.html) token mixing instead of self-attention
        'transformer.encoder_attn': 'fnet_mix',

        # [Noam optimizer](../../optimizers/noam.html) with its learning-rate schedule
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,
    }
    # Apply the overrides
    experiment.configs(conf, overrides)
    # Register the model for checkpoint saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Launch the run
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/fnet/__init__.py | labml_nn/transformers/fnet/__init__.py | """
---
title: "FNet: Mixing Tokens with Fourier Transforms"
summary: >
This is an annotated implementation/tutorial of FNet in PyTorch.
---
# FNet: Mixing Tokens with Fourier Transforms
This is a [PyTorch](https://pytorch.org) implementation of the paper
[FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824).
This paper replaces the [self-attention layer](../mha.html) with two
[Fourier transforms](https://en.wikipedia.org/wiki/Discrete_Fourier_transform) to
*mix* tokens.
This is $7 \times$ more efficient than self-attention.
Replacing self-attention with this retains about 92% of
[BERT](https://paperswithcode.com/method/bert)'s accuracy on the
[GLUE benchmark](https://paperswithcode.com/dataset/glue).
## Mixing tokens with two Fourier transforms
We apply Fourier transform along the hidden dimension (embedding dimension)
and then along the sequence dimension.
$$
\mathcal{R}\big(\mathcal{F}_\text{seq} \big(\mathcal{F}_\text{hidden} (x) \big) \big)
$$
where $x$ is the embedding input, $\mathcal{F}$ stands for the fourier transform and
$\mathcal{R}$ stands for the real component in complex numbers.
This is very simple to implement on PyTorch - just 1 line of code.
The paper suggests using a precomputed DFT matrix and doing matrix multiplication to get the
Fourier transformation.
Here is [the training code](experiment.html) for using a FNet based model for classifying
[AG News](https://paperswithcode.com/dataset/ag-news).
"""
from typing import Optional
import torch
from torch import nn
class FNetMix(nn.Module):
    r"""
    ## FNet - Mix tokens

    Implements

    $$
    \mathcal{R}\big(\mathcal{F}_\text{seq} \big(\mathcal{F}_\text{hidden} (x) \big) \big)
    $$

    The signature mirrors a [standard attention module](../mha.html) so it can
    be dropped in as a replacement.
    """

    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor] = None):
        r"""
        The [normal attention module](../mha.html) accepts separate
        $\text{query}$, $\text{key}$, $\text{value}$ tensors and a mask;
        we keep that signature, but for FNet token mixing all three must be the
        same tensor $x$ of shape `[seq_len, batch_size, d_model]`, and masking
        is not possible.
        """
        # All three inputs must literally be the same tensor for token mixing
        assert query is key and key is value
        # Token mixing cannot mask: every token sees every other token
        assert mask is None

        # Name it `x` for clarity
        x = query

        # Fourier transform over the hidden (embedding) dimension, then over
        # the sequence dimension:
        # $$\mathcal{F}_\text{seq} \big(\mathcal{F}_\text{hidden} (x) \big)$$
        # The result is a complex tensor.
        mixed = torch.fft.fft(torch.fft.fft(x, dim=2), dim=0)

        # Keep only the real component
        # $$\mathcal{R}\big(\mathcal{F}_\text{seq} \big(\mathcal{F}_\text{hidden} (x) \big) \big)$$
        return torch.real(mixed)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/fast_weights/experiment.py | labml_nn/transformers/fast_weights/experiment.py | """
---
title: Train Fast Weights Transformer
summary: This is training code with notes for a Fast Weights Transformer.
---
# Train Fast Weights Transformer
This trains a fast weights transformer model for auto-regression.
Here’s a Colab notebook for training a fast weights transformer on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/fast_weights/experiment.ipynb)
"""
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml.utils.pytorch import get_modules
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model

    Embeds tokens, runs them through a transformer, and projects the
    features back to vocabulary logits.
    """

    def __init__(self, n_vocab: int, d_model: int, transformer: nn.Module):
        super().__init__()
        # Token embedding layer
        self.src_embed = nn.Embedding(n_vocab, d_model)
        # Transformer that contextualizes the token representations
        self.transformer = transformer
        # Projection from features back to vocabulary logits
        self.generator = nn.Linear(d_model, n_vocab)

    def forward(self, x: torch.Tensor):
        # Embed the token ids
        embeddings = self.src_embed(x)
        # Contextualize with the transformer
        features = self.transformer(embeddings)
        # Next-token logits; `None` fills the recurrent-state slot that the
        # shared trainer expects (unused for transformers)
        return self.generator(features), None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    The default configs can and will be over-ridden when we start the experiment
    """

    # The autoregressive model to train
    model: AutoregressiveModel

    # Transformer embedding size $d_{model}$
    d_model: int = 512
    # DPFP projection hyper-parameter `nu`
    nu: int = 1
    # Number of attention heads
    heads: int = 8
    # Dropout probability
    dropout: float = 0.0
    # Feed-forward hidden layer size
    d_ff: int = 2048
    # Number of transformer layers
    n_layers: int = 6
@option(Configs.model)
def fast_weights_transformer(c: Configs):
    """
    Create [fast weights transformer](index.html).
    """
    from labml_nn.transformers.fast_weights import FastWeightsAttentionTransformer, \
        FastWeightsAttentionTransformerLayer, FastWeightsAttention, FeedForward
    from labml_nn.transformers.fast_weights import DPFP

    # Attention using the DPFP projection function
    attn = FastWeightsAttention(c.heads, c.d_model, c.dropout, DPFP(nu=c.nu))
    # Position-wise feed-forward network
    ffn = FeedForward(c.d_model, c.d_ff, c.dropout)
    # One transformer layer; the transformer clones it `n_layers` times
    layer = FastWeightsAttentionTransformerLayer(d_model=c.d_model,
                                                 attn=attn,
                                                 feed_forward=ffn,
                                                 dropout_prob=c.dropout)
    transformer = FastWeightsAttentionTransformer(layer, c.n_layers)
    # Wrap in the autoregressive model and move to the configured device
    return AutoregressiveModel(c.n_tokens, c.d_model, transformer).to(c.device)
def main():
    """
    Train the fast weights transformer on Tiny Shakespeare.
    """
    # Register the experiment
    experiment.create(name="fast_weights_transformer")
    # Default configurations
    conf = Configs()
    # Configuration overrides for this run
    overrides = {'tokenizer': 'character',
                 'text': 'tiny_shakespeare',
                 'optimizer.learning_rate': 1.0,
                 'optimizer.optimizer': 'Noam',
                 'prompt': 'It is',
                 'prompt_separator': '',
                 'train_loader': 'shuffled_train_loader',
                 'valid_loader': 'shuffled_valid_loader',
                 'seq_len': 128,
                 'epochs': 128,
                 'batch_size': 16,
                 'inner_iterations': 25}
    # Apply the overrides
    experiment.configs(conf, overrides)
    # Register all model modules for checkpoint saving and loading
    experiment.add_pytorch_models(get_modules(conf))
    # Launch the run
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/fast_weights/token_wise.py | labml_nn/transformers/fast_weights/token_wise.py | """
---
title: Fast Weight Systems
summary: >
This is an annotated implementation/tutorial of
Linear Transformers Are Secretly Fast Weight Memory Systems in PyTorch.
---
"""
from typing import Optional
import torch
from torch import nn
from labml_nn.transformers.fast_weights import DPFP
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.mha import PrepareForMultiHeadAttention
from labml_nn.utils import clone_module_list
class FastWeightsAttention(nn.Module):
    """
    Token-wise fast weights attention.

    Unlike the whole-sequence version in [`__init__.py`](index.html), this
    processes one step at a time and threads the fast weight memory tensor
    through successive calls via the `weights` argument.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float, phi: DPFP):
        super().__init__()

        # Number of features per head
        self.d_k = d_model // heads
        #
        self.heads = heads

        # These transform the `query` for multi-headed attention.
        self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
        # These transform the `key` and `value` for multi-headed attention.
        self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
        self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)

        # Interpolation gate: one sigmoid scalar per head
        self.gate = nn.Sequential(PrepareForMultiHeadAttention(d_model, heads, 1, bias=False),
                                  nn.Sigmoid())

        # Projection function applied to queries and keys
        self.phi = phi

        # Output layer
        self.output = nn.Linear(d_model, d_model)
        # Dropout (declared but not applied in `forward`)
        self.dropout = nn.Dropout(dropout_prob)

    def forward(self, x: torch.Tensor, weights: Optional[torch.Tensor]):
        # Project queries and keys, then apply the projection function
        query = self.phi(self.query(x))
        key = self.phi(self.key(x))
        # Project values
        value = self.value(x)

        # Start with an empty fast weight memory on the first step
        if weights is None:
            weights = key.new_zeros((key.shape[0], key.shape[1], value.shape[2], key.shape[2]))

        # Retrieve the value currently stored for this key
        retrieved = torch.einsum('bhvk,bhk->bhv', weights, key)

        # Interpolation weight
        beta = self.gate(x)

        # Write the interpolated new value into the fast weight memory
        weights = weights + torch.einsum('bhv,bhk->bhvk', beta * (value - retrieved), key)

        # Read the output for the query
        out = torch.einsum('bhvk,bhk->bhv', weights, query)

        # Concatenate multiple heads
        out = out.reshape(out.shape[0], -1)

        # Output layer
        return self.output(out), weights
class FastWeightsAttentionTransformerLayer(nn.Module):
    """
    A transformer layer combining token-wise fast weights attention with a
    position-wise feed-forward network, using residual connections.
    """

    def __init__(self, *,
                 d_model: int,
                 attn: FastWeightsAttention,
                 feed_forward: FeedForward,
                 dropout_prob: float):
        super().__init__()
        # Transformer size $d_{model}$
        self.size = d_model
        # Fast weights attention module
        self.attn = attn
        # Feed-forward network
        self.feed_forward = feed_forward
        # Dropout layer
        self.dropout = nn.Dropout(dropout_prob)

        # Normalization layers
        self.norm_self_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])

    def forward(self, x: torch.Tensor, weights: Optional[torch.Tensor]):
        # Attention over the current step; also updates the fast weight memory
        attn_out, weights = self.attn(x, weights)
        # Residual connection around attention
        x = x + self.dropout(attn_out)

        # Pre-norm for the feed-forward network
        normed = self.norm_ff(x)
        # Position-wise feed-forward
        ff_out = self.feed_forward(normed)
        # Residual connection around the feed-forward network
        x = x + self.dropout(ff_out)

        #
        return x, weights
class FastWeightsAttentionTransformer(nn.Module):
    """
    Stack of token-wise fast weights transformer layers. Processes the
    sequence one step at a time, threading a per-layer fast weight memory
    across steps.
    """

    def __init__(self, layer: FastWeightsAttentionTransformerLayer, n_layers: int):
        super().__init__()
        # Make copies of the transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x_seq: torch.Tensor):
        # Per-layer fast weight memories, initially empty
        weights = [None] * len(self.layers)
        # Outputs collected at each step
        outputs = []

        # Step through the sequence
        for step in torch.unbind(x_seq, dim=0):
            x = step
            # Run through each layer, updating its memory
            for i, layer in enumerate(self.layers):
                x, weights[i] = layer(x, weights[i])
            outputs.append(x)

        # Stack the step outputs and apply the final normalization
        return self.norm(torch.stack(outputs))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/fast_weights/__init__.py | labml_nn/transformers/fast_weights/__init__.py | """
---
title: Linear Transformers Are Secretly Fast Weight Memory Systems
summary: >
This is an annotated implementation/tutorial of
Linear Transformers Are Secretly Fast Weight Memory Systems in PyTorch.
---
# Fast weights transformer
The paper
[Linear Transformers Are Secretly Fast Weight Memory Systems in PyTorch](https://arxiv.org/abs/2102.11174)
finds similarities between linear self-attention and fast weight systems
and makes modifications to self-attention update rule based on that.
It also introduces a simpler, yet effective kernel function.
*The authors have provided an [official implementation](https://github.com/ischlag/fast-weight-transformers)
of the paper including other variants they compare with in the paper.*
## Fast weights
Consider a sequence of inputs $\big\{x^{(i)}\big\}^L_{i=1}$ of length $L$
and each step is a vector of size $d_{in}$; i.e. $x \in \mathbb{R}^{d_{in}}$.
The fast weight model generates a weight matrix at each step to produce output
$\big\{y^{(i)}\big\}^L_{i=1}$, $y \in \mathbb{R}^{d_{out}}$
\begin{align}
a^{(i)}, b^{(i)} &= \textcolor{orange}{W_a} x^{(i)}, \textcolor{orange}{W_b} x^{(i)} \\
\textcolor{cyan}{W^{(i)}} &= \sigma \Big( \textcolor{cyan}{W^{(i-1)}} + a^{(i)} \otimes b^{(i)} \Big) \\
y^{(i)} &= \textcolor{cyan}{W^{(i)}} x^{(i)}
\end{align}
$\otimes$ is the outer product ($a \otimes b = a b^\top$), where elements of the two vectors are multiplied with each other
to give a matrix.
$\sigma$ is an activation function.
$\textcolor{orange}{W_a}$ and $\textcolor{orange}{W_b}$ are trainable weights (parameters).
$\textcolor{cyan}{W^{(i)}}$ are the fast weights that are generated at each step.
## Linear self-attention
Original transformer self-attention is, (omitting $\frac{1}{d_k}$ for clarity)
\begin{align}
y^{(i)} &= \Big[v^{(1)}, v^{(2)}, ..., v^{(i)}\Big] \text{softmax}
\bigg(
\Big[k^{(1)}, k^{(2)}, ..., k^{(i)}\Big] ^\top
q^{(i)}
\bigg) \\
&= \sum^i_{j=1} \frac
{ v^{(j)} \kappa(k^{(j)}, q^{(i)}) }
{ \sum^i_{j'=1} \kappa(k^{(j')}, q^{(i)}) } \\
\end{align}
where $\kappa(k, q) = \text{exp}(k \cdot q)$
The idea behind linearizing self attention is to replace softmax
kernel $\kappa$ with a different kernel $\kappa '$ so that we can calculate the
denominator of the self attention function faster:
$$\kappa '(k, q) = \textcolor{lightgreen}{\phi(k)}^\top \textcolor{lightgreen}{\phi(q)}$$
This gives
\begin{align}
y^{(i)} &= \frac
{\Big( \sum^i_{j=1} v^{(j)} \otimes \textcolor{lightgreen}{\phi(k^{(j)})} \Big)
\textcolor{lightgreen}{\phi(q^{(i)})} }
{ \Big( \sum^i_{j'=1}
\textcolor{lightgreen}{\phi(k^{(j')})} \Big)
\textcolor{lightgreen}{\phi(q^{(i)})} }
\end{align}
With $\textcolor{cyan}{W^{(i)}} = \sum^i_{j=1} v^{(j)} \otimes \phi(k^{(j)})$ and
$z^{(i)} = \sum^i_{j=1} \textcolor{lightgreen}{\phi(k^{(j)})}$, we can calculate them efficiently:
\begin{align}
\textcolor{cyan}{W^{(i)}} &= \textcolor{cyan}{W^{(i-1)}} + v^{(i)} \otimes \textcolor{lightgreen}{\phi(k^{(i)})} \\
z^{(i)} &= z^{(i-1)} + \textcolor{lightgreen}{\phi(k^{(i)})} \\
y^{(i)} &= \frac{1}{z^{(i)} \cdot \textcolor{lightgreen}{\phi(q^{(i)})}}
W^{(i)} \textcolor{lightgreen}{\phi(q^{(i)})}
\end{align}
This is quite similar to fast weights.
The paper introduces a new linear attention projection function $\textcolor{lightgreen}{\phi}$,
a new update rule for $\textcolor{cyan}{W^{(i)}} = f(\textcolor{cyan}{W^{(i-1)}})$, and changes the normalization
$\frac{1}{z^{(i)} \cdot \textcolor{lightgreen}{\phi(q^{(i)})}}$.
Here are [the training code](experiment.html) and a notebook for training a fast weights
transformer on the Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/fast_weights/experiment.ipynb)
"""
import torch
from torch import nn
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.mha import PrepareForMultiHeadAttention
from labml_nn.utils import clone_module_list
class DPFP(nn.Module):
    r"""
    ## Deterministic Parameter Free Projection (DPFP)

    This is the projection function $\textcolor{lightgreen}{\phi}$ introduced
    in the paper. It maps $k$ of dimensionality $d_{key}$ to dimensionality
    $d_{dot} = 2 d_{key} \nu$, where $\nu \in \{1, 2, ..., 2 d_{key} - 1\}$
    is a hyper-parameter:

    $$\textcolor{lightgreen}{\phi_{2 d_{key} (i - 1) + j}(k)}
     = \text{ReLU}\Big(\big[k, -k\big]\Big)_{j}
                        \text{ReLU}\Big(\big[k, -k\big]\Big)_{i + j}$$

    where $\big[k, -k\big]$ is the concatenation of $k$ and $-k$ (size
    $2 d_{key}$), $i \in \{1, ..., \nu\}$, $j \in \{1, ..., 2 d_{key}\}$, and
    indices roll around when they exceed the vector length.

    The resulting projections are sparse (only a few elements of $\phi$ are
    non-zero) and near-orthogonal
    ($\textcolor{lightgreen}{\phi(k^{(i)})} \cdot \textcolor{lightgreen}{\phi(k^{(j)})}
    \approx 0$ for most $i, j$ unless $k^{(i)}$ and $k^{(j)}$ are very similar).

    ### Normalization

    The paper normalizes $\textcolor{lightgreen}{\phi}$ by the sum of its
    components:

    $$\textcolor{lightgreen}{\phi '(k)} =
     \frac{\textcolor{lightgreen}{\phi(k)}}{\sum^{d_{dot}}_{j=1} \textcolor{lightgreen}{\phi(k)_j}}$$

    *Check the paper for derivation.*
    """

    def __init__(self, nu: int = 1, eps: float = 1e-6):
        r"""
        * `nu` is the hyper-parameter $\nu$.
        * `eps` is the small value used to make sure there is no division-by-zero when normalizing.
        """
        super().__init__()
        self.nu = nu
        self.relu = nn.ReLU()
        self.eps = eps

    def forward(self, k: torch.Tensor):
        # Project with $\phi$
        projected = self.dpfp(k)
        # Normalize by the component sum (plus `eps` for numerical safety)
        return projected / (torch.sum(projected, dim=-1, keepdim=True) + self.eps)

    def dpfp(self, k: torch.Tensor):
        r"""
        Compute the un-normalized projection $\textcolor{lightgreen}{\phi(k)}$.
        """
        # $x = \text{ReLU}\big([k, -k]\big)$
        x = self.relu(torch.cat([k, -k], dim=-1))
        # Roll $x$ by each shift $i \in \{1, ..., \nu\}$ and concatenate,
        # giving $x'_{i,j} = \text{ReLU}\big([k, -k]\big)_{i+j}$
        rolled = torch.cat([x.roll(shifts=i, dims=-1) for i in range(1, self.nu + 1)], dim=-1)
        # Tile $x$ to match: $\nu$ copies along the last dimension
        repeated = torch.cat([x] * self.nu, dim=-1)
        # Element-wise product gives the projection
        return repeated * rolled
class FastWeightsAttention(nn.Module):
    """
    ## Fast Weights Attention

    The paper introduces a new update rule for calculating $\textcolor{cyan}{W^{(i)}}$.
    The model first retrieves the current value
    $\bar{v}^{(i)}$ paired with the key $k^{(i)}$.
    Then stores a combination $v^{(i)}_{new}$
    of the retrieved value $\bar{v}^{(i)}$ and the input $v^{(i)}$.

    \begin{align}
    k^{(i)}, v^{(i)}, q^{(i)} &=
    \textcolor{orange}{W_k} x^{(i)}, \textcolor{orange}{W_v} x^{(i)}, \textcolor{orange}{W_q} x^{(i)} \\
    \bar{v}^{(i)} &= \textcolor{cyan}{W^{(i-1)}} \textcolor{lightgreen}{\phi'(k^{(i)})} \\
    \beta^{(i)} &= \sigma \Big(\textcolor{orange}{W_\beta} x^{(i)} \Big) \\
    v^{(i)}_{new} &= \beta^{(i)} v^{(i)} + \Big(1 - \beta^{(i)} \Big) \bar{v}^{(i)} \\
    \textcolor{cyan}{W^{(i)}}
     &= \textcolor{cyan}{W^{(i-1)}} + v^{(i)}_{new} \otimes \textcolor{lightgreen}{\phi'(k^{(i)})} \\
     &= \textcolor{cyan}{W^{(i-1)}} +
     \beta^{(i)} \Big( v^{(i)} - \bar{v}^{(i)} \Big ) \otimes \textcolor{lightgreen}{\phi'(k^{(i)})} \\
    y^{(i)} &= \textcolor{cyan}{W^{(i)}} \textcolor{lightgreen}{\phi'(q^{(i)})}
    \end{align}

    where $\textcolor{orange}{W_\beta}$ is a trainable parameter and $\sigma$ is the sigmoid function.

    Note that we don't need the normalization term $z$ because $\textcolor{lightgreen}{\phi'}$ is normalized.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float, phi: DPFP):
        super().__init__()

        # Number of features per head $d_k$
        self.d_k = d_model // heads
        # Number of heads
        self.heads = heads

        # These transform the `query`, `key` and `value` multi-headed attention.
        self.query = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
        self.key = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)
        self.value = PrepareForMultiHeadAttention(d_model, heads, self.d_k, bias=False)

        # Interpolation weight function $\sigma \Big(\textcolor{orange}{W_\beta} x^{(i)} \Big)$ for each head
        self.interpolation_weight = nn.Sequential(
            PrepareForMultiHeadAttention(d_model, heads, 1, bias=False),
            nn.Sigmoid()
        )

        # $\textcolor{lightgreen}{\phi'}$
        self.phi = phi

        # Output layer
        self.output = nn.Linear(d_model, d_model)
        # Dropout
        # NOTE(review): declared but never applied in `forward` — confirm whether
        # dropout was intended on the attention output.
        self.dropout = nn.Dropout(dropout_prob)

    def forward(self, x: torch.Tensor):
        # Get the number of steps $L$
        # (assumes `x` is `[seq_len, batch_size, d_model]` — first dimension is iterated)
        seq_len = x.shape[0]
        # $\textcolor{lightgreen}{\phi'(q^{(i)})}$ for all steps and heads
        query = self.phi(self.query(x))
        # $\textcolor{lightgreen}{\phi'(k^{(i)})}$ for all steps and heads
        key = self.phi(self.key(x))
        # $v^{(i)}$ for all steps and heads
        value = self.value(x)
        # $\beta^{(i)}$ for all steps and heads
        beta = self.interpolation_weight(x)

        # $\textcolor{cyan}{W^{(0)}}$, zero-initialized fast weight memory.
        # Indexed `[batch, head, value-features, key-features]` to match the
        # einsum subscripts `bhvk` below.
        weights = key.new_zeros((key.shape[1], key.shape[2], value.shape[3], key.shape[3]))
        # List to store outputs $y^{(i)}$
        outputs = []

        # Iterate through steps
        for i in range(seq_len):
            # $$\bar{v}^{(i)} = \textcolor{cyan}{W^{(i-1)}} \textcolor{lightgreen}{\phi'(k^{(i)})}$$
            value_existing = torch.einsum('bhvk,bhk->bhv', weights, key[i])

            # $$\textcolor{cyan}{W^{(i)}}
            # = \textcolor{cyan}{W^{(i-1)}} +
            # \beta^{(i)} \Big( v^{(i)} - \bar{v}^{(i)} \Big ) \otimes \textcolor{lightgreen}{\phi'(k^{(i)})}$$
            weights = weights + torch.einsum('bhv,bhk->bhvk', beta[i] * (value[i] - value_existing), key[i])

            # $$y^{(i)} = \textcolor{cyan}{W^{(i)}} \textcolor{lightgreen}{\phi'(q^{(i)})}$$
            y = torch.einsum('bhvk,bhk->bhv', weights, query[i])

            # Merge multiple heads and append to `outputs`
            outputs.append(y.reshape(y.shape[0], -1))

        # Stack outputs at each step into a single tensor
        x = torch.stack(outputs)

        # Output layer
        return self.output(x)
class FastWeightsAttentionTransformerLayer(nn.Module):
    """
    A general transformer layer combining fast weights attention with a
    position-wise feed-forward network, using residual connections.
    """

    def __init__(self, *,
                 d_model: int,
                 attn: FastWeightsAttention,
                 feed_forward: FeedForward,
                 dropout_prob: float):
        super().__init__()
        # Transformer size $d_{model}$
        self.size = d_model
        # Fast weights attention module
        self.attn = attn
        # Feed-forward network
        self.feed_forward = feed_forward
        # Dropout layer
        self.dropout = nn.Dropout(dropout_prob)

        # Normalization layers
        self.norm_self_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])

    def forward(self, x: torch.Tensor):
        # Fast weights self attention over the whole sequence
        attn_out = self.attn(x)
        # Residual connection around attention
        x = x + self.dropout(attn_out)

        # Pre-norm for the feed-forward network
        normed = self.norm_ff(x)
        # Position-wise feed-forward
        ff_out = self.feed_forward(normed)
        # Residual connection around the feed-forward network
        x = x + self.dropout(ff_out)

        #
        return x
class FastWeightsAttentionTransformer(nn.Module):
    """
    A general transformer module: a stack of identical (deep-copied)
    fast weights transformer layers followed by a final normalization.
    """

    def __init__(self, layer: FastWeightsAttentionTransformerLayer, n_layers: int):
        super().__init__()
        # Make copies of the transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor):
        # Pass through each layer in turn
        for layer in self.layers:
            x = layer(x)
        # Normalize the output
        return self.norm(x)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/compressive/experiment.py | labml_nn/transformers/compressive/experiment.py | """
---
title: Compressive Transformer Experiment
summary: This experiment trains a compressive transformer model on tiny Shakespeare dataset.
---
# Compressive Transformer Experiment
This is an annotated PyTorch experiment to train a compressive transformer model.
"""
from typing import List, Tuple, NamedTuple
import torch
import torch.nn as nn
from labml import experiment, tracker, monit, logger
from labml.configs import option
from labml.logger import Text
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.helpers.metrics import SimpleStateModule
from labml_nn.helpers.trainer import BatchIndex
from labml_nn.transformers.compressive import CompressiveTransformer, AttentionReconstructionLoss, \
CompressiveTransformerLayer, Conv1dCompression
# Container pairing, per transformer layer, the uncompressed memories (`mem`)
# with their compressed counterparts (`c_mem`).
CompressedMemory = NamedTuple('CompressedMemory', [
    ('mem', List[torch.Tensor]),
    ('c_mem', List[torch.Tensor]),
])
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model

    Wraps a `CompressiveTransformer` with a token embedding layer, a final
    projection to vocabulary logits, and lazily-built attention masks.
    """

    def __init__(self, n_vocab: int, d_model: int, transformer: CompressiveTransformer):
        """
        * `n_vocab` is the vocabulary size
        * `d_model` is the token embedding size
        * `transformer` is the compressive transformer stack
        """
        super().__init__()
        # Token embedding module
        self.src_embed = nn.Embedding(n_vocab, d_model)
        # Transformer
        self.transformer = transformer
        # Final layer projecting features to vocabulary logits
        self.generator = nn.Linear(d_model, n_vocab)
        # Masks, built lazily in `forward` and cached across calls
        self.mask_x = None
        self.mask_mem = None

    def forward(self, x: torch.Tensor, mem: CompressedMemory):
        """
        * `x` is the token-index input — assumed `[seq_len, batch_size]`; TODO confirm
        * `mem` is the `CompressedMemory` from the previous call (or `None`)

        Returns logits and the list of new per-layer memories from the transformer.
        """
        # Get memory and compressed memory
        if mem is not None:
            mem, c_mem = mem.mem, mem.c_mem
        else:
            mem = []
            c_mem = []

        # Total length of the memory and compressed memory (for masks)
        m_len = len(mem[0]) if mem else 0
        if c_mem:
            m_len += len(c_mem[0])

        # Create a subsequent (causal) mask for tokens; rebuilt only when the
        # cached one is too small for the current sequence length
        if self.mask_x is None or self.mask_x.shape[0] < len(x):
            from labml_nn.transformers.utils import subsequent_mask
            self.mask_x = subsequent_mask(len(x)).to(x.device)
        # Create an all ones (full visibility) mask for memory;
        # rebuilt when the cached one is too small in either dimension
        if self.mask_mem is None or self.mask_mem.shape[1] < m_len or self.mask_mem.shape[0] < len(x):
            self.mask_mem = self.mask_x.new_ones(len(x), m_len, 1)

        # Concatenate the masks if there is memory
        if m_len:
            mask = torch.cat((self.mask_mem[:len(x), :m_len], self.mask_x[:len(x), :len(x)]), dim=1)
        # Use only the subsequent mask otherwise
        else:
            mask = self.mask_x[:len(x), :len(x)]

        # Token embeddings
        x = self.src_embed(x)
        # Run it through the transformer
        res, mem = self.transformer(x, mem, c_mem, mask)
        # Generate logits of the next token
        res = self.generator(res)
        #
        return res, mem
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    The default configurations can and will be overridden when we start the experiment.
    Adds compressive-transformer memory management on top of the base
    NLP auto-regression training loop.
    """

    model: AutoregressiveModel

    # Token embedding size
    d_model: int = 128
    # Number of attention heads
    heads: int = 4
    # Dropout probability
    dropout: float = 0.0
    # Number of features in FFN hidden layer
    d_ff: int = 256
    # Number of transformer layers
    n_layers: int = 6
    # Number of memories to keep
    mem_len: int = 8
    # State module to maintain memories when switching between training and validation
    memory = SimpleStateModule()
    # Attention Reconstruction Loss
    attention_reconstruction_loss: AttentionReconstructionLoss
    # Compression rate
    compression_rate: int = 4
    # Compressed memory length
    c_mem_len: int = 128

    def init(self):
        """
        Set up metric tracking and register state modules.
        """
        # Set tracker configurations
        tracker.set_scalar("accuracy.*", True)
        tracker.set_scalar("loss.*", True)
        # Do not print the attention reconstruction loss in the terminal
        tracker.set_scalar("ar_loss.*", False)
        # This will keep the accuracy metric stats and memories separate for training and validation.
        self.state_modules = [self.accuracy, self.memory]

    @torch.no_grad()
    def merge_compress_memory(self, mem: CompressedMemory, new_mem: List[torch.Tensor]) \
            -> Tuple[CompressedMemory, List[torch.Tensor]]:
        """
        Concatenate new memories and compress the oldest memories.

        * `mem` is the current `CompressedMemory` (or `None` on the first step)
        * `new_mem` is the list of per-layer activations from the last forward pass

        Returns the updated `CompressedMemory` and the list of memories that
        were compressed (needed for the attention reconstruction loss).
        Runs under `torch.no_grad()` — memory bookkeeping is not differentiated.
        """
        # If the configurations specify not to use memory
        if self.mem_len == 0 and self.c_mem_len == 0:
            return CompressedMemory([], []), []

        # Get memory and compressed memory
        if mem is not None:
            mem, c_mem = mem.mem, mem.c_mem
        else:
            mem, c_mem = [], []

        # Concatenate new memories with old memory
        if mem:
            mem = [torch.cat((m, x), dim=0) for m, x in zip(mem, new_mem)]
        else:
            mem = new_mem

        # Compress the oldest memories if there are more memories than `mem_len`
        if len(mem[0]) > self.mem_len:
            # Calculate the number of compressed memories to make $n_{cm} = \bigg\lceil\frac{n'_m - N_m}{c}\bigg\rceil$,
            # where $n'_m$ is the number of memories we have
            # and $N_m$ is the maximum number of memories we maintain (`mem_len`).
            n_c_mem = (len(mem[0]) - self.mem_len + self.compression_rate - 1) // self.compression_rate
            # Number of memories to compress $c n_{cm}$
            n_old = n_c_mem * self.compression_rate
            # A list to keep memories that need to be compressed for each layer.
            mem_to_compress = []
            # A list to keep the memories that do not get compressed for each layer.
            uncompressed_mem = []
            # Iterate through memories of each layer.
            for m in mem:
                # Split the memories at $c n_{cm}$
                cm, m = torch.split(m, [n_old, len(m) - n_old])
                # Collect memories to compress
                mem_to_compress.append(cm)
                # Collect remaining memories
                uncompressed_mem.append(m)
            # Update the memories
            mem = uncompressed_mem

            # Compress the memories with each layer's own compression function
            new_c_mem = []
            for i, layer in enumerate(self.model.transformer.layers):
                new_c_mem.append(layer.compress(mem_to_compress[i]))

            # Concatenate newly compressed memories with old compressed memories
            if c_mem:
                c_mem = [torch.cat((m, nm), dim=0) for m, nm in zip(c_mem, new_c_mem)]
            # If there are no old compressed memories
            else:
                c_mem = new_c_mem

            # Truncate old memories
            if len(c_mem[0]) > self.c_mem_len:
                c_mem = [m[-self.c_mem_len:] for m in c_mem]
        # No memories are compressed if the number of memories is less than `mem_len`
        else:
            mem_to_compress = []

        # Return memories and the memories that were compressed.
        # Memories that were compressed are needed for the reconstruction loss computation.
        return CompressedMemory(mem, c_mem), mem_to_compress

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training/validation step

        * `batch` is a `(data, target)` pair of token-index tensors
        * `batch_idx` tracks position within the epoch
        """
        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Update global step (number of tokens processed) when in training mode
        if self.mode.is_train:
            tracker.add_global_step(data.shape[0] * data.shape[1])

        # Get memories
        mem = self.memory.get()
        # Run the model
        output, new_mem = self.model(data, mem)
        # Merge and compress memory
        mem, mem_to_compress = self.merge_compress_memory(mem, new_mem)
        # Update memories
        self.memory.set(mem)

        # Calculate and log cross entropy loss
        loss = self.loss_func(output, target)
        tracker.add("loss.", loss)

        # Calculate attention reconstruction loss if memories were compressed in this step
        if mem_to_compress:
            # Get attention reconstruction loss
            ar_loss = self.attention_reconstruction_loss(new_mem, mem_to_compress)
            # Track attention reconstruction loss
            tracker.add("ar_loss.", ar_loss)
            # Add attention reconstruction loss to loss (unscaled)
            loss = loss + ar_loss

        # Calculate and log accuracy
        self.accuracy(output, target)
        self.accuracy.track()

        # Train the model
        if self.mode.is_train:
            # Calculate gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
            # Take optimizer step
            self.optimizer.step()
            # Log the model parameters and gradients on last batch of every epoch
            if batch_idx.is_last:
                tracker.add('model', self.model)
            # Clear the gradients
            self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()

    def sample(self):
        """
        ### Sampling function to generate samples periodically while training

        Greedy decoding: after the first step only the last character is fed
        back in; the rest of the context lives in the transformer memories.
        """
        # Starting prompt
        prompt = self.prompt
        # Collect output for printing
        log = [(prompt, Text.subtle)]
        # memory
        mem = CompressedMemory([], [])
        # Sample 25 tokens
        for i in monit.iterate('Sample', 25):
            # Tokenize the prompt
            data = self.text.text_to_i(prompt).unsqueeze(-1)
            # Move to device
            data = data.to(self.device)
            # Get the model output
            output, new_mem = self.model(data, mem)
            # Get the model prediction (greedy)
            output = output.argmax(dim=-1).squeeze(1)
            # Add the prediction to prompt
            prompt += self.prompt_separator + self.text.itos[output[-1]]
            # Only feed the last character to model in next iteration, rest will go in as memories
            prompt = prompt[-1:]
            # Add the prediction for logging
            log += [(self.prompt_separator + self.text.itos[output[-1]], Text.value)]
            # Update and compress memory
            mem, _ = self.merge_compress_memory(mem, new_mem)

        # Print the sampled output
        logger.log(log)
@option(Configs.model)
def autoregressive_model(c: Configs):
    """
    ### Initialize the auto-regressive model

    Builds the compressive transformer from the configuration values and
    moves it to the configured device.
    """
    from labml_nn.transformers.xl import RelativeMultiHeadAttention
    from labml_nn.transformers.feed_forward import FeedForward

    # Relative multi-head attention for the prototype layer
    attention = RelativeMultiHeadAttention(c.heads, c.d_model, c.dropout)
    # Position-wise feed-forward network
    ffn = FeedForward(c.d_model, c.d_ff, c.dropout)
    # 1D convolution compression function $f_c$
    compression = Conv1dCompression(c.compression_rate, c.d_model)
    # Prototype layer; the transformer clones it `c.n_layers` times
    layer = CompressiveTransformerLayer(d_model=c.d_model,
                                        self_attn=attention,
                                        feed_forward=ffn,
                                        dropout_prob=c.dropout,
                                        compress=compression)
    model = AutoregressiveModel(c.n_tokens, c.d_model, CompressiveTransformer(layer, c.n_layers))
    return model.to(c.device)
@option(Configs.attention_reconstruction_loss)
def attention_reconstruction_loss(c: Configs):
    """
    ### Initialize the attention reconstruction loss

    Shares the model's transformer layers so the loss can reach each layer's
    compression function and attention parameters.
    """
    layers = c.model.transformer.layers
    return AttentionReconstructionLoss(layers)
def main():
    """
    ### Run the experiment

    Creates the experiment, applies the configuration overrides, registers
    the model for checkpointing, and runs the training loop.
    """
    # Create experiment
    experiment.create(name="compressive_transformer", comment='')
    # Create configs
    conf = Configs()
    # Configuration overrides for this run
    overrides = {
        'tokenizer': 'character',
        'text': 'tiny_shakespeare',
        'optimizer.learning_rate': 2.5e-4,
        'optimizer.optimizer': 'AdamW',
        'prompt': 'It is',
        'prompt_separator': '',

        'train_loader': 'sequential_train_loader',
        'valid_loader': 'sequential_valid_loader',

        'seq_len': 8,
        'mem_len': 8,
        'epochs': 128,
        'batch_size': 32,
        'inner_iterations': 25,
        'compression_rate': 2,
    }
    # Load configurations
    experiment.configs(conf, overrides)

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment and run the training loop (`TrainValidConfigs.run`)
    with experiment.start():
        conf.run()
#
# Run the experiment only when this file is executed as a script (not on import)
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/compressive/__init__.py | labml_nn/transformers/compressive/__init__.py | """
---
title: Compressive Transformer
summary: >
Documented implementation with explanations of a
Compressive Transformer model.
---
# Compressive Transformer
This is an implementation of
[Compressive Transformers for Long-Range Sequence Modelling](https://arxiv.org/abs/1911.05507)
in [PyTorch](https://pytorch.org).
This is an extension of [Transformer XL](../xl/index.html) where past memories
are compressed to give a longer attention range.
That is, the furthest $n_{cm} c$ memories are compressed into
$n_{cm}$ memories, where $c$ is the compression rate.
## Compression operation
The compression operation is defined as
$f_c: \mathbb{R}^{nc \times d} \rightarrow \mathbb{R}^{n \times d}$.
The paper introduces multiple choices for $f_c$ and we have only implemented
1D convolution which seems to give the best results.
Each layer has a separate compression operation $f_c^{(i)}$ where
$i$ is the layer number.
## Training compression operation
Since training compression with BPTT requires maintaining
a very large computational graph (many time steps), the paper proposes
an *auto-encoding loss* and an *attention reconstruction loss*.
The auto-encoding loss decodes the original memories from the compressed memories
and calculates the loss.
Attention reconstruction loss computes the multi-headed attention results
on the compressed memory and on uncompressed memory and gets a mean squared error
between them.
We have implemented the latter here since it gives better results.
This implementation uses pre-layer normalization
while the paper uses post-layer normalization.
Pre-layer norm does the layer norm before [FFN](../feed_forward.html) and
self-attention, and the pass-through in the residual connection is not normalized.
This is supposed to be more stable in standard transformer setups.
Here are [the training code](experiment.html) and a notebook for training a compressive transformer
model on the Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/compressive/experiment.ipynb)
"""
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.mha import PrepareForMultiHeadAttention
from labml_nn.transformers.xl.relative_mha import RelativeMultiHeadAttention
from labml_nn.utils import clone_module_list
class Conv1dCompression(nn.Module):
    """
    ## 1D Convolution Compression $f_c$

    Compresses a sequence of memories by a factor of `compression_rate`
    using a strided 1D convolution over the sequence dimension; a thin
    wrapper around [`nn.Conv1d`](https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html)
    with the necessary dimension permutations.
    """

    def __init__(self, compression_rate: int, d_model: int):
        """
        * `compression_rate` $c$
        * `d_model` is the embedding size
        """
        super().__init__()
        # Kernel size and stride both equal the compression rate, so every
        # non-overlapping block of $c$ memories maps to one compressed memory
        self.conv = nn.Conv1d(d_model, d_model, kernel_size=compression_rate, stride=compression_rate)

    def forward(self, mem: torch.Tensor):
        """
        `mem` has shape `[seq_len, batch, d_model]`
        """
        # `nn.Conv1d` expects inputs in the form `[batch, features, sequence]`
        batch_first = mem.permute(1, 2, 0)
        # Compress along the sequence dimension
        compressed = self.conv(batch_first)
        # Restore the `[seq_len, batch, d_model]` layout
        return compressed.permute(2, 0, 1)
class CompressiveTransformerLayer(nn.Module):
    """
    ## Compressive Transformer Layer

    A single pre-norm transformer layer: relative multi-head attention over
    the tokens plus (compressed) memories, followed by a position-wise
    feed-forward network, each with dropout and a residual connection.
    """

    def __init__(self, *,
                 d_model: int,
                 self_attn: RelativeMultiHeadAttention,
                 feed_forward: FeedForward,
                 dropout_prob: float,
                 compress: Conv1dCompression):
        """
        * `d_model` is the token embedding size
        * `self_attn` is the [self attention module](../xl/relative_mha.html)
        * `feed_forward` is the [feed forward module](../feed_forward.html)
        * `dropout_prob` is the probability of dropping out after self attention and FFN
        * `compress` is the compression function $f_c$
        """
        super().__init__()
        # Compression function for this layer
        self.compress = compress
        # Transformer size $d_{model}$
        self.size = d_model
        # Attention and FFN sub-modules
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # Shared dropout applied after attention and after the FFN
        self.dropout = nn.Dropout(dropout_prob)
        # Pre-norm layers for attention and FFN
        self.norm_self_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])

    def concat_memory(self, z: torch.Tensor, mem: Optional[torch.Tensor], c_mem: Optional[torch.Tensor]):
        """
        Prefix `z` with the normalized memories.

        * `z` is layer normalized token embeddings.
        * `mem` and `c_mem` are memory and compressed memory (not normalized).
        """
        # Without memory, attention keys/values are just the tokens
        if mem is None:
            return z

        # Compressed memories are the oldest, so they come first
        full_mem = mem if c_mem is None else torch.cat((c_mem, mem), dim=0)
        # Memories are stored un-normalized; normalize them before use
        normalized = self.norm_self_attn(full_mem)
        # Normalized memories followed by the normalized tokens
        return torch.cat((normalized, z), dim=0)

    def forward(self, *,
                x: torch.Tensor,
                mem: Optional[torch.Tensor],
                c_mem: Optional[torch.Tensor],
                mask: torch.Tensor):
        """
        * `x` is a tensor of token level feature vectors of shape `[seq_len, batch_size, d_model]`
        * `mem` is a tensor of the past token level feature vectors (memory) of shape `[mem_len, batch_size, d_model]`
        * `c_mem` is a tensor of the compressed memory `[c_mem_len, batch_size, d_model]`
        * `mask` is a matrix of shape `[seq_len, c_mem_len + mem_len + seq_len, batch_size]` or `[seq_len, c_mem_len + mem_len + seq_len, 1]`.
          `mask[i, j]` is true if token at `i` can see token at `j`.
        """
        # Pre-norm the tokens for attention
        z = self.norm_self_attn(x)
        # Keys/values span the normalized memories plus the normalized tokens
        kv = self.concat_memory(z, mem, c_mem)
        # Attention with residual connection
        attn_out = self.self_attn(query=z, key=kv, value=kv, mask=mask)
        x = x + self.dropout(attn_out)

        # Pre-norm feed-forward with residual connection
        x = x + self.dropout(self.feed_forward(self.norm_ff(x)))

        return x
class CompressiveTransformer(nn.Module):
    """
    ## Compressive Transformer Model

    A stack of compressive transformer layers with a final layer normalization.
    """

    def __init__(self, layer: CompressiveTransformerLayer, n_layers: int):
        super().__init__()
        # `n_layers` deep copies of the prototype layer
        self.layers = clone_module_list(layer, n_layers)
        # Final normalization layer
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, mem: List[torch.Tensor], c_mem: List[torch.Tensor], mask: torch.Tensor):
        """
        * `x` is a tensor of the token embeddings vectors of shape `[seq_len, batch_size, d_model]`
        * `mem` is a list of tensors of the past token level feature vectors of shape
          `[mem_len, batch_size, d_model]` for each layer
        * `c_mem` is a list of tensors of the compressed memory
          `[c_mem_len, batch_size, d_model]` for each layer
        * `mask` is the masking matrix
        """
        # The (detached) input to each layer becomes a memory for the next
        # sequential batch
        new_mem = []
        # Run through each transformer layer
        for idx, layer in enumerate(self.layers):
            # Record this layer's input as a future memory
            new_mem.append(x.detach())
            # Pick this layer's memory and compressed memory (if any)
            layer_mem = mem[idx] if mem else None
            layer_c_mem = c_mem[idx] if c_mem else None
            # Apply the layer
            x = layer(x=x, mem=layer_mem, c_mem=layer_c_mem, mask=mask)
        # Finally, normalize the vectors
        return self.norm(x), new_mem
class AttentionReconstructionLoss:
    """
    ## Attention Reconstruction Loss

    Attention reconstruction loss recreates the self-attention output with
    uncompressed memory and with compressed memory and calculates the mean squared error
    between the two. It does this without positional encoding.

    When calculating and training the compression function $f_c$ with attention
    reconstruction loss, all parameters but $f_c$ are frozen.
    This includes key/value projections and bias/scaling after normalization.

    Since this loss can be computed independently of the cross-entropy-loss of the model
    you can have a separate optimizer that only updates $f_c$.
    However, we use the same optimizer to update $f_c$ so when calculating
    attention reconstruction loss, we detach all other parameters except $f_c$
    from the gradient computation.
    """

    def __init__(self, layers: nn.ModuleList):
        """
        `layers` is the list of Compressive Transformer layers
        """
        self.layers = layers
        self.loss_func = nn.MSELoss()

    def prepare_for_attn(self, pmha: PrepareForMultiHeadAttention, x: torch.Tensor):
        """
        This is a reimplementation of ['PrepareForMultiHeadAttention'](../mha.html#PrepareMHA)
        where the projections are done with the parameters detached from gradient computation.

        * `pmha` is the ['PrepareForMultiHeadAttention'](../mha.html#PrepareMHA) module
        * `x` is tensor with the token embeddings
        """
        # Shape of the input except embedding dimension; `[seq_len, batch_size]`.
        head_shape = x.shape[:-1]

        # Detach projection weights and bias so only $f_c$ receives gradients
        weight = pmha.linear.weight.detach()
        bias = pmha.linear.bias.detach() if pmha.linear.bias is not None else None
        # Linear transform
        x = F.linear(x, weight, bias)

        # Split last dimension into heads
        x = x.view(*head_shape, pmha.heads, pmha.d_k)

        # Output has shape `[seq_len, batch_size, heads, d_k]` or `[batch_size, d_model]`
        return x

    def attn(self, layer: RelativeMultiHeadAttention, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor):
        """
        This is a reimplementation of ['Multi-Head Attention'](../mha.html#MHA) which calls
        `prepare_for_attn` instead of ['PrepareForMultiHeadAttention'](../mha.html#PrepareMHA)
        to detach projection parameters.

        Note: no positional encoding and no mask are applied here.
        """
        # Calculate query, key and value projections
        query = self.prepare_for_attn(layer.query, query)
        key = self.prepare_for_attn(layer.key, key)
        value = self.prepare_for_attn(layer.value, value)

        # Compute attention scores $Q K^\top$.
        # This gives a tensor of shape `[seq_len, seq_len, batch_size, heads]`.
        scores = torch.einsum('ibhd,jbhd->ijbh', query, key)

        # Scale scores $\frac{Q K^\top}{\sqrt{d_k}}$
        scores *= layer.scale
        # $softmax$ attention along the key sequence dimension
        # $\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)$
        attn = layer.softmax(scores)

        # Multiply by values
        # $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)V$$
        return torch.einsum("ijbh,jbhd->ibhd", attn, value)

    def norm(self, ln: nn.LayerNorm, x: torch.Tensor):
        """
        Perform layer normalization with shift and scale parameters detached.
        """
        # Detach shift(`bias`) and scaling(`weight`) parameters
        weight = ln.weight.detach() if ln.weight is not None else None
        bias = ln.bias.detach() if ln.bias is not None else None

        # Layer normalization
        return F.layer_norm(x, ln.normalized_shape, weight, bias, ln.eps)

    def calc_loss(self, layer: CompressiveTransformerLayer, h: torch.Tensor, mem: torch.Tensor):
        """
        This calculates the loss for a layer.

        * `h` is the layer's input activations
        * `mem` is the slice of memories that is being compressed
        """
        # Detach the token embeddings and memory.
        h = h.detach()
        mem = mem.detach()

        # Compress the memory with $f_c^{(i)}$.
        # The parameters of $f_c^{(i)}$ are the only parameters not detached from gradient computation.
        c_mem = layer.compress(mem)

        # Normalize the embeddings and memories
        h = self.norm(layer.norm_self_attn, h)
        mem = self.norm(layer.norm_self_attn, mem)
        c_mem = self.norm(layer.norm_self_attn, c_mem)

        # Calculate the attention with uncompressed memory
        attn_mem = self.attn(layer.self_attn, h, mem, mem)
        # Calculate the attention with compressed memory
        attn_cmem = self.attn(layer.self_attn, h, c_mem, c_mem)

        # Calculate the mean square error
        return self.loss_func(attn_cmem, attn_mem)

    def __call__(self, h: List[torch.Tensor], mem: List[torch.Tensor]):
        """
        Sum the per-layer reconstruction losses; `h[n]` and `mem[n]` belong to layer `n`.
        """
        # Calculate the losses for each layer
        losses = [self.calc_loss(layer, h[n], mem[n]) for n, layer in enumerate(self.layers)]
        # Sum of the losses
        return sum(losses)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/xl/experiment.py | labml_nn/transformers/xl/experiment.py | """
---
title: Transformer XL Experiment
summary: This experiment trains a transformer XL model on tiny Shakespeare dataset.
---
# Transformer XL Experiment
This is an annotated PyTorch experiment to train a transformer xl model.
"""
from typing import List
import torch
import torch.nn as nn
from labml import experiment, tracker, monit, logger
from labml.configs import option
from labml.logger import Text
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.helpers.metrics import SimpleStateModule
from labml_nn.helpers.trainer import BatchIndex
from labml_nn.transformers.xl import TransformerXL, TransformerXLLayer
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model

    Wraps a `TransformerXL` with a token embedding layer, a final projection
    to vocabulary logits, and lazily-built attention masks.
    """

    def __init__(self, n_vocab: int, d_model: int, transformer: TransformerXL):
        """
        * `n_vocab` is the vocabulary size
        * `d_model` is the token embedding size
        * `transformer` is the Transformer XL stack
        """
        super().__init__()
        # Token embedding module
        self.src_embed = nn.Embedding(n_vocab, d_model)
        # Transformer
        self.transformer = transformer
        # Final layer projecting features to vocabulary logits
        self.generator = nn.Linear(d_model, n_vocab)
        # Masks, built lazily in `forward` and cached across calls
        self.mask_x = None
        self.mask_mem = None

    def forward(self, x: torch.Tensor, mem: List[torch.Tensor]):
        """
        * `x` is the token-index input — assumed `[seq_len, batch_size]`; TODO confirm
        * `mem` is the list of per-layer memories from the previous call (may be empty)

        Returns logits and the new per-layer memories from the transformer.
        """
        # Length of the memory
        m_len = len(mem[0]) if mem else 0
        # Create a subsequent (causal) mask for tokens; rebuilt only when the
        # cached one is too small for the current sequence length
        if self.mask_x is None or self.mask_x.shape[0] < len(x):
            from labml_nn.transformers.utils import subsequent_mask
            self.mask_x = subsequent_mask(len(x)).to(x.device)
        # Create an all ones (full visibility) mask for memory;
        # rebuilt when the cached one is too small in either dimension
        if self.mask_mem is None or self.mask_mem.shape[1] < m_len or self.mask_mem.shape[0] < len(x):
            self.mask_mem = self.mask_x.new_ones(len(x), m_len, 1)

        # Concatenate the masks if there is memory
        if m_len:
            mask = torch.cat((self.mask_mem[:len(x), :m_len], self.mask_x[:len(x), :len(x)]), dim=1)
        # Use the subsequent mask otherwise
        else:
            mask = self.mask_x[:len(x), :len(x)]

        # Token embeddings
        x = self.src_embed(x)
        # Run it through the transformer
        res, mem = self.transformer(x, mem, mask)
        # Generate logits of the next token
        res = self.generator(res)
        #
        return res, mem
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    The default configs can and will be overridden when we start the experiment.
    Adds Transformer XL memory management on top of the base
    NLP auto-regression training loop.
    """

    model: AutoregressiveModel

    # Token embedding size
    d_model: int = 128
    # Number of attention heads
    heads: int = 4
    # Dropout probability
    dropout: float = 0.0
    # Number of features in FFN hidden layer
    d_ff: int = 256
    # Number of transformer layers
    n_layers: int = 6
    # Number of memories to keep
    mem_len: int = 128
    # State module to maintain memories when switching between training and validation
    memory = SimpleStateModule()

    def init(self):
        """
        Set up metric tracking and register state modules.
        """
        # Set tracker configurations
        tracker.set_scalar("accuracy.*", True)
        tracker.set_scalar("loss.*", True)
        # This will keep the accuracy metric stats and memories separate for training and validation.
        self.state_modules = [self.accuracy, self.memory]

    def merge_memory(self, old_mem, new_mem):
        """
        Concatenate memories and remove old memories to keep a maximum of
        `mem_len` memories.

        * `old_mem` and `new_mem` are lists of per-layer memory tensors
          (first dimension is the sequence/memory length).
        """
        # If it's configured not to use memory
        if self.mem_len == 0:
            return []

        # Concatenate with old memory
        if old_mem:
            mem = [torch.cat((m, x), dim=0) for m, x in zip(old_mem, new_mem)]
        else:
            mem = new_mem

        # Truncate old memories, keeping only the most recent `mem_len`
        if len(mem[0]) > self.mem_len:
            mem = [m[-self.mem_len:] for m in mem]

        #
        return mem

    def step(self, batch: any, batch_idx: BatchIndex):
        """
        ### Training/validation step

        * `batch` is a `(data, target)` pair of token-index tensors
        * `batch_idx` tracks position within the epoch
        """
        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # Update global step (number of tokens processed) when in training mode
        if self.mode.is_train:
            tracker.add_global_step(data.shape[0] * data.shape[1])

        # Get memories
        mem = self.memory.get()
        # Run the model
        output, new_mem = self.model(data, mem)
        # Merge memory
        mem = self.merge_memory(mem, new_mem)
        # Update memories
        self.memory.set(mem)

        # Calculate and log cross entropy loss
        loss = self.loss_func(output, target)
        tracker.add("loss.", loss)

        # Calculate and log accuracy
        self.accuracy(output, target)
        self.accuracy.track()

        # Train the model
        if self.mode.is_train:
            # Calculate gradients
            loss.backward()
            # Clip gradients
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
            # Take optimizer step
            self.optimizer.step()
            # Log the model parameters and gradients on last batch of every epoch
            if batch_idx.is_last:
                tracker.add('model', self.model)
            # Clear the gradients
            self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()

    def sample(self):
        """
        ### Sampling function to generate samples periodically while training

        Greedy decoding: after the first step only the last character is fed
        back in; the rest of the context lives in the transformer memories.
        """
        # Starting prompt
        prompt = self.prompt
        # Collect output for printing
        log = [(prompt, Text.subtle)]
        # memory
        mem = []
        # Sample 25 tokens
        for i in monit.iterate('Sample', 25):
            # Tokenize the prompt
            data = self.text.text_to_i(prompt).unsqueeze(-1)
            # Move to device
            data = data.to(self.device)
            # Get the model output
            output, new_mem = self.model(data, mem)
            # Get the model prediction (greedy)
            output = output.argmax(dim=-1).squeeze(1)
            # Add the prediction to prompt
            prompt += self.prompt_separator + self.text.itos[output[-1]]
            # Only feed the last character to model in next iteration, rest will go in as memories
            prompt = prompt[-1:]
            # Add the prediction for logging
            log += [(self.prompt_separator + self.text.itos[output[-1]], Text.value)]
            # Update memory
            mem = self.merge_memory(mem, new_mem)

        # Print the sampled output
        logger.log(log)
@option(Configs.model)
def autoregressive_model(c: Configs):
    """
    ### Initialize the auto-regressive model

    Builds the Transformer XL from the configuration values and moves it
    to the configured device.
    """
    from labml_nn.transformers.xl import RelativeMultiHeadAttention
    from labml_nn.transformers.feed_forward import FeedForward

    # Relative multi-head attention for the prototype layer
    attention = RelativeMultiHeadAttention(c.heads, c.d_model, c.dropout)
    # Position-wise feed-forward network
    ffn = FeedForward(c.d_model, c.d_ff, c.dropout)
    # Prototype layer; the transformer clones it `c.n_layers` times
    layer = TransformerXLLayer(d_model=c.d_model,
                               self_attn=attention,
                               feed_forward=ffn,
                               dropout_prob=c.dropout)
    model = AutoregressiveModel(c.n_tokens, c.d_model, TransformerXL(layer, c.n_layers))
    return model.to(c.device)
def main():
    """
    ### Run the experiment

    Creates the experiment, applies the configuration overrides, registers
    the model for checkpointing, and runs the training loop.
    """
    # Create experiment
    experiment.create(name="transformer_xl", comment='')
    # Create configs
    conf = Configs()
    # Configuration overrides for this run
    overrides = {
        'tokenizer': 'character',
        'text': 'tiny_shakespeare',
        'optimizer.learning_rate': 1.,
        'optimizer.optimizer': 'Noam',
        'prompt': 'It is',
        'prompt_separator': '',

        'train_loader': 'sequential_train_loader',
        'valid_loader': 'sequential_valid_loader',

        'seq_len': 2,
        'mem_len': 32,
        'epochs': 128,
        'batch_size': 32,
        'inner_iterations': 25,
    }
    # Load configurations
    experiment.configs(conf, overrides)

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment and run the training loop (`TrainValidConfigs.run`)
    with experiment.start():
        conf.run()
#
# Run the experiment only when this file is executed as a script (not on import)
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/xl/relative_mha.py | labml_nn/transformers/xl/relative_mha.py | """
---
title: Relative Multi-Headed Attention
summary: >
Documented implementation with explanations of
Relative Multi-Headed Attention from paper Transformer-XL.
---
# Relative Multi-Headed Attention
This is an implementation of relative multi-headed attention from paper
[Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860)
in [PyTorch](https://pytorch.org).
"""
import torch
from torch import nn
from labml.logger import inspect
from labml_nn.transformers.mha import MultiHeadAttention
def shift_right(x: torch.Tensor):
    """
    Shift the $i^{th}$ row of a matrix by $i$ columns.

    For input `[[1, 2, 3], [4, 5, 6], [7, 8, 9]]` the result is
    `[[1, 2, 3], [0, 4, 5], [6, 0, 7]]`.

    *Ideally we should mask out the lower triangle but it's ok for our purpose*.
    """
    # Append a zero column so each row gains one slot of slack
    zeros = x.new_zeros(x.shape[0], 1, *x.shape[2:])
    padded = torch.cat([x, zeros], dim=1)

    # Re-viewing the padded tensor with the first two sizes swapped realizes
    # the per-row shift; the final extra row absorbs the overflow
    reshaped = padded.view(x.shape[1] + 1, x.shape[0], *x.shape[2:])

    # Drop the overflow row and restore the original shape
    return reshaped[:-1].view_as(x)
class RelativeMultiHeadAttention(MultiHeadAttention):
    """
    ## Relative Multi-Head Attention Module

    Extends [Multi-Head Attention](mha.html) with the Transformer-XL
    relative positional encoding scheme; only the `get_scores` method
    needs to be overridden.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1):
        # The linear projections are bias-free because the positional bias
        # terms are added explicitly when computing the scores.
        # However having a bias for `value` might make sense.
        super().__init__(heads, d_model, dropout_prob, bias=False)

        # Maximum number of relative positions in each direction
        self.P = 2 ** 12

        # $\textcolor{orange}{R_k}$: relative positional embeddings for the key.
        # $2P$ entries are needed since a key can lie before or after the query.
        self.key_pos_embeddings = nn.Parameter(torch.zeros((self.P * 2, heads, self.d_k)), requires_grad=True)
        # $\textcolor{orange}{S_k}$: per-head relative positional bias for the key
        self.key_pos_bias = nn.Parameter(torch.zeros((self.P * 2, heads)), requires_grad=True)
        # $\textcolor{orange}{v}$: query-side positional bias, independent of
        # the position of the query
        self.query_pos_bias = nn.Parameter(torch.zeros((heads, self.d_k)), requires_grad=True)

    def get_scores(self, query: torch.Tensor, key: torch.Tensor):
        r"""
        ### Get relative attention scores

        With absolute positional encodings the attention score expands to

        \begin{align}
        A^{abs}_{j} &= lin_q(X^q_i + P_i)^\top lin_k(X^k_j + P_j) \\
        &= \underset{\textcolor{lightgreen}{A}}{Q_i^\top K_j} +
           \underset{\textcolor{lightgreen}{B}}{Q_i^\top U^K_j} +
           \underset{\textcolor{lightgreen}{C}}{{U^Q_i}^\top K_j} +
           \underset{\textcolor{lightgreen}{D}}{{U^Q_i}^\top U^K_j}
        \end{align}

        where $Q_i, K_j$ are linear transformations of the original
        embeddings $X^q_i, X^k_j$ and $U^Q_i, U^K_j$ are linear
        transformations of the absolute positional encodings $P_i, P_j$.

        Since attention to a given key should not depend on the position of
        the query, term $\textcolor{lightgreen}{C}$ is replaced by a constant
        $\textcolor{orange}{v^\top} K_j$. Terms $\textcolor{lightgreen}{B}$
        and $\textcolor{lightgreen}{D}$ use relative positional encodings:
        $Q_i^\top \textcolor{orange}{R_{i - j}}$ and
        $\textcolor{orange}{S_{i-j}}$ respectively, giving

        \begin{align}
        A^{rel}_{i,j} &= \underset{\mathbf{\textcolor{lightgreen}{A}}}{Q_i^\top K_j} +
        \underset{\mathbf{\textcolor{lightgreen}{B}}}{Q_i^\top \textcolor{orange}{R_{i - j}}} +
        \underset{\mathbf{\textcolor{lightgreen}{C}}}{\textcolor{orange}{v^\top} K_j} +
        \underset{\mathbf{\textcolor{lightgreen}{D}}}{\textcolor{orange}{S_{i-j}}}
        \end{align}
        """
        # Sequence lengths of keys and queries
        key_len = key.shape[0]
        query_len = query.shape[0]

        # Slice the embeddings $\textcolor{orange}{R_k}$ and biases
        # $\textcolor{orange}{S_k}$ covering the relative offsets in range
        rel_emb = self.key_pos_embeddings[self.P - key_len:self.P + query_len]
        rel_bias = self.key_pos_bias[self.P - key_len:self.P + query_len]

        # ${(\textcolor{lightgreen}{\mathbf{A + C}})}_{i,j} =
        # Q_i^\top K_j + \textcolor{orange}{v^\top} K_j$
        ac = torch.einsum('ibhd,jbhd->ijbh', query + self.query_pos_bias[None, None, :, :], key)

        # $\textcolor{lightgreen}{\mathbf{B'}_{i,k}} = Q_i^\top \textcolor{orange}{R_k}$
        b = torch.einsum('ibhd,jhd->ijbh', query, rel_emb)
        # $\textcolor{lightgreen}{\mathbf{D'}_{i,k}} = \textcolor{orange}{S_k}$
        d = rel_bias[None, :, None, :]

        # Shift rows of $\textcolor{lightgreen}{\mathbf{(B' + D')}_{i,k}}$ to
        # convert from offset indexing to position indexing, i.e.
        # $\textcolor{lightgreen}{\mathbf{(B + D)}_{i,j} = \mathbf{(B' + D')}_{i,i - j}}$,
        # then trim the surplus positions.
        bd = shift_right(b + d)[:, -key_len:]

        # Sum of all four terms
        return ac + bd
def _test_shift_right():
    """Visually verify `shift_right` on a few shapes."""
    # Square matrix: row $i$ shifts by $i$ columns
    m = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    inspect(m)
    inspect(shift_right(m))

    # Same number of rows as columns, with trailing singleton dimensions
    m = torch.arange(1, 6)[None, :, None, None].repeat(5, 1, 1, 1)
    inspect(m[:, :, 0, 0])
    inspect(shift_right(m)[:, :, 0, 0])

    # Fewer rows than columns
    m = torch.arange(1, 6)[None, :, None, None].repeat(3, 1, 1, 1)
    inspect(m[:, :, 0, 0])
    inspect(shift_right(m)[:, :, 0, 0])


if __name__ == '__main__':
    _test_shift_right()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/xl/__init__.py | labml_nn/transformers/xl/__init__.py | """
---
title: Transformer XL
summary: >
Documented implementation with explanations of a
Transformer-XL model.
---
# Transformer XL
This is an implementation of
[Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860)
in [PyTorch](https://pytorch.org).
Transformer has a limited attention span,
equal to the length of the sequence trained in parallel.
All these positions have a fixed positional encoding.
Transformer XL increases this attention span by letting
each of the positions pay attention to precalculated past embeddings.
For instance, if the context length is $l$, it will keep the embeddings of
all layers for the previous batch of length $l$ and feed them to the current step.
If we used fixed positional encodings, these pre-calculated embeddings would have
the same positions as the current context.
They introduce relative positional encoding, where the positional encodings
are introduced at the attention calculation.
Annotated implementation of relative multi-headed attention is in [`relative_mha.py`](relative_mha.html).
Here's [the training code](experiment.html) and a notebook for training a transformer XL model on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/xl/experiment.ipynb)
"""
from typing import List, Optional
import torch
import torch.nn as nn
from labml_nn.utils import clone_module_list
from .relative_mha import RelativeMultiHeadAttention
from ..feed_forward import FeedForward
class TransformerXLLayer(nn.Module):
    """
    ## Transformer XL Layer

    A single pre-norm transformer block with relative multi-head attention.
    The Transformer XL model stacks a number of these layers.
    """

    def __init__(self, *,
                 d_model: int,
                 self_attn: RelativeMultiHeadAttention,
                 feed_forward: FeedForward,
                 dropout_prob: float):
        """
        * `d_model` is the token embedding size
        * `self_attn` is the [self attention module](relative_mha.html)
        * `feed_forward` is the feed forward module
        * `dropout_prob` is the probability of dropping out after self attention and FFN
        """
        super().__init__()
        self.size = d_model
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # Shared dropout applied after attention and after the FFN
        self.dropout = nn.Dropout(dropout_prob)
        # Pre-normalization layers for the two sub-blocks
        self.norm_self_attn = nn.LayerNorm([d_model])
        self.norm_ff = nn.LayerNorm([d_model])

    def forward(self, *,
                x: torch.Tensor,
                mem: Optional[torch.Tensor],
                mask: torch.Tensor):
        """
        * `x` is a tensor of the token level feature vectors of shape `[seq_len, batch_size, d_model]`
        * `mem` is a tensor of the past token level feature vectors of shape `[mem_len, batch_size, d_model]`
        * `mask` is a matrix of shape `[seq_len, mem_len + seq_len, batch_size]` or `[seq_len, mem_len + seq_len, 1]`.
          `mask[i, j]` is true if token at `i` can see token at `j`.
        """
        # Normalize the current sequence before attention
        z = self.norm_self_attn(x)

        # Keys and values attend over the (normalized) memories followed by
        # the current sequence; without memory they are just the sequence.
        if mem is None:
            kv = z
        else:
            kv = torch.cat((self.norm_self_attn(mem), z), dim=0)

        # Attention with a residual connection
        attn_out = self.self_attn(query=z, key=kv, value=kv, mask=mask)
        x = x + self.dropout(attn_out)

        # Feed-forward network with a residual connection
        ff_out = self.feed_forward(self.norm_ff(x))
        return x + self.dropout(ff_out)
class TransformerXL(nn.Module):
    """
    ## Transformer XL Model

    Stacks `n_layers` copies of a transformer XL layer, followed by a final
    layer normalization.
    """

    def __init__(self, layer: TransformerXLLayer, n_layers: int):
        super().__init__()
        # Deep copies of the given transformer layer
        self.layers = clone_module_list(layer, n_layers)
        # Normalization applied to the final output
        self.norm = nn.LayerNorm([layer.size])

    def forward(self, x: torch.Tensor, mem: List[torch.Tensor], mask: torch.Tensor):
        """
        * `x` is a tensor of the token embeddings vectors of shape `[seq_len, batch_size, d_model]`
        * `mem` is a list of tensors of the past token level feature vectors of shape
          `[mem_len, batch_size, d_model]` for each layer
        * `mask` is the masking matrix
        """
        # Detached layer inputs collected here become the memories for the
        # next sequential batch.
        new_mem = []
        for idx, layer in enumerate(self.layers):
            # Save this layer's input as future memory
            new_mem.append(x.detach())
            # Run the layer with its corresponding memory (if any)
            x = layer(x=x, mem=mem[idx] if mem else None, mask=mask)
        # Normalize the final feature vectors
        return self.norm(x), new_mem
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.