repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/hour_glass/experiment.py | labml_nn/transformers/hour_glass/experiment.py | """
---
title: Hierarchical Transformers Are More Efficient Language Models Experiment
summary: This experiment trains an hourglass model on Tiny Shakespeare dataset.
---
# [Hierarchical Transformers Are More Efficient Language Models](index.html) Experiment
This is an annotated PyTorch experiment to train a [hourglass](index.html).
This is based on
[training loop and configurations for a simple transformer auto-regressive NLP task](../basic/autoregressive_experiment.html).
"""
import math
from typing import List
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers.hour_glass import HourGlass
from labml_nn.transformers.positional_encoding import PositionalEncoding
class AutoregressiveTransformer(nn.Module):
    """
    ## Autoregressive language model

    Wraps the [hourglass model](index.html) with token embeddings, positional
    encodings, and a final projection to vocabulary logits.
    """

    def __init__(self, n_tokens: int, d_model: int, dropout: float, hour_glass: HourGlass):
        """
        * `n_tokens` is the vocabulary size
        * `d_model` is the size of the token embeddings
        * `dropout` is the dropout probability
        * `hour_glass` is the [hourglass model](index.html)
        """
        super().__init__()
        # Token embeddings
        self.embedding = nn.Embedding(n_tokens, d_model)
        # [Fixed positional embeddings](../positional_encoding.html).
        #
        # Note: the
        # [official paper implementation](https://github.com/google/trax/blob/master/trax/models/research/hourglass.py)
        # uses [relative attention](../xl/relative_mha.html) instead.
        self.pos_embedding = PositionalEncoding(d_model, dropout)
        # [Hourglass model](index.html)
        self.hour_glass = hour_glass
        # To normalize the final embeddings
        self.norm = nn.LayerNorm([d_model])
        # Embedding size
        self.d_model = d_model
        # Final linear layer to predict the logits
        self.output = nn.Linear(d_model, n_tokens)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the tensor with token indexes of shape `[seq_len, batch_size]`

        Returns the logits and `None` (placeholder for a recurrent state, which
        this model does not have).

        Defined as `forward` (not `__call__`) so that `nn.Module.__call__` still
        dispatches through its hook machinery; overriding `__call__` directly on
        an `nn.Module` subclass would bypass forward/backward hooks.
        """
        # Get embeddings
        x = self.embedding(x)
        # Scale by $\sqrt{d_{model}}$ and add [positional embeddings](../positional_encoding.html)
        if self.pos_embedding is not None:
            x = self.pos_embedding(x * math.sqrt(self.d_model))
        # Hourglass
        x = self.hour_glass(x)
        # Get logits from the normalized final embeddings
        output = self.output(self.norm(x))
        # Return the logits
        return output, None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from
    [training loop and configurations for a simple transformer auto-regressive NLP task](../basic/autoregressive_experiment.html).
    """

    # Model
    model: AutoregressiveTransformer
    # Number of attention heads
    n_heads: int = 8
    # Dropout probability
    dropout: float = 0.1
    # Size of feed-forward hidden layer
    d_ff: int = 512
    # Token embedding size
    d_model: int = 256
    # Shortening factors (down-sampling rate at each hourglass level)
    shortening_factors: List[int] = [8, 4]
@option(Configs.model)
def _model(c: Configs):
    """
    Build the autoregressive hourglass model from the configuration values.
    """
    # The core [hourglass](index.html) stack
    core = HourGlass(c.n_heads, c.d_model, c.dropout, c.d_ff, c.shortening_factors)
    # Wrap it for auto-regressive language modeling and move to the configured device
    model = AutoregressiveTransformer(c.n_tokens, c.d_model, c.dropout, core)
    return model.to(c.device)
def main():
    """Run the hourglass experiment on the Tiny Shakespeare dataset."""
    # Create experiment
    experiment.create(name="hour_glass")
    # Create configs
    conf = Configs()
    # Configuration overrides
    overrides = {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Use a context size of $256$
        'seq_len': 256,
        # Train for $128$ epochs
        'epochs': 128,
        # Batch size $32$
        'batch_size': 32,
        # Switch between training and validation $10$ times per epoch
        'inner_iterations': 10,
        # Use [Noam optimizer](../../optimizers/noam.html)
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,
    }
    experiment.configs(conf, overrides)
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/hour_glass/__init__.py | labml_nn/transformers/hour_glass/__init__.py | """
---
title: Hierarchical Transformers Are More Efficient Language Models
summary: >
This is an annotated implementation/tutorial of hourglass model in PyTorch.
---
# Hierarchical Transformers Are More Efficient Language Models
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Hierarchical Transformers Are More Efficient Language Models](https://arxiv.org/abs/2110.13711).
This paper introduces a hierarchical transformer architecture to handle long sequences
efficiently. The first half of the transformer layers down-sample tokens and the second
half up-samples with direct skip connections between layers of the same resolution.
This is a little similar to [U-Net](../../diffusion/ddpm/unet.html) for vision tasks.
They try different up-sampling and down-sampling techniques and build a model
with the best performing up and down-sampling techniques which they call the
hourglass model.
Here we have implemented the simplest up-sampling and down-sampling techniques for simplicity.
We will consider adding more complex (and better performing) implementations later.
Here is [the training code](experiment.html) for the hourglass model.
"""
from typing import List
import torch
from torch import nn
from labml_nn.transformers import MultiHeadAttention, TransformerLayer
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.utils import subsequent_mask
class HourGlass(nn.Module):
    """
    ## Hourglass model

    This model recursively adds layers to the middle while shortening the sequence by down-sampling.
    The shortened sequence processed by another hourglass model is sandwiched between two normal transformer
    layers. (A transformer layer has a [self-attention layer](../mha.html)
    and a [position-wise feed-forward layer](../feed_forward.html)).
    """

    def __init__(self, n_heads: int, d_model: int, dropout: float, d_ff: int, shortening_factors: List[int]):
        """
        * `n_heads` is the number of heads in [multi-head attention layers](../mha.html)
        * `d_model` is the size of the token embeddings
        * `dropout` is the dropout probability
        * `d_ff` is the dimensionality of the hidden layer in [position-wise feed-forward layers](../feed_forward.html)
        * `shortening_factors` is the list of shortening factors
        """
        super().__init__()
        # The transformer layer before down-sampling
        self.pre = TransformerLayer(d_model=d_model,
                                    # [Multi-head attention layer](../mha.html)
                                    self_attn=MultiHeadAttention(n_heads, d_model, dropout),
                                    # [Position wise feed-forward layers](../feed_forward.html)
                                    feed_forward=FeedForward(d_model, d_ff, dropout),
                                    #
                                    dropout_prob=dropout)
        # Auto-regressive mask
        self.mask = AutoregressiveMask()
        # The shortening factor $k$ (or the down-sampling rate)
        k = shortening_factors[0]
        # We shift the tokens to the right by $k - 1$ steps to make sure
        # information doesn't leak from the future tokens to past tokens
        # as a result of down-sampling and up-sampling
        self.shift_right = ShiftRight(k - 1)
        # Shortening or the down-sampling layer. We use the simplest form - average pooling.
        # The paper shows that attention based down sampling works best, which we haven't implemented yet.
        self.shortening = AvgPoolShortening(k)
        # If there are no more shortening (middle of the hourglass)
        if len(shortening_factors) == 1:
            # The center layer is another transformer layer
            self.shortened = TransformerLayer(d_model=d_model,
                                              self_attn=MultiHeadAttention(n_heads, d_model, dropout),
                                              feed_forward=FeedForward(d_model, d_ff, dropout),
                                              dropout_prob=dropout)
            # Autoregressive mask
            self.mask_short = AutoregressiveMask()
            # No deeper hourglass; marks the recursion base case used in `forward`
            self.hour_glass = None
        else:
            # Insert another hourglass model recursively
            self.hour_glass = HourGlass(n_heads, d_model, dropout, d_ff, shortening_factors[1:])
        # Up-sampling layer. We use naive up-sampling for simplicity and the paper shows attention based up sampling
        # works better.
        self.up_sampling = NaiveUpSampling(k)
        # The final transformer layer after up-sampling
        self.post = TransformerLayer(d_model=d_model,
                                     self_attn=MultiHeadAttention(n_heads, d_model, dropout),
                                     feed_forward=FeedForward(d_model, d_ff, dropout),
                                     dropout_prob=dropout)

    def forward(self, x: torch.Tensor):
        # Initial transformer layer
        # $$x \leftarrow PreVanillaLayers(x)$$
        x = self.pre(x=x, mask=self.mask(x))
        # Shifting and shortening
        # $$x' \leftarrow Shortening(ShiftRight(x, k - 1), k)$$
        x_short = self.shortening(self.shift_right(x))
        # If we are at the center of the hourglass,
        # $$\textbf{\small if } \text{\small E\scriptsize MPTY}(shorten\_factors) \textbf{\small then}$$
        if self.hour_glass is None:
            # Center transformer layer
            # $$x' \leftarrow ShortenedLayers(x')$$
            x_short = self.shortened(x=x_short, mask=self.mask_short(x_short))
        # $$\textbf{else}$$
        else:
            # $$x' \leftarrow \text{\small H\scriptsize OURGLASS}(x, shorten\_factors)$$
            x_short = self.hour_glass(x_short)
        # Up-sample the shortened sequence and add a skip connection
        # $$x \leftarrow x + Upsampling(x, x', k)$$
        x = x + self.up_sampling(x, x_short)
        # Final transformer layer
        # $$x \leftarrow PostVanillaLayers(x)$$
        x = self.post(x=x, mask=self.mask(x))
        #
        return x
class ShiftRight(nn.Module):
    """
    ### Shift right operation

    Shifts a sequence to the right by a fixed number of steps,
    padding the front with zeros and dropping the overflow at the end.
    """

    def __init__(self, shift: int):
        """
        * `shift` is the number of steps to shift by
        """
        super().__init__()
        # A negative shift makes no sense
        assert shift >= 0
        #
        self.shift = shift

    def forward(self, x: torch.Tensor):
        """
        * `x` is a tensor of shape `[seq_len, ...]`
        """
        # A zero shift is the identity
        if not self.shift:
            return x
        # Zero padding for the front of the sequence
        padding = x.new_zeros((self.shift, *x.shape[1:]))
        # Prepend the zeros and drop the same number of trailing steps
        return torch.cat((padding, x[:-self.shift]))
class AvgPoolShortening(nn.Module):
    """
    ### Average pool shortening

    Down-samples the sequence by a given factor using average pooling.
    """

    def __init__(self, k: int):
        """
        * `k` is the shortening factor
        """
        super().__init__()
        # Average pooling layer; `ceil_mode` keeps a partial window at the end
        self.pool = nn.AvgPool1d(k, ceil_mode=True)

    def forward(self, x: torch.Tensor):
        """
        * `x` is of shape `[seq_len, batch_size, d_model]`
        """
        # `nn.AvgPool1d` expects `[batch_size, d_model, seq_len]`,
        # so move the sequence axis to the end, pool, and move it back.
        pooled = self.pool(x.movedim(0, -1))
        return pooled.movedim(-1, 0)
class NaiveUpSampling(nn.Module):
    """
    ### Naive up-sampling

    Up-samples by repeating each shortened embedding `k` times.
    """

    def __init__(self, k: int):
        """
        * `k` is the shortening factor
        """
        super().__init__()
        self.k = k

    def forward(self, x: torch.Tensor, x_short: torch.Tensor):
        """
        * `x` is the tensor with embeddings before down-sampling
        * `x_short` is the tensor of higher density (to be up-sampled) representations
        """
        # Target sequence length is that of the original (pre-shortening) tensor
        seq_len = x.shape[0]
        # Repeat each shortened step `k` times along the sequence axis,
        # then truncate to the original length.
        return x_short.repeat_interleave(self.k, dim=0)[:seq_len]
class AutoregressiveMask(nn.Module):
    """
    ### Generate auto-regressive mask

    Lazily builds and caches a [subsequent mask](../utils.html) matching the
    input's sequence length and device.
    """

    def __init__(self):
        super().__init__()
        # Cached mask; built on first use. Note this is a plain attribute
        # (not a registered buffer), so `.to()`/`.cuda()` will not move it.
        self.mask = None

    def forward(self, x: torch.Tensor):
        # Rebuild the mask if it hasn't been created yet, the sequence length
        # changed, or the input moved to a different device (the cached mask
        # is not a buffer, so it isn't moved by `module.to(...)`).
        if self.mask is None or self.mask.size(0) != len(x) or self.mask.device != x.device:
            # [Subsequent mask](../utils.html), will mask out tokens from seeing future tokens
            self.mask = subsequent_mask(len(x)).to(x.device)
        #
        return self.mask
class LinearPoolingShortening(nn.Module):
    """
    ### 🚧 Linear pooling for down-sampling

    Concatenates the embeddings of the consecutive tokens that need to be merged
    and applies a linear transformation mapping the result back to the size of a
    single token embedding. Not implemented yet.
    """

    def __init__(self):
        super().__init__()
        # Placeholder — construction fails until implemented
        raise NotImplementedError
class AttentionBasedShortening(nn.Module):
    """
    ### 🚧 Down-sampling with attention

    \begin{align}
    x' &= S(x) + Attention \Big(Q=S(x),K = x, V =x \Big) \\
    x' &= x' + FFN(x')
    \end{align}

    where $S(x)$ is average pooling or linear pooling. Not implemented yet.
    """

    def __init__(self):
        super().__init__()
        # Placeholder — construction fails until implemented
        raise NotImplementedError
class LinearUpSampling(nn.Module):
    """
    ### 🚧 Linear projection for up-sampling

    Makes a linear projection of dense token embeddings to a size of
    $d_{\text{model}} k$. Not implemented yet.
    """

    def __init__(self):
        super().__init__()
        # Placeholder — construction fails until implemented
        raise NotImplementedError
class AttentionBasedUpSampling(nn.Module):
    """
    ### 🚧 Attention based up-sampling

    \begin{align}
    x &= U(x,x') + Attention \Big(Q=U(x,x'),K = x', V = x' \Big) \\
    x &= x + FFN(x)
    \end{align}

    where $U(x,x') = x + LinearUpsampling(x')$. Not implemented yet.
    """

    def __init__(self):
        super().__init__()
        # Placeholder — construction fails until implemented
        raise NotImplementedError
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/flash/__init__.py | labml_nn/transformers/flash/__init__.py | """
---
title: Flash Attention
summary: >
This is a PyTorch/Triton implementation of Flash Attention 2
with explanations.
---
# Flash Attention
Flash attention speeds up transformer attention mechanism by reducing the number of
memory reads/writes between GPU high bandwidth memory (HBM) and GPU on-chip SRAM.
It's introduced in paper
[FlashAttention: Fast and Memory-Efficient Exact Attention with IO-Awareness](https://arxiv.org/abs/2205.14135)
and further optimized in paper
[FlashAttention-2: Faster Attention with Better Parallelism and Work Partitioning](https://arxiv.org/abs/2307.08691).
Official CUDA implementation can be found at [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention).
Our implementation is based on the
[Triton's example implementation](https://triton-lang.org/main/getting-started/tutorials/06-fused-attention.html).
*Note: You can click on the mathematical symbols or identifiers to highlight them*.
You can run [test.py](./test.html) to see correctness and measure performance of this implementation.
## Forward pass
Here's the attention forward pass. The formulas represent a single attention head.
$Q_i$ is query vector (row vector) at position $i$
and $K_j$ and $V_j$ are the key and value row vectors at position $j$.
$O_i$ is the output vector at position $i$.
\begin{align}
S_{ij} &= \sigma Q_i K_j^T
\\
L_i &= \sum_j e^{S_{ij}}
\\
P_{ij} &= \frac{e^{S_{ij}}}{L_i}
\\
O_i &= \sum_j P_{ij} V_j
\\
&= \frac{1}{L_i} \sum_j e^{S_{ij}} V_j
\end{align}
$S_{ij}$ is the attention score matrix before softmax,
$L_i$ is the softmax denominator,
and $P_{ij}$ is the attention matrix after softmax.
#### Flash Attention Optimization
You can compute $O_i$, instead of doing the full softmax,
by computing the sum of exponents $l_i$ and the unnormalized output $\tilde{O}_i$
while iterating over keys:
\begin{align}
S_{ij} &= \sigma Q_i K_j^T
\\
l_i &\leftarrow l_i + e^{S_{ij}}
\\
\tilde{O}_i &\leftarrow \tilde{O}_i + e^{S_{ij}} V_j
\end{align}
Finally you can compute,
$$O_i = \frac{\tilde{O}_i}{l_i}$$
To make it numerically stable flash attention subtracts the current max of $S_{ij}$ before exponentiating.
So it maintains the following while iterating over keys:
* $m_i$, the max $S_{ij}$
* $l_i$, the sum of exponents $\sum_j e^{S_{ij} - m_i}$, and
* $\tilde{O}_i$, the unnormalized output
For each block of keys $j_1 \dots j_2$ it updates them:
\begin{align}
m_i^{\text{new}} &= \max(m_i, \max_{j=j1}^{j2} S_{ij})
\\
\tilde{P}_{ij} &= \exp(S_{ij} - m_i^{\text{new}})
\\
l_i &\leftarrow e^{m_i - m_{i}^{\text{new}}} l_i + \sum_{j=j1}^{j2} \tilde{P}_{ij}
\\
\tilde{O}_i &\leftarrow e^{m_i - m_{i}^{\text{new}}} \tilde{O}_i + \tilde{P}_{ij} * V_j
\\
m_i &\leftarrow m_{i}^{\text{new}}
\end{align}
Then finally,
$$O_i = \frac{\tilde{O}_i}{l_i}$$
This reduces the memory usage since we don't have to compute full $S_{ij}$ matrix or $P_{ij}$ matrix.
It also speeds up since we don't have to load these large matrices.
Instead it only loads blocks of $K$ and $V$ as it iterates over them.
## Backward pass
Here's the standard backward pass. $dO_i$ is the gradient vector on the output $O_i$
\begin{align}
dV_j &= \sum_i P_{ij} dO_i
\\
dP_{ij} &= dO_{i} V_j^T
\\
dS_{ij} &= d\text{softmax}(dP_{ij})
\\
&= \sum_k P_{ik} (\delta_{jk} - P_{ij}) dP_{ik}
\\
&= P_{ij} dP_{ij} - P_{ij} \sum P_{ik} dP_{ik}
\\
dQ_i &= \sigma \sum_j dS_{ij} K_j
\\
dK_j &= \sigma \sum_i dS_{ij} Q_i
\end{align}
where $\delta_{jk}$ is $1$ when $j = k$ and $0$ otherwise.
Flash attention paper introduces $D_i$ to simplify $dS$ computation.
\begin{align}
D_i &= \sum_k P_{ik} dP_{ik}
\\
&= \sum_k P_{ik} dO_i V_k^T
\\
&= dO_i \sum_k P_{ik} V_k^T
\\
&= dO_i O_i^T
\end{align}
Then,
\begin{align}
dS_{ij} = P_{ij} dP_{ij} - D_i P_{ij}
\end{align}
Flash attention saves $L_i$ from the forward pass since it doesn't take much memory.
So during the backward pass it doesn't have to keep computing $l_i$ or $m_i$.
It first computes $D_i$.
Then it iterates over the queries and compute (accumulate) $dK_j$ and $dV_j$.
Finally it iterates over the keys and compute (accumulate) $dQ_i$.
In both forward and backward pass we calculate logarithms and exponentials of $2$ instead of $e$ for performance.
"""
from typing import Any, Tuple
import torch
import triton
import triton.language as tl
# High-precision dtype used for intermediate accumulations inside Triton kernels
HI_PRES_TL: tl.constexpr = tl.float32
# Matching high-precision dtype on the PyTorch side (e.g. for the LSE tensor)
HI_PRES_TORCH: torch.dtype = torch.float32
class AttentionFunc(torch.autograd.Function):
    """
    Autograd wrapper around the Triton flash-attention forward and backward kernels.
    Supports grouped-query attention (`n_heads` must be a multiple of `k_heads`).
    """

    @staticmethod
    def forward(ctx: Any,
                q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
                causal: bool, sm_scale: float) -> torch.Tensor:
        """
        ### Forward pass

        Group query attention forward pass. Returns the output in shape `[batch_size, n_heads, q_seq_len, d_head]`.

        :param ctx: is the context for torch gradient descent
        :param q: has shape `[batch_size, n_heads, q_seq_len, d_head]`
        :param k: has shape `[batch_size, k_heads, kv_seq_len, d_head]`
        :param v: has shape `[batch_size, k_heads, kv_seq_len, d_head]`
        :param causal: whether to apply causal attention mask
        :param sm_scale: softmax scale factor $\sigma$
        """
        batch_size, n_heads, q_seq_len, d_head = q.shape
        _, k_heads, kv_seq_len, _ = k.shape
        # Grouped-query attention: query heads are grouped per key/value head
        assert n_heads % k_heads == 0
        n_groups = n_heads // k_heads

        # Shape constraints
        assert d_head == k.shape[-1] == v.shape[-1]
        assert d_head in {16, 32, 64, 128, 256}

        # Change the tensors combining the heads with the batch dimension
        q = q.view(batch_size * k_heads, n_groups, q_seq_len, d_head)
        k = k.view(batch_size * k_heads, kv_seq_len, d_head)
        v = v.view(batch_size * k_heads, kv_seq_len, d_head)

        # Make sure the tensors are contiguous and the strides are same
        assert q.is_contiguous()
        assert k.is_contiguous()
        assert v.is_contiguous()
        assert k.stride() == v.stride()

        # Tensor for the output
        o = torch.empty_like(q)
        # Tensor for log of sum of exponentials $\log_2 L_i = \log_2 \sum_j e^{S_{ij}}$
        lse = torch.empty((batch_size * k_heads, n_groups, q_seq_len), device=q.device, dtype=HI_PRES_TORCH)

        # The forward computation will be parallelized along the batch dimension and the queries in blocks of size `BLOCK_Q`
        grid = lambda meta: (triton.cdiv(q_seq_len, meta["BLOCK_Q"]), batch_size * k_heads * n_groups, 1)
        # `1.4426950408889634` is $\log_2 e$; the kernel works in base-2 exponents
        _attn_fwd[grid](
            q, k, v, sm_scale * 1.4426950408889634, lse, o,
            n_groups=n_groups,
            q_seq_len=q_seq_len,
            kv_seq_len=kv_seq_len,
            d_head=d_head,
            is_causal=causal,
        )

        # Save the reshaped inputs and outputs for the backward pass
        ctx.save_for_backward(q, k, v, o, lse)
        ctx.sm_scale = sm_scale
        ctx.n_groups = n_groups
        ctx.causal = causal

        # Return the output in shape `[batch_size, n_heads, q_seq_len, d_head]`
        return o.view(batch_size, n_heads, q_seq_len, d_head)

    @staticmethod
    def backward(ctx: Any, do: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, None, None]:
        """
        ### Backward pass

        The backward pass computes the gradients of the input tensors.

        :param ctx: is the context for torch gradient descent
        :param do: is the gradient tensor of the attention output with shape `[batch_size, n_heads, q_seq_len, d_head]`
        """
        # Get saved tensors and attributes
        n_groups = ctx.n_groups
        sm_scale = ctx.sm_scale
        causal = ctx.causal
        q, k, v, o, lse = ctx.saved_tensors

        # Get shapes
        batch_size, n_heads, q_seq_len, d_head = do.shape
        _, kv_seq_len, _ = k.shape
        k_heads = n_heads // n_groups

        # Combine the heads with the batch dimension of the output gradients tensor
        do = do.view(batch_size * k_heads, n_groups, q_seq_len, d_head)

        # Make sure it's contiguous and the strides are the same
        assert do.is_contiguous()
        assert k.stride() == v.stride()
        assert q.stride() == o.stride() == do.stride()

        # Create tensors for input gradients
        dq = torch.empty_like(q)
        dk = torch.empty_like(k)
        dv = torch.empty_like(v)

        # Precompute $\sigma (\log_2 e) K_j$
        k_scaled = k * (sm_scale * 1.4426950408889634)
        # $D_i = P^T_{i:}dP_{i:} = do^T_io_i$
        pdp = torch.empty_like(lse)

        # We use fixed `BLOCK_Q` for backward pass on $D$
        # Compute $D_i$
        #
        # This is parallelized along the batch and query in blocks of size `BLOCK_Q`
        BLOCK_Q = 16
        pre_grid = (triton.cdiv(q_seq_len, BLOCK_Q), batch_size * k_heads)
        _attn_bwd_d[pre_grid](
            o, do,
            pdp,
            BLOCK_Q=16,
            d_head=d_head,
            q_seq_len=q_seq_len,
            n_groups=n_groups,
            num_stages=1,
        )

        # Compute $dK$ and $dV$
        #
        # This is parallelized along the batch and keys in blocks of size `BLOCK_K`
        grid = lambda meta: (triton.cdiv(kv_seq_len, meta['BLOCK_K']), batch_size * k_heads)
        _attn_bwd_dkdv[grid](
            q, k_scaled, v, sm_scale, do, dk, dv,
            lse, pdp,
            q_seq_len, kv_seq_len, n_groups, d_head,
            is_causal=causal,
        )

        # Compute $dQ$
        #
        # This is parallelized along the batch and queries in blocks of size `BLOCK_Q`
        grid = lambda meta: (triton.cdiv(q_seq_len, meta['BLOCK_Q']), batch_size * k_heads * n_groups)
        _attn_bwd_dq[grid](
            q, k_scaled, v, do,
            dq,
            lse, pdp,
            q_seq_len, kv_seq_len, n_groups, d_head,
            is_causal=causal,
        )

        # Split the combined batch and heads
        dq = dq.view(batch_size, n_heads, q_seq_len, d_head)
        dk = dk.view(batch_size, k_heads, kv_seq_len, d_head)
        dv = dv.view(batch_size, k_heads, kv_seq_len, d_head)

        #
        return dq, dk, dv, None, None
attention = AttentionFunc.apply
def _get_autotune_configs(inner_loop: str) -> list:
    """
    #### Configs for auto-tuning

    Enumerates `(BLOCK_Q, BLOCK_K, num_stages, num_warps)` candidates,
    skipping combinations that the kernels cannot handle.
    """
    selected = []
    # Candidate block sizes along the query and key dimensions
    for block_q in [64, 128, 256]:
        for block_k in [64, 128, 256]:
            # If the inner loop is along keys, `BLOCK_Q` must be a multiple of `BLOCK_K` for causal masking
            if inner_loop == 'key' and block_q % block_k != 0:
                continue
            # Similarly when the inner loop is along queries
            if inner_loop == 'query' and block_k % block_q != 0:
                continue
            # Number of pipeline stages and warps
            for stages in [2, 3, 4]:
                for warps in [4, 8]:
                    # 8 warps only pay off for large blocks
                    if warps == 8 and block_q * block_k < 128 * 128:
                        continue
                    selected.append(triton.Config({'BLOCK_Q': block_q, 'BLOCK_K': block_k},
                                                  num_stages=stages, num_warps=warps))
    # **Use `return selected` to autotune. Trying all combinations is slow for testing.**
    return selected[:1]
@triton.autotune(_get_autotune_configs(inner_loop='key'),
                 key=["q_seq_len", "kv_seq_len", "d_head", "n_groups", "is_causal"])
@triton.jit
def _attn_fwd(t_q, t_k, t_v, sm_scale_log2e, t_lse, t_o,
              n_groups: tl.constexpr,
              q_seq_len: tl.constexpr,
              kv_seq_len: tl.constexpr,
              d_head: tl.constexpr,
              is_causal: tl.constexpr,
              BLOCK_Q: tl.constexpr,
              BLOCK_K: tl.constexpr,
              ):
    """
    ### Triton kernel for Flash attention forward pass

    :param t_q: queries $Q_i$
    :param t_k: keys $K_j$
    :param t_v: values $V_j$
    :param sm_scale_log2e: $\sigma \log_2 e$ softmax scale multiplied by $\log_2 e$
    :param t_lse: $\log_2 \sum_j e^{S_{ij}}$ (out)
    :param t_o: $O_i$ output
    :param n_groups: number of groups in GQA
    :param q_seq_len: query sequence length
    :param kv_seq_len: key/value sequence length
    :param d_head: number of dimensions in a head
    :param BLOCK_Q: block size for query sequence length
    :param BLOCK_K: block size for key sequence length
    :param is_causal: whether causal attention

    Strides `z`, `h`, `m` and `d` denote the stride of the corresponding dimensions
    (`batch_size`, `n_heads`, `q_seq_len`, `d_head`) in the query.
    Stride `n` denote the stride on `kv_seq_len` of key.
    """
    # We are computing the attention for $O_i$ for `i` ... `i + BLOCK_Q` in batch/head combination $z$.
    i = tl.program_id(0)
    z = tl.program_id(1) // n_groups
    g = tl.program_id(1) % n_groups

    # #### Create block pointers
    p_q = tl.make_block_ptr(t_q + z * n_groups * q_seq_len * d_head + g * q_seq_len * d_head,
                            (q_seq_len, d_head),
                            (d_head, 1),
                            (i * BLOCK_Q, 0),
                            (BLOCK_Q, d_head),
                            (1, 0))
    p_v = tl.make_block_ptr(t_v + z * kv_seq_len * d_head,
                            (kv_seq_len, d_head),
                            (d_head, 1),
                            (0, 0),
                            (BLOCK_K, d_head),
                            (1, 0))
    p_kT = tl.make_block_ptr(t_k + z * kv_seq_len * d_head,
                             (d_head, kv_seq_len),
                             (1, d_head),
                             (0, 0),
                             (d_head, BLOCK_K),
                             (0, 1))
    p_o = tl.make_block_ptr(t_o + z * n_groups * q_seq_len * d_head + g * q_seq_len * d_head,
                            (q_seq_len, d_head),
                            (d_head, 1),
                            (i * BLOCK_Q, 0),
                            (BLOCK_Q, d_head),
                            (1, 0))
    p_lse = tl.make_block_ptr(t_lse + z * n_groups * q_seq_len + g * q_seq_len,
                              (q_seq_len,),
                              (1,),
                              (i * BLOCK_Q,),
                              (BLOCK_Q,),
                              (0,))

    # Initialize offsets
    offs_i = i * BLOCK_Q + tl.arange(0, BLOCK_Q)
    offs_j = tl.arange(0, BLOCK_K)
    # Mask for $Q$ for the last block
    i_mask = offs_i < q_seq_len

    # Initialize $m_i$ and $l_i$. $m_i$ is initialized to $-\inf$ and $l_i$ to $1$. So in the first update,
    # the effect of initial $l_i$ is $e^{m_i - m_{i}^{\text{new}}} l_i = 0$.
    #
    # `b_m` will be storing $m_i \log_2 e$
    b_m = tl.where(i_mask, -float("inf"), 0.0)
    b_l = tl.where(i_mask, 1.0, 0.0)

    # $O_i$
    b_o = tl.zeros([BLOCK_Q, d_head], dtype=HI_PRES_TL)

    # Load $Q_i$ outside the loop since it will be reused through out the loop over $K_j$.
    b_q = tl.load(p_q, boundary_check=(0,), padding_option="zero")

    if is_causal:
        # Inner loop upto the diagonal block
        b_o, b_l, b_m = _attn_fwd_inner(b_o, b_l, b_m, b_q,
                                        p_kT, p_v,
                                        sm_scale_log2e,
                                        BLOCK_Q, d_head, BLOCK_K,
                                        offs_i, offs_j,
                                        j=tl.full([], 0, tl.int32),  # type: ignore
                                        steps=(i * BLOCK_Q) // BLOCK_K,
                                        MASK=False,
                                        q_seq_len=q_seq_len,
                                        kv_seq_len=kv_seq_len
                                        )
        # Diagonal block with masking within it
        b_o, b_l, b_m = _attn_fwd_inner(b_o, b_l, b_m, b_q, p_kT, p_v,
                                        sm_scale_log2e,
                                        BLOCK_Q, d_head, BLOCK_K,
                                        offs_i, offs_j,
                                        j=i * BLOCK_Q,
                                        steps=BLOCK_Q // BLOCK_K,
                                        MASK=True,
                                        q_seq_len=q_seq_len,
                                        kv_seq_len=kv_seq_len
                                        )
    else:
        # Iterate through all $K_j$
        b_o, b_l, b_m = _attn_fwd_inner(b_o, b_l, b_m, b_q, p_kT, p_v,
                                        sm_scale_log2e,
                                        BLOCK_Q, d_head, BLOCK_K,
                                        offs_i, offs_j,
                                        j=tl.full([], 0, tl.int32),  # type: ignore
                                        steps=tl.cdiv(kv_seq_len, BLOCK_K),
                                        MASK=False,
                                        q_seq_len=q_seq_len,
                                        kv_seq_len=kv_seq_len
                                        )

    # Store LSE $\log_2 L_i = \log_2 \big( l_i * e^{m_i} \big) = \log_2 l_i + m_i \log_2 e$
    tl.store(p_lse, b_m + tl.math.log2(b_l), boundary_check=(0,))
    # Store $O_i = \frac{\tilde{O}_i}{l_i}$
    tl.store(p_o, (b_o / b_l[:, None]).to(t_o.type.element_ty), boundary_check=(0,))
@triton.jit
def _attn_fwd_inner(b_o, b_l, b_m, b_q,
                    p_kT, p_v,
                    sm_scale_log2e,
                    BLOCK_Q: tl.constexpr,
                    d_head: tl.constexpr,
                    BLOCK_K: tl.constexpr,
                    offs_i, offs_j,
                    j,
                    steps,
                    MASK: tl.constexpr,
                    q_seq_len: tl.constexpr,
                    kv_seq_len: tl.constexpr
                    ):
    """
    #### Inner loop to calculate $O_i$

    This iterates through keys and values starting from `j` for `steps` number of steps.
    In each step it processes `BLOCK_K` entries of keys/values.
    """
    tl.static_assert(BLOCK_Q % BLOCK_K == 0)

    # Move $K_j$ and $V_j$ pointers
    p_kT = tl.advance(p_kT, (0, j))
    p_v = tl.advance(p_v, (j, 0))

    # Iterate over $K$, $V$ and update $\tilde{O}_i$ and $l_i$
    for _ in range(steps):
        # Load $K_j^T$
        b_kT = tl.load(p_kT, boundary_check=(1,), padding_option="zero")
        # Compute $(\log 2) S_ij = (\log 2) \sigma Q_i K_j^T$
        b_s = tl.dot(b_q, b_kT, out_dtype=HI_PRES_TL)
        b_s = b_s * sm_scale_log2e

        # Apply causal mask
        if MASK:
            causal_mask = offs_i[:, None] >= (j + offs_j[None, :])
            b_s = tl.where(causal_mask, b_s, -float("inf"))

        # Mask out if the block is beyond the end of $K_j$
        j_mask = (j + offs_j) < kv_seq_len
        b_s = tl.where(j_mask[None, :], b_s, -float("inf"))

        # $(\log_2 e) m_{i}^{\text{new}} = \max((\log_2 e) m_i, \max_{j=j1}^{j2} (\log_2 e) S_{ij})$
        b_m_new = tl.maximum(b_m, tl.max(b_s, -1))
        # \begin{align}
        # \tilde{P}_{ij} &= e^{S_{ij} - m_i^{\text{new}}}
        # \\
        # &= 2^{(\log_2 e) S_{ij} - (\log_2 e) m_i^{\text{new}}}
        # \end{align}
        b_p = tl.math.exp2(b_s - b_m_new[:, None])
        # $\sum_{j=j1}^{j2} \tilde{P}_{ij}$
        b_l_new = tl.sum(b_p, -1)
        # $e^{m_i - m_{i}^{\text{new}}}$
        b_m_m_new = tl.math.exp2(b_m - b_m_new)
        # $l_i \leftarrow e^{m_i - m_{i}^{\text{new}}} l_i + \sum_{j=j1}^{j2} \tilde{P}_{ij}$
        b_l = b_l * b_m_m_new + b_l_new

        # $O_i \leftarrow e^{m_i - m_{i}^{\text{new}}} O_i + \tilde{P}_{ij} V_j$
        b_o = b_o * b_m_m_new[:, None]
        b_p = b_p.to(b_q.dtype)  # TODO
        b_v = tl.load(p_v, boundary_check=(0,), padding_option="zero")
        b_o += tl.dot(b_p, b_v, out_dtype=HI_PRES_TL)

        # $(\log_2 e) m_i \leftarrow (\log_2 e) m_{i}^{\text{new}}$
        b_m = b_m_new

        # Move pointers
        j += BLOCK_K
        p_v = tl.advance(p_v, (BLOCK_K, 0))
        p_kT = tl.advance(p_kT, (0, BLOCK_K))

    tl.static_assert(b_o.dtype == HI_PRES_TL, "attn_fwd_inner requires accumulator to be in HI_PRES_TL precision")

    return b_o, b_l, b_m
@triton.jit
def _attn_bwd_d(t_o, t_do,
                t_pdp,
                BLOCK_Q: tl.constexpr, d_head: tl.constexpr,
                q_seq_len: tl.constexpr,
                n_groups: tl.constexpr,
                ):
    """
    #### Triton kernel to compute $D_i = dO_i O_i^T$

    One program handles `BLOCK_Q` queries across all GQA groups
    for one batch/key-head combination `z`.
    """
    i = tl.program_id(0) * BLOCK_Q
    z = tl.program_id(1)

    # Create block pointers
    p_o = tl.make_block_ptr(t_o + z * n_groups * q_seq_len * d_head,
                            (n_groups, q_seq_len, d_head),
                            (q_seq_len * d_head, d_head, 1),
                            (0, i, 0),
                            (n_groups, BLOCK_Q, d_head),
                            (2, 1, 0))
    p_do = tl.make_block_ptr(t_do + z * n_groups * q_seq_len * d_head,
                             (n_groups, q_seq_len, d_head),
                             (q_seq_len * d_head, d_head, 1),
                             (0, i, 0),
                             (n_groups, BLOCK_Q, d_head),
                             (2, 1, 0))
    p_pdp = tl.make_block_ptr(t_pdp + z * n_groups * q_seq_len,
                              (n_groups, q_seq_len),
                              (q_seq_len, 1),
                              (0, i),
                              (n_groups, BLOCK_Q),
                              (1, 0))

    # Load $O_i$
    o = tl.load(p_o, boundary_check=(1,), padding_option="zero")
    # Load $dO_i$
    do = tl.load(p_do, boundary_check=(1,), padding_option="zero").to(HI_PRES_TL)

    # Calculate $D_i = dO_i O_i^T$
    d = tl.sum(o * do, axis=-1)

    # Save $D_i$
    tl.store(p_pdp, d, boundary_check=(1,))
@triton.autotune(_get_autotune_configs(inner_loop='query'),
                 key=["q_seq_len", "kv_seq_len", "d_head", "n_groups", "is_causal"])
@triton.jit
def _attn_bwd_dkdv(t_q, t_k, t_v, sm_scale,
                   t_do,
                   t_dk, t_dv,
                   t_lse, t_pdp,
                   q_seq_len: tl.constexpr, kv_seq_len: tl.constexpr,
                   n_groups: tl.constexpr, d_head: tl.constexpr,
                   is_causal: tl.constexpr,
                   BLOCK_Q: tl.constexpr,
                   BLOCK_K: tl.constexpr,
                   ):
    """
    #### Triton kernel to compute $dK_j$ and $dV_j$

    * `t_q`, `t_k`, `t_v` are the query, key, and value tensors
    * `sm_scale` is the softmax scale $\sigma$
    * `t_do` is $dO$, the gradient w.r.t. the attention output
    * `t_dk`, `t_dv` are where $dK$ and $dV$ get stored
    * `t_lse` holds the log-sum-exp values saved by the forward pass
    * `t_pdp` holds the precomputed $D_i$ values
    * `is_causal` selects autoregressive masking
    """
    # Compute $dK_j$ and $dV_j$ for `j` ... `j + BLOCK_K` by iterating over $Q_i$
    j = tl.program_id(0) * BLOCK_K
    # Index along the leading dimension (presumably batch x KV-head,
    # matching the pointer offsets — TODO confirm against the launch grid)
    z = tl.program_id(1)
    # Create block pointers
    p_k = tl.make_block_ptr(t_k + z * kv_seq_len * d_head,
                            (kv_seq_len, d_head),
                            (d_head, 1),
                            (j, 0),
                            (BLOCK_K, d_head),
                            (1, 0))
    p_v = tl.make_block_ptr(t_v + z * kv_seq_len * d_head,
                            (kv_seq_len, d_head),
                            (d_head, 1),
                            (j, 0),
                            (BLOCK_K, d_head),
                            (1, 0))
    p_dk = tl.make_block_ptr(t_dk + z * kv_seq_len * d_head,
                             (kv_seq_len, d_head),
                             (d_head, 1),
                             (j, 0),
                             (BLOCK_K, d_head),
                             (1, 0))
    p_dv = tl.make_block_ptr(t_dv + z * kv_seq_len * d_head,
                             (kv_seq_len, d_head),
                             (d_head, 1),
                             (j, 0),
                             (BLOCK_K, d_head),
                             (1, 0))
    # Initialize accumulators for $\frac{1}{\sigma} dK$ and $dV$ in high precision
    b_dk = tl.zeros([BLOCK_K, d_head], dtype=HI_PRES_TL)
    b_dv = tl.zeros([BLOCK_K, d_head], dtype=HI_PRES_TL)
    # Load $\frac{\sigma}{\log 2} K$ and $V$ outside the loop;
    # they are reused by every query group
    b_k = tl.load(p_k, boundary_check=(0,), padding_option="zero")
    b_v = tl.load(p_v, boundary_check=(0,), padding_option="zero")
    # Iterate through queries in GQA; each KV head serves `n_groups` query heads
    for g in range(n_groups):
        # Create block pointers for the current query group
        p_qT = tl.make_block_ptr(t_q + z * n_groups * q_seq_len * d_head + g * q_seq_len * d_head,
                                 (d_head, q_seq_len),
                                 (1, d_head),
                                 (0, 0),
                                 (d_head, BLOCK_Q),
                                 (0, 1))
        p_do = tl.make_block_ptr(t_do + z * n_groups * q_seq_len * d_head + g * q_seq_len * d_head,
                                 (q_seq_len, d_head),
                                 (d_head, 1),
                                 (0, 0),
                                 (BLOCK_Q, d_head),
                                 (1, 0))
        p_lse = tl.make_block_ptr(t_lse + z * n_groups * q_seq_len + g * q_seq_len,
                                  (q_seq_len,),
                                  (1,),
                                  (0,),
                                  (BLOCK_Q,),
                                  (0,))
        p_pdp = tl.make_block_ptr(t_pdp + z * n_groups * q_seq_len + g * q_seq_len,
                                  (q_seq_len,),
                                  (1,),
                                  (0,),
                                  (BLOCK_Q,),
                                  (0,))
        if is_causal:
            # Inner loop at the diagonal block, where autoregressive masking is needed
            b_dk, b_dv = _attn_bwd_dkdv_inner(
                b_dk, b_dv,
                p_qT, b_k, b_v, p_do,
                p_lse, p_pdp,
                BLOCK_Q, BLOCK_K,
                d_head,
                j=j, i=j,
                steps=BLOCK_K // BLOCK_Q,
                MASK=True,
                q_seq_len=q_seq_len,
                kv_seq_len=kv_seq_len,
            )
            # Inner loop on queries after the diagonal; no masking needed there
            b_dk, b_dv = _attn_bwd_dkdv_inner(
                b_dk, b_dv,
                p_qT, b_k, b_v, p_do,
                p_lse, p_pdp,
                BLOCK_Q, BLOCK_K,
                d_head,
                j=j, i=j + BLOCK_K,
                steps=tl.cdiv((q_seq_len - (j + BLOCK_K)), BLOCK_Q),
                MASK=False,
                q_seq_len=q_seq_len,
                kv_seq_len=kv_seq_len
            )
        else:
            # Iterate through all queries
            b_dk, b_dv = _attn_bwd_dkdv_inner(
                b_dk, b_dv,
                p_qT, b_k, b_v, p_do,
                p_lse, p_pdp,
                BLOCK_Q, BLOCK_K,
                d_head,
                j=j, i=tl.full([], 0, tl.int32),
                steps=tl.cdiv(q_seq_len, BLOCK_Q),
                MASK=False,
                q_seq_len=q_seq_len,
                kv_seq_len=kv_seq_len
            )
    # Save $dV$
    tl.store(p_dv, b_dv.to(t_dv.type.element_ty), boundary_check=(0,))
    # `b_dk` had $\frac{1}{\sigma} dK$; scale it back before storing
    b_dk *= sm_scale
    # Save $dK$
    tl.store(p_dk, b_dk.to(t_dk.type.element_ty), boundary_check=(0,))
@triton.jit
def _attn_bwd_dkdv_inner(b_dk, b_dv,
                         p_qT, b_k, b_v, p_do,
                         p_lse, p_pdp,
                         BLOCK_Q: tl.constexpr, BLOCK_K: tl.constexpr,
                         d_head: tl.constexpr,
                         j, i, steps,
                         MASK: tl.constexpr,
                         q_seq_len: tl.constexpr,
                         kv_seq_len: tl.constexpr):
    """
    #### Inner loop to calculate $dK_j$, $dV_j$

    Iterates `steps` query blocks starting at query row `i`, accumulating into
    `b_dk` and `b_dv` for the key/value block that starts at row `j`.

    * `b_dk`, `b_dv` are the high-precision accumulators
    * `p_qT`, `p_do`, `p_lse`, `p_pdp` are block pointers to $Q^T$, $dO$,
      the log-sum-exp values, and the precomputed $D_i$
    * `b_k`, `b_v` are the already-loaded key and value blocks
    * `MASK` enables the autoregressive mask (used on diagonal blocks)
    """
    # To apply the mask, the key block must span a whole number of query blocks
    tl.static_assert(BLOCK_K % BLOCK_Q == 0)
    # Offsets and mask
    offs_i = i + tl.arange(0, BLOCK_Q)
    offs_j = j + tl.arange(0, BLOCK_K)
    # Move the pointers to the first query block
    p_qT = tl.advance(p_qT, (0, i))
    p_do = tl.advance(p_do, (i, 0))
    p_lse = tl.advance(p_lse, (i,))
    p_pdp = tl.advance(p_pdp, (i,))
    # Iterate over $Q$
    for _ in range(steps):
        # Load $Q_i^T$
        b_qT = tl.load(p_qT, boundary_check=(1,), padding_option="zero")
        # $log_2 L_i$
        b_l = tl.load(p_lse, boundary_check=(0,), padding_option="zero")
        # $(\log_2 e) S_{ij}^T = \sigma (\log_2 e) K_j Q_i^T$
        b_sT = tl.dot(b_k, b_qT, out_dtype=HI_PRES_TL)
        # \begin{align}
        # P_{ij} &= \frac{e^{S_{ij}}}{L_i}
        # \\
        # &= \frac{2^{(log_2 e) S_{ij}}}{2^{\log_2 L_i}}
        # \\
        # &= 2^{(log_2 e) S_{ij} - \log_2 L_i}
        # \end{align}
        b_pT = tl.math.exp2(b_sT - b_l[None, :])
        # Autoregressive masking
        if MASK:
            mask = (offs_i[None, :] >= offs_j[:, None])
            b_pT = tl.where(mask, b_pT, 0.0)
        # Mask out if the block is beyond the end of $Q_i$
        #
        # Note: No need to mask out based on $j$
        # because the effects on positions outside boundary will not get stored in $dK$ or $dV$
        # Masking by $i$ may also not be necessary since the tensors have 0 on loading
        i_mask = offs_i < q_seq_len
        b_pT = tl.where(i_mask[None, :], b_pT, 0.0)
        # $dV_j = \sum_i P_{ij} dO_i$
        b_do = tl.load(p_do, boundary_check=(0,), padding_option="zero")
        b_dv += tl.dot(b_pT.to(b_do.dtype), b_do, out_dtype=HI_PRES_TL)
        # $D_i$
        b_pdp = tl.load(p_pdp, boundary_check=(0,), padding_option="zero")
        # $dP_{ij} = V_j dO_i^T$
        b_dpT = tl.dot(b_v, tl.trans(b_do), out_dtype=HI_PRES_TL).to(HI_PRES_TL)
        # $dS_{ij} = P_{ij} \big( dP_{ij} - D_i \big)$
        b_dsT = b_pT * (b_dpT - b_pdp[None, :])
        # $\frac{1}{\sigma} dK_j = \sum_i dS_{ij} Q_i$
        b_dk += tl.dot(b_dsT.to(b_qT.dtype), tl.trans(b_qT), out_dtype=HI_PRES_TL)
        # Increment pointers to the next query block
        offs_i += BLOCK_Q
        p_lse = tl.advance(p_lse, (BLOCK_Q,))
        p_pdp = tl.advance(p_pdp, (BLOCK_Q,))
        p_qT = tl.advance(p_qT, (0, BLOCK_Q))
        p_do = tl.advance(p_do, (BLOCK_Q, 0))
    # Return accumulated $dK$ and $dV$
    return b_dk, b_dv
@triton.autotune(_get_autotune_configs(inner_loop='key'),
key=["q_seq_len", "kv_seq_len", "d_head", "n_groups", "is_causal"])
@triton.jit
def _attn_bwd_dq(t_q, t_k, t_v, t_do,
t_dq,
t_lse, t_pdp,
q_seq_len: tl.constexpr, kv_seq_len: tl.constexpr,
n_groups: tl.constexpr, d_head: tl.constexpr,
is_causal: tl.constexpr,
BLOCK_Q: tl.constexpr,
BLOCK_K: tl.constexpr,
):
"""
#### Triton kernel to compute $dQ_i$
"""
i = tl.program_id(0) * BLOCK_Q
z = tl.program_id(1) // n_groups
g = tl.program_id(1) % n_groups # TODO
# Create block pointers
p_q = tl.make_block_ptr(t_q + z * n_groups * q_seq_len * d_head + g * q_seq_len * d_head,
(q_seq_len, d_head),
(d_head, 1),
(i, 0),
(BLOCK_Q, d_head),
(1, 0))
p_dq = tl.make_block_ptr(t_dq + z * n_groups * q_seq_len * d_head + g * q_seq_len * d_head,
(q_seq_len, d_head),
(d_head, 1),
(i, 0),
(BLOCK_Q, d_head),
(1, 0))
p_do = tl.make_block_ptr(t_do + z * n_groups * q_seq_len * d_head + g * q_seq_len * d_head,
(q_seq_len, d_head),
(d_head, 1),
(i, 0),
(BLOCK_Q, d_head),
(1, 0))
p_kT = tl.make_block_ptr(t_k + z * kv_seq_len * d_head,
(d_head, kv_seq_len),
(1, d_head),
(0, 0),
(d_head, BLOCK_K),
(0, 1))
p_vT = tl.make_block_ptr(t_v + z * kv_seq_len * d_head,
(d_head, kv_seq_len),
(1, d_head),
(0, 0),
(d_head, BLOCK_K),
(0, 1))
p_lse = tl.make_block_ptr(t_lse + z * n_groups * q_seq_len + g * q_seq_len,
(q_seq_len,),
(1,),
(i,),
(BLOCK_Q,),
(0,))
p_pdp = tl.make_block_ptr(t_pdp + z * n_groups * q_seq_len + g * q_seq_len,
(q_seq_len,),
(1,),
(i,),
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | true |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/flash/test.py | labml_nn/transformers/flash/test.py | """
### Test Flash Attention Implementation
This is the code to test and measure performance of our flash attention implementation
"""
import torch
import triton
from labml import logger, monit
from labml_nn.transformers.flash import attention
HI_PRES_TORCH = torch.float32
@torch.no_grad()
def _calc_abs_rel_error(a: torch.Tensor, b: torch.Tensor, atol=1e-2):
"""
#### Calculate absolute and relative error for reporting
"""
d = (a - b).abs()
max_abs = d.max()
d = (d - atol).clamp(min=0)
d = d / b.abs()
max_rel = d.max()
return max_abs.cpu().item(), max_rel.cpu().item()
def test_fwd_bwd(batch_size, n_heads, k_heads, q_seq_len, kv_seq_len, d_head, causal, dtype, device):
    """
    #### Compare our implementation with naive PyTorch attention

    Runs a forward and backward pass through a plain PyTorch attention
    (the reference) and through our Triton `attention`, then checks that the
    outputs and the gradients of $Q$, $K$, and $V$ agree within tolerance.

    * `n_heads` is the number of query heads; `k_heads` is the number of
      key/value heads (GQA) — `n_heads` is assumed divisible by `k_heads`
      for the grouped `view` below
    * `causal` selects autoregressive masking
    """
    with monit.section(f'Init {q_seq_len} {kv_seq_len} {d_head}'):
        # Fixed seed so failures are reproducible
        torch.manual_seed(20)
        # Inputs with gradients enabled
        q = (torch.empty((batch_size, n_heads, q_seq_len, d_head),
                         dtype=dtype, device=device).normal_(mean=0.0, std=0.5).requires_grad_())
        k = (torch.empty((batch_size, k_heads, kv_seq_len, d_head),
                         dtype=dtype, device=device).normal_(mean=0.0, std=0.5).requires_grad_())
        v = (torch.empty((batch_size, k_heads, kv_seq_len, d_head),
                         dtype=dtype, device=device).normal_(mean=0.0, std=0.5).requires_grad_())
        # Softmax scale
        sm_scale = d_head ** -0.5
        # Upstream gradient for the backward pass
        d_out = torch.randn_like(q)
        # reference implementation: lower-triangular mask for causal attention
        mask = torch.tril(torch.ones((q_seq_len, kv_seq_len), device=device, dtype=torch.bool))
    torch.cuda.synchronize()
    with monit.section('Pytorch'):
        # Group query heads per KV head and compute the scaled attention scores
        p = torch.matmul(q.view(batch_size, k_heads, -1, q_seq_len, d_head),
                         k.transpose(2, 3)[:, :, None, :, :]) * sm_scale
        if causal:
            p[:, :, :, ~mask] = float("-inf")
        # Softmax in high precision, then back to the working dtype
        p = torch.softmax(p.to(HI_PRES_TORCH), dim=-1).to(dtype)
        ref_out = torch.matmul(p, v[:, :, None, :, :])
        ref_out = ref_out.view(q.shape)
        # Backward pass; capture the gradients and reset them for the Triton run
        ref_out.backward(d_out)
        ref_dv, v.grad = v.grad.clone(), None
        ref_dk, k.grad = k.grad.clone(), None
        ref_dq, q.grad = q.grad.clone(), None
    torch.cuda.synchronize()
    with monit.section('Triton'):
        assert q.dtype == dtype
        tri_out = attention(q, k, v, causal, sm_scale).to(dtype)
        monit.progress(0.5)
        tri_out.backward(d_out)
        monit.progress(0.9)
        # Capture and reset gradients
        tri_dv, v.grad = v.grad.clone(), None  # type: ignore
        tri_dk, k.grad = k.grad.clone(), None  # type: ignore
        tri_dq, q.grad = q.grad.clone(), None  # type: ignore
    torch.cuda.synchronize()
    with monit.section('Test') as s:
        # compare the Triton results with the reference values
        passed = True
        if not torch.allclose(tri_out, ref_out, atol=1e-2, rtol=0.):
            abs_err, rel_err = _calc_abs_rel_error(ref_out, tri_out)
            logger.log(('[FAILED]', logger.Text.danger), f' Out mismatch {abs_err} {rel_err}')
            passed = False
        # Gradients get a looser relative tolerance
        rtol = 1e-1
        if not torch.allclose(tri_dq, ref_dq, atol=1e-2, rtol=rtol):
            abs_err, rel_err = _calc_abs_rel_error(ref_dq, tri_dq)
            logger.log(('[FAILED]', logger.Text.danger), f' dQ mismatch {abs_err} {rel_err}')
            passed = False
        if not torch.allclose(tri_dv, ref_dv, atol=1e-2, rtol=rtol):
            abs_err, rel_err = _calc_abs_rel_error(ref_dv, tri_dv)
            logger.log(('[FAILED]', logger.Text.danger), f' dV mismatch {abs_err} {rel_err}')
            passed = False
        if not torch.allclose(tri_dk, ref_dk, atol=1e-2, rtol=rtol):
            abs_err, rel_err = _calc_abs_rel_error(ref_dk, tri_dk)
            logger.log(('[FAILED]', logger.Text.danger), f' dK mismatch {abs_err} {rel_err}')
            passed = False
        if passed:
            logger.log('[PASSED]', logger.Text.success)
            s.success = True
        else:
            s.success = False
    torch.cuda.synchronize()
def _perf_triton_fn(*, device, dtype, batch_size, k_heads, n_groups, seq_len, d_head, causal):
    """
    Build a zero-argument callable that runs a forward pass of our Triton
    attention implementation, for benchmarking.
    """
    # Queries have `k_heads * n_groups` heads (GQA); keys/values have `k_heads`
    kv_shape = (batch_size, k_heads, seq_len, d_head)
    q = torch.randn((batch_size, k_heads * n_groups, seq_len, d_head),
                    dtype=dtype, device=device, requires_grad=True)
    k = torch.randn(kv_shape, dtype=dtype, device=device, requires_grad=True)
    v = torch.randn(kv_shape, dtype=dtype, device=device, requires_grad=True)
    # Softmax scale
    scale = d_head ** -0.5

    def run():
        return attention(q, k, v, causal, scale)

    return run
def _perf_flash(*, batch_size, k_heads, n_groups, seq_len, d_head, causal, device, dtype):
    """
    Build a zero-argument callable that runs a forward pass of the original
    `flash_attn` implementation, for benchmarking.
    """
    # `flash_attn` uses a `[batch, seq_len, heads, d_head]` layout
    kv_shape = (batch_size, seq_len, k_heads, d_head)
    q = torch.randn((batch_size, seq_len, k_heads * n_groups, d_head),
                    dtype=dtype, device=device, requires_grad=True)
    k = torch.randn(kv_shape, dtype=dtype, device=device, requires_grad=True)
    v = torch.randn(kv_shape, dtype=dtype, device=device, requires_grad=True)
    from flash_attn import flash_attn_func

    def run():
        return flash_attn_func(q, k, v, causal=causal)

    return run
def measure_performance(name, fn, *, batch_size, k_heads, n_groups, seq_len, d_head, causal, is_bwd: bool):
    """
    ### Measure the speed

    Benchmarks `fn` with `triton.testing.do_bench` and logs the wall time
    along with achieved TFLOPs estimated from the attention problem size.
    """
    # For the backward benchmark, run the forward pass once outside the timer
    # and time only repeated backward passes
    if is_bwd:
        out = fn()
        grad = torch.randn_like(out)

        def fn():
            return out.backward(grad, retain_graph=True)

    # Wall-clock time in milliseconds
    ms = triton.testing.do_bench(fn)
    # FLOPs of one score/value matmul: 2 * batch * heads * seq^2 * d_head
    matmul_flops = 2.0 * batch_size * k_heads * n_groups * seq_len * seq_len * d_head
    # Attention does two such matmuls ($QK^T$ and $PV$)
    total_flops = 2 * matmul_flops
    if causal:
        # Only half the score matrix is computed with a causal mask
        total_flops *= 0.5
    if is_bwd:
        total_flops *= 2.5  # 2.0(bwd) + 0.5(recompute)
    # Tera-FLOPs per second
    tf_ps = total_flops * 1e-12 / (ms * 1e-3)
    logger.log((f'{name}', logger.Text.key), ': ', f'{ms :,.1f}ms', ' ', f'{tf_ps :,.2f}TFps')
def main():
    """
    Run the correctness tests, then benchmark against the official
    `flash_attn` implementation.
    """
    # Tests and benchmarks need a CUDA device
    device = torch.device('cuda:0')
    torch.cuda.set_device(device)
    dtype = torch.float16
    # only works on post-Ampere GPUs right now
    for args in [(1, 4, 1, 2048, 2048, 128, True),
                 (16, 32, 8, 2001, 4001, 128, False),
                 (4, 32, 8, 2048, 1024, 128, False),
                 (4, 32, 8, 2001, 4001, 128, True)]:
        test_fwd_bwd(*args, dtype=dtype, device=device)

    # Common benchmark problem size
    sizes = {
        'batch_size': 16,
        'k_heads': 8,
        'n_groups': 4,
        'seq_len': 2048,
        'd_head': 128,
    }
    # Benchmark every combination of causal/non-causal and forward/backward
    for causal in [False, True]:
        for is_bwd in [False, True]:
            logger.log(f'{"Causal" if causal else "Non-causal"} {" Backward" if is_bwd else ""}', logger.Text.title)
            measure_performance('flash', _perf_flash(causal=causal, device=device, dtype=dtype, **sizes),
                                is_bwd=is_bwd,
                                causal=causal, **sizes)
            measure_performance('triton', _perf_triton_fn(causal=causal, device=device, dtype=dtype, **sizes),
                                is_bwd=is_bwd,
                                causal=causal, **sizes)
# Run tests and benchmarks when executed as a script
if __name__ == "__main__":
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/mlp_mixer/experiment.py | labml_nn/transformers/mlp_mixer/experiment.py | """
---
title: MLP Mixer experiment
summary: This experiment trains MLP Mixer on Tiny Shakespeare dataset.
---
# [MLP Mixer](index.html) Experiment
This is an annotated PyTorch experiment to train an [MLP Mixer Model](index.html).
"""
from labml import experiment
from labml.configs import option
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.configs import FeedForwardConfigs
from labml_nn.transformers.mlm.experiment import TransformerMLM, Configs as MLMConfigs
class Configs(MLMConfigs):
    """
    ## Configurations

    This inherits from
    [`MLMConfigs`](../mlm/experiment.html) where we define an experiment for
    [Masked Language Models](../mlm/index.html).
    """

    # Configurable [Feed-Forward Network](../feed_forward.html) for the MLP
    # that mixes across the token (sequence) dimension
    mix_mlp: FeedForwardConfigs
@option(Configs.mix_mlp)
def _mix_mlp_configs(c: Configs):
    """
    Default configurations for the token-mixing MLP
    """
    ffn = FeedForwardConfigs()
    # The MLP is applied across tokens, so its input size is the sequence length
    ffn.d_model = c.seq_len
    # The paper suggests $GELU$ activation
    ffn.activation = 'GELU'
    #
    return ffn
@option(Configs.transformer)
def _transformer_configs(c: Configs):
    """
    ### Transformer configurations

    Uses our [configurable transformer implementation](../configs.html#TransformerConfigs)
    with the attention layer swapped out for an MLP mixer.
    """
    cfg = TransformerConfigs()
    # Vocabulary sizes for the embedding layer and the logit generator
    cfg.n_src_vocab = c.n_tokens
    cfg.n_tgt_vocab = c.n_tokens
    # Embedding size
    cfg.d_model = c.d_model
    # Replace the attention module with [MLPMixer](index.html)
    from labml_nn.transformers.mlp_mixer import MLPMixer
    cfg.encoder_attn = MLPMixer(c.mix_mlp.ffn)
    #
    return cfg
def main():
    """
    Create and run the MLP-Mixer MLM experiment.
    """
    # Create experiment
    experiment.create(name="mlp_mixer_mlm")
    # Create configs
    configs = Configs()
    # Configuration overrides
    overrides = {
        # Batch size
        'batch_size': 64,
        # Sequence length of $32$. We use a short sequence length to train faster.
        # Otherwise MLM models take forever to train.
        'seq_len': 32,
        # Train for 1024 epochs.
        'epochs': 1024,
        # Switch between training and validation $1$ time per epoch
        'inner_iterations': 1,
        # Transformer configurations
        'd_model': 128,
        'transformer.ffn.d_ff': 256,
        'transformer.n_heads': 8,
        'transformer.n_layers': 6,
        'transformer.ffn.activation': 'GELU',
        # Mixer MLP hidden layer size
        'mix_mlp.d_ff': 128,
        # Use [Noam optimizer](../../optimizers/noam.html)
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,
    }
    experiment.configs(configs, overrides)
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': configs.model})
    # Start the experiment and run training
    with experiment.start():
        configs.run()
#
# Run the experiment when executed as a script
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/mlp_mixer/__init__.py | labml_nn/transformers/mlp_mixer/__init__.py | """
---
title: "MLP-Mixer: An all-MLP Architecture for Vision"
summary: >
This is an annotated implementation/tutorial of MLP-Mixer: An all-MLP Architecture for Vision in PyTorch.
---
# MLP-Mixer: An all-MLP Architecture for Vision
This is a [PyTorch](https://pytorch.org) implementation of the paper
[MLP-Mixer: An all-MLP Architecture for Vision](https://arxiv.org/abs/2105.01601).
This paper applies the model on vision tasks.
The model is similar to a transformer, with the attention layer replaced by an MLP
that is applied across the patches (or tokens in the case of an NLP task).
Our implementation of MLP Mixer is a drop in replacement for the [self-attention layer](../mha.html)
in [our transformer implementation](../models.html).
So it's just a couple of lines of code, transposing the tensor to apply the MLP
across the sequence dimension.
Although the paper applied MLP Mixer on vision tasks,
we tried it on a [masked language model](../mlm/index.html).
[Here is the experiment code](experiment.html).
"""
from typing import Optional
import torch
from torch import nn
class MLPMixer(nn.Module):
    """
    ## MLP Mixer

    A drop-in replacement for the [self-attention layer](../mha.html).

    The input is transposed so that the wrapped MLP acts along the sequence
    dimension (across tokens or image patches) instead of the feature
    dimension, then transposed back.
    """

    def __init__(self, mlp: nn.Module):
        """
        * `mlp` is the MLP module applied across the sequence dimension.
        """
        super().__init__()
        self.mlp = mlp

    def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """
        Mirrors the signature of the [attention module](../mha.html) so that
        it can replace it directly.

        For MLP mixing, `query`, `key` and `value` must all be the same
        tensor `x`, and masking is not possible.

        Shape of `query` (and `key` and `value`) is `[seq_len, batch_size, d_model]`.
        """
        # Mixing requires a single input: all three must be the very same tensor
        assert query is key and key is value
        # MLP mixer doesn't support masking; every token sees every other token
        assert mask is None

        # Swap the sequence and feature dimensions;
        # shape becomes `[d_model, batch_size, seq_len]`
        mixed = query.transpose(0, 2)
        # Apply the MLP across tokens
        mixed = self.mlp(mixed)
        # Restore the original `[seq_len, batch_size, d_model]` layout
        return mixed.transpose(0, 2)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/gpt/__init__.py | labml_nn/transformers/gpt/__init__.py | """
---
title: GPT
summary: >
Implementation/tutorial of GPT model and training code.
---
# GPT
This is a tutorial/implementation of
[OpenAI GPT architecture](https://openai.com/blog/better-language-models/)
in [PyTorch](https://pytorch.org).
We got a bunch of implementation details from
[minGPT](https://github.com/karpathy/minGPT)
by [@karpathy](https://twitter.com/karpathy).
This implementation also uses character tiny shakespeare dataset.
GPT model is essentially a standard transformer with a few tweaks.
GPT-2 and especially GPT-3 models are quite large and won't fit on a
single GPU and will need model parallelism.
This implementation doesn't even use data parallelism and is intended to be
more of a tutorial.
Main differences of this compared to a simple autoregressive transformer
are the parameter initialization, weight decay, and learning rate schedule.
For the transformer we reuse the
[existing labml/nn transformer implementation](../transformers/index.html).
Here's a notebook for training a GPT model on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/gpt/experiment.ipynb)
"""
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.optimizers.configs import OptimizerConfigs
from labml_nn.transformers import TransformerConfigs, Encoder
from labml_nn.transformers.utils import subsequent_mask
class GPT(nn.Module):
    """
    ## GPT model

    A token embedding layer, a transformer encoder, and a final linear
    layer that produces token logits.
    """

    def __init__(self, encoder: Encoder, src_embed: nn.Module, generator: nn.Module):
        """
        * `encoder` is the transformer [Encoder](../models.html#Encoder)
        * `src_embed` is the token
        [embedding module (with positional encodings)](../models.html#EmbeddingsWithLearnedPositionalEncoding)
        * `generator` is the [final fully connected layer](../models.html#Generator) that gives the logits.
        """
        super().__init__()
        self.src_embed = src_embed
        self.encoder = encoder
        self.generator = generator
        # Causal mask, built lazily on the first forward pass
        self.mask = None

    def forward(self, x: torch.Tensor):
        # (Re)build the causal mask when it is missing or its size does not
        # match the current sequence length
        if self.mask is None or self.mask.size(0) != len(x):
            # Subsequent mask, will mask out tokens from seeing future tokens
            self.mask = subsequent_mask(len(x)).to(x.device)
        # Token embeddings with positional encodings
        embeddings = self.src_embed(x)
        # Run the transformer encoder with the causal mask
        encoded = self.encoder(embeddings, self.mask)
        # Project to token logits.
        # The second return value is a placeholder state,
        # since our trainer is used with RNNs also.
        return self.generator(encoded), None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from
    [`NLPAutoRegressionConfigs`](../../experiments/nlp_autoregression.html#NLPAutoRegressionConfigs)
    """

    # GPT model
    model: GPT
    # Transformer
    transformer: TransformerConfigs
    # Weight decay
    weight_decay: float = 0.1
    # Number of tokens for warmup
    warmup_steps: int = 128 * 128 * 20
    # Custom optimizer (resolved by the `transformer_optimizer` option below)
    optimizer = 'transformer_optimizer'
@option(Configs.transformer, 'GPT')
def _transformer_configs(c: Configs):
    """
    ### Transformer configurations

    Builds the default [configurable transformer](../configs.html#TransformerConfigs)
    for GPT.
    """
    cfg = TransformerConfigs()
    # Vocabulary sizes for the embedding layer and the logit generator
    cfg.n_src_vocab = c.n_tokens
    cfg.n_tgt_vocab = c.n_tokens
    # GPT uses GELU activation for position wise feedforward
    cfg.ffn.activation = 'GELU'
    #
    return cfg
def _init_weights(module):
"""
### Initialize weights
Weights of linear layers and embedding layers are initialized
to $\mathcal{N}(0, 0.02)$
instead of the default Xavier initialzation.
"""
if not isinstance(module, (nn.Linear, nn.Embedding)):
return
module.weight.data.normal_(mean=0.0, std=0.02)
# Initialize biases to $0$
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@option(Configs.model)
def _model(c: Configs):
    """
    Create the GPT model and apply the custom weight initialization
    """
    model = GPT(c.transformer.encoder,
                c.transformer.src_embed,
                c.transformer.generator).to(c.device)
    # Initialize linear and embedding weights to $\mathcal{N}(0, 0.02)$
    model.apply(_init_weights)
    #
    return model
@option(NLPAutoRegressionConfigs.optimizer)
def transformer_optimizer(c: NLPAutoRegressionConfigs):
    """
    ### Create custom optimizer with weight decay

    This code is taken from [minGPT](https://github.com/karpathy/minGPT).
    This applies weight decay only to weights of linear layers.
    """
    # Collect names of parameters to apply weight decay.
    # `named_modules` visits every submodule, so when `m` is itself an
    # `nn.Linear` its `weight` is added under its fully qualified name,
    # which matches the keys of `named_parameters` below.
    decay = set()
    for mn, m in c.model.named_modules():
        for pn, p in m.named_parameters():
            fpn = f'{mn}.{pn}' if mn else pn  # full param name
            # Only direct `weight` parameters of linear layers decay
            if fpn.endswith('weight') and isinstance(m, nn.Linear):
                decay.add(fpn)
    # Get all the parameters
    param_dict = {pn: p for pn, p in c.model.named_parameters()}
    # Parameters that are not decayed
    no_decay = set(param_dict.keys()) - decay
    # create the pytorch optimizer object; sorting keeps the parameter-group
    # ordering deterministic across runs
    opt_groups = [
        {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": c.weight_decay},
        {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
    ]
    # Create a [configurable optimizer](../optimizers/configs.html#OptimizerConfigs),
    # so that we can change these simply by passing
    # a config dictionary.
    optimizer = OptimizerConfigs()
    # Set parameter groups for optimization.
    optimizer.parameters = opt_groups
    # Use [cosine decay optimizer](../optimizers/adam_warmup_cosine_decay.html).
    # This is what GPT uses.
    optimizer.optimizer = 'AdamWarmupCosineDecay'
    # Set model embedding size, required if we use [Noam optimizer](../optimizers/noam.html)
    # which has an exponential decay.
    optimizer.d_model = c.d_model
    # Set default weight decay.
    # This is not required since we set the weight decay in the parameter groups.
    optimizer.weight_decay = c.weight_decay
    # GPT uses a maximum learning rate of $6 \times 10^{-4}$.
    optimizer.learning_rate = 6e-4
    # $\beta_1 = 0.9, \beta_2 = 0.95$
    optimizer.betas = (0.9, 0.95)
    # $\epsilon = 10^{-8}$
    optimizer.eps = 1e-8
    # Weight decay is decoupled from gradients
    optimizer.weight_decouple = True
    # Total number of optimization steps for learning rate cosine decay
    optimizer.total_steps = c.epochs * len(c.text.train) // (c.batch_size * c.seq_len)
    # Number of warmup optimization steps
    optimizer.warmup = c.warmup_steps // (c.batch_size * c.seq_len)
    return optimizer
def main():
    """
    Create and run the GPT experiment on Tiny Shakespeare.
    """
    # Create experiment
    experiment.create(name="gpt")
    # Create configs
    configs = Configs()
    # Configuration overrides
    overrides = {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',
        # Use a context size of $128$
        'seq_len': 128,
        # Train for $32$ epochs
        'epochs': 32,
        # Batch size $128$
        'batch_size': 128,
        # Switch between training and validation $10$ times per epoch
        'inner_iterations': 10,
        # Transformer configurations
        'transformer.d_model': 512,
        'transformer.ffn.d_ff': 2048,
        'transformer.n_heads': 8,
        'transformer.n_layers': 6,
    }
    experiment.configs(configs, overrides)
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': configs.model})
    # Start the experiment and run training
    with experiment.start():
        configs.run()
#
# Run the experiment when executed as a script
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/mlm/experiment.py | labml_nn/transformers/mlm/experiment.py | """
---
title: Masked Language Model Experiment
summary: This experiment trains Masked Language Model (MLM) on Tiny Shakespeare dataset.
---
# [Masked Language Model (MLM)](index.html) Experiment
This is an annotated PyTorch experiment to train a [Masked Language Model](index.html).
"""
from typing import List
import torch
from torch import nn
from labml import experiment, tracker, logger
from labml.configs import option
from labml.logger import Text
from labml_nn.helpers.metrics import Accuracy
from labml_nn.helpers.trainer import BatchIndex
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import Encoder, Generator
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.mlm import MLM
class TransformerMLM(nn.Module):
    """
    # Transformer based model for MLM
    """

    def __init__(self, *, encoder: Encoder, src_embed: nn.Module, generator: Generator):
        """
        * `encoder` is the transformer [Encoder](../models.html#Encoder)
        * `src_embed` is the token
        [embedding module (with positional encodings)](../models.html#EmbeddingsWithLearnedPositionalEncoding)
        * `generator` is the [final fully connected layer](../models.html#Generator) that gives the logits.
        """
        super().__init__()
        self.generator = generator
        self.src_embed = src_embed
        self.encoder = encoder

    def forward(self, x: torch.Tensor):
        # Token embeddings with positional encodings
        embeddings = self.src_embed(x)
        # Encode without any attention mask — MLM attends to all tokens
        encoded = self.encoder(embeddings, None)
        # Project encoder output to token logits
        logits = self.generator(encoded)
        # Second value is a placeholder state, since our trainer is used with RNNs also
        return logits, None
class Configs(NLPAutoRegressionConfigs):
"""
## Configurations
This inherits from
[`NLPAutoRegressionConfigs`](../../experiments/nlp_autoregression.html)
because it has the data pipeline implementations that we reuse here.
We have implemented a custom training step form MLM.
"""
# MLM model
model: TransformerMLM
# Transformer
transformer: TransformerConfigs
# Number of tokens
n_tokens: int = 'n_tokens_mlm'
# Tokens that shouldn't be masked
no_mask_tokens: List[int] = []
# Probability of masking a token
masking_prob: float = 0.15
# Probability of replacing the mask with a random token
randomize_prob: float = 0.1
# Probability of replacing the mask with original token
no_change_prob: float = 0.1
# [Masked Language Model (MLM) class](index.html) to generate the mask
mlm: MLM
# `[MASK]` token
mask_token: int
# `[PADDING]` token
padding_token: int
# Prompt to sample
prompt: str = [
"We are accounted poor citizens, the patricians good.",
"What authority surfeits on would relieve us: if they",
"would yield us but the superfluity, while it were",
"wholesome, we might guess they relieved us humanely;",
"but they think we are too dear: the leanness that",
"afflicts us, the object of our misery, is as an",
"inventory to particularise their abundance; our",
"sufferance is a gain to them Let us revenge this with",
"our pikes, ere we become rakes: for the gods know I",
"speak this in hunger for bread, not in thirst for revenge.",
]
def init(self):
"""
### Initialization
"""
# `[MASK]` token
self.mask_token = self.n_tokens - 1
# `[PAD]` token
self.padding_token = self.n_tokens - 2
# [Masked Language Model (MLM) class](index.html) to generate the mask
self.mlm = MLM(padding_token=self.padding_token,
mask_token=self.mask_token,
no_mask_tokens=self.no_mask_tokens,
n_tokens=self.n_tokens,
masking_prob=self.masking_prob,
randomize_prob=self.randomize_prob,
no_change_prob=self.no_change_prob)
# Accuracy metric (ignore the labels equal to `[PAD]`)
self.accuracy = Accuracy(ignore_index=self.padding_token)
# Cross entropy loss (ignore the labels equal to `[PAD]`)
self.loss_func = nn.CrossEntropyLoss(ignore_index=self.padding_token)
#
super().init()
def step(self, batch: any, batch_idx: BatchIndex):
"""
### Training or validation step
"""
# Move the input to the device
data = batch[0].to(self.device)
# Update global step (number of tokens processed) when in training mode
if self.mode.is_train:
tracker.add_global_step(data.shape[0] * data.shape[1])
# Get the masked input and labels
with torch.no_grad():
data, labels = self.mlm(data)
# Get model outputs.
# It's returning a tuple for states when using RNNs.
# This is not implemented yet.
output, *_ = self.model(data)
# Calculate and log the loss
loss = self.loss_func(output.view(-1, output.shape[-1]), labels.view(-1))
tracker.add("loss.", loss)
# Calculate and log accuracy
self.accuracy(output, labels)
self.accuracy.track()
# Train the model
if self.mode.is_train:
# Calculate gradients
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
# Take optimizer step
self.optimizer.step()
# Log the model parameters and gradients on last batch of every epoch
if batch_idx.is_last:
tracker.add('model', self.model)
# Clear the gradients
self.optimizer.zero_grad()
# Save the tracked metrics
tracker.save()
@torch.no_grad()
def sample(self):
        """
        ### Sampling function to generate samples periodically while training

        Masks the fixed prompts, runs the model, and prints each prediction
        color-coded: correct (value), wrong (danger), unmasked original (subtle).
        """
        # Empty tensor for data filled with `[PAD]`; shape is `[seq_len, n_prompts]`
        data = torch.full((self.seq_len, len(self.prompt)), self.padding_token, dtype=torch.long)
        # Add the prompts one by one
        for i, p in enumerate(self.prompt):
            # Get token indexes
            d = self.text.text_to_i(p)
            # Add to the tensor, truncated to the sequence length
            s = min(self.seq_len, len(d))
            data[:s, i] = d[:s]
        # Move the tensor to current device
        data = data.to(self.device)
        # Get masked input and labels (masking is applied in place as well)
        data, labels = self.mlm(data)
        # Get model outputs
        output, *_ = self.model(data)
        # Print the samples generated
        for j in range(data.shape[1]):
            # Collect output for printing
            log = []
            # For each token
            for i in range(len(data)):
                # If the label is not `[PAD]` (i.e. this position was masked)
                if labels[i, j] != self.padding_token:
                    # Get the prediction (greedy argmax)
                    t = output[i, j].argmax().item()
                    # If it's a printable character
                    if t < len(self.text.itos):
                        # Correct prediction
                        if t == labels[i, j]:
                            log.append((self.text.itos[t], Text.value))
                        # Incorrect prediction
                        else:
                            log.append((self.text.itos[t], Text.danger))
                    # If it's not a printable character (a special token id)
                    else:
                        log.append(('*', Text.danger))
                # If the label is `[PAD]` (unmasked) print the original.
                elif data[i, j] < len(self.text.itos):
                    log.append((self.text.itos[data[i, j]], Text.subtle))
            # Print
            logger.log(log)
@option(Configs.n_tokens)
def n_tokens_mlm(c: Configs):
    """
    Number of tokens including `[PAD]` and `[MASK]`

    The base vocabulary is extended by two special ids:
    `[PAD]` (`n_tokens - 2`) and `[MASK]` (`n_tokens - 1`), matching `init`.
    """
    return c.text.n_tokens + 2
@option(Configs.transformer)
def _transformer_configs(c: Configs):
    """
    ### Transformer configurations

    Builds the configurable transformer wired to this experiment's
    vocabulary and embedding sizes.
    """
    # We use our
    # [configurable transformer implementation](../configs.html#TransformerConfigs)
    conf = TransformerConfigs()
    # Set the vocabulary sizes for embeddings and generating logits
    conf.n_src_vocab = c.n_tokens
    conf.n_tgt_vocab = c.n_tokens
    # Embedding size
    conf.d_model = c.d_model
    #
    return conf
@option(Configs.model)
def _model(c: Configs):
    """
    Create the masked-LM model (encoder + embeddings + generator) and
    move it to the configured device
    """
    m = TransformerMLM(encoder=c.transformer.encoder,
                       src_embed=c.transformer.src_embed,
                       generator=c.transformer.generator).to(c.device)
    return m
def main():
    """Create, configure and run the MLM experiment."""
    # Create experiment
    experiment.create(name="mlm")
    # Create configs
    conf = Configs()
    # Override configurations
    experiment.configs(conf, {
        # Batch size
        'batch_size': 64,
        # Sequence length of $32$. We use a short sequence length to train faster.
        # Otherwise it takes forever to train.
        'seq_len': 32,
        # Train for 1024 epochs.
        'epochs': 1024,
        # Switch between training and validation once per epoch
        'inner_iterations': 1,
        # Transformer configurations (same as defaults)
        'd_model': 128,
        'transformer.ffn.d_ff': 256,
        'transformer.n_heads': 8,
        'transformer.n_layers': 6,
        # Use [Noam optimizer](../../optimizers/noam.html)
        'optimizer.optimizer': 'Noam',
        'optimizer.learning_rate': 1.,
    })
    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment
    with experiment.start():
        # Run training
        conf.run()
#
# Run the experiment only when executed as a script (not when imported)
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/mlm/__init__.py | labml_nn/transformers/mlm/__init__.py | """
---
title: Masked Language Model
summary: >
This is an annotated implementation/tutorial of the Masked Language Model in PyTorch.
---
# Masked Language Model (MLM)
This is a [PyTorch](https://pytorch.org) implementation of the Masked Language Model (MLM)
used to pre-train the BERT model introduced in the paper
[BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805).
## BERT Pretraining
BERT model is a transformer model.
The paper pre-trains the model using MLM and with next sentence prediction.
We have only implemented MLM here.
### Next sentence prediction
In *next sentence prediction*, the model is given two sentences `A` and `B` and the model
makes a binary prediction whether `B` is the sentence that follows `A` in the actual text.
The model is fed with actual sentence pairs 50% of the time and random pairs 50% of the time.
This classification is done while applying MLM. *We haven't implemented this here.*
## Masked LM
This masks a percentage of tokens at random and trains the model to predict
the masked tokens.
They **mask 15% of the tokens** by replacing them with a special `[MASK]` token.
The loss is computed on predicting the masked tokens only.
This causes a problem during fine-tuning and actual usage since there are no `[MASK]` tokens
at that time.
Therefore we might not get any meaningful representations.
To overcome this **10% of the masked tokens are replaced with the original token**,
and another **10% of the masked tokens are replaced with a random token**.
This trains the model to give representations about the actual token whether or not the
input token at that position is a `[MASK]`.
And replacing with a random token causes it to
give a representation that has information from the context as well;
because it has to use the context to fix randomly replaced tokens.
## Training
MLMs are harder to train than autoregressive models because they have a smaller training signal.
i.e. only a small percentage of predictions are trained per sample.
Another problem is since the model is bidirectional, any token can see any other token.
This makes the "credit assignment" harder.
Let's say you have the character level model trying to predict `home *s where i want to be`.
At least during the early stages of the training, it'll be super hard to figure out why the
replacement for `*` should be `i`, it could be anything from the whole sentence.
Whilst, in an autoregressive setting the model will only have to use `h` to predict `o` and
`hom` to predict `e` and so on. So the model will initially start predicting with a shorter context first
and then learn to use longer contexts later.
Since MLMs have this problem it's a lot faster to train if you start with a smaller sequence length
initially and then use a longer sequence length later.
Here is [the training code](experiment.html) for a simple MLM model.
"""
from typing import List
import torch
class MLM:
    """
    ## Masked LM (MLM)

    Applies BERT-style masking to a batch of token sequences and
    produces the matching labels for the loss.
    """

    def __init__(self, *,
                 padding_token: int, mask_token: int, no_mask_tokens: List[int], n_tokens: int,
                 masking_prob: float = 0.15, randomize_prob: float = 0.1, no_change_prob: float = 0.1,
                 ):
        """
        * `padding_token` is the padding token `[PAD]`;
          labels equal to it are excluded from the loss.
        * `mask_token` is the masking token `[MASK]`.
        * `no_mask_tokens` is a list of tokens that should never be masked
          (e.g. `[CLS]` when training alongside a classification task).
        * `n_tokens` is the total vocabulary size (used to draw random tokens).
        * `masking_prob` is the probability that a position becomes a masking candidate.
        * `randomize_prob` is the probability a candidate is replaced by a random token.
        * `no_change_prob` is the probability a candidate keeps its original token.
        """
        self.padding_token = padding_token
        self.mask_token = mask_token
        # `[PAD]` and `[MASK]` themselves must never be masking candidates
        self.no_mask_tokens = no_mask_tokens + [padding_token, mask_token]
        self.n_tokens = n_tokens
        self.masking_prob = masking_prob
        self.randomize_prob = randomize_prob
        self.no_change_prob = no_change_prob

    def __call__(self, x: torch.Tensor):
        """
        * `x` is a `long` tensor of token ids with shape `[seq_len, batch_size]`.
          Note that `x` is modified in place; the labels are copied beforehand.

        Returns `(masked_input, labels)`.
        """
        # Pick candidate positions with probability `masking_prob`
        candidates = torch.rand(x.shape, device=x.device) < self.masking_prob
        # Protected tokens are never candidates
        for token in self.no_mask_tokens:
            candidates &= x != token
        # Some candidates keep their original token ...
        keep_original = candidates & (torch.rand(x.shape, device=x.device) < self.no_change_prob)
        # ... and some are replaced by a random token
        randomized = candidates & (torch.rand(x.shape, device=x.device) < self.randomize_prob)
        # Locations that receive random tokens
        random_pos = torch.nonzero(randomized, as_tuple=True)
        # Draw the replacement tokens
        replacements = torch.randint(0, self.n_tokens, (len(random_pos[0]),), device=x.device)
        # Remaining candidates get the `[MASK]` token
        to_mask = candidates & ~randomized & ~keep_original
        # Copy the labels before any in-place edit of `x`
        y = x.clone()
        # Write `[MASK]` tokens (in place, so the caller's tensor changes too)
        x.masked_fill_(to_mask, self.mask_token)
        # Write the random replacements
        x[random_pos] = replacements
        # Non-candidate positions in the labels become `[PAD]`,
        # so the loss function can ignore them
        y.masked_fill_(~candidates, self.padding_token)
        # Masked input and labels
        return x, y
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/jax_transformer/__init__.py | labml_nn/transformers/jax_transformer/__init__.py | """
---
title: Autoregressive Transformer Decoder in JAX from scratch
summary: >
    An implementation of a transformer decoder on a small text dataset in JAX from scratch,
with implementations of basic layers like layer normalization and adam optimizer.
---
# Autoregressive Transformer Decoder in JAX from scratch
### Contents
* [Module class to help us write the layers](#Module)
* [Embedding layer](#Embedding)
* [Positional embeddings](#PositionalEmbedding)
* [Linear layer](#Linear)
* [Layer Normalization](#LayerNormalization)
* [Multi-head attention](#MHA)
* [Position-wise Feed-Forward layer](#FFN)
* [TransformerLayer layer](#TransformerLayer)
* [Cross Entropy Loss](#CrossEntropyLoss)
* [Autoregressive Transformer](#AutoregressiveTransformer)
* [Adam Optimizer](#Adam)
* [Simple dataset](#Dataset)
* [Experiment code](#Experiment)
"""
from functools import partial
from typing import Dict, NamedTuple, Tuple, Any, Callable
from typing import List, TypeVar, Generic
from typing import Union, Optional
import jax
import jax.numpy as jnp
import numpy as np
from labml import lab, monit, experiment, tracker
from labml import logger
from labml.logger import Text
from labml.utils.download import download_file
class Module:
    """
    <a id="Module"></a>
    ## Module

    This is a base class for all modules.
    It handles parameters and transforms methods to pure functions for JAX to compile and differentiate.

    You can skip these modules to get into the models directly.

    The module stores parameters and sub-modules separately.
    When we want to transform any method to a pure function, we pass the parameters of the
    module and the sub-modules as an argument and assign the passed values to the class.

    This is based on a blog post:
    [From PyTorch to JAX: towards neural net frameworks that purify stateful code](https://sjmielke.com/jax-purify.htm).
    """
    # Store all parameters and sub-modules in dictionaries
    _submodules: Dict[str, 'Module']
    _params: Dict[str, jnp.ndarray]

    def __init__(self):
        """Initialize with no parameters and no sub-modules"""
        self._params = {}
        self._submodules = {}

    def __getattr__(self, attr_name: str):
        """
        ### Get attribute

        Resolves `model.attribute` against parameters first, then sub-modules,
        then ordinary attributes in `__dict__`.
        """
        # If the attribute is a parameter
        if attr_name in self._params:
            return self._params[attr_name]
        # If the attribute is a sub-module
        elif attr_name in self._submodules:
            return self._submodules[attr_name]
        # Otherwise fall back to normal attributes, stored in `__dict__` by Python.
        else:
            try:
                return self.__dict__[attr_name]
            except KeyError:
                # Raise `AttributeError` (not `KeyError`): `hasattr`,
                # `getattr(obj, name, default)`, copying and pickling all rely
                # on `AttributeError` to detect a missing attribute.
                raise AttributeError(
                    f"'{type(self).__name__}' object has no attribute '{attr_name}'") from None

    def __setattr__(self, key: str, value: Any):
        """
        ### Set attribute

        Routes `model.attribute = value` assignments: modules go to
        `_submodules`, JAX arrays to `_params`, everything else to `__dict__`.
        """
        # If the value is also a module
        if isinstance(value, Module):
            self._submodules[key] = value
        # If the value is a JAX array
        elif isinstance(value, jnp.ndarray):
            self._params[key] = value
        # Otherwise add it to `__dict__`
        else:
            self.__dict__[key] = value

    def _clear_params(self):
        """
        ### Clear parameters

        Clears out all the parameters (recursively), used before a method is
        called as a pure function so that fresh parameters can be assigned.
        """
        # Clear parameters of the module
        self._params = {}
        # Recursively clear parameters of submodules
        for sm in self._submodules.values():
            sm._clear_params()

    def get_params(self) -> Dict[str, jnp.ndarray]:
        """
        ### Collect all the parameters

        Recursively collects the parameters of this module and its sub-modules
        into a flat dictionary keyed `module_name/.../param_name`.
        """
        # Parameters of the module (copied, so callers can't mutate our state)
        params = self._params.copy()
        # Parameters of the submodules
        for sm_name, sm in self._submodules.items():
            for name, value in sm.get_params().items():
                params[sm_name + "/" + name] = value
        #
        return params

    def _set_params(self, params: Dict[str, jnp.ndarray]):
        """
        ### Set all the parameters
        """
        # Iterate through parameters.
        # Their names have the form `module_name/module_name/param_name`
        for name, value in params.items():
            # Split to get module names and parameter name
            self._set_param(name.split("/"), value)

    def _set_param(self, param_path: List[str], value: jnp.ndarray):
        """
        ### Set a single parameter

        Called by `_set_params` with the already-split path.
        """
        # No module names; i.e. a parameter of this module
        if len(param_path) == 1:
            self._params[param_path[0]] = value
        # Parameter of a submodule
        else:
            self._submodules[param_path[0]]._set_param(param_path[1:], value)

    def purify(self, method: Callable) -> Callable:
        """
        ### Transform a member method to a pure function

        Returns a function that accepts a parameter dictionary as its first
        argument, so JAX can trace/differentiate it. For example,

        ```python
        params = model.get_params()
        pure_function = model.purify(model.calculate_loss)
        output = pure_function(params, data)
        ```
        """
        def pure_method(params: Dict[str, jnp.array], *args):
            # Clear parameters in the object
            self._clear_params()
            # Assign the passed parameters
            self._set_params(params)
            # Invoke the method
            result = method(*args)
            # Return the result
            return result
        #
        return pure_method
# Type for generics in the module list class
# Type for generics in the module list class
M = TypeVar('M', bound=Module)


class ModuleList(Module, Generic[M]):
    """
    ## Module list

    Stores an ordered list of modules (e.g. the transformer decoder's layers).
    Parameters are keyed by list index: `0/param`, `1/param`, ...
    """
    # Sub-modules are kept in a list here, not a dict
    _submodules: List[M]

    def __init__(self, modules: List[M]):
        """
        Initialize with a list of modules.
        """
        super().__init__()
        self._submodules = modules

    def __getitem__(self, idx: int) -> M:
        """
        ### Get the `idx`-th module
        """
        return self._submodules[idx]

    def __setitem__(self, key, value):
        """
        Assigning into the list is not supported
        """
        raise NotImplementedError

    def __len__(self):
        """
        ### Number of modules
        """
        return len(self._submodules)

    def __getattr__(self, item):
        """
        Override `__getattr__` of `Module`, since `_submodules` is a list here
        """
        try:
            return self.__dict__[item]
        except KeyError:
            # Raise `AttributeError` (not `KeyError`) so `hasattr` and
            # `getattr` with a default behave correctly
            raise AttributeError(
                f"'{type(self).__name__}' object has no attribute '{item}'") from None

    def __setattr__(self, key, value):
        """
        Override `__setattr__` of `Module`; store everything in `__dict__`
        """
        self.__dict__[key] = value

    def _clear_params(self):
        """
        ### Clear all parameters
        """
        self._params = {}
        for sm in self._submodules:
            sm._clear_params()

    def get_params(self):
        """
        ### Get all parameters
        """
        # Copy, so repeated calls don't accumulate child entries in
        # `self._params` (matches `Module.get_params`, which also copies)
        params = self._params.copy()
        for i, sm in enumerate(self._submodules):
            for name, value in sm.get_params().items():
                params[f'{i}/{name}'] = value
        return params

    def _set_param(self, param_path: List[str], value: jnp.ndarray):
        """
        ### Set a parameter

        The first path component is the list index of the sub-module.
        """
        self._submodules[int(param_path[0])]._set_param(param_path[1:], value)
class Embedding(Module):
    """
    <a id="Embedding"></a>
    ## Embedding layer

    A lookup table mapping token ids to dense vectors.
    """
    def __init__(self, rnd_key: jax.random.PRNGKey, n_embeddings: int, n_dim: int):
        """
        * `rnd_key` is the PRNG state
        * `n_embeddings` is the number of embeddings
        * `n_dim` is the size of an embedding
        """
        super().__init__()
        # Initialize the table from the standard normal distribution $\mathcal{N}(0, 1)$
        self.embeddings = jax.random.normal(rnd_key, (n_embeddings, n_dim))
    def __call__(self, ids: jnp.ndarray):
        """
        Look up and return the embedding rows for the given `ids`
        """
        return self.embeddings[ids, :]
class EmbeddingsWithLearnedPositionalEncoding(Module):
    """
    <a id="PositionalEmbedding"></a>
    ## Embed tokens and add parameterized positional encodings

    This is based on
    [our PyTorch implementation](https://nn.labml.ai/transformers/models.html#EmbeddingsWithLearnedPositionalEncoding).
    """
    def __init__(self, rnd_key: jax.random.PRNGKey, n_vocab: int, d_model: int, max_len: int = 4096):
        """
        * `rnd_key` is the PRNG state
        * `n_vocab` is the vocabulary size
        * `d_model` is the embedding size
        * `max_len` is the maximum sequence length (to initialize positional encodings)
        """
        super().__init__()
        # Token embedding table
        self.embeddings = Embedding(rnd_key, n_vocab, d_model)
        # Scale factor $\frac{1}{\sqrt{d}}$ applied to the token embeddings
        self.pe_coef = 1 / d_model ** 0.5
        # Learned positional encodings, starting from zeros
        self.positional_encodings = jnp.zeros((max_len, d_model))
    def __call__(self, x: jnp.ndarray):
        # Positional encodings for the input length
        pos = self.positional_encodings[:x.shape[0]]
        # Scaled token embeddings
        scaled = self.embeddings(x) * self.pe_coef
        # Sum of token and position signals
        return scaled + pos
class Linear(Module):
    """
    <a id="Linear"></a>
    ## Linear Layer

    An affine transformation: a weight matrix and a bias vector.
    """
    def __init__(self, rnd_key: jax.random.PRNGKey, in_features: int, out_features: int):
        """
        * `rnd_key` is the PRNG state
        * `in_features` is the number of features in the input
        * `out_features` is the number of features in the output
        """
        super().__init__()
        # Weights drawn from
        # $$\mathcal{U}\Bigg(-\frac{1}{\sqrt{d_{in}}}, \frac{1}{\sqrt{d_{in}}} \Bigg)$$
        bound = 1 / in_features ** 0.5
        self.weight = jax.random.uniform(rnd_key, (in_features, out_features),
                                         minval=-bound, maxval=bound)
        # Bias starts at $0$
        self.bias = jnp.zeros((out_features,))
    def __call__(self, x: jnp.ndarray):
        # $x W + b$
        return jnp.matmul(x, self.weight) + self.bias
class LayerNorm(Module):
    r"""
    <a id="LayerNormalization"></a>
    ## Layer Normalization

    This implements layer normalization from the paper
    [Layer Normalization](https://papers.labml.ai/paper/1607.06450).

    When input $X \in \mathbb{R}^{L \times C}$ is a sequence of embeddings,
    where $C$ is the number of channels, $L$ is the length of the sequence.
    $\gamma \in \mathbb{R}^{C}$ and $\beta \in \mathbb{R}^{C}$.

    $$\text{LN}(X) = \gamma
    \frac{X - \underset{C}{\mathbb{E}}[X]}{\sqrt{\underset{C}{Var}[X] + \epsilon}}
    + \beta$$

    This is based on
    [our PyTorch implementation](https://nn.labml.ai/normalization/layer_norm/index.html).
    """
    def __init__(self, normalized_shape: Union[Tuple[int], List[int]], *,
                 eps: float = 1e-5, elementwise_affine: bool = True):
        """
        * `normalized_shape` $S$ is the shape of the elements (except the batch);
          the input should be $X \in \mathbb{R}^{* \times S[0] \times ... \times S[n]}$
        * `eps` is $\epsilon$, used in $\sqrt{Var[X] + \epsilon}$ for numerical stability
        * `elementwise_affine` is whether to scale and shift the normalized value
        """
        super().__init__()
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        self.normalized_shape = tuple(normalized_shape)
        # Learnable gain $\gamma$ and bias $\beta$, when the affine transform is enabled
        if elementwise_affine:
            self.gain = jnp.ones(normalized_shape)
            self.bias = jnp.zeros(normalized_shape)
    def __call__(self, x: jnp.ndarray):
        # The trailing dimensions of the input must match `normalized_shape`
        assert self.normalized_shape == x.shape[-len(self.normalized_shape):]
        # Axes (counted from the end) over which the statistics are computed
        stat_axes = [-(dim + 1) for dim in range(len(self.normalized_shape))]
        # First moment $\mathbb{E}[X]$
        mean = x.mean(axis=stat_axes, keepdims=True)
        # Second moment $\mathbb{E}[X^2]$
        second_moment = (x ** 2).mean(axis=stat_axes, keepdims=True)
        # $Var[X] = \mathbb{E}[X^2] - \mathbb{E}[X]^2$
        var = second_moment - mean ** 2
        # $$\hat{X} = \frac{X - \mathbb{E}[X]}{\sqrt{Var[X] + \epsilon}}$$
        normalized = (x - mean) / (var + self.eps) ** 0.5
        # Optionally scale and shift: $\text{LN}(x) = \gamma \hat{X} + \beta$
        if self.elementwise_affine:
            normalized = self.gain * normalized + self.bias
        #
        return normalized
class MultiHeadAttention(Module):
    r"""
    <a id="MHA"></a>
    ## Multi-Head Attention Module

    This computes scaled multi-headed attention from
    the paper [Attention Is All You Need](https://papers.labml.ai/paper/1706.03762)
    for given `query`, `key` and `value` vectors.

    $$\mathop{Attention}(Q, K, V) = \underset{seq}{\mathop{softmax}}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)V$$

    In simple terms, it finds keys that matches the query, and gets the values of
    those keys.
    It uses dot-product of query and key as the indicator of how matching they are.
    Before taking the $softmax$ the dot-products are scaled by $\frac{1}{\sqrt{d_k}}$.
    This is done to avoid large dot-product values causing softmax to
    give very small gradients when $d_k$ is large.

    Softmax is calculated along the axis of the sequence (or time) for keys.

    This is based on
    [our PyTorch implementation](https://nn.labml.ai/transformers/mha.html#MHA).
    """
    def __init__(self, rnd_key: jax.random.PRNGKey, heads: int, d_model: int):
        """
        * `rnd_key` is the PRNG state
        * `heads` is the number of heads.
        * `d_model` is the number of features in the `query`, `key` and `value` vectors.
        """
        super().__init__()
        # Split the PRNG state
        _, *rnd_keys = jax.random.split(rnd_key, 5)
        # Number of features per head
        self.d_k = d_model // heads
        # Number of heads
        self.heads = heads
        # These transform the `query`, `key` and `value` vectors for multi-headed attention.
        self.query = Linear(rnd_keys[0], d_model, d_model)
        self.key = Linear(rnd_keys[1], d_model, d_model)
        self.value = Linear(rnd_keys[2], d_model, d_model)
        # Output layer
        self.output = Linear(rnd_keys[3], d_model, d_model)
        # Scaling factor before the softmax
        self.scale = 1 / self.d_k ** 0.5
    def __call__(self, *,
                 query: jnp.ndarray,
                 key: jnp.ndarray,
                 value: jnp.ndarray,
                 mask: Optional[jnp.ndarray] = None):
        """
        `query`, `key` and `value` are the tensors that store
        collection of *query*, *key* and *value* vectors.
        They have shape `[seq_len, d_model]`.

        `mask` has shape `[seq_len, seq_len]` and
        `mask[i, j]` indicates whether query at position `i` can see key-value at position `j`.
        """
        # Get sequence length
        seq_len = len(query)
        if mask is not None:
            # Check mask shape
            assert mask.shape[0] == query.shape[0]
            assert mask.shape[1] == key.shape[0]
            # Same mask applied to all heads.
            mask = mask[:, :, None]
        # Apply linear transformations
        query = self.query(query)
        key = self.key(key)
        value = self.value(value)
        # Reshape to split into heads:
        # the last dimension is split into `heads` and `d_k`.
        query = query.reshape(*query.shape[:-1], self.heads, self.d_k)
        key = key.reshape(*key.shape[:-1], self.heads, self.d_k)
        value = value.reshape(*value.shape[:-1], self.heads, self.d_k)
        # Compute attention scores $Q K^\top$.
        # This gives a tensor of shape `[seq_len, seq_len, heads]`.
        # $$S_{ijh} = \sum_d Q_{ihd} K_{jhd}$$
        scores = jnp.einsum('ihd,jhd->ijh', query, key)
        # Scale scores $\frac{Q K^\top}{\sqrt{d_k}}$
        scores *= self.scale
        # Apply mask by setting hidden positions to $-\infty$ before the softmax.
        #
        # Bug fix: the previous form `scores + (mask == 0) * float('-inf')`
        # evaluates `0 * -inf = nan` at every *visible* position,
        # corrupting the whole softmax. `jnp.where` selects cleanly instead.
        if mask is not None:
            scores = jnp.where(mask, scores, float('-inf'))
        # $softmax$ attention along the key sequence dimension
        # $\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)$
        attn = jax.nn.softmax(scores, axis=1)
        # Multiply by values
        # $$\underset{seq}{softmax}\Bigg(\frac{Q K^\top}{\sqrt{d_k}}\Bigg)V$$
        x = jnp.einsum("ijh,jhd->ihd", attn, value)
        # Concatenate multiple heads
        x = x.reshape(seq_len, -1)
        # Output layer
        return self.output(x)
class FeedForward(Module):
    """
    <a id="FFN"></a>
    ## Position-wise Feed-Forward layer

    Two linear transformations with an activation in between,
    applied independently at every position.

    This is based on
    [our PyTorch implementation](https://nn.labml.ai/transformers/feed_forward.html).
    """
    def __init__(self, rnd_key: jax.random.PRNGKey, d_model: int, d_ff: int,
                 activation=jax.nn.relu):
        """
        * `rnd_key` is the PRNG state
        * `d_model` is the number of features in a token embedding
        * `d_ff` is the number of features in the hidden layer of the FFN
        * `activation` is the activation function $f$
        """
        super().__init__()
        # Split the PRNG state
        _, *rnd_keys = jax.random.split(rnd_key, 5)
        # First layer, parameterized by weight $W_1$ and bias $b_1$
        self.layer1 = Linear(rnd_keys[0], d_model, d_ff)
        # Second layer, parameterized by weight $W_2$ and bias $b_2$
        self.layer2 = Linear(rnd_keys[1], d_ff, d_model)
        # Activation function $f$
        self.activation = activation
    def __call__(self, x: jnp.ndarray):
        # Hidden representation $f(x W_1 + b_1)$
        hidden = self.activation(self.layer1(x))
        # Project back: $f(x W_1 + b_1) W_2 + b_2$
        return self.layer2(hidden)
class TransformerLayer(Module):
    """
    <a id="TransformerLayer"></a>
    ## Transformer Layer

    A transformer layer: multi-head self attention followed by a
    position-wise feed-forward network, each with a residual connection
    and pre-layer normalization.
    """
    def __init__(self,
                 d_model: int,
                 self_attn: MultiHeadAttention,
                 feed_forward: FeedForward):
        """
        * `d_model` is the token embedding size
        * `self_attn` is the self attention module
        * `feed_forward` is the feed forward module
        """
        super().__init__()
        self.size = d_model
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm_self_attn = LayerNorm([d_model])
        self.norm_ff = LayerNorm([d_model])
    def __call__(self, x: jnp.ndarray, mask: jnp.ndarray):
        # Pre-norm self attention (keys and values come from self),
        # followed by a residual connection
        normed = self.norm_self_attn(x)
        x = x + self.self_attn(query=normed, key=normed, value=normed, mask=mask)
        # Pre-norm feed-forward, followed by a residual connection
        normed = self.norm_ff(x)
        x = x + self.feed_forward(normed)
        #
        return x
class CrossEntropyLoss(Module):
    """
    <a id="CrossEntropyLoss"></a>
    ## Cross Entropy Loss
    """
    def __init__(self):
        super().__init__()
        # Vectorize the per-position loss over the sequence dimension with `jax.vmap`
        self._loss_vmap = jax.vmap(self._loss, in_axes=(0, 0,))
    def _loss(self, output: jnp.ndarray, target: jnp.ndarray):
        # Negative log-likelihood of the target class:
        # $$- \sum_k y_k \log \hat{y}_k$$
        return -jax.nn.log_softmax(output)[target]
    def __call__(self, output: jnp.ndarray, target: jnp.ndarray):
        """
        * `output` is the model outputs of shape `[seq_len, n_vocab]`
        * `target` is the target of shape `[seq_len]`
        """
        # Mean of the vectorized per-position losses
        # (vmap is about 10X faster than a Python loop here)
        return self._loss_vmap(output, target).mean()
class AutoregressiveTransformer(Module):
    """
    <a id="AutoregressiveTransformer"></a>
    ## Autoregressive Transformer

    This is the transformer decoder with embedding and output layers.
    """
    layers: ModuleList[TransformerLayer]
    def __init__(self, rnd_key: jax.random.PRNGKey, n_vocab: int, d_model: int, n_layers: int, heads: int, d_ff: int):
        """
        * `rnd_key` is the PRNG state
        * `n_vocab` is the vocabulary size
        * `d_model` is the number of features in a token embedding
        * `n_layers` is the number of transformer layers
        * `heads` is the number of attention heads
        * `d_ff` is the number of features in the hidden layer of the FFN
        """
        super().__init__()
        self.n_vocab = n_vocab
        self.d_model = d_model
        self.loss_func = CrossEntropyLoss()
        # For transformer layers
        layers = []
        for i in range(n_layers):
            # Split PRNG state; each layer gets its own keys for attention and FFN
            rnd_key, mha_key, ffn_key = jax.random.split(rnd_key, 3)
            # Create a transformer layer
            attn = MultiHeadAttention(mha_key, heads, d_model)
            ffn = FeedForward(ffn_key, d_model, d_ff)
            layers.append(TransformerLayer(d_model, attn, ffn))
        # Make a module list
        self.layers = ModuleList(layers)
        # Split PRNG state
        rnd_key, emb_key, out_key = jax.random.split(rnd_key, 3)
        # Create embedding layer
        self.embeddings = EmbeddingsWithLearnedPositionalEncoding(emb_key, n_vocab, d_model)
        # Final normalization and output layer
        self.norm = LayerNorm([d_model])
        self.output = Linear(out_key, d_model, n_vocab)
    def __call__(self, x: jnp.ndarray):
        # Get sequence length
        seq_len = len(x)
        # A lower-triangular mask so that a token can only attend to earlier tokens (and itself)
        mask = jnp.tril(jnp.ones((seq_len, seq_len), bool))
        # Get embeddings with positional encodings
        x = self.embeddings(x)
        # Apply the transformer layers
        for i in range(len(self.layers)):
            x = self.layers[i](x, mask)
        # Final normalization and linear transformation to get the logits
        return self.output(self.norm(x))
    def get_loss(self, x: jnp.ndarray):
        """
        ### Calculate the loss

        Next-token prediction: position `i` of the output predicts token `i + 1`.
        """
        # Get model outputs
        output = self(x)
        # Cross entropy loss
        return self.loss_func(output[:-1], x[1:])
    def sample(self, seq: jnp.ndarray, length: int = 20):
        """
        ### Sample

        The starting sequence is given by `seq` and we greedily sample `length` tokens
        """
        for i in range(length):
            # Greedily pick the highest probability token for the last position
            idx = jnp.argmax(self(seq)[-1])
            # Add it to the sequence
            seq = jnp.concatenate((seq, idx[None]))
        # Return the sampled sequence
        return seq
class AdamState(NamedTuple):
"""
This is a named tuple for storing Adam optimizer state for a parameter
"""
m: jnp.ndarray
v: jnp.ndarray
class Adam:
"""
<a id="Adam"></a>
## Adam Optimizer
This is from paper
[Adam: A Method for Stochastic Optimization](https://papers.labml.ai/paper/1412.6980).
For parameter $\theta_t$ and gradient $g_t$ at step $t$, the Adam update is,
\begin{align}
m_t &\leftarrow \beta_1 m_{t-1} + (1 - \beta_1) \cdot g_t \\
v_t &\leftarrow \beta_2 v_{t-1} + (1 - \beta_2) \cdot g_t^2 \\
\hat{m}_t &\leftarrow \frac{m_t}{1-\beta_1^t} \\
\hat{v}_t &\leftarrow \frac{v_t}{1-\beta_2^t} \\
\theta_t &\leftarrow \theta_{t-1} - \alpha \cdot \frac{\hat{m}_t}{\sqrt{\hat{v}_t} + \epsilon}
\end{align}
where $\alpha$, $\beta_1$, $\beta_2$ and $\epsilon$ are scalar hyper parameters.
$m_t$ and $v_t$ are first and second order moments.
$\hat{m}_t$ and $\hat{v}_t$ are biased corrected moments.
$\epsilon$ is used as a fix for division by zero error, but also acts as a form of a hyper-parameter
that acts against variance in gradients.
"""
def __init__(self, params: Dict,
lr: float = 0.001, betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-16, ):
"""
* `params` is the tree-map of parameters
* `lr` is the learning rate $\alpha$
* `betas` is a tuple of ($\beta_1$, $\beta_2$)
* `eps` is $\hat{\epsilon}$`
"""
super().__init__()
self.lr = lr
self.betas = betas
self.eps = eps
# States for each parameter
self.states = jax.tree.map(self._init_state, params)
# Optimized step function
self._step_jit = jax.jit(self._step)
# Number of steps taken $t$
self._n_steps = 0
# Optimized update state function
self._update_state_jit = jax.jit(self._update_state)
def _init_state(self, param: jnp.ndarray):
"""
Initialize the state for a given parameter
"""
return AdamState(jnp.zeros_like(param), jnp.zeros_like(param))
def step(self, params: Dict, grads: Dict):
"""
## Step function
* `params` is a tree-map of parameters
* `grads` is a tree-map of gradients
"""
# Increment step $t$
self._n_steps += 1
# Update states for each parameter
self.states = jax.tree.map(self._update_state_jit, grads, self.states)
# Return updated parameters $\theta_t$
return jax.tree.map(partial(self._step_jit, self._n_steps), params, self.states)
def _step(self, n_steps: int, param: jnp.ndarray, state: AdamState):
    """
    ### Update parameters

    Perform a single Adam update for one parameter tensor and
    return the updated parameter $\theta_t$.
    """
    # Bias-correction terms: $1 - \beta_1^t$ for $\hat{m}_t$ and $1 - \beta_2^t$ for $\hat{v}_t$
    bc_m, bc_v = (1 - beta ** n_steps for beta in self.betas)
    # Uncorrected first and second moments $m_t$ and $v_t$
    m, v = state
    # Effective step size $\alpha \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t}$,
    # folding both bias corrections into the learning rate
    lr_t = self.lr * (bc_v ** 0.5) / bc_m
    # Denominator $\sqrt{v_t} + \hat{\epsilon}$
    denom = (v ** 0.5) + self.eps
    # $\theta_t \leftarrow \theta_{t-1} - \alpha \frac{\sqrt{1-\beta_2^t}}{1-\beta_1^t} \cdot
    # \frac{m_t}{\sqrt{v_t} + \hat{\epsilon}}$
    return param - lr_t * m / denom
def _update_state(self, grad, state: AdamState):
    """
    ### Update state

    This updates the uncorrected first and second moments
    $m_t$ and $v_t$ for a single parameter.
    """
    # Uncorrected first and second moments $m_{t-1}$ and $v_{t-1}$
    m, v = state
    # Clip gradients to $[-1, 1]$ (hard-coded range; simple element-wise gradient clipping)
    grad = jnp.clip(grad, -1, 1)
    # $$m_t \leftarrow \beta_1 m_{t-1} + (1 - \beta_1) \cdot g_t$$
    m = self.betas[0] * m + grad * (1 - self.betas[0])
    # $$v_t \leftarrow \beta_2 v_{t-1} + (1 - \beta_2) \cdot g_t^2$$
    v = self.betas[1] * v + (grad ** 2) * (1 - self.betas[1])
    # Return the new state
    return AdamState(m, v)
class TinyShakespeare:
    """
    <a id="Dataset"></a>

    ## Tiny Shakespeare dataset

    Character-level dataset. Iterating over an instance yields batches of
    token-id sequences of shape `[batch_size, seq_len]`, reshuffled each epoch.
    """

    def __init__(self, rnd_key: jax.random.PRNGKey, seq_len: int, batch_size: int):
        """
        * `rnd_key` is the PRNG state
        * `seq_len` is the sequence length of a sample
        * `batch_size` is the batch size
        """
        self.batch_size = batch_size
        # PRNG key for shuffling the samples (split off so the caller's key is not reused)
        _, self.rnd_key = jax.random.split(rnd_key)

        # Local path of the text file
        path = lab.get_data_path() / 'tiny_shakespeare.txt'
        # Download if it doesn't exist
        url = 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'
        if not path.exists():
            download_file(url, path)

        # Read the file
        with open(str(path), 'r') as f:
            self.text = f.read()

        # Get the characters/tokens (sorted so ids are deterministic across runs)
        tokens = sorted(list(set(self.text)))

        # Number of tokens
        self.n_tokens = len(tokens)
        # Map tokens to ids
        self.stoi = {t: i for i, t in enumerate(tokens)}
        # Id to token/character
        self.itos = tokens

        # As a list of ids
        data = jnp.array([self.stoi[s] for s in list(self.text)])
        # Number of batches
        self.n_batches = len(data) // (seq_len * batch_size)
        # Truncate so the data divides evenly into batches
        data = data[:self.n_batches * seq_len * batch_size]
        # Reshape into samples of length `seq_len`
        # (better to use random offsets, but let's ignore that here)
        self.data = data.reshape((-1, seq_len))
        # List of sample indexes
        self.idx = jnp.arange(len(self.data))

    def __iter__(self):
        """
        Setup for iteration
        """
        # Iteration step
        self._iter_idx = 0
        # Split PRNG key so each epoch gets a fresh permutation
        self.rnd_key, rnd_key = jax.random.split(self.rnd_key)
        # Shuffle sample indexes
        self.idx = jax.random.permutation(rnd_key, self.idx)

        #
        return self

    def __len__(self):
        """
        Number of batches
        """
        return self.n_batches

    def __next__(self):
        """
        Get next batch of shape `[batch_size, seq_len]`
        """
        # Stop iteration after iterating through all batches
        if self._iter_idx >= self.n_batches:
            raise StopIteration()

        # Sample indexes for the batch
        idx = self.idx[self._iter_idx * self.batch_size:(self._iter_idx + 1) * self.batch_size]
        # Increment iteration step
        self._iter_idx += 1

        # Return samples
        return self.data[idx]
def main():
"""
<a id="Experiment"></a>
## Run the experiment
"""
# Create experiment
experiment.create(name='jax')
# Create PRNG key
rnd_key = jax.random.PRNGKey(0)
# Create dataset
dataset = TinyShakespeare(rnd_key, seq_len=32, batch_size=128)
# Create the model
model = AutoregressiveTransformer(rnd_key, dataset.n_tokens,
d_model=128, n_layers=3, heads=8, d_ff=512)
# Get model parameters
params = model.get_params()
# JAX compiled pure sampling function
pure_sample_fn = jax.jit(model.purify(model.sample))
# JAX compiled pure function to get logits for a batch.
# First we transform `model.__call__` to a pure function which accepts two arguments:
# parameters, and input sequence.
# Next we vectorize the function to process a batch of samples. `in_axes` specifies which arguments
# to parallelize and along which axis. `(None, 0)` means we have the same parameters but parallelize
# the inputs across the first axis.
# `out_axes` specifies along which axis to merge the results.
pure_forward_fn = jax.jit(jax.vmap(model.purify(model.__call__),
in_axes=(None, 0), out_axes=0))
# Similarly we vectorize loss computation
pure_loss_fn = jax.jit(jax.vmap(model.purify(model.get_loss),
in_axes=(None, 0), out_axes=0))
# A function to get mean loss
def get_loss(params, seq):
return pure_loss_fn(params, seq).mean()
# A function to compute gradients for the first argument (parameters)
grad_loss_fn = jax.jit(jax.grad(get_loss, argnums=0))
# Create optimizer
optimizer = Adam(params)
# Start the experiment
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | true |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/glu_variants/experiment.py | labml_nn/transformers/glu_variants/experiment.py | """
---
title: Gated Linear Units and Variants
summary: >
Train an auto-regressive transformer with Gated Linear Units and variants
for the position-wise feedforward network (FFN).
---
# Gated Linear Units and Variants
This trains a simple [transformer](../../) model for auto-regression.
We try different variants for the [position-wise feedforward network](../feed_forward).
The reusable & configurable are defined in [`configs.py`](configs.html).
"""
import torch
from labml import experiment
from labml.configs import option
from labml.utils.pytorch import get_modules
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers import Encoder, Generator, TransformerConfigs
from labml_nn.transformers.utils import subsequent_mask
from torch import nn
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model

    Wraps a token embedding, a transformer encoder and a projection to
    vocabulary logits into a single auto-regressive language model.
    """

    def __init__(self, src_embed: nn.Module, encoder: Encoder, generator: Generator):
        super().__init__()
        # Token embedding module
        self.src_embed = src_embed
        # Transformer based encoder
        self.encoder = encoder
        # Projection layer that gives logits of the next token
        self.generator = generator
        # Causal attention mask; built lazily on the first forward pass
        self.src_mask = None

    def forward(self, src: torch.Tensor):
        seq_len = len(src)
        # (Re)build the subsequent mask whenever the sequence length changes,
        # so that the transformer can only pay attention to past tokens
        if self.src_mask is None or self.src_mask.size(0) != seq_len:
            self.src_mask = subsequent_mask(seq_len).to(src.device)
        # Embed the tokens (`src`) and run them through the transformer encoder
        encoding = self.encoder(self.src_embed(src), self.src_mask)
        # Return the next-token logits; `None` is the (unused) state
        return self.generator(encoding), None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    The default configs can and will be over-ridden when we start the experiment
    """

    # Configurable transformer (populated by a config option)
    transformer: TransformerConfigs
    # The auto-regressive model built from the transformer components
    model: AutoregressiveModel
@option(Configs.model)
def autoregressive_model(c: Configs):
    """
    Initialize the auto-regressive model
    """
    model = AutoregressiveModel(c.transformer.src_embed, c.transformer.encoder, c.transformer.generator)
    # Move the model to the configured device
    return model.to(c.device)
@option(Configs.transformer)
def transformer_c(c: Configs):
    """
    Initialize the [configurable transformer](../configs.html) encoder for our autoregressive model.
    """
    conf = TransformerConfigs()
    # Source and target vocabularies are the same for auto-regression
    conf.n_src_vocab = c.n_tokens
    conf.n_tgt_vocab = c.n_tokens
    return conf
def main():
    """
    Run the GLU-variants experiment with a chosen FFN variant.
    """
    # Create experiment
    experiment.create(name="glu_variants")
    # Create configs
    conf = Configs()
    # Load configurations
    experiment.configs(conf,
                       # A dictionary of configurations to override
                       {'tokenizer': 'character',
                        'prompt_separator': '',
                        'prompt': 'It is ',
                        'text': 'tiny_shakespeare',

                        'optimizer.optimizer': 'Noam',
                        'optimizer.learning_rate': 1.,
                        'optimizer.d_model': 256,

                        'seq_len': 1024,
                        'epochs': 128,
                        'batch_size': 6,
                        'inner_iterations': 10,

                        # GLU Variant, one of GLU, Bilinear, ReGLU, GEGLU, SwiGLU
                        #
                        # These are defined in the [configurable FFN](../configs.html#FFN)
                        # implementation
                        'transformer.ffn.glu_variant': 'Bilinear',

                        # Transformer configurations
                        'transformer.d_model': 256,
                        'transformer.ffn.d_ff': 1024,
                        'transformer.n_heads': 8,
                        'transformer.n_layers': 6})

    # This is needed to initialize models
    conf.n_tokens = conf.text.n_tokens

    # Set models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))

    # Start the experiment
    with experiment.start():
        # `TrainValidConfigs.run`
        conf.run()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/glu_variants/simple.py | labml_nn/transformers/glu_variants/simple.py | """
---
title: Gated Linear Units and Variants
summary: >
Train an auto-regressive transformer with Gated Linear Units and variants
for the position-wise feedforward network (FFN).
---
# Gated Linear Units and Variants
This trains a simple [transformer](../../) model for auto-regression.
We try different variants for the [position-wise feedforward network](../feed_forward).
*This is a simpler implementation that doesn't use [`labml.configs`](experiment.html) module.
We decided to write a simpler implementation to make it easier for readers who are not familiar.*
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/transformers/glu_variants/simple.ipynb)
"""
import dataclasses
import torch
from labml import experiment, lab, tracker, monit, logger
from labml.logger import Text
from labml.utils.download import download_file
from labml_nn.experiments.nlp_autoregression import transpose_batch
from labml_nn.optimizers.noam import Noam
from labml_nn.transformers import Encoder, MultiHeadAttention
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.models import EmbeddingsWithPositionalEncoding, TransformerLayer
from labml_nn.transformers.utils import subsequent_mask
from torch import nn
from torch.utils.data import Dataset, DataLoader
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model
    """

    def __init__(self, src_embed: nn.Module, encoder: Encoder, generator: nn.Module):
        super().__init__()
        # Token embedding module
        self.src_embed = src_embed
        # Transformer based encoder
        self.encoder = encoder
        # Next token generation layer;
        # this gives logits of the next token
        self.generator = generator
        # Causal attention mask; this will be initialized on the first call
        self.src_mask = None

    def forward(self, src: torch.Tensor):
        # Create subsequent mask, so that the transformer can only pay attention to past tokens.
        if self.src_mask is None or self.src_mask.size(0) != len(src):
            self.src_mask = subsequent_mask(len(src)).to(src.device)
        # Embed the tokens (`src`) and run it through the transformer
        res = self.encoder(self.src_embed(src), self.src_mask)
        # Generate logits of the next token
        return self.generator(res)
@dataclasses.dataclass
class Configs:
    """
    ### Configurations
    """

    # Transformer embedding size
    d_model: int = 512
    # Length of a training sample (in characters)
    seq_len: int = 128
    # Batch size
    batch_size: int = 32
    # Number of transformer layers
    n_layers: int = 6
    # Number of attention heads
    n_heads: int = 8
    # Dropout probability
    dropout: float = 0.1
    # Number of features in the FFN hidden layer
    d_ff: int = 2048
    # FFN variant; one of `GLU`, `Bilinear`, `ReGLU`, `GEGLU`, `SwiGLU`, `ReLU`, `GELU`
    glu_variant: str = 'GLU'
    # Number of training epochs
    epochs: int = 5
    # Gradient clipping norm
    grad_norm_clip: float = 0.5
class TinyShakespeareDataset(Dataset):
    """
    ### Tiny Shakespeare Dataset

    Character-level dataset: sample `i` is the `seq_len` characters starting
    at offset `i`, and its target is the same window shifted right by one.
    """

    def __init__(self, seq_len: int):
        # Location of the text file
        path = lab.get_data_path() / 'tiny_shakespeare.txt'
        # Download the file
        download_file('https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt', path)
        # Read the downloaded file
        with open(str(path), 'r') as f:
            text = f.read()

        # The unique characters in the text
        vocabulary = list(set(text))
        # Character to id (integer) map
        self.stoi = {c: i for i, c in enumerate(vocabulary)}
        # Id to character map
        self.itos = {i: c for i, c in enumerate(vocabulary)}

        # Length of a training sample
        self.seq_len = seq_len
        # The whole text encoded as a tensor of ids
        self.data = self.text_to_i(text)

    def text_to_i(self, text: str):
        """
        Transform the text into a tensor of ids
        """
        ids = [self.stoi[c] for c in text]
        return torch.tensor(ids, dtype=torch.long)

    def __len__(self):
        """
        Number of samples in the dataset.

        *This will read the dataset `seq_len` times in a single epoch.*
        """
        return len(self.data) - self.seq_len - 1

    def __getitem__(self, idx):
        """
        Return an `(input, target)` pair; target is the input shifted by one character.
        """
        start = idx
        end = idx + self.seq_len
        return self.data[start:end], self.data[start + 1:end + 1]
class Trainer:
    """
    ## Trainer

    Builds the model, dataset and optimizer from `configs` and runs training.
    """

    def __init__(self, configs: Configs):
        # Get the device; use the first GPU if available
        self.device = torch.device('cpu')
        if torch.cuda.is_available():
            self.device = torch.device('cuda:0')
        # Initialize the dataset
        self.dataset = TinyShakespeareDataset(configs.seq_len)
        # Initialize the dataloader
        self.dataloader = DataLoader(self.dataset,
                                     batch_size=configs.batch_size,
                                     collate_fn=transpose_batch,
                                     shuffle=True)

        # FFN with Gated Linear Unit
        # $$FFN_{GLU}(x, W_1, V, W_2) = (\sigma(x W_1) \otimes x V) W_2$$
        if configs.glu_variant == 'GLU':
            ffn = FeedForward(configs.d_model, configs.d_ff, configs.dropout, nn.Sigmoid(), True, False, False, False)
        # FFN with Bilinear hidden layer
        # $$FFN_{Bilinear}(x, W_1, V, W_2) = (x W_1 \otimes x V) W_2$$
        elif configs.glu_variant == 'Bilinear':
            ffn = FeedForward(configs.d_model, configs.d_ff, configs.dropout, nn.Identity(), True, False, False, False)
        # FFN with ReLU gate
        # $$FFN_{ReGLU}(x, W_1, V, W_2) = (\max(0, x W_1) \otimes x V) W_2$$
        elif configs.glu_variant == 'ReGLU':
            ffn = FeedForward(configs.d_model, configs.d_ff, configs.dropout, nn.ReLU(), True, False, False, False)
        # FFN with GELU gate
        # $$FFN_{GEGLU}(x, W_1, V, W_2) = (\text{GELU}(x W_1) \otimes x V) W_2$$
        elif configs.glu_variant == 'GEGLU':
            ffn = FeedForward(configs.d_model, configs.d_ff, configs.dropout, nn.GELU(), True, False, False, False)
        # FFN with Swish gate
        # $$FFN_{SwiGLU}(x, W_1, V, W_2) = (\text{Swish}_1(x W_1) \otimes x V) W_2$$
        # where $\text{Swish}_\beta(x) = x \sigma(\beta x)$
        elif configs.glu_variant == 'SwiGLU':
            ffn = FeedForward(configs.d_model, configs.d_ff, configs.dropout, nn.SiLU(), True, False, False, False)
        # FFN with ReLU activation
        # $$FFN_{ReLU}(x, W_1, W_2, b_1, b_2) = \text{ReLU}(x W_1 + b_1) W_2 + b_2$$
        elif configs.glu_variant == 'ReLU':
            ffn = FeedForward(configs.d_model, configs.d_ff, configs.dropout, nn.ReLU())
        # FFN with GELU activation
        # $$FFN_{GELU}(x, W_1, W_2, b_1, b_2) = \text{GELU}(x W_1 + b_1) W_2 + b_2$$
        elif configs.glu_variant == 'GELU':
            ffn = FeedForward(configs.d_model, configs.d_ff, configs.dropout, nn.GELU())
        # Unknown variant name
        else:
            raise ValueError(f'Unknown variant {configs.glu_variant}')

        # Number of different characters
        n_chars = len(self.dataset.stoi)

        # Initialize [Multi-Head Attention module](../mha.html)
        mha = MultiHeadAttention(configs.n_heads, configs.d_model, configs.dropout)
        # Initialize the [Transformer Block](../models.html#TransformerLayer)
        transformer_layer = TransformerLayer(d_model=configs.d_model, self_attn=mha, src_attn=None,
                                             feed_forward=ffn, dropout_prob=configs.dropout)
        # Initialize the model with an
        # [embedding layer](../models.html#EmbeddingsWithPositionalEncoding)
        # (with fixed positional encoding)
        # [transformer encoder](../models.html#Encoder) and
        # a linear layer to generate logits.
        self.model = AutoregressiveModel(EmbeddingsWithPositionalEncoding(configs.d_model, n_chars),
                                         Encoder(transformer_layer, configs.n_layers),
                                         nn.Linear(configs.d_model, n_chars))

        # Move the model to the current device
        self.model.to(self.device)

        # Initialize [Noam optimizer](../../optimizers/noam.html)
        self.optimizer = Noam(self.model.parameters(), lr=1.0, warmup=2_000, d_model=configs.d_model)

        # Cross-entropy loss
        self.loss_func = nn.CrossEntropyLoss()
        # Number of training epochs;
        # *note that our dataset definition repeats the data `seq_len` times in a single epoch*
        self.epochs = configs.epochs
        # Gradient clipping norm
        self.grad_norm_clip = configs.grad_norm_clip

        # Set tracker configurations
        tracker.set_scalar("loss.*", True)

    def sample(self):
        """
        ### Sampling function to generate samples periodically while training
        """
        # Starting prompt
        prompt = 'It is'
        # Collect output for printing
        log = [(prompt, Text.subtle)]
        # Sample 25 tokens
        for i in monit.iterate('Sample', 25):
            # Tokenize the prompt and add a batch dimension of size one
            data = self.dataset.text_to_i(prompt).unsqueeze(-1)
            data = data.to(self.device)
            # Get the model output
            output = self.model(data)
            # Get the model prediction (greedy)
            output = output.argmax(dim=-1).squeeze()
            # Add the prediction to prompt
            prompt += self.dataset.itos[output[-1].item()]
            # Add the prediction for logging
            log += [(self.dataset.itos[output[-1].item()], Text.value)]

        # Print the sampled output
        logger.log(log)

    def train(self):
        """
        ### Train the model
        """
        # Loop for the given number of epochs
        for _ in monit.loop(self.epochs):
            # Iterate over the minibatches
            for i, batch in monit.enum('Train', self.dataloader):
                # Move data to the device
                data, target = batch[0].to(self.device), batch[1].to(self.device)

                # Set tracker step, as the number of characters trained on
                tracker.add_global_step(data.shape[0] * data.shape[1])

                # Set model state to training
                self.model.train()
                # Evaluate the model
                output = self.model(data)

                # Calculate loss
                loss = self.loss_func(output.view(-1, output.shape[-1]), target.view(-1))
                # Log the loss
                tracker.add("loss.train", loss)

                # Calculate gradients
                loss.backward()
                # Clip gradients
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
                # Take optimizer step
                self.optimizer.step()
                # Log the model parameters and gradients every 100 steps
                if (i + 1) % 100 == 0:
                    tracker.add('model', self.model)
                # Clear the gradients
                self.optimizer.zero_grad()

                # Generate a sample every 100 steps
                if (i + 1) % 100 == 0:
                    self.model.eval()
                    with torch.no_grad():
                        self.sample()

                # Save the tracked metrics every 10 steps
                if (i + 1) % 10 == 0:
                    tracker.save()
def main():
    """
    Create the experiment, build the trainer and run training.
    """
    # Create experiment
    experiment.create(name="glu_variants")
    # Create configs
    configs = Configs()
    # Load configurations
    experiment.configs(dataclasses.asdict(configs))

    # Create trainer
    trainer = Trainer(configs)
    # Set models for training and loading
    experiment.add_pytorch_models({'model': trainer.model})

    # Start the experiment
    with experiment.start():
        # Train the model
        trainer.train()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/transformers/glu_variants/__init__.py | labml_nn/transformers/glu_variants/__init__.py | """
---
title: Gated Linear Units and Variants
summary: >
Train an auto-regressive transformer with Gated Linear Units and variants
for the position-wise feedforward network (FFN).
---
# Gated Linear Units and Variants
* [Experiment that uses `labml.configs`](experiment.html)
* [Simpler version from scratch](simple.html)
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/graphs/__init__.py | labml_nn/graphs/__init__.py | """
---
title: Graph Neural Networks
summary: >
A set of PyTorch implementations/tutorials related to graph neural networks
---
# Graph Neural Networks
* [Graph Attention Networks (GAT)](gat/index.html)
* [Graph Attention Networks v2 (GATv2)](gatv2/index.html)
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/graphs/gat/experiment.py | labml_nn/graphs/gat/experiment.py | """
---
title: Train a Graph Attention Network (GAT) on Cora dataset
summary: >
This trains is a Graph Attention Network (GAT) on Cora dataset
---
# Train a Graph Attention Network (GAT) on Cora dataset
"""
from typing import Dict
import numpy as np
import torch
from torch import nn
from labml import lab, monit, tracker, experiment
from labml.configs import BaseConfigs, option, calculate
from labml.utils import download
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.graphs.gat import GraphAttentionLayer
from labml_nn.optimizers.configs import OptimizerConfigs
class CoraDataset:
    """
    ## [Cora Dataset](https://linqs.soe.ucsc.edu/data)

    Cora dataset is a dataset of research papers.
    For each paper we are given a binary feature vector that indicates the presence of words.
    Each paper is classified into one of 7 classes.
    The dataset also has the citation network.

    The papers are the nodes of the graph and the edges are the citations.

    The task is to classify the nodes to the 7 classes with feature vectors and
    citation network as input.
    """

    # Labels for each node
    labels: torch.Tensor
    # Set of class names and a unique integer index for each of them
    classes: Dict[str, int]
    # Feature vectors for all nodes
    features: torch.Tensor
    # Adjacency matrix with the edge information.
    # `adj_mat[i][j]` is `True` if there is an edge from `i` to `j`.
    adj_mat: torch.Tensor

    @staticmethod
    def _download():
        """
        Download and extract the dataset if it is not already available locally
        """
        if not (lab.get_data_path() / 'cora').exists():
            download.download_file('https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz',
                                   lab.get_data_path() / 'cora.tgz')
            download.extract_tar(lab.get_data_path() / 'cora.tgz', lab.get_data_path())

    def __init__(self, include_edges: bool = True):
        """
        Load the dataset

        * `include_edges` controls whether the citation edges are loaded
        """
        # Whether to include edges.
        # This is to test how much accuracy is lost if we ignore the citation network.
        self.include_edges = include_edges

        # Download dataset
        self._download()

        # Read the paper ids, feature vectors, and labels
        with monit.section('Read content file'):
            content = np.genfromtxt(str(lab.get_data_path() / 'cora/cora.content'), dtype=np.dtype(str))
        # Load the citations, it's a list of pairs of integers.
        with monit.section('Read citations file'):
            citations = np.genfromtxt(str(lab.get_data_path() / 'cora/cora.cites'), dtype=np.int32)

        # Get the feature vectors
        features = torch.tensor(np.array(content[:, 1:-1], dtype=np.float32))
        # Normalize the feature vectors so each row sums to one
        self.features = features / features.sum(dim=1, keepdim=True)

        # Get the class names and assign a unique integer to each of them
        self.classes = {s: i for i, s in enumerate(set(content[:, -1]))}
        # Get the labels as those integers
        self.labels = torch.tensor([self.classes[i] for i in content[:, -1]], dtype=torch.long)

        # Get the paper ids
        paper_ids = np.array(content[:, 0], dtype=np.int32)
        # Map of paper id to index
        ids_to_idx = {id_: i for i, id_ in enumerate(paper_ids)}

        # Empty adjacency matrix - an identity matrix (each node is its own neighbor)
        self.adj_mat = torch.eye(len(self.labels), dtype=torch.bool)

        # Mark the citations in the adjacency matrix
        if self.include_edges:
            for e in citations:
                # The pair of paper indexes
                e1, e2 = ids_to_idx[e[0]], ids_to_idx[e[1]]
                # We build a symmetrical graph, where if paper $i$ referenced
                # paper $j$ we place an edge from $i$ to $j$ as well as an edge
                # from $j$ to $i$.
                self.adj_mat[e1][e2] = True
                self.adj_mat[e2][e1] = True
class GAT(nn.Module):
    """
    ## Graph Attention Network (GAT)

    A two layer network built from [graph attention layers](index.html):
    the first layer concatenates its heads, the second averages a single head
    to produce per-class logits.
    """

    def __init__(self, in_features: int, n_hidden: int, n_classes: int, n_heads: int, dropout: float):
        """
        * `in_features` is the number of features per node
        * `n_hidden` is the number of features in the first graph attention layer
        * `n_classes` is the number of classes
        * `n_heads` is the number of heads in the graph attention layers
        * `dropout` is the dropout probability
        """
        super().__init__()

        # First graph attention layer where we concatenate the heads
        self.layer1 = GraphAttentionLayer(in_features, n_hidden, n_heads, is_concat=True, dropout=dropout)
        # Activation function after first graph attention layer
        self.activation = nn.ELU()
        # Final graph attention layer where we average the heads
        self.output = GraphAttentionLayer(n_hidden, n_classes, 1, is_concat=False, dropout=dropout)
        # Dropout
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor, adj_mat: torch.Tensor):
        """
        * `x` is the features vectors of shape `[n_nodes, in_features]`
        * `adj_mat` is the adjacency matrix of the form
         `[n_nodes, n_nodes, n_heads]` or `[n_nodes, n_nodes, 1]`
        """
        # Dropout on the input, then the first attention layer with its activation
        h = self.activation(self.layer1(self.dropout(x), adj_mat))
        # Dropout again, then the output layer (no activation) for logits
        return self.output(self.dropout(h), adj_mat)
def accuracy(output: torch.Tensor, labels: torch.Tensor):
    """
    Fraction of predictions (arg-max of `output`) that match `labels`.
    """
    predictions = output.argmax(dim=-1)
    correct = (predictions == labels).sum().item()
    return correct / len(labels)
class Configs(BaseConfigs):
    """
    ## Configurations
    """

    # Model
    model: GAT
    # Number of nodes to train on
    training_samples: int = 500
    # Number of features per node in the input
    in_features: int
    # Number of features in the first graph attention layer
    n_hidden: int = 64
    # Number of heads
    n_heads: int = 8
    # Number of classes for classification
    n_classes: int
    # Dropout probability
    dropout: float = 0.6
    # Whether to include the citation network
    include_edges: bool = True
    # Dataset
    dataset: CoraDataset
    # Number of training iterations
    epochs: int = 1_000
    # Loss function
    loss_func = nn.CrossEntropyLoss()
    # Device to train on
    #
    # This creates configs for device, so that
    # we can change the device by passing a config value
    device: torch.device = DeviceConfigs()
    # Optimizer
    optimizer: torch.optim.Adam

    def run(self):
        """
        ### Training loop

        We do full batch training since the dataset is small.
        If we were to sample and train we will have to sample a set of
        nodes for each training step along with the edges that span
        across those selected nodes.
        """
        # Move the feature vectors to the device
        features = self.dataset.features.to(self.device)
        # Move the labels to the device
        labels = self.dataset.labels.to(self.device)
        # Move the adjacency matrix to the device
        edges_adj = self.dataset.adj_mat.to(self.device)
        # Add an empty third dimension for the heads
        edges_adj = edges_adj.unsqueeze(-1)

        # Random indexes for the train/validation split
        idx_rand = torch.randperm(len(labels))
        # Nodes for training
        idx_train = idx_rand[:self.training_samples]
        # Nodes for validation
        idx_valid = idx_rand[self.training_samples:]

        # Training loop
        for epoch in monit.loop(self.epochs):
            # Set the model to training mode
            self.model.train()
            # Make all the gradients zero
            self.optimizer.zero_grad()
            # Evaluate the model on the full graph
            output = self.model(features, edges_adj)
            # Get the loss for training nodes only
            loss = self.loss_func(output[idx_train], labels[idx_train])
            # Calculate gradients
            loss.backward()
            # Take optimization step
            self.optimizer.step()
            # Log the loss
            tracker.add('loss.train', loss)
            # Log the accuracy
            tracker.add('accuracy.train', accuracy(output[idx_train], labels[idx_train]))

            # Set mode to evaluation mode for validation
            self.model.eval()

            # No need to compute gradients
            with torch.no_grad():
                # Evaluate the model again
                output = self.model(features, edges_adj)
                # Calculate the loss for validation nodes
                loss = self.loss_func(output[idx_valid], labels[idx_valid])
                # Log the loss
                tracker.add('loss.valid', loss)
                # Log the accuracy
                tracker.add('accuracy.valid', accuracy(output[idx_valid], labels[idx_valid]))

            # Save logs
            tracker.save()
@option(Configs.dataset)
def cora_dataset(c: Configs):
    """
    Create Cora dataset
    """
    return CoraDataset(c.include_edges)


# Get the number of classes from the dataset
calculate(Configs.n_classes, lambda c: len(c.dataset.classes))
# Number of features in the input, from the dataset feature vectors
calculate(Configs.in_features, lambda c: c.dataset.features.shape[1])
@option(Configs.model)
def gat_model(c: Configs):
    """
    Create GAT model
    """
    model = GAT(c.in_features, c.n_hidden, c.n_classes, c.n_heads, c.dropout)
    # Move it to the configured device
    return model.to(c.device)
@option(Configs.optimizer)
def _optimizer(c: Configs):
    """
    Create configurable optimizer
    """
    opt_conf = OptimizerConfigs()
    # Optimize the parameters of the GAT model
    opt_conf.parameters = c.model.parameters()
    return opt_conf
def main():
    """
    Run the GAT training experiment on the Cora dataset.
    """
    # Create configurations
    conf = Configs()
    # Create an experiment
    experiment.create(name='gat')
    # Calculate configurations.
    experiment.configs(conf, {
        # Adam optimizer
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 5e-3,
        'optimizer.weight_decay': 5e-4,
    })

    # Start and watch the experiment
    with experiment.start():
        # Run the training
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/graphs/gat/__init__.py | labml_nn/graphs/gat/__init__.py | """
---
title: Graph Attention Networks (GAT)
summary: >
A PyTorch implementation/tutorial of Graph Attention Networks.
---
# Graph Attention Networks (GAT)
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Graph Attention Networks](https://arxiv.org/abs/1710.10903).
GATs work on graph data.
A graph consists of nodes and edges connecting nodes.
For example, in Cora dataset the nodes are research papers and the edges are citations that
connect the papers.
GAT uses masked self-attention, kind of similar to [transformers](../../transformers/mha.html).
GAT consists of graph attention layers stacked on top of each other.
Each graph attention layer gets node embeddings as inputs and outputs transformed embeddings.
The node embeddings pay attention to the embeddings of other nodes it's connected to.
The details of graph attention layers are included alongside the implementation.
Here is [the training code](experiment.html) for training
a two-layer GAT on Cora dataset.
"""
import torch
from torch import nn
class GraphAttentionLayer(nn.Module):
"""
## Graph attention layer
This is a single graph attention layer.
A GAT is made up of multiple such layers.
It takes
$$\mathbf{h} = \{ \overrightarrow{h_1}, \overrightarrow{h_2}, \dots, \overrightarrow{h_N} \}$$,
where $\overrightarrow{h_i} \in \mathbb{R}^F$ as input
and outputs
$$\mathbf{h'} = \{ \overrightarrow{h'_1}, \overrightarrow{h'_2}, \dots, \overrightarrow{h'_N} \}$$,
where $\overrightarrow{h'_i} \in \mathbb{R}^{F'}$.
"""
def __init__(self, in_features: int, out_features: int, n_heads: int,
             is_concat: bool = True,
             dropout: float = 0.6,
             leaky_relu_negative_slope: float = 0.2):
    """
    * `in_features`, $F$, is the number of input features per node
    * `out_features`, $F'$, is the number of output features per node
    * `n_heads`, $K$, is the number of attention heads
    * `is_concat` whether the multi-head results should be concatenated or averaged
    * `dropout` is the dropout probability
    * `leaky_relu_negative_slope` is the negative slope for leaky relu activation
    """
    super().__init__()

    self.is_concat = is_concat
    self.n_heads = n_heads

    # Calculate the number of dimensions per head
    if is_concat:
        # If we are concatenating the multiple heads,
        # the output size must split evenly among them
        assert out_features % n_heads == 0
        self.n_hidden = out_features // n_heads
    else:
        # If we are averaging the multiple heads,
        # each head produces the full output size
        self.n_hidden = out_features

    # Linear layer for initial transformation;
    # i.e. to transform the node embeddings before self-attention
    self.linear = nn.Linear(in_features, self.n_hidden * n_heads, bias=False)
    # Linear layer to compute attention score $e_{ij}$
    self.attn = nn.Linear(self.n_hidden * 2, 1, bias=False)
    # The activation for attention score $e_{ij}$
    self.activation = nn.LeakyReLU(negative_slope=leaky_relu_negative_slope)
    # Softmax to compute attention $\alpha_{ij}$
    self.softmax = nn.Softmax(dim=1)
    # Dropout layer to be applied for attention
    self.dropout = nn.Dropout(dropout)
def forward(self, h: torch.Tensor, adj_mat: torch.Tensor):
"""
* `h`, $\mathbf{h}$ is the input node embeddings of shape `[n_nodes, in_features]`.
* `adj_mat` is the adjacency matrix of shape `[n_nodes, n_nodes, n_heads]`.
We use shape `[n_nodes, n_nodes, 1]` since the adjacency is the same for each head.
Adjacency matrix represent the edges (or connections) among nodes.
`adj_mat[i][j]` is `True` if there is an edge from node `i` to node `j`.
"""
# Number of nodes
n_nodes = h.shape[0]
# The initial transformation,
# $$\overrightarrow{g^k_i} = \mathbf{W}^k \overrightarrow{h_i}$$
# for each head.
# We do single linear transformation and then split it up for each head.
g = self.linear(h).view(n_nodes, self.n_heads, self.n_hidden)
# #### Calculate attention score
#
# We calculate these for each head $k$. *We have omitted $\cdot^k$ for simplicity*.
#
# $$e_{ij} = a(\mathbf{W} \overrightarrow{h_i}, \mathbf{W} \overrightarrow{h_j}) =
# a(\overrightarrow{g_i}, \overrightarrow{g_j})$$
#
# $e_{ij}$ is the attention score (importance) from node $j$ to node $i$.
# We calculate this for each head.
#
# $a$ is the attention mechanism, that calculates the attention score.
# The paper concatenates
# $\overrightarrow{g_i}$, $\overrightarrow{g_j}$
# and does a linear transformation with a weight vector $\mathbf{a} \in \mathbb{R}^{2 F'}$
# followed by a $\text{LeakyReLU}$.
#
# $$e_{ij} = \text{LeakyReLU} \Big(
# \mathbf{a}^\top \Big[
# \overrightarrow{g_i} \Vert \overrightarrow{g_j}
# \Big] \Big)$$
# First we calculate
# $\Big[\overrightarrow{g_i} \Vert \overrightarrow{g_j} \Big]$
# for all pairs of $i, j$.
#
# `g_repeat` gets
# $$\{\overrightarrow{g_1}, \overrightarrow{g_2}, \dots, \overrightarrow{g_N},
# \overrightarrow{g_1}, \overrightarrow{g_2}, \dots, \overrightarrow{g_N}, ...\}$$
# where each node embedding is repeated `n_nodes` times.
g_repeat = g.repeat(n_nodes, 1, 1)
# `g_repeat_interleave` gets
# $$\{\overrightarrow{g_1}, \overrightarrow{g_1}, \dots, \overrightarrow{g_1},
# \overrightarrow{g_2}, \overrightarrow{g_2}, \dots, \overrightarrow{g_2}, ...\}$$
# where each node embedding is repeated `n_nodes` times.
g_repeat_interleave = g.repeat_interleave(n_nodes, dim=0)
# Now we concatenate to get
# $$\{\overrightarrow{g_1} \Vert \overrightarrow{g_1},
# \overrightarrow{g_1} \Vert \overrightarrow{g_2},
# \dots, \overrightarrow{g_1} \Vert \overrightarrow{g_N},
# \overrightarrow{g_2} \Vert \overrightarrow{g_1},
# \overrightarrow{g_2} \Vert \overrightarrow{g_2},
# \dots, \overrightarrow{g_2} \Vert \overrightarrow{g_N}, ...\}$$
g_concat = torch.cat([g_repeat_interleave, g_repeat], dim=-1)
# Reshape so that `g_concat[i, j]` is $\overrightarrow{g_i} \Vert \overrightarrow{g_j}$
g_concat = g_concat.view(n_nodes, n_nodes, self.n_heads, 2 * self.n_hidden)
# Calculate
# $$e_{ij} = \text{LeakyReLU} \Big(
# \mathbf{a}^\top \Big[
# \overrightarrow{g_i} \Vert \overrightarrow{g_j}
# \Big] \Big)$$
# `e` is of shape `[n_nodes, n_nodes, n_heads, 1]`
e = self.activation(self.attn(g_concat))
# Remove the last dimension of size `1`
e = e.squeeze(-1)
# The adjacency matrix should have shape
# `[n_nodes, n_nodes, n_heads]` or`[n_nodes, n_nodes, 1]`
assert adj_mat.shape[0] == 1 or adj_mat.shape[0] == n_nodes
assert adj_mat.shape[1] == 1 or adj_mat.shape[1] == n_nodes
assert adj_mat.shape[2] == 1 or adj_mat.shape[2] == self.n_heads
# Mask $e_{ij}$ based on adjacency matrix.
# $e_{ij}$ is set to $- \infty$ if there is no edge from $i$ to $j$.
e = e.masked_fill(adj_mat == 0, float('-inf'))
# We then normalize attention scores (or coefficients)
# $$\alpha_{ij} = \text{softmax}_j(e_{ij}) =
# \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})}$$
#
# where $\mathcal{N}_i$ is the set of nodes connected to $i$.
#
# We do this by setting unconnected $e_{ij}$ to $- \infty$ which
# makes $\exp(e_{ij}) \sim 0$ for unconnected pairs.
a = self.softmax(e)
# Apply dropout regularization
a = self.dropout(a)
# Calculate final output for each head
# $$\overrightarrow{h'^k_i} = \sum_{j \in \mathcal{N}_i} \alpha^k_{ij} \overrightarrow{g^k_j}$$
#
# *Note:* The paper includes the final activation $\sigma$ in $\overrightarrow{h_i}$
# We have omitted this from the Graph Attention Layer implementation
# and use it on the GAT model to match with how other PyTorch modules are defined -
# activation as a separate layer.
attn_res = torch.einsum('ijh,jhf->ihf', a, g)
# Concatenate the heads
if self.is_concat:
# $$\overrightarrow{h'_i} = \Bigg\Vert_{k=1}^{K} \overrightarrow{h'^k_i}$$
return attn_res.reshape(n_nodes, self.n_heads * self.n_hidden)
# Take the mean of the heads
else:
# $$\overrightarrow{h'_i} = \frac{1}{K} \sum_{k=1}^{K} \overrightarrow{h'^k_i}$$
return attn_res.mean(dim=1)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/graphs/gatv2/experiment.py | labml_nn/graphs/gatv2/experiment.py | """
---
title: Train a Graph Attention Network v2 (GATv2) on Cora dataset
summary: >
This trains a Graph Attention Network v2 (GATv2) on Cora dataset
---
# Train a Graph Attention Network v2 (GATv2) on Cora dataset
"""
import torch
from torch import nn
from labml import experiment
from labml.configs import option
from labml_nn.graphs.gat.experiment import Configs as GATConfigs
from labml_nn.graphs.gatv2 import GraphAttentionV2Layer
class GATv2(nn.Module):
    """
    ## Graph Attention Network v2 (GATv2)

    This graph attention network has two [graph attention layers](index.html).
    """

    def __init__(self, in_features: int, n_hidden: int, n_classes: int, n_heads: int, dropout: float,
                 share_weights: bool = True):
        """
        * `in_features` is the number of features per node
        * `n_hidden` is the number of features in the first graph attention layer
        * `n_classes` is the number of classes
        * `n_heads` is the number of heads in the graph attention layers
        * `dropout` is the dropout probability
        * `share_weights` if set to True, the same matrix will be applied to the source and the target node of every edge
        """
        super().__init__()

        # First graph attention layer; the outputs of its heads get concatenated
        self.layer1 = GraphAttentionV2Layer(in_features, n_hidden, n_heads,
                                            is_concat=True, dropout=dropout, share_weights=share_weights)
        # Non-linearity applied between the two attention layers
        self.activation = nn.ELU()
        # Final single-head graph attention layer producing the class logits
        # (heads are averaged rather than concatenated)
        self.output = GraphAttentionV2Layer(n_hidden, n_classes, 1,
                                            is_concat=False, dropout=dropout, share_weights=share_weights)
        # Dropout applied to the input and between the layers
        self.dropout = nn.Dropout(dropout)

    def forward(self, x: torch.Tensor, adj_mat: torch.Tensor):
        """
        * `x` is the features vectors of shape `[n_nodes, in_features]`
        * `adj_mat` is the adjacency matrix of the form
         `[n_nodes, n_nodes, n_heads]` or `[n_nodes, n_nodes, 1]`
        """
        # First attention layer over the dropout-regularized input features
        hidden = self.layer1(self.dropout(x), adj_mat)
        # Non-linearity followed by dropout
        hidden = self.dropout(self.activation(hidden))
        # Output layer (without activation) for logits
        return self.output(hidden, adj_mat)
class Configs(GATConfigs):
    """
    ## Configurations

    Since the experiment is same as [GAT experiment](../gat/experiment.html) but with
    [GATv2 model](index.html) we extend the same configs and change the model.
    """

    # Whether to share weights for source and target nodes of edges
    share_weights: bool = False
    # Set the model; the string names the config option (`gat_v2_model`)
    # that builds the model lazily
    model: GATv2 = 'gat_v2_model'
@option(Configs.model)
def gat_v2_model(c: Configs):
    """
    Create a GATv2 model and move it to the configured device
    """
    model = GATv2(c.in_features, c.n_hidden, c.n_classes, c.n_heads, c.dropout, c.share_weights)
    return model.to(c.device)
def main():
    """
    Set up and run the GATv2 Cora experiment
    """
    # Create configurations
    configurations = Configs()
    # Create an experiment
    experiment.create(name='gatv2')
    # Calculate configurations: Adam with weight decay and a higher dropout
    experiment.configs(configurations, {
        # Adam optimizer
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 5e-3,
        'optimizer.weight_decay': 5e-4,

        'dropout': 0.7,
    })

    # Start and watch the experiment
    with experiment.start():
        # Run the training
        configurations.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/graphs/gatv2/__init__.py | labml_nn/graphs/gatv2/__init__.py | """
---
title: Graph Attention Networks v2 (GATv2)
summary: >
A PyTorch implementation/tutorial of Graph Attention Networks v2.
---
# Graph Attention Networks v2 (GATv2)
This is a [PyTorch](https://pytorch.org) implementation of the GATv2 operator from the paper
[How Attentive are Graph Attention Networks?](https://arxiv.org/abs/2105.14491).
GATv2s work on graph data similar to [GAT](../gat/index.html).
A graph consists of nodes and edges connecting nodes.
For example, in Cora dataset the nodes are research papers and the edges are citations that
connect the papers.
The GATv2 operator fixes the static attention problem of the standard [GAT](../gat/index.html).
Static attention is when the attention to the key nodes has the same rank (order) for any query node.
[GAT](../gat/index.html) computes attention from query node $i$ to key node $j$ as,
\begin{align}
e_{ij} &= \text{LeakyReLU} \Big(\mathbf{a}^\top \Big[
\mathbf{W} \overrightarrow{h_i} \Vert \mathbf{W} \overrightarrow{h_j}
\Big] \Big) \\
&=
\text{LeakyReLU} \Big(\mathbf{a}_1^\top \mathbf{W} \overrightarrow{h_i} +
\mathbf{a}_2^\top \mathbf{W} \overrightarrow{h_j}
\Big)
\end{align}
Note that for any query node $i$, the attention rank ($argsort$) of keys depends only
on $\mathbf{a}_2^\top \mathbf{W} \overrightarrow{h_j}$.
Therefore the attention rank of keys remains the same (*static*) for all queries.
GATv2 allows dynamic attention by changing the attention mechanism,
\begin{align}
e_{ij} &= \mathbf{a}^\top \text{LeakyReLU} \Big( \mathbf{W} \Big[
\overrightarrow{h_i} \Vert \overrightarrow{h_j}
\Big] \Big) \\
&= \mathbf{a}^\top \text{LeakyReLU} \Big(
\mathbf{W}_l \overrightarrow{h_i} + \mathbf{W}_r \overrightarrow{h_j}
\Big)
\end{align}
The paper shows that GAT's static attention mechanism fails on some graph problems
with a synthetic dictionary lookup dataset.
It's a fully connected bipartite graph where one set of nodes (query nodes)
have a key associated with it
and the other set of nodes have both a key and a value associated with it.
The goal is to predict the values of query nodes.
GAT fails on this task because of its limited static attention.
Here is [the training code](experiment.html) for training
a two-layer GATv2 on Cora dataset.
"""
import torch
from torch import nn
class GraphAttentionV2Layer(nn.Module):
    """
    ## Graph attention v2 layer

    This is a single graph attention v2 layer.
    A GATv2 is made up of multiple such layers.

    It takes
    $$\mathbf{h} = \{ \overrightarrow{h_1}, \overrightarrow{h_2}, \dots, \overrightarrow{h_N} \}$$,
    where $\overrightarrow{h_i} \in \mathbb{R}^F$ as input
    and outputs
    $$\mathbf{h'} = \{ \overrightarrow{h'_1}, \overrightarrow{h'_2}, \dots, \overrightarrow{h'_N} \}$$,
    where $\overrightarrow{h'_i} \in \mathbb{R}^{F'}$.
    """

    def __init__(self, in_features: int, out_features: int, n_heads: int,
                 is_concat: bool = True,
                 dropout: float = 0.6,
                 leaky_relu_negative_slope: float = 0.2,
                 share_weights: bool = False):
        """
        * `in_features`, $F$, is the number of input features per node
        * `out_features`, $F'$, is the number of output features per node
        * `n_heads`, $K$, is the number of attention heads
        * `is_concat` whether the multi-head results should be concatenated or averaged
        * `dropout` is the dropout probability
        * `leaky_relu_negative_slope` is the negative slope for leaky relu activation
        * `share_weights` if set to `True`, the same matrix will be applied to the source and the target node of every edge
        """
        super().__init__()

        self.is_concat = is_concat
        self.n_heads = n_heads
        self.share_weights = share_weights

        # Calculate the number of dimensions per head
        if is_concat:
            # `out_features` must be divisible by the number of heads when concatenating
            assert out_features % n_heads == 0
            # If we are concatenating the multiple heads
            self.n_hidden = out_features // n_heads
        else:
            # If we are averaging the multiple heads
            self.n_hidden = out_features

        # Linear layer for initial source transformation;
        # i.e. to transform the source node embeddings before self-attention
        self.linear_l = nn.Linear(in_features, self.n_hidden * n_heads, bias=False)
        # If `share_weights` is `True` the same linear layer is used for the target nodes
        if share_weights:
            self.linear_r = self.linear_l
        else:
            self.linear_r = nn.Linear(in_features, self.n_hidden * n_heads, bias=False)
        # Linear layer to compute attention score $e_{ij}$
        self.attn = nn.Linear(self.n_hidden, 1, bias=False)
        # The activation for attention score $e_{ij}$
        self.activation = nn.LeakyReLU(negative_slope=leaky_relu_negative_slope)
        # Softmax to compute attention $\alpha_{ij}$;
        # `dim=1` normalizes over the source-node dimension $j$
        self.softmax = nn.Softmax(dim=1)
        # Dropout layer to be applied for attention
        self.dropout = nn.Dropout(dropout)

    def forward(self, h: torch.Tensor, adj_mat: torch.Tensor):
        """
        * `h`, $\mathbf{h}$ is the input node embeddings of shape `[n_nodes, in_features]`.
        * `adj_mat` is the adjacency matrix of shape `[n_nodes, n_nodes, n_heads]`.
        We use shape `[n_nodes, n_nodes, 1]` since the adjacency is the same for each head.
        Adjacency matrix represent the edges (or connections) among nodes.
        `adj_mat[i][j]` is `True` if there is an edge from node `i` to node `j`.
        """
        # Number of nodes
        n_nodes = h.shape[0]
        # The initial transformations,
        # $$\overrightarrow{{g_l}^k_i} = \mathbf{W_l}^k \overrightarrow{h_i}$$
        # $$\overrightarrow{{g_r}^k_i} = \mathbf{W_r}^k \overrightarrow{h_i}$$
        # for each head.
        # We do two linear transformations and then split it up for each head.
        g_l = self.linear_l(h).view(n_nodes, self.n_heads, self.n_hidden)
        g_r = self.linear_r(h).view(n_nodes, self.n_heads, self.n_hidden)

        # #### Calculate attention score
        #
        # We calculate these for each head $k$. *We have omitted $\cdot^k$ for simplicity*.
        #
        # $$e_{ij} = a(\mathbf{W_l} \overrightarrow{h_i}, \mathbf{W_r} \overrightarrow{h_j}) =
        # a(\overrightarrow{{g_l}_i}, \overrightarrow{{g_r}_j})$$
        #
        # $e_{ij}$ is the attention score (importance) from node $j$ to node $i$.
        # We calculate this for each head.
        #
        # $a$ is the attention mechanism, that calculates the attention score.
        # The paper sums
        # $\overrightarrow{{g_l}_i}$, $\overrightarrow{{g_r}_j}$
        # followed by a $\text{LeakyReLU}$
        # and does a linear transformation with a weight vector $\mathbf{a} \in \mathbb{R}^{F'}$
        #
        #
        # $$e_{ij} = \mathbf{a}^\top \text{LeakyReLU} \Big(
        # \Big[
        # \overrightarrow{{g_l}_i} + \overrightarrow{{g_r}_j}
        # \Big] \Big)$$
        # Note: The paper describes $e_{ij}$ as
        # $$e_{ij} = \mathbf{a}^\top \text{LeakyReLU} \Big( \mathbf{W}
        # \Big[
        # \overrightarrow{h_i} \Vert \overrightarrow{h_j}
        # \Big] \Big)$$
        # which is equivalent to the definition we use here.

        # First we calculate
        # $\Big[\overrightarrow{{g_l}_i} + \overrightarrow{{g_r}_j} \Big]$
        # for all pairs of $i, j$.
        #
        # `g_l_repeat` gets
        # $$\{\overrightarrow{{g_l}_1}, \overrightarrow{{g_l}_2}, \dots, \overrightarrow{{g_l}_N},
        # \overrightarrow{{g_l}_1}, \overrightarrow{{g_l}_2}, \dots, \overrightarrow{{g_l}_N}, ...\}$$
        # where each node embedding is repeated `n_nodes` times.
        # (`repeat` tiles the whole tensor, as opposed to `repeat_interleave` below.)
        g_l_repeat = g_l.repeat(n_nodes, 1, 1)
        # `g_r_repeat_interleave` gets
        # $$\{\overrightarrow{{g_r}_1}, \overrightarrow{{g_r}_1}, \dots, \overrightarrow{{g_r}_1},
        # \overrightarrow{{g_r}_2}, \overrightarrow{{g_r}_2}, \dots, \overrightarrow{{g_r}_2}, ...\}$$
        # where each node embedding is repeated `n_nodes` times.
        g_r_repeat_interleave = g_r.repeat_interleave(n_nodes, dim=0)
        # Now we add the two tensors to get
        # $$\{\overrightarrow{{g_l}_1} + \overrightarrow{{g_r}_1},
        # \overrightarrow{{g_l}_1} + \overrightarrow{{g_r}_2},
        # \dots, \overrightarrow{{g_l}_1}  + \overrightarrow{{g_r}_N},
        # \overrightarrow{{g_l}_2} + \overrightarrow{{g_r}_1},
        # \overrightarrow{{g_l}_2} + \overrightarrow{{g_r}_2},
        # \dots, \overrightarrow{{g_l}_2}  + \overrightarrow{{g_r}_N}, ...\}$$
        g_sum = g_l_repeat + g_r_repeat_interleave
        # Reshape so that `g_sum[i, j]` is $\overrightarrow{{g_l}_i} + \overrightarrow{{g_r}_j}$
        g_sum = g_sum.view(n_nodes, n_nodes, self.n_heads, self.n_hidden)

        # Calculate
        # $$e_{ij} = \mathbf{a}^\top \text{LeakyReLU} \Big(
        # \Big[
        # \overrightarrow{{g_l}_i} + \overrightarrow{{g_r}_j}
        # \Big] \Big)$$
        # `e` is of shape `[n_nodes, n_nodes, n_heads, 1]`
        # (unlike GAT v1, the LeakyReLU is applied *before* the $\mathbf{a}$ projection)
        e = self.attn(self.activation(g_sum))
        # Remove the last dimension of size `1`
        e = e.squeeze(-1)

        # The adjacency matrix should have shape
        # `[n_nodes, n_nodes, n_heads]` or `[n_nodes, n_nodes, 1]`
        # (size-`1` dimensions broadcast against `e` in `masked_fill` below)
        assert adj_mat.shape[0] == 1 or adj_mat.shape[0] == n_nodes
        assert adj_mat.shape[1] == 1 or adj_mat.shape[1] == n_nodes
        assert adj_mat.shape[2] == 1 or adj_mat.shape[2] == self.n_heads
        # Mask $e_{ij}$ based on adjacency matrix.
        # $e_{ij}$ is set to $- \infty$ if there is no edge from $i$ to $j$.
        e = e.masked_fill(adj_mat == 0, float('-inf'))

        # We then normalize attention scores (or coefficients)
        # $$\alpha_{ij} = \text{softmax}_j(e_{ij}) =
        # \frac{\exp(e_{ij})}{\sum_{j' \in \mathcal{N}_i} \exp(e_{ij'})}$$
        #
        # where $\mathcal{N}_i$ is the set of nodes connected to $i$.
        #
        # We do this by setting unconnected $e_{ij}$ to $- \infty$ which
        # makes $\exp(e_{ij}) \sim 0$ for unconnected pairs.
        a = self.softmax(e)

        # Apply dropout regularization
        a = self.dropout(a)

        # Calculate final output for each head: a weighted sum of the
        # transformed source-node features
        # $$\overrightarrow{h'^k_i} = \sum_{j \in \mathcal{N}_i} \alpha^k_{ij} \overrightarrow{{g_r}_{j,k}}$$
        attn_res = torch.einsum('ijh,jhf->ihf', a, g_r)

        # Concatenate the heads
        if self.is_concat:
            # $$\overrightarrow{h'_i} = \Bigg\Vert_{k=1}^{K} \overrightarrow{h'^k_i}$$
            return attn_res.reshape(n_nodes, self.n_heads * self.n_hidden)
        # Take the mean of the heads
        else:
            # $$\overrightarrow{h'_i} = \frac{1}{K} \sum_{k=1}^{K} \overrightarrow{h'^k_i}$$
            return attn_res.mean(dim=1)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/__init__.py | labml_nn/normalization/__init__.py | """
---
title: Normalization Layers
summary: >
A set of PyTorch implementations/tutorials of normalization layers.
---
# Normalization Layers
* [Batch Normalization](batch_norm/index.html)
* [Layer Normalization](layer_norm/index.html)
* [Instance Normalization](instance_norm/index.html)
* [Group Normalization](group_norm/index.html)
* [Weight Standardization](weight_standardization/index.html)
* [Batch-Channel Normalization](batch_channel_norm/index.html)
* [DeepNorm](deep_norm/index.html)
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/instance_norm/experiment.py | labml_nn/normalization/instance_norm/experiment.py | """
---
title: CIFAR10 Experiment to try Instance Normalization
summary: >
This trains a simple convolutional neural network that uses instance normalization
to classify CIFAR10 images.
---
# CIFAR10 Experiment for Instance Normalization
This demonstrates the use of an instance normalization layer in a convolutional
neural network for classification. Note that instance normalization was designed for
style transfer and this is only a demo.
"""
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.instance_norm import InstanceNorm
class Model(CIFAR10VGGModel):
    """
    ### VGG model for CIFAR-10 classification

    This derives from the [generic VGG style architecture](../../experiments/cifar10.html).
    """

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """Build one convolution block: conv, instance normalization, ReLU."""
        block_layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            InstanceNorm(out_channels),
            nn.ReLU(inplace=True),
        ]
        return nn.Sequential(*block_layers)

    def __init__(self):
        # VGG-16 style channel configuration for the five stages
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
@option(CIFAR10Configs.model)
def _model(c: CIFAR10Configs):
    """
    ### Create model
    """
    model = Model()
    return model.to(c.device)
def main():
    """
    Set up and run the CIFAR-10 instance-normalization experiment
    """
    # Create experiment
    experiment.create(name='cifar10', comment='instance norm')
    # Create configurations
    configurations = CIFAR10Configs()
    # Load configurations: Adam optimizer with a small learning rate
    experiment.configs(configurations, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
    # Start the experiment and run the training loop
    with experiment.start():
        configurations.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/instance_norm/__init__.py | labml_nn/normalization/instance_norm/__init__.py | """
---
title: Instance Normalization
summary: >
A PyTorch implementation/tutorial of instance normalization.
---
# Instance Normalization
This is a [PyTorch](https://pytorch.org) implementation of
[Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022).
Instance normalization was introduced to improve [style transfer](https://paperswithcode.com/task/style-transfer).
It is based on the observation that stylization should not depend on the contrast of the content image.
The "contrast normalization" is
$$y_{t,i,j,k} = \frac{x_{t,i,j,k}}{\sum_{l=1}^H \sum_{m=1}^W x_{t,i,l,m}}$$
where $x$ is a batch of images with dimensions image index $t$,
feature channel $i$, and
spatial position $j, k$.
Since it's hard for a convolutional network to learn "contrast normalization", this paper
introduces instance normalization which does that.
Here's a [CIFAR 10 classification model](experiment.html) that uses instance normalization.
"""
import torch
from torch import nn
class InstanceNorm(nn.Module):
    r"""
    ## Instance Normalization Layer

    Instance normalization layer $\text{IN}$ normalizes the input $X$ as follows:

    When input $X \in \mathbb{R}^{B \times C \times H \times W}$ is a batch of image representations,
    where $B$ is the batch size, $C$ is the number of channels, $H$ is the height and $W$ is the width.
    $\gamma \in \mathbb{R}^{C}$ and $\beta \in \mathbb{R}^{C}$. The affine transformation with $\gamma$ and
    $\beta$ are optional.

    $$\text{IN}(X) = \gamma
    \frac{X - \underset{H, W}{\mathbb{E}}[X]}{\sqrt{\underset{H, W}{Var}[X] + \epsilon}}
    + \beta$$
    """

    def __init__(self, channels: int, *,
                 eps: float = 1e-5, affine: bool = True):
        """
        * `channels` is the number of features in the input
        * `eps` is $\epsilon$, used in $\sqrt{Var[X] + \epsilon}$ for numerical stability
        * `affine` is whether to scale and shift the normalized value
        """
        super().__init__()

        self.channels = channels
        self.eps = eps
        self.affine = affine
        # Create parameters for $\gamma$ and $\beta$ for scale and shift
        # (only registered when `affine` is set)
        if self.affine:
            self.scale = nn.Parameter(torch.ones(channels))
            self.shift = nn.Parameter(torch.zeros(channels))

    def forward(self, x: torch.Tensor):
        """
        `x` is a tensor of shape `[batch_size, channels, *]`.
        `*` denotes any number of (possibly 0) dimensions.
        For example, in an image (2D) convolution this will be
        `[batch_size, channels, height, width]`
        """
        # Keep the original shape
        x_shape = x.shape
        # Get the batch size
        batch_size = x_shape[0]
        # Sanity check to make sure the number of features is the same
        assert self.channels == x.shape[1]

        # Reshape into `[batch_size, channels, n]`
        x = x.view(batch_size, self.channels, -1)

        # Calculate the mean across the last dimension;
        # i.e. the mean for each feature $\mathbb{E}[x_{t,i}]$
        mean = x.mean(dim=[-1], keepdim=True)
        # Calculate the squared mean across the last dimension;
        # i.e. $\mathbb{E}[x_{t,i}^2]$
        mean_x2 = (x ** 2).mean(dim=[-1], keepdim=True)
        # Variance for each feature $Var[x_{t,i}] = \mathbb{E}[x_{t,i}^2] - \mathbb{E}[x_{t,i}]^2$
        var = mean_x2 - mean ** 2

        # Normalize $$\hat{x}_{t,i} = \frac{x_{t,i} - \mathbb{E}[x_{t,i}]}{\sqrt{Var[x_{t,i}] + \epsilon}}$$
        # `x_norm` already has shape `[batch_size, channels, n]`, so the
        # redundant re-`view` that was here has been removed.
        x_norm = (x - mean) / torch.sqrt(var + self.eps)

        # Scale and shift $$y_{t,i} =\gamma_i \hat{x}_{t,i} + \beta_i$$
        if self.affine:
            x_norm = self.scale.view(1, -1, 1) * x_norm + self.shift.view(1, -1, 1)

        # Reshape to original and return
        return x_norm.view(x_shape)
def _test():
    """
    Simple shape-preservation check for `InstanceNorm`
    """
    from labml.logger import inspect

    tensor = torch.zeros([2, 6, 2, 4])
    inspect(tensor.shape)
    norm = InstanceNorm(6)
    tensor = norm(tensor)
    inspect(tensor.shape)


#
if __name__ == '__main__':
    _test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/deep_norm/experiment.py | labml_nn/normalization/deep_norm/experiment.py | """
---
title: DeepNorm Experiment
summary: >
Training a DeepNorm transformer on Tiny Shakespeare.
---
# [DeepNorm](index.html) Experiment
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/deep_norm/experiment.ipynb)
"""
import copy
import torch
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.normalization.deep_norm import DeepNormTransformerLayer
from labml_nn.transformers import MultiHeadAttention
from labml_nn.transformers.feed_forward import FeedForward
class AutoregressiveTransformer(nn.Module):
    """
    ## Auto-Regressive model

    This is an autoregressive transformer model that uses DeepNorm.
    """

    def __init__(self, n_tokens: int, d_model: int, n_layers: int, layer: DeepNormTransformerLayer):
        """
        :param n_tokens: is the number of tokens in the vocabulary
        :param d_model: is the embedding size
        :param n_layers: is the number of transformer layers
        :param layer: is the layer. We use `n_layers` deep copies of this for the transformer.
        """
        super().__init__()
        # Transformer stack built from `n_layers` deep copies of the given layer
        self.transformer = nn.Sequential(*(copy.deepcopy(layer) for _ in range(n_layers)))

        # Token embedding layer
        self.emb = nn.Embedding(n_tokens, d_model)
        # Readout layer projecting back to vocabulary logits
        self.readout = nn.Linear(d_model, n_tokens)

    def forward(self, x: torch.Tensor):
        """
        :param x: are the input tokens of shape `[seq_len, batch_size]`
        """
        # Embed tokens, run them through the transformer stack, and project to logits
        logits = self.readout(self.transformer(self.emb(x)))
        # Return logits; the second element keeps the interface of models that
        # also return a state
        return logits, None
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from
    [`NLPAutoRegressionConfigs`](../../experiments/nlp_autoregression.html#NLPAutoRegressionConfigs)
    """

    # Model
    model: AutoregressiveTransformer

    # Number of layers
    n_layers: int = 32

    # $\alpha$ and $\beta$ for DeepNorm;
    # computed from `n_layers` by the config options below
    deep_norm_alpha: float
    deep_norm_beta: float

    # Number of heads in the attention
    n_heads: int = 4
    # Embedding size
    d_model: int = 64
    # Size of each attention head
    d_k: int = 16
@option(Configs.deep_norm_alpha)
def _deep_norm_alpha(c: Configs):
    """
    #### Calculate $\alpha$

    $\alpha = (2M)^{\frac{1}{4}}$ where $M$ is the number of layers
    """
    return (2. * c.n_layers) ** 0.25
@option(Configs.deep_norm_beta)
def _deep_norm_beta(c: Configs):
    """
    #### Calculate $\beta$

    $\beta = (8M)^{-\frac{1}{4}}$ where $M$ is the number of layers
    """
    return (8. * c.n_layers) ** (-0.25)
@option(Configs.model)
def _model(c: Configs):
    """
    #### Initialize the model
    """
    # Build the prototype layer; the model deep-copies it `n_layers` times
    layer = DeepNormTransformerLayer(d_model=c.d_model,
                                     deep_norm_alpha=c.deep_norm_alpha,
                                     deep_norm_beta=c.deep_norm_beta,
                                     feed_forward=FeedForward(d_model=c.d_model,
                                                              d_ff=c.d_model * 4),
                                     self_attn=MultiHeadAttention(c.n_heads, c.d_model,
                                                                  dropout_prob=0.0))
    # Create the model and move it to the configured device
    return AutoregressiveTransformer(c.n_tokens, c.d_model, c.n_layers, layer).to(c.device)
def main():
    """
    #### Create and run the experiment
    """
    # Create experiment
    experiment.create(name="deep_norm", writers={'screen', 'web_api'})
    # Create configs
    configurations = Configs()
    # Override configurations
    experiment.configs(configurations, {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',

        # Use a context size of $256$
        'seq_len': 256,
        # Train for 32 epochs
        'epochs': 32,
        # Batch size $16$
        'batch_size': 16,
        # Switch between training and validation for $10$ times per epoch
        'inner_iterations': 10,

        # Number of layers
        'n_layers': 50,

        # Adam optimizer with no warmup
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 1.25e-4,
    })

    # Set model(s) for saving and loading
    experiment.add_pytorch_models({'model': configurations.model})

    # Start the experiment
    with experiment.start():
        # Run training
        configurations.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/deep_norm/__init__.py | labml_nn/normalization/deep_norm/__init__.py | """
---
title: DeepNorm
summary: >
A PyTorch implementation/tutorial of DeepNorm from paper DeepNet: Scaling Transformers to 1,000 Layers.
---
# DeepNorm
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/deep_norm/experiment.ipynb)
This is a [PyTorch](https://pytorch.org) implementation of
the DeepNorm from the paper
[DeepNet: Scaling Transformers to 1,000 Layers](https://arxiv.org/abs/2203.00555).
The paper proposes a method to stabilize extremely deep transformers through a new normalizing function
to replace LayerNorm and a weight initialization scheme.
This combines the performance of Post-LayerNorm and the stability of Pre-LayerNorm.
Transformers with DeepNorms are supposed to be stable even without a learning rate warm-up.
The paper first shows that the changes to layer outputs (for the same input)
change gradually during stable training;
when unstable it changes rapidly during the initial training steps.
This happens with initializing weights to small values, and learning rate warm-ups where the
training is stable.
They use the idea of keeping the changes to layer outputs small to derive the new
normalization and weight initialization mechanism.
## Weight Initializations
Usually, the weights are initialized with Xavier or Kaiming initializations.
This paper scales (sets the gain) the weights by a constant $\beta$ depending on the size of the
transformer.
DeepNorm suggests scaling the weights of the two linear transforms in the
[Feed-Forward Network](../../transformers/feed_forward.html),
the value projection transform, and the output projection transform of the
attention layer.
Weights of these transforms are scaled by (has a gain equal to) $\beta$.
The scaling is implemented in the
## Normalization Function
$$x_{l + 1} = \mathop{LN}\Big( \alpha x_l + \mathop{G}_l \big(x_l, \theta_l \big)\Big)$$
where $\alpha$ is a constant that depends on the depth of the transformer,
$\mathop{LN}$ is [Layer Normalization](../layer_norm/index.html), and
$\mathop{G}_l (x_l, \theta_l)$ is the function of the $l$-th transformer sub-layer (FFN or attention).
This function is used to replace Post-LayerNorm.
## $\alpha$ and $\beta$ constants
\begin{align}
\begin{array} {c|cc|cc}
\text{Type} & \text{Enc-} \alpha & \text{Enc-} \beta & \text{Dec-} \alpha & \text{Dec-} \beta \\
\hline \\
\text{Encoder only} & (2N)^{\frac{1}{4}} & (8N)^{-\frac{1}{4}} & - & - \\
\text{Decoder only} & - & - & (2M)^{\frac{1}{4}} & (8M)^{-\frac{1}{4}} \\
\text{Enc-Dec} & 0.81 (N^4M)^{\frac{1}{16}} & 0.87 (N^4 M)^{-\frac{1}{16}} &
(3M)^{\frac{1}{4}} & (12M)^{-\frac{1}{4}} \\
\end{array}
\end{align}
Where $N$ is the number of layers in the encoder and $M$ is the number of layers in the decoder.
Refer to [the paper](https://arxiv.org/abs/2203.00555) for derivation.
[Here is an experiment implementation](experiment.html) that uses DeepNorm.
"""
from typing import Union, List
import torch
from torch import nn, Size
from labml_nn.normalization.layer_norm import LayerNorm
from labml_nn.transformers import MultiHeadAttention
from labml_nn.transformers.feed_forward import FeedForward
from labml_nn.transformers.utils import subsequent_mask
class DeepNorm(nn.Module):
    r"""
    ## DeepNorm Normalization

    $$x_{l + 1} = \mathop{LN}\Big( \alpha x_l + \mathop{G}_l \big(x_l, \theta_l \big)\Big)$$
    """

    def __init__(self, alpha: float, normalized_shape: Union[int, List[int], Size], *,
                 eps: float = 1e-5,
                 elementwise_affine: bool = True):
        """
        :param alpha: is the residual scaling constant $\alpha$
        :param normalized_shape: is the shape for LayerNorm $\mathop{LN}$
        :param eps: is $\epsilon$ for LayerNorm
        :param elementwise_affine: is a flag indicating whether to do an elementwise transformation in LayerNorm
        """
        super().__init__()

        self.alpha = alpha
        # Initialize $\mathop{LN}$
        self.layer_norm = LayerNorm(normalized_shape, eps=eps, elementwise_affine=elementwise_affine)

    def forward(self, x: torch.Tensor, gx: torch.Tensor):
        """
        :param x: is the output from the previous layer $x_l$
        :param gx: is the output of the current sub-layer $\mathop{G}_l (x_l, \theta_l)$
        """
        # $$x_{l + 1} = \mathop{LN}\Big( \alpha x_l + \mathop{G}_l \big(x_l, \theta_l \big)\Big)$$
        #
        # Note: $\alpha$ scales the residual stream $x_l$, *not* the sub-layer output,
        # matching the formula above and the DeepNet paper
        # ([arXiv 2203.00555](https://arxiv.org/abs/2203.00555)).
        return self.layer_norm(self.alpha * x + gx)
class DeepNormTransformerLayer(nn.Module):
    """
    ## Transformer Decoder Layer with DeepNorm

    This implements a transformer decoder layer with DeepNorm.
    Encoder layers will have a similar form.
    """

    def __init__(self, *,
                 d_model: int,
                 self_attn: MultiHeadAttention,
                 feed_forward: FeedForward,
                 deep_norm_alpha: float,
                 deep_norm_beta: float,
                 ):
        """
        :param d_model: is the token embedding size
        :param self_attn: is the self attention module
        :param feed_forward: is the feed forward module
        :param deep_norm_alpha: is $\alpha$ coefficient in DeepNorm
        :param deep_norm_beta: is $\beta$ constant for scaling weights initialization
        """
        super().__init__()

        self.self_attn = self_attn
        self.feed_forward = feed_forward
        # DeepNorms after attention and feed forward network
        self.self_attn_norm = DeepNorm(deep_norm_alpha, [d_model])
        self.feed_forward_norm = DeepNorm(deep_norm_alpha, [d_model])

        # Scale weights after initialization.
        # Per the paper, only the FFN transforms and the attention value and
        # output projections get the $\beta$ gain; query/key projections are untouched.
        # `no_grad` because this is an in-place tweak of already-initialized
        # parameters, not part of the computation graph.
        with torch.no_grad():
            # Feed forward network linear transformations
            feed_forward.layer1.weight *= deep_norm_beta
            feed_forward.layer2.weight *= deep_norm_beta

            # Attention value projection
            self_attn.value.linear.weight *= deep_norm_beta
            # Attention output project
            self_attn.output.weight *= deep_norm_beta

        # The mask will be initialized on the first call and cached;
        # it is rebuilt whenever the sequence length changes.
        self.mask = None

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        :param x: are the embeddings of shape `[seq_len, batch_size, d_model]`
        """
        # Create causal mask (reused across calls with the same sequence length)
        if self.mask is None or self.mask.size(0) != len(x):
            # Subsequent mask, will mask out tokens from seeing future tokens
            self.mask = subsequent_mask(len(x)).to(x.device)

        # Run through self attention, i.e. keys and values are from self
        x = self.self_attn_norm(x, self.self_attn(query=x, key=x, value=x, mask=self.mask))
        # Pass through the feed-forward network
        x = self.feed_forward_norm(x, self.feed_forward(x))

        #
        return x
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/layer_norm/__init__.py | labml_nn/normalization/layer_norm/__init__.py | """
---
title: Layer Normalization
summary: >
A PyTorch implementation/tutorial of layer normalization.
---
# Layer Normalization
This is a [PyTorch](https://pytorch.org) implementation of
[Layer Normalization](https://arxiv.org/abs/1607.06450).
### Limitations of [Batch Normalization](../batch_norm/index.html)
* You need to maintain running means.
* Tricky for RNNs. Do you need different normalizations for each step?
* Doesn't work with small batch sizes;
large NLP models are usually trained with small batch sizes.
* Need to compute means and variances across devices in distributed training.
## Layer Normalization
Layer normalization is a simpler normalization method that works
on a wider range of settings.
Layer normalization transforms the inputs to have zero mean and unit variance
across the features.
*Note that batch normalization fixes the zero mean and unit variance for each element.*
Layer normalization does it for each batch across all elements.
Layer normalization is generally used for NLP tasks.
We have used layer normalization in most of the
[transformer implementations](../../transformers/gpt/index.html).
"""
from typing import Union, List
import torch
from torch import nn, Size
class LayerNorm(nn.Module):
    r"""
    ## Layer Normalization

    Normalizes the input over its trailing `normalized_shape` dimensions:

    $$\text{LN}(X) = \gamma
    \frac{X - \mathbb{E}[X]}{\sqrt{Var[X] + \epsilon}}
    + \beta$$

    This works the same way for embeddings $X \in \mathbb{R}^{B \times C}$,
    sequences of embeddings $X \in \mathbb{R}^{L \times B \times C}$, and
    (less commonly) image maps $X \in \mathbb{R}^{B \times C \times H \times W}$ —
    statistics are always computed over the last `len(normalized_shape)`
    dimensions of each sample.
    """

    def __init__(self, normalized_shape: Union[int, List[int], Size], *,
                 eps: float = 1e-5,
                 elementwise_affine: bool = True):
        """
        * `normalized_shape` $S$ is the shape of the trailing dimensions to normalize over.
          The input should then be $X \in \mathbb{R}^{* \times S[0] \times S[1] \times ... \times S[n]}$
        * `eps` is $\epsilon$, used in $\sqrt{Var[X] + \epsilon}$ for numerical stability
        * `elementwise_affine` is whether to apply a learned gain and bias

        We've tried to use the same names for arguments as PyTorch `LayerNorm` implementation.
        """
        super().__init__()

        # Canonicalize `normalized_shape` into a `torch.Size`
        if isinstance(normalized_shape, int):
            normalized_shape = torch.Size([normalized_shape])
        elif isinstance(normalized_shape, list):
            normalized_shape = torch.Size(normalized_shape)
        assert isinstance(normalized_shape, torch.Size)

        #
        self.normalized_shape = normalized_shape
        self.eps = eps
        self.elementwise_affine = elementwise_affine
        # Learnable gain $\gamma$ and bias $\beta$
        if self.elementwise_affine:
            self.gain = nn.Parameter(torch.ones(normalized_shape))
            self.bias = nn.Parameter(torch.zeros(normalized_shape))

    def forward(self, x: torch.Tensor):
        """
        `x` is a tensor of shape `[*, S[0], S[1], ..., S[n]]` where the
        trailing dimensions match `normalized_shape`; `*` is any number of
        leading dimensions.
        """
        # The trailing dimensions must match the configured shape
        assert self.normalized_shape == x.shape[-len(self.normalized_shape):]

        # Negative indices of the dimensions to reduce over
        dims = list(range(-len(self.normalized_shape), 0))

        # $\mathbb{E}[X]$ over the normalized dimensions
        mean = x.mean(dim=dims, keepdim=True)
        # $\mathbb{E}[X^2]$ over the normalized dimensions
        mean_sq = (x ** 2).mean(dim=dims, keepdim=True)
        # $Var[X] = \mathbb{E}[X^2] - \mathbb{E}[X]^2$
        var = mean_sq - mean ** 2

        # $$\hat{X} = \frac{X - \mathbb{E}[X]}{\sqrt{Var[X] + \epsilon}}$$
        x_hat = (x - mean) / torch.sqrt(var + self.eps)

        # $$\text{LN}(x) = \gamma \hat{X} + \beta$$
        if self.elementwise_affine:
            x_hat = self.gain * x_hat + self.bias

        #
        return x_hat
def _test():
    """
    Simple sanity check: normalize a dummy batch and inspect the shapes.
    """
    from labml.logger import inspect

    # Dummy input of shape `[2, 3, 2, 4]`
    data = torch.zeros([2, 3, 2, 4])
    inspect(data.shape)

    # Normalize over the last two dimensions
    norm = LayerNorm(data.shape[2:])
    data = norm(data)

    inspect(data.shape)
    inspect(norm.gain.shape)


#
if __name__ == '__main__':
    _test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/weight_standardization/experiment.py | labml_nn/normalization/weight_standardization/experiment.py | """
---
title: CIFAR10 Experiment to try Weight Standardization and Batch-Channel Normalization
summary: >
This trains is a VGG net that uses weight standardization and batch-channel normalization
to classify CIFAR10 images.
---
# CIFAR10 Experiment to try Weight Standardization and Batch-Channel Normalization
"""
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_channel_norm import BatchChannelNorm
from labml_nn.normalization.weight_standardization.conv2d import Conv2d
class Model(CIFAR10VGGModel):
    """
    ### VGG model for CIFAR-10 classification

    This derives from the [generic VGG style architecture](../../experiments/cifar10.html).
    """

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """
        Convolution block: weight-standardized 3x3 convolution followed by
        Batch-Channel Normalization (32 groups) and a ReLU activation.
        """
        return nn.Sequential(
            # Convolution with weight standardization
            Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # Batch-channel normalization with 32 channel groups
            BatchChannelNorm(out_channels, 32),
            nn.ReLU(inplace=True),
        )

    def __init__(self):
        # VGG-style channel configuration: five stages of convolution blocks
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
@option(CIFAR10Configs.model)
def _model(c: CIFAR10Configs):
    """
    ### Create model

    Registered as the `model` option of
    [`CIFAR10Configs`](../../experiments/cifar10.html); moves the model
    to the configured device.
    """
    return Model().to(c.device)
def main():
    """
    Run the CIFAR-10 weight-standardization experiment.
    """
    # Register the experiment
    experiment.create(name='cifar10', comment='weight standardization')

    # Build the configurations
    conf = CIFAR10Configs()

    # Overrides for this run
    overrides = {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'train_batch_size': 64,
    }
    experiment.configs(conf, overrides)

    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/weight_standardization/__init__.py | labml_nn/normalization/weight_standardization/__init__.py | """
---
title: Weight Standardization
summary: >
A PyTorch implementation/tutorial of Weight Standardization.
---
# Weight Standardization
This is a [PyTorch](https://pytorch.org) implementation of Weight Standardization from the paper
[Micro-Batch Training with Batch-Channel Normalization and Weight Standardization](https://arxiv.org/abs/1903.10520).
We also have an [annotated implementation of Batch-Channel Normalization](../batch_channel_norm/index.html).
Batch normalization **gives a smooth loss landscape** and
**avoids elimination singularities**.
Elimination singularities are nodes of the network that become
useless (e.g. a ReLU that gives 0 all the time).
However, batch normalization doesn't work well when the batch size is too small,
which happens when training large networks because of device memory limitations.
The paper introduces Weight Standardization with Batch-Channel Normalization as
a better alternative.
Weight Standardization:
1. Normalizes the gradients
2. Smoothes the landscape (reduced Lipschitz constant)
3. Avoids elimination singularities
The Lipschitz constant is the maximum slope a function has between two points.
That is, $L$ is the Lipschitz constant where $L$ is the smallest value that satisfies,
$\forall a,b \in A: \lVert f(a) - f(b) \rVert \le L \lVert a - b \rVert$
where $f: A \rightarrow \mathbb{R}^m, A \in \mathbb{R}^n$.
Elimination singularities are avoided because it keeps the statistics of the outputs similar to the
inputs. So as long as the inputs are normally distributed the outputs remain close to normal.
This avoids outputs of nodes from always falling beyond the active range of the activation function
(e.g. always negative input for a ReLU).
*[Refer to the paper for proofs](https://arxiv.org/abs/1903.10520)*.
Here is [the training code](experiment.html) for training
a VGG network that uses weight standardization to classify CIFAR-10 data.
This uses a [2D-Convolution Layer with Weight Standardization](conv2d.html).
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/weight_standardization/experiment.ipynb)
"""
import torch
def weight_standardization(weight: torch.Tensor, eps: float):
    r"""
    ## Weight Standardization

    $$\hat{W}_{i,j} = \frac{W_{i,j} - \mu_{W_{i,\cdot}}} {\sigma_{W_{i,\cdot}}}$$

    where,

    \begin{align}
    W &\in \mathbb{R}^{O \times I} \\
    \mu_{W_{i,\cdot}} &= \frac{1}{I} \sum_{j=1}^I W_{i,j} \\
    \sigma_{W_{i,\cdot}} &= \sqrt{\frac{1}{I} \sum_{j=1}^I W^2_{i,j} - \mu^2_{W_{i,\cdot}} + \epsilon} \\
    \end{align}

    for a 2D-convolution layer $O$ is the number of output channels ($O = C_{out}$)
    and $I$ is the number of input channels times the kernel size ($I = C_{in} \times k_H \times k_W$)
    """

    # Get $C_{out}$, $C_{in}$ and kernel shape
    c_out, c_in, *kernel_shape = weight.shape
    # Reshape $W$ to $O \times I$
    weight = weight.view(c_out, -1)

    # Calculate
    #
    # \begin{align}
    # \mu_{W_{i,\cdot}} &= \frac{1}{I} \sum_{j=1}^I W_{i,j} \\
    # \sigma^2_{W_{i,\cdot}} &= \frac{1}{I} \sum_{j=1}^I W^2_{i,j} - \mu^2_{W_{i,\cdot}}
    # \end{align}
    #
    # `unbiased=False` gives the population ($\frac{1}{I}$) variance of the formula
    # above; the PyTorch default would divide by $I - 1$ and not match the math.
    var, mean = torch.var_mean(weight, dim=1, keepdim=True, unbiased=False)

    # Normalize
    # $$\hat{W}_{i,j} = \frac{W_{i,j} - \mu_{W_{i,\cdot}}} {\sigma_{W_{i,\cdot}}}$$
    weight = (weight - mean) / (torch.sqrt(var + eps))

    # Change back to original shape and return
    return weight.view(c_out, c_in, *kernel_shape)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/weight_standardization/conv2d.py | labml_nn/normalization/weight_standardization/conv2d.py | """
---
title: 2D Convolution Layer with Weight Standardization
summary: >
A PyTorch implementation/tutorial of a 2D Convolution Layer with Weight Standardization.
---
# 2D Convolution Layer with Weight Standardization
This is an implementation of a 2 dimensional convolution layer with [Weight Standardization](./index.html)
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from labml_nn.normalization.weight_standardization import weight_standardization
class Conv2d(nn.Conv2d):
    """
    ## 2D Convolution Layer

    A drop-in replacement for `nn.Conv2d` that standardizes its weights
    (zero mean, unit variance per output channel) before every convolution.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups: int = 1,
                 bias: bool = True,
                 padding_mode: str = 'zeros',
                 eps: float = 1e-5):
        # Set up the regular convolution layer
        super().__init__(in_channels, out_channels, kernel_size,
                         stride=stride,
                         padding=padding,
                         dilation=dilation,
                         groups=groups,
                         bias=bias,
                         padding_mode=padding_mode)
        # $\epsilon$ used when standardizing the weights
        self.eps = eps

    def forward(self, x: torch.Tensor):
        # Standardize the weights, then run the usual convolution
        standardized_weight = weight_standardization(self.weight, self.eps)
        return F.conv2d(x, standardized_weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
def _test():
    """
    A simple test to verify the tensor sizes
    """
    from labml.logger import inspect
    import torch

    # Build a weight-standardized convolution and inspect its weights
    layer = Conv2d(10, 20, 5)
    inspect(layer.weight)

    # Run a dummy batch through it and inspect the output
    inspect(layer(torch.zeros(10, 10, 100, 100)))


if __name__ == '__main__':
    _test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/batch_norm/mnist.py | labml_nn/normalization/batch_norm/mnist.py | """
---
title: MNIST Experiment to try Batch Normalization
summary: >
This trains is a simple convolutional neural network that uses batch normalization
to classify MNIST digits.
---
# MNIST Experiment for Batch Normalization
"""
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from labml import experiment
from labml.configs import option
from labml_nn.experiments.mnist import MNISTConfigs
from labml_nn.normalization.batch_norm import BatchNorm
class Model(nn.Module):
    """
    ### Model definition

    A small CNN for MNIST classification: two convolution + batch-norm +
    pooling stages followed by two fully connected layers.
    """

    def __init__(self):
        super().__init__()
        # Note that we omit the bias parameter — batch normalization cancels
        # any bias added right before it
        self.conv1 = nn.Conv2d(1, 20, 5, 1, bias=False)
        # Batch normalization with 20 channels (output of convolution layer).
        # The input to this layer will have shape `[batch_size, 20, height(24), width(24)]`
        self.bn1 = BatchNorm(20)
        #
        self.conv2 = nn.Conv2d(20, 50, 5, 1, bias=False)
        # Batch normalization with 50 channels.
        # The input to this layer will have shape `[batch_size, 50, height(8), width(8)]`
        self.bn2 = BatchNorm(50)
        #
        self.fc1 = nn.Linear(4 * 4 * 50, 500, bias=False)
        # Batch normalization with 500 channels (output of fully connected layer).
        # The input to this layer will have shape `[batch_size, 500]`
        self.bn3 = BatchNorm(500)
        # Final classification layer (10 classes); keeps its bias since no
        # normalization follows it
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        `x` is a batch of images — presumably `[batch_size, 1, 28, 28]` given the
        shape comments above (TODO confirm). Returns logits `[batch_size, 10]`.
        """
        # Conv -> batch norm -> ReLU, then 2x2 max pooling (twice)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.max_pool2d(x, 2, 2)
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.max_pool2d(x, 2, 2)
        # Flatten to `[batch_size, 4 * 4 * 50]`
        x = x.view(-1, 4 * 4 * 50)
        x = F.relu(self.bn3(self.fc1(x)))
        # Class logits
        return self.fc2(x)
@option(MNISTConfigs.model)
def model(c: MNISTConfigs):
    """
    ### Create model

    We use [`MNISTConfigs`](../../experiments/mnist.html#MNISTConfigs) configurations
    and set a new function to calculate the model.
    """
    # Move the model to the configured device
    return Model().to(c.device)
def main():
    """
    Run the MNIST batch-normalization experiment.
    """
    # Register the experiment
    experiment.create(name='mnist_batch_norm')

    # Build the configurations
    conf = MNISTConfigs()

    # Overrides for this run
    overrides = {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 0.001,
    }
    experiment.configs(conf, overrides)

    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/batch_norm/__init__.py | labml_nn/normalization/batch_norm/__init__.py | """
---
title: Batch Normalization
summary: >
A PyTorch implementation/tutorial of batch normalization.
---
# Batch Normalization
This is a [PyTorch](https://pytorch.org) implementation of Batch Normalization from paper
[Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167).
### Internal Covariate Shift
The paper defines *Internal Covariate Shift* as the change in the
distribution of network activations due to the change in
network parameters during training.
For example, let's say there are two layers $l_1$ and $l_2$.
During the beginning of the training $l_1$ outputs (inputs to $l_2$)
could be in distribution $\mathcal{N}(0.5, 1)$.
Then, after some training steps, it could move to $\mathcal{N}(0.6, 1.5)$.
This is *internal covariate shift*.
Internal covariate shift will adversely affect training speed because the later layers
($l_2$ in the above example) have to adapt to this shifted distribution.
By stabilizing the distribution, batch normalization minimizes the internal covariate shift.
## Normalization
It is known that whitening improves training speed and convergence.
*Whitening* is linearly transforming inputs to have zero mean, unit variance,
and be uncorrelated.
### Normalizing outside gradient computation doesn't work
Normalizing outside the gradient computation using pre-computed (detached)
means and variances doesn't work. For instance. (ignoring variance), let
$$\hat{x} = x - \mathbb{E}[x]$$
where $x = u + b$ and $b$ is a trained bias
and $\mathbb{E}[x]$ is an outside gradient computation (pre-computed constant).
Note that $\hat{x}$ has no effect on $b$.
Therefore,
$b$ will increase or decrease based
$\frac{\partial{\mathcal{L}}}{\partial x}$,
and keep on growing indefinitely in each training update.
The paper notes that similar explosions happen with variances.
### Batch Normalization
Whitening is computationally expensive because you need to de-correlate and
the gradients must flow through the full whitening calculation.
The paper introduces a simplified version which they call *Batch Normalization*.
First simplification is that it normalizes each feature independently to have
zero mean and unit variance:
$$\hat{x}^{(k)} = \frac{x^{(k)} - \mathbb{E}[x^{(k)}]}{\sqrt{Var[x^{(k)}]}}$$
where $x = (x^{(1)} ... x^{(d)})$ is the $d$-dimensional input.
The second simplification is to use estimates of mean $\mathbb{E}[x^{(k)}]$
and variance $Var[x^{(k)}]$ from the mini-batch
for normalization; instead of calculating the mean and variance across the whole dataset.
Normalizing each feature to zero mean and unit variance could affect what the layer
can represent.
As an example paper illustrates that, if the inputs to a sigmoid are normalized
most of it will be within $[-1, 1]$ range where the sigmoid is linear.
To overcome this each feature is scaled and shifted by two trained parameters
$\gamma^{(k)}$ and $\beta^{(k)}$.
$$y^{(k)} =\gamma^{(k)} \hat{x}^{(k)} + \beta^{(k)}$$
where $y^{(k)}$ is the output of the batch normalization layer.
Note that when applying batch normalization after a linear transform
like $Wu + b$ the bias parameter $b$ gets cancelled due to normalization.
So you can and should omit bias parameter in linear transforms right before the
batch normalization.
Batch normalization also makes the back propagation invariant to the scale of the weights
and empirically it improves generalization, so it has regularization effects too.
## Inference
We need to know $\mathbb{E}[x^{(k)}]$ and $Var[x^{(k)}]$ in order to
perform the normalization.
So during inference, you either need to go through the whole (or part of) dataset
and find the mean and variance, or you can use an estimate calculated during training.
The usual practice is to calculate an exponential moving average of
mean and variance during the training phase and use that for inference.
Here's [the training code](mnist.html) and a notebook for training
a CNN classifier that uses batch normalization for MNIST dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/batch_norm/mnist.ipynb)
"""
import torch
from torch import nn
class BatchNorm(nn.Module):
    r"""
    ## Batch Normalization Layer

    Normalizes activations per-channel with mini-batch statistics
    (or exponential moving averages at inference time):

    $$\text{BN}(X) = \gamma
    \frac{X - \mathbb{E}[X]}{\sqrt{Var[X] + \epsilon}}
    + \beta$$

    The expectation and variance are taken over the batch dimension and all
    trailing (spatial/sequence) dimensions, separately for each channel.
    This covers embeddings $X \in \mathbb{R}^{B \times C}$, sequences
    $X \in \mathbb{R}^{B \times C \times L}$, and images
    $X \in \mathbb{R}^{B \times C \times H \times W}$.
    """

    def __init__(self, channels: int, *,
                 eps: float = 1e-5, momentum: float = 0.1,
                 affine: bool = True, track_running_stats: bool = True):
        """
        * `channels` is the number of features in the input
        * `eps` is $\epsilon$, used in $\sqrt{Var[x^{(k)}] + \epsilon}$ for numerical stability
        * `momentum` is the momentum of the exponential moving average
        * `affine` is whether to scale and shift the normalized value
        * `track_running_stats` is whether to keep moving averages of mean and variance

        We've tried to use the same names for arguments as PyTorch `BatchNorm` implementation.
        """
        super().__init__()

        self.channels = channels
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats

        # Learnable per-channel scale $\gamma$ and shift $\beta$
        if self.affine:
            self.scale = nn.Parameter(torch.ones(channels))
            self.shift = nn.Parameter(torch.zeros(channels))

        # Exponential moving averages of mean and variance, used at inference
        if self.track_running_stats:
            self.register_buffer('exp_mean', torch.zeros(channels))
            self.register_buffer('exp_var', torch.ones(channels))

    def forward(self, x: torch.Tensor):
        """
        `x` has shape `[batch_size, channels, *]` where `*` is any number of
        (possibly zero) extra dimensions, e.g. `[batch_size, channels, height, width]`
        for 2D images.
        """
        # Remember the input shape so it can be restored at the end
        original_shape = x.shape
        # Sanity check the channel dimension
        assert self.channels == x.shape[1]

        # Flatten all trailing dimensions: `[batch_size, channels, n]`
        x = x.view(original_shape[0], self.channels, -1)

        # Use mini-batch statistics when training or when no running
        # averages are tracked
        if self.training or not self.track_running_stats:
            # Per-channel mean $\mathbb{E}[x^{(k)}]$ over batch and flattened dims
            mean = x.mean(dim=[0, 2])
            # Per-channel mean of squares $\mathbb{E}[(x^{(k)})^2]$
            mean_x2 = (x ** 2).mean(dim=[0, 2])
            # $Var[x^{(k)}] = \mathbb{E}[(x^{(k)})^2] - \mathbb{E}[x^{(k)}]^2$
            var = mean_x2 - mean ** 2

            # Update the exponential moving averages while training
            if self.training and self.track_running_stats:
                self.exp_mean = (1 - self.momentum) * self.exp_mean + self.momentum * mean
                self.exp_var = (1 - self.momentum) * self.exp_var + self.momentum * var
        else:
            # Inference: use the tracked estimates
            mean = self.exp_mean
            var = self.exp_var

        # $$\hat{x}^{(k)} = \frac{x^{(k)} - \mathbb{E}[x^{(k)}]}{\sqrt{Var[x^{(k)}] + \epsilon}}$$
        x_hat = (x - mean.view(1, -1, 1)) / torch.sqrt(var + self.eps).view(1, -1, 1)

        # $$y^{(k)} =\gamma^{(k)} \hat{x}^{(k)} + \beta^{(k)}$$
        if self.affine:
            x_hat = self.scale.view(1, -1, 1) * x_hat + self.shift.view(1, -1, 1)

        # Restore the original shape
        return x_hat.view(original_shape)
def _test():
    """
    Simple sanity check: normalize a dummy batch and inspect the shapes.
    """
    from labml.logger import inspect

    # Dummy input of shape `[2, 3, 2, 4]`
    data = torch.zeros([2, 3, 2, 4])
    inspect(data.shape)

    # Batch norm over the 3 channels
    bn = BatchNorm(3)
    data = bn(data)

    inspect(data.shape)
    inspect(bn.exp_var.shape)


#
if __name__ == '__main__':
    _test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/batch_norm/cifar10.py | labml_nn/normalization/batch_norm/cifar10.py | """
---
title: CIFAR10 Experiment to try Group Normalization
summary: >
This trains is a simple convolutional neural network that uses group normalization
to classify CIFAR10 images.
---
# CIFAR10 Experiment for Group Normalization
"""
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
from labml_nn.normalization.batch_norm import BatchNorm
class Model(CIFAR10VGGModel):
    """
    ### VGG model for CIFAR-10 classification

    This derives from the [generic VGG style architecture](../../experiments/cifar10.html).
    """

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """
        Convolution block: 3x3 convolution, batch normalization, ReLU.
        """
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # The custom [batch normalization](index.html) implementation
            BatchNorm(out_channels),
            nn.ReLU(inplace=True),
        )

    def __init__(self):
        # VGG-style channel configuration: five stages of convolution blocks
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
@option(CIFAR10Configs.model)
def model(c: CIFAR10Configs):
    """
    ### Create model

    Registered as the `model` option of
    [`CIFAR10Configs`](../../experiments/cifar10.html); moves the model
    to the configured device.
    """
    return Model().to(c.device)
def main():
    """
    Run the CIFAR-10 batch-normalization experiment.
    """
    # Register the experiment
    experiment.create(name='cifar10', comment='batch norm')

    # Build the configurations
    conf = CIFAR10Configs()

    # Overrides for this run
    overrides = {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        'train_batch_size': 64,
    }
    experiment.configs(conf, overrides)

    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/batch_channel_norm/__init__.py | labml_nn/normalization/batch_channel_norm/__init__.py | """
---
title: Batch-Channel Normalization
summary: >
A PyTorch implementation/tutorial of Batch-Channel Normalization.
---
# Batch-Channel Normalization
This is a [PyTorch](https://pytorch.org) implementation of Batch-Channel Normalization from the paper
[Micro-Batch Training with Batch-Channel Normalization and Weight Standardization](https://arxiv.org/abs/1903.10520).
We also have an [annotated implementation of Weight Standardization](../weight_standardization/index.html).
Batch-Channel Normalization performs batch normalization followed
by a channel normalization (similar to a [Group Normalization](../group_norm/index.html)).
When the batch size is small a running mean and variance is used for
batch normalization.
Here is [the training code](../weight_standardization/experiment.html) for training
a VGG network that uses weight standardization to classify CIFAR-10 data.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/weight_standardization/experiment.ipynb)
"""
import torch
from torch import nn
from labml_nn.normalization.batch_norm import BatchNorm
class BatchChannelNorm(nn.Module):
    """
    ## Batch-Channel Normalization

    Applies a batch normalization — either the [normal batch norm](../batch_norm/index.html)
    or one based on estimated (exponential moving average) statistics —
    followed by a channel normalization.
    """

    def __init__(self, channels: int, groups: int,
                 eps: float = 1e-5, momentum: float = 0.1, estimate: bool = True):
        """
        * `channels` is the number of features in the input
        * `groups` is the number of groups the features are divided into
        * `eps` is $\epsilon$, used for numerical stability
        * `momentum` is the momentum in taking the exponential moving average
        * `estimate` is whether to use running mean and variance for batch norm
        """
        super().__init__()

        # Pick the batch-norm flavor: estimated statistics or regular
        batch_norm_cls = EstimatedBatchNorm if estimate else BatchNorm
        self.batch_norm = batch_norm_cls(channels, eps=eps, momentum=momentum)

        # Channel normalization
        self.channel_norm = ChannelNorm(channels, groups, eps)

    def forward(self, x):
        # Batch normalization first, then channel normalization
        return self.channel_norm(self.batch_norm(x))
class EstimatedBatchNorm(nn.Module):
    """
    ## Estimated Batch Normalization

    When input $X \in \mathbb{R}^{B \times C \times H \times W}$ is a batch of image representations,
    where $B$ is the batch size, $C$ is the number of channels, $H$ is the height and $W$ is the width.
    $\gamma \in \mathbb{R}^{C}$ and $\beta \in \mathbb{R}^{C}$.

    $$\dot{X}_{\cdot, C, \cdot, \cdot} = \gamma_C
    \frac{X_{\cdot, C, \cdot, \cdot} - \hat{\mu}_C}{\hat{\sigma}_C}
    + \beta_C$$

    where,

    \begin{align}
    \hat{\mu}_C &\longleftarrow (1 - r)\hat{\mu}_C + r \frac{1}{B H W} \sum_{b,h,w} X_{b,c,h,w} \\
    \hat{\sigma}^2_C &\longleftarrow (1 - r)\hat{\sigma}^2_C + r \frac{1}{B H W} \sum_{b,h,w} \big(X_{b,c,h,w} - \hat{\mu}_C \big)^2
    \end{align}

    are the running mean and variances. $r$ is the momentum for calculating the exponential mean.
    """

    def __init__(self, channels: int,
                 eps: float = 1e-5, momentum: float = 0.1, affine: bool = True):
        """
        * `channels` is the number of features in the input
        * `eps` is $\epsilon$, used in $\sqrt{Var[x^{(k)}] + \epsilon}$ for numerical stability
        * `momentum` is the momentum in taking the exponential moving average
        * `affine` is whether to scale and shift the normalized value
        """
        super().__init__()

        self.channels = channels
        self.eps = eps
        self.momentum = momentum
        self.affine = affine

        # Channel-wise affine transformation parameters
        if affine:
            self.scale = nn.Parameter(torch.ones(channels))
            self.shift = nn.Parameter(torch.zeros(channels))

        # Buffers for $\hat{\mu}_C$ and $\hat{\sigma}^2_C$
        self.register_buffer('exp_mean', torch.zeros(channels))
        self.register_buffer('exp_var', torch.ones(channels))

    def forward(self, x: torch.Tensor):
        """
        `x` is a tensor of shape `[batch_size, channels, *]`.
        `*` denotes any number of (possibly 0) dimensions.
        For example, in an image (2D) convolution this will be
        `[batch_size, channels, height, width]`
        """
        # Keep the old shape and sanity check the number of features
        original_shape = x.shape
        assert self.channels == x.shape[1]

        # Flatten to `[batch_size, channels, n]`
        x = x.view(original_shape[0], self.channels, -1)

        # Update $\hat{\mu}_C$ and $\hat{\sigma}^2_C$ in training mode only,
        # without back-propagating through the statistics
        if self.training:
            with torch.no_grad():
                # $\frac{1}{B H W} \sum_{b,h,w} X_{b,c,h,w}$
                batch_mean = x.mean(dim=[0, 2])
                # $Var \approx \mathbb{E}[X^2] - \mathbb{E}[X]^2$
                batch_var = (x ** 2).mean(dim=[0, 2]) - batch_mean ** 2

                # Exponential moving averages with momentum $r$
                r = self.momentum
                self.exp_mean = (1 - r) * self.exp_mean + r * batch_mean
                self.exp_var = (1 - r) * self.exp_var + r * batch_var

        # $\frac{X_{\cdot, C, \cdot, \cdot} - \hat{\mu}_C}{\hat{\sigma}_C}$
        mean = self.exp_mean.view(1, -1, 1)
        std = torch.sqrt(self.exp_var + self.eps).view(1, -1, 1)
        x_norm = (x - mean) / std

        # $\gamma_C \hat{x} + \beta_C$
        if self.affine:
            x_norm = self.scale.view(1, -1, 1) * x_norm + self.shift.view(1, -1, 1)

        # Reshape to the original shape and return
        return x_norm.view(original_shape)
class ChannelNorm(nn.Module):
    """
    ## Channel Normalization

    Similar to [Group Normalization](../group_norm/index.html), but the affine
    transform is applied per group rather than per channel.
    """

    def __init__(self, channels, groups,
                 eps: float = 1e-5, affine: bool = True):
        """
        * `groups` is the number of groups the features are divided into
        * `channels` is the number of features in the input
        * `eps` is $\epsilon$, used in $\sqrt{Var[x^{(k)}] + \epsilon}$ for numerical stability
        * `affine` is whether to scale and shift the normalized value
        """
        super().__init__()

        self.channels = channels
        self.groups = groups
        self.eps = eps
        self.affine = affine

        # Affine parameters.
        #
        # *These are per group, unlike in group norm where they are
        # per channel.*
        if affine:
            self.scale = nn.Parameter(torch.ones(groups))
            self.shift = nn.Parameter(torch.zeros(groups))

    def forward(self, x: torch.Tensor):
        """
        `x` is a tensor of shape `[batch_size, channels, *]`.
        `*` denotes any number of (possibly 0) dimensions.
        For example, in an image (2D) convolution this will be
        `[batch_size, channels, height, width]`
        """
        # Keep the original shape and sanity check the number of features
        original_shape = x.shape
        assert self.channels == x.shape[1]

        # Reshape into `[batch_size, groups, n]`
        grouped = x.view(original_shape[0], self.groups, -1)

        # Per-sample, per-group statistics:
        # $\mathbb{E}[x_{(i_N, i_G)}]$ and
        # $Var[x_{(i_N, i_G)}] = \mathbb{E}[x^2_{(i_N, i_G)}] - \mathbb{E}[x_{(i_N, i_G)}]^2$
        mean = grouped.mean(dim=-1, keepdim=True)
        var = (grouped ** 2).mean(dim=-1, keepdim=True) - mean ** 2

        # $$\hat{x}_{(i_N, i_G)} =
        # \frac{x_{(i_N, i_G)} - \mathbb{E}[x_{(i_N, i_G)}]}{\sqrt{Var[x_{(i_N, i_G)}] + \epsilon}}$$
        normalized = (grouped - mean) / torch.sqrt(var + self.eps)

        # Scale and shift group-wise
        # $$y_{i_G} =\gamma_{i_G} \hat{x}_{i_G} + \beta_{i_G}$$
        if self.affine:
            normalized = self.scale.view(1, -1, 1) * normalized + self.shift.view(1, -1, 1)

        # Reshape to the original shape and return
        return normalized.view(original_shape)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/group_norm/experiment.py | labml_nn/normalization/group_norm/experiment.py | """
---
title: CIFAR10 Experiment to try Group Normalization
summary: >
This trains is a simple convolutional neural network that uses group normalization
to classify CIFAR10 images.
---
# CIFAR10 Experiment for Group Normalization
"""
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs, CIFAR10VGGModel
class Model(CIFAR10VGGModel):
    """
    ### VGG model for CIFAR-10 classification

    This derives from the [generic VGG style architecture](../../experiments/cifar10.html).
    """

    def conv_block(self, in_channels, out_channels) -> nn.Module:
        """
        A convolution block: convolution, group normalization, and ReLU.
        """
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            # `fnorm` was undefined (NameError); use PyTorch's `nn.GroupNorm`,
            # which takes `(num_groups, num_channels)` in the same order
            nn.GroupNorm(self.groups, out_channels),
            nn.ReLU(inplace=True),
        )

    def __init__(self, groups: int = 32):
        """
        * `groups` is the number of groups for group normalization
        """
        # `self.groups` must be set before `super().__init__`, since the base
        # constructor presumably builds layers via `conv_block`, which reads it
        # — TODO confirm against `CIFAR10VGGModel`
        self.groups = groups
        super().__init__([[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]])
class Configs(CIFAR10Configs):
    """
    ## Configurations

    Extends the [CIFAR-10 configurations](../../experiments/cifar10.html) with
    the number of groups for group normalization.
    """
    # Number of groups the features are divided into
    groups: int = 16
@option(Configs.model)
def model(c: Configs):
    """
    ### Create model

    Builds the group-norm VGG `Model` with the configured number of groups
    and moves it to the configured device.
    """
    return Model(c.groups).to(c.device)
def main():
    """
    Run the CIFAR-10 group-normalization experiment.
    """
    # Create experiment
    experiment.create(name='cifar10', comment='group norm')
    # Create configurations
    configurations = Configs()
    # Configuration overrides
    overrides = {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    }
    # Load configurations
    experiment.configs(configurations, overrides)
    # Start the experiment and run the training loop
    with experiment.start():
        configurations.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/normalization/group_norm/__init__.py | labml_nn/normalization/group_norm/__init__.py | """
---
title: Group Normalization
summary: >
A PyTorch implementation/tutorial of group normalization.
---
# Group Normalization
This is a [PyTorch](https://pytorch.org) implementation of
the [Group Normalization](https://arxiv.org/abs/1803.08494) paper.
[Batch Normalization](../batch_norm/index.html) works well for large enough batch sizes
but not well for small batch sizes, because it normalizes over the batch.
Training large models with large batch sizes is not possible due to the memory capacity of the
devices.
This paper introduces Group Normalization, which normalizes a set of features together as a group.
This is based on the observation that classical features such as
[SIFT](https://en.wikipedia.org/wiki/Scale-invariant_feature_transform) and
[HOG](https://en.wikipedia.org/wiki/Histogram_of_oriented_gradients) are group-wise features.
The paper proposes dividing feature channels into groups and then separately normalizing
all channels within each group.
## Formulation
All normalization layers can be defined by the following computation.
$$\hat{x}_i = \frac{1}{\sigma_i} (x_i - \mu_i)$$
where $x$ is the tensor representing the batch,
and $i$ is the index of a single value.
For instance, when it's 2D images
$i = (i_N, i_C, i_H, i_W)$ is a 4-d vector for indexing
image within batch, feature channel, vertical coordinate and horizontal coordinate.
$\mu_i$ and $\sigma_i$ are mean and standard deviation.
\begin{align}
\mu_i &= \frac{1}{m} \sum_{k \in \mathcal{S}_i} x_k \\
\sigma_i &= \sqrt{\frac{1}{m} \sum_{k \in \mathcal{S}_i} (x_k - \mu_i)^2 + \epsilon}
\end{align}
$\mathcal{S}_i$ is the set of indexes across which the mean and standard deviation
are calculated for index $i$.
$m$ is the size of the set $\mathcal{S}_i$ which is the same for all $i$.
The definition of $\mathcal{S}_i$ is different for
[Batch normalization](../batch_norm/index.html),
[Layer normalization](../layer_norm/index.html), and
[Instance normalization](../instance_norm/index.html).
### [Batch Normalization](../batch_norm/index.html)
$$\mathcal{S}_i = \{k | k_C = i_C\}$$
The values that share the same feature channel are normalized together.
### [Layer Normalization](../layer_norm/index.html)
$$\mathcal{S}_i = \{k | k_N = i_N\}$$
The values from the same sample in the batch are normalized together.
### [Instance Normalization](../instance_norm/index.html)
$$\mathcal{S}_i = \{k | k_N = i_N, k_C = i_C\}$$
The values from the same sample and same feature channel are normalized together.
### Group Normalization
$$\mathcal{S}_i = \{k | k_N = i_N,
\bigg \lfloor \frac{k_C}{C/G} \bigg \rfloor = \bigg \lfloor \frac{i_C}{C/G} \bigg \rfloor\}$$
where $G$ is the number of groups and $C$ is the number of channels.
Group normalization normalizes values of the same sample and the same group of channels together.
Here's a [CIFAR 10 classification model](experiment.html) that uses group normalization.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/normalization/group_norm/experiment.ipynb)
"""
import torch
from torch import nn
class GroupNorm(nn.Module):
    r"""
    ## Group Normalization Layer
    """

    def __init__(self, groups: int, channels: int, *,
                 eps: float = 1e-5, affine: bool = True):
        """
        * `groups` is the number of groups the features are divided into
        * `channels` is the number of features in the input
        * `eps` is $\epsilon$, used in $\sqrt{Var[x^{(k)}] + \epsilon}$ for numerical stability
        * `affine` is whether to scale and shift the normalized value
        """
        super().__init__()
        assert channels % groups == 0, "Number of channels should be evenly divisible by the number of groups"

        self.groups = groups
        self.channels = channels
        self.eps = eps
        self.affine = affine

        # $\gamma$ and $\beta$ for channel-wise scale and shift
        if affine:
            self.scale = nn.Parameter(torch.ones(channels))
            self.shift = nn.Parameter(torch.zeros(channels))

    def forward(self, x: torch.Tensor):
        """
        `x` is a tensor of shape `[batch_size, channels, *]`.
        `*` denotes any number of (possibly 0) dimensions.
        For example, in an image (2D) convolution this will be
        `[batch_size, channels, height, width]`
        """
        # Keep the original shape and sanity check the number of features
        original_shape = x.shape
        assert self.channels == x.shape[1]
        batch_size = original_shape[0]

        # Reshape into `[batch_size, groups, n]`
        grouped = x.view(batch_size, self.groups, -1)

        # Per-sample, per-group statistics:
        # $\mathbb{E}[x_{(i_N, i_G)}]$ and
        # $Var[x_{(i_N, i_G)}] = \mathbb{E}[x^2_{(i_N, i_G)}] - \mathbb{E}[x_{(i_N, i_G)}]^2$
        mean = grouped.mean(dim=-1, keepdim=True)
        var = (grouped ** 2).mean(dim=-1, keepdim=True) - mean ** 2

        # $$\hat{x}_{(i_N, i_G)} =
        # \frac{x_{(i_N, i_G)} - \mathbb{E}[x_{(i_N, i_G)}]}{\sqrt{Var[x_{(i_N, i_G)}] + \epsilon}}$$
        normalized = (grouped - mean) / torch.sqrt(var + self.eps)

        # Scale and shift channel-wise
        # $$y_{i_C} =\gamma_{i_C} \hat{x}_{i_C} + \beta_{i_C}$$
        if self.affine:
            normalized = normalized.view(batch_size, self.channels, -1)
            normalized = self.scale.view(1, -1, 1) * normalized + self.shift.view(1, -1, 1)

        # Reshape to the original shape and return
        return normalized.view(original_shape)
def _test():
    """
    Simple smoke test: run a zero tensor through a `GroupNorm` layer
    and check shapes via `inspect`.
    """
    from labml.logger import inspect

    batch = torch.zeros([2, 6, 2, 4])
    inspect(batch.shape)
    norm_layer = GroupNorm(2, 6)
    batch = norm_layer(batch)
    inspect(batch.shape)


#
if __name__ == '__main__':
    _test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rwkv/experiment.py | labml_nn/rwkv/experiment.py | import inspect
import math
import torch
import torch.nn as nn
from labml_nn.rwkv.configs import RWKVConfigs
from labml_nn.rwkv import RWKV
from labml_nn.rwkv import TimeMixing
from labml import experiment
from labml.configs import option
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    This inherits from
    [`NLPAutoRegressionConfigs`](../../experiments/nlp_autoregression.html#NLPAutoRegressionConfigs)
    """

    # RWKV model
    model: RWKV
    # RWKV model configurations
    rwkv: RWKVConfigs
    # number of warmup iterations
    warmup_iters: int = 2000
    # total number of training iterations
    max_iters: int = 600000
    # weight decay
    weight_decay: float = 1e-1
    # AdamW $\beta_1$ for the custom optimizer
    beta1: float = 0.9
    # AdamW $\beta_2$ for the custom optimizer
    beta2: float = 0.95
    # NOTE(review): no option named `'rwkv_optimizer'` is visible in this file —
    # the optimizer option below is registered without that name; verify
    optimizer = 'rwkv_optimizer'
@option(Configs.rwkv, 'RWKV')
def _rwkv_configs(c: Configs):
    """
    ### RWKV configurations

    Creates the [configurable RWKV implementation](../configs.html#RWKVConfigs)
    and copies the vocabulary sizes from the experiment configurations.
    """
    rwkv_conf = RWKVConfigs()
    # Vocabulary sizes for the embeddings and the output logits
    rwkv_conf.n_src_vocab = c.n_tokens
    rwkv_conf.n_tgt_vocab = c.n_tokens
    #
    return rwkv_conf
def _init_weights(module, rwkv: RWKVConfigs):
    """
    Custom initialization for the learnable vectors of `TimeMixing` layers.
    Other module types are left untouched. `rwkv` is accepted for the caller's
    convenience but is not read here.
    """
    # Only time-mixing layers get this treatment
    if not isinstance(module, TimeMixing):
        return

    # NOTE(review): `TimeMixing` in this package only sets `config`, `layer_id`
    # and `time_shift` in `__init__` — verify that `n_layer`/`n_embd` are
    # actually available as attributes here
    layer_id = module.layer_id
    n_layer = module.n_layer
    n_embd = module.n_embd
    attn_sz = n_embd

    with torch.no_grad():
        # 0 at the first layer, 1 at the last
        ratio_0_to_1 = layer_id / (n_layer - 1)
        # 1 at the first layer, approaching 0 at the last
        ratio_1_to_almost0 = 1.0 - (layer_id / n_layer)

        # Channel positions scaled to [0, 1)
        channel_ratios = torch.ones(1, 1, n_embd)
        for ch in range(n_embd):
            channel_ratios[0, 0, ch] = ch / n_embd

        # Per-channel decay speeds
        decay_speed = torch.ones(attn_sz)
        for ch in range(attn_sz):
            decay_speed[ch] = -5 + 8 * (ch / (attn_sz - 1)) ** (0.7 + 1.3 * ratio_0_to_1)
        module.time_decay = nn.Parameter(decay_speed)

        # Alternating -0.5, 0, +0.5 pattern
        zigzag = torch.tensor([(ch + 1) % 3 - 1 for ch in range(attn_sz)]) * 0.5
        module.time_first = nn.Parameter(torch.ones(attn_sz) * math.log(0.3) + zigzag)

        # Token-shift mixing vectors
        module.time_mix_key = nn.Parameter(torch.pow(channel_ratios, ratio_1_to_almost0))
        module.time_mix_value = nn.Parameter(torch.pow(channel_ratios, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)
        module.time_mix_receptance = nn.Parameter(torch.pow(channel_ratios, 0.5 * ratio_1_to_almost0))
@option(Configs.model)
def _model(c: Configs):
    """
    Create the RWKV model and initialize its weights.
    """
    m = RWKV(c.rwkv).to(c.device)

    # Apply custom weight initialization.
    # `nn.Module.apply` accepts a single-argument callable, so the
    # configuration is bound with a closure — the original passed it as an
    # extra argument to `apply`, which raises a `TypeError`.
    m.apply(lambda module: _init_weights(module, c.rwkv))

    #
    return m
@option(NLPAutoRegressionConfigs.optimizer)
def _configure_optimizers(c: Configs):
    """
    ### Create the AdamW optimizer with selective weight decay

    2D parameters (matmul weights and embeddings) get weight decay;
    biases and layer-norm gains do not.
    """
    # Start with all of the candidate parameters that require gradients
    param_dict = {pn: p for pn, p in c.model.named_parameters() if p.requires_grad}

    # Create optim groups. Any parameter that is 2D will be weight decayed, otherwise no.
    # i.e. all weight tensors in matmuls + embeddings decay, all biases and layernorms don't.
    decay_params = [p for p in param_dict.values() if p.dim() >= 2]
    nodecay_params = [p for p in param_dict.values() if p.dim() < 2]
    optim_groups = [
        {'params': decay_params, 'weight_decay': c.weight_decay},
        {'params': nodecay_params, 'weight_decay': 0.0}
    ]
    num_decay_params = sum(p.numel() for p in decay_params)
    num_nodecay_params = sum(p.numel() for p in nodecay_params)
    print(f"num decayed parameter tensors: {len(decay_params)}, with {num_decay_params:,} parameters")
    print(f"num non-decayed parameter tensors: {len(nodecay_params)}, with {num_nodecay_params:,} parameters")

    # Use the fused AdamW implementation when available and running on CUDA.
    fused_available = 'fused' in inspect.signature(torch.optim.AdamW).parameters
    # `c.device_type` was undefined; derive the device type from the
    # configured device (`c.device` is what `_model` moves the model to)
    use_fused = fused_available and c.device.type == 'cuda'
    extra_args = dict(fused=True) if use_fused else dict()
    # `c.betas` does not exist; the betas are configured as `beta1`/`beta2`
    # on `Configs` above
    optimizer = torch.optim.AdamW(optim_groups, lr=c.learning_rate, betas=(c.beta1, c.beta2), **extra_args)
    print(f"using fused AdamW: {use_fused}")

    return optimizer
def main():
    """
    Train RWKV on the Tiny Shakespeare dataset.
    """
    # Create experiment
    experiment.create(name="RWKV")
    # Create configs
    conf = Configs()
    print(conf.model)

    # Configuration overrides
    # NOTE(review): `RWKVConfigs` in this package declares `n_layers`/`d_model`,
    # not `n_layer`/`n_embd`/`block_size` — verify these keys against it
    overrides = {
        # Use character level tokenizer
        'tokenizer': 'character',
        # Prompt separator is blank
        'prompt_separator': '',
        # Starting prompt for sampling
        'prompt': 'It is ',
        # Use Tiny Shakespeare dataset
        'text': 'tiny_shakespeare',

        # Use a context size of $128$
        'seq_len': 128,
        # Train for $32$ epochs
        'epochs': 32,
        # Batch size $128$
        'batch_size': 128,
        # Switch between training and validation $10$ times per epoch
        'inner_iterations': 10,

        # Model sizes
        'rwkv.block_size': 1024,
        'rwkv.n_layer': 12,
        'rwkv.n_heads': 12,
        'rwkv.n_embd': 768,
    }
    experiment.configs(conf, overrides)
    print(conf.model)

    # Set models for saving and loading
    experiment.add_pytorch_models({'model': conf.model})

    # Start the experiment and run training
    with experiment.start():
        conf.run()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rwkv/configs.py | labml_nn/rwkv/configs.py | from labml.configs import BaseConfigs
class RWKVConfigs(BaseConfigs):
    """
    ## RWKV Configurations

    This defines configurations for a RWKV model.
    The configurations are calculated using option functions.
    These are lazy loaded and therefore only the necessary modules
    are calculated.

    NOTE(review): the experiment overrides keys such as `rwkv.n_layer`,
    `rwkv.n_embd` and `rwkv.block_size`, which do not match the attributes
    declared here (`n_layers`, `d_model`) — verify.
    """
    # Number of attention heads
    n_heads: int = 8
    # Transformer embedding size
    d_model: int = 512
    # Number of layers
    n_layers: int = 6
    # Dropout probability
    dropout: float = 0.1
    # Number of tokens in the source vocabulary (for token embeddings)
    n_src_vocab: int
    # Number of tokens in the target vocabulary (to generate logits for prediction)
    n_tgt_vocab: int
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/rwkv/__init__.py | labml_nn/rwkv/__init__.py | """
---
title: Receptance Weighted Key Value (RWKV)
summary: >
This implements the RWKV model
using PyTorch with explanations.
---
# Receptance Weighted Key Value (RWKV)
This is a tutorial/implementation of RWKV
from paper [RWKV: Reinventing RNNs for the Transformer Era](https://arxiv.org/pdf/2305.13048.pdf)
in [PyTorch](https://pytorch.org/).
Full definition of a RWKV Language Model, all of it in this single file.
References:
1) [the official RWKV PyTorch implementation released by Bo Peng](https://github.com/BlinkDL/RWKV-LM/blob/main/RWKV-v4neo/src/model.py)
2) [huggingface/transformers PyTorch implementation](https://github.com/huggingface/transformers/blob/main/src/transformers/models/rwkv/modeling_rwkv.py)
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
# Indexes into the per-layer recurrent state tensor,
# used as `state[layer_id, batch, index, channel]`:
#
# Previous input seen by the time-mixing block
PREV_X_TIME = 0
# WKV numerator accumulator
NUM_STATE = 1
# WKV denominator accumulator
DEN_STATE = 2
# Running maximum used for numerical stability in the WKV computation
MAX_STATE = 3
# Previous input seen by the channel-mixing block
PREV_X_CHANNEL = 4
class LayerNorm(nn.Module):
    """
    ### Layer normalization with an optional bias

    * `ndim` is the number of features to normalize over
    * `bias` is whether to learn an offset $\beta$ in addition to the gain
    """

    def __init__(self, ndim, bias):
        super().__init__()
        # Gain $\gamma$
        self.weight = nn.Parameter(torch.ones(ndim))
        # Offset $\beta$, only when `bias` is set
        if bias:
            self.bias = nn.Parameter(torch.zeros(ndim))
        else:
            self.bias = None

    def forward(self, input):
        # Normalize over the trailing `ndim` features
        return F.layer_norm(input, self.weight.shape, self.weight, self.bias, 1e-5)
class L2Wrap(torch.autograd.Function):
    """
    ### L2 loss wrapper

    Passes the loss through unchanged on the forward pass; on the backward
    pass it adds a small gradient on `y` that pushes the largest logit of
    each position towards zero.

    [ref](https://github.com/BlinkDL/RWKV-LM/blob/cca1b5e8e597cf40675882bb10b46287c844e35c/RWKV-v4/src/model.py#L21)
    """

    @staticmethod
    def forward(ctx, loss, y):
        # Keep the logits around for the backward pass
        ctx.save_for_backward(y)
        return loss

    @staticmethod
    def backward(ctx, grad_output):
        y, = ctx.saved_tensors
        # Encourage the logits to be close to 0
        factor = 1e-4 / (y.shape[0] * y.shape[1])
        max_values, max_ids = torch.max(y, -1, keepdim=True)
        extra_grad = torch.zeros_like(y)
        extra_grad.scatter_(-1, max_ids, max_values * factor)
        return grad_output, extra_grad
class ChannelMixing(nn.Module):
    """
    ### Channel Mixing
    """

    def __init__(self, config, layer_id):
        """
        * `config` carries `n_embd` and `intermediate_size`
        * `layer_id` is this layer's index into the recurrent state
        """
        super().__init__()
        # Token shift: pad one step at the front of the time dimension and
        # drop the last, so position $t$ sees position $t-1$
        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
        self.layer_id = layer_id

        n_embd = config.n_embd
        intermediate_size = (
            config.intermediate_size if config.intermediate_size is not None else 4 * n_embd
        )

        # Learnable projection matrices
        self.key_proj = nn.Linear(n_embd, intermediate_size, bias=False)
        self.value_proj = nn.Linear(intermediate_size, n_embd, bias=False)
        self.receptance_proj = nn.Linear(n_embd, n_embd, bias=False)

        # Learnable token-shift mixing vectors
        self.time_mix_key = nn.Parameter(torch.empty(1, 1, n_embd))
        self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, n_embd))

    def forward(self, x, state=None):
        """
        `x` has shape `[batch, time, channel]`
        """
        # The previous token comes from `state` during sequential inference,
        # and from a token shift during parallel (training) mode
        if state is None:
            prev_x = self.time_shift(x)
        else:
            prev_x = state[self.layer_id, :, [PREV_X_CHANNEL], :]
            state[self.layer_id, :, [PREV_X_CHANNEL], :] = x

        # $r_t=W_r \cdot (\mu_r x_t + (1-\mu_r)x_{t-1})$
        mix_r = self.time_mix_receptance
        receptance = self.receptance_proj(x * mix_r + prev_x * (1 - mix_r))

        # $k_t=W_k \cdot (\mu_k x_t + (1-\mu_k)x_{t-1})$
        mix_k = self.time_mix_key
        key = self.key_proj(x * mix_k + prev_x * (1 - mix_k))

        # $V_t=W_v \cdot max(k_t,0)^2$
        value = self.value_proj(torch.square(torch.relu(key)))

        # $o_t=\sigma(r_t) \odot v_t$
        return F.sigmoid(receptance) * value, state
class TimeMixing(nn.Module):
    """
    ### Time Mixing
    """

    def __init__(self, config, layer_id):
        """
        * `config` carries `n_embd`
        * `layer_id` is this layer's index into the recurrent state
        """
        super().__init__()
        self.config = config
        # Token shift: position $t$ sees position $t-1$
        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
        self.layer_id = layer_id

        n_embd = config.n_embd
        attn_sz = n_embd

        # Learnable projection matrices
        self.key_proj = nn.Linear(n_embd, attn_sz, bias=False)
        self.value_proj = nn.Linear(n_embd, attn_sz, bias=False)
        self.receptance_proj = nn.Linear(n_embd, attn_sz, bias=False)
        self.output_proj = nn.Linear(attn_sz, n_embd, bias=False)

        # Learnable vectors: per-channel decay/bonus and token-shift mixes
        self.time_decay = nn.Parameter(torch.empty(attn_sz))
        self.time_first = nn.Parameter(torch.empty(attn_sz))
        self.time_mix_key = nn.Parameter(torch.empty(1, 1, n_embd))
        self.time_mix_value = nn.Parameter(torch.empty(1, 1, n_embd))
        self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, n_embd))

    def forward(self, x, state=None):
        """
        `x` has shape `[batch, time, channel]`
        """
        # Previous token: from `state` during sequential inference,
        # from a token shift during parallel (training) mode
        if state is not None:
            prev_x = state[self.layer_id, :, [PREV_X_TIME], :]
            state[self.layer_id, :, [PREV_X_TIME], :] = x
        else:
            prev_x = self.time_shift(x)

        # $r_t=W_r \cdot (\mu_r x_t + (1-\mu_r)x_{t-1})$
        receptance = x * self.time_mix_receptance + prev_x * (1 - self.time_mix_receptance)
        receptance = self.receptance_proj(receptance)

        # $k_t=W_k \cdot (\mu_k x_t + (1-\mu_k)x_{t-1})$
        key = x * self.time_mix_key + prev_x * (1 - self.time_mix_key)
        key = self.key_proj(key)

        # $v_t=W_v \cdot (\mu_v x_t + (1-\mu_v)x_{t-1})$
        value = x * self.time_mix_value + prev_x * (1 - self.time_mix_value)
        value = self.value_proj(value)

        # WKV calculation
        _, seq_length, _ = key.size()
        output = torch.zeros_like(key)

        # Resume accumulators from `state`, or start fresh
        if state is None:
            num_state = torch.zeros_like(key[:, 0], dtype=torch.float32)
            den_state = torch.zeros_like(key[:, 0], dtype=torch.float32)
            max_state = torch.zeros_like(key[:, 0], dtype=torch.float32) - 1e38
        else:
            num_state = state[self.layer_id, :, NUM_STATE, :]
            den_state = state[self.layer_id, :, DEN_STATE, :]
            max_state = state[self.layer_id, :, MAX_STATE, :]

        # Decay is always negative: $w = -e^{\text{time\_decay}}$
        time_decay = -torch.exp(self.time_decay)

        for current_index in range(seq_length):
            current_key = key[:, current_index].float()
            current_value = value[:, current_index]

            # $wkv_t=\frac{\sum^{t-1}_{i=1}e^{-(t-1-i)w+k_i}v_i+e^{u+k_t}v_t}{\sum^{t-1}_{i=1}e^{-(t-1-i)w+k_i}+e^{u+k_t}}$
            # computed in a numerically stable way by tracking the running max
            max_for_output = torch.maximum(max_state, current_key + self.time_first)
            e1 = torch.exp(max_state - max_for_output)
            e2 = torch.exp(current_key + self.time_first - max_for_output)
            numerator = e1 * num_state + e2 * current_value
            denominator = e1 * den_state + e2
            output[:, current_index] = (numerator / denominator).to(output.dtype)

            # Update the accumulators for the next step
            max_for_state = torch.maximum(max_state + time_decay, current_key)
            e1 = torch.exp(max_state + time_decay - max_for_state)
            e2 = torch.exp(current_key - max_for_state)
            num_state = e1 * num_state + e2 * current_value
            den_state = e1 * den_state + e2
            max_state = max_for_state

        # Write the accumulators back only when a state was given.
        # The original updated `state` unconditionally, which raised a
        # `TypeError` on the training path (`state is None`).
        if state is not None:
            state[self.layer_id, :, NUM_STATE, :] = num_state
            state[self.layer_id, :, DEN_STATE, :] = den_state
            state[self.layer_id, :, MAX_STATE, :] = max_state

        # The loop above has already computed $wkv_t$ into `output`; the
        # original then called a non-existent `self.wkv_function`, which
        # raised an `AttributeError`.
        wkv = output

        # $o_t=W_o \cdot (\sigma(r_t) \odot wkv_t)$
        rwkv = F.sigmoid(receptance) * wkv
        rwkv = self.output_proj(rwkv)

        return rwkv, state
class Block(nn.Module):
    """
    ## RWKV block

    A pre-norm residual block: time mixing followed by channel mixing.
    """

    def __init__(self, config, layer_id):
        super().__init__()
        self.ln_1 = LayerNorm(config.n_embd, bias=config.bias)
        self.attn = TimeMixing(config, layer_id)
        self.ln_2 = LayerNorm(config.n_embd, bias=config.bias)
        self.ffn = ChannelMixing(config, layer_id)

    def forward(self, x, state=None):
        # `state` is the per-layer recurrent state
        # (shape `[n_layer, batch_size, 5, n_embd]`, or `None` during training)

        # Time mixing with a residual connection
        out, state = self.attn(self.ln_1(x), state=state)
        x = x + out

        # Channel mixing with a residual connection
        out, state = self.ffn(self.ln_2(x), state=state)
        x = x + out

        return x, state
class RWKV(nn.Module):
    """
    ## RWKV

    The full RWKV language model: token embedding, input layer norm,
    a stack of `Block`s, a final layer norm, and a linear head that
    produces logits.
    """

    def __init__(self, config, lr_init=0.0008):
        """
        * `config` carries the model hyper-parameters read here:
          `vocab_size`, `block_size`, `n_embd`, `n_layer`, `bias`
        * `lr_init` is stored for embedding parameter initialization
        """
        super().__init__()
        assert config.vocab_size is not None
        assert config.block_size is not None
        self.config = config
        self.lr_init = lr_init  ## used to initialize embedding parameters
        self.n_layer = config.n_layer
        self.n_embd = config.n_embd

        # Initiate model layers:
        # `wte` token embeddings, `ln_p` input layer norm,
        # `h` the stack of RWKV blocks, `ln_f` final layer norm
        self.rwkv = nn.ModuleDict(dict(
            wte=nn.Embedding(config.vocab_size, config.n_embd),
            ln_p=LayerNorm(config.n_embd, bias=config.bias),
            h=nn.ModuleList([Block(config, layer_id) for layer_id in range(config.n_layer)]),
            ln_f=LayerNorm(config.n_embd, bias=config.bias),
        ))

        # Output linear layer
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)

    def forward(self, idx, targets=None, state=None, return_state=False):
        """
        * `idx` are token indices of shape `[batch, time]`
        * `targets` are optional labels for the cross entropy loss
        * `state` is the optional recurrent state for sequential inference
        * `return_state` is whether to also return the updated state
        """
        b, t = idx.size()
        # Sequences longer than the configured context size cannot be processed
        assert t <= self.config.block_size, f"Cannot forward sequence of length {t}, block size is only {self.config.block_size}"

        # Embedding Layer
        x = self.rwkv.wte(idx)

        # Layer Norm
        x = self.rwkv.ln_p(x)

        # RWKV Blocks, threading the recurrent state through each layer
        for block_idx, block in enumerate(self.rwkv.h):
            x, state = block(x, state)
        x = self.rwkv.ln_f(x)

        # Logit Layer and loss Function (for training)
        if targets is not None:
            # if we are given some desired targets also calculate the loss
            logits = self.lm_head(x)
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
            # Apply the L2 regularizing wrapper only while training
            if self.training:
                loss = L2Wrap.apply(loss, logits)
        else:
            # inference-time mini-optimization: only forward the lm_head on the very last position
            logits = self.lm_head(x[:, [-1], :])  # note: using list [-1] to preserve the time dim
            loss = None

        # Return Logits and loss (and optionally the state)
        if return_state:
            return logits, loss, state
        else:
            return logits, loss
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/cfr/infoset_saver.py | labml_nn/cfr/infoset_saver.py | import json
import pathlib
from typing import Dict
from labml import experiment
from labml_nn.cfr import InfoSet
class InfoSetSaver(experiment.ModelSaver):
    """
    Saves and loads the information sets of a CFR run as JSON
    alongside experiment checkpoints.
    """

    def __init__(self, infosets: Dict[str, InfoSet]):
        """
        * `infosets` maps information-set keys to `InfoSet` instances;
          `load` mutates this mapping in place
        """
        self.infosets = infosets

    def save(self, checkpoint_path: pathlib.Path) -> any:
        """
        Serialize all information sets to `infosets.json` inside
        `checkpoint_path` and return the file name.
        """
        data = {key: infoset.to_dict() for key, infoset in self.infosets.items()}
        file_name = "infosets.json"
        with open(str(checkpoint_path / file_name), 'w') as f:
            f.write(json.dumps(data))
        return file_name

    def load(self, checkpoint_path: pathlib.Path, file_name: str):
        """
        Restore the information sets from a previously saved JSON file.
        """
        # The original opened the file in `'w'` mode, which truncates it
        # and fails on read; it must be opened for reading.
        with open(str(checkpoint_path / file_name), 'r') as f:
            data = json.loads(f.read())
        for key, d in data.items():
            self.infosets[key] = InfoSet.from_dict(d)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/cfr/analytics.py | labml_nn/cfr/analytics.py | from typing import List
import altair as alt
import numpy as np
from labml import analytics
from labml.analytics import IndicatorCollection
def calculate_percentages(means: List[np.ndarray], names: List[List[str]]):
    """
    Normalize each series by the sum over all series that share the same
    prefix (the last name component with its final character removed).

    * `means` are the per-series value arrays
    * `names` are the per-series name paths; `names[i][-1]` is the leaf name

    Returns one normalized array per input series.
    """
    normalized = []
    for series, name_path in zip(means, names):
        prefix = name_path[-1][:-1]
        # Sum over every series sharing the prefix (including this one)
        total = np.zeros_like(series)
        for other_series, other_path in zip(means, names):
            if other_path[-1][:-1] == prefix:
                total += other_series
        # `eps` guards against division by zero
        normalized.append(series / (total + np.finfo(float).eps))
    return normalized
def plot_infosets(indicators: IndicatorCollection, *,
                  is_normalize: bool = True,
                  width: int = 600,
                  height: int = 300):
    """
    Plot tracked information-set values as an interactive Altair line chart.

    * `indicators` is the collection of tracked indicators to plot
    * `is_normalize` normalizes each series by its group total (see `calculate_percentages`)
    * `width` and `height` are the chart dimensions in pixels

    Returns the Altair chart; clicking a legend entry highlights that series.
    """
    # Fetch raw arrays and hierarchical indicator names from the analytics backend
    data, names = analytics.indicator_data(indicators)
    # NOTE(review): column 0 assumed to be the step and column 5 the mean value
    # of each tracked indicator — confirm against `labml.analytics` output format
    step = [d[:, 0] for d in data]
    means = [d[:, 5] for d in data]
    if is_normalize:
        normalized = calculate_percentages(means, names)
    else:
        normalized = means
    # Compute the longest common prefix of the final name components so the
    # legend shows only the distinguishing suffix of each series
    common = names[0][-1]
    for i, n in enumerate(names):
        n = n[-1]
        # A shorter name caps the possible prefix length
        if len(n) < len(common):
            common = common[:len(n)]
        # Truncate at the first mismatching character
        for j in range(len(common)):
            if common[j] != n[j]:
                common = common[:j]
                break
    # Build a long-form table: one row per (series, step) pair
    table = []
    for i, n in enumerate(names):
        for j, v in zip(step[i], normalized[i]):
            table.append({
                'series': n[-1][len(common):],
                'step': j,
                'value': v
            })
    table = alt.Data(values=table)
    # Legend-bound selection: clicking a legend entry highlights that series
    selection = alt.selection_multi(fields=['series'], bind='legend')
    return alt.Chart(table).mark_line().encode(
        alt.X('step:Q'),
        alt.Y('value:Q'),
        alt.Color('series:N', scale=alt.Scale(scheme='tableau20')),
        opacity=alt.condition(selection, alt.value(1), alt.value(0.0001))
    ).add_selection(
        selection
    ).properties(width=width, height=height)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/cfr/__init__.py | labml_nn/cfr/__init__.py | """
---
title: Regret Minimization in Games with Incomplete Information (CFR)
summary: >
This is an annotated implementation/tutorial of Regret Minimization in Games with Incomplete Information
---
# Regret Minimization in Games with Incomplete Information (CFR)
The paper
[Regret Minimization in Games with Incomplete Information](http://martin.zinkevich.org/publications/regretpoker.pdf)
introduces counterfactual regret and how minimizing counterfactual regret through self-play
can be used to reach Nash equilibrium.
The algorithm is called Counterfactual Regret Minimization (**CFR**).
The paper
[Monte Carlo Sampling for Regret Minimization in Extensive Games](http://mlanctot.info/files/papers/nips09mccfr.pdf)
introduces Monte Carlo Counterfactual Regret Minimization (**MCCFR**),
where we sample from the game tree and estimate the regrets.
We tried to keep our Python implementation easy-to-understand like a tutorial.
We run it on [a very simple imperfect information game called Kuhn poker](kuhn/index.html).
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/cfr/kuhn/experiment.ipynb)
[](https://twitter.com/labmlai/status/1407186002255380484)
Twitter thread
## Introduction
We implement Monte Carlo Counterfactual Regret Minimization (MCCFR) with chance sampling (CS).
It iteratively explores part of the game tree by trying all player actions,
but sampling chance events.
Chance events are things like dealing cards; they are kept sampled once per iteration.
Then it calculates, for each action, the *regret* of following the current strategy instead of taking that action.
Then it updates the strategy based on these regrets for the next iteration, using regret matching.
Finally, it computes the average of the strategies throughout the iterations,
which is very close to the Nash equilibrium if we ran enough iterations.
We will first introduce the mathematical notation and theory.
### Player
A player is denoted by $i \in N$, where $N$ is the set of players.
### [History](#History)
History $h \in H$ is a sequence of actions including chance events,
and $H$ is the set of all histories.
$Z \subseteq H$ is the set of terminal histories (game over).
### Action
Action $a$, $A(h) = {a: (h, a) \in H}$ where $h \in H$ is a non-terminal [history](#History).
### [Information Set $I_i$](#InfoSet)
**Information set** $I_i \in \mathcal{I}_i$ for player $i$
is similar to a history $h \in H$
but only contains the actions visible to player $i$.
That is, the history $h$ will contain actions/events such as cards dealt to the
opposing player while $I_i$ will not have them.
$\mathcal{I}_i$ is known as the **information partition** of player $i$.
$h \in I$ is the set of all histories that belong to a given information set;
i.e. all those histories look the same in the eye of the player.
<a id="Strategy"></a>
### Strategy
**Strategy of player** $i$, $\sigma_i \in \Sigma_i$ is a distribution over actions $A(I_i)$,
where $\Sigma_i$ is the set of all strategies for player $i$.
Strategy on $t$-th iteration is denoted by $\sigma^t_i$.
Strategy is defined as a probability for taking an action $a$ for a given information set $I$,
$$\sigma_i(I)(a)$$
$\sigma$ is the **strategy profile** which consists of strategies of all players
$\sigma_1, \sigma_2, \ldots$
$\sigma_{-i}$ is strategies of all players except $\sigma_i$
<a id="HistoryProbability"></a>
### Probability of History
$\pi^\sigma(h)$ is the probability of reaching the history $h$ with strategy profile $\sigma$.
$\pi^\sigma(h)_{-i}$ is the probability of reaching $h$ without player $i$'s contribution;
i.e. player $i$ took the actions to follow $h$ with a probability of $1$.
$\pi^\sigma(h)_{i}$ is the probability of reaching $h$ with only player $i$'s contribution.
That is,
$$\pi^\sigma(h) = \pi^\sigma(h)_{i} \pi^\sigma(h)_{-i}$$
Probability of reaching an information set $I$ is,
$$\pi^\sigma(I) = \sum_{h \in I} \pi^\sigma(h)$$
### Utility (Pay off)
The [terminal utility](#terminal_utility) is the utility (or pay off)
of a player $i$ for a terminal history $h$.
$$u_i(h)$$ where $h \in Z$
$u_i(\sigma)$ is the expected utility (payoff) for player $i$ with strategy profile $\sigma$.
$$u_i(\sigma) = \sum_{h \in Z} u_i(h) \pi^\sigma(h)$$
<a id="NashEquilibrium"></a>
### Nash Equilibrium
Nash equilibrium is a state where none of the players can increase their expected utility (or payoff)
by changing their strategy alone.
For two players, Nash equilibrium is a [strategy profile](#Strategy) where
\begin{align}
u_1(\sigma) &\ge \max_{\sigma'_1 \in \Sigma_1} u_1(\sigma'_1, \sigma_2) \\
u_2(\sigma) &\ge \max_{\sigma'_2 \in \Sigma_2} u_1(\sigma_1, \sigma'_2) \\
\end{align}
$\epsilon$-Nash equilibrium is,
\begin{align}
u_1(\sigma) + \epsilon &\ge \max_{\sigma'_1 \in \Sigma_1} u_1(\sigma'_1, \sigma_2) \\
u_2(\sigma) + \epsilon &\ge \max_{\sigma'_2 \in \Sigma_2} u_1(\sigma_1, \sigma'_2) \\
\end{align}
### Regret Minimization
Regret is the utility (or pay off) that the player didn't get because
she didn't follow the optimal strategy or took the best action.
Average overall regret for Player $i$ is the average regret of not following the
optimal strategy in all $T$ rounds of iterations.
$$R^T_i = \frac{1}{T} \max_{\sigma^*_i \in \Sigma_i} \sum_{t=1}^T
\Big( u_i(\sigma^*_i, \sigma^t_{-i}) - u_i(\sigma^t) \Big)$$
where $\sigma^t$ is the strategy profile of all players in iteration $t$,
and
$$(\sigma^*_i, \sigma^t_{-i})$$
is the strategy profile $\sigma^t$ with player $i$'s strategy
replaced with $\sigma^*_i$.
The average strategy is the average of strategies followed in each round,
for all $I \in \mathcal{I}, a \in A(I)$
$$\textcolor{cyan}{\bar{\sigma}^T_i(I)(a)} =
\frac{\sum_{t=1}^T \pi_i^{\sigma^t}(I)\textcolor{lightgreen}{\sigma^t(I)(a)}}{\sum_{t=1}^T \pi_i^{\sigma^t}(I)}$$
That is the mean regret of not playing with the optimal strategy.
If $R^T_i < \epsilon$ for all players then $\bar{\sigma}^T_i(I)(a)$ is a
$2\epsilon$-Nash equilibrium.
\begin{align}
R^T_i &< \epsilon \\
R^T_i &= \frac{1}{T} \max_{\sigma^*_i \in \Sigma_i} \sum_{t=1}^T
\Big( u_i(\sigma^*_i, \sigma^t_{-i}) - u_i(\sigma^t) \Big) \\
&= \frac{1}{T} \max_{\sigma^*_i \in \Sigma_i} \sum_{t=1}^T u_i(\sigma^*_i, \sigma^t_{-i})
- \frac{1}{T} \sum_{t=1}^T u_i(\sigma^t) < \epsilon
\end{align}
Since $u_1 = -u_2$ because it's a zero-sum game, we can add $R^T_1$ and $R^T_2$ and the
second term will cancel out.
\begin{align}
2\epsilon &>
\frac{1}{T} \max_{\sigma^*_1 \in \Sigma_1} \sum_{t=1}^T u_1(\sigma^*_1, \sigma^t_{-1}) +
\frac{1}{T} \max_{\sigma^*_2 \in \Sigma_2} \sum_{t=1}^T u_2(\sigma^*_2, \sigma^t_{-2})
\end{align}
The average of utilities over a set of strategies is equal to the utility of the average strategy.
$$\frac{1}{T} \sum_{t=1}^T u_i(\sigma^t) = u_i(\bar{\sigma}^T)$$
Therefore,
\begin{align}
2\epsilon &>
\max_{\sigma^*_1 \in \Sigma_1} u_1(\sigma^*_1, \bar{\sigma}^T_{-1}) +
\max_{\sigma^*_2 \in \Sigma_2} u_2(\sigma^*_2, \bar{\sigma}^T_{-2})
\end{align}
From the definition of $\max$,
$$\max_{\sigma^*_2 \in \Sigma_2} u_2(\sigma^*_2, \bar{\sigma}^T_{-2}) \ge u_2(\bar{\sigma}^T)
= -u_1(\bar{\sigma}^T)$$
Then,
\begin{align}
2\epsilon &>
\max_{\sigma^*_1 \in \Sigma_1} u_1(\sigma^*_1, \bar{\sigma}^T_{-1}) +
-u_1(\bar{\sigma}^T) \\
u_1(\bar{\sigma}^T) + 2\epsilon &> \max_{\sigma^*_1 \in \Sigma_1} u_1(\sigma^*_1, \bar{\sigma}^T_{-1})
\end{align}
This is $2\epsilon$-Nash equilibrium.
You can similarly prove for games with more than 2 players.
So we need to minimize $R^T_i$ to get close to a Nash equilibrium.
<a id="CounterfactualRegret"></a>
### Counterfactual regret
**Counterfactual value** $\textcolor{pink}{v_i(\sigma, I)}$ is the expected utility for player $i$ if
if player $i$ tried to reach $I$ (took the actions leading to $I$ with a probability of $1$).
$$\textcolor{pink}{v_i(\sigma, I)} = \sum_{z \in Z_I} \pi^\sigma_{-i}(z[I]) \pi^\sigma(z[I], z) u_i(z)$$
where $Z_I$ is the set of terminal histories reachable from $I$,
and $z[I]$ is the prefix of $z$ up to $I$.
$\pi^\sigma(z[I], z)$ is the probability of reaching z from $z[I]$.
**Immediate counterfactual regret** is,
$$R^T_{i,imm}(I) = \max_{a \in A{I}} R^T_{i,imm}(I, a)$$
where
$$R^T_{i,imm}(I) = \frac{1}{T} \sum_{t=1}^T
\Big(
\textcolor{pink}{v_i(\sigma^t |_{I \rightarrow a}, I)} - \textcolor{pink}{v_i(\sigma^t, I)}
\Big)$$
where $\sigma |_{I \rightarrow a}$ is the strategy profile $\sigma$ with the modification
of always taking action $a$ at information set $I$.
The [paper](http://martin.zinkevich.org/publications/regretpoker.pdf) proves that (Theorem 3),
$$R^T_i \le \sum_{I \in \mathcal{I}} R^{T,+}_{i,imm}(I)$$
where $$R^{T,+}_{i,imm}(I) = \max(R^T_{i,imm}(I), 0)$$
<a id="RegretMatching"></a>
### Regret Matching
The strategy is calculated using regret matching.
The regret for each information set and action pair $\textcolor{orange}{R^T_i(I, a)}$ is maintained,
\begin{align}
\textcolor{coral}{r^t_i(I, a)} &=
\textcolor{pink}{v_i(\sigma^t |_{I \rightarrow a}, I)} - \textcolor{pink}{v_i(\sigma^t, I)}
\\
\textcolor{orange}{R^T_i(I, a)} &=
\frac{1}{T} \sum_{t=1}^T \textcolor{coral}{r^t_i(I, a)}
\end{align}
and the strategy is calculated with regret matching,
\begin{align}
\textcolor{lightgreen}{\sigma_i^{T+1}(I)(a)} =
\begin{cases}
\frac{\textcolor{orange}{R^{T,+}_i(I, a)}}{\sum_{a'\in A(I)}\textcolor{orange}{R^{T,+}_i(I, a')}},
& \text{if} \sum_{a'\in A(I)}\textcolor{orange}{R^{T,+}_i(I, a')} \gt 0 \\
\frac{1}{\lvert A(I) \rvert},
& \text{otherwise}
\end{cases}
\end{align}
where $\textcolor{orange}{R^{T,+}_i(I, a)} = \max \Big(\textcolor{orange}{R^T_i(I, a)}, 0 \Big)$
The paper
[Regret Minimization in Games with Incomplete Information](http://martin.zinkevich.org/publications/regretpoker.pdf)
proves that if the strategy is selected according to above equation
$R^T_i$ gets smaller proportionate to $\frac{1}{\sqrt T}$, and
therefore reaches $\epsilon$-[Nash equilibrium](#NashEquilibrium).
<a id="MCCFR"></a>
### Monte Carlo CFR (MCCFR)
Computing $\textcolor{coral}{r^t_i(I, a)}$ requires expanding the full game tree
on each iteration.
The paper
[Monte Carlo Sampling for Regret Minimization in Extensive Games](http://mlanctot.info/files/papers/nips09mccfr.pdf)
shows we can sample from the game tree and estimate the regrets.
$\mathcal{Q} = {Q_1, \ldots, Q_r}$ is a set of subsets of $Z$ ($Q_j \subseteq Z$) where
we look at only a single block $Q_j$ in an iteration.
Union of all subsets spans $Z$ ($Q_1 \cup \ldots \cup Q_r = Z$).
$q_j$ is the probability of picking block $Q_j$.
$q(z)$ is the probability of picking $z$ in current iteration; i.e. $q(z) = \sum_{j:z \in Q_j} q_j$ -
the sum of $q_j$ where $z \in Q_j$.
Then we get **sampled counterfactual value** for block $j$,
$$\textcolor{pink}{\tilde{v}(\sigma, I|j)} =
\sum_{z \in Q_j} \frac{1}{q(z)}
\pi^\sigma_{-i}(z[I]) \pi^\sigma(z[I], z) u_i(z)$$
The paper shows that
$$\mathbb{E}_{j \sim q_j} \Big[ \textcolor{pink}{\tilde{v}(\sigma, I|j)} \Big]
= \textcolor{pink}{v_i(\sigma, I)}$$
with a simple proof.
Therefore we can sample a part of the game tree and calculate the regrets.
We calculate an estimate of regrets
$$
\textcolor{coral}{\tilde{r}^t_i(I, a)} =
\textcolor{pink}{\tilde{v}_i(\sigma^t |_{I \rightarrow a}, I)} - \textcolor{pink}{\tilde{v}_i(\sigma^t, I)}
$$
And use that to update $\textcolor{orange}{R^T_i(I, a)}$ and calculate
the strategy $\textcolor{lightgreen}{\sigma_i^{T+1}(I)(a)}$ on each iteration.
Finally, we calculate the overall average strategy $\textcolor{cyan}{\bar{\sigma}^T_i(I)(a)}$.
Here is a [Kuhn Poker](kuhn/index.html) implementation to try CFR on Kuhn Poker.
*Let's dive into the code!*
"""
from typing import NewType, Dict, List, Callable, cast
from labml import monit, tracker, logger, experiment
from labml.configs import BaseConfigs, option
# A player $i \in N$ where $N$ is the set of players.
# `NewType` gives type-checkers a distinct type at zero runtime cost.
Player = NewType('Player', int)

# Action $a$, $A(h) = {a: (h, a) \in H}$ where $h \in H$ is a non-terminal [history](#History).
# Actions are represented as strings; chance outcomes reuse this type.
Action = NewType('Action', str)
class History:
    """
    <a id="History"></a>

    ## History

    History $h \in H$ is a sequence of actions including chance events,
    and $H$ is the set of all histories.

    This class should be extended with game specific logic.
    All methods below are abstract and raise `NotImplementedError`.
    """

    def is_terminal(self) -> bool:
        """
        Whether it's a terminal history; i.e. game over.
        $h \in Z$
        """
        raise NotImplementedError()

    def terminal_utility(self, i: Player) -> float:
        """
        <a id="terminal_utility"></a>

        Utility of player $i$ for a terminal history.
        $u_i(h)$ where $h \in Z$
        """
        raise NotImplementedError()

    def player(self) -> Player:
        """
        Get current player, denoted by $P(h)$, where $P$ is known as **Player function**.

        If $P(h) = c$ it means that current event is a chance $c$ event.
        Something like dealing cards, or opening common cards in poker.
        """
        raise NotImplementedError()

    def is_chance(self) -> bool:
        """
        Whether the next step is a chance step; something like dealing a new card.
        $P(h) = c$
        """
        raise NotImplementedError()

    def sample_chance(self) -> Action:
        """
        Sample a chance when $P(h) = c$.
        """
        raise NotImplementedError()

    def __add__(self, action: Action) -> 'History':
        """
        Add an action to the history and return the extended history.
        """
        raise NotImplementedError()

    def info_set_key(self) -> str:
        """
        Get [information set](#InfoSet) key for the current player.
        """
        raise NotImplementedError

    def new_info_set(self) -> 'InfoSet':
        """
        Create a new [information set](#InfoSet) for the current player.
        """
        raise NotImplementedError()

    def __repr__(self):
        """
        Human readable representation
        """
        raise NotImplementedError()
class InfoSet:
    """
    <a id="InfoSet"></a>

    ## Information Set $I_i$
    """

    # Unique key identifying the information set
    key: str
    # $\sigma_i$, the [strategy](#Strategy) of player $i$
    strategy: Dict[Action, float]
    # Total regret of not taking each action $A(I_i)$,
    #
    # \begin{align}
    # \textcolor{coral}{\tilde{r}^t_i(I, a)} &=
    #  \textcolor{pink}{\tilde{v}_i(\sigma^t |_{I \rightarrow a}, I)} -
    #  \textcolor{pink}{\tilde{v}_i(\sigma^t, I)}
    # \\
    # \textcolor{orange}{R^T_i(I, a)} &=
    #  \frac{1}{T} \sum_{t=1}^T \textcolor{coral}{\tilde{r}^t_i(I, a)}
    # \end{align}
    #
    # We maintain $T \textcolor{orange}{R^T_i(I, a)}$ instead of $\textcolor{orange}{R^T_i(I, a)}$
    # since $\frac{1}{T}$ term cancels out anyway when computing strategy
    # $\textcolor{lightgreen}{\sigma_i^{T+1}(I)(a)}$
    regret: Dict[Action, float]
    # We maintain the cumulative strategy
    # $$\sum_{t=1}^T \pi_i^{\sigma^t}(I)\textcolor{lightgreen}{\sigma^t(I)(a)}$$
    # to compute overall average strategy
    #
    # $$\textcolor{cyan}{\bar{\sigma}^T_i(I)(a)} =
    #  \frac{\sum_{t=1}^T \pi_i^{\sigma^t}(I)\textcolor{lightgreen}{\sigma^t(I)(a)}}{\sum_{t=1}^T \pi_i^{\sigma^t}(I)}$$
    cumulative_strategy: Dict[Action, float]

    def __init__(self, key: str):
        """
        * `key` uniquely identifies this information set.

        Regrets and cumulative strategy start at zero for every action and
        the initial strategy is computed (uniform, since all regrets are zero).
        """
        self.key = key
        self.regret = {a: 0 for a in self.actions()}
        self.cumulative_strategy = {a: 0 for a in self.actions()}
        self.calculate_strategy()

    def actions(self) -> List[Action]:
        """
        Actions $A(I_i)$
        """
        raise NotImplementedError()

    @staticmethod
    def from_dict(data: Dict[str, any]) -> 'InfoSet':
        """
        Load information set from a saved dictionary
        """
        raise NotImplementedError()

    def to_dict(self):
        """
        Save the information set to a dictionary
        """
        return {
            'key': self.key,
            'regret': self.regret,
            'average_strategy': self.cumulative_strategy,
        }

    def load_dict(self, data: Dict[str, any]):
        """
        Load data from a saved dictionary and recompute the current strategy.
        """
        self.regret = data['regret']
        self.cumulative_strategy = data['average_strategy']
        self.calculate_strategy()

    def calculate_strategy(self):
        """
        ## Calculate strategy

        Calculate current strategy using [regret matching](#RegretMatching).

        \begin{align}
        \textcolor{lightgreen}{\sigma_i^{T+1}(I)(a)} =
        \begin{cases}
        \frac{\textcolor{orange}{R^{T,+}_i(I, a)}}{\sum_{a'\in A(I)}\textcolor{orange}{R^{T,+}_i(I, a')}},
         & \text{if} \sum_{a'\in A(I)}\textcolor{orange}{R^{T,+}_i(I, a')} \gt 0 \\
        \frac{1}{\lvert A(I) \rvert},
         & \text{otherwise}
        \end{cases}
        \end{align}

        where $\textcolor{orange}{R^{T,+}_i(I, a)} = \max \Big(\textcolor{orange}{R^T_i(I, a)}, 0 \Big)$
        """
        # $$\textcolor{orange}{R^{T,+}_i(I, a)} = \max \Big(\textcolor{orange}{R^T_i(I, a)}, 0 \Big)$$
        regret = {a: max(r, 0) for a, r in self.regret.items()}
        # $$\sum_{a'\in A(I)}\textcolor{orange}{R^{T,+}_i(I, a')}$$
        regret_sum = sum(regret.values())
        # if $\sum_{a'\in A(I)}\textcolor{orange}{R^{T,+}_i(I, a')} \gt 0$,
        if regret_sum > 0:
            # $$\textcolor{lightgreen}{\sigma_i^{T+1}(I)(a)} =
            # \frac{\textcolor{orange}{R^{T,+}_i(I, a)}}{\sum_{a'\in A(I)}\textcolor{orange}{R^{T,+}_i(I, a')}}$$
            self.strategy = {a: r / regret_sum for a, r in regret.items()}
        # Otherwise,
        else:
            # $\lvert A(I) \rvert$ — a dict's `len` is the number of actions
            count = len(regret)
            # $$\textcolor{lightgreen}{\sigma_i^{T+1}(I)(a)} =
            # \frac{1}{\lvert A(I) \rvert}$$
            self.strategy = {a: 1 / count for a in regret}

    def get_average_strategy(self):
        """
        ## Get average strategy

        $$\textcolor{cyan}{\bar{\sigma}^T_i(I)(a)} =
        \frac{\sum_{t=1}^T \pi_i^{\sigma^t}(I)\textcolor{lightgreen}{\sigma^t(I)(a)}}
        {\sum_{t=1}^T \pi_i^{\sigma^t}(I)}$$
        """
        # $$\sum_{t=1}^T \pi_i^{\sigma^t}(I) \textcolor{lightgreen}{\sigma^t(I)(a)}$$
        cum_strategy = {a: self.cumulative_strategy.get(a, 0.) for a in self.actions()}
        # $$\sum_{t=1}^T \pi_i^{\sigma^t}(I) =
        # \sum_{a \in A(I)} \sum_{t=1}^T
        #  \pi_i^{\sigma^t}(I)\textcolor{lightgreen}{\sigma^t(I)(a)}$$
        strategy_sum = sum(cum_strategy.values())
        # If $\sum_{t=1}^T \pi_i^{\sigma^t}(I) > 0$,
        if strategy_sum > 0:
            # $$\textcolor{cyan}{\bar{\sigma}^T_i(I)(a)} =
            # \frac{\sum_{t=1}^T \pi_i^{\sigma^t}(I)\textcolor{lightgreen}{\sigma^t(I)(a)}}
            # {\sum_{t=1}^T \pi_i^{\sigma^t}(I)}$$
            return {a: s / strategy_sum for a, s in cum_strategy.items()}
        # Otherwise,
        else:
            # $\lvert A(I) \rvert$ — a dict's `len` is the number of actions
            count = len(cum_strategy)
            # $$\textcolor{cyan}{\bar{\sigma}^T_i(I)(a)} =
            # \frac{1}{\lvert A(I) \rvert}$$
            return {a: 1 / count for a in cum_strategy}

    def __repr__(self):
        """
        Human readable representation
        """
        raise NotImplementedError()
class CFR:
    """
    ## Counterfactual Regret Minimization (CFR) Algorithm

    We do chance sampling (**CS**) where all the chance events (nodes) are sampled and
    all other events (nodes) are explored.

    We can ignore the term $q(z)$ since it's the same for all terminal histories
    since we are doing chance sampling and it cancels out when calculating
    strategy (common in numerator and denominator).
    """

    # $\mathcal{I}$ set of all information sets.
    info_sets: Dict[str, InfoSet]

    def __init__(self, *,
                 create_new_history: Callable[[], History],
                 epochs: int,
                 n_players: int = 2):
        """
        * `create_new_history` creates a new empty history
        * `epochs` is the number of iterations to train on $T$
        * `n_players` is the number of players
        """
        self.n_players = n_players
        self.epochs = epochs
        self.create_new_history = create_new_history
        # A dictionary for $\mathcal{I}$ set of all information sets
        self.info_sets = {}
        # Tracker for analytics
        self.tracker = InfoSetTracker()

    def _get_info_set(self, h: History) -> InfoSet:
        """
        Returns the information set $I$ of the current player for a given history $h$.

        Creates and caches a new information set the first time a key is seen.
        """
        info_set_key = h.info_set_key()
        if info_set_key not in self.info_sets:
            self.info_sets[info_set_key] = h.new_info_set()
        return self.info_sets[info_set_key]

    def walk_tree(self, h: History, i: Player, pi_i: float, pi_neg_i: float) -> float:
        """
        ### Walk Tree

        This function walks the game tree.

        * `h` is the current history $h$
        * `i` is the player $i$ that we are computing regrets of
        * [`pi_i`](#HistoryProbability) is
         $\pi^{\sigma^t}_i(h)$
        * [`pi_neg_i`](#HistoryProbability) is
         $\pi^{\sigma^t}_{-i}(h)$

        It returns the expected utility, for the history $h$
        $$\sum_{z \in Z_h} \pi^\sigma(h, z) u_i(z)$$
        where $Z_h$ is the set of terminal histories with prefix $h$

        While walking the tree it updates the total regrets $\textcolor{orange}{R^T_i(I, a)}$.
        """
        # If it's a terminal history $h \in Z$ return the terminal utility $u_i(h)$.
        if h.is_terminal():
            return h.terminal_utility(i)
        # If it's a chance event $P(h) = c$ sample a chance action and continue.
        elif h.is_chance():
            a = h.sample_chance()
            return self.walk_tree(h + a, i, pi_i, pi_neg_i)

        # Get current player's information set for $h$
        I = self._get_info_set(h)
        # To store $\sum_{z \in Z_h} \pi^\sigma(h, z) u_i(z)$
        v = 0
        # To store
        # $$\sum_{z \in Z_h} \pi^{\sigma^t |_{I \rightarrow a}}(h, z) u_i(z)$$
        # for each action $a \in A(h)$
        va = {}

        # Iterate through all actions
        for a in I.actions():
            # If the current player is $i$,
            if i == h.player():
                # \begin{align}
                # \pi^{\sigma^t}_i(h + a) &= \pi^{\sigma^t}_i(h) \sigma^t_i(I)(a) \\
                # \pi^{\sigma^t}_{-i}(h + a) &= \pi^{\sigma^t}_{-i}(h)
                # \end{align}
                va[a] = self.walk_tree(h + a, i, pi_i * I.strategy[a], pi_neg_i)
            # Otherwise,
            else:
                # \begin{align}
                # \pi^{\sigma^t}_i(h + a) &= \pi^{\sigma^t}_i(h) \\
                # \pi^{\sigma^t}_{-i}(h + a) &= \pi^{\sigma^t}_{-i}(h) * \sigma^t_i(I)(a)
                # \end{align}
                va[a] = self.walk_tree(h + a, i, pi_i, pi_neg_i * I.strategy[a])
            # $$\sum_{z \in Z_h} \pi^\sigma(h, z) u_i(z) =
            # \sum_{a \in A(I)} \Bigg[ \sigma^t_i(I)(a)
            #  \sum_{z \in Z_h} \pi^{\sigma^t |_{I \rightarrow a}}(h, z) u_i(z)
            # \Bigg]$$
            v = v + I.strategy[a] * va[a]

        # If the current player is $i$,
        # update the cumulative strategies and total regrets
        if h.player() == i:
            # Update cumulative strategies
            # $$\sum_{t=1}^T \pi_i^{\sigma^t}(I)\textcolor{lightgreen}{\sigma^t(I)(a)}
            #  = \sum_{t=1}^T \Big[ \sum_{h \in I} \pi_i^{\sigma^t}(h)
            # \textcolor{lightgreen}{\sigma^t(I)(a)} \Big]$$
            for a in I.actions():
                I.cumulative_strategy[a] = I.cumulative_strategy[a] + pi_i * I.strategy[a]
            # \begin{align}
            # \textcolor{coral}{\tilde{r}^t_i(I, a)} &=
            #  \textcolor{pink}{\tilde{v}_i(\sigma^t |_{I \rightarrow a}, I)} -
            #  \textcolor{pink}{\tilde{v}_i(\sigma^t, I)} \\
            # &=
            #  \pi^{\sigma^t}_{-i} (h) \Big(
            #  \sum_{z \in Z_h} \pi^{\sigma^t |_{I \rightarrow a}}(h, z) u_i(z) -
            #  \sum_{z \in Z_h} \pi^\sigma(h, z) u_i(z)
            #  \Big) \\
            # T \textcolor{orange}{R^T_i(I, a)} &=
            #  \sum_{t=1}^T \textcolor{coral}{\tilde{r}^t_i(I, a)}
            # \end{align}
            for a in I.actions():
                I.regret[a] += pi_neg_i * (va[a] - v)

            # Update the strategy $\textcolor{lightgreen}{\sigma^t(I)(a)}$
            # (regret matching, applied immediately after the regret update)
            I.calculate_strategy()

        # Return the expected utility for player $i$,
        # $$\sum_{z \in Z_h} \pi^\sigma(h, z) u_i(z)$$
        return v

    def iterate(self):
        """
        ### Iteratively update $\textcolor{lightgreen}{\sigma^t(I)(a)}$

        This updates the strategies for $T$ iterations.
        """
        # Loop for `epochs` times
        for t in monit.iterate('Train', self.epochs):
            # Walk tree and update regrets for each player
            for i in range(self.n_players):
                self.walk_tree(self.create_new_history(), cast(Player, i), 1, 1)

            # Track data for analytics
            tracker.add_global_step()
            self.tracker(self.info_sets)
            tracker.save()

        # Print the information sets
        logger.inspect(self.info_sets)
class InfoSetTracker:
    """
    ### Information set tracker

    This is a small helper class to track data from information sets
    """

    def __init__(self):
        """
        Set tracking indicators.
        """
        # These are plain glob patterns — the originals used f-strings with no
        # placeholders (ruff F541), which is needless
        tracker.set_histogram('strategy.*')
        tracker.set_histogram('average_strategy.*')
        tracker.set_histogram('regret.*')

    def __call__(self, info_sets: Dict[str, InfoSet]):
        """
        Track strategy, average strategy and regret of all information sets.
        """
        for info_set in info_sets.values():
            avg_strategy = info_set.get_average_strategy()
            for a in info_set.actions():
                tracker.add({
                    f'strategy.{info_set.key}.{a}': info_set.strategy[a],
                    f'average_strategy.{info_set.key}.{a}': avg_strategy[a],
                    f'regret.{info_set.key}.{a}': info_set.regret[a],
                })
class CFRConfigs(BaseConfigs):
    """
    ### Configurable CFR module
    """
    # Factory that creates an empty game `History`; provided by game-specific code
    create_new_history: Callable[[], History]
    # Number of training iterations $T$
    epochs: int = 1_00_000
    # The CFR trainer; `'simple_cfr'` names the option function that builds it
    cfr: CFR = 'simple_cfr'
@option(CFRConfigs.cfr)
def simple_cfr(c: CFRConfigs):
    """
    Initialize **CFR** algorithm.

    Default builder for `CFRConfigs.cfr`: constructs a `CFR` instance from the
    configured history factory and epoch count.
    """
    return CFR(create_new_history=c.create_new_history,
               epochs=c.epochs)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/cfr/kuhn/__init__.py | labml_nn/cfr/kuhn/__init__.py | """
---
title: CFR on Kuhn Poker
summary: >
This is an annotated implementation/tutorial of CFR on Kuhn Poker
---
# [Counterfactual Regret Minimization (CFR)](../index.html) on Kuhn Poker
This applies [Counterfactual Regret Minimization (CFR)](../index.html) to Kuhn poker.
[Kuhn Poker](https://en.wikipedia.org/wiki/Kuhn_poker) is a two player 3-card betting game.
The players are dealt one card each out of Ace, King and Queen (no suits).
There are only three cards in the pack so one card is left out.
Ace beats King and Queen and King beats Queen - just like in normal ranking of cards.
Both players ante $1$ chip (blindly bet $1$ chip).
After looking at the cards, the first player can either pass or bet $1$ chip.
If the first player passes, the player with higher card wins the pot.
If first player bets, the second play can bet (i.e. call) $1$ chip or pass (i.e. fold).
If the second player also bets, the player with the higher card wins the pot.
If the second player passes (i.e. folds) the first player gets the pot.
This game is played repeatedly and a good strategy will optimize for the long term utility (or winnings).
Here's some example games:
* `KAp` - Player 1 gets K. Player 2 gets A. Player 1 passes. Player 2 doesn't get a betting chance and Player 2 wins the pot of $2$ chips.
* `QKbp` - Player 1 gets Q. Player 2 gets K. Player 1 bets a chip. Player 2 passes (folds). Player 1 gets the pot of $4$ because Player 2 folded.
* `QAbb` - Player 1 gets Q. Player 2 gets A. Player 1 bets a chip. Player 2 also bets (calls). Player 2 wins the pot of $4$.
Here we extend the `InfoSet` class and `History` class defined in [`__init__.py`](../index.html)
with Kuhn Poker specifics.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/cfr/kuhn/experiment.ipynb)
"""
from typing import List, cast, Dict
import numpy as np
from labml import experiment
from labml.configs import option
from labml_nn.cfr import History as _History, InfoSet as _InfoSet, Action, Player, CFRConfigs
# Kuhn poker actions are pass (`p`) or bet (`b`)
ACTIONS = cast(List[Action], ['p', 'b'])
# The three cards in play are Ace, King and Queen.
# Cast to `Action` since chance outcomes share the action type.
CHANCES = cast(List[Action], ['A', 'K', 'Q'])
# There are two players, indexed 0 and 1
PLAYERS = cast(List[Player], [0, 1])
class InfoSet(_InfoSet):
    """
    ## [Information set](../index.html#InfoSet)
    """

    @staticmethod
    def from_dict(data: Dict[str, any]) -> 'InfoSet':
        """Does not support save/load"""
        pass

    def actions(self) -> List[Action]:
        """
        Return the list of actions. Terminal states are handled by `History` class.
        """
        return ACTIONS

    def __repr__(self):
        """
        Human readable string representation - it gives the betting probability
        """
        # Guard against division by zero when nothing has accumulated yet
        weight_sum = max(sum(self.cumulative_strategy.values()), 1e-6)
        bet_prob = self.cumulative_strategy[cast(Action, 'b')] / weight_sum
        return f'{bet_prob * 100: .1f}%'
class History(_History):
    """
    ## [History](../index.html#History)

    This defines when a game ends, calculates the utility and sample chance events (dealing cards).

    The history is stored in a string:

    * First two characters are the cards dealt to player 1 and player 2
    * The third character is the action by the first player
    * Fourth character is the action by the second player
    """

    # Sequence of chance outcomes (cards) and betting actions so far
    history: str

    def __init__(self, history: str = ''):
        """
        Initialize with a given history string
        """
        self.history = history

    def is_terminal(self):
        """
        Whether the history is terminal (game over).
        """
        # No betting actions have been taken yet
        if len(self.history) <= 2:
            return False
        # Terminal when the last action was a pass, or when both players bet
        return self.history[-1] == 'p' or self.history[-2:] == 'bb'

    def _terminal_utility_p1(self) -> float:
        """
        Calculate the terminal utility for player $1$, $u_1(z)$
        """
        # 'A' < 'K' < 'Q' lexically and Ace beats King beats Queen, so the
        # lexically smaller card wins: $+1$ for player 1's card, $-1$ otherwise
        winner = 2 * (self.history[0] < self.history[1]) - 1
        tail = self.history[-2:]
        # Second player folded after a bet: player 1 takes the pot
        if tail == 'bp':
            return 1
        # Both players bet: showdown worth $2$ chips
        if tail == 'bb':
            return winner * 2
        # First player passed: showdown worth $1$ chip
        if self.history[-1] == 'p':
            return winner
        # History is non-terminal
        raise RuntimeError()

    def terminal_utility(self, i: Player) -> float:
        """
        Get the terminal utility for player $i$
        """
        u1 = self._terminal_utility_p1()
        # Zero-sum game: $u_2(z) = -u_1(z)$
        return u1 if i == PLAYERS[0] else -1 * u1

    def is_chance(self) -> bool:
        """
        The first two events are card dealing; i.e. chance events
        """
        return len(self.history) < 2

    def __add__(self, other: Action):
        """
        Add an action to the history and return a new history
        """
        return History(self.history + other)

    def player(self) -> Player:
        """
        Current player
        """
        return cast(Player, len(self.history) % 2)

    def sample_chance(self) -> Action:
        """
        Sample a chance action
        """
        while True:
            # Randomly pick a card; redraw if it was dealt already
            card = CHANCES[np.random.randint(len(CHANCES))]
            if card not in self.history:
                return cast(Action, card)

    def __repr__(self):
        """
        Human readable representation
        """
        return repr(self.history)

    def info_set_key(self) -> str:
        """
        Information set key for the current history.
        This is a string of actions only visible to the current player.
        """
        # The current player sees her own card plus all betting actions
        return self.history[self.player()] + self.history[2:]

    def new_info_set(self) -> InfoSet:
        # Create a new information set object
        return InfoSet(self.info_set_key())
def create_new_history():
    """Create an empty Kuhn poker history."""
    return History('')
class Configs(CFRConfigs):
    """
    Configurations extends the CFR configurations class
    """
    # No Kuhn-specific options; this subclass exists so game-specific
    # option functions can be registered against it
    pass
@option(Configs.create_new_history)
def _cnh():
    """
    Set the `create_new_history` method for Kuhn Poker.

    Registered as the default provider for `Configs.create_new_history`.
    """
    return create_new_history
def main():
    """
    ### Run the experiment
    """
    # Only the `sqlite` writer is enabled: the algorithm iterates fast and
    # tracks data on every iteration, so slower writers (e.g. Tensorboard)
    # would dominate the runtime. SQLite is enough for our analytics.
    experiment.create(name='kuhn_poker', writers={'sqlite'})
    # Build the configuration object and let the framework load it
    conf = Configs()
    experiment.configs(conf)
    # Run the CFR iterations inside the experiment context
    with experiment.start():
        conf.cfr.iterate()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/recurrent_highway_networks/__init__.py | labml_nn/recurrent_highway_networks/__init__.py | """
---
title: Recurrent Highway Networks
summary: A simple PyTorch implementation/tutorial of Recurrent Highway Networks.
---
# Recurrent Highway Networks
This is a [PyTorch](https://pytorch.org) implementation of [Recurrent Highway Networks](https://arxiv.org/abs/1607.03474).
"""
from typing import Optional
import torch
from torch import nn
class RHNCell(nn.Module):
"""
## Recurrent Highway Network Cell
This implements equations $(6) - (9)$.
$s_d^t = h_d^t \odot g_d^t + s_{d - 1}^t \odot c_d^t$
where
\begin{align}
h_0^t &= \tanh(lin_{hx}(x) + lin_{hs}(s_D^{t-1})) \\
g_0^t &= \sigma(lin_{gx}(x) + lin_{gs}^1(s_D^{t-1})) \\
c_0^t &= \sigma(lin_{cx}(x) + lin_{cs}^1(s_D^{t-1}))
\end{align}
and for $0 < d < D$
\begin{align}
h_d^t &= \tanh(lin_{hs}^d(s_d^t)) \\
g_d^t &= \sigma(lin_{gs}^d(s_d^t)) \\
c_d^t &= \sigma(lin_{cs}^d(s_d^t))
\end{align}
$\odot$ stands for element-wise multiplication.
Here we have made a couple of changes to notations from the paper.
To avoid confusion with time, gate is represented with $g$,
which was $t$ in the paper.
To avoid confusion with multiple layers we use $d$ for depth and $D$ for
total depth instead of $l$ and $L$ from the paper.
We have also replaced the weight matrices and bias vectors from the equations with
linear transforms, because that's how the implementation is going to look like.
We implement weight tying, as described in paper, $c_d^t = 1 - g_d^t$.
"""
def __init__(self, input_size: int, hidden_size: int, depth: int):
"""
`input_size` is the feature length of the input and `hidden_size` is
the feature length of the cell.
`depth` is $D$.
"""
super().__init__()
self.hidden_size = hidden_size
self.depth = depth
# We combine $lin_{hs}$ and $lin_{gs}$, with a single linear layer.
# We can then split the results to get the $lin_{hs}$ and $lin_{gs}$ components.
# This is the $lin_{hs}^d$ and $lin_{gs}^d$ for $0 \leq d < D$.
self.hidden_lin = nn.ModuleList([nn.Linear(hidden_size, 2 * hidden_size) for _ in range(depth)])
# Similarly we combine $lin_{hx}$ and $lin_{gx}$.
self.input_lin = nn.Linear(input_size, 2 * hidden_size, bias=False)
def forward(self, x: torch.Tensor, s: torch.Tensor):
"""
`x` has shape `[batch_size, input_size]` and
`s` has shape `[batch_size, hidden_size]`.
"""
# Iterate $0 \leq d < D$
for d in range(self.depth):
# We calculate the concatenation of linear transforms for $h$ and $g$
if d == 0:
# The input is used only when $d$ is $0$.
hg = self.input_lin(x) + self.hidden_lin[d](s)
else:
hg = self.hidden_lin[d](s)
# Use the first half of `hg` to get $h_d^t$
#
# \begin{align}
# h_0^t &= \tanh(lin_{hx}(x) + lin_{hs}(s_D^{t-1})) \\
# h_d^t &= \tanh(lin_{hs}^d(s_d^t))
# \end{align}
h = torch.tanh(hg[:, :self.hidden_size])
# Use the second half of `hg` to get $g_d^t$
#
# \begin{align}
# g_0^t &= \sigma(lin_{gx}(x) + lin_{gs}^1(s_D^{t-1})) \\
# g_d^t &= \sigma(lin_{gs}^d(s_d^t))
# \end{align}
g = torch.sigmoid(hg[:, self.hidden_size:])
s = h * g + s * (1 - g)
return s
class RHN(nn.Module):
    """
    ## Multilayer Recurrent Highway Network

    Stacks `n_layers` recurrent highway cells; each layer feeds its state
    to the layer above at every time step.
    """

    def __init__(self, input_size: int, hidden_size: int, depth: int, n_layers: int):
        """
        Create a network of `n_layers` of recurrent highway network layers, each with depth `depth`, $D$.
        """
        super().__init__()
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        # Create cells for each layer. Note that only the first layer gets the input directly.
        # Rest of the layers get the input from the layer below
        self.cells = nn.ModuleList([RHNCell(input_size, hidden_size, depth)] +
                                   [RHNCell(hidden_size, hidden_size, depth) for _ in range(n_layers - 1)])

    def forward(self, x: torch.Tensor, state: Optional[torch.Tensor] = None):
        """
        * `x` has shape `[seq_len, batch_size, input_size]`
        * `state` has shape `[n_layers, batch_size, hidden_size]` (one state per layer)

        Returns a tuple of the outputs of the final layer, shape
        `[seq_len, batch_size, hidden_size]`, and the stacked final states,
        shape `[n_layers, batch_size, hidden_size]`.
        """
        time_steps, batch_size = x.shape[:2]

        # Initialize the state if `None`
        if state is None:
            s = [x.new_zeros(batch_size, self.hidden_size) for _ in range(self.n_layers)]
        else:
            # Reverse stack the state to get the state of each layer.
            #
            # `torch.unbind` returns an immutable tuple, so convert it to a
            # list — the loop below assigns `s[layer]` in place (the previous
            # code raised `TypeError` whenever a state was passed in).
            s = list(torch.unbind(state))

        # Array to collect the outputs of the final layer at each time step.
        out = []

        # Run through the network for each time step
        for t in range(time_steps):
            # Input to the first layer is the input itself
            inp = x[t]
            # Loop through the layers
            for layer in range(self.n_layers):
                # Get the state of the layer
                s[layer] = self.cells[layer](inp, s[layer])
                # Input to the next layer is the state of this layer
                inp = s[layer]
            # Collect the output of the final layer
            out.append(s[-1])

        # Stack the outputs and states
        out = torch.stack(out)
        s = torch.stack(s)

        return out, s
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/__init__.py | labml_nn/gan/__init__.py | """
---
title: Generative Adversarial Networks
summary: >
A set of PyTorch implementations/tutorials of GANs.
---
# Generative Adversarial Networks
* [Original GAN](original/index.html)
* [GAN with deep convolutional network](dcgan/index.html)
* [Cycle GAN](cycle_gan/index.html)
* [Wasserstein GAN](wasserstein/index.html)
* [Wasserstein GAN with Gradient Penalty](wasserstein/gradient_penalty/index.html)
* [StyleGAN 2](stylegan/index.html)
""" | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/cycle_gan/__init__.py | labml_nn/gan/cycle_gan/__init__.py | """
---
title: Cycle GAN
summary: >
A simple PyTorch implementation/tutorial of Cycle GAN introduced in paper
Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks.
---
# Cycle GAN
This is a [PyTorch](https://pytorch.org) implementation/tutorial of the paper
[Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks](https://arxiv.org/abs/1703.10593).
I've taken pieces of code from [eriklindernoren/PyTorch-GAN](https://github.com/eriklindernoren/PyTorch-GAN).
It is a very good resource if you want to checkout other GAN variations too.
Cycle GAN does image-to-image translation.
It trains a model to translate an image from given distribution to another, say, images of class A and B.
Images of a certain distribution could be things like images of a certain style, or nature.
The models do not need paired images between A and B.
Just a set of images of each class is enough.
This works very well on changing between image styles, lighting changes, pattern changes, etc.
For example, changing summer to winter, painting style to photos, and horses to zebras.
Cycle GAN trains two generator models and two discriminator models.
One generator translates images from A to B and the other from B to A.
The discriminators test whether the generated images look real.
This file contains the model code as well as the training code.
We also have a Google Colab notebook.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/gan/cycle_gan/experiment.ipynb)
"""
import itertools
import random
import zipfile
from typing import Tuple
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import InterpolationMode
from torchvision.utils import make_grid
from labml import lab, tracker, experiment, monit
from labml.configs import BaseConfigs
from labml.utils.download import download_file
from labml.utils.pytorch import get_modules
from labml_nn.helpers.device import DeviceConfigs
class GeneratorResNet(nn.Module):
    """
    ### Generator

    A ResNet-style generator: a $7 \times 7$ stem convolution, two stride-2
    down-sampling stages, `n_residual_blocks` residual blocks at the
    bottleneck, two up-sampling stages and a final $7 \times 7$ projection
    back to image space.
    """

    def __init__(self, input_channels: int, n_residual_blocks: int):
        super().__init__()
        # Stem: a $7 \times 7$ reflection-padded convolution mapping the image
        # to 64 feature maps. Padding of $3$ keeps the height and width, and
        # reflection padding gives better image quality at edges.
        # `inplace=True` in `ReLU` saves a little bit of memory.
        channels = 64
        modules = [
            nn.Conv2d(input_channels, channels, kernel_size=7, padding=3, padding_mode='reflect'),
            nn.InstanceNorm2d(channels),
            nn.ReLU(inplace=True),
        ]

        # Two down-sampling stages, each doubling the channel count and
        # halving the height and width with a stride-2 $3 \times 3$ convolution
        for _ in range(2):
            modules += [
                nn.Conv2d(channels, channels * 2, kernel_size=3, stride=2, padding=1),
                nn.InstanceNorm2d(channels * 2),
                nn.ReLU(inplace=True),
            ]
            channels *= 2

        # Residual blocks at the bottleneck resolution (module defined below)
        modules += [ResidualBlock(channels) for _ in range(n_residual_blocks)]

        # Two up-sampling stages, each halving the channel count and doubling
        # the height and width back to the original image size
        for _ in range(2):
            modules += [
                nn.Upsample(scale_factor=2),
                nn.Conv2d(channels, channels // 2, kernel_size=3, stride=1, padding=1),
                nn.InstanceNorm2d(channels // 2),
                nn.ReLU(inplace=True),
            ]
            channels //= 2

        # Project back to `input_channels` and squash to $[-1, 1]$ with tanh
        modules += [nn.Conv2d(channels, input_channels, 7, padding=3, padding_mode='reflect'), nn.Tanh()]

        # Create a sequential module with the layers
        self.layers = nn.Sequential(*modules)

        # Initialize convolution weights from $\mathcal{N}(0, 0.02)$
        self.apply(weights_init_normal)

    def forward(self, x):
        """Translate a batch of images `x`."""
        return self.layers(x)
class ResidualBlock(nn.Module):
"""
This is the residual block, with two convolution layers.
"""
def __init__(self, in_features: int):
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(in_features, in_features, kernel_size=3, padding=1, padding_mode='reflect'),
nn.InstanceNorm2d(in_features),
nn.ReLU(inplace=True),
nn.Conv2d(in_features, in_features, kernel_size=3, padding=1, padding_mode='reflect'),
nn.InstanceNorm2d(in_features),
nn.ReLU(inplace=True),
)
def forward(self, x: torch.Tensor):
return x + self.block(x)
class Discriminator(nn.Module):
    """
    ### Discriminator

    Scores overlapping regions of the input image as real or generated,
    producing a map of probabilities rather than a single scalar.
    """

    def __init__(self, input_shape: Tuple[int, int, int]):
        super().__init__()
        channels, height, width = input_shape

        # Four stride-2 blocks shrink the height and width by $2^4$,
        # so the output score map is `(1, height // 16, width // 16)`
        self.output_shape = (1, height // 2 ** 4, width // 2 ** 4)

        blocks = [
            # Each of these blocks will shrink the height and width by a factor of 2
            DiscriminatorBlock(channels, 64, normalize=False),
            DiscriminatorBlock(64, 128),
            DiscriminatorBlock(128, 256),
            DiscriminatorBlock(256, 512),
            # Zero pad on top and left to keep the output height and width same
            # with the $4 \times 4$ kernel
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(512, 1, kernel_size=4, padding=1),
        ]
        self.layers = nn.Sequential(*blocks)

        # Initialize convolution weights from $\mathcal{N}(0, 0.02)$
        self.apply(weights_init_normal)

    def forward(self, img):
        """Return the per-region score map for `img`."""
        return self.layers(img)
class DiscriminatorBlock(nn.Module):
"""
This is the discriminator block module.
It does a convolution, an optional normalization, and a leaky ReLU.
It shrinks the height and width of the input feature map by half.
"""
def __init__(self, in_filters: int, out_filters: int, normalize: bool = True):
super().__init__()
layers = [nn.Conv2d(in_filters, out_filters, kernel_size=4, stride=2, padding=1)]
if normalize:
layers.append(nn.InstanceNorm2d(out_filters))
layers.append(nn.LeakyReLU(0.2, inplace=True))
self.layers = nn.Sequential(*layers)
def forward(self, x: torch.Tensor):
return self.layers(x)
def weights_init_normal(m):
    """
    Initialize convolution layer weights to $\mathcal{N}(0, 0.02)$

    This is applied via `nn.Module.apply`, so `m` is each sub-module in turn.
    Only modules whose class name contains `Conv` (e.g. `Conv2d`) are touched;
    normalization and activation layers keep their default initialization.
    (The previous docstring said $\mathcal{N}(0, 0.2)$; the standard deviation
    actually used is $0.02$.)
    """
    classname = m.__class__.__name__
    if "Conv" in classname:
        # `nn.init` functions already run under `torch.no_grad()`, so there is
        # no need to go through the deprecated `.data` attribute
        torch.nn.init.normal_(m.weight, 0.0, 0.02)
def load_image(path: str):
    """
    Load an image and convert to RGB if it is in another mode (e.g. grey-scale).
    """
    image = Image.open(path)
    if image.mode != 'RGB':
        # Fix: the previous code returned the result of `Image.paste`, which is
        # `None` — so every non-RGB image made this function return `None`.
        # `Image.convert` returns a proper RGB copy instead.
        image = image.convert('RGB')
    return image
class ImageDataset(Dataset):
    """
    ### Dataset to load images

    Loads unpaired images from the two domains (`A` and `B`) of a CycleGAN
    dataset, downloading and extracting the archive on first use.
    """

    @staticmethod
    def download(dataset_name: str):
        """
        #### Download dataset and extract data
        """
        # URL of the archive hosted by the CycleGAN authors
        url = f'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/{dataset_name}.zip'
        # Download folder
        root = lab.get_data_path() / 'cycle_gan'
        if not root.exists():
            root.mkdir(parents=True)
        # Download destination
        archive = root / f'{dataset_name}.zip'
        # Download file (generally ~100MB)
        download_file(url, archive)
        # Extract the archive
        with zipfile.ZipFile(archive, 'r') as f:
            f.extractall(root)

    def __init__(self, dataset_name: str, transforms_, mode: str):
        """
        #### Initialize the dataset

        * `dataset_name` is the name of the dataset
        * `transforms_` is the set of image transforms
        * `mode` is either `train` or `test`
        """
        # Dataset path
        root = lab.get_data_path() / 'cycle_gan' / dataset_name
        # Download if missing
        if not root.exists():
            self.download(dataset_name)

        # Image transforms
        self.transform = transforms.Compose(transforms_)

        # Get image paths; sorted so the ordering is deterministic across runs
        path_a = root / f'{mode}A'
        path_b = root / f'{mode}B'
        self.files_a = sorted(str(f) for f in path_a.iterdir())
        self.files_b = sorted(str(f) for f in path_b.iterdir())

    def __getitem__(self, index):
        # Return a pair of images.
        # These pairs get batched together, and they do not act like pairs in training.
        # So it is kind of ok that we always keep giving the same pair.
        # The modulo wraps the shorter list around, since the two domains may
        # have different numbers of images.
        return {"x": self.transform(load_image(self.files_a[index % len(self.files_a)])),
                "y": self.transform(load_image(self.files_b[index % len(self.files_b)]))}

    def __len__(self):
        # Number of images in the dataset (the larger of the two domains)
        return max(len(self.files_a), len(self.files_b))
class ReplayBuffer:
    """
    ### Replay Buffer

    A pool of previously generated images used when training the
    discriminator. Once the pool is full, each new image is either returned
    as-is (with probability $0.5$) or swapped with a randomly chosen stored
    image, which is returned instead. Showing the discriminator a mix of
    fresh and older generations reduces model oscillation.
    """

    def __init__(self, max_size: int = 50):
        # Maximum number of images kept in the pool
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data: torch.Tensor):
        """Add a batch of generated images and get a batch back."""
        data = data.detach()
        out = []
        for img in data:
            if len(self.data) < self.max_size:
                # Fill the pool first; until it is full every image passes through
                self.data.append(img)
                out.append(img)
            elif random.uniform(0, 1) > 0.5:
                # Return an older image and store the new one in its place
                idx = random.randint(0, self.max_size - 1)
                out.append(self.data[idx].clone())
                self.data[idx] = img
            else:
                # Pass the new image through unchanged
                out.append(img)
        return torch.stack(out)
class Configs(BaseConfigs):
    """## Configurations"""

    # `DeviceConfigs` will pick a GPU if available
    device: torch.device = DeviceConfigs()

    # Hyper-parameters
    epochs: int = 200
    dataset_name: str = 'monet2photo'
    batch_size: int = 1

    data_loader_workers = 8

    learning_rate = 0.0002
    adam_betas = (0.5, 0.999)
    decay_start = 100

    # The paper suggests using a least-squares loss instead of
    # negative log-likelihood, as it is found to be more stable.
    gan_loss = torch.nn.MSELoss()

    # L1 loss is used for cycle loss and identity loss
    cycle_loss = torch.nn.L1Loss()
    identity_loss = torch.nn.L1Loss()

    # Image dimensions
    img_height = 256
    img_width = 256
    img_channels = 3

    # Number of residual blocks in the generator
    n_residual_blocks = 9

    # Loss coefficients ($\lambda_1$ and $\lambda_2$)
    cyclic_loss_coefficient = 10.0
    identity_loss_coefficient = 5.

    # Number of batches between image samples
    sample_interval = 500

    # Models
    generator_xy: GeneratorResNet
    generator_yx: GeneratorResNet
    discriminator_x: Discriminator
    discriminator_y: Discriminator

    # Optimizers
    generator_optimizer: torch.optim.Adam
    discriminator_optimizer: torch.optim.Adam

    # Learning rate schedules
    generator_lr_scheduler: torch.optim.lr_scheduler.LambdaLR
    discriminator_lr_scheduler: torch.optim.lr_scheduler.LambdaLR

    # Data loaders
    dataloader: DataLoader
    valid_dataloader: DataLoader

    def sample_images(self, n: int):
        """Generate samples from test set and display them

        * `n` is the global batch counter (unused here; kept for the caller's
          interface)
        """
        batch = next(iter(self.valid_dataloader))
        self.generator_xy.eval()
        self.generator_yx.eval()
        with torch.no_grad():
            # Fix: `nn.Module` has no `device` attribute — the previous code
            # accessed `self.generator_xy.device` and crashed here.
            # Use the configured device instead.
            data_x, data_y = batch['x'].to(self.device), batch['y'].to(self.device)
            gen_y = self.generator_xy(data_x)
            gen_x = self.generator_yx(data_y)

            # Arrange images along x-axis
            data_x = make_grid(data_x, nrow=5, normalize=True)
            data_y = make_grid(data_y, nrow=5, normalize=True)
            gen_x = make_grid(gen_x, nrow=5, normalize=True)
            gen_y = make_grid(gen_y, nrow=5, normalize=True)

            # Arrange images along y-axis
            image_grid = torch.cat((data_x, gen_y, data_y, gen_x), 1)

        # Show samples
        plot_image(image_grid)

    def initialize(self):
        """
        ## Initialize models and data loaders
        """
        input_shape = (self.img_channels, self.img_height, self.img_width)

        # Create the models
        self.generator_xy = GeneratorResNet(self.img_channels, self.n_residual_blocks).to(self.device)
        self.generator_yx = GeneratorResNet(self.img_channels, self.n_residual_blocks).to(self.device)
        self.discriminator_x = Discriminator(input_shape).to(self.device)
        self.discriminator_y = Discriminator(input_shape).to(self.device)

        # Create the optimizers
        self.generator_optimizer = torch.optim.Adam(
            itertools.chain(self.generator_xy.parameters(), self.generator_yx.parameters()),
            lr=self.learning_rate, betas=self.adam_betas)
        self.discriminator_optimizer = torch.optim.Adam(
            itertools.chain(self.discriminator_x.parameters(), self.discriminator_y.parameters()),
            lr=self.learning_rate, betas=self.adam_betas)

        # Create the learning rate schedules.
        # The learning rate stays flat until `decay_start` epochs,
        # and then linearly reduces to $0$ at the end of training.
        decay_epochs = self.epochs - self.decay_start
        self.generator_lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.generator_optimizer, lr_lambda=lambda e: 1.0 - max(0, e - self.decay_start) / decay_epochs)
        self.discriminator_lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.discriminator_optimizer, lr_lambda=lambda e: 1.0 - max(0, e - self.decay_start) / decay_epochs)

        # Image transformations
        transforms_ = [
            transforms.Resize(int(self.img_height * 1.12), InterpolationMode.BICUBIC),
            transforms.RandomCrop((self.img_height, self.img_width)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]

        # Training data loader
        self.dataloader = DataLoader(
            ImageDataset(self.dataset_name, transforms_, 'train'),
            batch_size=self.batch_size,
            shuffle=True,
            num_workers=self.data_loader_workers,
        )

        # Validation data loader
        self.valid_dataloader = DataLoader(
            ImageDataset(self.dataset_name, transforms_, "test"),
            batch_size=5,
            shuffle=True,
            num_workers=self.data_loader_workers,
        )

    def run(self):
        """
        ## Training

        We aim to solve:
        $$G^{*}, F^{*} = \arg \min_{G,F} \max_{D_X, D_Y} \mathcal{L}(G, F, D_X, D_Y)$$

        where,
        $G$ translates images from $X \rightarrow Y$,
        $F$ translates images from $Y \rightarrow X$,
        $D_X$ tests if images are from $X$ space,
        $D_Y$ tests if images are from $Y$ space, and

        \begin{align}
        \mathcal{L}(G, F, D_X, D_Y)
        &= \mathcal{L}_{GAN}(G, D_Y, X, Y) \\
        &+ \mathcal{L}_{GAN}(F, D_X, Y, X) \\
        &+ \lambda_1 \mathcal{L}_{cyc}(G, F) \\
        &+ \lambda_2 \mathcal{L}_{identity}(G, F) \\
        \\
        \mathcal{L}_{GAN}(G, F, D_Y, X, Y)
        &= \mathbb{E}_{y \sim p_{data}(y)} \Big[log D_Y(y)\Big] \\
        &+ \mathbb{E}_{x \sim p_{data}(x)} \bigg[log\Big(1 - D_Y(G(x))\Big)\bigg] \\
        &+ \mathbb{E}_{x \sim p_{data}(x)} \Big[log D_X(x)\Big] \\
        &+ \mathbb{E}_{y \sim p_{data}(y)} \bigg[log\Big(1 - D_X(F(y))\Big)\bigg] \\
        \\
        \mathcal{L}_{cyc}(G, F)
        &= \mathbb{E}_{x \sim p_{data}(x)} \Big[\lVert F(G(x)) - x \rVert_1\Big] \\
        &+ \mathbb{E}_{y \sim p_{data}(y)} \Big[\lVert G(F(y)) - y \rVert_1\Big] \\
        \\
        \mathcal{L}_{identity}(G, F)
        &= \mathbb{E}_{x \sim p_{data}(x)} \Big[\lVert F(x) - x \rVert_1\Big] \\
        &+ \mathbb{E}_{y \sim p_{data}(y)} \Big[\lVert G(y) - y \rVert_1\Big] \\
        \end{align}

        $\mathcal{L}_{GAN}$ is the generative adversarial loss from the original
        GAN paper.

        $\mathcal{L}_{cyc}$ is the cyclic loss, where we try to get $F(G(x))$ to be similar to $x$,
        and $G(F(y))$ to be similar to $y$.
        Basically if the two generators (transformations) are applied in series it should give back the
        original image.
        This is the main contribution of this paper.
        It trains the generators to generate an image of the other distribution that is similar to
        the original image.
        Without this loss $G(x)$ could generate anything that's from the distribution of $Y$.
        Now it needs to generate something from the distribution of $Y$ but still has properties of $x$,
        so that $F(G(x))$ can re-generate something like $x$.

        $\mathcal{L}_{identity}$ is the identity loss.
        This was used to encourage the mapping to preserve color composition between
        the input and the output.

        To solve for $G^*, F^*$,
        discriminators $D_X$ and $D_Y$ should **ascend** on the gradient,

        \begin{align}
        \nabla_{\theta_{D_X, D_Y}} \frac{1}{m} \sum_{i=1}^m
        &\Bigg[
        \log D_Y\Big(y^{(i)}\Big) \\
        &+ \log \Big(1 - D_Y\Big(G\Big(x^{(i)}\Big)\Big)\Big) \\
        &+ \log D_X\Big(x^{(i)}\Big) \\
        & +\log\Big(1 - D_X\Big(F\Big(y^{(i)}\Big)\Big)\Big)
        \Bigg]
        \end{align}

        That is, descend on the *negative* log-likelihood loss.

        In order to stabilize the training the negative log-likelihood objective
        was replaced by a least-squared loss -
        the least-squared error of discriminator, labelling real images with 1,
        and generated images with 0.
        So we want to descend on the gradient,

        \begin{align}
        \nabla_{\theta_{D_X, D_Y}} \frac{1}{m} \sum_{i=1}^m
        &\Bigg[
        \bigg(D_Y\Big(y^{(i)}\Big) - 1\bigg)^2 \\
        &+ D_Y\Big(G\Big(x^{(i)}\Big)\Big)^2 \\
        &+ \bigg(D_X\Big(x^{(i)}\Big) - 1\bigg)^2 \\
        &+ D_X\Big(F\Big(y^{(i)}\Big)\Big)^2
        \Bigg]
        \end{align}

        We use least-squares for generators also.
        The generators should *descend* on the gradient,

        \begin{align}
        \nabla_{\theta_{F, G}} \frac{1}{m} \sum_{i=1}^m
        &\Bigg[
        \bigg(D_Y\Big(G\Big(x^{(i)}\Big)\Big) - 1\bigg)^2 \\
        &+ \bigg(D_X\Big(F\Big(y^{(i)}\Big)\Big) - 1\bigg)^2 \\
        &+ \mathcal{L}_{cyc}(G, F)
        + \mathcal{L}_{identity}(G, F)
        \Bigg]
        \end{align}

        We use `generator_xy` for $G$ and `generator_yx` for $F$.
        We use `discriminator_x` for $D_X$ and `discriminator_y` for $D_Y$.
        """

        # Replay buffers to keep generated samples
        gen_x_buffer = ReplayBuffer()
        gen_y_buffer = ReplayBuffer()

        # Loop through epochs
        for epoch in monit.loop(self.epochs):
            # Loop through the dataset
            for i, batch in monit.enum('Train', self.dataloader):
                # Move images to the device
                data_x, data_y = batch['x'].to(self.device), batch['y'].to(self.device)

                # true labels equal to $1$
                true_labels = torch.ones(data_x.size(0), *self.discriminator_x.output_shape,
                                         device=self.device, requires_grad=False)
                # false labels equal to $0$
                false_labels = torch.zeros(data_x.size(0), *self.discriminator_x.output_shape,
                                           device=self.device, requires_grad=False)

                # Train the generators.
                # This returns the generated images.
                gen_x, gen_y = self.optimize_generators(data_x, data_y, true_labels)

                # Train discriminators on real images and (replayed) generated images
                self.optimize_discriminator(data_x, data_y,
                                            gen_x_buffer.push_and_pop(gen_x), gen_y_buffer.push_and_pop(gen_y),
                                            true_labels, false_labels)

                # Save training statistics and increment the global step counter
                tracker.save()
                tracker.add_global_step(max(len(data_x), len(data_y)))

                # Sample images at intervals
                batches_done = epoch * len(self.dataloader) + i
                if batches_done % self.sample_interval == 0:
                    # Sample images
                    self.sample_images(batches_done)

            # Update learning rates
            self.generator_lr_scheduler.step()
            self.discriminator_lr_scheduler.step()
            # New line
            tracker.new_line()

    def optimize_generators(self, data_x: torch.Tensor, data_y: torch.Tensor, true_labels: torch.Tensor):
        """
        ### Optimize the generators with identity, gan and cycle losses.
        """

        # Change to training mode
        self.generator_xy.train()
        self.generator_yx.train()

        # Identity loss
        # $$\lVert F(x^{(i)}) - x^{(i)} \rVert_1 +
        # \lVert G(y^{(i)}) - y^{(i)} \rVert_1$$
        loss_identity = (self.identity_loss(self.generator_yx(data_x), data_x) +
                         self.identity_loss(self.generator_xy(data_y), data_y))

        # Generate images $G(x)$ and $F(y)$
        gen_y = self.generator_xy(data_x)
        gen_x = self.generator_yx(data_y)

        # GAN loss
        # $$\bigg(D_Y\Big(G\Big(x^{(i)}\Big)\Big) - 1\bigg)^2
        # + \bigg(D_X\Big(F\Big(y^{(i)}\Big)\Big) - 1\bigg)^2$$
        loss_gan = (self.gan_loss(self.discriminator_y(gen_y), true_labels) +
                    self.gan_loss(self.discriminator_x(gen_x), true_labels))

        # Cycle loss
        # $$
        # \lVert F(G(x^{(i)})) - x^{(i)} \rVert_1 +
        # \lVert G(F(y^{(i)})) - y^{(i)} \rVert_1
        # $$
        loss_cycle = (self.cycle_loss(self.generator_yx(gen_y), data_x) +
                      self.cycle_loss(self.generator_xy(gen_x), data_y))

        # Total loss
        loss_generator = (loss_gan +
                          self.cyclic_loss_coefficient * loss_cycle +
                          self.identity_loss_coefficient * loss_identity)

        # Take a step in the optimizer
        self.generator_optimizer.zero_grad()
        loss_generator.backward()
        self.generator_optimizer.step()

        # Log losses
        tracker.add({'loss.generator': loss_generator,
                     'loss.generator.cycle': loss_cycle,
                     'loss.generator.gan': loss_gan,
                     'loss.generator.identity': loss_identity})

        # Return generated images
        return gen_x, gen_y

    def optimize_discriminator(self, data_x: torch.Tensor, data_y: torch.Tensor,
                               gen_x: torch.Tensor, gen_y: torch.Tensor,
                               true_labels: torch.Tensor, false_labels: torch.Tensor):
        """
        ### Optimize the discriminators with gan loss.
        """

        # GAN Loss
        #
        # \begin{align}
        # \bigg(D_Y\Big(y ^ {(i)}\Big) - 1\bigg) ^ 2
        # + D_Y\Big(G\Big(x ^ {(i)}\Big)\Big) ^ 2 + \\
        # \bigg(D_X\Big(x ^ {(i)}\Big) - 1\bigg) ^ 2
        # + D_X\Big(F\Big(y ^ {(i)}\Big)\Big) ^ 2
        # \end{align}
        loss_discriminator = (self.gan_loss(self.discriminator_x(data_x), true_labels) +
                              self.gan_loss(self.discriminator_x(gen_x), false_labels) +
                              self.gan_loss(self.discriminator_y(data_y), true_labels) +
                              self.gan_loss(self.discriminator_y(gen_y), false_labels))

        # Take a step in the optimizer
        self.discriminator_optimizer.zero_grad()
        loss_discriminator.backward()
        self.discriminator_optimizer.step()

        # Log losses
        tracker.add({'loss.discriminator': loss_discriminator})
def train():
    """
    ## Train Cycle GAN

    Creates the configurations and the experiment, then runs the training loop.
    """
    # Create configurations
    conf = Configs()
    # Create an experiment
    experiment.create(name='cycle_gan')
    # Calculate configurations.
    # It will calculate `conf.run` and all other configs required by it.
    experiment.configs(conf, {'dataset_name': 'summer2winter_yosemite'})
    conf.initialize()

    # Register models for saving and loading.
    # `get_modules` gives a dictionary of `nn.Modules` in `conf`.
    # You can also specify a custom dictionary of models.
    experiment.add_pytorch_models(get_modules(conf))

    # Start and watch the experiment
    with experiment.start():
        # Run the training
        conf.run()
def plot_image(img: torch.Tensor):
    """
    ### Plot an image with matplotlib

    Normalizes `img` to $[0, 1]$, converts from CHW to HWC layout and
    displays it without axes.
    """
    from matplotlib import pyplot as plt

    # Matplotlib needs the data on the CPU
    img = img.cpu()
    # Scale image values to $[0...1]$ (the epsilon avoids division by zero)
    low, high = img.min(), img.max()
    img = (img - low) / (high - low + 1e-5)
    # Matplotlib expects channels last (HWC)
    img = img.permute(1, 2, 0)
    # Show the image without axes
    plt.imshow(img)
    plt.axis('off')
    # Display
    plt.show()
def evaluate():
    """
    ## Evaluate trained Cycle GAN

    Loads a trained run, translates one test image and displays both the
    original and the generated image.
    """
    # Set the run UUID from the training run
    trained_run_uuid = 'f73c1164184711eb9190b74249275441'
    # Create configs object
    conf = Configs()
    # Create experiment
    experiment.create(name='cycle_gan_inference')
    # Load hyper parameters set for training
    conf_dict = experiment.load_configs(trained_run_uuid)
    # Calculate configurations. We specify the generators `'generator_xy', 'generator_yx'`
    # so that it only loads those and their dependencies.
    # Configs like `device` and `img_channels` will be calculated, since these are required by
    # `generator_xy` and `generator_yx`.
    #
    # If you want other parameters like `dataset_name` you should specify them here.
    # If you specify nothing, all the configurations will be calculated, including data loaders.
    # Calculation of configurations and their dependencies will happen when you call `experiment.start`
    experiment.configs(conf, conf_dict)
    conf.initialize()

    # Register models for saving and loading.
    # `get_modules` gives a dictionary of `nn.Modules` in `conf`.
    # You can also specify a custom dictionary of models.
    experiment.add_pytorch_models(get_modules(conf))
    # Specify which run to load from.
    # Loading will actually happen when you call `experiment.start`
    experiment.load(trained_run_uuid)

    # Start the experiment
    with experiment.start():
        # Image transformations (no augmentation; just tensor conversion and normalization)
        transforms_ = [
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]

        # Load your own data. Here we try the test set.
        # I was trying with Yosemite photos, they look awesome.
        # You can use `conf.dataset_name`, if you specified `dataset_name` as something you wanted to be calculated
        # in the call to `experiment.configs`
        dataset = ImageDataset(conf.dataset_name, transforms_, 'train')
        # Get an image from dataset
        x_image = dataset[10]['x']
        # Display the image
        plot_image(x_image)

        # Evaluation mode
        conf.generator_xy.eval()
        conf.generator_yx.eval()

        # We don't need gradients
        with torch.no_grad():
            # Add batch dimension and move to the device we use
            data = x_image.unsqueeze(0).to(conf.device)
            generated_y = conf.generator_xy(data)

        # Display the generated image.
        plot_image(generated_y[0].cpu())
# Train by default when run as a script; switch to `evaluate()` to run
# inference with a previously trained run.
if __name__ == '__main__':
    train()
    # evaluate()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/stylegan/experiment.py | labml_nn/gan/stylegan/experiment.py | """
---
title: StyleGAN 2 Model Training
summary: >
An annotated PyTorch implementation of StyleGAN2 model training code.
---
# [StyleGAN 2](index.html) Model Training
This is the training code for [StyleGAN 2](index.html) model.

---*These are $64 \times 64$ images generated after training for about 80K steps.*---
*Our implementation is a minimalistic StyleGAN 2 model training code.
Only single GPU training is supported to keep the implementation simple.
We managed to shrink it to keep it at less than 500 lines of code, including the training loop.*
*Without DDP (distributed data parallel) and multi-gpu training it will not be possible to train the model
for large resolutions (128+).
If you want training code with fp16 and DDP take a look at
[lucidrains/stylegan2-pytorch](https://github.com/lucidrains/stylegan2-pytorch).*
We trained this on [CelebA-HQ dataset](https://github.com/tkarras/progressive_growing_of_gans).
You can find the download instruction in this
[discussion on fast.ai](https://forums.fast.ai/t/download-celeba-hq-dataset/45873/3).
Save the images inside [`data/stylegan` folder](#dataset_path).
"""
import math
from pathlib import Path
from typing import Iterator, Tuple
import torchvision
from PIL import Image
import torch
import torch.utils.data
from labml import tracker, lab, monit, experiment
from labml.configs import BaseConfigs
from labml_nn.gan.stylegan import Discriminator, Generator, MappingNetwork, GradientPenalty, PathLengthPenalty
from labml_nn.gan.wasserstein import DiscriminatorLoss, GeneratorLoss
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.helpers.trainer import ModeState
from labml_nn.utils import cycle_dataloader
class Dataset(torch.utils.data.Dataset):
    """
    ## Dataset

    Loads the training images and resizes them to `image_size`.
    """

    def __init__(self, path: str, image_size: int):
        """
        * `path` path to the folder containing the images
        * `image_size` size of the image
        """
        super().__init__()

        # Collect the paths of all `jpg` files (searched recursively)
        self.paths = [p for p in Path(path).glob(f'**/*.jpg')]

        # Resize, then convert to a PyTorch tensor
        self.transform = torchvision.transforms.Compose([
            torchvision.transforms.Resize(image_size),
            torchvision.transforms.ToTensor(),
        ])

    def __len__(self):
        """Number of images"""
        return len(self.paths)

    def __getitem__(self, index):
        """Load and transform the `index`-th image"""
        return self.transform(Image.open(self.paths[index]))
class Configs(BaseConfigs):
    """
    ## Configurations
    """

    # Device to train the model on.
    # [`DeviceConfigs`](../../helpers/device.html)
    # picks up an available CUDA device or defaults to CPU.
    device: torch.device = DeviceConfigs()

    # [StyleGAN2 Discriminator](index.html#discriminator)
    discriminator: Discriminator
    # [StyleGAN2 Generator](index.html#generator)
    generator: Generator
    # [Mapping network](index.html#mapping_network)
    mapping_network: MappingNetwork

    # Discriminator and generator loss functions.
    # We use [Wasserstein loss](../wasserstein/index.html)
    discriminator_loss: DiscriminatorLoss
    generator_loss: GeneratorLoss

    # Optimizers
    generator_optimizer: torch.optim.Adam
    discriminator_optimizer: torch.optim.Adam
    mapping_network_optimizer: torch.optim.Adam

    # [Gradient Penalty Regularization Loss](index.html#gradient_penalty)
    gradient_penalty = GradientPenalty()
    # Gradient penalty coefficient $\gamma$
    gradient_penalty_coefficient: float = 10.

    # [Path length penalty](index.html#path_length_penalty)
    path_length_penalty: PathLengthPenalty

    # Data loader (a cyclic iterator over the dataset, see `init`)
    loader: Iterator

    # Batch size
    batch_size: int = 32
    # Dimensionality of $z$ and $w$
    d_latent: int = 512
    # Height/width of the image
    image_size: int = 32
    # Number of layers in the mapping network
    mapping_network_layers: int = 8
    # Generator & Discriminator learning rate
    learning_rate: float = 1e-3
    # Mapping network learning rate ($100 \times$ lower than the others)
    mapping_network_learning_rate: float = 1e-5
    # Number of steps to accumulate gradients on. Use this to increase the effective batch size.
    gradient_accumulate_steps: int = 1
    # $\beta_1$ and $\beta_2$ for Adam optimizer
    adam_betas: Tuple[float, float] = (0.0, 0.99)
    # Probability of mixing styles
    style_mixing_prob: float = 0.9

    # Total number of training steps
    training_steps: int = 150_000

    # Number of blocks in the generator (calculated based on image resolution)
    n_gen_blocks: int

    # ### Lazy regularization
    # Instead of calculating the regularization losses, the paper proposes lazy regularization
    # where the regularization terms are calculated once in a while.
    # This improves the training efficiency a lot.

    # The interval at which to compute gradient penalty
    lazy_gradient_penalty_interval: int = 4
    # Path length penalty calculation interval
    lazy_path_penalty_interval: int = 32
    # Skip calculating path length penalty during the initial phase of training
    lazy_path_penalty_after: int = 5_000

    # How often to log generated images
    log_generated_interval: int = 500
    # How often to save model checkpoints
    save_checkpoint_interval: int = 2_000

    # Training mode state for logging activations
    mode: ModeState

    # <a id="dataset_path"></a>
    # We trained this on [CelebA-HQ dataset](https://github.com/tkarras/progressive_growing_of_gans).
    # You can find the download instruction in this
    # [discussion on fast.ai](https://forums.fast.ai/t/download-celeba-hq-dataset/45873/3).
    # Save the images inside `data/stylegan` folder.
    dataset_path: str = str(lab.get_data_path() / 'stylegan2')

    def init(self):
        """
        ### Initialize

        Creates the dataset, data loader, models, losses and optimizers.
        Must be called before `train`.
        """
        # Create dataset
        dataset = Dataset(self.dataset_path, self.image_size)
        # Create data loader
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, num_workers=8,
                                                 shuffle=True, drop_last=True, pin_memory=True)
        # Continuous [cyclic loader](../../utils.html#cycle_dataloader)
        self.loader = cycle_dataloader(dataloader)

        # $\log_2$ of image resolution (image size must be a power of two)
        log_resolution = int(math.log2(self.image_size))

        # Create discriminator and generator
        self.discriminator = Discriminator(log_resolution).to(self.device)
        self.generator = Generator(log_resolution, self.d_latent).to(self.device)
        # Get number of generator blocks for creating style and noise inputs
        self.n_gen_blocks = self.generator.n_blocks
        # Create mapping network
        self.mapping_network = MappingNetwork(self.d_latent, self.mapping_network_layers).to(self.device)
        # Create path length penalty loss
        self.path_length_penalty = PathLengthPenalty(0.99).to(self.device)

        # Discriminator and generator losses
        self.discriminator_loss = DiscriminatorLoss().to(self.device)
        self.generator_loss = GeneratorLoss().to(self.device)

        # Create optimizers
        self.discriminator_optimizer = torch.optim.Adam(
            self.discriminator.parameters(),
            lr=self.learning_rate, betas=self.adam_betas
        )
        self.generator_optimizer = torch.optim.Adam(
            self.generator.parameters(),
            lr=self.learning_rate, betas=self.adam_betas
        )
        self.mapping_network_optimizer = torch.optim.Adam(
            self.mapping_network.parameters(),
            lr=self.mapping_network_learning_rate, betas=self.adam_betas
        )

        # Set tracker configurations
        tracker.set_image("generated", True)

    def get_w(self, batch_size: int):
        """
        ### Sample $w$

        This samples $z$ randomly and gets $w$ from the mapping network.

        We also apply style mixing sometimes where we generate two latent variables
        $z_1$ and $z_2$ and get corresponding $w_1$ and $w_2$.
        Then we randomly sample a cross-over point and apply $w_1$ to
        the generator blocks before the cross-over point and
        $w_2$ to the blocks after.

        Returns a tensor of shape `[n_gen_blocks, batch_size, d_latent]`.
        """
        # Mix styles with probability `style_mixing_prob`
        if torch.rand(()).item() < self.style_mixing_prob:
            # Random cross-over point
            cross_over_point = int(torch.rand(()).item() * self.n_gen_blocks)
            # Sample $z_1$ and $z_2$
            z2 = torch.randn(batch_size, self.d_latent).to(self.device)
            z1 = torch.randn(batch_size, self.d_latent).to(self.device)
            # Get $w_1$ and $w_2$
            w1 = self.mapping_network(z1)
            w2 = self.mapping_network(z2)
            # Expand $w_1$ and $w_2$ for the generator blocks and concatenate
            w1 = w1[None, :, :].expand(cross_over_point, -1, -1)
            w2 = w2[None, :, :].expand(self.n_gen_blocks - cross_over_point, -1, -1)
            return torch.cat((w1, w2), dim=0)
        # Without mixing
        else:
            # Sample $z$
            z = torch.randn(batch_size, self.d_latent).to(self.device)
            # Get $w$
            w = self.mapping_network(z)
            # Expand $w$ for the generator blocks
            return w[None, :, :].expand(self.n_gen_blocks, -1, -1)

    def get_noise(self, batch_size: int):
        """
        ### Generate noise

        This generates noise for each [generator block](index.html#generator_block).

        Returns a list of `(n1, n2)` pairs, one per block; `n1` is `None` for the
        first block because it has only one convolution.
        """
        # List to store noise
        noise = []
        # Noise resolution starts from $4$
        resolution = 4

        # Generate noise for each generator block
        for i in range(self.n_gen_blocks):
            # The first block has only one $3 \times 3$ convolution
            if i == 0:
                n1 = None
            # Generate noise to add after the first convolution layer
            else:
                n1 = torch.randn(batch_size, 1, resolution, resolution, device=self.device)
            # Generate noise to add after the second convolution layer
            n2 = torch.randn(batch_size, 1, resolution, resolution, device=self.device)

            # Add noise tensors to the list
            noise.append((n1, n2))

            # Next block has $2 \times$ resolution
            resolution *= 2

        # Return noise tensors
        return noise

    def generate_images(self, batch_size: int):
        """
        ### Generate images

        This generates images using the generator.

        Returns `(images, w)` so the caller can also use $w$
        (e.g. for the path length penalty).
        """
        # Get $w$
        w = self.get_w(batch_size)
        # Get noise
        noise = self.get_noise(batch_size)

        # Generate images
        images = self.generator(w, noise)

        # Return images and $w$
        return images, w

    def step(self, idx: int):
        """
        ### Training Step

        * `idx` is the zero-based index of the current training step; it drives the
          lazy-regularization, logging and checkpoint intervals.
        """
        # Train the discriminator
        with monit.section('Discriminator'):
            # Reset gradients
            self.discriminator_optimizer.zero_grad()

            # Accumulate gradients for `gradient_accumulate_steps`
            for i in range(self.gradient_accumulate_steps):
                # Sample images from generator
                generated_images, _ = self.generate_images(self.batch_size)
                # Discriminator classification for generated images
                # (`detach` so the generator gets no gradients here)
                fake_output = self.discriminator(generated_images.detach())

                # Get real images from the data loader
                real_images = next(self.loader).to(self.device)
                # We need to calculate gradients w.r.t. real images for gradient penalty
                if (idx + 1) % self.lazy_gradient_penalty_interval == 0:
                    real_images.requires_grad_()
                # Discriminator classification for real images
                real_output = self.discriminator(real_images)

                # Get discriminator loss
                real_loss, fake_loss = self.discriminator_loss(real_output, fake_output)
                disc_loss = real_loss + fake_loss

                # Add gradient penalty (lazy regularization)
                if (idx + 1) % self.lazy_gradient_penalty_interval == 0:
                    # Calculate and log gradient penalty
                    gp = self.gradient_penalty(real_images, real_output)
                    tracker.add('loss.gp', gp)
                    # Multiply by coefficient and add gradient penalty;
                    # scaled by the interval to compensate for computing it
                    # only once every `lazy_gradient_penalty_interval` steps
                    disc_loss = disc_loss + 0.5 * self.gradient_penalty_coefficient * gp * self.lazy_gradient_penalty_interval

                # Compute gradients
                disc_loss.backward()

                # Log discriminator loss
                tracker.add('loss.discriminator', disc_loss)

            if (idx + 1) % self.log_generated_interval == 0:
                # Log discriminator model parameters occasionally
                tracker.add('discriminator', self.discriminator)

            # Clip gradients for stabilization
            torch.nn.utils.clip_grad_norm_(self.discriminator.parameters(), max_norm=1.0)
            # Take optimizer step
            self.discriminator_optimizer.step()

        # Train the generator
        with monit.section('Generator'):
            # Reset gradients
            self.generator_optimizer.zero_grad()
            self.mapping_network_optimizer.zero_grad()

            # Accumulate gradients for `gradient_accumulate_steps`
            for i in range(self.gradient_accumulate_steps):
                # Sample images from generator
                generated_images, w = self.generate_images(self.batch_size)
                # Discriminator classification for generated images
                fake_output = self.discriminator(generated_images)

                # Get generator loss
                gen_loss = self.generator_loss(fake_output)

                # Add path length penalty (lazy regularization, skipped early in training)
                if idx > self.lazy_path_penalty_after and (idx + 1) % self.lazy_path_penalty_interval == 0:
                    # Calculate path length penalty
                    plp = self.path_length_penalty(w, generated_images)
                    # Ignore if `nan`
                    if not torch.isnan(plp):
                        tracker.add('loss.plp', plp)
                        gen_loss = gen_loss + plp

                # Calculate gradients
                gen_loss.backward()

                # Log generator loss
                tracker.add('loss.generator', gen_loss)

            if (idx + 1) % self.log_generated_interval == 0:
                # Log generator and mapping network parameters occasionally
                tracker.add('generator', self.generator)
                tracker.add('mapping_network', self.mapping_network)

            # Clip gradients for stabilization
            torch.nn.utils.clip_grad_norm_(self.generator.parameters(), max_norm=1.0)
            torch.nn.utils.clip_grad_norm_(self.mapping_network.parameters(), max_norm=1.0)

            # Take optimizer step
            self.generator_optimizer.step()
            self.mapping_network_optimizer.step()

        # Log generated images (a few generated alongside a few real ones)
        if (idx + 1) % self.log_generated_interval == 0:
            tracker.add('generated', torch.cat([generated_images[:6], real_images[:3]], dim=0))
        # Save model checkpoints
        # NOTE(review): checkpoint saving is deliberately left unimplemented here
        if (idx + 1) % self.save_checkpoint_interval == 0:
            # Save checkpoint
            pass

        # Flush tracker
        tracker.save()

    def train(self):
        """
        ## Train model

        Runs `step` for `training_steps` iterations.
        """
        # Loop for `training_steps`
        for i in monit.loop(self.training_steps):
            # Take a training step
            self.step(i)
            # Start a new logger line after each image-logging interval
            if (i + 1) % self.log_generated_interval == 0:
                tracker.new_line()
def main():
    """
    ### Train StyleGAN2

    Creates the experiment, builds and initializes the configurations,
    registers the models for checkpointing, and runs the training loop.
    """
    # Set up the experiment
    experiment.create(name='stylegan2')

    # Build the configurations object
    configs = Configs()

    # Load configurations, overriding a few defaults
    overrides = {
        'device.cuda_device': 0,
        'image_size': 64,
        'log_generated_interval': 200
    }
    experiment.configs(configs, overrides)

    # Initialize models, data loader and optimizers
    configs.init()

    # Register the models so they can be saved and loaded
    experiment.add_pytorch_models(mapping_network=configs.mapping_network,
                                  generator=configs.generator,
                                  discriminator=configs.discriminator)

    # Run the training loop inside the experiment
    with experiment.start():
        configs.train()


#
if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/stylegan/__init__.py | labml_nn/gan/stylegan/__init__.py | """
---
title: StyleGAN 2
summary: >
An annotated PyTorch implementation of StyleGAN2.
---
# StyleGAN 2
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Analyzing and Improving the Image Quality of StyleGAN](https://arxiv.org/abs/1912.04958)
which introduces **StyleGAN 2**.
StyleGAN 2 is an improvement over **StyleGAN** from the paper
[A Style-Based Generator Architecture for Generative Adversarial Networks](https://arxiv.org/abs/1812.04948).
And StyleGAN is based on **Progressive GAN** from the paper
[Progressive Growing of GANs for Improved Quality, Stability, and Variation](https://arxiv.org/abs/1710.10196).
All three papers are from the same authors from [NVIDIA AI](https://twitter.com/NVIDIAAI).
*Our implementation is a minimalistic StyleGAN 2 model training code.
Only single GPU training is supported to keep the implementation simple.
We managed to shrink it to keep it at less than 500 lines of code, including the training loop.*
**π Here's the training code: [`experiment.py`](experiment.html).**

---*These are $64 \times 64$ images generated after training for about 80K steps.*---
We'll first introduce the three papers at a high level.
## Generative Adversarial Networks
Generative adversarial networks have two components; the generator and the discriminator.
The generator network takes a random latent vector ($z \in \mathcal{Z}$)
and tries to generate a realistic image.
The discriminator network tries to differentiate the real images from generated images.
When we train the two networks together the generator starts generating images indistinguishable from real images.
## Progressive GAN
Progressive GAN generates high-resolution images (of size up to $1024 \times 1024$).
It does so by *progressively* increasing the image size.
First, it trains a network that produces a $4 \times 4$ image, then $8 \times 8$ ,
then a $16 \times 16$ image, and so on up to the desired image resolution.
At each resolution, the generator network produces an image in latent space which is converted into RGB,
with a $1 \times 1$ convolution.
When we progress from a lower resolution to a higher resolution
(say from $4 \times 4$ to $8 \times 8$ ) we scale the latent image by $2\times$
and add a new block (two $3 \times 3$ convolution layers)
and a new $1 \times 1$ layer to get RGB.
The transition is done smoothly by adding a residual connection to
the $2\times$ scaled $4 \times 4$ RGB image.
The weight of this residual connection is slowly reduced, to let the new block take over.
The discriminator is a mirror image of the generator network.
The progressive growth of the discriminator is done similarly.

---*$2\times$ and $0.5\times$ denote feature map up-sampling and down-sampling.
$4\times4$, $8\times8$, ... denote the feature map resolution at the generator or discriminator block.
Each discriminator and generator block consists of 2 convolution layers with leaky ReLU activations.*---
They use **minibatch standard deviation** to increase variation and
**equalized learning rate** which we discussed below in the implementation.
They also use **pixel-wise normalization** where at each pixel the feature vector is normalized.
They apply this to all the convolution layer outputs (except RGB).
## StyleGAN
StyleGAN improves the generator of Progressive GAN keeping the discriminator architecture the same.
#### Mapping Network
It maps the random latent vector ($z \in \mathcal{Z}$)
into a different latent space ($w \in \mathcal{W}$),
with an 8-layer neural network.
This gives an intermediate latent space $\mathcal{W}$
where the factors of variations are more linear (disentangled).
#### AdaIN
Then $w$ is transformed into two vectors (**styles**) per layer,
$i$, $y_i = (y_{s,i}, y_{b,i}) = f_{A_i}(w)$ and used for scaling and shifting (biasing)
in each layer with $\text{AdaIN}$ operator (normalize and scale):
$$\text{AdaIN}(x_i, y_i) = y_{s, i} \frac{x_i - \mu(x_i)}{\sigma(x_i)} + y_{b,i}$$
#### Style Mixing
To prevent the generator from assuming adjacent styles are correlated,
they randomly use different styles for different blocks.
That is, they sample two latent vectors $(z_1, z_2)$ and corresponding $(w_1, w_2)$ and
use $w_1$ based styles for some blocks and $w_2$ based styles for some blocks randomly.
#### Stochastic Variation
Noise is made available to each block which helps the generator create more realistic images.
Noise is scaled per channel by a learned weight.
#### Bilinear Up and Down Sampling
All the up and down-sampling operations are accompanied by bilinear smoothing.

---*$A$ denotes a linear layer.
$B$ denotes a broadcast and scaling operation (noise is a single channel).
StyleGAN also uses progressive growing like Progressive GAN.*---
## StyleGAN 2
StyleGAN 2 changes both the generator and the discriminator of StyleGAN.
#### Weight Modulation and Demodulation
They remove the $\text{AdaIN}$ operator and replace it with
the weight modulation and demodulation step.
This is supposed to improve what they call droplet artifacts that are present in generated images,
which are caused by the normalization in $\text{AdaIN}$ operator.
Style vector per layer is calculated from $w_i \in \mathcal{W}$ as $s_i = f_{A_i}(w_i)$.
Then the convolution weights $w$ are modulated as follows.
($w$ here on refers to weights not intermediate latent space,
we are sticking to the same notation as the paper.)
$$w'_{i, j, k} = s_i \cdot w_{i, j, k}$$
Then it's demodulated by normalizing,
$$w''_{i,j,k} = \frac{w'_{i,j,k}}{\sqrt{\sum_{i,k}{w'_{i, j, k}}^2 + \epsilon}}$$
where $i$ is the input channel, $j$ is the output channel, and $k$ is the kernel index.
#### Path Length Regularization
Path length regularization encourages a fixed-size step in $\mathcal{W}$ to result in a non-zero,
fixed-magnitude change in the generated image.
#### No Progressive Growing
StyleGAN2 uses residual connections (with down-sampling) in the discriminator and skip connections
in the generator with up-sampling
(the RGB outputs from each layer are added - no residual connections in feature maps).
They show that with experiments that the contribution of low-resolution layers is higher
at beginning of the training and then high-resolution layers take over.
"""
import math
from typing import Tuple, Optional, List
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
from torch import nn
class MappingNetwork(nn.Module):
    """
    <a id="mapping_network"></a>

    ## Mapping Network

    

    This is an MLP with 8 linear layers.
    The mapping network maps the latent vector $z \in \mathcal{Z}$
    to an intermediate latent space $w \in \mathcal{W}$.
    $\mathcal{W}$ space will be disentangled from the image space
    where the factors of variation become more linear.
    """

    def __init__(self, features: int, n_layers: int):
        """
        * `features` is the number of features in $z$ and $w$
        * `n_layers` is the number of layers in the mapping network.
        """
        super().__init__()

        # Each MLP layer is an [equalized learning-rate linear layer](#equalized_linear)
        # followed by a leaky ReLU activation.
        layer_pairs = [
            (EqualizedLinear(features, features),
             nn.LeakyReLU(negative_slope=0.2, inplace=True))
            for _ in range(n_layers)
        ]
        # Flatten the pairs into a single sequential module
        self.net = nn.Sequential(*[module for pair in layer_pairs for module in pair])

    def forward(self, z: torch.Tensor):
        # Normalize $z$ and map it to $w$ through the MLP
        return self.net(F.normalize(z, dim=1))
class Generator(nn.Module):
    """
    <a id="generator"></a>

    ## StyleGAN2 Generator

    

    ---*$A$ denotes a linear layer.
    $B$ denotes a broadcast and scaling operation (noise is a single channel).
    [`toRGB`](#to_rgb) also has a style modulation which is not shown in the diagram to keep it simple.*---

    The generator starts with a learned constant.
    Then it has a series of blocks. The feature map resolution is doubled at each block.
    Each block outputs an RGB image and they are scaled up and summed to get the final RGB image.
    """

    def __init__(self, log_resolution: int, d_latent: int, n_features: int = 32, max_features: int = 512):
        """
        * `log_resolution` is the $\log_2$ of image resolution
        * `d_latent` is the dimensionality of $w$
        * `n_features` number of features in the convolution layer at the highest resolution (final block)
        * `max_features` maximum number of features in any generator block
        """
        super().__init__()

        # Calculate the number of features for each block, from lowest
        # resolution (most features) to highest (fewest features).
        #
        # Something like `[512, 512, 256, 128, 64, 32]`
        features = [min(max_features, n_features * (2 ** i)) for i in range(log_resolution - 2, -1, -1)]
        # Number of generator blocks
        self.n_blocks = len(features)

        # Trainable $4 \times 4$ constant that every generated image starts from
        self.initial_constant = nn.Parameter(torch.randn((1, features[0], 4, 4)))

        # First style block for $4 \times 4$ resolution and layer to get RGB
        self.style_block = StyleBlock(d_latent, features[0], features[0])
        self.to_rgb = ToRGB(d_latent, features[0])

        # Generator blocks; block `i` maps `features[i - 1]` channels to `features[i]`
        blocks = [GeneratorBlock(d_latent, features[i - 1], features[i]) for i in range(1, self.n_blocks)]
        self.blocks = nn.ModuleList(blocks)

        # $2 \times$ up sampling layer. The feature space is up sampled
        # at each block
        self.up_sample = UpSample()

    def forward(self, w: torch.Tensor, input_noise: List[Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]]):
        """
        * `w` is $w$. In order to mix-styles (use different $w$ for different layers), we provide a separate
        $w$ for each [generator block](#generator_block). It has shape `[n_blocks, batch_size, d_latent]`.
        * `input_noise` is the noise for each block.
        It's a list of pairs of noise tensors because each block (except the initial) has two noise inputs
        after each convolution layer (see the diagram).
        """
        # Get batch size (second dimension of `w`)
        batch_size = w.shape[1]

        # Expand the learned constant to match batch size
        x = self.initial_constant.expand(batch_size, -1, -1, -1)

        # The first style block; only the second noise tensor of the first
        # pair is used since the initial block has one convolution
        x = self.style_block(x, w[0], input_noise[0][1])
        # Get first rgb image
        rgb = self.to_rgb(x, w[0])

        # Evaluate rest of the blocks
        for i in range(1, self.n_blocks):
            # Up sample the feature map
            x = self.up_sample(x)
            # Run it through the [generator block](#generator_block)
            x, rgb_new = self.blocks[i - 1](x, w[i], input_noise[i])
            # Up sample the RGB image and add to the rgb from the block
            # (skip connections instead of progressive growing)
            rgb = self.up_sample(rgb) + rgb_new

        # Return the final RGB image
        return rgb
class GeneratorBlock(nn.Module):
    """
    <a id="generator_block"></a>

    ### Generator Block

    

    ---*$A$ denotes a linear layer.
    $B$ denotes a broadcast and scaling operation (noise is a single channel).
    [`toRGB`](#to_rgb) also has a style modulation which is not shown in the diagram to keep it simple.*---

    The generator block consists of two [style blocks](#style_block) ($3 \times 3$ convolutions with style modulation)
    and an RGB output.
    """

    def __init__(self, d_latent: int, in_features: int, out_features: int):
        """
        * `d_latent` is the dimensionality of $w$
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        """
        super().__init__()

        # First [style block](#style_block): maps `in_features` channels to `out_features`
        self.style_block1 = StyleBlock(d_latent, in_features, out_features)
        # Second [style block](#style_block): keeps `out_features` channels
        self.style_block2 = StyleBlock(d_latent, out_features, out_features)

        # *toRGB* layer for this block's RGB output
        self.to_rgb = ToRGB(d_latent, out_features)

    def forward(self, x: torch.Tensor, w: torch.Tensor, noise: Tuple[Optional[torch.Tensor], Optional[torch.Tensor]]):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `w` is $w$ with shape `[batch_size, d_latent]`
        * `noise` is a tuple of two noise tensors of shape `[batch_size, 1, height, width]`
        """
        # Run both style blocks, feeding each its own noise tensor.
        # After the first, the feature map has `out_features` channels.
        for style_block, noise_tensor in ((self.style_block1, noise[0]),
                                          (self.style_block2, noise[1])):
            x = style_block(x, w, noise_tensor)

        # Produce the RGB output and return it alongside the feature map
        return x, self.to_rgb(x, w)
class StyleBlock(nn.Module):
    """
    <a id="style_block"></a>

    ### Style Block

    

    ---*$A$ denotes a linear layer.
    $B$ denotes a broadcast and scaling operation (noise is single channel).*---

    Style block has a weight modulation convolution layer.
    """

    def __init__(self, d_latent: int, in_features: int, out_features: int):
        """
        * `d_latent` is the dimensionality of $w$
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        """
        super().__init__()

        # $A$: [equalized learning-rate linear layer](#equalized_linear)
        # mapping $w$ to a per-input-channel style vector
        self.to_style = EqualizedLinear(d_latent, in_features, bias=1.0)
        # $3 \times 3$ weight-modulated convolution
        self.conv = Conv2dWeightModulate(in_features, out_features, kernel_size=3)
        # Learned scale for the injected noise (starts at zero)
        self.scale_noise = nn.Parameter(torch.zeros(1))
        # Per-channel bias
        self.bias = nn.Parameter(torch.zeros(out_features))

        # Leaky ReLU activation
        self.activation = nn.LeakyReLU(0.2, True)

    def forward(self, x: torch.Tensor, w: torch.Tensor, noise: Optional[torch.Tensor]):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `w` is $w$ with shape `[batch_size, d_latent]`
        * `noise` is a tensor of shape `[batch_size, 1, height, width]`
        """
        # Compute the style vector $s$ from $w$
        style = self.to_style(w)
        # Apply the weight-modulated convolution
        out = self.conv(x, style)
        # Inject scaled noise, if any was provided
        if noise is not None:
            out = out + self.scale_noise[None, :, None, None] * noise
        # Add the bias and apply the activation
        return self.activation(out + self.bias[None, :, None, None])
class ToRGB(nn.Module):
    """
    <a id="to_rgb"></a>

    ### To RGB

    

    ---*$A$ denotes a linear layer.*---

    Generates an RGB image from a feature map using $1 \times 1$ convolution.
    """

    def __init__(self, d_latent: int, features: int):
        """
        * `d_latent` is the dimensionality of $w$
        * `features` is the number of features in the feature map
        """
        super().__init__()

        # $A$: [equalized learning-rate linear layer](#equalized_linear)
        # mapping $w$ to a per-channel style vector
        self.to_style = EqualizedLinear(d_latent, features, bias=1.0)

        # $1 \times 1$ weight-modulated convolution, without demodulation
        self.conv = Conv2dWeightModulate(features, 3, kernel_size=1, demodulate=False)
        # Per-channel bias for the three RGB channels
        self.bias = nn.Parameter(torch.zeros(3))
        # Leaky ReLU activation
        self.activation = nn.LeakyReLU(0.2, True)

    def forward(self, x: torch.Tensor, w: torch.Tensor):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `w` is $w$ with shape `[batch_size, d_latent]`
        """
        # Compute the style vector from $w$
        s = self.to_style(w)
        # Apply the modulated $1 \times 1$ convolution
        rgb = self.conv(x, s)
        # Add the bias, then apply the activation
        return self.activation(rgb + self.bias[None, :, None, None])
class Conv2dWeightModulate(nn.Module):
    """
    ### Convolution with Weight Modulation and Demodulation

    This layer scales the convolution weights by the style vector and demodulates by normalizing it.
    """

    def __init__(self, in_features: int, out_features: int, kernel_size: int,
                 demodulate: bool = True, eps: float = 1e-8):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `demodulate` is flag whether to normalize weights by its standard deviation
        * `eps` is the $\epsilon$ for normalizing
        """
        super().__init__()
        # Number of output features
        self.out_features = out_features
        # Whether to normalize weights
        self.demodulate = demodulate
        # Padding size (keeps the spatial resolution unchanged for odd kernels)
        self.padding = (kernel_size - 1) // 2

        # [Weights parameter with equalized learning rate](#equalized_weight)
        self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size])
        # $\epsilon$
        self.eps = eps

    def forward(self, x: torch.Tensor, s: torch.Tensor):
        """
        * `x` is the input feature map of shape `[batch_size, in_features, height, width]`
        * `s` is style based scaling tensor of shape `[batch_size, in_features]`
        """
        # Get batch size, height and width
        b, _, h, w = x.shape

        # Reshape the scales to broadcast over the input-channel dimension
        s = s[:, None, :, None, None]
        # Get [learning rate equalized weights](#equalized_weight),
        # adding a leading batch dimension for broadcasting
        weights = self.weight()[None, :, :, :, :]
        # $$w`_{i,j,k} = s_i * w_{i,j,k}$$
        # where $i$ is the input channel, $j$ is the output channel, and $k$ is the kernel index.
        #
        # The result has shape `[batch_size, out_features, in_features, kernel_size, kernel_size]`
        weights = weights * s

        # Demodulate
        if self.demodulate:
            # $$\sigma_j = \sqrt{\sum_{i,k} (w'_{i, j, k})^2 + \epsilon}$$
            sigma_inv = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)
            # $$w''_{i,j,k} = \frac{w'_{i,j,k}}{\sqrt{\sum_{i,k} (w'_{i, j, k})^2 + \epsilon}}$$
            weights = weights * sigma_inv

        # Reshape `x` so the batch dimension is folded into the channels;
        # each sample becomes a "group" of the grouped convolution below
        x = x.reshape(1, -1, h, w)

        # Reshape weights to `[batch_size * out_features, in_features, kernel_size, kernel_size]`
        _, _, *ws = weights.shape
        weights = weights.reshape(b * self.out_features, *ws)

        # Use grouped convolution to efficiently calculate the convolution with sample wise kernel.
        # i.e. we have a different kernel (weights) for each sample in the batch
        x = F.conv2d(x, weights, padding=self.padding, groups=b)

        # Reshape `x` to `[batch_size, out_features, height, width]` and return
        return x.reshape(-1, self.out_features, h, w)
class Discriminator(nn.Module):
    """
    <a id="discriminator"></a>

    ## StyleGAN 2 Discriminator

    

    Discriminator first transforms the image to a feature map of the same resolution and then
    runs it through a series of blocks with residual connections.
    The resolution is down-sampled by $2 \times$ at each block while doubling the
    number of features.
    """

    def __init__(self, log_resolution: int, n_features: int = 64, max_features: int = 512):
        """
        * `log_resolution` is the $\log_2$ of image resolution
        * `n_features` number of features in the convolution layer at the highest resolution (first block)
        * `max_features` maximum number of features in any generator block
        """
        super().__init__()

        # Layer to convert RGB image to a feature map with `n_features` number of features.
        self.from_rgb = nn.Sequential(
            EqualizedConv2d(3, n_features, 1),
            nn.LeakyReLU(0.2, True),
        )

        # Calculate the number of features for each block, doubling per block
        # up to `max_features`.
        #
        # Something like `[64, 128, 256, 512, 512, 512]`.
        features = [min(max_features, n_features * (2 ** i)) for i in range(log_resolution - 1)]

        # Number of [discirminator blocks](#discriminator_block)
        n_blocks = len(features) - 1
        # Discriminator blocks; each halves the resolution
        blocks = [DiscriminatorBlock(features[i], features[i + 1]) for i in range(n_blocks)]
        self.blocks = nn.Sequential(*blocks)

        # [Mini-batch Standard Deviation](#mini_batch_std_dev)
        self.std_dev = MiniBatchStdDev()
        # Number of features after adding the standard deviations map (one extra channel)
        final_features = features[-1] + 1
        # Final $3 \times 3$ convolution layer
        self.conv = EqualizedConv2d(final_features, final_features, 3)
        # Final linear layer to get the classification.
        # NOTE(review): the `2 * 2` assumes the blocks reduce the map to
        # $4 \times 4$ and the $3 \times 3$ conv (presumably unpadded) leaves
        # $2 \times 2$ — verify against `EqualizedConv2d`'s default padding.
        self.final = EqualizedLinear(2 * 2 * final_features, 1)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input image of shape `[batch_size, 3, height, width]`
        """
        # Try to normalize the image (this is totally optional, but sped up the early training a little)
        x = x - 0.5
        # Convert from RGB
        x = self.from_rgb(x)
        # Run through the [discriminator blocks](#discriminator_block)
        x = self.blocks(x)

        # Calculate and append [mini-batch standard deviation](#mini_batch_std_dev)
        x = self.std_dev(x)
        # $3 \times 3$ convolution
        x = self.conv(x)
        # Flatten
        x = x.reshape(x.shape[0], -1)
        # Return the classification score
        return self.final(x)
class DiscriminatorBlock(nn.Module):
    """
    <a id="discriminator_black"></a>

    ### Discriminator Block

    

    Discriminator block consists of two $3 \times 3$ convolutions with a residual connection.
    """

    def __init__(self, in_features, out_features):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        """
        super().__init__()

        # Residual path: down-sample then a $1 \times 1$ convolution
        # to match the output feature count
        self.residual = nn.Sequential(DownSample(),
                                      EqualizedConv2d(in_features, out_features, kernel_size=1))

        # Main path: two $3 \times 3$ convolutions with leaky ReLU activations
        self.block = nn.Sequential(
            EqualizedConv2d(in_features, in_features, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, True),
            EqualizedConv2d(in_features, out_features, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, True),
        )

        # Down-sampling for the main path
        self.down_sample = DownSample()

        # Scaling factor $\frac{1}{\sqrt 2}$ applied after adding the residual
        self.scale = 1 / math.sqrt(2)

    def forward(self, x):
        # Residual path (down-sampled, channel-matched input)
        shortcut = self.residual(x)

        # Main path: convolutions followed by down-sampling
        out = self.down_sample(self.block(x))

        # Combine the two paths and scale
        return (out + shortcut) * self.scale
class MiniBatchStdDev(nn.Module):
    """
    <a id="mini_batch_std_dev"></a>
    ### Mini-batch Standard Deviation

    Computes the standard deviation of every feature across a group of samples
    in the mini-batch, averages those standard deviations into one scalar, and
    appends that scalar as an extra (constant) feature channel. This gives the
    discriminator a cheap statistic about sample diversity.
    """
    def __init__(self, group_size: int = 4):
        """
        * `group_size` is the number of samples to calculate standard deviation across.
        """
        super().__init__()
        # Size of the sub-groups the batch is split into
        self.group_size = group_size
    def forward(self, x: torch.Tensor):
        """
        * `x` is the feature map of shape `[batch_size, features, height, width]`
        """
        # The batch must split evenly into groups of `group_size`
        assert x.shape[0] % self.group_size == 0
        # Flatten everything except the group dimension so that each column
        # is one feature value observed `group_size` times
        grouped = x.view(self.group_size, -1)
        # Per-feature standard deviation over the group,
        # $\sigma_{i} = \sqrt{\frac{1}{N} \sum_g (x_{g,i} - \mu_i)^2 + \epsilon}$
        per_feature_std = (grouped.var(dim=0) + 1e-8).sqrt()
        # Average into a single scalar, shaped for broadcasting
        mean_std = per_feature_std.mean().view(1, 1, 1, 1)
        # Tile the scalar over the batch and spatial dimensions
        batch_size, _, height, width = x.shape
        std_map = mean_std.expand(batch_size, -1, height, width)
        # Concatenate as one extra feature channel
        return torch.cat([x, std_map], dim=1)
class DownSample(nn.Module):
    """
    <a id="down_sample"></a>
    ### Down-sample

    Blurs each feature channel with the [smoothing layer](#smooth) and then
    halves the spatial resolution with bilinear interpolation.
    This is based on the paper
    [Making Convolutional Networks Shift-Invariant Again](https://arxiv.org/abs/1904.11486).
    """
    def __init__(self):
        super().__init__()
        # Anti-aliasing blur applied before sub-sampling
        self.smooth = Smooth()
    def forward(self, x: torch.Tensor):
        # Blur first so the sub-sampling does not alias
        blurred = self.smooth(x)
        # Halve height and width with bilinear interpolation
        half_size = (blurred.shape[2] // 2, blurred.shape[3] // 2)
        return F.interpolate(blurred, half_size, mode='bilinear', align_corners=False)
class UpSample(nn.Module):
    """
    <a id="up_sample"></a>
    ### Up-sample

    Doubles the spatial resolution with bilinear interpolation and then blurs
    each feature channel with the [smoothing layer](#smooth).
    This is based on the paper
    [Making Convolutional Networks Shift-Invariant Again](https://arxiv.org/abs/1904.11486).
    """
    def __init__(self):
        super().__init__()
        # Bilinear $2 \times$ up-sampling
        self.up_sample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
        # Blur applied after up-sampling
        self.smooth = Smooth()
    def forward(self, x: torch.Tensor):
        # Up-sample first, then smoothen the result
        upsampled = self.up_sample(x)
        return self.smooth(upsampled)
class Smooth(nn.Module):
    """
    <a id="smooth"></a>
    ### Smoothing Layer

    Blurs every channel independently with a fixed, normalized $3 \times 3$
    binomial kernel; replication padding keeps the spatial size unchanged.
    """
    def __init__(self):
        super().__init__()
        # Fixed binomial blurring kernel of shape `[1, 1, 3, 3]`
        weights = torch.tensor([[[[1., 2., 1.],
                                  [2., 4., 2.],
                                  [1., 2., 1.]]]])
        # Normalize so the kernel sums to one (preserves overall intensity)
        weights = weights / weights.sum()
        # Store as a non-trainable parameter (no gradient updates)
        self.kernel = nn.Parameter(weights, requires_grad=False)
        # Replication padding of one pixel on every side
        self.pad = nn.ReplicationPad2d(1)
    def forward(self, x: torch.Tensor):
        # Remember the original shape
        b, c, h, w = x.shape
        # Fold channels into the batch dimension so a single one-channel
        # convolution blurs every channel independently
        flat = x.view(-1, 1, h, w)
        # Pad, then convolve with the fixed kernel
        flat = F.conv2d(self.pad(flat), self.kernel)
        # Restore the original shape
        return flat.view(b, c, h, w)
class EqualizedLinear(nn.Module):
    """
    <a id="equalized_linear"></a>
    ## Learning-rate Equalized Linear Layer

    A linear layer whose weights are [learning-rate equalized](#equalized_weights).
    """
    def __init__(self, in_features: int, out_features: int, bias: float = 0.):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `bias` is the bias initialization constant
        """
        super().__init__()
        # [Learning-rate equalized weights](#equalized_weights)
        self.weight = EqualizedWeight([out_features, in_features])
        # Bias vector, initialized to the constant `bias`
        self.bias = nn.Parameter(torch.full((out_features,), float(bias)))
    def forward(self, x: torch.Tensor):
        # $y = x W^T + b$ with the equalized weights
        return F.linear(x, self.weight(), bias=self.bias)
class EqualizedConv2d(nn.Module):
    """
    <a id="equalized_conv2d"></a>
    ## Learning-rate Equalized 2D Convolution Layer

    A 2D convolution whose weights are [learning-rate equalized](#equalized_weights).
    """
    def __init__(self, in_features: int, out_features: int,
                 kernel_size: int, padding: int = 0):
        """
        * `in_features` is the number of features in the input feature map
        * `out_features` is the number of features in the output feature map
        * `kernel_size` is the size of the convolution kernel
        * `padding` is the padding to be added on both sides of each size dimension
        """
        super().__init__()
        # Padding applied on each side of both spatial dimensions
        self.padding = padding
        # [Learning-rate equalized weights](#equalized_weights)
        self.weight = EqualizedWeight([out_features, in_features, kernel_size, kernel_size])
        # Bias, initialized to ones.
        # NOTE(review): `EqualizedLinear` defaults its bias to zeros; the ones
        # here look deliberate but are worth confirming against the reference code.
        self.bias = nn.Parameter(torch.ones(out_features))
    def forward(self, x: torch.Tensor):
        # Convolve with the equalized weights
        return F.conv2d(x, self.weight(), bias=self.bias, padding=self.padding)
class EqualizedWeight(nn.Module):
    """
    <a id="equalized_weight"></a>
    ## Learning-rate Equalized Weights Parameter

    Implements the equalized learning rate trick from the Progressive GAN paper:
    weights are stored as $\hat{w} \sim \mathcal{N}(0, 1)$ and multiplied by the
    He-initialization constant $c$ every time they are used, $w_i = c \hat{w}_i$.

    Optimizers like Adam normalize gradients by a running mean of their squares,
    so multiplying by $c$ at use-time effectively scales the learning rate of the
    effective weights $w$ by $c$ rather than changing gradient magnitudes.
    """
    def __init__(self, shape: List[int]):
        """
        * `shape` is the shape of the weight parameter
        """
        super().__init__()
        # He initialization constant, $c = 1 / \sqrt{\text{fan-in}}$
        self.c = 1 / math.sqrt(np.prod(shape[1:]))
        # Underlying parameter, drawn from $\mathcal{N}(0, 1)$
        self.weight = nn.Parameter(torch.randn(shape))
    def forward(self):
        # Scale by $c$ on every use
        return self.weight * self.c
class GradientPenalty(nn.Module):
"""
<a id="gradient_penalty"></a>
## Gradient Penalty
    This is the $R_1$ regularization penalty from the paper
[Which Training Methods for GANs do actually Converge?](https://arxiv.org/abs/1801.04406).
$$R_1(\psi) = \frac{\gamma}{2} \mathbb{E}_{p_\mathcal{D}(x)}
\Big[\Vert \nabla_x D_\psi(x)^2 \Vert\Big]$$
That is we try to reduce the L2 norm of gradients of the discriminator with
respect to images, for real images ($P_\mathcal{D}$).
"""
def forward(self, x: torch.Tensor, d: torch.Tensor):
"""
* `x` is $x \sim \mathcal{D}$
* `d` is $D(x)$
"""
# Get batch size
batch_size = x.shape[0]
# Calculate gradients of $D(x)$ with respect to $x$.
# `grad_outputs` is set to $1$ since we want the gradients of $D(x)$,
# and we need to create and retain graph since we have to compute gradients
# with respect to weight on this loss.
gradients, *_ = torch.autograd.grad(outputs=d,
inputs=x,
grad_outputs=d.new_ones(d.shape),
create_graph=True)
# Reshape gradients to calculate the norm
gradients = gradients.reshape(batch_size, -1)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | true |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/original/experiment.py | labml_nn/gan/original/experiment.py | """
---
title: Generative Adversarial Networks experiment with MNIST
summary: This experiment generates MNIST images using multi-layer perceptron.
---
# Generative Adversarial Networks experiment with MNIST
"""
from typing import Any
from torchvision import transforms
import torch
import torch.nn as nn
import torch.utils.data
from labml import tracker, monit, experiment
from labml.configs import option, calculate
from labml_nn.gan.original import DiscriminatorLogitsLoss, GeneratorLogitsLoss
from labml_nn.helpers.datasets import MNISTConfigs
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.helpers.optimizer import OptimizerConfigs
from labml_nn.helpers.trainer import TrainValidConfigs, BatchIndex
def weights_init(m):
    """
    Initialize a module in DCGAN style: linear weights from $\mathcal{N}(0, 0.02)$,
    batch-norm scales from $\mathcal{N}(1, 0.02)$ and batch-norm biases to zero.
    Intended to be used with `nn.Module.apply`.
    """
    kind = m.__class__.__name__
    if 'Linear' in kind:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in kind:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class Generator(nn.Module):
    """
    ### Simple MLP Generator

    Maps a 100-dimensional latent vector to a $28 \times 28$ image through
    three growing linear layers with `LeakyReLU` activations and a final
    $\tanh$ output layer.
    """
    def __init__(self):
        super().__init__()
        # Hidden layer widths
        hidden_sizes = [256, 512, 1024]
        modules = []
        in_size = 100
        for out_size in hidden_sizes:
            modules.append(nn.Linear(in_size, out_size))
            modules.append(nn.LeakyReLU(0.2))
            in_size = out_size
        # Output layer maps to pixel space with a $\tanh$ activation
        modules.append(nn.Linear(in_size, 28 * 28))
        modules.append(nn.Tanh())
        self.layers = nn.Sequential(*modules)
        # DCGAN-style weight initialization
        self.apply(weights_init)
    def forward(self, x):
        # Reshape the flat output to image form `[batch_size, 1, 28, 28]`
        return self.layers(x).view(x.shape[0], 1, 28, 28)
class Discriminator(nn.Module):
    """
    ### Simple MLP Discriminator

    Flattens the image and passes it through three shrinking linear layers
    with `LeakyReLU` activations. The single output is the logit of the
    input being real; apply a sigmoid to obtain the probability.
    """
    def __init__(self):
        super().__init__()
        # Hidden layer widths
        hidden_sizes = [1024, 512, 256]
        modules = []
        in_size = 28 * 28
        for out_size in hidden_sizes:
            modules.append(nn.Linear(in_size, out_size))
            modules.append(nn.LeakyReLU(0.2))
            in_size = out_size
        # Single-logit output layer
        modules.append(nn.Linear(in_size, 1))
        self.layers = nn.Sequential(*modules)
        # DCGAN-style weight initialization
        self.apply(weights_init)
    def forward(self, x):
        # Flatten the image before the linear layers
        return self.layers(x.view(x.shape[0], -1))
class Configs(MNISTConfigs, TrainValidConfigs):
    """
    ## Configurations

    This extends MNIST configurations to get the data loaders and Training and validation loop
    configurations to simplify our implementation.
    """
    # Device to train on (chosen by `DeviceConfigs`)
    device: torch.device = DeviceConfigs()
    # Name of the config option providing the dataset transforms
    dataset_transforms = 'mnist_gan_transforms'
    # Number of training epochs
    epochs: int = 10
    # Whether to checkpoint the models
    is_save_models = True
    # Discriminator and generator models, selected by config option name
    discriminator: nn.Module = 'mlp'
    generator: nn.Module = 'mlp'
    # Separate optimizers for generator and discriminator
    generator_optimizer: torch.optim.Adam
    discriminator_optimizer: torch.optim.Adam
    # Loss functions, selected by config option name
    generator_loss: GeneratorLogitsLoss = 'original'
    discriminator_loss: DiscriminatorLogitsLoss = 'original'
    # Label smoothing amount used by the logits losses
    label_smoothing: float = 0.2
    # The generator is updated once for every `discriminator_k` discriminator updates
    discriminator_k: int = 1
    def init(self):
        """
        Initializations
        """
        # No modules carry extra state that needs checkpointing
        self.state_modules = []
        # Track generator/discriminator losses, and periodically log generated images
        tracker.set_scalar("loss.generator.*", True)
        tracker.set_scalar("loss.discriminator.*", True)
        tracker.set_image("generated", True, 1 / 100)
    def sample_z(self, batch_size: int):
        """
        Sample latent vectors, $$z \sim p(z)$$
        """
        return torch.randn(batch_size, 100, device=self.device)
    def step(self, batch: Any, batch_idx: BatchIndex):
        """
        Take a training step
        """
        # Set model states
        self.generator.train(self.mode.is_train)
        self.discriminator.train(self.mode.is_train)
        # Get MNIST images
        data = batch[0].to(self.device)
        # Increment step in training mode
        if self.mode.is_train:
            tracker.add_global_step(len(data))
        # Train the discriminator
        with monit.section("discriminator"):
            # Get discriminator loss
            loss = self.calc_discriminator_loss(data)
            # Train
            if self.mode.is_train:
                self.discriminator_optimizer.zero_grad()
                loss.backward()
                # Log parameter statistics on the last batch of the epoch
                if batch_idx.is_last:
                    tracker.add('discriminator', self.discriminator)
                self.discriminator_optimizer.step()
        # Train the generator once in every `discriminator_k`
        if batch_idx.is_interval(self.discriminator_k):
            with monit.section("generator"):
                loss = self.calc_generator_loss(data.shape[0])
                # Train
                if self.mode.is_train:
                    self.generator_optimizer.zero_grad()
                    loss.backward()
                    # Log parameter statistics on the last batch of the epoch
                    if batch_idx.is_last:
                        tracker.add('generator', self.generator)
                    self.generator_optimizer.step()
        tracker.save()
    def calc_discriminator_loss(self, data):
        """
        Calculate discriminator loss for a batch of real images `data`
        """
        # $z \sim p(z)$
        latent = self.sample_z(data.shape[0])
        # $D(x)$ for real images
        logits_true = self.discriminator(data)
        # $D(G(z))$; detached so no generator gradients are computed here
        logits_false = self.discriminator(self.generator(latent).detach())
        loss_true, loss_false = self.discriminator_loss(logits_true, logits_false)
        loss = loss_true + loss_false
        # Log stuff
        tracker.add("loss.discriminator.true.", loss_true)
        tracker.add("loss.discriminator.false.", loss_false)
        tracker.add("loss.discriminator.", loss)
        return loss
    def calc_generator_loss(self, batch_size: int):
        """
        Calculate generator loss
        """
        # $z \sim p(z)$
        latent = self.sample_z(batch_size)
        generated_images = self.generator(latent)
        # Discriminator scores; gradients flow back into the generator
        logits = self.discriminator(generated_images)
        loss = self.generator_loss(logits)
        # Log a few generated samples and the loss
        tracker.add('generated', generated_images[0:6])
        tracker.add("loss.generator.", loss)
        return loss
@option(Configs.dataset_transforms)
def mnist_gan_transforms():
    """
    MNIST transforms: convert to a tensor and normalize to $[-1, 1]$,
    matching the generator's $\tanh$ output range.
    """
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])
@option(Configs.discriminator_optimizer)
def _discriminator_optimizer(c: Configs):
    """
    Create the Adam optimizer configuration for the discriminator.
    """
    opt_conf = OptimizerConfigs()
    opt_conf.optimizer = 'Adam'
    opt_conf.parameters = c.discriminator.parameters()
    opt_conf.learning_rate = 2.5e-4
    # Setting exponent decay rate for first moment of gradient,
    # $\beta_1$ to `0.5` is important.
    # Default of `0.9` fails.
    opt_conf.betas = (0.5, 0.999)
    return opt_conf
@option(Configs.generator_optimizer)
def _generator_optimizer(c: Configs):
    """
    Create the Adam optimizer configuration for the generator
    (mirrors `_discriminator_optimizer`).
    """
    opt_conf = OptimizerConfigs()
    opt_conf.optimizer = 'Adam'
    opt_conf.parameters = c.generator.parameters()
    opt_conf.learning_rate = 2.5e-4
    # Setting exponent decay rate for first moment of gradient,
    # $\beta_1$ to `0.5` is important.
    # Default of `0.9` fails.
    opt_conf.betas = (0.5, 0.999)
    return opt_conf
# Register the MLP models and the original GAN losses as config options
calculate(Configs.generator, 'mlp', lambda c: Generator().to(c.device))
calculate(Configs.discriminator, 'mlp', lambda c: Discriminator().to(c.device))
calculate(Configs.generator_loss, 'original', lambda c: GeneratorLogitsLoss(c.label_smoothing).to(c.device))
calculate(Configs.discriminator_loss, 'original', lambda c: DiscriminatorLogitsLoss(c.label_smoothing).to(c.device))
def main():
    """
    Create and run the MNIST GAN experiment.
    """
    # Create configs object
    conf = Configs()
    # Create the experiment
    experiment.create(name='mnist_gan', comment='test')
    # Override configurations
    experiment.configs(conf,
                       {'label_smoothing': 0.01})
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/original/__init__.py | labml_nn/gan/original/__init__.py | """
---
title: Generative Adversarial Networks (GAN)
summary: A simple PyTorch implementation/tutorial of Generative Adversarial Networks (GAN) loss functions.
---
# Generative Adversarial Networks (GAN)
This is an implementation of
[Generative Adversarial Networks](https://arxiv.org/abs/1406.2661).
The generator, $G(\pmb{z}; \theta_g)$ generates samples that match the
distribution of data, while the discriminator, $D(\pmb{x}; \theta_g)$
gives the probability that $\pmb{x}$ came from data rather than $G$.
We train $D$ and $G$ simultaneously on a two-player min-max game with value
function $V(G, D)$.
$$\min_G \max_D V(D, G) =
\mathop{\mathbb{E}}_{\pmb{x} \sim p_{data}(\pmb{x})}
\big[\log D(\pmb{x})\big] +
\mathop{\mathbb{E}}_{\pmb{z} \sim p_{\pmb{z}}(\pmb{z})}
\big[\log (1 - D(G(\pmb{z}))\big]
$$
$p_{data}(\pmb{x})$ is the probability distribution over data,
whilst $p_{\pmb{z}}(\pmb{z})$ probability distribution of $\pmb{z}$, which is set to
gaussian noise.
This file defines the loss functions. [Here](experiment.html) is an MNIST example
with two multilayer perceptron for the generator and discriminator.
"""
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data
class DiscriminatorLogitsLoss(nn.Module):
    """
    ## Discriminator Loss

    Discriminator should **ascend** on the gradient,

    $$\nabla_{\theta_d} \frac{1}{m} \sum_{i=1}^m \Bigg[
    \log D\Big(\pmb{x}^{(i)}\Big) +
    \log \Big(1 - D\Big(G\Big(\pmb{z}^{(i)}\Big)\Big)\Big)
    \Bigg]$$

    $m$ is the mini-batch size and $(i)$ is used to index samples in the mini-batch.
    $\pmb{x}$ are samples from $p_{data}$ and $\pmb{z}$ are samples from $p_z$.
    """
    def __init__(self, smoothing: float = 0.2):
        """
        * `smoothing` is the label smoothing amount
        """
        super().__init__()
        # We use PyTorch Binary Cross Entropy Loss, which is
        # $-\sum\Big[y \log(\hat{y}) + (1 - y) \log(1 - \hat{y})\Big]$,
        # where $y$ are the labels and $\hat{y}$ are the predictions.
        # *Note the negative sign*.
        # We use labels equal to $1$ for $\pmb{x}$ from $p_{data}$
        # and labels equal to $0$ for $\pmb{x}$ from $p_{G}.$
        # Then descending on the sum of these is the same as ascending on
        # the above gradient.
        #
        # `BCEWithLogitsLoss` combines a sigmoid and binary cross entropy loss.
        self.loss_true = nn.BCEWithLogitsLoss()
        self.loss_false = nn.BCEWithLogitsLoss()
        # We use label smoothing because it seems to work better in some cases
        self.smoothing = smoothing
        # Labels are registered as buffers with persistence set to `False`.
        # They are pre-allocated for a batch size of 256 and grown on demand in `forward`.
        self.register_buffer('labels_true', _create_labels(256, 1.0 - smoothing, 1.0), False)
        self.register_buffer('labels_false', _create_labels(256, 0.0, smoothing), False)
    def forward(self, logits_true: torch.Tensor, logits_false: torch.Tensor):
        """
        `logits_true` are logits from $D(\pmb{x}^{(i)})$ and
        `logits_false` are logits from $D(G(\pmb{z}^{(i)}))$

        Returns a tuple of (real loss, fake loss); the caller adds them.
        """
        # Grow the label buffers if the batch is larger than what was pre-allocated
        if len(logits_true) > len(self.labels_true):
            self.register_buffer("labels_true",
                                 _create_labels(len(logits_true), 1.0 - self.smoothing, 1.0, logits_true.device), False)
        if len(logits_false) > len(self.labels_false):
            self.register_buffer("labels_false",
                                 _create_labels(len(logits_false), 0.0, self.smoothing, logits_false.device), False)
        return (self.loss_true(logits_true, self.labels_true[:len(logits_true)]),
                self.loss_false(logits_false, self.labels_false[:len(logits_false)]))
class GeneratorLogitsLoss(nn.Module):
    """
    ## Generator Loss

    Generator should **descend** on the gradient,

    $$\nabla_{\theta_g} \frac{1}{m} \sum_{i=1}^m \Bigg[
    \log \Big(1 - D\Big(G\Big(\pmb{z}^{(i)}\Big)\Big)\Big)
    \Bigg]$$

    In practice this implements the non-saturating variant: generated samples
    are given labels of $1$, so minimizing the BCE maximizes $\log D(G(z))$.
    """
    def __init__(self, smoothing: float = 0.2):
        """
        * `smoothing` is the label smoothing amount
        """
        super().__init__()
        # `BCEWithLogitsLoss` combines a sigmoid and binary cross entropy loss
        self.loss_true = nn.BCEWithLogitsLoss()
        self.smoothing = smoothing
        # We use labels equal to $1$ for $\pmb{x}$ from $p_{G}.$
        # Then descending on this loss is the same as descending on
        # the above gradient.
        # Labels are pre-allocated for a batch size of 256 and grown on demand.
        self.register_buffer('fake_labels', _create_labels(256, 1.0 - smoothing, 1.0), False)
    def forward(self, logits: torch.Tensor):
        """
        `logits` are logits from $D(G(\pmb{z}^{(i)}))$
        """
        # Grow the label buffer if the batch is larger than what was pre-allocated
        if len(logits) > len(self.fake_labels):
            self.register_buffer("fake_labels",
                                 _create_labels(len(logits), 1.0 - self.smoothing, 1.0, logits.device), False)
        return self.loss_true(logits, self.fake_labels[:len(logits)])
def _create_labels(n: int, r1: float, r2: float, device: torch.device = None):
"""
Create smoothed labels
"""
return torch.empty(n, 1, requires_grad=False, device=device).uniform_(r1, r2)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/dcgan/__init__.py | labml_nn/gan/dcgan/__init__.py | """
---
title: Deep Convolutional Generative Adversarial Networks (DCGAN)
summary: A simple PyTorch implementation/tutorial of Deep Convolutional Generative Adversarial Networks (DCGAN).
---
# Deep Convolutional Generative Adversarial Networks (DCGAN)
This is a [PyTorch](https://pytorch.org) implementation of paper
[Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks](https://arxiv.org/abs/1511.06434).
This implementation is based on the [PyTorch DCGAN Tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html).
"""
import torch.nn as nn
from labml import experiment
from labml.configs import calculate
from labml_nn.gan.original.experiment import Configs
class Generator(nn.Module):
    """
    ### Convolutional Generator Network

    A stack of transposed convolutions that grows a 100-channel $1 \times 1$
    latent into a single-channel $28 \times 28$ image (the CelebA-style DCGAN
    generator adapted for MNIST).

    
    """
    def __init__(self):
        super().__init__()
        # Spatial sizes grow $1 \to 3 \to 7 \to 14 \to 28$
        self.layers = nn.Sequential(
            nn.ConvTranspose2d(100, 1024, 3, 1, 0, bias=False),
            nn.BatchNorm2d(1024),
            nn.ReLU(True),
            nn.ConvTranspose2d(1024, 512, 3, 2, 0, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 1, 4, 2, 1, bias=False),
            nn.Tanh()
        )
        # DCGAN weight initialization
        self.apply(_weights_init)
    def forward(self, x):
        # `[batch_size, 100]` -> `[batch_size, 100, 1, 1]`
        latent = x.unsqueeze(-1).unsqueeze(-1)
        # Generate the image
        return self.layers(latent)
class Discriminator(nn.Module):
    """
    ### Convolutional Discriminator Network

    Mirrors the generator: strided convolutions shrink the $28 \times 28$
    single-channel image down to a single logit per sample.
    """
    def __init__(self):
        super().__init__()
        # Spatial sizes shrink $28 \to 14 \to 7 \to 3 \to 1$
        self.layers = nn.Sequential(
            nn.Conv2d(1, 256, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(256, 512, 4, 2, 1, bias=False),
            nn.BatchNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(512, 1024, 3, 2, 0, bias=False),
            nn.BatchNorm2d(1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(1024, 1, 3, 1, 0, bias=False),
        )
        # DCGAN weight initialization
        self.apply(_weights_init)
    def forward(self, x):
        # Flatten the $1 \times 1$ output to `[batch_size, 1]`
        scores = self.layers(x)
        return scores.view(scores.shape[0], -1)
def _weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
# We import the [simple gan experiment](../original/experiment.html) and change the
# generator and discriminator networks
calculate(Configs.generator, 'cnn', lambda c: Generator().to(c.device))
calculate(Configs.discriminator, 'cnn', lambda c: Discriminator().to(c.device))
def main():
    """
    Create and run the MNIST DCGAN experiment.
    """
    conf = Configs()
    experiment.create(name='mnist_dcgan')
    # Use the convolutional networks instead of the default MLPs
    experiment.configs(conf,
                       {'discriminator': 'cnn',
                        'generator': 'cnn',
                        'label_smoothing': 0.01})
    # Start the experiment and run the training loop
    with experiment.start():
        conf.run()
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/wasserstein/experiment.py | labml_nn/gan/wasserstein/experiment.py | """
---
title: WGAN experiment with MNIST
summary: This experiment generates MNIST images using convolutional neural network.
---
# WGAN experiment with MNIST
"""
from labml import experiment
from labml.configs import calculate
# Import configurations from [DCGAN experiment](../dcgan/index.html)
from labml_nn.gan.dcgan import Configs
# Import [Wasserstein GAN losses](./index.html)
from labml_nn.gan.wasserstein import GeneratorLoss, DiscriminatorLoss
# Set configurations options for Wasserstein GAN losses
calculate(Configs.generator_loss, 'wasserstein', lambda c: GeneratorLoss())
calculate(Configs.discriminator_loss, 'wasserstein', lambda c: DiscriminatorLoss())
def main():
    """
    Create and run the Wasserstein DCGAN experiment on MNIST.
    """
    # Create configs object
    conf = Configs()
    # Create experiment
    experiment.create(name='mnist_wassertein_dcgan', comment='test')
    # Override configurations: convolutional networks with Wasserstein losses
    experiment.configs(conf,
                       {
                           'discriminator': 'cnn',
                           'generator': 'cnn',
                           'label_smoothing': 0.01,
                           'generator_loss': 'wasserstein',
                           'discriminator_loss': 'wasserstein',
                       })
    # Start the experiment and run training loop
    with experiment.start():
        conf.run()
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/wasserstein/__init__.py | labml_nn/gan/wasserstein/__init__.py | r"""
---
title: Wasserstein GAN (WGAN)
summary: A simple PyTorch implementation/tutorial of Wasserstein Generative Adversarial Networks (WGAN) loss functions.
---
# Wasserstein GAN (WGAN)
This is an implementation of
[Wasserstein GAN](https://arxiv.org/abs/1701.07875).
The original GAN loss is based on Jensen-Shannon (JS) divergence
between the real distribution $\mathbb{P}_r$ and generated distribution $\mathbb{P}_g$.
The Wasserstein GAN is based on Earth Mover distance between these distributions.
$$
W(\mathbb{P}_r, \mathbb{P}_g) =
\underset{\gamma \in \Pi(\mathbb{P}_r, \mathbb{P}_g)} {\mathrm{inf}}
\mathbb{E}_{(x,y) \sim \gamma}
\Vert x - y \Vert
$$
$\Pi(\mathbb{P}_r, \mathbb{P}_g)$ is the set of all joint distributions, whose
marginal probabilities are $\gamma(x, y)$.
$\mathbb{E}_{(x,y) \sim \gamma} \Vert x - y \Vert$ is the earth mover distance for
a given joint distribution ($x$ and $y$ are probabilities).
So $W(\mathbb{P}_r, \mathbb{P}_g)$ is equal to the least earth mover distance for
any joint distribution between the real distribution $\mathbb{P}_r$ and generated distribution $\mathbb{P}_g$.
The paper shows that Jensen-Shannon (JS) divergence and other measures for the difference between two probability
distributions are not smooth. And therefore if we are doing gradient descent on one of the probability
distributions (parameterized) it will not converge.
Based on Kantorovich-Rubinstein duality,
$$
W(\mathbb{P}_r, \mathbb{P}_g) =
\underset{\Vert f \Vert_L \le 1} {\mathrm{sup}}
\mathbb{E}_{x \sim \mathbb{P}_r} [f(x)]- \mathbb{E}_{x \sim \mathbb{P}_g} [f(x)]
$$
where $\Vert f \Vert_L \le 1$ are all 1-Lipschitz functions.
That is, it is equal to the greatest difference
$$\mathbb{E}_{x \sim \mathbb{P}_r} [f(x)] - \mathbb{E}_{x \sim \mathbb{P}_g} [f(x)]$$
among all 1-Lipschitz functions.
For $K$-Lipschitz functions,
$$
W(\mathbb{P}_r, \mathbb{P}_g) =
\underset{\Vert f \Vert_L \le K} {\mathrm{sup}}
\mathbb{E}_{x \sim \mathbb{P}_r} \Bigg[\frac{1}{K} f(x) \Bigg]
- \mathbb{E}_{x \sim \mathbb{P}_g} \Bigg[\frac{1}{K} f(x) \Bigg]
$$
If all $K$-Lipschitz functions can be represented as $f_w$ where $f$ is parameterized by
$w \in \mathcal{W}$,
$$
K \cdot W(\mathbb{P}_r, \mathbb{P}_g) =
\max_{w \in \mathcal{W}}
\mathbb{E}_{x \sim \mathbb{P}_r} [f_w(x)]- \mathbb{E}_{x \sim \mathbb{P}_g} [f_w(x)]
$$
If $(\mathbb{P}_{g})$ is represented by a generator $$g_\theta (z)$$ and $z$ is from a known
distribution $z \sim p(z)$,
$$
K \cdot W(\mathbb{P}_r, \mathbb{P}_\theta) =
\max_{w \in \mathcal{W}}
\mathbb{E}_{x \sim \mathbb{P}_r} [f_w(x)]- \mathbb{E}_{z \sim p(z)} [f_w(g_\theta(z))]
$$
Now to converge $g_\theta$ with $\mathbb{P}_{r}$ we can gradient descent on $\theta$
to minimize above formula.
Similarly we can find $\max_{w \in \mathcal{W}}$ by ascending on $w$,
while keeping $K$ bounded. *One way to keep $K$ bounded is to clip all weights in the neural
network that defines $f$ clipped within a range.*
Here is the code to try this on a [simple MNIST generation experiment](experiment.html).
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/gan/wasserstein/experiment.ipynb)
"""
import torch.utils.data
from torch import nn
from torch.nn import functional as F
class DiscriminatorLoss(nn.Module):
    """
    ## Discriminator Loss

    The critic ascends on
    $$\mathbb{E}_{x \sim \mathbb{P}_r} [f_w(x)]- \mathbb{E}_{z \sim p(z)} [f_w(g_\theta(z))]$$

    This implementation uses hinge-clipped terms,
    $\mathbb{E}\big[\max(0, 1 - f_w(x))\big]$ and
    $\mathbb{E}\big[\max(0, 1 + f_w(g_\theta(z)))\big]$,
    which keeps $f$ effectively within the $[-1, +1]$ range.
    """
    def forward(self, f_real: torch.Tensor, f_fake: torch.Tensor):
        """
        * `f_real` is $f_w(x)$
        * `f_fake` is $f_w(g_\theta(z))$

        Returns a tuple with the losses for real and fake samples, which the
        caller adds; they are kept separate for logging.
        """
        # Hinge on the real samples: penalize $f_w(x) < 1$
        real_loss = F.relu(1 - f_real).mean()
        # Hinge on the fake samples: penalize $f_w(g_\theta(z)) > -1$
        fake_loss = F.relu(1 + f_fake).mean()
        return real_loss, fake_loss
class GeneratorLoss(nn.Module):
    """
    ## Generator Loss

    The generator descends on
    $$-\frac{1}{m} \sum_{i=1}^m f_w \big( g_\theta(z^{(i)}) \big)$$
    i.e. it tries to make the critic score its samples highly; the
    $\mathbb{E}_{x \sim \mathbb{P}_r}[f_w(x)]$ term does not depend on $\theta$.
    """
    def forward(self, f_fake: torch.Tensor):
        """
        * `f_fake` is $f_w(g_\theta(z))$
        """
        # Maximize the critic's mean score on generated samples
        return -f_fake.mean()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/wasserstein/gradient_penalty/experiment.py | labml_nn/gan/wasserstein/gradient_penalty/experiment.py | """
---
title: WGAN-GP experiment with MNIST
summary: This experiment generates MNIST images using convolutional neural network.
---
# WGAN-GP experiment with MNIST
"""
import torch
from labml import experiment, tracker
# Import configurations from [Wasserstein experiment](../experiment.html)
from labml_nn.gan.wasserstein.experiment import Configs as OriginalConfigs
#
from labml_nn.gan.wasserstein.gradient_penalty import GradientPenalty
class Configs(OriginalConfigs):
    """
    ## Configuration class

    We extend [original GAN implementation](../../original/experiment.html) and override the discriminator (critic) loss
    calculation to include gradient penalty.
    """
    # Gradient penalty coefficient $\lambda$
    gradient_penalty_coefficient: float = 10.0
    # Gradient penalty calculator
    gradient_penalty = GradientPenalty()
    def calc_discriminator_loss(self, data: torch.Tensor):
        """
        This overrides the original discriminator loss calculation and
        includes gradient penalty.
        """
        # Require gradients on $x$ to calculate gradient penalty
        data.requires_grad_()
        # Sample $z \sim p(z)$
        latent = self.sample_z(data.shape[0])
        # $D(x)$
        f_real = self.discriminator(data)
        # $D(G_\theta(z))$; detached so no generator gradients are computed here
        f_fake = self.discriminator(self.generator(latent).detach())
        # Get discriminator losses
        loss_true, loss_false = self.discriminator_loss(f_real, f_fake)
        # Calculate gradient penalties in training mode
        if self.mode.is_train:
            gradient_penalty = self.gradient_penalty(data, f_real)
            tracker.add("loss.gp.", gradient_penalty)
            loss = loss_true + loss_false + self.gradient_penalty_coefficient * gradient_penalty
        # Skip gradient penalty otherwise
        else:
            loss = loss_true + loss_false
        # Log stuff
        tracker.add("loss.discriminator.true.", loss_true)
        tracker.add("loss.discriminator.false.", loss_false)
        tracker.add("loss.discriminator.", loss)
        return loss
def main():
    """Set up and run the WGAN-GP MNIST experiment."""
    # Configuration object with the WGAN-GP critic loss
    configs = Configs()
    # Register the experiment
    experiment.create(name='mnist_wassertein_gp_dcgan')
    # Overrides: convolutional generator/critic with Wasserstein losses,
    # and five critic updates per generator update
    overrides = {
        'discriminator': 'cnn',
        'generator': 'cnn',
        'label_smoothing': 0.01,
        'generator_loss': 'wasserstein',
        'discriminator_loss': 'wasserstein',
        'discriminator_k': 5,
    }
    experiment.configs(configs, overrides)
    # Run the training loop inside the experiment context
    with experiment.start():
        configs.run()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/gan/wasserstein/gradient_penalty/__init__.py | labml_nn/gan/wasserstein/gradient_penalty/__init__.py | r"""
---
title: Gradient Penalty for Wasserstein GAN (WGAN-GP)
summary: >
An annotated PyTorch implementation/tutorial of
Improved Training of Wasserstein GANs.
---
# Gradient Penalty for Wasserstein GAN (WGAN-GP)
This is an implementation of
[Improved Training of Wasserstein GANs](https://arxiv.org/abs/1704.00028).
[WGAN](../index.html) suggests clipping weights to enforce Lipschitz constraint
on the discriminator network (critic).
This and other weight constraints like L2 norm clipping, weight normalization,
L1, L2 weight decay have problems:
1. Limiting the capacity of the discriminator
2. Exploding and vanishing gradients (without [Batch Normalization](../../../normalization/batch_norm/index.html)).
The paper [Improved Training of Wasserstein GANs](https://arxiv.org/abs/1704.00028)
proposes a better way to enforce the Lipschitz constraint: a gradient penalty.
$$\mathcal{L}_{GP} = \lambda \underset{\hat{x} \sim \mathbb{P}_{\hat{x}}}{\mathbb{E}}
\Big[ \big(\Vert \nabla_{\hat{x}} D(\hat{x}) \Vert_2 - 1\big)^2 \Big]
$$
where $\lambda$ is the penalty weight and
\begin{align}
x &\sim \mathbb{P}_r \\
z &\sim p(z) \\
\epsilon &\sim U[0,1] \\
\tilde{x} &\leftarrow G_\theta (z) \\
\hat{x} &\leftarrow \epsilon x + (1 - \epsilon) \tilde{x}
\end{align}
That is we try to keep the gradient norm $\Vert \nabla_{\hat{x}} D(\hat{x}) \Vert_2$ close to $1$.
In this implementation we set $\epsilon = 1$.
Here is the [code for an experiment](experiment.html) that uses gradient penalty.
"""
import torch
import torch.autograd
from torch import nn
class GradientPenalty(nn.Module):
    r"""
    ## Gradient Penalty

    Penalizes the squared deviation of the critic's per-sample gradient norm
    from $1$:
    $\big(\Vert \nabla_{\hat{x}} D(\hat{x}) \Vert_2 - 1\big)^2$
    """

    def forward(self, x: torch.Tensor, f: torch.Tensor):
        r"""
        * `x` is the batch of (interpolated) samples $\hat{x}$;
          here $\hat{x} = x$ since this implementation sets $\epsilon = 1$
        * `f` is the critic output $D(\hat{x})$
        """
        # Per-sample gradients of the critic output with respect to its input.
        # `grad_outputs` is all ones since we want plain gradients of $D$,
        # and `create_graph=True` keeps the graph so this penalty can itself
        # be back-propagated through during the critic update.
        grad_x, *_ = torch.autograd.grad(outputs=f,
                                         inputs=x,
                                         grad_outputs=torch.ones_like(f),
                                         create_graph=True)
        # Flatten each sample's gradient and take its L2 norm
        per_sample = grad_x.reshape(x.shape[0], -1)
        norms = per_sample.norm(p=2, dim=-1)
        # Mean squared distance of the norms from 1
        return ((norms - 1.) ** 2).mean()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/conv_mixer/experiment.py | labml_nn/conv_mixer/experiment.py | """
---
title: Train ConvMixer on CIFAR 10
summary: >
Train ConvMixer on CIFAR 10
---
# Train a [ConvMixer](index.html) on CIFAR 10
This script trains a ConvMixer on CIFAR 10 dataset.
This is not an attempt to reproduce the results of the paper.
The paper uses image augmentations
present in [PyTorch Image Models (timm)](https://github.com/rwightman/pytorch-image-models)
for training. We haven't done this for simplicity - which causes our validation accuracy to drop.
"""
from labml import experiment
from labml.configs import option
from labml_nn.experiments.cifar10 import CIFAR10Configs
class Configs(CIFAR10Configs):
    """
    ## Configurations

    We use [`CIFAR10Configs`](../experiments/cifar10.html) which defines all the
    dataset related configurations, optimizer, and a training loop.
    These values follow the ConvMixer paper's small-model setup.
    """
    # Size of a patch, $p$ (CIFAR-10 images are small, so patches are 2x2)
    patch_size: int = 2
    # Number of channels in patch embeddings, $h$
    d_model: int = 256
    # Number of [ConvMixer layers](#ConvMixerLayer) or depth, $d$
    n_layers: int = 8
    # Kernel size of the depth-wise convolution, $k$
    kernel_size: int = 7
    # Number of classes in the task (10 for CIFAR-10)
    n_classes: int = 10
@option(Configs.model)
def _conv_mixer(c: Configs):
    """
    ### Create model

    Assemble the ConvMixer from its components and move it to the
    configured device.
    """
    from labml_nn.conv_mixer import ConvMixerLayer, ConvMixer, ClassificationHead, PatchEmbeddings

    # Build each component from the configuration values
    mixer_layer = ConvMixerLayer(c.d_model, c.kernel_size)
    patch_embeddings = PatchEmbeddings(c.d_model, c.patch_size, 3)
    head = ClassificationHead(c.d_model, c.n_classes)
    # Combine into the full model
    model = ConvMixer(mixer_layer, c.n_layers, patch_embeddings, head)
    return model.to(c.device)
def main():
    """Set up and run the ConvMixer CIFAR-10 training experiment."""
    # Register the experiment
    experiment.create(name='ConvMixer', comment='cifar10')
    # Configuration object
    conf = Configs()
    # Configuration overrides
    overrides = {
        # Optimizer
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
        # Training epochs and batch size
        'epochs': 150,
        'train_batch_size': 64,
        # Simple image augmentations for training only
        'train_dataset': 'cifar10_train_augmented',
        # Do not augment images for validation
        'valid_dataset': 'cifar10_valid_no_augment',
    }
    experiment.configs(conf, overrides)
    # Register the model for saving/loading
    experiment.add_pytorch_models({'model': conf.model})
    # Run the training loop inside the experiment context
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/conv_mixer/__init__.py | labml_nn/conv_mixer/__init__.py | """
---
title: Patches Are All You Need? (ConvMixer)
summary: >
A PyTorch implementation/tutorial of the paper
"Patches Are All You Need?"
---
# Patches Are All You Need? (ConvMixer)
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Patches Are All You Need?](https://arxiv.org/abs/2201.09792).

ConvMixer is similar to [MLP-Mixer](../transformers/mlp_mixer/index.html).
MLP-Mixer separates mixing of spatial and channel dimensions, by applying an MLP across spatial dimension
and then an MLP across the channel dimension
(spatial MLP replaces the [ViT](../transformers/vit/index.html) attention
and channel MLP is the [FFN](../transformers/feed_forward.html) of ViT).
ConvMixer uses a $1 \times 1$ convolution for channel mixing and a
depth-wise convolution for spatial mixing.
Since it's a convolution instead of a full MLP across the space, it mixes only nearby patches, in
contrast to ViT or MLP-Mixer.
Also, the MLP-mixer uses MLPs of two layers for each mixing and ConvMixer uses a single layer for each mixing.
The paper recommends removing the residual connection across the channel mixing (point-wise convolution)
and having only a residual connection over the spatial mixing (depth-wise convolution).
They also use [Batch normalization](../normalization/batch_norm/index.html) instead
of [Layer normalization](../normalization/layer_norm/index.html).
Here's [an experiment](experiment.html) that trains ConvMixer on CIFAR-10.
"""
import torch
from torch import nn
from labml_nn.utils import clone_module_list
class ConvMixerLayer(nn.Module):
    """
    <a id="ConvMixerLayer"></a>

    ## ConvMixer layer

    One block of the ConvMixer: a depth-wise convolution (spatial mixing,
    with a residual connection) followed by a point-wise convolution
    (channel mixing). The model stacks several of these.
    """

    def __init__(self, d_model: int, kernel_size: int):
        """
        * `d_model` is the number of channels in patch embeddings, $h$
        * `kernel_size` is the size of the kernel of the spatial convolution, $k$
        """
        super().__init__()
        # "Same" padding for an odd kernel, so spatial size is preserved
        same_padding = (kernel_size - 1) // 2
        # Depth-wise convolution: `groups=d_model` gives one group per channel,
        # so every channel is convolved independently of the others
        self.depth_wise_conv = nn.Conv2d(d_model, d_model,
                                         kernel_size=kernel_size,
                                         groups=d_model,
                                         padding=same_padding)
        # Activation and batch-norm after the spatial mixing
        self.act1 = nn.GELU()
        self.norm1 = nn.BatchNorm2d(d_model)
        # Point-wise (1x1) convolution: a linear map over the channel dimension
        self.point_wise_conv = nn.Conv2d(d_model, d_model, kernel_size=1)
        # Activation and batch-norm after the channel mixing
        self.act2 = nn.GELU()
        self.norm2 = nn.BatchNorm2d(d_model)

    def forward(self, x: torch.Tensor):
        # Spatial mixing with a residual connection around it
        spatial = self.norm1(self.act1(self.depth_wise_conv(x)))
        x = spatial + x
        # Channel mixing — no residual here, as the paper recommends
        return self.norm2(self.act2(self.point_wise_conv(x)))
class PatchEmbeddings(nn.Module):
    """
    <a id="PatchEmbeddings"></a>

    ## Get patch embeddings

    Splits the image into non-overlapping patches of side `patch_size`
    and produces an embedding for each patch.
    """

    def __init__(self, d_model: int, patch_size: int, in_channels: int):
        """
        * `d_model` is the number of channels in patch embeddings, $h$
        * `patch_size` is the size of the patch, $p$
        * `in_channels` is the number of channels in the input image (3 for rgb)
        """
        super().__init__()
        # A convolution whose kernel size and stride both equal the patch size
        # is the same as slicing the image into patches and applying a shared
        # linear transformation to each patch.
        self.conv = nn.Conv2d(in_channels, d_model, kernel_size=patch_size, stride=patch_size)
        # Activation function
        self.act = nn.GELU()
        # Batch normalization over the embedding channels
        self.norm = nn.BatchNorm2d(d_model)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input image of shape `[batch_size, channels, height, width]`
        """
        # Patchify + embed, then activation and normalization
        return self.norm(self.act(self.conv(x)))
class ClassificationHead(nn.Module):
    """
    <a id="ClassificationHead"></a>

    ## Classification Head

    Average-pools the patch embeddings over the spatial grid and applies a
    final linear transformation to get class logits.
    """

    def __init__(self, d_model: int, n_classes: int):
        """
        * `d_model` is the number of channels in patch embeddings, $h$
        * `n_classes` is the number of classes in the classification task
        """
        super().__init__()
        # Global average pooling down to a 1x1 spatial grid
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        # Projection to class logits
        self.linear = nn.Linear(d_model, n_classes)

    def forward(self, x: torch.Tensor):
        # Mean over the spatial grid -> `[batch_size, d_model, 1, 1]`
        pooled = self.pool(x)
        # Drop the singleton spatial dimensions -> `[batch_size, d_model]`
        flat = pooled.flatten(1)
        # Class logits
        return self.linear(flat)
class ConvMixer(nn.Module):
    """
    ## ConvMixer

    The full model: patch embeddings, a stack of ConvMixer layers, and a
    classification head.
    """

    def __init__(self, conv_mixer_layer: ConvMixerLayer, n_layers: int,
                 patch_emb: PatchEmbeddings,
                 classification: ClassificationHead):
        """
        * `conv_mixer_layer` is a template [ConvMixer layer](#ConvMixerLayer);
          it is cloned `n_layers` times to build the stack.
        * `n_layers` is the number of ConvMixer layers (or depth), $d$.
        * `patch_emb` is the [patch embeddings layer](#PatchEmbeddings).
        * `classification` is the [classification head](#ClassificationHead).
        """
        super().__init__()
        # Patch embeddings
        self.patch_emb = patch_emb
        # Classification head
        self.classification = classification
        # `n_layers` deep copies of the template [ConvMixer layer](#ConvMixerLayer)
        self.conv_mixer_layers = clone_module_list(conv_mixer_layer, n_layers)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the input image of shape `[batch_size, channels, height, width]`
        """
        # Patch embeddings: `[batch_size, d_model, height / patch_size, width / patch_size]`
        x = self.patch_emb(x)
        # Apply each ConvMixer layer in turn
        for mixer_layer in self.conv_mixer_layers:
            x = mixer_layer(x)
        # Class logits
        return self.classification(x)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/hypernetworks/hyper_lstm.py | labml_nn/hypernetworks/hyper_lstm.py | """
---
title: HyperNetworks - HyperLSTM
summary: A PyTorch implementation/tutorial of HyperLSTM introduced in paper HyperNetworks.
---
# HyperNetworks - HyperLSTM
We have implemented HyperLSTM introduced in paper
[HyperNetworks](https://arxiv.org/abs/1609.09106), with annotations
using [PyTorch](https://pytorch.org).
[This blog post](https://blog.otoro.net/2016/09/28/hyper-networks/)
by David Ha gives a good explanation of HyperNetworks.
We have an experiment that trains a HyperLSTM to predict text on Shakespeare dataset.
Here's the link to code: [`experiment.py`](experiment.html)
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/hypernetworks/experiment.ipynb)
HyperNetworks use a smaller network to generate weights of a larger network.
There are two variants: static hyper-networks and dynamic hyper-networks.
Static HyperNetworks have smaller networks that generate weights (kernels)
of a convolutional network. Dynamic HyperNetworks generate parameters of a
recurrent neural network
for each step. This is an implementation of the latter.
## Dynamic HyperNetworks
In a RNN the parameters stay constant for each step.
Dynamic HyperNetworks generate different parameters for each step.
HyperLSTM has the structure of a LSTM but the parameters of
each step are changed by a smaller LSTM network.
In the basic form, a Dynamic HyperNetwork has a smaller recurrent network that generates
a feature vector corresponding to each parameter tensor of the larger recurrent network.
Let's say the larger network has some parameter $\textcolor{cyan}{W_h}$ the smaller network generates a feature
vector $z_h$ and we dynamically compute $\textcolor{cyan}{W_h}$ as a linear transformation of $z_h$.
For instance $\textcolor{cyan}{W_h} = \langle W_{hz}, z_h \rangle$ where
$W_{hz}$ is a 3-d tensor parameter and $\langle . \rangle$ is a tensor-vector multiplication.
$z_h$ is usually a linear transformation of the output of the smaller recurrent network.
### Weight scaling instead of computing
Large recurrent networks have large dynamically computed parameters.
These are calculated using linear transformation of feature vector $z$.
And this transformation requires an even larger weight tensor.
That is, when $\textcolor{cyan}{W_h}$ has shape $N_h \times N_h$,
$W_{hz}$ will be $N_h \times N_h \times N_z$.
To overcome this, we compute the weight parameters of the recurrent network by
dynamically scaling each row of a matrix of same size.
\begin{align}
d(z) = W_{hz} z_h \\
\\
\textcolor{cyan}{W_h} =
\begin{pmatrix}
d_0(z) W_{hd_0} \\
d_1(z) W_{hd_1} \\
... \\
d_{N_h}(z) W_{hd_{N_h}} \\
\end{pmatrix}
\end{align}
where $W_{hd}$ is a $N_h \times N_h$ parameter matrix.
We can further optimize this when we compute $\textcolor{cyan}{W_h} h$,
as
$$\textcolor{lightgreen}{d(z) \odot (W_{hd} h)}$$
where $\odot$ stands for element-wise multiplication.
"""
from typing import Optional, Tuple
import torch
from torch import nn
from labml_nn.lstm import LSTMCell
class HyperLSTMCell(nn.Module):
    """
    ## HyperLSTM Cell

    For HyperLSTM the smaller network and the larger network both have the LSTM structure.
    This is defined in Appendix A.2.2 in the paper.
    """

    def __init__(self, input_size: int, hidden_size: int, hyper_size: int, n_z: int):
        """
        * `input_size` is the size of the input
        * `hidden_size` is the size of the main (outer) LSTM
        * `hyper_size` is the size of the smaller LSTM that alters the weights of the larger outer LSTM
        * `n_z` is the size of the feature vectors used to alter the LSTM weights

        We use the output of the smaller LSTM to compute $z_h^{i,f,g,o}$, $z_x^{i,f,g,o}$ and
        $z_b^{i,f,g,o}$ using linear transformations.
        We calculate $d_h^{i,f,g,o}$, $d_x^{i,f,g,o}$, and $d_b^{i,f,g,o}$ from these,
        using linear transformations again.
        These are then used to scale the rows of weight and bias tensors of the main LSTM.

        Since the computation of $z$ and $d$ are two sequential linear transformations
        these could be combined into a single linear transformation; they are kept
        separate here to match the description in the paper.
        """
        super().__init__()

        # The hyper (smaller) LSTM consumes the concatenation of the main LSTM's
        # previous hidden state and the current input, so its input size is
        # `hidden_size + input_size`. It outputs its own hidden and cell states.
        self.hyper = LSTMCell(hidden_size + input_size, hyper_size, layer_norm=True)

        # $z_h^{i,f,g,o}$ — all four gate chunks computed by one linear layer.
        # NOTE(review): the paper writes this in terms of the previous hyper
        # hidden state; this implementation (like the original) uses the current one.
        self.z_h = nn.Linear(hyper_size, 4 * n_z)
        # $z_x^{i,f,g,o}$
        self.z_x = nn.Linear(hyper_size, 4 * n_z)
        # $z_b^{i,f,g,o}$ — no bias term; the dynamic bias comes from `d_b` below
        self.z_b = nn.Linear(hyper_size, 4 * n_z, bias=False)

        # $d_h^{i,f,g,o}$: map each `z_h` chunk to per-row scales for the recurrent weights
        d_h = [nn.Linear(n_z, hidden_size, bias=False) for _ in range(4)]
        self.d_h = nn.ModuleList(d_h)
        # $d_x^{i,f,g,o}$: per-row scales for the input weights
        d_x = [nn.Linear(n_z, hidden_size, bias=False) for _ in range(4)]
        self.d_x = nn.ModuleList(d_x)
        # $d_b^{i,f,g,o}$: dynamically computed gate biases
        d_b = [nn.Linear(n_z, hidden_size) for _ in range(4)]
        self.d_b = nn.ModuleList(d_b)

        # The (static) weight matrices $W_h^{i,f,g,o}$ of the main LSTM
        self.w_h = nn.ParameterList([nn.Parameter(torch.zeros(hidden_size, hidden_size)) for _ in range(4)])
        # The (static) weight matrices $W_x^{i,f,g,o}$ of the main LSTM
        self.w_x = nn.ParameterList([nn.Parameter(torch.zeros(hidden_size, input_size)) for _ in range(4)])

        # Layer normalization applied to each gate pre-activation
        self.layer_norm = nn.ModuleList([nn.LayerNorm(hidden_size) for _ in range(4)])
        # Layer normalization applied to the cell state before the output non-linearity
        self.layer_norm_c = nn.LayerNorm(hidden_size)

    def forward(self, x: torch.Tensor,
                h: torch.Tensor, c: torch.Tensor,
                h_hat: torch.Tensor, c_hat: torch.Tensor):
        """
        * `x` is the input at this step
        * `h`, `c` are the hidden and cell states of the main LSTM
        * `h_hat`, `c_hat` are the hidden and cell states of the hyper LSTM

        Returns `(h_next, c_next, h_hat_next, c_hat_next)`.
        """
        # Input to the hyper LSTM: concatenation of the main LSTM's previous
        # hidden state and the current input
        x_hat = torch.cat((h, x), dim=-1)
        # Step the hyper LSTM
        h_hat, c_hat = self.hyper(x_hat, h_hat, c_hat)

        # Compute the four `z` chunks (one per gate) for hidden, input and bias scalings
        z_h = self.z_h(h_hat).chunk(4, dim=-1)
        z_x = self.z_x(h_hat).chunk(4, dim=-1)
        z_b = self.z_b(h_hat).chunk(4, dim=-1)

        # We calculate $i$, $f$, $g$ and $o$ in a loop
        ifgo = []
        for i in range(4):
            # Per-row scales for the recurrent and input weight matrices
            d_h = self.d_h[i](z_h[i])
            d_x = self.d_x[i](z_x[i])
            # Gate pre-activation: scaled recurrent term + scaled input term
            # + dynamic bias. The einsum is a matrix-vector product per batch
            # element; multiplying its result element-wise by `d_h`/`d_x` is
            # equivalent to scaling the rows of the weight matrices.
            y = d_h * torch.einsum('ij,bj->bi', self.w_h[i], h) + \
                d_x * torch.einsum('ij,bj->bi', self.w_x[i], x) + \
                self.d_b[i](z_b[i])
            # Layer-normalize each gate
            ifgo.append(self.layer_norm[i](y))

        # Unpack the input, forget, candidate and output gates
        i, f, g, o = ifgo

        # Standard LSTM cell update
        c_next = torch.sigmoid(f) * c + torch.sigmoid(i) * torch.tanh(g)
        # Hidden state, with layer-normalized cell state
        h_next = torch.sigmoid(o) * torch.tanh(self.layer_norm_c(c_next))

        return h_next, c_next, h_hat, c_hat
class HyperLSTM(nn.Module):
    """
    # HyperLSTM module

    A stack of `n_layers` [HyperLSTM cells](#HyperLSTMCell) unrolled over time.
    """

    def __init__(self, input_size: int, hidden_size: int, hyper_size: int, n_z: int, n_layers: int):
        """
        Create a network of `n_layers` of HyperLSTM.
        """
        super().__init__()
        # Sizes are kept so that zero states can be created lazily in `forward`
        self.n_layers = n_layers
        self.hidden_size = hidden_size
        self.hyper_size = hyper_size
        # The first cell consumes the input directly; deeper cells consume the
        # hidden state of the cell below.
        cells = [HyperLSTMCell(input_size, hidden_size, hyper_size, n_z)]
        cells += [HyperLSTMCell(hidden_size, hidden_size, hyper_size, n_z)
                  for _ in range(n_layers - 1)]
        self.cells = nn.ModuleList(cells)

    def forward(self, x: torch.Tensor,
                state: Optional[Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]] = None):
        """
        * `x` has shape `[n_steps, batch_size, input_size]`
        * `state` is a tuple of the main hidden/cell states (each
          `[n_layers, batch_size, hidden_size]`) and the hyper hidden/cell
          states (each `[n_layers, batch_size, hyper_size]`), or `None`

        Returns the per-step outputs of the last layer and the final state tuple.
        """
        n_steps, batch_size = x.shape[:2]

        if state is None:
            # Fresh zero states, one per layer
            h = [x.new_zeros(batch_size, self.hidden_size) for _ in range(self.n_layers)]
            c = [x.new_zeros(batch_size, self.hidden_size) for _ in range(self.n_layers)]
            h_hat = [x.new_zeros(batch_size, self.hyper_size) for _ in range(self.n_layers)]
            c_hat = [x.new_zeros(batch_size, self.hyper_size) for _ in range(self.n_layers)]
        else:
            h, c, h_hat, c_hat = state
            # Unstack the per-layer states into lists — easier to index and debug
            h, c = list(torch.unbind(h)), list(torch.unbind(c))
            h_hat, c_hat = list(torch.unbind(h_hat)), list(torch.unbind(c_hat))

        # Outputs of the last layer, one per step
        outputs = []
        for step in range(n_steps):
            # The first layer sees the raw input for this step
            layer_input = x[step]
            for idx, cell in enumerate(self.cells):
                # Step this layer's cell and feed its hidden state upward
                h[idx], c[idx], h_hat[idx], c_hat[idx] = \
                    cell(layer_input, h[idx], c[idx], h_hat[idx], c_hat[idx])
                layer_input = h[idx]
            outputs.append(h[-1])

        # Stack step outputs and per-layer states back into tensors
        return torch.stack(outputs), (torch.stack(h), torch.stack(c),
                                      torch.stack(h_hat), torch.stack(c_hat))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/hypernetworks/experiment.py | labml_nn/hypernetworks/experiment.py | import torch
import torch.nn as nn
from labml import experiment
from labml.configs import option
from labml.utils.pytorch import get_modules
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.hypernetworks.hyper_lstm import HyperLSTM
from labml_nn.lstm import LSTM
class AutoregressiveModel(nn.Module):
    """
    ## Auto regressive model

    Wraps a recurrent core (LSTM or HyperLSTM) with a token embedding on the
    input side and a linear projection to vocabulary logits on the output side.
    """

    def __init__(self, n_vocab: int, d_model: int, rnn_model: nn.Module):
        super().__init__()
        # Token embedding module
        self.src_embed = nn.Embedding(n_vocab, d_model)
        # Recurrent core
        self.lstm = rnn_model
        # Projection from hidden states to next-token logits
        self.generator = nn.Linear(d_model, n_vocab)

    def forward(self, x: torch.Tensor):
        # Embed the tokens and run them through the recurrent model
        embeddings = self.src_embed(x)
        res, state = self.lstm(embeddings)
        # Project hidden states to logits of the next token
        return self.generator(res), state
class Configs(NLPAutoRegressionConfigs):
    """
    ## Configurations

    The default configs can and will be over-ridden when we start the experiment
    """

    # The autoregressive wrapper (embedding + recurrent core + projection)
    model: AutoregressiveModel
    # The recurrent core; selected via the `rnn_model` option (`hyper_lstm` or `lstm`)
    rnn_model: nn.Module

    # Embedding/hidden size of the main LSTM
    d_model: int = 512
    # Hidden size of the smaller hyper-network LSTM
    n_rhn: int = 16
    # Size of the feature vectors used to scale the main LSTM weights
    n_z: int = 16
@option(Configs.model)
def autoregressive_model(c: Configs):
    """
    Initialize the auto-regressive model
    """
    # Wrap the configured recurrent core with embedding and output projection
    model = AutoregressiveModel(c.n_tokens, c.d_model, c.rnn_model)
    # Move to the configured device
    return model.to(c.device)
@option(Configs.rnn_model)
def hyper_lstm(c: Configs):
    # Single-layer HyperLSTM recurrent core
    return HyperLSTM(c.d_model, c.d_model, c.n_rhn, c.n_z, 1)
@option(Configs.rnn_model)
def lstm(c: Configs):
    # Plain single-layer LSTM baseline for comparison
    return LSTM(c.d_model, c.d_model, 1)
def main():
    """Train a HyperLSTM character-level language model on Tiny Shakespeare."""
    # Register the experiment
    experiment.create(name="hyper_lstm", comment='')
    # Configuration object
    conf = Configs()
    # Configuration overrides
    overrides = {
        'tokenizer': 'character',
        'text': 'tiny_shakespeare',
        'optimizer.learning_rate': 2.5e-4,
        'optimizer.optimizer': 'Adam',
        'prompt': 'It is',
        'prompt_separator': '',
        'rnn_model': 'hyper_lstm',
        'train_loader': 'shuffled_train_loader',
        'valid_loader': 'shuffled_valid_loader',
        'seq_len': 512,
        'epochs': 128,
        'batch_size': 2,
        'inner_iterations': 25,
    }
    experiment.configs(conf, overrides)
    # Register models for saving and loading
    experiment.add_pytorch_models(get_modules(conf))
    # Run the training loop (`TrainValidConfigs.run`) inside the experiment context
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/hypernetworks/__init__.py | labml_nn/hypernetworks/__init__.py | """
---
title: HyperNetworks
summary: A PyTorch implementation/tutorial of HyperLSTM introduced in paper HyperNetworks.
---
## [HyperLSTM](hyper_lstm.html)
""" | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/model.py | labml_nn/neox/model.py | """
---
title: GPT-NeoX Model Definition
summary: >
This is the model definition of GPT-NeoX.
---
# GPT-NeoX Model
Here is the code for layers of GPT-NeoX model and the code to load
20B checkpoint.
The method `load_state` in the layers load the checkpoints of that layer.
The checkpoint loading helpers are on [`checkpoint.py`](checkpoint.html)
"""
import copy
import math
from typing import Dict, Optional, Set, Callable, Any, Generator, Tuple
import torch
from torch import nn
from torch.cuda.amp import autocast
from labml import monit, logger
from labml.logger import Text
from labml_nn.neox import checkpoint
from labml_nn.neox.utils.cache import get_cache
class NeoXModule(nn.Module):
    """Base class for GPT-NeoX layers that can load partitioned checkpoint weights."""

    def load_state(self, p1: Dict[str, torch.Tensor], p2: Dict[str, torch.Tensor]):
        # Default is a no-op; layers with parameters override this to merge the
        # two checkpoint partitions `p1` and `p2` into their own weights.
        pass
class Embedding(NeoXModule):
    """
    ## Embedding layer

    This is a standard embeddings layer with code to load the checkpoint.
    """

    def __init__(self, n_vocab: int = 50_432, n_hidden: int = 6_144):
        """
        :param n_vocab: is the size of the vocabulary
        :param n_hidden: is the size of the embeddings
        """
        super().__init__()
        # Learned token-embedding table
        self.emb = nn.Embedding(n_vocab, n_hidden)

    def forward(self, x: torch.Tensor):
        """
        :param x: are the token ids of shape `[batch_size, seq_len]`

        Returns embeddings of shape `[batch_size, seq_len, n_hidden]`.
        """
        return self.emb(x)

    def load_state(self, p1: Dict[str, torch.Tensor], p2: Dict[str, torch.Tensor]):
        """
        Load the embedding weights from the two checkpoint partitions.
        """
        with monit.section('Load embedding layer'):
            # The vocabulary dimension is split across the two partitions,
            # so the parameters are merged along dimension 0.
            checkpoint.merge_params_dim_0(self.emb.weight, 'word_embeddings.weight', p1, p2)
class RoPE(nn.Module):
    r"""
    ## Rotary Positional Embeddings

    GPT-NeoX uses [rotary positional embeddings (RoPE)](https://arxiv.org/abs/2104.09864).

    We have an annotated implementation of RoPE
    [here](https://nn.labml.ai/transformers/rope/index.html) with more notes on the theory.
    """

    def __init__(self, d_rope: int, base: float = 10_000.):
        r"""
        :param d_rope: is the number of features for RoPE embeddings
        :param base: is the base for $\theta_i = 10000^{\frac{2(i-1)}{d}}$, which defaults to $10000$
        """
        super().__init__()

        # $\theta_i$ for the features; computed lazily on first forward call
        self.theta = None
        # Cached $\cos m\theta_i$ and $\sin m\theta_i$ tables of shape `[seq_len, 1, d_rope]`
        self.cos_cached = None
        self.sin_cached = None

        # Base for $\theta_i = 10000^{\frac{2(i-1)}{d}}$
        self.base = base
        # Number of features RoPE is applied to; the rest pass through unchanged
        self.d_rope = d_rope

    @staticmethod
    def rotate_half(x: torch.Tensor):
        r"""
        ### Rotate the features

        $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
        """
        x1, x2 = x[..., : x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
        return torch.cat((-x2, x1), dim=-1)

    def forward(self, x: torch.Tensor, offset: int = 0):
        r"""
        :param x: has shape `[..., seq, n_heads, d_k]`
        :param offset: is the starting position of `x`. This is $\gt 0$ when we have
            cached the keys and queries of previous positions
        """
        # The absolute position of the last token in `x`
        seq_len = x.shape[-3] + offset

        # Initialize $\theta$ lazily
        if self.theta is None:
            # $\theta_i = 10000^{\frac{2(i-1)}{d}}$
            theta = 1.0 / (self.base ** (torch.arange(0, self.d_rope, 2).float() / self.d_rope))
            self.theta = theta.to(x.device).to(x.dtype)

        # Rebuild the cos/sin cache only when it is missing, too short, or on the
        # wrong device/dtype.
        #
        # Fix: the cache has shape `[seq_len, 1, d_rope]`, so its sequence length
        # is `shape[0]`. The previous check compared against `shape[1]` (always 1),
        # which forced a full rebuild on every call and defeated the cache.
        if (
                self.cos_cached is None or
                seq_len > self.cos_cached.shape[0] or
                self.cos_cached.device != x.device or
                self.cos_cached.dtype != x.dtype
        ):
            # Position indexes $m$
            seq_idx = torch.arange(seq_len, device=x.device).type_as(self.theta)
            # $m \theta_i$ for every position/feature pair
            idx_theta = torch.einsum("s,d->sd", seq_idx, self.theta)
            # Duplicate so row $m$ covers both halves of the rotated feature vector
            idx_theta2 = torch.cat((idx_theta, idx_theta), dim=-1).to(x.device)

            # Compute $\cos m\theta_i$ and $\sin m\theta_i$ in fp32 for precision
            with autocast(enabled=False):
                idx_theta2 = idx_theta2.float()
                # Add a singleton head dimension -> `[seq_len, 1, d_rope]`
                self.cos_cached = idx_theta2.cos()[:, None, :]
                self.sin_cached = idx_theta2.sin()[:, None, :]

            # Cache in the working dtype
            self.cos_cached = self.cos_cached.to(x.dtype)
            self.sin_cached = self.sin_cached.to(x.dtype)

        # Split the features: RoPE is applied to the first `d_rope` features only
        x_rope, x_pass = x[..., :self.d_rope], x[..., self.d_rope:]

        # Slice the cached tables for the positions covered by `x`
        cos, sin = self.cos_cached[offset: seq_len], self.sin_cached[offset: seq_len]

        # Apply the rotation: pairs of features are rotated by $m\theta_i$
        x_rope = (x_rope * cos) + (self.rotate_half(x_rope) * sin)

        # Concatenate with the features that didn't get RoPE embeddings
        return torch.cat((x_rope, x_pass), dim=-1)
class AttentionLayer(nn.Module):
    """
    ## Attention layer

    Multi-head self-attention with partial rotary (RoPE) embeddings,
    an optional KV-cache for incremental decoding, and an optional
    FlashAttention fast path.
    """

    def __init__(self, n_hidden: int = 6_144, n_heads: int = 64, rope_percentage: float = 0.25,
                 mask_fill: float = -10_000.0, *, is_flash_attention: bool = False):
        """
        :param n_hidden: the number of features in embeddings
        :param n_heads: the number of attention heads
        :param rope_percentage: percentage of features to add RoPE embeddings
        :param mask_fill: masking fill value for attention matrix
        :param is_flash_attention: specifies whether to use
            [FlashAttention](https://github.com/HazyResearch/flash-attention)
        """
        super().__init__()

        self.n_heads = n_heads
        self.mask_fill = mask_fill

        # Linear layer for query, key and value (single projection, concatenated output)
        self.qkv_lin = nn.Linear(n_hidden, n_hidden * 3)
        # Final linear layer
        self.output = nn.Linear(n_hidden, n_hidden)

        # Number of features per head
        d_k = n_hidden // n_heads
        # RoPE embedding module; only the first `int(d_k * rope_percentage)`
        # features of each head get rotary embeddings
        self.rope = RoPE(int(d_k * rope_percentage))

        # Attention scaling factor $1 / \sqrt{d_k}$
        self.scale = 1 / math.sqrt(d_k)

        # To cache causal mask (rebuilt whenever shape or device changes)
        self.causal_mask = None

        # Attention softmax module. The attention matrix is laid out as
        # `[batch, query, key, head]`, so `dim=-2` normalizes over the key axis.
        self.softmax = nn.Softmax(dim=-2)

        # [FlashAttention](https://github.com/HazyResearch/flash-attention)
        if is_flash_attention:
            try:
                from flash_attn.flash_attention import FlashAttention
                self.flash_attention = FlashAttention()
            except ImportError:
                # NOTE(review): assumes `logger` and `Text` are imported at module
                # level (presumably from `labml` / `labml.logger`) — confirm against
                # this file's import block
                logger.log('Install flash attention github.com/HazyResearch/flash-attention. '
                           'Falling back to normal attention', Text.warning)
                self.flash_attention = None
        else:
            self.flash_attention = None

    def _get_mask(self, attn: torch.Tensor):
        """
        #### Calculate the causal mask

        * `attn` has shape [batch_size, query_seq_len, key_seq_len, n_heads]
        """
        # Query and key lengths
        nq, nk = attn.shape[1:3]

        # Create mask if not cached, or if the cached mask has a different
        # shape or lives on a different device
        if (
                self.causal_mask is None or
                self.causal_mask.shape[0] != nq or
                self.causal_mask.shape[1] != nk or
                self.causal_mask.device != attn.device
        ):
            # `True` entries are masked out. The diagonal offset `1 + nk - nq`
            # aligns the last query with the last key, which is what is needed
            # when cached keys make `nk > nq`.
            self.causal_mask = torch.triu(attn.new_ones([nq, nk], dtype=torch.bool), 1 + nk - nq)

        # Return from cache, broadcastable over batch and heads
        return self.causal_mask[None, :, :, None]

    def forward(self, x: torch.Tensor):
        """
        :param x: has shape `[batch_size, seq_len, n_hidden]`
        """
        # Get query, key and value embeddings (all concatenated).
        # The last dimension size will change from n_hidden -> `3 x n_hidden`
        qkv = self.qkv_lin(x)

        # Split into heads by changing the shape to `[batch_size, seq_len, n_heads, 3 * d_k]`
        qkv = qkv.view(*qkv.shape[:-1], self.n_heads, -1)
        # Split into query, key and value each of shape `[batch_size, seq_len, n_heads, d_k]`
        q, k, v = torch.split(qkv, qkv.shape[-1] // 3, dim=-1)

        # If we are caching the states of previous tokens
        # (NOTE(review): assumes `get_cache` is the labml_nn.neox.utils.cache helper — confirm)
        if get_cache().get('use_cache', False):
            # Get the state id's. We use to retrieve previous states and store the next states
            prev_state_id, next_state_id = get_cache().get('state_ids')
            # If there's cache
            if prev_state_id is not None:
                # Get the past keys and values. These will have shape `[batch_size, prev_seq_len, n_heads, d_k]`
                k_past, v_past = get_cache().pop(f'attn_kv_{prev_state_id}')
                # Offset of the current embeddings: RoPE positions continue after
                # the cached prefix
                offset = k_past.shape[1]

                # Add RoPE embeddings
                q = self.rope(q, offset=offset)
                k = self.rope(k, offset=offset)

                # Concatenate the past (cached keys already carry their RoPE rotation)
                k = torch.cat([k_past, k], dim=1)
                v = torch.cat([v_past, v], dim=1)
            else:
                # Add RoPE embeddings
                q = self.rope(q)
                k = self.rope(k)

            # Save the current state
            get_cache().push(f'attn_kv_{next_state_id}', (k, v))
        else:
            # No cache - simply add RoPE embeddings
            q = self.rope(q)
            k = self.rope(k)

        # Use flash attention only when there is no KV-cache prefix
        # (query and key lengths equal) and the head size fits (<= 128)
        if self.flash_attention is not None and q.shape[1] == k.shape[1] and q.shape[-1] <= 128:
            output = self.compute_flash_attention(q, k, v)
        # Otherwise, use normal attention
        else:
            output = self.compute_attention(q, k, v)

        # Reshape from `[batch_size, seq_len, n_heads, d_k]` to `[batch_size, seq_len, n_hidden]`
        output = output.reshape(*x.shape)

        # Final linear layer
        return self.output(output)

    def compute_flash_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
        # Stack them into shape `[batch_size, seq_len, 3, n_heads, d_k]`
        qkv = torch.stack((q, k, v), dim=2)
        d_k = qkv.shape[-1]
        # FlashAttention supports head sizes 32/64/128 only; zero-pad up to the next one
        if d_k <= 32:
            pad = 32 - d_k
        elif d_k <= 64:
            pad = 64 - d_k
        elif d_k <= 128:
            pad = 128 - d_k
        else:
            raise ValueError(f'Head size {d_k} too large for flash attention')

        if pad > 0:
            qkv = torch.cat((qkv, qkv.new_zeros(*qkv.shape[:-1], pad)), dim=-1)

        output, _ = self.flash_attention(qkv, causal=True)
        # The output is of shape `[batch_size, seq_len, n_heads, d_k + padding]`; drop the padding
        output = output[:, :, :, :d_k]

        return output

    def compute_attention(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
        # Disable auto-casting to fp16 for attention computation.
        # NOTE(review): assumes `autocast` is imported at module level
        # (presumably `torch.cuda.amp.autocast`) — confirm
        with autocast(enabled=False):
            if q.dtype == torch.float16:
                # Convert to fp32 if the current dtype is fp16
                attn = torch.einsum('bihk,bjhk->bijh', q.float(), k.float())
            else:
                # Do not cast for bfloat
                attn = torch.einsum('bihk,bjhk->bijh', q, k)

            # Scale attention
            attn = attn * self.scale

            # Get causal mask
            mask = self._get_mask(attn)
            # Apply mask
            attn.masked_fill_(mask, self.mask_fill)

            # Attention softmax (over the key dimension)
            attn = self.softmax(attn)

        # Get attention weighted values (cast attention back to the value dtype)
        output = torch.einsum('bijh,bjhk->bihk', attn.to(v.dtype), v)

        return output
class FFNLayer(nn.Module):
    """
    ## Feedforward Network

    Standard two-layer MLP with a GELU activation in between.
    """

    def __init__(self, n_hidden: int = 6_144, d_ff: int = 0):
        """
        :param n_hidden: is the embedding size
        :param d_ff: is the size of the hidden (expanded) layer; when `0`
            it defaults to `4 * n_hidden`, the standard transformer expansion
        """
        super().__init__()

        # Default hidden size is four times the embedding size
        if not d_ff:
            d_ff = n_hidden * 4

        # Expansion linear layer
        self.dense_h_h4 = nn.Linear(n_hidden, d_ff)
        # GELU activation
        self.activation = nn.GELU()
        # Contraction linear layer
        self.dense_h4_h = nn.Linear(d_ff, n_hidden)

    def forward(self, x: torch.Tensor):
        """
        :param x: has shape `[batch_size, seq_len, n_hidden]`
        :return: a tensor of the same shape as `x`
        """
        x = self.dense_h_h4(x)
        x = self.activation(x)
        x = self.dense_h4_h(x)
        return x
class TransformerLayer(NeoXModule):
    """
    ## Transformer Layer
    """

    def __init__(self, n_hidden: int = 6_144, n_heads: int = 64, *, is_flash_attention: bool = False):
        """
        :param n_hidden: is the embedding size
        :param n_heads: is the number of heads
        :param is_flash_attention: specifies whether to use
            [FlashAttention](https://github.com/HazyResearch/flash-attention)

        *Our implementation doesn't include dropout*.
        """
        super().__init__()

        # Layer normalization before attention
        self.pre_ln_attn = nn.LayerNorm(n_hidden)
        # Layer normalization before FFN
        self.pre_ln_ffn = nn.LayerNorm(n_hidden)

        # Attention layer
        self.attention = AttentionLayer(n_hidden, n_heads, is_flash_attention=is_flash_attention)
        # FFN layer
        self.ffn = FFNLayer(n_hidden)

    def forward(self, x: torch.Tensor):
        """
        :param x: are the embeddings of shape `[batch_size, seq_len, n_hidden]`
        """
        # Residual connection
        residual = x
        # NeoX runs attention and feedforward network in parallel:
        # both branches read the same input instead of being applied sequentially
        attn = self.attention(self.pre_ln_attn(x))
        ffn = self.ffn(self.pre_ln_ffn(x))
        # Add them and the residual connection
        return attn + ffn + residual

    def load_state(self, p1: Dict[str, torch.Tensor], p2: Dict[str, torch.Tensor]):
        """
        Code to load the checkpoint

        :param p1: first model-parallel partition of the checkpoint
        :param p2: second model-parallel partition of the checkpoint
        """
        with monit.section('Load transformer layer'):
            # Attention output transform
            checkpoint.merge_params_sum(self.attention.output.bias, 'attention.dense.bias', p1, p2)
            checkpoint.merge_params_dim_1(self.attention.output.weight, 'attention.dense.weight', p1, p2)

            # Attention query, key and value transform
            checkpoint.merge_params_dim_0(self.attention.qkv_lin.bias, 'attention.query_key_value.bias', p1, p2)
            checkpoint.merge_params_dim_0(self.attention.qkv_lin.weight, 'attention.query_key_value.weight', p1, p2)

            # Layer norm before attention
            checkpoint.merge_params_duplicate(self.pre_ln_attn.bias, 'input_layernorm.bias', p1, p2)
            checkpoint.merge_params_duplicate(self.pre_ln_attn.weight, 'input_layernorm.weight', p1, p2)

            # FFN expansion transform (h -> 4h), partitioned along the output dimension
            checkpoint.merge_params_dim_0(self.ffn.dense_h_h4.bias, 'mlp.dense_h_to_4h.bias', p1, p2)
            checkpoint.merge_params_dim_0(self.ffn.dense_h_h4.weight, 'mlp.dense_h_to_4h.weight', p1, p2)

            # FFN contraction transform (4h -> h), partitioned along the input dimension
            checkpoint.merge_params_sum(self.ffn.dense_h4_h.bias, 'mlp.dense_4h_to_h.bias', p1, p2)
            checkpoint.merge_params_dim_1(self.ffn.dense_h4_h.weight, 'mlp.dense_4h_to_h.weight', p1, p2)

            # Layer norm before FFN
            checkpoint.merge_params_duplicate(self.pre_ln_ffn.bias, 'post_attention_layernorm.bias', p1, p2)
            checkpoint.merge_params_duplicate(self.pre_ln_ffn.weight, 'post_attention_layernorm.weight', p1, p2)
class FinalNorm(NeoXModule):
    """
    ## Final normalization layer

    Layer-normalizes the output of the last transformer layer before readout.
    """

    def __init__(self, n_hidden: int = 6_144):
        """
        :param n_hidden: is the embedding size
        """
        super().__init__()

        # Normalization over the feature dimension
        self.ln = nn.LayerNorm(n_hidden)

    def forward(self, x: torch.Tensor):
        """
        :param x: are the embeddings of shape `[batch_size, seq_len, n_hidden]`
        """
        return self.ln(x)

    def load_state(self, p1: Dict[str, torch.Tensor], p2: Dict[str, torch.Tensor]):
        """
        Code to load the checkpoint
        """
        with monit.section('Load final normalization layer'):
            # Normalization parameters are duplicated across the two partitions
            checkpoint.merge_params_duplicate(self.ln.bias, 'norm.bias', p1, p2)
            checkpoint.merge_params_duplicate(self.ln.weight, 'norm.weight', p1, p2)
class ReadoutLayer(NeoXModule):
    """
    Readout layer

    Projects final embeddings to vocabulary logits.
    """

    def __init__(self, n_hidden: int = 6_144, n_vocab: int = 50_432):
        """
        :param n_hidden: is the embedding size
        :param n_vocab: is the size of the vocabulary
        """
        super().__init__()

        # Projection to logits; no bias term
        self.linear = nn.Linear(n_hidden, n_vocab, bias=False)

    def forward(self, x: torch.Tensor):
        """
        :param x: are the embeddings of shape `[batch_size, seq_len, n_hidden]`
        """
        return self.linear(x)

    def load_state(self, p1: Dict[str, torch.Tensor], p2: Dict[str, torch.Tensor]):
        """
        Code to load the checkpoint
        """
        with monit.section('Load final linear layer'):
            # The readout weight is partitioned along the first (vocabulary) dimension
            checkpoint.merge_params_dim_0(self.linear.weight, 'final_linear.weight', p1, p2)
class LayerGenerator:
    # Cache of template layers; each new transformer layer is a deep copy of the template
    pre_created_layers: Dict[Any, Optional[NeoXModule]]

    def __init__(self, *, n_vocab: int = 50_432, n_hidden: int = 6_144,
                 n_layers: int = 44, n_heads: int = 64,
                 filter_layers: Optional[Set] = None,
                 is_clone_layers: bool = True,
                 dtype: torch.dtype = torch.float,
                 device: torch.device = torch.device('cpu'),
                 is_llm_int8: bool = False,
                 llm_int8_threshold: float = 6.0,
                 is_flash_attention: bool = False
                 ):
        """
        ### Generator to create layers

        The layers are generated in the same order as checkpoints.

        It gives `None` when a layer is not available; we use the layer indices as NeoX and there are two
        transformation layers we don't need in our implementation.

        :param n_vocab: is the number of tokens in the vocabulary
        :param n_hidden: is the number of features in the embeddings
        :param n_layers: is the number of transformer layers
        :param n_heads: is the number of attention heads
        :param filter_layers: are the set of layers to be used. All layers will be used if None.
            This is used to test smaller versions of the model with fewer layers
        :param is_clone_layers: specifies whether to clone the transformer layers (a bit faster)
        :param dtype: is the data type of the model
        :param device: is the device of the model
        :param is_llm_int8: specifies whether to use int8 quantization
        :param llm_int8_threshold: is the threshold $\alpha$ used to separate outlier features
        :param is_flash_attention: specifies whether to use
            [FlashAttention](https://github.com/HazyResearch/flash-attention)
        """
        # Default to all layers: embedding + `n_layers` transformers + final norm + readout
        if filter_layers is None:
            filter_layers = set(range(n_layers + 3))

        self.n_vocab = n_vocab
        self.n_hidden = n_hidden
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.filter_layers = filter_layers
        self.is_clone_layers = is_clone_layers
        self.dtype = dtype
        self.device = device
        self.is_llm_int8 = is_llm_int8
        self.llm_int8_threshold = llm_int8_threshold
        self.is_flash_attention = is_flash_attention

        # Template cache, keyed by layer kind
        self.pre_created_layers = dict(
            transformer_layer=None,
        )

    def _prepare_layer(self, layer: NeoXModule):
        """
        #### Prepares the layer for usage

        We move the layer to the device and convert it to the correct data type

        :param layer: is the layer to prepare
        :return: the prepared layer
        """
        return layer.to(self.device, self.dtype)

    @torch.no_grad()
    def post_load_prepare(self, layer: NeoXModule, *,
                          is_llm_int8: bool = None,
                          device: torch.device = None,
                          llm_int8_threshold: float = None,
                          ):
        """
        <a id="post_load_prepare"></a>

        ### Layer transformations after loading the checkpoint

        This function implements layer transformations after loading the checkpoint.

        Currently, it only applies the int8 quantization.

        :param layer: is the layer to prepare
        :param is_llm_int8: specifies whether to use int8 quantization
        :param device: is the device of the model
        :param llm_int8_threshold: is the threshold $\alpha$ used to separate outlier features
        :return: the prepared layer
        """
        # Get default values if not specified
        if is_llm_int8 is None:
            is_llm_int8 = self.is_llm_int8
        if device is None:
            device = self.device
        if llm_int8_threshold is None:
            llm_int8_threshold = self.llm_int8_threshold

        # Skip if not using int8 quantization
        if not is_llm_int8:
            return layer

        # Only convert the linear layers in the transformer layers
        if not isinstance(layer, TransformerLayer):
            return layer

        # Use `make_llm_int8_linear` defined in [utilities](./utils/llm_int8.html).
        from labml_nn.neox.utils.llm_int8 import make_llm_int8_linear

        # Convert the linear layers
        with monit.section('Convert to int8'):
            layer.attention.output = make_llm_int8_linear(layer.attention.output,
                                                          device=device,
                                                          threshold=llm_int8_threshold)
            layer.attention.qkv_lin = make_llm_int8_linear(layer.attention.qkv_lin,
                                                           device=device,
                                                           threshold=llm_int8_threshold)
            layer.ffn.dense_h_h4 = make_llm_int8_linear(layer.ffn.dense_h_h4,
                                                        device=device,
                                                        threshold=llm_int8_threshold)
            layer.ffn.dense_h4_h = make_llm_int8_linear(layer.ffn.dense_h4_h,
                                                        device=device,
                                                        threshold=llm_int8_threshold)
        #
        return layer

    def _create_and_cache_layer(self, name: str, creator: Callable[[], NeoXModule]):
        """
        #### Creates and caches a layer

        Copying cached layers is faster than initializing new layers because it takes time to
        initialize parameters.

        :param name: is the name of the layer
        :param creator: is the function to create the layer
        :return: the created layer or a copy of the cached layer
        """
        if not self.is_clone_layers:
            return self._prepare_layer(creator())

        # Create the template on first use, then deep-copy it for each layer
        if self.pre_created_layers[name] is None:
            self.pre_created_layers[name] = self._prepare_layer(creator())

        layer = copy.deepcopy(self.pre_created_layers[name])
        return layer

    def _create_transformer_layer(self):
        # One transformer layer; parameters are overwritten by the checkpoint later
        return self._create_and_cache_layer(
            'transformer_layer',
            lambda: TransformerLayer(self.n_hidden, self.n_heads, is_flash_attention=self.is_flash_attention)
        )

    def _create_embedding_layer(self):
        return Embedding(self.n_vocab, self.n_hidden)

    def _create_final_norm_layer(self):
        return FinalNorm(self.n_hidden)

    def _create_readout_layer(self):
        return ReadoutLayer(self.n_hidden, self.n_vocab)

    @torch.no_grad()
    def get_layers(self) -> Generator[Tuple[NeoXModule, Tuple[str, str]], None, None]:
        """
        ### Generator to get layers

        Yields `(layer, (partition-0 file, partition-1 file))` pairs in checkpoint order.
        """
        # Embedding layer (filter index 0)
        if 0 in self.filter_layers:
            with monit.section('Embedding layer'):
                layer = self._prepare_layer(self._create_embedding_layer())
            yield layer, ('layer_00-model_00-model_states.pt', 'layer_00-model_01-model_states.pt')

        # Transformer layers (filter indices 1 .. n_layers; checkpoint files start at layer_02)
        for i in range(self.n_layers):
            # Transformer layer
            if i + 1 in self.filter_layers:
                with monit.section(f'Transformer Layer {i}'):
                    yield self._create_transformer_layer(), \
                          (f'layer_{i + 2 :02d}-model_00-model_states.pt',
                           f'layer_{i + 2 :02d}-model_01-model_states.pt')

        # Final normalization layer.
        # NOTE(review): file names `layer_47`/`layer_48` are hardcoded for the
        # full 44-layer model — presumably wrong if `n_layers != 44`; confirm
        if self.n_layers + 1 in self.filter_layers:
            with monit.section('Final norm layer'):
                layer = self._prepare_layer(self._create_final_norm_layer())
            yield layer, ('layer_47-model_00-model_states.pt', 'layer_47-model_01-model_states.pt')

        # Readout layer
        if self.n_layers + 2 in self.filter_layers:
            with monit.section('Readout layer'):
                layer = self._prepare_layer(self._create_readout_layer())
            yield layer, ('layer_48-model_00-model_states.pt', 'layer_48-model_01-model_states.pt')

        # Drop the templates so their memory can be reclaimed
        for k in self.pre_created_layers.keys():
            self.pre_created_layers[k] = None

    @property
    def total_layers(self):
        """
        ### Returns the total number of layers
        """
        return self.n_layers + 3

    @torch.no_grad()
    def load(self) -> Generator[NeoXModule, None, None]:
        """
        ### Generator to load layers

        Yields each layer with its checkpoint loaded and post-load transforms applied.
        """
        with monit.section("Layers"):
            for i, (layer, files) in enumerate(self.get_layers()):
                if files is not None:
                    layer.load_state(*checkpoint.load_checkpoint_files(files))

                layer = self.post_load_prepare(layer)

                # Progress is capped at 0.99; presumably the section completes
                # only when the loop finishes
                monit.progress(min(0.99, (i + 1) / self.total_layers))
                yield layer
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/checkpoint.py | labml_nn/neox/checkpoint.py | """
---
title: GPT-NeoX Checkpoints
summary: >
Code to download checkpoints and helpers to load them.
---
# GPT-NeoX Checkpoints
"""
from pathlib import Path
from typing import Dict, Union, Tuple, Optional
import torch
from torch import nn
from labml import monit, lab, logger
from labml.logger import Text, inspect
from labml.utils.download import download_file
# Parent url
CHECKPOINTS_URL = 'https://mystic.the-eye.eu/public/AI/models/GPT-NeoX-20B/slim_weights/'
_CHECKPOINTS_DOWNLOAD_PATH: Optional[Path] = None
# Download path
def get_checkpoints_download_path():
    """
    ### Resolve (and memoize) the checkpoint download path

    Prefers the `neox_fast` location and falls back to `neox` when it
    does not exist.
    """
    global _CHECKPOINTS_DOWNLOAD_PATH

    # Resolve only once; later calls return the memoized path
    if _CHECKPOINTS_DOWNLOAD_PATH is None:
        fast_path = lab.get_data_path() / 'neox_fast' / 'slim_weights'
        if fast_path.exists():
            _CHECKPOINTS_DOWNLOAD_PATH = fast_path
        else:
            _CHECKPOINTS_DOWNLOAD_PATH = lab.get_data_path() / 'neox' / 'slim_weights'
        inspect(neox_checkpoint_path=_CHECKPOINTS_DOWNLOAD_PATH)

    return _CHECKPOINTS_DOWNLOAD_PATH
def get_files_to_download(n_layers: int = 44):
    """
    ### Get files to download

    :return: a list of files to be downloaded
    """
    # Checkpoint layer indices: embedding is `0`, transformer layers occupy
    # `2 .. 2 + n_layers - 1`, and the final norm / readout layers are `47` / `48`
    layers = [0, *range(2, 2 + n_layers), 47, 48]

    # Vocabulary and configs
    files = ['20B_tokenizer.json', 'configs/20B.yml', 'latest']
    # Layer checkpoints — two model-parallel partitions per layer
    files += [f'global_step150000/layer_{i :02d}-model_{p :02d}-model_states.pt'
              for i in layers for p in range(2)]
    # Empty states (not used)
    files += [f'global_step150000/mp_rank_{i :02d}_model_states.pt' for i in range(8)]

    return files
def download(n_layers: int = 44):
    """
    ## Download all checkpoint files

    :param n_layers: is the number of transformer layers to fetch checkpoints for
    """
    # Get files to download
    files = get_files_to_download(n_layers)

    # Iterate
    for i, f in monit.enum('Download All', files):
        # Log
        logger.log(['Downloading ', (f'{i + 1 :3d}/{len(files)}', Text.meta), ': ', (f, Text.value)])
        # Download (skips nothing; re-fetches every file from `CHECKPOINTS_URL`)
        download_file(CHECKPOINTS_URL + f, get_checkpoints_download_path() / f)
def load_checkpoint_files(files: Tuple[str, str]):
    """
    ### Load a pair of checkpoint files

    :param files: pair of files to load
    :return: the loaded parameter tensors
    """
    # All layer checkpoints live under the `global_step150000` folder
    checkpoint_path = get_checkpoints_download_path() / 'global_step150000'
    with monit.section('Load checkpoint'):
        return [torch.load(checkpoint_path / f) for f in files]
def merge_params_dim_0(param: Union[nn.Parameter, torch.Tensor], key: str, p1: Dict[str, torch.Tensor],
                       p2: Dict[str, torch.Tensor]):
    """
    ### Load a parameter by merging the partitions along first dimension

    :param param: is the parameter
    :param key: is the name of the parameter
    :param p1: first partition dictionary
    :param p2: second partition dictionary
    """
    first, second = p1[key], p2[key]
    # First partition fills the leading rows; the second fills the rest
    split = first.shape[0]
    param.data[:split] = first
    param.data[split:] = second
def merge_params_dim_1(param: Union[nn.Parameter, torch.Tensor], key: str, p1: Dict[str, torch.Tensor],
                       p2: Dict[str, torch.Tensor]):
    """
    ### Load a parameter by merging the partitions along second dimension

    :param param: is the parameter
    :param key: is the name of the parameter
    :param p1: first partition dictionary
    :param p2: second partition dictionary
    """
    first, second = p1[key], p2[key]
    # First partition fills the leading columns; the second fills the rest
    split = first.shape[1]
    param.data[:, :split] = first
    param.data[:, split:] = second
def merge_params_duplicate(param: Union[nn.Parameter, torch.Tensor], key: str, p1: Dict[str, torch.Tensor],
                           p2: Dict[str, torch.Tensor]):
    """
    ### Load an un-partitioned parameter

    This does a sanity check to make sure both partitions are the same

    :param param: is the parameter
    :param key: is the name of the parameter
    :param p1: first partition dictionary
    :param p2: second partition dictionary
    """
    w1, w2 = p1[key], p2[key]

    # Sum of squared differences over *all* elements.
    # (The previous `sum((w1 - w2) ** 2)` used the builtin `sum`, which reduces
    # only the first dimension, so `.item()` raised for parameters with more
    # than one dimension; `Tensor.sum()` reduces every element.)
    diff = ((w1 - w2) ** 2).sum().item()
    assert diff < 1e-4, f'The partitions do not match: {key}'

    # Average the two copies (they should be identical up to numerical noise)
    param.data[:] = (w1 + w2) / 2.
def merge_params_sum(param: Union[nn.Parameter, torch.Tensor], key: str, p1: Dict[str, torch.Tensor],
                     p2: Dict[str, torch.Tensor]):
    """
    ### Load biases that are partitioned which gets added on reduce

    :param param: is the parameter
    :param key: is the name of the parameter
    :param p1: first partition dictionary
    :param p2: second partition dictionary
    """
    # These partitions are summed on the model-parallel reduce, so merging is addition
    param.data[:] = p1[key] + p2[key]
#
if __name__ == '__main__':
download()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/__init__.py | labml_nn/neox/__init__.py | """
---
title: GPT-NeoX
summary: >
Simple GPT-NeoX implementation
---
# GPT-NeoX
This is a simple implementation of [Eleuther GPT-NeoX](https://arxiv.org/abs/2204.06745) for inference and fine-tuning.
* [Model definition](model.html)
* [Tokenizer](tokenizer.html)
* [Checkpoint downloading and loading helpers](checkpoint.html)
* [Utilities](utils/index.html)
* [LLM.int8() quantization](utils/llm_int8.html)
### [Samples](samples/__init__.py)
* [Generating text](samples/generate.html)
* [Fine-tuning the biases with pipeline-parallel](samples/finetune.html)
* [Generating text with LLM.int8()](samples/llm_int8.html)
### [Evaluation](evaluation/__init__.py)
* [Evaluating half precision model on a single GPU](evaluation/half_precision.html)
* [Evaluating LLM.int8() model](evaluation/llm_int8.html)
**The official [Eleuther](https://www.eleuther.ai)
GPT-NeoX source code is available at [eleutherai/gpt-neox](https://github.com/eleutherai/gpt-neox).**
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/tokenizer.py | labml_nn/neox/tokenizer.py | """
---
title: GPT-NeoX Tokenizer
summary: >
Loads the GPT-NeoX tokenizer
---
# GPT-NeoX Tokenizer
This initializes a Hugging Face tokenizer from the downloaded vocabulary.
"""
from tokenizers import Tokenizer
from labml import lab, monit
@monit.func('Load NeoX Tokenizer')
def get_tokenizer() -> Tokenizer:
    """
    ### Load NeoX Tokenizer

    :return: the tokenizer
    """
    # The vocabulary file is part of the downloaded checkpoint files
    path = lab.get_data_path() / 'neox' / 'slim_weights' / '20B_tokenizer.json'
    # Build a Hugging Face tokenizer from the vocabulary file
    return Tokenizer.from_file(str(path))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/samples/finetune.py | labml_nn/neox/samples/finetune.py | """
---
title: Fine Tune GPT-NeoX
summary: >
Fine tune GPT-NeoX biases with Fairscale pipeline parallel module
---
# Fine Tune GPT-NeoX
This shows how to fine tune GPT-NeoX with pipeline parallelism.
"""
import fairscale
import torch
import torch.nn as nn
import torch.utils.data
import torch.utils.data
import typing
from torch.utils.data import DataLoader, RandomSampler
from labml import experiment, monit, tracker, lab
from labml.configs import option
from labml.logger import inspect
from labml_nn.neox.utils.text_dataset import get_training_data
from labml_nn.neox.utils.finetune import FineTuneBiases
from labml_nn.neox.model import LayerGenerator, NeoXModule
from labml_nn.neox.utils import balance_layers_simple
from labml_nn.neox.utils.trainer import PipelineParallelTrainerConf
@option(PipelineParallelTrainerConf.layers, 'PipelineBiases')
def neox_layers(c: PipelineParallelTrainerConf):
    """
    ### Load GPT-NeoX layers

    Materializes all layers from the checkpoint via `LayerGenerator`.
    """
    return list(LayerGenerator(is_clone_layers=c.is_clone_layers,
                               filter_layers=c.filter_layers,
                               dtype=c.dtype,
                               ).load())
@option(PipelineParallelTrainerConf.fine_tuner, 'PipelineBiases')
def fine_tune_biases(c: PipelineParallelTrainerConf):
    """
    ### Create fine tuner for biases
    """
    fine_tuner = FineTuneBiases(typing.cast(typing.List[NeoXModule], c.layers))
    # Mark biases as trainable; all other parameters stay frozen
    fine_tuner.set_trainable_params()

    #
    return fine_tuner
@option(PipelineParallelTrainerConf.model, 'PipelineBiases')
def pipe_model(c: PipelineParallelTrainerConf):
    """
    ### Create pipeline parallel model
    """
    if c.is_checkpointing:
        # Activation checkpointing is not implemented for this setup
        raise NotImplementedError()
    else:
        layers = c.layers

    # Make sure the finetuner is initialized
    _ = c.fine_tuner

    # Create the Pipe module
    with monit.section('Pipe'):
        # Get the layer distribution across GPUs
        balance = balance_layers_simple(len(layers), c.n_gpus)
        inspect(balance=balance)
        # Devices for each GPU
        devices = [torch.device(f'cuda:{i}') for i in range(c.n_gpus)]
        # Create Fairscale Pipe module; `chunks` is the number of micro-batches
        pipe_model = fairscale.nn.Pipe(nn.Sequential(*layers),
                                       balance=balance,
                                       devices=devices,
                                       chunks=c.chunks)

    #
    return pipe_model
@option(PipelineParallelTrainerConf.train_loader)
def tiny_shakespeare(c: PipelineParallelTrainerConf):
    """
    #### Tiny Shakespeare dataset
    """
    dataset = get_training_data(c.max_seq_len)

    # Sample with replacement, so an epoch is not bounded by the dataset size
    return DataLoader(dataset,
                      batch_size=c.batch_size,
                      sampler=RandomSampler(dataset, replacement=True))
def main():
    """Entry point: fine-tune GPT-NeoX biases with pipeline parallelism."""
    # Create experiment
    experiment.create(name='pipe_neox_biases',
                      writers={'screen', 'web_api'})

    # Initialize configs
    conf = PipelineParallelTrainerConf()
    experiment.configs(conf, {
        'learning_rate': 3e-4,
        'is_checkpointing': False,
        'max_seq_len': 128,
        'batch_size': 64,
        'chunks': 8,
    })

    # Start the experiment
    with experiment.start():
        # Initialize the model. Do this before the loop for cleaner logs.
        _ = conf.model

        # Train
        for epoch in monit.loop(conf.epochs):
            conf.train_epoch()
            tracker.new_line()

        # Persist only the fine-tuned biases
        torch.save(conf.fine_tuner.state_dict(), str(lab.get_data_path() / 'fine_tune.pt'))
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/samples/generate.py | labml_nn/neox/samples/generate.py | """
---
title: Generate Text with GPT-NeoX
summary: >
Generate Text with GPT-NeoX
---
# Generate Text with GPT-NeoX
This shows how to generate text from GPT-NeoX with a single GPU.
This needs a GPU with more than 45GB memory.
"""
# Imports
from typing import List
import torch
from torch import nn
from labml import monit
from labml_nn.neox.model import LayerGenerator
from labml_nn.neox.utils import get_tokens, print_tokens
from labml_nn.neox.utils.cache import get_cache
# List of layers to load. This is used for testing.
# You can assign a subset of layers like `{0, 1}` so that it only loads
# the first to transformer layers.
LAYERS = None
# Prompt to complete
PROMPT = 'Einstein was born in the German Empire, but moved to Switzerland in 1895, forsaking his German'
def infer(model: nn.Module, ids: List[int], device: torch.device):
    """
    ### Predict the next token

    :param model: is the model
    :param ids: are the input token ids
    :param device: is the device of the model
    """
    with torch.no_grad():
        # Build a batch of one sequence on the target device
        batch = torch.tensor(ids, device=device)[None, :]
        # Run the model to get logits
        logits = model(batch)
        # Greedy decoding: highest-scoring token at every position
        return logits[0].argmax(dim=-1).tolist()
def generate():
    """
    ## Generate text
    """
    # Setup [cache](../utils/cache.html) to cache intermediate key/value pairs for faster generation
    cache = get_cache()
    cache.set('use_cache', True)

    # Device
    device = torch.device('cuda:0')

    # Load layers
    layers = list(LayerGenerator(is_clone_layers=True,
                                 filter_layers=LAYERS,
                                 dtype=torch.float16,
                                 device=device,
                                 ).load())
    model = nn.Sequential(*layers)

    # Get token ids
    ids = get_tokens(PROMPT)

    # Run the model over the whole prompt.
    # State ids are `(previous, next)`; `None` means there is no cached state yet.
    cache.set('state_ids', (None, 1))
    with monit.section('Infer'):
        next_token = infer(model, ids, device)[-1]

    # Append the predicted token
    ids += [next_token]

    # Predict 100 tokens
    for i in range(1, 100):
        # Set the state to use cached activations
        cache.set('state_ids', (i, i + 1))
        # Get next token. Note that we only feed the last token to the model because
        # we cache the key/value pairs of previous tokens.
        with monit.section('Infer'):
            next_token = infer(model, [next_token], device)[-1]
        # Append the predicted token
        ids += [next_token]
        # Print the tokens generated so far
        print_tokens(ids, [ids])
#
if __name__ == '__main__':
generate()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/samples/__init__.py | labml_nn/neox/samples/__init__.py | """
---
title: Samples
summary: >
Samples for inference and fine-tuning
---
# Samples
* [Generating text](generate.html)
* [Fine tuning the biases with pipeline-parallel training](finetune.html)
""" | python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/samples/llm_int8.py | labml_nn/neox/samples/llm_int8.py | """
---
title: Generate Text with GPT-NeoX using LLM.int8() quantization
summary: >
Generate Text with GPT-NeoX using LLM.int8() quantization
---
# Generate Text with GPT-NeoX using LLM.int8() quantization
This shows how to generate text from GPT-NeoX using [LLM.int8() quantization](../utils/llm_int8.html).
This needs a GPU with 24GB memory.
"""
import torch
from torch import nn
from labml import monit
from labml_nn.neox.model import LayerGenerator
from labml_nn.neox.samples.generate import PROMPT, infer
from labml_nn.neox.utils import get_tokens, print_tokens
from labml_nn.neox.utils.cache import get_cache
def generate():
    """
    ## Generate text
    """
    # Setup [cache](../utils/cache.html) to cache intermediate key/value pairs for faster generation
    cache = get_cache()
    cache.set('use_cache', True)

    # Device
    device = torch.device('cuda:0')

    # Load layers in float16 into CPU. We convert the layers to int8 later, because doing that
    # on the fly after loading layers to GPU causes CUDA memory fragmentation
    # (about 3GB memory can get lost due to fragmentation).
    layer_generator = LayerGenerator(is_clone_layers=True,
                                     dtype=torch.float16,
                                     device=torch.device('cpu'),
                                     is_llm_int8=False,
                                     )
    layers = list(layer_generator.load())

    # Convert each layer to int8 on CPU, then move it to the GPU.
    # This reduces CUDA memory fragmentation.
    for layer in monit.iterate('Convert to int8', layers, is_children_silent=True):
        layer_generator.post_load_prepare(layer,
                                          device=device,
                                          is_llm_int8=True,
                                          llm_int8_threshold=6.0,
                                          )
        layer.to(device)

    # Create `nn.Sequential` model
    model = nn.Sequential(*layers)

    # Clear cache and print memory summary for debugging
    torch.cuda.empty_cache()
    print(torch.cuda.memory_summary())

    # Get token ids
    ids = get_tokens(PROMPT)

    # Run the model.
    # We use the [`infer`](generate.html) function defined in [`generate.py`](generate.html)
    cache.set('state_ids', (None, 1))
    with monit.section('Infer'):
        next_token = infer(model, ids, device)[-1]

    # Append the predicted token
    ids += [next_token]

    # Predict 100 tokens
    for i in range(1, 100):
        # Set the state to use cached activations
        cache.set('state_ids', (i, i + 1))
        # Get next token. Note that we only feed the last token to the model because
        # we cache the key/value pairs of previous tokens.
        with monit.section('Infer'):
            next_token = infer(model, [next_token], device)[-1]
        # Append the predicted token
        ids += [next_token]
        # Print the tokens generated so far
        print_tokens(ids, [ids])
#
if __name__ == '__main__':
generate()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/utils/text_dataset.py | labml_nn/neox/utils/text_dataset.py | """
---
title: Text Dataset for GPT-NeoX
summary: >
Loads text datasets to fine-tune GPT-NeoX
---
# Text Dataset for GPT-NeoX
"""
from pathlib import PurePath, Path
from typing import Optional, List
import torch
import torch.utils.data
from labml import lab
from labml import monit
from labml.logger import inspect
from labml.utils.download import download_file
from labml_nn.neox.tokenizer import get_tokenizer
def load_text(path: PurePath, url: Optional[str] = None, *, filter_subset: Optional[int] = None):
    """
    ### Load text file

    :param path: is the location of the text file
    :param url: is the URL to download the file from
    :param filter_subset: is the number of characters to filter.
     Use this during testing when trying large datasets
    :return: the text content
    """
    path = Path(path)

    # Fetch the file if it is not already on disk
    if not path.exists():
        if url:
            download_file(url, path)
        else:
            raise FileNotFoundError(str(path))

    # Read the full contents of the file
    with monit.section("Load data"):
        with open(str(path), 'r') as f:
            text = f.read()

    # Optionally keep only a prefix of the text
    return text[:filter_subset] if filter_subset else text
class NeoXDataset(torch.utils.data.Dataset):
    """
    ## Dataset for fine-tuning GPT-NeoX

    This is not optimized to very large datasets.
    """

    def __init__(self, tokens: List[int], seq_len: int):
        """
        :param tokens: is the list of token ids
        :param seq_len: is the sequence length of a single training sample
        """

        self.seq_len = seq_len
        # Number of samples. Each sample needs `seq_len` input tokens plus one
        # extra token for the shifted target, so only `(len(tokens) - 1) // seq_len`
        # complete samples fit. (The previous `len(tokens) // seq_len` produced a
        # final target that was one token short whenever `len(tokens)` was an
        # exact multiple of `seq_len`.) `max(..., 0)` guards against empty input.
        n_samples = max((len(tokens) - 1) // seq_len, 0)
        self.n_samples = n_samples
        # Truncate to the tokens actually used (`+ 1` for the final target token)
        tokens = tokens[:n_samples * seq_len + 1]
        # Create a PyTorch tensor
        self.tokens = torch.tensor(tokens)

    def __len__(self):
        """Number of samples"""
        return self.n_samples

    def __getitem__(self, idx: int):
        """
        ### Get a sample

        :param idx: is the index of the sample
        :return: the input and the target (the input shifted by one token)
        """
        offset = idx * self.seq_len
        return self.tokens[offset:offset + self.seq_len], self.tokens[offset + 1:offset + 1 + self.seq_len]
DATASETS = {
'tiny_shakespeare': {
'file': 'tiny_shakespeare.txt',
'url': 'https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt'
}
}
def get_training_data(seq_len: int = 32, dataset_name: str = 'tiny_shakespeare', truncate: int = -1):
    """
    ### Load Dataset

    :param seq_len: is the sequence length of a single training sample
    :param dataset_name: is the name of the dataset
    :return: the dataset
    """
    spec = DATASETS[dataset_name]

    # Load the raw text, downloading it if necessary
    text = load_text(lab.get_data_path() / spec['file'], spec['url'])

    # Tokenize the whole text
    tokens = get_tokenizer().encode_batch([text])[0]

    # Optionally keep only the first `truncate * seq_len` token ids
    token_ids = tokens.ids
    if truncate > 0:
        token_ids = token_ids[:truncate * seq_len]

    return NeoXDataset(token_ids, seq_len)
def _test():
    """Smoke test: load the default dataset and report the token count."""
    ds = get_training_data()
    inspect(tokens=len(ds.tokens))
#
if __name__ == '__main__':
_test()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/utils/finetune.py | labml_nn/neox/utils/finetune.py | from typing import List, Dict
import torch
from torch import nn
from labml_nn.neox.model import TransformerLayer, NeoXModule
class FineTuner:
    """
    Base class that selects which GPT-NeoX parameters are fine-tuned.

    Subclasses implement `get_layer_trainable_params` to pick the
    parameters of a single layer.
    """
    def __init__(self, layers: List[NeoXModule]):
        self.layers = layers

    def get_trainable_params(self) -> Dict[str, nn.Parameter]:
        """Collect trainable parameters of all layers, keyed by a unique name."""
        params = {}
        for idx, layer in enumerate(self.layers):
            params.update(self.get_layer_trainable_params(layer, prefix=f'layer_{idx:02d}'))
        return params

    def get_layer_trainable_params(self, layer: NeoXModule, prefix: str) -> Dict[str, nn.Parameter]:
        """Return the trainable parameters of one layer; implemented by subclasses."""
        raise NotImplementedError

    def set_trainable_params(self):
        """Freeze every layer, then re-enable gradients only on the selected parameters."""
        for layer in self.layers:
            # Set `requires_grad` to `False` for the entire layer.
            layer.requires_grad_(False)

        # Re-enable gradients on the chosen parameters only
        for param in self.get_trainable_params().values():
            param.requires_grad_(True)

    def state_dict(self):
        """Trainable parameters as CPU tensors, for checkpointing."""
        return {name: param.data.cpu() for name, param in self.get_trainable_params().items()}

    def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):
        """Load checkpointed values into the trainable parameters in place."""
        params = self.get_trainable_params()
        for name, param in params.items():
            param.data[:] = state_dict[name].to(param.data.device)

        # The checkpoint must not contain unexpected entries
        for name in state_dict.keys():
            assert name in params, name
class FineTuneBiases(FineTuner):
    """
    Fine-tune only the bias parameters of transformer layers.
    """
    def get_layer_trainable_params(self, layer: NeoXModule, prefix: str) -> Dict[str, nn.Parameter]:
        # Only transformer layers contribute trainable biases
        if not isinstance(layer, TransformerLayer):
            return {}

        # No need to train the mlp bias because we are adding it with attention output
        return {
            f'{prefix}.attention.output.bias': layer.attention.output.bias,
            f'{prefix}.attention.qkv_lin.bias': layer.attention.qkv_lin.bias,
            f'{prefix}.ffn.dense_h_h4.bias': layer.ffn.dense_h_h4.bias,
        }
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/utils/trainer.py | labml_nn/neox/utils/trainer.py | from typing import Optional, Set, List
import torch.nn as nn
import torch.optim
import torch.utils.data
from torch.cuda import amp
from torch.cuda.amp import GradScaler
from labml import monit, tracker
from labml.configs import BaseConfigs, option
from labml_nn.neox.utils.finetune import FineTuner
def get_trainable_params(model: nn.Module):
    """
    ### Get trainable parameters

    :param model: is the model to train
    :return: a list of parameters for training
    """
    # Keep only the parameters that require gradients
    return [param for param in model.parameters() if param.requires_grad]
class TrainerConf(BaseConfigs):
    """
    ## Trainer configurations

    Configurable training loop for fine-tuning GPT-NeoX layers.
    """
    # Model to train
    model: nn.Module
    # Individual layers of the model (used by pipeline-parallel subclasses)
    layers: List[nn.Module]
    # Optimizer; defaults to the `'Adam'` option below
    optimizer: torch.optim.Optimizer = 'Adam'
    # Training data loader
    train_loader: torch.utils.data.DataLoader
    # Validation data loader; `None` skips validation.
    # Fix: the original declaration had a trailing comma, which made the
    # default the tuple `(None,)` — truthy and not `None` — so
    # `get_iterators` added a bogus 'valid' iterator even with no loader set.
    valid_loader: Optional[torch.utils.data.DataLoader] = None
    # Device to train on
    device: torch.device = torch.device('cuda:0')
    # Gradient scaler for mixed precision; defaults to the `'Default'` option below
    scaler: Optional[GradScaler] = 'Default'
    # Whether to use automatic mixed precision
    is_amp: bool = True
    # Data type for training
    dtype: torch.dtype = torch.float16
    # Whether to clone layers when loading — presumably forwarded to the
    # layer generator by subclasses; confirm against callers
    is_clone_layers: bool = True
    # Loss function
    loss_func: nn.Module = nn.CrossEntropyLoss()
    # Number of checkpoints to save per epoch (`0` disables)
    checkpoints_per_epoch: int = 0
    # Number of samples to generate per epoch (`0` disables)
    samples_per_epoch: int = 0
    # Gradient clipping norm (`None` disables clipping)
    grad_norm: Optional[float] = 1.0
    # Learning rate
    learning_rate: float = 3e-4
    # Maximum sequence length
    max_seq_len: int = 1024
    # Batch size
    batch_size: int = 64
    # Number of epochs
    epochs: int = 16
    # Number of available GPUs
    n_gpus: int = torch.cuda.device_count()
    # Subset of layers to load (`None` for all)
    filter_layers: Optional[Set] = None

    def get_loss(self, sample, dataset_split: str):
        """
        ### Compute the loss for one batch

        :param dataset_split: train/valid
        :param sample: is the sample
        :return: the loss, output and the target
        """
        data, target = sample

        # Forward pass
        with monit.section('Forward pass'):
            output = self.model(data.to(self.device))
        # Move targets to the same device as output
        target = target.to(output.device)
        # Calculate loss
        loss = self.loss_func(output.view(target.numel(), -1), target.view(-1))

        return loss, output, target

    def train(self):
        """Run `train_epoch` for `epochs` epochs."""
        for epoch in monit.loop(self.epochs):
            self.train_epoch()
            tracker.new_line()

    def sample(self, idx):
        """Generate a sample; no-op here, override in subclasses."""
        pass

    def save_checkpoint(self, idx):
        """Save a checkpoint; no-op here, override in subclasses."""
        pass

    def get_iterators(self):
        """
        ### Build the iterators mixed together during an epoch

        Returns (name-or-callable, iterable) pairs consumed by `monit.mix`.
        """
        # Iterate through the batches
        iterators = [('train', self.train_loader)]
        if self.valid_loader is not None:
            iterators.append(('valid', self.valid_loader))
        if self.samples_per_epoch > 0:
            iterators.append((self.sample, list(range(self.samples_per_epoch))))
        if self.checkpoints_per_epoch > 0:
            iterators.append((self.save_checkpoint, list(range(self.checkpoints_per_epoch))))

        return iterators

    def train_epoch(self):
        """
        ### Train for one epoch
        """
        # Set model for train
        self.model.train()
        iterators = self.get_iterators()
        for split_name, sample in monit.mix(1024, *iterators):
            if split_name == 'train':
                # Set gradients to zero
                self.optimizer.zero_grad()
                tracker.add_global_step()

            # Only track gradients on the training split
            with torch.set_grad_enabled(split_name == 'train'):
                if self.is_amp:
                    # Forward pass under autocast
                    with amp.autocast():
                        loss, output, target = self.get_loss(sample, split_name)
                else:
                    loss, output, target = self.get_loss(sample, split_name)

                # Get predictions
                pred = output.argmax(dim=-1)
                # Accuracy over non-padding positions (`-100` is the ignored label).
                # Both counts use `.item()` so the tracked value is a plain float.
                accuracy = pred.eq(target).sum().item() / (target != -100).sum().item()
                tracker.add({f'loss.{split_name}': loss, f'acc.{split_name}': accuracy * 100})

            if split_name == 'train':
                if self.scaler is not None:
                    # Scale the loss before the backward pass
                    loss = self.scaler.scale(loss)
                # Backward pass
                with monit.section('Backward pass'):
                    loss.backward()

                # Optimize
                with monit.section('Optimize'):
                    if self.scaler is None:
                        self.optimizer.step()
                    else:
                        # Unscale before clipping so the norm is in true units
                        self.scaler.unscale_(self.optimizer)
                        if self.grad_norm is not None:
                            torch.nn.utils.clip_grad_norm_(get_trainable_params(self.model), self.grad_norm)
                        self.scaler.step(self.optimizer)
                        self.scaler.update()

                tracker.save()
@option(TrainerConf.optimizer, 'Adam')
def adam_optimizer(c: TrainerConf):
    """Adam optimizer; the fp16-aware variant is used when training in float16."""
    params = get_trainable_params(c.model)
    if c.dtype == torch.float32:
        return torch.optim.Adam(params, lr=c.learning_rate)
    if c.dtype == torch.float16:
        from labml_nn.optimizers.adam_fp16 import AdamFP16
        return AdamFP16(params, lr=c.learning_rate)
    raise NotImplementedError()
@option(TrainerConf.optimizer, 'SGD')
def sgd_optimizer(c: TrainerConf):
    """Plain SGD over the trainable parameters."""
    trainable = get_trainable_params(c.model)
    return torch.optim.SGD(trainable, lr=c.learning_rate)
@option(TrainerConf.scaler, 'Default')
def grad_scaler(c: TrainerConf):
    """Gradient scaler for AMP training; `None` when AMP is disabled."""
    if not c.is_amp:
        return None

    if c.dtype != torch.float16:
        return GradScaler()

    # fp16 training needs the scaler variant that keeps fp32 master copies
    from labml_nn.optimizers.adam_fp16 import GradScalerFP16
    return GradScalerFP16()
class PipelineParallelTrainerConf(TrainerConf):
    """
    ## Pipeline-parallel trainer configurations
    """
    # Whether to use gradient checkpointing
    is_checkpointing: bool = False
    # Number of chunks for the pipeline (presumably micro-batches — confirm against usage)
    chunks: int
    # Fine-tuner that selects which parameters to train
    fine_tuner: FineTuner
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/utils/__init__.py | labml_nn/neox/utils/__init__.py | """
---
title: Utilities and Helpers
summary: >
Utilities and helper functions
---
# Utilities and Helpers
* [Cache for intermediate activations (for faster inference)](cache.html)
* [Tools for finetuning](finetune.html)
* [Trainer](trainer.html)
* [Text dataset](text_dataset.html)
"""
import typing
from typing import List, Optional
import torch
from labml import logger
from labml.logger import Text
from labml_nn.neox.tokenizer import get_tokenizer
if typing.TYPE_CHECKING:
from tokenizers import Tokenizer
# Tokenizer singleton
_TOKENIZER: Optional['Tokenizer'] = None
def get_tokens(text: str) -> List[int]:
    """
    ### Get token ids

    :param text: is the text to tokenize
    :return: the token ids
    """
    global _TOKENIZER
    # Lazily create the tokenizer singleton on first use
    if _TOKENIZER is None:
        _TOKENIZER = get_tokenizer()

    encoded = _TOKENIZER.encode_batch([text])[0]
    return encoded.ids
def print_token_outputs(ids: List[int], *xs: torch.Tensor):
    """
    ### Print tokens from model outputs

    Pretty prints target tokens along side outputs from the model(s).

    :param ids: are the target token ids
    :param xs: are the model(s) outputs
    """
    # Pad the targets with a `-1` sentinel since outputs are shifted by one
    padded_ids = ids + [-1]
    # Greedy (argmax) tokens from each output, right-shifted with a sentinel
    predictions = [[-1] + x[0].max(dim=-1)[1].tolist() for x in xs]
    print_tokens(padded_ids, predictions)
def print_tokens(target: List[int], others: List[List[int]]):
    """
    ### Print tokens

    Pretty prints tokens for comparison

    :param target: are the target token ids
    :param others: are the sampled outputs from the model(s)

    A `-1` token id marks an empty/padded position and is rendered as `---`.
    """
    # Load tokenizer
    global _TOKENIZER
    if _TOKENIZER is None:
        _TOKENIZER = get_tokenizer()

    # Convert the tokens to list of strings.
    # Column 0 is the decoded target; columns 1.. are the model outputs.
    text = []
    for i in range(len(target)):
        tokens = [_TOKENIZER.decode([target[i]]) if target[i] != -1 else '---']
        for j in range(len(others)):
            tokens.append(_TOKENIZER.decode([others[j][i]]) if others[j][i] != -1 else '---')
        text.append(tokens)

    # Stats: per-model correct counts and total scored target positions
    correct = [0 for _ in others]
    total = 0

    # Iterate through tokens
    for i in range(len(target)):
        parts = [(f'{i}: ', Text.meta)]
        parts += [('"', Text.subtle), (text[i][0], Text.subtle), ('"', Text.subtle), '\t']

        # Empty target: print model outputs without scoring them
        if target[i] == -1:
            for j in range(len(others)):
                parts += [('"', Text.subtle), (text[i][j + 1], Text.subtle), ('"', Text.subtle), '\t']
            logger.log(parts)
            continue

        # Number of tokens
        total += 1

        # Other outputs: matching tokens render as success, mismatches as danger
        for j in range(len(others)):
            correct[j] += 1 if others[j][i] == target[i] else 0
            parts += [('"', Text.subtle),
                      (text[i][j + 1], Text.success if others[j][i] == target[i] else Text.danger),
                      ('"', Text.subtle), '\t']

        logger.log(parts)

    # Stats: total scored positions followed by each model's correct count
    parts = [(f'{total}', Text.highlight), '\t']
    for j in range(len(others)):
        parts += [(f'{correct[j]}', Text.value), '\t']
    logger.log(parts)
def balance_layers_simple(n_layers: int, n_chunks: int):
    """
    ### Balance layers

    Split the `n_layers` into `n_chunks`. This is used for pipeline parallel training.

    :param n_layers: is the number of layers
    :param n_chunks: is the number of chunks
    :return: returns a list with the number of layers for each chunk
    """
    chunk_sizes = []
    remaining = n_layers
    for i in range(n_chunks):
        # Distribute the remaining layers evenly over the remaining chunks
        size = remaining // (n_chunks - i)
        chunk_sizes.append(size)
        remaining -= size
    # Larger chunks come first
    chunk_sizes.reverse()
    return chunk_sizes
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/utils/cache.py | labml_nn/neox/utils/cache.py | """
---
title: Cache for Intermediate Activations
summary: >
Cache for intermediate activations for faster inference.
---
# Cache for Intermediate Activations
During inference the model outputs token by token.
We use this simple cache to store key's and value's attention layers,
so that we don't have to recompute them for previous tokens.
"""
from typing import Any
class Cache:
    """
    ## Cache

    This maintains a key-value cache and queues push values and pop them in the same order.
    The queues are useful since we have multiple attention layers.
    """

    def __init__(self):
        # Backing store for both plain values and queues (lists)
        self._cache = {}

    def clear_all(self):
        """
        ### Clear cache
        """
        self._cache = {}

    def push(self, name: str, value: Any):
        """
        ### Push a value to a queue

        :param name: is the name of the queue
        :param value: is the value to be pushed
        """
        # Create an empty queue if it's not present
        if name not in self._cache:
            self._cache[name] = []

        # Push to the queue
        self._cache[name].append(value)

    def q_size(self, name):
        """
        ### Return the size of the queue

        :param name: is the name of the queue
        :return: size of the queue if exists else None
        """
        if name not in self._cache:
            return None

        # The key may hold a plain value set via `set`; only queues have a size.
        # (`isinstance` replaces the non-idiomatic `type(...) != list` check.)
        if not isinstance(self._cache[name], list):
            return None

        return len(self._cache[name])

    def pop(self, name: str):
        """
        ### Pop from a queue

        :param name: is the name of the queue
        :return: the value
        """
        # FIFO: values are popped in the order they were pushed
        return self._cache[name].pop(0)

    def set(self, key: str, value: Any):
        """
        ### Cache a value

        :param key: is the name of the value to be cached
        :param value: is the value
        """
        self._cache[key] = value

    def get(self, key: str, default: Any = None):
        """
        ### Retrieve a value from cache

        :param key: is the name used when caching
        :param default: is the default value if the cache is empty
        :return: the cached value
        """
        return self._cache.get(key, default)

    def clear(self, key: str):
        """
        ### Clear a cache value

        :param key: is the name used when caching
        """
        del self._cache[key]
# Singleton for cache
_INSTANCE = None
def get_cache() -> Cache:
    """
    ### Get the cache instance

    Lazily creates the module-level `Cache` singleton on first call.

    :return: the cache instance
    """
    global _INSTANCE

    # Create the singleton on first use
    if _INSTANCE is None:
        _INSTANCE = Cache()

    return _INSTANCE
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/utils/llm_int8.py | labml_nn/neox/utils/llm_int8.py | """
---
title: LLM.int8() on GPT-NeoX
summary: >
Transform nn.Linear layers to 8-bit integer layers.
---
# LLM.int8() on GPT-NeoX
This implements a utility function to transform a `nn.Linear` layer to LLM.int8() linear layer.
[LLM.int8() paper](https://arxiv.org/abs/2208.07339)
shows you can use int8 quantization while handling outliers to
reduce memory footprint without performance degradation in large language models.
They convert weights and inputs to scaled 8-bit integers and does matrix multiplication
producing int32 results which is then converted back to float16 and rescaled.
They show that in large language models, some features can give extreme values (outliers)
that dominate the model's output.
These features get clamped in 8-bit integer space which causes the model performance to degrade.
As a solution they pick these outliers (greater than a specified threshold)
and compute their multiplications separately in float16 space.
Since the percentage of outliers is around 0.01% this doesn't increase memory usage,
and prevents the model from degrading performance.
The code to transform GPT-NeoX layers is defined in [model.py](../model.html#post_load_prepare).
Here are example uses of GPT-NeoX with int8 quantization.
* [Generate Text](../samples/llm_int8.html)
* [Run Evaluation Tests](../evaluation/llm_int8.html)
"""
# Import [`bitsandbytes`](https://github.com/timdettmers/bitsandbytes) package
try:
from bitsandbytes.nn import Linear8bitLt, Int8Params
except ImportError:
raise ImportError('''Please install `bitsandbytes` with `pip install bitsandbytes -U`''')
import torch
from torch import nn
def make_llm_int8_linear(linear_module: nn.Linear, device: torch.device, threshold: float = 6.0):
    """
    ## Transform a `nn.Linear` layer to LLM.int8() linear layer

    :param linear_module: is the `nn.Linear` layer to transform
    :param device: is the device of the model
    :param threshold: is the threshold $\alpha$ to use for outlier detection
    """

    # Only plain linear layers can be converted
    assert isinstance(linear_module, nn.Linear)

    # Create an empty Linear8bitLt module
    int8_lin = Linear8bitLt(
        linear_module.in_features,
        linear_module.out_features,
        linear_module.bias is not None,
        has_fp16_weights=False,
        threshold=threshold,
    )

    # Quantize the weights. The parameter slot is assigned through `_parameters`
    # directly (bitsandbytes internals); presumably `Int8Params.to(device)`
    # performs the actual int8 quantization when moving to a CUDA device —
    # confirm against the installed bitsandbytes version.
    int8_lin._parameters['weight'] = Int8Params(linear_module.weight.data.cpu(),
                                                requires_grad=False,
                                                has_fp16_weights=False).to(device)

    # Set the bias in float16 space
    if linear_module.bias is not None:
        int8_lin._parameters['bias'] = nn.Parameter(linear_module.bias.data,
                                                    requires_grad=False)

    # Return the converted layer
    return int8_lin
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/evaluation/half_precision.py | labml_nn/neox/evaluation/half_precision.py | """
---
title: Evaluate GPT-NeoX with half precision (float16) on test suite
summary: >
 Evaluate GPT-NeoX with half precision (float16) on test suite
---

# Evaluate GPT-NeoX with half precision (float16) on test suite

This code evaluates [GPT-NeoX](../index.html) with float16 weights on a suite of tasks.
"""
import argparse
import torch
from torch import nn
from labml_nn.neox.evaluation import run_eval_harness
from labml_nn.neox.model import LayerGenerator
def main():
    """Evaluate GPT-NeoX in float16 on the `lambada` task."""
    # Parse command line options
    parser = argparse.ArgumentParser()
    parser.add_argument("--flash", action='store_true', help="whether to use Flash Attention")
    opt = parser.parse_args()

    # Run on the first GPU
    device = torch.device('cuda:0')

    # Load all layers in float16
    generator = LayerGenerator(is_clone_layers=True,
                               filter_layers=None,
                               dtype=torch.float16,
                               device=device,
                               is_flash_attention=opt.flash,
                               )
    model = nn.Sequential(*list(generator.load()))

    # Run [evaluation harness](index.html)
    print(run_eval_harness(model, 'half_precision', ['lambada'], device))
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/evaluation/__init__.py | labml_nn/neox/evaluation/__init__.py | """
---
title: Evaluation
summary: >
Code to evaluate the model on NLP tasks through lm-evaluation-harness
---
# Evaluation
This is the code to test the model on
[EleutherAI/lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness).
* [Evaluating half precision model on a single GPU](half_precision.html)
"""
import math
from typing import List
import torch
import torch.nn.functional as F
from lm_eval import tasks, evaluator, utils
from lm_eval.base import BaseLM
from tokenizers import Tokenizer
from torch import nn
from tqdm import tqdm
from labml import monit
from labml_nn.neox.tokenizer import get_tokenizer
class EvalHarnessAdapter(BaseLM):
    """
    ## Evaluation Harness Adapter

    This is based on the [adapter from EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py)
    """

    def __init__(self, tokenizer: Tokenizer, vocab_size: int, batch_size: int):
        """
        :param tokenizer: is the [Huggingface Tokenizer](huggingface/tokenizers)
        :param vocab_size: is the size of the vocabulary
         (this differs from the tokenizer vocab size since neox adds some extra to make the embedding layer
         model parallel.)
        :param batch_size: is the batch size
        """
        super().__init__()
        self.tokenizer = tokenizer
        # Token id of the `<|endoftext|>` marker
        self._eot_token_id = self.tokenizer.token_to_id("<|endoftext|>")
        self._vocab_size = vocab_size

        self._batch_size = batch_size

    @property
    def device(self):
        # Not used by this adapter; subclasses keep their own device
        raise RuntimeError()

    @property
    def vocab_size(self):
        """Size of the vocabulary"""
        return self._vocab_size

    @property
    def eot_token_id(self):
        """End-of-text token"""
        return self._eot_token_id

    @property
    def max_length(self):
        """Maximum sequence length"""
        return 2048

    @property
    def max_gen_toks(self):
        """Maximum number of tokens to generate"""
        return 128

    @property
    def batch_size(self):
        """
        Batch size
        """
        return self._batch_size

    def tok_encode(self, string: str):
        """
        Encode a given text
        """
        return self.tokenizer.encode(string).ids

    def tok_decode(self, tokens: List[int]):
        """
        Decode text from token ids
        """
        return self.tokenizer.decode(tokens)

    def _model_call(self, inps: torch.Tensor):
        # Subclasses implement the actual forward pass
        raise NotImplementedError

    def _model_generate(self, context, max_length, eos_token_id):
        # Generation is not supported by this adapter
        raise RuntimeError()

    def greedy_until(self, requests):
        # Generation tasks are not supported by this adapter
        raise RuntimeError()

    @torch.no_grad()
    def _loglikelihood_tokens(self, requests, disable_tqdm=False):
        """
        ### Get log-likelihoods of the next tokens

        :param requests: List of requests containing the context and the expected continuation.
        :param disable_tqdm: If True, disable tqdm progress bar.
        """
        # For results
        res = []

        # Reorder the requests in the descending order of the lengths,
        # so that sequences with similar lengths are close
        def _collate(x):
            toks = x[1] + x[2]
            return -len(toks), tuple(toks)

        reord = utils.Reorderer(requests, _collate)

        # Loop through requests with `batch_size` number of requests at a time
        for chunk in utils.chunks(tqdm(reord.get_reordered(), disable=disable_tqdm), self.batch_size):
            # To store the inputs for the batch
            inps = []
            # The continuations for the batch
            continuations = []
            # Lengths of the input sequences
            inplens = []
            # Padded length for the batch
            padded_length = None
            # Loop through each request in the chunk and collect them into PyTorch tensors with paddings
            for _, context_enc, continuation_enc in chunk:
                # Concatenate the context and continuation
                inp = context_enc + continuation_enc
                # Truncate from left if the size exceeds the `max_length`
                inp = inp[-(self.max_length + 1):]
                # Remove final token
                inp = inp[:-1]
                # Create a tensor
                inp = torch.tensor(inp, dtype=torch.long)
                # Input length
                inplen = inp.shape[0]

                # Determine the padded length.
                # Shorter sequences will get padded.
                # Since requests are sorted longest-first, the first sequence in the
                # chunk fixes the pad length (rounded up to a multiple of 32).
                if padded_length is None:
                    padded_length = int(math.ceil(inplen / 32)) * 32
                # padded_length = padded_length if padded_length is not None else inplen

                # Padding
                padding = torch.zeros(padded_length - inplen, dtype=torch.long)

                # Add padding
                inp = torch.cat([inp, padding], dim=0)

                inps.append(inp)
                continuations.append(continuation_enc)
                inplens.append(inplen)

            # Get model logits
            logits = self._model_call(torch.stack(inps))

            # Get log softmaxes
            multi_logits = F.log_softmax(logits, dim=-1)

            # Loop through the input/output pairs of the batch
            for logits, inplen, cont_toks in zip(multi_logits, inplens, continuations):
                # Get number of predicted tokens
                contlen = len(cont_toks)
                # Get logits of those
                logits = logits[inplen - contlen: inplen]
                # Get the tokens with the highest probabilities
                greedy_tokens = logits.argmax(dim=-1)
                # Get the target tokens
                cont_toks = torch.tensor(cont_toks, dtype=torch.long).to(logits.device)
                # Whether there's an exact match
                max_equal = (greedy_tokens == cont_toks).all()
                # Log-likelihoods of the target tokens
                logits = torch.gather(logits, 1, cont_toks[:, None])
                # Add the total log-likelihoods and whether there was a match to the results
                res.append((float(logits.sum()), bool(max_equal)))

        # Re-order and return results
        return reord.get_original(res)

    @torch.no_grad()
    def run_eval(self, name: str, eval_tasks: List[str]):
        """
        ### Run given evaluations

        :param name: label stored in the results under `config.name`
        :param eval_tasks: list of task names to evaluate on
        """
        # Run [EleutherAI/lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) evaluator
        results = evaluator.evaluate(lm=self, task_dict=tasks.get_task_dict(eval_tasks))

        # Add configs
        results["config"] = {
            "name": name,
        }

        # Return the harness results dictionary
        return results
@torch.no_grad()
def run_eval(self, name: str, eval_tasks: List[str]):
"""
### Run given evaluations
"""
# Run [EleutherAI/lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) evaluator
results = evaluator.evaluate(lm=self, task_dict=tasks.get_task_dict(eval_tasks))
# Add configs
results["config"] = {
"name": name,
}
#
return results
class NoeXEvalHarnessAdapter(EvalHarnessAdapter):
    """
    ## Evaluation Harness Adapter

    This is based on the [adapter from EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py)
    """

    def __init__(self, model: nn.Module, tokenizer: Tokenizer, vocab_size: int, batch_size: int, device: torch.device):
        """
        :param model: is model
        :param tokenizer: is the [Huggingface Tokenizer](huggingface/tokenizers)
        :param vocab_size: is the size of the vocabulary
         (this differs from the tokenizer vocab size since neox adds some extra to make the embedding layer
         model parallel.)
        :param batch_size: is the batch size
        :param device: is the device of the model
        """
        super().__init__(tokenizer, vocab_size, batch_size)
        self._device = device
        self.model = model

    def _model_call(self, inps: torch.Tensor):
        """
        Call the model
        """
        # Move the inputs to the model's device before the forward pass
        batch = inps.to(self._device)
        return self.model(batch)
def run_eval_harness(model: nn.Module, name: str, eval_tasks: List[str], device: torch.device, batch_size: int = 8):
    """
    ## Run evaluation harness with a given model
    """
    # Load the tokenizer
    with monit.section('Load tokenizer'):
        tokenizer = get_tokenizer()

    # Default to the full task suite when no tasks are specified
    if not eval_tasks:
        eval_tasks = [
            "anli_r1",
            "anli_r2",
            "anli_r3",
            "hellaswag",
            "lambada",
            "piqa",
            "winogrande",
            "wsc",
            "mathqa",
        ]

    # Wrap the model with the eval-harness adapter and run the tasks
    adapter = NoeXEvalHarnessAdapter(model, tokenizer, 50_432, batch_size, device)
    return adapter.run_eval(name, eval_tasks)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/neox/evaluation/llm_int8.py | labml_nn/neox/evaluation/llm_int8.py | """
---
title: Evaluate GPT-NeoX using LLM.int8() quantization on test suite
summary: >
Evaluate GPT-NeoX using LLM.int8() quantization on test suite
---
# Evaluate GPT-NeoX using LLM.int8() quantization on test suite
This code evaluate [GPT-NeoX](../index.html) using [LLM.int8() quantization](../utils/llm_int8.html),
on a suite of tasks.
"""
import torch
from torch import nn
from labml import monit
from labml_nn.neox.evaluation import run_eval_harness
from labml_nn.neox.model import LayerGenerator
def main():
    """
    Evaluate GPT-NeoX with LLM.int8() quantization on the full task suite.
    """
    # Device
    device = torch.device('cuda:0')

    # Load layers in float16 into CPU. We convert the layers to int8 later, because doing that
    # on the fly after loading layers to GPU causes CUDA memory fragmentation
    # (about 3GB memory can get lost due to fragmentation).
    layer_generator = LayerGenerator(is_clone_layers=True,
                                     dtype=torch.float16,
                                     device=torch.device('cpu'),
                                     )
    # Load layers
    layers = list(layer_generator.load())

    # Convert each layer to int8 and move it to the GPU one at a time;
    # this reduces CUDA memory fragmentation
    for layer in monit.iterate('Convert to int8', layers, is_children_silent=True):
        layer_generator.post_load_prepare(layer,
                                          device=device,
                                          is_llm_int8=True,
                                          llm_int8_threshold=6.0,
                                          )
        layer.to(device)

    # Create `nn.Sequential` model
    model = nn.Sequential(*layers)

    # Run [evaluation harness](index.html).
    # Fix: label the results `llm_int8` — the original passed `'half_precision'`,
    # mislabeling the int8 results in the output `config.name`.
    print(run_eval_harness(model, 'llm_int8', [], device))
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/uncertainty/__init__.py | labml_nn/uncertainty/__init__.py | """
---
title: Neural Networks with Uncertainty Estimation
summary: >
A set of PyTorch implementations/tutorials related to uncertainty estimation
---
# Neural Networks with Uncertainty Estimation
These are neural network architectures that estimate the uncertainty of the predictions.
* [Evidential Deep Learning to Quantify Classification Uncertainty](evidence/index.html)
"""
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/uncertainty/evidence/experiment.py | labml_nn/uncertainty/evidence/experiment.py | """
---
title: "Evidential Deep Learning to Quantify Classification Uncertainty Experiment"
summary: >
This trains is EDL model on MNIST
---
# [Evidential Deep Learning to Quantify Classification Uncertainty](index.html) Experiment
This trains a model based on [Evidential Deep Learning to Quantify Classification Uncertainty](index.html)
on MNIST dataset.
"""
from typing import Any
import torch.nn as nn
import torch.utils.data
from labml import tracker, experiment
from labml.configs import option, calculate
from labml_nn.helpers.schedule import Schedule, RelativePiecewise
from labml_nn.helpers.trainer import BatchIndex
from labml_nn.experiments.mnist import MNISTConfigs
from labml_nn.uncertainty.evidence import KLDivergenceLoss, TrackStatistics, MaximumLikelihoodLoss, \
CrossEntropyBayesRisk, SquaredErrorBayesRisk
class Model(nn.Module):
    """
    ## LeNet based model for MNIST classification
    """

    def __init__(self, dropout: float):
        """
        :param dropout: dropout probability for the hidden fully-connected layer
        """
        super().__init__()
        # First $5x5$ convolution layer
        self.conv1 = nn.Conv2d(1, 20, kernel_size=5)
        # ReLU activation
        self.act1 = nn.ReLU()
        # $2x2$ max-pooling
        self.max_pool1 = nn.MaxPool2d(2, 2)
        # Second $5x5$ convolution layer
        self.conv2 = nn.Conv2d(20, 50, kernel_size=5)
        # ReLU activation
        self.act2 = nn.ReLU()
        # $2x2$ max-pooling
        self.max_pool2 = nn.MaxPool2d(2, 2)
        # First fully-connected layer that maps to $500$ features
        self.fc1 = nn.Linear(50 * 4 * 4, 500)
        # ReLU activation
        self.act3 = nn.ReLU()
        # Final fully connected layer to output evidence for $10$ classes.
        # The ReLU or Softplus activation is applied to this outside the model to get the
        # non-negative evidence
        self.fc2 = nn.Linear(500, 10)
        # Dropout for the hidden layer
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x: torch.Tensor):
        """
        * `x` is the batch of MNIST images of shape `[batch_size, 1, 28, 28]`

        Fix: defined as `forward` instead of overriding `__call__`, so that
        `nn.Module.__call__` can dispatch hooks; `model(x)` behaves the same.
        """
        # Apply first convolution and max pooling.
        # The result has shape `[batch_size, 20, 12, 12]`
        x = self.max_pool1(self.act1(self.conv1(x)))
        # Apply second convolution and max pooling.
        # The result has shape `[batch_size, 50, 4, 4]`
        x = self.max_pool2(self.act2(self.conv2(x)))
        # Flatten the tensor to shape `[batch_size, 50 * 4 * 4]`
        x = x.view(x.shape[0], -1)
        # Apply hidden layer
        x = self.act3(self.fc1(x))
        # Apply dropout
        x = self.dropout(x)
        # Apply final layer and return
        return self.fc2(x)
class Configs(MNISTConfigs):
    """
    ## Configurations

    We use [`MNISTConfigs`](../../experiments/mnist.html#MNISTConfigs) configurations.
    """

    # [KL Divergence regularization](index.html#KLDivergenceLoss)
    kl_div_loss = KLDivergenceLoss()
    # KL Divergence regularization coefficient schedule
    kl_div_coef: Schedule
    # KL Divergence regularization coefficient schedule
    # (relative training progress, coefficient) control points
    kl_div_coef_schedule = [(0, 0.), (0.2, 0.01), (1, 1.)]
    # [Stats module](index.html#TrackStatistics) for tracking
    stats = TrackStatistics()
    # Dropout
    dropout: float = 0.5
    # Module to convert the model output to non-zero evidences
    # (presumably ReLU or Softplus — set by an option elsewhere; confirm)
    outputs_to_evidence: nn.Module

    def init(self):
        """
        ### Initialization
        """
        # Set tracker configurations
        tracker.set_scalar("loss.*", True)
        tracker.set_scalar("accuracy.*", True)
        tracker.set_histogram('u.*', True)
        tracker.set_histogram('prob.*', False)
        tracker.set_scalar('annealing_coef.*', False)
        tracker.set_scalar('kl_div_loss.*', False)

        # No state modules to persist
        self.state_modules = []

    def step(self, batch: Any, batch_idx: BatchIndex):
        """
        ### Training or validation step

        :param batch: (data, target) pair of MNIST images and labels
        :param batch_idx: index of the batch within the epoch
        """

        # Training/Evaluation mode
        self.model.train(self.mode.is_train)

        # Move data to the device
        data, target = batch[0].to(self.device), batch[1].to(self.device)

        # One-hot coded targets
        eye = torch.eye(10).to(torch.float).to(self.device)
        target = eye[target]

        # Update global step (number of samples processed) when in training mode
        if self.mode.is_train:
            tracker.add_global_step(len(data))

        # Get model outputs
        outputs = self.model(data)
        # Get evidences $e_k \ge 0$
        evidence = self.outputs_to_evidence(outputs)

        # Calculate loss
        loss = self.loss_func(evidence, target)
        # Calculate KL Divergence regularization loss
        kl_div_loss = self.kl_div_loss(evidence, target)
        tracker.add("loss.", loss)
        tracker.add("kl_div_loss.", kl_div_loss)

        # KL Divergence loss coefficient $\lambda_t$
        annealing_coef = min(1., self.kl_div_coef(tracker.get_global_step()))
        tracker.add("annealing_coef.", annealing_coef)

        # Total loss
        loss = loss + annealing_coef * kl_div_loss

        # Track statistics
        self.stats(evidence, target)

        # Train the model
        if self.mode.is_train:
            # Calculate gradients
            loss.backward()
            # Take optimizer step
            self.optimizer.step()
            # Clear the gradients
            self.optimizer.zero_grad()

        # Save the tracked metrics
        tracker.save()
@option(Configs.model)
def mnist_model(c: Configs):
"""
### Create model
"""
return Model(c.dropout).to(c.device)
@option(Configs.kl_div_coef)
def kl_div_coef(c: Configs):
"""
### KL Divergence Loss Coefficient Schedule
"""
# Create a [relative piecewise schedule](../../helpers/schedule.html)
return RelativePiecewise(c.kl_div_coef_schedule, c.epochs * len(c.train_dataset))
# [Maximum Likelihood Loss](index.html#MaximumLikelihoodLoss)
calculate(Configs.loss_func, 'max_likelihood_loss', lambda: MaximumLikelihoodLoss())
# [Cross Entropy Bayes Risk](index.html#CrossEntropyBayesRisk)
calculate(Configs.loss_func, 'cross_entropy_bayes_risk', lambda: CrossEntropyBayesRisk())
# [Squared Error Bayes Risk](index.html#SquaredErrorBayesRisk)
calculate(Configs.loss_func, 'squared_error_bayes_risk', lambda: SquaredErrorBayesRisk())
# ReLU to calculate evidence
calculate(Configs.outputs_to_evidence, 'relu', lambda: nn.ReLU())
# Softplus to calculate evidence
calculate(Configs.outputs_to_evidence, 'softplus', lambda: nn.Softplus())
def main():
# Create experiment
experiment.create(name='evidence_mnist')
# Create configurations
conf = Configs()
# Load configurations
experiment.configs(conf, {
'optimizer.optimizer': 'Adam',
'optimizer.learning_rate': 0.001,
'optimizer.weight_decay': 0.005,
# 'loss_func': 'max_likelihood_loss',
# 'loss_func': 'cross_entropy_bayes_risk',
'loss_func': 'squared_error_bayes_risk',
'outputs_to_evidence': 'softplus',
'dropout': 0.5,
})
# Start the experiment and run the training loop
with experiment.start():
conf.run()
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/uncertainty/evidence/__init__.py | labml_nn/uncertainty/evidence/__init__.py | """
---
title: "Evidential Deep Learning to Quantify Classification Uncertainty"
summary: >
A PyTorch implementation/tutorial of the paper Evidential Deep Learning to Quantify Classification
Uncertainty.
---
# Evidential Deep Learning to Quantify Classification Uncertainty
This is a [PyTorch](https://pytorch.org) implementation of the paper
[Evidential Deep Learning to Quantify Classification Uncertainty](https://arxiv.org/abs/1806.01768).
[Dampster-Shafer Theory of Evidence](https://en.wikipedia.org/wiki/Dempster%E2%80%93Shafer_theory)
assigns belief masses a set of classes (unlike assigning a probability to a single class).
Sum of the masses of all subsets is $1$.
Individual class probabilities (plausibilities) can be derived from these masses.
Assigning a mass to the set of all classes means it can be any one of the classes; i.e. saying "I don't know".
If there are $K$ classes, we assign masses $b_k \ge 0$ to each of the classes and
an overall uncertainty mass $u \ge 0$ to all classes.
$$u + \sum_{k=1}^K b_k = 1$$
Belief masses $b_k$ and $u$ can be computed from evidence $e_k \ge 0$, as $b_k = \frac{e_k}{S}$
and $u = \frac{K}{S}$ where $S = \sum_{k=1}^K (e_k + 1)$.
Paper uses term evidence as a measure of the amount of support
collected from data in favor of a sample to be classified into a certain class.
This corresponds to a [Dirichlet distribution](https://en.wikipedia.org/wiki/Dirichlet_distribution)
with parameters $\textcolor{orange}{\alpha_k} = e_k + 1$, and
$\textcolor{orange}{\alpha_0} = S = \sum_{k=1}^K \textcolor{orange}{\alpha_k}$ is known as the Dirichlet strength.
Dirichlet distribution $D(\mathbf{p} \vert \textcolor{orange}{\mathbf{\alpha}})$
is a distribution over categorical distribution; i.e. you can sample class probabilities
from a Dirichlet distribution.
The expected probability for class $k$ is $\hat{p}_k = \frac{\textcolor{orange}{\alpha_k}}{S}$.
We get the model to output evidences
$$\mathbf{e} = \textcolor{orange}{\mathbf{\alpha}} - 1 = f(\mathbf{x} | \Theta)$$
for a given input $\mathbf{x}$.
We use a function such as
[ReLU](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html) or a
[Softplus](https://pytorch.org/docs/stable/generated/torch.nn.Softplus.html)
at the final layer to get $f(\mathbf{x} | \Theta) \ge 0$.
The paper proposes a few loss functions to train the model, which we have implemented below.
Here is the [training code `experiment.py`](experiment.html) to train a model on MNIST dataset.
"""
import torch
from labml import tracker
from torch import nn
class MaximumLikelihoodLoss(nn.Module):
"""
<a id="MaximumLikelihoodLoss"></a>
## Type II Maximum Likelihood Loss
The distribution $D(\mathbf{p} \vert \textcolor{orange}{\mathbf{\alpha}})$ is a prior on the likelihood
$Multi(\mathbf{y} \vert p)$,
and the negative log marginal likelihood is calculated by integrating over class probabilities
$\mathbf{p}$.
If target probabilities (one-hot targets) are $y_k$ for a given sample the loss is,
\begin{align}
\mathcal{L}(\Theta)
&= -\log \Bigg(
\int
\prod_{k=1}^K p_k^{y_k}
\frac{1}{B(\textcolor{orange}{\mathbf{\alpha}})}
\prod_{k=1}^K p_k^{\textcolor{orange}{\alpha_k} - 1}
d\mathbf{p}
\Bigg ) \\
&= \sum_{k=1}^K y_k \bigg( \log S - \log \textcolor{orange}{\alpha_k} \bigg)
\end{align}
"""
def forward(self, evidence: torch.Tensor, target: torch.Tensor):
"""
* `evidence` is $\mathbf{e} \ge 0$ with shape `[batch_size, n_classes]`
* `target` is $\mathbf{y}$ with shape `[batch_size, n_classes]`
"""
# $\textcolor{orange}{\alpha_k} = e_k + 1$
alpha = evidence + 1.
# $S = \sum_{k=1}^K \textcolor{orange}{\alpha_k}$
strength = alpha.sum(dim=-1)
# Losses $\mathcal{L}(\Theta) = \sum_{k=1}^K y_k \bigg( \log S - \log \textcolor{orange}{\alpha_k} \bigg)$
loss = (target * (strength.log()[:, None] - alpha.log())).sum(dim=-1)
# Mean loss over the batch
return loss.mean()
class CrossEntropyBayesRisk(nn.Module):
"""
<a id="CrossEntropyBayesRisk"></a>
## Bayes Risk with Cross Entropy Loss
Bayes risk is the overall maximum cost of making incorrect estimates.
It takes a cost function that gives the cost of making an incorrect estimate
and sums it over all possible outcomes based on probability distribution.
Here the cost function is cross-entropy loss, for one-hot coded $\mathbf{y}$
$$\sum_{k=1}^K -y_k \log p_k$$
We integrate this cost over all $\mathbf{p}$
\begin{align}
\mathcal{L}(\Theta)
&= -\log \Bigg(
\int
\Big[ \sum_{k=1}^K -y_k \log p_k \Big]
\frac{1}{B(\textcolor{orange}{\mathbf{\alpha}})}
\prod_{k=1}^K p_k^{\textcolor{orange}{\alpha_k} - 1}
d\mathbf{p}
\Bigg ) \\
&= \sum_{k=1}^K y_k \bigg( \psi(S) - \psi( \textcolor{orange}{\alpha_k} ) \bigg)
\end{align}
where $\psi(\cdot)$ is the $digamma$ function.
"""
def forward(self, evidence: torch.Tensor, target: torch.Tensor):
"""
* `evidence` is $\mathbf{e} \ge 0$ with shape `[batch_size, n_classes]`
* `target` is $\mathbf{y}$ with shape `[batch_size, n_classes]`
"""
# $\textcolor{orange}{\alpha_k} = e_k + 1$
alpha = evidence + 1.
# $S = \sum_{k=1}^K \textcolor{orange}{\alpha_k}$
strength = alpha.sum(dim=-1)
# Losses $\mathcal{L}(\Theta) = \sum_{k=1}^K y_k \bigg( \psi(S) - \psi( \textcolor{orange}{\alpha_k} ) \bigg)$
loss = (target * (torch.digamma(strength)[:, None] - torch.digamma(alpha))).sum(dim=-1)
# Mean loss over the batch
return loss.mean()
class SquaredErrorBayesRisk(nn.Module):
"""
<a id="SquaredErrorBayesRisk"></a>
## Bayes Risk with Squared Error Loss
Here the cost function is squared error,
$$\sum_{k=1}^K (y_k - p_k)^2 = \Vert \mathbf{y} - \mathbf{p} \Vert_2^2$$
We integrate this cost over all $\mathbf{p}$
\begin{align}
\mathcal{L}(\Theta)
&= -\log \Bigg(
\int
\Big[ \sum_{k=1}^K (y_k - p_k)^2 \Big]
\frac{1}{B(\textcolor{orange}{\mathbf{\alpha}})}
\prod_{k=1}^K p_k^{\textcolor{orange}{\alpha_k} - 1}
d\mathbf{p}
\Bigg ) \\
&= \sum_{k=1}^K \mathbb{E} \Big[ y_k^2 -2 y_k p_k + p_k^2 \Big] \\
&= \sum_{k=1}^K \Big( y_k^2 -2 y_k \mathbb{E}[p_k] + \mathbb{E}[p_k^2] \Big)
\end{align}
Where $$\mathbb{E}[p_k] = \hat{p}_k = \frac{\textcolor{orange}{\alpha_k}}{S}$$
is the expected probability when sampled from the Dirichlet distribution
and $$\mathbb{E}[p_k^2] = \mathbb{E}[p_k]^2 + \text{Var}(p_k)$$
where
$$\text{Var}(p_k) = \frac{\textcolor{orange}{\alpha_k}(S - \textcolor{orange}{\alpha_k})}{S^2 (S + 1)}
= \frac{\hat{p}_k(1 - \hat{p}_k)}{S + 1}$$
is the variance.
This gives,
\begin{align}
\mathcal{L}(\Theta)
&= \sum_{k=1}^K \Big( y_k^2 -2 y_k \mathbb{E}[p_k] + \mathbb{E}[p_k^2] \Big) \\
&= \sum_{k=1}^K \Big( y_k^2 -2 y_k \mathbb{E}[p_k] + \mathbb{E}[p_k]^2 + \text{Var}(p_k) \Big) \\
&= \sum_{k=1}^K \Big( \big( y_k -\mathbb{E}[p_k] \big)^2 + \text{Var}(p_k) \Big) \\
&= \sum_{k=1}^K \Big( ( y_k -\hat{p}_k)^2 + \frac{\hat{p}_k(1 - \hat{p}_k)}{S + 1} \Big)
\end{align}
This first part of the equation $\big(y_k -\mathbb{E}[p_k]\big)^2$ is the error term and
the second part is the variance.
"""
def forward(self, evidence: torch.Tensor, target: torch.Tensor):
"""
* `evidence` is $\mathbf{e} \ge 0$ with shape `[batch_size, n_classes]`
* `target` is $\mathbf{y}$ with shape `[batch_size, n_classes]`
"""
# $\textcolor{orange}{\alpha_k} = e_k + 1$
alpha = evidence + 1.
# $S = \sum_{k=1}^K \textcolor{orange}{\alpha_k}$
strength = alpha.sum(dim=-1)
# $\hat{p}_k = \frac{\textcolor{orange}{\alpha_k}}{S}$
p = alpha / strength[:, None]
# Error $(y_k -\hat{p}_k)^2$
err = (target - p) ** 2
# Variance $\text{Var}(p_k) = \frac{\hat{p}_k(1 - \hat{p}_k)}{S + 1}$
var = p * (1 - p) / (strength[:, None] + 1)
# Sum of them
loss = (err + var).sum(dim=-1)
# Mean loss over the batch
return loss.mean()
class KLDivergenceLoss(nn.Module):
"""
<a id="KLDivergenceLoss"></a>
## KL Divergence Regularization Loss
This tries to shrink the total evidence to zero if the sample cannot be correctly classified.
First we calculate $\tilde{\alpha}_k = y_k + (1 - y_k) \textcolor{orange}{\alpha_k}$ the
Dirichlet parameters after remove the correct evidence.
\begin{align}
&KL \Big[ D(\mathbf{p} \vert \mathbf{\tilde{\alpha}}) \Big \Vert
D(\mathbf{p} \vert <1, \dots, 1>\Big] \\
&= \log \Bigg( \frac{\Gamma \Big( \sum_{k=1}^K \tilde{\alpha}_k \Big)}
{\Gamma(K) \prod_{k=1}^K \Gamma(\tilde{\alpha}_k)} \Bigg)
+ \sum_{k=1}^K (\tilde{\alpha}_k - 1)
\Big[ \psi(\tilde{\alpha}_k) - \psi(\tilde{S}) \Big]
\end{align}
where $\Gamma(\cdot)$ is the gamma function,
$\psi(\cdot)$ is the $digamma$ function and
$\tilde{S} = \sum_{k=1}^K \tilde{\alpha}_k$
"""
def forward(self, evidence: torch.Tensor, target: torch.Tensor):
"""
* `evidence` is $\mathbf{e} \ge 0$ with shape `[batch_size, n_classes]`
* `target` is $\mathbf{y}$ with shape `[batch_size, n_classes]`
"""
# $\textcolor{orange}{\alpha_k} = e_k + 1$
alpha = evidence + 1.
# Number of classes
n_classes = evidence.shape[-1]
# Remove non-misleading evidence
# $$\tilde{\alpha}_k = y_k + (1 - y_k) \textcolor{orange}{\alpha_k}$$
alpha_tilde = target + (1 - target) * alpha
# $\tilde{S} = \sum_{k=1}^K \tilde{\alpha}_k$
strength_tilde = alpha_tilde.sum(dim=-1)
# The first term
#
# \begin{align}
# &\log \Bigg( \frac{\Gamma \Big( \sum_{k=1}^K \tilde{\alpha}_k \Big)}
# {\Gamma(K) \prod_{k=1}^K \Gamma(\tilde{\alpha}_k)} \Bigg) \\
# &= \log \Gamma \Big( \sum_{k=1}^K \tilde{\alpha}_k \Big)
# - \log \Gamma(K)
# - \sum_{k=1}^K \log \Gamma(\tilde{\alpha}_k)
# \end{align}
first = (torch.lgamma(alpha_tilde.sum(dim=-1))
- torch.lgamma(alpha_tilde.new_tensor(float(n_classes)))
- (torch.lgamma(alpha_tilde)).sum(dim=-1))
# The second term
# $$\sum_{k=1}^K (\tilde{\alpha}_k - 1)
# \Big[ \psi(\tilde{\alpha}_k) - \psi(\tilde{S}) \Big]$$
second = (
(alpha_tilde - 1) *
(torch.digamma(alpha_tilde) - torch.digamma(strength_tilde)[:, None])
).sum(dim=-1)
# Sum of the terms
loss = first + second
# Mean loss over the batch
return loss.mean()
class TrackStatistics(nn.Module):
"""
<a id="TrackStatistics"></a>
### Track statistics
This module computes statistics and tracks them with [labml `tracker`](https://docs.labml.ai/api/tracker.html).
"""
def forward(self, evidence: torch.Tensor, target: torch.Tensor):
# Number of classes
n_classes = evidence.shape[-1]
# Predictions that correctly match with the target (greedy sampling based on highest probability)
match = evidence.argmax(dim=-1).eq(target.argmax(dim=-1))
# Track accuracy
tracker.add('accuracy.', match.sum() / match.shape[0])
# $\textcolor{orange}{\alpha_k} = e_k + 1$
alpha = evidence + 1.
# $S = \sum_{k=1}^K \textcolor{orange}{\alpha_k}$
strength = alpha.sum(dim=-1)
# $\hat{p}_k = \frac{\textcolor{orange}{\alpha_k}}{S}$
expected_probability = alpha / strength[:, None]
# Expected probability of the selected (greedy highset probability) class
expected_probability, _ = expected_probability.max(dim=-1)
# Uncertainty mass $u = \frac{K}{S}$
uncertainty_mass = n_classes / strength
# Track $u$ for correctly predictions
tracker.add('u.succ.', uncertainty_mass.masked_select(match))
# Track $u$ for incorrect predictions
tracker.add('u.fail.', uncertainty_mass.masked_select(~match))
# Track $\hat{p}_k$ for correctly predictions
tracker.add('prob.succ.', expected_probability.masked_select(match))
# Track $\hat{p}_k$ for incorrect predictions
tracker.add('prob.fail.', expected_probability.masked_select(~match))
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/sampling/nucleus.py | labml_nn/sampling/nucleus.py | """
---
title: Nucleus Sampling
summary: A PyTorch implementation of nucleus sampling from language models.
---
# Nucleus Sampling
This is an implementation of nucleus sampling, introduced in the paper
[The Curious Case of Neural Text Degeneration](https://arxiv.org/abs/1904.09751).
The paper discusses the problems with other sampling methods such as Beam Search,
[Pure sampling](temperature.html), [Temperature sampling](temperature.html), and
[Top-k sampling](top_k.html). The paper introduces the idea of nucleus sampling,
which practically performs better than other sampling methods for text generation.
Nucleus sampling first picks a subset of the vocabulary $V^{(p)} \subset V$,
where $V^{(p)}$ is smallest set of tokens such that
$$\sum_{x_i \in V^{(p)}} P(x_i | x_{1:i-1}) \ge p$$
That is, we pick the highest probable tokens until the sum of their probabilities is less that $p$.
Then we sample from the selected tokens.
Here's an [experiment](experiment.html) that uses these sampling techniques.
"""
import torch
from torch import nn
from labml_nn.sampling import Sampler
class NucleusSampler(Sampler):
"""
## Nucleus Sampler
"""
def __init__(self, p: float, sampler: Sampler):
"""
:param p: is the sum of probabilities of tokens to pick $p$
:param sampler: is the sampler to use for the selected tokens
"""
self.p = p
self.sampler = sampler
# Softmax to compute $P(x_i | x_{1:i-1})$ from the logits
self.softmax = nn.Softmax(dim=-1)
def __call__(self, logits: torch.Tensor):
"""
Sample from logits with Nucleus Sampling
"""
# Get probabilities $P(x_i | x_{1:i-1})$
probs = self.softmax(logits)
# Sort probabilities in descending order
sorted_probs, indices = torch.sort(probs, dim=-1, descending=True)
# Get the cumulative sum of probabilities in the sorted order
cum_sum_probs = torch.cumsum(sorted_probs, dim=-1)
# Find the cumulative sums less than $p$.
nucleus = cum_sum_probs < self.p
# Prepend ones so that we add one token after the minimum number
# of tokens with cumulative probability less that $p$.
nucleus = torch.cat([nucleus.new_ones(nucleus.shape[:-1] + (1,)), nucleus[..., :-1]], dim=-1)
# Get log probabilities and mask out the non-nucleus
sorted_log_probs = torch.log(sorted_probs)
sorted_log_probs[~nucleus] = float('-inf')
# Sample from the sampler
sampled_sorted_indexes = self.sampler(sorted_log_probs)
# Get the actual indexes
res = indices.gather(-1, sampled_sorted_indexes.unsqueeze(-1))
#
return res.squeeze(-1)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/sampling/top_k.py | labml_nn/sampling/top_k.py | """
---
title: Top-k Sampling
summary: A PyTorch implementation of top-k sampling from language models.
---
# Top-k Sampling
Here we first pick the top-k tokens from the distribution of logits, and then
sample from them.
Here's an [experiment](experiment.html) that uses these sampling techniques.
"""
import torch
from labml_nn.sampling import Sampler
class TopKSampler(Sampler):
"""
## Top-k Sampler
"""
def __init__(self, k: int, sampler: Sampler):
"""
:param k: is the number of tokens to pick
:param sampler: is the sampler to use for the top-k tokens
`sampler` can be any sampler that takes a logits tensor as input and returns a token tensor;
e.g. [`TemperatureSampler'](temperature.html).
"""
self.k = k
self.sampler = sampler
def __call__(self, logits: torch.Tensor):
"""
Sample from logits
"""
# New logits filled with $-\infty$; i.e. zero probability
zeros = logits.new_ones(logits.shape) * float('-inf')
# Pick the largest $k$ logits and their indices
values, indices = torch.topk(logits, self.k, dim=-1)
# Set the values of the top-k selected indices to actual logits.
# Logits of other tokens remain $-\infty$
zeros.scatter_(-1, indices, values)
# Sample from the top-k logits with the specified sampler.
return self.sampler(zeros)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/sampling/experiment.py | labml_nn/sampling/experiment.py | """
---
title: Trying out Sampling Techniques for Language Models
summary: >
We try out different sampling techniques for language models on HuggingFace's GPT2 model.
---
# Trying out Sampling Techniques for Language Models
* [Greedy Sampling](greedy.html)
* [Temperature Sampling](temperature.html)
* [Top-k Sampling](top_k.html)
* [Nucleus Sampling](nucleus.html)
This experiment uses the above sampling techniques, on HuggingFace's GPT2 model.
"""
import torch
from labml import monit, logger, lab
from labml.logger import Text
from labml_nn.sampling import Sampler
from labml_nn.sampling.greedy import GreedySampler
from labml_nn.sampling.nucleus import NucleusSampler
from labml_nn.sampling.temperature import TemperatureSampler
from labml_nn.sampling.top_k import TopKSampler
from transformers import GPT2Tokenizer, GPT2LMHeadModel
@torch.no_grad()
def sample(model: GPT2LMHeadModel, tokenizer: GPT2Tokenizer, sampler: Sampler,
n_samples: int, n_tokens: int, seq_len: int, prompt: str):
"""
## Sample from model
:param model: is the model to sample from
:param tokenizer: is the tokenizer to use
:param sampler: is the sampler to use
:param n_samples: is the number of samples to generate
:param n_tokens: is the number of tokens to generate
:param seq_len: is the maximum sequence length for the model
:param prompt: is the starting prompt
"""
# Tokenize the `prompt` and make `n_samples` copies of it
data = torch.tile(torch.tensor(tokenizer.encode(prompt))[None, :], (n_samples, 1))
# Collect output for printing
logs = [[(prompt, Text.meta)] for _ in range(n_samples)]
# Sample `n_tokens`
for i in monit.iterate('Sample', n_tokens):
# Truncate the data to the maximum sequence length
data = data[-seq_len:]
# Get the model output. The 'logits' has shape `[batch_size, seq_len, n_tokens]`
logits = model(data)[0]
# Get the `logits` of the last token
logits = logits[:, -1]
# Sample from the `logits`
res = sampler(logits)
# Add the sampled token to the data
data = torch.cat([data, res[:, None]], dim=1)
# Decode and add the sampled token for logging
for j in range(n_samples):
logs[j] += [('' + tokenizer.decode(res[j]), Text.value)]
# Print the sampled outputs
for j in range(n_samples):
logger.log(logs[j])
def main():
"""
### Try different sampling techniques
"""
# Load the model and tokenizer
with monit.section('Load tokenizer/model'):
tokenizer = GPT2Tokenizer.from_pretrained('gpt2', cache_dir=lab.get_data_path() / 'cache')
model = GPT2LMHeadModel.from_pretrained('gpt2', cache_dir=lab.get_data_path() / 'cache')
# Set the model to eval mode
model.eval()
# Prompts to use for sampling
prompt = 'I saw an interesting dream last night. '
# [Greedy Sampling](greedy.html)
with monit.section('greedy'):
sample(model, tokenizer, GreedySampler(), 4, 32, 128, prompt)
# [Temperature Sampling](temperature.html)
with monit.section('temperature=1.'):
sample(model, tokenizer, TemperatureSampler(1.), 4, 32, 128, prompt)
with monit.section('temperature=.1'):
sample(model, tokenizer, TemperatureSampler(.1), 4, 32, 128, prompt)
with monit.section('temperature=10.'):
sample(model, tokenizer, TemperatureSampler(10.), 4, 32, 128, prompt)
# [Top-k Sampling](top_k.html)
with monit.section('top_k=5'):
sample(model, tokenizer, TopKSampler(2, TemperatureSampler(1.)), 4, 32, 128, prompt)
# [Nucleus Sampling](nucleus.html)
with monit.section('nucleus p=.95'):
sample(model, tokenizer, NucleusSampler(0.95, TemperatureSampler(1.)), 4, 32, 128, prompt)
with monit.section('nucleus p=.1'):
sample(model, tokenizer, NucleusSampler(0.1, TemperatureSampler(1.)), 4, 32, 128, prompt)
#
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/sampling/temperature.py | labml_nn/sampling/temperature.py | """
---
title: Sampling from Language Models with Temperature
summary: A PyTorch implementation of sampling from language models with temperature.
---
# Sampling from Language Models with Temperature
Here we sample from the following probability distribution where $V$ is the vocabulary,
$u_{1:|V|}$ are the logits of the distribution and T is the temperature:
$$P(x_i=V_l | x_{1:i-1}) = \frac{\exp(\frac{u_l}{T})}{\sum_j \exp(\frac{u_j}{T})}$$
$T = 1$ is normal random sampling.
Here's an [experiment](experiment.html) that uses these sampling techniques.
"""
import torch
from torch.distributions import Categorical
from labml_nn.sampling import Sampler
class TemperatureSampler(Sampler):
"""
## Sampler with Temperature
"""
def __init__(self, temperature: float = 1.0):
"""
:param temperature: is the temperature to sample with
"""
self.temperature = temperature
def __call__(self, logits: torch.Tensor):
"""
Sample from logits
"""
# Create a categorical distribution with temperature adjusted logits
dist = Categorical(logits=logits / self.temperature)
# Sample
return dist.sample()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/sampling/greedy.py | labml_nn/sampling/greedy.py | """
---
title: Greedy Sampling
summary: A PyTorch implementation of greedy sampling from language models.
---
# Greedy Sampling
Here we sample the most likely token from the distribution of logits.
Here's an [experiment](experiment.html) that uses these sampling techniques.
"""
import torch
from labml_nn.sampling import Sampler
class GreedySampler(Sampler):
def __call__(self, logits: torch.Tensor):
"""
Sample the most likely token from the distribution of logits
"""
return logits.argmax(dim=-1)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/sampling/__init__.py | labml_nn/sampling/__init__.py | """
---
title: Sampling Techniques for Language Models
summary: >
A set of PyTorch implementations/tutorials of sampling techniques for language models.
---
# Sampling Techniques for Language Models
* [Greedy Sampling](greedy.html)
* [Temperature Sampling](temperature.html)
* [Top-k Sampling](top_k.html)
* [Nucleus Sampling](nucleus.html)
Here's an [experiment](experiment.html) that uses these sampling techniques.
"""
import torch
class Sampler:
"""
### Sampler base class
"""
def __call__(self, logits: torch.Tensor) -> torch.Tensor:
"""
### Sample from logits
:param logits: are the logits of the distribution of shape `[..., n_tokens]`
"""
raise NotImplementedError()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/sampling/experiment_tiny.py | labml_nn/sampling/experiment_tiny.py | from typing import Tuple
import torch
from labml import experiment, monit
from labml import logger
from labml.logger import Text
from labml_nn.helpers.datasets import TextDataset
from labml_nn.sampling import Sampler
from labml_nn.sampling.greedy import GreedySampler
from labml_nn.sampling.nucleus import NucleusSampler
from labml_nn.sampling.temperature import TemperatureSampler
from labml_nn.sampling.top_k import TopKSampler
from labml_nn.transformers.basic.autoregressive_experiment import Configs, AutoregressiveTransformer
def get_model_dataset(run_uuid: str) -> Tuple[AutoregressiveTransformer, TextDataset]:
experiment.evaluate()
conf = Configs()
experiment.configs(conf, experiment.load_configs(run_uuid))
experiment.load(run_uuid)
experiment.add_pytorch_models({'model': conf.model})
experiment.start()
return conf.model, conf.text
def sample(model, ds, sampler: Sampler, n_samples: int, n_tokens: int, seq_len: int, prompt: str):
with torch.no_grad():
data = torch.tile(ds.text_to_i(prompt)[:, None], (1, n_samples))
# Collect output for printing
logs = [[(prompt, Text.meta)] for _ in range(n_samples)]
# Sample 25 tokens
for i in monit.iterate('Sample', n_tokens):
# Tokenize the prompt
data = data[-seq_len:]
# Get the model output
logits, *_ = model(data)
logits = logits[-1]
# Get the model prediction (greedy)
res = sampler(logits)
data = torch.cat([data, res[None, :]], dim=0)
# Add the prediction for logging
for j in range(n_samples):
logs[j] += [('' + ds.itos[res[j]], Text.value)]
# Print the sampled output
for j in range(n_samples):
logger.log(logs[j])
def main():
model, ds = get_model_dataset('074d4004cc6b11ecad7a0242ac1c0002')
model.eval()
with monit.section('greedy'):
sample(model, ds, GreedySampler(), 4, 32, 128, 'It is')
with monit.section('temperature=1.'):
sample(model, ds, TemperatureSampler(1.), 4, 32, 128, 'It is')
with monit.section('temperature=.1'):
sample(model, ds, TemperatureSampler(.1), 4, 32, 128, 'It is')
with monit.section('temperature=10.'):
sample(model, ds, TemperatureSampler(10.), 4, 32, 128, 'It is')
with monit.section('top_k=5'):
sample(model, ds, TopKSampler(2, TemperatureSampler(1.)), 4, 32, 128, 'It is')
with monit.section('nucles p=.95'):
sample(model, ds, NucleusSampler(0.95, TemperatureSampler(1.)), 4, 32, 128, 'It is')
with monit.section('nucles p=.95'):
sample(model, ds, NucleusSampler(0.1, TemperatureSampler(1.)), 4, 32, 128, 'It is')
if __name__ == '__main__':
main()
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/lora/experiment.py | labml_nn/lora/experiment.py | """
---
title: Finetune GPT-2 with LoRA
summary: This is training code with notes for fine-tuning pre-trained GPT-2 model with LoRA.
---
# Finetune [GPT-2](gpt2.html) with [LoRA](index.html)
Here's a Colab notebook for training a feedback transformer on Tiny Shakespeare dataset.
[](https://colab.research.google.com/github/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/lora/experiment.ipynb)
"""
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader, TensorDataset
from transformers import AutoTokenizer, AutoModelForCausalLM
from labml import lab, monit, tracker
from labml.configs import BaseConfigs, option
from labml.utils.download import download_file
from labml_nn.helpers.device import DeviceConfigs
from labml_nn.lora.gpt2 import GPTModel
class Trainer(BaseConfigs):
    """
    ## Trainer configurations and the training loop

    Holds the GPT-2 architecture hyper-parameters, LoRA settings, and the
    fine-tuning loop. The default configs can and will be over-ridden when
    we start the experiment.
    """
    # Device to train on (auto-selected by `DeviceConfigs`)
    device: torch.device = DeviceConfigs()

    # GPT-2 configs (these match the 124M-parameter `gpt2` checkpoint)
    layer_norm_epsilon: float = 1e-05
    d_model: int = 768
    n_layers: int = 12
    n_heads: int = 12
    n_positions: int = 1024
    vocab_size: int = 50257

    # Training configs
    epochs: int = 10
    batch_size: int = 32
    learning_rate: float = 1e-4
    context_len: int = 512

    # LoRA rank
    lora_r: int = 32

    # Dataset (the string names a config option resolved by `@option(Trainer.text)`)
    text: TensorDataset = "tiny_shakespeare"
    # Huggingface tokenizer
    # NOTE(review): this loads (and on first use downloads) the tokenizer at
    # class-definition time, i.e. on import of this module.
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    # [GPT2 model](gpt2.html)
    model: GPTModel
    # Optimizer
    optimizer: torch.optim.Adam
    # Cross entropy loss
    loss_func = torch.nn.CrossEntropyLoss()
    # Dataloader
    data_loader: DataLoader

    def _load_pretrained_weights(self):
        """
        ### Load pre-trained [GPT-2 from huggingface](https://huggingface.co/openai-community/gpt2)

        Renames the huggingface parameter keys to this implementation's names,
        transposes the Conv1D weights, and loads them into `self.model`.
        """
        # Load the huggingface model and get the parameters
        hf_model = AutoModelForCausalLM.from_pretrained("gpt2")
        state_dict = hf_model.state_dict()

        # Transformer embedding and prediction layer parameter mapping (`hf: ours`)
        mapping = {
            'transformer.wte.weight': 'token_embedding.weight',
            'transformer.wpe.weight': 'position_embedding.weight',
            'transformer.ln_f.weight': 'final_norm.weight',
            'transformer.ln_f.bias': 'final_norm.bias',
            'lm_head.weight': 'lm_head.weight'
        }

        # Mapping (`hf: ours`) of decoder layers
        # NOTE(review): hard-codes 12 layers rather than using `self.n_layers`;
        # consistent with the `gpt2` checkpoint but worth confirming if reused.
        for i in range(12):
            mapping[f'transformer.h.{i}.ln_1.weight'] = f'blocks.{i}.attn_norm.weight'
            mapping[f'transformer.h.{i}.ln_1.bias'] = f'blocks.{i}.attn_norm.bias'
            mapping[f'transformer.h.{i}.attn.c_attn.weight'] = f'blocks.{i}.attn.qkv_projection.weight'
            mapping[f'transformer.h.{i}.attn.c_attn.bias'] = f'blocks.{i}.attn.qkv_projection.bias'
            mapping[f'transformer.h.{i}.attn.c_proj.weight'] = f'blocks.{i}.attn.output_projection.weight'
            mapping[f'transformer.h.{i}.attn.c_proj.bias'] = f'blocks.{i}.attn.output_projection.bias'
            mapping[f'transformer.h.{i}.ln_2.weight'] = f'blocks.{i}.ffn_norm.weight'
            mapping[f'transformer.h.{i}.ln_2.bias'] = f'blocks.{i}.ffn_norm.bias'
            mapping[f'transformer.h.{i}.mlp.c_fc.weight'] = f'blocks.{i}.ffn.linear_in.weight'
            mapping[f'transformer.h.{i}.mlp.c_fc.bias'] = f'blocks.{i}.ffn.linear_in.bias'
            mapping[f'transformer.h.{i}.mlp.c_proj.weight'] = f'blocks.{i}.ffn.linear_out.weight'
            mapping[f'transformer.h.{i}.mlp.c_proj.bias'] = f'blocks.{i}.ffn.linear_out.bias'

        # Move the parameters based on mapping
        new_state_dict = {}
        for old_key, new_key in mapping.items():
            if old_key in state_dict:
                new_state_dict[new_key] = state_dict[old_key]

        # GPT-2 hugging face uses 1D Convolution layers. We need to transpose those weights since we use linear layers
        convo_layers = ([f'blocks.{i}.ffn.linear_in.weight' for i in range(12)] +
                        [f'blocks.{i}.ffn.linear_out.weight' for i in range(12)] +
                        [f'blocks.{i}.attn.qkv_projection.weight' for i in range(12)] +
                        [f'blocks.{i}.attn.output_projection.weight' for i in range(12)])

        for layer in convo_layers:
            new_state_dict[layer] = torch.transpose(new_state_dict[layer], 0, 1)

        # Load out model. We use `strict = False` because the state does not have LoRA weights
        missing_keys, unexpected_keys = self.model.load_state_dict(new_state_dict, strict=False)

        # make sure that only lora weights are not loaded
        assert all('lora' in key for key in missing_keys)
        assert not unexpected_keys

    def initialize(self):
        """
        ### Initialize the model, optimizer and dataloader

        Must be called before `run`.
        """
        # Initialize the [GPT2 model](gpt2.html)
        self.model = GPTModel(
            layer_norm_epsilon=self.layer_norm_epsilon,
            d_model=self.d_model,
            n_layers=self.n_layers,
            n_heads=self.n_heads,
            n_positions=self.n_positions,
            vocab_size=self.vocab_size,
            r=self.lora_r,
        )
        self.model.to(self.device)
        # Load pre-trained model weights
        self._load_pretrained_weights()

        # Initialize the optimizer
        # NOTE(review): optimizes *all* parameters; frozen weights have
        # `requires_grad = False`, so only the LoRA matrices receive updates.
        self.optimizer = Adam(self.model.parameters(), lr=self.learning_rate)
        # Initialize the data loader
        self.data_loader = DataLoader(self.text, batch_size=self.batch_size, shuffle=True)

    def run(self):
        """
        ### Training loop

        Standard next-token-prediction training: feed tokens `[0, n-1)` and
        predict tokens `[1, n)`.
        """
        for _ in monit.loop(self.epochs):
            # `inputs` has shape `[batch_size, seq_len]`
            for (inputs,) in monit.iterate('Train', self.data_loader):
                # Move `inputs` to device
                inputs = inputs.to(self.device)
                # Call the model, with the all but the last token
                logits = self.model(inputs[:, :-1])
                # Get cross entropy loss between shifted targets and predictions
                loss = self.loss_func(logits.reshape(-1, logits.shape[-1]), inputs[:, 1:].reshape(-1))

                # Make gradients 0
                self.optimizer.zero_grad()
                # Compute gradients
                loss.backward()
                # Optimize
                self.optimizer.step()

                # Log the loss
                tracker.save({'loss': loss})
                tracker.add_global_step()
            #
            tracker.new_line()
@option(Trainer.text)
def tiny_shakespeare(c: Trainer):
    """
    ### Tiny Shakespeare dataset

    Downloads the text on first use, tokenizes it, and packs the token
    stream into fixed-length rows of `context_len` tokens, discarding the
    trailing remainder that would not fill a whole batch.
    """
    # Cached location of the raw text
    dataset_path = lab.get_data_path() / 'tiny_shakespeare.txt'
    # Fetch it from the url if not present
    if not dataset_path.exists():
        download_file("https://raw.githubusercontent.com/karpathy/char-rnn/master/data/tinyshakespeare/input.txt",
                      dataset_path)

    # Read the raw text
    raw_text = dataset_path.read_text(encoding='utf-8')
    # Tokenize with the huggingface tokenizer
    token_ids = c.tokenizer.encode(raw_text)

    # Keep only whole multiples of `batch_size * context_len` tokens
    tokens_per_batch = c.batch_size * c.context_len
    usable = (len(token_ids) // tokens_per_batch) * tokens_per_batch

    # Reshape into `[n_samples, context_len]`
    input_ids = torch.tensor(token_ids[:usable]).view(-1, c.context_len)
    #
    return TensorDataset(input_ids)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/lora/gpt2.py | labml_nn/lora/gpt2.py | """
---
title: GPT-2 with LoRA
summary: GPT-2 implementation with LoRA modules
---
# GPT-2 with [LoRA modules](index.html)
Here's [the training code](experiment.html) for training a GPT2 model with LoRA
on Tiny Shakespeare dataset.
"""
import torch
import torch.nn as nn
from labml_nn.lora import Linear, Embedding
class FFN(nn.Module):
    """
    ### Feedforward Network

    Two LoRA linear layers with a GELU activation in between:
    expand to `d_ff`, activate, project back to `d_model`.
    """

    def __init__(self, d_model: int, d_ff: int, r: int):
        """
        :param d_model: is the number of dimensions
        :param d_ff: is the size of the hidden dimension
        :param r: is the lora rank
        """
        super().__init__()
        # Expansion and projection layers, plus the activation between them
        self.linear_in = Linear(d_model, d_ff, r=r, bias=True)
        self.linear_out = Linear(d_ff, d_model, r=r, bias=True)
        self.act = nn.GELU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        :param x: is the embeddings tensor with shape `[batch_size, seq_len, d_model]`
        """
        # Expand, apply GELU, project back
        return self.linear_out(self.act(self.linear_in(x)))
class MultiHeadAttention(nn.Module):
    """
    ### Multi-Head Attention

    Causal multi-head self-attention with LoRA projections for the fused
    QKV transform and the output transform.
    """

    def __init__(self, d_model: int, n_heads: int, r: int):
        """
        :param d_model: is the number of dimensions in the embeddings
        :param n_heads: is the number of heads
        :param r: is the lora rank
        """
        super().__init__()
        self.d_model = d_model
        self.n_heads = n_heads
        # Per-head dimensionality
        self.d_head = d_model // n_heads
        # One projection produces query, key and value together
        self.qkv_projection = Linear(d_model, d_model * 3, r=r, bias=True)
        # Projection applied after merging the heads back
        self.output_projection = Linear(d_model, d_model, r=r, bias=True)

    def _split_heads(self, x: torch.Tensor):
        """
        Reshape `[batch_size, seq_len, d_model]` to `[batch_size, head, seq_len, d_head]`.
        """
        # Break the last dimension into `[n_heads, d_head]`, then move the
        # head dimension in front of the sequence dimension
        reshaped = x.view(*x.shape[:-1], self.n_heads, self.d_head)
        return reshaped.permute(0, 2, 1, 3)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        :param x: is the embeddings tensor with shape `[batch_size, seq_len, d_model]`
        """
        batch_size, seq_length, _ = x.shape

        # Project once and split into query, key and value
        q, k, v = self.qkv_projection(x).split(self.d_model, dim=-1)
        # Each becomes `[batch_size, head, seq_length, d_head]`
        q, k, v = (self._split_heads(t) for t in (q, k, v))

        # Fused causal attention kernel
        attn_output = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True)

        # Merge heads back to `[batch_size, seq_len, d_model]`
        attn_output = attn_output.permute(0, 2, 1, 3).reshape(batch_size, seq_length, self.d_model)

        # Final projection
        return self.output_projection(attn_output)
class Block(nn.Module):
    """
    ### Decoder block

    Pre-norm transformer block: an attention sub-layer and a feed-forward
    sub-layer, each wrapped in a residual connection.
    """

    def __init__(self, d_model: int, n_heads: int, layer_norm_epsilon: float, r: int):
        """
        :param d_model: is the number of dimensions in the embeddings
        :param n_heads: is the number of heads
        :param layer_norm_epsilon: is the layer norm epsilon
        :param r: is the lora rank
        """
        super().__init__()
        # Pre-normalization and attention
        self.attn_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
        self.attn = MultiHeadAttention(d_model, n_heads, r)
        # Pre-normalization and feed-forward (hidden size is 4x the model size)
        self.ffn_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
        self.ffn = FFN(d_model, d_model * 4, r)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        :param x: is the embeddings tensor with shape `[batch_size, seq_len, d_model]`
        """
        # Residual attention sub-layer
        attn_out = self.attn(self.attn_norm(x))
        x = x + attn_out
        # Residual feed-forward sub-layer
        ffn_out = self.ffn(self.ffn_norm(x))
        return x + ffn_out
class GPTModel(nn.Module):
    """
    ## GPT2 Model

    Decoder-only transformer with LoRA-augmented embeddings, attention and
    FFN layers. Attribute names (`token_embedding`, `blocks.{i}.*`, ...)
    are load-bearing: the experiment's weight loader maps huggingface
    checkpoint keys onto them by name.
    """

    def __init__(self, *, d_model: int,
                 n_heads: int, n_layers: int,
                 n_positions: int,
                 layer_norm_epsilon: float,
                 vocab_size: int, r: int):
        """
        :param d_model: is the number of dimensions in the embeddings
        :param n_heads: is the number of attention heads
        :param n_layers: is the number of decoder layers
        :param n_positions: is the number of positional embeddings
        :param layer_norm_epsilon: is the layer norm epsilon
        :param vocab_size: is the vocabulary size
        :param r: is the lora rank
        """
        super().__init__()

        # Token and absolute positional embeddings
        self.token_embedding = Embedding(vocab_size, d_model, r=r)
        self.position_embedding = Embedding(n_positions, d_model, r=r)

        # Decoder blocks
        self.blocks = nn.ModuleList([Block(d_model, n_heads, layer_norm_epsilon, r=r)
                                     for _ in range(n_layers)])

        # Final layer norm
        self.final_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
        # Projection layer to logit space
        self.lm_head = Linear(d_model, vocab_size, r=r, bias=False)

    def forward(self, input_ids: torch.Tensor):
        """
        :param input_ids: has shape `[batch_size, seq_len]`
        :return: logits with shape `[batch_size, seq_len, vocab_size]`
        """
        batch_size, seq_len = input_ids.shape

        # Get token embeddings
        token_embeddings = self.token_embedding(input_ids)
        # Get position ids `[1, seq_len]`, broadcast across the batch
        position_ids = torch.arange(seq_len, device=input_ids.device)[None, :]
        # Get position embeddings
        position_embeddings = self.position_embedding(position_ids)

        # Add position embeddings
        x = token_embeddings + position_embeddings

        # Run through transformer blocks
        for block in self.blocks:
            x = block(x)

        # Final normalization
        x = self.final_norm(x)
        # Get logits from projection layer
        return self.lm_head(x)
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
labmlai/annotated_deep_learning_paper_implementations | https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/25e169843e93980faa1d0468ea4df42ca7463382/labml_nn/lora/__init__.py | labml_nn/lora/__init__.py | """
---
title: Low-Rank Adaptation (LoRA)
summary: >
Annotated implementation of RoRA from paper
LoRA: Low-Rank Adaptation of Large Language Models
---
# Low-Rank Adaptation (LoRA)
This is an implementation of
[Low-Rank Adaptation (LoRA)](https://arxiv.org/abs/2106.09685)
in [PyTorch](https://pytorch.org).
Low-Rank Adaptation (LoRA) freezes pre-trained model weights and injects
trainable rank decomposition matrices into each layer of the transformer.
This makes it possible to efficiently fine-tune large language models by
reducing trainable parameters by a large factor.
Here's [the training code](experiment.html) for training a GPT2 model with LoRA
on Tiny Shakespeare dataset.
"""
import torch
import torch.nn as nn
class Linear(nn.Module):
    r"""
    ## LoRA Linear Layer

    A frozen pre-trained linear layer $W_0 \in \mathbb{R}^{d \times k}$ plus a
    trainable low-rank update $\Delta W = BA$ with
    $B \in \mathbb{R}^{d \times r}$, $A \in \mathbb{R}^{r \times k}$ and
    $r \ll \min(d, k)$. Only $A$ and $B$ are trainable; $\Delta W$ starts at
    zero, and the update is scaled by $\frac{\alpha}{r}$.
    """

    def __init__(self, in_features: int, out_features: int, bias: bool,
                 r: int, alpha: int = None):
        r"""
        :param in_features: is the number of input features of the linear layer
        :param out_features: is the number of output features of the linear layer
        :param bias: is a flag indicating if there is a bias parameter
        :param r: is the rank of the decomposition $r$
        :param alpha: is the scaling factor $\alpha$; defaults to $r$ so the
            scaling $\frac{\alpha}{r}$ becomes $1$
        """
        super().__init__()
        # Default the numerator to the rank
        alpha = r if alpha is None else alpha
        # scaling factor $\frac{\alpha}{r}$
        self.scaling = alpha / r

        # Frozen pre-trained weight $W_0$
        self.weight = nn.Parameter(torch.empty((out_features, in_features)))
        self.weight.requires_grad = False

        if bias:
            # Frozen bias parameter $b_0$
            frozen_bias = nn.Parameter(torch.empty(out_features))
            frozen_bias.requires_grad = False
            self.bias = frozen_bias
        else:
            # No bias parameter
            self.bias = None

        # Trainable factors, kept transposed: $A \in \mathbb{R}^{r \times k}$
        # and $B \in \mathbb{R}^{d \times r}$
        self.lora_a = nn.Parameter(torch.empty((r, in_features)))
        self.lora_b = nn.Parameter(torch.empty((out_features, r)))

        with torch.no_grad():
            # $A$ gets the usual linear-layer initialization
            nn.init.kaiming_uniform_(self.lora_a, a=5 ** 0.5)
            # $B = 0$ so the update $\Delta W = BA$ vanishes at initialization
            nn.init.zeros_(self.lora_b)

    def forward(self, x: torch.Tensor):
        # Frozen path $x W_0^T + b_0$
        frozen = nn.functional.linear(x, self.weight, bias=self.bias)
        # Low-rank path $x A^T B^T$
        delta = x @ self.lora_a.T @ self.lora_b.T
        # Combine with the $\frac{\alpha}{r}$ scaling
        return frozen + delta * self.scaling
class Embedding(nn.Module):
    r"""
    ## LoRA Embedding Layer

    A frozen pre-trained embedding table $W_0$ plus a trainable low-rank
    update $\Delta W = BA$, analogous to the LoRA linear layer.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int,
                 r: int, alpha: int = None):
        r"""
        :param num_embeddings: is the number of embeddings
        :param embedding_dim: is the number embedding dimensions
        :param r: is the rank of the decomposition $r$
        :param alpha: is the scaling factor $\alpha$; defaults to $r$ so the
            scaling $\frac{\alpha}{r}$ becomes $1$
        """
        super().__init__()
        # Default the numerator to the rank
        alpha = r if alpha is None else alpha
        # scaling factor $\frac{\alpha}{r}$
        self.scaling = alpha / r

        # Frozen pre-trained embedding weights $W_0^T$
        self.weight = nn.Parameter(torch.empty((num_embeddings, embedding_dim)))
        self.weight.requires_grad = False

        # Trainable factors $A \in \mathbb{R}^{r \times k}$ and
        # $B \in \mathbb{R}^{d \times r}$
        self.lora_a = nn.Parameter(torch.empty((r, num_embeddings)))
        self.lora_b = nn.Parameter(torch.empty((embedding_dim, r)))

        with torch.no_grad():
            # $A$ is normally distributed, matching `nn.Embedding`'s default
            nn.init.normal_(self.lora_a)
            # $B = 0$ so the update vanishes at initialization
            nn.init.zeros_(self.lora_b)

    def forward(self, x: torch.Tensor):
        # Frozen lookup $\text{onehot}(x) W_0$
        frozen = nn.functional.embedding(x, self.weight)
        # Low-rank lookup $\text{onehot}(x) A^T B^T$
        delta = nn.functional.embedding(x, self.lora_a.T) @ self.lora_b.T
        # Combine with the $\frac{\alpha}{r}$ scaling
        return frozen + delta * self.scaling
| python | MIT | 25e169843e93980faa1d0468ea4df42ca7463382 | 2026-01-04T14:38:23.238891Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/support_ticket_agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/support_ticket_agent.py | """
OpenAI Agents SDK Tutorial 2: Structured Output Agent - Support Tickets
This module demonstrates how to create an agent that returns structured data
using Pydantic models for support ticket creation.
"""
import os
from typing import List, Optional
from enum import Enum
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from agents import Agent, Runner
# Load environment variables
load_dotenv()
class Priority(str, Enum):
    """Severity levels a support ticket can be assigned.

    Subclasses `str` so members serialize as their plain string values.
    """
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
class Category(str, Enum):
    """Departments a support ticket can be routed to.

    Subclasses `str` so members serialize as their plain string values.
    """
    TECHNICAL = "technical"
    BILLING = "billing"
    ACCOUNT = "account"
    PRODUCT = "product"
    GENERAL = "general"
class SupportTicket(BaseModel):
    """Structured support ticket produced by the agent.

    The field descriptions are part of the schema the LLM sees, so they are
    kept verbatim.
    """
    # One-line summary of the issue
    title: str = Field(description="A concise summary of the issue")
    # Full problem description
    description: str = Field(description="Detailed description of the problem")
    # Severity and routing
    priority: Priority = Field(description="The ticket priority level")
    category: Category = Field(description="The department this ticket belongs to")
    customer_name: Optional[str] = Field(
        description="Customer name if mentioned",
        default=None
    )
    steps_to_reproduce: Optional[List[str]] = Field(
        description="Steps to reproduce the issue (for technical problems)",
        default=None
    )
    estimated_resolution_time: str = Field(
        description="Estimated time to resolve this issue"
    )
    # Fix: use `default_factory` instead of a mutable `default=[]`
    # (pydantic best practice; behavior-compatible)
    urgency_keywords: List[str] = Field(
        description="Keywords that indicate urgency or importance",
        default_factory=list
    )
# Create the support ticket agent
support_ticket_agent = Agent(
name="Support Ticket Creator",
instructions="""
You are a support ticket creation assistant that converts customer complaints
and issues into well-structured support tickets.
Based on customer descriptions, create structured support tickets with:
- Clear, concise titles
- Detailed problem descriptions
- Appropriate priority levels (low/medium/high/critical)
- Correct categories (technical/billing/account/product/general)
- Customer names if mentioned
- Steps to reproduce for technical issues
- Realistic resolution time estimates
- Keywords that indicate urgency
Priority Guidelines:
- CRITICAL: System down, security issues, data loss
- HIGH: Core features not working, urgent business impact
- MEDIUM: Important features affected, moderate business impact
- LOW: Minor issues, feature requests, general questions
Category Guidelines:
- TECHNICAL: App crashes, login issues, performance problems
- BILLING: Payment issues, subscription problems, invoice questions
- ACCOUNT: Profile issues, access problems, account settings
- PRODUCT: Feature requests, product feedback, functionality questions
- GENERAL: General inquiries, documentation, training
Resolution Time Guidelines:
- Critical: "1-4 hours"
- High: "4-24 hours"
- Medium: "1-3 business days"
- Low: "3-7 business days"
Always return a valid JSON object matching the SupportTicket schema.
""",
output_type=SupportTicket
)
def demonstrate_support_tickets():
    """Demonstrate the support ticket agent with various examples.

    Runs four canned complaints (billing, technical, account, low-priority)
    through the agent synchronously and pretty-prints the structured ticket.
    Per-case errors are caught so one failure does not abort the demo.
    """
    print("π― OpenAI Agents SDK - Tutorial 2: Support Ticket Agent")
    print("=" * 60)
    print()

    # Test cases with different types of issues
    test_cases = [
        {
            "description": "Billing Issue",
            "complaint": "Hi, I'm John Smith and I noticed my credit card was charged twice for last month's premium subscription. I only signed up once but see two $29.99 charges on my statement from January 15th. This needs to be resolved quickly as it's affecting my budget."
        },
        {
            "description": "Technical Issue",
            "complaint": "The mobile app keeps crashing whenever I try to upload photos. I'm using an iPhone 14 with iOS 17. Steps: 1) Open app 2) Go to gallery 3) Select photo 4) Tap upload 5) App crashes immediately. This is blocking my work completely!"
        },
        {
            "description": "Account Issue",
            "complaint": "I can't log into my account. My username is mary.johnson@email.com and I keep getting 'invalid credentials' even though I'm sure my password is correct. I've tried resetting it but never received the email. I need access urgently for a client meeting tomorrow."
        },
        {
            "description": "Low Priority Request",
            "complaint": "Hey there! I was wondering if you could add a dark mode feature to the app? It would be really nice to have, especially for us night owls. Not urgent at all, just a suggestion. Thanks!"
        }
    ]

    for i, test_case in enumerate(test_cases, 1):
        print(f"=== Test Case {i}: {test_case['description']} ===")
        print(f"Customer Complaint:")
        print(f'"{test_case["complaint"]}"')
        print()

        try:
            # Generate structured support ticket (blocking call)
            result = Runner.run_sync(support_ticket_agent, test_case["complaint"])
            # `final_output` is a `SupportTicket` instance (enforced by `output_type`)
            ticket = result.final_output

            print("Generated Support Ticket:")
            print(f"π Title: {ticket.title}")
            print(f"π·οΈ Category: {ticket.category.value.title()}")
            print(f"β‘ Priority: {ticket.priority.value.title()}")
            # Optional fields are only printed when present
            if ticket.customer_name:
                print(f"π€ Customer: {ticket.customer_name}")
            print(f"π Description: {ticket.description}")
            if ticket.steps_to_reproduce:
                print(f"π Steps to Reproduce:")
                for step in ticket.steps_to_reproduce:
                    print(f"   β’ {step}")
            print(f"β±οΈ Estimated Resolution: {ticket.estimated_resolution_time}")
            if ticket.urgency_keywords:
                print(f"π¨ Urgency Keywords: {', '.join(ticket.urgency_keywords)}")

        except Exception as e:
            print(f"β Error: {e}")

        print()
        print("-" * 60)
        print()
def interactive_mode():
    """Interactive mode for creating support tickets.

    Reads complaints from stdin in a loop until the user types
    'quit'/'exit'/'bye'; each complaint is turned into a ticket and printed.
    """
    print("=== Interactive Support Ticket Creation ===")
    print("Describe a customer issue and I'll create a structured support ticket.")
    print("Type 'quit' to exit.")
    print()

    while True:
        complaint = input("Customer Complaint: ").strip()

        # Exit keywords end the loop
        if complaint.lower() in ['quit', 'exit', 'bye']:
            print("Goodbye!")
            break

        # Ignore empty input
        if not complaint:
            continue

        try:
            print("\nGenerating support ticket...")
            result = Runner.run_sync(support_ticket_agent, complaint)
            ticket = result.final_output

            print("\n" + "="*50)
            print("π SUPPORT TICKET CREATED")
            print("="*50)
            print(f"Title: {ticket.title}")
            print(f"Category: {ticket.category.value.title()}")
            print(f"Priority: {ticket.priority.value.title()}")
            if ticket.customer_name:
                print(f"Customer: {ticket.customer_name}")
            print(f"Description: {ticket.description}")
            if ticket.steps_to_reproduce:
                print("Steps to Reproduce:")
                for i, step in enumerate(ticket.steps_to_reproduce, 1):
                    print(f"  {i}. {step}")
            print(f"Estimated Resolution: {ticket.estimated_resolution_time}")
            if ticket.urgency_keywords:
                print(f"Urgency Keywords: {', '.join(ticket.urgency_keywords)}")
            print("="*50)
            print()

        except Exception as e:
            print(f"β Error: {e}")
            print()
def main():
    """Entry point: verify the API key is configured, then run the demos."""
    # Bail out early when no OpenAI key is configured
    if not os.environ.get("OPENAI_API_KEY"):
        print("β Error: OPENAI_API_KEY not found in environment variables")
        print("Please create a .env file with your OpenAI API key")
        return

    try:
        # Scripted demonstrations first, then the interactive loop
        demonstrate_support_tickets()
        interactive_mode()
    except Exception as e:
        print(f"β Error: {e}")
# Run the demo only when executed as a script, not on import
if __name__ == "__main__":
    main()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/product_review_agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/product_review_agent.py | """
OpenAI Agents SDK Tutorial 2: Structured Output Agent - Product Reviews
This module demonstrates extracting structured data from product reviews
using complex nested Pydantic models.
"""
import os
from typing import List, Optional
from enum import Enum
from dotenv import load_dotenv
from pydantic import BaseModel, Field, validator
from agents import Agent, Runner
# Load environment variables
load_dotenv()
class Sentiment(str, Enum):
    """Five-point sentiment scale for a review.

    Subclasses `str` so members serialize as their plain string values.
    """
    VERY_POSITIVE = "very_positive"
    POSITIVE = "positive"
    NEUTRAL = "neutral"
    NEGATIVE = "negative"
    VERY_NEGATIVE = "very_negative"
class ProductCategory(str, Enum):
    """Coarse product categories a review can be classified into.

    Subclasses `str` so members serialize as their plain string values.
    """
    ELECTRONICS = "electronics"
    CLOTHING = "clothing"
    HOME = "home"
    BOOKS = "books"
    FOOD = "food"
    BEAUTY = "beauty"
    SPORTS = "sports"
    AUTOMOTIVE = "automotive"
    OTHER = "other"
class ProductInfo(BaseModel):
    """Product information extracted from review.

    Field descriptions are part of the schema shown to the LLM; all fields
    except `category` are optional.
    """
    name: Optional[str] = Field(description="Product name if mentioned", default=None)
    category: ProductCategory = Field(description="Inferred product category")
    brand: Optional[str] = Field(description="Brand name if mentioned", default=None)
    price_mentioned: Optional[str] = Field(description="Price if mentioned in review", default=None)
class ReviewMetrics(BaseModel):
    """Quantitative review metrics.

    `ge`/`le` constraints are validated by pydantic on construction.
    """
    rating: int = Field(description="Star rating (1-5)", ge=1, le=5)
    sentiment: Sentiment = Field(description="Overall sentiment of the review")
    confidence_score: float = Field(description="Confidence in sentiment analysis (0-1)", ge=0, le=1)
    word_count: int = Field(description="Approximate word count of review", ge=0)
class ReviewAspects(BaseModel):
    """Specific aspects mentioned in the review.

    Every field is optional: an aspect is `None` when the review does not
    touch on it.
    """
    quality: Optional[str] = Field(description="Quality assessment if mentioned", default=None)
    value_for_money: Optional[str] = Field(description="Value assessment if mentioned", default=None)
    shipping: Optional[str] = Field(description="Shipping experience if mentioned", default=None)
    customer_service: Optional[str] = Field(description="Customer service experience if mentioned", default=None)
    ease_of_use: Optional[str] = Field(description="Usability assessment if mentioned", default=None)
class ProductReview(BaseModel):
    """Complete structured product review analysis.

    Aggregates the nested `ProductInfo`, `ReviewMetrics` and `ReviewAspects`
    models plus free-form insights. Field descriptions are part of the
    schema shown to the LLM.
    """
    product_info: ProductInfo
    metrics: ReviewMetrics
    aspects: ReviewAspects

    # Key insights
    # Fix: use `default_factory` instead of a mutable `default=[]`
    # (pydantic best practice; behavior-compatible)
    main_positives: List[str] = Field(description="Main positive points mentioned", default_factory=list)
    main_negatives: List[str] = Field(description="Main negative points mentioned", default_factory=list)
    would_recommend: Optional[bool] = Field(description="Whether reviewer would recommend", default=None)

    # Summary
    summary: str = Field(description="Brief summary of the review")
    key_phrases: List[str] = Field(description="Important phrases from the review", default_factory=list)

    @validator('key_phrases')
    def limit_key_phrases(cls, v):
        """Limit key phrases to maximum of 5."""
        # Slicing is already a no-op for short lists, so no length check needed
        return v[:5]
# Create the product review agent
product_review_agent = Agent(
name="Product Review Analyzer",
instructions="""
You are a product review analysis expert that extracts structured data
from customer product reviews.
Analyze the review text and extract:
PRODUCT INFO:
- Product name, brand, category, and price if mentioned
- Infer category from context if not explicitly stated
REVIEW METRICS:
- Star rating (1-5) based on review tone
- Sentiment classification (very_positive to very_negative)
- Confidence score for sentiment analysis
- Approximate word count
REVIEW ASPECTS:
- Quality, value for money, shipping, customer service, ease of use
- Only include aspects that are actually mentioned
KEY INSIGHTS:
- Main positive and negative points
- Whether they would recommend (if stated or implied)
- Brief summary and key phrases
RATING GUIDELINES:
- 5 stars: Excellent, highly satisfied, "amazing", "perfect"
- 4 stars: Good, satisfied, minor issues
- 3 stars: Okay, mixed feelings, "decent"
- 2 stars: Poor, unsatisfied, significant issues
- 1 star: Terrible, very unsatisfied, "worst"
SENTIMENT GUIDELINES:
- very_positive: Extremely enthusiastic, highly recommended
- positive: Generally satisfied, good experience
- neutral: Mixed or balanced opinion
- negative: Generally unsatisfied, disappointed
- very_negative: Extremely dissatisfied, angry
Always return a valid JSON object matching the ProductReview schema.
""",
output_type=ProductReview
)
def demonstrate_review_analysis():
    """Demonstrate the product review agent with various examples.

    Runs four canned reviews (positive, mixed, negative, neutral) through
    the agent synchronously and pretty-prints the structured analysis.
    Per-case errors are caught so one failure does not abort the demo.
    """
    print("π― OpenAI Agents SDK - Tutorial 2: Product Review Agent")
    print("=" * 60)
    print()

    # Test cases with different types of reviews
    test_reviews = [
        {
            "title": "Positive Electronics Review",
            "review": "This MacBook Pro M2 is absolutely incredible! The battery life lasts all day, the screen is gorgeous, and it's lightning fast. Worth every penny of the $2,499 I paid. Apple really knocked it out of the park. The build quality is premium and it handles video editing like a dream. Highly recommend to any creative professional!"
        },
        {
            "title": "Mixed Clothing Review",
            "review": "The Nike running shoes are decent for the price ($120). Comfortable for short runs but the sizing runs a bit small. Quality seems okay but not amazing. Shipping was fast though, arrived in 2 days. Customer service was helpful when I had questions. Would maybe recommend if you size up."
        },
        {
            "title": "Negative Food Review",
            "review": "Terrible experience with this organic coffee subscription. The beans taste stale and bitter, nothing like the description. Customer service ignored my complaints for weeks. Way overpriced at $35/month for this quality. Save your money and buy local. Will not be ordering again."
        },
        {
            "title": "Neutral Home Product Review",
            "review": "The IKEA desk lamp does its job. Easy to assemble and decent lighting for work. Not the brightest but sufficient. Build quality is what you'd expect for $25. The cord could be longer. It's an okay purchase, nothing special but functional."
        }
    ]

    for i, test_case in enumerate(test_reviews, 1):
        print(f"=== Review Analysis {i}: {test_case['title']} ===")
        print("Original Review:")
        print(f'"{test_case["review"]}"')
        print()

        try:
            # Analyze the review (blocking call); `final_output` is a
            # `ProductReview` instance enforced by `output_type`
            result = Runner.run_sync(product_review_agent, test_case["review"])
            analysis = result.final_output

            print("π STRUCTURED ANALYSIS:")
            print(f"π·οΈ Product: {analysis.product_info.name or 'Not specified'}")
            print(f"π’ Brand: {analysis.product_info.brand or 'Not specified'}")
            print(f"π± Category: {analysis.product_info.category.value.title()}")
            if analysis.product_info.price_mentioned:
                print(f"π° Price: {analysis.product_info.price_mentioned}")

            print(f"\nβ Rating: {analysis.metrics.rating}/5 stars")
            print(f"π Sentiment: {analysis.metrics.sentiment.value.replace('_', ' ').title()}")
            print(f"π― Confidence: {analysis.metrics.confidence_score:.1%}")
            print(f"π Word Count: ~{analysis.metrics.word_count}")

            # NOTE(review): the next print is garbled in the source (a
            # mis-encoded emoji split the line); preserved verbatim.
            if analysis.main_positives:
                print(f"\nβ
Positives: {', '.join(analysis.main_positives)}")
            if analysis.main_negatives:
                print(f"β Negatives: {', '.join(analysis.main_negatives)}")

            if analysis.would_recommend is not None:
                recommend_text = "Yes" if analysis.would_recommend else "No"
                print(f"π Would Recommend: {recommend_text}")

            print(f"\nπ Summary: {analysis.summary}")

            if analysis.key_phrases:
                print(f"π Key Phrases: {', '.join(analysis.key_phrases)}")

            # Show aspects that were mentioned
            aspects_mentioned = []
            if analysis.aspects.quality:
                aspects_mentioned.append(f"Quality: {analysis.aspects.quality}")
            if analysis.aspects.value_for_money:
                aspects_mentioned.append(f"Value: {analysis.aspects.value_for_money}")
            if analysis.aspects.shipping:
                aspects_mentioned.append(f"Shipping: {analysis.aspects.shipping}")
            if analysis.aspects.customer_service:
                aspects_mentioned.append(f"Service: {analysis.aspects.customer_service}")
            if analysis.aspects.ease_of_use:
                aspects_mentioned.append(f"Usability: {analysis.aspects.ease_of_use}")

            if aspects_mentioned:
                print(f"\nπ Specific Aspects: {' | '.join(aspects_mentioned)}")

        except Exception as e:
            print(f"β Error: {e}")

        print()
        print("-" * 60)
        print()
def interactive_mode():
    """Interactive mode for analyzing product reviews.

    Reads review text from stdin in a loop until the user types
    'quit'/'exit'/'bye'; each review is analyzed and printed.
    """
    print("=== Interactive Product Review Analysis ===")
    print("Paste a product review and I'll extract structured data from it.")
    print("Type 'quit' to exit.")
    print()

    while True:
        review_text = input("Product Review: ").strip()

        # Exit keywords end the loop
        if review_text.lower() in ['quit', 'exit', 'bye']:
            print("Goodbye!")
            break

        # Ignore empty input
        if not review_text:
            continue

        try:
            print("\nAnalyzing review...")
            result = Runner.run_sync(product_review_agent, review_text)
            analysis = result.final_output

            print("\n" + "="*50)
            print("π REVIEW ANALYSIS COMPLETE")
            print("="*50)

            # Product Information
            print("π·οΈ PRODUCT INFO:")
            print(f"   Name: {analysis.product_info.name or 'Not specified'}")
            print(f"   Brand: {analysis.product_info.brand or 'Not specified'}")
            print(f"   Category: {analysis.product_info.category.value.title()}")
            if analysis.product_info.price_mentioned:
                print(f"   Price: {analysis.product_info.price_mentioned}")

            # Metrics
            print(f"\nπ METRICS:")
            print(f"   Rating: {analysis.metrics.rating}/5 β")
            print(f"   Sentiment: {analysis.metrics.sentiment.value.replace('_', ' ').title()}")
            print(f"   Confidence: {analysis.metrics.confidence_score:.1%}")

            # Key Points
            # NOTE(review): the next print is garbled in the source (a
            # mis-encoded emoji split the line); preserved verbatim.
            if analysis.main_positives:
                print(f"\nβ
POSITIVES: {', '.join(analysis.main_positives)}")
            if analysis.main_negatives:
                print(f"\nβ NEGATIVES: {', '.join(analysis.main_negatives)}")

            # Summary
            print(f"\nπ SUMMARY: {analysis.summary}")
            print("="*50)
            print()

        except Exception as e:
            print(f"β Error: {e}")
            print()
def main():
    """Main function.

    Validates that an OpenAI credential is configured, then runs the
    scripted demonstrations followed by the interactive REPL.
    """
    # Check API key.
    # NOTE(review): assumes `os` is imported and a .env loader ran at module
    # import time — neither is visible in this chunk; confirm at file top.
    if not os.getenv("OPENAI_API_KEY"):
        print("β Error: OPENAI_API_KEY not found in environment variables")
        print("Please create a .env file with your OpenAI API key")
        return
    try:
        # Run demonstrations
        demonstrate_review_analysis()
        # Interactive mode
        interactive_mode()
    except Exception as e:
        # Catch-all boundary so the CLI exits with a message, not a traceback.
        print(f"β Error: {e}")


if __name__ == "__main__":
    main()
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_2_product_review_agent/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_2_product_review_agent/__init__.py | # Product Review Agent Package
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_2_product_review_agent/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_2_product_review_agent/agent.py | from typing import List, Optional
from enum import Enum
from agents import Agent
from pydantic import BaseModel, Field
class Sentiment(str, Enum):
    """Five-level review sentiment scale.

    The ``str`` mixin makes members compare equal to their string values and
    serialize cleanly to JSON for the structured-output schema.
    """
    VERY_POSITIVE = "very_positive"
    POSITIVE = "positive"
    NEUTRAL = "neutral"
    NEGATIVE = "negative"
    VERY_NEGATIVE = "very_negative"
class ProductReview(BaseModel):
    """Structured analysis extracted from one customer product review.

    Used as the agent's ``output_type``, so the model's response is
    validated against this schema.
    """
    product_name: Optional[str] = Field(description="Product name if mentioned", default=None)
    # Constrained to the 1-5 star range by pydantic validators.
    rating: int = Field(description="Star rating (1-5)", ge=1, le=5)
    sentiment: Sentiment = Field(description="Overall sentiment of the review")
    # default_factory=list instead of default=[]: builds a fresh list per
    # instance, the idiomatic pydantic way to declare mutable defaults.
    main_positives: List[str] = Field(description="Main positive points mentioned", default_factory=list)
    main_negatives: List[str] = Field(description="Main negative points mentioned", default_factory=list)
    would_recommend: Optional[bool] = Field(description="Whether reviewer would recommend", default=None)
    summary: str = Field(description="Brief summary of the review")
root_agent = Agent(
name="Product Review Analyzer",
instructions="""
You are a product review analysis expert that extracts structured data
from customer product reviews.
Analyze the review text and extract:
- Product name if mentioned
- Star rating (1-5) based on review tone
- Sentiment classification (very_positive to very_negative)
- Main positive and negative points
- Whether they would recommend (if stated or implied)
- Brief summary
RATING GUIDELINES:
- 5 stars: Excellent, highly satisfied, "amazing", "perfect"
- 4 stars: Good, satisfied, minor issues
- 3 stars: Okay, mixed feelings, "decent"
- 2 stars: Poor, unsatisfied, significant issues
- 1 star: Terrible, very unsatisfied, "worst"
IMPORTANT: Response must be valid JSON matching the ProductReview schema.
""",
output_type=ProductReview
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_1_support_ticket_agent/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_1_support_ticket_agent/__init__.py | # Support Ticket Agent Package
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_1_support_ticket_agent/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/2_structured_output_agent/2_1_support_ticket_agent/agent.py | from typing import List, Optional
from enum import Enum
from agents import Agent
from pydantic import BaseModel, Field
class Priority(str, Enum):
    """Ticket priority levels, lowest to highest urgency.

    The ``str`` mixin keeps values JSON-serializable for the output schema.
    """
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"
class SupportTicket(BaseModel):
    """Structured support ticket extracted from a customer complaint.

    Used as the agent's ``output_type``; each Field description guides the
    model's extraction for that attribute.
    """
    title: str = Field(description="A concise summary of the issue")
    description: str = Field(description="Detailed description of the problem")
    priority: Priority = Field(description="The ticket priority level")
    # Free-form string here (not an Enum) — the instructions enumerate the
    # expected values: technical/billing/account/product/general.
    category: str = Field(description="The department this ticket belongs to")
    # Optional: only populated for technical issues.
    steps_to_reproduce: Optional[List[str]] = Field(
        description="Steps to reproduce the issue (for technical problems)",
        default=None
    )
    estimated_resolution_time: str = Field(
        description="Estimated time to resolve this issue"
    )
root_agent = Agent(
name="Support Ticket Creator",
instructions="""
You are a support ticket creation assistant that converts customer complaints
into well-structured support tickets.
Based on customer descriptions, create structured support tickets with:
- Clear, concise titles
- Detailed problem descriptions
- Appropriate priority levels (low/medium/high/critical)
- Correct categories (technical/billing/account/product/general)
- Steps to reproduce for technical issues
- Realistic resolution time estimates
IMPORTANT: Response must be valid JSON matching the SupportTicket schema.
""",
output_type=SupportTicket
)
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/5_context_management/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/5_context_management/agent.py | from dataclasses import dataclass
from agents import Agent, RunContextWrapper, Runner, function_tool
@dataclass
class UserInfo:
    """Context object containing user information and session data.

    Passed to the runner via ``context=`` and surfaced to tools through
    ``RunContextWrapper``; it is shared, mutable state for one run.
    """
    # Display name used by the greeting/profile tools.
    name: str
    # Unique user identifier.
    uid: int
    # NOTE(review): annotated `dict` but defaults to None — the None is
    # normalized to {} in __post_init__ (a literal `= {}` default would be
    # rejected by @dataclass as mutable). `field(default_factory=dict)`
    # would express this directly; left unchanged to preserve the API.
    preferences: dict = None

    def __post_init__(self):
        # Guarantee `preferences` is always a dict so tools can .get()/index it.
        if self.preferences is None:
            self.preferences = {}
@function_tool
async def fetch_user_profile(wrapper: RunContextWrapper[UserInfo]) -> str:
    """Fetch detailed user profile information from the context.

    The UserInfo instance is injected by the runner (``context=``); only
    this tool's return string — not the object itself — reaches the LLM.
    """
    user = wrapper.context
    return f"User Profile: {user.name} (ID: {user.uid}), Preferences: {user.preferences}"
@function_tool
async def update_user_preference(wrapper: RunContextWrapper[UserInfo], key: str, value: str) -> str:
    """Update a user preference in the context.

    Mutates the shared UserInfo object in place, so the change is visible
    to later tool calls in the same run and to the caller afterwards.
    """
    user = wrapper.context
    user.preferences[key] = value
    return f"Updated {user.name}'s preference: {key} = {value}"
@function_tool
async def get_personalized_greeting(wrapper: RunContextWrapper[UserInfo]) -> str:
    """Generate a personalized greeting based on user context.

    Selects a greeting template from the user's 'greeting_style'
    preference; any unrecognized or missing style falls back to formal.
    """
    user = wrapper.context
    style = user.preferences.get('greeting_style', 'formal')
    by_style = {
        'casual': f"Hey {user.name}! What's up?",
        'friendly': f"Hi there, {user.name}! How can I help you today?",
    }
    return by_style.get(style, f"Good day, {user.name}. How may I assist you?")
# Create agent with context-aware tools
root_agent = Agent[UserInfo](
name="Context-Aware Assistant",
instructions="""
You are a personalized assistant that uses user context to provide tailored responses.
You have access to:
- User profile information (name, ID, preferences)
- Ability to update user preferences
- Personalized greeting generation
Use the context tools to:
1. Fetch user information when needed
2. Update preferences when users express them
3. Provide personalized greetings and responses
Always consider the user's context when responding.
""",
tools=[fetch_user_profile, update_user_preference, get_personalized_greeting]
)
# Example usage with context
async def context_example():
    """Demonstrates context management with user information.

    Builds a UserInfo, runs the agent with it, and prints both the model
    output and the (possibly mutated) context afterwards.
    """
    # Create user context
    user_context = UserInfo(
        name="Alice Johnson",
        uid=12345,
        preferences={"greeting_style": "friendly", "topic_interest": "technology"}
    )
    # Run agent with context; tools may mutate user_context in place.
    result = await Runner.run(
        root_agent,
        "Hello! I'd like to know about my profile and prefer casual greetings.",
        context=user_context
    )
    print(f"Response: {result.final_output}")
    # Shows any preference updates the tools applied during the run.
    print(f"Updated context: {user_context}")
    return result
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/advanced_handoffs.py | ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/advanced_handoffs.py | from agents import Agent, Runner, handoff, RunContextWrapper
from agents.extensions import handoff_filters
from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX
from pydantic import BaseModel
import asyncio
# Define structured input for escalation handoff.
# The LLM must produce this payload when invoking the escalation tool; the
# SDK validates it before on_handoff receives the parsed instance.
class EscalationData(BaseModel):
    reason: str
    priority: str
    customer_id: str


# Create specialized agents.
# Second-level agent that takes over the conversation after escalation.
escalation_agent = Agent(
    name="Escalation Agent",
    instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
    You handle escalated customer issues. You have access to additional tools and authority
    to resolve complex problems that first-level support cannot handle.
    """
)


# Callback function for escalation tracking.
async def on_escalation_handoff(ctx: RunContextWrapper[None], input_data: EscalationData):
    """Callback executed when escalation handoff is triggered"""
    # Side effect only (audit trail to stdout); the return value is unused.
    print(f"π¨ ESCALATION ALERT:")
    print(f" Reason: {input_data.reason}")
    print(f" Priority: {input_data.priority}")
    print(f" Customer ID: {input_data.customer_id}")


# Create advanced handoff with custom configuration.
# handoff() customizes the auto-generated transfer tool: its name, the
# description shown to the LLM, a notification callback, and a required
# structured-input schema.
escalation_handoff = handoff(
    agent=escalation_agent,
    tool_name_override="escalate_to_manager",
    tool_description_override="Escalate complex issues that require manager intervention",
    on_handoff=on_escalation_handoff,
    input_type=EscalationData  # Structured input required
)

# Advanced triage agent — the entry-point agent for this example.
root_agent = Agent(
    name="Advanced Triage Agent",
    instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
    You are an advanced customer service agent with escalation capabilities.
    Handle most issues yourself, but use escalations for:
    - Angry customers or complex complaints
    - Issues requiring refunds > $100
    - Technical problems you cannot resolve
    When escalating, provide reason, priority (low/medium/high), and customer_id.
    """,
    handoffs=[escalation_handoff]
)
# Example usage
async def main():
    """Demo driver: sends one angry-customer message through the triage
    agent, which should trigger the structured ``escalate_to_manager``
    handoff (and its on_handoff alert callback).
    """
    print("β‘ OpenAI Agents SDK - Advanced Handoffs")
    print("=" * 50)
    # Test escalation with structured input
    print("=== Escalation with Structured Input ===")
    result = await Runner.run(
        root_agent,
        """I am absolutely furious! Your service has been down for 3 days and I've lost thousands
        of dollars in business. I want a full refund of my annual subscription ($299) and
        compensation for my losses. My customer ID is CUST-789123."""
    )
    print(f"Response: {result.final_output}")
    # Fix: this literal was split across two physical lines in the source
    # (a mangled emoji swallowed the line break) — a SyntaxError; rejoined.
    print("\nβ Advanced handoffs tutorial complete!")


if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/basic_handoffs.py | ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/basic_handoffs.py | from agents import Agent, Runner, handoff
from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX
import asyncio
# Create specialized agents
billing_agent = Agent(
name="Billing Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a billing specialist. Help customers with:
- Payment issues and billing questions
- Subscription management and upgrades
- Invoice and receipt requests
- Refund processing
Be helpful and provide specific billing assistance.
"""
)
technical_agent = Agent(
name="Technical Support Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a technical support specialist. Help customers with:
- App crashes and technical issues
- Account access problems
- Feature usage and troubleshooting
- Bug reports and technical questions
Provide clear technical guidance and solutions.
"""
)
# Create triage agent with handoffs
root_agent = Agent(
name="Customer Service Triage Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a customer service triage agent. Your job is to:
1. Understand the customer's issue
2. Determine which specialist can best help them
3. Transfer them to the appropriate agent using handoff tools
Available specialists:
- Billing Agent: For payment, subscription, billing, and refund issues
- Technical Support Agent: For app problems, technical issues, and troubleshooting
If the issue is clearly billing-related, transfer to Billing Agent.
If the issue is clearly technical, transfer to Technical Support Agent.
If you can handle it yourself (general questions), do so.
""",
handoffs=[billing_agent, technical_agent] # Creates handoff tools automatically
)
# Example usage
async def main():
    """Demo driver: exercises both handoff paths (billing and technical)
    through the triage agent and prints each final response.
    """
    print("π€ OpenAI Agents SDK - Basic Handoffs")
    print("=" * 50)
    # Test billing handoff
    print("=== Billing Handoff Example ===")
    result = await Runner.run(
        root_agent,
        "Hi, I was charged twice for my subscription this month. Can you help me get a refund?"
    )
    print(f"Response: {result.final_output}")
    # Test technical handoff
    print("\n=== Technical Support Handoff Example ===")
    result = await Runner.run(
        root_agent,
        "My app keeps crashing when I try to upload photos. This has been happening for 3 days."
    )
    print(f"Response: {result.final_output}")
    # Fix: this literal was split across two physical lines in the source
    # (a mangled emoji swallowed the line break) — a SyntaxError; rejoined.
    print("\nβ Basic handoffs tutorial complete!")


if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_1_basic_handoffs/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_1_basic_handoffs/__init__.py | # Basic Handoffs module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_1_basic_handoffs/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_1_basic_handoffs/agent.py | from agents import Agent, Runner, handoff
from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX
import asyncio
# Create specialized agents
billing_agent = Agent(
name="Billing Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a billing specialist. Help customers with:
- Payment issues and billing questions
- Subscription management and upgrades
- Invoice and receipt requests
- Refund processing
Be helpful and provide specific billing assistance.
"""
)
technical_agent = Agent(
name="Technical Support Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a technical support specialist. Help customers with:
- App crashes and technical issues
- Account access problems
- Feature usage and troubleshooting
- Bug reports and technical questions
Provide clear technical guidance and solutions.
"""
)
# Create triage agent with handoffs
root_agent = Agent(
name="Customer Service Triage Agent",
instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
You are a customer service triage agent. Your job is to:
1. Understand the customer's issue
2. Determine which specialist can best help them
3. Transfer them to the appropriate agent using handoff tools
Available specialists:
- Billing Agent: For payment, subscription, billing, and refund issues
- Technical Support Agent: For app problems, technical issues, and troubleshooting
If the issue is clearly billing-related, transfer to Billing Agent.
If the issue is clearly technical, transfer to Technical Support Agent.
If you can handle it yourself (general questions), do so.
""",
handoffs=[billing_agent, technical_agent] # Creates handoff tools automatically
)
# Example usage
async def main():
    """Demo driver: exercises both handoff paths (billing and technical)
    through the triage agent and prints each final response.
    """
    print("π€ OpenAI Agents SDK - Basic Handoffs")
    print("=" * 50)
    # Test billing handoff
    print("=== Billing Handoff Example ===")
    result = await Runner.run(
        root_agent,
        "Hi, I was charged twice for my subscription this month. Can you help me get a refund?"
    )
    print(f"Response: {result.final_output}")
    # Test technical handoff
    print("\n=== Technical Support Handoff Example ===")
    result = await Runner.run(
        root_agent,
        "My app keeps crashing when I try to upload photos. This has been happening for 3 days."
    )
    print(f"Response: {result.final_output}")
    # Fix: this literal was split across two physical lines in the source
    # (a mangled emoji swallowed the line break) — a SyntaxError; rejoined.
    print("\nβ Basic handoffs tutorial complete!")


if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_2_advanced_handoffs/__init__.py | ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_2_advanced_handoffs/__init__.py | # Advanced Handoffs module for OpenAI Agents SDK tutorial
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Shubhamsaboo/awesome-llm-apps | https://github.com/Shubhamsaboo/awesome-llm-apps/blob/44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335/ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_2_advanced_handoffs/agent.py | ai_agent_framework_crash_course/openai_sdk_crash_course/8_handoffs_delegation/8_2_advanced_handoffs/agent.py | from agents import Agent, Runner, handoff, RunContextWrapper
from agents.extensions import handoff_filters
from agents.extensions.handoff_prompt import RECOMMENDED_PROMPT_PREFIX
from pydantic import BaseModel
import asyncio
# Define structured input for escalation handoff.
# The LLM must produce this payload when invoking the escalation tool; the
# SDK validates it before on_handoff receives the parsed instance.
class EscalationData(BaseModel):
    reason: str
    priority: str
    customer_id: str


# Create specialized agents.
# Second-level agent that takes over the conversation after escalation.
escalation_agent = Agent(
    name="Escalation Agent",
    instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
    You handle escalated customer issues. You have access to additional tools and authority
    to resolve complex problems that first-level support cannot handle.
    """
)


# Callback function for escalation tracking.
async def on_escalation_handoff(ctx: RunContextWrapper[None], input_data: EscalationData):
    """Callback executed when escalation handoff is triggered"""
    # Side effect only (audit trail to stdout); the return value is unused.
    print(f"π¨ ESCALATION ALERT:")
    print(f" Reason: {input_data.reason}")
    print(f" Priority: {input_data.priority}")
    print(f" Customer ID: {input_data.customer_id}")


# Create advanced handoff with custom configuration.
# handoff() customizes the auto-generated transfer tool: its name, the
# description shown to the LLM, a notification callback, and a required
# structured-input schema.
escalation_handoff = handoff(
    agent=escalation_agent,
    tool_name_override="escalate_to_manager",
    tool_description_override="Escalate complex issues that require manager intervention",
    on_handoff=on_escalation_handoff,
    input_type=EscalationData  # Structured input required
)

# Advanced triage agent — the entry-point agent for this example.
root_agent = Agent(
    name="Advanced Triage Agent",
    instructions=f"""{RECOMMENDED_PROMPT_PREFIX}
    You are an advanced customer service agent with escalation capabilities.
    Handle most issues yourself, but use escalations for:
    - Angry customers or complex complaints
    - Issues requiring refunds > $100
    - Technical problems you cannot resolve
    When escalating, provide reason, priority (low/medium/high), and customer_id.
    """,
    handoffs=[escalation_handoff]
)
# Example usage
async def main():
    """Demo driver: sends one angry-customer message through the triage
    agent, which should trigger the structured ``escalate_to_manager``
    handoff (and its on_handoff alert callback).
    """
    print("β‘ OpenAI Agents SDK - Advanced Handoffs")
    print("=" * 50)
    # Test escalation with structured input
    print("=== Escalation with Structured Input ===")
    result = await Runner.run(
        root_agent,
        """I am absolutely furious! Your service has been down for 3 days and I've lost thousands
        of dollars in business. I want a full refund of my annual subscription ($299) and
        compensation for my losses. My customer ID is CUST-789123."""
    )
    print(f"Response: {result.final_output}")
    # Fix: this literal was split across two physical lines in the source
    # (a mangled emoji swallowed the line break) — a SyntaxError; rejoined.
    print("\nβ Advanced handoffs tutorial complete!")


if __name__ == "__main__":
    asyncio.run(main())
| python | Apache-2.0 | 44efabeac69a8bd1f7cfbf8fddd8fde5ce32b335 | 2026-01-04T14:38:15.481187Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.