import math

import torch
import torch.nn as nn
from torch import Tensor


class PositionalEncoding2D(nn.Module):
    """2-D positional encodings for the feature maps produced by the encoder.

    Following https://arxiv.org/abs/2103.06450 by Sumeet Singh.

    Reference:
    https://github.com/full-stack-deep-learning/fsdl-text-recognizer-2021-labs/blob/main/lab9/text_recognizer/models/transformer_util.py
    """

    def __init__(self, d_model: int, max_h: int = 2000, max_w: int = 2000) -> None:
        super().__init__()
        self.d_model = d_model
        assert d_model % 2 == 0, f"Embedding depth {d_model} is not even"
        pe = self.make_pe(d_model, max_h, max_w)  # (d_model, max_h, max_w)
        self.register_buffer("pe", pe)

    @staticmethod
    def make_pe(d_model: int, max_h: int, max_w: int) -> Tensor:
        """Compute positional encoding."""
        pe_h = PositionalEncoding1D.make_pe(d_model=d_model // 2, max_len=max_h)  # (max_h, 1, d_model // 2)
        pe_h = pe_h.permute(2, 0, 1).expand(-1, -1, max_w)  # (d_model // 2, max_h, max_w)

        pe_w = PositionalEncoding1D.make_pe(d_model=d_model // 2, max_len=max_w)  # (max_w, 1, d_model // 2)
        pe_w = pe_w.permute(2, 1, 0).expand(-1, max_h, -1)  # (d_model // 2, max_h, max_w)

        pe = torch.cat([pe_h, pe_w], dim=0)  # (d_model, max_h, max_w)
        return pe

    def forward(self, x: Tensor) -> Tensor:
        """Forward pass.

        Args:
            x: (B, d_model, H, W)

        Returns:
            (B, d_model, H, W)
        """
        assert x.shape[1] == self.pe.shape[0]
        x = x + self.pe[:, : x.size(2), : x.size(3)]
        return x
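

# Illustrative usage sketch (added; not part of the original module). The
# function name, d_model, and sizes are made up for demonstration; smaller
# max_h / max_w than the defaults keep the buffer cheap to build.
def _example_positional_encoding_2d() -> None:
    pos_2d = PositionalEncoding2D(d_model=128, max_h=64, max_w=128)
    feature_map = torch.zeros(4, 128, 32, 96)  # (B, d_model, H, W)
    out = pos_2d(feature_map)
    assert out.shape == (4, 128, 32, 96)  # shape is preserved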


class PositionalEncoding1D(nn.Module):
    """Classic Attention-is-all-you-need positional encoding."""

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000) -> None:
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = self.make_pe(d_model, max_len)  # (max_len, 1, d_model)
        self.register_buffer("pe", pe)

    @staticmethod
    def make_pe(d_model: int, max_len: int) -> Tensor:
        """Compute positional encoding."""
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # (max_len, 1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even channels
        pe[:, 1::2] = torch.cos(position * div_term)  # odd channels
        pe = pe.unsqueeze(1)  # (max_len, 1, d_model)
        return pe
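
    # The closed form implemented above, restated (comment added for
    # exposition; not in the original):
    #   pe[pos, 2i]     = sin(pos / 10000 ** (2i / d_model))
    #   pe[pos, 2i + 1] = cos(pos / 10000 ** (2i / d_model))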

    def forward(self, x: Tensor) -> Tensor:
        """Forward pass.

        Args:
            x: (S, B, d_model)

        Returns:
            (S, B, d_model)
        """
        assert x.shape[2] == self.pe.shape[2]
        x = x + self.pe[: x.size(0)]
        return self.dropout(x)
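

# Minimal smoke test (added for illustration; not part of the original file):
# checks that both modules preserve input shape.
if __name__ == "__main__":
    pe_1d = PositionalEncoding1D(d_model=64, max_len=100)
    tokens = torch.zeros(10, 2, 64)  # (S, B, d_model)
    assert pe_1d(tokens).shape == (10, 2, 64)

    pe_2d = PositionalEncoding2D(d_model=64, max_h=50, max_w=50)
    fmap = torch.zeros(2, 64, 16, 24)  # (B, d_model, H, W)
    assert pe_2d(fmap).shape == (2, 64, 16, 24)
    print("positional encoding smoke test passed")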