import math
from itertools import permutations
from typing import Any, Optional

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules import transformer

class DecoderLayer(nn.Module):
    """A Transformer decoder layer supporting two-stream attention (XLNet).

    This implements a pre-LN decoder, as opposed to the post-LN default in
    PyTorch.
    """

    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation='gelu',
        layer_norm_eps=1e-5,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model,
                                               nhead,
                                               dropout=dropout,
                                               batch_first=True)
        self.cross_attn = nn.MultiheadAttention(d_model,
                                                nhead,
                                                dropout=dropout,
                                                batch_first=True)

        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm_q = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.norm_c = nn.LayerNorm(d_model, eps=layer_norm_eps)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = transformer._get_activation_fn(activation)

    def __setstate__(self, state):
        if 'activation' not in state:
            state['activation'] = F.gelu
        super().__setstate__(state)

    def forward_stream(
        self,
        tgt: Tensor,
        tgt_norm: Tensor,
        tgt_kv: Tensor,
        memory: Tensor,
        tgt_mask: Optional[Tensor],
        tgt_key_padding_mask: Optional[Tensor],
    ):
        """Forward pass for a single stream (i.e. content or query).

        tgt_norm is just a LayerNorm'd tgt, added as a separate parameter for
        efficiency. Both tgt_kv and memory are expected to be LayerNorm'd too
        (memory is LayerNorm'd by the ViT encoder).
        """
        # Self-attention sub-block (pre-LN, residual).
        tgt2, sa_weights = self.self_attn(
            tgt_norm,
            tgt_kv,
            tgt_kv,
            attn_mask=tgt_mask,
            key_padding_mask=tgt_key_padding_mask)
        tgt = tgt + self.dropout1(tgt2)

        # Cross-attention over the encoder memory (pre-LN, residual).
        tgt2, ca_weights = self.cross_attn(self.norm1(tgt), memory, memory)
        self.attn_map = ca_weights
        tgt = tgt + self.dropout2(tgt2)

        # Position-wise feed-forward sub-block (pre-LN, residual).
        tgt2 = self.linear2(
            self.dropout(self.activation(self.linear1(self.norm2(tgt)))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt, sa_weights, ca_weights

    def forward(
        self,
        query,
        content,
        memory,
        query_mask: Optional[Tensor] = None,
        content_mask: Optional[Tensor] = None,
        content_key_padding_mask: Optional[Tensor] = None,
        update_content: bool = True,
    ):
        query_norm = self.norm_q(query)
        content_norm = self.norm_c(content)
        query = self.forward_stream(query, query_norm, content_norm, memory,
                                    query_mask, content_key_padding_mask)[0]
        if update_content:
            content = self.forward_stream(content, content_norm, content_norm,
                                          memory, content_mask,
                                          content_key_padding_mask)[0]
        return query, content


class Decoder(nn.Module):
    __constants__ = ['norm']

    def __init__(self, decoder_layer, num_layers, norm):
        super().__init__()
        self.layers = transformer._get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
        self,
        query,
        content,
        memory,
        query_mask: Optional[Tensor] = None,
        content_mask: Optional[Tensor] = None,
        content_key_padding_mask: Optional[Tensor] = None,
    ):
        for i, mod in enumerate(self.layers):
            # The content stream output of the last layer is never used, so
            # skip updating it there.
            last = i == len(self.layers) - 1
            query, content = mod(
                query,
                content,
                memory,
                query_mask,
                content_mask,
                content_key_padding_mask,
                update_content=not last,
            )
        query = self.norm(query)
        return query


class TokenEmbedding(nn.Module):

    def __init__(self, charset_size: int, embed_dim: int):
        super().__init__()
        self.embedding = nn.Embedding(charset_size, embed_dim)
        self.embed_dim = embed_dim

    def forward(self, tokens: torch.Tensor):
        # Scale embeddings by sqrt(embed_dim), as in the original Transformer.
        return math.sqrt(self.embed_dim) * self.embedding(tokens)


class PARSeqDecoder(nn.Module):

    def __init__(self,
                 in_channels,
                 out_channels,
                 max_label_length=25,
                 embed_dim=384,
                 dec_num_heads=12,
                 dec_mlp_ratio=4,
                 dec_depth=1,
                 perm_num=6,
                 perm_forward=True,
                 perm_mirrored=True,
                 decode_ar=True,
                 refine_iters=1,
                 dropout=0.1,
                 **kwargs: Any) -> None:
        super().__init__()
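        # Special token ids: [EOS] = 0, [BOS] = out_channels - 2,
        # [PAD] = out_channels - 1.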
        self.pad_id = out_channels - 1
        self.eos_id = 0
        self.bos_id = out_channels - 2
        self.max_label_length = max_label_length
        self.decode_ar = decode_ar
        self.refine_iters = refine_iters

        decoder_layer = DecoderLayer(embed_dim, dec_num_heads,
                                     embed_dim * dec_mlp_ratio, dropout)
        self.decoder = Decoder(decoder_layer,
                               num_layers=dec_depth,
                               norm=nn.LayerNorm(embed_dim))
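
        # State for sampling the decoding permutations used by permutation
        # language modeling during training.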
        self.rng = np.random.default_rng()
        self.max_gen_perms = perm_num // 2 if perm_mirrored else perm_num
        self.perm_forward = perm_forward
        self.perm_mirrored = perm_mirrored
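
        # The output head never predicts [BOS] or [PAD], hence
        # out_channels - 2 output classes.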
        self.head = nn.Linear(embed_dim, out_channels - 2)
        self.text_embed = TokenEmbedding(out_channels, embed_dim)
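
        # Learnable positional queries, one per decoded position
        # (max_label_length + 1 to include the [EOS] position).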
        self.pos_queries = nn.Parameter(
            torch.Tensor(1, max_label_length + 1, embed_dim))
        self.dropout = nn.Dropout(p=dropout)

        self.apply(self._init_weights)
        nn.init.trunc_normal_(self.pos_queries, std=0.02)

    def _init_weights(self, module: nn.Module):
        """Initialize the weights using the typical initialization schemes
        used in SOTA models."""
        if isinstance(module, nn.Linear):
            nn.init.trunc_normal_(module.weight, std=0.02)
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, nn.Embedding):
            nn.init.trunc_normal_(module.weight, std=0.02)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight,
                                    mode='fan_out',
                                    nonlinearity='relu')
            if module.bias is not None:
                nn.init.zeros_(module.bias)
        elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.ones_(module.weight)
            nn.init.zeros_(module.bias)

    @torch.jit.ignore
    def no_weight_decay(self):
        param_names = {'text_embed.embedding.weight', 'pos_queries'}
        return param_names

    def decode(
        self,
        tgt: torch.Tensor,
        memory: torch.Tensor,
        tgt_mask: Optional[Tensor] = None,
        tgt_padding_mask: Optional[Tensor] = None,
        tgt_query: Optional[Tensor] = None,
        tgt_query_mask: Optional[Tensor] = None,
        pos_query: Optional[Tensor] = None,
    ):
        N, L = tgt.shape
        # [BOS] provides the null context; positional information is only
        # added for the characters that follow it.
        null_ctx = self.text_embed(tgt[:, :1])

        if tgt_query is None:
            tgt_query = pos_query[:, :L]
        tgt_emb = pos_query[:, :L - 1] + self.text_embed(tgt[:, 1:])
        tgt_emb = self.dropout(torch.cat([null_ctx, tgt_emb], dim=1))

        tgt_query = self.dropout(tgt_query)
        return self.decoder(tgt_query, tgt_emb, memory, tgt_query_mask,
                            tgt_mask, tgt_padding_mask)

    def forward(self, x, data=None, pos_query=None):
        # Training uses permutation language modeling over the ground-truth
        # labels; inference decodes via forward_test.
        if self.training:
            return self.training_step([x, pos_query, data[0]])
        else:
            return self.forward_test(x, pos_query)

    def forward_test(self,
                     memory: Tensor,
                     pos_query: Optional[Tensor] = None,
                     max_length: Optional[int] = None) -> Tensor:
        _device = memory.device
        testing = max_length is None
        max_length = (self.max_label_length if max_length is None else min(
            max_length, self.max_label_length))
        bs = memory.shape[0]
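
        # +1 for the [EOS] token appended to every sequence.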
        num_steps = max_length + 1

        if pos_query is None:
            pos_queries = self.pos_queries[:, :num_steps].expand(bs, -1, -1)
        else:
            pos_queries = pos_query
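
        # Causal masks for AR decoding: each step may attend only to [BOS]
        # and the characters decoded so far.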
        tgt_mask = query_mask = torch.triu(
            torch.full((num_steps, num_steps), float('-inf'), device=_device),
            1)
        self.attn_maps = []
        if self.decode_ar:
            tgt_in = torch.full((bs, num_steps),
                                self.pad_id,
                                dtype=torch.long,
                                device=_device)
            tgt_in[:, 0] = self.bos_id

            logits = []
            for i in range(num_steps):
                j = i + 1
                # Feed the context decoded so far and query only position i;
                # earlier positions are fixed thanks to the causal mask.
                tgt_out = self.decode(
                    tgt_in[:, :j],
                    memory,
                    tgt_mask[:j, :j],
                    tgt_query=pos_queries[:, i:j],
                    tgt_query_mask=query_mask[i:j, :j],
                    pos_query=pos_queries,
                )
                self.attn_maps.append(self.decoder.layers[-1].attn_map)

                p_i = self.head(tgt_out)
                logits.append(p_i)
                if j < num_steps:
                    # Greedy decoding: append the prediction to the context.
                    tgt_in[:, j] = p_i.squeeze().argmax(-1)
                    # Stop early once every sequence contains an [EOS].
                    if testing and (tgt_in == self.eos_id).any(dim=-1).all():
                        break

            logits = torch.cat(logits, dim=1)
        else:
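            # No prior context: query all positions in parallel from [BOS]
            # alone (non-autoregressive decoding).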
            tgt_in = torch.full((bs, 1),
                                self.bos_id,
                                dtype=torch.long,
                                device=_device)
            tgt_out = self.decode(tgt_in,
                                  memory,
                                  tgt_query=pos_queries,
                                  pos_query=pos_queries)
            logits = self.head(tgt_out)
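
        # Iterative refinement: re-predict every position from the previous
        # output using a cloze-style attention mask.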
        if self.refine_iters:
            # Unmask the future context: each query may now see every content
            # position except its own previous prediction.
            query_mask[torch.triu(
                torch.ones(num_steps,
                           num_steps,
                           dtype=torch.bool,
                           device=_device), 2)] = 0
            bos = torch.full((bs, 1),
                             self.bos_id,
                             dtype=torch.long,
                             device=_device)
            for i in range(self.refine_iters):
                tgt_in = torch.cat([bos, logits[:, :-1].argmax(-1)], dim=1)
                # Mask context tokens at and beyond the first [EOS].
                tgt_padding_mask = (tgt_in == self.eos_id).int().cumsum(
                    -1) > 0
                tgt_out = self.decode(
                    tgt_in,
                    memory,
                    tgt_mask,
                    tgt_padding_mask,
                    tgt_query=pos_queries,
                    tgt_query_mask=query_mask[:, :tgt_in.shape[1]],
                    pos_query=pos_queries,
                )
                logits = self.head(tgt_out)

        return F.softmax(logits, -1)

    def gen_tgt_perms(self, tgt, _device):
        """Generate shared permutations for the whole batch.

        This works because the same attention mask can be used for the
        shorter sequences thanks to the padding mask.
        """
        max_num_chars = tgt.shape[1] - 2
        # Single-character sequences admit only the canonical ordering
        # (positions [BOS], character, [EOS]).
        if max_num_chars == 1:
            return torch.arange(3, device=_device).unsqueeze(0)
        # Always start with the canonical forward (left-to-right) ordering.
        perms = [torch.arange(max_num_chars, device=_device)
                 ] if self.perm_forward else []

        max_perms = math.factorial(max_num_chars)
        if self.perm_mirrored:
            max_perms //= 2
        num_gen_perms = min(self.max_gen_perms, max_perms)
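
        # For short sequences, enumerate all permutations and sample from the
        # pool without replacement; otherwise draw random permutations.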
        if max_num_chars < 5:
            # For 4 characters with mirroring, use a precomputed subset so the
            # pool does not contain both a permutation and its mirror.
            if max_num_chars == 4 and self.perm_mirrored:
                selector = [0, 3, 4, 6, 9, 10, 12, 16, 17, 18, 19, 21]
            else:
                selector = list(range(max_perms))
            perm_pool = torch.as_tensor(list(
                permutations(range(max_num_chars), max_num_chars)),
                                        device=_device)[selector]
            # The forward permutation is already included (if enabled), so
            # drop it from the sampling pool.
            if self.perm_forward:
                perm_pool = perm_pool[1:]
            perms = torch.stack(perms)
            if len(perm_pool):
                i = self.rng.choice(len(perm_pool),
                                    size=num_gen_perms - len(perms),
                                    replace=False)
                perms = torch.cat([perms, perm_pool[i]])
        else:
            perms.extend([
                torch.randperm(max_num_chars, device=_device)
                for _ in range(num_gen_perms - len(perms))
            ])
            perms = torch.stack(perms)
        if self.perm_mirrored:
            # Interleave each permutation with its reverse.
            comp = perms.flip(-1)
            perms = torch.stack([perms, comp
                                 ]).transpose(0, 1).reshape(-1, max_num_chars)

        # Prepend the [BOS] position (0) and append the [EOS] position
        # (max_num_chars + 1); character positions are shifted by one.
        bos_idx = perms.new_zeros((len(perms), 1))
        eos_idx = perms.new_full((len(perms), 1), max_num_chars + 1)
        perms = torch.cat([bos_idx, perms + 1, eos_idx], dim=1)
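
        # With more than one permutation, force the second to be the full
        # reverse ordering (the complement of the forward permutation,
        # [EOS] position included).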
        if len(perms) > 1:
            perms[1, 1:] = max_num_chars + 1 - torch.arange(max_num_chars + 1,
                                                            device=_device)
        return perms

    def generate_attn_masks(self, perm, _device):
        """Generate attention masks given a sequence permutation (including
        positions for the [BOS] and [EOS] tokens).

        :param perm: the permutation sequence. i = 0 is always the [BOS]
        :return: lookahead attention masks
        """
        sz = perm.shape[0]
        mask = torch.zeros((sz, sz), device=_device)
        # Position perm[i] may not attend to positions that come later in the
        # permutation order.
        for i in range(sz):
            query_idx = perm[i]
            masked_keys = perm[i + 1:]
            mask[query_idx, masked_keys] = float('-inf')
        content_mask = mask[:-1, :-1].clone()
        # The query stream additionally may not attend to its own position.
        mask[torch.eye(sz, dtype=torch.bool,
                       device=_device)] = float('-inf')
        query_mask = mask[1:, :-1]
        return content_mask, query_mask

    def training_step(self, batch):
        memory, pos_query, tgt = batch
        bs = memory.shape[0]
        if pos_query is None:
            pos_query = self.pos_queries.expand(bs, -1, -1)

        # Sample the decoding permutations shared by the whole batch.
        tgt_perms = self.gen_tgt_perms(tgt, memory.device)
        tgt_in = tgt[:, :-1]
        tgt_out = tgt[:, 1:]
        # [EOS] is not needed as input context, so mask it along with padding.
        tgt_padding_mask = (tgt_in == self.pad_id) | (tgt_in == self.eos_id)

        loss = 0
        loss_numel = 0
        n = (tgt_out != self.pad_id).sum().item()
        for i, perm in enumerate(tgt_perms):
            tgt_mask, query_mask = self.generate_attn_masks(
                perm, memory.device)
            out = self.decode(
                tgt_in,
                memory,
                tgt_mask,
                tgt_padding_mask,
                tgt_query_mask=query_mask,
                pos_query=pos_query,
            )
            logits = self.head(out)
            if i == 0:
                final_out = logits
            loss += n * F.cross_entropy(logits.flatten(end_dim=1),
                                        tgt_out.flatten(),
                                        ignore_index=self.pad_id)
            loss_numel += n
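
            # After the canonical and reverse orderings, the [EOS] target
            # becomes trivial to predict, so drop it from the loss for the
            # remaining permutations.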
            if i == 1:
                tgt_out = torch.where(tgt_out == self.eos_id, self.pad_id,
                                      tgt_out)
                n = (tgt_out != self.pad_id).sum().item()
        loss /= loss_numel

        return [loss, final_out]
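

if __name__ == '__main__':
    # Minimal smoke-test sketch (not part of the decoder itself). It assumes
    # the encoder memory is a (batch, num_tokens, embed_dim) tensor, e.g. ViT
    # patch tokens, and a hypothetical charset of 95 characters plus the
    # [EOS]/[BOS]/[PAD] special tokens.
    charset_size = 95 + 3
    decoder = PARSeqDecoder(in_channels=384, out_channels=charset_size)
    memory = torch.randn(2, 128, 384)

    decoder.eval()
    with torch.no_grad():
        probs = decoder(memory)
    # Expected shape: (2, max_label_length + 1, charset_size - 2) when
    # iterative refinement is enabled.
    print(probs.shape)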