# translation_service/absolute_model.py
import math

import torch
from torch import nn, Tensor

class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal positional encodings to token embeddings."""

    def __init__(self, emb_size: int, dropout: float, maxlen: int = 5000):
        super(PositionalEncoding, self).__init__()
        # Frequency schedule from "Attention Is All You Need".
        den = torch.exp(-torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        pos = torch.arange(0, maxlen).reshape(maxlen, 1)
        pos_embedding = torch.zeros((maxlen, emb_size))
        pos_embedding[:, 0::2] = torch.sin(pos * den)
        pos_embedding[:, 1::2] = torch.cos(pos * den)
        # Shape (maxlen, 1, emb_size) so it broadcasts over the batch dimension
        # of (seq_len, batch, emb_size) inputs.
        pos_embedding = pos_embedding.unsqueeze(-2)
        self.dropout = nn.Dropout(dropout)
        self.register_buffer('pos_embedding', pos_embedding)

    def forward(self, token_embedding: Tensor):
        return self.dropout(token_embedding + self.pos_embedding[:token_embedding.size(0), :])

class TokenEmbedding(nn.Module):
    """Token embedding scaled by sqrt(emb_size), as in the original Transformer."""

    def __init__(self, vocab_size: int, emb_size: int):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.emb_size = emb_size

    def forward(self, tokens: Tensor):
        return self.embedding(tokens.long()) * math.sqrt(self.emb_size)

class TransformerModelAbsolute(nn.Module):
    """Encoder-decoder Transformer with absolute (sinusoidal) positional encoding."""

    def __init__(self, num_tokens_en, num_tokens_fr, embed_size, nhead, dim_feedforward, max_seq_length):
        super(TransformerModelAbsolute, self).__init__()
        self.embed_size = embed_size
        self.src_tok_emb = TokenEmbedding(num_tokens_en, embed_size)
        self.tgt_tok_emb = TokenEmbedding(num_tokens_fr, embed_size)
        self.positional_encoding = PositionalEncoding(embed_size, dropout=0.1, maxlen=max_seq_length)
        encoder_layer = nn.TransformerEncoderLayer(d_model=embed_size, nhead=nhead, dim_feedforward=dim_feedforward, dropout=0.1)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=3)
        decoder_layer = nn.TransformerDecoderLayer(d_model=embed_size, nhead=nhead, dim_feedforward=dim_feedforward, dropout=0.1)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=3)
        # Projects decoder outputs to target-vocabulary logits.
        self.generator = nn.Linear(embed_size, num_tokens_fr)
    def encode(self, src, src_padding_mask=None):
        src_emb = self.positional_encoding(self.src_tok_emb(src))
        return self.transformer_encoder(src_emb, src_key_padding_mask=src_padding_mask)

    def decode(self, tgt, memory, tgt_mask=None, tgt_key_padding_mask=None):
        tgt_emb = self.positional_encoding(self.tgt_tok_emb(tgt))
        return self.transformer_decoder(tgt_emb, memory, tgt_mask, tgt_key_padding_mask=tgt_key_padding_mask)

    def forward(self, src, tgt, src_mask=None, tgt_mask=None, src_padding_mask=None, tgt_padding_mask=None):
        memory = self.encode(src, src_padding_mask)
        output = self.decode(tgt, memory, tgt_mask, tgt_padding_mask)
        return self.generator(output)

    def generate_square_subsequent_mask(self, sz, device=None):
        # Causal mask: position i may attend only to positions <= i.
        if device is None:
            device = self.generator.weight.device
        mask = torch.triu(torch.ones(sz, sz, device=device)).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask
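

# Minimal usage sketch (not part of the original file): the vocabulary sizes,
# model dimensions, and sequence lengths below are illustrative assumptions.
# Tensors follow the (seq_len, batch) layout expected by PositionalEncoding.
if __name__ == "__main__":
    model = TransformerModelAbsolute(
        num_tokens_en=10_000,
        num_tokens_fr=12_000,
        embed_size=512,
        nhead=8,
        dim_feedforward=2048,
        max_seq_length=128,
    )
    src = torch.randint(0, 10_000, (20, 4))  # (src_len, batch) source token ids
    tgt = torch.randint(0, 12_000, (18, 4))  # (tgt_len, batch) target token ids
    tgt_mask = model.generate_square_subsequent_mask(tgt.size(0))
    logits = model(src, tgt, tgt_mask=tgt_mask)
    print(logits.shape)  # torch.Size([18, 4, 12000])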