import torch.nn as nn
import torch
import numpy as np
import torch.nn.functional as F
import copy


class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding, stored as a non-trainable buffer."""

    def __init__(self, d_hid, n_position=200):
        super().__init__()
        # The table is saved with the state dict but is never updated by the optimizer.
        self.register_buffer("pos_table", self._get_sinusoid_encoding_table(n_position, d_hid))

    def _get_sinusoid_encoding_table(self, n_position, d_hid):
        """Build a (1, n_position, d_hid) sinusoid position encoding table with
        PE[pos, 2i] = sin(pos / 10000^(2i / d_hid)) and PE[pos, 2i + 1] = cos(pos / 10000^(2i / d_hid))."""

        def get_position_angle_vec(position):
            return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

        sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
        sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # even dims: sin
        sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # odd dims: cos

        return torch.FloatTensor(sinusoid_table).unsqueeze(0)

    def forward(self, winsize):
        # Return the encodings for the first `winsize` positions, detached from the graph.
        return self.pos_table[:, :winsize].clone().detach()


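# Usage sketch (illustrative, not part of the original API): with
#     pe = PositionalEncoding(d_hid=512, n_position=200)
# calling `pe(16)` returns a (1, 16, 512) tensor that broadcasts over the batch
# dimension when added to a batch-first input of shape (batch, 16, 512).

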
def _get_activation_fn(activation):
    """Return an activation function given a string."""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        # Note: F.glu halves the size of the last dimension, unlike relu/gelu.
        return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")


def _get_clones(module, N):
    """Return N independent (deep-copied) instances of `module` as a ModuleList."""
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])


class Transformer(nn.Module):
    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
        return_intermediate_dec=True,
    ):
        super().__init__()

        encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout, activation, normalize_before)
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(
            decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec
        )

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        # Xavier-initialize all weight matrices; biases and other 1-D parameters keep their defaults.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, opt, src, query_embed, pos_embed):
        # `opt` is accepted for interface compatibility but is not used here.
        # Inputs are batch-first; nn.MultiheadAttention expects (seq, batch, d_model).
        src = src.permute(1, 0, 2)
        pos_embed = pos_embed.permute(1, 0, 2)
        query_embed = query_embed.permute(1, 0, 2)

        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, pos=pos_embed)

        hs = self.decoder(tgt, memory, pos=pos_embed, query_pos=query_embed)
        return hs


class TransformerEncoder(nn.Module):
    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src, mask=None, src_key_padding_mask=None, pos=None):
        # The positional encoding is added once to the input rather than inside every layer.
        output = src + pos

        for layer in self.layers:
            output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)

        if self.norm is not None:
            output = self.norm(output)

        return output


class TransformerDecoder(nn.Module):
    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        pos=None,
        query_pos=None,
    ):
        # The positional encoding and the query embedding are added once to the decoder input.
        output = tgt + pos + query_pos

        intermediate = []

        for layer in self.layers:
            output = layer(
                output,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos=pos,
                query_pos=query_pos,
            )
            if self.return_intermediate:
                intermediate.append(self.norm(output))

        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)

        if self.return_intermediate:
            # Stack the per-layer outputs: (num_layers, seq, batch, d_model).
            return torch.stack(intermediate)

        return output.unsqueeze(0)


class TransformerEncoderLayer(nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Feed-forward sublayer
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos):
        return tensor if pos is None else tensor + pos

    def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        # Post-norm variant: LayerNorm follows each residual connection.
        # Positional information is already added to `src` in TransformerEncoder.forward.
        src2 = self.self_attn(src, src, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src

    def forward_pre(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        # Pre-norm variant: LayerNorm precedes each sublayer.
        src2 = self.norm1(src)
        src2 = self.self_attn(src2, src2, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src

    def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)


class TransformerDecoderLayer(nn.Module):
    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Feed-forward sublayer
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos):
        return tensor if pos is None else tensor + pos

    def forward_post(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        pos=None,
        query_pos=None,
    ):
        # Post-norm variant: self-attention, cross-attention to the encoder memory, then the FFN.
        tgt2 = self.self_attn(tgt, tgt, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        tgt2 = self.multihead_attn(
            query=tgt, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
        )[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        pos=None,
        query_pos=None,
    ):
        # Pre-norm variant of the same three sublayers.
        tgt2 = self.norm1(tgt)
        tgt2 = self.self_attn(tgt2, tgt2, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(
            query=tgt2, key=memory, value=memory, attn_mask=memory_mask, key_padding_mask=memory_key_padding_mask
        )[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(
        self,
        tgt,
        memory,
        tgt_mask=None,
        memory_mask=None,
        tgt_key_padding_mask=None,
        memory_key_padding_mask=None,
        pos=None,
        query_pos=None,
    ):
        if self.normalize_before:
            return self.forward_pre(
                tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos
            )
        return self.forward_post(
            tgt, memory, tgt_mask, memory_mask, tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos
        )
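

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original module): the batch size,
    # window length, and the pairing with PositionalEncoding below are illustrative
    # assumptions. Note that this design ties the query length to the input window
    # length, because TransformerDecoder adds `pos` and `query_pos` to the same tensor.
    batch, win, d_model = 2, 16, 512
    model = Transformer(d_model=d_model, nhead=8, num_encoder_layers=2, num_decoder_layers=2)

    src = torch.randn(batch, win, d_model)                               # encoder input, batch-first
    pos_embed = PositionalEncoding(d_model)(win).expand(batch, -1, -1)   # (batch, win, d_model)
    query_embed = torch.randn(batch, win, d_model)                       # decoder queries, same length as the window

    hs = model(None, src, query_embed, pos_embed)                        # `opt` is unused by forward, so None is passed
    print(hs.shape)                                                      # (num_decoder_layers, win, batch, d_model)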