import torch
import torch.nn as nn
from math import log


class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal positional encodings to a batch-first input (batch, seq_len, d_model)."""

    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 500):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # Positions 0..max_len-1 as a column vector: shape (max_len, 1).
        position = torch.arange(max_len, dtype=torch.float).unsqueeze(1)
        # Frequency terms 10000^(-2i/d_model) for the even dimensions, computed in
        # log space for numerical stability; assumes d_model is even.
        div_term = torch.exp(
            torch.arange(0, d_model, 2, dtype=torch.float) * (-log(10000.0) / d_model)
        )

        # Sine on even feature indices, cosine on odd: shape (max_len, d_model).
        pe = torch.zeros(max_len, d_model, dtype=torch.float)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        # Add a leading batch dimension -> (1, max_len, d_model) so it broadcasts
        # over the batch in forward().
        pe = pe.unsqueeze(0)
        # Non-persistent buffer: moves with .to()/.cuda() but is not a parameter
        # and is omitted from the state dict, since it can always be recomputed.
        self.register_buffer("pe", pe, persistent=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, seq_len, d_model); slice the table to the current sequence length.
        s = x.size(1)
        x = x + self.pe[:, :s, :]
        return self.dropout(x)
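

# A minimal usage sketch (the shapes and values below are illustrative, not from the
# original): embed dimension 16, a batch of 2 sequences of length 10. eval() disables
# dropout so the output is deterministic.
if __name__ == "__main__":
    pos_enc = PositionalEncoding(d_model=16, dropout=0.1, max_len=500).eval()
    x = torch.zeros(2, 10, 16)  # (batch, seq_len, d_model)
    out = pos_enc(x)
    assert out.shape == (2, 10, 16)  # shape is preserved; encodings are added elementwise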