# Deterministic positional (Fourier-feature) encoding utilities.
import numpy as np
import torch
from torch import Tensor
import torch.nn as nn
def positional_encoding(
        v: Tensor,
        sigma: float,
        m: int) -> Tensor:
    r"""Computes :math:`\gamma(\mathbf{v}) = (\dots, \cos{2 \pi \sigma^{(j/m)} \mathbf{v}} , \sin{2 \pi \sigma^{(j/m)} \mathbf{v}}, \dots)`
    where :math:`j \in \{0, \dots, m-1\}`

    Args:
        v (Tensor): input tensor of shape :math:`(N, *, \text{input_size})`
        sigma (float): constant chosen based upon the domain of :attr:`v`
        m (int): number of frequencies generated per input dimension

    Returns:
        Tensor: mapped tensor of shape :math:`(N, *, 2 \cdot m \cdot \text{input_size})`

    See :class:`~rff.layers.PositionalEncoding` for more details.
    """
    # Geometric frequency ladder 2*pi*sigma^(j/m), j = 0..m-1, built on v's
    # device so the op works on CPU and GPU inputs alike.
    j = torch.arange(m, device=v.device)
    coeffs = 2 * np.pi * sigma ** (j / m)
    # Broadcast: (..., input_size, 1) * (m,) -> (..., input_size, m)
    vp = coeffs * torch.unsqueeze(v, -1)
    # Concatenate cos then sin features along the frequency axis, then merge
    # the (input_size, 2m) trailing dims into a single feature dim.
    vp_cat = torch.cat((torch.cos(vp), torch.sin(vp)), dim=-1)
    return vp_cat.flatten(-2, -1)
class PositionalEncoding(nn.Module):
    """Module wrapper that applies the deterministic positional encoding.

    Thin ``nn.Module`` front-end over :func:`positional_encoding`; it only
    stores the two hyper-parameters and delegates the computation.
    """

    def __init__(self, sigma: float, m: int):
        r"""
        Args:
            sigma (float): frequency constant
            m (int): number of frequencies to map to
        """
        super().__init__()
        self.sigma = sigma
        self.m = m

    def forward(self, v: Tensor) -> Tensor:
        r"""Computes :math:`\gamma(\mathbf{v}) = (\dots, \cos{2 \pi \sigma^{(j/m)} \mathbf{v}} , \sin{2 \pi \sigma^{(j/m)} \mathbf{v}}, \dots)`

        Args:
            v (Tensor): input tensor of shape :math:`(N, *, \text{input_size})`

        Returns:
            Tensor: mapped tensor of shape :math:`(N, *, 2 \cdot m \cdot \text{input_size})`
        """
        # Delegate to the functional form with the stored hyper-parameters.
        sigma, m = self.sigma, self.m
        return positional_encoding(v, sigma, m)