# Positional encoding utilities (Fourier-feature mapping of coordinates).
import numpy as np
import torch
from torch import Tensor
import torch.nn as nn
@torch.jit.script
def positional_encoding(
        v: Tensor,
        sigma: float,
        m: int) -> Tensor:
    r"""Computes :math:`\gamma(\mathbf{v}) = (\dots, \cos{2 \pi \sigma^{(j/m)} \mathbf{v}} , \sin{2 \pi \sigma^{(j/m)} \mathbf{v}}, \dots)`
    where :math:`j \in \{0, \dots, m-1\}`

    Args:
        v (Tensor): input tensor of shape :math:`(N, *, \text{input_size})`
        sigma (float): constant chosen based upon the domain of :attr:`v`
        m (int): number of frequency bands; each input feature is mapped to
            :math:`2m` output features (one cos/sin pair per band)

    Returns:
        Tensor: mapped tensor of shape :math:`(N, *, 2 \cdot m \cdot \text{input_size})`

    See :class:`~rff.layers.PositionalEncoding` for more details.
    """
    # Band indices j = 0, ..., m-1, created on the input's device so the
    # function works unchanged on CPU and GPU tensors.
    j = torch.arange(m, device=v.device)
    # Geometric frequency ladder: 2*pi*sigma^(j/m), one coefficient per band.
    coeffs = 2 * np.pi * sigma ** (j / m)
    # Broadcast (..., input_size, 1) * (m,) -> (..., input_size, m).
    vp = coeffs * torch.unsqueeze(v, -1)
    # Cos/sin pair per band, concatenated along the band axis: (..., input_size, 2m).
    vp_cat = torch.cat((torch.cos(vp), torch.sin(vp)), dim=-1)
    # Merge feature and band axes: (..., input_size * 2m).
    return vp_cat.flatten(-2, -1)
class PositionalEncoding(nn.Module):
    """Layer for mapping coordinates using the positional encoding"""

    def __init__(self, sigma: float, m: int):
        r"""
        Args:
            sigma (float): frequency constant
            m (int): number of frequencies to map to
        """
        super().__init__()
        self.sigma = sigma
        self.m = m

    def extra_repr(self) -> str:
        # Shown by print(module); mirrors the constructor arguments.
        return f'sigma={self.sigma}, m={self.m}'

    def forward(self, v: Tensor) -> Tensor:
        r"""Computes :math:`\gamma(\mathbf{v}) = (\dots, \cos{2 \pi \sigma^{(j/m)} \mathbf{v}} , \sin{2 \pi \sigma^{(j/m)} \mathbf{v}}, \dots)`

        Args:
            v (Tensor): input tensor of shape :math:`(N, *, \text{input_size})`

        Returns:
            Tensor: mapped tensor of shape :math:`(N, *, 2 \cdot m \cdot \text{input_size})`
        """
        # Delegate to the jit-scripted functional form with this layer's constants.
        return positional_encoding(v, self.sigma, self.m)