import torch
from torch import nn, pow, sin
from torch.nn import Parameter


class Snake(nn.Module):
    r"""Implementation of a sine-based periodic activation function.

    Alpha is initialized to 1 by default; higher values mean higher frequency.
    It will be trained along with the rest of your model.

    Args:
        in_features: number of input channels (C)
        alpha: trainable parameter

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input

    References:
        This activation function is from the paper by Liu Ziyin, Tilman Hartwig,
        and Masahito Ueda: https://arxiv.org/abs/2006.08195

    Examples:
        >>> a1 = Snake(256)
        >>> x = torch.randn(4, 256, 100)  # (B, C, T)
        >>> x = a1(x)
    """

    def __init__(
        self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False
    ):
        super(Snake, self).__init__()
        self.in_features = in_features

        # In log scale, alpha starts at zero so that exp(0) = 1 matches the
        # default; in linear scale it is stored directly.
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
        else:
            self.alpha = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable

        # Small epsilon to avoid division by zero when alpha is (near) zero.
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        r"""Forward pass of the function. Applies the function to the input
        elementwise.

        Snake := x + (1/alpha) * sin^2(alpha * x)
        """
        # Reshape alpha from (C,) to (1, C, 1) so it broadcasts over (B, C, T).
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x
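
# A minimal usage sketch (an illustration, not part of the original module).
# `_snake_example` is a hypothetical helper; the shapes are assumed to follow
# the (B, C, T) layout documented above.
def _snake_example():
    snake = Snake(in_features=64)
    x = torch.randn(8, 64, 100)  # (batch, channels, time)
    y = snake(x)                 # elementwise, so the shape is preserved
    assert y.shape == x.shape
    return y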


class SnakeBeta(nn.Module):
    r"""A modified Snake function which uses separate parameters for the
    magnitude of the periodic components.

    Alpha is initialized to 1 by default; higher values mean higher frequency.
    Beta is initialized to 1 by default; higher values mean higher magnitude.
    Both will be trained along with the rest of your model.

    Args:
        in_features: number of input channels (C)
        alpha: trainable parameter that controls frequency
        beta: trainable parameter that controls magnitude

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input

    References:
        This activation function is a modified version based on the paper by
        Liu Ziyin, Tilman Hartwig, and Masahito Ueda:
        https://arxiv.org/abs/2006.08195

    Examples:
        >>> a1 = SnakeBeta(256)
        >>> x = torch.randn(4, 256, 100)  # (B, C, T)
        >>> x = a1(x)
    """

    def __init__(
        self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False
    ):
        super(SnakeBeta, self).__init__()
        self.in_features = in_features

        # In log scale, both parameters start at zero so that exp(0) = 1
        # matches the defaults; in linear scale they are stored directly.
        # Note that beta is initialized from the same `alpha` argument.
        self.alpha_logscale = alpha_logscale
        if self.alpha_logscale:
            self.alpha = Parameter(torch.zeros(in_features) * alpha)
            self.beta = Parameter(torch.zeros(in_features) * alpha)
        else:
            self.alpha = Parameter(torch.ones(in_features) * alpha)
            self.beta = Parameter(torch.ones(in_features) * alpha)

        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable

        # Small epsilon to avoid division by zero when beta is (near) zero.
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        r"""Forward pass of the function. Applies the function to the input
        elementwise.

        SnakeBeta := x + (1/beta) * sin^2(alpha * x)
        """
        # Reshape the (C,)-shaped parameters to (1, C, 1) so they broadcast
        # over inputs of shape (B, C, T).
        alpha = self.alpha.unsqueeze(0).unsqueeze(-1)
        beta = self.beta.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
            beta = torch.exp(beta)
        x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)

        return x
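

# A minimal smoke test (an illustration, not part of the original module).
# The shapes below (batch 8, 64 channels, 100 frames) are arbitrary
# assumptions chosen for the demo.
if __name__ == "__main__":
    x = torch.randn(8, 64, 100)  # (B, C, T)
    for act in (Snake(64), SnakeBeta(64, alpha_logscale=True)):
        y = act(x)
        assert y.shape == x.shape  # both activations are elementwise
        print(type(act).__name__, "ok:", tuple(y.shape))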