| | """ Activations |
| | |
| | A collection of activations fn and modules with a common interface so that they can |
| | easily be swapped. All have an `inplace` arg even if not used. |
| | |
| | Hacked together by / Copyright 2020 Ross Wightman |
| | """ |
| |
|
| | import torch |
| | from torch import nn as nn |
| | from torch.nn import functional as F |
| |
|
| |
|
def swish(x, inplace: bool = False):
    """Swish - Described in: https://arxiv.org/abs/1710.05941
    """
    return x.mul_(x.sigmoid()) if inplace else x.mul(x.sigmoid())


class Swish(nn.Module):
    def __init__(self, inplace: bool = False):
        super(Swish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return swish(x, self.inplace)


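# NOTE: swish as implemented here (beta fixed at 1.0) is the same function as SiLU, which
# newer PyTorch releases expose directly as F.silu / nn.SiLU. A quick sanity-check sketch,
# assuming a PyTorch build that provides F.silu:
#
#   x = torch.randn(4)
#   assert torch.allclose(swish(x), F.silu(x))

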
def mish(x, inplace: bool = False):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    NOTE: I don't have a working inplace variant
    """
    return x.mul(F.softplus(x).tanh())


class Mish(nn.Module):
    """Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
    """
    def __init__(self, inplace: bool = False):
        # `inplace` is accepted for interface compatibility only; no in-place mish is implemented
        super(Mish, self).__init__()

    def forward(self, x):
        return mish(x)


def sigmoid(x, inplace: bool = False):
    return x.sigmoid_() if inplace else x.sigmoid()


class Sigmoid(nn.Module):
    def __init__(self, inplace: bool = False):
        super(Sigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return x.sigmoid_() if self.inplace else x.sigmoid()


def tanh(x, inplace: bool = False):
    return x.tanh_() if inplace else x.tanh()


class Tanh(nn.Module):
    def __init__(self, inplace: bool = False):
        super(Tanh, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return x.tanh_() if self.inplace else x.tanh()


def hard_swish(x, inplace: bool = False):
    inner = F.relu6(x + 3.).div_(6.)
    return x.mul_(inner) if inplace else x.mul(inner)


class HardSwish(nn.Module):
    def __init__(self, inplace: bool = False):
        super(HardSwish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_swish(x, self.inplace)


def hard_sigmoid(x, inplace: bool = False):
    if inplace:
        return x.add_(3.).clamp_(0., 6.).div_(6.)
    else:
        return F.relu6(x + 3.) / 6.


class HardSigmoid(nn.Module):
    def __init__(self, inplace: bool = False):
        super(HardSigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_sigmoid(x, self.inplace)


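# NOTE: hard_swish(x) is simply x * hard_sigmoid(x), i.e. the piecewise-linear
# (MobileNetV3-style) approximations of swish and sigmoid. Newer PyTorch releases ship
# equivalent ops as F.hardswish / F.hardsigmoid. A sanity-check sketch, assuming a
# PyTorch build that provides those functions:
#
#   x = torch.randn(8)
#   assert torch.allclose(hard_swish(x), x * hard_sigmoid(x))
#   assert torch.allclose(hard_sigmoid(x), F.hardsigmoid(x))

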
def hard_mish(x, inplace: bool = False):
    """ Hard Mish
    Experimental, based on notes by Mish author Diganta Misra at
    https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
    """
    if inplace:
        return x.mul_(0.5 * (x + 2).clamp(min=0, max=2))
    else:
        return 0.5 * x * (x + 2).clamp(min=0, max=2)


class HardMish(nn.Module):
    def __init__(self, inplace: bool = False):
        super(HardMish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        return hard_mish(x, self.inplace)


class PReLU(nn.PReLU):
    """Applies PReLU (w/ dummy inplace arg)
    """
    def __init__(self, num_parameters: int = 1, init: float = 0.25, inplace: bool = False) -> None:
        super(PReLU, self).__init__(num_parameters=num_parameters, init=init)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.prelu(input, self.weight)


def gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    return F.gelu(x)


class GELU(nn.Module):
    """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)
    """
    def __init__(self, inplace: bool = False):
        super(GELU, self).__init__()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.gelu(input)


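# NOTE: torch.nn.GELU takes no `inplace` argument, so the wrapper above exists purely to
# give every activation here the same constructor signature. That lets higher-level code
# build any of these modules uniformly, e.g. (a hypothetical helper, not part of this
# module):
#
#   def create_act(layer: type, inplace: bool = True) -> nn.Module:
#       return layer(inplace=inplace)
#
#   act = create_act(GELU)       # works even though GELU ignores `inplace`
#   act = create_act(HardSwish)  # here `inplace` actually takes effect

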
def gelu_tanh(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    return F.gelu(x, approximate='tanh')


class GELUTanh(nn.Module):
    """Applies the Gaussian Error Linear Units function using the tanh approximation (w/ dummy inplace arg)
    """
    def __init__(self, inplace: bool = False):
        super(GELUTanh, self).__init__()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return F.gelu(input, approximate='tanh')


def quick_gelu(x: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    return x * torch.sigmoid(1.702 * x)


class QuickGELU(nn.Module):
    """Applies a fast sigmoid-based approximation of the Gaussian Error Linear Units function
    (w/ dummy inplace arg)
    """
    def __init__(self, inplace: bool = False):
        super(QuickGELU, self).__init__()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return quick_gelu(input)
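

# A minimal usage sketch of the common interface described in the module docstring: every
# module here accepts `inplace` in its constructor, so activations can be swapped without
# touching the surrounding model code. The layer sizes and activation choices below are
# illustrative only (GELUTanh additionally requires a PyTorch version that supports
# F.gelu(..., approximate='tanh')).
if __name__ == '__main__':
    for act_layer in (Swish, Mish, HardSwish, HardSigmoid, HardMish, GELU, QuickGELU):
        model = nn.Sequential(
            nn.Linear(8, 16),
            act_layer(inplace=True),  # `inplace` is accepted everywhere, even where unused
            nn.Linear(16, 4),
        )
        out = model(torch.randn(2, 8))
        print(f'{act_layer.__name__}: output shape {tuple(out.shape)}')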