import torch
import torch.nn as nn
import torch.nn.functional as F

from annotator.mmpkg.mmcv.utils import TORCH_VERSION, build_from_cfg, digit_version
from .registry import ACTIVATION_LAYERS

for module in [
        nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU,
        nn.Sigmoid, nn.Tanh
]:
    ACTIVATION_LAYERS.register_module(module=module)
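
# Illustrative usage sketch (comments only, nothing executed at import time):
# every class registered above can be looked up by name, e.g.
# ACTIVATION_LAYERS.get('ReLU') is nn.ReLU, or instantiated from a config
# dict via build_activation_layer(), defined at the bottom of this file:
#
#   >>> act = build_activation_layer(dict(type='LeakyReLU', negative_slope=0.1))
#   >>> isinstance(act, nn.LeakyReLU)
#   True
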

@ACTIVATION_LAYERS.register_module(name='Clip')
@ACTIVATION_LAYERS.register_module()
class Clamp(nn.Module):
    """Clamp activation layer.

    This activation function clamps the feature map values to the range
    :math:`[min, max]`. More details can be found in ``torch.clamp()``.

    Args:
        min (Number | optional): Lower-bound of the range to be clamped to.
            Defaults to -1.
        max (Number | optional): Upper-bound of the range to be clamped to.
            Defaults to 1.
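
    Example:
        >>> # illustrative sketch; note the class is also registered under
        >>> # the alias 'Clip' (see the decorators above)
        >>> m = Clamp(min=-1., max=1.)
        >>> x = torch.randn(2)
        >>> y = m(x)  # every element of y now lies in [-1, 1]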
| | """ |

    def __init__(self, min=-1., max=1.):
        super(Clamp, self).__init__()
        self.min = min
        self.max = max

    def forward(self, x):
        """Forward function.

        Args:
            x (torch.Tensor): The input tensor.

        Returns:
            torch.Tensor: Clamped tensor.
        """
        return torch.clamp(x, min=self.min, max=self.max)


class GELU(nn.Module):
    r"""Applies the Gaussian Error Linear Units function:

    .. math::
        \text{GELU}(x) = x * \Phi(x)

    where :math:`\Phi(x)` is the Cumulative Distribution Function for the
    Gaussian Distribution.

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Output: :math:`(N, *)`, same shape as the input

    .. image:: scripts/activation_images/GELU.png

    Examples::

        >>> m = nn.GELU()
        >>> input = torch.randn(2)
        >>> output = m(input)
    """

    def forward(self, input):
        return F.gelu(input)

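
# nn.GELU first appeared in PyTorch 1.4, so register the local fallback above
# for the parrots backend and for older torch versions.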
if (TORCH_VERSION == 'parrots'
        or digit_version(TORCH_VERSION) < digit_version('1.4')):
    ACTIVATION_LAYERS.register_module(module=GELU)
else:
    ACTIVATION_LAYERS.register_module(module=nn.GELU)
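
# Illustrative sketch: either way, a config of dict(type='GELU') resolves to
# whichever implementation was registered:
#
#   >>> gelu = build_activation_layer(dict(type='GELU'))
#   >>> torch.allclose(gelu(torch.zeros(3)), torch.zeros(3))
#   True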


def build_activation_layer(cfg):
    """Build activation layer.

    Args:
        cfg (dict): The activation layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate an activation layer.

    Returns:
        nn.Module: Created activation layer.
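
    Example:
        >>> # illustrative sketch; 'ReLU' is among the names registered above
        >>> act = build_activation_layer(dict(type='ReLU', inplace=True))
        >>> isinstance(act, nn.ReLU)
        True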
| | """ |
| | return build_from_cfg(cfg, ACTIVATION_LAYERS) |