import torch
import math
import warnings

from torch.nn.init import _calculate_fan_in_and_fan_out


def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    # Draw from a truncated normal by sampling uniformly over the CDF range of
    # the truncation interval, then mapping back through the inverse error function.
    def norm_cdf(x):
        # Standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Get the lower and upper CDF values for the truncation bounds
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill the tensor with values in [2l - 1, 2u - 1]
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use the inverse CDF transform (erfinv) to obtain a truncated
        # standard normal
        tensor.erfinv_()

        # Scale and shift to the requested std and mean
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure the values lie within [a, b]
        tensor.clamp_(min=a, max=b)
        return tensor


def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
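
# Illustrative usage note (added here, not from the original module): the call
# is in-place and also returns the tensor, so it can initialize a freshly
# allocated weight directly, e.g.
#
#     w = torch.empty(64, 128)
#     trunc_normal_(w, std=.02)                # small-std init, values clipped to [-2, 2]
#     assert -2. <= w.min() and w.max() <= 2.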


def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        raise ValueError(f"invalid mode {mode}")

    variance = scale / denom

    if distribution == "truncated_normal":
        # constant is the stddev of a standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
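
# Worked example (added for illustration; the shapes are assumptions): for a
# weight of shape (10, 20), fan_in is 20, so scale=1.0 with mode='fan_in' gives
# variance 1/20; the normal std is sqrt(1/20) ~= 0.2236 and the uniform bound
# is sqrt(3/20) ~= 0.3873.
#
#     w = torch.empty(10, 20)
#     variance_scaling_(w, scale=1.0, mode='fan_in', distribution='uniform')
#     # all entries now lie in (-0.3873, 0.3873)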


def lecun_normal_(tensor):
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
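

# Minimal smoke-test sketch (an addition for illustration, not part of the
# original module). It assumes only the functions defined above plus torch.nn;
# each call below re-initializes the same weight in place, overwriting the
# previous one. Only the truncated-normal paths are used, since they run under
# torch.no_grad() and therefore work on leaf parameters that require grad.
if __name__ == '__main__':
    import torch.nn as nn

    layer = nn.Linear(128, 64)

    # Truncated normal with a small std (a common transformer-style choice)
    trunc_normal_(layer.weight, std=.02)

    # Fan-out scaled truncated normal, similar in spirit to a Kaiming-style init
    variance_scaling_(layer.weight, scale=2.0, mode='fan_out', distribution='truncated_normal')

    # LeCun normal: variance 1/fan_in, drawn from a truncated normal
    lecun_normal_(layer.weight)
    print('final weight std:', layer.weight.std().item())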