| """ AdamW Optimizer |
| Impl copied from PyTorch master |
| |
| NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference |
| """ |
| import math |
| from typing import Tuple |
|
|
| import torch |
| from torch.optim.optimizer import Optimizer |
|
|
| from ._types import ParamsT |
|
|
|
|
class AdamWLegacy(Optimizer):
    r"""Implements AdamW algorithm.

    NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference

    References:
        - Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980
        - Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
        - On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ

    Args:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr: learning rate
        betas: coefficients used for computing running averages of gradient and its square
        eps: term added to the denominator to improve numerical stability
        weight_decay: weight decay coefficient
        amsgrad: whether to use the AMSGrad variant of this algorithm
            from the paper `On the Convergence of Adam and Beyond`
        caution: whether to use the cautious update rule, which masks out update
            components whose sign disagrees with the current gradient
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1e-3,
            betas: Tuple[float, float] = (0.9, 0.999),
            eps: float = 1e-8,
            weight_decay: float = 1e-2,
            amsgrad: bool = False,
            caution: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            caution=caution,
        )
        super(AdamWLegacy, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamWLegacy, self).__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may predate these options; fill in defaults on load
            group.setdefault('amsgrad', False)
            group.setdefault('caution', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                # Perform stepweight decay
                p.data.mul_(1 - group['lr'] * group['weight_decay'])
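                # This multiplicative shrink, p <- p * (1 - lr * wd), is the "decoupled"
                # weight decay of Loshchilov & Hutter: the decay acts on the weights
                # directly instead of being folded into the gradient, so it is not
                # rescaled by the adaptive second-moment denominator computed below.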

                # Perform optimization step
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('AdamW does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
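                # Both EMAs start at zero and are therefore biased toward zero early in
                # training; dividing by (1 - beta^t) debiases them, as derived in the
                # Adam paper (Kingma & Ba).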

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])

                step_size = group['lr'] / bias_correction1
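                # Combined with denom, the update applied below is
                #   p <- p - (lr / bias_correction1) * exp_avg / (sqrt(v) / sqrt(bias_correction2) + eps)
                # where v is exp_avg_sq (or its AMSGrad running max), i.e. the standard
                # debiased Adam step; the decoupled weight decay was already applied
                # at the top of the loop.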

                if group['caution']:
                    # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
                    mask = (exp_avg * grad > 0).to(grad.dtype)
                    mask.div_(mask.mean().clamp_(min=1e-3))
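                    # The binary mask zeroes update components where momentum and the
                    # current gradient disagree in sign; dividing by the (clamped) mean
                    # of the mask rescales the surviving components so the overall step
                    # magnitude stays roughly comparable to the unmasked update.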
                    exp_avg = exp_avg * mask

                p.addcdiv_(exp_avg, denom, value=-step_size)

        return loss