| """ |
| SGDP Optimizer Implementation copied from https://github.com/clovaai/AdamP/blob/master/adamp/sgdp.py |
| |
| Paper: `Slowing Down the Weight Norm Increase in Momentum-based Optimizers` - https://arxiv.org/abs/2006.08217 |
| Code: https://github.com/clovaai/AdamP |
| |
| Copyright (c) 2020-present NAVER Corp. |
| MIT license |
| """ |
|
|
import math

import torch
import torch.nn as nn
from torch.optim.optimizer import Optimizer, required


class SGDP(Optimizer):
    def __init__(
        self,
        params,
        lr=required,
        momentum=0,
        dampening=0,
        weight_decay=0,
        nesterov=False,
        eps=1e-8,
        delta=0.1,
        wd_ratio=0.1,
    ):
        """SGD variant with the projection step from the SGDP paper.

        In addition to the standard SGD arguments:
            delta: threshold on the cosine similarity between gradient and
                weight, used to decide whether a parameter is scale-invariant.
            wd_ratio: relative weight decay applied to scale-invariant
                parameters, compared to the decay applied to other parameters.
        """
        defaults = dict(
            lr=lr,
            momentum=momentum,
            dampening=dampening,
            weight_decay=weight_decay,
            nesterov=nesterov,
            eps=eps,
            delta=delta,
            wd_ratio=wd_ratio,
        )
        super(SGDP, self).__init__(params, defaults)

    def _channel_view(self, x):
        return x.view(x.size(0), -1)

    def _layer_view(self, x):
        return x.view(1, -1)
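
    # Shape sketch (illustrative): for a conv weight of shape
    # (out_ch, in_ch, kH, kW), _channel_view flattens it to
    # (out_ch, in_ch * kH * kW), so the cosine similarity below is computed
    # per output channel, while _layer_view flattens the same tensor to
    # (1, out_ch * in_ch * kH * kW), i.e. one row for the entire layer.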

    def _cosine_similarity(self, x, y, eps, view_func):
        x = view_func(x)
        y = view_func(y)

        x_norm = x.norm(dim=1).add_(eps)
        y_norm = y.norm(dim=1).add_(eps)
        dot = (x * y).sum(dim=1)

        return dot.abs() / x_norm / y_norm

    def _projection(self, p, grad, perturb, delta, wd_ratio, eps):
        wd = 1
        expand_size = [-1] + [1] * (len(p.shape) - 1)
        for view_func in [self._channel_view, self._layer_view]:
            cosine_sim = self._cosine_similarity(grad, p.data, eps, view_func)

            if cosine_sim.max() < delta / math.sqrt(view_func(p.data).size(1)):
                # Gradient is nearly perpendicular to the weight, so treat the
                # parameter as scale-invariant: remove the component of the
                # update along the unit weight vector p_n and reduce its
                # effective weight decay to wd_ratio.
                p_n = p.data / view_func(p.data).norm(dim=1).view(expand_size).add_(eps)
                perturb -= p_n * view_func(p_n * perturb).sum(dim=1).view(expand_size)
                wd = wd_ratio
                return perturb, wd

        return perturb, wd
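
    # Threshold sketch (illustrative numbers): with the default delta = 0.1
    # and a channel view of width 256, the projection branch above fires only
    # when the largest cosine similarity is below 0.1 / sqrt(256) = 0.00625.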

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            dampening = group["dampening"]
            nesterov = group["nesterov"]

            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state["momentum"] = torch.zeros_like(p.data)

                # SGD momentum update
                buf = state["momentum"]
                buf.mul_(momentum).add_(grad, alpha=1 - dampening)
                if nesterov:
                    d_p = grad + momentum * buf
                else:
                    d_p = buf

                # Projection (applied only to tensors with more than one dim)
                wd_ratio = 1
                if len(p.shape) > 1:
                    d_p, wd_ratio = self._projection(
                        p, grad, d_p, group["delta"], group["wd_ratio"], group["eps"]
                    )

                # Weight decay, scaled by wd_ratio for scale-invariant params
                if weight_decay != 0:
                    p.data.mul_(
                        1
                        - group["lr"]
                        * group["weight_decay"]
                        * wd_ratio
                        / (1 - momentum)
                    )

                # Step
                p.data.add_(d_p, alpha=-group["lr"])

        return loss
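

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the toy model, random data,
    # and hyperparameters below are assumptions for demonstration and are not
    # part of the original module.
    torch.manual_seed(0)
    model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
    optimizer = SGDP(
        model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4, nesterov=True
    )

    x = torch.randn(32, 8)
    y = torch.randn(32, 1)
    for i in range(5):
        optimizer.zero_grad()
        loss = nn.functional.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()
        print(f"step {i}: loss = {loss.item():.4f}")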