| | """RAdam |
| | Original source taken from https://github.com/LiyuanLucasLiu/RAdam |
| | |
| | Copyright 2019 Liyuan Liu |
| | |
| | Licensed under the Apache License, Version 2.0 (the "License"); |
| | you may not use this file except in compliance with the License. |
| | You may obtain a copy of the License at |
| | |
| | http://www.apache.org/licenses/LICENSE-2.0 |
| | |
| | Unless required by applicable law or agreed to in writing, software |
| | distributed under the License is distributed on an "AS IS" BASIS, |
| | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| | See the License for the specific language governing permissions and |
| | limitations under the License. |
| | """ |
import math

import torch
from torch.optim.optimizer import Optimizer


class RAdam(Optimizer):
    """RAdam optimizer"""

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        """
        Init

        :param params: iterable of parameters to optimize, or dicts defining parameter groups
        :param lr: learning rate
        :param betas: coefficients for the running averages of the gradient and its square
        :param eps: term added to the denominator for numerical stability
        :param weight_decay: weight decay (L2 penalty) coefficient
        """
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Cache of (step, N_sma, step_size) indexed by step % 10; the rectification
        # term depends only on the step count, so it can be reused across parameters.
        self.buffer = [[None, None, None] for _ in range(10)]
        super().__init__(params, defaults)

    def step(self, closure=None):
        """
        Perform a single optimization step.

        :param closure: optional callable that re-evaluates the model and returns the loss
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                # Work on an fp32 copy so half-precision parameters stay numerically stable
                p_data_fp32 = p.data.float()

                state = self.state[p]

                # Lazily initialize the per-parameter state on the first step
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Update biased first and second moment estimates, as in Adam
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1.0 - beta2))
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))

                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    # Length of the approximated simple moving average (SMA)
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

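                    # Rectification term from the RAdam paper (Liu et al., 2019),
                    # with rho_t = N_sma and rho_inf = N_sma_max:
                    #   r_t = sqrt( ((rho_t - 4) * (rho_t - 2) * rho_inf) /
                    #               ((rho_inf - 4) * (rho_inf - 2) * rho_t) )
                    # step_size below is lr * r_t * sqrt(1 - beta2^t) / (1 - beta1^t),
                    # i.e. the rectifier combined with both Adam bias corrections.
                    # The variance of the adaptive learning rate is only tractable
                    # for N_sma >= 5; below that, an un-rectified step is used.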
                    if N_sma >= 5:
                        step_size = (
                            group['lr']
                            * math.sqrt(
                                (1 - beta2_t)
                                * (N_sma - 4)
                                / (N_sma_max - 4)
                                * (N_sma - 2)
                                / N_sma
                                * N_sma_max
                                / (N_sma_max - 2)
                            )
                            / (1 - beta1 ** state['step'])
                        )
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                # Weight decay is applied directly to the weights, not via the gradient
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                # Rectified Adam update when the variance is tractable,
                # plain bias-corrected momentum update otherwise
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                # Write the fp32 result back into the (possibly lower-precision) parameter
                p.data.copy_(p_data_fp32)

        return loss
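

if __name__ == "__main__":
    # Minimal usage sketch (illustrative; not part of the original module).
    # The toy model and random data below are hypothetical stand-ins.
    import torch.nn as nn

    model = nn.Linear(10, 1)
    optimizer = RAdam(model.parameters(), lr=1e-3, weight_decay=1e-4)

    for _ in range(5):
        x, y = torch.randn(32, 10), torch.randn(32, 1)
        loss = nn.functional.mse_loss(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()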