# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math

import torch
from torch.optim.optimizer import Optimizer


class RAdam(Optimizer):
    """Rectified Adam (RAdam) optimizer.

    @article{liu2019radam,
        title={On the Variance of the Adaptive Learning Rate and Beyond},
        author={Liu, Liyuan and Jiang, Haoming and He, Pengcheng and Chen, Weizhu
                and Liu, Xiaodong and Gao, Jianfeng and Han, Jiawei},
        journal={arXiv preprint arXiv:1908.03265},
        year={2019}
    }
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Cache of [step, N_sma, step_size] keyed by step % 10, so the rectification
        # term is computed once per step rather than once per parameter.
        self.buffer = [[None, None, None] for _ in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")

                p_data_fp32 = p.data.float()

                state = self.state[p]
                if len(state) == 0:
                    state["step"] = 0
                    state["exp_avg"] = torch.zeros_like(p_data_fp32)
                    state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
                else:
                    state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                # Exponential moving averages of the gradient and the squared gradient.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state["step"] += 1
                buffered = self.buffer[int(state["step"] % 10)]
                if state["step"] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state["step"]
                    beta2_t = beta2 ** state["step"]
                    # N_sma is the length of the approximated simple moving average (SMA).
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        # Variance rectification term from the RAdam paper.
                        step_size = (
                            group["lr"]
                            * math.sqrt(
                                (1 - beta2_t)
                                * (N_sma - 4)
                                / (N_sma_max - 4)
                                * (N_sma - 2)
                                / N_sma
                                * N_sma_max
                                / (N_sma_max - 2)
                            )
                            / (1 - beta1 ** state["step"])
                        )
                    else:
                        # Variance is not tractable yet: fall back to an un-adapted update.
                        step_size = group["lr"] / (1 - beta1 ** state["step"])
                    buffered[2] = step_size

                if group["weight_decay"] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group["weight_decay"] * group["lr"])

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group["eps"])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
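

# A minimal usage sketch, not part of the original file: the Linear model and the
# random batch below are illustrative assumptions. RAdam above is a drop-in
# replacement for torch.optim.Adam:
#
#     model = torch.nn.Linear(10, 1)
#     optimizer = RAdam(model.parameters(), lr=1e-3, weight_decay=1e-2)
#     loss = model(torch.randn(4, 10)).pow(2).mean()
#     loss.backward()
#     optimizer.step()
#     optimizer.zero_grad()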


class PlainRAdam(Optimizer):
    """RAdam without the per-step cache: the rectification term is recomputed every step."""

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")

                p_data_fp32 = p.data.float()

                state = self.state[p]
                if len(state) == 0:
                    state["step"] = 0
                    state["exp_avg"] = torch.zeros_like(p_data_fp32)
                    state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
                else:
                    state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state["step"] += 1
                beta2_t = beta2 ** state["step"]
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)

                if group["weight_decay"] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group["weight_decay"] * group["lr"])

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    step_size = (
                        group["lr"]
                        * math.sqrt(
                            (1 - beta2_t)
                            * (N_sma - 4)
                            / (N_sma_max - 4)
                            * (N_sma - 2)
                            / N_sma
                            * N_sma_max
                            / (N_sma_max - 2)
                        )
                        / (1 - beta1 ** state["step"])
                    )
                    denom = exp_avg_sq.sqrt().add_(group["eps"])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    step_size = group["lr"] / (1 - beta1 ** state["step"])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                p.data.copy_(p_data_fp32)

        return loss


class AdamW(Optimizer):
    """Adam with decoupled weight decay and optional linear learning-rate warmup."""

    def __init__(
        self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup=0
    ):
        defaults = dict(
            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, warmup=warmup
        )
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError(
                        "Adam does not support sparse gradients, please consider SparseAdam instead"
                    )

                p_data_fp32 = p.data.float()

                state = self.state[p]
                if len(state) == 0:
                    state["step"] = 0
                    state["exp_avg"] = torch.zeros_like(p_data_fp32)
                    state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
                else:
                    state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                state["step"] += 1

                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                denom = exp_avg_sq.sqrt().add_(group["eps"])
                bias_correction1 = 1 - beta1 ** state["step"]
                bias_correction2 = 1 - beta2 ** state["step"]

                # Linear warmup: ramp the learning rate from ~0 to lr over `warmup` steps.
                if group["warmup"] > state["step"]:
                    scheduled_lr = 1e-8 + state["step"] * group["lr"] / group["warmup"]
                else:
                    scheduled_lr = group["lr"]

                # Use the warmup-scheduled learning rate for the update itself
                # (the original line used group["lr"], which made warmup a no-op here).
                step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1

                # Decoupled weight decay, applied directly to the parameters.
                if group["weight_decay"] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group["weight_decay"] * scheduled_lr)

                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
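

# A small smoke test, not part of the original file. It assumes nothing beyond this
# module and torch: it builds a throwaway linear model and runs one update with each
# optimizer defined above, just to confirm the step code executes.
if __name__ == "__main__":
    for opt_cls, extra_kwargs in [
        (RAdam, {}),
        (PlainRAdam, {}),
        (AdamW, {"warmup": 100}),
    ]:
        model = torch.nn.Linear(10, 1)
        optimizer = opt_cls(
            model.parameters(), lr=1e-3, weight_decay=1e-2, **extra_kwargs
        )
        loss = model(torch.randn(4, 10)).pow(2).mean()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        print(opt_cls.__name__, "step ok, loss =", float(loss))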