| | """ AdaHessian Optimizer |
| | |
| | Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py |
| | Originally licensed MIT, Copyright 2020, David Samuel |
| | """ |
| | import torch |
| |
|
| |
|
class Adahessian(torch.optim.Optimizer):
    """
    Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second Order Optimizer for Machine Learning"

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining parameter groups
        lr (float, optional): learning rate (default: 0.1)
        betas ((float, float), optional): coefficients used for computing running averages of gradient and the
            squared hessian trace (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0)
        hessian_power (float, optional): exponent of the hessian trace (default: 1.0)
        update_each (int, optional): compute the hessian trace approximation only after *this* number of steps
            (to save time) (default: 1)
        n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace (default: 1)
        avg_conv_kernel (bool, optional): average the Hessian diagonal over the spatial dimensions
            of 4D conv weights (default: False)
    """

    def __init__(
            self,
            params,
            lr=0.1,
            betas=(0.9, 0.999),
            eps=1e-8,
            weight_decay=0.0,
            hessian_power=1.0,
            update_each=1,
            n_samples=1,
            avg_conv_kernel=False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= hessian_power <= 1.0:
            raise ValueError(f"Invalid Hessian power value: {hessian_power}")

        self.n_samples = n_samples
        self.update_each = update_each
        self.avg_conv_kernel = avg_conv_kernel

        # use a separate generator that deterministically produces the same `z`
        # samples on every process in case of distributed training
        self.seed = 2147483647
        self.generator = torch.Generator().manual_seed(self.seed)

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            hessian_power=hessian_power,
        )
        super(Adahessian, self).__init__(params, defaults)

        # the accumulated Hessian diagonal estimate is stored on the parameter itself
        for p in self.get_params():
            p.hess = 0.0
            self.state[p]["hessian step"] = 0

    @property
    def is_second_order(self):
        # signals training loops that gradients must be created with
        # `loss.backward(create_graph=True)` so Hessian-vector products can be taken
        return True

    def get_params(self):
        """
        Gets all parameters in all param_groups with gradients
        """
        return (
            p for group in self.param_groups for p in group["params"] if p.requires_grad
        )

    def zero_hessian(self):
        """
        Zeros out the accumulated hessian traces.
        """
        for p in self.get_params():
            if (
                not isinstance(p.hess, float)
                and self.state[p]["hessian step"] % self.update_each == 0
            ):
                p.hess.zero_()

    @torch.no_grad()
    def set_hessian(self):
        """
        Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter.
        """
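        # Hutchinson's estimator: for z with i.i.d. Rademacher entries,
        # E[z * (H @ z)] = diag(H), so averaging z * (H @ z) over several
        # draws of z gives an unbiased estimate of the Hessian diagonal.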
        params = []
        for p in filter(lambda p: p.grad is not None, self.get_params()):
            # compute the trace only every `update_each` step
            if self.state[p]["hessian step"] % self.update_each == 0:
                params.append(p)
            self.state[p]["hessian step"] += 1

        if len(params) == 0:
            return

        # the generator must live on the same device as the parameters
        if self.generator.device != params[0].device:
            self.generator = torch.Generator(params[0].device).manual_seed(self.seed)

        grads = [p.grad for p in params]

        for i in range(self.n_samples):
            # Rademacher distribution {-1.0, 1.0}
            zs = [
                torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0
                for p in params
            ]
            # Hessian-vector products H @ z via a second backward pass;
            # retain the graph for all but the last sample
            h_zs = torch.autograd.grad(
                grads,
                params,
                grad_outputs=zs,
                only_inputs=True,
                retain_graph=i < self.n_samples - 1,
            )
            for h_z, z, p in zip(h_zs, zs, params):
                # accumulate the diagonal estimate z * (H @ z), averaged over samples
                p.hess += h_z * z / self.n_samples

    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.

        Arguments:
            closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)
        """
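        # The update below is Adam-like, with the squared Hessian diagonal
        # estimate in place of the squared gradient (k = hessian_power):
        #   m_t = beta1 * m_{t-1} + (1 - beta1) * g_t
        #   v_t = beta2 * v_{t-1} + (1 - beta2) * D_t^2      (D_t: Hutchinson diagonal estimate)
        #   p  -= lr * (m_t / (1 - beta1^t)) / ((v_t / (1 - beta2^t))^(k/2) + eps)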
        loss = None
        if closure is not None:
            loss = closure()

        self.zero_hessian()
        self.set_hessian()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None or p.hess is None:
                    continue

                if self.avg_conv_kernel and p.dim() == 4:
                    # average the Hessian diagonal over the spatial dims of conv kernels
                    p.hess = (
                        torch.abs(p.hess)
                        .mean(dim=[2, 3], keepdim=True)
                        .expand_as(p.hess)
                        .clone()
                    )

                # perform correct step weight decay as in AdamW
                p.mul_(1 - group["lr"] * group["weight_decay"])

                state = self.state[p]

                # state initialization; "hessian step" is already present, hence the == 1 check
                if len(state) == 1:
                    state["step"] = 0
                    # exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p)
                    # exponential moving average of squared Hessian diagonal values
                    state["exp_hessian_diag_sq"] = torch.zeros_like(p)

                exp_avg, exp_hessian_diag_sq = (
                    state["exp_avg"],
                    state["exp_hessian_diag_sq"],
                )
                beta1, beta2 = group["betas"]
                state["step"] += 1

                # decay the first and second moment running average coefficients
                exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
                exp_hessian_diag_sq.mul_(beta2).addcmul_(
                    p.hess, p.hess, value=1 - beta2
                )

                bias_correction1 = 1 - beta1 ** state["step"]
                bias_correction2 = 1 - beta2 ** state["step"]

                k = group["hessian_power"]
                denom = (
                    (exp_hessian_diag_sq / bias_correction2)
                    .pow_(k / 2)
                    .add_(group["eps"])
                )

                # make the parameter update
                step_size = group["lr"] / bias_correction1
                p.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
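

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: the model, data,
    # and loss here are illustrative placeholders. The one essential detail is
    # `create_graph=True` on backward, which keeps the autograd graph alive so
    # set_hessian() can differentiate through the gradients.
    model = torch.nn.Linear(4, 2)
    optimizer = Adahessian(model.parameters(), lr=0.1)
    x, y = torch.randn(8, 4), torch.randn(8, 2)

    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward(create_graph=True)  # required for the Hessian-vector products
    optimizer.step()
    optimizer.zero_grad()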