| index | package | name | docstring | code | signature |
|---|---|---|---|---|---|
717,400
|
pytorch_optimizer.optimizer.pcgrad
|
__init__
| null |
def __init__(self, optimizer: OPTIMIZER, reduction: str = 'mean'):
self.validate_options(reduction, 'reduction', ['mean', 'sum'])
self.optimizer = optimizer
self.reduction = reduction
|
(self, optimizer: Type[torch.optim.optimizer.Optimizer], reduction: str = 'mean')
|
717,407
|
pytorch_optimizer.optimizer.pcgrad
|
pack_grad
|
Pack the gradient of the parameters of the network for each objective.
:param objectives: Iterable[nn.Module]. a list of objectives.
:return: Tuple[List[torch.Tensor], List[List[int]], List[torch.Tensor]]. flattened gradients, their shapes, and masks marking which parameters received gradients.
|
def pack_grad(self, objectives: Iterable) -> Tuple[List[torch.Tensor], List[List[int]], List[torch.Tensor]]:
r"""Pack the gradient of the parameters of the network for each objective.
:param objectives: Iterable[nn.Module]. a list of objectives.
:return: Tuple[List[torch.Tensor], List[List[int]], List[torch.Tensor]]. flattened gradients, their shapes, and masks marking which parameters received gradients.
"""
grads, shapes, has_grads = [], [], []
for objective in objectives:
self.optimizer.zero_grad(set_to_none=True)
objective.backward(retain_graph=True)
grad, shape, has_grad = self.retrieve_grad()
grads.append(flatten_grad(grad))
has_grads.append(flatten_grad(has_grad))
shapes.append(shape)
return grads, shapes, has_grads
|
(self, objectives: Iterable) -> Tuple[List[torch.Tensor], List[List[int]], List[torch.Tensor]]
|
717,408
|
pytorch_optimizer.optimizer.pcgrad
|
pc_backward
|
Calculate the gradient of the parameters.
:param objectives: Iterable[nn.Module]. a list of objectives.
|
def pc_backward(self, objectives: Iterable[nn.Module]):
r"""Calculate the gradient of the parameters.
:param objectives: Iterable[nn.Module]. a list of objectives.
"""
grads, shapes, has_grads = self.pack_grad(objectives)
pc_grad = self.project_conflicting(grads, has_grads)
pc_grad = un_flatten_grad(pc_grad, shapes[0])
self.set_grad(pc_grad)
|
(self, objectives: Iterable[torch.nn.modules.module.Module])
|
717,409
|
pytorch_optimizer.optimizer.pcgrad
|
project_conflicting
|
Project conflicting.
:param grads: a list of the gradient of the parameters.
:param has_grads: a list of masks representing whether each parameter has a gradient.
:return: torch.Tensor. merged gradients.
|
def project_conflicting(self, grads: List[torch.Tensor], has_grads: List[torch.Tensor]) -> torch.Tensor:
r"""Project conflicting.
:param grads: a list of the gradient of the parameters.
:param has_grads: a list of masks representing whether each parameter has a gradient.
:return: torch.Tensor. merged gradients.
"""
shared: torch.Tensor = torch.stack(has_grads).prod(0).bool()
pc_grad: List[torch.Tensor] = deepcopy(grads)
for i, g_i in enumerate(pc_grad):
random.shuffle(grads)
for g_j in grads:
g_i_g_j: torch.Tensor = torch.dot(g_i, g_j)
if g_i_g_j < 0:
pc_grad[i] -= g_i_g_j * g_j / (g_j.norm() ** 2)
merged_grad: torch.Tensor = torch.zeros_like(grads[0])
shared_pc_gradients: torch.Tensor = torch.stack([g[shared] for g in pc_grad])
if self.reduction == 'mean':
merged_grad[shared] = shared_pc_gradients.mean(dim=0)
else:
merged_grad[shared] = shared_pc_gradients.sum(dim=0)
merged_grad[~shared] = torch.stack([g[~shared] for g in pc_grad]).sum(dim=0)
return merged_grad
|
(self, grads: List[torch.Tensor], has_grads: List[torch.Tensor]) -> torch.Tensor
|
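When g_i · g_j < 0, `project_conflicting` removes from g_i its component along g_j (the PCGrad projection). A tiny numeric illustration of that step (the values are made up):

```python
import torch

g_i = torch.tensor([1.0, 0.0])
g_j = torch.tensor([-1.0, 1.0])

dot = torch.dot(g_i, g_j)                # tensor(-1.) < 0: the gradients conflict
g_i = g_i - dot * g_j / g_j.norm() ** 2  # drop the component of g_i along g_j
# g_i is now tensor([0.5000, 0.5000]); torch.dot(g_i, g_j) == 0, so the conflict is gone
```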
717,410
|
pytorch_optimizer.optimizer.pcgrad
|
reset
| null |
def reset(self):
self.zero_grad()
|
(self)
|
717,411
|
pytorch_optimizer.optimizer.pcgrad
|
retrieve_grad
|
Get the gradient of the parameters of the network for a specific objective.
|
def retrieve_grad(self) -> Tuple[List[torch.Tensor], List[int], List[torch.Tensor]]:
r"""Get the gradient of the parameters of the network with specific objective."""
grad, shape, has_grad = [], [], []
for group in self.optimizer.param_groups:
for p in group['params']:
if p.grad is None:
shape.append(p.shape)
grad.append(torch.zeros_like(p, device=p.device))
has_grad.append(torch.zeros_like(p, device=p.device))
continue
shape.append(p.grad.shape)
grad.append(p.grad.clone())
has_grad.append(torch.ones_like(p, device=p.device))
return grad, shape, has_grad
|
(self) -> Tuple[List[torch.Tensor], List[int], List[torch.Tensor]]
|
717,412
|
pytorch_optimizer.optimizer.pcgrad
|
set_grad
| null |
def set_grad(self, grads: List[torch.Tensor]):
idx: int = 0
for group in self.optimizer.param_groups:
for p in group['params']:
p.grad = grads[idx]
idx += 1
|
(self, grads: List[torch.Tensor])
|
717,414
|
pytorch_optimizer.optimizer.pcgrad
|
step
| null |
def step(self):
return self.optimizer.step()
|
(self)
|
717,424
|
pytorch_optimizer.optimizer.pcgrad
|
zero_grad
| null |
def zero_grad(self):
return self.optimizer.zero_grad(set_to_none=True)
|
(self)
|
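Putting the PCGrad methods together: the class wraps an existing optimizer and replaces the usual `loss.backward()` with `pc_backward` over the per-task losses. A minimal usage sketch (the linear model and random targets are stand-ins for illustration):

```python
import torch
import torch.nn as nn
from pytorch_optimizer import PCGrad

model = nn.Linear(8, 2)
optimizer = PCGrad(torch.optim.AdamW(model.parameters(), lr=1e-3))

x, y1, y2 = torch.randn(4, 8), torch.randn(4, 2), torch.randn(4, 2)
out = model(x)
losses = [nn.functional.mse_loss(out, y1), nn.functional.mse_loss(out, y2)]

optimizer.zero_grad()
optimizer.pc_backward(losses)  # pack, project conflicting gradients, set merged grads
optimizer.step()
```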
717,426
|
pytorch_optimizer.optimizer.pid
|
PID
|
A PID Controller Approach for Stochastic Optimization of Deep Networks.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor.
:param dampening: float. dampening for momentum.
:param derivative: float. D part of the PID.
:param integral: float. I part of the PID.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
|
class PID(Optimizer, BaseOptimizer):
r"""A PID Controller Approach for Stochastic Optimization of Deep Networks.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor.
:param dampening: float. dampening for momentum.
:param derivative: float. D part of the PID.
:param integral: float. I part of the PID.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.0,
dampening: float = 0.0,
derivative: float = 10.0,
integral: float = 5.0,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(derivative, 'derivative')
self.validate_non_negative(integral, 'integral')
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'dampening': dampening,
'derivative': derivative,
'integral': integral,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'PID'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
if group['momentum'] > 0.0:
state['grad_buffer'] = torch.zeros_like(p)
state['i_buffer'] = torch.zeros_like(p)
state['d_buffer'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0 and group['momentum'] > 0.0:
state['grad_buffer'] = torch.zeros_like(p)
state['i_buffer'] = torch.zeros_like(p)
state['d_buffer'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if group['momentum'] > 0.0:
i_buf = state['i_buffer']
i_buf.mul_(group['momentum']).add_(grad, alpha=1.0 - group['dampening'])
g_buf, d_buf = state['grad_buffer'], state['d_buffer']
d_buf.mul_(group['momentum'])
if group['step'] > 1:
d_buf.add_(grad - g_buf, alpha=1.0 - group['momentum'])
g_buf.copy_(grad)
grad.add_(i_buf, alpha=group['integral']).add_(d_buf, alpha=group['derivative'])
p.add_(grad, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.0, dampening: float = 0.0, derivative: float = 10.0, integral: float = 5.0, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
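The `step` above is a PID control law on the gradient: the raw gradient is the P term, a momentum-decayed running sum is the I term, and a momentum-decayed gradient difference is the D term. A plain-torch sketch of one update on a single tensor (function and buffer names are mine; weight decay is omitted and momentum > 0 is assumed):

```python
import torch

def pid_update(p, grad, i_buf, d_buf, g_buf, step, lr=1e-3,
               momentum=0.9, dampening=0.0, integral=5.0, derivative=10.0):
    i_buf.mul_(momentum).add_(grad, alpha=1.0 - dampening)  # I: running integral of gradients
    d_buf.mul_(momentum)
    if step > 1:
        d_buf.add_(grad - g_buf, alpha=1.0 - momentum)      # D: change in the gradient
    g_buf.copy_(grad)                                       # remember the gradient for next step
    update = grad + integral * i_buf + derivative * d_buf   # P + I + D
    p.add_(update, alpha=-lr)
```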
717,428
|
pytorch_optimizer.optimizer.pid
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.0,
dampening: float = 0.0,
derivative: float = 10.0,
integral: float = 5.0,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(derivative, 'derivative')
self.validate_non_negative(integral, 'integral')
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'dampening': dampening,
'derivative': derivative,
'integral': integral,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.0, dampening: float = 0.0, derivative: float = 10.0, integral: float = 5.0, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
717,431
|
pytorch_optimizer.optimizer.pid
|
__str__
| null |
def __str__(self) -> str:
return 'PID'
|
(self) -> str
|
717,452
|
pytorch_optimizer.optimizer.pid
|
reset
| null |
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
if group['momentum'] > 0.0:
state['grad_buffer'] = torch.zeros_like(p)
state['i_buffer'] = torch.zeros_like(p)
state['d_buffer'] = torch.zeros_like(p)
|
(self)
|
717,467
|
pytorch_optimizer.optimizer.pnm
|
PNM
|
Positive-Negative Momentum Optimizers.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing the positive-negative momentum update.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. use weight_decouple.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
|
class PNM(Optimizer, BaseOptimizer):
r"""Positive-Negative Momentum Optimizers.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing the positive-negative momentum update.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. use weight_decouple.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 1.0),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'PNM'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['pos_momentum'] = torch.zeros_like(p)
state['neg_momentum'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
noise_norm: float = math.sqrt((1 + beta2) ** 2 + beta2 ** 2) # fmt: skip
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['pos_momentum'] = torch.zeros_like(p)
state['neg_momentum'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if group['step'] % 2 == 1:
pos_momentum, neg_momentum = state['pos_momentum'], state['neg_momentum']
else:
neg_momentum, pos_momentum = state['pos_momentum'], state['neg_momentum']
pos_momentum.mul_(beta1 ** 2).add_(grad, alpha=1.0 - beta1 ** 2) # fmt: skip
delta_p = pos_momentum.mul(1 + beta2).add_(neg_momentum, alpha=-beta2).mul_(1.0 / noise_norm)
p.add_(delta_p, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 1.0), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, eps: float = 1e-08)
|
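Each parameter keeps two momentum buffers whose roles swap every step, so the freshly updated buffer acts as positive momentum and the stale one as negative momentum. The update direction is ((1 + beta2) * pos - beta2 * neg) / sqrt((1 + beta2)^2 + beta2^2), which normalizes the injected gradient noise. A self-contained usage sketch (toy model and objective):

```python
import torch
import torch.nn as nn
from pytorch_optimizer import PNM

model = nn.Linear(8, 2)
optimizer = PNM(model.parameters(), lr=1e-3, betas=(0.9, 1.0))

loss = model(torch.randn(4, 8)).pow(2).mean()  # toy objective
loss.backward()
optimizer.step()
optimizer.zero_grad()
```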
717,469
|
pytorch_optimizer.optimizer.pnm
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 1.0),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 1.0), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, eps: float = 1e-08)
|
717,472
|
pytorch_optimizer.optimizer.pnm
|
__str__
| null |
def __str__(self) -> str:
return 'PNM'
|
(self) -> str
|
717,508
|
pytorch_optimizer.lr_scheduler.linear_warmup
|
PolyScheduler
|
Poly LR Scheduler.
:param poly_order: float. order of the polynomial governing how the lr changes with steps.
|
class PolyScheduler(BaseLinearWarmupScheduler):
r"""Poly LR Scheduler.
:param poly_order: float. order of the polynomial governing how the lr changes with steps.
"""
def __init__(self, poly_order: float = 0.5, **kwargs):
self.poly_order = poly_order
if poly_order <= 0:
raise ValueError(f'[-] poly_order must be positive. {poly_order}')
super().__init__(**kwargs)
def _step(self) -> float:
return self.min_lr + (self.max_lr - self.min_lr) * (self.step_t - self.warmup_steps) ** self.poly_order
|
(poly_order: float = 0.5, **kwargs)
|
717,509
|
pytorch_optimizer.lr_scheduler.linear_warmup
|
__init__
| null |
def __init__(self, poly_order: float = 0.5, **kwargs):
self.poly_order = poly_order
if poly_order <= 0:
raise ValueError(f'[-] poly_order must be positive. {poly_order}')
super().__init__(**kwargs)
|
(self, poly_order: float = 0.5, **kwargs)
|
717,511
|
pytorch_optimizer.lr_scheduler.linear_warmup
|
_step
| null |
def _step(self) -> float:
return self.min_lr + (self.max_lr - self.min_lr) * (self.step_t - self.warmup_steps) ** self.poly_order
|
(self) -> float
|
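PolyScheduler inherits its warm-up behavior from BaseLinearWarmupScheduler and only overrides the post-warm-up rule shown in `_step`. A hedged usage sketch; the keyword names `t_max`, `max_lr`, `min_lr`, and `warmup_steps` are assumptions about the base class inferred from the attributes used above:

```python
from pytorch_optimizer import PolyScheduler

scheduler = PolyScheduler(
    optimizer=optimizer,  # a torch.optim optimizer built elsewhere
    t_max=1000,           # assumed base-class kwarg: total number of steps
    max_lr=1e-3,
    min_lr=1e-6,
    warmup_steps=100,
    poly_order=0.5,
)

for _ in range(1000):
    # ... forward / backward / optimizer.step() ...
    scheduler.step()
```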
717,515
|
pytorch_optimizer.optimizer.shampoo_utils
|
PreConditioner
|
Compute statistics & shape from gradients for preconditioning.
:param var: torch.Tensor. variable.
:param beta2: float. EMA decay rate for accumulating the statistics.
:param inverse_exponent_override: int. override the inverse exponent (0 means use 2 * rank).
:param block_size: int. size of each partitioned block.
:param skip_preconditioning_rank_lt: int. skip preconditioning for parameters whose rank is below this value.
:param no_preconditioning_for_layers_with_dim_gt: int. skip preconditioning for layers with any dim greater than this value.
:param shape_interpretation: bool. merge small dims when reshaping the parameter.
:param pre_conditioner_type: int. type of pre-conditioner.
:param matrix_eps: float. epsilon of matrix.
:param use_svd: bool. use SVD instead of Schur-Newton method to calculate M^{-1/p}.
|
class PreConditioner:
r"""Compute statistics & shape from gradients for preconditioning.
:param var: torch.Tensor. variable.
:param beta2: float. EMA decay rate for accumulating the statistics.
:param inverse_exponent_override: int. override the inverse exponent (0 means use 2 * rank).
:param block_size: int. size of each partitioned block.
:param skip_preconditioning_rank_lt: int. skip preconditioning for parameters whose rank is below this value.
:param no_preconditioning_for_layers_with_dim_gt: int. skip preconditioning for layers with any dim greater than this value.
:param shape_interpretation: bool. merge small dims when reshaping the parameter.
:param pre_conditioner_type: int. type of pre-conditioner.
:param matrix_eps: float. epsilon of matrix.
:param use_svd: bool. use SVD instead of Schur-Newton method to calculate M^{-1/p}.
"""
def __init__(
self,
var: torch.Tensor,
beta2: float,
inverse_exponent_override: int,
block_size: int,
skip_preconditioning_rank_lt: int,
no_preconditioning_for_layers_with_dim_gt: int,
shape_interpretation: bool,
pre_conditioner_type: int,
matrix_eps: float = 1e-6,
use_svd: bool = False,
):
self.beta2 = beta2
self.inverse_exponent_override = inverse_exponent_override
self.skip_preconditioning_rank_lt = skip_preconditioning_rank_lt
self.no_preconditioning_for_layers_with_dim_gt = no_preconditioning_for_layers_with_dim_gt
self.pre_conditioner_type = pre_conditioner_type
self.matrix_eps = matrix_eps
self.use_svd = use_svd
self.w2: float = 1.0 if self.beta2 == 1.0 else (1.0 - self.beta2)
self.original_shape: List[int] = var.shape
self.transformed_shape: List[int] = (
merge_small_dims(self.original_shape, block_size) if shape_interpretation else var.shape
)
self.should_precondition_dims: List[bool] = self.get_should_precondition_dims()
self.rank: int = sum(self.should_precondition_dims)
self.exponent_for_pre_conditioner: int = (
self.inverse_exponent_override if self.inverse_exponent_override > 0 else 2 * self.rank
)
self.statistics: Union[List[torch.Tensor], torch.Tensor] = []
self.pre_conditioners: Union[List[torch.Tensor], torch.Tensor] = []
self.is_same_shapes: bool = False
if len(self.transformed_shape) > 1 and not self.skip_precondition(var):
self.partitioner = BlockPartitioner(
var=torch.reshape(var, self.transformed_shape),
rank=self.rank,
block_size=block_size,
pre_conditioner_type=self.pre_conditioner_type,
)
shapes: List[Optional[List[int]]] = self.partitioner.shapes_for_pre_conditioners()
self.statistics = [self.matrix_eps * torch.eye(shape[0], device=var.device) for shape in shapes if shape]
self.pre_conditioners = [torch.eye(shape[0], device=var.device) for shape in shapes if shape]
self.is_same_shapes = None not in shapes and len(np.unique(shapes)) == 1
if self.is_same_shapes:
self.statistics = torch.stack(self.statistics, dim=0)
self.pre_conditioners = torch.stack(self.pre_conditioners, dim=0)
def get_should_precondition_dims(self) -> List[bool]:
r"""Get pre-condition dimensions by the type of conditioner."""
if self.pre_conditioner_type == PreConditionerType.ALL or len(self.transformed_shape) <= 1:
return [True] * len(self.transformed_shape)
if self.pre_conditioner_type == PreConditionerType.INPUT:
return [True] * (len(self.transformed_shape) - 1) + [False]
if self.pre_conditioner_type == PreConditionerType.OUTPUT:
return [False] * (len(self.transformed_shape) - 1) + [True]
raise ValueError
def skip_precondition(self, x: torch.Tensor) -> bool:
return (len(x.shape) < self.skip_preconditioning_rank_lt) or any(
dim > self.no_preconditioning_for_layers_with_dim_gt for dim in x.shape
)
def add_statistics(self, grad: torch.Tensor):
r"""Compute statistics from gradients and add to the correct state entries.
:param grad: torch.Tensor. gradient to compute statistics from.
"""
if len(self.statistics) == 0:
return
reshaped_grad: torch.Tensor = torch.reshape(grad, self.transformed_shape)
partitioned_grads: List[torch.Tensor] = self.partitioner.partition(reshaped_grad)
for j in range(len(partitioned_grads)):
partitioned_grad: torch.Tensor = partitioned_grads[j]
for i in range(self.rank):
axes: List[int] = [ax for ax in range(partitioned_grad.ndim) if ax != i]
stat: torch.Tensor = torch.tensordot(partitioned_grad, partitioned_grad, dims=[axes, axes])
self.statistics[j * self.rank + i].mul_(self.beta2).add_(stat, alpha=self.w2)
def compute_pre_conditioners(self):
r"""Compute L^{-1/exp} for each stats matrix L.
If `self.use_svd` is enabled and where all shapes of statistics & pre-conditioners are same, perform batch SVD.
else, SVD one by one.
If `self.use_svd` is disabled, use Schur-Newton method, which is usually much faster.
"""
if self.use_svd and self.is_same_shapes:
self.pre_conditioners = compute_power_svd(matrix=self.statistics, power=self.exponent_for_pre_conditioner)
return
for i, statistic in enumerate(self.statistics):
self.pre_conditioners[i] = (
compute_power_svd(matrix=statistic, power=self.exponent_for_pre_conditioner)
if self.use_svd
else compute_power_schur_newton(
mat_g=statistic, p=self.exponent_for_pre_conditioner, ridge_epsilon=self.matrix_eps
)
)
@staticmethod
def precondition_block(
partitioned_grad: torch.Tensor,
should_preconditioned_dims: List[bool],
pre_conditioners_for_grad: List[torch.Tensor],
) -> torch.Tensor:
r"""Perform a preconditioning operation on a single gradient block.
Loop invariant: the dimension to be preconditioned comes first;
all axes are kept in the same cyclic order they had originally.
"""
rank: int = len(partitioned_grad.shape)
roll: Tuple[int, ...] = (*tuple(range(1, rank)), 0)
i: int = 0
for should_precondition_dim in should_preconditioned_dims:
if not should_precondition_dim:
partitioned_grad = torch.permute(partitioned_grad, roll)
continue
partitioned_grad = torch.tensordot(partitioned_grad, pre_conditioners_for_grad[i], dims=[[0], [0]])
i += 1
return partitioned_grad
def preconditioned_grad(self, grad: torch.Tensor) -> torch.Tensor:
r"""Precondition the gradient.
:param grad: torch.Tensor. a gradient tensor to precondition.
"""
if len(self.pre_conditioners) == 0:
return grad
reshaped_grad = torch.reshape(grad, self.transformed_shape)
partitioned_grads = self.partitioner.partition(reshaped_grad)
pre_cond_partitioned_grads: List[torch.Tensor] = [
self.precondition_block(
partitioned_grad,
self.should_precondition_dims,
self.pre_conditioners[i * self.rank:(i + 1) * self.rank] # fmt: skip
)
for i, partitioned_grad in enumerate(partitioned_grads)
]
merged_grad = self.partitioner.merge_partitions(pre_cond_partitioned_grads)
return torch.reshape(merged_grad, self.original_shape)
|
(var: torch.Tensor, beta2: float, inverse_exponent_override: int, block_size: int, skip_preconditioning_rank_lt: int, no_preconditioning_for_layers_with_dim_gt: int, shape_interpretation: bool, pre_conditioner_type: int, matrix_eps: float = 1e-06, use_svd: bool = False)
|
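For a 2-D gradient block G, `add_statistics` accumulates one statistic per preconditioned dim (G @ G.T and G.T @ G below), and `compute_pre_conditioners` raises each to the power -1/(2 * rank). A self-contained toy sketch of that math outside the class; the eigendecomposition stands in for `compute_power_svd` / `compute_power_schur_newton`:

```python
import torch

G = torch.randn(4, 3)
eps = 1e-6

L = eps * torch.eye(4) + G @ G.T  # statistic over dim 0 (all other axes contracted)
R = eps * torch.eye(3) + G.T @ G  # statistic over dim 1

def inv_root(mat: torch.Tensor, p: int) -> torch.Tensor:
    """mat^{-1/p} for a symmetric PSD matrix via eigendecomposition."""
    vals, vecs = torch.linalg.eigh(mat)
    return vecs @ torch.diag(vals.clamp_min(1e-12) ** (-1.0 / p)) @ vecs.T

# rank = 2, so exponent_for_pre_conditioner = 2 * rank = 4
preconditioned = inv_root(L, 4) @ G @ inv_root(R, 4)
```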
717,516
|
pytorch_optimizer.optimizer.shampoo_utils
|
__init__
| null |
def __init__(
self,
var: torch.Tensor,
beta2: float,
inverse_exponent_override: int,
block_size: int,
skip_preconditioning_rank_lt: int,
no_preconditioning_for_layers_with_dim_gt: int,
shape_interpretation: bool,
pre_conditioner_type: int,
matrix_eps: float = 1e-6,
use_svd: bool = False,
):
self.beta2 = beta2
self.inverse_exponent_override = inverse_exponent_override
self.skip_preconditioning_rank_lt = skip_preconditioning_rank_lt
self.no_preconditioning_for_layers_with_dim_gt = no_preconditioning_for_layers_with_dim_gt
self.pre_conditioner_type = pre_conditioner_type
self.matrix_eps = matrix_eps
self.use_svd = use_svd
self.w2: float = 1.0 if self.beta2 == 1.0 else (1.0 - self.beta2)
self.original_shape: List[int] = var.shape
self.transformed_shape: List[int] = (
merge_small_dims(self.original_shape, block_size) if shape_interpretation else var.shape
)
self.should_precondition_dims: List[bool] = self.get_should_precondition_dims()
self.rank: int = sum(self.should_precondition_dims)
self.exponent_for_pre_conditioner: int = (
self.inverse_exponent_override if self.inverse_exponent_override > 0 else 2 * self.rank
)
self.statistics: Union[List[torch.Tensor], torch.Tensor] = []
self.pre_conditioners: Union[List[torch.Tensor], torch.Tensor] = []
self.is_same_shapes: bool = False
if len(self.transformed_shape) > 1 and not self.skip_precondition(var):
self.partitioner = BlockPartitioner(
var=torch.reshape(var, self.transformed_shape),
rank=self.rank,
block_size=block_size,
pre_conditioner_type=self.pre_conditioner_type,
)
shapes: List[Optional[List[int]]] = self.partitioner.shapes_for_pre_conditioners()
self.statistics = [self.matrix_eps * torch.eye(shape[0], device=var.device) for shape in shapes if shape]
self.pre_conditioners = [torch.eye(shape[0], device=var.device) for shape in shapes if shape]
self.is_same_shapes = None not in shapes and len(np.unique(shapes)) == 1
if self.is_same_shapes:
self.statistics = torch.stack(self.statistics, dim=0)
self.pre_conditioners = torch.stack(self.pre_conditioners, dim=0)
|
(self, var: torch.Tensor, beta2: float, inverse_exponent_override: int, block_size: int, skip_preconditioning_rank_lt: int, no_preconditioning_for_layers_with_dim_gt: int, shape_interpretation: bool, pre_conditioner_type: int, matrix_eps: float = 1e-06, use_svd: bool = False)
|
717,517
|
pytorch_optimizer.optimizer.shampoo_utils
|
add_statistics
|
Compute statistics from gradients and add to the correct state entries.
:param grad: torch.Tensor. gradient to compute statistics from.
|
def add_statistics(self, grad: torch.Tensor):
r"""Compute statistics from gradients and add to the correct state entries.
:param grad: torch.Tensor. gradient to compute statistics from.
"""
if len(self.statistics) == 0:
return
reshaped_grad: torch.Tensor = torch.reshape(grad, self.transformed_shape)
partitioned_grads: List[torch.Tensor] = self.partitioner.partition(reshaped_grad)
for j in range(len(partitioned_grads)):
partitioned_grad: torch.Tensor = partitioned_grads[j]
for i in range(self.rank):
axes: List[int] = [ax for ax in range(partitioned_grad.ndim) if ax != i]
stat: torch.Tensor = torch.tensordot(partitioned_grad, partitioned_grad, dims=[axes, axes])
self.statistics[j * self.rank + i].mul_(self.beta2).add_(stat, alpha=self.w2)
|
(self, grad: torch.Tensor)
|
717,518
|
pytorch_optimizer.optimizer.shampoo_utils
|
compute_pre_conditioners
|
Compute L^{-1/exp} for each statistics matrix L.
If `self.use_svd` is enabled and all statistics & pre-conditioner shapes match, perform a single batched SVD; otherwise, apply SVD one matrix at a time.
If `self.use_svd` is disabled, use the Schur-Newton method instead, which is usually much faster.
|
def compute_pre_conditioners(self):
r"""Compute L^{-1/exp} for each stats matrix L.
If `self.use_svd` is enabled and where all shapes of statistics & pre-conditioners are same, perform batch SVD.
else, SVD one by one.
If `self.use_svd` is disabled, use Schur-Newton method, which is usually much faster.
"""
if self.use_svd and self.is_same_shapes:
self.pre_conditioners = compute_power_svd(matrix=self.statistics, power=self.exponent_for_pre_conditioner)
return
for i, statistic in enumerate(self.statistics):
self.pre_conditioners[i] = (
compute_power_svd(matrix=statistic, power=self.exponent_for_pre_conditioner)
if self.use_svd
else compute_power_schur_newton(
mat_g=statistic, p=self.exponent_for_pre_conditioner, ridge_epsilon=self.matrix_eps
)
)
|
(self)
|
717,519
|
pytorch_optimizer.optimizer.shampoo_utils
|
get_should_precondition_dims
|
Get pre-condition dimensions by the type of conditioner.
|
def get_should_precondition_dims(self) -> List[bool]:
r"""Get pre-condition dimensions by the type of conditioner."""
if self.pre_conditioner_type == PreConditionerType.ALL or len(self.transformed_shape) <= 1:
return [True] * len(self.transformed_shape)
if self.pre_conditioner_type == PreConditionerType.INPUT:
return [True] * (len(self.transformed_shape) - 1) + [False]
if self.pre_conditioner_type == PreConditionerType.OUTPUT:
return [False] * (len(self.transformed_shape) - 1) + [True]
raise ValueError
|
(self) -> List[bool]
|
717,520
|
pytorch_optimizer.optimizer.shampoo_utils
|
precondition_block
|
Perform a preconditioning operation on a single gradient block.
Loop invariant: the dimension to be preconditioned comes first;
all axes are kept in the same cyclic order they had originally.
|
@staticmethod
def precondition_block(
partitioned_grad: torch.Tensor,
should_preconditioned_dims: List[bool],
pre_conditioners_for_grad: List[torch.Tensor],
) -> torch.Tensor:
r"""Perform a preconditioning operation on a single gradient block.
Loop invariant: the dimension to be preconditioned comes first;
all axes are kept in the same cyclic order they had originally.
"""
rank: int = len(partitioned_grad.shape)
roll: Tuple[int, ...] = (*tuple(range(1, rank)), 0)
i: int = 0
for should_precondition_dim in should_preconditioned_dims:
if not should_precondition_dim:
partitioned_grad = torch.permute(partitioned_grad, roll)
continue
partitioned_grad = torch.tensordot(partitioned_grad, pre_conditioners_for_grad[i], dims=[[0], [0]])
i += 1
return partitioned_grad
|
(partitioned_grad: torch.Tensor, should_preconditioned_dims: List[bool], pre_conditioners_for_grad: List[torch.Tensor]) -> torch.Tensor
|
717,521
|
pytorch_optimizer.optimizer.shampoo_utils
|
preconditioned_grad
|
Precondition the gradient.
:param grad: torch.Tensor. a gradient tensor to precondition.
|
def preconditioned_grad(self, grad: torch.Tensor) -> torch.Tensor:
r"""Precondition the gradient.
:param grad: torch.Tensor. a gradient tensor to precondition.
"""
if len(self.pre_conditioners) == 0:
return grad
reshaped_grad = torch.reshape(grad, self.transformed_shape)
partitioned_grads = self.partitioner.partition(reshaped_grad)
pre_cond_partitioned_grads: List[torch.Tensor] = [
self.precondition_block(
partitioned_grad,
self.should_precondition_dims,
self.pre_conditioners[i * self.rank:(i + 1) * self.rank] # fmt: skip
)
for i, partitioned_grad in enumerate(partitioned_grads)
]
merged_grad = self.partitioner.merge_partitions(pre_cond_partitioned_grads)
return torch.reshape(merged_grad, self.original_shape)
|
(self, grad: torch.Tensor) -> torch.Tensor
|
717,522
|
pytorch_optimizer.optimizer.shampoo_utils
|
skip_precondition
| null |
def skip_precondition(self, x: torch.Tensor) -> bool:
return (len(x.shape) < self.skip_preconditioning_rank_lt) or any(
dim > self.no_preconditioning_for_layers_with_dim_gt for dim in x.shape
)
|
(self, x: torch.Tensor) -> bool
|
717,523
|
pytorch_optimizer.optimizer.shampoo_utils
|
PreConditionerType
|
Type of PreConditioner.
By default (ALL), a pre-conditioner is computed for each dim.
INPUT/OUTPUT are one-sided Shampoo variants that precondition only the input/output dim.
Assumes the last dim is always the output dim and everything else is an input dim.
|
class PreConditionerType(IntEnum):
r"""Type of PreConditioner.
By default (ALL), a pre-conditioner is computed for each dim.
INPUT/OUTPUT are one-sided Shampoo variants that precondition only the input/output dim.
Assumes the last dim is always the output dim and everything else is an input dim.
"""
ALL = 0
INPUT = 1
OUTPUT = 2
|
(value, names=None, *, module=None, qualname=None, type=None, start=1)
|
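Concretely, for a 2-D weight with transformed_shape [d_in, d_out], `get_should_precondition_dims` above yields (illustration only):

```python
# ALL    -> [True, True]    full Shampoo: one Kronecker factor per dim
# INPUT  -> [True, False]   one-sided: precondition only the input dim
# OUTPUT -> [False, True]   one-sided: precondition only the output dim
```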
717,524
|
pytorch_optimizer.optimizer.prodigy
|
Prodigy
|
An Expeditiously Adaptive Parameter-Free Learner.
Leave LR set to 1 unless you encounter instability.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. betas.
:param beta3: float. coefficient for computing the Prodigy step-size using running averages. If set to None,
uses the value of square root of beta2.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param d_coef: float. Coefficient in the expression for the estimate of d.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. use AdamW style weight decay.
:param fixed_decay: bool. fix weight decay.
:param bias_correction: bool. turn on Adam's bias correction.
:param safeguard_warmup: bool. remove lr from the denominator of D estimate to avoid issues during warm-up stage.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Prodigy(Optimizer, BaseOptimizer):
r"""An Expeditiously Adaptive Parameter-Free Learner.
Leave LR set to 1 unless you encounter instability.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. betas.
:param beta3: float. coefficient for computing the Prodigy step-size using running averages. If set to None,
uses the value of square root of beta2.
:param d0: float. initial D estimate for D-adaptation (default 1e-6). Rarely needs changing.
:param d_coef: float. Coefficient in the expression for the estimate of d.
:param growth_rate: float. prevent the D estimate from growing faster than this multiplicative rate.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. use AdamW style weight decay.
:param fixed_decay: bool. fix weight decay.
:param bias_correction: bool. turn on Adam's bias correction.
:param safeguard_warmup: bool. remove lr from the denominator of D estimate to avoid issues during warm-up stage.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
betas: BETAS = (0.9, 0.999),
beta3: Optional[float] = None,
d0: float = 1e-6,
d_coef: float = 1.0,
growth_rate: float = float('inf'),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
bias_correction: bool = False,
safeguard_warmup: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas((*betas, beta3))
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'beta3': beta3,
'd': d0,
'd0': d0,
'd_max': d0,
'd_coef': d_coef,
'growth_rate': growth_rate,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'bias_correction': bias_correction,
'safeguard_warmup': safeguard_warmup,
'step': 1,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Prodigy'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 1
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['s'] = torch.zeros_like(p)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
group = self.param_groups[0]
device = group['params'][0].device
d_de_nom = torch.tensor([0.0], device=device)
beta1, beta2 = group['betas']
beta3 = group['beta3'] if group['beta3'] is not None else math.sqrt(beta2)
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step'])
bias_correction: float = (bias_correction1 / bias_correction2_sq) if group['bias_correction'] else 1.0
d, d0 = group['d'], group['d0']
d_lr: float = d * group['lr'] / bias_correction
if 'd_numerator' not in group:
group['d_numerator'] = torch.tensor([0.0], device=device)
d_numerator = group['d_numerator']
d_numerator.mul_(beta3)
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['s'] = torch.zeros_like(p)
state['p0'] = p.clone()
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
p0, exp_avg, exp_avg_sq = state['p0'], state['exp_avg'], state['exp_avg_sq']
d_numerator.add_(torch.dot(grad.flatten(), (p0 - p).flatten()), alpha=(d / d0) * d_lr)
exp_avg.mul_(beta1).add_(grad, alpha=d * (1.0 - beta1))
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=d * d * (1.0 - beta2))
s = state['s']
s.mul_(beta3).add_(grad, alpha=(d / d0) * (d if group['safeguard_warmup'] else d_lr))
d_de_nom.add_(s.abs().sum())
if d_de_nom == 0:
return loss
d_hat = (group['d_coef'] * d_numerator / d_de_nom).item()
if d == group['d0']:
d = max(d, d_hat)
d_max = max(group['d_max'], d_hat)
d = min(d_max, d * group['growth_rate'])
for group in self.param_groups:
group['step'] += 1
group['d_numerator'] = d_numerator
group['d_de_nom'] = d_de_nom
group['d'] = d
group['d_max'] = d_max
group['d_hat'] = d_hat
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
de_nom = exp_avg_sq.sqrt().add_(d * group['eps'])
self.apply_weight_decay(
p,
p.grad,
lr=d_lr,
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
p.addcdiv_(exp_avg, de_nom, value=-d_lr)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), beta3: Optional[float] = None, d0: float = 1e-06, d_coef: float = 1.0, growth_rate: float = inf, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, bias_correction: bool = False, safeguard_warmup: bool = False, eps: float = 1e-08)
|
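Because the effective step size is d * lr (`d_lr` above) and d adapts from d0 through the running d_numerator / d_de_nom estimate, the lr argument is normally left at 1.0, as the docstring says. A self-contained usage sketch (toy model and objective):

```python
import torch
import torch.nn as nn
from pytorch_optimizer import Prodigy

model = nn.Linear(8, 2)
optimizer = Prodigy(model.parameters(), lr=1.0, weight_decay=1e-2)

loss = model(torch.randn(4, 8)).pow(2).mean()  # toy objective
loss.backward()
optimizer.step()
optimizer.zero_grad()
```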
717,526
|
pytorch_optimizer.optimizer.prodigy
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1.0,
betas: BETAS = (0.9, 0.999),
beta3: Optional[float] = None,
d0: float = 1e-6,
d_coef: float = 1.0,
growth_rate: float = float('inf'),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
bias_correction: bool = False,
safeguard_warmup: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas((*betas, beta3))
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'beta3': beta3,
'd': d0,
'd0': d0,
'd_max': d0,
'd_coef': d_coef,
'growth_rate': growth_rate,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'bias_correction': bias_correction,
'safeguard_warmup': safeguard_warmup,
'step': 1,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 1.0, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), beta3: Optional[float] = None, d0: float = 1e-06, d_coef: float = 1.0, growth_rate: float = inf, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, bias_correction: bool = False, safeguard_warmup: bool = False, eps: float = 1e-08)
|
717,529
|
pytorch_optimizer.optimizer.prodigy
|
__str__
| null |
def __str__(self) -> str:
return 'Prodigy'
|
(self) -> str
|
717,550
|
pytorch_optimizer.optimizer.prodigy
|
reset
| null |
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 1
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
state['s'] = torch.zeros_like(p)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
|
(self)
|
717,565
|
pytorch_optimizer.lr_scheduler.proportion
|
ProportionScheduler
|
ProportionScheduler (Rho Scheduler of GSAM).
This scheduler outputs a value that evolves in proportion to the learning rate of `lr_scheduler`.
:param lr_scheduler: learning rate scheduler.
:param max_lr: float. maximum lr.
:param min_lr: float. minimum lr.
:param max_value: float. maximum of rho.
:param min_value: float. minimum of rho.
|
class ProportionScheduler:
r"""ProportionScheduler (Rho Scheduler of GSAM).
This scheduler outputs a value that evolves in proportion to the learning rate of `lr_scheduler`.
:param lr_scheduler: learning rate scheduler.
:param max_lr: float. maximum lr.
:param min_lr: float. minimum lr.
:param max_value: float. maximum of rho.
:param min_value: float. minimum of rho.
"""
def __init__(
self, lr_scheduler, max_lr: float, min_lr: float = 0.0, max_value: float = 2.0, min_value: float = 2.0
):
self.lr_scheduler = lr_scheduler
self.max_lr = max_lr
self.min_lr = min_lr
self.max_value = max_value
self.min_value = min_value
self.step_t: int = 0
self.last_lr: List[float] = []
self.step()
def get_lr(self) -> float:
return self.last_lr[0]
def step(self) -> float:
self.step_t += 1
if hasattr(self.lr_scheduler, 'last_lr'):
lr = self.lr_scheduler.last_lr[0]
else:
lr = self.lr_scheduler.optimizer.param_groups[0]['lr']
if self.max_lr > self.min_lr:
value = self.min_value + (self.max_value - self.min_value) * (lr - self.min_lr) / (
self.max_lr - self.min_lr
)
else:
value = self.max_value
self.last_lr = [value]
return value
|
(lr_scheduler, max_lr: float, min_lr: float = 0.0, max_value: float = 2.0, min_value: float = 2.0)
|
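The mapping in `step` is a straight line: value = min_value + (max_value - min_value) * (lr - min_lr) / (max_lr - min_lr), so rho sits at max_value when the lr equals max_lr and at min_value when it reaches min_lr. A usage sketch for GSAM-style training (the cosine schedule and its bounds are example values):

```python
import torch
from pytorch_optimizer import ProportionScheduler

optimizer = torch.optim.SGD([torch.zeros(1, requires_grad=True)], lr=1e-3)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100, eta_min=1e-6)

rho_scheduler = ProportionScheduler(
    lr_scheduler=lr_scheduler, max_lr=1e-3, min_lr=1e-6,
    max_value=0.05, min_value=0.01,
)

lr_scheduler.step()
rho = rho_scheduler.step()  # rho tracks the current lr linearly
```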
717,566
|
pytorch_optimizer.lr_scheduler.proportion
|
__init__
| null |
def __init__(
self, lr_scheduler, max_lr: float, min_lr: float = 0.0, max_value: float = 2.0, min_value: float = 2.0
):
self.lr_scheduler = lr_scheduler
self.max_lr = max_lr
self.min_lr = min_lr
self.max_value = max_value
self.min_value = min_value
self.step_t: int = 0
self.last_lr: List[float] = []
self.step()
|
(self, lr_scheduler, max_lr: float, min_lr: float = 0.0, max_value: float = 2.0, min_value: float = 2.0)
|
717,568
|
pytorch_optimizer.lr_scheduler.proportion
|
step
| null |
def step(self) -> float:
self.step_t += 1
if hasattr(self.lr_scheduler, 'last_lr'):
lr = self.lr_scheduler.last_lr[0]
else:
lr = self.lr_scheduler.optimizer.param_groups[0]['lr']
if self.max_lr > self.min_lr:
value = self.min_value + (self.max_value - self.min_value) * (lr - self.min_lr) / (
self.max_lr - self.min_lr
)
else:
value = self.max_value
self.last_lr = [value]
return value
|
(self) -> float
|
717,569
|
pytorch_optimizer.optimizer.qhadam
|
QHAdam
|
Quasi-hyperbolic momentum and Adam for deep learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of the gradient and its square.
:param nus: Tuple[float, float]. immediate discount factors used to estimate the gradient and its square.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
|
class QHAdam(Optimizer, BaseOptimizer):
r"""Quasi-hyperbolic momentum and Adam for deep learning.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of the gradient and its square.
:param nus: Tuple[float, float]. immediate discount factors used to estimate the gradient and its square.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
nus: Tuple[float, float] = (1.0, 1.0),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_nus(nus)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'nus': nus,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'QHAdam'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['beta1_weight'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['beta2_weight'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
nu1, nu2 = group['nus']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['beta1_weight'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
state['beta2_weight'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
beta1_weight, beta2_weight = state['beta1_weight'], state['beta2_weight']
beta1_weight.mul_(beta1).add_(1.0)
beta2_weight.mul_(beta2).add_(1.0)
beta1_adj = 1.0 - (1.0 / beta1_weight)
beta2_adj = 1.0 - (1.0 / beta2_weight)
grad_p2 = grad.pow(2)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1_adj).add_((1.0 - beta1_adj) * grad)
exp_avg_sq.mul_(beta2_adj).add_((1.0 - beta2_adj) * grad_p2)
avg_grad = exp_avg.mul(nu1)
if nu1 != 1.0:
avg_grad.add_(grad, alpha=1.0 - nu1)
avg_grad_rms = exp_avg_sq.mul(nu2)
if nu2 != 1.0:
avg_grad_rms.add_(grad_p2, alpha=1.0 - nu2)
avg_grad_rms.sqrt_().add_(group['eps'])
p.addcdiv_(avg_grad, avg_grad_rms, value=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), nus: Tuple[float, float] = (1.0, 1.0), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 1e-08)
|
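After the bias-adjusted moments are in place, the update interpolates between the raw gradient and each moment estimate with nu1 and nu2. A plain-torch sketch of the core rule on a single tensor (names are mine; the beta1_weight/beta2_weight bias adjustment above is simplified to plain EMAs here):

```python
import torch

def qh_update(p, grad, exp_avg, exp_avg_sq, lr=1e-3,
              beta1=0.9, beta2=0.999, nu1=0.7, nu2=1.0, eps=1e-8):
    exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1)               # EMA of the gradient
    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)  # EMA of its square
    num = nu1 * exp_avg + (1.0 - nu1) * grad                        # quasi-hyperbolic numerator
    den = (nu2 * exp_avg_sq + (1.0 - nu2) * grad.pow(2)).sqrt_().add_(eps)
    p.addcdiv_(num, den, value=-lr)
```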
717,571
|
pytorch_optimizer.optimizer.qhadam
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
nus: Tuple[float, float] = (1.0, 1.0),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_nus(nus)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'nus': nus,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), nus: Tuple[float, float] = (1.0, 1.0), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, eps: float = 1e-08)
|
717,574
|
pytorch_optimizer.optimizer.qhadam
|
__str__
| null |
def __str__(self) -> str:
return 'QHAdam'
|
(self) -> str
|
717,595
|
pytorch_optimizer.optimizer.qhadam
|
reset
| null |
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['beta1_weight'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['beta2_weight'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
|
(self)
|
717,610
|
pytorch_optimizer.optimizer.qhm
|
QHM
|
Quasi-hyperbolic momentum (QHM) optimization algorithm.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor.
:param nu: float. immediate discount factor used to weight the momentum buffer against the raw gradient.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
|
class QHM(Optimizer, BaseOptimizer):
r"""Quasi-hyperbolic momentum (QHM) optimization algorithm.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor.
:param nu: float. immediate discount factor used to weight the momentum buffer against the raw gradient.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.0,
nu: float = 1.0,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_nus(nu)
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'nu': nu,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'QHM'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['momentum_buffer'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum_buffer'] = torch.zeros_like(p)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
buf = state['momentum_buffer']
buf.mul_(group['momentum']).add_(grad, alpha=1.0 - group['momentum'])
p.add_(buf, alpha=-group['lr'] * group['nu'])
p.add_(grad, alpha=-group['lr'] * (1.0 - group['nu']))
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.0, nu: float = 1.0, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
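The QHM update is a convex combination of the momentum buffer and the raw gradient: nu = 1 recovers SGD with (dampened) momentum, and nu = 0 recovers plain SGD. A plain-torch sketch of one step on a single tensor (names are mine; weight decay omitted):

```python
import torch

def qhm_update(p, grad, buf, lr=1e-3, momentum=0.999, nu=0.7):
    buf.mul_(momentum).add_(grad, alpha=1.0 - momentum)  # dampened momentum buffer
    p.add_(buf, alpha=-lr * nu)                          # momentum part
    p.add_(grad, alpha=-lr * (1.0 - nu))                 # plain-SGD part
```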
717,612
|
pytorch_optimizer.optimizer.qhm
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.0,
nu: float = 1.0,
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_nus(nu)
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'nu': nu,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.0, nu: float = 1.0, weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False)
|
717,615
|
pytorch_optimizer.optimizer.qhm
|
__str__
| null |
def __str__(self) -> str:
return 'QHM'
|
(self) -> str
|
717,651
|
pytorch_optimizer.optimizer.radam
|
RAdam
|
Rectified Adam.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of the gradient and its square.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param n_sma_threshold: int. threshold of the simple moving average of variance (5 is recommended).
:param degenerated_to_sgd: bool. whether to degenerate to SGD when the rectification term is unavailable.
:param r: float. EMA factor. between 0.9 ~ 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class RAdam(Optimizer, BaseOptimizer):
r"""Rectified Adam.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
    :param n_sma_threshold: int. threshold on the length of the approximated SMA (recommended value is 5).
    :param degenerated_to_sgd: bool. perform the SGD update when the variance of the gradient is high.
    :param r: float. EMA factor; a value between 0.9 and 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
n_sma_threshold: int = 5,
degenerated_to_sgd: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.n_sma_threshold = n_sma_threshold
self.degenerated_to_sgd = degenerated_to_sgd
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'RAdam'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
step_size, n_sma = self.get_rectify_step_size(
is_rectify=True,
step=group['step'],
lr=group['lr'],
beta2=beta2,
n_sma_threshold=self.n_sma_threshold,
degenerated_to_sgd=self.degenerated_to_sgd,
)
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=step_size,
bias_correction1=bias_correction1,
)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if step_size > 0 or n_sma >= self.n_sma_threshold:
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
if n_sma >= self.n_sma_threshold:
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
p.addcdiv_(exp_avg, de_nom, value=-step_size)
elif step_size > 0:
p.add_(exp_avg, alpha=-step_size)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, n_sma_threshold: int = 5, degenerated_to_sgd: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
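The rectification that `get_rectify_step_size` is expected to compute (following the RAdam paper) can be sketched as below; the helper itself lives in `BaseOptimizer` and is not shown in this record, so the exact bias-correction details here are assumptions.

import math

def rectification_term(step: int, beta2: float = 0.999):
    # rho_t is the length of the approximated SMA (the 'n_sma' value above);
    # r_t is the variance rectifier applied to the step size
    rho_inf = 2.0 / (1.0 - beta2) - 1.0
    beta2_t = beta2 ** step
    rho_t = rho_inf - 2.0 * step * beta2_t / (1.0 - beta2_t)
    if rho_t <= 4.0:  # variance not yet tractable; fall back to the SGD-like branch
        return rho_t, None
    r_t = math.sqrt(
        ((rho_t - 4.0) * (rho_t - 2.0) * rho_inf)
        / ((rho_inf - 4.0) * (rho_inf - 2.0) * rho_t)
    )
    return rho_t, r_t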
717,653
|
pytorch_optimizer.optimizer.radam
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
n_sma_threshold: int = 5,
degenerated_to_sgd: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.n_sma_threshold = n_sma_threshold
self.degenerated_to_sgd = degenerated_to_sgd
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, n_sma_threshold: int = 5, degenerated_to_sgd: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-08)
|
717,656
|
pytorch_optimizer.optimizer.radam
|
__str__
| null |
def __str__(self) -> str:
return 'RAdam'
|
(self) -> str
|
717,677
|
pytorch_optimizer.optimizer.radam
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
step_size, n_sma = self.get_rectify_step_size(
is_rectify=True,
step=group['step'],
lr=group['lr'],
beta2=beta2,
n_sma_threshold=self.n_sma_threshold,
degenerated_to_sgd=self.degenerated_to_sgd,
)
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=step_size,
bias_correction1=bias_correction1,
)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if step_size > 0 or n_sma >= self.n_sma_threshold:
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
if n_sma >= self.n_sma_threshold:
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
p.addcdiv_(exp_avg, de_nom, value=-step_size)
elif step_size > 0:
p.add_(exp_avg, alpha=-step_size)
return loss
|
(self)
|
717,692
|
pytorch_optimizer.optimizer.shampoo_utils
|
RMSPropGraft
|
Graft using RMSProp. Essentially an implementation of RMSProp with momentum.
:param var: torch.Tensor. variable.
:param diagonal_eps: float. diagonal epsilon.
|
class RMSPropGraft(SGDGraft):
r"""Graft using RMSProp. Essentially an implementation of RMSProp with momentum.
:param var: torch.Tensor. variable.
:param diagonal_eps: float. diagonal epsilon.
"""
def __init__(self, var: torch.Tensor, diagonal_eps: float):
super().__init__(var)
self.diagonal_eps = diagonal_eps
self.statistics: torch.Tensor = torch.zeros_like(var)
def add_statistics(self, grad: torch.Tensor, beta2: float):
r"""Add the statistics."""
self.statistics.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad / (torch.sqrt(self.statistics) + self.diagonal_eps)
|
(var: torch.Tensor, diagonal_eps: float)
|
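A toy illustration of the grafting statistics above; the gradient values are made up and `var` stands in for a parameter tensor.

import torch

from pytorch_optimizer.optimizer.shampoo_utils import RMSPropGraft

var = torch.zeros(4)
graft = RMSPropGraft(var, diagonal_eps=1e-10)

grad = torch.tensor([0.1, -0.2, 0.3, 0.0])
graft.add_statistics(grad, beta2=0.999)              # EMA of squared gradients
preconditioned = graft.precondition_gradient(grad)   # grad / (sqrt(EMA) + eps)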
717,694
|
pytorch_optimizer.optimizer.shampoo_utils
|
add_statistics
|
Add the statistics.
|
def add_statistics(self, grad: torch.Tensor, beta2: float):
r"""Add the statistics."""
self.statistics.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
|
(self, grad: torch.Tensor, beta2: float)
|
717,697
|
pytorch_optimizer.optimizer.ranger
|
Ranger
|
A synergistic optimizer combining RAdam, LookAhead, and Gradient Centralization (GC) in one optimizer.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param n_sma_threshold: int. threshold on the length of the approximated SMA (recommended value is 5).
:param degenerated_to_sgd: bool. perform SGD update when variance of gradient is high.
:param use_gc: bool. use Gradient Centralization (both convolution & fc layers).
:param gc_conv_only: bool. use Gradient Centralization (only convolution layer).
:param r: float. EMA factor; a value between 0.9 and 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Ranger(Optimizer, BaseOptimizer):
r"""a synergistic optimizer combining RAdam and LookAhead, and now GC in one optimizer.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
    :param n_sma_threshold: int. threshold on the length of the approximated SMA (recommended value is 5).
    :param degenerated_to_sgd: bool. perform SGD update when variance of gradient is high.
    :param use_gc: bool. use Gradient Centralization (both convolution & fc layers).
    :param gc_conv_only: bool. use Gradient Centralization (only convolution layer).
    :param r: float. EMA factor; a value between 0.9 and 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.95, 0.999),
alpha: float = 0.5,
k: int = 6,
n_sma_threshold: int = 5,
degenerated_to_sgd: bool = False,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
use_gc: bool = True,
gc_conv_only: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-5,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_range(alpha, 'alpha', 0.0, 1.0, range_type='[]')
self.validate_positive(k, 'k')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.n_sma_threshold = n_sma_threshold
self.degenerated_to_sgd = degenerated_to_sgd
self.use_gc = use_gc
self.gc_gradient_threshold: int = 3 if gc_conv_only else 1
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'alpha': alpha,
'k': k,
'step_counter': 0,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'Ranger'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['slow_buffer'] = torch.empty_like(p)
state['slow_buffer'].copy_(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
step_size, n_sma = self.get_rectify_step_size(
is_rectify=True,
step=group['step'],
lr=group['lr'],
beta2=beta2,
n_sma_threshold=self.n_sma_threshold,
degenerated_to_sgd=self.degenerated_to_sgd,
)
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=step_size,
bias_correction1=bias_correction1,
)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['slow_buffer'] = torch.empty_like(p)
state['slow_buffer'].copy_(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if self.use_gc and grad.dim() > self.gc_gradient_threshold:
centralize_gradient(grad, gc_conv_only=False)
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
if n_sma >= self.n_sma_threshold:
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
p.addcdiv_(exp_avg, de_nom, value=-step_size)
else:
p.add_(exp_avg, alpha=-step_size)
if group['step'] % group['k'] == 0:
slow_p = state['slow_buffer']
slow_p.add_(p - slow_p, alpha=group['alpha'])
p.copy_(slow_p)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.95, 0.999), alpha: float = 0.5, k: int = 6, n_sma_threshold: int = 5, degenerated_to_sgd: bool = False, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, use_gc: bool = True, gc_conv_only: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-05)
|
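The Lookahead merge performed every `k` steps at the end of `step` reduces to the sketch below; the tensors and `alpha` are illustrative.

import torch

def lookahead_merge(fast_p: torch.Tensor, slow_p: torch.Tensor, alpha: float = 0.5):
    # slow += alpha * (fast - slow), then restart the fast weights from slow
    slow_p.add_(fast_p - slow_p, alpha=alpha)
    fast_p.copy_(slow_p)

fast, slow = torch.tensor([1.0, 2.0]), torch.tensor([0.0, 0.0])
lookahead_merge(fast, slow)  # both become [0.5, 1.0]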
717,699
|
pytorch_optimizer.optimizer.ranger
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.95, 0.999),
alpha: float = 0.5,
k: int = 6,
n_sma_threshold: int = 5,
degenerated_to_sgd: bool = False,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
use_gc: bool = True,
gc_conv_only: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-5,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_range(alpha, 'alpha', 0.0, 1.0, range_type='[]')
self.validate_positive(k, 'k')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
self.n_sma_threshold = n_sma_threshold
self.degenerated_to_sgd = degenerated_to_sgd
self.use_gc = use_gc
self.gc_gradient_threshold: int = 3 if gc_conv_only else 1
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'alpha': alpha,
'k': k,
'step_counter': 0,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adanorm': adanorm,
'adam_debias': adam_debias,
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.95, 0.999), alpha: float = 0.5, k: int = 6, n_sma_threshold: int = 5, degenerated_to_sgd: bool = False, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, use_gc: bool = True, gc_conv_only: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-05)
|
717,702
|
pytorch_optimizer.optimizer.ranger
|
__str__
| null |
def __str__(self) -> str:
return 'Ranger'
|
(self) -> str
|
717,723
|
pytorch_optimizer.optimizer.ranger
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
step_size, n_sma = self.get_rectify_step_size(
is_rectify=True,
step=group['step'],
lr=group['lr'],
beta2=beta2,
n_sma_threshold=self.n_sma_threshold,
degenerated_to_sgd=self.degenerated_to_sgd,
)
step_size = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=step_size,
bias_correction1=bias_correction1,
)
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['slow_buffer'] = torch.empty_like(p)
state['slow_buffer'].copy_(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if self.use_gc and grad.dim() > self.gc_gradient_threshold:
centralize_gradient(grad, gc_conv_only=False)
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
if n_sma >= self.n_sma_threshold:
de_nom = exp_avg_sq.sqrt().add_(group['eps'])
p.addcdiv_(exp_avg, de_nom, value=-step_size)
else:
p.add_(exp_avg, alpha=-step_size)
if group['step'] % group['k'] == 0:
slow_p = state['slow_buffer']
slow_p.add_(p - slow_p, alpha=group['alpha'])
p.copy_(slow_p)
return loss
|
(self)
|
717,738
|
pytorch_optimizer.optimizer.ranger21
|
Ranger21
|
Integrating the latest deep learning components into a single optimizer.
Here are the components:
* uses the AdamW optimizer as its core (or, optionally, MadGrad)
* Adaptive gradient clipping
* Gradient centralization
* Positive-Negative momentum
* Norm loss
* Stable weight decay
* Linear learning rate warm-up
* Explore-exploit learning rate schedule
* Lookahead
* Softplus transformation
* Gradient Normalization
* Corrects the denominator (AdamD).
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta0: float. manages the amplitude of the noise introduced by positive-negative momentum.
    While 0.9 is a recommended default value, you can use -0.5 to minimize the noise.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
:param use_softplus: bool. use softplus to smooth the denominator of the update.
:param beta_softplus: float. beta value for the softplus transformation.
:param agc_clipping_value: float. clipping value for adaptive gradient clipping (AGC).
:param agc_eps: float. eps for AGC.
:param centralize_gradients: bool. use GC on both convolution & fc layers.
:param normalize_gradients: bool. use gradient normalization.
:param lookahead_merge_time: int. number of steps between lookahead merges.
:param lookahead_blending_alpha: float. alpha used when blending the fast and slow (lookahead) weights.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param norm_loss_factor: float. norm loss factor.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class Ranger21(Optimizer, BaseOptimizer):
r"""Integrating the latest deep learning components into a single optimizer.
    Here are the components:
* uses the AdamW optimizer as its core (or, optionally, MadGrad)
* Adaptive gradient clipping
* Gradient centralization
* Positive-Negative momentum
* Norm loss
* Stable weight decay
* Linear learning rate warm-up
* Explore-exploit learning rate schedule
* Lookahead
* Softplus transformation
* Gradient Normalization
* Corrects the denominator (AdamD).
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
    :param beta0: float. manages the amplitude of the noise introduced by positive-negative momentum.
        While 0.9 is a recommended default value, you can use -0.5 to minimize the noise.
:param betas: BETAS. coefficients used for computing running averages of gradient and the squared hessian trace.
    :param use_softplus: bool. use softplus to smooth the denominator of the update.
    :param beta_softplus: float. beta value for the softplus transformation.
    :param agc_clipping_value: float. clipping value for adaptive gradient clipping (AGC).
    :param agc_eps: float. eps for AGC.
    :param centralize_gradients: bool. use GC on both convolution & fc layers.
:param normalize_gradients: bool. use gradient normalization.
    :param lookahead_merge_time: int. number of steps between lookahead merges.
    :param lookahead_blending_alpha: float. alpha used when blending the fast and slow (lookahead) weights.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param norm_loss_factor: float. norm loss factor.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__( # pylint: disable=R0913
self,
params: PARAMETERS,
num_iterations: int,
lr: float = 1e-3,
beta0: float = 0.9,
betas: BETAS = (0.9, 0.999),
use_softplus: bool = True,
beta_softplus: float = 50.0,
num_warm_up_iterations: Optional[int] = None,
num_warm_down_iterations: Optional[int] = None,
warm_down_min_lr: float = 3e-5,
agc_clipping_value: float = 1e-2,
agc_eps: float = 1e-3,
centralize_gradients: bool = True,
normalize_gradients: bool = True,
lookahead_merge_time: int = 5,
lookahead_blending_alpha: float = 0.5,
weight_decay: float = 1e-4,
weight_decouple: bool = True,
fixed_decay: bool = False,
norm_loss_factor: float = 1e-4,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_learning_rate(warm_down_min_lr)
self.validate_betas(betas)
self.validate_range(beta0, 'beta0', 0.0, 1.0, range_type='[]')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(agc_clipping_value, 'agc_clipping_value')
self.validate_non_negative(eps, 'eps')
self.validate_non_negative(agc_eps, 'agc_eps')
self.min_lr = warm_down_min_lr
self.use_softplus = use_softplus
self.beta_softplus = beta_softplus
self.agc_clipping_value = agc_clipping_value
self.agc_eps = agc_eps
self.centralize_gradients = centralize_gradients
self.normalize_gradients = normalize_gradients
self.lookahead_merge_time = lookahead_merge_time
self.lookahead_blending_alpha = lookahead_blending_alpha
self.norm_loss_factor = norm_loss_factor
self.lookahead_step: int = 0
self.starting_lr: float = lr
self.current_lr: float = lr
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
self.num_warm_up_iterations: int = (
self.build_warm_up_iterations(num_iterations, betas[1])
if num_warm_up_iterations is None
else num_warm_up_iterations
)
self.num_warm_down_iterations: int = (
self.build_warm_down_iterations(num_iterations)
if num_warm_down_iterations is None
else num_warm_down_iterations
)
self.start_warm_down: int = num_iterations - self.num_warm_down_iterations
self.warm_down_lr_delta: float = self.starting_lr - self.min_lr
def __str__(self) -> str:
return 'Ranger21'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['grad_ma'] = torch.zeros_like(p)
state['variance_ma'] = torch.zeros_like(p)
state['lookahead_params'] = p.clone()
state['neg_grad_ma'] = torch.zeros_like(p)
state['max_variance_ma'] = torch.zeros_like(p)
@staticmethod
def build_warm_up_iterations(total_iterations: int, beta2: float, warm_up_pct: float = 0.22) -> int:
warm_up_iterations: int = math.ceil(2.0 / (1.0 - beta2)) # default un-tuned linear warmup
beta_pct: float = warm_up_iterations / total_iterations
return int(warm_up_pct * total_iterations) if beta_pct > 0.45 else warm_up_iterations
@staticmethod
def build_warm_down_iterations(total_iterations: int, warm_down_pct: float = 0.72) -> int:
start_warm_down: int = int(warm_down_pct * total_iterations)
return total_iterations - start_warm_down
def warm_up_dampening(self, lr: float, step: int) -> float:
if step > self.num_warm_up_iterations:
return lr
warm_up_current_pct: float = min(1.0, (step / self.num_warm_up_iterations))
self.current_lr = lr * warm_up_current_pct
return self.current_lr
def warm_down(self, lr: float, iteration: int) -> float:
if iteration < self.start_warm_down:
return lr
# start iteration from 1, not 0
warm_down_iteration: int = max((iteration + 1) - self.start_warm_down, 1)
warm_down_pct: float = min(warm_down_iteration / (self.num_warm_down_iterations + 1), 1.0)
self.current_lr = max(self.starting_lr - self.warm_down_lr_delta * warm_down_pct, self.min_lr)
return self.current_lr
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
param_size: int = 0
variance_ma_sum: float = 1.0
# Phase 1 - Accumulate all the variance_ma_sum to use in stable weight decay
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction2: float = 1.0 - beta2 ** group['step']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
param_size += p.numel()
state = self.state[p]
if len(state) == 0:
state['grad_ma'] = torch.zeros_like(p)
state['variance_ma'] = torch.zeros_like(p)
state['lookahead_params'] = p.clone()
state['neg_grad_ma'] = torch.zeros_like(p)
state['max_variance_ma'] = torch.zeros_like(p)
# Apply Adaptive Gradient Clipping (AGC)
grad.copy_(agc(p, grad, self.agc_eps, self.agc_clipping_value))
# Apply gradient centralization & normalization
centralize_gradient(grad, gc_conv_only=False)
normalize_gradient(grad)
# second moment estimation
# using positive-negative momentum and bias correction
variance_ma = state['variance_ma']
variance_ma.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
variance_ma_sum += (variance_ma / bias_correction2).sum()
if param_size == 0:
raise ZeroParameterSizeError()
variance_normalized = math.sqrt(variance_ma_sum / param_size)
# Phase 2 - Apply weight decay and step
for group in self.param_groups:
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step'] # fmt: skip
bias_correction2_sq: float = math.sqrt(1.0 - beta2 ** group['step']) # fmt: skip
noise_norm: float = math.sqrt((1.0 + beta2) ** 2 + beta2 ** 2) # fmt: skip
# warm up & down
lr: float = self.warm_up_dampening(group['lr'], group['step'])
lr = self.warm_down(lr, group['step'])
for p in group['params']:
if p.grad is None:
continue
# stable weight decay
self.apply_weight_decay(
p=p,
grad=None,
lr=lr,
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
ratio=1.0 / variance_normalized,
)
# norm loss
correction = 2.0 * self.norm_loss_factor * (1.0 - 1.0 / unit_norm(p).add_(group['eps']))
p.mul_(1.0 - lr * correction)
state = self.state[p]
if group['step'] % 2 == 1:
grad_ma, neg_grad_ma = state['grad_ma'], state['neg_grad_ma']
else:
grad_ma, neg_grad_ma = state['neg_grad_ma'], state['grad_ma']
variance_ma = state['variance_ma']
torch.max(state['max_variance_ma'], variance_ma, out=variance_ma)
de_nom = (variance_ma.sqrt() / bias_correction2_sq).add_(group['eps'])
if self.use_softplus:
de_nom = f.softplus(de_nom, beta=self.beta_softplus)
grad = p.grad
centralize_gradient(grad, gc_conv_only=False)
normalize_gradient(grad)
grad_ma.mul_(beta1 ** 2).add_(grad, alpha=1.0 - beta1 ** 2) # fmt: skip
step_size: float = self.apply_adam_debias(group['adam_debias'], lr, bias_correction1)
pn_momentum = grad_ma.mul(1.0 + 1.0).add(neg_grad_ma, alpha=-1.0).mul(1.0 / noise_norm)
p.addcdiv_(pn_momentum, de_nom, value=-step_size)
self.lookahead_process_step()
return loss
def lookahead_process_step(self):
self.lookahead_step += 1
if self.lookahead_step >= self.lookahead_merge_time:
self.lookahead_step: int = 0
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
p.mul_(self.lookahead_blending_alpha).add_(
state['lookahead_params'],
alpha=1.0 - self.lookahead_blending_alpha,
)
state['lookahead_params'].copy_(p)
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], num_iterations: int, lr: float = 0.001, beta0: float = 0.9, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), use_softplus: bool = True, beta_softplus: float = 50.0, num_warm_up_iterations: Optional[int] = None, num_warm_down_iterations: Optional[int] = None, warm_down_min_lr: float = 3e-05, agc_clipping_value: float = 0.01, agc_eps: float = 0.001, centralize_gradients: bool = True, normalize_gradients: bool = True, lookahead_merge_time: int = 5, lookahead_blending_alpha: float = 0.5, weight_decay: float = 0.0001, weight_decouple: bool = True, fixed_decay: bool = False, norm_loss_factor: float = 0.0001, adam_debias: bool = False, eps: float = 1e-08)
|
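The Phase 2 update above combines two momentum buffers, swapped on odd and even steps, into a positive-negative momentum estimate. A hedged numeric sketch, with `beta2` and the buffer contents assumed:

import math

import torch

beta2 = 0.999
noise_norm = math.sqrt((1.0 + beta2) ** 2 + beta2 ** 2)

m_pos = torch.tensor([0.10, 0.20])  # buffer updated on this step
m_neg = torch.tensor([0.08, 0.25])  # buffer updated on the previous step
pn_momentum = (2.0 * m_pos - m_neg) / noise_norm  # mirrors grad_ma.mul(2).add(neg_grad_ma, alpha=-1)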
717,740
|
pytorch_optimizer.optimizer.ranger21
|
__init__
| null |
def __init__( # pylint: disable=R0913
self,
params: PARAMETERS,
num_iterations: int,
lr: float = 1e-3,
beta0: float = 0.9,
betas: BETAS = (0.9, 0.999),
use_softplus: bool = True,
beta_softplus: float = 50.0,
num_warm_up_iterations: Optional[int] = None,
num_warm_down_iterations: Optional[int] = None,
warm_down_min_lr: float = 3e-5,
agc_clipping_value: float = 1e-2,
agc_eps: float = 1e-3,
centralize_gradients: bool = True,
normalize_gradients: bool = True,
lookahead_merge_time: int = 5,
lookahead_blending_alpha: float = 0.5,
weight_decay: float = 1e-4,
weight_decouple: bool = True,
fixed_decay: bool = False,
norm_loss_factor: float = 1e-4,
adam_debias: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_learning_rate(warm_down_min_lr)
self.validate_betas(betas)
self.validate_range(beta0, 'beta0', 0.0, 1.0, range_type='[]')
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(agc_clipping_value, 'agc_clipping_value')
self.validate_non_negative(eps, 'eps')
self.validate_non_negative(agc_eps, 'agc_eps')
self.min_lr = warm_down_min_lr
self.use_softplus = use_softplus
self.beta_softplus = beta_softplus
self.agc_clipping_value = agc_clipping_value
self.agc_eps = agc_eps
self.centralize_gradients = centralize_gradients
self.normalize_gradients = normalize_gradients
self.lookahead_merge_time = lookahead_merge_time
self.lookahead_blending_alpha = lookahead_blending_alpha
self.norm_loss_factor = norm_loss_factor
self.lookahead_step: int = 0
self.starting_lr: float = lr
self.current_lr: float = lr
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'adam_debias': adam_debias,
'eps': eps,
}
super().__init__(params, defaults)
self.num_warm_up_iterations: int = (
self.build_warm_up_iterations(num_iterations, betas[1])
if num_warm_up_iterations is None
else num_warm_up_iterations
)
self.num_warm_down_iterations: int = (
self.build_warm_down_iterations(num_iterations)
if num_warm_down_iterations is None
else num_warm_down_iterations
)
self.start_warm_down: int = num_iterations - self.num_warm_down_iterations
self.warm_down_lr_delta: float = self.starting_lr - self.min_lr
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], num_iterations: int, lr: float = 0.001, beta0: float = 0.9, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), use_softplus: bool = True, beta_softplus: float = 50.0, num_warm_up_iterations: Optional[int] = None, num_warm_down_iterations: Optional[int] = None, warm_down_min_lr: float = 3e-05, agc_clipping_value: float = 0.01, agc_eps: float = 0.001, centralize_gradients: bool = True, normalize_gradients: bool = True, lookahead_merge_time: int = 5, lookahead_blending_alpha: float = 0.5, weight_decay: float = 0.0001, weight_decouple: bool = True, fixed_decay: bool = False, norm_loss_factor: float = 0.0001, adam_debias: bool = False, eps: float = 1e-08)
|
717,743
|
pytorch_optimizer.optimizer.ranger21
|
__str__
| null |
def __str__(self) -> str:
return 'Ranger21'
|
(self) -> str
|
717,753
|
pytorch_optimizer.optimizer.ranger21
|
build_warm_down_iterations
| null |
@staticmethod
def build_warm_down_iterations(total_iterations: int, warm_down_pct: float = 0.72) -> int:
start_warm_down: int = int(warm_down_pct * total_iterations)
return total_iterations - start_warm_down
|
(total_iterations: int, warm_down_pct: float = 0.72) -> int
|
717,754
|
pytorch_optimizer.optimizer.ranger21
|
build_warm_up_iterations
| null |
@staticmethod
def build_warm_up_iterations(total_iterations: int, beta2: float, warm_up_pct: float = 0.22) -> int:
warm_up_iterations: int = math.ceil(2.0 / (1.0 - beta2)) # default un-tuned linear warmup
beta_pct: float = warm_up_iterations / total_iterations
return int(warm_up_pct * total_iterations) if beta_pct > 0.45 else warm_up_iterations
|
(total_iterations: int, beta2: float, warm_up_pct: float = 0.22) -> int
|
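A worked example of this heuristic with assumed values:

import math

beta2, total_iterations = 0.999, 3_000
warm_up = math.ceil(2.0 / (1.0 - beta2))    # 2000 iterations, the un-tuned default
beta_pct = warm_up / total_iterations       # ~0.667 > 0.45, so the cap kicks in
if beta_pct > 0.45:
    warm_up = int(0.22 * total_iterations)  # 660 iterations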
717,759
|
pytorch_optimizer.optimizer.ranger21
|
lookahead_process_step
| null |
def lookahead_process_step(self):
self.lookahead_step += 1
if self.lookahead_step >= self.lookahead_merge_time:
self.lookahead_step: int = 0
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
state = self.state[p]
p.mul_(self.lookahead_blending_alpha).add_(
state['lookahead_params'],
alpha=1.0 - self.lookahead_blending_alpha,
)
state['lookahead_params'].copy_(p)
|
(self)
|
717,780
|
pytorch_optimizer.optimizer.ranger21
|
warm_down
| null |
def warm_down(self, lr: float, iteration: int) -> float:
if iteration < self.start_warm_down:
return lr
# start iteration from 1, not 0
warm_down_iteration: int = max((iteration + 1) - self.start_warm_down, 1)
warm_down_pct: float = min(warm_down_iteration / (self.num_warm_down_iterations + 1), 1.0)
self.current_lr = max(self.starting_lr - self.warm_down_lr_delta * warm_down_pct, self.min_lr)
return self.current_lr
|
(self, lr: float, iteration: int) -> float
|
717,781
|
pytorch_optimizer.optimizer.ranger21
|
warm_up_dampening
| null |
def warm_up_dampening(self, lr: float, step: int) -> float:
if step > self.num_warm_up_iterations:
return lr
warm_up_current_pct: float = min(1.0, (step / self.num_warm_up_iterations))
self.current_lr = lr * warm_up_current_pct
return self.current_lr
|
(self, lr: float, step: int) -> float
|
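Taken together, `warm_up_dampening` and `warm_down` produce a linear ramp, a flat middle, and a linear decay down to `warm_down_min_lr`. A usage sketch, assuming `model` is some `nn.Module`:

from pytorch_optimizer.optimizer.ranger21 import Ranger21

opt = Ranger21(model.parameters(), num_iterations=1_000, lr=1e-3)

schedule = []
for step in range(1, 1_001):
    lr = opt.warm_up_dampening(1e-3, step)  # linear warm-up phase
    lr = opt.warm_down(lr, step)            # linear warm-down phase
    schedule.append(lr)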
717,784
|
pytorch_optimizer.optimizer.rotograd
|
RotoGrad
|
Implementation of RotoGrad as described in the original paper.
:param backbone: nn.Module. shared module.
:param heads: List[nn.Module]. task-specific modules.
:param latent_size: int. size of the shared representation, i.e. the size of the output of the backbone.
:param burn_in_period: int. When back-propagating towards the shared parameters, *each task loss is normalized
dividing by its initial value*, :math:`{L_k(t)}/{L_k(t_0 = 0)}`. This parameter sets a number of iterations
after which the denominator will be replaced by the value of the loss at that iteration, that is,
:math:`t_0 = burn\_in\_period`. This is done to overcome problems with losses quickly changing
in the first iterations.
:param normalize_losses: bool. Whether to use this normalized losses to back-propagate through the task-specific
parameters as well.
|
class RotoGrad(RotateOnly):
r"""Implementation of RotoGrad as described in the original paper.
:param backbone: nn.Module. shared module.
:param heads: List[nn.Module]. task-specific modules.
    :param latent_size: int. size of the shared representation, i.e. the size of the output of the backbone.
:param burn_in_period: int. When back-propagating towards the shared parameters, *each task loss is normalized
dividing by its initial value*, :math:`{L_k(t)}/{L_k(t_0 = 0)}`. This parameter sets a number of iterations
after which the denominator will be replaced by the value of the loss at that iteration, that is,
:math:`t_0 = burn\_in\_period`. This is done to overcome problems with losses quickly changing
in the first iterations.
:param normalize_losses: bool. Whether to use this normalized losses to back-propagate through the task-specific
parameters as well.
"""
num_tasks: int
backbone: nn.Module
heads: Sequence[nn.Module]
rep: torch.Tensor
def __init__(
self,
backbone: nn.Module,
heads: Sequence[nn.Module],
latent_size: int,
*args,
burn_in_period: int = 20,
normalize_losses: bool = False,
):
super().__init__(backbone, heads, latent_size, burn_in_period, *args, normalize_losses=normalize_losses)
self.initial_grads = None
self.counter: int = 0
def _rep_grad(self):
super()._rep_grad()
grad_norms = [torch.linalg.norm(g, keepdim=True).clamp_min(1e-15) for g in self.original_grads]
if self.initial_grads is None or self.counter == self.burn_in_period:
self.initial_grads = grad_norms
conv_ratios = [torch.ones((1,)) for _ in range(len(self.initial_grads))]
else:
            conv_ratios = [x / y for x, y in zip(grad_norms, self.initial_grads)]
self.counter += 1
alphas = [x / torch.clamp(sum(conv_ratios), 1e-15) for x in conv_ratios]
weighted_sum_norms = sum(a * g for a, g in zip(alphas, grad_norms))
return sum(g / n * weighted_sum_norms for g, n in zip(self.original_grads, grad_norms))
|
(backbone: torch.nn.modules.module.Module, heads: Sequence[torch.nn.modules.module.Module], latent_size: int, *args, burn_in_period: int = 20, normalize_losses: bool = False)
|
717,789
|
pytorch_optimizer.optimizer.rotograd
|
__getitem__
|
Get an end-to-end model for the selected task.
|
def __getitem__(self, item) -> nn.Module:
r"""Get an end-to-end model for the selected task."""
return nn.Sequential(self.backbone, self.heads[item])
|
(self, item) -> torch.nn.modules.module.Module
|
717,791
|
pytorch_optimizer.optimizer.rotograd
|
__init__
| null |
def __init__(
self,
backbone: nn.Module,
heads: Sequence[nn.Module],
latent_size: int,
*args,
burn_in_period: int = 20,
normalize_losses: bool = False,
):
super().__init__(backbone, heads, latent_size, burn_in_period, *args, normalize_losses=normalize_losses)
self.initial_grads = None
self.counter: int = 0
|
(self, backbone: torch.nn.modules.module.Module, heads: Sequence[torch.nn.modules.module.Module], latent_size: int, *args, burn_in_period: int = 20, normalize_losses: bool = False)
|
717,792
|
pytorch_optimizer.optimizer.rotograd
|
__len__
|
Get the number of tasks.
|
def __len__(self) -> int:
r"""Get the number of tasks."""
return self.num_tasks
|
(self) -> int
|
717,801
|
pytorch_optimizer.optimizer.rotograd
|
_hook
| null |
def _hook(self, index):
def _hook_(g):
self.original_grads[index] = g
return _hook_
|
(self, index)
|
717,807
|
pytorch_optimizer.optimizer.rotograd
|
_rep_grad
| null |
def _rep_grad(self):
super()._rep_grad()
grad_norms = [torch.linalg.norm(g, keepdim=True).clamp_min(1e-15) for g in self.original_grads]
if self.initial_grads is None or self.counter == self.burn_in_period:
self.initial_grads = grad_norms
conv_ratios = [torch.ones((1,)) for _ in range(len(self.initial_grads))]
else:
        conv_ratios = [x / y for x, y in zip(grad_norms, self.initial_grads)]
self.counter += 1
alphas = [x / torch.clamp(sum(conv_ratios), 1e-15) for x in conv_ratios]
weighted_sum_norms = sum(a * g for a, g in zip(alphas, grad_norms))
return sum(g / n * weighted_sum_norms for g, n in zip(self.original_grads, grad_norms))
|
(self)
|
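A toy numerical illustration of the homogenization above: every task gradient is rescaled to a shared target norm, where the weights `alphas` favor tasks whose gradient norms have grown relative to their initial values. All numbers are made up.

import torch

grads = [torch.tensor([3.0, 4.0]), torch.tensor([0.3, 0.4])]  # norms 5.0 and 0.5
norms = [g.norm() for g in grads]
conv_ratios = [torch.tensor(1.0), torch.tensor(2.0)]          # assumed convergence ratios
alphas = [r / sum(conv_ratios) for r in conv_ratios]          # [1/3, 2/3]
target_norm = sum(a * n for a, n in zip(alphas, norms))       # weighted norm = 2.0
combined = sum(g / n * target_norm for g, n in zip(grads, norms))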
717,814
|
pytorch_optimizer.optimizer.rotograd
|
backward
|
Compute the backward computations for the entire model.
It also computes the gradients for the rotation matrices.
:param losses: Sequence[torch.Tensor]. losses.
:param backbone_loss: Optional[torch.Tensor]. backbone loss.
|
def backward(self, losses: Sequence[torch.Tensor], backbone_loss=None, **kwargs) -> None:
r"""Compute the backward computations for the entire model.
It also computes the gradients for the rotation matrices.
:param losses: Sequence[torch.Tensor]. losses.
:param backbone_loss: Optional[torch.Tensor]. backbone loss.
"""
if not self.training:
raise AssertionError('Backward should only be called when training')
if self.iteration_counter in (0, self.burn_in_period):
for i, loss in enumerate(losses):
self.initial_losses[i] = loss.item()
if self.normalize_losses and backbone_loss is not None:
self.initial_backbone_loss = backbone_loss.item()
self.iteration_counter += 1
for i in range(len(losses)):
loss = losses[i] / self.initial_losses[i]
self.losses[i] = loss.item()
if self.normalize_losses:
loss.backward(**kwargs)
else:
losses[i].backward(**kwargs)
if backbone_loss is not None:
if self.normalize_losses:
(backbone_loss / self.initial_backbone_loss).backward(retain_graph=True)
else:
backbone_loss.backward(retain_graph=True)
self.rep.backward(self._rep_grad())
|
(self, losses: Sequence[torch.Tensor], backbone_loss=None, **kwargs) -> NoneType
|
717,825
|
pytorch_optimizer.optimizer.rotograd
|
forward
|
Forward the input through the backbone and all heads, returning a list with all the task predictions.
|
def forward(self, x: Any) -> Sequence[Any]:
r"""Forward the input through the backbone and all heads, returning a list with all the task predictions."""
out = self.backbone(x)
if isinstance(out, (list, tuple)):
rep, extra_out = out[0], out[1:]
extra_out = list(extra_out)
else:
rep = out
extra_out = []
if self.training:
self.rep = rep
preds = []
for i, head in enumerate(self.heads):
rep_i = rep
if self.training:
rep_i = rep.detach().clone()
rep_i.requires_grad = True
rep_i.register_hook(self._hook(i))
out_i = head(rep_i)
if isinstance(out_i, (list, tuple)):
preds.append(out_i[0])
extra_out.append(out_i[1:])
else:
preds.append(out_i)
    return preds if len(extra_out) == 0 else (preds, extra_out)
|
(self, x: Any) -> Sequence[Any]
|
717,833
|
pytorch_optimizer.optimizer.rotograd
|
model_parameters
| null |
def model_parameters(self, recurse=True):
for param in self.backbone.parameters(recurse=recurse):
yield param
for h in self.heads:
for param in h.parameters(recurse=recurse):
yield param
|
(self, recurse=True)
|
717,835
|
pytorch_optimizer.optimizer.rotograd
|
mtl_parameters
| null |
def mtl_parameters(self, recurse: bool = True):
return self.parameters(recurse=recurse)
|
(self, recurse: bool = True)
|
717,855
|
pytorch_optimizer.optimizer.rotograd
|
to
| null |
def to(self, *args, **kwargs):
self.backbone.to(*args, **kwargs)
for head in self.heads:
head.to(*args, **kwargs)
return super().to(*args, **kwargs)
|
(self, *args, **kwargs)
|
717,857
|
pytorch_optimizer.optimizer.rotograd
|
train
| null |
def train(self, mode: bool = True) -> nn.Module:
super().train(mode)
self.backbone.train(mode)
for head in self.heads:
head.train(mode)
return self
|
(self, mode: bool = True) -> torch.nn.modules.module.Module
|
717,861
|
pytorch_optimizer.optimizer.sam
|
SAM
|
Sharpness-Aware Minimization for Efficiently Improving Generalization.
Example:
-------
Here's an example::
model = YourModel()
base_optimizer = Ranger21
optimizer = SAM(model.parameters(), base_optimizer)
for input, output in data:
# first forward-backward pass
loss = loss_function(output, model(input))
loss.backward()
optimizer.first_step(zero_grad=True)
# second forward-backward pass
# make sure to do a full forward pass
loss_function(output, model(input)).backward()
optimizer.second_step(zero_grad=True)
Alternative example with a single closure-based step function::
model = YourModel()
base_optimizer = Ranger21
optimizer = SAM(model.parameters(), base_optimizer)
def closure():
loss = loss_function(output, model(input))
loss.backward()
return loss
for input, output in data:
loss = loss_function(output, model(input))
loss.backward()
optimizer.step(closure)
optimizer.zero_grad()
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param base_optimizer: Optimizer. base optimizer.
:param rho: float. size of the neighborhood for computing the max loss.
:param adaptive: bool. element-wise Adaptive SAM.
:param kwargs: Dict. parameters for optimizer.
|
class SAM(Optimizer, BaseOptimizer):
r"""Sharpness-Aware Minimization for Efficiently Improving Generalization.
Example:
-------
Here's an example::
model = YourModel()
base_optimizer = Ranger21
optimizer = SAM(model.parameters(), base_optimizer)
for input, output in data:
# first forward-backward pass
loss = loss_function(output, model(input))
loss.backward()
optimizer.first_step(zero_grad=True)
# second forward-backward pass
# make sure to do a full forward pass
loss_function(output, model(input)).backward()
optimizer.second_step(zero_grad=True)
Alternative example with a single closure-based step function::
model = YourModel()
base_optimizer = Ranger21
optimizer = SAM(model.parameters(), base_optimizer)
def closure():
loss = loss_function(output, model(input))
loss.backward()
return loss
for input, output in data:
loss = loss_function(output, model(input))
loss.backward()
optimizer.step(closure)
optimizer.zero_grad()
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param base_optimizer: Optimizer. base optimizer.
:param rho: float. size of the neighborhood for computing the max loss.
:param adaptive: bool. element-wise Adaptive SAM.
:param kwargs: Dict. parameters for optimizer.
"""
def __init__(
self,
params: PARAMETERS,
base_optimizer: OPTIMIZER,
rho: float = 0.05,
adaptive: bool = False,
**kwargs,
):
self.validate_non_negative(rho, 'rho')
defaults: DEFAULTS = {'rho': rho, 'adaptive': adaptive}
defaults.update(kwargs)
super().__init__(params, defaults)
self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
self.param_groups = self.base_optimizer.param_groups
def __str__(self) -> str:
return 'SAM'
@torch.no_grad()
def reset(self):
pass
@torch.no_grad()
def first_step(self, zero_grad: bool = False):
grad_norm = self.grad_norm()
for group in self.param_groups:
scale = group['rho'] / (grad_norm + 1e-12)
for p in group['params']:
if p.grad is None:
continue
self.state[p]['old_p'] = p.clone()
e_w = (torch.pow(p, 2) if group['adaptive'] else 1.0) * p.grad * scale.to(p)
# climb to the local maximum "w + e(w)"
p.add_(e_w)
if zero_grad:
self.zero_grad()
@torch.no_grad()
def second_step(self, zero_grad: bool = False):
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# get back to "w" from "w + e(w)"
p.data = self.state[p]['old_p']
# do the actual "sharpness-aware" update
self.base_optimizer.step()
if zero_grad:
self.zero_grad()
@torch.no_grad()
def step(self, closure: CLOSURE = None):
if closure is None:
raise NoClosureError(str(self))
self.first_step(zero_grad=True)
# the closure should do a full forward-backward pass
with torch.enable_grad():
closure()
self.second_step()
def grad_norm(self) -> torch.Tensor:
# put everything on the same device, in case of model parallelism
shared_device = self.param_groups[0]['params'][0].device
return torch.norm(
torch.stack(
[
((torch.abs(p) if group['adaptive'] else 1.0) * p.grad).norm(p=2).to(shared_device)
for group in self.param_groups
for p in group['params']
if p.grad is not None
]
),
p=2,
)
def load_state_dict(self, state_dict: Dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], base_optimizer: Type[torch.optim.optimizer.Optimizer], rho: float = 0.05, adaptive: bool = False, **kwargs)
|
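The ascent step in `first_step` reduces to the sketch below: each parameter moves by e(w) = rho * grad / ||grad||_2 (scaled element-wise by p^2 in the adaptive variant). Shapes and values are illustrative.

import torch

rho = 0.05
params = [torch.randn(3)]
grads = [torch.tensor([0.3, -0.4, 0.0])]

grad_norm = torch.norm(torch.stack([g.norm(p=2) for g in grads]), p=2)
scale = rho / (grad_norm + 1e-12)
perturbed = [p + g * scale for p, g in zip(params, grads)]  # climb to "w + e(w)"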
717,863
|
pytorch_optimizer.optimizer.sam
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
base_optimizer: OPTIMIZER,
rho: float = 0.05,
adaptive: bool = False,
**kwargs,
):
self.validate_non_negative(rho, 'rho')
defaults: DEFAULTS = {'rho': rho, 'adaptive': adaptive}
defaults.update(kwargs)
super().__init__(params, defaults)
self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
self.param_groups = self.base_optimizer.param_groups
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], base_optimizer: Type[torch.optim.optimizer.Optimizer], rho: float = 0.05, adaptive: bool = False, **kwargs)
|
717,866
|
pytorch_optimizer.optimizer.sam
|
__str__
| null |
def __str__(self) -> str:
return 'SAM'
|
(self) -> str
|
717,877
|
pytorch_optimizer.optimizer.sam
|
first_step
| null |
@torch.no_grad()
def second_step(self, zero_grad: bool = False):
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
# get back to "w" from "w + e(w)"
p.data = self.state[p]['old_p']
# do the actual "sharpness-aware" update
self.base_optimizer.step()
if zero_grad:
self.zero_grad()
|
(self, zero_grad: bool = False)
|
717,880
|
pytorch_optimizer.optimizer.sam
|
grad_norm
| null |
def grad_norm(self) -> torch.Tensor:
# put everything on the same device, in case of model parallelism
shared_device = self.param_groups[0]['params'][0].device
return torch.norm(
torch.stack(
[
((torch.abs(p) if group['adaptive'] else 1.0) * p.grad).norm(p=2).to(shared_device)
for group in self.param_groups
for p in group['params']
if p.grad is not None
]
),
p=2,
)
|
(self) -> torch.Tensor
|
717,905
|
pytorch_optimizer.optimizer.shampoo_utils
|
SGDGraft
|
Graft using SGD + momentum. Momentum maintains an exponentially weighted moving average of gradients.
|
class SGDGraft(Graft):
r"""Graft using SGD + momentum. momentum maintains an exponentially weighted moving average of gradients."""
def __init__(self, var: torch.Tensor):
super().__init__(var)
self.momentum: torch.Tensor = torch.zeros_like(var, device=var.device)
def update_momentum(self, update: torch.Tensor, beta1: float) -> torch.Tensor:
r"""Update momentum."""
self.momentum.mul_(beta1).add_(update)
return self.momentum
|
(var: torch.Tensor)
|
717,906
|
pytorch_optimizer.optimizer.shampoo_utils
|
__init__
| null |
def __init__(self, var: torch.Tensor):
super().__init__(var)
self.momentum: torch.Tensor = torch.zeros_like(var, device=var.device)
|
(self, var: torch.Tensor)
|
717,910
|
pytorch_optimizer.optimizer.sgdp
|
SGDP
|
SGD + Slowing Down the Slowdown for Momentum Optimizers on Scale-invariant Weights.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor.
:param dampening: float. dampening for momentum.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param delta: float. threshold that determines whether a set of parameters is scale invariant or not.
:param wd_ratio: float. relative weight decay applied on scale-invariant parameters compared to that applied
on scale-variant parameters.
:param nesterov: bool. enables nesterov momentum.
:param eps: float. term added to the denominator to improve numerical stability.
|
class SGDP(Optimizer, BaseOptimizer):
r"""SGD + Slowing Down the Slowdown for Momentum Optimizers on Scale-invariant Weights.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor.
:param dampening: float. dampening for momentum.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param delta: float. threshold that determines whether a set of parameters is scale invariant or not.
:param wd_ratio: float. relative weight decay applied on scale-invariant parameters compared to that applied
on scale-variant parameters.
:param nesterov: bool. enables nesterov momentum.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.0,
dampening: float = 0.0,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
delta: float = 0.1,
wd_ratio: float = 0.1,
nesterov: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_range(wd_ratio, 'wd_ratio', 0.0, 1.0)
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'momentum': momentum,
'dampening': dampening,
'delta': delta,
'wd_ratio': wd_ratio,
'nesterov': nesterov,
'eps': eps,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'SGDP'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['momentum'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
momentum = group['momentum']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum'] = torch.zeros_like(p)
buf = state['momentum']
buf.mul_(momentum).add_(grad, alpha=1.0 - group['dampening'])
d_p = buf.clone()
if group['nesterov']:
d_p = d_p.mul_(momentum).add_(grad)
wd_ratio: float = 1.0
if len(p.shape) > 1:
d_p, wd_ratio = projection(
p,
grad,
d_p,
group['delta'],
group['wd_ratio'],
group['eps'],
)
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
ratio=wd_ratio / (1.0 - momentum),
)
p.add_(d_p, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.0, dampening: float = 0.0, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, delta: float = 0.1, wd_ratio: float = 0.1, nesterov: bool = False, eps: float = 1e-08)
|
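The `projection` helper used in `step` is not shown in this record; following the paper's description, its effect on scale-invariant parameters can be sketched as removing the radial component of the update so that only the parameter's direction changes. This is a sketch under that assumption, not the library helper.

import torch

def project_tangent(p: torch.Tensor, d_p: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    # drop the component of d_p parallel to p; the remainder is tangent to the
    # sphere ||p|| = const, which is all that matters for scale-invariant weights
    p_flat, d_flat = p.reshape(-1), d_p.reshape(-1)
    radial = torch.dot(p_flat, d_flat) / (p_flat.norm() ** 2 + eps)
    return d_p - radial * p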
717,912
|
pytorch_optimizer.optimizer.sgdp
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
momentum: float = 0.0,
dampening: float = 0.0,
weight_decay: float = 0.0,
weight_decouple: bool = True,
fixed_decay: bool = False,
delta: float = 0.1,
wd_ratio: float = 0.1,
nesterov: bool = False,
eps: float = 1e-8,
):
self.validate_learning_rate(lr)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_range(wd_ratio, 'wd_ratio', 0.0, 1.0)
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'momentum': momentum,
'dampening': dampening,
'delta': delta,
'wd_ratio': wd_ratio,
'nesterov': nesterov,
'eps': eps,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, momentum: float = 0.0, dampening: float = 0.0, weight_decay: float = 0.0, weight_decouple: bool = True, fixed_decay: bool = False, delta: float = 0.1, wd_ratio: float = 0.1, nesterov: bool = False, eps: float = 1e-08)
|
717,915
|
pytorch_optimizer.optimizer.sgdp
|
__str__
| null |
def __str__(self) -> str:
return 'SGDP'
|
(self) -> str
|
717,936
|
pytorch_optimizer.optimizer.sgdp
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
momentum = group['momentum']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['momentum'] = torch.zeros_like(p)
buf = state['momentum']
buf.mul_(momentum).add_(grad, alpha=1.0 - group['dampening'])
d_p = buf.clone()
if group['nesterov']:
d_p = d_p.mul_(momentum).add_(grad)
wd_ratio: float = 1.0
if len(p.shape) > 1:
d_p, wd_ratio = projection(
p,
grad,
d_p,
group['delta'],
group['wd_ratio'],
group['eps'],
)
self.apply_weight_decay(
p=p,
grad=None,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
ratio=wd_ratio / (1.0 - momentum),
)
p.add_(d_p, alpha=-group['lr'])
return loss
|
(self)
|
717,951
|
pytorch_optimizer.optimizer.sgd
|
SGDW
|
Decoupled Weight Decay Regularization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param dampening: float. dampening for momentum.
:param nesterov: bool. enables Nesterov momentum.
|
class SGDW(Optimizer, BaseOptimizer):
r"""Decoupled Weight Decay Regularization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. momentum factor.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param dampening: float. dampening for momentum.
:param nesterov: bool. enables Nesterov momentum.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-4,
momentum: float = 0.0,
weight_decay: float = 0.0,
weight_decouple: bool = True,
dampening: float = 0.0,
nesterov: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'dampening': dampening,
'nesterov': nesterov,
}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'SGDW'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
if group['momentum'] > 0.0:
state['momentum_buffer'] = p.grad.clone()
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
momentum = group['momentum']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0 and momentum > 0.0:
state['momentum_buffer'] = grad.clone()
if momentum > 0.0:
buf = state['momentum_buffer']
buf.mul_(momentum).add_(grad, alpha=1.0 - group['dampening'])
if group['nesterov']:
grad.add_(buf, alpha=momentum)
else:
grad = buf
self.apply_weight_decay(
p,
grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=False,
)
p.add_(grad, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.0001, momentum: float = 0.0, weight_decay: float = 0.0, weight_decouple: bool = True, dampening: float = 0.0, nesterov: bool = False)
|
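Since `weight_decouple` defaults to True in SGDW, a small sketch may help show why decoupling matters once momentum is involved. This mirrors the two decay modes with plain tensors and illustrative constants; it is not the optimizer class itself.
import torch

# Illustration only: with momentum, coupled L2 feeds the decay term through the
# momentum buffer, while decoupled decay (SGDW-style) shrinks the weights
# directly. The two coincide on the first step from a zero buffer, then
# diverge, because the buffer remembers past decay terms.
lr, wd, mu = 0.1, 0.01, 0.9
grad = torch.full((3,), 0.5)

p1, buf1 = torch.ones(3), torch.zeros(3)   # coupled L2
p2, buf2 = torch.ones(3), torch.zeros(3)   # decoupled (SGDW-style)

for _ in range(5):
    buf1 = mu * buf1 + (grad + wd * p1)
    p1 = p1 - lr * buf1

    buf2 = mu * buf2 + grad
    p2 = p2 * (1.0 - lr * wd) - lr * buf2

print((p1 - p2).abs().max())  # nonzero: the decay modes differ under momentum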
717,953
|
pytorch_optimizer.optimizer.sgd
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-4,
momentum: float = 0.0,
weight_decay: float = 0.0,
weight_decouple: bool = True,
dampening: float = 0.0,
nesterov: bool = False,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_non_negative(weight_decay, 'weight_decay')
defaults: DEFAULTS = {
'lr': lr,
'momentum': momentum,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'dampening': dampening,
'nesterov': nesterov,
}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.0001, momentum: float = 0.0, weight_decay: float = 0.0, weight_decouple: bool = True, dampening: float = 0.0, nesterov: bool = False)
|
717,956
|
pytorch_optimizer.optimizer.sgd
|
__str__
| null |
def __str__(self) -> str:
return 'SGDW'
|
(self) -> str
|
717,992
|
pytorch_optimizer.optimizer.sm3
|
SM3
|
Memory-Efficient Adaptive Optimization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. coefficient used to scale prior updates before adding. This drastically increases
memory usage if `momentum > 0.0`. This is ignored if the parameter's gradient is sparse.
:param beta: float. coefficient used for exponential moving averages.
:param eps: float. term added to the denominator to improve numerical stability.
|
class SM3(Optimizer, BaseOptimizer):
r"""Memory-Efficient Adaptive Optimization.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param momentum: float. coefficient used to scale prior updates before adding. This drastically increases
memory usage if `momentum > 0.0`. This is ignored if the parameter's gradient is sparse.
:param beta: float. coefficient used for exponential moving averages.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-1,
momentum: float = 0.0,
beta: float = 0.0,
eps: float = 1e-30,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {'lr': lr, 'momentum': momentum, 'beta': beta, 'eps': eps}
super().__init__(params, defaults)
def __str__(self) -> str:
return 'SM3'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['momentum_buffer'] = torch.zeros_like(p)
@staticmethod
def make_sparse(grad: torch.Tensor, values: torch.Tensor) -> torch.Tensor:
if grad._indices().dim() == 0 or values.dim() == 0:
return grad.new().resize_as_(grad)
return grad.new(grad._indices(), values, grad.size())
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
momentum, beta = group['momentum'], group['beta']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
shape = grad.shape
rank: int = len(shape)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['momentum_buffer'] = torch.zeros_like(p)
if grad.is_sparse:
state['accumulator_0'] = torch.zeros(shape[0], dtype=grad.dtype, device=grad.device)
elif rank == 0:
state['accumulator_0'] = torch.zeros_like(p)
else:
for i in range(rank):
state[f'accumulator_{i}'] = torch.zeros(
[1] * i + [shape[i]] + [1] * (rank - 1 - i), dtype=grad.dtype, device=grad.device
)
state['step'] += 1
if grad.is_sparse:
grad = grad.coalesce()
acc = state['accumulator_0']
update_values = torch.gather(acc, 0, grad._indices()[0])
if beta > 0.0:
update_values.mul_(beta)
update_values.addcmul_(grad._values(), grad._values(), value=1.0 - beta)
nu_max = reduce_max_except_dim(self.make_sparse(grad, update_values).to_dense(), 0).squeeze_()
if beta > 0.0:
torch.max(acc, nu_max, out=acc)
else:
acc.copy_(nu_max)
update_values.add_(group['eps']).rsqrt_().mul_(grad._values())
update = self.make_sparse(grad, update_values)
else:
update = state['accumulator_0'].clone()
for i in range(1, rank):
update = torch.min(update, state[f'accumulator_{i}'])
if beta > 0.0:
update.mul_(beta)
update.addcmul_(grad, grad, value=1.0 - beta)
for i in range(rank):
acc = state[f'accumulator_{i}']
nu_max = reduce_max_except_dim(update, i)
if beta > 0.0:
torch.max(acc, nu_max, out=acc)
else:
acc.copy_(nu_max)
update.add_(group['eps']).rsqrt_().mul_(grad)
if momentum > 0.0:
m = state['momentum_buffer']
m.mul_(momentum).add_(update, alpha=1.0 - momentum)
update = m
p.add_(update, alpha=-group['lr'])
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.1, momentum: float = 0.0, beta: float = 0.0, eps: float = 1e-30)
|
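The accumulator layout built in the step above is where SM3's memory saving comes from: one accumulator per tensor dimension instead of one per entry. A rank-2 sketch of the `beta = 0.0` path follows, with illustrative shapes; this is a standalone sketch, not part of the library.
import torch

# Instead of an (m, n) second-moment buffer, keep an (m, 1) row accumulator
# and a (1, n) column accumulator; the per-entry bound is their broadcast
# minimum, refreshed with the squared gradient.
grad = torch.randn(4, 5)
acc_row, acc_col = torch.zeros(4, 1), torch.zeros(1, 5)

nu = torch.min(acc_row, acc_col) + grad.pow(2)                  # per-entry estimate
acc_row = torch.maximum(acc_row, nu.max(dim=1, keepdim=True).values)
acc_col = torch.maximum(acc_col, nu.max(dim=0, keepdim=True).values)

update = grad / (nu + 1e-30).sqrt()                             # preconditioned step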
717,994
|
pytorch_optimizer.optimizer.sm3
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-1,
momentum: float = 0.0,
beta: float = 0.0,
eps: float = 1e-30,
):
self.validate_learning_rate(lr)
self.validate_range(momentum, 'momentum', 0.0, 1.0)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {'lr': lr, 'momentum': momentum, 'beta': beta, 'eps': eps}
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.1, momentum: float = 0.0, beta: float = 0.0, eps: float = 1e-30)
|
717,997
|
pytorch_optimizer.optimizer.sm3
|
__str__
| null |
def __str__(self) -> str:
return 'SM3'
|
(self) -> str
|
718,011
|
pytorch_optimizer.optimizer.sm3
|
make_sparse
| null |
@staticmethod
def make_sparse(grad: torch.Tensor, values: torch.Tensor) -> torch.Tensor:
if grad._indices().dim() == 0 or values.dim() == 0:
return grad.new().resize_as_(grad)
return grad.new(grad._indices(), values, grad.size())
|
(grad: torch.Tensor, values: torch.Tensor) -> torch.Tensor
|
718,019
|
pytorch_optimizer.optimizer.sm3
|
reset
| null |
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
momentum, beta = group['momentum'], group['beta']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
shape = grad.shape
rank: int = len(shape)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['momentum_buffer'] = torch.zeros_like(p)
if grad.is_sparse:
state['accumulator_0'] = torch.zeros(shape[0], dtype=grad.dtype, device=grad.device)
elif rank == 0:
state['accumulator_0'] = torch.zeros_like(p)
else:
for i in range(rank):
state[f'accumulator_{i}'] = torch.zeros(
[1] * i + [shape[i]] + [1] * (rank - 1 - i), dtype=grad.dtype, device=grad.device
)
state['step'] += 1
if grad.is_sparse:
grad = grad.coalesce()
acc = state['accumulator_0']
update_values = torch.gather(acc, 0, grad._indices()[0])
if beta > 0.0:
update_values.mul_(beta)
update_values.addcmul_(grad._values(), grad._values(), value=1.0 - beta)
nu_max = reduce_max_except_dim(self.make_sparse(grad, update_values).to_dense(), 0).squeeze_()
if beta > 0.0:
torch.max(acc, nu_max, out=acc)
else:
acc.copy_(nu_max)
update_values.add_(group['eps']).rsqrt_().mul_(grad._values())
update = self.make_sparse(grad, update_values)
else:
update = state['accumulator_0'].clone()
for i in range(1, rank):
update = torch.min(update, state[f'accumulator_{i}'])
if beta > 0.0:
update.mul_(beta)
update.addcmul_(grad, grad, value=1.0 - beta)
for i in range(rank):
acc = state[f'accumulator_{i}']
nu_max = reduce_max_except_dim(update, i)
if beta > 0.0:
torch.max(acc, nu_max, out=acc)
else:
acc.copy_(nu_max)
update.add_(group['eps']).rsqrt_().mul_(grad)
if momentum > 0.0:
m = state['momentum_buffer']
m.mul_(momentum).add_(update, alpha=1.0 - momentum)
update = m
p.add_(update, alpha=-group['lr'])
return loss
|
(self)
|
718,034
|
pytorch_optimizer.optimizer.shampoo_utils
|
SQRTNGraft
|
Graft using SQRTN.
|
class SQRTNGraft(Graft):
r"""Graft using SQRTN."""
def __init__(self, var: torch.Tensor):
super().__init__(var)
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad.sign()
|
(var: torch.Tensor)
|
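In isolation, the preconditioning above just takes the sign of the gradient; a one-line illustration follows. The rescaling that completes the graft presumably lives elsewhere in the Shampoo utilities and is not shown here.
import torch

# SQRTNGraft's preconditioned gradient is simply the elementwise sign.
grad = torch.tensor([0.3, -1.2, 0.0])
print(grad.sign())  # tensor([ 1., -1.,  0.])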
718,035
|
pytorch_optimizer.optimizer.shampoo_utils
|
__init__
| null |
def __init__(self, var: torch.Tensor):
super().__init__(var)
|
(self, var: torch.Tensor)
|
718,037
|
pytorch_optimizer.optimizer.shampoo_utils
|
precondition_gradient
|
Get preconditioned gradient.
|
def precondition_gradient(self, grad: torch.Tensor) -> torch.Tensor:
r"""Get preconditioned gradient."""
return grad.sign()
|
(self, grad: torch.Tensor) -> torch.Tensor
|
718,039
|
pytorch_optimizer.optimizer.srmm
|
SRMM
|
Stochastic regularized majorization-minimization with weakly convex and multi-convex surrogates.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. adaptivity weight.
:param memory_length: Optional[int]. internal memory length for moving average. None for no refreshing.
|
class SRMM(Optimizer, BaseOptimizer):
"""Stochastic regularized majorization-minimization with weakly convex and multi-convex surrogates.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param beta: float. adaptivity weight.
:param memory_length: Optional[int]. internal memory length for moving average. None for no refreshing.
"""
def __init__(self, params: PARAMETERS, lr: float = 0.01, beta: float = 0.5, memory_length: Optional[int] = 100):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
defaults: DEFAULTS = {'lr': lr, 'beta': beta, 'memory_length': memory_length}
super().__init__(params, defaults)
self.base_lrs: List[float] = [group['lr'] for group in self.param_groups]
def __str__(self) -> str:
return 'SRMM'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['mov_avg_grad'] = torch.zeros_like(p)
state['mov_avg_param'] = torch.zeros_like(p)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
w_t: float = (
(group['step'] + 1) % (group['memory_length'] if group['memory_length'] is not None else 1)
) ** -group['beta']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['mov_avg_grad'] = torch.zeros_like(p)
state['mov_avg_param'] = torch.zeros_like(p)
mov_avg_grad, mov_avg_param = state['mov_avg_grad'], state['mov_avg_param']
mov_avg_grad.mul_(1.0 - w_t).add_(grad, alpha=w_t)
mov_avg_param.mul_(1.0 - w_t).add_(p, alpha=w_t)
mov_avg_param.add_(mov_avg_grad, alpha=-group['lr'])
p.copy_(mov_avg_param)
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, beta: float = 0.5, memory_length: Optional[int] = 100)
|
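To make the averaging concrete, here is a single-parameter sketch of one SRMM update, with `w_t` computed exactly as in the group loop above. Tensor names are illustrative, not library API.
import torch

step, beta, memory_length, lr = 1, 0.5, 100, 0.01
w_t = ((step + 1) % memory_length) ** -beta            # averaging weight for this step

p = torch.randn(3)
grad = torch.randn(3)
mov_avg_grad, mov_avg_param = torch.zeros(3), torch.zeros(3)

mov_avg_grad.mul_(1.0 - w_t).add_(grad, alpha=w_t)     # average the gradients
mov_avg_param.mul_(1.0 - w_t).add_(p, alpha=w_t)       # average the iterates
mov_avg_param.add_(mov_avg_grad, alpha=-lr)            # gradient step on the average
p.copy_(mov_avg_param)                                 # write back to the parameter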
718,041
|
pytorch_optimizer.optimizer.srmm
|
__init__
| null |
def __init__(self, params: PARAMETERS, lr: float = 0.01, beta: float = 0.5, memory_length: Optional[int] = 100):
self.validate_learning_rate(lr)
self.validate_range(beta, 'beta', 0.0, 1.0, range_type='[]')
defaults: DEFAULTS = {'lr': lr, 'beta': beta, 'memory_length': memory_length}
super().__init__(params, defaults)
self.base_lrs: List[float] = [group['lr'] for group in self.param_groups]
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.01, beta: float = 0.5, memory_length: Optional[int] = 100)
|
718,044
|
pytorch_optimizer.optimizer.srmm
|
__str__
| null |
def __str__(self) -> str:
return 'SRMM'
|
(self) -> str
|
718,080
|
pytorch_optimizer.optimizer.swats
|
SWATS
|
Improving Generalization Performance by Switching from Adam to SGD.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of the gradient and the squared Hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the ams_bound variant of this algorithm from the paper.
:param nesterov: bool. enables Nesterov momentum.
:param r: float. EMA factor; a value between 0.9 and 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
|
class SWATS(Optimizer, BaseOptimizer):
r"""Improving Generalization Performance by Switching from Adam to SGD.
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups.
:param lr: float. learning rate.
:param betas: BETAS. coefficients used for computing running averages of the gradient and the squared Hessian trace.
:param weight_decay: float. weight decay (L2 penalty).
:param weight_decouple: bool. the optimizer uses decoupled weight decay as in AdamW.
:param fixed_decay: bool. fix weight decay.
:param ams_bound: bool. whether to use the ams_bound variant of this algorithm from the paper.
:param nesterov: bool. enables Nesterov momentum.
:param r: float. EMA factor; a value between 0.9 and 0.99 is preferred.
:param adanorm: bool. whether to use the AdaNorm variant.
:param adam_debias: bool. Only correct the denominator to avoid inflating step sizes early in training.
:param eps: float. term added to the denominator to improve numerical stability.
"""
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
ams_bound: bool = False,
nesterov: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'nesterov': nesterov,
'adanorm': adanorm,
'adam_debias': adam_debias,
'phase': 'adam',
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
def __str__(self) -> str:
return 'SWATS'
@torch.no_grad()
def reset(self):
for group in self.param_groups:
group['step'] = 0
for p in group['params']:
state = self.state[p]
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg2'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=p.dtype, device=p.device)
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
if 'step' in group:
group['step'] += 1
else:
group['step'] = 1
beta1, beta2 = group['betas']
bias_correction1: float = 1.0 - beta1 ** group['step']
bias_correction2: float = 1.0 - beta2 ** group['step']
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise NoSparseGradientError(str(self))
state = self.state[p]
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['exp_avg2'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
if group['ams_bound']:
state['max_exp_avg_sq'] = torch.zeros_like(p)
if group['adanorm']:
state['exp_grad_norm'] = torch.zeros((1,), dtype=grad.dtype, device=grad.device)
self.apply_weight_decay(
p=p,
grad=p.grad,
lr=group['lr'],
weight_decay=group['weight_decay'],
weight_decouple=group['weight_decouple'],
fixed_decay=group['fixed_decay'],
)
if group['phase'] == 'sgd':
if 'momentum_buffer' not in state:
state['momentum_buffer'] = torch.zeros_like(grad)
buf = state['momentum_buffer']
buf.mul_(beta1).add_(grad)
update = buf.clone()
update.mul_(1.0 - beta1)
if group['nesterov']:
update.add_(buf, alpha=beta1)
p.add_(update, alpha=-group['lr'])
continue
s_grad = self.get_adanorm_gradient(
grad=grad,
adanorm=group['adanorm'],
exp_grad_norm=state.get('exp_grad_norm', None),
r=group.get('r', None),
)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
exp_avg.mul_(beta1).add_(s_grad, alpha=1.0 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
de_nom = self.apply_ams_bound(
ams_bound=group['ams_bound'],
exp_avg_sq=exp_avg_sq,
max_exp_avg_sq=state.get('max_exp_avg_sq', None),
eps=group['eps'],
)
step_size: float = self.apply_adam_debias(
adam_debias=group['adam_debias'],
step_size=group['lr'] * math.sqrt(bias_correction2),
bias_correction1=bias_correction1,
)
perturb = exp_avg.clone()
perturb.div_(de_nom).mul_(-step_size)
p.add_(perturb)
perturb_view = perturb.view(-1)
pg = perturb_view.dot(grad.view(-1))
if pg != 0:
scaling = perturb_view.dot(perturb_view).div_(-pg)
exp_avg2 = state['exp_avg2']
exp_avg2.mul_(beta2).add_(scaling, alpha=1.0 - beta2)
corrected_exp_avg = exp_avg2 / bias_correction2
if (
group['step'] > 1
and corrected_exp_avg > 0.0
and corrected_exp_avg.allclose(scaling, rtol=group['eps'])
):
group['phase'] = 'sgd'
group['lr'] = corrected_exp_avg.item()
return loss
|
(params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, ams_bound: bool = False, nesterov: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-06)
|
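A minimal usage sketch, assuming `SWATS` is exported from the top-level `pytorch_optimizer` package (an assumption based on the module path above). The per-group `phase` field set in the step above can be inspected to see whether the switch from Adam to SGD has happened.
import torch
import torch.nn as nn
from pytorch_optimizer import SWATS  # assumed top-level export

model = nn.Linear(10, 2)
optimizer = SWATS(model.parameters(), lr=1e-3)

x, y = torch.randn(8, 10), torch.randn(8, 2)
for _ in range(5):
    optimizer.zero_grad()
    nn.functional.mse_loss(model(x), y).backward()
    optimizer.step()

# 'adam' until the SGD learning-rate estimate stabilizes; then 'sgd', with the
# group's lr replaced by that estimate.
print([group['phase'] for group in optimizer.param_groups])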
718,082
|
pytorch_optimizer.optimizer.swats
|
__init__
| null |
def __init__(
self,
params: PARAMETERS,
lr: float = 1e-3,
betas: BETAS = (0.9, 0.999),
weight_decay: float = 0.0,
weight_decouple: bool = False,
fixed_decay: bool = False,
ams_bound: bool = False,
nesterov: bool = False,
r: float = 0.95,
adanorm: bool = False,
adam_debias: bool = False,
eps: float = 1e-6,
):
self.validate_learning_rate(lr)
self.validate_betas(betas)
self.validate_non_negative(weight_decay, 'weight_decay')
self.validate_non_negative(eps, 'eps')
defaults: DEFAULTS = {
'lr': lr,
'betas': betas,
'weight_decay': weight_decay,
'weight_decouple': weight_decouple,
'fixed_decay': fixed_decay,
'ams_bound': ams_bound,
'nesterov': nesterov,
'adanorm': adanorm,
'adam_debias': adam_debias,
'phase': 'adam',
'eps': eps,
}
if adanorm:
defaults.update({'r': r})
super().__init__(params, defaults)
|
(self, params: Union[Iterable[Dict], Iterable[torch.Tensor], NoneType], lr: float = 0.001, betas: Union[Tuple[float, float], Tuple[float, float, float]] = (0.9, 0.999), weight_decay: float = 0.0, weight_decouple: bool = False, fixed_decay: bool = False, ams_bound: bool = False, nesterov: bool = False, r: float = 0.95, adanorm: bool = False, adam_debias: bool = False, eps: float = 1e-06)
|
718,085
|
pytorch_optimizer.optimizer.swats
|
__str__
| null |
def __str__(self) -> str:
return 'SWATS'
|
(self) -> str
|