| | """ Optimizer Factory w/ Custom Weight Decay |
| | Hacked together by / Copyright 2020 Ross Wightman |
| | """ |
| | from typing import Optional |
| |
|
| | import torch |
| | import torch.nn as nn |
| | import torch.optim as optim |
| |
|
| | from .timm.adafactor import Adafactor |
| | from .timm.adahessian import Adahessian |
| | from .timm.adamp import AdamP |
| | from .timm.lookahead import Lookahead |
| | from .timm.nadam import Nadam |
| | from .timm.novograd import NovoGrad |
| | from .timm.nvnovograd import NvNovoGrad |
| | from .timm.radam import RAdam |
| | from .timm.rmsprop_tf import RMSpropTF |
| | from .timm.sgdp import SGDP |
| | from .timm.adabelief import AdaBelief |
| |
|
| | try: |
| | from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD |
| | has_apex = True |
| | except ImportError: |
| | has_apex = False |
| |
|
| |
|


def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
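    """Split model parameters into two optimizer groups: one with weight decay and one without.

    1-d tensors (e.g. norm scales), parameters whose name ends in ``.bias``, and any
    name listed in ``skip_list`` go into the no-decay group; everything else receives
    ``weight_decay``.
    """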
    decay = []
    no_decay = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
            no_decay.append(param)
        else:
            decay.append(param)
    return [
        {"params": no_decay, "weight_decay": 0.},
        {"params": decay, "weight_decay": weight_decay}]


def optimizer_kwargs(args, lr_weight):
    """ args/argparse to kwargs helper
    Convert optimizer args from an argparse namespace (or similar object) into keyword
    args for the updated create fn. The learning rate is scaled linearly with batch size
    (relative to a reference batch size of 128) and multiplied by lr_weight.
    """
    kwargs = dict(
        optimizer_name=args.opt,
        learning_rate=args.lr_base * args.batch_size / 128 * lr_weight,
        weight_decay=args.weight_decay,
        momentum=args.momentum)
    if getattr(args, "opt_eps", None) is not None:
        kwargs["eps"] = args.opt_eps
    if getattr(args, "opt_betas", None) is not None:
        kwargs["betas"] = args.opt_betas
    if getattr(args, "opt_args", None) is not None:
        kwargs.update(args.opt_args)
    return kwargs
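
# A minimal usage sketch (illustrative only, not part of the original module);
# the Namespace fields mirror the attributes optimizer_kwargs reads, but the
# values are made up:
#
#   from argparse import Namespace
#   args = Namespace(opt="adamw", lr_base=1e-3, batch_size=256, weight_decay=0.05,
#                    momentum=0.9, opt_eps=1e-8, opt_betas=None, opt_args=None)
#   optimizer_kwargs(args, lr_weight=1.0)
#   # -> {'optimizer_name': 'adamw', 'learning_rate': 0.002,
#   #     'weight_decay': 0.05, 'momentum': 0.9, 'eps': 1e-8}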


def create_optimizer(args, model, filter_bias_and_bn=True, lr_weight=1):
    """ Legacy optimizer factory for backwards compatibility.
    NOTE: Use create_optimizer_v2 for new code.
    """
    return create_optimizer_v2(
        model,
        **optimizer_kwargs(args, lr_weight),
        filter_bias_and_bn=filter_bias_and_bn,
    )


def create_optimizer_v2(
        model: nn.Module,
        optimizer_name: str = "sgd",
        learning_rate: Optional[float] = None,
        weight_decay: float = 0.,
        momentum: float = 0.9,
        filter_bias_and_bn: bool = True,
        **kwargs):
    """ Create an optimizer.

    TODO currently the model is passed in and all parameters are selected for optimization.
    For more general use an interface that allows selection of parameters to optimize and lr groups, one of:
      * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion
      * expose the parameters interface and leave it up to caller

    Args:
        model (nn.Module): model containing parameters to optimize
        optimizer_name: name of optimizer to create
        learning_rate: initial learning rate
        weight_decay: weight decay to apply in optimizer
        momentum: momentum for momentum based optimizers (others may use betas via kwargs)
        filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay
        **kwargs: extra optimizer specific kwargs to pass through

    Returns:
        Optimizer
    """
    opt_lower = optimizer_name.lower()
    if weight_decay and filter_bias_and_bn:
        skip = {}
        if hasattr(model, "no_weight_decay"):
            skip = model.no_weight_decay()
        parameters = add_weight_decay(model, weight_decay, skip)
        weight_decay = 0.
    else:
        parameters = model.parameters()
    if "fused" in opt_lower:
        assert has_apex and torch.cuda.is_available(), "APEX and CUDA required for fused optimizers"

    opt_args = dict(lr=learning_rate, weight_decay=weight_decay, **kwargs)
    opt_split = opt_lower.split("_")
    opt_lower = opt_split[-1]
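    # Note: an optional "lookahead_" prefix (e.g. "lookahead_adamw") is split off here;
    # the suffix selects the base optimizer below and the prefix wraps the result
    # with Lookahead at the end of this function.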
    if opt_lower == "sgd" or opt_lower == "nesterov":
        opt_args.pop("eps", None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == "momentum":
        opt_args.pop("eps", None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == "adam":
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == "adabelief":
        optimizer = AdaBelief(parameters, rectify=False, **opt_args)
    elif opt_lower == "adamw":
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == "nadam":
        optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == "radam":
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == "adamp":
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == "sgdp":
        optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == "adadelta":
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == "adafactor":
        if not learning_rate:
            opt_args["lr"] = None
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == "adahessian":
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == "rmsprop":
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == "rmsproptf":
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == "novograd":
        optimizer = NovoGrad(parameters, **opt_args)
    elif opt_lower == "nvnovograd":
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == "fusedsgd":
        opt_args.pop("eps", None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == "fusedmomentum":
        opt_args.pop("eps", None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == "fusedadam":
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == "fusedadamw":
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == "fusedlamb":
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == "fusednovograd":
        opt_args.setdefault("betas", (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        raise ValueError(f"Invalid optimizer: {optimizer_name}")

    if len(opt_split) > 1:
        if opt_split[0] == "lookahead":
            optimizer = Lookahead(optimizer)

    return optimizer
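
# A minimal usage sketch (illustrative; the model and hyperparameters are
# assumptions, not part of the original module):
#
#   import torchvision
#   model = torchvision.models.resnet18()
#   optimizer = create_optimizer_v2(
#       model, optimizer_name="lookahead_adamw",
#       learning_rate=5e-4, weight_decay=0.05)
#
# With the default filter_bias_and_bn=True and a non-zero weight_decay, biases
# and other 1-d parameters are placed in a no-decay group via add_weight_decay,
# and the "lookahead_" prefix wraps the resulting AdamW optimizer in Lookahead.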