from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.optimizers import optimizer


@keras_export(["keras.optimizers.Adamax"])
class Adamax(optimizer.Optimizer):
    """Optimizer that implements the Adamax algorithm.

    Adamax, a variant of Adam based on the infinity norm, is a first-order
    gradient-based optimization method. Because it can adapt the learning
    rate based on data characteristics, it is suited to learning
    time-variant processes, e.g., speech data with dynamically changing
    noise conditions. Default parameters follow those provided in the paper
    (see the reference below).

    Initialization:

    ```python
    m = 0  # Initialize the 1st moment vector
    u = 0  # Initialize the exponentially weighted infinity norm
    t = 0  # Initialize timestep
    ```

    The update rule for parameter `w` with gradient `g` is described at the
    end of section 7.1 of the paper (see the reference section):

    ```python
    t += 1
    m = beta1 * m + (1 - beta1) * g
    u = max(beta2 * u, abs(g))
    current_lr = learning_rate / (1 - beta1 ** t)
    w = w - current_lr * m / (u + epsilon)
    ```
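
    A minimal usage sketch (the `model` below is a placeholder assumption,
    not something defined in this module):

    ```python
    optimizer = keras.optimizers.Adamax(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss="mse")
    ```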

    Args:
        learning_rate: A float, a
            `keras.optimizers.schedules.LearningRateSchedule` instance, or
            a callable that takes no arguments and returns the actual value
            to use. The learning rate. Defaults to `0.001`.
        beta_1: A float value or a constant float tensor. The exponential
            decay rate for the 1st moment estimates. Defaults to `0.9`.
        beta_2: A float value or a constant float tensor. The exponential
            decay rate for the exponentially weighted infinity norm.
            Defaults to `0.999`.
        epsilon: A small constant for numerical stability. Defaults to
            `1e-7`.
        {{base_optimizer_keyword_args}}

    Reference:

    - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        loss_scale_factor=None,
        gradient_accumulation_steps=None,
        name="adamax",
        **kwargs,
    ):
        super().__init__(
            learning_rate=learning_rate,
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            loss_scale_factor=loss_scale_factor,
            gradient_accumulation_steps=gradient_accumulation_steps,
            **kwargs,
        )
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon

    def build(self, var_list):
        """Initialize optimizer variables.

        Adamax optimizer has 2 types of variables: momentums (denoted as
        `m`) and the exponentially weighted infinity norm (denoted as `u`).

        Args:
            var_list: list of model variables to build Adamax variables on.
        """
        if self.built:
            return
        super().build(var_list)
        self._m, self._u = self.add_optimizer_variables(
            var_list, ["momentum", "norm"]
        )

    def update_step(self, gradient, variable, learning_rate):
        """Update step given gradient and the associated model variable."""
        lr = ops.cast(learning_rate, variable.dtype)
        gradient = ops.cast(gradient, variable.dtype)
        local_step = ops.cast(self.iterations + 1, variable.dtype)
        beta_1_power = ops.power(
            ops.cast(self.beta_1, variable.dtype), local_step
        )

        m = self._m[self._get_variable_index(variable)]
        u = self._u[self._get_variable_index(variable)]

        # m = beta_1 * m + (1 - beta_1) * gradient, written incrementally.
        self.assign_add(
            m, ops.multiply(ops.subtract(gradient, m), (1 - self.beta_1))
        )
        # u = max(beta_2 * u, |gradient|): the infinity-norm accumulator.
        self.assign(
            u, ops.maximum(ops.multiply(self.beta_2, u), ops.abs(gradient))
        )
        # variable -= lr * m / ((1 - beta_1 ** t) * (u + epsilon)), which
        # folds the bias correction into the effective learning rate.
        self.assign_sub(
            variable,
            ops.divide(
                ops.multiply(lr, m),
                ops.multiply((1 - beta_1_power), ops.add(u, self.epsilon)),
            ),
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "beta_1": self.beta_1,
                "beta_2": self.beta_2,
                "epsilon": self.epsilon,
            }
        )
        return config

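# Serialization round-trip sketch (an illustration, not part of this module):
# `get_config` pairs with `from_config`, inherited from the base class, so a
# rebuilt optimizer carries the same hyperparameters.
#
#     opt = Adamax(beta_1=0.95)
#     opt2 = Adamax.from_config(opt.get_config())
#     assert opt2.beta_1 == 0.95
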
Adamax.__doc__ = Adamax.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
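

# A minimal standalone sketch of the same update rule in plain Python, handy
# for sanity-checking `update_step` against the formulas in the docstring.
# This helper is illustrative only (an assumption of this note, not part of
# the Keras API); the default hyperparameters mirror `Adamax.__init__`.
def _adamax_reference_step(
    w, g, m, u, t, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-7
):
    m = beta_1 * m + (1 - beta_1) * g  # 1st moment estimate
    u = max(beta_2 * u, abs(g))  # exponentially weighted infinity norm
    # Bias-corrected step: lr / (1 - beta_1 ** t) scales the raw moment.
    w = w - lr * m / ((1 - beta_1**t) * (u + epsilon))
    return w, m, u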