Comparative-Analysis-of-Speech-Synthesis-Models / TensorFlowTTS / tensorflow_tts / optimizers / adamweightdecay.py
# -*- coding: utf-8 -*-
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdamW for training self-attention."""

import re

import tensorflow as tf

class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate,
        decay_schedule_fn,
        warmup_steps,
        power=1.0,
        name=None,
    ):
        super(WarmUp, self).__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup: if global_step < warmup_steps, the
            # learning rate is `(global_step / warmup_steps) ** power * init_lr`
            # (linear when power == 1.0); afterwards, the wrapped decay
            # schedule takes over.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(
                warmup_percent_done, self.power
            )
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
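
# Usage sketch (not part of the original module; the wrapped schedule and the
# numbers below are illustrative assumptions): WarmUp overrides an existing
# decay schedule during the first `warmup_steps` steps, then defers to it.
#
#     decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
#         initial_learning_rate=1e-3, decay_steps=100000, end_learning_rate=1e-5
#     )
#     lr_schedule = WarmUp(
#         initial_learning_rate=1e-3,
#         decay_schedule_fn=decay_fn,
#         warmup_steps=4000,
#     )
#     lr_schedule(0)      # ~0.0: warmup starts from zero
#     lr_schedule(2000)   # 5e-4: halfway through a linear (power=1.0) warmup
#     lr_schedule(4000)   # hands off to decay_fn from here on
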
class AdamWeightDecay(tf.keras.optimizers.Adam):
    """Adam with decoupled weight decay and clip_by_global_norm on gradients.

    Just adding the square of the weights to the loss function is *not* the
    correct way of using L2 regularization/weight decay with Adam, since that
    will interact with the m and v parameters in strange ways.

    Instead we want to decay the weights in a manner that doesn't interact
    with the m/v parameters. This is equivalent to adding the square of the
    weights to the loss with plain (non-momentum) SGD.
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs
    ):
        super(AdamWeightDecay, self).__init__(
            learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs
        )
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(
            config, custom_objects=custom_objects
        )

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        # Decay the variable in place, but only if its name passes the
        # include/exclude filters; otherwise emit a no-op.
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, clip_norm=0.5, **kwargs):
        # Clip all gradients jointly by their global norm before the Adam update.
        grads, tvars = list(zip(*grads_and_vars))
        (grads, _) = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], dict(apply_state=apply_state)

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        # Apply the decoupled weight decay first, then run the regular Adam step.
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(
                grad, var, **kwargs
            )

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        # Same ordering for the sparse path: decay, then the Adam update.
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(
                grad, var, indices, **kwargs
            )

    def get_config(self):
        config = super(AdamWeightDecay, self).get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        # Include patterns take precedence over exclude patterns; with neither
        # set, every variable is decayed.
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
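

# Hedged usage sketch (not part of the original file; the hyper-parameters and
# regex patterns below are illustrative assumptions): build a warmed-up
# schedule, exclude LayerNorm/bias variables from decay, and apply one step.
if __name__ == "__main__":
    decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=1e-3, decay_steps=100000, end_learning_rate=1e-5
    )
    optimizer = AdamWeightDecay(
        learning_rate=WarmUp(
            initial_learning_rate=1e-3, decay_schedule_fn=decay_fn, warmup_steps=4000
        ),
        weight_decay_rate=0.01,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
    )
    kernel = tf.Variable(tf.ones([2, 2]), name="dense/kernel")  # decayed
    bias = tf.Variable(tf.zeros([2]), name="dense/bias")  # excluded by pattern
    grads = [tf.ones_like(kernel), tf.ones_like(bias)]
    # Gradients are clipped by global norm inside apply_gradients (clip_norm=0.5).
    optimizer.apply_gradients(zip(grads, [kernel, bias]))
    print(kernel.numpy(), bias.numpy())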