class InnerProductParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER

class LRNParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LRNPARAMETER

class MemoryDataParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER

class MVNParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MVNPARAMETER

class PoolingParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POOLINGPARAMETER

class PowerParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER

class PythonParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PYTHONPARAMETER

class ReLUParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _RELUPARAMETER

class ROIPoolingParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ROIPOOLINGPARAMETER

class SigmoidParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SIGMOIDPARAMETER

class SliceParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SLICEPARAMETER

class SoftmaxParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOFTMAXPARAMETER

class TanHParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _TANHPARAMETER

class ThresholdParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _THRESHOLDPARAMETER

class WindowDataParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _WINDOWDATAPARAMETER

class V1LayerParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V1LAYERPARAMETER

class V0LayerParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V0LAYERPARAMETER

class PReLUParameter(_message.Message):
    __metaclass__ = _reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _PRELUPARAMETER
class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class LayerConnection(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERCONNECTION

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE

class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class ConcatParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONCATPARAMETER

class ConvolutionParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _CONVOLUTIONPARAMETER

class DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATAPARAMETER

class DropoutParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DROPOUTPARAMETER

class HDF5DataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5DATAPARAMETER

class HDF5OutputParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _HDF5OUTPUTPARAMETER

class ImageDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _IMAGEDATAPARAMETER

class InfogainLossParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INFOGAINLOSSPARAMETER

class InnerProductParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _INNERPRODUCTPARAMETER

class LRNParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LRNPARAMETER

class MemoryDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _MEMORYDATAPARAMETER

class PoolingParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POOLINGPARAMETER

class PowerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _POWERPARAMETER

class WindowDataParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _WINDOWDATAPARAMETER

class V0LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _V0LAYERPARAMETER

class BlobProto(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTO

class BlobProtoVector(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _BLOBPROTOVECTOR

class Datum(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DATUM

class FillerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _FILLERPARAMETER

class LayerParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERPARAMETER

class LayerConnection(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _LAYERCONNECTION

class NetParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _NETPARAMETER

class SolverParameter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERPARAMETER

class EvalHistoryIter(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EVALHISTORYITER

class EvalHistory(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _EVALHISTORY

class SolverState(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _SOLVERSTATE
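# Hedged usage sketch (not part of the original source): message classes
# generated this way behave like ordinary protobuf messages; for example,
# parsing a Caffe net definition from a prototxt file (assumes `net.prototxt`
# exists and the generated module's preamble imports are in scope):
from google.protobuf import text_format

net_param = NetParameter()
with open('net.prototxt') as f:
    text_format.Merge(f.read(), net_param)
print(net_param.name)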
import sys

# URLS_RE and LISTING_RE are module-level compiled regexes (defined elsewhere
# in the original source file).


def remove_urls(text):
    return URLS_RE.sub('', text)


def replace_multi_whitespaces(line):
    return ' '.join(line.split())


def remove_listing(line):
    return LISTING_RE.sub('', line)


def main():
    with open(sys.argv[1], 'r') as input_file:
        for line in input_file:
            if line == '\n':  # `is` comparisons on strings replaced with equality checks
                print('')
            else:
                line = line.lower()
                line = remove_urls(line)
                line = remove_listing(line)
                line = replace_multi_whitespaces(line)
                if line != '':
                    print(line)
import sys
import unicodedata


def _is_punctuation(char):
    """Checks whether `char` is a punctuation character."""
    cp = ord(char)
    # Treat all non-letter/number ASCII as punctuation; characters such as
    # "^", "$" and "`" are not in the Unicode 'P*' categories but are treated
    # as punctuation here for consistency.
    if ((33 <= cp <= 47) or (58 <= cp <= 64) or
            (91 <= cp <= 96) or (123 <= cp <= 126)):
        return True
    cat = unicodedata.category(char)
    if cat.startswith('P'):
        return True
    return False


def _run_split_on_punc(text):
    """Splits punctuation on a piece of text."""
    chars = list(text)
    i = 0
    start_new_word = True
    output = []
    while i < len(chars):
        char = chars[i]
        if _is_punctuation(char):
            output.append([char])
            start_new_word = True
        else:
            if start_new_word:
                output.append([])
            start_new_word = False
            output[-1].append(char)
        i += 1
    return ' '.join([''.join(x) for x in output])


def replace_multi_whitespaces(line):
    return ' '.join(line.split())


def main():
    with open(sys.argv[1], 'r') as input_file:
        for line in input_file:
            if line == '\n':  # `is` comparisons on strings replaced with equality checks
                print('')
            else:
                line = _run_split_on_punc(line)
                line = replace_multi_whitespaces(line)
                if line != '':
                    print(line)
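# Hedged usage sketch (not part of the original source): assuming the script
# above is saved under a hypothetical name such as `tokenize_punct.py`, it
# normalizes a text file line by line:
#
#   $ python tokenize_punct.py corpus.txt > corpus.tokenized.txt
#
# For a single line, the two helpers compose like this:
#   _run_split_on_punc('hello, world!')           -> 'hello ,  world !'
#   replace_multi_whitespaces('hello ,  world !') -> 'hello , world !'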
# Assumed imports for the callbacks below (not present in the flattened
# original source rows):
import os
import math
import warnings
import numpy as np
from keras import backend as K
from keras.callbacks import Callback


class OneCycleLR(Callback):
    def __init__(self, max_lr, end_percentage=0.1, scale_percentage=None,
                 maximum_momentum=0.95, minimum_momentum=0.85, verbose=True):
        """This callback implements a cyclical learning rate policy (CLR).
        This is a special case of Cyclic Learning Rates, where we have only 1 cycle.
        After the completion of 1 cycle, the learning rate will decrease rapidly to
        1/100th of its initial lowest value.

        # Arguments:
            max_lr: Float. Initial learning rate. This also sets the
                starting learning rate (which will be 10x smaller than
                this), and will increase to this value during the first cycle.
            end_percentage: Float. The percentage of all the epochs of training
                that will be dedicated to sharply decreasing the learning
                rate after the completion of 1 cycle. Must be between 0 and 1.
            scale_percentage: Float or None. If float, must be between 0 and 1.
                If None, it will compute the scale_percentage automatically
                based on the `end_percentage`.
            maximum_momentum: Optional. Sets the maximum momentum (initial)
                value, which gradually drops to its lowest value in half-cycle,
                then gradually increases again to stay constant at this max value.
                Can only be used with SGD Optimizer.
            minimum_momentum: Optional. Sets the minimum momentum at the end of
                the half-cycle. Can only be used with SGD Optimizer.
            verbose: Bool. Whether to print the current learning rate after every
                epoch.

        # Reference
            - [A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay](https://arxiv.org/abs/1803.09820)
            - [Super-Convergence: Very Fast Training of Residual Networks Using Large Learning Rates](https://arxiv.org/abs/1708.07120)
        """
        super(OneCycleLR, self).__init__()

        if end_percentage < 0.0 or end_percentage > 1.0:
            raise ValueError('`end_percentage` must be between 0 and 1')
        if scale_percentage is not None and (scale_percentage < 0.0 or scale_percentage > 1.0):
            raise ValueError('`scale_percentage` must be between 0 and 1')

        self.initial_lr = max_lr
        self.end_percentage = end_percentage
        self.scale = float(scale_percentage) if scale_percentage is not None else float(end_percentage)
        self.max_momentum = maximum_momentum
        self.min_momentum = minimum_momentum
        self.verbose = verbose

        if self.max_momentum is not None and self.min_momentum is not None:
            self._update_momentum = True
        else:
            self._update_momentum = False

        self.clr_iterations = 0.0
        self.history = {}

        self.epochs = None
        self.batch_size = None
        self.samples = None
        self.steps = None
        self.num_iterations = None
        self.mid_cycle_id = None

    def _reset(self):
        """Reset the callback."""
        self.clr_iterations = 0.0
        self.history = {}

    def compute_lr(self):
        """Compute the learning rate based on which phase of the cycle it is in.

        - If in the first half of training, the learning rate gradually increases.
        - If in the second half of training, the learning rate gradually decreases.
        - If in the final `end_percentage` portion of training, the learning rate
          is quickly reduced to near 1/100th of the original min learning rate.

        # Returns:
            the new learning rate
        """
        if self.clr_iterations > 2 * self.mid_cycle_id:
            current_percentage = self.clr_iterations - 2 * self.mid_cycle_id
            current_percentage /= float(self.num_iterations - 2 * self.mid_cycle_id)
            new_lr = self.initial_lr * (1.0 + (current_percentage * (1.0 - 100.0) / 100.0)) * self.scale
        elif self.clr_iterations > self.mid_cycle_id:
            current_percentage = 1.0 - (self.clr_iterations - self.mid_cycle_id) / self.mid_cycle_id
            new_lr = self.initial_lr * (1.0 + current_percentage * (self.scale * 100 - 1.0)) * self.scale
        else:
            current_percentage = self.clr_iterations / self.mid_cycle_id
            new_lr = self.initial_lr * (1.0 + current_percentage * (self.scale * 100 - 1.0)) * self.scale

        if self.clr_iterations == self.num_iterations:
            self.clr_iterations = 0

        return new_lr

    def compute_momentum(self):
        """Compute the momentum based on which phase of the cycle it is in.

        - If in the first half of training, the momentum gradually decreases.
        - If in the second half of training, the momentum gradually increases.
        - If in the final `end_percentage` portion of training, the momentum value
          is kept constant at the maximum initial value.

        # Returns:
            the new momentum value
        """
        if self.clr_iterations > 2 * self.mid_cycle_id:
            new_momentum = self.max_momentum
        elif self.clr_iterations > self.mid_cycle_id:
            current_percentage = 1.0 - (self.clr_iterations - self.mid_cycle_id) / float(self.mid_cycle_id)
            new_momentum = self.max_momentum - current_percentage * (self.max_momentum - self.min_momentum)
        else:
            current_percentage = self.clr_iterations / float(self.mid_cycle_id)
            new_momentum = self.max_momentum - current_percentage * (self.max_momentum - self.min_momentum)

        return new_momentum

    def on_train_begin(self, logs={}):
        logs = logs or {}

        self.epochs = self.params['epochs']
        self.batch_size = self.params['batch_size']
        self.samples = self.params['samples']
        self.steps = self.params['steps']

        if self.steps is not None:
            self.num_iterations = self.epochs * self.steps
        else:
            if (self.samples % self.batch_size) == 0:
                remainder = 0
            else:
                remainder = 1
            self.num_iterations = (self.epochs + remainder) * self.samples // self.batch_size

        self.mid_cycle_id = int(self.num_iterations * (1.0 - self.end_percentage) / float(2))

        self._reset()
        K.set_value(self.model.optimizer.lr, self.compute_lr())

        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError('Momentum can be updated only on SGD optimizer !')
            new_momentum = self.compute_momentum()
            K.set_value(self.model.optimizer.momentum, new_momentum)

    def on_batch_end(self, epoch, logs=None):
        logs = logs or {}

        self.clr_iterations += 1
        new_lr = self.compute_lr()

        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        K.set_value(self.model.optimizer.lr, new_lr)

        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError('Momentum can be updated only on SGD optimizer !')
            new_momentum = self.compute_momentum()
            self.history.setdefault('momentum', []).append(K.get_value(self.model.optimizer.momentum))
            K.set_value(self.model.optimizer.momentum, new_momentum)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

    def on_epoch_end(self, epoch, logs=None):
        if self.verbose:
            if self._update_momentum:
                print(' - lr: %0.5f - momentum: %0.2f ' %
                      (self.history['lr'][-1], self.history['momentum'][-1]))
            else:
                print(' - lr: %0.5f ' % self.history['lr'][-1])
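# Hedged usage sketch (not part of the original source): assumes a model
# compiled with the SGD optimizer (required for the momentum updates) and
# training arrays `X_train`/`y_train`; `max_lr` would normally be picked
# from an LRFinder run such as the one shown after the next class.
lr_manager = OneCycleLR(max_lr=0.1, end_percentage=0.1,
                        maximum_momentum=0.95, minimum_momentum=0.85)
model.fit(X_train, y_train, epochs=25, batch_size=128, callbacks=[lr_manager])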
class LRFinder(Callback):
    def __init__(self, num_samples, batch_size, minimum_lr=1e-5, maximum_lr=10.,
                 lr_scale='exp', validation_data=None, validation_sample_rate=5,
                 stopping_criterion_factor=4., loss_smoothing_beta=0.98,
                 save_dir=None, verbose=True):
        """This class uses the Cyclic Learning Rate history to find a
        set of learning rates that can be good initializations for the
        One-Cycle training proposed by Leslie Smith in the paper referenced
        below. A port of the Fast.ai implementation for Keras.

        # Note
        This requires that the model be trained for exactly 1 epoch. If the model
        is trained for more epochs, then the metric calculations are only done for
        the first epoch.

        # Interpretation
        Upon visualizing the loss plot, check where the loss starts to increase
        rapidly. Choose a learning rate somewhat prior to the corresponding
        position in the plot for faster convergence. This will be the maximum lr,
        and is the value to pass as the `max_lr` argument to the OneCycleLR
        callback. Since the plot is in log-scale, you need to compute 10 ^ (-k)
        of the x-axis.

        # Arguments:
            num_samples: Integer. Number of samples in the dataset.
            batch_size: Integer. Batch size during training.
            minimum_lr: Float. Initial learning rate (and the minimum).
            maximum_lr: Float. Final learning rate (and the maximum).
            lr_scale: Can be one of ['exp', 'linear']. Chooses the type of
                scaling for each update to the learning rate during subsequent
                batches. Choose 'exp' for large range and 'linear' for small range.
            validation_data: Requires the validation dataset as a tuple of
                (X, y) belonging to the validation set. If provided, will use the
                validation set to compute the loss metrics. Else uses the training
                batch loss. Will warn if not provided to alert the user.
            validation_sample_rate: Positive or Negative Integer. Number of batches
                to sample from the validation set per iteration of the LRFinder.
                A larger number of samples will reduce the variance but will take
                a longer time to execute per batch.
                If positive (> 0), will sample from the validation dataset.
                If negative, will use the entire dataset.
            stopping_criterion_factor: Integer or None. A factor which is used
                to measure a large increase in the loss value during training.
                Since callbacks cannot stop training of a model, it will simply
                stop logging the additional values from the epochs after this
                stopping criterion has been met.
                If None, this check will not be performed.
            loss_smoothing_beta: Float. The smoothing factor for the moving
                average of the loss function.
            save_dir: Optional, String. If passed a directory path, the callback
                will save the running loss and learning rates to two separate numpy
                arrays inside this directory. If the directory in this path does not
                exist, it will be created.
            verbose: Whether to print the learning rate after every batch of training.

        # References:
            - [A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch size, momentum, and weight decay](https://arxiv.org/abs/1803.09820)
        """
        super(LRFinder, self).__init__()

        if lr_scale not in ['exp', 'linear']:
            raise ValueError("`lr_scale` must be one of ['exp', 'linear']")

        if validation_data is not None:
            self.validation_data = validation_data
            self.use_validation_set = True

            if validation_sample_rate > 0 or validation_sample_rate < 0:
                self.validation_sample_rate = validation_sample_rate
            else:
                raise ValueError('`validation_sample_rate` must be a positive or negative integer other than 0')
        else:
            self.use_validation_set = False
            self.validation_sample_rate = 0

        self.num_samples = num_samples
        self.batch_size = batch_size
        self.initial_lr = minimum_lr
        self.final_lr = maximum_lr
        self.lr_scale = lr_scale
        self.stopping_criterion_factor = stopping_criterion_factor
        self.loss_smoothing_beta = loss_smoothing_beta
        self.save_dir = save_dir
        self.verbose = verbose

        self.num_batches_ = num_samples // batch_size
        self.current_lr_ = minimum_lr

        if lr_scale == 'exp':
            self.lr_multiplier_ = (maximum_lr / float(minimum_lr)) ** (1.0 / float(self.num_batches_))
        else:
            extra_batch = int((num_samples % batch_size) != 0)
            self.lr_multiplier_ = np.linspace(minimum_lr, maximum_lr,
                                              num=self.num_batches_ + extra_batch)

        if self.validation_sample_rate < 0:
            self.validation_sample_rate = self.validation_data[0].shape[0] // batch_size

        self.current_batch_ = 0
        self.current_epoch_ = 0
        self.best_loss_ = 1e6
        self.running_loss_ = 0.0
        self.history = {}

    def on_train_begin(self, logs=None):
        self.current_epoch_ = 1
        K.set_value(self.model.optimizer.lr, self.initial_lr)
        warnings.simplefilter('ignore')

    def on_epoch_begin(self, epoch, logs=None):
        self.current_batch_ = 0
        if self.current_epoch_ > 1:
            warnings.warn('\n\nLearning rate finder should be used only with a '
                          'single epoch. Hereafter, the callback will not '
                          'measure the losses.\n\n')

    def on_batch_begin(self, batch, logs=None):
        self.current_batch_ += 1

    def on_batch_end(self, batch, logs=None):
        if self.current_epoch_ > 1:
            return

        if self.use_validation_set:
            X, Y = self.validation_data[0], self.validation_data[1]

            # sample `validation_sample_rate` batches from the validation set
            num_samples = self.batch_size * self.validation_sample_rate
            if num_samples > X.shape[0]:
                num_samples = X.shape[0]

            idx = np.random.choice(X.shape[0], num_samples, replace=False)
            x = X[idx]
            y = Y[idx]

            values = self.model.evaluate(x, y, batch_size=self.batch_size, verbose=False)
            loss = values[0]
        else:
            loss = logs['loss']

        # Smooth the loss value and bias-correct it. (The flattened original
        # used `loss` in both terms of the moving average, which made the
        # smoothing a no-op; the exponential moving average intended by
        # `loss_smoothing_beta` is used here.)
        self.running_loss_ = (self.loss_smoothing_beta * self.running_loss_ +
                              (1.0 - self.loss_smoothing_beta) * loss)
        running_loss = self.running_loss_ / (1.0 - self.loss_smoothing_beta ** self.current_batch_)

        if (self.current_batch_ > 1 and self.stopping_criterion_factor is not None
                and running_loss > self.stopping_criterion_factor * self.best_loss_):
            if self.verbose:
                print(' - LRFinder: Skipping iteration since loss is %d times as large as best loss (%0.4f)'
                      % (self.stopping_criterion_factor, self.best_loss_))
            return

        if running_loss < self.best_loss_ or self.current_batch_ == 1:
            self.best_loss_ = running_loss

        current_lr = K.get_value(self.model.optimizer.lr)

        self.history.setdefault('running_loss_', []).append(running_loss)
        if self.lr_scale == 'exp':
            self.history.setdefault('log_lrs', []).append(np.log10(current_lr))
        else:
            self.history.setdefault('log_lrs', []).append(current_lr)

        # compute the learning rate for the next batch and update the optimizer
        if self.lr_scale == 'exp':
            current_lr *= self.lr_multiplier_
        else:
            current_lr = self.lr_multiplier_[self.current_batch_ - 1]

        K.set_value(self.model.optimizer.lr, current_lr)

        # save the other metrics as well
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        if self.verbose:
            if self.use_validation_set:
                print(' - LRFinder: val_loss: %1.4f - lr = %1.8f ' % (values[0], current_lr))
            else:
                print(' - LRFinder: lr = %1.8f ' % current_lr)

    def on_epoch_end(self, epoch, logs=None):
        if self.save_dir is not None and self.current_epoch_ <= 1:
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)

            losses_path = os.path.join(self.save_dir, 'losses.npy')
            lrs_path = os.path.join(self.save_dir, 'lrs.npy')

            np.save(losses_path, self.losses)
            np.save(lrs_path, self.lrs)

            if self.verbose:
                print('\tLR Finder : Saved the losses and learning rate values in path : {%s}' % self.save_dir)

        self.current_epoch_ += 1
        warnings.simplefilter('default')

    def plot_schedule(self, clip_beginning=None, clip_endding=None):
        """Plots the schedule from the callback itself.

        # Arguments:
            clip_beginning: Integer or None. If positive integer, it will
                remove the specified portion of the loss graph to remove the large
                loss values in the beginning of the graph.
            clip_endding: Integer or None. If negative integer, it will
                remove the specified portion of the ending of the loss graph to
                remove the sharp increase in the loss values at high learning rates.
        """
        try:
            import matplotlib.pyplot as plt
            plt.style.use('seaborn-white')
        except ImportError:
            print('Matplotlib not found. Please use `pip install matplotlib` first.')
            return

        if clip_beginning is not None and clip_beginning < 0:
            clip_beginning = -clip_beginning
        if clip_endding is not None and clip_endding > 0:
            clip_endding = -clip_endding

        losses = self.losses
        lrs = self.lrs

        if clip_beginning:
            losses = losses[clip_beginning:]
            lrs = lrs[clip_beginning:]
        if clip_endding:
            losses = losses[:clip_endding]
            lrs = lrs[:clip_endding]

        plt.plot(lrs, losses)
        plt.title('Learning rate vs Loss')
        plt.xlabel('learning rate')
        plt.ylabel('loss')
        plt.show()

    @classmethod
    def restore_schedule_from_dir(cls, directory, clip_beginning=None, clip_endding=None):
        """Loads the training history from the saved numpy files in the given directory.

        # Arguments:
            directory: String. Path to the directory where the serialized numpy
                arrays of the loss and learning rates are saved.
            clip_beginning: Integer or None. If positive integer, it will
                remove the specified portion of the loss graph to remove the large
                loss values in the beginning of the graph.
            clip_endding: Integer or None. If negative integer, it will
                remove the specified portion of the ending of the loss graph to
                remove the sharp increase in the loss values at high learning rates.

        Returns:
            tuple of (losses, learning rates)
        """
        if clip_beginning is not None and clip_beginning < 0:
            clip_beginning = -clip_beginning
        if clip_endding is not None and clip_endding > 0:
            clip_endding = -clip_endding

        losses_path = os.path.join(directory, 'losses.npy')
        lrs_path = os.path.join(directory, 'lrs.npy')

        if not os.path.exists(losses_path) or not os.path.exists(lrs_path):
            print('%s and %s could not be found at directory : {%s}'
                  % (losses_path, lrs_path, directory))
            losses = None
            lrs = None
        else:
            losses = np.load(losses_path)
            lrs = np.load(lrs_path)

            if clip_beginning:
                losses = losses[clip_beginning:]
                lrs = lrs[clip_beginning:]
            if clip_endding:
                losses = losses[:clip_endding]
                lrs = lrs[:clip_endding]

        return losses, lrs

    @classmethod
    def plot_schedule_from_file(cls, directory, clip_beginning=None, clip_endding=None):
        """Plots the schedule from the saved numpy arrays of the loss and learning
        rate values in the specified directory.

        # Arguments:
            directory: String. Path to the directory where the serialized numpy
                arrays of the loss and learning rates are saved.
            clip_beginning: Integer or None. If positive integer, it will
                remove the specified portion of the loss graph to remove the large
                loss values in the beginning of the graph.
            clip_endding: Integer or None. If negative integer, it will
                remove the specified portion of the ending of the loss graph to
                remove the sharp increase in the loss values at high learning rates.
        """
        try:
            import matplotlib.pyplot as plt
            plt.style.use('seaborn-white')
        except ImportError:
            print('Matplotlib not found. Please use `pip install matplotlib` first.')
            return

        losses, lrs = cls.restore_schedule_from_dir(directory,
                                                    clip_beginning=clip_beginning,
                                                    clip_endding=clip_endding)
        if losses is None or lrs is None:
            return
        else:
            plt.plot(lrs, losses)
            plt.title('Learning rate vs Loss')
            plt.xlabel('learning rate')
            plt.ylabel('loss')
            plt.show()

    @property
    def lrs(self):
        return np.array(self.history['log_lrs'])

    @property
    def losses(self):
        return np.array(self.history['running_loss_'])
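# Hedged usage sketch (not part of the original source): train for exactly one
# epoch with LRFinder attached, then read the `max_lr` for OneCycleLR off the
# loss-vs-learning-rate plot (assumes a compiled `model` and `X_train`/`y_train`).
lr_finder = LRFinder(num_samples=len(X_train), batch_size=128,
                     minimum_lr=1e-5, maximum_lr=10., lr_scale='exp',
                     save_dir='lr_finder_logs/')
model.fit(X_train, y_train, epochs=1, batch_size=128, callbacks=[lr_finder])
lr_finder.plot_schedule(clip_beginning=10, clip_endding=-5)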
class CosineAnnealingScheduler(Callback):
    """Cosine annealing scheduler."""

    def __init__(self, T_max, eta_max, eta_min=0, verbose=0, epoch_start=80,
                 restart_epochs=None, gamma=1, expansion=1, flat_end=False):
        super(CosineAnnealingScheduler, self).__init__()
        self.epoch_start = epoch_start
        self.expansion = expansion
        self.T_max = T_max
        self.eta_max = eta_max
        self.eta_min = eta_min
        self.verbose = verbose
        self.restart_epochs = restart_epochs
        self.gamma = gamma
        self.flat_end = flat_end

    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'learning_rate'):
            raise ValueError('Optimizer must have a "learning_rate" attribute.')
        if epoch > (self.epoch_start - 1):
            if self.restart_epochs is None:
                learning_rate = self.eta_min + (
                    (self.eta_max * self.gamma - self.eta_min)
                    * (1 + math.cos(math.pi * (epoch - self.epoch_start) / self.T_max)) / 2)
                K.set_value(self.model.optimizer.learning_rate, learning_rate)
            else:
                learning_rate = self.eta_min + (
                    (self.eta_max * self.gamma - self.eta_min)
                    * (1 + math.cos(math.pi * ((epoch % (self.restart_epochs + self.epoch_start))
                                               - self.epoch_start) / self.T_max)) / 2)
                K.set_value(self.model.optimizer.learning_rate, learning_rate)
                if learning_rate <= self.eta_min:
                    self.eta_max *= self.gamma
                    self.T_max *= self.expansion
            # the flattened original referenced `T_max` without `self.` here,
            # which raised a NameError
            if self.flat_end and epoch >= (self.epoch_start - 1) + self.T_max:
                learning_rate = self.eta_min
        else:
            learning_rate = self.model.optimizer.learning_rate
        if self.verbose > 0:
            print('\nEpoch %05d: CosineAnnealingScheduler setting learning rate to %s.'
                  % (epoch + 1, learning_rate))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['learning_rate'] = K.get_value(self.model.optimizer.learning_rate)
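# Hedged usage sketch (not part of the original source): anneal the learning
# rate from eta_max towards eta_min over T_max epochs once epoch_start is reached.
cosine_scheduler = CosineAnnealingScheduler(T_max=20, eta_max=0.05, eta_min=1e-4,
                                            epoch_start=10, verbose=1)
model.fit(X_train, y_train, epochs=50, batch_size=128, callbacks=[cosine_scheduler])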
class CyclicLR(Callback):
    """This callback implements a cyclical learning rate policy (CLR).
    The method cycles the learning rate between two boundaries with
    some constant frequency, as detailed in this paper (https://arxiv.org/abs/1506.01186).
    The amplitude of the cycle can be scaled on a per-iteration or
    per-cycle basis.
    This class has three built-in policies, as put forth in the paper.

    "triangular":
        A basic triangular cycle w/ no amplitude scaling.
    "triangular2":
        A basic triangular cycle that scales initial amplitude by half each cycle.
    "exp_range":
        A cycle that scales initial amplitude by gamma**(cycle iterations) at each
        cycle iteration.

    For more detail, please see the paper.

    # Example
    ```python
    clr = CyclicLR(base_lr=0.001, max_lr=0.006,
                   step_size=2000., mode='triangular')
    model.fit(X_train, Y_train, callbacks=[clr])
    ```

    Class also supports custom scaling functions:
    ```python
    clr_fn = lambda x: 0.5*(1+np.sin(x*np.pi/2.))
    clr = CyclicLR(base_lr=0.001, max_lr=0.006,
                   step_size=2000., scale_fn=clr_fn,
                   scale_mode='cycle')
    model.fit(X_train, Y_train, callbacks=[clr])
    ```

    # Arguments
        base_lr: initial learning rate which is the
            lower boundary in the cycle.
        max_lr: upper boundary in the cycle. Functionally,
            it defines the cycle amplitude (max_lr - base_lr).
            The lr at any cycle is the sum of base_lr
            and some scaling of the amplitude; therefore
            max_lr may not actually be reached depending on
            scaling function.
        step_size: number of training iterations per
            half cycle. Authors suggest setting step_size
            2-8 x training iterations in epoch.
        mode: one of {triangular, triangular2, exp_range}.
            Default 'triangular'.
            Values correspond to policies detailed above.
            If scale_fn is not None, this argument is ignored.
        gamma: constant in 'exp_range' scaling function:
            gamma**(cycle iterations)
        scale_fn: Custom scaling policy defined by a single
            argument lambda function, where
            0 <= scale_fn(x) <= 1 for all x >= 0.
            The mode parameter is ignored.
        scale_mode: {'cycle', 'iterations'}.
            Defines whether scale_fn is evaluated on
            cycle number or cycle iterations (training
            iterations since start of cycle). Default is 'cycle'.
    """

    def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000.,
                 mode='triangular', gamma=1., scale_fn=None, scale_mode='cycle'):
        super(CyclicLR, self).__init__()

        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size
        self.mode = mode
        self.gamma = gamma
        if scale_fn is None:
            if self.mode == 'triangular':
                self.scale_fn = lambda x: 1.
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = lambda x: 1 / (2. ** (x - 1))
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = lambda x: gamma ** x
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        self.clr_iterations = 0.
        self.trn_iterations = 0.
        self.history = {}

        self._reset()

    def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
        """Resets cycle iterations.
        Optional boundary/step size adjustment.
        """
        if new_base_lr is not None:
            self.base_lr = new_base_lr
        if new_max_lr is not None:
            self.max_lr = new_max_lr
        if new_step_size is not None:
            self.step_size = new_step_size
        self.clr_iterations = 0.

    def clr(self):
        cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
        x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
        if self.scale_mode == 'cycle':
            return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(cycle)
        else:
            return self.base_lr + (self.max_lr - self.base_lr) * np.maximum(0, (1 - x)) * self.scale_fn(self.clr_iterations)

    def on_train_begin(self, logs={}):
        logs = logs or {}
        if self.clr_iterations == 0:
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            K.set_value(self.model.optimizer.lr, self.clr())

    def on_batch_end(self, epoch, logs=None):
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1

        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        K.set_value(self.model.optimizer.lr, self.clr())
# Assumed imports for the helpers below (not present in the flattened original):
import gc
import importlib
import keras
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt


class History(object):
    """Custom class to help get log data from ``keras.callbacks.History`` objects.

    :param history: a ``keras.callbacks.History`` object or ``None``.
    """

    def __init__(self, history=None):
        if history is not None:
            self.epoch = history.epoch
            self.history = history.history
        else:
            self.epoch = []
            self.history = {}
def concatenate_history(hlist, reindex_epoch=False):
    """A helper function to concatenate training history objects
    (``keras.callbacks.History``) into a single one, with the help of the
    ``History`` class.

    :param hlist: a list of ``keras.callbacks.History`` objects to concatenate.
    :param reindex_epoch: True or False, whether to reindex epoch counters into an increasing order.
    :return his: an instance of the ``History`` class that contains the concatenated epoch and training history information.
    """
    his = History()
    for h in hlist:
        his.epoch = his.epoch + h.epoch
        for key, value in h.history.items():
            his.history.setdefault(key, [])
            his.history[key] = his.history[key] + value
    if reindex_epoch:
        his.epoch = list(np.arange(0, len(his.epoch)))
    return his
def plot_from_history(history):
    """Plot losses in a training history.

    :param history: a ``keras.callbacks.History`` or (this module's) ``History`` object.
    """
    assert isinstance(history, (keras.callbacks.History, History)), \
        "history must be a ``keras.callbacks.History`` or (this module's) ``History`` object."
    epoch = history.epoch
    val_exist = 'val_loss' in history.history
    plt.plot(epoch, history.history['loss'], '.-', label='train')
    if val_exist:
        plt.plot(epoch, history.history['val_loss'], '.-', label='valid')
    plt.xlabel('epoch')
    plt.ylabel('losses')
    plt.legend()


def save_history_to_csv(history, filepath):
    """Save a training history into a csv file.

    :param history: a ``History`` callback instance from a ``Model`` instance.
    :param filepath: a string filepath.
    """
    hist = history.history
    hist['epoch'] = history.epoch
    df = pd.DataFrame.from_dict(hist)
    df.to_csv(filepath, index=False)
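# Hedged usage sketch (not part of the original source): stitch together the
# histories of two consecutive `fit` calls, plot them, and save them to disk.
h1 = model.fit(X_train, y_train, epochs=10)
h2 = model.fit(X_train, y_train, epochs=10)
full_history = concatenate_history([h1, h2], reindex_epoch=True)
plot_from_history(full_history)
save_history_to_csv(full_history, 'history.csv')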
def reset_keras(per_process_gpu_memory_fraction=1.0):
    """Reset the Keras session, set the GPU configuration, and collect unused memory.

    This is adapted from [jaycangel's post on the fastai forum](https://forums.fast.ai/t/how-could-i-release-gpu-memory-of-keras/2023/18).
    Calling this before any training will clear the Keras session, so a Keras model
    must be redefined and compiled again afterwards. It can be used during a
    hyperparameter scan or K-fold validation, when model training is invoked several times.

    :param per_process_gpu_memory_fraction: tensorflow's config.gpu_options.per_process_gpu_memory_fraction
    """
    sess = K.get_session()
    K.clear_session()
    sess.close()
    gc.collect()
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction
    config.gpu_options.visible_device_list = '0'
    K.set_session(tf.Session(config=config))


def cuda_release_memory():
    """Force cuda to release GPU memory by closing the device.

    :return cuda: numba's cuda module.
    """
    spec = importlib.util.find_spec('numba')
    if spec is None:
        raise Exception('numba module cannot be found. This function requires numba to be installed.')
    else:
        from numba import cuda
        cuda.select_device(0)
        cuda.close()
        return cuda
def moving_window_avg(x, window=5):
    """Return a moving-window average.

    :param x: a numpy array.
    :param window: an integer, number of data points for the window size.
    """
    return pd.DataFrame(x).rolling(window=window, min_periods=1).mean().values.squeeze()


def set_momentum(optimizer, mom_val):
    """Helper to set the momentum of Keras optimizers.

    :param optimizer: Keras optimizer.
    :param mom_val: value of momentum.
    """
    keys = dir(optimizer)
    if 'momentum' in keys:
        K.set_value(optimizer.momentum, mom_val)
    if 'rho' in keys:
        K.set_value(optimizer.rho, mom_val)
    if 'beta_1' in keys:
        K.set_value(optimizer.beta_1, mom_val)


def set_lr(optimizer, lr):
    """Helper to set the learning rate of Keras optimizers.

    :param optimizer: Keras optimizer.
    :param lr: value of the learning rate.
    """
    K.set_value(optimizer.lr, lr)
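# Hedged usage sketch (not part of the original source): manually adjust the
# optimizer between training stages and smooth a recorded loss curve.
set_lr(model.optimizer, 1e-3)
set_momentum(model.optimizer, 0.9)
smoothed_loss = moving_window_avg(np.array(full_history.history['loss']), window=5)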
# Assumed imports for the layers below (not present in the flattened original):
from keras import initializers, regularizers, constraints
from keras.layers import Layer


def dot_product(x, kernel):
    # Batched dot product along the last axis, used by the Attention layer below.
    return tf.tensordot(x, kernel, axes=1)
class Attention(Layer):
    def __init__(self, W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, return_attention=False, **kwargs):
        """Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]

        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.

        :param kwargs:
        """
        self.supports_masking = True
        self.return_attention = return_attention
        self.init = initializers.get('zeros')

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        super(Attention, self).__init__(**kwargs)

    def get_config(self):
        config = super().get_config().copy()
        config.update({
            'supports_masking': self.supports_masking,
            'return_attention': self.return_attention,
            'init': self.init,
            'W_regularizer': self.W_regularizer,
            'W_constraint': self.W_constraint,
            'b_regularizer': self.b_regularizer,
            'b_constraint': self.b_constraint,
            'bias': self.bias,
        })
        return config

    def build(self, input_shape):
        assert len(input_shape) == 3

        self.W = self.add_weight(shape=(int(input_shape[-1]),),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight(shape=(int(input_shape[1]),),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None

        self.built = True

    def compute_mask(self, input, input_mask=None):
        # do not pass the mask to the next layers
        return None

    def call(self, x, mask=None):
        eij = dot_product(x, self.W)

        if self.bias:
            eij += self.b

        eij = K.tanh(eij)
        a = K.exp(eij)

        if mask is not None:
            a *= K.cast(mask, K.floatx())

        # add epsilon to avoid division by zero when all timesteps are masked
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        weighted_input = x * K.expand_dims(a)
        result = K.sum(weighted_input, axis=1)

        if self.return_attention:
            return [result, a]
        return result

    def compute_output_shape(self, input_shape):
        if self.return_attention:
            return [(input_shape[0], input_shape[-1]),
                    (input_shape[0], input_shape[1])]
        else:
            return (input_shape[0], input_shape[-1])
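# Hedged usage sketch (not part of the original source): attention pooling over
# the timesteps of a recurrent encoder (written against tf.keras; the layer
# itself only assumes the keras-style imports noted above).
inputs = tf.keras.layers.Input(shape=(100,))
h = tf.keras.layers.Embedding(20000, 128)(inputs)
h = tf.keras.layers.LSTM(64, return_sequences=True)(h)  # (batch, steps, features)
h = Attention()(h)                                      # (batch, features)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(h)
attn_model = tf.keras.Model(inputs, outputs)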
class LayerNormalization(Layer):
    """Implementation of Layer Normalization (https://arxiv.org/abs/1607.06450).

    "Unlike batch normalization, layer normalization performs exactly
    the same computation at training and test times."
    """

    def __init__(self, axis=-1, **kwargs):
        self.axis = axis
        super().__init__(**kwargs)

    def get_config(self):
        config = super().get_config()
        config['axis'] = self.axis
        return config

    def build(self, input_shape):
        dim = input_shape[-1]
        self.gain = self.add_weight(name='gain', shape=(dim,),
                                    initializer='ones', trainable=True)
        self.bias = self.add_weight(name='bias', shape=(dim,),
                                    initializer='zeros', trainable=True)
        return super().build(input_shape)

    def call(self, inputs, **kwargs):
        mean = K.mean(inputs, axis=self.axis, keepdims=True)
        variance = K.mean(K.square(inputs - mean), axis=self.axis, keepdims=True)
        epsilon = K.constant(1e-5, dtype=K.floatx())
        normalized_inputs = (inputs - mean) / K.sqrt(variance + epsilon)
        result = self.gain * normalized_inputs + self.bias
        return result
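# Hedged usage sketch (not part of the original source): over the last axis the
# layer computes y = gain * (x - mean) / sqrt(var + 1e-5) + bias, identically
# at training and test time.
x = tf.keras.layers.Input(shape=(64,))
y = LayerNormalization()(x)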
class FocalLoss(tf.keras.losses.Loss):
    def __init__(self, gamma=2.0, alpha=4.0,
                 reduction=tf.keras.losses.Reduction.AUTO, name='focal_loss'):
        """Focal loss for multi-classification:
        FL(p_t) = -alpha * (1 - p_t)^{gamma} * ln(p_t)

        Notice: y_pred is the probability after softmax. The gradient is
        d(FL)/d(p_t), not d(FL)/d(x) as described in the paper:
        d(FL)/d(p_t) * [p_t * (1 - p_t)] = d(FL)/d(x)

        Focal Loss for Dense Object Detection
        https://arxiv.org/abs/1708.02002

        Keyword Arguments:
            gamma {float} -- (default: {2.0})
            alpha {float} -- (default: {4.0})
        """
        super(FocalLoss, self).__init__(reduction=reduction, name=name)
        self.gamma = float(gamma)
        self.alpha = float(alpha)

    def call(self, y_true, y_pred):
        """
        Arguments:
            y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls]
            y_pred {tensor} -- model's output, shape of [batch_size, num_cls]

        Returns:
            [tensor] -- loss.
        """
        epsilon = 1e-9
        y_true = tf.convert_to_tensor(y_true, tf.float32)
        y_pred = tf.convert_to_tensor(y_pred, tf.float32)

        model_out = tf.add(y_pred, epsilon)
        ce = tf.multiply(y_true, -tf.math.log(model_out))
        weight = tf.multiply(y_true, tf.pow(tf.subtract(1.0, model_out), self.gamma))
        fl = tf.multiply(self.alpha, tf.multiply(weight, ce))
        reduced_fl = tf.reduce_max(fl, axis=1)
        return tf.reduce_mean(reduced_fl)
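# Hedged usage sketch (not part of the original source): FocalLoss expects
# one-hot labels and softmax probabilities from the model's output layer.
model.compile(optimizer='adam', loss=FocalLoss(gamma=2.0, alpha=4.0),
              metrics=['accuracy'])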
class LDAMLoss(tf.keras.losses.Loss):
    def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30,
                 reduction=tf.keras.losses.Reduction.AUTO, name='LDAM'):
        super().__init__(reduction=reduction, name=name)
        # Per-class margins proportional to n_j^{-1/4}, rescaled so that the
        # largest margin equals max_m. (The flattened original read
        # `m_list *= max_m(m_list.max())`, which is a TypeError; the intended
        # rescaling from the LDAM paper is used here.)
        m_list = 1.0 / np.sqrt(np.sqrt(cls_num_list))
        m_list = m_list * (max_m / m_list.max())
        self.m_list = tf.convert_to_tensor(m_list, tf.float32)
        assert s > 0
        self.s = s
        self.w = weight

    def __call__(self, y_true, y_pred):
        y_true = tf.convert_to_tensor(y_true, tf.float32)
        y_pred = tf.convert_to_tensor(y_pred, tf.float32)
        # The original row is truncated at this point; the remainder is a
        # hedged sketch of the standard LDAM computation
        # (Cao et al., https://arxiv.org/abs/1906.07413): subtract the class
        # margin from the true-class logit, then apply a scaled softmax
        # cross-entropy. The class weights (`self.w`) are left unapplied here.
        batch_m = tf.matmul(y_true, self.m_list[:, None])  # (batch, 1)
        y_m = y_pred - batch_m
        output = tf.where(tf.cast(y_true, tf.bool), y_m, y_pred)
        return tf.nn.softmax_cross_entropy_with_logits(labels=y_true,
                                                       logits=self.s * output)
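# Hedged usage sketch (not part of the original source): `cls_num_list` holds
# the per-class sample counts of the imbalanced training set; the loss expects
# raw logits rather than softmax probabilities.
ldam = LDAMLoss(cls_num_list=[5000, 500, 50], max_m=0.5, s=30)
model.compile(optimizer='sgd', loss=ldam)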
class Lookahead(tf.keras.optimizers.Optimizer):
    """This class allows extending optimizers with the lookahead mechanism.

    The mechanism is proposed by Michael R. Zhang et.al in the paper
    [Lookahead Optimizer: k steps forward, 1 step back]
    (https://arxiv.org/abs/1907.08610v1). The optimizer iteratively updates two
    sets of weights: the search directions for weights are chosen by the inner
    optimizer, while the "slow weights" are updated each `k` steps based on the
    directions of the "fast weights" and the two sets of weights are
    synchronized. This method improves the learning stability and lowers the
    variance of its inner optimizer.

    Example of usage:
    ```python
    opt = tf.keras.optimizers.SGD(learning_rate)
    opt = tfa.optimizers.Lookahead(opt)
    ```
    """

    @typechecked
    def __init__(
        self,
        optimizer: Union[tf.keras.optimizers.Optimizer, str],
        sync_period: int = 6,
        slow_step_size: FloatTensorLike = 0.5,
        name: str = 'Lookahead',
        **kwargs,
    ):
        """Wrap optimizer with the lookahead mechanism.

        Args:
            optimizer: The original optimizer that will be used to compute
                and apply the gradients.
            sync_period: An integer. The synchronization period of lookahead.
                Enable lookahead mechanism by setting it with a positive value.
            slow_step_size: A floating point value.
                The ratio for updating the slow weights.
            name: Optional name for the operations created when applying
                gradients. Defaults to "Lookahead".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`,
                `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
                by norm; `clipvalue` is clip gradients by value, `decay` is
                included for backward compatibility to allow time inverse
                decay of learning rate. `lr` is included for backward
                compatibility, recommended to use `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        if isinstance(optimizer, str):
            optimizer = tf.keras.optimizers.get(optimizer)
        if not isinstance(optimizer, tf.keras.optimizers.Optimizer):
            raise TypeError(
                'optimizer is not an object of tf.keras.optimizers.Optimizer'
            )
        self._optimizer = optimizer
        self._set_hyper('sync_period', sync_period)
        self._set_hyper('slow_step_size', slow_step_size)
        self._initialized = False

    def _create_slots(self, var_list):
        self._optimizer._create_slots(var_list=var_list)
        for var in var_list:
            self.add_slot(var, 'slow')

    def _create_hypers(self):
        self._optimizer._create_hypers()

    def _prepare(self, var_list):
        return self._optimizer._prepare(var_list=var_list)

    def apply_gradients(self, grads_and_vars, name=None):
        self._optimizer._iterations = self.iterations
        return super().apply_gradients(grads_and_vars, name)

    def _init_op(self, var):
        slow_var = self.get_slot(var, 'slow')
        return slow_var.assign(
            tf.where(
                tf.equal(
                    self.iterations,
                    tf.constant(0, dtype=self.iterations.dtype),
                ),
                var,
                slow_var,
            ),
            use_locking=self._use_locking,
        )

    def _look_ahead_op(self, var):
        var_dtype = var.dtype.base_dtype
        slow_var = self.get_slot(var, 'slow')
        local_step = tf.cast(self.iterations + 1, tf.dtypes.int64)
        sync_period = self._get_hyper('sync_period', tf.dtypes.int64)
        slow_step_size = self._get_hyper('slow_step_size', var_dtype)
        step_back = slow_var + slow_step_size * (var - slow_var)
        sync_cond = tf.equal(
            tf.math.floordiv(local_step, sync_period) * sync_period, local_step
        )
        with tf.control_dependencies([step_back]):
            slow_update = slow_var.assign(
                tf.where(sync_cond, step_back, slow_var),
                use_locking=self._use_locking,
            )
            var_update = var.assign(
                tf.where(sync_cond, step_back, var),
                use_locking=self._use_locking,
            )
        return tf.group(slow_update, var_update)

    @property
    def weights(self):
        return self._weights + self._optimizer.weights

    def _resource_apply_dense(self, grad, var):
        init_op = self._init_op(var)
        with tf.control_dependencies([init_op]):
            train_op = self._optimizer._resource_apply_dense(grad, var)
            with tf.control_dependencies([train_op]):
                look_ahead_op = self._look_ahead_op(var)
        return tf.group(init_op, train_op, look_ahead_op)

    def _resource_apply_sparse(self, grad, var, indices):
        init_op = self._init_op(var)
        with tf.control_dependencies([init_op]):
            train_op = self._optimizer._resource_apply_sparse(grad, var, indices)
            with tf.control_dependencies([train_op]):
                look_ahead_op = self._look_ahead_op(var)
        return tf.group(init_op, train_op, look_ahead_op)

    def get_config(self):
        config = {
            'optimizer': tf.keras.optimizers.serialize(self._optimizer),
            'sync_period': self._serialize_hyperparameter('sync_period'),
            'slow_step_size': self._serialize_hyperparameter('slow_step_size'),
        }
        base_config = super().get_config()
        return {**base_config, **config}

    @property
    def learning_rate(self):
        return self._optimizer._get_hyper('learning_rate')

    @learning_rate.setter
    def learning_rate(self, learning_rate):
        self._optimizer._set_hyper('learning_rate', learning_rate)

    @property
    def lr(self):
        return self.learning_rate

    @lr.setter
    def lr(self, lr):
        self.learning_rate = lr

    @classmethod
    def from_config(cls, config, custom_objects=None):
        optimizer = tf.keras.optimizers.deserialize(
            config.pop('optimizer'), custom_objects=custom_objects
        )
        return cls(optimizer, **config)
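
# --- Hedged usage sketch (not part of the original source) ---
# A minimal example of wrapping a Keras optimizer with the `Lookahead` class
# above, assuming a TF 2.x version where the legacy `OptimizerV2` API that
# this class targets is still available. Every `sync_period` steps the slow
# weights move `slow_step_size` of the way toward the fast weights, and both
# sets are synchronized.
def _lookahead_usage_example():
    import numpy as np
    import tensorflow as tf

    model = tf.keras.Sequential(
        [tf.keras.Input(shape=(4,)), tf.keras.layers.Dense(1)]
    )
    inner = tf.keras.optimizers.SGD(learning_rate=0.1)
    opt = Lookahead(inner, sync_period=6, slow_step_size=0.5)
    model.compile(optimizer=opt, loss='mse')
    # Train on a tiny random batch just to exercise the optimizer.
    x = np.random.rand(8, 4).astype('float32')
    y = np.random.rand(8, 1).astype('float32')
    model.fit(x, y, epochs=1, verbose=0)
    return model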
class NovoGrad(tf.keras.optimizers.Optimizer):
    """The NovoGrad Optimizer was first proposed in [Stochastic Gradient
    Methods with Layer-wise Adaptive Moments for training of Deep
    Networks](https://arxiv.org/pdf/1905.11286.pdf)

    NovoGrad is a first-order SGD-based algorithm, which computes second
    moments per layer instead of per weight as in Adam. Compared to Adam,
    NovoGrad takes less memory, and has been found to be more numerically
    stable. More specifically we compute (for more information on the
    computation please refer to this
    [link](https://nvidia.github.io/OpenSeq2Seq/html/optimizers.html)):

    Second order moment = exponential moving average of layer-wise square
    of grads:
        v_t <-- beta_2 * v_{t-1} + (1-beta_2) * (g_t)^2
    First order moment in one of four modes:
        1. moment of grads normalized by v_t:
            m_t <- beta_1 * m_{t-1} + [g_t / (sqrt(v_t) + epsilon)]
        2. moment similar to Adam: exponential moving average of grads
           normalized by v_t (set grad_averaging = True to use this):
            m_t <- beta_1 * m_{t-1} +
                   [(1 - beta_1) * (g_t / (sqrt(v_t) + epsilon))]
        3. weight decay adds a w_d term after grads are rescaled by
           1/sqrt(v_t) (set weight_decay > 0 to use this):
            m_t <- beta_1 * m_{t-1} +
                   [(g_t / (sqrt(v_t) + epsilon)) + (w_d * w_{t-1})]
        4. weight decay + exponential moving average from Adam:
            m_t <- beta_1 * m_{t-1} +
                   [(1 - beta_1) * ((g_t / (sqrt(v_t) + epsilon)) +
                   (w_d * w_{t-1}))]
    Weight update:
        w_t <- w_{t-1} - lr_t * m_t

    Example of usage:
    ```python
    opt = tfa.optimizers.NovoGrad(
        lr=1e-3,
        beta_1=0.9,
        beta_2=0.999,
        weight_decay=0.001,
        grad_averaging=False
    )
    ```
    """

    @typechecked
    def __init__(
        self,
        learning_rate: Union[FloatTensorLike, Callable] = 0.001,
        beta_1: FloatTensorLike = 0.9,
        beta_2: FloatTensorLike = 0.999,
        epsilon: FloatTensorLike = 1e-07,
        weight_decay: FloatTensorLike = 0.0,
        grad_averaging: bool = False,
        amsgrad: bool = False,
        name: str = 'NovoGrad',
        **kwargs,
    ):
        """Construct a new NovoGrad optimizer.

        Args:
            learning_rate: A `Tensor` or a floating point value, or a schedule
                that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.
                The learning rate.
            beta_1: A float value or a constant float tensor.
                The exponential decay rate for the 1st moment estimates.
            beta_2: A float value or a constant float tensor.
                The exponential decay rate for the 2nd moment estimates.
            epsilon: A small constant for numerical stability.
            weight_decay: A floating point value. Weight decay for each param.
            grad_averaging: determines whether to use Adam style exponential
                moving averaging for the first order moments.
            amsgrad: boolean. Whether to apply the AMSGrad variant of this
                algorithm.
            name: Optional name for the operations created when applying
                gradients. Defaults to "NovoGrad".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`,
                `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
                by norm; `clipvalue` is clip gradients by value, `decay` is
                included for backward compatibility to allow time inverse
                decay of learning rate. `lr` is included for backward
                compatibility, recommended to use `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        if weight_decay < 0.0:
            raise ValueError('Weight decay rate cannot be negative')
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self._set_hyper('weight_decay', weight_decay)
        self._set_hyper('grad_averaging', grad_averaging)
        self.amsgrad = amsgrad
        self.epsilon = epsilon or tf.keras.backend.epsilon()

    def _create_slots(self, var_list):
        for var in var_list:
            self.add_slot(var=var, slot_name='m', initializer='zeros')
        for var in var_list:
            # The second moment is a scalar per variable (layer-wise).
            self.add_slot(
                var=var,
                slot_name='v',
                initializer=tf.zeros(shape=[], dtype=var.dtype),
            )
        if self.amsgrad:
            for var in var_list:
                self.add_slot(var, 'vhat')

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super()._prepare_local(var_device, var_dtype, apply_state)
        beta_1_t = tf.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = tf.identity(self._get_hyper('beta_2', var_dtype))
        apply_state[(var_device, var_dtype)].update(
            dict(
                epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
                beta_1_t=beta_1_t,
                beta_2_t=beta_2_t,
                one_minus_beta_2_t=1 - beta_2_t,
                one_minus_beta_1_t=1 - beta_1_t,
            )
        )

    def set_weights(self, weights):
        params = self.weights
        # Weights generated with amsgrad include vhats (3x + 1 variables,
        # versus 2x + 1 without); drop the extras for compatibility.
        num_vars = int((len(params) - 1) / 2)
        if len(weights) == 3 * num_vars + 1:
            weights = weights[: len(params)]
        super().set_weights(weights)

    def _resource_apply_dense(self, grad, var, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)
        weight_decay = self._get_hyper('weight_decay')
        grad_averaging = self._get_hyper('grad_averaging')
        v = self.get_slot(var, 'v')
        g_2 = tf.reduce_sum(tf.square(tf.cast(grad, tf.float32)))
        v_t = tf.cond(
            tf.equal(self.iterations, 0),
            lambda: g_2,
            lambda: v * coefficients['beta_2_t']
            + g_2 * coefficients['one_minus_beta_2_t'],
        )
        v_t = v.assign(v_t, use_locking=self._use_locking)
        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = vhat.assign(
                tf.maximum(vhat, v_t), use_locking=self._use_locking
            )
            grad = grad / (tf.sqrt(vhat_t) + self.epsilon)
        else:
            grad = grad / (tf.sqrt(v_t) + self.epsilon)
        grad = tf.cond(
            tf.greater(weight_decay, 0),
            lambda: grad + weight_decay * var,
            lambda: grad,
        )
        grad = tf.cond(
            tf.logical_and(grad_averaging, tf.not_equal(self.iterations, 0)),
            lambda: grad * coefficients['one_minus_beta_1_t'],
            lambda: grad,
        )
        m = self.get_slot(var, 'm')
        return training_ops.resource_apply_keras_momentum(
            var.handle,
            m.handle,
            coefficients['lr_t'],
            grad,
            coefficients['beta_1_t'],
            use_locking=self._use_locking,
            use_nesterov=False,
        )

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)
        weight_decay = self._get_hyper('weight_decay')
        grad_averaging = self._get_hyper('grad_averaging')
        v = self.get_slot(var, 'v')
        g_2 = tf.reduce_sum(tf.square(tf.cast(grad, tf.float32)))
        v_t = tf.cond(
            tf.equal(self.iterations, 0),
            lambda: g_2,
            lambda: v * coefficients['beta_2_t']
            + g_2 * coefficients['one_minus_beta_2_t'],
        )
        v_t = v.assign(v_t, use_locking=self._use_locking)
        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = vhat.assign(
                tf.maximum(vhat, v_t), use_locking=self._use_locking
            )
            grad = grad / (tf.sqrt(vhat_t) + self.epsilon)
        else:
            grad = grad / (tf.sqrt(v_t) + self.epsilon)
        grad = tf.cond(
            tf.greater(weight_decay, 0),
            lambda: grad + weight_decay * var,
            lambda: grad,
        )
        grad = tf.cond(
            tf.logical_and(grad_averaging, tf.not_equal(self.iterations, 0)),
            lambda: grad * coefficients['one_minus_beta_1_t'],
            lambda: grad,
        )
        m = self.get_slot(var, 'm')
        return training_ops.resource_sparse_apply_keras_momentum(
            var.handle,
            m.handle,
            coefficients['lr_t'],
            tf.gather(grad, indices),
            indices,
            coefficients['beta_1_t'],
            use_locking=self._use_locking,
            use_nesterov=False,
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                'learning_rate': self._serialize_hyperparameter('learning_rate'),
                'beta_1': self._serialize_hyperparameter('beta_1'),
                'beta_2': self._serialize_hyperparameter('beta_2'),
                'epsilon': self.epsilon,
                'weight_decay': self._serialize_hyperparameter('weight_decay'),
                'grad_averaging': self._serialize_hyperparameter('grad_averaging'),
            }
        )
        return config
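
# --- Hedged sketch (not part of the original source) ---
# Illustrates the layer-wise second moment from the NovoGrad docstring,
#     v_t = beta_2 * v_{t-1} + (1 - beta_2) * ||g_t||^2,
# which is a single scalar per layer rather than one value per weight as
# in Adam. The numbers are exaggerated so the result is easy to verify.
def _novograd_moment_example():
    import tensorflow as tf

    beta_2 = 0.25                            # exaggerated for readability
    grad = tf.constant([[3.0, 4.0]])         # one layer's gradient
    g_2 = tf.reduce_sum(tf.square(grad))     # ||g||^2 = 25.0 (a scalar)
    v_prev = tf.constant(1.0)
    v_t = beta_2 * v_prev + (1 - beta_2) * g_2   # 0.25*1 + 0.75*25 = 19.0
    return v_t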
def Ranger(
    sync_period=6,
    slow_step_size=0.5,
    learning_rate=0.001,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-07,
    weight_decay=0.0,
    amsgrad=False,
    sma_threshold=5.0,
    total_steps=0,
    warmup_proportion=0.1,
    min_lr=0.0,
    name='Ranger',
):
    """Function returning a tf.keras.optimizers.Optimizer object.

    The returned optimizer is a Ranger optimizer. Ranger combines RAdam
    (https://arxiv.org/abs/1908.03265) and Lookahead
    (https://arxiv.org/abs/1907.08610). The returned optimizer can be fed
    into the model.compile method of a tf.keras model as an optimizer.

    Attributes
    ----------
    learning_rate : float
        step size the RAdam optimizer takes (depending on the gradient)
    beta_1 : float
        parameter that specifies the exponential moving average length for
        momentum (0 <= beta_1 <= 1)
    beta_2 : float
        parameter that specifies the exponential moving average length for
        variance (0 <= beta_2 <= 1)
    epsilon : float
        small number added for numerical stability of the variance division
    weight_decay : float
        number with which the weights of the model are multiplied each
        iteration (0 <= weight_decay <= 1)
    amsgrad : bool
        parameter that specifies whether to use the AMSGrad version of Adam
        (https://arxiv.org/abs/1904.03590)
    total_steps : int
        total number of training steps
    warmup_proportion : float
        the proportion of updates over which the learning rate is increased
        from the minimum learning rate to `learning_rate`
        (0 <= warmup_proportion <= 1)
    min_lr : float
        minimum learning rate, used at the start of warmup and after decay
    sync_period : int
        parameter that specifies after how many steps the lookahead step
        backwards should be applied (called `k` in the Lookahead paper)
    slow_step_size : float
        parameter that specifies how far the slow weights are moved in the
        direction of the fast weights (called `alpha` in the Lookahead
        paper, 0 <= slow_step_size <= 1)
    """
    inner = RectifiedAdam(
        learning_rate,
        beta_1,
        beta_2,
        epsilon,
        weight_decay,
        amsgrad,
        sma_threshold,
        total_steps,
        warmup_proportion,
        min_lr,
        name,
    )
    optim = Lookahead(inner, sync_period, slow_step_size, name)
    return optim
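
# --- Hedged usage sketch (not part of the original source) ---
# Compiling a model with the `Ranger` factory above: RAdam performs the
# inner updates while Lookahead syncs the slow weights every `sync_period`
# steps. Assumes the same legacy tf.keras optimizer API as the classes above.
def _ranger_usage_example():
    import tensorflow as tf

    model = tf.keras.Sequential(
        [tf.keras.Input(shape=(4,)), tf.keras.layers.Dense(1)]
    )
    opt = Ranger(
        sync_period=6,
        slow_step_size=0.5,
        learning_rate=1e-3,
        total_steps=1000,
        warmup_proportion=0.1,
        min_lr=1e-5,
    )
    model.compile(optimizer=opt, loss='mse')
    return model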
class RectifiedAdam(tf.keras.optimizers.Optimizer):
    """Variant of the Adam optimizer whose adaptive learning rate is rectified
    so as to have a consistent variance.

    It implements the Rectified Adam (a.k.a. RAdam) proposed by
    Liyuan Liu et al. in [On The Variance Of The Adaptive Learning Rate
    And Beyond](https://arxiv.org/pdf/1908.03265v1.pdf).

    Example of usage:
    ```python
    opt = tfa.optimizers.RectifiedAdam(lr=1e-3)
    ```
    Note: `amsgrad` is not described in the original paper. Use it with
    caution.

    RAdam is not a replacement for heuristic warmup; the settings should be
    kept if warmup has already been employed and tuned in the baseline method.
    You can enable warmup by setting `total_steps` and `warmup_proportion`:
    ```python
    opt = tfa.optimizers.RectifiedAdam(
        lr=1e-3,
        total_steps=10000,
        warmup_proportion=0.1,
        min_lr=1e-5,
    )
    ```
    In the above example, the learning rate will increase linearly
    from 0 to `lr` in 1000 steps, then decrease linearly from `lr` to `min_lr`
    in 9000 steps.

    Lookahead, proposed by Michael R. Zhang et.al in the paper
    [Lookahead Optimizer: k steps forward, 1 step back]
    (https://arxiv.org/abs/1907.08610v1), can be integrated with RAdam,
    as announced by Less Wright, and the new combined optimizer can also
    be called "Ranger". The mechanism can be enabled by using the lookahead
    wrapper. For example:
    ```python
    radam = tfa.optimizers.RectifiedAdam()
    ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
    ```
    """

    @typechecked
    def __init__(
        self,
        learning_rate: Union[FloatTensorLike, Callable] = 0.001,
        beta_1: FloatTensorLike = 0.9,
        beta_2: FloatTensorLike = 0.999,
        epsilon: FloatTensorLike = 1e-07,
        weight_decay: FloatTensorLike = 0.0,
        amsgrad: bool = False,
        sma_threshold: FloatTensorLike = 5.0,
        total_steps: int = 0,
        warmup_proportion: FloatTensorLike = 0.1,
        min_lr: FloatTensorLike = 0.0,
        name: str = 'RectifiedAdam',
        **kwargs,
    ):
        """Construct a new RAdam optimizer.

        Args:
            learning_rate: A `Tensor` or a floating point value, or a schedule
                that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.
                The learning rate.
            beta_1: A float value or a constant float tensor.
                The exponential decay rate for the 1st moment estimates.
            beta_2: A float value or a constant float tensor.
                The exponential decay rate for the 2nd moment estimates.
            epsilon: A small constant for numerical stability.
            weight_decay: A floating point value. Weight decay for each param.
            amsgrad: boolean. Whether to apply AMSGrad variant of this
                algorithm from the paper "On the Convergence of Adam and
                beyond".
            sma_threshold: A float value.
                The threshold for the simple moving average.
            total_steps: An integer. Total number of training steps.
                Enable warmup by setting a positive value.
            warmup_proportion: A floating point value.
                The proportion of increasing steps.
            min_lr: A floating point value. Minimum learning rate after warmup.
            name: Optional name for the operations created when applying
                gradients. Defaults to "RectifiedAdam".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`,
                `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
                by norm; `clipvalue` is clip gradients by value, `decay` is
                included for backward compatibility to allow time inverse
                decay of learning rate. `lr` is included for backward
                compatibility, recommended to use `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('weight_decay', weight_decay)
        self._set_hyper('sma_threshold', sma_threshold)
        self._set_hyper('total_steps', float(total_steps))
        self._set_hyper('warmup_proportion', warmup_proportion)
        self._set_hyper('min_lr', min_lr)
        self.epsilon = epsilon or tf.keras.backend.epsilon()
        self.amsgrad = amsgrad
        self._initial_weight_decay = weight_decay
        self._initial_total_steps = total_steps

    def _create_slots(self, var_list):
        for var in var_list:
            self.add_slot(var, 'm')
        for var in var_list:
            self.add_slot(var, 'v')
        if self.amsgrad:
            for var in var_list:
                self.add_slot(var, 'vhat')

    def set_weights(self, weights):
        params = self.weights
        # Weights generated with amsgrad include vhats (3x + 1 variables,
        # versus 2x + 1 without); drop the extras for compatibility.
        num_vars = int((len(params) - 1) / 2)
        if len(weights) == 3 * num_vars + 1:
            weights = weights[: len(params)]
        super().set_weights(weights)

    def _resource_apply_dense(self, grad, var):
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        beta_1_t = self._get_hyper('beta_1', var_dtype)
        beta_2_t = self._get_hyper('beta_2', var_dtype)
        epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta_1_power = tf.pow(beta_1_t, local_step)
        beta_2_power = tf.pow(beta_2_t, local_step)
        if self._initial_total_steps > 0:
            total_steps = self._get_hyper('total_steps', var_dtype)
            warmup_steps = total_steps * self._get_hyper(
                'warmup_proportion', var_dtype
            )
            min_lr = self._get_hyper('min_lr', var_dtype)
            decay_steps = tf.maximum(total_steps - warmup_steps, 1)
            decay_rate = (min_lr - lr_t) / decay_steps
            lr_t = tf.where(
                local_step <= warmup_steps,
                lr_t * (local_step / warmup_steps),
                lr_t
                + decay_rate * tf.minimum(local_step - warmup_steps, decay_steps),
            )
        sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0
        sma_t = sma_inf - 2.0 * local_step * beta_2_power / (1.0 - beta_2_power)
        m_t = m.assign(
            beta_1_t * m + (1.0 - beta_1_t) * grad,
            use_locking=self._use_locking,
        )
        m_corr_t = m_t / (1.0 - beta_1_power)
        v_t = v.assign(
            beta_2_t * v + (1.0 - beta_2_t) * tf.square(grad),
            use_locking=self._use_locking,
        )
        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = vhat.assign(
                tf.maximum(vhat, v_t), use_locking=self._use_locking
            )
            v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power))
        else:
            vhat_t = None
            v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power))
        r_t = tf.sqrt(
            (sma_t - 4.0)
            / (sma_inf - 4.0)
            * (sma_t - 2.0)
            / (sma_inf - 2.0)
            * sma_inf
            / sma_t
        )
        sma_threshold = self._get_hyper('sma_threshold', var_dtype)
        var_t = tf.where(
            sma_t >= sma_threshold,
            r_t * m_corr_t / (v_corr_t + epsilon_t),
            m_corr_t,
        )
        if self._initial_weight_decay > 0.0:
            var_t += self._get_hyper('weight_decay', var_dtype) * var
        var_update = var.assign_sub(lr_t * var_t, use_locking=self._use_locking)
        updates = [var_update, m_t, v_t]
        if self.amsgrad:
            updates.append(vhat_t)
        return tf.group(*updates)

    def _resource_apply_sparse(self, grad, var, indices):
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta_1_t = self._get_hyper('beta_1', var_dtype)
        beta_2_t = self._get_hyper('beta_2', var_dtype)
        epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta_1_power = tf.pow(beta_1_t, local_step)
        beta_2_power = tf.pow(beta_2_t, local_step)
        if self._initial_total_steps > 0:
            total_steps = self._get_hyper('total_steps', var_dtype)
            warmup_steps = total_steps * self._get_hyper(
                'warmup_proportion', var_dtype
            )
            min_lr = self._get_hyper('min_lr', var_dtype)
            decay_steps = tf.maximum(total_steps - warmup_steps, 1)
            decay_rate = (min_lr - lr_t) / decay_steps
            lr_t = tf.where(
                local_step <= warmup_steps,
                lr_t * (local_step / warmup_steps),
                lr_t
                + decay_rate * tf.minimum(local_step - warmup_steps, decay_steps),
            )
        sma_inf = 2.0 / (1.0 - beta_2_t) - 1.0
        sma_t = sma_inf - 2.0 * local_step * beta_2_power / (1.0 - beta_2_power)
        m = self.get_slot(var, 'm')
        m_scaled_g_values = grad * (1 - beta_1_t)
        m_t = m.assign(m * beta_1_t, use_locking=self._use_locking)
        with tf.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
        m_corr_t = m_t / (1.0 - beta_1_power)
        v = self.get_slot(var, 'v')
        v_scaled_g_values = grad * grad * (1 - beta_2_t)
        v_t = v.assign(v * beta_2_t, use_locking=self._use_locking)
        with tf.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = vhat.assign(
                tf.maximum(vhat, v_t), use_locking=self._use_locking
            )
            v_corr_t = tf.sqrt(vhat_t / (1.0 - beta_2_power))
        else:
            vhat_t = None
            v_corr_t = tf.sqrt(v_t / (1.0 - beta_2_power))
        r_t = tf.sqrt(
            (sma_t - 4.0)
            / (sma_inf - 4.0)
            * (sma_t - 2.0)
            / (sma_inf - 2.0)
            * sma_inf
            / sma_t
        )
        sma_threshold = self._get_hyper('sma_threshold', var_dtype)
        var_t = tf.where(
            sma_t >= sma_threshold,
            r_t * m_corr_t / (v_corr_t + epsilon_t),
            m_corr_t,
        )
        if self._initial_weight_decay > 0.0:
            var_t += self._get_hyper('weight_decay', var_dtype) * var
        with tf.control_dependencies([var_t]):
            var_update = self._resource_scatter_add(
                var, indices, tf.gather(-lr_t * var_t, indices)
            )
        updates = [var_update, m_t, v_t]
        if self.amsgrad:
            updates.append(vhat_t)
        return tf.group(*updates)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                'learning_rate': self._serialize_hyperparameter('learning_rate'),
                'beta_1': self._serialize_hyperparameter('beta_1'),
                'beta_2': self._serialize_hyperparameter('beta_2'),
                'decay': self._serialize_hyperparameter('decay'),
                'weight_decay': self._serialize_hyperparameter('weight_decay'),
                'sma_threshold': self._serialize_hyperparameter('sma_threshold'),
                'epsilon': self.epsilon,
                'amsgrad': self.amsgrad,
                'total_steps': self._serialize_hyperparameter('total_steps'),
                'warmup_proportion': self._serialize_hyperparameter(
                    'warmup_proportion'
                ),
                'min_lr': self._serialize_hyperparameter('min_lr'),
            }
        )
        return config
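
# --- Hedged sketch (not part of the original source) ---
# Reproduces the warmup/decay schedule from `_resource_apply_dense` in plain
# Python so the docstring example (lr rises over 1000 steps, then decays over
# 9000 steps) can be checked numerically. `step` is 1-based, matching the
# `local_step` computed above.
def _radam_warmup_lr(
    step, lr=1e-3, total_steps=10000, warmup_proportion=0.1, min_lr=1e-5
):
    warmup_steps = total_steps * warmup_proportion
    if step <= warmup_steps:
        return lr * step / warmup_steps
    decay_steps = max(total_steps - warmup_steps, 1)
    decay_rate = (min_lr - lr) / decay_steps
    return lr + decay_rate * min(step - warmup_steps, decay_steps)

# e.g. _radam_warmup_lr(500) == 5e-4, _radam_warmup_lr(1000) == 1e-3,
# and _radam_warmup_lr(10000) == 1e-5.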
class rec_optimizer(Optimizer):
    # Stub: accepts a layer/node configuration but does not implement any
    # update logic yet.
    def __init__(self, layers=2, nodes=20):
        pass
def _solve(a, b, c):
    """Return solution of a quadratic minimization.

    The optimization equation is:
        f(a, b, c) = argmin_w{1/2 * a * w^2 + b * w + c * |w|}
    we get the optimal solution w*:
        w* = -(b - sign(b)*c)/a if |b| > c else w* = 0

    REQUIRES: Dimensionality of a and b must be the same.

    Args:
        a: A Tensor
        b: A Tensor
        c: A Tensor with one element.

    Returns:
        A Tensor w, which is the solution of the equation.
    """
    w = (c * tf.sign(b) - b) / a
    w = tf.cast(tf.abs(b) > c, dtype=b.dtype) * w
    return w
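
# --- Hedged check (not part of the original source) ---
# Quick numeric sanity check of `_solve`: for a=2, b=-5, c=1 the minimizer of
# 1/2*a*w^2 + b*w + c*|w| is w* = -(b - sign(b)*c)/a = (5 - 1)/2 = 2.0, and
# for |b| <= c (e.g. b=0.5, c=1) the soft threshold returns 0.
def _solve_example():
    import tensorflow as tf

    w = _solve(tf.constant(2.0), tf.constant(-5.0), tf.constant(1.0))
    w_zero = _solve(tf.constant(2.0), tf.constant(0.5), tf.constant(1.0))
    return w, w_zero  # (2.0, 0.0)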
class Yogi(tf.keras.optimizers.Optimizer):
    """Optimizer that implements the Yogi algorithm in Keras.

    See Algorithm 2 of
    https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization.pdf.
    """

    @typechecked
    def __init__(
        self,
        learning_rate: Union[FloatTensorLike, Callable] = 0.01,
        beta1: FloatTensorLike = 0.9,
        beta2: FloatTensorLike = 0.999,
        epsilon: FloatTensorLike = 0.001,
        l1_regularization_strength: FloatTensorLike = 0.0,
        l2_regularization_strength: FloatTensorLike = 0.0,
        initial_accumulator_value: FloatTensorLike = 1e-06,
        activation: str = 'sign',
        name: str = 'Yogi',
        **kwargs,
    ):
        """Construct a new Yogi optimizer.

        Args:
            learning_rate: A Tensor or a floating point value.
                The learning rate.
            beta1: A float value or a constant float tensor.
                The exponential decay rate for the 1st moment estimates.
            beta2: A float value or a constant float tensor.
                The exponential decay rate for the 2nd moment estimates.
            epsilon: A constant trading off adaptivity and noise.
            l1_regularization_strength: A float value, must be greater than or
                equal to zero.
            l2_regularization_strength: A float value, must be greater than or
                equal to zero.
            initial_accumulator_value: The starting value for accumulators.
                Only positive values are allowed.
            activation: Use hard sign or soft tanh to determine sign.
            name: Optional name for the operations created when applying
                gradients. Defaults to "Yogi".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`,
                `lr`, `decay`}. `clipnorm` is clip gradients by norm; `clipvalue`
                is clip gradients by value, `decay` is included for backward
                compatibility to allow time inverse decay of learning rate. `lr`
                is included for backward compatibility, recommended to use
                `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('beta_1', beta1)
        self._set_hyper('beta_2', beta2)
        self._set_hyper('epsilon', epsilon)
        self._set_hyper('l1_regularization_strength', l1_regularization_strength)
        self._set_hyper('l2_regularization_strength', l2_regularization_strength)
        self._beta1 = beta1
        self._activation = activation
        self._initial_accumulator_value = initial_accumulator_value
        self._l1_regularization_strength = l1_regularization_strength
        self._l2_regularization_strength = l2_regularization_strength

    def _create_slots(self, var_list):
        """See `tf.train.Optimizer._create_slots()`."""
        for var in var_list:
            init = tf.constant_initializer(self._initial_accumulator_value)
            self.add_slot(var, 'v', init)
            if self._beta1 > 0.0:
                self.add_slot(var, 'm')

    def _resource_apply_dense(self, grad, var):
        """See `tf.train.Optimizer._apply_dense()`."""
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta1_t = self._get_hyper('beta_1', var_dtype)
        beta2_t = self._get_hyper('beta_2', var_dtype)
        epsilon_t = self._get_hyper('epsilon', var_dtype)
        l1_t = self._get_hyper('l1_regularization_strength', var_dtype)
        l2_t = self._get_hyper('l2_regularization_strength', var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta1_power = tf.pow(beta1_t, local_step)
        beta2_power = tf.pow(beta2_t, local_step)
        lr = lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)
        update_vs = []
        if self._beta1 == 0.0:
            v = self.get_slot(var, 'v')
            grad2 = grad * grad
            if self._activation == 'sign':
                sign = tf.sign(grad2 - v)
            elif self._activation == 'tanh':
                sign = tf.tanh(10 * (grad2 - v))
            else:
                raise NotImplementedError(
                    'Activation function can be sign or tanh'
                )
            v_t = v.assign_add(
                (1 - beta2_t) * sign * grad2, use_locking=self._use_locking
            )
            v_sqrt = tf.sqrt(v_t)
            per_coord_lr = lr / (v_sqrt + epsilon_t)
            new_var = var - per_coord_lr * grad
            if self._l1_regularization_strength > 0:
                new_var = _solve(
                    1 + l2_t * per_coord_lr, -new_var, l1_t * per_coord_lr
                )
            elif self._l2_regularization_strength > 0:
                new_var = new_var / (1 + l2_t * per_coord_lr)
            var_update = var.assign(new_var, use_locking=self._use_locking)
            update_vs.append(var_update)
            update_vs.append(v_t)
        else:
            m = self.get_slot(var, 'm')
            m_t = m.assign(
                m * beta1_t + grad * (1 - beta1_t),
                use_locking=self._use_locking,
            )
            v = self.get_slot(var, 'v')
            grad2 = grad * grad
            if self._activation == 'sign':
                sign = tf.sign(grad2 - v)
            elif self._activation == 'tanh':
                sign = tf.tanh(10 * (grad2 - v))
            else:
                raise NotImplementedError(
                    'Activation function can be sign or tanh'
                )
            v_t = v.assign_add(
                (1 - beta2_t) * sign * grad2, use_locking=self._use_locking
            )
            v_sqrt = tf.sqrt(v_t)
            per_coord_lr = lr / (v_sqrt + epsilon_t)
            new_var = var - per_coord_lr * m_t
            if self._l1_regularization_strength > 0:
                new_var = _solve(
                    1 + l2_t * per_coord_lr, -new_var, l1_t * per_coord_lr
                )
            elif self._l2_regularization_strength > 0:
                new_var = new_var / (1 + l2_t * per_coord_lr)
            var_update = var.assign(new_var, use_locking=self._use_locking)
            update_vs.append(var_update)
            update_vs.append(m_t)
            update_vs.append(v_t)
        return tf.group(*update_vs)

    def _resource_apply_sparse(self, grad, var, indices):
        """Applies sparse gradients to a variable.

        Args:
            grad: A tensor for the `values` of `tf.IndexedSlices`.
            var: A `tf.Variable` object.
            indices: A tensor for the `indices` of `tf.IndexedSlices`.

        Returns:
            An op which updates `var` with `grad` and `indices`.
        """
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta1_t = self._get_hyper('beta_1', var_dtype)
        beta2_t = self._get_hyper('beta_2', var_dtype)
        epsilon_t = self._get_hyper('epsilon', var_dtype)
        l1_t = self._get_hyper('l1_regularization_strength', var_dtype)
        l2_t = self._get_hyper('l2_regularization_strength', var_dtype)
        local_step = tf.cast(self.iterations + 1, var_dtype)
        beta1_power = tf.pow(beta1_t, local_step)
        beta2_power = tf.pow(beta2_t, local_step)
        lr = lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)
        update_vs = []
        if self._beta1 == 0.0:
            v = self.get_slot(var, 'v')
            grad2 = grad * grad
            v_slice = tf.gather(v, indices)
            if self._activation == 'sign':
                sign = tf.sign(grad2 - v_slice)
            elif self._activation == 'tanh':
                sign = tf.tanh(10 * (grad2 - v_slice))
            else:
                raise NotImplementedError(
                    'Activation function can be sign or tanh'
                )
            v_scaled_g_values = v_slice + (1 - beta2_t) * sign * grad2
            v_t = self._resource_scatter_update(v, indices, v_scaled_g_values)
            v_sqrt = tf.sqrt(v_scaled_g_values)
            per_coord_lr = lr / (v_sqrt + epsilon_t)
            var_slice = tf.gather(var, indices)
            new_var = var_slice - per_coord_lr * grad
            if self._l1_regularization_strength > 0:
                new_var = _solve(
                    1 + l2_t * per_coord_lr, -new_var, l1_t * per_coord_lr
                )
            elif self._l2_regularization_strength > 0:
                new_var = new_var / (1 + l2_t * per_coord_lr)
            var_update = self._resource_scatter_update(var, indices, new_var)
            update_vs.append(var_update)
            update_vs.append(v_t)
        else:
            m = self.get_slot(var, 'm')
            m_scaled_g_values = grad * (1 - beta1_t)
            m_t = m.assign(m * beta1_t, use_locking=self._use_locking)
            with tf.control_dependencies([m_t]):
                m_slice = tf.gather(m, indices) + m_scaled_g_values
                m_t = self._resource_scatter_update(m, indices, m_slice)
            v = self.get_slot(var, 'v')
            grad2 = grad * grad
            v_slice = tf.gather(v, indices)
            if self._activation == 'sign':
                sign = tf.sign(grad2 - v_slice)
            elif self._activation == 'tanh':
                sign = tf.tanh(10 * (grad2 - v_slice))
            else:
                raise NotImplementedError(
                    'Activation function can be sign or tanh'
                )
            v_scaled_g_values = v_slice + (1 - beta2_t) * sign * grad2
            v_t = self._resource_scatter_update(v, indices, v_scaled_g_values)
            v_sqrt = tf.sqrt(v_scaled_g_values)
            per_coord_lr = lr / (v_sqrt + epsilon_t)
            var_slice = tf.gather(var, indices)
            new_var = var_slice - per_coord_lr * m_slice
            if self._l1_regularization_strength > 0:
                new_var = _solve(
                    1 + l2_t * per_coord_lr, -new_var, l1_t * per_coord_lr
                )
            elif self._l2_regularization_strength > 0:
                new_var = new_var / (1 + l2_t * per_coord_lr)
            var_update = self._resource_scatter_update(var, indices, new_var)
            update_vs.append(var_update)
            update_vs.append(m_t)
            update_vs.append(v_t)
        return tf.group(*update_vs)

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                'learning_rate': self._serialize_hyperparameter('learning_rate'),
                'decay': self._serialize_hyperparameter('decay'),
                'beta1': self._serialize_hyperparameter('beta_1'),
                'beta2': self._serialize_hyperparameter('beta_2'),
                'epsilon': self._serialize_hyperparameter('epsilon'),
                # Keyed by the __init__ argument names so from_config works.
                'l1_regularization_strength': self._serialize_hyperparameter(
                    'l1_regularization_strength'
                ),
                'l2_regularization_strength': self._serialize_hyperparameter(
                    'l2_regularization_strength'
                ),
                'activation': self._activation,
                'initial_accumulator_value': self._initial_accumulator_value,
            }
        )
        return config
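
# --- Hedged usage sketch (not part of the original source) ---
# Minimal Keras usage of the `Yogi` optimizer defined above. The 'tanh'
# activation would give a smooth approximation of the hard sign used in the
# v update; 'sign' (the default) matches Algorithm 2 of the paper.
def _yogi_usage_example():
    import tensorflow as tf

    model = tf.keras.Sequential(
        [tf.keras.Input(shape=(4,)), tf.keras.layers.Dense(1)]
    )
    opt = Yogi(learning_rate=0.01, beta1=0.9, beta2=0.999, activation='sign')
    model.compile(optimizer=opt, loss='mse')
    return model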
def attention_simple(inputs, timesteps):
    input_dim = int(inputs.shape[-1])
    a = Permute((2, 1), name='transpose')(inputs)
    a = Dense(timesteps, activation='softmax', name='attention_probs')(a)
    a_probs = Permute((2, 1), name='attention_vec')(a)
    output_attention_mul = Multiply(name='focused_attention')([inputs, a_probs])
    output_flat = Lambda(lambda x: K.sum(x, axis=1), name='temporal_average')(
        output_attention_mul
    )
    return output_flat, a_probs
def dense_model(timesteps, n_class, n_features, classifier_architecture, dropout):
    inputs = Input((timesteps, n_features))
    x = Dense(128, activation=Mish())(inputs)
    x = LayerNormalization()(x)
    x, a = attention_simple(x, timesteps)
    for d, dr in zip(classifier_architecture, dropout):
        x = Dropout(dr)(x)
        x = Dense(d, activation=Mish())(x)
        x = LayerNormalization()(x)
    outputs = Dense(n_class, activation='softmax')(x)
    model = Model(inputs, outputs)
    return model
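
# --- Hedged usage sketch (not part of the original source) ---
# Builds a small classifier with `dense_model`, assuming the Keras layers it
# relies on (Input, Dense, Dropout, LayerNormalization, Permute, Multiply,
# Lambda, Model) and the `Mish` activation are imported at module level as
# the functions above expect. Shapes here are illustrative: 10 timesteps,
# 6 features, 3 classes, two hidden blocks of 64 and 32 units.
def _dense_model_example():
    model = dense_model(
        timesteps=10,
        n_class=3,
        n_features=6,
        classifier_architecture=[64, 32],
        dropout=[0.2, 0.1],
    )
    model.summary()
    return model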
class LogAudioCallback(Callback):
    """Log audio samples to Weights & Biases."""

    model: pl.LightningModule
    stored_forward: MethodType

    def __init__(
        self,
        on_train: bool,
        on_val: bool,
        on_test: bool,
        save_audio_sr: int = 48000,
        n_batches: int = 1,
        log_on_epoch_end: bool = False,
        max_audio_samples: int = 8,
    ):
        self.on_train = on_train
        self.on_val = on_val
        self.on_test = on_test
        self.save_audio_sr = save_audio_sr
        self.n_batches = n_batches
        self.saved_targets: dict[str, list[Any]] = dict(train=[], val=[], test=[])
        self.saved_reconstructions: dict[str, list[Any]] = dict(
            train=[], val=[], test=[]
        )
        self.log_on_epoch_end = log_on_epoch_end
        self.max_audio_samples = max_audio_samples

    def setup(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: str
    ) -> None:
        self.model = pl_module

    def _wrap_forward(self, split: str) -> None:
        # Temporarily swap the module's forward for a version that also
        # records its output as a reconstruction.
        self.stored_forward = self.model.forward

        def wrapped_forward(self, *args, callback=None, split=None, **kwargs):
            output = callback.stored_forward(*args, **kwargs)
            callback._save_batch(output, split, 'reconstruction')
            return output

        wrapped_forward = partial(
            MethodType(wrapped_forward, self.model), callback=self, split=split
        )
        self.model.forward = wrapped_forward

    def _unwrap_forward(self) -> None:
        self.model.forward = self.stored_forward

    def on_train_batch_start(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        batch: Any,
        batch_idx: int,
    ) -> None:
        if self.on_train and batch_idx < self.n_batches:
            self._wrap_forward('train')
            self._save_batch(batch[0], 'train', 'target')

    def on_train_batch_end(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        outputs: Any,
        batch: Any,
        batch_idx: int,
    ) -> None:
        if self.on_train and batch_idx < self.n_batches:
            self._unwrap_forward()
        elif (
            self.on_train
            and batch_idx == self.n_batches
            and not self.log_on_epoch_end
        ):
            self._log_audio('train')
            self._clear_saved_batches('train')

    def on_train_epoch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        if self.on_train and self.log_on_epoch_end:
            self._log_audio('train')
            self._clear_saved_batches('train')

    def on_validation_batch_start(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        batch: Any,
        batch_idx: int,
        dataloader_idx: int = 0,
    ) -> None:
        if self.on_val and batch_idx < self.n_batches:
            self._wrap_forward('val')
            self._save_batch(batch[0], 'val', 'target')

    def on_validation_batch_end(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        outputs: Any,
        batch: Any,
        batch_idx: int,
        dataloader_idx: int = 0,
    ) -> None:
        if self.on_val and batch_idx < self.n_batches:
            self._unwrap_forward()
        elif (
            self.on_val
            and batch_idx == self.n_batches
            and not self.log_on_epoch_end
        ):
            self._log_audio('val')
            self._clear_saved_batches('val')

    def on_validation_epoch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        if self.on_val and self.log_on_epoch_end:
            self._log_audio('val')
            self._clear_saved_batches('val')

    def on_test_batch_start(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        batch: Any,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        if self.on_test and batch_idx < self.n_batches:
            self._wrap_forward('test')
            self._save_batch(batch[0], 'test', 'target')

    def on_test_batch_end(
        self,
        trainer: pl.Trainer,
        pl_module: pl.LightningModule,
        outputs: Any,
        batch: Any,
        batch_idx: int,
        dataloader_idx: int,
    ) -> None:
        if self.on_test and batch_idx < self.n_batches:
            self._unwrap_forward()
        elif (
            self.on_test
            and batch_idx == self.n_batches
            and not self.log_on_epoch_end
        ):
            self._log_audio('test')
            self._clear_saved_batches('test')

    def on_test_epoch_end(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule
    ) -> None:
        if self.on_test and self.log_on_epoch_end:
            self._log_audio('test')
            self._clear_saved_batches('test')

    def _save_batch(
        self, batch: Any, split: str, type: Literal['target', 'reconstruction']
    ) -> None:
        batch = batch.detach().cpu()
        if type == 'target':
            self.saved_targets[split].append(batch)
        elif type == 'reconstruction':
            self.saved_reconstructions[split].append(batch)
        else:
            raise ValueError(f'Unknown type {type}')

    def _clear_saved_batches(self, split: str) -> None:
        self.saved_targets[split] = []
        self.saved_reconstructions[split] = []

    def _log_audio(self, split: str) -> None:
        if (
            len(self.saved_targets[split]) == 0
            or len(self.saved_reconstructions[split]) == 0
        ):
            return
        targets = torch.cat(self.saved_targets[split], dim=0)
        reconstructions = torch.cat(self.saved_reconstructions[split], dim=0)
        if self.max_audio_samples is not None:
            targets = targets[: self.max_audio_samples]
            reconstructions = reconstructions[: self.max_audio_samples]
        # Interleave (target, reconstruction) pairs into one flat tuple.
        signals = reduce(lambda x, y: x + y, zip(targets, reconstructions))
        audio_signal = torch.hstack(signals).cpu()
        if isinstance(self.model.logger, WandbLogger):
            audio_signal = audio_signal.squeeze().numpy()
            audio = Audio(
                audio_signal,
                caption=f'{split}/audio',
                sample_rate=self.save_audio_sr,
            )
            if self.model.logger is not None:
                self.model.logger.experiment.log({f'{split}/audio': audio})
        elif isinstance(self.model.logger, TensorBoardLogger):
            outdir = Path(self.model.logger.log_dir).joinpath('audio')
            outdir.mkdir(parents=True, exist_ok=True)
            torchaudio.save(
                str(outdir.joinpath(f'{split}.wav')),
                audio_signal,
                self.save_audio_sr,
            )
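
# --- Hedged usage sketch (not part of the original source) ---
# Attaching `LogAudioCallback` to a Lightning Trainer. The callback wraps the
# module's `forward` on the first `n_batches` of each enabled split and logs
# target/reconstruction pairs to the active logger. Fitting an actual model
# is left out; `MyAutoencoder` mentioned below is a hypothetical stand-in for
# a user's LightningModule.
def _log_audio_callback_example():
    import pytorch_lightning as pl

    callback = LogAudioCallback(
        on_train=False,
        on_val=True,
        on_test=True,
        save_audio_sr=48000,
        n_batches=2,
        max_audio_samples=4,
    )
    trainer = pl.Trainer(max_epochs=1, callbacks=[callback])
    return trainer  # trainer.fit(MyAutoencoder(), datamodule) would follow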
class CleanWandbCacheCallback(pl.Callback):
    def __init__(self, every_n_epochs: int = 1, max_size_in_gb: float = 1.0):
        self.every_n_epochs = every_n_epochs
        self.gb_str = f'{max_size_in_gb}GB'

    def on_train_epoch_end(
        self, trainer: 'pl.Trainer', pl_module: 'pl.LightningModule'
    ) -> None:
        if trainer.current_epoch % self.every_n_epochs == 0:
            subprocess.Popen(
                ['wandb', 'artifact', 'cache', 'cleanup', self.gb_str]
            )
class SaveConfigCallbackWanb(SaveConfigCallback):
    """Custom callback to move the config file saved by LightningCLI to the
    experiment directory created by WandbLogger. This has a few benefits:

    1. The config file is saved in the same directory as the other files
       created by wandb, so it's easier to find.
    2. The config file is uploaded to wandb and can be viewed in the UI.
    3. Subsequent runs won't be blocked by the config file already existing.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

    def setup(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: str
    ) -> None:
        super().setup(trainer, pl_module, stage)
        if isinstance(trainer.logger, WandbLogger):
            config = Path(trainer.log_dir).joinpath('config.yaml')
            assert config.exists()
            experiment_dir = Path(trainer.logger.experiment.dir)
            if not experiment_dir.exists():
                experiment_dir.mkdir(parents=True)
            config.rename(experiment_dir.joinpath('model-config.yaml'))
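
# --- Hedged usage sketch (not part of the original source) ---
# Plugging `SaveConfigCallbackWanb` into a LightningCLI so the CLI's config
# lands in the wandb run directory. `MyModel` and `MyDataModule` are
# hypothetical placeholders for a user's LightningModule/LightningDataModule;
# the `save_config_callback` argument itself is standard LightningCLI API.
def _cli_example():
    from pytorch_lightning.cli import LightningCLI

    LightningCLI(
        MyModel,        # hypothetical LightningModule
        MyDataModule,   # hypothetical LightningDataModule
        save_config_callback=SaveConfigCallbackWanb,
    )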