code
stringlengths
17
6.64M
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet variant and optionally load its pretrained weights.

    Args:
        arch: key into ``model_urls`` identifying the checkpoint to fetch.
        block: residual block class (e.g. ``BasicBlock`` or ``Bottleneck``).
        layers: per-stage block counts passed through to ``ResNet``.
        pretrained: if True, download and load the matching state dict.
        progress: if True, show a download progress bar.

    Returns:
        The constructed ``ResNet`` instance.
    """
    model = ResNet(block, layers, **kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict)
    return model
def resnet18(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_blocks = [2, 2, 2, 2]
    return _resnet('resnet18', BasicBlock, stage_blocks, pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_blocks = [3, 4, 6, 3]
    return _resnet('resnet34', BasicBlock, stage_blocks, pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_blocks = [3, 4, 6, 3]
    return _resnet('resnet50', Bottleneck, stage_blocks, pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_blocks = [3, 4, 23, 3]
    return _resnet('resnet101', Bottleneck, stage_blocks, pretrained, progress, **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    stage_blocks = [3, 8, 36, 3]
    return _resnet('resnet152', Bottleneck, stage_blocks, pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNeXt-50 32x4d model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # ResNeXt: 32 parallel groups of width 4 inside each bottleneck.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 4
    return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
    """Constructs a ResNeXt-101 32x8d model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # ResNeXt: 32 parallel groups of width 8 inside each bottleneck.
    kwargs['groups'] = 32
    kwargs['width_per_group'] = 8
    return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
    """Constructs a Wide ResNet-50-2 model.

    Identical to ResNet-50 except the bottleneck's inner (3x3) channel count
    is doubled; the outer 1x1 channel counts are unchanged. E.g. the last
    block has 2048-512-2048 channels in ResNet-50 and 2048-1024-2048 here.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
    """Constructs a Wide ResNet-101-2 model.

    Identical to ResNet-101 except the bottleneck's inner (3x3) channel count
    is doubled; the outer 1x1 channel counts are unchanged.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
class VGGLike(nn.Module):
    """Small VGG-style CNN for single-channel inputs.

    Three conv/BN/ReLU stages (32, 64, 128 channels) separated by 2x2 max
    pooling, followed by concatenated global average + max pooling and a
    single linear classification head.
    """

    def __init__(self, n_classes):
        super(VGGLike, self).__init__()

        def conv_bn_relu(cin, cout):
            # One 3x3 conv block; no padding, so each conv shrinks H/W by 2.
            return [nn.Conv2d(cin, cout, (3, 3)),
                    nn.BatchNorm2d(cout),
                    nn.ReLU(inplace=True)]

        blocks = []
        blocks += conv_bn_relu(1, 32)
        blocks += conv_bn_relu(32, 32)
        blocks += conv_bn_relu(32, 32)
        blocks.append(nn.MaxPool2d(2, 2))
        blocks += conv_bn_relu(32, 64)
        blocks += conv_bn_relu(64, 64)
        blocks.append(nn.MaxPool2d(2, 2))
        blocks += conv_bn_relu(64, 128)
        blocks += conv_bn_relu(128, 128)
        self.features = nn.Sequential(*blocks)

        # Global pooling: avg and max results are concatenated -> 256 features.
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.max_pool = nn.AdaptiveMaxPool2d((1, 1))
        self.fc = nn.Linear(256, n_classes)

    def forward(self, x):
        feats = self.features(x)
        pooled = torch.cat([self.avg_pool(feats), self.max_pool(feats)], dim=1)
        flat = torch.flatten(pooled, 1)
        return self.fc(flat)
def parse_config(config_file: str) -> Dict:
    """Load a YAML configuration file and return its contents as a dict."""
    with open(config_file, 'r') as fd:
        return yaml.load(fd, yaml.FullLoader)
def get_data_info(cfg: Dict, augment: Optional[bool]=True) -> Dict:
    """Resolve dataset manifest and label-map paths from a config dict.

    All paths in ``cfg`` are interpreted relative to ``cfg['meta_root']``.
    The optional ``test_manifest`` entry is included only when present and
    not the literal string 'None'. ``bg_files`` is passed through as-is.

    Note: ``augment`` is accepted for interface compatibility but unused here.
    Exits the process with status -1 on a missing required key.
    """
    try:
        print('[get_data_info]', cfg)
        meta_root = cfg['meta_root']
        results = {
            'train': os.path.join(meta_root, cfg['train_manifest']),
            'val': os.path.join(meta_root, cfg['val_manifest']),
            'labels': os.path.join(meta_root, cfg['label_map']),
        }
        test_manifest = cfg.get('test_manifest', None)
        if test_manifest and test_manifest != 'None':
            results['test'] = os.path.join(meta_root, test_manifest)
        results['bg_files'] = cfg.get('bg_files', None)
        print('[get_data_info]:', results)
        return results
    except KeyError as ex:
        print(ex)
        exit(-1)
class OneCycleLR(Callback):
    """One-cycle learning-rate policy (cyclic LR with a single cycle).

    The lr climbs to ``max_lr`` over the first half-cycle, falls back over
    the second, then decays sharply towards 1/100th of its lowest value in
    the final ``end_percentage`` portion of training. Momentum (SGD only)
    is scheduled inversely when both bounds are supplied.

    References:
        - https://arxiv.org/abs/1803.09820
        - https://arxiv.org/abs/1708.07120
    """

    def __init__(self, max_lr, end_percentage=0.1, scale_percentage=None,
                 maximum_momentum=0.95, minimum_momentum=0.85, verbose=True):
        """
        Args:
            max_lr: peak learning rate reached at mid-cycle (the run starts
                roughly 10x smaller and climbs to this value).
            end_percentage: fraction of training dedicated to the final sharp
                lr decay after the cycle completes; must be in [0, 1].
            scale_percentage: optional scale factor in [0, 1]; defaults to
                ``end_percentage`` when None.
            maximum_momentum: initial/maximum momentum (SGD only), or None.
            minimum_momentum: momentum at mid-cycle (SGD only), or None.
            verbose: print lr (and momentum) after every epoch.
        """
        super(OneCycleLR, self).__init__()
        if end_percentage < 0.0 or end_percentage > 1.0:
            raise ValueError('`end_percentage` must be between 0 and 1')
        if scale_percentage is not None and (scale_percentage < 0.0 or scale_percentage > 1.0):
            raise ValueError('`scale_percentage` must be between 0 and 1')

        self.initial_lr = max_lr
        self.end_percentage = end_percentage
        self.scale = float(scale_percentage) if scale_percentage is not None else float(end_percentage)
        self.max_momentum = maximum_momentum
        self.min_momentum = minimum_momentum
        self.verbose = verbose
        # Momentum is scheduled only when both bounds are provided.
        self._update_momentum = (self.max_momentum is not None
                                 and self.min_momentum is not None)

        self.clr_iterations = 0.0
        self.history = {}
        self.epochs = None
        self.batch_size = None
        self.samples = None
        self.steps = None
        self.num_iterations = None
        self.mid_cycle_id = None

    def _reset(self):
        """Reset the iteration counter and the recorded history."""
        self.clr_iterations = 0.0
        self.history = {}

    def compute_lr(self):
        """Return the learning rate for the current iteration.

        Rises during the first half-cycle, falls during the second, then is
        quickly reduced towards ~1/100th of the base rate in the tail.
        """
        it = self.clr_iterations
        mid = self.mid_cycle_id
        if it > 2 * mid:
            # Final decay phase after the cycle completes.
            frac = (it - 2 * mid) / float(self.num_iterations - 2 * mid)
            new_lr = self.initial_lr * (1.0 + frac * (1.0 - 100.0) / 100.0) * self.scale
        elif it > mid:
            # Second half-cycle: lr falls back towards the base rate.
            frac = 1.0 - (it - mid) / mid
            new_lr = self.initial_lr * (1.0 + frac * (self.scale * 100 - 1.0)) * self.scale
        else:
            # First half-cycle: lr rises towards max_lr.
            frac = it / mid
            new_lr = self.initial_lr * (1.0 + frac * (self.scale * 100 - 1.0)) * self.scale
        if self.clr_iterations == self.num_iterations:
            self.clr_iterations = 0
        return new_lr

    def compute_momentum(self):
        """Return the momentum for the current iteration (inverse of the lr)."""
        it = self.clr_iterations
        mid = self.mid_cycle_id
        if it > 2 * mid:
            # Final phase: momentum held at its maximum.
            return self.max_momentum
        if it > mid:
            frac = 1.0 - (it - mid) / float(mid)
        else:
            frac = it / float(mid)
        return self.max_momentum - frac * (self.max_momentum - self.min_momentum)

    def on_train_begin(self, logs={}):
        logs = logs or {}
        self.epochs = self.params['epochs']
        self.batch_size = self.params['batch_size']
        self.samples = self.params['samples']
        self.steps = self.params['steps']
        if self.steps is not None:
            self.num_iterations = self.epochs * self.steps
        else:
            # Account for a final partial batch when samples don't divide evenly.
            remainder = 0 if self.samples % self.batch_size == 0 else 1
            self.num_iterations = (self.epochs + remainder) * self.samples // self.batch_size
        self.mid_cycle_id = int(self.num_iterations * (1.0 - self.end_percentage) / float(2))
        self._reset()
        K.set_value(self.model.optimizer.lr, self.compute_lr())
        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError('Momentum can be updated only on SGD optimizer !')
            K.set_value(self.model.optimizer.momentum, self.compute_momentum())

    def on_batch_end(self, epoch, logs=None):
        logs = logs or {}
        self.clr_iterations += 1
        new_lr = self.compute_lr()
        # Record the lr that was just used, then install the next one.
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        K.set_value(self.model.optimizer.lr, new_lr)
        if self._update_momentum:
            if not hasattr(self.model.optimizer, 'momentum'):
                raise ValueError('Momentum can be updated only on SGD optimizer !')
            new_momentum = self.compute_momentum()
            self.history.setdefault('momentum', []).append(K.get_value(self.model.optimizer.momentum))
            K.set_value(self.model.optimizer.momentum, new_momentum)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

    def on_epoch_end(self, epoch, logs=None):
        if not self.verbose:
            return
        if self._update_momentum:
            print(' - lr: %0.5f - momentum: %0.2f ' % (self.history['lr'][-1],
                                                       self.history['momentum'][-1]))
        else:
            print(' - lr: %0.5f ' % self.history['lr'][-1])
class LRFinder(Callback):
    """LR range test: sweep the learning rate over one epoch and log the loss.

    A port of the Fast.ai learning-rate finder for Keras, used to pick good
    bounds for one-cycle training (Leslie Smith, arXiv:1803.09820).
    """

    def __init__(self, num_samples, batch_size,
                 minimum_lr=1e-05, maximum_lr=10.0,
                 lr_scale='exp', validation_data=None,
                 validation_sample_rate=5,
                 stopping_criterion_factor=4.0,
                 loss_smoothing_beta=0.98,
                 save_dir=None, verbose=True):
        """
        # Note
        This requires that the model be trained for exactly 1 epoch. If the
        model is trained for more epochs, the metric calculations are only
        done for the first epoch.

        # Interpretation
        Upon visualizing the loss plot, check where the loss starts to
        increase rapidly. Choose a learning rate somewhat prior to the
        corresponding position in the plot for faster convergence; pass that
        value as `max_lr` to the OneCycleLR callback. Since the plot is in
        log-scale, compute 10 ^ (-k) of the x-axis.

        # Arguments:
            num_samples: Integer. Number of samples in the dataset.
            batch_size: Integer. Batch size during training.
            minimum_lr: Float. Initial learning rate (and the minimum).
            maximum_lr: Float. Final learning rate (and the maximum).
            lr_scale: One of ['exp', 'linear']. 'exp' for a large range,
                'linear' for a small range.
            validation_data: Optional (X, y) tuple. If provided, losses are
                computed on sampled validation batches instead of the
                training batch loss.
            validation_sample_rate: Positive or negative integer. Number of
                validation batches sampled per iteration. If negative, the
                entire validation dataset is used.
            stopping_criterion_factor: Integer or None. Stop logging once the
                running loss exceeds this multiple of the best loss seen.
                If None, the check is not performed.
            loss_smoothing_beta: Float. Smoothing factor for the exponential
                moving average of the loss.
            save_dir: Optional String. Directory in which to save the running
                losses and learning rates as numpy arrays (created if absent).
            verbose: Whether to print the learning rate after every batch.

        # References:
            - https://arxiv.org/abs/1803.09820
        """
        super(LRFinder, self).__init__()
        if lr_scale not in ['exp', 'linear']:
            raise ValueError("`lr_scale` must be one of ['exp', 'linear']")
        if validation_data is not None:
            self.validation_data = validation_data
            self.use_validation_set = True
            if validation_sample_rate != 0:
                self.validation_sample_rate = validation_sample_rate
            else:
                # Fixed typo in the original message ("other than o").
                raise ValueError('`validation_sample_rate` must be a positive or negative integer other than 0')
        else:
            self.use_validation_set = False
            self.validation_sample_rate = 0

        self.num_samples = num_samples
        self.batch_size = batch_size
        self.initial_lr = minimum_lr
        self.final_lr = maximum_lr
        self.lr_scale = lr_scale
        self.stopping_criterion_factor = stopping_criterion_factor
        self.loss_smoothing_beta = loss_smoothing_beta
        self.save_dir = save_dir
        self.verbose = verbose

        self.num_batches_ = num_samples // batch_size
        self.current_lr_ = minimum_lr
        if lr_scale == 'exp':
            # One multiplicative step per batch spans [minimum_lr, maximum_lr].
            self.lr_multiplier_ = (maximum_lr / float(minimum_lr)) ** (1.0 / float(self.num_batches_))
        else:
            extra_batch = int((num_samples % batch_size) != 0)
            self.lr_multiplier_ = np.linspace(minimum_lr, maximum_lr,
                                              num=self.num_batches_ + extra_batch)
        if self.validation_sample_rate < 0:
            # Negative rate means: use the entire validation set.
            self.validation_sample_rate = self.validation_data[0].shape[0] // batch_size

        self.current_batch_ = 0
        self.current_epoch_ = 0
        self.best_loss_ = 1000000.0
        self.running_loss_ = 0.0
        self.history = {}

    def on_train_begin(self, logs=None):
        self.current_epoch_ = 1
        K.set_value(self.model.optimizer.lr, self.initial_lr)
        warnings.simplefilter('ignore')

    def on_epoch_begin(self, epoch, logs=None):
        self.current_batch_ = 0
        if self.current_epoch_ > 1:
            warnings.warn('\n\nLearning rate finder should be used only with a single epoch. Hereafter, the callback will not measure the losses.\n\n')

    def on_batch_begin(self, batch, logs=None):
        self.current_batch_ += 1

    def on_batch_end(self, batch, logs=None):
        if self.current_epoch_ > 1:
            return
        if self.use_validation_set:
            X, Y = self.validation_data[0], self.validation_data[1]
            num_samples = self.batch_size * self.validation_sample_rate
            if num_samples > X.shape[0]:
                num_samples = X.shape[0]
            idx = np.random.choice(X.shape[0], num_samples, replace=False)
            x = X[idx]
            y = Y[idx]
            values = self.model.evaluate(x, y, batch_size=self.batch_size, verbose=False)
            loss = values[0]
        else:
            loss = logs['loss']

        # BUG FIX: the original computed `beta * loss + (1 - beta) * loss`,
        # which equals `loss` (no smoothing at all) and never updated
        # `self.running_loss_`. Maintain a true exponential moving average
        # with bias correction instead.
        self.running_loss_ = (self.loss_smoothing_beta * self.running_loss_
                              + (1.0 - self.loss_smoothing_beta) * loss)
        running_loss = self.running_loss_ / (1.0 - (self.loss_smoothing_beta ** self.current_batch_))

        if (self.current_batch_ > 1 and self.stopping_criterion_factor is not None
                and running_loss > (self.stopping_criterion_factor * self.best_loss_)):
            if self.verbose:
                print((' - LRFinder: Skipping iteration since loss is %d times as large as best loss (%0.4f)'
                       % (self.stopping_criterion_factor, self.best_loss_)))
            return

        if running_loss < self.best_loss_ or self.current_batch_ == 1:
            self.best_loss_ = running_loss

        current_lr = K.get_value(self.model.optimizer.lr)
        self.history.setdefault('running_loss_', []).append(running_loss)
        if self.lr_scale == 'exp':
            self.history.setdefault('log_lrs', []).append(np.log10(current_lr))
        else:
            self.history.setdefault('log_lrs', []).append(current_lr)

        # Advance the learning rate for the next batch.
        if self.lr_scale == 'exp':
            current_lr *= self.lr_multiplier_
        else:
            current_lr = self.lr_multiplier_[self.current_batch_ - 1]
        K.set_value(self.model.optimizer.lr, current_lr)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        if self.verbose:
            if self.use_validation_set:
                print((' - LRFinder: val_loss: %1.4f - lr = %1.8f ' % (values[0], current_lr)))
            else:
                print((' - LRFinder: lr = %1.8f ' % current_lr))

    def on_epoch_end(self, epoch, logs=None):
        if self.save_dir is not None and self.current_epoch_ <= 1:
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)
            losses_path = os.path.join(self.save_dir, 'losses.npy')
            lrs_path = os.path.join(self.save_dir, 'lrs.npy')
            np.save(losses_path, self.losses)
            np.save(lrs_path, self.lrs)
            if self.verbose:
                print(('\tLR Finder : Saved the losses and learning rate values in path : {%s}' % self.save_dir))
        self.current_epoch_ += 1
        warnings.simplefilter('default')

    def plot_schedule(self, clip_beginning=None, clip_endding=None):
        """Plot loss vs learning rate from this callback's recorded history.

        # Arguments:
            clip_beginning: Integer or None. If a positive integer, removes
                that many initial points (large early losses).
            clip_endding: Integer or None. If a negative integer, removes
                that many trailing points (sharp loss increase at high lr).
        """
        try:
            import matplotlib.pyplot as plt
            try:
                plt.style.use('seaborn-white')
            except OSError:
                # 'seaborn-white' was removed in matplotlib >= 3.6; fall back
                # to the default style rather than crashing.
                pass
        except ImportError:
            print('Matplotlib not found. Please use `pip install matplotlib` first.')
            return

        if clip_beginning is not None and clip_beginning < 0:
            clip_beginning = -clip_beginning
        if clip_endding is not None and clip_endding > 0:
            clip_endding = -clip_endding

        losses = self.losses
        lrs = self.lrs
        if clip_beginning:
            losses = losses[clip_beginning:]
            lrs = lrs[clip_beginning:]
        if clip_endding:
            losses = losses[:clip_endding]
            lrs = lrs[:clip_endding]

        plt.plot(lrs, losses)
        plt.title('Learning rate vs Loss')
        plt.xlabel('learning rate')
        plt.ylabel('loss')
        plt.show()

    @classmethod
    def restore_schedule_from_dir(cls, directory, clip_beginning=None, clip_endding=None):
        """Load saved losses/lrs numpy arrays from `directory`.

        # Arguments:
            directory: String. Path where the arrays were saved.
            clip_beginning: Integer or None. If a positive integer, removes
                that many initial points.
            clip_endding: Integer or None. If a negative integer, removes
                that many trailing points.

        Returns:
            tuple of (losses, learning rates), either of which may be None
            if the files were not found.
        """
        if clip_beginning is not None and clip_beginning < 0:
            clip_beginning = -clip_beginning
        if clip_endding is not None and clip_endding > 0:
            clip_endding = -clip_endding

        losses_path = os.path.join(directory, 'losses.npy')
        lrs_path = os.path.join(directory, 'lrs.npy')
        if not os.path.exists(losses_path) or not os.path.exists(lrs_path):
            print(('%s and %s could not be found at directory : {%s}' % (losses_path, lrs_path, directory)))
            losses = None
            lrs = None
        else:
            losses = np.load(losses_path)
            lrs = np.load(lrs_path)
            if clip_beginning:
                losses = losses[clip_beginning:]
                lrs = lrs[clip_beginning:]
            if clip_endding:
                losses = losses[:clip_endding]
                lrs = lrs[:clip_endding]
        return (losses, lrs)

    @classmethod
    def plot_schedule_from_file(cls, directory, clip_beginning=None, clip_endding=None):
        """Plot loss vs learning rate from arrays saved in `directory`.

        # Arguments:
            directory: String. Path where the arrays were saved.
            clip_beginning: Integer or None. If a positive integer, removes
                that many initial points.
            clip_endding: Integer or None. If a negative integer, removes
                that many trailing points.
        """
        try:
            import matplotlib.pyplot as plt
            try:
                plt.style.use('seaborn-white')
            except OSError:
                # See plot_schedule: tolerate removal of the old style name.
                pass
        except ImportError:
            print('Matplotlib not found. Please use `pip install matplotlib` first.')
            return

        losses, lrs = cls.restore_schedule_from_dir(directory,
                                                    clip_beginning=clip_beginning,
                                                    clip_endding=clip_endding)
        if losses is None or lrs is None:
            return
        plt.plot(lrs, losses)
        plt.title('Learning rate vs Loss')
        plt.xlabel('learning rate')
        plt.ylabel('loss')
        plt.show()

    @property
    def lrs(self):
        # Learning rates recorded per batch (log10 when lr_scale == 'exp').
        return np.array(self.history['log_lrs'])

    @property
    def losses(self):
        # Bias-corrected running losses recorded per batch.
        return np.array(self.history['running_loss_'])
class CosineAnnealingScheduler(Callback):
    """Cosine annealing scheduler.

    Anneals the learning rate from `eta_max` to `eta_min` following a cosine
    curve, starting at `epoch_start`, with optional warm restarts
    (`restart_epochs`), peak decay (`gamma`), period expansion (`expansion`)
    and a flat tail held at `eta_min` (`flat_end`).
    """

    def __init__(self, T_max, eta_max, eta_min=0, verbose=0, epoch_start=80,
                 restart_epochs=None, gamma=1, expansion=1, flat_end=False):
        """
        Args:
            T_max: half-period (in epochs) of the cosine schedule.
            eta_max: peak learning rate.
            eta_min: minimum learning rate.
            verbose: if > 0, print the learning rate at each epoch.
            epoch_start: epoch at which annealing begins (lr untouched before).
            restart_epochs: if set, warm-restart period of the cosine cycle.
            gamma: multiplicative decay applied to eta_max at each restart.
            expansion: multiplicative growth applied to T_max at each restart.
            flat_end: hold the lr at eta_min once the schedule completes.
        """
        super(CosineAnnealingScheduler, self).__init__()
        self.epoch_start = epoch_start
        self.expansion = expansion
        self.T_max = T_max
        self.eta_max = eta_max
        self.eta_min = eta_min
        self.verbose = verbose
        self.restart_epochs = restart_epochs
        self.gamma = gamma
        self.flat_end = flat_end

    def on_epoch_begin(self, epoch, logs=None):
        if not hasattr(self.model.optimizer, 'learning_rate'):
            raise ValueError('Optimizer must have a "learning_rate" attribute.')
        if epoch > (self.epoch_start - 1):
            if self.restart_epochs is None:
                # Single cosine decay from (eta_max * gamma) down to eta_min.
                learning_rate = self.eta_min + (
                    ((self.eta_max * self.gamma) - self.eta_min)
                    * (1 + math.cos((math.pi * (epoch - self.epoch_start)) / self.T_max)) / 2)
                K.set_value(self.model.optimizer.learning_rate, learning_rate)
            else:
                # Warm restarts: phase wraps via the modulo of the epoch index.
                learning_rate = self.eta_min + (
                    ((self.eta_max * self.gamma) - self.eta_min)
                    * (1 + math.cos((math.pi * ((epoch % (self.restart_epochs + self.epoch_start)) - self.epoch_start)) / self.T_max)) / 2)
                K.set_value(self.model.optimizer.learning_rate, learning_rate)
                if learning_rate <= self.eta_min:
                    # End of a cycle: decay the peak and stretch the period.
                    self.eta_max *= self.gamma
                    self.T_max *= self.expansion
            if self.flat_end and epoch >= ((self.epoch_start - 1) + self.T_max):
                # BUG FIX: the original referenced the bare name `T_max`
                # (NameError when flat_end was enabled) and only updated the
                # local variable without pushing eta_min to the optimizer.
                learning_rate = self.eta_min
                K.set_value(self.model.optimizer.learning_rate, learning_rate)
        else:
            # Before annealing starts, report the optimizer's current lr.
            learning_rate = self.model.optimizer.learning_rate
        if self.verbose > 0:
            print('\nEpoch %05d: CosineAnnealingScheduler setting learning rate to %s.'
                  % ((epoch + 1), learning_rate))

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        logs['learning_rate'] = K.get_value(self.model.optimizer.learning_rate)
class CyclicLR(Callback):
    """Cyclical learning rate policy (CLR).

    Cycles the learning rate between `base_lr` and `max_lr` with a constant
    frequency, as described in https://arxiv.org/abs/1506.01186. The cycle
    amplitude can be scaled per-iteration or per-cycle.

    Built-in policies:
        "triangular": basic triangular cycle, no amplitude scaling.
        "triangular2": triangular cycle whose amplitude halves each cycle.
        "exp_range": amplitude scaled by gamma**(cycle iterations).

    # Example
    ```python
    clr = CyclicLR(base_lr=0.001, max_lr=0.006,
                   step_size=2000., mode='triangular')
    model.fit(X_train, Y_train, callbacks=[clr])
    ```

    Custom scaling functions are also supported:
    ```python
    clr_fn = lambda x: 0.5*(1+np.sin(x*np.pi/2.))
    clr = CyclicLR(base_lr=0.001, max_lr=0.006,
                   step_size=2000., scale_fn=clr_fn,
                   scale_mode='cycle')
    model.fit(X_train, Y_train, callbacks=[clr])
    ```

    # Arguments
        base_lr: lower boundary of the cycle.
        max_lr: upper boundary; defines the amplitude (max_lr - base_lr).
            May not actually be reached depending on the scaling function.
        step_size: training iterations per half cycle (authors suggest
            2-8x the iterations per epoch).
        mode: one of {triangular, triangular2, exp_range}; ignored when
            scale_fn is given.
        gamma: constant for the 'exp_range' policy.
        scale_fn: custom single-argument scaling function with
            0 <= scale_fn(x) <= 1 for all x >= 0; overrides `mode`.
        scale_mode: {'cycle', 'iterations'} — whether scale_fn is evaluated
            on the cycle number or on iterations since the cycle start.
    """

    def __init__(self, base_lr=0.001, max_lr=0.006, step_size=2000.0,
                 mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle'):
        super(CyclicLR, self).__init__()
        self.base_lr = base_lr
        self.max_lr = max_lr
        self.step_size = step_size
        self.mode = mode
        self.gamma = gamma
        if scale_fn is None:
            # Select a built-in policy; each fixes its own scale_mode.
            if self.mode == 'triangular':
                self.scale_fn = lambda x: 1.0
                self.scale_mode = 'cycle'
            elif self.mode == 'triangular2':
                self.scale_fn = lambda x: 1 / (2.0 ** (x - 1))
                self.scale_mode = 'cycle'
            elif self.mode == 'exp_range':
                self.scale_fn = lambda x: gamma ** x
                self.scale_mode = 'iterations'
        else:
            self.scale_fn = scale_fn
            self.scale_mode = scale_mode
        self.clr_iterations = 0.0
        self.trn_iterations = 0.0
        self.history = {}
        self._reset()

    def _reset(self, new_base_lr=None, new_max_lr=None, new_step_size=None):
        """Reset cycle iterations, optionally adjusting boundaries/step size."""
        if new_base_lr is not None:
            self.base_lr = new_base_lr
        if new_max_lr is not None:
            self.max_lr = new_max_lr
        if new_step_size is not None:
            self.step_size = new_step_size
        self.clr_iterations = 0.0

    def clr(self):
        """Compute the current triangular-wave learning rate."""
        cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
        x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
        scale_arg = cycle if self.scale_mode == 'cycle' else self.clr_iterations
        return self.base_lr + ((self.max_lr - self.base_lr)
                               * np.maximum(0, 1 - x)
                               * self.scale_fn(scale_arg))

    def on_train_begin(self, logs={}):
        logs = logs or {}
        if self.clr_iterations == 0:
            K.set_value(self.model.optimizer.lr, self.base_lr)
        else:
            K.set_value(self.model.optimizer.lr, self.clr())

    def on_batch_end(self, epoch, logs=None):
        logs = logs or {}
        self.trn_iterations += 1
        self.clr_iterations += 1
        self.history.setdefault('lr', []).append(K.get_value(self.model.optimizer.lr))
        self.history.setdefault('iterations', []).append(self.trn_iterations)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
        K.set_value(self.model.optimizer.lr, self.clr())
class LRFinder(Callback):
    """Learning-rate range test callback (Keras port of the Fast.ai LRFinder).

    Sweeps the learning rate from `minimum_lr` to `maximum_lr` over exactly one
    epoch while recording an exponentially smoothed loss. Plot the result and
    pick a learning rate somewhat before the point where the loss starts to
    rise sharply; that value is a good `max_lr` for One-Cycle training.
    Since the plot can be in log-scale, compute 10 ** (-k) of the x-axis value.

    # Note
        This requires that the model be trained for exactly 1 epoch. If the
        model is trained for more epochs, metrics are only recorded for the
        first epoch.

    # Arguments:
        num_samples: Integer. Number of samples in the dataset.
        batch_size: Integer. Batch size during training.
        minimum_lr: Float. Initial learning rate (and the minimum).
        maximum_lr: Float. Final learning rate (and the maximum).
        lr_scale: One of ['exp', 'linear']. Type of scaling for each update
            to the learning rate. Choose 'exp' for a large range and
            'linear' for a small range.
        validation_data: Optional tuple (X, y) from the validation set. If
            provided, the validation set is used to compute the loss metrics;
            otherwise the training batch loss is used.
        validation_sample_rate: Positive or negative integer. Number of
            batches sampled from the validation set per iteration. Larger
            values reduce variance but take longer per batch.
            If positive, samples that many batches; if negative, uses the
            entire validation dataset.
        stopping_criterion_factor: Integer or None. Factor used to detect a
            large increase in the loss during training. Since callbacks
            cannot stop training, logging simply stops once
            loss > factor * best_loss. If None, the check is skipped.
        loss_smoothing_beta: Float. Smoothing factor for the exponential
            moving average of the loss.
        save_dir: Optional String. Directory in which to save the running
            loss and learning rates as numpy arrays (created if missing).
        verbose: Whether to print the learning rate after every batch.

    # References:
        - [A disciplined approach to neural network hyper-parameters: Part 1](https://arxiv.org/abs/1803.09820)
    """

    def __init__(self, num_samples, batch_size,
                 minimum_lr=1e-05, maximum_lr=10.0,
                 lr_scale='exp', validation_data=None,
                 validation_sample_rate=5,
                 stopping_criterion_factor=4.0,
                 loss_smoothing_beta=0.98,
                 save_dir=None, verbose=True):
        super(LRFinder, self).__init__()

        if lr_scale not in ['exp', 'linear']:
            raise ValueError("`lr_scale` must be one of ['exp', 'linear']")

        if validation_data is not None:
            self.validation_data = validation_data
            self.use_validation_set = True
            # BUGFIX: error message said "other than o" (typo for 0); the
            # original condition `> 0 or < 0` is simply `!= 0`.
            if validation_sample_rate != 0:
                self.validation_sample_rate = validation_sample_rate
            else:
                raise ValueError('`validation_sample_rate` must be a positive or negative integer other than 0')
        else:
            self.use_validation_set = False
            self.validation_sample_rate = 0

        self.num_samples = num_samples
        self.batch_size = batch_size
        self.initial_lr = minimum_lr
        self.final_lr = maximum_lr
        self.lr_scale = lr_scale
        self.stopping_criterion_factor = stopping_criterion_factor
        self.loss_smoothing_beta = loss_smoothing_beta
        self.save_dir = save_dir
        self.verbose = verbose

        self.num_batches_ = num_samples // batch_size
        self.current_lr_ = minimum_lr

        if lr_scale == 'exp':
            # Constant multiplicative step so that after num_batches_ steps
            # the lr has moved from minimum_lr to maximum_lr.
            self.lr_multiplier_ = (maximum_lr / float(minimum_lr)) ** (
                1.0 / float(self.num_batches_))
        else:
            # Linear schedule: one lr value per batch (plus one for a
            # possible final partial batch).
            extra_batch = int((num_samples % batch_size) != 0)
            self.lr_multiplier_ = np.linspace(
                minimum_lr, maximum_lr, num=(self.num_batches_ + extra_batch))

        # Negative sample rate means "use the whole validation set".
        if self.validation_sample_rate < 0:
            self.validation_sample_rate = self.validation_data[0].shape[0] // batch_size

        self.current_batch_ = 0
        self.current_epoch_ = 0
        self.best_loss_ = 1000000.0
        self.running_loss_ = 0.0   # raw EMA accumulator of the loss
        self.history = {}

    def on_train_begin(self, logs=None):
        self.current_epoch_ = 1
        K.set_value(self.model.optimizer.lr, self.initial_lr)
        warnings.simplefilter('ignore')

    def on_epoch_begin(self, epoch, logs=None):
        self.current_batch_ = 0
        if self.current_epoch_ > 1:
            warnings.warn('\n\nLearning rate finder should be used only with a single epoch. Hereafter, the callback will not measure the losses.\n\n')

    def on_batch_begin(self, batch, logs=None):
        self.current_batch_ += 1

    def on_batch_end(self, batch, logs=None):
        if self.current_epoch_ > 1:
            return

        if self.use_validation_set:
            X, Y = self.validation_data[0], self.validation_data[1]

            # Evaluate on a random subset of the validation set to keep
            # the per-batch cost bounded.
            num_samples = self.batch_size * self.validation_sample_rate
            if num_samples > X.shape[0]:
                num_samples = X.shape[0]

            idx = np.random.choice(X.shape[0], num_samples, replace=False)
            x = X[idx]
            y = Y[idx]

            values = self.model.evaluate(x, y, batch_size=self.batch_size, verbose=False)
            loss = values[0]
        else:
            loss = logs['loss']

        # Exponentially smoothed loss with bias correction (Fast.ai style).
        # BUGFIX: the previous version computed
        #   beta * loss + (1 - beta) * loss  ==  loss
        # and never read or updated `self.running_loss_`, so the loss was
        # never actually smoothed.
        self.running_loss_ = (self.loss_smoothing_beta * self.running_loss_
                              + (1.0 - self.loss_smoothing_beta) * loss)
        running_loss = self.running_loss_ / (
            1.0 - self.loss_smoothing_beta ** self.current_batch_)

        # Stop recording once the loss explodes relative to the best loss.
        if (self.current_batch_ > 1
                and self.stopping_criterion_factor is not None
                and running_loss > (self.stopping_criterion_factor * self.best_loss_)):
            if self.verbose:
                print(' - LRFinder: Skipping iteration since loss is %d times as large as best loss (%0.4f)'
                      % (self.stopping_criterion_factor, self.best_loss_))
            return

        if running_loss < self.best_loss_ or self.current_batch_ == 1:
            self.best_loss_ = running_loss

        current_lr = K.get_value(self.model.optimizer.lr)

        self.history.setdefault('running_loss_', []).append(running_loss)
        if self.lr_scale == 'exp':
            self.history.setdefault('log_lrs', []).append(np.log10(current_lr))
        else:
            self.history.setdefault('log_lrs', []).append(current_lr)

        # Advance the learning rate for the next batch.
        if self.lr_scale == 'exp':
            current_lr *= self.lr_multiplier_
        else:
            current_lr = self.lr_multiplier_[self.current_batch_ - 1]

        K.set_value(self.model.optimizer.lr, current_lr)

        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)

        if self.verbose:
            if self.use_validation_set:
                print(' - LRFinder: val_loss: %1.4f - lr = %1.8f ' % (values[0], current_lr))
            else:
                print(' - LRFinder: lr = %1.8f ' % current_lr)

    def on_epoch_end(self, epoch, logs=None):
        # Persist the recorded losses / lrs (first epoch only).
        if self.save_dir is not None and self.current_epoch_ <= 1:
            if not os.path.exists(self.save_dir):
                os.makedirs(self.save_dir)

            losses_path = os.path.join(self.save_dir, 'losses.npy')
            lrs_path = os.path.join(self.save_dir, 'lrs.npy')

            np.save(losses_path, self.losses)
            np.save(lrs_path, self.lrs)

            if self.verbose:
                print('\tLR Finder : Saved the losses and learning rate values in path : {%s}' % self.save_dir)

        self.current_epoch_ += 1
        warnings.simplefilter('default')

    def plot_schedule(self, clip_beginning=None, clip_endding=None):
        """Plots the schedule from the callback itself.

        # Arguments:
            clip_beginning: Integer or None. If positive integer, removes the
                specified portion of the beginning of the loss graph (large
                initial losses).
            clip_endding: Integer or None. If negative integer, removes the
                specified portion of the end of the loss graph (sharp loss
                increase at high learning rates).
        """
        try:
            import matplotlib.pyplot as plt
            plt.style.use('seaborn-white')
        except ImportError:
            print('Matplotlib not found. Please use `pip install matplotlib` first.')
            return

        # Normalize clip arguments to the sign the slicing below expects.
        if clip_beginning is not None and clip_beginning < 0:
            clip_beginning = -clip_beginning
        if clip_endding is not None and clip_endding > 0:
            clip_endding = -clip_endding

        losses = self.losses
        lrs = self.lrs

        if clip_beginning:
            losses = losses[clip_beginning:]
            lrs = lrs[clip_beginning:]
        if clip_endding:
            losses = losses[:clip_endding]
            lrs = lrs[:clip_endding]

        plt.plot(lrs, losses)
        plt.title('Learning rate vs Loss')
        plt.xlabel('learning rate')
        plt.ylabel('loss')
        plt.show()

    @classmethod
    def restore_schedule_from_dir(cls, directory, clip_beginning=None, clip_endding=None):
        """Loads the training history from the saved numpy files in the given directory.

        # Arguments:
            directory: String. Path to the directory with the serialized
                numpy arrays of the losses and learning rates.
            clip_beginning: Integer or None. If positive integer, removes the
                specified portion of the beginning of the loss graph.
            clip_endding: Integer or None. If negative integer, removes the
                specified portion of the end of the loss graph.

        Returns:
            tuple of (losses, learning rates)
        """
        if clip_beginning is not None and clip_beginning < 0:
            clip_beginning = -clip_beginning
        if clip_endding is not None and clip_endding > 0:
            clip_endding = -clip_endding

        losses_path = os.path.join(directory, 'losses.npy')
        lrs_path = os.path.join(directory, 'lrs.npy')

        if not os.path.exists(losses_path) or not os.path.exists(lrs_path):
            print('%s and %s could not be found at directory : {%s}'
                  % (losses_path, lrs_path, directory))
            losses = None
            lrs = None
        else:
            losses = np.load(losses_path)
            lrs = np.load(lrs_path)

            if clip_beginning:
                losses = losses[clip_beginning:]
                lrs = lrs[clip_beginning:]
            if clip_endding:
                losses = losses[:clip_endding]
                lrs = lrs[:clip_endding]

        return losses, lrs

    @classmethod
    def plot_schedule_from_file(cls, directory, clip_beginning=None, clip_endding=None):
        """Plots the schedule from saved numpy arrays of the losses and
        learning rates in the specified directory.

        # Arguments:
            directory: String. Path to the directory with the serialized
                numpy arrays of the losses and learning rates.
            clip_beginning: Integer or None. If positive integer, removes the
                specified portion of the beginning of the loss graph.
            clip_endding: Integer or None. If negative integer, removes the
                specified portion of the end of the loss graph.
        """
        try:
            import matplotlib.pyplot as plt
            plt.style.use('seaborn-white')
        except ImportError:
            print('Matplotlib not found. Please use `pip install matplotlib` first.')
            return

        losses, lrs = cls.restore_schedule_from_dir(
            directory, clip_beginning=clip_beginning, clip_endding=clip_endding)

        if losses is None or lrs is None:
            return
        else:
            plt.plot(lrs, losses)
            plt.title('Learning rate vs Loss')
            plt.xlabel('learning rate')
            plt.ylabel('loss')
            plt.show()

    @property
    def lrs(self):
        return np.array(self.history['log_lrs'])

    @property
    def losses(self):
        return np.array(self.history['running_loss_'])
class History(object):
    """Lightweight copy of the log data held by a ``keras.callbacks.History``.

    :param history: a ``keras.callbacks.History`` object or ``None``.
    """

    def __init__(self, history=None):
        # Start from empty logs, then copy from the source object when given.
        self.epoch = [] if history is None else history.epoch
        self.history = {} if history is None else history.history
def concatenate_history(hlist, reindex_epoch=False):
    """Concatenate several training histories (``keras.callbacks.History``) into one ``History``.

    :param hlist: a list of ``keras.callbacks.History`` objects to concatenate.
    :param reindex_epoch: True or False whether to reindex epoch counters to an increasing order.
    :return his: a ``History`` instance with the concatenated epoch list and training logs.
    """
    merged = History()
    for item in hlist:
        merged.epoch.extend(item.epoch)
        # Append each metric's values under its key, creating keys on demand.
        for key, values in item.history.items():
            merged.history.setdefault(key, []).extend(values)
    if reindex_epoch:
        merged.epoch = list(np.arange(0, len(merged.epoch)))
    return merged
def plot_from_history(history):
    """Plot the training (and, when present, validation) loss curves versus epoch.

    :param history: a ``keras.callbacks.History`` or (this module's) ``History`` object.
    """
    assert isinstance(history, (keras.callbacks.History, History)), "history must be a ``keras.callbacks.History`` or (this module's) ``History`` object. "
    epoch = history.epoch
    plt.plot(epoch, history.history['loss'], '.-', label='train')
    # Only plot a validation curve when the model was fit with validation data.
    if 'val_loss' in history.history:
        plt.plot(epoch, history.history['val_loss'], '.-', label='valid')
    plt.xlabel('epoch')
    plt.ylabel('losses')
    plt.legend()
def save_history_to_csv(history, filepath):
    """Save a training history into a csv file.

    Note: adds an ``'epoch'`` column to ``history.history`` in place.

    :param history: a ``History`` callback instance from ``Model`` instance.
    :param filepath: a string filepath.
    """
    records = history.history
    records['epoch'] = history.epoch
    pd.DataFrame.from_dict(records).to_csv(filepath, index=False)
def reset_keras(per_process_gpu_memory_fraction=1.0):
    """Reset Keras session and set GPU configuration as well as collect unused memory.

    This is adapted from [jaycangel's post on fastai forum](https://forums.fast.ai/t/how-could-i-release-gpu-memory-of-keras/2023/18).
    Calling this before any training will clear Keras session. Hence, a Keras model must be redefined and compiled again.
    It can be used during hyperparameter scan or K-fold validation when model training is invoked several times.

    NOTE(review): uses TF1-style APIs (``tf.ConfigProto``, ``tf.Session``) —
    presumably this module targets TF 1.x / compat mode; confirm before reuse.

    :param per_process_gpu_memory_fraction: tensorflow's config.gpu_options.per_process_gpu_memory_fraction
    """
    # Teardown order matters: grab the live session first, detach it from
    # Keras, then close it and reclaim Python-side garbage.
    sess = K.get_session()
    K.clear_session()
    sess.close()
    gc.collect()
    # Build a fresh session with the requested GPU memory cap, pinned to GPU 0.
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction
    config.gpu_options.visible_device_list = '0'
    K.set_session(tf.Session(config=config))
def cuda_release_memory():
    """Force cuda to release GPU memory by closing it.

    :return cuda: numba's cuda module.
    """
    # Guard clause: numba is an optional dependency, so probe for it first.
    if importlib.util.find_spec('numba') is None:
        raise Exception("numba module cannot be found. Can't function before numba module is installed.")
    from numba import cuda
    cuda.select_device(0)
    cuda.close()
    return cuda
def moving_window_avg(x, window=5):
    """Return a moving-window average of ``x``.

    Windows at the start of the array shrink (``min_periods=1``), so the
    output has the same length as the input.

    :param x: a numpy array
    :param window: an integer, number of data points for window size.
    """
    frame = pd.DataFrame(x)
    rolled = frame.rolling(window=window, min_periods=1)
    return rolled.mean().values.squeeze()
def set_momentum(optimizer, mom_val):
    """Helper to set momentum of Keras optimizers.

    Different optimizers name their momentum-like hyperparameter differently
    (e.g. ``momentum``, ``rho``, ``beta_1``), so every matching attribute
    found on the optimizer is assigned.

    :param optimizer: Keras optimizer
    :param mom_val: value of momentum.
    """
    available = dir(optimizer)
    for attr in ('momentum', 'rho', 'beta_1'):
        if attr in available:
            K.set_value(getattr(optimizer, attr), mom_val)
def set_lr(optimizer, lr):
    """Helper to set learning rate of Keras optimizers.

    :param optimizer: Keras optimizer
    :param lr: value of learning rate.
    """
    # Assign through the backend so the backing variable is updated in place.
    K.set_value(optimizer.lr, lr)
def dot_product(x, kernel):
    """Contract the last axis of ``x`` with ``kernel`` (axes=1 tensordot)."""
    result = tf.tensordot(x, kernel, axes=1)
    return result
class Attention(Layer):

    def __init__(self, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, return_attention=False, **kwargs):
        """Keras Layer that implements an Attention mechanism for temporal data.
        Supports Masking.
        Follows the work of Raffel et al. [https://arxiv.org/abs/1512.08756]
        # Input shape
            3D tensor with shape: `(samples, steps, features)`.
        # Output shape
            2D tensor with shape: `(samples, features)`.
        :param W_regularizer: regularizer for the attention weight vector W.
        :param b_regularizer: regularizer for the per-step bias b.
        :param W_constraint: constraint for W.
        :param b_constraint: constraint for b.
        :param bias: whether to add a learned per-timestep bias before tanh.
        :param return_attention: if True, ``call`` returns ``[output, attention_weights]``.
        :param kwargs: passed through to the ``Layer`` base class.
        """
        self.supports_masking = True
        self.return_attention = return_attention
        self.init = initializers.get('zeros')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        super(Attention, self).__init__(**kwargs)

    def get_config(self):
        # NOTE(review): stores live regularizer/constraint/initializer objects
        # rather than their serialized configs — presumably fine for in-process
        # cloning, but verify JSON serialization round-trips.
        config = super().get_config().copy()
        config.update({'supports_masking': self.supports_masking, 'return_attention': self.return_attention, 'init': self.init, 'W_regularizer': self.W_regularizer, 'W_constraint': self.W_constraint, 'b_regularizer': self.b_regularizer, 'b_constraint': self.b_constraint, 'bias': self.bias})
        return config

    def build(self, input_shape):
        # Expects (samples, steps, features).
        assert (len(input_shape) == 3)
        # W: one weight per feature (scores each timestep via a dot product).
        self.W = self.add_weight(shape=(int(input_shape[(- 1)]),), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint)
        if self.bias:
            # b: one bias per timestep (length = steps axis, input_shape[1]).
            self.b = self.add_weight(shape=(int(input_shape[1]),), initializer='zero', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        # The time axis is reduced away, so no mask is propagated downstream.
        return None

    def call(self, x, mask=None):
        # Unnormalized score per timestep: e_ij = tanh(x . W (+ b)).
        eij = dot_product(x, self.W)
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        # Softmax over the steps axis, zeroing masked steps before normalizing.
        a = K.exp(eij)
        if (mask is not None):
            a *= K.cast(mask, K.floatx())
        # K.epsilon() guards against division by zero when all steps are masked.
        a /= K.cast((K.sum(a, axis=1, keepdims=True) + K.epsilon()), K.floatx())
        # Weighted sum over time: (samples, steps, features) -> (samples, features).
        weighted_input = (x * K.expand_dims(a))
        result = K.sum(weighted_input, axis=1)
        if self.return_attention:
            return [result, a]
        return result

    def compute_output_shape(self, input_shape):
        if self.return_attention:
            # [(samples, features), (samples, steps)]
            return [(input_shape[0], input_shape[(- 1)]), (input_shape[0], input_shape[1])]
        else:
            return (input_shape[0], input_shape[(- 1)])
class LayerNormalization(Layer):
    """Implementation of Layer Normalization (https://arxiv.org/abs/1607.06450).

    "Unlike batch normalization, layer normalization performs exactly
    the same computation at training and test times."
    """

    def __init__(self, axis=(- 1), **kwargs):
        self.axis = axis
        super().__init__(**kwargs)

    def get_config(self):
        base = super().get_config()
        base['axis'] = self.axis
        return base

    def build(self, input_shape):
        feature_dim = input_shape[(- 1)]
        # Learnable per-feature scale ("gain") and shift ("bias").
        self.gain = self.add_weight(name='gain', shape=(feature_dim,), initializer='ones', trainable=True)
        self.bias = self.add_weight(name='bias', shape=(feature_dim,), initializer='zeros', trainable=True)
        return super().build(input_shape)

    def call(self, inputs, **kwargs):
        # Normalize each sample over `self.axis`, then apply the affine map.
        mu = K.mean(inputs, axis=self.axis, keepdims=True)
        centered = inputs - mu
        var = K.mean(K.square(centered), axis=self.axis, keepdims=True)
        eps = K.constant(1e-05, dtype=K.floatx())
        normalized = centered / K.sqrt(var + eps)
        return (self.gain * normalized) + self.bias
class FocalLoss(tf.keras.losses.Loss):

    def __init__(self, gamma=2.0, alpha=4.0, name='focal_loss', reduction='auto'):
        """Focal loss for multi-classification
        FL(p_t)=-alpha(1-p_t)^{gamma}ln(p_t)
        Notice: y_pred is probability after softmax
        gradient is d(Fl)/d(p_t) not d(Fl)/d(x) as described in paper
        d(Fl)/d(p_t) * [p_t(1-p_t)] = d(Fl)/d(x)
        Focal Loss for Dense Object Detection
        https://arxiv.org/abs/1708.02002

        Keyword Arguments:
            gamma {float} -- focusing parameter; down-weights easy examples (default: {2.0})
            alpha {float} -- global scaling factor on the loss (default: {4.0})
        """
        super(FocalLoss, self).__init__(reduction=reduction, name=name)
        self.gamma = float(gamma)
        self.alpha = float(alpha)

    def call(self, y_true, y_pred):
        """
        Arguments:
            y_true {tensor} -- ground truth labels, shape of [batch_size, num_cls]
            y_pred {tensor} -- model's output, shape of [batch_size, num_cls]

        Returns:
            [tensor] -- loss.
        """
        # Small constant added to probabilities so log(0) cannot occur.
        epsilon = 1e-09
        y_true = tf.convert_to_tensor(y_true, tf.float32)
        y_pred = tf.convert_to_tensor(y_pred, tf.float32)
        model_out = tf.add(y_pred, epsilon)
        # Per-class cross-entropy term, masked by the one-hot labels.
        ce = tf.multiply(y_true, (- tf.math.log(model_out)))
        # Focal modulation: (1 - p)^gamma emphasizes poorly-predicted classes.
        weight = tf.multiply(y_true, tf.pow(tf.subtract(1.0, model_out), self.gamma))
        fl = tf.multiply(self.alpha, tf.multiply(weight, ce))
        # NOTE(review): reduces over the class axis with reduce_max rather than
        # reduce_sum. For one-hot y_true both pick out the true-class term, but
        # for soft/multi-label targets they differ — confirm this is intended.
        reduced_fl = tf.reduce_max(fl, axis=1)
        return tf.reduce_mean(reduced_fl)
class LDAMLoss(tf.keras.losses.Loss):
    """Label-Distribution-Aware Margin (LDAM) loss — margin setup only.

    From "Learning Imbalanced Datasets with Label-Distribution-Aware Margin
    Loss" (https://arxiv.org/abs/1906.07413). Per-class margins are set
    proportional to ``n_j ** (-1/4)`` and rescaled so the largest margin
    equals ``max_m``.

    :param cls_num_list: per-class sample counts ``n_j``.
    :param max_m: maximum per-class margin after rescaling.
    :param weight: optional per-class weights (stored, currently unused).
    :param s: logit scaling factor; must be positive.
    :param reduction: a ``tf.keras.losses.Reduction`` value.
    :param name: loss name.
    """

    # BUGFIX: default was `tf.keras.loses.Reduction.AUTO` (typo: "loses"),
    # which raises AttributeError as soon as the class body is evaluated.
    def __init__(self, cls_num_list, max_m=0.5, weight=None, s=30, reduction=tf.keras.losses.Reduction.AUTO, name='LDAM'):
        super().__init__(reduction=reduction, name=name)
        # Margin per class: 1 / n_j^(1/4).
        m_list = (1.0 / np.sqrt(np.sqrt(cls_num_list)))
        # BUGFIX: original code did `m_list *= max_m(m_list.max())`, calling the
        # float `max_m`. The reference implementation rescales multiplicatively
        # so that the largest margin equals max_m.
        m_list = m_list * (max_m / m_list.max())
        self.m_list = tf.convert_to_tensor(m_list, tf.float32)
        assert (s > 0)
        self.s = s
        self.w = weight

    def __call__(self, y_true, y_pred):
        # NOTE(review): this implementation is unfinished — it only casts its
        # inputs and implicitly returns None. The margin-adjusted cross-entropy
        # (subtracting m_list from the true-class logits, scaling by s) still
        # needs to be implemented before this loss is usable.
        y_true = tf.convert_to_tensor(y_true, tf.float32)
        y_pred = tf.convert_to_tensor(y_pred, tf.float32)
class Lookahead(tf.keras.optimizers.Optimizer):
    """This class allows to extend optimizers with the lookahead mechanism.
    The mechanism is proposed by Michael R. Zhang et.al in the paper
    [Lookahead Optimizer: k steps forward, 1 step back]
    (https://arxiv.org/abs/1907.08610v1). The optimizer iteratively updates two
    sets of weights: the search directions for weights are chosen by the inner
    optimizer, while the "slow weights" are updated each `k` steps based on the
    directions of the "fast weights" and the two sets of weights are
    synchronized. This method improves the learning stability and lowers the
    variance of its inner optimizer.
    Example of usage:
    ```python
    opt = tf.keras.optimizers.SGD(learning_rate)
    opt = tfa.optimizers.Lookahead(opt)
    ```
    """

    @typechecked
    def __init__(self, optimizer: Union[(tf.keras.optimizers.Optimizer, str)], sync_period: int = 6, slow_step_size: FloatTensorLike = 0.5, name: str = 'Lookahead', **kwargs):
        """Wrap optimizer with the lookahead mechanism.
        Args:
            optimizer: The original optimizer that will be used to compute
                and apply the gradients.
            sync_period: An integer. The synchronization period of lookahead.
                Enable lookahead mechanism by setting it with a positive value.
            slow_step_size: A floating point value.
                The ratio for updating the slow weights.
            name: Optional name for the operations created when applying
                gradients. Defaults to "Lookahead".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`,
                `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
                by norm; `clipvalue` is clip gradients by value, `decay` is
                included for backward compatibility to allow time inverse
                decay of learning rate. `lr` is included for backward
                compatibility, recommended to use `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        # Accept optimizer identifiers ("sgd") as well as instances.
        if isinstance(optimizer, str):
            optimizer = tf.keras.optimizers.get(optimizer)
        if (not isinstance(optimizer, tf.keras.optimizers.Optimizer)):
            raise TypeError('optimizer is not an object of tf.keras.optimizers.Optimizer')
        self._optimizer = optimizer
        self._set_hyper('sync_period', sync_period)
        self._set_hyper('slow_step_size', slow_step_size)
        self._initialized = False

    def _create_slots(self, var_list):
        # Inner optimizer gets its own slots; one extra 'slow' slot per
        # variable holds the slow-weight copy.
        self._optimizer._create_slots(var_list=var_list)
        for var in var_list:
            self.add_slot(var, 'slow')

    def _create_hypers(self):
        self._optimizer._create_hypers()

    def _prepare(self, var_list):
        return self._optimizer._prepare(var_list=var_list)

    def apply_gradients(self, grads_and_vars, name=None):
        # Share the step counter with the inner optimizer so both advance together.
        self._optimizer._iterations = self.iterations
        return super().apply_gradients(grads_and_vars, name)

    def _init_op(self, var):
        # On the very first step, seed the slow weights from the fast weights.
        slow_var = self.get_slot(var, 'slow')
        return slow_var.assign(tf.where(tf.equal(self.iterations, tf.constant(0, dtype=self.iterations.dtype)), var, slow_var), use_locking=self._use_locking)

    def _look_ahead_op(self, var):
        # Every `sync_period` steps: slow += alpha * (fast - slow), then
        # copy the slow weights back into the fast weights.
        var_dtype = var.dtype.base_dtype
        slow_var = self.get_slot(var, 'slow')
        local_step = tf.cast((self.iterations + 1), tf.dtypes.int64)
        sync_period = self._get_hyper('sync_period', tf.dtypes.int64)
        slow_step_size = self._get_hyper('slow_step_size', var_dtype)
        step_back = (slow_var + (slow_step_size * (var - slow_var)))
        sync_cond = tf.equal((tf.math.floordiv(local_step, sync_period) * sync_period), local_step)
        with tf.control_dependencies([step_back]):
            slow_update = slow_var.assign(tf.where(sync_cond, step_back, slow_var), use_locking=self._use_locking)
            var_update = var.assign(tf.where(sync_cond, step_back, var), use_locking=self._use_locking)
        return tf.group(slow_update, var_update)

    @property
    def weights(self):
        return (self._weights + self._optimizer.weights)

    def _resource_apply_dense(self, grad, var):
        # Order matters: seed slow weights -> inner optimizer step -> sync.
        init_op = self._init_op(var)
        with tf.control_dependencies([init_op]):
            train_op = self._optimizer._resource_apply_dense(grad, var)
            with tf.control_dependencies([train_op]):
                look_ahead_op = self._look_ahead_op(var)
        return tf.group(init_op, train_op, look_ahead_op)

    def _resource_apply_sparse(self, grad, var, indices):
        init_op = self._init_op(var)
        with tf.control_dependencies([init_op]):
            train_op = self._optimizer._resource_apply_sparse(grad, var, indices)
            with tf.control_dependencies([train_op]):
                look_ahead_op = self._look_ahead_op(var)
        return tf.group(init_op, train_op, look_ahead_op)

    def get_config(self):
        config = {'optimizer': tf.keras.optimizers.serialize(self._optimizer), 'sync_period': self._serialize_hyperparameter('sync_period'), 'slow_step_size': self._serialize_hyperparameter('slow_step_size')}
        base_config = super().get_config()
        return {**base_config, **config}

    @property
    def learning_rate(self):
        # Delegate the learning rate to the wrapped optimizer.
        return self._optimizer._get_hyper('learning_rate')

    @learning_rate.setter
    def learning_rate(self, learning_rate):
        self._optimizer._set_hyper('learning_rate', learning_rate)

    @property
    def lr(self):
        # `lr` is a backward-compatible alias for `learning_rate`.
        return self.learning_rate

    @lr.setter
    def lr(self, lr):
        self.learning_rate = lr

    @classmethod
    def from_config(cls, config, custom_objects=None):
        optimizer = tf.keras.optimizers.deserialize(config.pop('optimizer'), custom_objects=custom_objects)
        return cls(optimizer, **config)
class NovoGrad(tf.keras.optimizers.Optimizer):
    """The NovoGrad Optimizer was first proposed in [Stochastic Gradient
    Methods with Layerwise Adaptvie Moments for training of Deep
    Networks](https://arxiv.org/pdf/1905.11286.pdf)
    NovoGrad is a first-order SGD-based algorithm, which computes second
    moments per layer instead of per weight as in Adam. Compared to Adam,
    NovoGrad takes less memory, and has been found to be more numerically
    stable. More specifically we compute (for more information on the
    computation please refer to this
    [link](https://nvidia.github.io/OpenSeq2Seq/html/optimizers.html):
    Second order moment = exponential moving average of Layer-wise square
    of grads:
        v_t <-- beta_2 * v_{t-1} + (1-beta_2) * (g_t)^2
    First order moment in one of four modes:
        1. moment of grads normalized by v_t:
            m_t <- beta_1 * m_{t-1} + [ g_t / (sqrt(v_t)+epsilon)]
        2. moment similar to Adam: exponential moving average of grads
           normalized by v_t (set grad_averaging = True to use this):
            m_t <- beta_1 * m_{t-1} +
                   [(1 - beta_1) * (g_t / (sqrt(v_t) + epsilon))]
        3. weight decay adds a w_d term after grads are rescaled by
           1/sqrt(v_t) (set weight_decay > 0 to use this0:
            m_t <- beta_1 * m_{t-1} +
                   [(g_t / (sqrt(v_t) + epsilon)) + (w_d * w_{t-1})]
        4. weight decay + exponential moving average from Adam:
            m_t <- beta_1 * m_{t-1} +
                   [(1 - beta_1) * ((g_t / (sqrt(v_t + epsilon)) +
                   (w_d * w_{t-1}))]
    Weight update:
        w_t <- w_{t-1} - lr_t * m_t
    Example of usage:
    ```python
    opt = tfa.optimizers.NovoGrad(
        lr=1e-3,
        beta_1=0.9,
        beta_2=0.999,
        weight_decay=0.001,
        grad_averaging=False
    )
    ```
    """

    @typechecked
    def __init__(self, learning_rate: Union[(FloatTensorLike, Callable)] = 0.001, beta_1: FloatTensorLike = 0.9, beta_2: FloatTensorLike = 0.999, epsilon: FloatTensorLike = 1e-07, weight_decay: FloatTensorLike = 0.0, grad_averaging: bool = False, amsgrad: bool = False, name: str = 'NovoGrad', **kwargs):
        """Construct a new NovoGrad optimizer.
        Args:
            learning_rate: A `Tensor` or a floating point value. or a schedule
                that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
                The learning rate.
            beta_1: A float value or a constant float tensor.
                The exponential decay rate for the 1st moment estimates.
            beta_2: A float value or a constant float tensor.
                The exponential decay rate for the 2nd moment estimates.
            epsilon: A small constant for numerical stability.
            weight_decay: A floating point value. Weight decay for each param.
            grad_averaging: determines whether to use Adam style exponential
                moving averaging for the first order moments.
            amsgrad: boolean. Whether to keep the running maximum of the
                second moments ("AMSGrad" variant).
            name: Optional name for the operations created when applying
                gradients.
            **kwargs: keyword arguments. Allowed to be {`clipnorm`,
                `clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
                by norm; `clipvalue` is clip gradients by value, `decay` is
                included for backward compatibility to allow time inverse
                decay of learning rate. `lr` is included for backward
                compatibility, recommended to use `learning_rate` instead.
        """
        super().__init__(name, **kwargs)
        if (weight_decay < 0.0):
            raise ValueError('Weight decay rate cannot be negative')
        # Legacy `lr` kwarg takes precedence over `learning_rate` when given.
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self._set_hyper('weight_decay', weight_decay)
        self._set_hyper('grad_averaging', grad_averaging)
        self.amsgrad = amsgrad
        self.epsilon = (epsilon or tf.keras.backend.epsilon())

    def _create_slots(self, var_list):
        # 'm': per-weight first moment; 'v': per-LAYER (scalar) second moment.
        for var in var_list:
            self.add_slot(var=var, slot_name='m', initializer='zeros')
        for var in var_list:
            self.add_slot(var=var, slot_name='v', initializer=tf.zeros(shape=[], dtype=var.dtype))
        if self.amsgrad:
            # 'vhat': running maximum of v for the AMSGrad variant.
            for var in var_list:
                self.add_slot(var, 'vhat')

    def _prepare_local(self, var_device, var_dtype, apply_state):
        # Cache per-(device, dtype) constants used by the apply ops.
        super()._prepare_local(var_device, var_dtype, apply_state)
        beta_1_t = tf.identity(self._get_hyper('beta_1', var_dtype))
        beta_2_t = tf.identity(self._get_hyper('beta_2', var_dtype))
        apply_state[(var_device, var_dtype)].update(dict(epsilon=tf.convert_to_tensor(self.epsilon, var_dtype), beta_1_t=beta_1_t, beta_2_t=beta_2_t, one_minus_beta_2_t=(1 - beta_2_t), one_minus_beta_1_t=(1 - beta_1_t)))

    def set_weights(self, weights):
        # Accept checkpoints from optimizers that stored an extra slot set
        # (e.g. vhat) by truncating to this optimizer's parameter count.
        params = self.weights
        num_vars = int(((len(params) - 1) / 2))
        if (len(weights) == ((3 * num_vars) + 1)):
            weights = weights[:len(params)]
        super().set_weights(weights)

    def _resource_apply_dense(self, grad, var, apply_state=None):
        (var_device, var_dtype) = (var.device, var.dtype.base_dtype)
        coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype))
        weight_decay = self._get_hyper('weight_decay')
        grad_averaging = self._get_hyper('grad_averaging')
        v = self.get_slot(var, 'v')
        # Layer-wise second moment: EMA of the squared gradient norm.
        g_2 = tf.reduce_sum(tf.square(tf.cast(grad, tf.float32)))
        # First iteration seeds v with g^2 instead of decaying from zero.
        v_t = tf.cond(tf.equal(self.iterations, 0), (lambda : g_2), (lambda : ((v * coefficients['beta_2_t']) + (g_2 * coefficients['one_minus_beta_2_t']))))
        v_t = v.assign(v_t, use_locking=self._use_locking)
        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = vhat.assign(tf.maximum(vhat, v_t), use_locking=self._use_locking)
            grad = (grad / (tf.sqrt(vhat_t) + self.epsilon))
        else:
            grad = (grad / (tf.sqrt(v_t) + self.epsilon))
        # Decoupled weight decay is applied after gradient normalization.
        grad = tf.cond(tf.greater(weight_decay, 0), (lambda : (grad + (weight_decay * var))), (lambda : grad))
        # Adam-style averaging of the first moment (skipped on step 0).
        grad = tf.cond(tf.logical_and(grad_averaging, tf.not_equal(self.iterations, 0)), (lambda : (grad * coefficients['one_minus_beta_1_t'])), (lambda : grad))
        m = self.get_slot(var, 'm')
        # Momentum update + weight step delegated to the fused kernel.
        return training_ops.resource_apply_keras_momentum(var.handle, m.handle, coefficients['lr_t'], grad, coefficients['beta_1_t'], use_locking=self._use_locking, use_nesterov=False)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        # Mirrors _resource_apply_dense, but gathers the gradient rows by
        # `indices` for the fused sparse momentum kernel.
        (var_device, var_dtype) = (var.device, var.dtype.base_dtype)
        coefficients = ((apply_state or {}).get((var_device, var_dtype)) or self._fallback_apply_state(var_device, var_dtype))
        weight_decay = self._get_hyper('weight_decay')
        grad_averaging = self._get_hyper('grad_averaging')
        v = self.get_slot(var, 'v')
        g_2 = tf.reduce_sum(tf.square(tf.cast(grad, tf.float32)))
        v_t = tf.cond(tf.equal(self.iterations, 0), (lambda : g_2), (lambda : ((v * coefficients['beta_2_t']) + (g_2 * coefficients['one_minus_beta_2_t']))))
        v_t = v.assign(v_t, use_locking=self._use_locking)
        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = vhat.assign(tf.maximum(vhat, v_t), use_locking=self._use_locking)
            grad = (grad / (tf.sqrt(vhat_t) + self.epsilon))
        else:
            grad = (grad / (tf.sqrt(v_t) + self.epsilon))
        grad = tf.cond(tf.greater(weight_decay, 0), (lambda : (grad + (weight_decay * var))), (lambda : grad))
        grad = tf.cond(tf.logical_and(grad_averaging, tf.not_equal(self.iterations, 0)), (lambda : (grad * coefficients['one_minus_beta_1_t'])), (lambda : grad))
        m = self.get_slot(var, 'm')
        return training_ops.resource_sparse_apply_keras_momentum(var.handle, m.handle, coefficients['lr_t'], tf.gather(grad, indices), indices, coefficients['beta_1_t'], use_locking=self._use_locking, use_nesterov=False)

    def get_config(self):
        config = super().get_config()
        config.update({'learning_rate': self._serialize_hyperparameter('learning_rate'), 'beta_1': self._serialize_hyperparameter('beta_1'), 'beta_2': self._serialize_hyperparameter('beta_2'), 'epsilon': self.epsilon, 'weight_decay': self._serialize_hyperparameter('weight_decay'), 'grad_averaging': self._serialize_hyperparameter('grad_averaging')})
        return config
def Ranger(sync_period=6, slow_step_size=0.5, learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, weight_decay=0.0, amsgrad=False, sma_threshold=5.0, total_steps=0, warmup_proportion=0.1, min_lr=0.0, name='Ranger'):
    """Build a Ranger optimizer (RAdam wrapped in Lookahead).

    Ranger combines RAdam (https://arxiv.org/abs/1908.03265) with the
    Lookahead wrapper (https://arxiv.org/abs/1907.08610). The returned
    object is a ``tf.keras.optimizers.Optimizer`` and can be passed
    directly to ``model.compile``.

    Args:
        sync_period: Lookahead synchronization period (steps between slow
            weight updates).
        slow_step_size: Lookahead interpolation factor towards the fast
            weights (0 <= slow_step_size <= 1).
        learning_rate: step size for the inner RAdam optimizer.
        beta_1: exponential decay rate for the 1st-moment estimate.
        beta_2: exponential decay rate for the 2nd-moment estimate.
        epsilon: small constant stabilizing the variance division.
        weight_decay: per-step multiplicative weight decay.
        amsgrad: whether the inner RAdam uses the AMSGrad variant.
        sma_threshold: RAdam simple-moving-average threshold.
        total_steps: total number of training steps (enables warmup when > 0).
        warmup_proportion: fraction of steps used to ramp the lr up from
            ``min_lr`` (0 <= warmup_proportion <= 1).
        min_lr: learning rate at the start of warmup / end of decay.
        name: name for the returned optimizer.

    Returns:
        A Lookahead-wrapped RectifiedAdam optimizer.
    """
    radam = RectifiedAdam(learning_rate, beta_1, beta_2, epsilon, weight_decay, amsgrad, sma_threshold, total_steps, warmup_proportion, min_lr, name)
    return Lookahead(radam, sync_period, slow_step_size, name)
class RectifiedAdam(tf.keras.optimizers.Optimizer):
    """Rectified Adam (RAdam) optimizer.

    Variant of Adam whose adaptive learning rate is rectified so as to have
    a consistent variance, as proposed by Liyuan Liu et al. in
    [On The Variance Of The Adaptive Learning Rate And Beyond]
    (https://arxiv.org/pdf/1908.03265v1.pdf).

    Features visible in this implementation:
      * ``amsgrad`` — tracks the running maximum of the second moment in a
        ``vhat`` slot (not part of the original paper; use with caution).
      * linear warmup/decay — enabled when ``total_steps > 0``: the lr rises
        linearly over ``total_steps * warmup_proportion`` steps, then decays
        linearly towards ``min_lr`` over the remaining steps.
      * decoupled ``weight_decay`` added to the update direction.

    Wrapping this optimizer in a Lookahead wrapper gives the combination
    commonly called "Ranger".
    """

    @typechecked
    def __init__(self, learning_rate: Union[(FloatTensorLike, Callable)]=0.001, beta_1: FloatTensorLike=0.9, beta_2: FloatTensorLike=0.999, epsilon: FloatTensorLike=1e-07, weight_decay: FloatTensorLike=0.0, amsgrad: bool=False, sma_threshold: FloatTensorLike=5.0, total_steps: int=0, warmup_proportion: FloatTensorLike=0.1, min_lr: FloatTensorLike=0.0, name: str='RectifiedAdam', **kwargs):
        """Construct a new RAdam optimizer.

        Args:
            learning_rate: float, tensor, or LearningRateSchedule; step size.
            beta_1: exponential decay rate for the 1st-moment estimates.
            beta_2: exponential decay rate for the 2nd-moment estimates.
            epsilon: small constant for numerical stability; falls back to
                ``tf.keras.backend.epsilon()`` when falsy.
            weight_decay: decoupled weight decay applied per parameter.
            amsgrad: whether to apply the AMSGrad variant.
            sma_threshold: threshold on the simple-moving-average length;
                below it the unrectified momentum-only update is used.
            total_steps: total number of training steps; a positive value
                enables the warmup/decay schedule.
            warmup_proportion: proportion of increasing-lr steps.
            min_lr: minimum learning rate after warmup.
            name: optional name for the ops created when applying gradients.
            **kwargs: standard Keras optimizer kwargs ({`clipnorm`,
                `clipvalue`, `lr`, `decay`}); `lr` is a backward-compatible
                alias for `learning_rate`.
        """
        super().__init__(name, **kwargs)
        # 'lr' kwarg (legacy spelling) takes precedence over learning_rate.
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('beta_1', beta_1)
        self._set_hyper('beta_2', beta_2)
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('weight_decay', weight_decay)
        self._set_hyper('sma_threshold', sma_threshold)
        self._set_hyper('total_steps', float(total_steps))
        self._set_hyper('warmup_proportion', warmup_proportion)
        self._set_hyper('min_lr', min_lr)
        self.epsilon = (epsilon or tf.keras.backend.epsilon())
        self.amsgrad = amsgrad
        # Python-level copies used for cheap > 0 checks in the apply methods.
        self._initial_weight_decay = weight_decay
        self._initial_total_steps = total_steps

    def _create_slots(self, var_list):
        # Slots: m (1st moment), v (2nd moment), and vhat (running max of v)
        # only when AMSGrad is enabled.
        for var in var_list:
            self.add_slot(var, 'm')
        for var in var_list:
            self.add_slot(var, 'v')
        if self.amsgrad:
            for var in var_list:
                self.add_slot(var, 'vhat')

    def set_weights(self, weights):
        params = self.weights
        # NOTE(review): presumably trims trailing vhat slots when restoring
        # weights saved by an AMSGrad run into a non-AMSGrad instance
        # (len == 3*num_vars + 1 means iteration counter + m + v + vhat) —
        # TODO confirm against the checkpoint format actually used.
        num_vars = int(((len(params) - 1) / 2))
        if (len(weights) == ((3 * num_vars) + 1)):
            weights = weights[:len(params)]
        super().set_weights(weights)

    def _resource_apply_dense(self, grad, var):
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        m = self.get_slot(var, 'm')
        v = self.get_slot(var, 'v')
        beta_1_t = self._get_hyper('beta_1', var_dtype)
        beta_2_t = self._get_hyper('beta_2', var_dtype)
        epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
        local_step = tf.cast((self.iterations + 1), var_dtype)
        beta_1_power = tf.pow(beta_1_t, local_step)
        beta_2_power = tf.pow(beta_2_t, local_step)
        if (self._initial_total_steps > 0):
            # Linear warmup for warmup_steps, then linear decay towards min_lr.
            total_steps = self._get_hyper('total_steps', var_dtype)
            warmup_steps = (total_steps * self._get_hyper('warmup_proportion', var_dtype))
            min_lr = self._get_hyper('min_lr', var_dtype)
            decay_steps = tf.maximum((total_steps - warmup_steps), 1)
            decay_rate = ((min_lr - lr_t) / decay_steps)
            lr_t = tf.where((local_step <= warmup_steps), (lr_t * (local_step / warmup_steps)), (lr_t + (decay_rate * tf.minimum((local_step - warmup_steps), decay_steps))))
        # sma_inf: asymptotic length of the simple moving average (paper eq. 8);
        # sma_t: its value at the current step.
        sma_inf = ((2.0 / (1.0 - beta_2_t)) - 1.0)
        sma_t = (sma_inf - (((2.0 * local_step) * beta_2_power) / (1.0 - beta_2_power)))
        # Bias-corrected first moment.
        m_t = m.assign(((beta_1_t * m) + ((1.0 - beta_1_t) * grad)), use_locking=self._use_locking)
        m_corr_t = (m_t / (1.0 - beta_1_power))
        v_t = v.assign(((beta_2_t * v) + ((1.0 - beta_2_t) * tf.square(grad))), use_locking=self._use_locking)
        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = vhat.assign(tf.maximum(vhat, v_t), use_locking=self._use_locking)
            v_corr_t = tf.sqrt((vhat_t / (1.0 - beta_2_power)))
        else:
            vhat_t = None
            v_corr_t = tf.sqrt((v_t / (1.0 - beta_2_power)))
        # Variance rectification term (paper eq. 10).
        r_t = tf.sqrt(((((((sma_t - 4.0) / (sma_inf - 4.0)) * (sma_t - 2.0)) / (sma_inf - 2.0)) * sma_inf) / sma_t))
        sma_threshold = self._get_hyper('sma_threshold', var_dtype)
        # Use the rectified adaptive step when the SMA is long enough,
        # otherwise fall back to the plain (momentum-only) update.
        var_t = tf.where((sma_t >= sma_threshold), ((r_t * m_corr_t) / (v_corr_t + epsilon_t)), m_corr_t)
        if (self._initial_weight_decay > 0.0):
            var_t += (self._get_hyper('weight_decay', var_dtype) * var)
        var_update = var.assign_sub((lr_t * var_t), use_locking=self._use_locking)
        updates = [var_update, m_t, v_t]
        if self.amsgrad:
            updates.append(vhat_t)
        return tf.group(*updates)

    def _resource_apply_sparse(self, grad, var, indices):
        # Sparse mirror of _resource_apply_dense: slot decay is applied
        # densely, then the gradient contribution is scattered into the
        # touched rows only.
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta_1_t = self._get_hyper('beta_1', var_dtype)
        beta_2_t = self._get_hyper('beta_2', var_dtype)
        epsilon_t = tf.convert_to_tensor(self.epsilon, var_dtype)
        local_step = tf.cast((self.iterations + 1), var_dtype)
        beta_1_power = tf.pow(beta_1_t, local_step)
        beta_2_power = tf.pow(beta_2_t, local_step)
        if (self._initial_total_steps > 0):
            # Same warmup/decay schedule as the dense path.
            total_steps = self._get_hyper('total_steps', var_dtype)
            warmup_steps = (total_steps * self._get_hyper('warmup_proportion', var_dtype))
            min_lr = self._get_hyper('min_lr', var_dtype)
            decay_steps = tf.maximum((total_steps - warmup_steps), 1)
            decay_rate = ((min_lr - lr_t) / decay_steps)
            lr_t = tf.where((local_step <= warmup_steps), (lr_t * (local_step / warmup_steps)), (lr_t + (decay_rate * tf.minimum((local_step - warmup_steps), decay_steps))))
        sma_inf = ((2.0 / (1.0 - beta_2_t)) - 1.0)
        sma_t = (sma_inf - (((2.0 * local_step) * beta_2_power) / (1.0 - beta_2_power)))
        m = self.get_slot(var, 'm')
        m_scaled_g_values = (grad * (1 - beta_1_t))
        # Decay the full m slot first, then scatter-add the new gradient part.
        m_t = m.assign((m * beta_1_t), use_locking=self._use_locking)
        with tf.control_dependencies([m_t]):
            m_t = self._resource_scatter_add(m, indices, m_scaled_g_values)
        m_corr_t = (m_t / (1.0 - beta_1_power))
        v = self.get_slot(var, 'v')
        v_scaled_g_values = ((grad * grad) * (1 - beta_2_t))
        v_t = v.assign((v * beta_2_t), use_locking=self._use_locking)
        with tf.control_dependencies([v_t]):
            v_t = self._resource_scatter_add(v, indices, v_scaled_g_values)
        if self.amsgrad:
            vhat = self.get_slot(var, 'vhat')
            vhat_t = vhat.assign(tf.maximum(vhat, v_t), use_locking=self._use_locking)
            v_corr_t = tf.sqrt((vhat_t / (1.0 - beta_2_power)))
        else:
            vhat_t = None
            v_corr_t = tf.sqrt((v_t / (1.0 - beta_2_power)))
        r_t = tf.sqrt(((((((sma_t - 4.0) / (sma_inf - 4.0)) * (sma_t - 2.0)) / (sma_inf - 2.0)) * sma_inf) / sma_t))
        sma_threshold = self._get_hyper('sma_threshold', var_dtype)
        var_t = tf.where((sma_t >= sma_threshold), ((r_t * m_corr_t) / (v_corr_t + epsilon_t)), m_corr_t)
        if (self._initial_weight_decay > 0.0):
            var_t += (self._get_hyper('weight_decay', var_dtype) * var)
        with tf.control_dependencies([var_t]):
            # Only the touched rows of var are updated.
            var_update = self._resource_scatter_add(var, indices, tf.gather(((- lr_t) * var_t), indices))
        updates = [var_update, m_t, v_t]
        if self.amsgrad:
            updates.append(vhat_t)
        return tf.group(*updates)

    def get_config(self):
        """Return a serializable config for optimizer round-tripping."""
        config = super().get_config()
        config.update({
            'learning_rate': self._serialize_hyperparameter('learning_rate'),
            'beta_1': self._serialize_hyperparameter('beta_1'),
            'beta_2': self._serialize_hyperparameter('beta_2'),
            'decay': self._serialize_hyperparameter('decay'),
            'weight_decay': self._serialize_hyperparameter('weight_decay'),
            'sma_threshold': self._serialize_hyperparameter('sma_threshold'),
            'epsilon': self.epsilon,
            'amsgrad': self.amsgrad,
            'total_steps': self._serialize_hyperparameter('total_steps'),
            'warmup_proportion': self._serialize_hyperparameter('warmup_proportion'),
            'min_lr': self._serialize_hyperparameter('min_lr')})
        return config
class rec_optimizer(Optimizer):
    # NOTE(review): unimplemented stub. The constructor accepts layer/node
    # counts but stores nothing and never calls super().__init__(), so an
    # instance is not a functional Optimizer — confirm whether this
    # placeholder is still needed before extending or removing it.
    def __init__(self, layers=2, nodes=20):
        # layers/nodes are currently unused placeholders.
        pass
def _solve(a, b, c):
    """Return the closed-form solution of a quadratic-plus-L1 minimization.

    Solves, element-wise:
        f(a, b, c) = argmin_w { 1/2 * a * w^2 + b * w + c * |w| }
    whose optimum is:
        w* = -(b - sign(b) * c) / a   if |b| > c,   else   w* = 0

    REQUIRES: `a` and `b` have the same dimensionality.

    Args:
        a: A Tensor (quadratic coefficient).
        b: A Tensor (linear coefficient).
        c: A Tensor with one element (L1 strength).

    Returns:
        A Tensor `w` solving the equation above.
    """
    soft = ((c * tf.sign(b)) - b) / a
    # Zero out coordinates where the L1 penalty dominates (|b| <= c).
    active = tf.cast(tf.abs(b) > c, dtype=b.dtype)
    return active * soft
class Yogi(tf.keras.optimizers.Optimizer):
    """Optimizer that implements the Yogi algorithm in Keras.

    See Algorithm 2 of
    https://papers.nips.cc/paper/8186-adaptive-methods-for-nonconvex-optimization.pdf.

    Yogi is an Adam-like method whose second-moment accumulator is updated
    additively, controlled by the sign (or a tanh relaxation) of
    ``g^2 - v``, instead of Adam's exponential moving average. Optional
    L1/L2 regularization is applied in closed form via ``_solve``.
    """

    @typechecked
    def __init__(self, learning_rate: Union[(FloatTensorLike, Callable)]=0.01, beta1: FloatTensorLike=0.9, beta2: FloatTensorLike=0.999, epsilon: FloatTensorLike=0.001, l1_regularization_strength: FloatTensorLike=0.0, l2_regularization_strength: FloatTensorLike=0.0, initial_accumulator_value: FloatTensorLike=1e-06, activation: str='sign', name: str='Yogi', **kwargs):
        """Construct a new Yogi optimizer.

        Args:
            learning_rate: A Tensor or a floating point value. The learning rate.
            beta1: exponential decay rate for the 1st-moment estimates.
            beta2: exponential decay rate for the 2nd-moment estimates.
            epsilon: a constant trading off adaptivity and noise.
            l1_regularization_strength: float >= 0.
            l2_regularization_strength: float >= 0.
            initial_accumulator_value: starting value for the accumulators
                (must be positive).
            activation: 'sign' (hard) or 'tanh' (soft) to determine the sign
                of the second-moment update.
            name: optional name for the ops created when applying gradients.
            **kwargs: standard Keras optimizer kwargs ({`clipnorm`,
                `clipvalue`, `lr`, `decay`}); `lr` is a backward-compatible
                alias for `learning_rate`.
        """
        super().__init__(name, **kwargs)
        self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
        self._set_hyper('decay', self._initial_decay)
        self._set_hyper('beta_1', beta1)
        self._set_hyper('beta_2', beta2)
        self._set_hyper('epsilon', epsilon)
        self._set_hyper('l1_regularization_strength', l1_regularization_strength)
        self._set_hyper('l2_regularization_strength', l2_regularization_strength)
        # Python-level copies used for cheap branch decisions in the
        # apply methods (graph-mode hypers cannot drive Python `if`s).
        self._beta1 = beta1
        self._activation = activation
        self._initial_accumulator_value = initial_accumulator_value
        self._l1_regularization_strength = l1_regularization_strength
        self._l2_regularization_strength = l2_regularization_strength

    def _create_slots(self, var_list):
        """See `tf.train.Optimizer._create_slots()`."""
        for var in var_list:
            init = tf.constant_initializer(self._initial_accumulator_value)
            self.add_slot(var, 'v', init)
            if (self._beta1 > 0.0):
                self.add_slot(var, 'm')

    def _sign_factor(self, grad2, v):
        """Sign term controlling the additive second-moment update.

        Returns sign(grad2 - v) for 'sign', or the tanh(10*(grad2 - v))
        soft relaxation for 'tanh'.
        """
        if (self._activation == 'sign'):
            return tf.sign((grad2 - v))
        if (self._activation == 'tanh'):
            return tf.tanh((10 * (grad2 - v)))
        raise NotImplementedError('Activation function can be sign or tanh')

    def _resource_apply_dense(self, grad, var):
        """See `tf.train.Optimizer._apply_dense()`."""
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta1_t = self._get_hyper('beta_1', var_dtype)
        beta2_t = self._get_hyper('beta_2', var_dtype)
        epsilon_t = self._get_hyper('epsilon', var_dtype)
        l1_t = self._get_hyper('l1_regularization_strength', var_dtype)
        l2_t = self._get_hyper('l2_regularization_strength', var_dtype)
        local_step = tf.cast((self.iterations + 1), var_dtype)
        beta1_power = tf.pow(beta1_t, local_step)
        beta2_power = tf.pow(beta2_t, local_step)
        # Bias-corrected step size.
        lr = ((lr_t * tf.sqrt((1 - beta2_power))) / (1 - beta1_power))
        update_vs = []
        if (self._beta1 == 0.0):
            # Momentum-free path: update with the raw gradient.
            v = self.get_slot(var, 'v')
            grad2 = (grad * grad)
            sign = self._sign_factor(grad2, v)
            # Yogi update: v_t = v + (1 - beta2) * sign(g^2 - v) * g^2.
            v_t = v.assign_add((((1 - beta2_t) * sign) * grad2), use_locking=self._use_locking)
            v_sqrt = tf.sqrt(v_t)
            per_coord_lr = (lr / (v_sqrt + epsilon_t))
            new_var = (var - (per_coord_lr * grad))
            if (self._l1_regularization_strength > 0):
                # Closed-form L1(+L2) proximal step.
                new_var = _solve((1 + (l2_t * per_coord_lr)), (- new_var), (l1_t * per_coord_lr))
            elif (self._l2_regularization_strength > 0):
                new_var = (new_var / (1 + (l2_t * per_coord_lr)))
            var_update = var.assign(new_var, use_locking=self._use_locking)
            update_vs.append(var_update)
            update_vs.append(v_t)
        else:
            # Momentum path: update with the 1st-moment estimate m_t.
            m = self.get_slot(var, 'm')
            m_t = m.assign(((m * beta1_t) + (grad * (1 - beta1_t))), use_locking=self._use_locking)
            v = self.get_slot(var, 'v')
            grad2 = (grad * grad)
            sign = self._sign_factor(grad2, v)
            v_t = v.assign_add((((1 - beta2_t) * sign) * grad2), use_locking=self._use_locking)
            v_sqrt = tf.sqrt(v_t)
            per_coord_lr = (lr / (v_sqrt + epsilon_t))
            new_var = (var - (per_coord_lr * m_t))
            if (self._l1_regularization_strength > 0):
                new_var = _solve((1 + (l2_t * per_coord_lr)), (- new_var), (l1_t * per_coord_lr))
            elif (self._l2_regularization_strength > 0):
                new_var = (new_var / (1 + (l2_t * per_coord_lr)))
            var_update = var.assign(new_var, use_locking=self._use_locking)
            update_vs.append(var_update)
            update_vs.append(m_t)
            update_vs.append(v_t)
        return tf.group(*update_vs)

    def _resource_apply_sparse(self, grad, var, indices):
        """Applies sparse gradients to a variable.

        Args:
            grad: A tensor for the `values` of `tf.IndexedSlices`.
            var: A `tf.Variable` object.
            indices: A tensor for the `indices` of `tf.IndexedSlices`.

        Returns:
            An op which updates `var` with `grad` and `indices`.
        """
        var_dtype = var.dtype.base_dtype
        lr_t = self._decayed_lr(var_dtype)
        beta1_t = self._get_hyper('beta_1', var_dtype)
        beta2_t = self._get_hyper('beta_2', var_dtype)
        epsilon_t = self._get_hyper('epsilon', var_dtype)
        l1_t = self._get_hyper('l1_regularization_strength', var_dtype)
        l2_t = self._get_hyper('l2_regularization_strength', var_dtype)
        local_step = tf.cast((self.iterations + 1), var_dtype)
        beta1_power = tf.pow(beta1_t, local_step)
        beta2_power = tf.pow(beta2_t, local_step)
        lr = ((lr_t * tf.sqrt((1 - beta2_power))) / (1 - beta1_power))
        update_vs = []
        if (self._beta1 == 0.0):
            v = self.get_slot(var, 'v')
            grad2 = (grad * grad)
            v_slice = tf.gather(v, indices)
            sign = self._sign_factor(grad2, v_slice)
            v_scaled_g_values = (v_slice + (((1 - beta2_t) * sign) * grad2))
            v_t = self._resource_scatter_update(v, indices, v_scaled_g_values)
            v_sqrt = tf.sqrt(v_scaled_g_values)
            per_coord_lr = (lr / (v_sqrt + epsilon_t))
            var_slice = tf.gather(var, indices)
            new_var = (var_slice - (per_coord_lr * grad))
            if (self._l1_regularization_strength > 0):
                new_var = _solve((1 + (l2_t * per_coord_lr)), (- new_var), (l1_t * per_coord_lr))
            elif (self._l2_regularization_strength > 0):
                new_var = (new_var / (1 + (l2_t * per_coord_lr)))
            var_update = self._resource_scatter_update(var, indices, new_var)
            update_vs.append(var_update)
            update_vs.append(v_t)
        else:
            m = self.get_slot(var, 'm')
            m_scaled_g_values = (grad * (1 - beta1_t))
            # Decay the full m slot first, then write back the touched rows.
            m_t = m.assign((m * beta1_t), use_locking=self._use_locking)
            with tf.control_dependencies([m_t]):
                m_slice = (tf.gather(m, indices) + m_scaled_g_values)
                m_t = self._resource_scatter_update(m, indices, m_slice)
            v = self.get_slot(var, 'v')
            grad2 = (grad * grad)
            v_slice = tf.gather(v, indices)
            # Reuse v_slice (the original re-gathered v here redundantly).
            sign = self._sign_factor(grad2, v_slice)
            v_scaled_g_values = (v_slice + (((1 - beta2_t) * sign) * grad2))
            v_t = self._resource_scatter_update(v, indices, v_scaled_g_values)
            v_sqrt = tf.sqrt(v_scaled_g_values)
            per_coord_lr = (lr / (v_sqrt + epsilon_t))
            var_slice = tf.gather(var, indices)
            new_var = (var_slice - (per_coord_lr * m_slice))
            if (self._l1_regularization_strength > 0):
                new_var = _solve((1 + (l2_t * per_coord_lr)), (- new_var), (l1_t * per_coord_lr))
            elif (self._l2_regularization_strength > 0):
                new_var = (new_var / (1 + (l2_t * per_coord_lr)))
            var_update = self._resource_scatter_update(var, indices, new_var)
            update_vs.append(var_update)
            update_vs.append(m_t)
            update_vs.append(v_t)
        return tf.group(*update_vs)

    def get_config(self):
        """Return a serializable config for optimizer round-tripping."""
        config = super().get_config()
        # BUGFIX: keys must match __init__ argument names so that
        # Yogi.from_config(config) works; the previous 'l1_t'/'l2_t' keys
        # raised TypeError on deserialization.
        config.update({
            'learning_rate': self._serialize_hyperparameter('learning_rate'),
            'decay': self._serialize_hyperparameter('decay'),
            'beta1': self._serialize_hyperparameter('beta_1'),
            'beta2': self._serialize_hyperparameter('beta_2'),
            'epsilon': self._serialize_hyperparameter('epsilon'),
            'l1_regularization_strength': self._serialize_hyperparameter('l1_regularization_strength'),
            'l2_regularization_strength': self._serialize_hyperparameter('l2_regularization_strength'),
            'activation': self._activation,
            'initial_accumulator_value': self._initial_accumulator_value})
        return config
def get_flops():
    """Return the total number of float operations in the current graph.

    Uses the TF1-style profiler (`tf.RunMetadata` / `tf.profiler.profile`)
    against the active Keras session graph.
    NOTE(review): this relies on TF1 / tf.compat.v1-style APIs
    (`RunMetadata`, `K.get_session`) — confirm the TF version in use.
    """
    meta = tf.RunMetadata()
    profile_opts = tf.profiler.ProfileOptionBuilder.float_operation()
    profile = tf.profiler.profile(graph=K.get_session().graph, run_meta=meta, cmd='op', options=profile_opts)
    return profile.total_float_ops
def attention_simple(inputs, timesteps):
    """Apply a simple temporal soft-attention over `inputs`.

    Learns a per-timestep attention distribution, weights the input
    features by it, and sums over the time axis.

    Args:
        inputs: a Keras tensor of shape (batch, timesteps, features).
        timesteps: number of timesteps in `inputs`.

    Returns:
        Tuple of (attention-pooled features of shape (batch, features),
        attention probabilities tensor).
    """
    input_dim = int(inputs.shape[-1])  # kept for parity with original (unused)
    transposed = Permute((2, 1), name='transpose')(inputs)
    scores = Dense(timesteps, activation='softmax', name='attention_probs')(transposed)
    a_probs = Permute((2, 1), name='attention_vec')(scores)
    weighted = Multiply(name='focused_attention')([inputs, a_probs])
    pooled = Lambda(lambda x: K.sum(x, axis=1), name='temporal_average')(weighted)
    return (pooled, a_probs)
def dense_model(timesteps, n_class, n_features, classifier_architecture, dropout):
    """Build a dense classifier with a simple attention pooling stage.

    Args:
        timesteps: sequence length of the input.
        n_class: number of output classes.
        n_features: number of features per timestep.
        classifier_architecture: iterable of hidden-layer widths.
        dropout: iterable of dropout rates, zipped with the widths above.

    Returns:
        An uncompiled `Model` mapping (timesteps, n_features) inputs to
        softmax class probabilities.
    """
    inputs = Input((timesteps, n_features))
    x = BatchNormalization(axis=1, momentum=0.99)(inputs)
    x = Dense(128, activation=Mish())(x)
    x = LayerNormalization()(x)
    # Attention-pool over time; attention probs are discarded here.
    (x, a) = attention_simple(x, timesteps)
    for (units, rate) in zip(classifier_architecture, dropout):
        x = Dropout(rate)(x)
        x = Dense(units, activation=Mish())(x)
        x = LayerNormalization()(x)
    outputs = Dense(n_class, activation='softmax')(x)
    return Model(inputs, outputs)
class ContactGenerator(Sequence):
    """Keras Sequence yielding binary contact maps (distance < 8 Å → 1).

    NOTE(review): `on_epoch_begin` builds a shuffled `self.indexes` that
    `__getitem__` never consults, so batches are always drawn in list
    order — confirm whether shuffling was intended to take effect.
    """

    def __init__(self, pdb_id_list, features_path, distmap_path, dim, pad_size, batch_size, expected_n_channels):
        self.pdb_id_list = pdb_id_list
        self.features_path = features_path
        self.dim = dim
        self.pad_size = pad_size
        self.distmap_path = distmap_path
        self.batch_size = batch_size
        self.expected_n_channels = expected_n_channels

    def on_epoch_begin(self):
        self.indexes = np.arange(len(self.pdb_id_list))
        np.random.shuffle(self.indexes)

    def __len__(self):
        # Number of whole batches; a trailing partial batch is dropped.
        return int(len(self.pdb_id_list) / self.batch_size)

    def __getitem__(self, index):
        start = index * self.batch_size
        batch_list = self.pdb_id_list[start:start + self.batch_size]
        (X, Y) = get_input_output_dist(batch_list, self.features_path, self.distmap_path, self.pad_size, self.dim, self.expected_n_channels)
        # Binarize distances in place: < 8 Å is a contact.
        Y[Y < 8.0] = 1.0
        Y[Y >= 8.0] = 0.0
        return (X, Y)
class BinnedDistGenerator(Sequence):
    """Keras Sequence yielding distance maps discretized into `bins`.

    NOTE(review): `on_epoch_begin` builds a shuffled `self.indexes` that
    `__getitem__` never consults — confirm whether shuffling was intended
    to take effect.
    """

    def __init__(self, pdb_id_list, features_path, distmap_path, bins, dim, pad_size, batch_size, expected_n_channels):
        self.pdb_id_list = pdb_id_list
        self.features_path = features_path
        self.dim = dim
        self.pad_size = pad_size
        self.distmap_path = distmap_path
        self.bins = bins
        self.batch_size = batch_size
        self.expected_n_channels = expected_n_channels

    def on_epoch_begin(self):
        self.indexes = np.arange(len(self.pdb_id_list))
        np.random.shuffle(self.indexes)

    def __len__(self):
        # Number of whole batches; a trailing partial batch is dropped.
        return int(len(self.pdb_id_list) / self.batch_size)

    def __getitem__(self, index):
        start = index * self.batch_size
        batch_list = self.pdb_id_list[start:start + self.batch_size]
        (X, Y) = get_input_output_bins(batch_list, self.features_path, self.distmap_path, self.pad_size, self.dim, self.expected_n_channels, self.bins)
        return (X, Y)
class DistGenerator(Sequence):
    """Keras Sequence yielding real-valued distance maps.

    `label_engineering` transforms the targets:
      * None      — raw distances;
      * '100/d'   — reciprocal scaling 100/Y;
      * numeric string — distances clipped at that threshold.

    NOTE(review): `on_epoch_begin` builds a shuffled `self.indexes` that
    `__getitem__` never consults — confirm whether shuffling was intended
    to take effect.
    """

    def __init__(self, pdb_id_list, features_path, distmap_path, dim, pad_size, batch_size, expected_n_channels, label_engineering=None):
        self.pdb_id_list = pdb_id_list
        self.features_path = features_path
        self.distmap_path = distmap_path
        self.dim = dim
        self.pad_size = pad_size
        self.batch_size = batch_size
        self.expected_n_channels = expected_n_channels
        self.label_engineering = label_engineering

    def on_epoch_begin(self):
        self.indexes = np.arange(len(self.pdb_id_list))
        np.random.shuffle(self.indexes)

    def __len__(self):
        # Number of whole batches; a trailing partial batch is dropped.
        return int(len(self.pdb_id_list) / self.batch_size)

    def __getitem__(self, index):
        start = index * self.batch_size
        batch_list = self.pdb_id_list[start:start + self.batch_size]
        (X, Y) = get_input_output_dist(batch_list, self.features_path, self.distmap_path, self.pad_size, self.dim, self.expected_n_channels)
        if self.label_engineering is None:
            return (X, Y)
        if self.label_engineering == '100/d':
            return (X, (100.0 / Y))
        try:
            # Any other value is interpreted as a numeric clipping threshold.
            cap = float(self.label_engineering)
            Y[Y > cap] = cap
        except ValueError:
            # Preserves original behavior: report and return None.
            print('ERROR!! Unknown label_engineering parameter!!')
            return
        return (X, Y)
def inv_log_cosh(y_true, y_pred):
    """Log-cosh loss computed on reciprocal-scaled values.

    Maps both tensors through 100/(x + epsilon) before taking the
    numerically-stable log-cosh of their difference
    (log(cosh(d)) = d + softplus(-2d) - log(2)), then averages over the
    last axis.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = math_ops.cast(y_true, y_pred.dtype)
    diff = (100.0 / (y_pred + epsilon)) - (100.0 / (y_true + epsilon))
    log_cosh = (diff + nn.softplus(-2.0 * diff)) - math_ops.log(2.0)
    return K.mean(log_cosh, axis=-1)
def basic_fcn(L, num_blocks, width, expected_n_channels):
    """Build a plain fully-convolutional network over LxL feature maps.

    Args:
        L: spatial size of the (square) input.
        num_blocks: number of Conv-BN-ReLU blocks.
        width: channel width of each block.
        expected_n_channels: number of input channels.

    Returns:
        An uncompiled Keras `Model` named 'fcn' with a single-channel,
        ReLU-activated output map.
    """
    img_input = layers.Input(shape=(L, L, expected_n_channels))
    x = img_input
    for _ in range(num_blocks):
        x = layers.Conv2D(width, (3, 3), padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
    # Project down to one output channel.
    x = layers.Conv2D(1, (3, 3), padding='same', kernel_initializer='one')(x)
    x = layers.Activation('relu')(x)
    return tf.keras.models.Model(img_input, x, name='fcn')
def deepcon_rdd(L, num_blocks, width, expected_n_channels):
    """Build a DEEPCON-style dilated residual network for contact prediction.

    Residual blocks cycle dilation rates 1 -> 2 -> 4 and the head emits a
    single sigmoid contact-probability channel.
    """
    print('')
    print('Model params:')
    print('L', L)
    print('num_blocks', num_blocks)
    print('width', width)
    print('expected_n_channels', expected_n_channels)
    print('')
    dropout_value = 0.3
    my_input = Input(shape=(L, L, expected_n_channels))
    # Stem: BN -> ReLU -> 1x1 projection to `width` channels.
    tower = BatchNormalization()(my_input)
    tower = Activation('relu')(tower)
    tower = Convolution2D(width, 1, padding='same')(tower)
    n_channels = width
    d_rate = 1
    for _ in range(num_blocks):
        residual = BatchNormalization()(tower)
        residual = Activation('relu')(residual)
        residual = Convolution2D(n_channels, kernel_size=(3, 3), padding='same')(residual)
        residual = Dropout(dropout_value)(residual)
        residual = Activation('relu')(residual)
        residual = Convolution2D(n_channels, kernel_size=(3, 3), dilation_rate=(d_rate, d_rate), padding='same')(residual)
        tower = add([residual, tower])
        # Cycle dilation 1 -> 2 -> 4 -> 1 ...
        d_rate = {1: 2, 2: 4}.get(d_rate, 1)
    tower = BatchNormalization()(tower)
    tower = Activation('relu')(tower)
    tower = Convolution2D(1, 3, padding='same')(tower)
    tower = Activation('sigmoid')(tower)
    return Model(my_input, tower)
def deepcon_rdd_distances(L, num_blocks, width, expected_n_channels):
    """Build a DEEPCON-style dilated residual network for distance regression.

    Same backbone as `deepcon_rdd` but with a ReLU head so the single
    output channel is an unbounded non-negative distance value.
    """
    print('')
    print('Model params:')
    print('L', L)
    print('num_blocks', num_blocks)
    print('width', width)
    print('expected_n_channels', expected_n_channels)
    print('')
    dropout_value = 0.3
    my_input = Input(shape=(L, L, expected_n_channels))
    # Stem: BN -> ReLU -> 1x1 projection to `width` channels.
    tower = BatchNormalization()(my_input)
    tower = Activation('relu')(tower)
    tower = Convolution2D(width, 1, padding='same')(tower)
    n_channels = width
    d_rate = 1
    for _ in range(num_blocks):
        residual = BatchNormalization()(tower)
        residual = Activation('relu')(residual)
        residual = Convolution2D(n_channels, kernel_size=(3, 3), padding='same')(residual)
        residual = Dropout(dropout_value)(residual)
        residual = Activation('relu')(residual)
        residual = Convolution2D(n_channels, kernel_size=(3, 3), dilation_rate=(d_rate, d_rate), padding='same')(residual)
        tower = add([residual, tower])
        # Cycle dilation 1 -> 2 -> 4 -> 1 ...
        d_rate = {1: 2, 2: 4}.get(d_rate, 1)
    tower = BatchNormalization()(tower)
    tower = Activation('relu')(tower)
    tower = Convolution2D(1, 3, padding='same')(tower)
    tower = Activation('relu')(tower)
    return Model(my_input, tower)
def deepcon_rdd_binned(L, num_blocks, width, bins, expected_n_channels):
    """Build a DEEPCON-style dilated residual network for binned distances.

    Same backbone as `deepcon_rdd` but the head emits `bins` channels with
    a softmax over the distance bins.
    """
    print('')
    print('Model params:')
    print('L', L)
    print('num_blocks', num_blocks)
    print('width', width)
    print('expected_n_channels', expected_n_channels)
    print('')
    dropout_value = 0.3
    my_input = Input(shape=(L, L, expected_n_channels))
    # Stem: BN -> ReLU -> 1x1 projection to `width` channels.
    tower = BatchNormalization()(my_input)
    tower = Activation('relu')(tower)
    tower = Convolution2D(width, 1, padding='same')(tower)
    n_channels = width
    d_rate = 1
    for _ in range(num_blocks):
        residual = BatchNormalization()(tower)
        residual = Activation('relu')(residual)
        residual = Convolution2D(n_channels, kernel_size=(3, 3), padding='same')(residual)
        residual = Dropout(dropout_value)(residual)
        residual = Activation('relu')(residual)
        residual = Convolution2D(n_channels, kernel_size=(3, 3), dilation_rate=(d_rate, d_rate), padding='same')(residual)
        tower = add([residual, tower])
        # Cycle dilation 1 -> 2 -> 4 -> 1 ...
        d_rate = {1: 2, 2: 4}.get(d_rate, 1)
    tower = BatchNormalization()(tower)
    tower = Activation('relu')(tower)
    tower = Convolution2D(bins, 3, padding='same')(tower)
    tower = Activation('softmax')(tower)
    return Model(my_input, tower)
def load_satellite_data(path):
    """Load the satellite train/test splits as PyTorch TensorDatasets.

    Expects `satellite_train.npy` / `satellite_test.npy` under `path`,
    each a pickled 0-d object array holding {'data': ..., 'label': ...}.
    Labels are shifted from 1-based to 0-based; each sample is standardized
    (zero mean, unit std) along axis 1.

    Args:
        path: directory containing the two .npy files.

    Returns:
        Tuple (trainset, None, testset) of `data_utils.TensorDataset`s;
        the middle element is a placeholder (no validation split here).
    """
    train_file = os.path.join(path, 'satellite_train.npy')
    test_file = os.path.join(path, 'satellite_test.npy')
    # Load and unpickle each archive exactly once (the original loaded each
    # file twice — once for 'data' and again for 'label').
    train_blob = np.load(train_file, allow_pickle=True)[()]
    test_blob = np.load(test_file, allow_pickle=True)[()]
    (all_train_data, all_train_labels) = (train_blob['data'], train_blob['label'])
    (test_data, test_labels) = (test_blob['data'], test_blob['label'])
    # Labels on disk are 1-based; shift to 0-based class indices.
    all_train_labels = (all_train_labels - 1)
    test_labels = (test_labels - 1)
    # Per-sample standardization along the feature/time axis.
    all_train_data = ((all_train_data - all_train_data.mean(axis=1, keepdims=True)) / all_train_data.std(axis=1, keepdims=True))
    test_data = ((test_data - test_data.mean(axis=1, keepdims=True)) / test_data.std(axis=1, keepdims=True))
    all_train_tensors = torch.from_numpy(all_train_data).type(torch.FloatTensor)
    all_train_labeltensor = torch.from_numpy(all_train_labels).type(torch.LongTensor)
    test_tensors = torch.from_numpy(test_data).type(torch.FloatTensor)
    test_labeltensor = torch.from_numpy(test_labels).type(torch.LongTensor)
    testset = data_utils.TensorDataset(test_tensors, test_labeltensor)
    trainset = data_utils.TensorDataset(all_train_tensors, all_train_labeltensor)
    return (trainset, None, testset)
class NeuralNet(nn.Module):
    """Three-layer fully-connected classifier with ReLU activations.

    Architecture: input -> fc1 -> ReLU -> fc2 -> ReLU -> fc3 (logits).
    """

    def __init__(self, input_size, hidden_size_1, hidden_size_2, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size_1)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size_1, hidden_size_2)
        self.fc3 = nn.Linear(hidden_size_2, num_classes)

    def forward(self, x):
        # Two ReLU-activated hidden layers, linear output (raw logits).
        hidden = self.relu(self.fc1(x))
        hidden = self.relu(self.fc2(hidden))
        return self.fc3(hidden)
def train(X, Y, X_validation, Y_validation, kernels, num_features, num_classes, minibatch_size=256, max_epochs=100, patience=2, tranche_size=(2 ** 11), cache_size=(2 ** 14)):
    """Train a linear classifier on random-kernel (ROCKET-style) features.

    The training set is streamed in tranches; transformed features are cached
    (up to `cache_size` rows) so later epochs skip the kernel transform.
    A one-shot learning-rate search runs on the very first minibatch, and
    training stops early after `patience` non-improving validation checks.

    Returns:
        (model, f_mean, f_std): trained linear model plus the feature
        normalization statistics fitted on the first tranche.
    """

    def init(layer):
        # Zero-initialize so every lr-search restart starts identically.
        if isinstance(layer, nn.Linear):
            nn.init.constant_(layer.weight.data, 0)
            nn.init.constant_(layer.bias.data, 0)

    model = nn.Sequential(nn.Linear(num_features, num_classes))
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters())
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, min_lr=1e-08)
    model.apply(init)
    minibatch_count = 0
    best_validation_loss = np.inf
    stall_count = 0
    stop = False
    num_examples = len(X)
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    num_tranches = int(np.ceil(num_examples / tranche_size))
    cache = np.zeros((min(cache_size, num_examples), num_features))
    cache_count = 0
    for epoch in range(max_epochs):
        if (epoch > 0) and stop:
            break
        for tranche_index in range(num_tranches):
            if (epoch > 0) and stop:
                break
            a = tranche_size * tranche_index
            b = a + tranche_size
            Y_tranche = Y[a:b]
            if b <= cache_count:
                # Features for this tranche were computed (and normalized) earlier.
                X_tranche_transform = cache[a:b]
            else:
                X_tranche = X[a:b]
                # Per-example standardization, then the random-kernel transform.
                X_tranche = (X_tranche - X_tranche.mean(axis=1, keepdims=True)) / X_tranche.std(axis=1, keepdims=True)
                X_tranche_transform = apply_kernels(X_tranche, kernels)
                if (epoch == 0) and (tranche_index == 0):
                    # Fit feature statistics on the first tranche and prepare the
                    # validation set once with the same statistics.
                    f_mean = X_tranche_transform.mean(0)
                    f_std = X_tranche_transform.std(0) + 1e-08
                    X_validation = (X_validation - X_validation.mean(axis=1, keepdims=True)) / X_validation.std(axis=1, keepdims=True)
                    X_validation_transform = apply_kernels(X_validation, kernels)
                    X_validation_transform = (X_validation_transform - f_mean) / f_std
                    X_validation_transform = torch.FloatTensor(X_validation_transform)
                    Y_validation = torch.LongTensor(Y_validation)
                X_tranche_transform = (X_tranche_transform - f_mean) / f_std
                if b <= cache_size:
                    cache[a:b] = X_tranche_transform
                    cache_count = b
            X_tranche_transform = torch.FloatTensor(X_tranche_transform)
            Y_tranche = torch.LongTensor(Y_tranche)
            minibatches = torch.randperm(len(X_tranche_transform)).split(minibatch_size)
            for (minibatch_index, minibatch) in enumerate(minibatches):
                if (epoch > 0) and stop:
                    break
                # Drop a trailing partial minibatch (unless it is the only one).
                if (minibatch_index > 0) and (len(minibatch) < minibatch_size):
                    break
                if (epoch == 0) and (tranche_index == 0) and (minibatch_index == 0):
                    # One-shot learning-rate search: one step per candidate lr,
                    # scored by loss over the whole first tranche.
                    candidate_lr = 10 ** np.linspace(-1, -6, 6)
                    best_lr = None
                    best_training_loss = np.inf
                    for lr in candidate_lr:
                        lr_model = nn.Sequential(nn.Linear(num_features, num_classes))
                        lr_optimizer = optim.Adam(lr_model.parameters())
                        lr_model.apply(init)
                        for param_group in lr_optimizer.param_groups:
                            param_group['lr'] = lr
                        lr_optimizer.zero_grad()
                        Y_tranche_predictions = lr_model(X_tranche_transform[minibatch])
                        training_loss = loss_function(Y_tranche_predictions, Y_tranche[minibatch])
                        training_loss.backward()
                        lr_optimizer.step()
                        Y_tranche_predictions = lr_model(X_tranche_transform)
                        training_loss = loss_function(Y_tranche_predictions, Y_tranche).item()
                        if training_loss < best_training_loss:
                            best_training_loss = training_loss
                            best_lr = lr
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = best_lr
                optimizer.zero_grad()
                Y_tranche_predictions = model(X_tranche_transform[minibatch])
                training_loss = loss_function(Y_tranche_predictions, Y_tranche[minibatch])
                training_loss.backward()
                optimizer.step()
                minibatch_count += 1
                if (minibatch_count % 10) == 0:
                    # Periodic validation check drives lr scheduling and early stop.
                    Y_validation_predictions = model(X_validation_transform)
                    validation_loss = loss_function(Y_validation_predictions, Y_validation)
                    scheduler.step(validation_loss)
                    if validation_loss.item() >= best_validation_loss:
                        stall_count += 1
                        if stall_count >= patience:
                            stop = True
                    else:
                        best_validation_loss = validation_loss.item()
                        if not stop:
                            stall_count = 0
    return (model, f_mean, f_std)
def eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id=0):
    """Compute the criterion loss of the baseline MLP on one batch of indices."""
    one_hot = to_one_hot(data['features']['atom_types'][(batch_idxs, ...)], NUM_ATOM_TYPES)
    labels = data['targets'][(batch_idxs, ...)]
    one_hot = Variable(one_hot)
    labels = Variable(labels)
    if torch.cuda.is_available():
        one_hot = one_hot.cuda(device_id)
        labels = labels.cuda(device_id)
    return criterion(mlp(one_hot), labels)
def eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id=0):
    """Compute the loss of the combined model (baseline MLP + residual s2cnn)
    on one batch of indices."""
    geometry = data['features']['geometry'][(batch_idxs, ...)]
    atom_types = data['features']['atom_types'][(batch_idxs, ...)]
    one_hot = to_one_hot(atom_types, NUM_ATOM_TYPES)
    labels = data['targets'][(batch_idxs, ...)]
    geometry = Variable(geometry)
    atom_types = Variable(atom_types)
    one_hot = Variable(one_hot)
    labels = Variable(labels)
    if torch.cuda.is_available():
        one_hot = one_hot.cuda(device_id)
        geometry = geometry.cuda(device_id)
        atom_types = atom_types.cuda(device_id)
        labels = labels.cuda(device_id)
    # Residual formulation: the s2cnn refines the baseline MLP prediction.
    outputs = mlp(one_hot) + s2cnn(geometry, atom_types)
    return criterion(outputs, labels)
def train_baseline(mlp, data, train_batches, test_batches, num_epochs, learning_rate_mlp, device_id=0):
    """Train the baseline MLP with MSE loss.

    Returns:
        (train_loss, test_loss): RMSE over the final epoch.
    """
    optim = OPTIMIZER(mlp.parameters(), lr=learning_rate_mlp)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            mlp.train()
            optim.zero_grad()
            loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{}'.format((iteration + 1), train_batches.num_iterations()), end='')
        print()
        test_losses = []
        print('evaluating')
        # no_grad: evaluation does not need autograd graphs.
        with torch.no_grad():
            for (iteration, batch_idxs) in enumerate(test_batches):
                mlp.eval()
                # BUG FIX: the original dropped device_id here, so evaluation
                # always ran on GPU 0 regardless of the requested device.
                loss = eval_batch_mlp(mlp, data, batch_idxs, criterion, device_id)
                test_losses.append(loss.item())
                print('\riteration {}/{}'.format((iteration + 1), test_batches.num_iterations()), end='')
        print()
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
def train_s2cnn(mlp, s2cnn, data, train_batches, test_batches, num_epochs, init_learning_rate_s2cnn, learning_rate_decay_epochs, device_id=0):
    """Train the residual s2cnn while keeping the baseline MLP frozen.

    The learning rate is step-decayed every `learning_rate_decay_epochs` epochs.

    Returns:
        (train_loss, test_loss): RMSE over the final epoch.
    """
    optim = OPTIMIZER(s2cnn.parameters(), lr=init_learning_rate_s2cnn)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        criterion = criterion.cuda(device_id)
    for epoch in range(num_epochs):
        optim = exp_lr_scheduler(optim, epoch, init_lr=init_learning_rate_s2cnn, lr_decay_epoch=learning_rate_decay_epochs)
        train_losses = []
        print('training')
        for (iteration, batch_idxs) in enumerate(train_batches):
            s2cnn.train()
            mlp.eval()
            optim.zero_grad()
            # BUG FIX: the original dropped device_id here (and below), forcing
            # the batch onto GPU 0 regardless of the requested device.
            loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
            loss.backward()
            optim.step()
            train_losses.append(loss.item())
            print('\riteration {}/{} - batch loss: {}'.format((iteration + 1), train_batches.num_iterations(), np.sqrt(train_losses[(- 1)])), end='')
        print()
        test_losses = []
        print('evaluating')
        # no_grad: evaluation does not need autograd graphs.
        with torch.no_grad():
            for (iteration, batch_idxs) in enumerate(test_batches):
                s2cnn.eval()
                mlp.eval()
                loss = eval_batch_s2cnn(mlp, s2cnn, data, batch_idxs, criterion, device_id)
                test_losses.append(loss.item())
                print('\riteration {}/{} - batch loss: {}'.format((iteration + 1), test_batches.num_iterations(), np.sqrt(test_losses[(- 1)])), end='')
        print()
        train_loss = np.sqrt(np.mean(train_losses))
        test_loss = np.sqrt(np.mean(test_losses))
        print('epoch {}/{} - avg train loss: {}, test loss: {}'.format((epoch + 1), num_epochs, train_loss, test_loss))
    return (train_loss, test_loss)
def main():
    """Parse CLI arguments, load the molecule data, train the baseline MLP,
    then train the residual s2cnn on top of the frozen baseline."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data.joblib')
    parser.add_argument('--test_strat', type=int, default=0)
    parser.add_argument('--device_id', type=int, default=0)
    parser.add_argument('--num_epochs_s2cnn', type=int, default=30)
    parser.add_argument('--num_epochs_mlp', type=int, default=30)
    parser.add_argument('--batch_size_s2cnn', type=int, default=32)
    parser.add_argument('--batch_size_mlp', type=int, default=32)
    # BUG FIX: these two flags were declared type=int, which would truncate any
    # learning rate passed on the command line to 0; they are floats.
    parser.add_argument('--init_learning_rate_s2cnn', type=float, default=0.001)
    parser.add_argument('--learning_rate_mlp', type=float, default=0.001)
    parser.add_argument('--learning_rate_decay_epochs', type=int, default=10)
    args = parser.parse_args()
    torch.cuda.set_device(args.device_id)
    print('evaluating on {}'.format(args.test_strat))
    print('loading data...', end='')
    (data, train_idxs, test_idxs) = load_data(args.data_path, args.test_strat, cuda=args.device_id)
    print('done!')
    mlp = BaselineRegressor()
    s2cnn = S2CNNRegressor()
    if torch.cuda.is_available():
        for model in [mlp, s2cnn]:
            model.cuda(args.device_id)
    print('training baseline model')
    print('mlp #params: {}'.format(count_params(mlp)))
    train_baseline(mlp, data, IndexBatcher(train_idxs, args.batch_size_mlp, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_mlp, cuda=args.device_id), args.num_epochs_mlp, args.learning_rate_mlp, args.device_id)
    print('training residual s2cnn model')
    print('s2cnn #params: {}'.format(count_params(s2cnn)))
    train_s2cnn(mlp, s2cnn, data, IndexBatcher(train_idxs, args.batch_size_s2cnn, cuda=args.device_id), IndexBatcher(test_idxs, args.batch_size_s2cnn, cuda=args.device_id), args.num_epochs_s2cnn, args.init_learning_rate_s2cnn, args.learning_rate_decay_epochs, args.device_id)
class S2Block(nn.Module):
    """S2 convolution followed by batch norm and the shared nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        """b_in/b_out: bandwidth of input/output signals.
        f_in/f_out: feature channels of input/output signals."""
        super(S2Block, self).__init__()
        self.grid_s2 = s2_near_identity_grid(n_alpha=(2 * b_in), n_beta=2)
        self.cnn = S2Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_s2)
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        return nonlinearity(self.bn(self.cnn(x)))
class So3Block(nn.Module):
    """SO(3) convolution followed by batch norm and the shared nonlinearity."""

    def __init__(self, b_in, b_out, f_in, f_out):
        """b_in/b_out: bandwidth of input/output signals.
        f_in/f_out: feature channels of input/output signals."""
        super(So3Block, self).__init__()
        self.grid_so3 = so3_near_identity_grid(n_alpha=(2 * b_in), n_beta=2, n_gamma=2)
        self.cnn = SO3Convolution(nfeature_in=f_in, nfeature_out=f_out, b_in=b_in, b_out=b_out, grid=self.grid_so3)
        self.bn = nn.BatchNorm3d(f_out, affine=AFFINE)

    def forward(self, x):
        return nonlinearity(self.bn(self.cnn(x)))
class DeepSet(nn.Module):
    """Permutation-invariant deep-set regressor: encode each object, pool the
    set with a masked sum, then decode the pooled latent to one scalar."""

    def __init__(self, f, h1, h_latent, h2, n_objs):
        """f: input feature size per object.
        h1, h2: hidden units of the encoder/decoder MLPs.
        h_latent: latent dimensions.
        n_objs: number of objects aggregated per set."""
        super(DeepSet, self).__init__()
        self.f = f
        self.h1 = h1
        self.h3 = h2  # kept under this name for interface compatibility
        self.n_objs = n_objs
        self.emb_h = nn.Linear(f, h1)
        self.emb_rep = nn.Linear(h1, h_latent)
        self.proj_h = nn.Linear(h_latent, h2)
        self.proj = nn.Linear(h2, 1)
        self.bn1 = nn.BatchNorm1d(h1, affine=AFFINE)
        self.bn2 = nn.BatchNorm1d(h_latent, affine=AFFINE)
        self.bn3 = nn.BatchNorm1d(h2, affine=AFFINE)

    def forward(self, x, mask):
        # Encode every object independently.
        h = nonlinearity(self.bn1(self.emb_h(x)))
        h = nonlinearity(self.bn2(self.emb_rep(h)))
        # Regroup the flat object batch into sets and sum-pool with the mask.
        (n, latent_dim) = h.size()
        h = h.view((n // self.n_objs), self.n_objs, latent_dim)
        pooled = torch.sum((h * mask), dim=1)
        # Decode the pooled latent to one scalar per set.
        out = nonlinearity(self.bn3(self.proj_h(pooled)))
        return self.proj(out)
class S2CNNRegressor(nn.Module):
    """Energy regressor over per-atom spherical representations: an S2 block,
    three SO(3) blocks, SO(3) integration, then a deep-set over atoms."""

    def __init__(self):
        super(S2CNNRegressor, self).__init__()
        n_objs = 23
        self.blocks = [S2Block(b_in=10, f_in=5, b_out=8, f_out=8), So3Block(b_in=8, b_out=6, f_in=8, f_out=16), So3Block(b_in=6, b_out=4, f_in=16, f_out=32), So3Block(b_in=4, b_out=2, f_in=32, f_out=64)]
        # Register each block as an attribute so its parameters are tracked.
        for (i, block) in enumerate(self.blocks):
            setattr(self, 'block{0}'.format(i), block)
        self.ds = DeepSet(64, 256, 64, 512, n_objs)

    def forward(self, x, atom_types):
        (n_batch, n_atoms, n_features, bandwidth, _) = x.size()
        # Atoms with type 0 are treated as padding and masked out of pooling.
        mask = (atom_types > 0).view(n_batch, n_atoms, 1).float()
        signal = x.view((n_batch * n_atoms), n_features, bandwidth, bandwidth)
        for block in self.blocks:
            signal = block(signal)
        signal = so3_integrate(signal)
        return self.ds(signal, mask)
class IndexBatcher():
    """Iterator yielding shuffled minibatches of dataset indices as LongTensors.

    Args:
        indices: 1-D array of dataset indices to sample from.
        n_batch: minibatch size.
        cuda: optional CUDA device id to move each batch to.
    """

    def __init__(self, indices, n_batch, cuda=None):
        self.indices = indices.astype(np.int64)
        self.n_batch = n_batch
        self.pos = 0
        self.cuda = cuda
        self.internal_indices = np.arange(len(indices)).astype(np.int64)
        np.random.shuffle(self.internal_indices)

    def __iter__(self):
        return self

    def reset(self):
        """Rewind to the start of a new epoch with a fresh shuffle."""
        self.pos = 0
        np.random.shuffle(self.internal_indices)

    def __next__(self):
        start = self.pos
        end = np.minimum((self.pos + self.n_batch), len(self.indices))
        self.pos += self.n_batch
        # NOTE(review): stopping as soon as pos reaches the end means the final
        # batch of every epoch is never yielded; preserved as-is because
        # num_iterations() and the training loops assume this behavior.
        if (self.pos >= len(self.indices)):
            self.reset()
            raise StopIteration
        tensor = torch.LongTensor(self.indices[self.internal_indices[start:end]])
        if (self.cuda is not None):
            # BUG FIX: Tensor.cuda() is not in-place; the original discarded the
            # returned GPU tensor and yielded the CPU copy.
            tensor = tensor.cuda(self.cuda)
        return tensor

    def num_iterations(self):
        return (len(self.indices) // self.n_batch)
    next = __next__
def to_one_hot(x, n):
    """One-hot encode an integer tensor.

    Generalized from the original 2-D-only implementation: the one-hot axis is
    appended as a new trailing dimension, so any input rank is supported
    (backward compatible for the original 2-D inputs).

    Args:
        x: LongTensor of arbitrary shape with class indices in [0, n).
        n: number of classes.
    Returns:
        FloatTensor of shape (*x.shape, n) with a 1 at each index position.
    """
    x_ = torch.unsqueeze(x, -1)
    one_hot = torch.zeros(*x.size(), n)
    one_hot.scatter_(x.dim(), x_, 1)
    return one_hot
def load_data(path, test_strat_id=None, cuda=None):
    """Load the molecule dataset and split its strats into train/test indices.

    path: path to the molecule joblib archive
    test_strat_id: id of the strat held out as test set (None picks one at random)
    cuda: optional CUDA device id to move the tensors to
    """
    data = joblib.load(path)
    # Remap the sparse atom-type ids onto a dense 0..K-1 range.
    type_remap = (- np.ones((int(data['features']['atom_types'].max()) + 1)))
    unique_types = np.unique(data['features']['atom_types']).astype(int)
    type_remap[unique_types] = np.arange(len(unique_types))
    data['features']['atom_types'] = type_remap[data['features']['atom_types'].astype(int)]
    data['features']['geometry'] = torch.FloatTensor(data['features']['geometry'].astype(np.float32))
    data['features']['atom_types'] = torch.LongTensor(data['features']['atom_types'].astype(np.int64))
    data['targets'] = torch.from_numpy(data['targets'])
    if (cuda is not None):
        # BUG FIX: .cuda() returns a copy rather than moving in place, so the
        # results must be stored back (the original discarded them).
        data['features']['geometry'] = data['features']['geometry'].cuda(cuda)
        data['features']['atom_types'] = data['features']['atom_types'].cuda(cuda)
        data['targets'] = data['targets'].cuda(cuda)
    train = np.ndarray(0)
    test = np.ndarray(0)
    # BUG FIX: the original used `if not test_strat_id`, which treated the
    # valid id 0 as "unset" and replaced it with a random strat.
    if (test_strat_id is None):
        test_strat_id = np.random.randint(len(data['strats']))
    for i in range(len(data['strats'])):
        if (i != test_strat_id):
            train = np.concatenate((train, data['strats'][i]))
        else:
            test = np.concatenate((test, data['strats'][i]))
    return (data, train, test)
def exp_lr_scheduler(optimizer, epoch, init_lr=0.005, lr_decay_epoch=40):
    """Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs.

    Writes the decayed rate into every param group and returns the optimizer.
    """
    decayed_lr = init_lr * (0.1 ** (epoch // lr_decay_epoch))
    if epoch % lr_decay_epoch == 0:
        print('LR is set to {}'.format(decayed_lr))
    for group in optimizer.param_groups:
        group['lr'] = decayed_lr
    return optimizer
def count_params(model):
    """Return the total number of trainable parameters in `model`."""
    total = 0
    for p in model.parameters():
        if p.requires_grad:
            total += np.prod(p.size())
    return total
class Model(nn.Module):
    """Spherical CNN classifier: one S2 convolution, SO(3) convolutions with
    batch norm + ReLU in between, SO(3) integration, then a linear head with
    log-softmax output."""

    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 100, 100, nclasses]
        self.bandwidths = [64, 16, 10]
        assert len(self.bandwidths) == len(self.features) - 1
        layers = []
        # First layer maps the raw S2 signal into the SO(3) domain.
        grid = s2_equatorial_grid(max_beta=0, n_alpha=2 * self.bandwidths[0], n_beta=1)
        layers.append(S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid))
        for i in range(1, len(self.features) - 2):
            f_in, f_out = self.features[i], self.features[i + 1]
            b_in, b_out = self.bandwidths[i], self.bandwidths[i + 1]
            layers.append(nn.BatchNorm3d(f_in, affine=True))
            layers.append(nn.ReLU())
            grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=2 * b_in, n_beta=1, n_gamma=1)
            layers.append(SO3Convolution(f_in, f_out, b_in, b_out, grid))
        layers.append(nn.BatchNorm3d(self.features[-2], affine=True))
        layers.append(nn.ReLU())
        self.sequential = nn.Sequential(*layers)
        self.out_layer = nn.Linear(self.features[-2], self.features[-1])

    def forward(self, x):
        x = self.sequential(x)
        # Integrate over SO(3) to get one rotation-invariant vector per sample.
        x = so3_integrate(x)
        return F.log_softmax(self.out_layer(x), dim=1)
class Model(nn.Module):
    """Spherical CNN classifier variant: deeper trunk, global max pooling over
    the SO(3) grid instead of integration, and a batch-normed linear head."""

    def __init__(self, nclasses):
        super().__init__()
        self.features = [6, 50, 70, 350, nclasses]
        self.bandwidths = [128, 32, 22, 7]
        assert len(self.bandwidths) == len(self.features) - 1
        layers = []
        # First layer maps the raw S2 signal into the SO(3) domain.
        grid = s2_equatorial_grid(max_beta=0, n_alpha=2 * self.bandwidths[0], n_beta=1)
        layers.append(S2Convolution(self.features[0], self.features[1], self.bandwidths[0], self.bandwidths[1], grid))
        for i in range(1, len(self.features) - 2):
            f_in, f_out = self.features[i], self.features[i + 1]
            b_in, b_out = self.bandwidths[i], self.bandwidths[i + 1]
            layers.append(nn.BatchNorm3d(f_in, affine=True))
            layers.append(nn.ReLU())
            grid = so3_equatorial_grid(max_beta=0, max_gamma=0, n_alpha=2 * b_in, n_beta=1, n_gamma=1)
            layers.append(SO3Convolution(f_in, f_out, b_in, b_out, grid))
        layers.append(nn.BatchNorm3d(self.features[-2], affine=True))
        layers.append(nn.ReLU())
        self.sequential = nn.Sequential(*layers)
        self.out_layer = nn.Sequential(nn.BatchNorm1d(self.features[-2], affine=False), nn.Linear(self.features[-2], self.features[-1]))

    def forward(self, x):
        x = self.sequential(x)
        # Global max pool over the flattened SO(3) grid dimensions.
        x = x.view(x.size(0), x.size(1), -1).max(-1)[0]
        return F.log_softmax(self.out_layer(x), dim=1)
class KeepName():
    """Transform wrapper that returns (file_name, transform(file_name)) so each
    sample keeps track of the file it came from."""

    def __init__(self, transform):
        self.transform = transform

    def __call__(self, file_name):
        transformed = self.transform(file_name)
        return (file_name, transformed)
def main(log_dir, augmentation, dataset, batch_size, num_workers):
    """Run a trained SHREC'17 model on the perturbed dataset, write one
    retrieval file per object, then download and run the official evaluator.
    """
    # Fail fast if node.js is missing: the SHREC'17 evaluator below needs it.
    print(check_output(['nodejs', '--version']).decode('utf-8'))
    torch.backends.cudnn.benchmark = True
    # Each mesh is projected `augmentation` times (random rotation/translation)
    # onto a bandwidth-64 sphere; projections are cached on disk as .npy.
    transform = torchvision.transforms.Compose([CacheNPY(prefix='b64_', repeat=augmentation, pick_randomly=False, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=64)])), (lambda xs: torch.stack([torch.FloatTensor(x) for x in xs]))])
    transform = KeepName(transform)
    test_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform)
    # Load the Model class from the model.py snapshot saved in log_dir.
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)
    model.cuda()
    model.load_state_dict(torch.load(os.path.join(log_dir, 'state.pkl')))
    # Fresh results directory for this dataset split.
    resdir = os.path.join(log_dir, (dataset + '_perturbed'))
    if os.path.isdir(resdir):
        shutil.rmtree(resdir)
    os.mkdir(resdir)
    predictions = []
    ids = []
    # NOTE(review): `loader` is reused here, shadowing the SourceFileLoader above.
    loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=True, drop_last=False)
    for (batch_idx, data) in enumerate(loader):
        model.eval()
        if (dataset != 'test'):
            data = data[0]
        (file_names, data) = data
        # rep = number of augmented views per object (shadows the outer batch_size).
        (batch_size, rep) = data.size()[:2]
        # Fold the views into the batch dimension for a single forward pass.
        data = data.view((- 1), *data.size()[2:])
        data = data.cuda()
        with torch.no_grad():
            pred = model(data).data
        # Sum the per-view predictions of each object.
        pred = pred.view(batch_size, rep, (- 1))
        pred = pred.sum(1)
        predictions.append(pred.cpu().numpy())
        # Object id = file name without directory and extension.
        ids.extend([x.split('/')[(- 1)].split('.')[0] for x in file_names])
        print('[{}/{}] '.format(batch_idx, len(loader)))
    predictions = np.concatenate(predictions)
    predictions_class = np.argmax(predictions, axis=1)
    # Write one retrieval file per object: all objects predicted to share its
    # class, ranked by their score on that class.
    for i in range(len(ids)):
        if ((i % 100) == 0):
            print('{}/{} '.format(i, len(ids)), end='\r')
        idfile = os.path.join(resdir, ids[i])
        retrieved = [(predictions[(j, predictions_class[j])], ids[j]) for j in range(len(ids)) if (predictions_class[j] == predictions_class[i])]
        retrieved = sorted(retrieved, reverse=True)
        retrieved = [i for (_, i) in retrieved]
        with open(idfile, 'w') as f:
            f.write('\n'.join(retrieved))
    # Download the official SHREC'17 evaluator and run it on the results.
    url = 'https://shapenet.cs.stanford.edu/shrec17/code/evaluator.zip'
    file_path = 'evaluator.zip'
    r = requests.get(url, stream=True)
    with open(file_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=(16 * (1024 ** 2))):
            if chunk:
                f.write(chunk)
                f.flush()
    zip_ref = zipfile.ZipFile(file_path, 'r')
    zip_ref.extractall('.')
    zip_ref.close()
    print(check_output(['nodejs', 'evaluate.js', (os.path.join('..', log_dir) + '/')], cwd='evaluator').decode('utf-8'))
    shutil.copy2(os.path.join('evaluator', (log_dir + '.summary.csv')), os.path.join(log_dir, 'summary.csv'))
def main(log_dir, model_path, augmentation, dataset, batch_size, learning_rate, num_workers):
    """Train a spherical CNN on the (perturbed) SHREC'17 dataset.

    Snapshots this script and the model file into `log_dir`, trains for 300
    epochs with a step learning-rate schedule, and saves the weights to
    state.pkl after every epoch.
    """
    arguments = copy.deepcopy(locals())
    os.mkdir(log_dir)
    # Snapshot the training script and model definition for reproducibility.
    shutil.copy2(__file__, os.path.join(log_dir, 'script.py'))
    shutil.copy2(model_path, os.path.join(log_dir, 'model.py'))
    # Log to both stderr and log_dir/log.txt.
    logger = logging.getLogger('train')
    logger.setLevel(logging.DEBUG)
    logger.handlers = []
    ch = logging.StreamHandler()
    logger.addHandler(ch)
    fh = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
    logger.addHandler(fh)
    logger.info('%s', repr(arguments))
    torch.backends.cudnn.benchmark = True
    # Load the Model class from the model.py snapshot.
    loader = importlib.machinery.SourceFileLoader('model', os.path.join(log_dir, 'model.py'))
    mod = types.ModuleType(loader.name)
    loader.exec_module(mod)
    model = mod.Model(55)
    model.cuda()
    logger.info('{} paramerters in total'.format(sum((x.numel() for x in model.parameters()))))
    logger.info('{} paramerters in the last layer'.format(sum((x.numel() for x in model.out_layer.parameters()))))
    bw = model.bandwidths[0]
    # Each mesh is projected `augmentation` times (random rotation/translation)
    # onto a bandwidth-bw sphere; projections are cached on disk as .npy.
    transform = CacheNPY(prefix='b{}_'.format(bw), repeat=augmentation, transform=torchvision.transforms.Compose([ToMesh(random_rotations=True, random_translation=0.1), ProjectOnSphere(bandwidth=bw)]))

    def target_transform(x):
        # Map a ShapeNet synset id to its class index (55 classes).
        classes = ['02691156', '02747177', '02773838', '02801938', '02808440', '02818832', '02828884', '02843684', '02871439', '02876657', '02880940', '02924116', '02933112', '02942699', '02946921', '02954340', '02958343', '02992529', '03001627', '03046257', '03085013', '03207941', '03211117', '03261776', '03325088', '03337140', '03467517', '03513137', '03593526', '03624134', '03636649', '03642806', '03691459', '03710193', '03759954', '03761084', '03790512', '03797390', '03928116', '03938244', '03948459', '03991062', '04004475', '04074963', '04090263', '04099429', '04225987', '04256520', '04330267', '04379243', '04401088', '04460130', '04468005', '04530566', '04554684']
        return classes.index(x[0])

    train_set = Shrec17('data', dataset, perturbed=True, download=True, transform=transform, target_transform=target_transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=True, drop_last=True)
    # lr=0 is a placeholder; the real rate is set per epoch below.
    optimizer = torch.optim.SGD(model.parameters(), lr=0, momentum=0.9)

    def train_step(data, target):
        # One SGD step; returns (loss, #correct) for logging.
        model.train()
        (data, target) = (data.cuda(), target.cuda())
        prediction = model(data)
        loss = F.nll_loss(prediction, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        correct = prediction.data.max(1)[1].eq(target.data).long().cpu().sum()
        return (loss.item(), correct.item())

    def get_learning_rate(epoch):
        # Step schedule: x1 before epoch 100, x0.1 before 200, x0.01 after.
        limits = [100, 200]
        lrs = [1, 0.1, 0.01]
        assert (len(lrs) == (len(limits) + 1))
        for (lim, lr) in zip(limits, lrs):
            if (epoch < lim):
                return (lr * learning_rate)
        return (lrs[(- 1)] * learning_rate)

    for epoch in range(300):
        lr = get_learning_rate(epoch)
        logger.info('learning rate = {} and batch size = {}'.format(lr, train_loader.batch_size))
        for p in optimizer.param_groups:
            p['lr'] = lr
        total_loss = 0
        total_correct = 0
        time_before_load = time.perf_counter()
        for (batch_idx, (data, target)) in enumerate(train_loader):
            time_after_load = time.perf_counter()
            time_before_step = time.perf_counter()
            (loss, correct) = train_step(data, target)
            total_loss += loss
            total_correct += correct
            logger.info('[{}:{}/{}] LOSS={:.2} <LOSS>={:.2} ACC={:.2} <ACC>={:.2} time={:.2}+{:.2}'.format(epoch, batch_idx, len(train_loader), loss, (total_loss / (batch_idx + 1)), (correct / len(data)), ((total_correct / len(data)) / (batch_idx + 1)), (time_after_load - time_before_load), (time.perf_counter() - time_before_step)))
            time_before_load = time.perf_counter()
        # Save a checkpoint after every epoch.
        torch.save(model.state_dict(), os.path.join(log_dir, 'state.pkl'))
def s2_near_identity_grid(max_beta=(np.pi / 8), n_alpha=8, n_beta=3):
    """Sampling grid of (beta, alpha) pairs concentrated near the north pole.

    :return: rings around the north pole
    size of the kernel = n_alpha * n_beta
    """
    # BUG FIX: np.float was removed in NumPy 1.24; the builtin float is the
    # equivalent dtype.
    beta = ((np.arange(start=1, stop=(n_beta + 1), dtype=float) * max_beta) / n_beta)
    alpha = np.linspace(start=0, stop=(2 * np.pi), num=n_alpha, endpoint=False)
    (B, A) = np.meshgrid(beta, alpha, indexing='ij')
    B = B.flatten()
    A = A.flatten()
    grid = np.stack((B, A), axis=1)
    return tuple((tuple(ba) for ba in grid))
def s2_equatorial_grid(max_beta=0, n_alpha=32, n_beta=1):
    """Sampling grid of (beta, alpha) pairs forming rings around the equator.

    :return: rings around the equator
    size of the kernel = n_alpha * n_beta
    """
    betas = np.linspace(np.pi / 2 - max_beta, np.pi / 2 + max_beta, num=n_beta, endpoint=True)
    alphas = np.linspace(0, 2 * np.pi, num=n_alpha, endpoint=False)
    # beta-major ordering, matching a meshgrid with indexing='ij'.
    return tuple((beta, alpha) for beta in betas for alpha in alphas)
def s2_soft_grid(b):
    """SOFT-style sampling grid on S2 for bandwidth `b`: a 2b x 2b grid of
    (beta, alpha) pairs with beta offset by half a step from the poles."""
    betas = (np.arange(2 * b) + 0.5) / (2 * b) * np.pi
    alphas = np.linspace(0, 2 * np.pi, num=2 * b, endpoint=False)
    # beta-major ordering, matching a meshgrid with indexing='ij'.
    return tuple((beta, alpha) for beta in betas for alpha in alphas)
def s2_mm(x, y):
    '''
    Spectral multiplication of a batch of S2 signals with S2 filters.

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    '''
    from s2cnn.utils.complex import complex_mm
    # Last dimension holds (real, imag).
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    if x.is_cuda:
        # Custom CUDA kernels with autograd support.
        return _cuda_S2_mm.apply(x, y)
    # CPU path: one block per degree l, each of size (2l+1).
    nl = round((nspec ** 0.5))  # nspec == nl^2 for an S2 spectrum
    Fz_list = []
    begin = 0
    for l in range(nl):
        L = ((2 * l) + 1)  # number of orders m at degree l
        size = L
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        Fx = Fx.view((L * nbatch), nfeature_in, 2)
        Fy = Fy.transpose(0, 1)
        Fy = Fy.contiguous()
        Fy = Fy.view(nfeature_in, (L * nfeature_out), 2)
        # x times conjugate(y), batched over (m, batch) x (n, f_out).
        Fz = complex_mm(Fx, Fy, conj_y=True)
        Fz = Fz.view(L, nbatch, L, nfeature_out, 2)
        Fz = Fz.transpose(1, 2)
        Fz = Fz.contiguous()
        # Degree l contributes (2l+1)^2 rows (all m, n combinations).
        Fz = Fz.view((L * L), nbatch, nfeature_out, 2)
        Fz_list.append(Fz)
        begin += size
    z = torch.cat(Fz_list, 0)
    return z
class _cuda_S2_mm(torch.autograd.Function):
    """Autograd wrapper around the custom CUDA kernels for s2_mm."""

    @staticmethod
    def forward(ctx, x, y):
        # x: [l*m, batch, f_in, 2], y: [l*m, f_in, f_out, 2]
        ctx.save_for_backward(x, y)
        return _cuda_s2_mm(x, y)

    @staticmethod
    def backward(ctx, gradz):
        import s2cnn.utils.cuda as cuda_utils
        (x, y) = ctx.saved_tensors
        nl = round((x.size(0) ** 0.5))
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        # Length of the output spectrum: sum over l of (2l+1)^2.
        nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
        device = torch.cuda.current_device()
        # Compile (memoised) gradient kernels specialised to these sizes.
        gradx_cuda_kernel = _setup_s2mm_gradx_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        grady_cuda_kernel = _setup_s2mm_grady_cuda_kernel(nbatch=nbatch, nspec=nspec, nl=nl, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
        stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
        gradx = grady = None
        if ctx.needs_input_grad[0]:
            gradx = gradz.new_empty(((nl ** 2), nbatch, nfeature_in, 2))
            gradx_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nbatch) * nfeature_in), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), y.contiguous().data_ptr(), gradx.data_ptr()], stream=stream)
        if ctx.needs_input_grad[1]:
            grady = gradz.new_empty(((nl ** 2), nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks((((nl ** 2) * nfeature_in) * nfeature_out), 1024), 1, 1), args=[gradz.contiguous().data_ptr(), x.contiguous().data_ptr(), grady.data_ptr()], stream=stream)
        return (gradx, grady)
def _cuda_s2_mm(x, y):
    '''
    Forward CUDA implementation of s2_mm.

    :param x: [l * m, batch, feature_in, complex]
    :param y: [l * m, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    '''
    import s2cnn.utils.cuda as cuda_utils
    # The kernel is float32-only and requires both operands on the GPU.
    assert (x.is_cuda and (x.dtype == torch.float32))
    assert (y.is_cuda and (y.dtype == torch.float32))
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    assert (y.size(0) == x.size(0))
    nl = round((x.size(0) ** 0.5))
    # Length of the output spectrum: sum over l of (2l+1)^2.
    nspec = ((((4 * (nl ** 2)) - 1) * nl) // 3)
    assert (x.size(0) == (nl ** 2))
    assert (y.size(0) == (nl ** 2))
    device = torch.cuda.current_device()
    cuda_kernel = _setup_s2mm_cuda_kernel(nbatch=nbatch, nspec=nspec, nfeature_in=nfeature_in, nfeature_out=nfeature_out, device=device)
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)
    output = x.new_empty((nspec, nbatch, nfeature_out, 2))
    # One thread per (spectral index, batch, output feature) triple.
    cuda_kernel(block=(cuda_utils.CUDA_NUM_THREADS, 1, 1), grid=(cuda_utils.get_blocks(((nspec * nbatch) * nfeature_out), 1024), 1, 1), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return output
@lru_cache(maxsize=32)
def _setup_s2mm_cuda_kernel(nbatch, nspec, nfeature_in, nfeature_out, device=0):
    """Compile (and memoise via lru_cache) the forward CUDA kernel for s2_mm,
    specialised to the given tensor sizes via template substitution."""
    kernel = Template('\n#define COMPUTE_LMN(s) int l = powf(3.0/4.0 * s, 1.0/3.0) - 0.5; int L = l * (4 * l * l - 1) / 3; int rest = s - L; if (rest >= (2 * l + 1) * (2 * l + 1)) { ++l; L = l * (4 * l * l - 1) / 3; rest = s - L; } int m = rest / (2 * l + 1) - l; int n = rest % (2 * l + 1) - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < ${nspec} * ${nbatch} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_out, ${nfeature_out})\n\n        // compute s -> (l,m,n)\n        COMPUTE_LMN(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_in = 0; f_in < ${nfeature_in}; ++f_in) {\n            float x_re = in_x[CONTRACT1(m, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 0];\n            float x_im = in_x[CONTRACT1(m, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 1];\n            float y_re = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n            float y_im = in_y[CONTRACT1(n, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n            // x times y conjugate\n            out_re += x_re * y_re + x_im * y_im;\n            out_im += x_im * y_re - x_re * y_im;\n        }\n\n        out[index * 2 + 0] = out_re;\n        out[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm.cu', 'main_')
@lru_cache(maxsize=32)
def _setup_s2mm_gradx_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    """Compile (and memoise via lru_cache) the CUDA kernel computing the
    gradient of s2_mm with respect to x, specialised to the given sizes."""
    kernel = Template('\n#define COMPUTE_LM(s) int l = sqrtf(s); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* y, float* grad_x) {\n    for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nbatch} * ${nfeature_in}; index += blockDim.x * gridDim.x) {\n        EXTRACT(s, i, ${nbatch}, f_in, ${nfeature_in})\n\n        // compute s -> (l,m)\n        COMPUTE_LM(s)\n\n        float out_re = 0.0;\n        float out_im = 0.0;\n\n        for (int f_out = 0; f_out < ${nfeature_out}; ++f_out) {\n            for (int k = -l; k <= l; ++k) {\n                float grad_z_re = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n                float grad_z_im = grad_z[CONTRACT2(m, k, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n                float y_re = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 0];\n                float y_im = y[CONTRACT1(k, f_in, ${nfeature_in}, f_out, ${nfeature_out}) * 2 + 1];\n\n                // grad_z times y\n                out_re += grad_z_re * y_re - grad_z_im * y_im;\n                out_im += grad_z_re * y_im + grad_z_im * y_re;\n            }\n        }\n\n        grad_x[index * 2 + 0] = out_re;\n        grad_x[index * 2 + 1] = out_im;\n    }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_gradx.cu', 'main_')
@lru_cache(maxsize=32)
def _setup_s2mm_grady_cuda_kernel(nbatch, nspec, nl, nfeature_in, nfeature_out, device=0):
    """Compile (and memoize) the CUDA kernel computing grad wrt y for s2_mm.

    :param nbatch: batch size
    :param nspec: number of SO(3) spectral coefficients of grad_z
        (appears unused inside the template; kept in the cache key)
    :param nl: number of degrees l; grad_y has nl*nl spectral rows
        (flat layout l*l + l + m, judging by COMPUTE_LM / CONTRACT1 below)
    :param nfeature_in: number of input features
    :param nfeature_out: number of output features
    :param device: device index; only serves as part of the lru_cache key
    :return: compiled kernel ``main_`` taking (grad_z, x, grad_y) pointers
    """
    # One grid-stride thread per grad_y element (s=(l,m), f_in, f_out); each
    # thread sums over the batch i and the inner spectral index k the product
    # conj(grad_z) * x ("conjugate grad_z times x" below).
    kernel = Template('\n#define COMPUTE_LM(s) int l = powf(s, 0.5); int L = (4 * l * l - 1) * l / 3; int m = s - l * l - l;\n\n#define EXTRACT(i1, i2, n2, i3, n3) int i1 = index; int i3 = i1 % (n3); i1 /= n3; int i2 = i1 % (n2); i1 /= n2;\n\n#define CONTRACT1(s1, i2, n2, i3, n3) ( ( (l * l + (l + (s1))) * (n2) + (i2) ) * (n3) + (i3) )\n\n#define CONTRACT2(s1, s2, i2, n2, i3, n3) ( ( (L + (l + (s1)) * (2 * l + 1) + (l + (s2))) * (n2) + (i2) ) * (n3) + (i3) )\n\nextern "C"\n__global__ void main_(const float* grad_z, const float* x, float* grad_y) {\n for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (${nl} * ${nl}) * ${nfeature_in} * ${nfeature_out}; index += blockDim.x * gridDim.x) {\n EXTRACT(s, f_in, ${nfeature_in}, f_out, ${nfeature_out})\n\n // compute s -> (l,m)\n COMPUTE_LM(s)\n\n float out_re = 0.0;\n float out_im = 0.0;\n\n for (int i = 0; i < ${nbatch}; ++i) {\n for (int k = -l; k <= l; ++k) {\n float grad_z_re = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 0];\n float grad_z_im = grad_z[CONTRACT2(k, m, i, ${nbatch}, f_out, ${nfeature_out}) * 2 + 1];\n float x_re = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 0];\n float x_im = x[CONTRACT1(k, i, ${nbatch}, f_in, ${nfeature_in} ) * 2 + 1];\n\n // conjugate grad_z times x\n out_re += grad_z_re * x_re + grad_z_im * x_im;\n out_im += grad_z_re * x_im - grad_z_im * x_re;\n }\n }\n\n grad_y[index * 2 + 0] = out_re;\n grad_y[index * 2 + 1] = out_im;\n }\n}\n').substitute({'nbatch': nbatch, 'nspec': nspec, 'nl': nl, 'nfeature_in': nfeature_in, 'nfeature_out': nfeature_out})
    # imported lazily so importing this module does not require a CUDA setup
    import s2cnn.utils.cuda as cuda_utils
    return cuda_utils.compile_kernel(kernel, 's2mm_grady.cu', 'main_')
def test_compare_cuda_cpu():
    """Check that the CUDA and CPU code paths of s2_mm agree on random data."""
    nspec = 1 + 3 + 5 + 7  # sum of (2l+1) for l = 0..3
    x = torch.rand(nspec, 2, 3, 2)  # [l*m, batch, feature_in, complex]
    y = torch.rand(nspec, 3, 5, 2)  # [l*m, feature_in, feature_out, complex]
    z_cpu = s2_mm(x, y)
    z_gpu = s2_mm(x.cuda(), y.cuda()).cpu()
    # normalized maximum deviation between the two implementations
    q = (z_cpu - z_gpu).abs().max().item() / z_cpu.std().item()
    print(q)
    assert q < 1e-4
def so3_rft(x, b, grid):
    """
    Real Fourier Transform

    :param x: [..., beta_alpha_gamma]
    :param b: output bandwidth signal
    :param grid: tuple of (beta, alpha, gamma) tuples
    :return: [l * m * n, ..., complex]
    """
    # precomputed transform matrix, cached per (b, grid, device)
    F = _setup_so3_ft(b, grid, device_type=x.device.type, device_index=x.device.index)
    assert x.size(-1) == F.size(0)
    original_shape = x.size()
    flat = x.view(-1, original_shape[-1])
    # contract the spatial axis against F: result is [spectral, batch, complex]
    out = torch.einsum('ia,afc->fic', (flat, F.clone()))
    return out.view(-1, *original_shape[:-1], 2)
@cached_dirpklgz('cache/setup_so3_ft')
def __setup_so3_ft(b, grid):
    """Precompute the SO(3) Fourier transform matrix for the given grid.

    Returns a float array of shape [len(grid), n_spectral, 2] holding the
    conjugated Wigner D matrix entries (real/imag split on the last axis).
    """
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix
    n_spectral = np.sum([(2 * l + 1) ** 2 for l in range(b)])
    F = np.zeros((len(grid), n_spectral), dtype=complex)
    # fill one row per grid point with the flattened D^l blocks, l = 0..b-1
    for row, (beta, alpha, gamma) in zip(F, grid):
        blocks = [wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs').conj()
                  for l in range(b)]
        row[:] = np.hstack([block.flatten() for block in blocks])
    # reinterpret complex entries as (real, imag) float pairs
    return F.view('float').reshape(-1, n_spectral, 2)
@lru_cache(maxsize=32)
def _setup_so3_ft(b, grid, device_type, device_index):
    """Per-device cached torch tensor of the SO(3) FT matrix from __setup_so3_ft."""
    target = torch.device(device_type, device_index)
    F = __setup_so3_ft(b, grid)
    return torch.tensor(F.astype(np.float32), dtype=torch.float32, device=target)
def so3_mm(x, y):
    '''
    Block-diagonal spectral product on SO(3): for each degree l, contract the
    (2l+1)x(2l+1) blocks of x against the conjugate of those of y.

    :param x: [l * m * n, batch, feature_in, complex]
    :param y: [l * m * n, feature_in, feature_out, complex]
    :return: [l * m * n, batch, feature_out, complex]
    '''
    from s2cnn.utils.complex import complex_mm
    import math
    assert (y.size(3) == 2)
    assert (x.size(3) == 2)
    nbatch = x.size(1)
    nfeature_in = x.size(2)
    nfeature_out = y.size(2)
    assert (y.size(1) == nfeature_in)
    nspec = x.size(0)
    assert (y.size(0) == nspec)
    # recover the number of degrees: nspec == sum_{l<nl} (2l+1)^2 == nl(4nl^2-1)/3
    nl = math.ceil((((3 / 4) * nspec) ** (1 / 3)))
    assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
    if x.is_cuda:
        # GPU path: custom autograd Function backed by a CUDA kernel
        return _cuda_SO3_mm.apply(x, y)
    Fz_list = []
    begin = 0
    for l in range(nl):
        L = ((2 * l) + 1)
        size = (L ** 2)
        # slice out the (2l+1)^2 coefficients of degree l
        Fx = x[begin:(begin + size)]
        Fy = y[begin:(begin + size)]
        # reshape x so the per-degree product becomes one complex matmul:
        # [m, n, batch, f_in, 2] -> [batch * m, f_in * n, 2]
        Fx = Fx.view(L, L, nbatch, nfeature_in, 2)
        Fx = Fx.transpose(0, 1)
        Fx = Fx.transpose(0, 2)
        Fx = Fx.transpose(2, 3)
        Fx = Fx.contiguous()
        Fx = Fx.view((nbatch * L), (nfeature_in * L), 2)
        # reshape y to [f_in * p, n' * f_out, 2] so inner dims line up
        Fy = Fy.view(L, L, nfeature_in, nfeature_out, 2)
        Fy = Fy.transpose(0, 2)
        Fy = Fy.contiguous()
        Fy = Fy.view((nfeature_in * L), (L * nfeature_out), 2)
        # complex matrix product with the second operand conjugated
        Fz = complex_mm(Fx, Fy, conj_y=True)
        # back to spectral-major layout: [L*L, batch, f_out, 2]
        Fz = Fz.view(nbatch, (L * L), nfeature_out, 2)
        Fz = Fz.transpose(0, 1)
        Fz_list.append(Fz)
        begin += size
    # concatenate the per-degree results along the spectral axis
    z = torch.cat(Fz_list, 0)
    return z
class _cuda_SO3_mm(torch.autograd.Function):
    # Autograd wrapper around the CUDA SO(3) spectral matrix product.
    # Forward computes z = x (x) conj(y) per degree l; backward reuses the same
    # kernel generator with transpose/conjugate flags (see
    # _setup_so3mm_cuda_kernel's docstring for what each flag permutes).

    @staticmethod
    def forward(ctx, x, y):
        '''
        :param x: [l * m * n, batch, feature_in, complex]
        :param y: [l * m * n, feature_in, feature_out, complex]
        :return: [l * m * n, batch, feature_out, complex]
        '''
        assert (x.is_cuda and (x.dtype == torch.float32))
        assert (y.is_cuda and (y.dtype == torch.float32))
        assert (y.size(3) == 2)
        assert (x.size(3) == 2)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        assert (y.size(1) == nfeature_in)
        nspec = x.size(0)
        assert (y.size(0) == nspec)
        # nspec must equal sum_{l<nl} (2l+1)^2 == nl(4nl^2-1)/3
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        ctx.save_for_backward(x, y)
        device = torch.cuda.current_device()
        # conj_y + trans_y_spec: multiply by the conjugate of y with its
        # spectral indices swapped, matching the CPU complex_mm(..., conj_y=True)
        cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_out, nk=nfeature_in, conj_y=True, trans_y_spec=True, device=device)
        output = x.new_empty((nspec, nbatch, nfeature_out, 2))
        cuda_kernel(x, y, output)
        return output

    @staticmethod
    def backward(ctx, gradz):
        (x, y) = ctx.saved_tensors
        nspec = x.size(0)
        nbatch = x.size(1)
        nfeature_in = x.size(2)
        nfeature_out = y.size(2)
        nl = round((((3 / 4) * nspec) ** (1 / 3)))
        assert (nspec == ((nl * ((4 * (nl ** 2)) - 1)) // 3))
        gradx = grady = None
        device = torch.cuda.current_device()
        if ctx.needs_input_grad[0]:
            # dL/dx = gradz contracted with y over the feature_out axis
            gradx_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nbatch, nj=nfeature_in, nk=nfeature_out, trans_y_feature=True, device=device)
            gradx = gradz.new_empty((nspec, nbatch, nfeature_in, 2))
            gradx_cuda_kernel(gradz, y, gradx)
        if ctx.needs_input_grad[1]:
            # dL/dy = conj(gradz) contracted with x over the batch axis,
            # with output features transposed back to [f_in, f_out]
            grady_cuda_kernel = _setup_so3mm_cuda_kernel(nl=nl, ni=nfeature_out, nj=nfeature_in, nk=nbatch, trans_out_feature=True, conj_x=True, trans_x_spec=True, trans_x_feature=True, device=device)
            grady = gradz.new_empty((nspec, nfeature_in, nfeature_out, 2))
            grady_cuda_kernel(gradz, x, grady)
        return (gradx, grady)
@lru_cache(maxsize=32)
def _setup_so3mm_cuda_kernel(nl, ni, nj, nk, conj_x=False, conj_y=False, trans_x_spec=False, trans_x_feature=False, trans_y_spec=False, trans_y_feature=False, trans_out_feature=False, device=0):
    '''
    return a function that computes
    out[l*m*n, i, j] = sum_k sum_p x[l*m*p, i, k] y[l*p*n, k, j]
    where out, x, y are complex valued

    if conj_x is set to True, x is conjugated
    if conj_y is set to True, y is conjugated
    if trans_x_spec is set to True m and p are permuted in x[...]
    if trans_y_spec is set to True p and n are permuted in y[...]
    if trans_x_feature is set to True i and k are permuted in x[...]
    if trans_y_feature is set to True k and j are permuted in y[...]
    if trans_out_feature is set to True i and j are permuted in out[...]
    '''
    # Each flag combination is baked into #defines so the indexing is resolved
    # at compile time (no per-thread branching); lru_cache memoizes per config.
    kernel = '\n#define NI {}\n#define NJ {}\n#define NK {}\n'.format(ni, nj, nk)
    if ((not trans_x_spec) and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NI + i) * NK + k)\n'
    if ((not trans_x_spec) and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + m * L + p) * NK + k) * NI + i)\n'
    if (trans_x_spec and (not trans_x_feature)):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NI + i) * NK + k)\n'
    if (trans_x_spec and trans_x_feature):
        kernel += '#define INDEX_X (((L0 + p * L + m) * NK + k) * NI + i)\n'
    if ((not trans_y_spec) and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NK + k) * NJ + j)\n'
    if ((not trans_y_spec) and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + p * L + n) * NJ + j) * NK + k)\n'
    if (trans_y_spec and (not trans_y_feature)):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NK + k) * NJ + j)\n'
    if (trans_y_spec and trans_y_feature):
        kernel += '#define INDEX_Y (((L0 + n * L + p) * NJ + j) * NK + k)\n'
    if (not trans_out_feature):
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NI + i) * NJ + j)\n'
    if trans_out_feature:
        kernel += '#define INDEX_OUT (((L0 + m * L + n) * NJ + j) * NI + i)\n'
    kernel += '\n#define CONJ_X {}\n#define CONJ_Y {}\n'.format(('x_im = -x_im;' if conj_x else ';'), ('y_im = -y_im;' if conj_y else ';'))
    # 32x32 shared-memory tiled complex matrix product; blockIdx.z selects the
    # degree l, L0 is the flat offset of degree l in the spectral axis.
    kernel += '\n#define CEIL_DIV(x, y) (((x) + (y) - 1) / (y))\n\nextern "C"\n__global__ void main_(const float* in_x, const float* in_y, float* out)\n{\n // start of thread independant code\n int l = blockIdx.z;\n int L = 2 * l + 1;\n int L0 = (4 * l*l - 1) * l / 3;\n\n if (blockIdx.y * 32 >= L * NI || blockIdx.x * 32 >= L * NJ) {\n return;\n }\n\n int ntile = CEIL_DIV(L * NK, 32);\n // end of thread independant code\n\n int mi = blockIdx.y * 32 + threadIdx.y;\n int m = mi / NI;\n int i = mi % NI;\n int nj = blockIdx.x * 32 + threadIdx.x;\n int n = nj / NJ;\n int j = nj % NJ;\n\n float sum_re = 0.0;\n float sum_im = 0.0;\n\n for (int tile = 0; tile < ntile; ++tile) {\n __shared__ float tileX[2][32][32];\n __shared__ float tileY[2][32][32];\n\n int pk = tile * 32 + threadIdx.x;\n int p = pk / NK;\n int k = pk % NK;\n int index = INDEX_X * 2;\n tileX[0][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 0] : 0.0;\n tileX[1][threadIdx.y][threadIdx.x] = m < L && p < L ? in_x[index + 1] : 0.0;\n\n pk = tile * 32 + threadIdx.y;\n p = pk / NK;\n k = pk % NK;\n index = INDEX_Y * 2;\n tileY[0][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 0] : 0.0;\n tileY[1][threadIdx.y][threadIdx.x] = p < L && n < L ? in_y[index + 1] : 0.0;\n\n __syncthreads();\n\n for (int any = 0; any < 32; ++any) {\n float x_re = tileX[0][threadIdx.y][any];\n float x_im = tileX[1][threadIdx.y][any];\n float y_re = tileY[0][any][threadIdx.x];\n float y_im = tileY[1][any][threadIdx.x];\n\n CONJ_X\n CONJ_Y\n\n sum_re += x_re * y_re - x_im * y_im;\n sum_im += x_re * y_im + x_im * y_re;\n }\n\n __syncthreads();\n }\n\n if (m < L && n < L) {\n int index = INDEX_OUT * 2;\n out[index + 0] = sum_re;\n out[index + 1] = sum_im;\n }\n}\n'
    # imported lazily so importing this module does not require a CUDA setup
    import s2cnn.utils.cuda as cuda_utils
    kernel = cuda_utils.compile_kernel(kernel, 'so3_mm.cu', 'main_')
    stream = cuda_utils.Stream(ptr=torch.cuda.current_stream().cuda_stream)

    def fun(x, y, output):
        # the kernel writes through raw pointers, so the output buffer must be
        # contiguous; inputs are made contiguous on the fly below
        assert output.is_contiguous()
        kernel(block=(32, 32, 1), grid=(math.ceil(((((2 * nl) - 1) * nj) / 32)), math.ceil(((((2 * nl) - 1) * ni) / 32)), nl), args=[x.contiguous().data_ptr(), y.contiguous().data_ptr(), output.data_ptr()], stream=stream)
    return fun
def test_compare_cuda_cpu():
    """Check that the CUDA and CPU code paths of so3_mm agree on random data."""
    nspec = 1 + 9 + 25 + 49  # sum of (2l+1)^2 for l = 0..3
    x = torch.rand(nspec, 2, 3, 2)  # [l*m*n, batch, feature_in, complex]
    y = torch.rand(nspec, 3, 5, 2)  # [l*m*n, feature_in, feature_out, complex]
    z_cpu = so3_mm(x, y)
    z_gpu = so3_mm(x.cuda(), y.cuda()).cpu()
    # normalized maximum deviation between the two implementations
    q = (z_cpu - z_gpu).abs().max().item() / z_cpu.std().item()
    print(q)
    assert q < 1e-4
class S2Convolution(Module):
    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        """
        :param nfeature_in: number of input features
        :param nfeature_out: number of output features
        :param b_in: input bandwidth (precision of the input SOFT grid)
        :param b_out: output bandwidth
        :param grid: points of the sphere defining the kernel, tuple of (alpha, beta)'s
        """
        super(S2Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        # kernel values at the grid points, initialized uniformly in [-1, 1)
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
        # fixed rescaling applied to the kernel at every forward pass
        self.scaling = 1.0 / math.sqrt(len(self.grid) * self.nfeature_in * (self.b_out ** 4.0) / (self.b_in ** 2.0))
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))

    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert x.size(1) == self.nfeature_in
        assert x.size(2) == 2 * self.b_in
        assert x.size(3) == 2 * self.b_in
        # convolution as a product in the spectral domain
        spectral_x = S2_fft_real.apply(x, self.b_out)
        spectral_k = s2_rft(self.kernel * self.scaling, self.b_out, self.grid)
        out = SO3_ifft_real.apply(s2_mm(spectral_x, spectral_k))
        return out + self.bias
class SO3Convolution(Module):
    def __init__(self, nfeature_in, nfeature_out, b_in, b_out, grid):
        """
        :param nfeature_in: number of input features
        :param nfeature_out: number of output features
        :param b_in: input bandwidth (precision of the input SOFT grid)
        :param b_out: output bandwidth
        :param grid: points of the SO(3) group defining the kernel, tuple of (alpha, beta, gamma)'s
        """
        super(SO3Convolution, self).__init__()
        self.nfeature_in = nfeature_in
        self.nfeature_out = nfeature_out
        self.b_in = b_in
        self.b_out = b_out
        self.grid = grid
        # kernel values at the grid points, initialized uniformly in [-1, 1)
        self.kernel = Parameter(torch.empty(nfeature_in, nfeature_out, len(grid)).uniform_(-1, 1))
        self.bias = Parameter(torch.zeros(1, nfeature_out, 1, 1, 1))
        # fixed rescaling applied to the kernel at every forward pass
        self.scaling = 1.0 / math.sqrt(len(self.grid) * self.nfeature_in * (self.b_out ** 3.0) / (self.b_in ** 3.0))

    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        assert x.size(1) == self.nfeature_in
        assert x.size(2) == 2 * self.b_in
        assert x.size(3) == 2 * self.b_in
        assert x.size(4) == 2 * self.b_in
        # convolution as a product in the spectral domain
        spectral_x = SO3_fft_real.apply(x, self.b_out)
        spectral_k = so3_rft(self.kernel * self.scaling, self.b_out, self.grid)
        assert spectral_x.size(0) == spectral_k.size(0)
        assert spectral_x.size(2) == spectral_k.size(1)
        z = so3_mm(spectral_x, spectral_k)
        assert z.size(0) == spectral_x.size(0)
        assert z.size(1) == spectral_x.size(1)
        assert z.size(2) == spectral_k.size(2)
        return SO3_ifft_real.apply(z) + self.bias
class SO3Shortcut(Module):
    '''
    Useful for ResNet
    '''

    def __init__(self, nfeature_in, nfeature_out, b_in, b_out):
        super(SO3Shortcut, self).__init__()
        assert b_out <= b_in
        if nfeature_in == nfeature_out and b_in == b_out:
            # shapes already match: the shortcut is the identity
            self.conv = None
        else:
            # single-point SO(3) convolution to adapt features and bandwidth
            self.conv = SO3Convolution(nfeature_in=nfeature_in, nfeature_out=nfeature_out, b_in=b_in, b_out=b_out, grid=((0, 0, 0),))

    def forward(self, x):
        """
        :x: [batch, feature_in, beta, alpha, gamma]
        :return: [batch, feature_out, beta, alpha, gamma]
        """
        return x if self.conv is None else self.conv(x)
def so3_integrate(x):
    """
    Integrate a signal on SO(3) using the Haar measure

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    :return y: [...] (...)
    """
    assert x.size(-1) == x.size(-2)
    assert x.size(-2) == x.size(-3)
    b = x.size(-1) // 2
    # quadrature weights over beta, cached per (b, device)
    w = _setup_so3_integrate(b, device_type=x.device.type, device_index=x.device.index)  # [beta]
    # Sum over gamma and alpha (uniform weights) in one reduction.
    # Fix: the original did sum(dim=-1).squeeze(-1) twice — the squeeze was a
    # no-op left over from pre-0.2 torch where sum() kept the reduced dim.
    x = torch.sum(x, dim=(-1, -2))  # [..., beta]
    sz = x.size()
    # weighted sum over beta via a single mat-vec
    x = x.view(-1, 2 * b)
    w = w.view(2 * b, 1)
    x = torch.mm(x, w).squeeze(-1)
    x = x.view(*sz[:-1])
    return x
@lru_cache(maxsize=32)
@show_running
def _setup_so3_integrate(b, device_type, device_index):
    """Return the S3 quadrature weights for bandwidth ``b`` as a float32 tensor.

    Cached per (b, device) via lru_cache. The ``show_running`` decorator
    presumably reports progress while the weights are computed — see its
    definition for details.
    """
    # imported lazily: lie_learn is only needed the first time per (b, device)
    import lie_learn.spaces.S3 as S3
    return torch.tensor(S3.quadrature_weights(b), dtype=torch.float32, device=torch.device(device_type, device_index))
def so3_rotation(x, alpha, beta, gamma):
    '''
    Rotate a signal on SO(3) by the Euler angles (alpha, beta, gamma).

    :param x: [..., beta, alpha, gamma] (..., 2b, 2b, 2b)
    '''
    b = (x.size()[(- 1)] // 2)
    x_size = x.size()
    # per-degree Wigner D matrices for this rotation, cached per device
    Us = _setup_so3_rotation(b, alpha, beta, gamma, device_type=x.device.type, device_index=x.device.index)
    # rotation acts block-diagonally in the spectral domain:
    # z = IFFT( conj(U_l) @ FFT(x)_l ) for each degree l
    x = SO3_fft_real.apply(x)
    Fz_list = []
    begin = 0
    for l in range(b):
        L = ((2 * l) + 1)
        size = (L ** 2)
        # coefficients of degree l, flattened batch in the middle axis
        Fx = x[begin:(begin + size)]
        Fx = Fx.view(L, (- 1), 2)
        U = Us[l].view(L, L, 2)
        # complex product with the first operand (U) conjugated
        Fz = complex_mm(U, Fx, conj_x=True)
        Fz = Fz.view(size, (- 1), 2)
        Fz_list.append(Fz)
        begin += size
    Fz = torch.cat(Fz_list, 0)
    z = SO3_ifft_real.apply(Fz)
    z = z.contiguous()
    z = z.view(*x_size)
    return z
@cached_dirpklgz('cache/setup_so3_rotation')
def __setup_so3_rotation(b, alpha, beta, gamma):
    """Precompute the Wigner D matrices of a rotation for all degrees l < b.

    Each matrix is returned as a float32 array [2l+1, 2l+1, 2] with the complex
    entries split into a trailing real/imag axis.
    """
    from lie_learn.representations.SO3.wigner_d import wigner_D_matrix
    matrices = []
    for l in range(b):
        D = wigner_D_matrix(l, alpha, beta, gamma, field='complex', normalization='quantum', order='centered', condon_shortley='cs')
        # reinterpret complex64 entries as (real, imag) float32 pairs
        matrices.append(D.astype(np.complex64).view(np.float32).reshape(2 * l + 1, 2 * l + 1, 2))
    return matrices
@lru_cache(maxsize=32)
def _setup_so3_rotation(b, alpha, beta, gamma, device_type, device_index):
    """Per-device cached torch tensors of the Wigner D matrices for a rotation."""
    device = torch.device(device_type, device_index)
    return [torch.tensor(U, dtype=torch.float32, device=device)
            for U in __setup_so3_rotation(b, alpha, beta, gamma)]
def get_blocks(n, num_threads):
    """Number of CUDA blocks needed to cover ``n`` items with ``num_threads``
    threads per block, capped so the grid never exceeds CUDA_MAX_GRID_DIM."""
    def ceil_div(a, b):
        return (a + b - 1) // b

    # how many items each thread must process to stay within the grid limit
    items_per_thread = ceil_div(n, num_threads * CUDA_MAX_GRID_DIM)
    return ceil_div(n, num_threads * items_per_thread)
def compile_kernel(kernel, filename, functioname):
    """Compile a CUDA C source string to PTX and return the named kernel function.

    :param kernel: CUDA C source code
    :param filename: virtual file name used by the compiler for diagnostics
    :param functioname: name of the ``__global__`` entry point to fetch
    """
    ptx = Program(kernel, filename).compile()
    module = function.Module()
    module.load(bytes(ptx.encode()))
    return module.get_function(functioname)
class WaitPrint(threading.Thread):
    """Background thread that prints ``message`` once ``t`` seconds have
    elapsed, unless :meth:`stop` is called first."""

    def __init__(self, t, message):
        super().__init__()
        self.t = t                # delay in seconds before printing
        self.message = message    # text to print (no trailing newline)
        self.running = True       # cooperative cancellation flag

    def stop(self):
        # checked every 0.1 s by run(); makes the thread exit silently
        self.running = False

    def run(self):
        ticks = int(self.t // 0.1)
        for _ in range(ticks):
            time.sleep(0.1)
            if not self.running:
                return
        print(self.message, end='')