code
stringlengths
17
6.64M
def applySoftMax(inputSample, inputSampleShape, numClasses, softmaxTemperature):
    """Apply a temperature-scaled softmax over the class axis of a 5-D Theano tensor.

    Args:
        inputSample: symbolic 5-D tensor laid out as (batch, classes, x, y, z).
        inputSampleShape: concrete shape of inputSample, indexed the same way.
        numClasses: number of classes (presumably equals inputSampleShape[1] —
            the final reshape uses inputSampleShape[1] in the class slot).
        softmaxTemperature: temperature the logits are divided by before softmax.

    Returns:
        (p_y_given_x, y_pred): per-voxel class probabilities in the original
        (batch, classes, x, y, z) layout, and the argmax class per voxel.
    """
    # Move the class axis last so each voxel's logits are contiguous.
    inputSampleReshaped = inputSample.dimshuffle(0, 2, 3, 4, 1)
    inputSampleFlattened = inputSampleReshaped.flatten(1)
    # One row per voxel across the whole batch.
    numClassifiedVoxels = ((inputSampleShape[2] * inputSampleShape[3]) * inputSampleShape[4])
    firstDimOfinputSample2d = (inputSampleShape[0] * numClassifiedVoxels)
    inputSample2d = inputSampleFlattened.reshape((firstDimOfinputSample2d, numClasses))
    # Softmax on the 2-D (voxels x classes) view; temperature > 1 softens it.
    p_y_given_x_2d = T.nnet.softmax((inputSample2d / softmaxTemperature))
    p_y_given_x_class = p_y_given_x_2d.reshape((inputSampleShape[0], inputSampleShape[2], inputSampleShape[3], inputSampleShape[4], inputSampleShape[1]))
    # Restore the (batch, classes, x, y, z) axis order.
    p_y_given_x = p_y_given_x_class.dimshuffle(0, 4, 1, 2, 3)
    y_pred = T.argmax(p_y_given_x, axis=1)
    return (p_y_given_x, y_pred)
def applyBiasToFeatureMaps(bias, featMaps):
    """Add a per-channel bias to a 5-D stack of feature maps.

    The 1-D bias is broadcast over the batch axis and the three spatial
    axes via dimshuffle, then summed onto the feature maps.
    """
    broadcastable_bias = bias.dimshuffle('x', 0, 'x', 'x', 'x')
    return featMaps + broadcastable_bias
class parserConfigIni(object):
    """Reads network/training/testing parameters from a .ini configuration file.

    NOTE(review): uses the Python-2 style `ConfigParser.ConfigParser` module
    name; under Python 3 the module is `configparser` — confirm which
    interpreter this project targets.
    """

    def __init__(_self):
        # Placeholder until a config file is read.
        _self.networkName = []

    def readConfigIniFile(_self, fileName, task):
        """Dispatch on `task`: 0 = create model, 1 = train, 2 = test."""
        def createModel():
            print(' --- Creating model (Reading parameters...)')
            _self.readModelCreation_params(fileName)

        def trainModel():
            print(' --- Training model (Reading parameters...)')
            _self.readModelTraining_params(fileName)

        def testModel():
            print(' --- Testing model (Reading parameters...)')
            _self.readModelTesting_params(fileName)

        # Raises KeyError for any task outside {0, 1, 2}.
        optionsParser = {0: createModel, 1: trainModel, 2: testModel}
        optionsParser[task]()

    def readModelCreation_params(_self, fileName):
        """Load architecture and creation-time training parameters from `fileName`."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.networkName = ConfigIni.get('General', 'networkName')
        _self.folderName = ConfigIni.get('General', 'folderName')
        # json.loads is used to parse list/number-valued entries.
        _self.n_classes = json.loads(ConfigIni.get('CNN_Architecture', 'n_classes'))
        _self.layers = json.loads(ConfigIni.get('CNN_Architecture', 'numkernelsperlayer'))
        _self.kernels = json.loads(ConfigIni.get('CNN_Architecture', 'kernelshapes'))
        _self.intermediate_ConnectedLayers = json.loads(ConfigIni.get('CNN_Architecture', 'intermediateConnectedLayers'))
        _self.pooling_scales = json.loads(ConfigIni.get('CNN_Architecture', 'pooling_scales'))
        _self.dropout_Rates = json.loads(ConfigIni.get('CNN_Architecture', 'dropout_Rates'))
        _self.activationType = json.loads(ConfigIni.get('CNN_Architecture', 'activationType'))
        _self.weight_Initialization_CNN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_CNN'))
        _self.weight_Initialization_FCN = json.loads(ConfigIni.get('CNN_Architecture', 'weight_Initialization_FCN'))
        _self.weightsFolderName = ConfigIni.get('CNN_Architecture', 'weights folderName')
        _self.weightsTrainedIdx = json.loads(ConfigIni.get('CNN_Architecture', 'weights trained indexes'))
        _self.batch_size = json.loads(ConfigIni.get('Training Parameters', 'batch_size'))
        _self.sampleSize_Train = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Train'))
        _self.sampleSize_Test = json.loads(ConfigIni.get('Training Parameters', 'sampleSize_Test'))
        _self.costFunction = json.loads(ConfigIni.get('Training Parameters', 'costFunction'))
        _self.L1_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L1 Regularization Constant'))
        _self.L2_reg_C = json.loads(ConfigIni.get('Training Parameters', 'L2 Regularization Constant'))
        # NOTE: 'Leraning Rate' is misspelled but must match the key used in
        # the .ini files — do not "fix" the string without fixing the configs.
        _self.learning_rate = json.loads(ConfigIni.get('Training Parameters', 'Leraning Rate'))
        _self.momentumType = json.loads(ConfigIni.get('Training Parameters', 'Momentum Type'))
        _self.momentumValue = json.loads(ConfigIni.get('Training Parameters', 'Momentum Value'))
        _self.momentumNormalized = json.loads(ConfigIni.get('Training Parameters', 'momentumNormalized'))
        _self.optimizerType = json.loads(ConfigIni.get('Training Parameters', 'Optimizer Type'))
        _self.rho_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Rho RMSProp'))
        _self.epsilon_RMSProp = json.loads(ConfigIni.get('Training Parameters', 'Epsilon RMSProp'))
        # Stored as 0/1 in the config; converted to a real bool here.
        applyBatchNorm = json.loads(ConfigIni.get('Training Parameters', 'applyBatchNormalization'))
        if (applyBatchNorm == 1):
            _self.applyBatchNorm = True
        else:
            _self.applyBatchNorm = False
        _self.BatchNormEpochs = json.loads(ConfigIni.get('Training Parameters', 'BatchNormEpochs'))
        _self.tempSoftMax = json.loads(ConfigIni.get('Training Parameters', 'SoftMax temperature'))

    def readModelTraining_params(_self, fileName):
        """Load training-image locations and training-schedule parameters."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.imagesFolder = ConfigIni.get('Training Images', 'imagesFolder')
        _self.GroundTruthFolder = ConfigIni.get('Training Images', 'GroundTruthFolder')
        _self.ROIFolder = ConfigIni.get('Training Images', 'ROIFolder')
        _self.indexesForTraining = json.loads(ConfigIni.get('Training Images', 'indexesForTraining'))
        _self.indexesForValidation = json.loads(ConfigIni.get('Training Images', 'indexesForValidation'))
        _self.imageTypesTrain = json.loads(ConfigIni.get('Training Images', 'imageTypes'))
        _self.numberOfEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of Epochs'))
        _self.numberOfSubEpochs = json.loads(ConfigIni.get('Training Parameters', 'number of SubEpochs'))
        _self.numberOfSamplesSupEpoch = json.loads(ConfigIni.get('Training Parameters', 'number of samples at each SubEpoch Train'))
        _self.firstEpochChangeLR = json.loads(ConfigIni.get('Training Parameters', 'First Epoch Change LR'))
        _self.frequencyChangeLR = json.loads(ConfigIni.get('Training Parameters', 'Frequency Change LR'))
        _self.applyPadding = json.loads(ConfigIni.get('Training Parameters', 'applyPadding'))

    def readModelTesting_params(_self, fileName):
        """Load segmentation-image locations and testing parameters."""
        ConfigIni = ConfigParser.ConfigParser()
        ConfigIni.read(fileName)
        _self.imagesFolder = ConfigIni.get('Segmentation Images', 'imagesFolder')
        _self.GroundTruthFolder = ConfigIni.get('Segmentation Images', 'GroundTruthFolder')
        _self.ROIFolder = ConfigIni.get('Segmentation Images', 'ROIFolder')
        _self.imageTypes = json.loads(ConfigIni.get('Segmentation Images', 'imageTypes'))
        _self.indexesToSegment = json.loads(ConfigIni.get('Segmentation Images', 'indexesToSegment'))
        _self.applyPadding = json.loads(ConfigIni.get('Segmentation Images', 'applyPadding'))
def printUsage(error_type):
    """Print an error banner for `error_type`, then command-line usage help."""
    if error_type == 1:
        message = ' ** ERROR!!: Few parameters used.'
    else:
        message = ' ** ERROR!!: Asked to start with an already created network but its name is not specified.'
    print(message)
    for usage_line in (
        ' ******** USAGE ******** ',
        ' --- argv 1: Name of the configIni file.',
        ' --- argv 2: Network model name',
    ):
        print(usage_line)
def networkSegmentation(argv):
    """CLI entry point for segmentation.

    Expects argv = [configIniName, networkModelName]; prints usage and exits
    when too few arguments are given, otherwise runs the testing pipeline.
    """
    if len(argv) < 2:
        printUsage(1)
        sys.exit()
    configIniName, networkModelName = argv[0], argv[1]
    startTesting(networkModelName, configIniName)
    print(' ***************** SEGMENTATION DONE!!! ***************** ')
def arg_parse():
    """Build and parse the command-line arguments for GcnInformax."""
    ap = argparse.ArgumentParser(description='GcnInformax Arguments.')
    ap.add_argument('--DS', dest='DS', help='Dataset')
    # Boolean switches, all off by default.
    for flag in ('local', 'glob', 'prior'):
        ap.add_argument('--' + flag, dest=flag, action='store_const',
                        const=True, default=False)
    ap.add_argument('--lr', dest='lr', type=float, help='Learning rate.')
    ap.add_argument('--num-gc-layers', dest='num_gc_layers', type=int, default=5,
                    help='Number of graph convolution layers before each pooling')
    ap.add_argument('--hidden-dim', dest='hidden_dim', type=int, default=32, help='')
    ap.add_argument('--repeats', dest='repeats', type=int, default=1, help='')
    ap.add_argument('--batch_size', dest='batch_size', type=int, default=128, help='')
    return ap.parse_args()
def raise_measure_error(measure):
    """Raise NotImplementedError naming `measure` and listing supported measures."""
    supported = ['GAN', 'JSD', 'JSD_hard', 'X2', 'KL', 'RKL', 'DV', 'H2', 'W1']
    message = 'Measure `{}` not supported. Supported: {}'.format(measure, supported)
    raise NotImplementedError(message)
def get_positive_expectation(p_samples, measure, average=True, tau_plus=0.5):
    """Computes the positive part of a divergence / difference.

    Args:
        p_samples: Positive samples.
        measure: Measure to compute for.
        average: Average the result over samples.
        tau_plus: Positive-class prior used by the 'JSD_hard' debiasing term.

    Returns:
        torch.Tensor
    """
    log_2 = math.log(2.0)
    # Lazily-evaluated positive-expectation formulas, one per measure.
    formulas = {
        'GAN': lambda: -F.softplus(-p_samples),
        'JSD': lambda: log_2 - F.softplus(-p_samples),
        'JSD_hard': lambda: (log_2 - F.softplus(-p_samples))
                            - (tau_plus / (1 - tau_plus)) * (F.softplus(-p_samples) + p_samples),
        'X2': lambda: p_samples ** 2,
        'KL': lambda: p_samples + 1.0,
        'RKL': lambda: -torch.exp(-p_samples),
        'DV': lambda: p_samples,
        'H2': lambda: 1.0 - torch.exp(-p_samples),
        'W1': lambda: p_samples,
    }
    if measure not in formulas:
        raise_measure_error(measure)
    Ep = formulas[measure]()
    return Ep.mean() if average else Ep
def get_negative_expectation(q_samples, measure, average=True, beta=0, tau_plus=0.5):
    """Computes the negative part of a divergence / difference.

    Args:
        q_samples: Negative samples.
        measure: Measure to compute for.
        average: Average the result over samples.
        beta: Hard-negative reweighting strength for 'JSD_hard' (0 disables it).
        tau_plus: Positive-class prior used by the 'JSD_hard' debiasing term.

    Returns:
        torch.Tensor
    """
    log_2 = math.log(2.0)
    # Lazily-evaluated formulas for the simple (branch-free) measures.
    simple = {
        'GAN': lambda: F.softplus(-q_samples) + q_samples,
        'JSD': lambda: (F.softplus(-q_samples) + q_samples) - log_2,
        'X2': lambda: -0.5 * ((torch.sqrt(q_samples ** 2) + 1.0) ** 2),
        'KL': lambda: torch.exp(q_samples),
        'RKL': lambda: q_samples - 1.0,
        'DV': lambda: log_sum_exp(q_samples, 0) - math.log(q_samples.size(0)),
        'H2': lambda: torch.exp(q_samples) - 1.0,
        'W1': lambda: q_samples,
    }
    if measure == 'JSD_hard':
        if beta == 0:
            # Plain JSD negative term, rescaled by the positive-class prior.
            Eq = get_negative_expectation(q_samples, measure='JSD', average=average, beta=0)
            Eq = Eq / (1 - tau_plus)
        else:
            Eq = F.softplus(-q_samples) + q_samples
            # Exponential reweighting of hard negatives, normalized per row.
            reweight = (-2 * q_samples) / max(q_samples.max(), q_samples.min().abs())
            reweight = (beta * reweight).exp()
            reweight = reweight / reweight.mean(dim=1).view(-1, 1)
            Eq = (reweight * Eq) / (1 - tau_plus)
            Eq -= log_2
    elif measure in simple:
        Eq = simple[measure]()
    else:
        raise_measure_error(measure)
    return Eq.mean() if average else Eq
def infer_conv_size(w, k, s, p):
    """Infers the next size after convolution.

    Args:
        w: Input size.
        k: Kernel size.
        s: Stride.
        p: Padding.

    Returns:
        int: Output size.
    """
    effective_extent = w - k + 2 * p
    return effective_extent // s + 1
class Convnet(nn.Module):
    """Basic convnet convenience class.

    Attributes:
        conv_layers: nn.Sequential of nn.Conv2d layers with batch norm,
            dropout, nonlinearity.
        fc_layers: nn.Sequential of nn.Linear layers with batch norm,
            dropout, nonlinearity.
        reshape: Simple reshape layer.
        conv_shape: Shape of the convolutional output.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.create_layers(*args, **kwargs)

    def create_layers(self, shape, conv_args=None, fc_args=None):
        """Creates layers.

        conv_args are in format (dim_h, f_size, stride, pad, batch_norm, dropout, nonlinearity, pool)
        fc_args are in format (dim_h, batch_norm, dropout, nonlinearity)

        Args:
            shape: Shape of input (dim_x, dim_y, channels).
            conv_args: List of tuple of convolutional arguments.
            fc_args: List of tuple of fully-connected arguments.
        """
        (self.conv_layers, self.conv_shape) = self.create_conv_layers(shape, conv_args)
        (dim_x, dim_y, dim_out) = self.conv_shape
        # Flattened size feeding the fully-connected stage.
        dim_r = ((dim_x * dim_y) * dim_out)
        self.reshape = View((-1), dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)

    def create_conv_layers(self, shape, conv_args):
        """Creates a set of convolutional layers.

        Args:
            shape: Input shape.
            conv_args: List of tuple of convolutional arguments.

        Returns:
            (nn.Sequential, (dim_x, dim_y, dim_out)): the layers and the
            spatial/channel shape of their output.
        """
        conv_layers = nn.Sequential()
        conv_args = (conv_args or [])
        (dim_x, dim_y, dim_in) = shape
        for (i, (dim_out, f, s, p, batch_norm, dropout, nonlinearity, pool)) in enumerate(conv_args):
            name = '({}/{})_{}'.format(dim_in, dim_out, (i + 1))
            conv_block = nn.Sequential()
            if (dim_out is not None):
                # Bias is redundant when batch norm immediately follows.
                conv = nn.Conv2d(dim_in, dim_out, kernel_size=f, stride=s, padding=p, bias=(not batch_norm))
                conv_block.add_module((name + 'conv'), conv)
                (dim_x, dim_y) = self.next_size(dim_x, dim_y, f, s, p)
            else:
                # dim_out None means "no conv here": keep the channel count.
                dim_out = dim_in
            if dropout:
                conv_block.add_module((name + 'do'), nn.Dropout2d(p=dropout))
            if batch_norm:
                bn = nn.BatchNorm2d(dim_out)
                conv_block.add_module((name + 'bn'), bn)
            if nonlinearity:
                nonlinearity = get_nonlinearity(nonlinearity)
                conv_block.add_module(nonlinearity.__class__.__name__, nonlinearity)
            if pool:
                (pool_type, kernel, stride) = pool
                # pool_type names an nn class, e.g. 'MaxPool2d'.
                Pool = getattr(nn, pool_type)
                conv_block.add_module((name + 'pool'), Pool(kernel_size=kernel, stride=stride))
                (dim_x, dim_y) = self.next_size(dim_x, dim_y, kernel, stride, 0)
            conv_layers.add_module(name, conv_block)
            dim_in = dim_out
        dim_out = dim_in
        return (conv_layers, (dim_x, dim_y, dim_out))

    def create_linear_layers(self, dim_in, fc_args):
        """Creates a set of fully-connected layers.

        Args:
            dim_in: Number of input units.
            fc_args: List of tuple of fully-connected arguments.

        Returns:
            (nn.Sequential, int): the layers and the final output width.
        """
        fc_layers = nn.Sequential()
        fc_args = (fc_args or [])
        for (i, (dim_out, batch_norm, dropout, nonlinearity)) in enumerate(fc_args):
            name = '({}/{})_{}'.format(dim_in, dim_out, (i + 1))
            fc_block = nn.Sequential()
            if (dim_out is not None):
                fc_block.add_module((name + 'fc'), nn.Linear(dim_in, dim_out))
            else:
                dim_out = dim_in
            if dropout:
                fc_block.add_module((name + 'do'), nn.Dropout(p=dropout))
            if batch_norm:
                bn = nn.BatchNorm1d(dim_out)
                fc_block.add_module((name + 'bn'), bn)
            if nonlinearity:
                nonlinearity = get_nonlinearity(nonlinearity)
                fc_block.add_module(nonlinearity.__class__.__name__, nonlinearity)
            fc_layers.add_module(name, fc_block)
            dim_in = dim_out
        return (fc_layers, dim_in)

    def next_size(self, dim_x, dim_y, k, s, p):
        """Infers the next size of a convolutional layer.

        Args:
            dim_x: First dimension.
            dim_y: Second dimension.
            k: Kernel size (int or (kx, ky)).
            s: Stride (int or (sx, sy)).
            p: Padding (int or (px, py)).

        Returns:
            (int, int): (First output dimension, Second output dimension)
        """
        if isinstance(k, int):
            (kx, ky) = (k, k)
        else:
            (kx, ky) = k
        if isinstance(s, int):
            (sx, sy) = (s, s)
        else:
            (sx, sy) = s
        if isinstance(p, int):
            (px, py) = (p, p)
        else:
            (px, py) = p
        return (infer_conv_size(dim_x, kx, sx, px), infer_conv_size(dim_y, ky, sy, py))

    def forward(self, x: torch.Tensor, return_full_list=False):
        """Forward pass.

        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.

        Returns:
            (conv_out, fc_out) — each a tensor, or a list of per-layer
            tensors when return_full_list is True.
        """
        if return_full_list:
            conv_out = []
            for conv_layer in self.conv_layers:
                x = conv_layer(x)
                conv_out.append(x)
        else:
            conv_out = self.conv_layers(x)
            x = conv_out
        x = self.reshape(x)
        if return_full_list:
            fc_out = []
            for fc_layer in self.fc_layers:
                x = fc_layer(x)
                fc_out.append(x)
        else:
            fc_out = self.fc_layers(x)
        return (conv_out, fc_out)
class FoldedConvnet(Convnet):
    """Convnet with strided crop input.

    The input image is unfolded into overlapping crops that are stacked
    into the batch dimension, convolved per-crop, and refolded once the
    spatial size collapses to 1x1.
    """

    def create_layers(self, shape, crop_size=8, conv_args=None, fc_args=None):
        """Creates layers.

        conv_args are in format (dim_h, f_size, stride, pad, batch_norm, dropout, nonlinearity, pool)
        fc_args are in format (dim_h, batch_norm, dropout, nonlinearity)

        Args:
            shape: Shape of input.
            crop_size: Size of crops.
            conv_args: List of tuple of convolutional arguments.
            fc_args: List of tuple of fully-connected arguments.
        """
        self.crop_size = crop_size
        (dim_x, dim_y, dim_in) = shape
        if (dim_x != dim_y):
            raise ValueError('x and y dimensions must be the same to use Folded encoders.')
        # Number of crop positions per axis (crops overlap by half).
        self.final_size = ((2 * (dim_x // self.crop_size)) - 1)
        self.unfold = Unfold(dim_x, self.crop_size)
        self.refold = Fold(dim_x, self.crop_size)
        # Conv layers see one crop at a time, not the full image.
        shape = (self.crop_size, self.crop_size, dim_in)
        (self.conv_layers, self.conv_shape) = self.create_conv_layers(shape, conv_args)
        (dim_x, dim_y, dim_out) = self.conv_shape
        dim_r = ((dim_x * dim_y) * dim_out)
        self.reshape = View((-1), dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)

    def create_conv_layers(self, shape, conv_args):
        """Creates a set of convolutional layers.

        Args:
            shape: Input shape (one crop).
            conv_args: List of tuple of convolutional arguments.

        Returns:
            (nn.Sequential, (dim_x, dim_y, dim_out)).
        """
        conv_layers = nn.Sequential()
        conv_args = (conv_args or [])
        (dim_x, dim_y, dim_in) = shape
        for (i, (dim_out, f, s, p, batch_norm, dropout, nonlinearity, pool)) in enumerate(conv_args):
            name = '({}/{})_{}'.format(dim_in, dim_out, (i + 1))
            conv_block = nn.Sequential()
            if (dim_out is not None):
                conv = nn.Conv2d(dim_in, dim_out, kernel_size=f, stride=s, padding=p, bias=(not batch_norm))
                conv_block.add_module((name + 'conv'), conv)
                (dim_x, dim_y) = self.next_size(dim_x, dim_y, f, s, p)
            else:
                dim_out = dim_in
            if dropout:
                conv_block.add_module((name + 'do'), nn.Dropout2d(p=dropout))
            if batch_norm:
                bn = nn.BatchNorm2d(dim_out)
                conv_block.add_module((name + 'bn'), bn)
            if nonlinearity:
                nonlinearity = get_nonlinearity(nonlinearity)
                conv_block.add_module(nonlinearity.__class__.__name__, nonlinearity)
            if pool:
                (pool_type, kernel, stride) = pool
                Pool = getattr(nn, pool_type)
                # NOTE(review): unlike Convnet.create_conv_layers this uses the
                # bare module name 'pool' rather than (name + 'pool') — names
                # are still unique because each conv_block is its own Sequential.
                conv_block.add_module('pool', Pool(kernel_size=kernel, stride=stride))
                (dim_x, dim_y) = self.next_size(dim_x, dim_y, kernel, stride, 0)
            conv_layers.add_module(name, conv_block)
            dim_in = dim_out
        if (dim_x != dim_y):
            raise ValueError('dim_x and dim_y do not match.')
        # Once a crop collapses to 1x1, refolding restores a
        # final_size x final_size spatial map over crop locations.
        if (dim_x == 1):
            dim_x = self.final_size
            dim_y = self.final_size
        dim_out = dim_in
        return (conv_layers, (dim_x, dim_y, dim_out))

    def forward(self, x: torch.Tensor, return_full_list=False):
        """Forward pass.

        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.

        Returns:
            (conv_out, fc_out) — lists when return_full_list is True.
        """
        x = self.unfold(x)
        conv_out = []
        for conv_layer in self.conv_layers:
            x = conv_layer(x)
            # Refold back to the crop grid once spatial size hits 1.
            if (x.size(2) == 1):
                x = self.refold(x)
            conv_out.append(x)
        x = self.reshape(x)
        if return_full_list:
            fc_out = []
            for fc_layer in self.fc_layers:
                x = fc_layer(x)
                fc_out.append(x)
        else:
            fc_out = self.fc_layers(x)
        if (not return_full_list):
            conv_out = conv_out[(-1)]
        return (conv_out, fc_out)
def create_encoder(Module):
    """Class factory wrapping a backbone (Convnet/ResNet family) with an
    encoder interface that exposes intermediate features.
    """
    class Encoder(Module):
        """Encoder used for cortex_DIM."""

        def __init__(self, *args, local_idx=None, multi_idx=None, conv_idx=None, fc_idx=None, **kwargs):
            """
            Args:
                args: Arguments for parent class.
                local_idx: Index in list of convolutional layers for local features.
                multi_idx: Index in list of convolutional layers for multiple globals.
                conv_idx: Index in list of convolutional layers for intermediate features.
                fc_idx: Index in list of fully-connected layers for intermediate features.
                kwargs: Keyword arguments for the parent class.
            """
            super().__init__(*args, **kwargs)
            if (local_idx is None):
                raise ValueError('`local_idx` must be set')
            # Default the intermediate-feature index to the local one.
            conv_idx = (conv_idx or local_idx)
            self.local_idx = local_idx
            self.multi_idx = multi_idx
            self.conv_idx = conv_idx
            self.fc_idx = fc_idx

        def forward(self, x: torch.Tensor):
            """
            Args:
                x: Input tensor.

            Returns:
                (local_out, conv_out, multi_out, hidden_out, global_out)
            """
            outs = super().forward(x, return_full_list=True)
            if (len(outs) == 2):
                # Convnet-style backbone: (conv activations, fc activations).
                (conv_out, fc_out) = outs
            else:
                # ResNet-style backbone: merge its three conv stages into one list.
                (conv_before_out, res_out, conv_after_out, fc_out) = outs
                conv_out = ((conv_before_out + res_out) + conv_after_out)
            local_out = conv_out[self.local_idx]
            if (self.multi_idx is not None):
                multi_out = conv_out[self.multi_idx]
            else:
                multi_out = None
            if (len(fc_out) > 0):
                if (self.fc_idx is not None):
                    hidden_out = fc_out[self.fc_idx]
                else:
                    hidden_out = None
                global_out = fc_out[(-1)]
            else:
                hidden_out = None
                global_out = None
            conv_out = conv_out[self.conv_idx]
            return (local_out, conv_out, multi_out, hidden_out, global_out)
    return Encoder
class ConvnetEncoder(create_encoder(Convnet)):
    # Convnet backbone wrapped by the create_encoder factory.
    pass
class FoldedConvnetEncoder(create_encoder(FoldedConvnet)):
    # Folded (strided-crop) Convnet backbone wrapped by create_encoder.
    pass
class ResnetEncoder(create_encoder(ResNet)):
    # ResNet backbone wrapped by the create_encoder factory.
    pass
class FoldedResnetEncoder(create_encoder(FoldedResNet)):
    # Folded (strided-crop) ResNet backbone wrapped by create_encoder.
    pass
class MIFCNet(nn.Module):
    """Simple custom network for computing MI.

    Sums a linear shortcut (initialized near-identity) with a small
    nonlinear MLP block.
    """

    def __init__(self, n_input, n_units):
        """
        Args:
            n_input: Number of input units.
            n_units: Number of output units (must be >= n_input).
        """
        super().__init__()
        assert (n_units >= n_input)
        self.linear_shortcut = nn.Linear(n_input, n_units)
        self.block_nonlinear = nn.Sequential(
            nn.Linear(n_input, n_units),
            nn.BatchNorm1d(n_units),
            nn.ReLU(),
            nn.Linear(n_units, n_units),
        )
        # FIX: the mask must be boolean — masked_fill_ with a uint8 mask was
        # deprecated and is rejected by current PyTorch.
        eye_mask = np.zeros((n_units, n_input), dtype=bool)
        for i in range(n_input):
            eye_mask[i, i] = True
        # Small random init everywhere, then pin the identity diagonal to 1
        # so the shortcut starts close to an identity embedding.
        self.linear_shortcut.weight.data.uniform_(-0.01, 0.01)
        self.linear_shortcut.weight.data.masked_fill_(torch.from_numpy(eye_mask), 1.0)

    def forward(self, x):
        """
        Args:
            x: Input tensor of shape (batch, n_input).

        Returns:
            torch.Tensor: network output of shape (batch, n_units).
        """
        h = self.block_nonlinear(x) + self.linear_shortcut(x)
        return h
class MI1x1ConvNet(nn.Module):
    """Simple custom 1x1 convnet.

    A nonlinear 1x1-conv block plus a 1x1-conv shortcut (initialized
    near-identity when n_units >= n_input), followed by LayerNorm over
    the channel dimension.
    """

    def __init__(self, n_input, n_units):
        """
        Args:
            n_input: Number of input units (channels).
            n_units: Number of output units (channels).
        """
        super().__init__()
        self.block_nonlinear = nn.Sequential(
            nn.Conv1d(n_input, n_units, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm1d(n_units),
            nn.ReLU(),
            nn.Conv1d(n_units, n_units, kernel_size=1, stride=1, padding=0, bias=True),
        )
        # LayerNorm normalizes the last axis, so put channels last, normalize,
        # and permute back.
        self.block_ln = nn.Sequential(
            Permute(0, 2, 1),
            nn.LayerNorm(n_units),
            Permute(0, 2, 1),
        )
        self.linear_shortcut = nn.Conv1d(n_input, n_units, kernel_size=1, stride=1, padding=0, bias=False)
        if (n_units >= n_input):
            # FIX: the mask must be boolean — masked_fill_ with a uint8 mask
            # was deprecated and is rejected by current PyTorch.
            eye_mask = np.zeros((n_units, n_input, 1), dtype=bool)
            for i in range(n_input):
                eye_mask[i, i, 0] = True
            self.linear_shortcut.weight.data.uniform_(-0.01, 0.01)
            self.linear_shortcut.weight.data.masked_fill_(torch.from_numpy(eye_mask), 1.0)

    def forward(self, x):
        """
        Args:
            x: Input tensor of shape (batch, n_input, length).

        Returns:
            torch.Tensor: network output of shape (batch, n_units, length).
        """
        h = self.block_ln(self.block_nonlinear(x) + self.linear_shortcut(x))
        return h
class View(torch.nn.Module):
    """Basic reshape module usable inside nn.Sequential."""

    def __init__(self, *shape):
        """
        Args:
            *shape: Target shape (may include -1 for an inferred axis).
        """
        super().__init__()
        self.shape = shape

    def forward(self, input):
        """Return *input* viewed with the stored shape (no data copy)."""
        return input.view(*self.shape)
class Unfold(torch.nn.Module):
    """Module for unfolding tensor.

    Performs strided crops on 2d (image) tensors. Stride is assumed to be
    half the crop size, so neighboring crops overlap by 50%.
    """

    def __init__(self, img_size, fold_size):
        """
        Args:
            img_size: Input size.
            fold_size: Crop size.
        """
        super().__init__()
        self.fold_size = fold_size
        self.fold_stride = fold_size // 2
        # Number of crop positions along each spatial axis.
        self.n_locs = 2 * (img_size // fold_size) - 1
        self.unfold = torch.nn.Unfold(
            (self.fold_size, self.fold_size),
            stride=(self.fold_stride, self.fold_stride))

    def forward(self, x):
        """Unfolds tensor.

        Args:
            x: Input tensor of shape (N, C, H, W).

        Returns:
            torch.Tensor: crops stacked into the batch axis,
            shape (N * n_locs**2, C, fold_size, fold_size).
        """
        batch = x.size(0)
        n_crops = self.n_locs * self.n_locs
        # nn.Unfold yields (N, C*fs*fs, L); split channels/kernel dims back out.
        cols = self.unfold(x)
        crops = cols.reshape(batch, -1, self.fold_size, self.fold_size, n_crops)
        crops = crops.permute(0, 4, 1, 2, 3)
        return crops.reshape(batch * n_crops, -1, self.fold_size, self.fold_size)
class Fold(torch.nn.Module):
    """Module (re)folding tensor.

    Undoes the strided crops of Unfold. Works only on 1x1 spatial inputs:
    the crop locations become the new spatial grid.
    """

    def __init__(self, img_size, fold_size):
        """
        Args:
            img_size: Image size.
            fold_size: Crop size.
        """
        super().__init__()
        self.n_locs = 2 * (img_size // fold_size) - 1

    def forward(self, x):
        """(Re)folds tensor.

        Args:
            x: Input tensor of shape (N * n_locs**2, C, h, w).

        Returns:
            torch.Tensor: refolded tensor of shape (N, C*h*w, n_locs, n_locs).
        """
        channels, height, width = x.size()[1:]
        n_crops = self.n_locs * self.n_locs
        folded = x.reshape(-1, n_crops, channels, height * width)
        folded = folded.permute(0, 2, 3, 1)
        folded = folded.reshape(-1, channels * height * width, self.n_locs, self.n_locs)
        return folded.contiguous()
class Permute(torch.nn.Module):
    """Module for permuting axes, usable inside nn.Sequential."""

    def __init__(self, *perm):
        """
        Args:
            *perm: Axis order to apply.
        """
        super().__init__()
        self.perm = perm

    def forward(self, input):
        """Return *input* with its axes reordered by the stored permutation."""
        return input.permute(*self.perm)
class ResBlock(Convnet):
    """Residual block for ResNet.

    Builds the block's conv stack via Convnet.create_layers, moving the
    final nonlinearity after the residual addition.
    """

    def create_layers(self, shape, conv_args=None):
        """Creates layers.

        Args:
            shape: Shape of input.
            conv_args: Layer arguments for block.
        """
        # _nonlin_idx is a module-level constant (defined elsewhere in this
        # file): the position of the nonlinearity entry in a conv_args tuple.
        final_nonlin = conv_args[(-1)][_nonlin_idx]
        # Strip the last layer's nonlinearity and re-append it as a
        # standalone pass-through layer, so it runs AFTER the residual add.
        conv_args[(-1)] = list(conv_args[(-1)])
        conv_args[(-1)][_nonlin_idx] = None
        conv_args.append((None, 0, 0, 0, False, False, final_nonlin, None))
        super().create_layers(shape, conv_args=conv_args)
        if (self.conv_shape != shape):
            # Shape changed: a 1x1 strided conv + BN projects the residual.
            (dim_x, dim_y, dim_in) = shape
            (dim_x_, dim_y_, dim_out) = self.conv_shape
            stride = (dim_x // dim_x_)
            (next_x, _) = self.next_size(dim_x, dim_y, 1, stride, 0)
            # Sanity check: the inferred stride must reproduce the conv output size.
            assert (next_x == dim_x_), (self.conv_shape, shape)
            self.downsample = nn.Sequential(
                nn.Conv2d(dim_in, dim_out, kernel_size=1, stride=stride, padding=0, bias=False),
                nn.BatchNorm2d(dim_out))
        else:
            self.downsample = None

    def forward(self, x: torch.Tensor):
        """Forward pass.

        Args:
            x: Input.

        Returns:
            torch.Tensor: block output (residual added before the final nonlinearity).
        """
        if (self.downsample is not None):
            residual = self.downsample(x)
        else:
            residual = x
        # All layers but the last, plus residual, then the final nonlinearity layer.
        x = self.conv_layers[(-1)]((self.conv_layers[:(-1)](x) + residual))
        return x
class ResNet(Convnet):
    """Convnet variant with a stack of residual blocks between two plain
    convolutional stages.

    Stages: conv_before_layers -> res_layers -> conv_after_layers ->
    reshape -> fc_layers.
    """

    def create_layers(self, shape, conv_before_args=None, res_args=None, conv_after_args=None, fc_args=None):
        """Creates layers.

        Args:
            shape: Shape of the input (dim_x, dim_y, channels).
            conv_before_args: Arguments for convolutional layers before residuals.
            res_args: Residual args.
            conv_after_args: Arguments for convolutional layers after residuals.
            fc_args: Fully-connected arguments.
        """
        (dim_x, dim_y, dim_in) = shape
        shape = (dim_x, dim_y, dim_in)
        (self.conv_before_layers, self.conv_before_shape) = self.create_conv_layers(shape, conv_before_args)
        (self.res_layers, self.res_shape) = self.create_res_layers(self.conv_before_shape, res_args)
        (self.conv_after_layers, self.conv_after_shape) = self.create_conv_layers(self.res_shape, conv_after_args)
        (dim_x, dim_y, dim_out) = self.conv_after_shape
        # Flattened size feeding the fully-connected stage.
        dim_r = dim_x * dim_y * dim_out
        self.reshape = View(-1, dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)

    def create_res_layers(self, shape, block_args=None):
        """Creates a set of residual blocks.

        Args:
            shape: input shape.
            block_args: list of (conv_args, n_blocks) tuples.

        Returns:
            (nn.Sequential, shape): sequence of residual blocks and the
            output shape of the last one.
        """
        res_layers = nn.Sequential()
        block_args = block_args or []
        for i, (conv_args, n_blocks) in enumerate(block_args):
            block = ResBlock(shape, conv_args=conv_args)
            res_layers.add_module('block_{}_0'.format(i), block)
            for j in range(1, n_blocks):
                shape = block.conv_shape
                block = ResBlock(shape, conv_args=conv_args)
                res_layers.add_module('block_{}_{}'.format(i, j), block)
            shape = block.conv_shape
        return (res_layers, shape)

    def _run_stage(self, layers, x, return_full_list):
        """Run x through one stage; return (stage output(s), tensor for next stage)."""
        if return_full_list:
            outs = []
            for layer in layers:
                x = layer(x)
                outs.append(x)
            return outs, x
        out = layers(x)
        return out, out

    def forward(self, x: torch.Tensor, return_full_list=False):
        """Forward pass.

        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.

        Returns:
            (conv_before_out, res_out, conv_after_out, fc_out) — each a
            tensor, or a list of per-layer tensors when return_full_list.
        """
        # BUG FIX: the original non-list path called self.conv_layers(x),
        # but ResNet.create_layers never defines conv_layers (it defines
        # conv_before_layers), so that path raised AttributeError.
        conv_before_out, x = self._run_stage(self.conv_before_layers, x, return_full_list)
        res_out, x = self._run_stage(self.res_layers, x, return_full_list)
        conv_after_out, x = self._run_stage(self.conv_after_layers, x, return_full_list)
        x = self.reshape(x)
        fc_out, _ = self._run_stage(self.fc_layers, x, return_full_list)
        return (conv_before_out, res_out, conv_after_out, fc_out)
class FoldedResNet(ResNet):
    """Resnet with strided crop input.

    The input is unfolded into overlapping crops stacked on the batch axis
    and refolded back to the crop grid once spatial size collapses to 1x1.
    """

    def create_layers(self, shape, crop_size=8, conv_before_args=None, res_args=None, conv_after_args=None, fc_args=None):
        """Creates layers.

        Args:
            shape: Shape of the input.
            crop_size: Size of the crops.
            conv_before_args: Arguments for convolutional layers before residuals.
            res_args: Residual args.
            conv_after_args: Arguments for convolutional layers after residuals.
            fc_args: Fully-connected arguments.
        """
        self.crop_size = crop_size
        (dim_x, dim_y, dim_in) = shape
        # Number of crop positions per axis (crops overlap by half).
        self.final_size = ((2 * (dim_x // self.crop_size)) - 1)
        self.unfold = Unfold(dim_x, self.crop_size)
        self.refold = Fold(dim_x, self.crop_size)
        # Conv layers see one crop at a time.
        shape = (self.crop_size, self.crop_size, dim_in)
        (self.conv_before_layers, self.conv_before_shape) = self.create_conv_layers(shape, conv_before_args)
        (self.res_layers, self.res_shape) = self.create_res_layers(self.conv_before_shape, res_args)
        (self.conv_after_layers, self.conv_after_shape) = self.create_conv_layers(self.res_shape, conv_after_args)
        # NOTE(review): the conv_after shape computed above is immediately
        # discarded in favor of res_shape — presumably intentional for the
        # folded geometry, but worth confirming against the training configs.
        self.conv_after_shape = self.res_shape
        (dim_x, dim_y, dim_out) = self.conv_after_shape
        dim_r = ((dim_x * dim_y) * dim_out)
        self.reshape = View((-1), dim_r)
        (self.fc_layers, _) = self.create_linear_layers(dim_r, fc_args)

    def create_res_layers(self, shape, block_args=None):
        """Creates a set of residual blocks.

        Args:
            shape: input shape.
            block_args: list of (conv_args, n_blocks) tuples.

        Returns:
            (nn.Sequential, shape): the blocks and their output shape,
            widened to the crop grid when spatial size reaches 1.
        """
        res_layers = nn.Sequential()
        block_args = (block_args or [])
        for (i, (conv_args, n_blocks)) in enumerate(block_args):
            block = ResBlock(shape, conv_args=conv_args)
            res_layers.add_module('block_{}_0'.format(i), block)
            for j in range(1, n_blocks):
                shape = block.conv_shape
                block = ResBlock(shape, conv_args=conv_args)
                res_layers.add_module('block_{}_{}'.format(i, j), block)
            shape = block.conv_shape
        (dim_x, dim_y) = shape[:2]
        if (dim_x != dim_y):
            raise ValueError('dim_x and dim_y do not match.')
        # 1x1 crops refold into a final_size x final_size grid at runtime.
        if (dim_x == 1):
            shape = (self.final_size, self.final_size, shape[2])
        return (res_layers, shape)

    def forward(self, x: torch.Tensor, return_full_list=False):
        """Forward pass.

        Args:
            x: Input.
            return_full_list: Optional, returns all layer outputs.

        Returns:
            (conv_before_out, res_out, conv_after_out, fc_out).
        """
        x = self.unfold(x)
        conv_before_out = []
        for conv_layer in self.conv_before_layers:
            x = conv_layer(x)
            if (x.size(2) == 1):
                x = self.refold(x)
            conv_before_out.append(x)
        res_out = []
        for res_layer in self.res_layers:
            x = res_layer(x)
            res_out.append(x)
        # NOTE(review): in the flattened original source the placement of this
        # refold (inside vs after the residual loop) is ambiguous; it is
        # reconstructed here as a single refold after the stage, matching the
        # single end-of-stage shape adjustment in create_res_layers — confirm
        # against the upstream cortex_DIM source.
        if (x.size(2) == 1):
            x = self.refold(x)
            res_out[(-1)] = x
        conv_after_out = []
        for conv_layer in self.conv_after_layers:
            x = conv_layer(x)
            if (x.size(2) == 1):
                x = self.refold(x)
            conv_after_out.append(x)
        x = self.reshape(x)
        if return_full_list:
            fc_out = []
            for fc_layer in self.fc_layers:
                x = fc_layer(x)
                fc_out.append(x)
        else:
            fc_out = self.fc_layers(x)
        if (not return_full_list):
            conv_before_out = conv_before_out[(-1)]
            res_out = res_out[(-1)]
            conv_after_out = conv_after_out[(-1)]
        return (conv_before_out, res_out, conv_after_out, fc_out)
class NormalizedDegree(object):
    """Graph transform replacing node features with standardized node degree.

    Intended for datasets whose graphs carry no node attributes.
    """

    def __init__(self, mean, std):
        # Dataset-level degree statistics used for standardization.
        self.mean = mean
        self.std = std

    def __call__(self, data):
        # Degree of each node, computed from the first row of edge_index.
        node_degree = degree(data.edge_index[0], dtype=torch.float)
        standardized = (node_degree - self.mean) / self.std
        data.x = standardized.view(-1, 1)
        return data
class GcnInfomax(nn.Module):
    """Graph InfoMax model: GIN encoder plus local/global discriminators.

    Relies on module-level globals defined elsewhere in this file:
    `args`, `device`, `dataset_num_features`, `Encoder`, `FF`,
    `PriorDiscriminator`, and `local_global_loss_`.
    """

    def __init__(self, hidden_dim, num_gc_layers, alpha=0.5, beta=1.0, gamma=0.1):
        super(GcnInfomax, self).__init__()
        # alpha/beta are stored but not read inside this class.
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.prior = args.prior
        self.embedding_dim = mi_units = (hidden_dim * num_gc_layers)
        self.encoder = Encoder(dataset_num_features, hidden_dim, num_gc_layers)
        self.local_d = FF(self.embedding_dim)
        self.global_d = FF(self.embedding_dim)
        if self.prior:
            self.prior_d = PriorDiscriminator(self.embedding_dim)
        self.init_emb()

    def init_emb(self):
        """Xavier-initialize all Linear weights and zero their biases."""
        # NOTE(review): initrange is computed but never used (and is
        # negative); the actual init below is xavier_uniform_.
        initrange = ((-1.5) / self.embedding_dim)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                torch.nn.init.xavier_uniform_(m.weight.data)
                if (m.bias is not None):
                    m.bias.data.fill_(0.0)

    def forward(self, x, edge_index, batch, num_graphs, beta):
        """Return the local-global InfoMax loss (plus optional prior term).

        Args:
            x: node features, or None for featureless graphs.
            edge_index: graph connectivity.
            batch: graph-membership index per node.
            num_graphs: accepted but unused in this body.
            beta: hard-negative weighting passed to local_global_loss_.
        """
        if (x is None):
            # NOTE(review): creates a 1-D ones tensor here, whereas the
            # Encoder class uses shape (num_nodes, 1) — confirm which shape
            # the encoder actually expects.
            x = torch.ones(batch.shape[0]).to(device)
        (y, M) = self.encoder(x, edge_index, batch)
        g_enc = self.global_d(y)
        l_enc = self.local_d(M)
        # `mode` is assigned but never used.
        mode = 'fd'
        measure = 'JSD_hard'
        local_global_loss = local_global_loss_(l_enc, g_enc, edge_index, batch, measure, beta)
        if self.prior:
            # Adversarial prior-matching term on the global representation.
            prior = torch.rand_like(y)
            term_a = torch.log(self.prior_d(prior)).mean()
            term_b = torch.log((1.0 - self.prior_d(y))).mean()
            PRIOR = ((-(term_a + term_b)) * self.gamma)
        else:
            PRIOR = 0
        return (local_global_loss + PRIOR)
def svc_classify(x, y, search):
    """Mean 10-fold cross-validated SVC accuracy; grid-searches C when `search` is True."""
    folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=None)
    scores = []
    for train_idx, test_idx in folds.split(x, y):
        if search:
            grid = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
            clf = GridSearchCV(SVC(), grid, cv=5, scoring='accuracy', verbose=0)
        else:
            clf = SVC(C=10)
        clf.fit(x[train_idx], y[train_idx])
        scores.append(accuracy_score(y[test_idx], clf.predict(x[test_idx])))
    return np.mean(scores)
def evaluate_embedding(embeddings, labels, search=True):
    """Encode labels and report the mean SVC accuracy over the given embeddings."""
    y = np.array(preprocessing.LabelEncoder().fit_transform(labels))
    x = np.array(embeddings)
    print(x.shape, y.shape)
    # Single repetition kept for parity with the original evaluation loop.
    accs = [svc_classify(x, y, search) for _ in range(1)]
    mean_acc = np.mean(accs)
    print('svc', mean_acc)
    return mean_acc
class Encoder(torch.nn.Module):
    """Stack of GIN convolutions; the graph embedding concatenates per-layer pooled features."""

    def __init__(self, num_features, dim, num_gc_layers):
        super(Encoder, self).__init__()
        self.num_gc_layers = num_gc_layers
        self.convs = torch.nn.ModuleList()
        self.bns = torch.nn.ModuleList()
        for layer in range(num_gc_layers):
            # First layer maps raw features to `dim`; later layers are dim -> dim.
            in_dim = num_features if layer == 0 else dim
            mlp = Sequential(Linear(in_dim, dim), ReLU(), Linear(dim, dim))
            self.convs.append(GINConv(mlp))
            self.bns.append(torch.nn.BatchNorm1d(dim))

    def forward(self, x, edge_index, batch):
        """Return (graph embedding, node embeddings), both concatenated across layers."""
        if x is None:
            x = torch.ones((batch.shape[0], 1)).to(device)
        layer_outputs = []
        for conv, bn in zip(self.convs, self.bns):
            x = bn(F.relu(conv(x, edge_index)))
            layer_outputs.append(x)
        pooled = torch.cat([global_add_pool(h, batch) for h in layer_outputs], 1)
        return pooled, torch.cat(layer_outputs, 1)

    def get_embeddings(self, loader):
        """Embed every graph in `loader`; returns (embeddings, labels) as numpy arrays."""
        dev = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        feats = []
        labels = []
        with torch.no_grad():
            for data in loader:
                data.to(dev)
                x, edge_index, batch = data.x, data.edge_index, data.batch
                if x is None:
                    x = torch.ones((batch.shape[0], 1)).to(dev)
                emb, _ = self.forward(x, edge_index, batch)
                feats.append(emb.cpu().numpy())
                labels.append(data.y.cpu().numpy())
        return np.concatenate(feats, 0), np.concatenate(labels, 0)
class Net(torch.nn.Module):
    """Graph classifier: GIN Encoder followed by a two-layer MLP head (log-softmax output)."""

    def __init__(self):
        super(Net, self).__init__()
        try:
            num_features = dataset.num_features
        except Exception:  # fix: narrowed from bare `except:`
            num_features = 1
        dim = 32
        # Fix: Encoder requires num_gc_layers; 5 matches the `dim * 5` input
        # size of fc1 (one pooled dim-vector per GIN layer is concatenated).
        self.encoder = Encoder(num_features, dim, 5)
        self.fc1 = Linear(dim * 5, dim)
        self.fc2 = Linear(dim, dataset.num_classes)

    def forward(self, x, edge_index, batch):
        if x is None:
            # Featureless graphs get a constant feature per node.
            x = torch.ones(batch.shape[0]).to(device)
        x, _ = self.encoder(x, edge_index, batch)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=-1)
def train(epoch):
    """One training epoch over `train_loader`; halves the LR once at epoch 51.

    Returns the mean NLL loss per graph.
    """
    model.train()
    if epoch == 51:
        for group in optimizer.param_groups:
            group['lr'] = 0.5 * group['lr']
    total_loss = 0
    for batch in train_loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        out = model(batch.x, batch.edge_index, batch.batch)
        loss = F.nll_loss(out, batch.y)
        loss.backward()
        total_loss += loss.item() * batch.num_graphs
        optimizer.step()
    return total_loss / len(train_dataset)
def test(loader):
    """Classification accuracy of the global `model` over `loader`."""
    model.eval()
    hits = 0
    for batch in loader:
        batch = batch.to(device)
        preds = model(batch.x, batch.edge_index, batch.batch).max(dim=1)[1]
        hits += preds.eq(batch.y).sum().item()
    return hits / len(loader.dataset)
def local_global_loss_(l_enc, g_enc, edge_index, batch, measure, beta=0):
    """Local-global mutual-information loss between node and graph encodings.

    Args:
        l_enc: Local (per-node) encodings, shape (num_nodes, feat).
        g_enc: Global (per-graph) encodings, shape (num_graphs, feat).
        edge_index: Unused here; kept for interface parity with adj_loss_.
        batch: Graph id for every node (maps node index -> owning graph).
        measure: Type of f-divergence passed to the expectation helpers.
        beta: Extra parameter forwarded to get_negative_expectation.

    Returns:
        torch.Tensor: Scalar loss, E_neg - E_pos.
    """
    num_graphs = g_enc.shape[0]
    num_nodes = l_enc.shape[0]
    device = torch.device(('cuda:0' if torch.cuda.is_available() else 'cpu'))
    # Positive mask marks each node's own graph; negative mask marks all others.
    pos_mask = torch.zeros((num_nodes, num_graphs)).to(device)
    neg_mask = torch.ones((num_nodes, num_graphs)).to(device)
    for (nodeidx, graphidx) in enumerate(batch):
        pos_mask[nodeidx][graphidx] = 1.0
        neg_mask[nodeidx][graphidx] = 0.0
    # Similarity scores between every node and every graph summary.
    res = torch.mm(l_enc, g_enc.t())
    E_pos = get_positive_expectation((res * pos_mask), measure, average=False).sum()
    E_pos = (E_pos / num_nodes)
    E_neg = get_negative_expectation((res * neg_mask), measure, average=False, beta=beta).sum()
    # NOTE(review): divides by num_graphs - 1 — zero division if a batch
    # contains a single graph; confirm batches always hold >= 2 graphs.
    E_neg = (E_neg / (num_nodes * (num_graphs - 1)))
    return (E_neg - E_pos)
def adj_loss_(l_enc, g_enc, edge_index, batch):
    """BCE between sigmoid node-similarity scores and the symmetric adjacency matrix."""
    num_nodes = l_enc.shape[0]
    adj = torch.zeros((num_nodes, num_nodes)).cuda()
    mask = torch.eye(num_nodes).cuda()
    # Densify the edge list, making it symmetric as we go.
    for src, dst in zip(edge_index[0], edge_index[1]):
        adj[src.item()][dst.item()] = 1.0
        adj[dst.item()][src.item()] = 1.0
    sim = torch.sigmoid(torch.mm(l_enc, l_enc.t()))
    sim = (1 - mask) * sim  # zero the diagonal before comparing
    return nn.BCELoss()(sim, adj)
class GlobalDiscriminator(nn.Module):
    """Discriminator scoring (global embedding, encoded local features) pairs.

    NOTE(review): `forward` calls `self.encoder`, which is never assigned in
    `__init__` — as written this raises AttributeError; presumably an encoder
    module is meant to be attached by the caller. Confirm intended usage.
    `args` and `input_dim` are accepted but unused.
    """

    def __init__(self, args, input_dim):
        super().__init__()
        self.l0 = nn.Linear(32, 32)
        self.l1 = nn.Linear(32, 32)
        # 512-dim input implies cat([y, M]) is 512 wide — TODO confirm with caller.
        self.l2 = nn.Linear(512, 1)

    def forward(self, y, M, data):
        # Dense adjacency + per-graph node counts come from the data dict (CUDA-only path).
        adj = Variable(data['adj'].float(), requires_grad=False).cuda()
        batch_num_nodes = data['num_nodes'].int().numpy()
        (M, _) = self.encoder(M, adj, batch_num_nodes)
        h = torch.cat((y, M), dim=1)
        h = F.relu(self.l0(h))
        h = F.relu(self.l1(h))
        return self.l2(h)
class PriorDiscriminator(nn.Module):
    """Three-layer MLP mapping a vector to a probability in (0, 1)."""

    def __init__(self, input_dim):
        super().__init__()
        self.l0 = nn.Linear(input_dim, input_dim)
        self.l1 = nn.Linear(input_dim, input_dim)
        self.l2 = nn.Linear(input_dim, 1)

    def forward(self, x):
        out = F.relu(self.l0(x))
        out = F.relu(self.l1(out))
        return torch.sigmoid(self.l2(out))
class FF(nn.Module):
    """Residual MLP: three ReLU-activated linear layers plus a linear shortcut."""

    def __init__(self, input_dim):
        super().__init__()
        self.block = nn.Sequential(
            nn.Linear(input_dim, input_dim), nn.ReLU(),
            nn.Linear(input_dim, input_dim), nn.ReLU(),
            nn.Linear(input_dim, input_dim), nn.ReLU(),
        )
        self.linear_shortcut = nn.Linear(input_dim, input_dim)

    def forward(self, x):
        # Skip connection keeps a linear path alongside the nonlinear block.
        return self.linear_shortcut(x) + self.block(x)
class Model(nn.Module):
    """SimCLR-style ResNet-50 backbone adapted for 32x32 inputs, with a projection head."""

    def __init__(self, feature_dim=128):
        super(Model, self).__init__()
        # Adapt ResNet-50 for small images: 3x3 stem conv, drop maxpool and fc.
        backbone = []
        for name, module in resnet50().named_children():
            if name == 'conv1':
                module = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
            if not isinstance(module, (nn.Linear, nn.MaxPool2d)):
                backbone.append(module)
        self.f = nn.Sequential(*backbone)
        # Projection head: 2048 -> 512 -> feature_dim.
        self.g = nn.Sequential(
            nn.Linear(2048, 512, bias=False),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Linear(512, feature_dim, bias=True),
        )

    def forward(self, x):
        feature = torch.flatten(self.f(x), start_dim=1)
        out = self.g(feature)
        # Both backbone feature and projection are L2-normalized.
        return F.normalize(feature, dim=-1), F.normalize(out, dim=-1)
class CIFAR10Pair(CIFAR10):
    """CIFAR10 that yields two independently augmented views of the same image."""

    def __getitem__(self, index):
        raw, target = self.data[index], self.targets[index]
        img = Image.fromarray(raw)
        if self.transform is not None:
            # Two stochastic passes of the same transform produce the pair.
            pos_1 = self.transform(img)
            pos_2 = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pos_1, pos_2, target
class CIFAR100Pair_true_label(CIFAR100):
    """CIFAR100 pair dataset where the positive is a *different* image with the same label."""

    def __init__(self, root='../data', train=True, transform=None):
        super().__init__(root=root, train=train, transform=transform)
        # Fix: build the label -> indices table in a single O(n) pass instead of
        # scanning the whole dataset once per class (100 full passes).
        # Indices are appended in increasing order, matching the original lists.
        label_index = [[] for _ in range(100)]
        for index, label in enumerate(self.targets):
            label_index[label].append(index)
        self.label_index = label_index

    def __getitem__(self, index):
        img1, target = self.data[index], self.targets[index]
        # Positive example: a random image sharing this image's label.
        index_example_same_label = sample(self.label_index[self.targets[index]], 1)[0]
        img2 = self.data[index_example_same_label]
        img1 = Image.fromarray(img1)
        img2 = Image.fromarray(img2)
        if self.transform is not None:
            pos_1 = self.transform(img1)
            pos_2 = self.transform(img2)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pos_1, pos_2, target
class CIFAR100Pair(CIFAR100):
    """CIFAR100 that yields two independently augmented views of the same image."""

    def __getitem__(self, index):
        raw, target = self.data[index], self.targets[index]
        img = Image.fromarray(raw)
        if self.transform is not None:
            # Two stochastic passes of the same transform produce the pair.
            pos_1 = self.transform(img)
            pos_2 = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return pos_1, pos_2, target
class STL10Pair(STL10):
    """STL10 that yields two independently augmented views of the same image."""

    def __getitem__(self, index):
        raw, target = self.data[index], self.labels[index]
        # STL10 stores images as CHW; transpose to HWC for PIL.
        img = Image.fromarray(np.transpose(raw, (1, 2, 0)))
        if self.transform is not None:
            pos_1 = self.transform(img)
            pos_2 = self.transform(img)
        return pos_1, pos_2, target
class GaussianBlur(object):
    """With probability 0.5, applies Gaussian blur with sigma drawn from [min, max]."""

    def __init__(self, kernel_size, min=0.1, max=2.0):
        self.min = min
        self.max = max
        self.kernel_size = kernel_size

    def __call__(self, sample):
        arr = np.array(sample)
        # Coin flip first, then draw sigma only if we actually blur.
        if np.random.random_sample() < 0.5:
            sigma = self.min + (self.max - self.min) * np.random.random_sample()
            arr = cv2.GaussianBlur(arr, (self.kernel_size, self.kernel_size), sigma)
        return arr
def get_dataset(dataset_name, root='../data', pair=True):
    """Build (train, memory, test) datasets; pair=True yields two-view pair datasets."""
    if pair:
        pair_cls = {
            'cifar10': CIFAR10Pair,
            'cifar100': CIFAR100Pair,
            'cifar100_true_label': CIFAR100Pair_true_label,
        }
        if dataset_name in pair_cls:
            cls = pair_cls[dataset_name]
            train_data = cls(root=root, train=True, transform=train_transform)
            memory_data = cls(root=root, train=True, transform=test_transform)
            test_data = cls(root=root, train=False, transform=test_transform)
        elif dataset_name == 'stl10':
            # Self-supervised training additionally uses the unlabeled split.
            train_data = STL10Pair(root=root, split='train+unlabeled', transform=train_transform)
            memory_data = STL10Pair(root=root, split='train', transform=test_transform)
            test_data = STL10Pair(root=root, split='test', transform=test_transform)
        else:
            raise Exception('Invalid dataset name')
    else:
        # *_true_label variants fall back to the plain torchvision datasets.
        plain_cls = {
            'cifar10': CIFAR10, 'cifar10_true_label': CIFAR10,
            'cifar100': CIFAR100, 'cifar100_true_label': CIFAR100,
        }
        if dataset_name in plain_cls:
            cls = plain_cls[dataset_name]
            train_data = cls(root=root, train=True, transform=train_transform)
            memory_data = cls(root=root, train=True, transform=test_transform)
            test_data = cls(root=root, train=False, transform=test_transform)
        elif dataset_name == 'stl10':
            train_data = STL10(root=root, split='train', transform=train_transform)
            memory_data = STL10(root=root, split='train', transform=test_transform)
            test_data = STL10(root=root, split='test', transform=test_transform)
        else:
            raise Exception('Invalid dataset name')
    return train_data, memory_data, test_data
class CurveBall(Optimizer):
    """CurveBall optimizer.

    Second-order optimizer that maintains a per-parameter state vector `z`
    (the update direction) and refreshes it once per step using
    Hessian-vector products, automatically estimating lr/momentum (and
    optionally the damping parameter lambda) from a local quadratic model.
    """

    def __init__(self, params, lr=None, momentum=None, auto_lambda=True, lambd=10.0, lambda_factor=0.999, lambda_low=0.5, lambda_high=1.5, lambda_interval=5):
        # lr/momentum may be None/negative to request automatic estimation in
        # step(); lambd is the damping (trust-region) parameter.
        defaults = dict(lr=lr, momentum=momentum, auto_lambda=auto_lambda, lambd=lambd, lambda_factor=lambda_factor, lambda_low=lambda_low, lambda_high=lambda_high, lambda_interval=lambda_interval)
        super().__init__(params, defaults)

    def step(self, model_fn, loss_fn):
        """Performs a single optimization step.

        Args:
            model_fn: Zero-argument closure returning the model predictions.
            loss_fn: Closure mapping predictions to a scalar loss.

        Returns:
            (loss, predictions) from the evaluated closures.
        """
        if (len(self.param_groups) != 1):
            raise ValueError('Since the hyper-parameters are set automatically, only one parameter group (with the same hyper-parameters) is supported.')
        group = self.param_groups[0]
        parameters = group['params']
        state = self.state
        # Lazily create the per-parameter state vector z.
        for p in parameters:
            if (p not in state):
                state[p] = {'z': t.zeros_like(p)}
        zs = [state[p]['z'] for p in parameters]
        # Global counter and adaptive lambda live under the first parameter's state.
        global_state = state[parameters[0]]
        global_state.setdefault('count', 0)
        lambd = global_state.get('lambd', group['lambd'])
        predictions = model_fn()
        # Detached copy so gradients w.r.t. predictions can be taken separately.
        predictions_d = predictions.detach().requires_grad_(True)
        loss = loss_fn(predictions_d)
        # Jz: forward-mode JVP of predictions w.r.t. parameters along zs.
        (Jz,) = fmad(predictions, parameters, zs)
        (Jl,) = grad(loss, predictions_d, create_graph=True)
        Jl_d = Jl.detach()
        # Hessian-vector product of the loss (in prediction space) with Jz.
        (Hl_Jz,) = grad(Jl, predictions_d, grad_outputs=Jz, retain_graph=True)
        # Back-propagate (Hl_Jz + Jl) into parameter space: raw delta for z.
        delta_zs = grad(predictions, parameters, (Hl_Jz + Jl_d), retain_graph=True)
        # Add the damping term lambd * z.
        # NOTE(review): add_(scalar, tensor) is the deprecated positional-alpha
        # overload of Tensor.add_; modern spelling is add_(z, alpha=lambd).
        for (z, dz) in zip(zs, delta_zs):
            dz.data.add_(lambd, z)
        lr = group['lr']
        momentum = group['momentum']
        # Estimate lr/momentum by solving a 2x2 quadratic model whenever they
        # are unset (negative) or lambda is auto-tuned (which needs auto_params).
        if ((momentum < 0) or (lr < 0) or group['auto_lambda']):
            (Jdeltaz,) = fmad(predictions, parameters, delta_zs)
            (Hl_Jdeltaz,) = grad(Jl, predictions_d, grad_outputs=Jdeltaz)
            z_vec = t.cat([z.flatten() for z in zs])
            dz_vec = t.cat([dz.flatten() for dz in delta_zs])
            # Entries of the symmetric 2x2 system A @ [lr, -momentum]^T = b.
            a11 = ((lambd * (dz_vec * dz_vec).sum()) + (Jdeltaz * Hl_Jdeltaz).sum())
            a12 = ((lambd * (dz_vec * z_vec).sum()) + (Jz * Hl_Jdeltaz).sum())
            a22 = ((lambd * (z_vec * z_vec).sum()) + (Jz * Hl_Jz).sum())
            b1 = (Jl_d * Jdeltaz).sum()
            b2 = (Jl_d * Jz).sum()
            A = t.tensor([[a11.item(), a12.item()], [a12.item(), a22.item()]])
            b = t.tensor([[b1.item()], [b2.item()]])
            # pinverse is robust to a (near-)singular 2x2 system.
            auto_params = (A.pinverse() @ b)
            lr = auto_params[0].item()
            momentum = (- auto_params[1].item())
        # z <- momentum * z - lr * dz ; p <- p + z
        for (p, z, dz) in zip(parameters, zs, delta_zs):
            z.data.mul_(momentum).add_((- lr), dz)
            p.data.add_(z)
        # Periodically adapt lambda by comparing the actual loss change against
        # the change predicted by the quadratic model (trust-region style).
        if group['auto_lambda']:
            if ((global_state['count'] % group['lambda_interval']) == 0):
                with t.no_grad():
                    new_loss = loss_fn(model_fn())
                    quadratic_change = ((- 0.5) * (auto_params * b).sum())
                    ratio = ((new_loss - loss) / quadratic_change)
                    factor = (group['lambda_factor'] ** group['lambda_interval'])
                    if (ratio < group['lambda_low']):
                        lambd /= factor
                    if (ratio > group['lambda_high']):
                        lambd *= factor
                    global_state['lambd'] = lambd
        global_state['count'] += 1
        return (loss, predictions)
def fmad(ys, xs, dxs):
    """Forward-mode automatic differentiation via the double-backward trick.

    Returns the Jacobian-vector product of `ys` w.r.t. `xs` applied to `dxs`,
    computed with two reverse-mode passes through a dummy cotangent `v`.
    """
    v = t.zeros_like(ys, requires_grad=True)
    vjp = grad(ys, xs, grad_outputs=v, create_graph=True)
    return grad(vjp, v, grad_outputs=dxs)
def train(args, net, device, train_loader, optimizer, epoch, logger):
    """Train `net` for one epoch; CurveBall optimizers receive closures instead of gradients."""
    net.train()
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        start = time()
        inputs, labels = inputs.to(device), labels.to(device)
        model_fn = lambda: net(inputs)
        loss_fn = lambda pred: F.cross_entropy(pred, labels)
        if isinstance(optimizer, CurveBall):
            # CurveBall evaluates the model/loss itself (possibly more than once).
            loss, predictions = optimizer.step(model_fn, loss_fn)
        else:
            optimizer.zero_grad()
            predictions = model_fn()
            loss = loss_fn(predictions)
            loss.backward()
            optimizer.step()
        top1 = predictions.max(1, keepdim=True)[1]
        accuracy = top1.eq(labels.view_as(top1)).double().mean()
        stats = {'train.loss': loss.item(), 'train.accuracy': accuracy.item()}
        if logger:
            logger.update_average(stats)
            # Skip the first few timings so warm-up doesn't skew the average.
            if logger.avg_count['train.loss'] > 3:
                logger.update_average({'train.time': time() - start})
            logger.print(line_prefix='ep %i ' % epoch, prefix='train')
        else:
            print(stats)
def test(args, net, device, test_loader, logger):
    """Evaluate `net` on the test set, logging val.loss / val.accuracy per batch."""
    net.eval()
    with torch.no_grad():
        for inputs, labels in test_loader:
            start = time()
            inputs, labels = inputs.to(device), labels.to(device)
            predictions = net(inputs)
            loss = F.cross_entropy(predictions, labels)
            top1 = predictions.max(1, keepdim=True)[1]
            accuracy = top1.eq(labels.view_as(top1)).double().mean()
            stats = {'val.loss': loss.item(), 'val.accuracy': accuracy.item()}
            if logger:
                logger.update_average(stats)
                # Skip the first few timings so warm-up doesn't skew the average.
                if logger.avg_count['val.loss'] > 3:
                    logger.update_average({'val.time': time() - start})
                logger.print(prefix='val')
            else:
                print(stats)
def main():
    """CIFAR-10 training entry point: parse args, build data/model/optimizer, run the loop."""
    # Every callable exported by the models module is selectable via -model.
    all_models = [name for name in dir(models) if callable(getattr(models, name))]
    parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
    parser.add_argument('experiment', nargs='?', default='test')
    parser.add_argument('-model', choices=all_models, default='BasicNetBN')
    parser.add_argument('-optimizer', choices=['sgd', 'adam', 'curveball'], default='curveball')
    # Negative lr/momentum mean "use the optimizer-specific default / auto mode".
    parser.add_argument('-lr', default=(- 1), type=float, help='learning rate')
    parser.add_argument('-momentum', type=float, default=(- 1), metavar='M')
    parser.add_argument('-lambda', type=float, default=1.0)
    parser.add_argument('--no-auto-lambda', action='store_true', default=False, help='disables automatic lambda estimation')
    parser.add_argument('-batch-size', default=128, type=int)
    parser.add_argument('-epochs', default=200, type=int)
    parser.add_argument('-save-interval', default=10, type=int)
    parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    parser.add_argument('-outputdir', default='data/cifar-experiments', type=str)
    parser.add_argument('-datadir', default='data/cifar', type=str)
    parser.add_argument('-device', default='cuda', type=str)
    parser.add_argument('--parallel', action='store_true', default=False)
    args = parser.parse_args()
    # Experiment outputs are namespaced by model/optimizer/experiment name.
    args.outputdir += ((((('/' + args.model) + '/') + args.optimizer) + '/') + args.experiment)
    if os.path.isdir(args.outputdir):
        input('Directory already exists. Press Enter to overwrite or Ctrl+C to cancel.')
    if (not torch.cuda.is_available()):
        args.device = 'cpu'
    best_acc = 0
    start_epoch = 0
    # Standard CIFAR-10 augmentation and per-channel normalization.
    transform_train = transforms.Compose([transforms.RandomCrop(32, padding=2, fill=(128, 128, 128)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
    train_set = torchvision.datasets.CIFAR10(root=args.datadir, train=True, download=True, transform=transform_train)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, num_workers=2, shuffle=True)
    test_set = torchvision.datasets.CIFAR10(root=args.datadir, train=False, download=True, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=args.batch_size, num_workers=2, shuffle=False)
    net = getattr(models, args.model)()
    net = net.to(args.device)
    if ((args.device != 'cpu') and args.parallel):
        net = torch.nn.DataParallel(net)
        torch.backends.cudnn.benchmark = True
    if args.resume:
        print('Resuming from checkpoint..')
        assert os.path.isdir(args.outputdir), 'Error: no checkpoint directory found!'
        checkpoint = torch.load((args.outputdir + '/last.t7'))
        net.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']
    if (args.optimizer == 'sgd'):
        if (args.lr < 0):
            args.lr = 0.1
        optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9)
    elif (args.optimizer == 'adam'):
        if (args.lr < 0):
            args.lr = 0.001
        optimizer = optim.Adam(net.parameters(), lr=args.lr)
    elif (args.optimizer == 'curveball'):
        # `lambda` is a reserved word, so fetch the parsed value via getattr.
        lambd = getattr(args, 'lambda')
        optimizer = CurveBall(net.parameters(), lr=args.lr, momentum=args.momentum, lambd=lambd, auto_lambda=(not args.no_auto_lambda))
    logger = None
    if Logger:
        logger = Logger(args.outputdir, meta=args, resume=args.resume)
    for epoch in range(start_epoch, args.epochs):
        train(args, net, args.device, train_loader, optimizer, epoch, logger)
        test(args, net, args.device, test_loader, logger)
        if logger:
            acc = logger.average()['val.accuracy']
            logger.append()
        # NOTE(review): `acc` is only assigned when a logger exists; saving a
        # checkpoint without a logger would raise NameError — confirm intended.
        if ((epoch % args.save_interval) == 0):
            print('Saving..')
            state = {'net': net.state_dict(), 'optimizer': optimizer.state_dict(), 'acc': acc, 'epoch': epoch}
            if (not os.path.isdir(args.outputdir)):
                os.mkdir(args.outputdir)
            torch.save(state, (args.outputdir + '/last.t7'))
            if (logger and (acc > best_acc)):
                shutil.copyfile((args.outputdir + '/last.t7'), (args.outputdir + '/best.t7'))
                best_acc = acc
class Flatten(nn.Module):
    """Flattens every dimension after the batch dimension."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
def onehot(target, like):
    """Transforms numeric labels into one-hot regression targets shaped like `like`."""
    encoded = torch.zeros_like(like)
    # Place a 1.0 at each row's target class index.
    encoded.scatter_(1, target.unsqueeze(1), 1.0)
    return encoded
def train(args, model, device, train_loader, optimizer, epoch, logger):
    """Train `model` for one epoch; CurveBall optimizers receive closures instead of gradients."""
    model.train()
    for batch_idx, (inputs, labels) in enumerate(train_loader):
        start = time()
        inputs, labels = inputs.to(device), labels.to(device)
        model_fn = lambda: model(inputs)
        loss_fn = lambda pred: F.cross_entropy(pred, labels)
        if isinstance(optimizer, CurveBall):
            # CurveBall evaluates the model/loss itself (possibly more than once).
            loss, predictions = optimizer.step(model_fn, loss_fn)
        else:
            optimizer.zero_grad()
            predictions = model_fn()
            loss = loss_fn(predictions)
            loss.backward()
            optimizer.step()
        top1 = predictions.max(1, keepdim=True)[1]
        accuracy = top1.eq(labels.view_as(top1)).double().mean()
        stats = {'train.loss': loss.item(), 'train.accuracy': accuracy.item()}
        if logger:
            logger.update_average(stats)
            # Skip the first few timings so warm-up doesn't skew the average.
            if logger.avg_count['train.loss'] > 3:
                logger.update_average({'train.time': time() - start})
            logger.print(line_prefix='ep %i ' % epoch, prefix='train')
        else:
            print(stats)
def test(args, model, device, test_loader, logger):
    """Evaluate `model` on the test set; the averaged stats are printed once at the end."""
    model.eval()
    with torch.no_grad():
        for inputs, labels in test_loader:
            start = time()
            inputs, labels = inputs.to(device), labels.to(device)
            predictions = model(inputs)
            loss = F.cross_entropy(predictions, labels)
            top1 = predictions.max(1, keepdim=True)[1]
            accuracy = top1.eq(labels.view_as(top1)).double().mean()
            stats = {'val.loss': loss.item(), 'val.accuracy': accuracy.item()}
            if logger:
                logger.update_average(stats)
                # Skip the first few timings so warm-up doesn't skew the average.
                if logger.avg_count['val.loss'] > 3:
                    logger.update_average({'val.time': time() - start})
            else:
                print(stats)
    if logger:
        logger.print(prefix='val')
def main():
    """MNIST training entry point (CurveBall demo): parse args, build data/model/optimizer, train."""
    parser = argparse.ArgumentParser()
    parser.add_argument('experiment', nargs='?', default='test')
    parser.add_argument('-batch-size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')
    parser.add_argument('-test-batch-size', type=int, default=1000, help='input batch size for testing (default: 1000)')
    parser.add_argument('-epochs', type=int, default=10, help='number of epochs to train (default: 10)')
    parser.add_argument('-optimizer', choices=['sgd', 'adam', 'curveball'], default='curveball', help='optimizer (sgd, adam, or curveball)')
    # Negative lr/momentum mean "use the optimizer-specific default / auto mode".
    parser.add_argument('-lr', type=float, default=(- 1), metavar='LR', help='learning rate (default: 0.01 for SGD, 0.001 for Adam, 1 for CurveBall)')
    parser.add_argument('-momentum', type=float, default=(- 1), metavar='M', help='momentum (default: 0.5)')
    parser.add_argument('-lambda', type=float, default=1.0, help='lambda')
    parser.add_argument('--no-auto-lambda', action='store_true', default=False, help='disables automatic lambda estimation')
    parser.add_argument('--no-batch-norm', action='store_true', default=False)
    parser.add_argument('--no-cuda', action='store_true', default=False, help='disables CUDA training')
    parser.add_argument('-seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
    parser.add_argument('-datadir', type=str, default='data/mnist', help='MNIST data directory')
    parser.add_argument('-outputdir', type=str, default='data/mnist-experiments', help='output directory')
    args = parser.parse_args()
    use_cuda = ((not args.no_cuda) and torch.cuda.is_available())
    # Outputs are namespaced by optimizer/experiment name.
    args.outputdir += ((('/' + args.optimizer) + '/') + args.experiment)
    if os.path.isdir(args.outputdir):
        input('Directory already exists. Press Enter to overwrite or Ctrl+C to cancel.')
    torch.manual_seed(args.seed)
    device = torch.device(('cuda' if use_cuda else 'cpu'))
    kwargs = ({'num_workers': 1, 'pin_memory': True} if use_cuda else {})
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(args.datadir, train=True, download=True, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(args.datadir, train=False, transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])), batch_size=args.test_batch_size, shuffle=True, **kwargs)
    # Small LeNet-style conv net; batch norm is inserted unless disabled.
    layers = [nn.Conv2d(1, 10, kernel_size=5), nn.MaxPool2d(kernel_size=2), nn.ReLU(), nn.Conv2d(10, 20, kernel_size=5), nn.MaxPool2d(kernel_size=2), nn.ReLU(), nn.Dropout2d(), Flatten(), nn.Linear(320, 50), nn.ReLU(), nn.Dropout(), nn.Linear(50, 10)]
    if (not args.no_batch_norm):
        insert_bnorm(layers)
    model = nn.Sequential(*layers)
    model.to(device)
    if (args.optimizer == 'sgd'):
        if (args.lr < 0):
            args.lr = 0.01
        if (args.momentum < 0):
            args.momentum = 0.5
        optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
    elif (args.optimizer == 'adam'):
        if (args.lr < 0):
            args.lr = 0.001
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    elif (args.optimizer == 'curveball'):
        # `lambda` is a reserved word, so fetch the parsed value via getattr.
        lambd = getattr(args, 'lambda')
        optimizer = CurveBall(model.parameters(), lr=args.lr, momentum=args.momentum, lambd=lambd, auto_lambda=(not args.no_auto_lambda))
    with Logger(args.outputdir, meta=args) as logger:
        for epoch in range(1, (args.epochs + 1)):
            train(args, model, device, train_loader, optimizer, epoch, logger)
            test(args, model, device, test_loader, logger)
            logger.append()
class Flatten(nn.Module):
    """Collapses all non-batch dimensions into one."""

    def forward(self, input):
        return input.reshape(input.size(0), -1) if False else input.view(input.size(0), -1)
def BasicNetBN():
    # Convenience factory: BasicNet with batch normalization inserted.
    return BasicNet(batch_norm=True)
def BasicNet(batch_norm=False):
    """Basic conv net for 32x32 CIFAR images: three conv/pool stages plus a 2-layer head."""
    layers = [
        nn.Conv2d(3, 32, kernel_size=5, padding=2), nn.ReLU(),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        nn.Conv2d(32, 32, kernel_size=5, padding=2), nn.ReLU(),
        nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
        nn.Conv2d(32, 64, kernel_size=5, padding=2), nn.ReLU(),
        nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
        Flatten(),
        nn.Linear(4 * 4 * 64, 64), nn.ReLU(),
        nn.Linear(64, 10),
    ]
    if batch_norm:
        # Mutates `layers` in place, adding BN after each conv/linear (except the last).
        insert_bnorm(layers, init_gain=True, eps=0.0001)
    return nn.Sequential(*layers)
def insert_bnorm(layers, init_gain=False, eps=1e-05, ignore_last_layer=True):
    """Inserts batch-norm layers after each convolution/linear layer in a list of layers.

    Mutates and returns `layers`. With ignore_last_layer=True the final
    conv/linear (the classifier) is left without a following batch-norm.
    """
    # Snapshot (index, layer) pairs first; inserting from the back keeps
    # earlier indices valid.
    snapshot = list(enumerate(layers))
    seen_last = False
    for idx, layer in reversed(snapshot):
        if not isinstance(layer, (nn.Conv2d, nn.Linear)):
            continue
        if ignore_last_layer and not seen_last:
            seen_last = True  # skip the classifier layer itself
            continue
        if isinstance(layer, nn.Conv2d):
            bnorm = nn.BatchNorm2d(layer.out_channels, eps=eps)
        else:
            bnorm = nn.BatchNorm1d(layer.out_features, eps=eps)
        if init_gain:
            bnorm.weight.data[:] = 1.0
        layers.insert(idx + 1, bnorm)
    return layers
class Bottleneck(nn.Module):
    """DenseNet bottleneck: BN-ReLU-1x1 conv, BN-ReLU-3x3 conv, concat output with input."""

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        inner = 4 * growth_rate  # bottleneck width
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, inner, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inner)
        self.conv2 = nn.Conv2d(inner, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        h = self.conv1(F.relu(self.bn1(x)))
        h = self.conv2(F.relu(self.bn2(h)))
        # Dense connectivity: new features are prepended to the input features.
        return torch.cat([h, x], 1)
class Transition(nn.Module):
    """DenseNet transition: BN-ReLU-1x1 conv (channel compression) then 2x2 average pooling."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        reduced = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(reduced, 2)
class DenseNet(nn.Module):
    """DenseNet for CIFAR: four dense blocks separated by channel-compressing transitions."""

    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2 * growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        # Block 1 + transition.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        # Block 2 + transition.
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        # Block 3 + transition.
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2] * growth_rate
        out_planes = int(math.floor(num_planes * reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        # Block 4 feeds the classifier directly (no transition).
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3] * growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        # Each block adds `growth_rate` channels via dense concatenation.
        blocks = []
        for _ in range(nblock):
            blocks.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*blocks)

    def forward(self, x):
        h = self.conv1(x)
        h = self.trans1(self.dense1(h))
        h = self.trans2(self.dense2(h))
        h = self.trans3(self.dense3(h))
        h = self.dense4(h)
        h = F.avg_pool2d(F.relu(self.bn(h)), 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def DenseNet121():
    # DenseNet-121: dense blocks of 6/12/24/16 bottlenecks, growth rate 32.
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32)
def DenseNet169():
    # DenseNet-169: dense blocks of 6/12/32/32 bottlenecks, growth rate 32.
    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32)
def DenseNet201():
    # DenseNet-201: dense blocks of 6/12/48/32 bottlenecks, growth rate 32.
    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32)
def DenseNet161():
    # DenseNet-161: dense blocks of 6/12/36/24 bottlenecks, growth rate 48.
    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48)
def densenet_cifar():
    # Small CIFAR variant: DenseNet-121 layout with a reduced growth rate of 12.
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12)
def test():
    """Smoke test: push one random CIFAR-sized batch through densenet_cifar."""
    net = densenet_cifar()
    out = net(torch.randn(1, 3, 32, 32))
    print(out)
class Inception(nn.Module):
    """Inception module with four parallel branches whose outputs are channel-concatenated."""

    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # Branch 1: plain 1x1 conv.
        self.b1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1), nn.ReLU(True),
        )
        # Branch 2: 1x1 reduction then 3x3 conv.
        self.b2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red), nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3), nn.ReLU(True),
        )
        # Branch 3: 1x1 reduction then two stacked 3x3 convs (5x5 receptive field).
        self.b3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red), nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5), nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5), nn.ReLU(True),
        )
        # Branch 4: max-pool then 1x1 projection.
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes), nn.ReLU(True),
        )

    def forward(self, x):
        branches = [self.b1(x), self.b2(x), self.b3(x), self.b4(x)]
        return torch.cat(branches, 1)
class GoogLeNet(nn.Module):
    """GoogLeNet (Inception v1) adapted for 32x32 CIFAR inputs, 10 classes."""

    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )
        # Inception stage 3.
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        # Inception stage 4.
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        # Inception stage 5.
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.maxpool(self.b3(self.a3(out)))
        out = self.e4(self.d4(self.c4(self.b4(self.a4(out)))))
        out = self.maxpool(out)
        out = self.b5(self.a5(out))
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def test():
    """Smoke test: check the output shape of GoogLeNet on one random CIFAR-sized input."""
    net = GoogLeNet()
    out = net(torch.randn(1, 3, 32, 32))
    print(out.size())
class LeNet(nn.Module):
    """Classic LeNet-5 style network for 32x32 RGB inputs, 10 output classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        h = F.max_pool2d(F.relu(self.conv1(x)), 2)
        h = F.max_pool2d(F.relu(self.conv2(h)), 2)
        h = h.view(h.size(0), -1)
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        return self.fc3(h)
class Block(nn.Module):
    """MobileNet block: depthwise 3x3 conv then pointwise 1x1 conv, BN+ReLU after each."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # groups=in_planes makes conv1 depthwise (one filter per input channel).
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride,
                               padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))
        return F.relu(self.bn2(self.conv2(h)))
class MobileNet(nn.Module):
    """MobileNet v1 for 32x32 inputs (10 classes by default)."""

    # An int entry means (out_planes, stride=1); a tuple is (out_planes, stride).
    cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2),
           512, 512, 512, 512, 512, (1024, 2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        blocks = []
        for entry in self.cfg:
            if isinstance(entry, int):
                out_planes, stride = entry, 1
            else:
                out_planes, stride = entry
            blocks.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def test():
    """Smoke-test MobileNet on one random 32x32 RGB input."""
    model = MobileNet()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
class Block(nn.Module):
    """expand + depthwise + pointwise"""

    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride

        planes = expansion * in_planes
        # 1x1 expansion.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 depthwise.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, groups=planes, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 linear projection — deliberately no ReLU after bn3.
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1,
                               padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        # Projection shortcut only when stride is 1 but widths differ;
        # with stride 2 the residual connection is skipped entirely.
        self.shortcut = nn.Sequential()
        if stride == 1 and in_planes != out_planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1,
                          padding=0, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.stride == 1:
            out = out + self.shortcut(x)
        return out
class MobileNetV2(nn.Module):
    """MobileNetV2 for 32x32 inputs (10 classes by default)."""

    # Per stage: (expansion, out_planes, num_blocks, stride).
    cfg = [(1, 16, 1, 1),
           (6, 24, 2, 1),
           (6, 32, 3, 2),
           (6, 64, 4, 2),
           (6, 96, 3, 1),
           (6, 160, 3, 2),
           (6, 320, 1, 1)]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0,
                               bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)

    def _make_layers(self, in_planes):
        blocks = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            # Only the first block of each stage may downsample.
            for s in [stride] + [1] * (num_blocks - 1):
                blocks.append(Block(in_planes, out_planes, expansion, s))
                in_planes = out_planes
        return nn.Sequential(*blocks)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def test():
    """Smoke-test MobileNetV2 on a batch of two random 32x32 inputs."""
    model = MobileNetV2()
    out = model(torch.randn(2, 3, 32, 32))
    print(out.size())
class SepConv(nn.Module):
    """Separable Convolution."""

    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(SepConv, self).__init__()
        # Grouped conv (groups=in_planes) with 'same' padding for odd
        # kernel sizes, followed by batch norm. No activation here.
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size, stride,
                               padding=(kernel_size - 1) // 2,
                               bias=False, groups=in_planes)
        self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        h = self.conv1(x)
        return self.bn1(h)
class CellA(nn.Module):
    """PNASNet cell A: 7x7 separable-conv branch + max-pool branch, summed."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(CellA, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7,
                                 stride=stride)
        if stride == 2:
            # 1x1 conv + BN to widen the pooled branch to out_planes.
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                   stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        branch_conv = self.sep_conv1(x)
        branch_pool = F.max_pool2d(x, kernel_size=3, stride=self.stride,
                                   padding=1)
        if self.stride == 2:
            branch_pool = self.bn1(self.conv1(branch_pool))
        return F.relu(branch_conv + branch_pool)
class CellB(nn.Module):
    """PNASNet cell B: two summed branch pairs, concatenated then projected."""

    def __init__(self, in_planes, out_planes, stride=1):
        super(CellB, self).__init__()
        self.stride = stride
        # First pair: 7x7 and 3x3 separable convs.
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7,
                                 stride=stride)
        self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3,
                                 stride=stride)
        # Second pair: max-pool (+ optional widening) and 5x5 separable conv.
        self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5,
                                 stride=stride)
        if stride == 2:
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                                   stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
        # 1x1 projection reduces the concatenation back to out_planes.
        self.conv2 = nn.Conv2d(2 * out_planes, out_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        y1 = self.sep_conv1(x)
        y2 = self.sep_conv2(x)
        y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride == 2:
            y3 = self.bn1(self.conv1(y3))
        y4 = self.sep_conv3(x)
        pair1 = F.relu(y1 + y2)
        pair2 = F.relu(y3 + y4)
        merged = torch.cat([pair1, pair2], 1)
        return F.relu(self.bn2(self.conv2(merged)))
class PNASNet(nn.Module):
    """PNASNet backbone built from a repeated cell class.

    Args:
        cell_type: class instantiated as ``cell_type(in_planes, out_planes,
            stride)`` returning an nn.Module (e.g. CellA or CellB).
        num_cells: number of stride-1 cells per resolution stage.
        num_planes: channel width of the first stage; doubled at each of the
            two downsampling stages.
    """

    def __init__(self, cell_type, num_cells, num_planes):
        super(PNASNet, self).__init__()
        self.in_planes = num_planes
        self.cell_type = cell_type

        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_planes)

        # BUG FIX: the stage cell count was hard-coded to 6, silently
        # ignoring the num_cells constructor argument. Use the parameter.
        # (Existing callers pass num_cells=6, so behavior is unchanged.)
        self.layer1 = self._make_layer(num_planes, num_cells=num_cells)
        self.layer2 = self._downsample(num_planes * 2)
        self.layer3 = self._make_layer(num_planes * 2, num_cells=num_cells)
        self.layer4 = self._downsample(num_planes * 4)
        self.layer5 = self._make_layer(num_planes * 4, num_cells=num_cells)

        self.linear = nn.Linear(num_planes * 4, 10)

    def _make_layer(self, planes, num_cells):
        # num_cells stride-1 cells at a fixed width.
        layers = []
        for _ in range(num_cells):
            layers.append(self.cell_type(self.in_planes, planes, stride=1))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def _downsample(self, planes):
        # A single stride-2 cell that also changes the width.
        layer = self.cell_type(self.in_planes, planes, stride=2)
        self.in_planes = planes
        return layer

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        # Two downsamplings take a 32x32 input to 8x8 before global pooling.
        out = F.avg_pool2d(out, 8)
        return self.linear(out.view(out.size(0), -1))
def PNASNetA():
    """PNASNet built from CellA: 6 cells per stage, initial width 44."""
    return PNASNet(CellA, num_cells=6, num_planes=44)
def PNASNetB():
    """PNASNet built from CellB: 6 cells per stage, initial width 32."""
    return PNASNet(CellB, num_cells=6, num_planes=32)
def test():
    """Instantiate PNASNetB and print the logits for one random input."""
    model = PNASNetB()
    out = model(torch.randn(1, 3, 32, 32))
    print(out)
class PreActBlock(nn.Module):
    """Pre-activation version of the BasicBlock."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        # Projection shortcut only when the shape changes; when it does not,
        # the attribute is left undefined and forward() uses the identity.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False)
            )

    def forward(self, x):
        # BN/ReLU come first; the shortcut taps the pre-activated signal.
        out = F.relu(self.bn1(x))
        if hasattr(self, 'shortcut'):
            residual = self.shortcut(out)
        else:
            residual = x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        return out + residual
class PreActBottleneck(nn.Module):
    """Pre-activation version of the original Bottleneck module."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        # Projection shortcut only when the shape changes; when it does not,
        # the attribute is left undefined and forward() uses the identity.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False)
            )

    def forward(self, x):
        # BN/ReLU come first; the shortcut taps the pre-activated signal.
        out = F.relu(self.bn1(x))
        if hasattr(self, 'shortcut'):
            residual = self.shortcut(out)
        else:
            residual = x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))
        out = self.conv3(F.relu(self.bn3(out)))
        return out + residual
class PreActResNet(nn.Module):
    """Pre-activation ResNet over 32x32 inputs (no BN/ReLU after the stem)."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of the stage may downsample.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def PreActResNet18():
    """18-layer pre-activation ResNet: 2 basic blocks per stage."""
    return PreActResNet(PreActBlock, [2, 2, 2, 2])
def PreActResNet34():
    """34-layer pre-activation ResNet: basic blocks, [3, 4, 6, 3] stages."""
    return PreActResNet(PreActBlock, [3, 4, 6, 3])
def PreActResNet50():
    """50-layer pre-activation ResNet: bottlenecks, [3, 4, 6, 3] stages."""
    return PreActResNet(PreActBottleneck, [3, 4, 6, 3])
def PreActResNet101():
    """101-layer pre-activation ResNet: bottlenecks, [3, 4, 23, 3] stages."""
    return PreActResNet(PreActBottleneck, [3, 4, 23, 3])
def PreActResNet152():
    """152-layer pre-activation ResNet: bottlenecks, [3, 8, 36, 3] stages."""
    return PreActResNet(PreActBottleneck, [3, 8, 36, 3])
def test():
    """Smoke-test PreActResNet18 on one random 32x32 input."""
    model = PreActResNet18()
    out = model(torch.randn(1, 3, 32, 32))
    print(out.size())
class BasicBlock(nn.Module):
    """Post-activation ResNet basic block: two 3x3 convs with a residual."""

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # Identity shortcut unless the spatial size or width changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class Bottleneck(nn.Module):
    """Post-activation ResNet bottleneck: 1x1 reduce, 3x3, 1x1 expand (x4)."""

    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes,
                               kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)

        # Identity shortcut unless the spatial size or width changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return F.relu(out + residual)
class ResNet(nn.Module):
    """Post-activation ResNet over 32x32 inputs."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of the stage may downsample.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def ResNet18():
    """18-layer ResNet: basic blocks, 2 per stage."""
    return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
    """34-layer ResNet: basic blocks, [3, 4, 6, 3] stages."""
    return ResNet(BasicBlock, [3, 4, 6, 3])