def file_exists(file_path):
    return os.path.isfile(file_path)
def assert_file_exists(file_path):
    if not file_exists(file_path):
        raise FileNotFoundError('Cannot find file: {:s}'.format(os.path.abspath(file_path)))
def assert_path_exists(file_or_dir):
    if not os.path.exists(file_or_dir):
        raise FileNotFoundError('Cannot find file or dir: {:s}'.format(os.path.abspath(file_or_dir)))
def before_save(file_or_dir):
    """
    Make sure that the dedicated path exists (create it if it does not).
    :param file_or_dir:
    :return:
    """
    dir_name = os.path.dirname(os.path.abspath(file_or_dir))
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
def get_ext(filename):
    _, ext = os.path.splitext(filename)
    return ext
def register_file_handling(ext: str, saver: Saver, loader: Loader):
    assert ext not in _ext_table, 'A handler for extension {} is already registered'.format(ext)
    _ext_table[ext] = (saver, loader)
def save_file(obj, filename, *args, **kwargs):
    ext = get_ext(filename)
    if ext in _ext_table:
        before_save(filename)
        return _ext_table[ext][0](obj, filename, *args, **kwargs)
    raise ValueError('Unsupported file {} with file extension {}'.format(filename, ext))
def load_file(filename, *args, **kwargs):
    ext = get_ext(filename)
    if ext in _ext_table:
        return _ext_table[ext][1](filename, *args, **kwargs)
    raise ValueError('Unsupported file {} with file extension {}'.format(filename, ext))
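# Example (illustrative, not part of the original module): registering a JSON
# handler with the extension table above. The _save_json/_load_json helpers are
# hypothetical; register_file_handling, save_file, and load_file are the
# functions defined in this module.
import json

def _save_json(obj, filename):
    with open(filename, 'w') as f:
        json.dump(obj, f)

def _load_json(filename):
    with open(filename) as f:
        return json.load(f)

register_file_handling('.json', _save_json, _load_json)
save_file({'a': 1}, '/tmp/demo/config.json')  # before_save creates /tmp/demo if needed
assert load_file('/tmp/demo/config.json') == {'a': 1}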
def get_discretizer(method='mdlp', *args, **kwargs):
    if method == 'mdlp':
        return MDLP(*args, **kwargs)
    raise ValueError('Unsupported method %s' % method)
def compute_mdlp_all_intervals(mdlp_discretizer):
    category_names = []
    for i, cut_points in enumerate(mdlp_discretizer.cut_points_):
        if cut_points is None:
            category_names.append(None)
            continue
        idxs = np.arange(len(cut_points) + 1)
        names = mdlp_discretizer.assign_intervals(idxs, i)
        category_names.append(names)
    return category_names
class RuleList(BayesianRuleList):
    def __init__(self, min_rule_len=1, max_rule_len=2, min_support=0.02, lambda_=20, eta=1,
                 iters=30000, n_chains=30, alpha=1, fim_method='eclat', feature_names=None,
                 category_names=None, seed=None, verbose=0, discretize_method='mdlp',
                 numeric_features=None):
        """
        Same init as pysbrl.BayesianRuleList; all arguments are copied here for better IDE hints.
        """
        super(RuleList, self).__init__(
            min_rule_len=min_rule_len, max_rule_len=max_rule_len, min_support=min_support,
            lambda_=lambda_, eta=eta, iters=iters, n_chains=n_chains, alpha=alpha,
            fim_method=fim_method, feature_names=feature_names, category_names=category_names,
            seed=seed)
        self.discretize_method = discretize_method
        self.numeric_features = numeric_features
        self.discretizer = None

    def _validate_discretizer(self):
        if isinstance(self.discretize_method, str):
            self.discretizer = get_discretizer(self.discretize_method,
                                               continuous_features=self.numeric_features,
                                               random_state=self.seed)
        else:
            self.discretizer = self.discretize_method
        if not hasattr(self.discretizer, 'fit'):
            raise ValueError('discretizer should have a fit method!')
        if not hasattr(self.discretizer, 'transform'):
            raise ValueError('discretizer should have a transform method!')

    def fit(self, X, y):
        """
        Fit the rule list on the given training data.
        :param X: 2D array
        :param y: 1D integer array
        :return: self
        """
        self._validate_discretizer()
        self.discretizer.fit(X, y)
        X_disc = self.discretizer.transform(X)
        self.category_names = compute_mdlp_all_intervals(self.discretizer)
        super(RuleList, self).fit(X_disc, y)
        return self

    def predict_proba(self, x):
        """
        Give the probability output (prediction) on the given data.
        :param x:
        :return:
        """
        if self.discretizer is not None:
            x = self.discretizer.transform(x)
        return super(RuleList, self).predict_proba(x)

    def caught_matrix(self, x):
        """
        Compute the caught matrix of x.
        Each rule has an array of bools showing whether each instance is caught by this rule.
        :param np.ndarray x: 2D array (n_instances, n_features) of categorical data, must be of type int
        :return: a bool np.ndarray of shape (n_rules, n_instances)
        """
        if self.discretizer is not None:
            x = self.discretizer.transform(x)
        return super(RuleList, self).caught_matrix(x)

    def decision_path(self, x):
        if self.discretizer is not None:
            x = self.discretizer.transform(x)
        return super(RuleList, self).decision_path(x)

    def explain(self, x, trace_all=False):
        """
        Explain the prediction of a given input using rule(s).
        :param x: 1D array of a single input instance
        :param bool trace_all:
        :return: if `trace_all`, all queried rules are returned;
            otherwise, the single rule that captured the input is returned.
        """
        x = x.reshape(1, -1)
        assert x.shape[1] == self.n_features
        decision_path = self.decision_path(x)
        queried_rules = np.arange(self.n_rules)[decision_path]
        if trace_all:
            return [self.rule_list[i] for i in queried_rules]
        return self.rule_list[queried_rules[-1]]
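# Example (illustrative): fitting a RuleList on iris; assumes the pysbrl and
# MDLP dependencies behind this class are installed. The numeric iris features
# are discretized automatically via MDLP inside fit.
from sklearn.datasets import load_iris

iris = load_iris()
rl = RuleList(max_rule_len=2, feature_names=iris.feature_names, seed=42)
rl.fit(iris.data, iris.target)
print(rl)                        # the learned rule list
print(rl.explain(iris.data[0]))  # the rule that captured the first instance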
def surrogate(student, teacher, X, verbose=False):
    """
    Fit a model that surrogates the teacher's predict function.
    :param student: An object (sklearn-style model) that has fit and predict methods.
        The fit method: (train_x, train_y, **kwargs)
        The predict method: (x) -> y (integer array)
    :param teacher: callable: (x: np.ndarray) -> np.ndarray,
        a function that takes a 2D data array as input and outputs a 1D label array
    :param np.ndarray X:
    :param bool verbose:
    :return:
    """
    y = teacher(X).astype(int)  # np.int is deprecated in recent NumPy
    if verbose:
        print('Sampled', len(y), 'data')
    student.fit(X, y)
    return student
def fidelity(teacher, student, X):
    y_target = teacher(X)
    y_pred = student.predict(X)
    return accuracy(y_target, y_pred)
class Surrogate(object):
    """
    A factory-like implementation of the surrogate algorithm.
    Suitable for creating a pipeline surrogate model.
    """
    def __init__(self, teacher, student=None, is_continuous=None, is_categorical=None,
                 is_integer=None, ranges=None, cov_factor=1.0, sampling_rate=2.0,
                 seed=None, verbose=False):
        """
        :param teacher:
        :param student:
        :param np.ndarray or None is_continuous: default None.
            A bool mask array indicating whether each feature is continuous.
            If all three masks are None, then by default all features are continuous.
        :param np.ndarray or None is_categorical: default None.
            A bool mask array indicating whether each feature is categorical.
        :param np.ndarray or None is_integer: default None.
            A bool mask array indicating whether each feature is an integer.
        :param list or None ranges: List[Optional[(float, float)]].
            A list of (min, max) or None, indicating the range of each feature.
        :param float cov_factor: default 1.0
        :param float sampling_rate: default 2.0.
            The sampling rate, i.e., the ratio n_samples / n_training_data.
        :param int or None seed: The random seed for the algorithm.
        :param bool verbose:
        """
        self.teacher = teacher
        self.student = student
        self.data_distribution = None
        self.cov_factor = cov_factor
        self.sampling_rate = sampling_rate
        self.is_continuous = is_continuous
        self.is_categorical = is_categorical
        self.is_integer = is_integer
        self.n_instances = 0
        self.is_fit = False
        self.ranges = ranges
        self.seed = seed
        self.verbose = verbose

    def _validate(self, n_features):
        self.is_continuous, self.is_categorical, self.is_integer = check_input_constraints(
            n_features, self.is_continuous, self.is_categorical, self.is_integer)
        if not callable(self.teacher):
            raise ValueError('the teacher should be a callable function!')
        if self.student is None:
            self.student = RuleList(numeric_features=np.logical_not(self.is_categorical))
        if not hasattr(self.student, 'fit'):
            raise ValueError('The student model should have a fit function')
        if not hasattr(self.student, 'predict'):
            raise ValueError('The student model should have a predict function')

    def fit_distribution(self, X):
        """
        Fit the distribution of the training data.
        Subclasses can override this function to support more powerful distribution estimation
        methods. One idea would be using a GAN to fit a more powerful distribution on an image
        dataset and using it to sample training data for the rule list.
        :param X:
        :return:
        """
        self.data_distribution = create_sampler(X, self.is_continuous, self.is_categorical,
                                                self.is_integer, self.ranges, self.cov_factor,
                                                seed=self.seed)
        self.n_instances = len(X)
        return self

    def fit(self, X, y=None):
        """
        :param np.ndarray X: The training data used for density estimation and oversampling
        :param None y: Placeholder to conform with the sklearn API
        :return: self
        """
        if y is not None:
            warnings.warn('Passing y to the fitting function, y will not be used!', Warning)
        self._validate(X.shape[1])
        self.fit_distribution(X)
        n_samples = int(self.sampling_rate * self.n_instances)
        sampled_x = self.sample(n_samples)
        surrogate(self.student, self.teacher, sampled_x, self.verbose)
        return self

    def sample(self, n):
        """
        An alias that helps sampling from the distribution.
        :param int n: The number of samples to draw
        :return: sampled data
        """
        if self.data_distribution is None:
            raise ValueError('Call fit first to create a data_distribution!')
        return self.data_distribution(n)

    def score(self, X, y=None):
        if y is not None:
            print('Warning: y will not be used in the score function!')
        return fidelity(self.teacher, self.student, X)

    def self_test(self, n_sample=200):
        """
        Use data randomly sampled from the estimated distribution to test the surrogate model.
        :param n_sample: number of test data points to sample
        :return: score (fidelity) on the test data
        """
        x = self.sample(n_sample)
        return self.score(x)
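# Example (illustrative): distilling a random forest into a rule-list surrogate.
# Any callable mapping a 2D array to integer labels works as the teacher; the
# default student is the pysbrl-backed RuleList above, so its dependencies must
# be available.
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
train_x, test_x, train_y, _ = train_test_split(X, y, random_state=0)
teacher_model = RandomForestClassifier(random_state=0).fit(train_x, train_y)

sur = Surrogate(teacher_model.predict, sampling_rate=2.0, seed=0)
sur.fit(train_x)  # estimates the density, oversamples, and fits the student
print('test fidelity:', sur.score(test_x))
print('self test:', sur.self_test(n_sample=200))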
def rule_surrogate(target, train_x, is_continuous=None, is_categorical=None, is_integer=None,
                   ranges=None, cov_factor=1.0, sampling_rate=2.0, seed=None, rlargs=None):
    n_features = train_x.shape[1]
    is_continuous, is_categorical, is_integer = check_input_constraints(
        n_features, is_continuous, is_categorical, is_integer)
    if rlargs is not None:
        rl = RuleList(numeric_features=np.logical_not(is_categorical), **rlargs)
    else:
        rl = None
    surrogator = Surrogate(target, rl, is_continuous, is_categorical, is_integer,
                           ranges, cov_factor, sampling_rate, seed)
    surrogator.fit(train_x)
    return surrogator
def get_url(name, remote=False, root_url=None):
    if remote:
        return _remote_urls[name]
    if root_url is None:
        return STATIC_LOCAL_URL + _local_urls[name]
    return root_url + _local_urls[name]
def get_id(obj, prefix='rm', suffix=''):
    return prefix + str(id(obj)) + suffix
def install_static(local_path=None, target_name=None, overwrite=False):
    """
    Write the JavaScript libraries to the given location.
    This utility is used by the IPython notebook tools to enable easy use
    of pyLDAvis with no web connection.

    Parameters
    ----------
    local_path : string (optional)
        the path to a file or a directory
    target_name : string (optional)
        the name of the target file or directory
    overwrite : boolean (optional)
        whether to overwrite an existing file or directory

    Returns
    -------
    The URLs of the local files
    """
    if IPYTHON_VERSION is None or IPYTHON_VERSION[0] < '2':
        raise ModuleNotFoundError('IPython with a version larger than 2.0 needs to be installed!')
    if IPYTHON_VERSION[0] > '3':
        from notebook.nbextensions import install_nbextension, check_nbextension
    else:
        from IPython.html.nbextensions import install_nbextension, check_nbextension
    if local_path is None:
        local_path = STATIC_DIR
        target_name = VERSIONED_NAME
    if target_name is None:
        target_name = os.path.basename(local_path)
    if not os.path.exists(local_path):
        raise ValueError('%s not found at %s' % (target_name, local_path))
    if check_nbextension([target_name]):
        if not overwrite:
            return '/nbextensions/' + target_name
        warnings.warn('Extension %s already exists. Overwriting...' % target_name)
    full_url = install_nbextension(local_path, overwrite=overwrite, destination=target_name)
    print(full_url)
    return '/nbextensions/' + target_name
def load_model(filename: str):
    if not os.path.isfile(filename):
        raise FileNotFoundError('Cannot find file: {:s}'.format(os.path.abspath(filename)))
    with open(filename, 'rb') as f:
        mdl = pickle.load(f)
    return mdl
def save_model(mdl, filename):
    dir_name = os.path.dirname(os.path.abspath(filename))
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)
    with open(filename, 'wb') as f:
        return pickle.dump(mdl, f)
def train_nn(dataset, neurons=(20,), **kwargs):
    train_x, train_y, test_x, test_y = (dataset['train_x'], dataset['train_y'],
                                        dataset['test_x'], dataset['test_y'])
    is_categorical = dataset.get('is_categorical', None)
    model = MLPClassifier(hidden_layer_sizes=neurons, **kwargs)
    if is_categorical is not None:
        # Note: the `categorical_features` argument was removed from OneHotEncoder
        # in scikit-learn 0.22; this code requires an older scikit-learn (or a
        # ColumnTransformer-based replacement in newer versions).
        model = Pipeline([('one_hot', OneHotEncoder(categorical_features=is_categorical)),
                          ('mlp', model)])
    model.fit(train_x, train_y)
    train_score = model.score(train_x, train_y)
    test_score = model.score(test_x, test_y)
    print('Training score:', train_score)
    print('Test score:', test_score)
    return model
def train_surrogate(model, dataset, sampling_rate=2.0, **kwargs):
    train_x, train_y, test_x, test_y = (dataset['train_x'], dataset['train_y'],
                                        dataset['test_x'], dataset['test_y'])
    is_continuous = dataset.get('is_continuous', None)
    is_categorical = dataset.get('is_categorical', None)
    is_integer = dataset.get('is_integer', None)
    feature_names = dataset.get('feature_names', None)
    surrogate = rule_surrogate(model.predict, train_x, sampling_rate=sampling_rate,
                               is_continuous=is_continuous, is_categorical=is_categorical,
                               is_integer=is_integer,
                               rlargs={'feature_names': feature_names, 'verbose': 2}, **kwargs)
    student = surrogate.student
    print('The surrogate rule list:')
    if isinstance(student, Pipeline):
        print(student.named_steps['rule_list'])
    else:
        print(student)
    train_fidelity = surrogate.score(train_x)
    test_fidelity = surrogate.score(test_x)
    print('Training fidelity:', train_fidelity)
    print('Test fidelity:', test_fidelity)
    return surrogate
def prepare_data(name_or_path):
    if name_or_path == 'iris':
        dataset = load_iris()
    elif name_or_path == 'breast_cancer':
        dataset = load_breast_cancer()
    else:
        try:
            with open(name_or_path, 'rb') as f:
                dataset = pickle.load(f)
        except FileNotFoundError:
            raise ValueError('Cannot locate dataset', name_or_path)
    if ('train_x' in dataset and 'train_y' in dataset
            and 'test_x' in dataset and 'test_y' in dataset):
        return dataset
    dataset['train_x'], dataset['test_x'], dataset['train_y'], dataset['test_y'] = \
        train_test_split(dataset['data'], dataset['target'], test_size=0.25, random_state=42)
    return dataset
def main():
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--dataset', type=str, default='iris')
    parser.add_argument('--sample_rate', type=float, default=2.0)
    args = parser.parse_args()
    dataset = prepare_data(args.dataset)
    nn = train_nn(dataset, (20, 20))
    train_surrogate(nn, dataset, args.sample_rate)
def normalize(data):
    meanv = np.mean(data, axis=0)
    stdv = np.std(data, axis=0)
    return (data - meanv) / stdv
def load_dataset(norm_flag=True):
    imgX = sio.loadmat('river/river_before.mat')['river_before']
    imgY = sio.loadmat('river/river_after.mat')['river_after']
    imgX = np.reshape(imgX, newshape=[-1, imgX.shape[-1]])
    imgY = np.reshape(imgY, newshape=[-1, imgY.shape[-1]])
    GT = sio.loadmat('river/groundtruth.mat')['lakelabel_v1']
    if norm_flag:
        X = preprocessing.StandardScaler().fit_transform(imgX)
        Y = preprocessing.StandardScaler().fit_transform(imgY)
    else:
        # without this branch, X and Y would be undefined when norm_flag is False
        X, Y = imgX, imgY
    return X, Y, GT
def cva(X, Y):
    diff = X - Y
    diff_s = (diff ** 2).sum(axis=-1)
    return np.sqrt(diff_s)
def SFA(X, Y):
    """
    See http://sigma.whu.edu.cn/data/res/files/SFACode.zip
    """
    norm_flag = True
    m, n = np.shape(X)
    meanX = np.mean(X, axis=0)
    meanY = np.mean(Y, axis=0)
    stdX = np.std(X, axis=0)
    stdY = np.std(Y, axis=0)
    Xc = (X - meanX) / stdX
    Yc = (Y - meanY) / stdY
    Xc = Xc.T
    Yc = Yc.T
    A = np.matmul(Xc - Yc, (Xc - Yc).T) / m
    # The original line averaged Yc @ Yc.T with itself; the intended matrix is
    # almost certainly the mean of the two covariance matrices.
    B = (np.matmul(Xc, Xc.T) + np.matmul(Yc, Yc.T)) / 2 / m
    D, V = scipy.linalg.eig(A, B)
    D = D.real
    if norm_flag is True:
        aux1 = np.matmul(np.matmul(V.T, B), V)
        aux2 = 1 / np.sqrt(np.diag(aux1))
        V = V * aux2
    X_trans = np.matmul(V.T, Xc).T
    Y_trans = np.matmul(V.T, Yc).T
    return X_trans, Y_trans
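# Example (illustrative): a minimal change-detection pipeline combining the
# loader above with SFA and CVA. The Otsu threshold from scikit-image is an
# assumption, not part of the original code.
from skimage.filters import threshold_otsu

X, Y, GT = load_dataset(norm_flag=True)
X_trans, Y_trans = SFA(X, Y)              # suppress unchanged pixels
change_magnitude = cva(X_trans, Y_trans)  # per-pixel change intensity
change_map = change_magnitude > threshold_otsu(change_magnitude)
print('agreement with ground truth:', np.mean(change_map == (GT.ravel() > 0)))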
class LeNetBBB(nn.Module):
    def __init__(self, num_classes=10, var0=1, estimator='flipout'):
        _check_estimator(estimator)
        super().__init__()
        Conv2dVB = Conv2dReparameterization if estimator == 'reparam' else Conv2dFlipout
        LinearVB = LinearReparameterization if estimator == 'reparam' else LinearFlipout
        self.conv1 = Conv2dVB(1, 6, 5, prior_variance=var0)
        self.conv2 = Conv2dVB(6, 16, 5, prior_variance=var0)
        self.flatten = nn.Flatten()
        self.fc1 = LinearVB(256, 120, prior_variance=var0)
        self.fc2 = LinearVB(120, 84, prior_variance=var0)
        self.fc3 = LinearVB(84, num_classes, prior_variance=var0)

    def forward(self, x):
        x, kl_total = self.features(x)
        x, kl = self.fc3(x)
        kl_total += kl
        return x, kl_total

    def features(self, x, return_acts=False):
        kl_total = 0
        x, kl = self.conv1(x)
        kl_total += kl
        x = F.max_pool2d(F.relu(x), 2, 2)
        x, kl = self.conv2(x)
        kl_total += kl
        x = F.max_pool2d(F.relu(x), 2, 2)
        x = self.flatten(x)
        x, kl = self.fc1(x)
        kl_total += kl
        x = F.relu(x)
        x, kl = self.fc2(x)
        kl_total += kl
        x = F.relu(x)
        return x, kl_total
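# Example (illustrative): one variational training step for LeNetBBB. The
# forward pass returns (logits, kl); a common ELBO-style objective (assumed
# here, not prescribed by the original code) scales the KL term by the dataset
# size N.
import torch
import torch.nn.functional as F

model = LeNetBBB(num_classes=10, estimator='flipout')
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
N = 60000                                   # e.g. MNIST training-set size

x = torch.randn(32, 1, 28, 28)              # dummy batch
y = torch.randint(0, 10, (32,))
logits, kl = model(x)
loss = F.cross_entropy(logits, y) + kl / N  # NLL + scaled KL divergence
optimizer.zero_grad()
loss.backward()
optimizer.step()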
def _check_estimator(estimator):
    assert estimator in ['reparam', 'flipout'], 'Estimator must be either "reparam" or "flipout"'
class BasicBlock(nn.Module):
    def __init__(self, in_planes, out_planes, stride, var0=1, dropRate=0.0, estimator='reparam'):
        _check_estimator(estimator)
        super().__init__()
        Conv2dVB = Conv2dReparameterization if estimator == 'reparam' else Conv2dFlipout
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = Conv2dVB(in_planes, out_planes, kernel_size=3, stride=stride,
                              padding=1, bias=False, prior_variance=var0)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = Conv2dVB(out_planes, out_planes, kernel_size=3, stride=1,
                              padding=1, bias=False, prior_variance=var0)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        self.convShortcut = (nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                       padding=0, bias=False)
                             if not self.equalInOut else None)

    def forward(self, x):
        kl_total = 0
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out, kl = self.conv1(out if self.equalInOut else x)
        out = self.relu2(self.bn2(out))
        kl_total += kl
        out, kl = self.conv2(out)
        kl_total += kl
        if not self.equalInOut:
            return torch.add(self.convShortcut(x), out), kl_total
        return torch.add(x, out), kl_total
class NetworkBlock(nn.Module):
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, var0=1,
                 dropRate=0.0, estimator='reparam'):
        _check_estimator(estimator)
        super().__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers,
                                      stride, var0, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, var0, dropRate):
        layers = []
        for i in range(nb_layers):
            layers.append(block(in_planes if i == 0 else out_planes, out_planes,
                                stride if i == 0 else 1, var0, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        out = x
        kl_total = 0
        for l in self.layer:
            out, kl = l(out)
            kl_total += kl
        return out, kl_total
class WideResNetBBB(nn.Module):
    def __init__(self, depth, widen_factor, num_classes, num_channel=3, var0=1, droprate=0,
                 estimator='reparam', feature_extractor=False):
        _check_estimator(estimator)
        super().__init__()
        Conv2dVB = Conv2dReparameterization if estimator == 'reparam' else Conv2dFlipout
        LinearVB = LinearReparameterization if estimator == 'reparam' else LinearFlipout
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0
        n = (depth - 4) // 6
        block = BasicBlock
        self.conv1 = Conv2dVB(num_channel, nChannels[0], kernel_size=3, stride=1, padding=1,
                              bias=False, prior_variance=var0)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, var0, droprate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, var0, droprate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, var0, droprate)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = LinearVB(nChannels[3], num_classes, prior_variance=var0)
        self.nChannels = nChannels[3]
        self.feature_extractor = feature_extractor
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out, kl_total = self.features(x)
        if self.feature_extractor:
            return out, kl_total
        out, kl = self.fc(out)
        kl_total += kl
        return out, kl_total

    def features(self, x):
        kl_total = 0
        out, kl = self.conv1(x)
        kl_total += kl
        out, kl = self.block1(out)
        kl_total += kl
        out, kl = self.block2(out)
        kl_total += kl
        out, kl = self.block3(out)
        kl_total += kl
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return out, kl_total
def _check_estimator(estimator):
    assert estimator in ['reparam', 'flipout'], 'Estimator must be either "reparam" or "flipout"'
class CSGHMCTrainer:
    def __init__(self, model, n_cycles, n_samples_per_cycle, n_epochs, initial_lr,
                 num_batch, total_iters, data_size, weight_decay=0.0005, alpha=0.9):
        self.model = model
        self.n_cycles = n_cycles
        self.n_samples_per_cycle = n_samples_per_cycle
        self.n_epochs = n_epochs
        self.epoch_per_cycle = n_epochs // n_cycles
        self.num_batch = num_batch
        self.total_iters = total_iters
        self.data_size = data_size
        self.weight_decay = weight_decay
        self.alpha = alpha
        self.temperature = 1 / data_size
        self.initial_lr = initial_lr
        self.lr = initial_lr

    def adjust_lr(self, epoch, batch_idx):
        # cosine learning-rate schedule, restarted at the beginning of each cycle
        rcounter = epoch * self.num_batch + batch_idx
        cos_inner = np.pi * (rcounter % (self.total_iters // self.n_cycles))
        cos_inner /= self.total_iters // self.n_cycles
        cos_out = np.cos(cos_inner) + 1
        self.lr = 0.5 * cos_out * self.initial_lr

    def update_params(self, epoch):
        for p in self.model.parameters():
            if not hasattr(p, 'buf'):
                p.buf = torch.zeros(p.size()).cuda()
            d_p = p.grad
            d_p.add_(p, alpha=self.weight_decay)
            buf_new = (1 - self.alpha) * p.buf - self.lr * d_p
            # inject Gaussian noise only during the sampling phase at the end of each cycle
            if (epoch % self.epoch_per_cycle) + 1 > self.epoch_per_cycle - self.n_samples_per_cycle:
                eps = torch.randn(p.size()).cuda()
                buf_new += (2 * self.lr * self.alpha * self.temperature / self.data_size) ** 0.5 * eps
            p.data.add_(buf_new)
            p.buf = buf_new
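# Example (illustrative): how the trainer is meant to drive a cyclical SG-MCMC
# run. `model` and `train_loader` are assumed to exist; update_params applies
# the SGHMC step directly to the parameters, so no torch optimizer is used
# (CUDA is assumed, matching the .cuda() calls above).
import torch.nn.functional as F

n_epochs = 200
trainer = CSGHMCTrainer(model, n_cycles=4, n_samples_per_cycle=3, n_epochs=n_epochs,
                        initial_lr=0.1, num_batch=len(train_loader),
                        total_iters=n_epochs * len(train_loader), data_size=50000)
for epoch in range(n_epochs):
    for batch_idx, (X, y) in enumerate(train_loader):
        trainer.adjust_lr(epoch, batch_idx)
        model.zero_grad()
        loss = F.cross_entropy(model(X.cuda()), y.cuda())
        loss.backward()
        trainer.update_params(epoch)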
class LeNet(nn.Module):
    def __init__(self, num_classes=10):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(1, 6, 5), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(6, 16, 5), nn.ReLU(), nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(16 * 4 * 4, 120), nn.ReLU(),
            nn.Linear(120, 84), nn.ReLU(),
            nn.Linear(84, num_classes))

    def forward(self, x):
        return self.net(x)
class MLP(nn.Module):
    def __init__(self, size, act='sigmoid'):
        super().__init__()  # super(type(self), self) breaks under subclassing
        self.num_layers = len(size) - 1
        lower_modules = []
        for i in range(self.num_layers - 1):
            lower_modules.append(nn.Linear(size[i], size[i + 1]))
            if act == 'relu':
                lower_modules.append(nn.ReLU())
            elif act == 'sigmoid':
                lower_modules.append(nn.Sigmoid())
            else:
                raise ValueError(f"{act} activation hasn't been implemented")
        self.layer_1 = nn.Sequential(*lower_modules)
        self.layer_2 = nn.Linear(size[-2], size[-1])

    def forward(self, x):
        o = self.layer_1(x)
        return self.layer_2(o)
class BasicBlock(nn.Module):
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        self.convShortcut = (nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                       padding=0, bias=False)
                             if not self.equalInOut else None)

    def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        if not self.equalInOut:
            return torch.add(self.convShortcut(x), out)
        return torch.add(x, out)
class NetworkBlock(nn.Module):
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(nb_layers):
            layers.append(block(in_planes if i == 0 else out_planes, out_planes,
                                stride if i == 0 else 1, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    def __init__(self, depth, widen_factor, num_classes=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0
        n = (depth - 4) // 6
        block = BasicBlock
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()

    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(out)
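# Example (illustrative): instantiating a WRN-16-4 (depth=16 satisfies the
# (depth - 4) % 6 == 0 constraint) and checking the output shape on a dummy
# CIFAR-10 batch.
import torch

net = WideResNet(depth=16, widen_factor=4, num_classes=10)
out = net(torch.randn(8, 3, 32, 32))
assert out.shape == (8, 10)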
class FixupBasicBlock(nn.Module):
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
        super(FixupBasicBlock, self).__init__()
        self.bias1 = Bias()
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bias2 = Bias()
        self.relu2 = nn.ReLU(inplace=True)
        self.bias3 = Bias()
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bias4 = Bias()
        self.scale1 = Scale()
        self.droprate = dropRate
        self.equalInOut = in_planes == out_planes
        self.convShortcut = (nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                       padding=0, bias=False)
                             if not self.equalInOut else None)

    def forward(self, x):
        if not self.equalInOut:
            x = self.relu1(self.bias1(x))
        else:
            out = self.relu1(self.bias1(x))
        out = self.bias3(self.relu2(self.bias2(self.conv1(out if self.equalInOut else x))))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.bias4(self.scale1(self.conv2(out)))
        if not self.equalInOut:
            return torch.add(self.convShortcut(x), out)
        return torch.add(x, out)
class FixupNetworkBlock(nn.Module):
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0):
        super(FixupNetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate)

    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate):
        layers = []
        for i in range(nb_layers):
            layers.append(block(in_planes if i == 0 else out_planes, out_planes,
                                stride if i == 0 else 1, dropRate))
        return nn.Sequential(*layers)

    def forward(self, x):
        return self.layer(x)
class FixupWideResNet(nn.Module):
    def __init__(self, depth, widen_factor, num_classes=10, dropRate=0.0):
        super(FixupWideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert (depth - 4) % 6 == 0
        n = (depth - 4) // 6
        block = FixupBasicBlock
        self.num_layers = n * 3
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bias1 = Bias()
        self.block1 = FixupNetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        self.block2 = FixupNetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate)
        self.block3 = FixupNetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate)
        self.bias2 = Bias()
        self.relu = nn.ReLU(inplace=True)
        self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # Fixup initialization: rescale the first conv of each block, zero the second
        for m in self.modules():
            if isinstance(m, FixupBasicBlock):
                conv = m.conv1
                k = conv.weight.shape[0] * np.prod(conv.weight.shape[2:])
                nn.init.normal_(conv.weight, mean=0,
                                std=np.sqrt(2.0 / k) * self.num_layers ** -0.5)
                nn.init.constant_(m.conv2.weight, 0)
                if m.convShortcut is not None:
                    cs = m.convShortcut
                    k = cs.weight.shape[0] * np.prod(cs.weight.shape[2:])
                    nn.init.normal_(cs.weight, mean=0, std=np.sqrt(2.0 / k))
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.weight, 0)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        out = self.bias1(self.conv1(x))
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(out)
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        return self.fc(self.bias2(out))
def main(args):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    util.set_seed(args.seed)
    datagen = du.PermutedMnistGenerator(data_path=args.data_root, num_tasks=args.num_tasks,
                                        download=args.download)
    model = util.get_model(model_class='MLP')
    model.to(device)
    backend = AsdlGGN if args.approx_type == 'ggn' else AsdlEF
    prior_mean = torch.zeros_like(parameters_to_vector(model.parameters()))
    la = Laplace(model, 'classification', subset_of_weights='all',
                 hessian_structure=args.hessian_structure, prior_mean=prior_mean,
                 prior_precision=args.prior_prec_init, backend=backend)
    test_loaders = list()
    results = list()
    for task_id in range(args.num_tasks):
        print()
        print(f'Task {task_id + 1}')
        print()
        train_loader, test_loader = datagen.next_task(args.batch_size)
        test_loaders.append(test_loader)
        train(args, model, la, train_loader, task_id, device)
        la.fit(train_loader, override=False)
        test_accs = test(args, la, test_loaders, device)
        results.append(test_accs)
        print()
        print(f'Test accuracies after task {task_id + 1}:')
        print(test_accs, np.nanmean(test_accs))
        print()
        print('---------------------------------------------------------------')
    results = np.stack(results)
    if args.run_name is None:
        results_path = f'{args.benchmark}_marglik_{args.hessian_structure}_{args.seed}.npy'
    else:
        results_path = f'{args.run_name}.npy'
    np.save(os.path.join(args.results_root, results_path), results)
def train(args, model, la, train_loader, task_id, device):
    model.train()
    N = len(train_loader.dataset)
    loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    n_steps = args.num_epochs * len(train_loader)
    scheduler = CosineAnnealingLR(optimizer, n_steps, eta_min=args.lr * 0.001)
    for epoch in range(args.num_epochs):
        train_loss = 0.0
        for X, y in train_loader:
            f = model(X.to(device))
            mean = parameters_to_vector(model.parameters())
            loss = loss_fn(f, y.to(device)) - args.lam * la.log_prob(mean) / N
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step()
            train_loss += loss.item() * len(X)
        train_loss /= N
        if (epoch + 1) % 5 != 0:
            continue
        marglik = optimize_marglik(la, train_loader)
        print(f'Task {task_id + 1} epoch {epoch + 1} - train loss: {train_loss:.3f}, '
              f'neg. log marglik: {marglik:.3f}')
def optimize_marglik(la, train_loader):
    prior_prec = la.prior_precision
    hyper_la = deepcopy(la)
    hyper_la.prior_mean = la.mean
    hyper_la.fit(train_loader, override=False)
    hyper_la.optimize_prior_precision(init_prior_prec=prior_prec)
    la.prior_precision = hyper_la.prior_precision.clone()
    return -hyper_la.log_marginal_likelihood().detach().item()
@torch.no_grad()
def test(args, laplace, test_loaders, device):
    test_accs = list()
    for test_loader in test_loaders:
        correct = 0
        for X, y in test_loader:
            f = laplace(X.to(device))
            correct += (y.to(device) == f.argmax(1)).sum()
        acc = correct.item() / len(test_loader.dataset)
        test_accs.append(acc)
    # pad with NaNs for tasks that have not been seen yet
    test_accs.extend([np.nan for _ in range(args.num_tasks - len(test_accs))])
    return np.array(test_accs)
def marglik_optimization(model, train_loader, valid_loader=None, likelihood='classification',
                         prior_structure='layerwise', prior_prec_init=1.0, sigma_noise_init=1.0,
                         temperature=1.0, n_epochs=500, lr=0.001, lr_min=None, optimizer='Adam',
                         scheduler='exp', n_epochs_burnin=0, n_hypersteps=100, marglik_frequency=1,
                         lr_hyp=0.1, laplace=KronLaplace, backend=AsdlGGN, backend_kwargs=None):
    """Runs marginal-likelihood optimization training for a given model and training dataloader.

    Parameters
    ----------
    model : torch.nn.Module
        torch model
    train_loader : DataLoader
        pytorch training dataset loader
    likelihood : str
        'classification' or 'regression'
    prior_structure : str
        'scalar', 'layerwise', or 'diagonal'
    temperature : float, default=1
        factor on the likelihood for 'overcounting' data.
        Often required when using data augmentation.
    lr : float
        learning rate for the model optimizer
    lr_min : float
        minimum learning rate; defaults to lr and hence no decay.
        To have the learning rate decay from 1e-3 to 1e-6, set lr=1e-3 and lr_min=1e-6.
    optimizer : str
        either 'Adam' or 'SGD'
    scheduler : str
        either 'exp' for exponential or 'cos' for cosine decay towards lr_min
    """
    if lr_min is None:
        lr_min = lr
    if backend_kwargs is None:
        backend_kwargs = dict()
    device = parameters_to_vector(model.parameters()).device
    N = len(train_loader.dataset)

    last_layer = laplace in [DiagLLLaplace, KronLLLaplace, FullLLLaplace]
    if last_layer:
        assert prior_structure != 'layerwise', 'Not supported'
        lap = laplace(model, likelihood, sigma_noise=1.0, prior_precision=1.0,
                      backend=backend, **backend_kwargs)
        X, _ = next(iter(train_loader))
        with torch.no_grad():
            lap.model.find_last_layer(X.to(device))
        last_layer_model = lap.model.last_layer
        P = len(parameters_to_vector(last_layer_model.parameters()))
    else:
        H = len(list(model.parameters()))
        P = len(parameters_to_vector(model.parameters()))

    # differentiable hyperparameters: log prior precision (and log noise for regression)
    hyperparameters = list()
    log_prior_prec_init = np.log(temperature * prior_prec_init)
    if prior_structure == 'scalar':
        log_prior_prec = log_prior_prec_init * torch.ones(1, device=device)
    elif prior_structure == 'layerwise':
        log_prior_prec = log_prior_prec_init * torch.ones(H, device=device)
    elif prior_structure == 'diagonal':
        log_prior_prec = log_prior_prec_init * torch.ones(P, device=device)
    else:
        raise ValueError(f'Invalid prior structure {prior_structure}')
    log_prior_prec.requires_grad = True
    hyperparameters.append(log_prior_prec)

    if likelihood == 'classification':
        criterion = CrossEntropyLoss(reduction='mean')
        sigma_noise = 1.0
    elif likelihood == 'regression':
        criterion = MSELoss(reduction='mean')
        log_sigma_noise_init = np.log(sigma_noise_init)
        log_sigma_noise = log_sigma_noise_init * torch.ones(1, device=device)
        log_sigma_noise.requires_grad = True
        hyperparameters.append(log_sigma_noise)

    if optimizer == 'Adam':
        optimizer = Adam(model.parameters(), lr=lr)
    elif optimizer == 'SGD':
        # fixup parameters (biases and scales) get a 10x smaller learning rate
        is_fixup = lambda param: param.size() == torch.Size([1])
        fixup_params = [p for p in model.parameters() if is_fixup(p)]
        wrn_params = [p for p in model.parameters() if not is_fixup(p)]
        params = [{'params': wrn_params}, {'params': fixup_params, 'lr': lr / 10.0}]
        optimizer = SGD(params, lr=lr, momentum=0.9, nesterov=True)
    else:
        raise ValueError(f'Invalid optimizer {optimizer}')

    n_steps = n_epochs * len(train_loader)
    if scheduler == 'exp':
        min_lr_factor = lr_min / lr
        gamma = np.exp(np.log(min_lr_factor) / n_steps)
        scheduler = ExponentialLR(optimizer, gamma=gamma)
    elif scheduler == 'cos':
        scheduler = CosineAnnealingLR(optimizer, n_steps, eta_min=lr_min)
    else:
        raise ValueError(f'Invalid scheduler {scheduler}')

    hyper_optimizer = Adam(hyperparameters, lr=lr_hyp)

    best_marglik = np.inf
    best_model_dict = None
    best_precision = None
    losses = list()
    margliks = list()

    for epoch in range(1, n_epochs + 1):
        epoch_loss = 0
        epoch_perf = 0
        for X, y in train_loader:
            X, y = X.to(device), y.to(device)
            optimizer.zero_grad()
            if likelihood == 'regression':
                sigma_noise = torch.exp(log_sigma_noise).detach()
                crit_factor = temperature / (2 * sigma_noise.square())
            else:
                crit_factor = temperature
            prior_prec = torch.exp(log_prior_prec).detach()
            if last_layer:
                theta = parameters_to_vector(last_layer_model.parameters())
                delta = util.expand_prior_precision(prior_prec, last_layer_model)
            else:
                theta = parameters_to_vector(model.parameters())
                delta = util.expand_prior_precision(prior_prec, model)
            f = model(X)
            loss = criterion(f, y) + 0.5 * (delta * theta) @ theta / N / crit_factor
            loss.backward()
            optimizer.step()
            epoch_loss += loss.cpu().item() / len(train_loader)
            if likelihood == 'regression':
                epoch_perf += (f.detach() - y).square().sum() / N
            else:
                epoch_perf += torch.sum(torch.argmax(f.detach(), dim=-1) == y).item() / N
            scheduler.step()
        losses.append(epoch_loss)

        if valid_loader is not None:
            with torch.no_grad():
                valid_perf = valid_performance(model, valid_loader, likelihood, device)
            logging.info(f'MARGLIK[epoch={epoch}]: network training. Loss={losses[-1]:.3f}; '
                         f'Perf={epoch_perf:.3f}; Valid perf={valid_perf:.3f}; '
                         f'lr={scheduler.get_last_lr()[0]:.7f}')
        else:
            logging.info(f'MARGLIK[epoch={epoch}]: network training. Loss={losses[-1]:.3f}; '
                         f'Perf={epoch_perf:.3f}; lr={scheduler.get_last_lr()[0]:.7f}')

        if (epoch % marglik_frequency) != 0 or epoch < n_epochs_burnin:
            continue

        # fit the Laplace approximation and optimize the hyperparameters
        sigma_noise = 1 if likelihood == 'classification' else torch.exp(log_sigma_noise)
        prior_prec = torch.exp(log_prior_prec)
        lap = laplace(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_prec,
                      temperature=temperature, backend=backend, **backend_kwargs)
        lap.fit(train_loader)
        for _ in range(n_hypersteps):
            hyper_optimizer.zero_grad()
            if likelihood == 'classification':
                sigma_noise = None
            elif likelihood == 'regression':
                sigma_noise = torch.exp(log_sigma_noise)
            prior_prec = torch.exp(log_prior_prec)
            marglik = -lap.log_marginal_likelihood(prior_prec, sigma_noise)
            marglik.backward()
            hyper_optimizer.step()
            margliks.append(marglik.item())

        if margliks[-1] < best_marglik:
            best_model_dict = deepcopy(model.state_dict())
            best_precision = deepcopy(prior_prec.detach())
            best_sigma = 1 if likelihood == 'classification' else deepcopy(sigma_noise.detach())
            best_marglik = margliks[-1]
            logging.info(f'MARGLIK[epoch={epoch}]: marglik optimization. '
                         f'MargLik={best_marglik:.2f}. Saving new best model.')
        else:
            logging.info(f'MARGLIK[epoch={epoch}]: marglik optimization. '
                         f'MargLik={margliks[-1]:.2f}. No improvement over {best_marglik:.2f}')

    logging.info('MARGLIK: finished training. Recovering best model and fitting Laplace.')
    if best_model_dict is not None:
        model.load_state_dict(best_model_dict)
        sigma_noise = best_sigma
        prior_prec = best_precision
    lap = laplace(model, likelihood, sigma_noise=sigma_noise, prior_precision=prior_prec,
                  backend=backend, **backend_kwargs)
    lap.fit(train_loader)
    return lap, model, margliks, losses
def valid_performance(model, test_loader, likelihood, device):
    N = len(test_loader.dataset)
    perf = 0
    for X, y in test_loader:
        X, y = X.to(device), y.to(device)
        if likelihood == 'classification':
            perf += (torch.argmax(model(X), dim=-1) == y).sum() / N
        else:
            perf += (model(X) - y).square().sum() / N
    return perf.item()
def main(base_config, config, temperature, seed, noda):
    config_name = config.split('/')[-1].split('.')[0]
    if noda:
        assert temperature == 1
        run_name = 'CIFARNODA' + '_' + config_name + f'_{seed}'
    else:
        run_name = 'CIFAR' + '_' + config_name + f'_{temperature}_{seed}'
    cmd_train = (f'python marglik_training/train_marglik.py --run_name {run_name} '
                 f'--config {base_config} {config} --seed {seed} --temperature {temperature}')
    print('RUN:', cmd_train)
    os.system(cmd_train)
    if 'map' in base_config and 'marglik' not in base_config:
        return
    model_path = 'models/' + run_name + '_model.pt'
    delta_path = 'models/' + run_name + '_delta.pt'
    for benchmark in ['CIFAR-10-OOD', 'CIFAR-10-C']:
        for pred in ['map', 'mc', 'probit', 'bridge']:
            bs = 16
            meth = 'map' if pred == 'map' else 'laplace'
            pred_cli = (f'--method {meth}' if meth == 'map'
                        else f'--method {meth} --link_approx {pred}')
            if noda:
                benchmark_name = 'CIFARNODA' + benchmark[5:]
                name = f'{benchmark_name}_{config_name}_{pred}_onmarglik_{seed}'
                flag = '--noda'
            else:
                name = f'{benchmark}_{config_name}_{pred}_onmarglik_{temperature}_{seed}'
                flag = ''
            cmd = (f'python uq.py --benchmark {benchmark} --model WRN16-4-fixup {pred_cli} '
                   f'--run_name {name} --prior_precision {delta_path} --model_path {model_path} '
                   f'--config {config} --seed 711 --batch_size {bs} '
                   f'--temperature {temperature} {flag}')
            print('SUBMIT:', cmd)
            os.system(cmd)
def main(base_config, config, seed):
    config_name = config.split('/')[-1].split('.')[0]
    run_name = 'FMNIST' + '_' + config_name + f'_{seed}'
    model_path = 'models/' + run_name + '_model.pt'
    delta_path = 'models/' + run_name + '_delta.pt'
    cmd_train = (f'python marglik_training/train_marglik.py --run_name {run_name} '
                 f'--config {base_config} {config} --seed {seed}')
    print('RUN:', cmd_train)
    # only train if the model checkpoint does not exist yet
    if model_path.split('/')[1] not in os.listdir('models/'):
        os.system(cmd_train)
    if 'map' in base_config and 'marglik' not in base_config:
        return
    for benchmark in ['R-FMNIST', 'FMNIST-OOD']:
        for pred in ['map', 'mc', 'probit', 'bridge']:
            meth = 'map' if pred == 'map' else 'laplace'
            pred_cli = (f'--method {meth}' if meth == 'map'
                        else f'--method {meth} --link_approx {pred}')
            name = f'{benchmark}_{config_name}_{pred}_onmarglik_{seed}'
            # skip runs whose results already exist
            if (name + '.npy') in os.listdir('results/'):
                continue
            cmd = (f'python uq.py --benchmark {benchmark} --model LeNet {pred_cli} '
                   f'--run_name {name} --prior_precision {delta_path} '
                   f'--model_path {model_path} --config {config} --seed 711')
            print('SUBMIT:', cmd)
            os.system(cmd)
def main(base_config, config, seed):
    config_name = config.split('/')[-1].split('.')[0]
    run_name = 'MNIST' + '_' + config_name + f'_{seed}'
    cmd_train = (f'python marglik_training/train_marglik.py --run_name {run_name} '
                 f'--config {base_config} {config} --seed {seed}')
    print('RUN:', cmd_train)
    os.system(cmd_train)
    model_path = 'models/' + run_name + '_model.pt'
    delta_path = 'models/' + run_name + '_delta.pt'
    for benchmark in ['R-MNIST', 'MNIST-OOD']:
        for pred in ['map', 'mc', 'probit', 'bridge']:
            meth = 'map' if pred == 'map' else 'laplace'
            pred_cli = (f'--method {meth}' if meth == 'map'
                        else f'--method {meth} --link_approx {pred}')
            name = f'{benchmark}_{config_name}_{pred}_onmarglik_{seed}'
            cmd = (f'python uq.py --benchmark {benchmark} --model LeNet {pred_cli} '
                   f'--run_name {name} --prior_precision {delta_path} '
                   f'--model_path {model_path} --config {config} --seed 711')
            print('SUBMIT:', cmd)
            os.system(cmd)
def get_laplace_class(flavor, last_layer):
    if flavor == 'diag':
        return DiagLLLaplace if last_layer else DiagLaplace
    elif flavor == 'kron':
        return KronLLLaplace if last_layer else KronLaplace
    elif flavor == 'full':
        return FullLLLaplace if last_layer else FullLaplace
    raise ValueError(f'Invalid Laplace flavor {flavor}')
def get_backend(backend, approx_type):
    if backend == 'kazuki':
        return AsdlGGN if approx_type == 'ggn' else AsdlEF
    elif backend == 'backpack':
        return BackPackGGN if approx_type == 'ggn' else BackPackEF
    raise ValueError(f'Invalid backend {backend}')
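# Example (illustrative): combining the two factories with the laplace-torch
# API; `model` and `train_loader` are assumed to exist.
LaplaceClass = get_laplace_class('kron', last_layer=True)  # -> KronLLLaplace
Backend = get_backend('kazuki', 'ggn')                     # -> AsdlGGN
la = LaplaceClass(model, 'classification', backend=Backend)
la.fit(train_loader)
la.optimize_prior_precision(method='marglik')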
def main(args):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    args.prior_precision = util.get_prior_precision(args, device)
    util.set_seed(args.seed)
    in_data_loaders, ids, no_loss_acc = du.get_in_distribution_data_loaders(args, device)
    train_loader, val_loader, in_test_loader = in_data_loaders
    mixture_components = fit_models(args, train_loader, val_loader, device)
    metrics = evaluate_models(args, mixture_components, in_test_loader, ids, no_loss_acc, device)
    util.save_results(args, metrics)
def fit_models(args, train_loader, val_loader, device):
    """Load pre-trained weights, fit inference methods, and tune hyperparameters."""
    mixture_components = list()
    for model_idx in range(args.nr_components):
        model = util.load_pretrained_model(args, model_idx, device)
        if args.method in ['laplace', 'mola']:
            if type(args.prior_precision) is str:  # file path
                prior_precision = torch.load(args.prior_precision, map_location=device)
            elif type(args.prior_precision) is float:
                prior_precision = args.prior_precision
            else:
                raise ValueError('prior precision has to be either float or string (file path)')
            Backend = get_backend(args.backend, args.approx_type)
            optional_args = dict()
            if args.subset_of_weights == 'last_layer':
                optional_args['last_layer_name'] = args.last_layer_name
            print('Fitting Laplace approximation...')
            model = Laplace(model, args.likelihood, subset_of_weights=args.subset_of_weights,
                            hessian_structure=args.hessian_structure,
                            prior_precision=prior_precision, temperature=args.temperature,
                            backend=Backend, **optional_args)
            model.fit(train_loader)
            if args.optimize_prior_precision is not None and args.method == 'laplace':
                if type(prior_precision) is float and args.prior_structure != 'scalar':
                    n = model.n_params if args.prior_structure == 'all' else model.n_layers
                    prior_precision = prior_precision * torch.ones(n, device=device)
                print('Optimizing prior precision for Laplace approximation...')
                verbose_prior = args.prior_structure == 'scalar'
                model.optimize_prior_precision(method=args.optimize_prior_precision,
                                               init_prior_prec=prior_precision,
                                               val_loader=val_loader, pred_type=args.pred_type,
                                               link_approx=args.link_approx,
                                               n_samples=args.n_samples, verbose=verbose_prior)
        elif args.method in ['swag', 'multi-swag']:
            print('Fitting SWAG...')
            model = fit_swag_and_precompute_bn_params(
                model, device, train_loader, args.swag_n_snapshots, args.swag_lr,
                args.swag_c_epochs, args.swag_c_batches, args.data_parallel,
                args.n_samples, args.swag_bn_update_subset)
        elif (args.method == 'map' and args.likelihood == 'classification'
              and args.use_temperature_scaling):
            print('Fitting temperature scaling model on validation data...')
            all_y_prob = [model(d[0].to(device)).detach().cpu() for d in val_loader]
            all_y_prob = torch.cat(all_y_prob, dim=0)
            all_y_true = torch.cat([d[1] for d in val_loader], dim=0)
            temperature_scaling_model = pycalib.calibration_methods.TemperatureScaling()
            temperature_scaling_model.fit(all_y_prob.numpy(), all_y_true.numpy())
            model = (model, temperature_scaling_model)
        if args.likelihood == 'regression' and args.sigma_noise is None:
            print('Optimizing noise standard deviation on validation data...')
            args.sigma_noise = wu.optimize_noise_standard_deviation(model, val_loader, device)
        mixture_components.append(model)
    return mixture_components
def evaluate_models(args, mixture_components, in_test_loader, ids, no_loss_acc, device):
    """Evaluate the models and return relevant evaluation metrics."""
    metrics = []
    for i, id in enumerate(ids):
        # the first loader is the in-distribution test set; the rest are OOD/shifted sets
        test_loader = in_test_loader if i == 0 else du.get_ood_test_loader(args, id)
        test_output, test_time = util.timing(lambda: test(
            mixture_components, test_loader, args.method, pred_type=args.pred_type,
            link_approx=args.link_approx, n_samples=args.n_samples, device=device,
            no_loss_acc=no_loss_acc, likelihood=args.likelihood, sigma_noise=args.sigma_noise))
        some_metrics, all_y_prob, all_y_var = test_output
        some_metrics['test_time'] = test_time
        if i == 0:
            all_y_prob_in = all_y_prob.clone()
        more_metrics = compute_metrics(i, id, all_y_prob, test_loader, all_y_prob_in,
                                       all_y_var, args)
        metrics.append({**some_metrics, **more_metrics})
        print(', '.join([f'{k}: {v:.4f}' for k, v in metrics[-1].items()]))
    return metrics
def compute_metrics(i, id, all_y_prob, test_loader, all_y_prob_in, all_y_var, args):
    """Compute evaluation metrics."""
    metrics = {}
    if args.benchmark in ['R-MNIST', 'R-FMNIST', 'CIFAR-10-C', 'ImageNet-C']:
        print(f'{args.benchmark} with distribution shift intensity {i}')
        labels = torch.cat([data[1] for data in test_loader])
        metrics['brier'] = util.get_brier_score(all_y_prob, labels)
        metrics['ece'], metrics['mce'] = util.get_calib(all_y_prob, labels)
    if args.benchmark in ['MNIST-OOD', 'FMNIST-OOD', 'CIFAR-10-OOD']:
        print(f'{args.benchmark} - dataset: {id}')
        if i > 0:
            metrics['auroc'] = util.get_auroc(all_y_prob_in, all_y_prob)
            metrics['fpr95'], _ = util.get_fpr95(all_y_prob_in, all_y_prob)
    if args.benchmark == 'WILDS-poverty':
        print(f'{args.benchmark} with distribution shift intensity {i}')
        labels = torch.cat([data[1] for data in test_loader])
        metrics['calib_regression'] = util.get_calib_regression(
            all_y_prob.numpy(), all_y_var.sqrt().numpy(), labels.numpy())
    return metrics
def get_in_distribution_data_loaders(args, device):
    """Load in-distribution datasets and return data loaders."""
    if args.benchmark in ['R-MNIST', 'MNIST-OOD']:
        if args.benchmark == 'R-MNIST':
            no_loss_acc = False
            ids = [0, 15, 30, 45, 60, 75, 90, 105, 120, 135, 150, 165, 180]
        else:
            no_loss_acc = True
            ids = ['MNIST', 'EMNIST', 'FMNIST', 'KMNIST']
        train_loader, val_loader, in_test_loader = get_mnist_loaders(
            args.data_root, model_class=args.model, batch_size=args.batch_size,
            val_size=args.val_set_size, download=args.download, device=device)
    elif args.benchmark in ['R-FMNIST', 'FMNIST-OOD']:
        if args.benchmark == 'R-FMNIST':
            no_loss_acc = False
            ids = [0, 15, 30, 45, 60, 75, 90, 105, 120, 135, 150, 165, 180]
        else:
            no_loss_acc = True
            ids = ['FMNIST', 'EMNIST', 'MNIST', 'KMNIST']
        train_loader, val_loader, in_test_loader = get_fmnist_loaders(
            args.data_root, model_class=args.model, batch_size=args.batch_size,
            val_size=args.val_set_size, download=args.download, device=device)
    elif args.benchmark in ['CIFAR-10-C', 'CIFAR-10-OOD']:
        if args.benchmark == 'CIFAR-10-C':
            no_loss_acc = False
            ids = [0, 1, 2, 3, 4, 5]
        else:
            no_loss_acc = True
            ids = ['CIFAR-10', 'SVHN', 'LSUN', 'CIFAR-100']
        train_loader, val_loader, in_test_loader = get_cifar10_loaders(
            args.data_root, batch_size=args.batch_size, train_batch_size=args.batch_size,
            val_size=args.val_set_size, download=args.download,
            data_augmentation=not args.noda)
    elif args.benchmark == 'ImageNet-C':
        no_loss_acc = False
        ids = [0, 1, 2, 3, 4, 5]
        train_loader, val_loader, in_test_loader = get_imagenet_loaders(
            args.data_root, batch_size=args.batch_size, train_batch_size=args.batch_size,
            val_size=args.val_set_size)
    elif 'WILDS' in args.benchmark:
        dataset = args.benchmark[6:]
        no_loss_acc = False
        ids = [f'{dataset}-id', f'{dataset}-ood']
        train_loader, val_loader, in_test_loader = wu.get_wilds_loaders(
            dataset, args.data_root, args.data_fraction, args.model_seed)
    return (train_loader, val_loader, in_test_loader), ids, no_loss_acc
def get_ood_test_loader(args, id):
    """Load out-of-distribution test data and return a data loader."""
    if args.benchmark == 'R-MNIST':
        _, test_loader = get_rotated_mnist_loaders(id, args.data_root,
                                                   model_class=args.model,
                                                   download=args.download)
    elif args.benchmark == 'R-FMNIST':
        _, test_loader = get_rotated_fmnist_loaders(id, args.data_root,
                                                    model_class=args.model,
                                                    download=args.download)
    elif args.benchmark == 'CIFAR-10-C':
        test_loader = load_corrupted_cifar10(id, data_dir=args.data_root,
                                             batch_size=args.batch_size,
                                             cuda=torch.cuda.is_available())
    elif args.benchmark == 'ImageNet-C':
        test_loader = load_corrupted_imagenet(id, data_dir=args.data_root,
                                              batch_size=args.batch_size,
                                              cuda=torch.cuda.is_available())
    elif args.benchmark in ['MNIST-OOD', 'FMNIST-OOD']:
        _, test_loader = get_mnist_ood_loaders(id, data_path=args.data_root,
                                               batch_size=args.batch_size,
                                               download=args.download)
    elif args.benchmark == 'CIFAR-10-OOD':
        _, test_loader = get_cifar10_ood_loaders(id, data_path=args.data_root,
                                                 batch_size=args.batch_size,
                                                 download=args.download)
    elif 'WILDS' in args.benchmark:
        dataset = args.benchmark[6:]
        test_loader = wu.get_wilds_ood_test_loader(dataset, args.data_root, args.data_fraction)
    return test_loader
def val_test_split(dataset, val_size=5000, batch_size=512, num_workers=5, pin_memory=False):
    test_size = len(dataset) - val_size
    dataset_val, dataset_test = data_utils.random_split(
        dataset, (val_size, test_size), generator=torch.Generator().manual_seed(42))
    val_loader = data_utils.DataLoader(dataset_val, batch_size=batch_size, shuffle=False,
                                       num_workers=num_workers, pin_memory=pin_memory)
    test_loader = data_utils.DataLoader(dataset_test, batch_size=batch_size, shuffle=False,
                                        num_workers=num_workers, pin_memory=pin_memory)
    return val_loader, test_loader
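# Example (illustrative): carving a fixed 2000-image validation split out of
# the CIFAR-10 test set; the seeded generator inside val_test_split makes the
# split reproducible across runs.
from torchvision import datasets, transforms

test_set = datasets.CIFAR10('data', train=False, download=True,
                            transform=transforms.ToTensor())
val_loader, test_loader = val_test_split(test_set, val_size=2000, batch_size=512)
print(len(val_loader.dataset), len(test_loader.dataset))  # 2000 8000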
def get_cifar10_loaders(data_path, batch_size=512, val_size=2000, train_batch_size=128,
                        download=False, data_augmentation=True):
    mean = [x / 255 for x in [125.3, 123.0, 113.9]]
    std = [x / 255 for x in [63.0, 62.1, 66.7]]
    tforms = [transforms.ToTensor(), transforms.Normalize(mean, std)]
    tforms_test = transforms.Compose(tforms)
    if data_augmentation:
        tforms_train = transforms.Compose(
            [transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4)] + tforms)
    else:
        tforms_train = tforms_test
    train_set = datasets.CIFAR10(data_path, train=True, transform=tforms_train,
                                 download=download)
    val_test_set = datasets.CIFAR10(data_path, train=False, transform=tforms_test,
                                    download=download)
    train_loader = data_utils.DataLoader(train_set, batch_size=train_batch_size, shuffle=True)
    val_loader, test_loader = val_test_split(val_test_set, batch_size=batch_size,
                                             val_size=val_size)
    return train_loader, val_loader, test_loader
def get_imagenet_loaders(data_path, batch_size=128, val_size=2000, train_batch_size=128,
                         num_workers=5):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    tforms_test = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224),
                                      transforms.ToTensor(), normalize])
    tforms_train = transforms.Compose([transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(), normalize])
    data_path_train = os.path.join(data_path, 'ImageNet2012/train')
    data_path_val = os.path.join(data_path, 'ImageNet2012/val')
    train_set = datasets.ImageFolder(data_path_train, transform=tforms_train)
    val_test_set = datasets.ImageFolder(data_path_val, transform=tforms_test)
    train_loader = data_utils.DataLoader(train_set, batch_size=train_batch_size, shuffle=True,
                                         num_workers=num_workers, pin_memory=True)
    val_loader, test_loader = val_test_split(val_test_set, batch_size=batch_size,
                                             val_size=val_size, num_workers=num_workers,
                                             pin_memory=True)
    return train_loader, val_loader, test_loader
def get_mnist_loaders(data_path, batch_size=512, model_class='LeNet', train_batch_size=128,
                      val_size=2000, download=False, device='cpu'):
    if model_class == 'MLP':
        tforms = transforms.Compose([transforms.ToTensor(), ReshapeTransform((-1,))])
    else:
        tforms = transforms.ToTensor()
    train_set = datasets.MNIST(data_path, train=True, transform=tforms, download=download)
    val_test_set = datasets.MNIST(data_path, train=False, transform=tforms, download=download)
    # pre-load the full training set onto the target device for fast iteration
    Xys = [train_set[i] for i in range(len(train_set))]
    Xs = torch.stack([e[0] for e in Xys]).to(device)
    ys = torch.stack([torch.tensor(e[1]) for e in Xys]).to(device)
    train_loader = FastTensorDataLoader(Xs, ys, batch_size=batch_size, shuffle=True)
    val_loader, test_loader = val_test_split(val_test_set, batch_size=batch_size,
                                             val_size=val_size)
    return train_loader, val_loader, test_loader
def get_fmnist_loaders(data_path, batch_size=512, model_class='LeNet', train_batch_size=128,
                       val_size=2000, download=False, device='cpu'):
    if model_class == 'MLP':
        tforms = transforms.Compose([transforms.ToTensor(), ReshapeTransform((-1,))])
    else:
        tforms = transforms.ToTensor()
    train_set = datasets.FashionMNIST(data_path, train=True, transform=tforms,
                                      download=download)
    val_test_set = datasets.FashionMNIST(data_path, train=False, transform=tforms,
                                         download=download)
    Xys = [train_set[i] for i in range(len(train_set))]
    Xs = torch.stack([e[0] for e in Xys]).to(device)
    ys = torch.stack([torch.tensor(e[1]) for e in Xys]).to(device)
    train_loader = FastTensorDataLoader(Xs, ys, batch_size=batch_size, shuffle=True)
    val_loader, test_loader = val_test_split(val_test_set, batch_size=batch_size,
                                             val_size=val_size)
    return train_loader, val_loader, test_loader
def get_rotated_mnist_loaders(angle, data_path, model_class='LeNet', download=False): if (model_class == 'MLP'): shift_tforms = transforms.Compose([RotationTransform(angle), transforms.ToTensor(), ReshapeTransform(((- 1),))]) else: shift_tforms = transforms.Compose([RotationTransform(angle), transforms.ToTensor()]) rotated_mnist_val_test_set = datasets.MNIST(data_path, train=False, transform=shift_tforms, download=download) (shift_val_loader, shift_test_loader) = val_test_split(rotated_mnist_val_test_set, val_size=2000) return (shift_val_loader, shift_test_loader)
def get_rotated_fmnist_loaders(angle, data_path, model_class='LeNet', download=False): if (model_class == 'MLP'): shift_tforms = transforms.Compose([RotationTransform(angle), transforms.ToTensor(), ReshapeTransform(((- 1),))]) else: shift_tforms = transforms.Compose([RotationTransform(angle), transforms.ToTensor()]) rotated_fmnist_val_test_set = datasets.FashionMNIST(data_path, train=False, transform=shift_tforms, download=download) (shift_val_loader, shift_test_loader) = val_test_split(rotated_fmnist_val_test_set, val_size=2000) return (shift_val_loader, shift_test_loader)
class ReshapeTransform(): def __init__(self, new_size): self.new_size = new_size def __call__(self, img): return torch.reshape(img, self.new_size)
class RotationTransform(): 'Rotate the image by the given angle.' def __init__(self, angle): self.angle = angle def __call__(self, x): return TF.rotate(x, self.angle)
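# --- Illustrative usage (ours, not part of the original utilities) ---
# A minimal sketch of how the two custom transforms compose; the blank 28x28
# PIL image stands in for an MNIST digit, and `_demo_transforms` is hypothetical.
def _demo_transforms():
    img = Image.fromarray(np.zeros((28, 28), dtype=np.uint8))
    tforms = transforms.Compose([RotationTransform(45), transforms.ToTensor(), ReshapeTransform((-1,))])
    x = tforms(img)
    print(x.shape)  # torch.Size([784]): rotated, tensorized, then flattened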
def uniform_noise(dataset, delta=1, size=5000, batch_size=512): if (dataset in ['MNIST', 'FMNIST', 'R-MNIST']): shape = (1, 28, 28) elif (dataset in ['SVHN', 'CIFAR10', 'CIFAR100', 'CIFAR-10-C']): shape = (3, 32, 32) elif (dataset in ['ImageNet', 'ImageNet-C']): shape = (3, 256, 256) else: raise ValueError(f'Unrecognized dataset {dataset}') data = (delta * torch.rand(((size,) + shape))) train = data_utils.TensorDataset(data, torch.zeros_like(data)) loader = torch.utils.data.DataLoader(train, batch_size=batch_size, shuffle=False, num_workers=1) return loader
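# --- Illustrative usage (ours, not part of the original utilities) ---
# A sketch that builds a uniform-noise OOD loader matching the MNIST input
# shape; note the labels are dummy zero tensors. `_demo_uniform_noise` is hypothetical.
def _demo_uniform_noise():
    loader = uniform_noise('MNIST', delta=1, size=1024, batch_size=256)
    x, _ = next(iter(loader))
    print(x.shape, x.min().item(), x.max().item())  # (256, 1, 28, 28), values in [0, 1)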
class DatafeedImage(torch.utils.data.Dataset): def __init__(self, x_train, y_train, transform=None): self.x_train = x_train self.y_train = y_train self.transform = transform def __getitem__(self, index): img = self.x_train[index] img = Image.fromarray(img) if (self.transform is not None): img = self.transform(img) return (img, self.y_train[index]) def __len__(self): return len(self.x_train)
def load_corrupted_cifar10(severity, data_dir='data', batch_size=256, cuda=True, workers=1): ' load corrupted CIFAR10 dataset ' x_file = (data_dir + ('/CIFAR-10-C/CIFAR10_c%d.npy' % severity)) np_x = np.load(x_file) y_file = (data_dir + '/CIFAR-10-C/CIFAR10_c_labels.npy') np_y = np.load(y_file).astype(np.int64) transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.2435, 0.2616))]) dataset = DatafeedImage(np_x, np_y, transform) dataset = data_utils.Subset(dataset, torch.randint(len(dataset), (10000,))) loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=cuda) return loader
def load_corrupted_imagenet(severity, data_dir='data', batch_size=128, cuda=True, workers=1): ' load corrupted ImageNet dataset ' normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transform = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]) corruption_types = ['brightness', 'contrast', 'defocus_blur', 'elastic_transform', 'fog', 'frost', 'gaussian_blur', 'gaussian_noise', 'glass_blur', 'impulse_noise', 'jpeg_compression', 'motion_blur', 'pixelate', 'saturate', 'shot_noise', 'snow', 'spatter', 'speckle_noise', 'zoom_blur'] dsets = list() for c in corruption_types: path = os.path.join(data_dir, ((('ImageNet-C/' + c) + '/') + str(severity))) dsets.append(datasets.ImageFolder(path, transform=transform)) dataset = data_utils.ConcatDataset(dsets) loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=cuda) return loader
def get_mnist_ood_loaders(ood_dataset, data_path='./data', batch_size=512, download=False): 'Get out-of-distribution val/test sets and val/test loaders (in-distribution: MNIST/FMNIST)' tforms = transforms.ToTensor() if (ood_dataset == 'FMNIST'): fmnist_val_test_set = datasets.FashionMNIST(data_path, train=False, transform=tforms, download=download) (val_loader, test_loader) = val_test_split(fmnist_val_test_set, batch_size=batch_size, val_size=0) elif (ood_dataset == 'EMNIST'): emnist_val_test_set = datasets.EMNIST(data_path, split='digits', train=False, transform=tforms, download=download) (val_loader, test_loader) = val_test_split(emnist_val_test_set, batch_size=batch_size, val_size=0) elif (ood_dataset == 'KMNIST'): kmnist_val_test_set = datasets.KMNIST(data_path, train=False, transform=tforms, download=download) (val_loader, test_loader) = val_test_split(kmnist_val_test_set, batch_size=batch_size, val_size=0) elif (ood_dataset == 'MNIST'): mnist_val_test_set = datasets.MNIST(data_path, train=False, transform=tforms, download=download) (val_loader, test_loader) = val_test_split(mnist_val_test_set, batch_size=batch_size, val_size=0) else: raise ValueError('Choose one out of FMNIST, EMNIST, MNIST, and KMNIST.') return (val_loader, test_loader)
def get_cifar10_ood_loaders(ood_dataset, data_path='./data', batch_size=512, download=False): 'Get out-of-distribution val/test sets and val/test loaders (in-distribution: CIFAR-10)' if (ood_dataset == 'SVHN'): svhn_tforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614))]) svhn_val_test_set = datasets.SVHN(data_path, split='test', transform=svhn_tforms, download=download) (val_loader, test_loader) = val_test_split(svhn_val_test_set, batch_size=batch_size, val_size=0) elif (ood_dataset == 'LSUN'): lsun_tforms = transforms.Compose([transforms.Resize(size=(32, 32)), transforms.ToTensor()]) lsun_test_set = datasets.LSUN(data_path, classes=['classroom_val'], transform=lsun_tforms) val_loader = None test_loader = data_utils.DataLoader(lsun_test_set, batch_size=batch_size, shuffle=False) elif (ood_dataset == 'CIFAR-100'): cifar100_tforms = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))]) cifar100_val_test_set = datasets.CIFAR100(data_path, train=False, transform=cifar100_tforms, download=download) (val_loader, test_loader) = val_test_split(cifar100_val_test_set, batch_size=batch_size, val_size=0) else: raise ValueError('Choose one out of SVHN, LSUN, and CIFAR-100.') return (val_loader, test_loader)
class FastTensorDataLoader(): '\n Source: https://github.com/hcarlens/pytorch-tabular/blob/master/fast_tensor_data_loader.py\n and https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6\n ' def __init__(self, *tensors, batch_size=32, shuffle=False): assert all(((t.shape[0] == tensors[0].shape[0]) for t in tensors)) self.tensors = tensors self.dataset = tensors[0] self.dataset_len = self.tensors[0].shape[0] self.batch_size = batch_size self.shuffle = shuffle (n_batches, remainder) = divmod(self.dataset_len, self.batch_size) if (remainder > 0): n_batches += 1 self.n_batches = n_batches def __iter__(self): if self.shuffle: r = torch.randperm(self.dataset_len) self.tensors = [t[r] for t in self.tensors] self.i = 0 return self def __next__(self): if (self.i >= self.dataset_len): raise StopIteration batch = tuple((t[self.i:(self.i + self.batch_size)] for t in self.tensors)) self.i += self.batch_size return batch def __len__(self): return self.n_batches
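# --- Illustrative usage (ours, not part of the original utilities) ---
# A self-contained sketch: FastTensorDataLoader avoids per-item Dataset
# indexing by slicing whole in-memory tensors, which is much faster than a
# standard DataLoader for small datasets. `_demo_fast_loader` is hypothetical.
def _demo_fast_loader():
    Xs, ys = torch.randn(1000, 10), torch.randint(0, 2, (1000,))
    loader = FastTensorDataLoader(Xs, ys, batch_size=128, shuffle=True)
    n = sum(xb.shape[0] for xb, _ in loader)
    print(len(loader), n)  # 8 batches covering all 1000 rows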
class PermutedMnistGenerator(): def __init__(self, data_path='./data', num_tasks=10, random_seed=0, download=False): self.data_path = data_path self.num_tasks = num_tasks self.random_seed = random_seed self.download = download self.out_dim = 10 self.in_dim = 784 self.task_id = 0 def next_task(self, batch_size=256, val_size=0): if (self.task_id >= self.num_tasks): raise Exception('Number of tasks exceeded!') else: np.random.seed((self.task_id + self.random_seed)) perm_inds = np.arange(self.in_dim) if (self.task_id > 0): np.random.shuffle(perm_inds) tforms = transforms.Compose([transforms.ToTensor(), transforms.Lambda((lambda x: x.view((- 1))[perm_inds]))]) train_set = datasets.MNIST(self.data_path, train=True, transform=tforms, download=self.download) val_test_set = datasets.MNIST(self.data_path, train=False, transform=tforms, download=self.download) Xys = [train_set[i] for i in range(len(train_set))] Xs = torch.stack([e[0] for e in Xys]) ys = torch.stack([torch.tensor(e[1]) for e in Xys]) train_loader = FastTensorDataLoader(Xs, ys, batch_size=batch_size, shuffle=True) (val_loader, test_loader) = val_test_split(val_test_set, batch_size=batch_size, val_size=val_size, num_workers=0) self.task_id += 1 if (val_size > 0): return (train_loader, val_loader, test_loader) return (train_loader, test_loader)
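# --- Illustrative usage (ours, not part of the original utilities) ---
# A sketch of the continual-learning loop: each call to next_task() yields an
# MNIST task under a different fixed pixel permutation. download=True and the
# helper name `_demo_permuted_mnist` are assumptions.
def _demo_permuted_mnist(num_tasks=3):
    gen = PermutedMnistGenerator(num_tasks=num_tasks, download=True)
    for task in range(num_tasks):
        train_loader, test_loader = gen.next_task(batch_size=256)
        x, _ = next(iter(train_loader))
        print(f'task {task}: train batch {tuple(x.shape)}')  # (256, 784)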
@torch.no_grad() def test(components, test_loader, prediction_mode, pred_type='glm', n_samples=100, link_approx='probit', no_loss_acc=False, device='cpu', likelihood='classification', sigma_noise=None): temperature_scaling_model = None if (prediction_mode in ['map', 'laplace', 'bbb', 'csghmc']): model = components[0] if (prediction_mode in ['map', 'bbb']): if ((prediction_mode == 'map') and isinstance(model, tuple)): (model, temperature_scaling_model) = (model[0], model[1]) model.eval() elif (prediction_mode == 'csghmc'): for m in model: m.eval() elif (prediction_mode == 'swag'): (model, swag_samples, swag_bn_params) = components[0] if ((likelihood == 'regression') and (sigma_noise is None)): raise ValueError('Must provide sigma_noise for regression!') if (likelihood == 'classification'): loss_fn = nn.NLLLoss() elif (likelihood == 'regression'): loss_fn = nn.GaussianNLLLoss(full=True) else: raise ValueError(f'Invalid likelihood type {likelihood}') all_y_true = list() all_y_prob = list() all_y_var = list() for data in tqdm(test_loader): (x, y) = (data[0].to(device), data[1].to(device)) all_y_true.append(y.cpu()) if (prediction_mode in ['ensemble', 'mola', 'multi-swag']): K = len(components) pi = (torch.ones(K, device=device) / K) y_prob = mixture_model_pred(components, x, pi, prediction_mode=prediction_mode, pred_type=pred_type, link_approx=link_approx, n_samples=n_samples, likelihood=likelihood) elif (prediction_mode == 'laplace'): y_prob = model(x, pred_type=pred_type, link_approx=link_approx, n_samples=n_samples) elif (prediction_mode == 'map'): y_prob = model(x).detach() elif (prediction_mode == 'bbb'): y_prob = torch.stack([model(x)[0].softmax((- 1)) for _ in range(10)]).mean(0) elif (prediction_mode == 'csghmc'): y_prob = torch.stack([m(x).softmax((- 1)) for m in model]).mean(0) elif (prediction_mode == 'swag'): from baselines.swag.swag import predict_swag y_prob = predict_swag(model, x, swag_samples, swag_bn_params) else: raise ValueError('Choose one out of: map, ensemble, laplace, mola, bbb, csghmc, swag, multi-swag.') if (likelihood == 'regression'): y_mean = (y_prob if (prediction_mode == 'map') else y_prob[0]) y_var = (torch.zeros_like(y_mean) if (prediction_mode == 'map') else y_prob[1].squeeze(2)) all_y_prob.append(y_mean.cpu()) all_y_var.append(y_var.cpu()) else: all_y_prob.append(y_prob.cpu()) all_y_prob = torch.cat(all_y_prob, dim=0) all_y_true = torch.cat(all_y_true, dim=0) if (temperature_scaling_model is not None): print('Calibrating predictions using temperature scaling...') all_y_prob = torch.from_numpy(temperature_scaling_model.predict_proba(all_y_prob.numpy())) elif ((prediction_mode == 'map') and (likelihood == 'classification')): all_y_prob = all_y_prob.softmax(dim=1) metrics = {} if (likelihood == 'classification'): assert torch.allclose(all_y_prob.sum((- 1)), torch.ones_like(all_y_prob.sum((- 1)))), '`all_y_prob` must contain probabilities, not logits' (c, preds) = torch.max(all_y_prob, 1) metrics['conf'] = c.mean().item() if (not no_loss_acc): if (likelihood == 'regression'): all_y_var = (torch.cat(all_y_var, dim=0) + (sigma_noise ** 2)) metrics['nll'] = loss_fn(all_y_prob, all_y_true, all_y_var).item() else: all_y_var = None metrics['nll'] = loss_fn(all_y_prob.log(), all_y_true).item() metrics['acc'] = (all_y_true == preds).float().mean().item() return (metrics, all_y_prob, all_y_var)
@torch.no_grad() def predict(dataloader, model): py = [] for (x, y) in dataloader: x = x.cuda() py.append(torch.softmax(model(x), (- 1))) return torch.cat(py, dim=0)
@torch.no_grad() def predict_ensemble(dataloader, models): py = [] for (x, y) in dataloader: x = x.cuda() _py = 0 for model in models: _py += ((1 / len(models)) * torch.softmax(model(x), (- 1))) py.append(_py) return torch.cat(py, dim=0)
@torch.no_grad() def predict_vb(dataloader, model, n_samples=1): py = [] for (x, y) in dataloader: x = x.cuda() _py = 0 for _ in range(n_samples): (f_s, _) = model(x) _py += torch.softmax(f_s, 1) _py /= n_samples py.append(_py) return torch.cat(py, dim=0)
def set_seed(seed): np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) cudnn.deterministic = True cudnn.benchmark = False
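# --- Illustrative usage (ours, not part of the original utilities) ---
# A self-contained sketch: with the same seed, both numpy and torch draws are
# reproducible. `_demo_set_seed` is hypothetical.
def _demo_set_seed():
    set_seed(0)
    a = (np.random.rand(3), torch.rand(3))
    set_seed(0)
    b = (np.random.rand(3), torch.rand(3))
    print(np.allclose(a[0], b[0]), torch.allclose(a[1], b[1]))  # True True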
def load_pretrained_model(args, model_idx, device): ' Choose appropriate architecture and load pre-trained weights ' if ('WILDS' in args.benchmark): dataset = args.benchmark[6:] model = wu.load_pretrained_wilds_model(dataset, args.models_root, device, model_idx, args.model_seed) else: model = get_model(args.model, no_dropout=args.no_dropout).to(device) if (args.benchmark in ['R-MNIST', 'MNIST-OOD']): fpath = os.path.join(args.models_root, 'lenet_mnist/lenet_mnist_{}_{}') elif (args.benchmark in ['R-FMNIST', 'FMNIST-OOD']): fpath = os.path.join(args.models_root, 'lenet_fmnist/lenet_fmnist_{}.pt') elif (args.benchmark in ['CIFAR-10-C', 'CIFAR-10-OOD']): fpath = os.path.join(args.models_root, 'wrn16_4_cifar10/wrn_16-4_cifar10_{}_{}') elif (args.benchmark == 'ImageNet-C'): fpath = os.path.join(args.models_root, 'wrn50_2_imagenet/wrn_50-2_imagenet_{}_{}') if (args.method == 'csghmc'): fname = fpath.format(args.model_seed, (model_idx + 1)) state_dicts = torch.load(fname, map_location=device) for (m, state_dict) in zip(model, state_dicts): m.load_state_dict(state_dict) m.to(device) if args.data_parallel: model = [torch.nn.DataParallel(m) for m in model] else: if (args.model_path is not None): model.load_state_dict(torch.load(args.model_path, map_location=device), strict=False) else: fname = (fpath.format(args.model_seed, (model_idx + 1)) if ('FMNIST' not in args.benchmark) else fpath.format(args.model_seed)) load_model = (model.net if ((args.benchmark in ['R-MNIST', 'MNIST-OOD']) and ('BBB' not in args.model)) else model) load_model.load_state_dict(torch.load(fname, map_location=device)) model.to(device) if (args.data_parallel and (args.method != 'csghmc')): model = torch.nn.DataParallel(model) return model
def get_model(model_class, no_dropout=False): if (model_class == 'MLP'): model = mlp.MLP([784, 100, 100, 10], act='relu') elif (model_class == 'LeNet'): model = lenet.LeNet() elif (model_class == 'LeNet-BBB-reparam'): model = lenet_bbb.LeNetBBB(estimator='reparam') elif (model_class == 'LeNet-BBB-flipout'): model = lenet_bbb.LeNetBBB(estimator='flipout') elif (model_class == 'LeNet-CSGHMC'): model = [lenet.LeNet() for _ in range(12)] elif (model_class == 'WRN16-4'): model = wrn.WideResNet(16, 4, 10, dropRate=0.3) elif (model_class == 'WRN16-4-fixup'): model = wrn_fixup.FixupWideResNet(16, 4, 10, dropRate=(0.0 if no_dropout else 0.2)) elif (model_class == 'WRN16-4-BBB-reparam'): model = wrn_bbb.WideResNetBBB(16, 4, 10, estimator='reparam') elif (model_class == 'WRN16-4-BBB-flipout'): model = wrn_bbb.WideResNetBBB(16, 4, 10, estimator='flipout') elif (model_class == 'WRN16-4-CSGHMC'): model = [wrn.WideResNet(16, 4, 10, dropRate=0) for _ in range(12)] elif (model_class == 'WRN50-2'): model = torch_models.wide_resnet50_2() else: raise ValueError('Choose one of: MLP, LeNet, LeNet-BBB-reparam, LeNet-BBB-flipout, LeNet-CSGHMC, WRN16-4, WRN16-4-fixup, WRN16-4-BBB-reparam, WRN16-4-BBB-flipout, WRN16-4-CSGHMC, or WRN50-2 as model_class.') return model
def mixture_model_pred(components, x, mixture_weights, prediction_mode='mola', pred_type='glm', link_approx='probit', n_samples=100, likelihood='classification'): if (prediction_mode == 'ensemble'): return ensemble_pred(components, x, likelihood=likelihood) out = 0.0 for (model, pi) in zip(components, mixture_weights): if (prediction_mode == 'mola'): out_prob = model(x, pred_type=pred_type, n_samples=n_samples, link_approx=link_approx) elif (prediction_mode == 'multi-swag'): from baselines.swag.swag import predict_swag (swag_model, swag_samples, swag_bn_params) = model out_prob = predict_swag(swag_model, x, swag_samples, swag_bn_params) else: raise ValueError('For now only ensemble, mola, and multi-swag are supported.') out += (pi * out_prob) return out
def ensemble_pred(components, x, likelihood='classification'): ' Make predictions for deep ensemble ' outs = [] for model in components: model.eval() out_prob = model(x).detach() if (likelihood == 'classification'): out_prob = out_prob.softmax(1) outs.append(out_prob) outs = torch.stack(outs, dim=0) out_mean = torch.mean(outs, dim=0) if (likelihood == 'regression'): out_var = torch.var(outs, dim=0).unsqueeze(2) return [out_mean, out_var] else: return out_mean
def expand_prior_precision(prior_prec, model): theta = parameters_to_vector(model.parameters()) (device, P) = (theta.device, len(theta)) assert (prior_prec.ndim == 1) if (len(prior_prec) == 1): return (torch.ones(P, device=device) * prior_prec) elif (len(prior_prec) == P): return prior_prec.to(device) else: return torch.cat([(delta * torch.ones_like(m).flatten()) for (delta, m) in zip(prior_prec, model.parameters())])
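# --- Illustrative usage (ours, not part of the original utilities) ---
# A self-contained sketch: a layer-wise precision vector (one value per
# parameter tensor) is expanded to one value per scalar parameter.
# `_demo_expand_prior_precision` is hypothetical.
def _demo_expand_prior_precision():
    model = nn.Linear(4, 2)  # two parameter tensors: weight (8 scalars), bias (2 scalars)
    prior_prec = torch.tensor([1.0, 10.0])
    full = expand_prior_precision(prior_prec, model)
    print(full.shape, full[:8].unique(), full[8:].unique())  # (10,), 1.0, 10.0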
def prior_prec_to_tensor(args, prior_prec, model): H = len(list(model.parameters())) theta = parameters_to_vector(model.parameters()) (device, P) = (theta.device, len(theta)) if (args.prior_structure == 'scalar'): log_prior_prec = torch.ones(1, device=device) elif (args.prior_structure == 'layerwise'): log_prior_prec = torch.ones(H, device=device) elif (args.prior_structure == 'all'): log_prior_prec = torch.ones(P, device=device) else: raise ValueError(f'Invalid prior structure {args.prior_structure}') return (log_prior_prec * prior_prec)
def get_auroc(py_in, py_out): (py_in, py_out) = (py_in.cpu().numpy(), py_out.cpu().numpy()) labels = np.zeros((len(py_in) + len(py_out)), dtype='int32') labels[:len(py_in)] = 1 examples = np.concatenate([py_in.max(1), py_out.max(1)]) return roc_auc_score(labels, examples)
def get_fpr95(py_in, py_out): (py_in, py_out) = (py_in.cpu().numpy(), py_out.cpu().numpy()) (conf_in, conf_out) = (py_in.max(1), py_out.max(1)) tpr = 95 perc = np.percentile(conf_in, (100 - tpr)) fpr = (np.sum((conf_out >= perc)) / len(conf_out)) return (fpr.item(), perc.item())
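# --- Illustrative usage (ours, not part of the original utilities) ---
# A self-contained sketch of the OOD metrics, scoring inputs by their max
# softmax probability. In-distribution outputs are made more confident, so
# AUROC should be well above 0.5. `_demo_ood_metrics` is hypothetical.
def _demo_ood_metrics():
    py_in = torch.softmax(5 * torch.randn(500, 10), dim=1)     # confident predictions
    py_out = torch.softmax(0.5 * torch.randn(500, 10), dim=1)  # diffuse predictions
    print(get_auroc(py_in, py_out))
    print(get_fpr95(py_in, py_out))  # (FPR at 95% TPR, confidence threshold)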
def get_brier_score(probs, targets): targets = F.one_hot(targets, num_classes=probs.shape[1]) return torch.mean(torch.sum(((probs - targets) ** 2), dim=1)).item()
def get_calib(pys, y_true, M=100): (pys, y_true) = (pys.cpu().numpy(), y_true.cpu().numpy()) (_, bins) = np.histogram(pys, M, range=(0, 1)) labels = pys.argmax(1) confs = np.max(pys, axis=1) conf_idxs = np.digitize(confs, bins) accs_bin = [] confs_bin = [] nitems_bin = [] for i in range(M): labels_i = labels[(conf_idxs == i)] y_true_i = y_true[(conf_idxs == i)] confs_i = confs[(conf_idxs == i)] acc = np.nan_to_num(np.mean((labels_i == y_true_i)), nan=0.0) conf = np.nan_to_num(np.mean(confs_i), nan=0.0) accs_bin.append(acc) confs_bin.append(conf) nitems_bin.append(len(labels_i)) (accs_bin, confs_bin) = (np.array(accs_bin), np.array(confs_bin)) nitems_bin = np.array(nitems_bin) ECE = np.average(np.abs((confs_bin - accs_bin)), weights=(nitems_bin / nitems_bin.sum())) MCE = np.max(np.abs((accs_bin - confs_bin))) return (ECE, MCE)
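# --- Illustrative usage (ours, not part of the original utilities) ---
# A self-contained sketch of the classification calibration metrics on
# synthetic predictions. `_demo_calibration_metrics` is hypothetical.
def _demo_calibration_metrics():
    probs = torch.softmax(torch.randn(1000, 10), dim=1)
    targets = torch.randint(0, 10, (1000,))
    print(get_brier_score(probs, targets))
    ece, mce = get_calib(probs, targets)
    print(ece, mce)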
def get_calib_regression(pred_means, pred_stds, y_true, return_hist=False, M=10): '\n Kuleshov et al. ICML 2018, eq. 9\n * pred_means, pred_stds, y_true must be np.array\'s\n * Set return_hist to True to also return the "histogram"---useful for visualization (see paper)\n ' T = len(y_true) ps = np.linspace(0, 1, M) cdf_vals = np.array([st.norm(m, s).cdf(y_t) for (m, s, y_t) in zip(pred_means, pred_stds, y_true)]) p_hats = np.array([(len(np.where((cdf_vals <= p))[0]) / T) for p in ps]) cal = (T * mean_squared_error(ps, p_hats)) return ((cal, ps, p_hats) if return_hist else cal)
def get_sharpness(pred_stds): '\n Kuleshov et al. ICML 2018, eq. 10\n\n pred_stds must be an np.array\n ' return np.mean((pred_stds ** 2))
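# --- Illustrative usage (ours, not part of the original utilities) ---
# A self-contained sketch: when the data really are drawn from the predictive
# Gaussians, the calibration error should be small; sharpness is the mean
# predictive variance. `_demo_regression_calibration` is hypothetical.
def _demo_regression_calibration():
    rng = np.random.default_rng(0)
    pred_means = rng.normal(size=500)
    pred_stds = np.full(500, 1.0)
    y_true = pred_means + rng.normal(size=500)  # sampled from the predictive
    print(get_calib_regression(pred_means, pred_stds, y_true))
    print(get_sharpness(pred_stds))  # 1.0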
def timing(fun): '\n Return the original output(s) and the wall-clock time in seconds.\n ' if torch.cuda.is_available(): start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) torch.cuda.synchronize() start.record() ret = fun() end.record() torch.cuda.synchronize() elapsed_time = (start.elapsed_time(end) / 1000) else: start_time = time.time() ret = fun() end_time = time.time() elapsed_time = (end_time - start_time) return (ret, elapsed_time)
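# --- Illustrative usage (ours, not part of the original utilities) ---
# A self-contained sketch timing a single forward pass; any zero-argument
# callable works. `_demo_timing` is hypothetical.
def _demo_timing():
    model, x = nn.Linear(100, 10), torch.randn(512, 100)
    out, seconds = timing(lambda: model(x))
    print(out.shape, f'{seconds:.6f}s')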
def save_results(args, metrics): ' Save the computed metrics ' if (args.run_name is None): res_str = (f'_{args.subset_of_weights}_{args.hessian_structure}' if (args.method in ['laplace', 'mola']) else '') temp_str = ('' if (args.temperature == 1.0) else f'_{args.temperature}') method_str = ('temp' if (args.use_temperature_scaling and (args.method == 'map')) else args.method) frac_str = (f'_{args.data_fraction}' if (args.data_fraction < 1.0) else '') result_path = f'./results/{args.benchmark}/{method_str}{res_str}{temp_str}_{args.model_seed}{frac_str}.npy' else: result_path = f'./results/{args.run_name}.npy' Path(result_path).parent.mkdir(parents=True, exist_ok=True) print(f'Saving results to {result_path}...') np.save(result_path, metrics)
def get_prior_precision(args, device): ' Obtain the prior precision parameter from the cmd arguments ' if (type(args.prior_precision) is str): prior_precision = torch.load(args.prior_precision, map_location=device) elif (type(args.prior_precision) is float): prior_precision = args.prior_precision else: raise ValueError('args.prior_precision must be a float or a path (str) to a saved tensor.') return prior_precision
class ProperDataLoader(): ' This class defines an iterator that wraps a PyTorch DataLoader \n to only return the first two of three elements of the data tuples.\n\n This is used to make the data loaders from the WILDS benchmark\n (which return (X, y, metadata) tuples, where metadata for example\n contains domain information) compatible with the uq.py script and\n with the laplace library (which both expect (X, y) tuples).\n ' def __init__(self, data_loader): self.data_loader = data_loader self.dataset = self.data_loader.dataset def __iter__(self): self.data_iter = iter(self.data_loader) return self def __next__(self): (X, y, _) = next(self.data_iter) return (X, y) def __len__(self): return len(self.data_loader)
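# --- Illustrative usage (ours, not part of the original utilities) ---
# A self-contained sketch: a loader over (X, y, metadata) triples is reduced
# to the (X, y) pairs expected downstream. `_demo_proper_data_loader` is hypothetical.
def _demo_proper_data_loader():
    ds = data_utils.TensorDataset(torch.randn(32, 3), torch.randint(0, 2, (32,)), torch.zeros(32, 1))
    loader = ProperDataLoader(data_utils.DataLoader(ds, batch_size=8))
    X, y = next(iter(loader))
    print(X.shape, y.shape)  # the metadata tensor is dropped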
def load_pretrained_wilds_model(dataset, model_dir, device, model_idx=0, model_seed=0): ' load pre-trained model ' config = get_default_config(dataset, algorithm=ALGORITHMS[model_idx]) is_featurizer = ((dataset in ['civilcomments', 'amazon']) and (ALGORITHMS[model_idx] == 'deepCORAL')) model = initialize_model(config, D_OUTS[dataset], is_featurizer=is_featurizer) if is_featurizer: model = nn.Sequential(*model) model = model.to(device) model_list_idx = ((model_idx * N_SEEDS[dataset]) + model_seed) model_name = list(MODEL_UUIDS[dataset].keys())[model_list_idx] model_path = ((Path(model_dir) / dataset) / f'{model_name}.pth') if (not model_path.exists()): model_path.parent.mkdir(exist_ok=True) model_url = (MODEL_URL % MODEL_UUIDS[dataset][model_name]) if (dataset == 'amazon'): model_url = model_url.replace('best_model.pth', AMAZON_MODELS[model_seed]) elif ((dataset == 'fmow') and (model_idx == 4)): model_url = model_url.replace('best_model.pth', FMOW_MODELS[model_seed]) elif ((dataset == 'poverty') and (model_idx == 4)): model_url = model_url.replace('best_model.pth', POVERTY_MODELS[model_seed]) print(f'Downloading pre-trained model parameters for {model_name} from {model_url}...') urllib.request.urlretrieve(model_url, model_path) print(f'Loading pre-trained model parameters for {model_name}...') state_dict = torch.load(model_path)['algorithm'] model_state_dict_keys = list(model.state_dict().keys()) model_state_dict = {} for m in state_dict: if ((dataset in ['civilcomments', 'amazon']) and (ALGORITHMS[model_idx] == 'deepCORAL') and ('featurizer' in m)): continue m_new = (m if (m.split('.')[0] == 'classifier') else '.'.join(m.split('.')[1:])) if ('classifier' in m_new): if (dataset == 'poverty'): m_new = m_new.replace('classifier', 'fc') elif ((dataset in ['civilcomments', 'amazon']) and (ALGORITHMS[model_idx] == 'deepCORAL')): m_new = m_new.replace('classifier', '1') if (m_new not in model_state_dict_keys): continue model_state_dict[m_new] = state_dict[m] model.load_state_dict(model_state_dict) model.eval() return model
def get_wilds_loaders(dataset, data_dir, data_fraction=1.0, model_seed=0): ' load in-distribution datasets and return data loaders ' config = get_default_config(dataset, data_fraction=data_fraction) dataset_kwargs = ({'fold': POVERTY_FOLDS[model_seed]} if (dataset == 'poverty') else {}) full_dataset = get_dataset(dataset=dataset, root_dir=data_dir, **dataset_kwargs) train_grouper = CombinatorialGrouper(dataset=full_dataset, groupby_fields=config.groupby_fields) if (dataset == 'fmow'): config.batch_size = (config.batch_size // 2) train_transform = initialize_transform(transform_name=config.train_transform, config=config, dataset=full_dataset) train_data = full_dataset.get_subset('train', frac=config.frac, transform=train_transform) train_loader = get_train_loader(loader=config.train_loader, dataset=train_data, batch_size=config.batch_size, uniform_over_groups=config.uniform_over_groups, grouper=train_grouper, distinct_groups=config.distinct_groups, n_groups_per_batch=config.n_groups_per_batch, **config.loader_kwargs) eval_transform = initialize_transform(transform_name=config.eval_transform, config=config, dataset=full_dataset) try: val_str = ('val' if (dataset == 'fmow') else 'id_val') val_data = full_dataset.get_subset(val_str, frac=config.frac, transform=eval_transform) val_loader = get_eval_loader(loader=config.eval_loader, dataset=val_data, batch_size=config.batch_size, grouper=train_grouper, **config.loader_kwargs) except Exception: print(f"{dataset} dataset doesn't have an in-distribution validation split -- using train split instead!") val_loader = train_loader try: in_test_data = full_dataset.get_subset('id_test', frac=config.frac, transform=eval_transform) in_test_loader = get_eval_loader(loader=config.eval_loader, dataset=in_test_data, batch_size=config.batch_size, grouper=train_grouper, **config.loader_kwargs) except Exception: print(f"{dataset} dataset doesn't have an in-distribution test split -- using validation split instead!") in_test_loader = val_loader train_loader = ProperDataLoader(train_loader) val_loader = ProperDataLoader(val_loader) in_test_loader = ProperDataLoader(in_test_loader) return (train_loader, val_loader, in_test_loader)