repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
BayesianRelevance | BayesianRelevance-master/src/lrp_rules_robustness_main.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F
from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.advNN import *
from networks.fullBNN import *
from utils.lrp import *
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from plot.lrp_distributions import significance_symbol
from scipy.stats import mannwhitneyu as stat_test
# Command-line configuration for the LRP-rules robustness comparison script.
parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
# FIX: help text was copy-pasted from --model_idx; topk is the number of most
# relevant pixels compared by the LRP robustness metric.
parser.add_argument("--topk", default=20, type=int,
                    help="Number of most relevant pixels used by the LRP robustness metric")
parser.add_argument("--n_samples", default=100, type=int)
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--load", type=eval, default="False", help="If True load dataframe else build it.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()

# Robustness is always computed image-wise in this script.
lrp_robustness_method = "imagewise"
n_inputs = 100 if args.debug else args.n_inputs

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

if args.device == "cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

# Output locations: debug runs are isolated under a debug/ subtree.
df_savedir = os.path.join(TESTS, 'debug/fullBNN') if args.debug is True else os.path.join(TESTS, 'fullBNN')
filename = "rules_robustness_main_" + str(args.attack_method) + "_images=" + str(n_inputs) \
           + "_samples=" + str(args.n_samples) + "_topk=" + str(args.topk)
datasets = ['MNIST', 'F. MNIST', 'CIFAR10']
# One-sided alternative hypothesis for the Mann-Whitney U tests below.
alternative = 'less'
if args.load:
    # Reuse a previously computed dataframe.
    df = load_from_pickle(path=df_savedir, filename=filename)

else:
    df = pd.DataFrame()
    rules_list = ['epsilon', 'gamma', 'alpha1beta0']

    def _append_robustness_rows(df, rule, layer_idx, dataset,
                                det_robustness, adv_robustness, bay_robustness,
                                p, adv_p, bay_p):
        """Append one 'Adv - Det' and one 'Bay - Det' row per test image.

        Uses pd.concat because DataFrame.append was removed in pandas 2.0.
        Row order matches the original per-image interleaving.
        """
        rows = []
        for im_idx in range(len(det_robustness)):
            rows.append({'rule': rule, 'layer_idx': layer_idx, 'model': 'Adv - Det', 'dataset': dataset,
                         'robustness_diff': adv_robustness[im_idx] - det_robustness[im_idx],
                         'p_value': p, 'adv_p_value': adv_p, 'bay_p_value': bay_p})
            rows.append({'rule': rule, 'layer_idx': layer_idx, 'model': 'Bay - Det', 'dataset': dataset,
                         'robustness_diff': bay_robustness[im_idx] - det_robustness[im_idx],
                         'p_value': p, 'adv_p_value': adv_p, 'bay_p_value': bay_p})
        return pd.concat([df, pd.DataFrame(rows)], ignore_index=True)

    ### MNIST & Fashion MNIST HMC
    for model_idx, dataset in [(2, 'MNIST'), (3, 'F. MNIST')]:

        m = baseNN_settings["model_" + str(model_idx)]
        x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=m["dataset"], shuffle=False,
                                                              n_inputs=n_inputs)[2:]

        # Deterministic network and its attacks.
        det_model_savedir = get_model_savedir(model="baseNN", dataset=m["dataset"], architecture=m["architecture"],
                                              debug=args.debug, model_idx=model_idx)
        detnet = baseNN(inp_shape, num_classes, *list(m.values()))
        detnet.load(savedir=det_model_savedir, device=args.device)
        det_attacks = load_attack(method=args.attack_method, model_savedir=det_model_savedir)
        det_predictions, det_atk_predictions, det_softmax_robustness, det_successful_idxs, det_failed_idxs = \
            evaluate_attack(net=detnet, x_test=x_test, x_attack=det_attacks, y_test=y_test,
                            device=args.device, return_classification_idxs=True)

        # Adversarially trained network (always trained against fgsm).
        adv_model_savedir = get_model_savedir(model="advNN", dataset=m["dataset"], architecture=m["architecture"],
                                              debug=args.debug, model_idx=model_idx, attack_method='fgsm')
        advnet = advNN(inp_shape, num_classes, *list(m.values()), attack_method='fgsm')
        advnet.load(savedir=adv_model_savedir, device=args.device)
        adv_attacks = load_attack(method=args.attack_method, model_savedir=adv_model_savedir)
        adv_predictions, adv_atk_predictions, adv_softmax_robustness, adv_successful_idxs, adv_failed_idxs = \
            evaluate_attack(net=advnet, x_test=x_test, x_attack=adv_attacks, y_test=y_test,
                            device=args.device, return_classification_idxs=True)

        # Bayesian network; note `m` is rebound to the fullBNN settings here.
        m = fullBNN_settings["model_" + str(model_idx)]
        bay_model_savedir = get_model_savedir(model="fullBNN", dataset=m["dataset"], architecture=m["architecture"],
                                              model_idx=model_idx, debug=args.debug)
        bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
        bayesnet.load(savedir=bay_model_savedir, device=args.device)
        bay_attacks = load_attack(method=args.attack_method, model_savedir=bay_model_savedir, n_samples=args.n_samples)
        bay_predictions, bay_atk_predictions, bay_softmax_robustness, bay_successful_idxs, bay_failed_idxs = \
            evaluate_attack(net=bayesnet, n_samples=args.n_samples, x_test=x_test, x_attack=bay_attacks, y_test=y_test,
                            device=args.device, return_classification_idxs=True)

        images = x_test.to(args.device)
        labels = y_test.argmax(-1).to(args.device)

        for rule in rules_list:
            # Heatmaps were saved for the last learnable layer only.
            layer_idx = list(detnet.learnable_layers_idxs)[-1]

            savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method, rule=rule,
                                      layer_idx=layer_idx)
            det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
            det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

            savedir = get_lrp_savedir(model_savedir=adv_model_savedir, attack_method=args.attack_method, rule=rule,
                                      layer_idx=layer_idx)
            adv_lrp = load_from_pickle(path=savedir, filename="det_lrp")
            adv_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

            savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
                                      rule=rule, layer_idx=layer_idx, lrp_method=args.lrp_method)
            bay_lrp = load_from_pickle(path=savedir, filename="bay_lrp_samp=" + str(args.n_samples))
            bay_attack_lrp = load_from_pickle(path=savedir, filename="bay_attack_lrp_samp=" + str(args.n_samples))

            det_robustness, det_pxl_idxs = lrp_robustness(
                original_heatmaps=det_lrp,
                adversarial_heatmaps=det_attack_lrp,
                topk=args.topk, method=lrp_robustness_method)
            adv_robustness, adv_pxl_idxs = lrp_robustness(
                original_heatmaps=adv_lrp,
                adversarial_heatmaps=adv_attack_lrp,
                topk=args.topk, method=lrp_robustness_method)
            bay_robustness, bay_pxl_idxs = lrp_robustness(
                original_heatmaps=bay_lrp,
                adversarial_heatmaps=bay_attack_lrp,
                topk=args.topk, method=lrp_robustness_method)

            # One-sided Mann-Whitney U tests between robustness distributions.
            _, adv_p = stat_test(x=det_robustness, y=adv_robustness, alternative=alternative)
            _, bay_p = stat_test(x=det_robustness, y=bay_robustness, alternative=alternative)
            _, p = stat_test(x=adv_robustness, y=bay_robustness, alternative=alternative)
            print("\np values =", adv_p, bay_p, p)

            df = _append_robustness_rows(df, rule, layer_idx, dataset,
                                         det_robustness, adv_robustness, bay_robustness,
                                         p, adv_p, bay_p)

    ### CIFAR-10 SVI
    det_savedir = '../experiments/baseNN/cifar_resnet/'
    det_attacks = load_attack(method=args.attack_method, model_savedir=det_savedir)
    adv_savedir = '../experiments/advNN/cifar_resnet_atk=fgsm/'
    adv_attacks = load_attack(method=args.attack_method, model_savedir=adv_savedir)
    bay_savedir = '../experiments/fullBNN/cifar_resnet/'
    bay_attacks = load_attack(method=args.attack_method, model_savedir=bay_savedir, n_samples=args.n_samples)

    # Fixed layer index for the CIFAR ResNet experiments.
    layer_idx = 38
    for rule in rules_list:

        savedir = get_lrp_savedir(model_savedir=det_savedir, attack_method=args.attack_method,
                                  layer_idx=layer_idx, rule=rule)
        det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
        det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

        savedir = get_lrp_savedir(model_savedir=adv_savedir, attack_method=args.attack_method,
                                  layer_idx=layer_idx, rule=rule)
        adv_lrp = load_from_pickle(path=savedir, filename="det_lrp")
        adv_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

        savedir = get_lrp_savedir(model_savedir=bay_savedir, attack_method=args.attack_method,
                                  layer_idx=layer_idx, rule=rule)
        bay_lrp = load_from_pickle(path=savedir, filename="bay_lrp_samp=" + str(args.n_samples))
        bay_attack_lrp = load_from_pickle(path=savedir, filename="bay_attack_lrp_samp=" + str(args.n_samples))

        det_robustness, det_pxl_idxs = lrp_robustness(original_heatmaps=det_lrp, adversarial_heatmaps=det_attack_lrp,
                                                      topk=args.topk, method=lrp_robustness_method)
        adv_robustness, adv_pxl_idxs = lrp_robustness(original_heatmaps=adv_lrp, adversarial_heatmaps=adv_attack_lrp,
                                                      topk=args.topk, method=lrp_robustness_method)
        bay_robustness, bay_pxl_idxs = lrp_robustness(original_heatmaps=bay_lrp, adversarial_heatmaps=bay_attack_lrp,
                                                      topk=args.topk, method=lrp_robustness_method)

        _, adv_p = stat_test(x=det_robustness, y=adv_robustness, alternative=alternative)
        _, bay_p = stat_test(x=det_robustness, y=bay_robustness, alternative=alternative)
        _, p = stat_test(x=adv_robustness, y=bay_robustness, alternative=alternative)
        print("\np values =", adv_p, bay_p, p)

        df = _append_robustness_rows(df, rule, layer_idx, 'CIFAR10',
                                     det_robustness, adv_robustness, bay_robustness,
                                     p, adv_p, bay_p)

    save_to_pickle(data=df, path=df_savedir, filename=filename)
### Plots
def plot_rules_robustness_diff(df, n_samples, datasets, savedir, filename):
    """Plot per-image LRP robustness differences (Adv-Det, Bay-Det) as boxplots.

    One row per dataset, one column per comparison model; each box is an LRP
    rule. Significance symbols from the Mann-Whitney tests are drawn under the
    boxes. The figure is saved to savedir/filename.png.

    Args:
        df: dataframe with columns rule, layer_idx, model, dataset,
            robustness_diff, p_value (built by this script).
        n_samples: unused here, kept for interface compatibility.
        datasets: ordered list of dataset names (expects 3 rows).
        savedir: output directory, created if missing.
        filename: output file name without extension.
    """
    os.makedirs(savedir, exist_ok=True)
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'size': 9})

    # Distinct color maps for the adversarial and Bayesian comparisons.
    adv_col = plt.cm.get_cmap('rocket', 100)(np.linspace(0, 1, 13))[3:]
    bay_col = plt.cm.get_cmap('crest', 100)(np.linspace(0, 1, 10))[3:]
    palettes = [adv_col, bay_col]

    fig, ax = plt.subplots(len(datasets), 2, figsize=(4, 4), sharex=True, sharey='row', dpi=150,
                           facecolor='w', edgecolor='k')
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.1)

    for col_idx, model in enumerate(list(df['model'].unique())):

        palette = {"epsilon": palettes[col_idx][2], "gamma": palettes[col_idx][4],
                   "alpha1beta0": palettes[col_idx][6]}

        for row_idx, dataset in enumerate(datasets):

            temp_df = df[df['dataset'] == dataset]
            # y position for the significance symbols, below the lowest whisker.
            y = min(temp_df['robustness_diff']) * 1.25
            temp_df = temp_df[temp_df['model'] == model]
            assert len(temp_df['layer_idx'].unique()) == 1
            layer_idx = int(temp_df['layer_idx'].unique()[0])

            sns.boxplot(data=temp_df, ax=ax[row_idx, col_idx], x='rule', y='robustness_diff', orient='v', hue='rule',
                        palette=palette, dodge=False, flierprops={'markersize': 3})

            # Annotate each rule's box with its significance symbol.
            for rule, x in zip(temp_df['rule'].unique(), [-0.24, 0.75, 1.75]):
                rule_df = temp_df[temp_df['rule'] == rule]
                assert len(df) / (6 * 3) == float(len(rule_df))
                # Check uniqueness before indexing into the unique values.
                assert len(rule_df['p_value'].unique()) == 1
                p_value = rule_df['p_value'].unique()[0]
                significance = significance_symbol(p_value)
                ax[row_idx, col_idx].text(x=x, y=y, s=significance, weight='bold', size=8, color=palette[rule])

            # Soften box face colors and recolor the box artists' lines.
            for i, patch in enumerate(ax[row_idx, col_idx].artists):
                r, g, b, a = patch.get_facecolor()
                col = (r, g, b, a)
                patch.set_facecolor((r, g, b, .7))
                patch.set_edgecolor(col)
                # Each box owns 6 line objects (whiskers, caps, median).
                for j in range(i * 6, i * 6 + 6):
                    line = ax[row_idx, col_idx].lines[j]
                    line.set_color(col)
                    line.set_mfc(col)
                    line.set_mec(col)

            # Axis labels: model on top, dataset on the right, metric on the left.
            ax[0, col_idx].xaxis.set_label_position("top")
            ax[0, col_idx].set_xlabel(model, weight='bold', size=9)
            ax[row_idx, 0].set_ylabel("")
            ax[row_idx, 1].set_ylabel(r"$\bf{" + dataset + "}$" + f"\nLayer idx={layer_idx}", rotation=270,
                                      size=9, labelpad=30)
            ax[1, 0].set_ylabel("LRP robustness diff.", rotation=90, size=9)
            ax[row_idx, 1].yaxis.set_label_position("right")
            ax[row_idx, col_idx].get_legend().remove()
            ax[row_idx, col_idx].set_xlabel("")
            ax[row_idx, col_idx].set_xticklabels([r'$\epsilon$', r'$\gamma$', r'$\alpha\beta$'])
            ax[2, col_idx].set_xlabel("LRP rule", weight='bold', labelpad=5)

    plt.subplots_adjust(hspace=0.07)
    plt.subplots_adjust(wspace=0.05)
    fig.subplots_adjust(left=0.16)
    fig.subplots_adjust(right=0.86)
    fig.subplots_adjust(bottom=0.12)
    print("\nSaving: ", os.path.join(savedir, filename + ".png"))
    fig.savefig(os.path.join(savedir, filename + ".png"))
    plt.close(fig)
# def plot_rules_robustness_diff(df, n_samples, datasets, savedir, filename):
# os.makedirs(savedir, exist_ok=True)
# sns.set_style("darkgrid")
# matplotlib.rc('font', **{'size': 9})
# adv_col = plt.cm.get_cmap('rocket', 100)(np.linspace(0, 1, 13))[3:]
# bay_col = plt.cm.get_cmap('crest', 100)(np.linspace(0, 1, 10))[3:]
# palettes = [adv_col, bay_col]
# fig, ax = plt.subplots(2, len(datasets), figsize=(4, 3), sharey=True, sharex=True, dpi=150,
# facecolor='w', edgecolor='k')
# fig.tight_layout()
# for row_idx, model in enumerate(list(df['model'].unique())):
# palette = {"epsilon":palettes[row_idx][2], "gamma":palettes[row_idx][4], "alpha1beta0":palettes[row_idx][6]}
# for col_idx, dataset in enumerate(datasets):
# temp_df = df[df['dataset']==dataset]
# temp_df = temp_df[temp_df['model']==model]
# layer_idx = int(temp_df['layer_idx'].unique()[0])
# sns.boxplot(data=temp_df, ax=ax[row_idx, col_idx], x='rule', y='robustness_diff', orient='v', hue='rule',
# palette=palette, dodge=False)
# for i, patch in enumerate(ax[row_idx, col_idx].artists):
# r, g, b, a = patch.get_facecolor()
# col = (r, g, b, a)
# patch.set_facecolor((r, g, b, .7))
# patch.set_edgecolor(col)
# for j in range(i*6, i*6+6):
# line = ax[row_idx, col_idx].lines[j]
# line.set_color(col)
# line.set_mfc(col)
# line.set_mec(col)
# ax[0, col_idx].set_xlabel("")
# ax[0, col_idx].xaxis.set_label_position("top")
# ax[0, col_idx].set_xlabel(r"$\bf{" + dataset + "}$"+f"\nLayer idx={layer_idx}", rotation=0, size=9, labelpad=4)
# ax[row_idx, 0].set_ylabel(model, weight='bold', size=9)
# ax[row_idx, 1].set_ylabel("")
# ax[row_idx, 2].set_ylabel("LRP rob. diff.", rotation=270, size=9, labelpad=-75)
# ax[row_idx, 2].set_ylabel("LRP rob. diff.", rotation=270, size=9, labelpad=-75)
# ax[row_idx, col_idx].get_legend().remove()
# ax[row_idx, col_idx].set_xlabel("")
# ax[row_idx, col_idx].set_xticklabels([r'$\epsilon$',r'$\gamma$',r'$\alpha\beta$'])
# ax[1, 1].set_xlabel("LRP rule", weight='bold')#, labelpad=5)
# # for rule, x in zip(temp_df['rule'].unique(), [-0.3, 0.7, 1.7]):
# # rule_df = temp_df[temp_df['rule']==rule]
# # p_value = rule_df['p_value'].unique()[0]
# # assert len(rule_df['p_value'].unique())==1
# # significance = significance_symbol(p_value)
# # print(dataset, rule, p_value, significance)
# # ax[0, col_idx].text(x=x, y=0, s=significance, weight='bold')
# # adv_p_value = rule_df['adv_p_value'].unique()[0]
# # bay_p_value = rule_df['bay_p_value'].unique()[0]
# # assert len(rule_df['adv_p_value'].unique())==1
# # assert len(rule_df['bay_p_value'].unique())==1
# # s = significance_symbol(adv_p_value) if row_idx==0 else significance_symbol(bay_p_value)
# # ax[row_idx, col_idx].text(x=x, y=0.75, s=s, weight='bold', size=7, color=palette[rule])
# plt.subplots_adjust(hspace=0.05)
# plt.subplots_adjust(wspace=0.05)
# fig.subplots_adjust(left=0.15)
# fig.subplots_adjust(right=0.95)
# fig.subplots_adjust(top=0.88)
# fig.subplots_adjust(bottom=0.15)
# print("\nSaving: ", os.path.join(savedir, filename+".png"))
# fig.savefig(os.path.join(savedir, filename+".png"))
# plt.close(fig)
# Render and save the final comparison figure.
plot_rules_robustness_diff(df=df,
                           n_samples=args.n_samples,
                           datasets=datasets,
                           savedir=os.path.join(TESTS, 'figures/rules_robustness'),
                           filename=filename)
| 16,465 | 43.382749 | 120 | py |
BayesianRelevance | BayesianRelevance-master/src/full_test_cifar_resnet.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from tqdm import tqdm
import random
from torch.utils.data import Subset, DataLoader
import bayesian_torch.bayesian_torch.models.deterministic.resnet as resnet
from attacks.run_attacks import run_attack, save_attack, load_attack
from utils.lrp import *
from full_test_cifar_bayesian_resnet import plot_lrp_grid
# All callable resnet* constructors exposed by the deterministic resnet module.
model_names = sorted(
    name for name in resnet.__dict__
    if name.islower() and not name.startswith("__")
    and name.startswith("resnet") and callable(resnet.__dict__[name]))

print(model_names)
# Command-line configuration for CIFAR10 training / testing / attack / LRP.
parser = argparse.ArgumentParser(description='CIFAR10')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet20',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: resnet20)')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 8)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
# FIX: help text said 5e-4 but the actual default is 1e-4.
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
# FIX: help text said 20 but the actual default is 50.
parser.add_argument('--print-freq', '-p', default=50, type=int, metavar='N',
                    help='print frequency (default: 50)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
                    help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
                    help='The directory used to save the trained models',
                    default='../experiments/baseNN/cifar_resnet/',
                    type=str)
parser.add_argument('--save-every', dest='save_every',
                    help='Saves checkpoints at every specified number of epochs',
                    type=int, default=10)
# NOTE(review): argparse type=bool is a footgun -- bool('False') is True.
# Kept as-is for interface compatibility; rely on the default instead.
parser.add_argument('--tensorboard', type=bool, default=False, metavar='N',
                    help='use tensorboard for logging and visualization of training progress')
parser.add_argument('--log_dir', type=str,
                    default='./bayesian_torch/logs/cifar/deterministic',
                    metavar='N',
                    help='use tensorboard for logging and visualization of training progress')
parser.add_argument('--mode', type=str, default='test', help='train | test')
parser.add_argument('--attack_method', type=str, default='fgsm',
                    help='fgsm, pgd')
parser.add_argument('--test_inputs', type=int, default=500)

# Best validation precision seen so far (updated by main()).
best_prec1 = 0
# Default attack strength used for crafting adversarial examples.
attack_hyperparams = {'epsilon': 0.2}
def main():
    """Entry point: train or test the deterministic CIFAR10 ResNet.

    In 'train' mode: standard SGD training loop with checkpointing of the
    best model. In 'test' mode: evaluate the saved checkpoint, craft
    adversarial attacks, and compute + plot LRP heatmaps for several rules.
    """
    global args, best_prec1
    args = parser.parse_args()

    # Check the save_dir exists or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    model = torch.nn.DataParallel(resnet.__dict__[args.arch]())
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    print(model)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.evaluate, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    tb_writer = None
    # if args.tensorboard:
    #     logger_dir = os.path.join(args.log_dir, 'tb_logger')
    #     if not os.path.exists(logger_dir):
    #         os.makedirs(logger_dir)
    #     tb_writer = SummaryWriter(logger_dir)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # Larger batches for training, smaller for evaluation/attacks.
    batch_size = 512 if args.mode == 'train' else 100

    train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='../experiments/bayesian_torch/data',
        train=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ]),
        download=True),
        batch_size=batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='../experiments/bayesian_torch/data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    if torch.cuda.is_available():
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss().cpu()

    if args.half:
        model.half()
        criterion.half()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[100, 150], last_epoch=args.start_epoch - 1)

    # resnet110 needs a warm-up learning rate.
    if args.arch in ['resnet110']:
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * 0.1

    if args.evaluate:
        # FIX: validate() takes (args, val_loader, model, criterion, epoch);
        # the original call dropped `args` and `epoch` and would raise.
        validate(args, val_loader, model, criterion, args.start_epoch)
        return

    if args.mode == 'train':

        for epoch in range(args.start_epoch, args.epochs):

            # train for one epoch
            print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
            train(args, train_loader, model, criterion, optimizer, epoch,
                  tb_writer)
            lr_scheduler.step()

            prec1 = validate(args, val_loader, model, criterion, epoch,
                             tb_writer)

            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)

            if epoch > 0 and epoch % args.save_every == 0:
                if is_best:
                    save_checkpoint(
                        {
                            'epoch': epoch + 1,
                            'state_dict': model.state_dict(),
                            'best_prec1': best_prec1,
                        },
                        is_best,
                        filename=os.path.join(
                            args.save_dir, '{}_cifar.pth'.format(args.arch)))

    elif args.mode == 'test':

        checkpoint_file = args.save_dir + '/{}_cifar.pth'.format(args.arch)
        if torch.cuda.is_available():
            checkpoint = torch.load(checkpoint_file)
            device = "cuda"
        else:
            checkpoint = torch.load(checkpoint_file,
                                    map_location=torch.device('cpu'))
            device = "cpu"
        model.load_state_dict(checkpoint['state_dict'])
        evaluate(args, model, val_loader)

        # Adversarial attacks

        method = args.attack_method
        test_inputs = args.test_inputs

        dataset = Subset(val_loader.dataset, range(test_inputs))
        images, labels = ([], [])
        for image, label in dataset:
            images.append(image)
            labels.append(label)
        images = torch.stack(images)

        attacks = attack(model, dataset, method=method, hyperparams=attack_hyperparams)
        save_attack(inputs=images, attacks=attacks, method=method, model_savedir=args.save_dir)

        # LRP

        for rule in ['epsilon', 'gamma', 'alpha1beta0']:

            learnable_layers_idxs = [38]

            for layer_idx in learnable_layers_idxs:

                print(f"\nlayer_idx = {layer_idx}")

                savedir = get_lrp_savedir(model_savedir=args.save_dir, attack_method=method,
                                          layer_idx=layer_idx, rule=rule)

                det_lrp = compute_lrp(images, model, rule=rule, device=device)
                det_attack_lrp = compute_lrp(attacks, model, device=device, rule=rule)
                save_to_pickle(det_lrp, path=savedir, filename="det_lrp")
                save_to_pickle(det_attack_lrp, path=savedir, filename="det_attack_lrp")

                # Plot a fixed random subset of 10 images and their heatmaps.
                set_seed(0)
                idxs = np.random.choice(len(images), 10, replace=False)
                original_images_plot = torch.stack([images[i].squeeze() for i in idxs])
                adversarial_images_plot = torch.stack([attacks[i].squeeze() for i in idxs])
                lrp_heatmaps_plot = torch.stack([det_lrp[i].squeeze() for i in idxs])
                attack_lrp_heatmaps_plot = torch.stack([det_attack_lrp[i].squeeze() for i in idxs])

                plot_lrp_grid(original_images=original_images_plot.detach().cpu(),
                              adversarial_images=adversarial_images_plot.detach().cpu(),
                              bay_lrp_heatmaps=lrp_heatmaps_plot.detach().cpu(),
                              bay_attack_lrp_heatmaps=attack_lrp_heatmaps_plot.detach().cpu(),
                              filename="lrp", savedir=savedir)
def convert_resnet(module, modules=None):
    """Recursively convert a torch model into an lrp.Sequential of LRP layers.

    Linear/Conv2d layers are replaced with their LRP counterparts; an explicit
    Flatten is inserted after AdaptiveAvgPool2d (the torch model flattens in
    forward(), which is not visible as a module). Other modules are kept as-is.

    Args:
        module: the torch module (or submodule during recursion) to convert.
        modules: accumulator list used internally by the recursion; leave None.

    Returns:
        lrp.Sequential on the top-level call, None during recursion.
    """
    import torch
    from lrp.sequential import Sequential
    from lrp.linear import Linear
    from lrp.conv import Conv2d

    conversion_table = {
        'Linear': Linear,
        'Conv2d': Conv2d
    }

    # First time
    if modules is None:
        modules = []
        for m in module.children():
            convert_resnet(m, modules=modules)

            # The model has a flatten which is not represented as a module,
            # so the recursion doesn't pick it up. Insert it explicitly.
            if isinstance(m, torch.nn.AdaptiveAvgPool2d):
                modules.append(torch.nn.Flatten())
        sequential = Sequential(*modules)
        return sequential

    # Recursion
    if isinstance(module, torch.nn.Sequential):
        for m in module.children():
            # FIX: this called convert_vgg(), which is undefined in this file.
            convert_resnet(m, modules=modules)
    elif isinstance(module, torch.nn.Linear) or isinstance(module, torch.nn.Conv2d):
        class_name = module.__class__.__name__
        lrp_module = conversion_table[class_name].from_torch(module)
        modules.append(lrp_module)
    # maxpool is handled with gradient for the moment
    elif isinstance(module, torch.nn.ReLU):
        # avoid inplace operations: they might ruin PatternNet pattern
        # computations
        modules.append(torch.nn.ReLU())
    else:
        modules.append(module)
def train(args,
          train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          tb_writer=None):
    """Run one training epoch and log batch statistics.

    Args:
        args: parsed CLI namespace (uses .half and .print_freq).
        train_loader: DataLoader yielding (input, target) batches.
        model: network to train (moved to GPU elsewhere if available).
        criterion: loss function.
        optimizer: SGD optimizer.
        epoch: current epoch index, used for logging only.
        tb_writer: optional tensorboard writer; logs the last batch's stats.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):

        # measure data loading time
        data_time.update(time.time() - end)

        if torch.cuda.is_available():
            target = target.cuda()
            input_var = input.cuda()
        else:
            target = target.cpu()
            input_var = input.cpu()
        target_var = target
        if args.half:
            input_var = input_var.half()

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1))

    # NOTE(review): logs only the final batch's loss/accuracy per epoch --
    # confirm that per-epoch averages (losses.avg, top1.avg) aren't intended.
    if tb_writer is not None:
        tb_writer.add_scalar('train/loss', loss.item(), epoch)
        tb_writer.add_scalar('train/accuracy', prec1.item(), epoch)
        tb_writer.flush()
def validate(args, val_loader, model, criterion, epoch, tb_writer=None):
    """Evaluate the model on the validation set.

    Args:
        args: parsed CLI namespace (uses .half and .print_freq).
        val_loader: DataLoader yielding (input, target) batches.
        model: network to evaluate.
        criterion: loss function.
        epoch: epoch index for tensorboard logging.
        tb_writer: optional tensorboard writer.

    Returns:
        Average top-1 precision (float) over the validation set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    model.eval()

    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input_var = input.cuda()
                target_var = target.cuda()
            else:
                target = target.cpu()
                input_var = input.cpu()
                target_var = target.cpu()

            if args.half:
                input_var = input_var.half()

            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)

            output = output.float()
            loss = loss.float()

            # measure accuracy and record loss
            prec1 = accuracy(output.data, target)[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1))

    # NOTE(review): logs only the final batch's loss per epoch -- confirm
    # that the epoch averages aren't intended instead.
    if tb_writer is not None:
        tb_writer.add_scalar('val/loss', loss.item(), epoch)
        tb_writer.add_scalar('val/accuracy', prec1.item(), epoch)
        tb_writer.flush()

    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))

    return top1.avg
def evaluate(args, model, val_loader):
    """Compute softmax predictions over the validation set and save them.

    Prints test accuracy and inference throughput, and saves the softmax
    outputs and ground-truth labels as .npy files.

    Args:
        args: parsed CLI namespace (unused here, kept for interface parity).
        model: network to evaluate.
        val_loader: DataLoader yielding (data, target) batches.
    """
    model.eval()
    correct = 0
    output_list = []
    labels_list = []
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            output = model(data)
            output = torch.nn.functional.softmax(output, dim=1)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            output_list.append(output)
            labels_list.append(target)
        end = time.time()
        # FIX: throughput was hard-coded to 10000 images; use the real count.
        print("inference throughput: ", len(val_loader.dataset) / (end - begin),
              " images/s")

        output = torch.cat(output_list)
        target = torch.cat(labels_list)
        print('\nTest Accuracy: {:.2f}%\n'.format(100. * correct /
                                                  len(val_loader.dataset)))
        np.save('../experiments/bayesian_torch/probs_cifar_det.npy', output.data.cpu().numpy())
        np.save('../experiments/bayesian_torch/cifar_test_labels.npy', target.data.cpu().numpy())
def attack(model, dataset, method, hyperparams):
    """Craft one adversarial example per (image, label) pair in the dataset.

    Args:
        model: network to attack (set to eval mode here).
        dataset: iterable of (image, label) pairs (unbatched).
        method: attack name, e.g. 'fgsm' or 'pgd'.
        hyperparams: dict of attack hyperparameters, e.g. {'epsilon': 0.2}.

    Returns:
        Tensor stacking the clamped adversarial images.
    """
    model.eval()
    adversarial_attacks = []

    for data, target in tqdm(dataset):
        data = data.unsqueeze(0)
        target = torch.tensor(target).unsqueeze(0)
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
            device = 'cuda'
        else:
            data, target = data.cpu(), target.cpu()
            device = 'cpu'
        # FIX: the `hyperparams` argument was ignored in favour of the
        # module-level attack_hyperparams global; use the parameter.
        perturbed_image = run_attack(net=model, image=data, label=target, method=method,
                                     device=device, hyperparams=hyperparams).squeeze()
        # Keep adversarial pixels in the valid image range.
        perturbed_image = torch.clamp(perturbed_image, 0., 1.)
        adversarial_attacks.append(perturbed_image)

    return torch.stack(adversarial_attacks)
def compute_lrp(x_test, network, rule, device):
    """Compute LRP input relevance heatmaps for each image in x_test.

    Runs a forward pass with `explain=True` and the given LRP rule, then
    backpropagates the argmax logit; the input gradient is the relevance.

    Args:
        x_test: batch of input images.
        network: model whose forward() accepts explain/rule keywords.
        rule: LRP rule name ('epsilon', 'gamma', 'alpha1beta0').
        device: 'cpu' or 'cuda'.

    Returns:
        Tensor stacking one heatmap per input image.
    """
    x_test = x_test.to(device)
    explanations = []

    for x in tqdm(x_test):

        # Forward pass on a detached copy that tracks input gradients.
        x_copy = copy.deepcopy(x.detach()).unsqueeze(0)
        x_copy.requires_grad = True
        y_hat = network.forward(x_copy, explain=True, rule=rule)

        # Choose argmax
        y_hat = y_hat[torch.arange(x_copy.shape[0]), y_hat.max(1)[1]]
        y_hat = y_hat.sum()

        # Backward pass (compute explanation)
        y_hat.backward()
        lrp = x_copy.grad.squeeze(1)
        explanations.append(lrp)

    explanations = torch.stack(explanations)
    return explanations
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist the training state dict to *filename* via torch.save.

    *is_best* is accepted for interface compatibility with the caller but
    does not change where or whether the checkpoint is written.
    """
    torch.save(state, filename)
class AverageMeter(object):
    """Computes and stores the average and current value of a metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset all statistics to zero."""
        self.val = 0    # most recent value
        self.avg = 0    # running (weighted) average
        self.sum = 0    # weighted sum of observed values
        self.count = 0  # total weight (number of observations)

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __repr__(self):
        # Debug-friendly representation; does not affect existing callers.
        return f"{type(self).__name__}(val={self.val}, avg={self.avg}, count={self.count})"
def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k.

    Args:
        output: logits/probabilities of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        topk: tuple of k values to report.

    Returns:
        List of 0-dim tensors, one precision percentage per k in topk.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # FIX: use reshape instead of view -- the sliced tensor can be
        # non-contiguous, and .view(-1) raises a runtime error in that case
        # (well-known pytorch/examples accuracy() fix).
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
if __name__ == '__main__':
    main()
| 16,798 | 26.271104 | 90 | py |
BayesianRelevance | BayesianRelevance-master/src/train_networks.py | import argparse
import numpy as np
import os
import torch
import attacks.deeprobust as deeprobust
import attacks.gradient_based as grad_based
from utils import savedir
from utils.data import *
from utils.seeding import *
from networks.advNN import *
from networks.baseNN import *
from networks.fullBNN import *
# ----- Command-line interface -----
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="baseNN", type=str, help="baseNN, fullBNN, advNN")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings.")
parser.add_argument("--load", default=False, type=eval, help="Load saved computations and evaluate them.")
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
# parser.add_argument("--attack_iters", default=3, type=int, help="Number of iterations in iterative attacks.")
parser.add_argument("--epsilon", default=0.2, type=int, help="Strength of a perturbation.")
# parser.add_argument("--attack_lrp_rule", default='epsilon', type=str, help="LRP rule used for the attacks.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()

# Hyperparameters passed to adversarial training (advNN only).
attack_hyperparams={'epsilon':args.epsilon}#, 'iters':args.attack_iters, 'lrp_rule':args.attack_lrp_rule}

# Debug mode restricts the dataset to 100 points for quick runs.
n_inputs=100 if args.debug else None

print("PyTorch Version: ", torch.__version__)

if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

if args.model=="baseNN":
    # Deterministic baseline network: train (or load) and evaluate.
    model = baseNN_settings["model_"+str(args.model_idx)]

    train_loader, test_loader, inp_shape, out_size = data_loaders(dataset_name=model["dataset"], n_inputs=n_inputs,
                                                                  batch_size=128, shuffle=True)
    savedir = get_model_savedir(model=args.model, dataset=model["dataset"], architecture=model["architecture"],
                                baseiters=None, debug=args.debug, model_idx=args.model_idx)

    net = baseNN(inp_shape, out_size, *list(model.values()))

    if args.load:
        net.load(savedir=savedir, device=args.device)
    else:
        net.train(train_loader=train_loader, savedir=savedir, device=args.device)
    net.evaluate(test_loader=test_loader, device=args.device)

elif args.model=="advNN":
    # Adversarially trained network; reuses the baseNN architecture settings.
    model = baseNN_settings["model_"+str(args.model_idx)]

    train_loader, test_loader, inp_shape, out_size = data_loaders(dataset_name=model["dataset"], n_inputs=n_inputs,
                                                                  batch_size=128, shuffle=True)
    savedir = get_model_savedir(model=args.model, dataset=model["dataset"], architecture=model["architecture"],
                                baseiters=None, debug=args.debug, model_idx=args.model_idx, attack_method=args.attack_method)

    net = advNN(inp_shape, out_size, *list(model.values()), attack_method=args.attack_method)

    if args.load:
        net.load(savedir=savedir, device=args.device)
    else:
        net.train(train_loader=train_loader, savedir=savedir, device=args.device, hyperparams=attack_hyperparams)
    net.evaluate(test_loader=test_loader, device=args.device)

else:
    # Bayesian models: only fullBNN is currently supported.
    if args.model=="fullBNN":
        m = fullBNN_settings["model_"+str(args.model_idx)]

        # HMC needs much larger batches than variational inference.
        batch_size = 4000 if m["inference"] == "hmc" else 128
        # num_workers = 0 if args.device=="cuda" else 4

        train_loader, test_loader, inp_shape, out_size = data_loaders(dataset_name=m["dataset"], n_inputs=n_inputs,
                                                                      batch_size=batch_size, shuffle=True)
        savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                                    debug=args.debug, model_idx=args.model_idx)

        net = BNN(m["dataset"], *list(m.values())[1:], inp_shape, out_size)
    else:
        raise NotImplementedError

    # Posterior sample counts used at evaluation time.
    if args.debug:
        bayesian_samples = [1,5]
    else:
        bayesian_samples = [10, 50, 100]#[5,10,50]

    if args.load:
        net.load(savedir=savedir, device=args.device)
    else:
        net.train(train_loader=train_loader, savedir=savedir, device=args.device)

    for n_samples in bayesian_samples:
        net.evaluate(test_loader=test_loader, device=args.device, n_samples=n_samples)
| 4,377 | 38.441441 | 118 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp_rules_robustness.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F
from utils.data import *
from utils.model_settings import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.advNN import *
from networks.fullBNN import *
from utils.lrp import *
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from plot.lrp_distributions import significance_symbol
from scipy.stats import mannwhitneyu as stat_test
# ----- Command-line interface -----
parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--topk", default=20, type=int)
parser.add_argument("--n_samples", default=100, type=int)
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--load", type=eval, default="False", help="If True load dataframe else build it.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")

# LRP rules to compare and the one-sided alternative for the rank tests.
rules_list = ['epsilon','gamma','alpha1beta0']
alternative = 'less'

args = parser.parse_args()
lrp_robustness_method = "imagewise"
n_inputs=100 if args.debug else args.n_inputs

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

### Load models and attacks

## baseNN — deterministic reference network.
m = baseNN_settings["model_"+str(args.model_idx)]

x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=m["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
det_model_savedir = get_model_savedir(model="baseNN", dataset=m["dataset"], architecture=m["architecture"],
                                      debug=args.debug, model_idx=args.model_idx)
detnet = baseNN(inp_shape, num_classes, *list(m.values()))
detnet.load(savedir=det_model_savedir, device=args.device)
det_attacks = load_attack(method=args.attack_method, model_savedir=det_model_savedir)

det_predictions, det_atk_predictions, det_softmax_robustness, det_successful_idxs, det_failed_idxs = \
    evaluate_attack(net=detnet, x_test=x_test, x_attack=det_attacks, y_test=y_test,
                    device=args.device, return_classification_idxs=True)

## advNN — adversarially trained counterpart (fgsm training).
adv_model_savedir = get_model_savedir(model="advNN", dataset=m["dataset"], architecture=m["architecture"],
                                      debug=args.debug, model_idx=args.model_idx, attack_method='fgsm')
advnet = advNN(inp_shape, num_classes, *list(m.values()), attack_method='fgsm')
advnet.load(savedir=adv_model_savedir, device=args.device)
adv_attacks = load_attack(method=args.attack_method, model_savedir=adv_model_savedir)

adv_predictions, adv_atk_predictions, adv_softmax_robustness, adv_successful_idxs, adv_failed_idxs = \
    evaluate_attack(net=advnet, x_test=x_test, x_attack=adv_attacks, y_test=y_test,
                    device=args.device, return_classification_idxs=True)

## fullBNN — Bayesian network, evaluated with args.n_samples posterior draws.
m = fullBNN_settings["model_"+str(args.model_idx)]
bay_model_savedir = get_model_savedir(model="fullBNN", dataset=m["dataset"], architecture=m["architecture"],
                                      model_idx=args.model_idx, debug=args.debug)
bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
bayesnet.load(savedir=bay_model_savedir, device=args.device)
bay_attacks = load_attack(method=args.attack_method, model_savedir=bay_model_savedir, n_samples=args.n_samples)

bay_predictions, bay_atk_predictions, bay_softmax_robustness, bay_successful_idxs, bay_failed_idxs = \
    evaluate_attack(net=bayesnet, n_samples=args.n_samples, x_test=x_test, x_attack=bay_attacks, y_test=y_test,
                    device=args.device, return_classification_idxs=True)

### Load or fill the dataframe

# failed_atks_im_idxs = np.intersect1d(bay_failed_idxs, np.intersect1d(adv_failed_idxs, det_failed_idxs))
# failed_atks_im_idxs = np.intersect1d(bay_failed_idxs, adv_failed_idxs)
# print("\nN. of common failed atks idxs =", len(failed_atks_im_idxs))

images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)

plot_savedir = os.path.join(bay_model_savedir, str(args.attack_method))
filename="rules_robustness_"+m["dataset"]+"_"+str(bayesnet.inference)+"_"+str(args.attack_method)\
         +"_images="+str(n_inputs)+"_samples="+str(args.n_samples)+"_topk="+str(args.topk)\
         +"_model_idx="+str(args.model_idx)

if args.load:
    df = load_from_pickle(path=plot_savedir, filename=filename)

else:
    df = pd.DataFrame()

    for rule in rules_list:
        for layer_idx in detnet.learnable_layers_idxs:

            # Pre-computed heatmaps for the deterministic network.
            savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method, rule=rule,
                                      layer_idx=layer_idx)
            det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
            det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

            # Adversarially trained network: heatmaps are stored in its own
            # savedir but under the same "det_lrp" filenames — presumably by
            # convention of the LRP computation script; verify against it.
            savedir = get_lrp_savedir(model_savedir=adv_model_savedir, attack_method=args.attack_method, rule=rule,
                                      layer_idx=layer_idx)
            adv_lrp = load_from_pickle(path=savedir, filename="det_lrp")
            adv_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

            # Bayesian network heatmaps, parameterized by sample count.
            savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
                                      rule=rule, layer_idx=layer_idx, lrp_method=args.lrp_method)
            bay_lrp = load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(args.n_samples))
            bay_attack_lrp = load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(args.n_samples))

            # Per-image LRP robustness of original vs adversarial heatmaps.
            det_robustness, det_pxl_idxs = lrp_robustness(
                original_heatmaps=det_lrp,
                adversarial_heatmaps=det_attack_lrp,
                topk=args.topk, method=lrp_robustness_method)
            adv_robustness, adv_pxl_idxs = lrp_robustness(
                original_heatmaps=adv_lrp,
                adversarial_heatmaps=adv_attack_lrp,
                topk=args.topk, method=lrp_robustness_method)
            bay_robustness, bay_pxl_idxs = lrp_robustness(
                original_heatmaps=bay_lrp,
                adversarial_heatmaps=bay_attack_lrp,
                topk=args.topk, method=lrp_robustness_method)

            # One-sided Mann-Whitney U tests between robustness distributions.
            _, adv_p = stat_test(x=det_robustness, y=adv_robustness, alternative=alternative)
            _, bay_p = stat_test(x=det_robustness, y=bay_robustness, alternative=alternative)
            _, p = stat_test(x=adv_robustness, y=bay_robustness, alternative=alternative)
            print("\np values =", adv_p, bay_p, p)

            # NOTE(review): DataFrame.append is deprecated in recent pandas
            # (removed in 2.0) — consider pd.concat when upgrading.
            for im_idx in range(len(det_robustness)):
            # for im_idx in failed_atks_im_idxs:
                df = df.append({'rule':rule, 'layer_idx':layer_idx, 'model':'Adv - Det',
                                'robustness_diff':adv_robustness[im_idx]-det_robustness[im_idx],
                                'p_value':p, 'adv_p_value':adv_p, 'bay_p_value':bay_p},
                               ignore_index=True)
                df = df.append({'rule':rule, 'layer_idx':layer_idx, 'model':f'Bay - Det',#\nsamp={args.n_samples}',
                                'robustness_diff':bay_robustness[im_idx]-det_robustness[im_idx],
                                'p_value':p, 'adv_p_value':adv_p, 'bay_p_value':bay_p},
                               ignore_index=True)

    save_to_pickle(data=df, path=plot_savedir, filename=filename)
### Plots
def plot_rules_robustness_diff(df, n_samples, learnable_layers_idxs, savedir, filename):
    """Plot per-layer boxplots of LRP robustness differences per rule.

    One subplot column per model comparison ('Adv - Det', 'Bay - Det'),
    one row per learnable layer. When several rules are present, each box
    is annotated with the significance symbol of its stored p-value.
    Saves the figure as `<savedir>/<filename>.png`.
    """
    os.makedirs(savedir, exist_ok=True)
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'size': 9})

    # Color palettes: warm for the adversarial column, cool for the Bayesian one.
    adv_col = plt.cm.get_cmap('rocket', 100)(np.linspace(0, 1, 13))[3:]
    bay_col = plt.cm.get_cmap('crest', 100)(np.linspace(0, 1, 10))[3:]
    palettes = [adv_col, bay_col]

    fig, ax = plt.subplots(len(learnable_layers_idxs), 2, figsize=(3, 4.5), sharex=True, sharey='row', dpi=150,
                           facecolor='w', edgecolor='k')
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.1)

    if len(list(df['rule'].unique()))>1:
        # Multi-rule layout: includes significance annotations per rule.
        for col_idx, model in enumerate(list(df['model'].unique())):
            palette = {"epsilon":palettes[col_idx][2], "gamma":palettes[col_idx][4], "alpha1beta0":palettes[col_idx][6]}

            for row_idx, layer_idx in enumerate(learnable_layers_idxs):
                temp_df = df[df['layer_idx']==layer_idx]
                # y position for significance text, just below the lowest box.
                y = min(temp_df['robustness_diff'])*1.2
                temp_df = temp_df[temp_df['model']==model]
                sns.boxplot(data=temp_df, ax=ax[row_idx, col_idx], x='rule', y='robustness_diff', orient='v', hue='rule',
                            palette=palette, dodge=False, flierprops={'markersize':3})

                # Annotate each rule's box with its significance symbol.
                for rule, x in zip(temp_df['rule'].unique(), [-0.3, 0.7, 1.7]):
                    rule_df = temp_df[temp_df['rule']==rule]
                    p_value = rule_df['p_value'].unique()[0]
                    assert len(rule_df['p_value'].unique())==1
                    significance = significance_symbol(p_value)
                    # if significance!='n.s.':
                    #     y = min(temp_df['robustness_diff'])-0.12
                    ax[row_idx, col_idx].text(x=x, y=y, s=significance, weight='bold', size=8, color=palette[rule])

                # Repaint boxes and their whisker/median lines with
                # semi-transparent versions of the rule colors.
                for i, patch in enumerate(ax[row_idx, col_idx].artists):
                    r, g, b, a = patch.get_facecolor()
                    col = (r, g, b, a)
                    patch.set_facecolor((r, g, b, .7))
                    patch.set_edgecolor(col)
                    # each box artist owns 6 consecutive Line2D objects
                    for j in range(i*6, i*6+6):
                        line = ax[row_idx, col_idx].lines[j]
                        line.set_color(col)
                        line.set_mfc(col)
                        line.set_mec(col)

                ax[0, col_idx].xaxis.set_label_position("top")
                ax[0, col_idx].set_xlabel(model, weight='bold', size=9)
                ax[row_idx, col_idx].set_ylabel("")
                # ax[1, 0].set_ylabel("LRP robustness diff.", size=8)
                ax[row_idx, 1].yaxis.set_label_position("right")
                ax[row_idx, 1].set_ylabel("Layer idx="+str(layer_idx), rotation=270, labelpad=10, weight='bold', size=9)
                ax[row_idx, col_idx].get_legend().remove()
                ax[row_idx, col_idx].set_xlabel("")
                ax[2, col_idx].set_xlabel("LRP rule", weight='bold', labelpad=5)
                ax[row_idx, col_idx].set_xticklabels([r'$\epsilon$',r'$\gamma$',r'$\alpha\beta$'])

        plt.subplots_adjust(hspace=0.07)
        plt.subplots_adjust(wspace=0.05)
        fig.subplots_adjust(bottom=0.12)

    else:
        # Single-rule layout: same figure without significance annotations.
        for col_idx, model in enumerate(list(df['model'].unique())):
            palette = {"epsilon":palettes[col_idx][2], "gamma":palettes[col_idx][4], "alpha1beta0":palettes[col_idx][6]}

            for row_idx, layer_idx in enumerate(learnable_layers_idxs):
                temp_df = df[df['layer_idx']==layer_idx]
                temp_df = temp_df[temp_df['model']==model]
                sns.boxplot(data=temp_df, ax=ax[row_idx, col_idx], x='rule', y='robustness_diff', orient='v', hue='rule',
                            palette=palette, dodge=False)

                # Repaint boxes/lines with semi-transparent rule colors.
                for i, patch in enumerate(ax[row_idx, col_idx].artists):
                    r, g, b, a = patch.get_facecolor()
                    col = (r, g, b, a)
                    patch.set_facecolor((r, g, b, .7))
                    patch.set_edgecolor(col)
                    for j in range(i*6, i*6+6):
                        line = ax[row_idx, col_idx].lines[j]
                        line.set_color(col)
                        line.set_mfc(col)
                        line.set_mec(col)

                ax[0, col_idx].xaxis.set_label_position("top")
                ax[0, col_idx].set_xlabel(model, weight='bold', size=9)
                ax[row_idx, col_idx].set_ylabel("")
                # ax[1, 0].set_ylabel("LRP robustness diff.", size=8)
                ax[row_idx, 1].yaxis.set_label_position("right")
                ax[row_idx, 1].set_ylabel("Layer idx="+str(layer_idx), rotation=270, labelpad=10, weight='bold', size=9)
                ax[row_idx, col_idx].get_legend().remove()
                ax[row_idx, col_idx].set_xlabel("")
                ax[2, col_idx].set_xlabel("LRP rule", weight='bold', labelpad=5)
                ax[row_idx, col_idx].set_xticklabels([r'$\epsilon$',r'$\gamma$',r'$\alpha\beta$'])

        plt.subplots_adjust(hspace=0.05)
        plt.subplots_adjust(wspace=0.05)
        # fig.subplots_adjust(left=0.3)
        fig.subplots_adjust(bottom=0.12)

    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    fig.savefig(os.path.join(savedir, filename+".png"))
    plt.close(fig)
# Render the final rule-robustness comparison figure.
plot_rules_robustness_diff(df=df,
                           n_samples=args.n_samples,
                           learnable_layers_idxs=detnet.learnable_layers_idxs,
                           savedir=os.path.join(TESTS,'figures/rules_robustness'),
                           filename=filename)
| 12,112 | 39.784512 | 118 | py |
BayesianRelevance | BayesianRelevance-master/src/full_test_cifar_bayesian_resnet.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from tqdm import tqdm
import random
from torch.utils.data import Subset, DataLoader
import bayesian_torch.bayesian_torch.models.bayesian.resnet_variational as resnet
from attacks.run_attacks import run_attack, save_attack, load_attack
from utils.lrp import *
# Collect all lowercase `resnet*` factory callables exposed by the
# variational ResNet module (e.g. resnet20, resnet32, ...).
model_names = sorted(
    name for name in resnet.__dict__
    if name.islower() and not name.startswith("__")
    and name.startswith("resnet") and callable(resnet.__dict__[name]))
print(model_names)

# CIFAR10 split sizes; len_trainset scales the KL term in the ELBO.
len_trainset = 50000
len_testset = 10000
# ----- Command-line interface -----
parser = argparse.ArgumentParser(description='CIFAR10')
parser.add_argument('--arch',
                    '-a',
                    metavar='ARCH',
                    default='resnet20',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: resnet20)')
parser.add_argument('-j',
                    '--workers',
                    default=8,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 8)')
parser.add_argument('--epochs',
                    default=200,
                    type=int,
                    metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch',
                    default=0,
                    type=int,
                    metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--lr',
                    '--learning-rate',
                    default=0.001,
                    type=float,
                    metavar='LR',
                    help='initial learning rate')
parser.add_argument('--momentum',
                    default=0.9,
                    type=float,
                    metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay',
                    '--wd',
                    default=1e-4,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq',
                    '-p',
                    default=50,
                    type=int,
                    metavar='N',
                    help='print frequency (default: 20)')
parser.add_argument('--resume',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
                    '--evaluate',
                    dest='evaluate',
                    action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained',
                    dest='pretrained',
                    action='store_true',
                    help='use pre-trained model')
parser.add_argument('--half',
                    dest='half',
                    action='store_true',
                    help='use half-precision(16-bit) ')
parser.add_argument('--save-dir',
                    dest='save_dir',
                    help='The directory used to save the trained models',
                    default='../experiments/fullBNN/cifar_resnet/',
                    type=str)
parser.add_argument(
    '--save-every',
    dest='save_every',
    help='Saves checkpoints at every specified number of epochs',
    type=int,
    default=10)
parser.add_argument('--num_mc',
                    type=int,
                    default=5,
                    metavar='N',
                    help='number of Monte Carlo runs during training')
parser.add_argument(
    '--tensorboard',
    type=bool,
    default=False,
    metavar='N',
    help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
    '--log_dir',
    type=str,
    default='./bayesian_torch/logs/cifar/bayesian',
    metavar='N',
    help='use tensorboard for logging and visualization of training progress')
parser.add_argument('--mode', type=str, default='test', help='train | test')
parser.add_argument(
    '--n_samples',
    type=int,
    default=100,
    metavar='N',
    help='number of Monte Carlo samples to be drawn during inference')
parser.add_argument(
    '--attack_method',
    type=str,
    default='fgsm',
    help='fgsm, pgd')
parser.add_argument(
    '--test_inputs',
    type=int,
    default=500)

# Best validation precision seen so far (updated by main()).
best_prec1 = 0
# Perturbation strength shared by all adversarial attacks in this script.
attack_hyperparams={'epsilon':0.2}
def MOPED_layer(layer, det_layer, delta):
    """
    Set the priors and initialize surrogate posteriors of Bayesian NN with Empirical Bayes
    MOPED (Model Priors with Empirical Bayes using Deterministic DNN)

    Copies weights from the deterministic layer `det_layer` into the
    matching Bayesian `layer`; `delta` controls the initial posterior
    scale (via the external `get_rho` helper — presumably the inverse
    softplus parametrization; confirm against its definition).

    Reference:
    [1] Ranganath Krishnan, Mahesh Subedar, Omesh Tickoo.
    Specifying Weight Priors in Bayesian Deep Neural Networks with Empirical Bayes. AAAI 2020.
    """
    # Branches dispatch on str(layer) for the reparameterized Bayesian
    # layers and on isinstance for plain torch layers.
    if (str(layer) == 'Conv2dReparameterization()'):
        # set the priors from the deterministic weights
        print(str(layer))
        layer.prior_weight_mu = det_layer.weight.data
        if layer.prior_bias_mu is not None:
            layer.prior_bias_mu = det_layer.bias.data

        # initialize surrogate posteriors
        layer.mu_kernel.data = det_layer.weight.data
        layer.rho_kernel.data = get_rho(det_layer.weight.data, delta)
        if layer.mu_bias is not None:
            layer.mu_bias.data = det_layer.bias.data
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
    elif (isinstance(layer, nn.Conv2d)):
        # plain (non-Bayesian) conv layer: straight weight copy
        print(str(layer))
        layer.weight.data = det_layer.weight.data
        if layer.bias is not None:
            layer.bias.data = det_layer.bias.data
    elif (str(layer) == 'LinearReparameterization()'):
        print(str(layer))
        layer.prior_weight_mu = det_layer.weight.data
        if layer.prior_bias_mu is not None:
            layer.prior_bias_mu = det_layer.bias.data

        # initialize the surrogate posteriors
        layer.mu_weight.data = det_layer.weight.data
        layer.rho_weight.data = get_rho(det_layer.weight.data, delta)
        if layer.mu_bias is not None:
            layer.mu_bias.data = det_layer.bias.data
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
    elif str(layer).startswith('Batch'):
        # batch-norm: copy affine parameters and running statistics
        print(str(layer))
        layer.weight.data = det_layer.weight.data
        if layer.bias is not None:
            layer.bias.data = det_layer.bias.data
        layer.running_mean.data = det_layer.running_mean.data
        layer.running_var.data = det_layer.running_var.data
        layer.num_batches_tracked.data = det_layer.num_batches_tracked.data
def main():
    """Entry point: build the Bayesian ResNet, then either train it or run
    the full test pipeline (evaluation, adversarial attacks, LRP heatmaps)
    depending on --mode."""
    global args, best_prec1
    args = parser.parse_args()

    # Check the save_dir exists or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    model = torch.nn.DataParallel(resnet.__dict__[args.arch]())

    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.evaluate, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    tb_writer = None
    # if args.tensorboard:
    #     logger_dir = os.path.join(args.log_dir, 'tb_logger')
    #     if not os.path.exists(logger_dir):
    #         os.makedirs(logger_dir)
    #     tb_writer = SummaryWriter(logger_dir)

    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    batch_size = 128 if args.mode=='train' else 100

    train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='../experiments/bayesian_torch/data',
        train=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ]),
        download=True),
        batch_size=batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)

    val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='../experiments/bayesian_torch/data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    if torch.cuda.is_available():
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss().cpu()

    if args.half:
        model.half()
        criterion.half()

    # NOTE(review): `optimizer` is not defined yet at this point, so
    # selecting --arch resnet110 would raise NameError here — confirm
    # before relying on this branch.
    if args.arch in ['resnet110']:
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * 0.1

    # NOTE(review): validate() is defined below with signature
    # (args, val_loader, model, criterion, epoch, ...); this call passes
    # only three positional args — verify before using --evaluate.
    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    if args.mode == 'train':
        for epoch in range(args.start_epoch, args.epochs):

            # Manual step-wise learning-rate schedule.
            lr = args.lr
            if (epoch >= 80 and epoch < 120):
                lr = 0.1 * args.lr
            elif (epoch >= 120 and epoch < 160):
                lr = 0.01 * args.lr
            elif (epoch >= 160 and epoch < 180):
                lr = 0.001 * args.lr
            elif (epoch >= 180):
                lr = 0.0005 * args.lr
            optimizer = torch.optim.Adam(model.parameters(), lr)

            # train for one epoch
            print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
            train(args, train_loader, model, criterion, optimizer, epoch,
                  tb_writer)

            prec1 = validate(args, val_loader, model, criterion, epoch,
                             tb_writer)

            # remember best prec@1 and save checkpoint only on improvement
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)

            if is_best:
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'state_dict': model.state_dict(),
                        'best_prec1': best_prec1,
                    },
                    is_best,
                    filename=os.path.join(
                        args.save_dir,
                        'bayesian_{}_cifar.pth'.format(args.arch)))

    elif args.mode == 'test':
        checkpoint_file = args.save_dir + '/bayesian_{}_cifar.pth'.format(
            args.arch)

        if torch.cuda.is_available():
            checkpoint = torch.load(checkpoint_file)
            device="cuda"
        else:
            checkpoint = torch.load(checkpoint_file, map_location=torch.device('cpu'))
            device="cpu"

        # print(model.state_dict().keys())
        # print(checkpoint['state_dict'].keys())
        # exit()

        state_dict = checkpoint['state_dict']
        model.load_state_dict(state_dict)
        evaluate(args, model, val_loader, n_samples=args.n_samples)
        # model = convert_resnet(model).to(device)

        # Adversarial attacks on the first `test_inputs` validation images.
        method=args.attack_method
        test_inputs = args.test_inputs
        n_samples = args.n_samples
        print(f"\nn_samples = {n_samples}")

        dataset = Subset(val_loader.dataset, range(test_inputs))
        images, labels = ([],[])
        for image, label in dataset:
            images.append(image)
            labels.append(label)
        images = torch.stack(images)

        # `attack` and the n_samples-aware `compute_lrp` come from the
        # star-imports at the top of the file (presumably utils.lrp) —
        # confirm; they are not defined in this module.
        bay_attack = attack(model, dataset, n_samples=n_samples, method=method, hyperparams=attack_hyperparams)
        save_attack(inputs=images, attacks=bay_attack, method=method, model_savedir=args.save_dir, n_samples=n_samples)
        # bay_attack = load_attack(method=method, model_savedir=args.save_dir, n_samples=n_samples)

        # print(images.min(), images.max(), bay_attack.min(), bay_attack.max())
        # evaluate(args, model, DataLoader(dataset=list(zip(images, labels))), n_samples=n_samples)
        # evaluate(args, model, DataLoader(dataset=list(zip(bay_attack, labels))), n_samples=n_samples)

        # LRP heatmaps for clean and attacked inputs, per rule and layer.
        for rule in ['epsilon','gamma','alpha1beta0']:
            learnable_layers_idxs=[38] #[0,2,14,26,38]
            for layer_idx in learnable_layers_idxs:
                print(f"\nlayer_idx = {layer_idx}")
                savedir = get_lrp_savedir(model_savedir=args.save_dir, attack_method=method,
                                          layer_idx=layer_idx, rule=rule)

                bay_lrp = compute_lrp(images, model, rule=rule, n_samples=n_samples, device=device)
                bay_attack_lrp = compute_lrp(bay_attack, model,
                                             device=device, rule=rule, n_samples=n_samples)
                save_to_pickle(bay_lrp, path=savedir, filename="bay_lrp_samp="+str(n_samples))
                save_to_pickle(bay_attack_lrp, path=savedir, filename="bay_attack_lrp_samp="+str(n_samples))
                # bay_lrp = load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(n_samples))
                # bay_attack_lrp = load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(n_samples))

                # Plot a fixed random selection of 10 images.
                set_seed(0)
                idxs = np.random.choice(len(images), 10, replace=False)
                original_images_plot = torch.stack([images[i].squeeze() for i in idxs])
                adversarial_images_plot = torch.stack([bay_attack[i].squeeze() for i in idxs])
                bay_lrp_heatmaps_plot = torch.stack([bay_lrp[i].squeeze() for i in idxs])
                bay_attack_lrp_heatmaps_plot = torch.stack([bay_attack_lrp[i].squeeze() for i in idxs])

                plot_lrp_grid(original_images=original_images_plot.detach().cpu(),
                              adversarial_images=adversarial_images_plot.detach().cpu(),
                              bay_lrp_heatmaps=bay_lrp_heatmaps_plot.detach().cpu(),
                              bay_attack_lrp_heatmaps=bay_attack_lrp_heatmaps_plot.detach().cpu(),
                              filename="lrp_samp="+str(n_samples), savedir=savedir)
def convert_resnet(module, modules=None):
    """Recursively convert a torch ResNet into an LRP-capable `Sequential`.

    Linear/Conv2d layers are replaced by their `lrp` counterparts; ReLUs
    are replaced with fresh non-inplace ReLUs; everything else is kept.
    On the outermost call (`modules is None`) the collected layers are
    wrapped in an `lrp.sequential.Sequential` and returned; recursive
    calls append into `modules` and return None.
    """
    import torch
    from lrp.sequential import Sequential
    from lrp.linear import Linear
    from lrp.conv import Conv2d

    conversion_table = {
        'Linear': Linear,
        'Conv2d': Conv2d
    }

    # First (outermost) call: collect converted children, then wrap them.
    if modules is None:
        modules = []
        for m in module.children():
            convert_resnet(m, modules=modules)

            # The original forward() flattens after the adaptive pooling,
            # which is not represented as a module, so this loop doesn't
            # pick it up. Insert an explicit Flatten to compensate.
            if isinstance(m, torch.nn.AdaptiveAvgPool2d):
                modules.append(torch.nn.Flatten())

        sequential = Sequential(*modules)
        return sequential

    # Recursion
    if isinstance(module, torch.nn.Sequential):
        for m in module.children():
            # BUG FIX: this previously called convert_vgg(), which is not
            # defined in this module and raised NameError for any model
            # containing an nn.Sequential (every ResNet does).
            convert_resnet(m, modules=modules)
    elif isinstance(module, torch.nn.Linear) or isinstance(module, torch.nn.Conv2d):
        class_name = module.__class__.__name__
        lrp_module = conversion_table[class_name].from_torch(module)
        modules.append(lrp_module)
    # maxpool is handled with gradient for the moment
    elif isinstance(module, torch.nn.ReLU):
        # avoid inplace operations. They might ruin PatternNet pattern
        # computations
        modules.append(torch.nn.ReLU())
    else:
        modules.append(module)
def train(args,
          train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          tb_writer=None):
    """Run one training epoch with the ELBO loss.

    The model returns (logits, kl) per forward pass; `args.num_mc` Monte
    Carlo passes are averaged, and the loss is cross-entropy plus the KL
    divergence scaled by the training-set size.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):

        # measure data loading time
        data_time.update(time.time() - end)

        if torch.cuda.is_available():
            target = target.cuda()
            input_var = input.cuda()
            target_var = target
        else:
            target = target.cpu()
            input_var = input.cpu()
            target_var = target

        if args.half:
            input_var = input_var.half()

        # compute output: average logits and KL over num_mc MC passes
        output_ = []
        kl_ = []
        for mc_run in range(args.num_mc):
            output, kl = model(input_var)
            output_.append(output)
            kl_.append(kl)
        output = torch.mean(torch.stack(output_), dim=0)
        kl = torch.mean(torch.stack(kl_), dim=0)
        cross_entropy_loss = criterion(output, target_var)
        # scale KL by the dataset size so the ELBO is per-example
        scaled_kl = kl / len_trainset

        #ELBO loss
        loss = cross_entropy_loss + scaled_kl
        '''
        #another way of computing gradients with multiple MC samples
        cross_entropy_loss = 0
        scaled_kl = 0
        for mc_run in range(args.num_mc):
            output, kl = model(input_var)
            cross_entropy_loss += criterion(output, target_var)
            scaled_kl += (kl/len_trainset)
        cross_entropy_loss = cross_entropy_loss/args.num_mc
        scaled_kl = scaled_kl/args.num_mc
        loss = cross_entropy_loss + scaled_kl
        #end
        '''

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1))

        if tb_writer is not None:
            tb_writer.add_scalar('train/cross_entropy_loss',
                                 cross_entropy_loss.item(), epoch)
            tb_writer.add_scalar('train/kl_div', scaled_kl.item(), epoch)
            tb_writer.add_scalar('train/elbo_loss', loss.item(), epoch)
            tb_writer.add_scalar('train/accuracy', prec1.item(), epoch)
            tb_writer.flush()
def plot_lrp_grid(original_images, adversarial_images, bay_lrp_heatmaps, bay_attack_lrp_heatmaps, filename, savedir):
    """Save a 4-row image grid: originals, their LRP heatmaps, the
    adversarial images, and the adversarial LRP heatmaps (one column
    per input)."""
    import matplotlib.pyplot as plt

    def _to_hwc(img):
        # channels-first -> channels-last for imshow; leave 2D maps alone.
        return img.permute(1, 2, 0) if len(img.shape) > 2 else img

    n_cols = len(original_images)
    fig, axes = plt.subplots(4, n_cols, figsize=(12, 4))
    rows = (original_images, bay_lrp_heatmaps, adversarial_images, bay_attack_lrp_heatmaps)
    for col in range(n_cols):
        for row, batch in enumerate(rows):
            axes[row, col].imshow(torch.clamp(_to_hwc(batch[col]), 0., 1.))

    os.makedirs(os.path.dirname(savedir + "/"), exist_ok=True)
    plt.savefig(os.path.join(savedir, filename + ".png"))
def validate(args, val_loader, model, criterion, epoch, tb_writer=None):
    """Evaluate on the validation set with the MC-averaged ELBO loss.

    Returns the running-average top-1 precision over the whole loader.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input_var = input.cuda()
                target_var = target.cuda()
            else:
                target = target.cpu()
                input_var = input.cpu()
                target_var = target.cpu()

            if args.half:
                input_var = input_var.half()

            # compute output: average logits and KL over num_mc MC passes
            output_ = []
            kl_ = []
            for mc_run in range(args.num_mc):
                output, kl = model(input_var)
                output_.append(output)
                kl_.append(kl)
            output = torch.mean(torch.stack(output_), dim=0)
            kl = torch.mean(torch.stack(kl_), dim=0)
            cross_entropy_loss = criterion(output, target_var)
            scaled_kl = kl / len_trainset

            #ELBO loss
            loss = cross_entropy_loss + scaled_kl

            output = output.float()
            loss = loss.float()

            # measure accuracy and record loss
            prec1 = accuracy(output.data, target)[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1))

            if tb_writer is not None:
                tb_writer.add_scalar('val/cross_entropy_loss',
                                     cross_entropy_loss.item(), epoch)
                tb_writer.add_scalar('val/kl_div', scaled_kl.item(), epoch)
                tb_writer.add_scalar('val/elbo_loss', loss.item(), epoch)
                tb_writer.add_scalar('val/accuracy', prec1.item(), epoch)
                tb_writer.flush()

    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))

    return top1.avg
def evaluate(args, model, val_loader, n_samples):
    """Monte-Carlo evaluation of a Bayesian model.

    Draws ``n_samples`` stochastic forward passes per batch (reseeding per run
    for reproducibility), averages the softmax outputs, prints test accuracy
    and throughput, and dumps probabilities and labels to .npy files.

    CLEANUP: removed the unused locals ``pred_probs_mc``, ``test_loss`` and
    ``correct`` that were initialized but never read.
    """
    output_list = []
    labels_list = []
    model.eval()
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            output_mc = []
            for mc_run in range(n_samples):
                # Fix the seed so posterior sample mc_run is reproducible.
                random.seed(mc_run)
                output, _ = model.forward(data)
                output_mc.append(output)
            output_ = torch.stack(output_mc)
            output_list.append(output_)
            labels_list.append(target)
        end = time.time()
        print("inference throughput: ", len(val_loader.dataset) / (end - begin),
              " images/s")
        # Reshape to (n_samples, n_images, n_classes) and average the
        # per-sample softmax to get the predictive mean.
        output = torch.stack(output_list)
        output = output.permute(1, 0, 2, 3)
        output = output.contiguous().view(n_samples, len(val_loader.dataset), -1)
        output = torch.nn.functional.softmax(output, dim=2)
        labels = torch.cat(labels_list)
        pred_mean = output.mean(dim=0)
        Y_pred = torch.argmax(pred_mean, axis=1)
        print('Test accuracy:',
              (Y_pred.data.cpu().numpy() == labels.data.cpu().numpy()).mean() *
              100)
        np.save('../experiments/bayesian_torch/probs_cifar_mc.npy', output.data.cpu().numpy())
        np.save('../experiments/bayesian_torch/cifar_test_labels_mc.npy', labels.data.cpu().numpy())
def attack(model, dataset, n_samples, method, hyperparams):
    """Craft adversarial examples against a stochastic model.

    For each (image, label) pair, ``n_samples`` attacks are run — one per
    posterior draw, reseeding before each so the draw is reproducible — and
    the perturbed images (clamped to [0, 1]) are averaged into a single
    adversarial example. Returns the stacked adversarials.
    """
    model.eval()
    adversarial_attacks = []
    for data, target in tqdm(dataset):
        data = data.unsqueeze(0)
        target = torch.tensor(target).unsqueeze(0)
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
            device = 'cuda'
        else:
            data, target = data.cpu(), target.cpu()
            device = 'cpu'
        samples_attacks=[]
        for idx in list(range(n_samples)):
            random.seed(idx)
            perturbed_image = run_attack(net=model, image=data, label=target, method=method,
                                         device=device, hyperparams=hyperparams).squeeze()
            perturbed_image = torch.clamp(perturbed_image, 0., 1.)
            samples_attacks.append(perturbed_image.unsqueeze(0))
        # Average the perturbed images over posterior samples.
        adversarial_attack = torch.stack(samples_attacks).mean(0)
        adversarial_attacks.append(adversarial_attack)
    adversarial_attacks = torch.cat(adversarial_attacks)
    return adversarial_attacks
def compute_lrp(x_test, network, rule, device, n_samples, avg_posterior=False):
    """Compute input-space LRP heatmaps for ``x_test``.

    For each image, the relevance map (gradient of the argmax logit w.r.t.
    the input, under the given LRP ``rule``) is computed for ``n_samples``
    posterior draws and averaged.

    NOTE(review): ``avg_posterior`` is accepted but never read here —
    confirm whether callers rely on it.
    """
    x_test = x_test.to(device)
    explanations = []
    for x in tqdm(x_test):
        post_explanations = []
        for idx in range(n_samples):
            # Forward pass
            x_copy = copy.deepcopy(x.detach()).unsqueeze(0)
            x_copy.requires_grad = True
            random.seed(idx)  # fix the posterior sample for this run
            y_hat = network.forward(x_copy, explain=True, rule=rule)[0]
            # Choose argmax
            y_hat = y_hat[torch.arange(x_copy.shape[0]), y_hat.max(1)[1]]
            y_hat = y_hat.sum()
            # Backward pass (compute explanation)
            y_hat.backward()
            lrp = x_copy.grad.squeeze(1)
            post_explanations.append(lrp)
        # Average relevance over posterior samples.
        post_explanations = torch.stack(post_explanations).mean(0).squeeze()
        explanations.append(post_explanations)
    return torch.stack(explanations)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize a training checkpoint dict to ``filename``.

    ``is_best`` is accepted for API compatibility but is not used here.
    """
    torch.save(state, filename)
class AverageMeter(object):
    """Tracks the most recent value and a running sum/count/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all recorded statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k.

    output: (batch, n_classes) logits; target: (batch,) class indices.
    Returns a list of 0-d tensors, one percentage per k in ``topk``.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): view() raises on non-contiguous
        # tensors in recent PyTorch (same fix as in pytorch/examples).
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
# Script entry point.
if __name__ == '__main__':
    main()
| 23,122 | 28.012547 | 145 | py |
BayesianRelevance | BayesianRelevance-master/src/full_test_cifar_adversarial_resnet.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from tqdm import tqdm
import random
from torch.utils.data import Subset, DataLoader
import bayesian_torch.bayesian_torch.models.deterministic.resnet as resnet
from attacks.run_attacks import run_attack, save_attack, load_attack
from utils.lrp import *
from full_test_cifar_bayesian_resnet import plot_lrp_grid
# Discover the resnet factory functions (resnet20, resnet32, ...) exported by
# the deterministic resnet module.
model_names = sorted(
    name for name in resnet.__dict__
    if name.islower() and not name.startswith("__")
    and name.startswith("resnet") and callable(resnet.__dict__[name]))
print(model_names)

# Command-line interface: architecture, optimizer hyperparameters,
# checkpointing, and attack/test options.
parser = argparse.ArgumentParser(description='CIFAR10')
parser.add_argument('--arch',
                    '-a',
                    metavar='ARCH',
                    default='resnet20',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: resnet20)')
parser.add_argument('-j',
                    '--workers',
                    default=8,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 8)')
parser.add_argument('--epochs',
                    default=150,
                    type=int,
                    metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch',
                    default=0,
                    type=int,
                    metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--lr',
                    '--learning-rate',
                    default=0.1,
                    type=float,
                    metavar='LR',
                    help='initial learning rate')
parser.add_argument('--momentum',
                    default=0.9,
                    type=float,
                    metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay',
                    '--wd',
                    default=1e-4,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq',
                    '-p',
                    default=50,
                    type=int,
                    metavar='N',
                    help='print frequency (default: 20)')
parser.add_argument('--resume',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
                    '--evaluate',
                    dest='evaluate',
                    action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained',
                    dest='pretrained',
                    action='store_true',
                    help='use pre-trained model')
parser.add_argument('--half',
                    dest='half',
                    action='store_true',
                    help='use half-precision(16-bit) ')
parser.add_argument('--save-dir',
                    dest='save_dir',
                    help='The directory used to save the trained models',
                    default='../experiments/advNN/cifar_resnet_atk=fgsm/',
                    type=str)
parser.add_argument(
    '--save-every',
    dest='save_every',
    help='Saves checkpoints at every specified number of epochs',
    type=int,
    default=10)
parser.add_argument(
    '--tensorboard',
    type=bool,
    default=False,
    metavar='N',
    help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
    '--log_dir',
    type=str,
    default='./bayesian_torch/logs/cifar/adversarial',
    metavar='N',
    help='use tensorboard for logging and visualization of training progress')
parser.add_argument('--mode', type=str, default='test', help='train | test')
parser.add_argument(
    '--attack_method',
    type=str,
    default='fgsm',
    help='fgsm, pgd')
parser.add_argument(
    '--test_inputs',
    type=int,
    default=500)

# Best validation accuracy so far (updated by main during training).
best_prec1 = 0
# Perturbation budget shared by training-time and test-time attacks.
attack_hyperparams={'epsilon':0.2}
def main():
    """Entry point: build the ResNet, then either adversarially train it
    (--mode train) or load a checkpoint, attack it, evaluate on the attacks
    and compute/plot LRP heatmaps (--mode test)."""
    global args, best_prec1
    args = parser.parse_args()
    # Check the save_dir exists or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model = torch.nn.DataParallel(resnet.__dict__[args.arch]())
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    print(model)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.evaluate, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    tb_writer = None
    # if args.tensorboard:
    #     logger_dir = os.path.join(args.log_dir, 'tb_logger')
    #     if not os.path.exists(logger_dir):
    #         os.makedirs(logger_dir)
    #     tb_writer = SummaryWriter(logger_dir)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    batch_size=512 if args.mode=='train' else 100
    train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='../experiments/bayesian_torch/data',
        train=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ]),
        download=True),
        batch_size=batch_size,
        shuffle=True,
        num_workers=args.workers,
        pin_memory=True)
    val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='../experiments/bayesian_torch/data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=batch_size,
        shuffle=False,
        num_workers=args.workers,
        pin_memory=True)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    if torch.cuda.is_available():
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss().cpu()
    if args.half:
        model.half()
        criterion.half()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[100, 150], last_epoch=args.start_epoch - 1)
    if args.arch in ['resnet110']:
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * 0.1
    if args.evaluate:
        # BUG FIX: the call was `validate(val_loader, model, criterion)`,
        # which does not match validate's signature
        # (args, val_loader, model, criterion, epoch, tb_writer) and raised a
        # TypeError before any validation ran.
        validate(args, val_loader, model, criterion, args.start_epoch, tb_writer)
        return
    if args.mode == 'train':
        for epoch in range(args.start_epoch, args.epochs):
            # train for one epoch
            print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
            train(args, train_loader, args.attack_method, model, criterion, optimizer, epoch,
                  tb_writer)
            lr_scheduler.step()
            prec1 = validate(args, val_loader, model, criterion, epoch,
                             tb_writer)
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            if epoch > 0 and epoch % args.save_every == 0:
                if is_best:
                    save_checkpoint(
                        {
                            'epoch': epoch + 1,
                            'state_dict': model.state_dict(),
                            'best_prec1': best_prec1,
                        },
                        is_best,
                        filename=os.path.join(
                            args.save_dir, '{}_cifar.pth'.format(args.arch)))
    elif args.mode == 'test':
        checkpoint_file = args.save_dir + '/{}_cifar.pth'.format(args.arch)
        if torch.cuda.is_available():
            checkpoint = torch.load(checkpoint_file)
            device="cuda"
        else:
            checkpoint = torch.load(checkpoint_file,
                                    map_location=torch.device('cpu'))
            device="cpu"
        model.load_state_dict(checkpoint['state_dict'])
        # evaluate(args, model, val_loader)
        # model = convert_resnet(model).to(device)
        # Adversarial attacks on the first test_inputs test images.
        method=args.attack_method
        test_inputs = args.test_inputs
        dataset = Subset(val_loader.dataset, range(test_inputs))
        images, labels = ([],[])
        for image, label in dataset:
            images.append(image)
            labels.append(label)
        images = torch.stack(images)
        attacks = attack(model, dataset, method=method, hyperparams=attack_hyperparams)
        save_attack(inputs=images, attacks=attacks, method=method, model_savedir=args.save_dir)
        # attacks = load_attack(method=method, model_savedir=args.save_dir)
        evaluate(args, model, DataLoader(dataset=list(zip(attacks, labels))))
        # LRP heatmaps for clean and adversarial inputs, for each rule.
        for rule in ['epsilon','gamma','alpha1beta0']:
            learnable_layers_idxs=[38]
            for layer_idx in learnable_layers_idxs:
                print(f"\nlayer_idx = {layer_idx}")
                savedir = get_lrp_savedir(model_savedir=args.save_dir, attack_method=method,
                                          layer_idx=layer_idx, rule=rule)
                det_lrp = compute_lrp(images, model, rule=rule, device=device)
                det_attack_lrp = compute_lrp(attacks, model, device=device, rule=rule)
                save_to_pickle(det_lrp, path=savedir, filename="det_lrp")
                save_to_pickle(det_attack_lrp, path=savedir, filename="det_attack_lrp")
                # Plot a fixed random selection of 10 images.
                set_seed(0)
                idxs = np.random.choice(len(images), 10, replace=False)
                original_images_plot = torch.stack([images[i].squeeze() for i in idxs])
                adversarial_images_plot = torch.stack([attacks[i].squeeze() for i in idxs])
                lrp_heatmaps_plot = torch.stack([det_lrp[i].squeeze() for i in idxs])
                attack_lrp_heatmaps_plot = torch.stack([det_attack_lrp[i].squeeze() for i in idxs])
                plot_lrp_grid(original_images=original_images_plot.detach().cpu(),
                              adversarial_images=adversarial_images_plot.detach().cpu(),
                              bay_lrp_heatmaps=lrp_heatmaps_plot.detach().cpu(),
                              bay_attack_lrp_heatmaps=attack_lrp_heatmaps_plot.detach().cpu(),
                              filename="lrp", savedir=savedir)
def convert_resnet(module, modules=None):
    """Flatten a torch model into an lrp.Sequential of LRP-capable layers.

    Linear/Conv2d modules are converted to their LRP counterparts, ReLUs are
    replaced with non-inplace ReLUs, and an AdaptiveAvgPool2d is followed by
    an explicit Flatten module (the functional flatten in the original
    forward is not a child module, so it would otherwise be lost).
    """
    import torch
    from lrp.sequential import Sequential
    from lrp.linear import Linear
    from lrp.conv import Conv2d
    conversion_table = {
        'Linear': Linear,
        'Conv2d': Conv2d
    }
    # First time
    if modules is None:
        modules = []
        for m in module.children():
            convert_resnet(m, modules=modules)
            # The model flattens after the average pool, which is not
            # represented as a module, so this loop doesn't pick it up.
            # This is a hack to make things work.
            if isinstance(m, torch.nn.AdaptiveAvgPool2d):
                modules.append(torch.nn.Flatten())
        sequential = Sequential(*modules)
        return sequential
    # Recursion
    if isinstance(module, torch.nn.Sequential):
        for m in module.children():
            # BUG FIX: this recursive call was `convert_vgg(...)`, a name that
            # does not exist in this module (copied from a VGG converter) and
            # raised NameError on any nested Sequential.
            convert_resnet(m, modules=modules)
    elif isinstance(module, torch.nn.Linear) or isinstance(module, torch.nn.Conv2d):
        class_name = module.__class__.__name__
        lrp_module = conversion_table[class_name].from_torch(module)
        modules.append(lrp_module)
    # maxpool is handled with gradient for the moment
    elif isinstance(module, torch.nn.ReLU):
        # avoid inplace operations. They might ruin PatternNet pattern
        # computations
        modules.append(torch.nn.ReLU())
    else:
        modules.append(module)
def train(args,
          train_loader,
          attack_method,
          model,
          criterion,
          optimizer,
          epoch,
          tb_writer=None):
    """Run one epoch of adversarial training: for each batch, craft attacks
    against the current model (in eval mode) and take an SGD step on the
    adversarial examples."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if torch.cuda.is_available():
            target = target.cuda()
            input_var = input.cuda()
            device="cuda"
        else:
            target = target.cpu()
            input_var = input.cpu()
            device="cpu"
        target_var = target
        if args.half:
            input_var = input_var.half()
        # compute attacks against the current weights (model in eval mode)
        model.eval()
        adv_attacks = attack(model=model, dataset=list(zip(input_var, target_var)),
                             method=attack_method, hyperparams=attack_hyperparams)
        # switch to train mode
        model.train()
        # compute output on the adversarial batch
        output = model(adv_attacks)
        loss = criterion(output, target_var)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1))
        if tb_writer is not None:
            tb_writer.add_scalar('train/loss', loss.item(), epoch)
            tb_writer.add_scalar('train/accuracy', prec1.item(), epoch)
            tb_writer.flush()
def validate(args, val_loader, model, criterion, epoch, tb_writer=None):
    """Standard (non-adversarial) validation pass.

    Returns the average top-1 precision over the validation set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    model.eval()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input_var = input.cuda()
                target_var = target.cuda()
            else:
                target = target.cpu()
                input_var = input.cpu()
                target_var = target.cpu()
            if args.half:
                input_var = input_var.half()
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            output = output.float()
            loss = loss.float()
            # measure accuracy and record loss
            prec1 = accuracy(output.data, target)[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1))
            if tb_writer is not None:
                tb_writer.add_scalar('val/loss', loss.item(), epoch)
                tb_writer.add_scalar('val/accuracy', prec1.item(), epoch)
                tb_writer.flush()
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg
def evaluate(args, model, val_loader):
    """Deterministic evaluation: print accuracy and throughput, then dump
    softmax probabilities and labels to .npy files.

    FIXES: throughput was computed against a hard-coded 10000 images instead
    of the actual dataset size (inconsistent with the Bayesian evaluate);
    removed the unused local ``target_labels``.
    """
    model.eval()
    correct = 0
    output_list = []
    labels_list = []
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            output = model(data)
            output = torch.nn.functional.softmax(output, dim=1)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            output_list.append(output)
            labels_list.append(target)
        end = time.time()
        print("inference throughput: ", len(val_loader.dataset) / (end - begin),
              " images/s")
        output = torch.cat(output_list)
        target = torch.cat(labels_list)
        print('\nTest Accuracy: {:.2f}%\n'.format(100. * correct /
                                                  len(val_loader.dataset)))
        np.save('../experiments/bayesian_torch/probs_cifar_det.npy', output.data.cpu().numpy())
        np.save('../experiments/bayesian_torch/cifar_test_labels.npy', target.data.cpu().numpy())
def attack(model, dataset, method, hyperparams):
    """Craft one adversarial example per (image, label) pair in ``dataset``.

    Each example is perturbed with ``run_attack`` and clamped to [0, 1];
    the stacked adversarial images are returned.
    """
    model.eval()
    use_cuda = torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    perturbed_images = []
    for data, target in tqdm(dataset):
        data = data.unsqueeze(0)
        target = torch.tensor(target).unsqueeze(0)
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        else:
            data, target = data.cpu(), target.cpu()
        adv = run_attack(net=model, image=data, label=target, method=method,
                         device=device, hyperparams=hyperparams).squeeze()
        perturbed_images.append(torch.clamp(adv, 0., 1.))
    return torch.stack(perturbed_images)
def compute_lrp(x_test, network, rule, device):
    """Compute input-space LRP heatmaps for ``x_test`` under a deterministic
    network: the relevance of each image is the gradient of its argmax logit
    w.r.t. the input, under the given LRP ``rule``."""
    x_test = x_test.to(device)
    explanations = []
    for x in tqdm(x_test):
        # Forward pass
        x_copy = copy.deepcopy(x.detach()).unsqueeze(0)
        x_copy.requires_grad = True
        y_hat = network.forward(x_copy, explain=True, rule=rule)
        # Choose argmax
        y_hat = y_hat[torch.arange(x_copy.shape[0]), y_hat.max(1)[1]]
        y_hat = y_hat.sum()
        # Backward pass (compute explanation)
        y_hat.backward()
        lrp = x_copy.grad.squeeze(1)
        explanations.append(lrp)
    explanations = torch.stack(explanations)
    return explanations
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize a training checkpoint dict to ``filename``.

    ``is_best`` is accepted for API compatibility but is not used here.
    """
    torch.save(state, filename)
class AverageMeter(object):
    """Tracks the most recent value and a running sum/count/average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all recorded statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k.

    output: (batch, n_classes) logits; target: (batch,) class indices.
    Returns a list of 0-d tensors, one percentage per k in ``topk``.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): view() raises on non-contiguous
        # tensors in recent PyTorch (same fix as in pytorch/examples).
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
# Script entry point.
if __name__ == '__main__':
    main()
| 17,052 | 26.328526 | 90 | py |
BayesianRelevance | BayesianRelevance-master/src/deterministic_atk_vs_bayesian_net.py | import os
import torch
import argparse
import numpy as np
from utils.data import *
from utils import savedir
from utils.seeding import *
from attacks.gradient_based import *
from networks.baseNN import *
from networks.fullBNN import *
from networks.redBNN import *
# CLI: which Bayesian model family to evaluate and how to attack it.
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="fullBNN", type=str, help="baseNN, fullBNN, redBNN")
parser.add_argument("--model_idx", default=0, type=int, help="choose model idx from pre defined settings")
parser.add_argument("--inference", default="svi", type=str, help="svi, hmc")
parser.add_argument("--load", default=True, type=eval)
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--atk_inputs", default=1000, type=int, help="number of input points")
parser.add_argument("--layer_idx", default=-1, type=int)
parser.add_argument("--debug", default=False, type=eval)
parser.add_argument("--device", default='cpu', type=str, help="cpu, cuda")
args = parser.parse_args()

# Shrink the workload in debug mode.
n_inputs=100 if args.debug else None
atk_inputs=100 if args.debug else args.atk_inputs
print("PyTorch Version: ", torch.__version__)
if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

# Deterministic base network: load (or train) it and load (or craft) the
# adversarial attacks that are later transferred to the Bayesian model.
model = baseNN_settings["model_"+str(args.model_idx)]
x_test, y_test, inp_shape, out_size = load_dataset(dataset_name=model["dataset"], n_inputs=atk_inputs)[2:]
savedir = get_savedir(model="baseNN", dataset=model["dataset"], architecture=model["architecture"],
                      baseiters=None, debug=args.debug, model_idx=args.model_idx)
net = baseNN(inp_shape, out_size, *list(model.values()))
if args.load:
    net.load(savedir=savedir, device=args.device)
    x_attack = load_attack(method=args.attack_method, filename=net.name, savedir=savedir)
else:
    x_train, y_train, _, _, inp_shape, out_size = load_dataset(dataset_name=model["dataset"], n_inputs=n_inputs)
    train_loader = DataLoader(dataset=list(zip(x_train, y_train)), batch_size=128, shuffle=True)
    net.train(train_loader=train_loader, savedir=savedir, device=args.device)
    x_attack = attack(net=net, x_test=x_test, y_test=y_test, savedir=savedir,
                      device=args.device, method=args.attack_method, filename=net.name)
attack_evaluation(net=net, x_test=x_test, x_attack=x_attack, y_test=y_test, device=args.device)

# Build the Bayesian network under test.
if args.model=="fullBNN":
    m = fullBNN_settings["model_"+str(args.model_idx)]
    x_train, y_train, _, _, inp_shape, out_size = load_dataset(dataset_name=m["dataset"], n_inputs=n_inputs)
    x_test, y_test = load_dataset(dataset_name=m["dataset"], n_inputs=atk_inputs)[2:4]
    savedir = get_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                          debug=args.debug, model_idx=args.model_idx)
    net = BNN(m["dataset"], *list(m.values())[1:], inp_shape, out_size)
elif args.model=="redBNN":
    m = redBNN_settings["model_"+str(args.model_idx)]
    base_m = baseNN_settings["model_"+str(m["baseNN_idx"])]
    x_train, y_train, _, _, inp_shape, out_size = load_dataset(dataset_name=m["dataset"], n_inputs=n_inputs)
    x_test, y_test = load_dataset(dataset_name=m["dataset"], n_inputs=atk_inputs)[2:4]
    savedir = get_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                          debug=args.debug, model_idx=args.model_idx)
    basenet = baseNN(inp_shape, out_size, *list(base_m.values()))
    basenet_savedir = get_savedir(model="baseNN", dataset=m["dataset"],
                                  architecture=m["architecture"], debug=args.debug, model_idx=m["baseNN_idx"])
    basenet.load(savedir=basenet_savedir, device=args.device)
    hyp = get_hyperparams(m)
    net = redBNN(dataset_name=m["dataset"], inference=m["inference"], base_net=basenet, hyperparams=hyp,
                 layer_idx=args.layer_idx)
else:
    raise NotImplementedError
# Number of posterior samples used at defence time (reduced when debugging).
if args.debug:
    bayesian_defence_samples=[1]
else:
    if m["inference"]=="svi":
        bayesian_defence_samples=[1,10,50]
    elif m["inference"]=="hmc":
        bayesian_defence_samples=[1,5,10]

if args.load:
    net.load(savedir=savedir, device=args.device)
else:
    # BUG FIX: this read `max(bayesian_attack_samples)`, a name never defined
    # in this script (NameError when training an HMC model); the list defined
    # above is `bayesian_defence_samples`.
    batch_size = int(len(x_train)/max(bayesian_defence_samples)) if m["inference"] == "hmc" else 128
    num_workers = 0 if args.device=="cuda" else 4
    train_loader = DataLoader(dataset=list(zip(x_train, y_train)), batch_size=batch_size,
                              num_workers=num_workers, shuffle=True)
    net.train(train_loader=train_loader, savedir=savedir, device=args.device)

# Evaluate the deterministic attack against the Bayesian defence for each
# number of posterior samples.
for n_samples in bayesian_defence_samples:
    attack_evaluation(net=net, x_test=x_test, x_attack=x_attack, y_test=y_test,
                      device=args.device, n_samples=n_samples)
| 4,760 | 41.132743 | 112 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp_rules_robustness_cifar.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F
from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.fullBNN import *
from utils.lrp import *
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from plot.lrp_distributions import significance_symbol
from scipy.stats import mannwhitneyu as stat_test
# CLI options for the rule-robustness comparison on CIFAR.
parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--topk", default=20, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--n_samples", default=100, type=int)
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--load", type=eval, default="False", help="If True load dataframe else build it.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()

lrp_robustness_method = "imagewise"
n_inputs=100 if args.debug else args.n_inputs
# One-sided alternative for the Mann-Whitney tests below.
alternative = 'less'
print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

# Only the last learnable resnet layer is inspected.
learnable_layers_idxs = [38]

# Precomputed attacks for the deterministic, adversarially trained and
# Bayesian models.
det_savedir = '../experiments/baseNN/cifar_resnet/'
det_attacks = load_attack(method=args.attack_method, model_savedir=det_savedir)
adv_savedir = '../experiments/advNN/cifar_resnet_atk=fgsm/'
adv_attacks = load_attack(method=args.attack_method, model_savedir=adv_savedir)
bay_savedir = '../experiments/fullBNN/cifar_resnet/'
bay_attacks = load_attack(method=args.attack_method, model_savedir=bay_savedir, n_samples=args.n_samples)
plot_savedir = os.path.join(bay_savedir, str(args.attack_method))
filename="rules_robustness_cifar_svi_"+str(args.attack_method)+"_images="+str(n_inputs)\
         +"_samples="+str(args.n_samples)+"_topk="+str(args.topk)
# Build (or load) a long-format DataFrame with one row per image and per
# model comparison: the difference in LRP robustness against the
# deterministic baseline, plus the Mann-Whitney p-values.
if args.load:
    df = load_from_pickle(path=plot_savedir, filename=filename)
else:
    # FIX: accumulate rows in a list and build the DataFrame once at the end;
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    rows = []
    rules_list = ['epsilon','gamma','alpha1beta0']
    for rule in rules_list:
        for layer_idx in learnable_layers_idxs:
            savedir = get_lrp_savedir(model_savedir=det_savedir, attack_method=args.attack_method,
                                      layer_idx=layer_idx, rule=rule)
            det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
            det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")
            savedir = get_lrp_savedir(model_savedir=adv_savedir, attack_method=args.attack_method,
                                      layer_idx=layer_idx, rule=rule)
            adv_lrp = load_from_pickle(path=savedir, filename="det_lrp")
            adv_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")
            savedir = get_lrp_savedir(model_savedir=bay_savedir, attack_method=args.attack_method,
                                      layer_idx=layer_idx, rule=rule)
            bay_lrp = load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(args.n_samples))
            bay_attack_lrp = load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(args.n_samples))
            det_robustness, det_pxl_idxs = lrp_robustness(original_heatmaps=det_lrp, adversarial_heatmaps=det_attack_lrp,
                                                          topk=args.topk, method=lrp_robustness_method)
            adv_robustness, adv_pxl_idxs = lrp_robustness(original_heatmaps=adv_lrp, adversarial_heatmaps=adv_attack_lrp,
                                                          topk=args.topk, method=lrp_robustness_method)
            bay_robustness, bay_pxl_idxs = lrp_robustness(original_heatmaps=bay_lrp, adversarial_heatmaps=bay_attack_lrp,
                                                          topk=args.topk, method=lrp_robustness_method)
            # One-sided Mann-Whitney tests between robustness distributions.
            _, adv_p = stat_test(x=det_robustness, y=adv_robustness, alternative=alternative)
            _, bay_p = stat_test(x=det_robustness, y=bay_robustness, alternative=alternative)
            _, p = stat_test(x=adv_robustness, y=bay_robustness, alternative=alternative)
            print("\np values =", adv_p, bay_p, p)
            for im_idx in range(len(det_robustness)):
                rows.append({'rule':rule, 'layer_idx':layer_idx, 'model':'Adv - Det',
                             'robustness_diff':adv_robustness[im_idx]-det_robustness[im_idx],
                             'p_value':p, 'adv_p_value':adv_p, 'bay_p_value':bay_p})
                rows.append({'rule':rule, 'layer_idx':layer_idx, 'model':'Bay - Det',
                             'robustness_diff':bay_robustness[im_idx]-det_robustness[im_idx],
                             'p_value':p, 'adv_p_value':adv_p, 'bay_p_value':bay_p})
    df = pd.DataFrame(rows)
    save_to_pickle(data=df, path=plot_savedir, filename=filename)
### Plots
def plot_rules_robustness(df, n_samples, learnable_layers_idxs, savedir, filename):
    """Boxplots of per-image LRP-robustness differences (Adv-Det, Bay-Det),
    one panel per model comparison, grouped by LRP rule and annotated with
    the significance of the Mann-Whitney tests. Saves the figure to
    ``savedir/filename.png``.

    NOTE(review): the axes are indexed as a 1-D pair ``ax[col_idx]``, which
    assumes len(learnable_layers_idxs) == 1 — confirm before adding layers.
    """
    print(df.head())
    os.makedirs(savedir, exist_ok=True)
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'size': 9})
    # Separate color maps for the two panels.
    det_col = plt.cm.get_cmap('rocket', 100)(np.linspace(0, 1, 13))[3:]
    bay_col = plt.cm.get_cmap('crest', 100)(np.linspace(0, 1, 10))[3:]
    palettes = [det_col, bay_col]
    fig, ax = plt.subplots(len(learnable_layers_idxs), 2, figsize=(3, 2), sharex=True, sharey=True, dpi=150,
                           facecolor='w', edgecolor='k')
    fig.tight_layout()
    for col_idx, model in enumerate(list(df['model'].unique())):
        palette = {"epsilon":palettes[col_idx][2], "gamma":palettes[col_idx][4], "alpha1beta0":palettes[col_idx][6]}
        for row_idx, layer_idx in enumerate(learnable_layers_idxs):
            temp_df = df[df['layer_idx']==layer_idx]
            y = min(temp_df['robustness_diff'])*1.2
            temp_df = temp_df[temp_df['model']==model]
            sns.boxplot(data=temp_df, ax=ax[col_idx], x='rule', y='robustness_diff', orient='v', hue='rule',
                        palette=palette, dodge=False, flierprops={'markersize':3})
            # Annotate each rule's box with its significance symbol.
            for rule, x in zip(temp_df['rule'].unique(), [-0.3, 0.7, 1.7]):
                rule_df = temp_df[temp_df['rule']==rule]
                p_value = rule_df['p_value'].unique()[0]
                assert len(rule_df['p_value'].unique())==1
                significance = significance_symbol(p_value)
                # if significance!='n.s.':
                #     y = min(temp_df['robustness_diff'])-0.12
                ax[col_idx].text(x=x, y=y, s=significance, weight='bold', size=8, color=palette[rule])
            # Recolor boxes semi-transparent and match whiskers/fliers to the
            # box color (each box owns 6 consecutive Line2D artists).
            for i, patch in enumerate(ax[col_idx].artists):
                r, g, b, a = patch.get_facecolor()
                col = (r, g, b, a)
                patch.set_facecolor((r, g, b, .7))
                patch.set_edgecolor(col)
                for j in range(i*6, i*6+6):
                    line = ax[col_idx].lines[j]
                    line.set_color(col)
                    line.set_mfc(col)
                    line.set_mec(col)
            ax[col_idx].xaxis.set_label_position("top")
            ax[col_idx].set_xlabel(model, weight='bold', size=9)
            ax[col_idx].set_ylabel("")
            ax[0].set_ylabel("LRP robustness diff.")
            ax[1].yaxis.set_label_position("right")
            ax[1].set_ylabel("Layer idx="+str(layer_idx), rotation=270, labelpad=10, weight='bold', size=9)
            ax[col_idx].get_legend().remove()
            ax[col_idx].set_xlabel("")
            ax[0].set_xlabel("Adv - Det")
            ax[1].set_xlabel("Bay - Det")
            ax[col_idx].text(x=0.4, y=-0.48, s="LRP rule", weight='bold')
            ax[col_idx].set_xticklabels([r'$\epsilon$',r'$\gamma$',r'$\alpha\beta$'])
    plt.subplots_adjust(hspace=0.05)
    plt.subplots_adjust(wspace=0.05)
    fig.subplots_adjust(left=0.15)
    fig.subplots_adjust(bottom=0.2)
    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    fig.savefig(os.path.join(savedir, filename+".png"))
    plt.close(fig)
# Render and save the rule-robustness boxplot figure.
plot_rules_robustness(df=df,
                      n_samples=args.n_samples,
                      learnable_layers_idxs=learnable_layers_idxs,
                      savedir=os.path.join(TESTS,'figures/rules_robustness'),
                      filename=filename)
| 8,959 | 44.025126 | 122 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp_heatmaps_det_vs_bay.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F

from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.fullBNN import *
from networks.redBNN import *

from utils.lrp import *
from plot.lrp_heatmaps import plot_heatmaps_det_vs_bay
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *

# Compare LRP heatmaps of a deterministic network vs. a Bayesian network with
# the same model_idx on a test image where both attacks failed, and save a
# side-by-side figure of the clean/adversarial heatmaps.

parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--model_idx", default=2, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--topk", default=30, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--n_samples", default=100, type=int)
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation.")
parser.add_argument("--normalize", default=False, type=eval, help="Normalize lrp heatmaps.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()

# LRP robustness is computed imagewise on the topk most relevant pixels.
lrp_robustness_method = "imagewise"
n_inputs=100 if args.debug else args.n_inputs

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

### Load models and attacks

# Deterministic baseline network and its precomputed attacks.
m = baseNN_settings["model_"+str(args.model_idx)]

x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=m["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
det_model_savedir = get_model_savedir(model="baseNN", dataset=m["dataset"], architecture=m["architecture"],
                                      debug=args.debug, model_idx=args.model_idx)

detnet = baseNN(inp_shape, num_classes, *list(m.values()))
detnet.load(savedir=det_model_savedir, device=args.device)

det_attacks = load_attack(method=args.attack_method, model_savedir=det_model_savedir)

det_predictions, det_atk_predictions, det_softmax_robustness, det_successful_idxs, det_failed_idxs = \
        evaluate_attack(net=detnet, x_test=x_test, x_attack=det_attacks, y_test=y_test,
                        device=args.device, return_classification_idxs=True)

# Full Bayesian network and attacks crafted with n_samples posterior samples.
m = fullBNN_settings["model_"+str(args.model_idx)]

x_test, y_test, inp_shape, out_size = load_dataset(dataset_name=m["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
bay_model_savedir = get_model_savedir(model="fullBNN", dataset=m["dataset"], architecture=m["architecture"],
                                      model_idx=args.model_idx, debug=args.debug)

bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
bayesnet.load(savedir=bay_model_savedir, device=args.device)

bay_attacks = load_attack(method=args.attack_method, model_savedir=bay_model_savedir, n_samples=args.n_samples)

bay_predictions, bay_atk_predictions, bay_softmax_robustness, bay_successful_idxs, bay_failed_idxs = \
        evaluate_attack(net=bayesnet, n_samples=args.n_samples, x_test=x_test, x_attack=bay_attacks, y_test=y_test,
                        device=args.device, return_classification_idxs=True)

images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)

### Load explanations

# Only the last learnable layer is inspected by this script.
layer_idx = detnet.learnable_layers_idxs[-1]

savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method, rule=args.rule,
                          layer_idx=layer_idx)
det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
                          rule=args.rule, layer_idx=layer_idx, lrp_method=args.lrp_method)
bay_lrp = load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(args.n_samples))
bay_attack_lrp = load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(args.n_samples))

# Optionally rescale every heatmap in place before comparing them.
if args.normalize:
    for im_idx in range(det_lrp.shape[0]):
        det_lrp[im_idx] = normalize(det_lrp[im_idx])
        det_attack_lrp[im_idx] = normalize(det_attack_lrp[im_idx])
        bay_lrp[im_idx] = normalize(bay_lrp[im_idx])
        bay_attack_lrp[im_idx] = normalize(bay_attack_lrp[im_idx])

# Imagewise robustness of the heatmaps (clean vs adversarial) on topk pixels.
det_robustness, det_pxl_idxs = lrp_robustness(original_heatmaps=det_lrp, adversarial_heatmaps=det_attack_lrp,
                                              topk=args.topk, method=lrp_robustness_method)
bay_robustness, bay_pxl_idxs = lrp_robustness(original_heatmaps=bay_lrp, adversarial_heatmaps=bay_attack_lrp,
                                              topk=args.topk, method=lrp_robustness_method)

### Select failed attack

set_seed(0)

# Among images where both attacks failed (and the Bayesian explanation is not
# perfectly robust), pick the one with the least robust det explanation.
shared_failed_idxs = np.intersect1d(det_failed_idxs, bay_failed_idxs)
shared_failed_idxs = shared_failed_idxs[np.where(bay_robustness[shared_failed_idxs]!=1.)]
im_idx = shared_failed_idxs[np.argmin(det_robustness[shared_failed_idxs])]

print("\ndet LRP robustness =", det_robustness[im_idx])
print("bay LRP robustness =", bay_robustness[im_idx])

# print((images[im_idx]-det_attacks[im_idx]).abs().max())
# exit()

# L2 distance between the clean image and each adversarial example.
print("\ndet distance =", torch.norm(images[im_idx]-det_attacks[im_idx], 2).item())
print("bay distance =", torch.norm(images[im_idx]-bay_attacks[im_idx], 2).item())

### Plots

# savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
#                           rule=args.rule, lrp_method=args.lrp_method)
filename="heatmaps_det_vs_bay_"+m["dataset"]+"_topk="+str(args.topk)+"_rule="+str(args.rule)\
         +"_failed_atk="+str(args.attack_method)+"_model_idx="+str(args.model_idx)

if args.normalize:
    filename+="_norm"

plot_heatmaps_det_vs_bay(image=images[im_idx].detach().cpu().numpy(),
                         det_attack=det_attacks[im_idx].detach().cpu().numpy(),
                         bay_attack=bay_attacks[im_idx].detach().cpu().numpy(),
                         det_prediction=det_predictions[im_idx],
                         bay_prediction=bay_predictions[im_idx],
                         label=labels[im_idx],
                         det_explanation=det_lrp[im_idx],
                         det_attack_explanation=det_attack_lrp[im_idx],
                         bay_explanation=bay_lrp[im_idx],
                         bay_attack_explanation=bay_attack_lrp[im_idx],
                         lrp_rob_method=lrp_robustness_method,
                         topk=args.topk, rule=args.rule, savedir=os.path.join(TESTS,'figures'), filename=filename)
| 6,989 | 46.22973 | 120 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp_robustness_distributions.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F

from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.fullBNN import *
from networks.redBNN import *

from utils.lrp import *
from plot.lrp_heatmaps import *
import plot.lrp_distributions as plot_lrp
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *

# Compare imagewise LRP robustness distributions of a deterministic network
# against a Bayesian network (fullBNN or redBNN), split by successful/failed
# attacks, and save distribution plots and a scatterplot.
# NOTE(review): original indentation was lost in this copy; block structure
# below is reconstructed from the data/control flow — confirm against the repo.

parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--topk", default=20, type=int, help="Top k most relevant pixels.")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--model", default="fullBNN", type=str, help="baseNN, fullBNN, redBNN")
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation.")
parser.add_argument("--redBNN_layer_idx", default=-1, type=int, help="Bayesian layer idx in redBNN.")
parser.add_argument("--normalize", default=False, type=eval, help="Normalize lrp heatmaps.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()

# Posterior sample sizes used both for the attacks and for the heatmaps.
lrp_robustness_method = "imagewise"
n_samples_list=[10, 50, 100]
n_inputs=100 if args.debug else args.n_inputs
topk=args.topk

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

### Load models and attacks

# Deterministic baseline and its attacks.
model = baseNN_settings["model_"+str(args.model_idx)]

_, _, x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=model["dataset"],
                                                            shuffle=False, n_inputs=n_inputs)

det_model_savedir = get_model_savedir(model="baseNN", dataset=model["dataset"], architecture=model["architecture"],
                                      debug=args.debug, model_idx=args.model_idx)
detnet = baseNN(inp_shape, num_classes, *list(model.values()))
detnet.load(savedir=det_model_savedir, device=args.device)

det_attack = load_attack(method=args.attack_method, model_savedir=det_model_savedir)

if args.model=="fullBNN":

    m = fullBNN_settings["model_"+str(args.model_idx)]

    bay_model_savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                                          model_idx=args.model_idx, debug=args.debug)

    bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
    bayesnet.load(savedir=bay_model_savedir, device=args.device)

elif args.model=="redBNN":

    m = redBNN_settings["model_"+str(args.model_idx)]
    base_m = baseNN_settings["model_"+str(m["baseNN_idx"])]

    x_test, y_test, inp_shape, out_size = load_dataset(dataset_name=m["dataset"], shuffle=False, n_inputs=n_inputs)[2:]

    basenet = baseNN(inp_shape, out_size, *list(base_m.values()))
    basenet_savedir = get_model_savedir(model="baseNN", dataset=m["dataset"],
                                        architecture=m["architecture"], debug=args.debug, model_idx=m["baseNN_idx"])
    basenet.load(savedir=basenet_savedir, device=args.device)

    hyp = get_hyperparams(m)
    # Negative indexes count back from the last learnable layer.
    layer_idx=args.redBNN_layer_idx+basenet.n_learnable_layers+1 if args.redBNN_layer_idx<0 else args.redBNN_layer_idx
    bayesnet = redBNN(dataset_name=m["dataset"], inference=m["inference"], base_net=basenet, hyperparams=hyp,
                      layer_idx=layer_idx)
    bay_model_savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                                          debug=args.debug, model_idx=args.model_idx, layer_idx=layer_idx)
    bayesnet.load(savedir=bay_model_savedir, device=args.device)

else:
    raise NotImplementedError

# One precomputed attack per posterior sample size.
bay_attack=[]
for n_samples in n_samples_list:
    bay_attack.append(load_attack(method=args.attack_method, model_savedir=bay_model_savedir, n_samples=n_samples))

# Attack on the posterior mode is only produced for svi inference.
# NOTE(review): relies on `n_samples` keeping its last loop value — confirm.
if m["inference"]=="svi":
    mode_attack = load_attack(method=args.attack_method, model_savedir=bay_model_savedir,
                              n_samples=n_samples, atk_mode=True)

images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)

# for layer_idx in detnet.learnable_layers_idxs:
for layer_idx in [detnet.learnable_layers_idxs[-1]]:

    ### Load explanations

    savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method,
                              rule=args.rule, layer_idx=layer_idx)
    det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
    det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

    savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
                              rule=args.rule, layer_idx=layer_idx, lrp_method=args.lrp_method)
    bay_lrp=[]
    bay_attack_lrp=[]
    for n_samples in n_samples_list:
        bay_lrp.append(load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(n_samples)))
        bay_attack_lrp.append(load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(n_samples)))

    # Sanity check: all pickles must refer to the same number of test inputs.
    n_images = det_lrp.shape[0]
    if det_attack_lrp.shape[0]!=n_images or bay_lrp[0].shape[0]!=n_inputs or bay_attack_lrp[0].shape[0]!=n_inputs:
        print("det_lrp.shape[0] =", det_lrp.shape[0])
        print("det_attack_lrp.shape[0] =", det_attack_lrp.shape[0])
        print("bay_lrp[0].shape[0] =", bay_lrp[0].shape[0])
        print("bay_attack_lrp[0].shape[0] =", bay_attack_lrp[0].shape[0])
        raise ValueError("Inconsistent n_inputs")

    if m["inference"]=="svi":
        mode_lrp = load_from_pickle(path=savedir, filename="mode_lrp_avg_post")
        mode_attack_lrp=[]
        for samp_idx, n_samples in enumerate(n_samples_list):
            mode_attack_lrp.append(load_from_pickle(path=savedir, filename="mode_attack_lrp_samp="+str(n_samples)))
        # Trailing entry: heatmaps of the attack on the averaged posterior.
        mode_attack_lrp.append(load_from_pickle(path=savedir, filename="mode_attack_lrp_avg_post"))

        if mode_lrp.shape[0]!=n_inputs or mode_attack_lrp[0].shape[0]!=n_inputs:
            print("mode_lrp.shape[0] =", mode_lrp.shape[0])
            print("mode_attack_lrp[0].shape[0] =", mode_attack_lrp[0].shape[0])
            raise ValueError("Inconsistent n_inputs")

    ### Normalize heatmaps

    # In-place, per-image normalization of every loaded heatmap.
    if args.normalize:
        for im_idx in range(det_lrp.shape[0]):
            det_lrp[im_idx] = normalize(det_lrp[im_idx])
            det_attack_lrp[im_idx] = normalize(det_attack_lrp[im_idx])

            for samp_idx in range(len(n_samples_list)):
                bay_lrp[samp_idx][im_idx] = normalize(bay_lrp[samp_idx][im_idx])
                bay_attack_lrp[samp_idx][im_idx] = normalize(bay_attack_lrp[samp_idx][im_idx])

            if m["inference"]=="svi":
                mode_lrp[im_idx] = normalize(mode_lrp[im_idx])

                for samp_idx in range(len(n_samples_list)):
                    mode_attack_lrp[samp_idx][im_idx] = normalize(mode_attack_lrp[samp_idx][im_idx])
                # Also normalize the trailing avg-posterior entry.
                # NOTE(review): reconstructed as after the inner loop — confirm.
                mode_attack_lrp[samp_idx+1][im_idx] = normalize(mode_attack_lrp[samp_idx+1][im_idx])

    ### Evaluate explanations

    # Deterministic network on its own attacks; split scores into
    # all / successful-attack / failed-attack subsets.
    det_preds, det_atk_preds, det_softmax_robustness, det_successful_idxs, det_failed_idxs = evaluate_attack(net=detnet,
        x_test=images, x_attack=det_attack, y_test=y_test, device=args.device, return_classification_idxs=True)
    det_softmax_robustness = det_softmax_robustness.detach().cpu().numpy()

    det_lrp_robustness, det_lrp_pxl_idxs = lrp_robustness(original_heatmaps=det_lrp,
                                                          adversarial_heatmaps=det_attack_lrp,
                                                          topk=topk, method=lrp_robustness_method)
    succ_det_lrp_robustness, succ_det_lrp_pxl_idxs = lrp_robustness(original_heatmaps=det_lrp[det_successful_idxs],
                                                                    adversarial_heatmaps=det_attack_lrp[det_successful_idxs],
                                                                    topk=topk, method=lrp_robustness_method)
    fail_det_lrp_robustness, fail_det_lrp_pxl_idxs = lrp_robustness(original_heatmaps=det_lrp[det_failed_idxs],
                                                                    adversarial_heatmaps=det_attack_lrp[det_failed_idxs],
                                                                    topk=topk, method=lrp_robustness_method)

    # Same evaluation for the Bayesian network, once per sample size.
    bay_preds=[]
    bay_atk_preds=[]
    bay_softmax_robustness=[]
    bay_successful_idxs=[]
    bay_failed_idxs=[]
    bay_lrp_robustness=[]
    bay_lrp_pxl_idxs=[]
    succ_bay_lrp_robustness=[]
    succ_bay_lrp_pxl_idxs=[]
    fail_bay_lrp_robustness=[]
    fail_bay_lrp_pxl_idxs=[]
    for samp_idx, n_samples in enumerate(n_samples_list):
        preds, atk_preds, softmax_rob, succ_idxs, fail_idxs = evaluate_attack(net=bayesnet, x_test=images,
                                    x_attack=bay_attack[samp_idx], y_test=y_test, device=args.device,
                                    n_samples=n_samples, return_classification_idxs=True)
        bay_preds.append(preds)
        bay_atk_preds.append(atk_preds)
        bay_softmax_robustness.append(softmax_rob.detach().cpu().numpy())
        bay_successful_idxs.append(succ_idxs)
        bay_failed_idxs.append(fail_idxs)

        robustness, pxl_idxs = lrp_robustness(original_heatmaps=bay_lrp[samp_idx],
                                              adversarial_heatmaps=bay_attack_lrp[samp_idx],
                                              topk=topk, method=lrp_robustness_method)
        bay_lrp_robustness.append(robustness)
        bay_lrp_pxl_idxs.append(pxl_idxs)

        robustness, pxl_idxs = lrp_robustness(original_heatmaps=bay_lrp[samp_idx][succ_idxs],
                                              adversarial_heatmaps=bay_attack_lrp[samp_idx][succ_idxs],
                                              topk=topk, method=lrp_robustness_method)
        succ_bay_lrp_robustness.append(robustness)
        succ_bay_lrp_pxl_idxs.append(pxl_idxs)

        robustness, pxl_idxs = lrp_robustness(original_heatmaps=bay_lrp[samp_idx][fail_idxs],
                                              adversarial_heatmaps=bay_attack_lrp[samp_idx][fail_idxs],
                                              topk=topk, method=lrp_robustness_method)
        fail_bay_lrp_robustness.append(robustness)
        fail_bay_lrp_pxl_idxs.append(pxl_idxs)

    # Posterior-mode evaluation (svi only): per sample size, then once more
    # on the averaged posterior (appended as the last entry of each list).
    if m["inference"]=="svi":
        mode_preds=[]
        mode_atk_preds=[]
        mode_softmax_robustness=[]
        mode_successful_idxs=[]
        mode_failed_idxs=[]
        mode_lrp_robustness=[]
        mode_lrp_pxl_idxs=[]
        succ_mode_lrp_robustness=[]
        succ_mode_lrp_pxl_idxs=[]
        fail_mode_lrp_robustness=[]
        fail_mode_lrp_pxl_idxs=[]
        for samp_idx, n_samples in enumerate(n_samples_list):
            preds, atk_preds, softmax_rob, succ_idxs, fail_idxs = evaluate_attack(net=bayesnet,
                                            x_test=images, x_attack=mode_attack,
                                            y_test=y_test, device=args.device, n_samples=n_samples,
                                            return_classification_idxs=True)
            mode_preds.append(preds)
            mode_atk_preds.append(atk_preds)
            mode_softmax_robustness.append(softmax_rob.detach().cpu().numpy())
            mode_successful_idxs.append(succ_idxs)
            mode_failed_idxs.append(fail_idxs)

            robustness, pxl_idxs = lrp_robustness(original_heatmaps=mode_lrp,
                                                  adversarial_heatmaps=mode_attack_lrp[samp_idx],
                                                  topk=topk, method=lrp_robustness_method)
            mode_lrp_robustness.append(robustness)
            mode_lrp_pxl_idxs.append(pxl_idxs)

            robustness, pxl_idxs = lrp_robustness(original_heatmaps=mode_lrp[succ_idxs],
                                                  adversarial_heatmaps=mode_attack_lrp[samp_idx][succ_idxs],
                                                  topk=topk, method=lrp_robustness_method)
            succ_mode_lrp_robustness.append(robustness)
            succ_mode_lrp_pxl_idxs.append(pxl_idxs)

            robustness, pxl_idxs = lrp_robustness(original_heatmaps=mode_lrp[fail_idxs],
                                                  adversarial_heatmaps=mode_attack_lrp[samp_idx][fail_idxs],
                                                  topk=topk, method=lrp_robustness_method)
            fail_mode_lrp_robustness.append(robustness)
            fail_mode_lrp_pxl_idxs.append(pxl_idxs)

        # Averaged-posterior pass: uses mode_attack_lrp[samp_idx+1], i.e. the
        # trailing "avg_post" heatmaps loaded above.
        # NOTE(review): reconstructed as after the loop — confirm placement.
        preds, atk_preds, softmax_rob, succ_idxs, fail_idxs = evaluate_attack(net=bayesnet,
                                        x_test=images, x_attack=mode_attack, avg_posterior=True,
                                        y_test=y_test, device=args.device, n_samples=n_samples,
                                        return_classification_idxs=True)
        mode_preds.append(preds)
        mode_atk_preds.append(atk_preds)
        mode_softmax_robustness.append(softmax_rob.detach().cpu().numpy())
        mode_successful_idxs.append(succ_idxs)
        mode_failed_idxs.append(fail_idxs)

        robustness, pxl_idxs = lrp_robustness(original_heatmaps=mode_lrp,
                                              adversarial_heatmaps=mode_attack_lrp[samp_idx+1],
                                              topk=topk, method=lrp_robustness_method)
        mode_lrp_robustness.append(robustness)
        mode_lrp_pxl_idxs.append(pxl_idxs)

        robustness, pxl_idxs = lrp_robustness(original_heatmaps=mode_lrp[succ_idxs],
                                              adversarial_heatmaps=mode_attack_lrp[samp_idx+1][succ_idxs],
                                              topk=topk, method=lrp_robustness_method)
        succ_mode_lrp_robustness.append(robustness)
        succ_mode_lrp_pxl_idxs.append(pxl_idxs)

        robustness, pxl_idxs = lrp_robustness(original_heatmaps=mode_lrp[fail_idxs],
                                              adversarial_heatmaps=mode_attack_lrp[samp_idx+1][fail_idxs],
                                              topk=topk, method=lrp_robustness_method)
        fail_mode_lrp_robustness.append(robustness)
        fail_mode_lrp_pxl_idxs.append(pxl_idxs)

    ### Plots

    filename = lrp_robustness_method
    if args.normalize:
        filename+="_norm"

    plot_attacks_explanations(images=images,
                              explanations=det_lrp,
                              attacks=det_attack,
                              attacks_explanations=det_attack_lrp,
                              predictions=det_preds.argmax(-1),
                              attacks_predictions=det_atk_preds.argmax(-1),
                              successful_attacks_idxs=det_successful_idxs,
                              failed_attacks_idxs=det_failed_idxs,
                              labels=labels, lrp_rob_method=lrp_robustness_method,
                              rule=args.rule, savedir=savedir,
                              pxl_idxs=det_lrp_pxl_idxs,
                              filename="det_lrp_attacks_"+filename,
                              layer_idx=layer_idx)

    for samp_idx, n_samples in enumerate(n_samples_list):
        plot_attacks_explanations(images=images,
                                  explanations=bay_lrp[samp_idx],
                                  attacks=bay_attack[samp_idx],
                                  attacks_explanations=bay_attack_lrp[samp_idx],
                                  predictions=bay_preds[samp_idx].argmax(-1),
                                  attacks_predictions=bay_atk_preds[samp_idx].argmax(-1),
                                  successful_attacks_idxs=bay_successful_idxs[samp_idx],
                                  failed_attacks_idxs=bay_failed_idxs[samp_idx],
                                  labels=labels, lrp_rob_method=lrp_robustness_method,
                                  rule=args.rule, savedir=savedir,
                                  pxl_idxs=bay_lrp_pxl_idxs[samp_idx],
                                  filename="bay_lrp_attacks_samp="+str(n_samples)+"_"+filename,
                                  layer_idx=layer_idx)

    if m["inference"]=="svi": # mode vs mode only
        plot_attacks_explanations(images=images,
                                  explanations=mode_lrp,
                                  attacks=mode_attack,
                                  attacks_explanations=mode_attack_lrp[-1],
                                  predictions=mode_preds[-1].argmax(-1),
                                  attacks_predictions=mode_atk_preds[-1].argmax(-1),
                                  successful_attacks_idxs=mode_successful_idxs[-1],
                                  failed_attacks_idxs=mode_failed_idxs[-1],
                                  labels=labels, lrp_rob_method=lrp_robustness_method,
                                  rule=args.rule, savedir=savedir,
                                  pxl_idxs=mode_lrp_pxl_idxs[-1],
                                  filename="mode_lrp_attacks",
                                  layer_idx=layer_idx)

    filename=args.rule+"_lrp_robustness"+m["dataset"]+"_images="+str(n_inputs)+\
             "_samples="+str(n_samples)+"_pxls="+str(topk)+"_atk="+str(args.attack_method)+"_layeridx="+str(layer_idx)
    if args.normalize:
        filename+="_norm"

    # NOTE(review): mode_* lists only exist when m["inference"]=="svi"; with
    # another inference method this call would raise a NameError — confirm
    # against the supported configurations.
    plot_lrp.lrp_imagewise_robustness_distributions(
        det_lrp_robustness=det_lrp_robustness,
        det_successful_lrp_robustness=succ_det_lrp_robustness,
        det_failed_lrp_robustness=fail_det_lrp_robustness,
        bay_lrp_robustness=bay_lrp_robustness,
        bay_successful_lrp_robustness=succ_bay_lrp_robustness,
        bay_failed_lrp_robustness=fail_bay_lrp_robustness,
        mode_lrp_robustness=mode_lrp_robustness,
        mode_successful_lrp_robustness=succ_mode_lrp_robustness,
        mode_failed_lrp_robustness=fail_mode_lrp_robustness,
        n_samples_list=n_samples_list,
        n_original_images=len(images),
        savedir=savedir,
        filename="dist_"+filename)

    plot_lrp.lrp_robustness_scatterplot(
        adversarial_robustness=det_softmax_robustness,
        bayesian_adversarial_robustness=bay_softmax_robustness,
        mode_adversarial_robustness=mode_softmax_robustness[-1] if m["inference"]=="svi" else None,
        lrp_robustness=det_lrp_robustness,
        bayesian_lrp_robustness=bay_lrp_robustness,
        mode_lrp_robustness=mode_lrp_robustness[-1] if m["inference"]=="svi" else None,
        n_samples_list=n_samples_list,
        savedir=savedir,
        filename="scatterplot_"+filename)
| 16,260 | 41.346354 | 118 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp_heatmaps_layers.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F

from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.fullBNN import *
from networks.redBNN import *

from utils.lrp import *
from plot.lrp_heatmaps import plot_attacks_explanations_layers
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *

# Plot clean vs adversarial LRP heatmaps at every learnable layer of the
# chosen network, marking the topk most relevant pixels of each heatmap.

parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points.")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings.")
parser.add_argument("--topk", default=30, type=int, help="Percentage of pixels for computing the LRP.") # 200
parser.add_argument("--model", default="baseNN", type=str, help="baseNN, fullBNN, redBNN")
parser.add_argument("--n_samples", default=50, type=int)
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation.")
parser.add_argument("--normalize", default=True, type=eval, help="Normalize lrp heatmaps.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()

# LRP robustness (used only to pick the topk pixel indexes) is imagewise.
lrp_robustness_method = "imagewise"
n_inputs=100 if args.debug else args.n_inputs

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

### Load models and attacks

if args.model=="baseNN":

    m = baseNN_settings["model_"+str(args.model_idx)]

    x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=m["dataset"],
                                                          shuffle=False, n_inputs=n_inputs)[2:]
    model_savedir = get_model_savedir(model="baseNN", dataset=m["dataset"], architecture=m["architecture"],
                                      debug=args.debug, model_idx=args.model_idx)

    detnet = baseNN(inp_shape, num_classes, *list(m.values()))
    detnet.load(savedir=model_savedir, device=args.device)

    attacks = load_attack(method=args.attack_method, model_savedir=model_savedir)

    predictions, atk_predictions, softmax_robustness, successful_idxs, failed_idxs = evaluate_attack(net=detnet,
        x_test=x_test, x_attack=attacks, y_test=y_test, device=args.device, return_classification_idxs=True)

    learnable_layers_idxs = detnet.learnable_layers_idxs

elif args.model=="fullBNN":

    m = fullBNN_settings["model_"+str(args.model_idx)]

    x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=m["dataset"], shuffle=False, n_inputs=n_inputs)[2:]

    model_savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                                      model_idx=args.model_idx, debug=args.debug)

    bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
    bayesnet.load(savedir=model_savedir, device=args.device)

    attacks = load_attack(method=args.attack_method, model_savedir=model_savedir, n_samples=args.n_samples)

    # Fix: was `n_samples=n_samples`, but no bare `n_samples` is defined in
    # this branch (NameError); the CLI value is the intended sample count.
    predictions, atk_predictions, softmax_robustness, successful_idxs, failed_idxs = evaluate_attack(net=bayesnet,
        n_samples=args.n_samples, x_test=x_test, x_attack=attacks, y_test=y_test,
        device=args.device, return_classification_idxs=True)

    learnable_layers_idxs = bayesnet.learnable_layers_idxs

else:
    raise NotImplementedError

images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)

# Per-layer collections of clean/adversarial heatmaps and topk pixel indexes.
explanations_layers = []
attacks_explanations_layers = []
pxl_idxs_layers=[]

# Fix: iterate the branch-independent `learnable_layers_idxs` assigned above
# (`detnet` is undefined when args.model!="baseNN").
for layer_idx in learnable_layers_idxs:

    ### Load explanations

    if args.model=="baseNN":

        savedir = get_lrp_savedir(model_savedir=model_savedir, attack_method=args.attack_method,
                                  rule=args.rule, layer_idx=layer_idx)
        lrp = load_from_pickle(path=savedir, filename="det_lrp")
        attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

    else:
        # TODO: load Bayesian heatmaps ("bay_lrp_samp=<n>") once available.
        raise NotImplementedError

    # Optionally rescale every heatmap in place.
    if args.normalize:
        for im_idx in range(lrp.shape[0]):
            lrp[im_idx] = normalize(lrp[im_idx])
            attack_lrp[im_idx] = normalize(attack_lrp[im_idx])

    # Only the topk pixel indexes ([1]) are needed; scores ([0]) are dropped.
    pxl_idxs = lrp_robustness(original_heatmaps=lrp, adversarial_heatmaps=attack_lrp,
                              topk=args.topk, method=lrp_robustness_method)[1]

    explanations_layers.append(lrp.detach().cpu().numpy())
    attacks_explanations_layers.append(attack_lrp.detach().cpu().numpy())
    pxl_idxs_layers.append(pxl_idxs)

explanations_layers=np.array(explanations_layers)
attacks_explanations_layers=np.array(attacks_explanations_layers)
pxl_idxs_layers=np.array(pxl_idxs_layers)

### Plots

filename="layers_heatmaps_"+m["dataset"]+"_topk="+str(args.topk)+"_rule="+str(args.rule)\
         +"_atk="+str(args.attack_method)+"_model_idx="+str(args.model_idx)

if args.normalize:
    filename+="_norm"

plot_attacks_explanations_layers(images=images,
                                 attacks=attacks,
                                 explanations=explanations_layers,
                                 attacks_explanations=attacks_explanations_layers,
                                 predictions=predictions,
                                 attacks_predictions=atk_predictions,
                                 successful_attacks_idxs=successful_idxs,
                                 failed_attacks_idxs=failed_idxs,
                                 labels=labels,
                                 pxl_idxs=pxl_idxs_layers,
                                 learnable_layers_idxs=learnable_layers_idxs,
                                 lrp_rob_method=lrp_robustness_method,
                                 rule=args.rule, savedir=os.path.join(TESTS,'figures'), filename=filename)
| 7,009 | 43.367089 | 122 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp_layers_mode_robustness.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F

from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.fullBNN import *
from networks.redBNN import *

from utils.lrp import *
from plot.lrp_heatmaps import *
import plot.lrp_distributions as plot_lrp
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *

# Compare imagewise LRP robustness of deterministic, Bayesian and
# posterior-mode heatmaps across all learnable layers and several topk values
# (svi inference only), then plot the resulting distributions.
# NOTE(review): original indentation was lost in this copy; block structure
# below is reconstructed from the data/control flow — confirm against the repo.

parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--model", default="fullBNN", type=str, help="baseNN, fullBNN, redBNN")
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation.")
parser.add_argument("--normalize", default=True, type=eval, help="Normalize lrp heatmaps.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()

# Posterior sample sizes and topk pixel counts swept by this script.
lrp_robustness_method = "imagewise"
n_samples_list=[10,50]
topk_list = [10,30,100,300]
n_inputs=100 if args.debug else args.n_inputs

print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)

if args.device=="cuda":
    torch.set_default_tensor_type('torch.cuda.FloatTensor')

### Load models and attacks

# Deterministic baseline and its attacks.
model = baseNN_settings["model_"+str(args.model_idx)]

_, _, x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=model["dataset"],
                                                            shuffle=False, n_inputs=n_inputs)

det_model_savedir = get_model_savedir(model="baseNN", dataset=model["dataset"], architecture=model["architecture"],
                                      debug=args.debug, model_idx=args.model_idx)
detnet = baseNN(inp_shape, num_classes, *list(model.values()))
detnet.load(savedir=det_model_savedir, device=args.device)

det_attack = load_attack(method=args.attack_method, model_savedir=det_model_savedir)

if args.model=="fullBNN":

    m = fullBNN_settings["model_"+str(args.model_idx)]

    # Posterior-mode heatmaps are only produced by svi inference.
    if m["inference"]!="svi":
        raise NotImplementedError

    bay_model_savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
                                          model_idx=args.model_idx, debug=args.debug)

    bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
    bayesnet.load(savedir=bay_model_savedir, device=args.device)

    # One precomputed attack per posterior sample size.
    bay_attack=[]
    for n_samples in n_samples_list:
        bay_attack.append(load_attack(method=args.attack_method, model_savedir=bay_model_savedir,
                                      n_samples=n_samples))

else:
    raise NotImplementedError

images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)

# Outer index: topk, inner index: layer.
det_lrp_robustness_topk=[]
bay_lrp_robustness_topk=[]
mode_lrp_robustness_topk=[]
for topk in topk_list:

    det_lrp_robustness_layers=[]
    bay_lrp_robustness_layers=[]
    mode_lrp_robustness_layers=[]
    for layer_idx in detnet.learnable_layers_idxs:

        ### Load explanations

        savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method,
                                  rule=args.rule, layer_idx=layer_idx)
        det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
        det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")

        savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
                                  rule=args.rule, layer_idx=layer_idx, lrp_method=args.lrp_method)
        bay_lrp=[]
        bay_attack_lrp=[]
        for n_samples in n_samples_list:
            bay_lrp.append(load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(n_samples)))
            bay_attack_lrp.append(load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(n_samples)))

        mode_lrp = load_from_pickle(path=savedir, filename="mode_lrp_avg_post")
        mode_attack_lrp=[]
        for samp_idx, n_samples in enumerate(n_samples_list):
            mode_attack_lrp.append(load_from_pickle(path=savedir, filename="mode_attack_lrp_samp="+str(n_samples)))
        # Trailing entry: heatmaps of the attack on the averaged posterior.
        mode_attack_lrp.append(load_from_pickle(path=savedir, filename="mode_attack_lrp_avg_post"))

        # Sanity check: all pickles must refer to the same number of inputs.
        n_images = det_lrp.shape[0]
        if det_attack_lrp.shape[0]!=n_images or bay_lrp[0].shape[0]!=n_inputs or bay_attack_lrp[0].shape[0]!=n_inputs:
            print("det_lrp.shape[0] =", det_lrp.shape[0])
            print("det_attack_lrp.shape[0] =", det_attack_lrp.shape[0])
            print("bay_lrp[0].shape[0] =", bay_lrp[0].shape[0])
            print("bay_attack_lrp[0].shape[0] =", bay_attack_lrp[0].shape[0])
            raise ValueError("Inconsistent n_inputs")

        ### Normalize heatmaps

        # In-place, per-image normalization of every loaded heatmap.
        if args.normalize:
            for im_idx in range(det_lrp.shape[0]):
                det_lrp[im_idx] = normalize(det_lrp[im_idx])
                det_attack_lrp[im_idx] = normalize(det_attack_lrp[im_idx])
                mode_lrp[im_idx] = normalize(mode_lrp[im_idx])

                for samp_idx in range(len(n_samples_list)):
                    bay_lrp[samp_idx][im_idx] = normalize(bay_lrp[samp_idx][im_idx])
                    bay_attack_lrp[samp_idx][im_idx] = normalize(bay_attack_lrp[samp_idx][im_idx])
                    mode_attack_lrp[samp_idx][im_idx] = normalize(mode_attack_lrp[samp_idx][im_idx])
                # Also normalize the trailing avg-posterior entry.
                # NOTE(review): reconstructed as after the inner loop — confirm.
                mode_attack_lrp[samp_idx+1][im_idx] = normalize(mode_attack_lrp[samp_idx+1][im_idx])

        ### Evaluate explanations

        # det eval det atk
        det_lrp_robustness = lrp_robustness(original_heatmaps=det_lrp,
                                            adversarial_heatmaps=det_attack_lrp,
                                            topk=topk, method=lrp_robustness_method)[0]

        bay_lrp_robustness=[]
        mode_lrp_robustness=[]
        for samp_idx, n_samples in enumerate(n_samples_list):

            # bay eval bay atk
            robustness = lrp_robustness(original_heatmaps=bay_lrp[samp_idx],
                                        adversarial_heatmaps=bay_attack_lrp[samp_idx],
                                        topk=topk, method=lrp_robustness_method)[0]
            bay_lrp_robustness.append(robustness)

            # bay eval mode atk
            robustness = lrp_robustness(original_heatmaps=mode_lrp,
                                        adversarial_heatmaps=mode_attack_lrp[samp_idx],
                                        topk=topk, method=lrp_robustness_method)[0]
            mode_lrp_robustness.append(robustness)

        # mode eval mode atk
        # NOTE(review): samp_idx+1 selects the trailing "avg_post" heatmaps;
        # reconstructed as after the loop above — confirm placement.
        robustness = lrp_robustness(original_heatmaps=mode_lrp,
                                    adversarial_heatmaps=mode_attack_lrp[samp_idx+1],
                                    topk=topk, method=lrp_robustness_method)[0]
        mode_lrp_robustness.append(robustness)

        det_lrp_robustness_layers.append(det_lrp_robustness)
        bay_lrp_robustness_layers.append(bay_lrp_robustness)
        mode_lrp_robustness_layers.append(mode_lrp_robustness)

    det_lrp_robustness_topk.append(det_lrp_robustness_layers)
    bay_lrp_robustness_topk.append(bay_lrp_robustness_layers)
    mode_lrp_robustness_topk.append(mode_lrp_robustness_layers)

det_lrp_robustness_topk = np.array(det_lrp_robustness_topk)
bay_lrp_robustness_topk = np.array(bay_lrp_robustness_topk)
mode_lrp_robustness_topk = np.array(mode_lrp_robustness_topk)

### Plots

savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
                          rule=args.rule, lrp_method=args.lrp_method)

# `n_samples` keeps its last loop value here.
filename=args.rule+"_lrp_robustness_"+m["dataset"]+"_images="+str(n_inputs)+\
         "_samples="+str(n_samples)+"_atk="+str(args.attack_method)

if args.normalize:
    filename+="_norm"

plot_lrp.lrp_layers_mode_robustness(
    det_lrp_robustness=det_lrp_robustness_topk,
    bay_lrp_robustness=bay_lrp_robustness_topk,
    mode_lrp_robustness=mode_lrp_robustness_topk,
    n_samples_list=n_samples_list,
    topk_list=topk_list,
    learnable_layers_idxs=detnet.learnable_layers_idxs,
    savedir=savedir,
    filename="dist_"+filename+"_layers")
| 7,923 | 37.466019 | 116 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp_robustness_diff.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F
from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.advNN import *
from networks.fullBNN import *
from utils.lrp import *
from plot.lrp_heatmaps import *
import plot.lrp_distributions as plot_lrp
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation.")
parser.add_argument("--normalize", default=False, type=eval, help="Normalize lrp heatmaps.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()
lrp_robustness_method = "imagewise"
n_samples_list=[5] if args.debug else [10, 50, 100]
topk_list = [20]
n_inputs=100 if args.debug else args.n_inputs
print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
if args.device=="cuda":
torch.set_default_tensor_type('torch.cuda.FloatTensor')
### Load models and attacks
## baseNN
model = baseNN_settings["model_"+str(args.model_idx)]
x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=model["dataset"],
shuffle=False, n_inputs=n_inputs)[2:]
det_model_savedir = get_model_savedir(model="baseNN", dataset=model["dataset"], architecture=model["architecture"],
debug=args.debug, model_idx=args.model_idx)
detnet = baseNN(inp_shape, num_classes, *list(model.values()))
detnet.load(savedir=det_model_savedir, device=args.device)
det_attack = load_attack(method=args.attack_method, model_savedir=det_model_savedir)
## advNN
adv_model_savedir = get_model_savedir(model="advNN", dataset=model["dataset"], architecture=model["architecture"],
debug=args.debug, model_idx=args.model_idx, attack_method='fgsm')
advnet = advNN(inp_shape, num_classes, *list(model.values()), attack_method='fgsm')
advnet.load(savedir=adv_model_savedir, device=args.device)
adv_attack = load_attack(method=args.attack_method, model_savedir=adv_model_savedir)
## fullBNN
m = fullBNN_settings["model_"+str(args.model_idx)]
bay_model_savedir = get_model_savedir(model="fullBNN", dataset=m["dataset"], architecture=m["architecture"],
model_idx=args.model_idx, debug=args.debug)
bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
bayesnet.load(savedir=bay_model_savedir, device=args.device)
bay_attack=[]
for n_samples in n_samples_list:
bay_attack.append(load_attack(method=args.attack_method, model_savedir=bay_model_savedir,
n_samples=n_samples))
### plots
filename="lrp_robustness_"+m["dataset"]+"_"+str(bayesnet.inference)+"_images="+str(n_inputs)+"_rule="+str(args.rule)\
+"_samples="+str(n_samples)+"_atk="+str(args.attack_method)+"_model_idx="+str(args.model_idx)
images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)
det_lrp_robustness_topk=[]
adv_lrp_robustness_topk=[]
bay_lrp_robustness_topk=[]
det_failed_idxs_topk=[]
adv_failed_idxs_topk=[]
bay_failed_idxs_topk=[]
det_norm_topk=[]
adv_norm_topk=[]
bay_norm_topk=[]
det_softmax_robustness_topk=[]
adv_softmax_robustness_topk=[]
bay_softmax_robustness_topk=[]
for topk in topk_list:
det_lrp_robustness_layers=[]
adv_lrp_robustness_layers=[]
bay_lrp_robustness_layers=[]
det_failed_idxs_layers=[]
adv_failed_idxs_layers=[]
bay_failed_idxs_layers=[]
det_norm_layers=[]
adv_norm_layers=[]
bay_norm_layers=[]
det_softmax_robustness_layers=[]
adv_softmax_robustness_layers=[]
bay_softmax_robustness_layers=[]
for layer_idx in detnet.learnable_layers_idxs:
### Load explanations
savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method,
rule=args.rule, layer_idx=layer_idx)
det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")
savedir = get_lrp_savedir(model_savedir=adv_model_savedir, attack_method=args.attack_method,
rule=args.rule, layer_idx=layer_idx)
adv_lrp = load_from_pickle(path=savedir, filename="det_lrp")
adv_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")
savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
rule=args.rule, layer_idx=layer_idx, lrp_method=args.lrp_method)
bay_lrp=[]
bay_attack_lrp=[]
for n_samples in n_samples_list:
bay_lrp.append(load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(n_samples)))
bay_attack_lrp.append(load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(n_samples)))
n_images = det_lrp.shape[0]
if det_attack_lrp.shape[0]!=n_images or bay_lrp[0].shape[0]!=n_inputs or bay_attack_lrp[0].shape[0]!=n_inputs:
print("det_lrp.shape[0] =", det_lrp.shape[0])
print("det_attack_lrp.shape[0] =", det_attack_lrp.shape[0])
print("bay_lrp[0].shape[0] =", bay_lrp[0].shape[0])
print("bay_attack_lrp[0].shape[0] =", bay_attack_lrp[0].shape[0])
raise ValueError("Inconsistent n_inputs")
### Normalize heatmaps
if args.normalize:
for im_idx in range(det_lrp.shape[0]):
det_lrp[im_idx] = normalize(det_lrp[im_idx])
det_attack_lrp[im_idx] = normalize(det_attack_lrp[im_idx])
adv_lrp[im_idx] = normalize(adv_lrp[im_idx])
adv_attack_lrp[im_idx] = normalize(adv_attack_lrp[im_idx])
for samp_idx in range(len(n_samples_list)):
bay_lrp[samp_idx][im_idx] = normalize(bay_lrp[samp_idx][im_idx])
bay_attack_lrp[samp_idx][im_idx] = normalize(bay_attack_lrp[samp_idx][im_idx])
### Evaluate explanations
det_preds, det_atk_preds, det_softmax_robustness, det_successful_idxs, det_failed_idxs = evaluate_attack(net=detnet,
x_test=images, x_attack=det_attack, y_test=y_test, device=args.device, return_classification_idxs=True)
det_softmax_robustness = det_softmax_robustness.detach().cpu().numpy()
det_lrp_robustness, det_lrp_pxl_idxs = lrp_robustness(original_heatmaps=det_lrp,
adversarial_heatmaps=det_attack_lrp,
topk=topk, method=lrp_robustness_method)
det_norm = lrp_distances(det_lrp, det_attack_lrp, axis_norm=1).detach().cpu().numpy()
adv_preds, adv_atk_preds, adv_softmax_robustness, adv_successful_idxs, adv_failed_idxs = evaluate_attack(net=advnet,
x_test=images, x_attack=adv_attack, y_test=y_test, device=args.device, return_classification_idxs=True)
adv_softmax_robustness = adv_softmax_robustness.detach().cpu().numpy()
adv_lrp_robustness, adv_lrp_pxl_idxs = lrp_robustness(original_heatmaps=adv_lrp,
adversarial_heatmaps=adv_attack_lrp,
topk=topk, method=lrp_robustness_method)
adv_norm = lrp_distances(adv_lrp, adv_attack_lrp, axis_norm=1).detach().cpu().numpy()
bay_preds=[]
bay_atk_preds=[]
bay_softmax_robustness=[]
bay_successful_idxs=[]
bay_failed_idxs=[]
bay_lrp_robustness=[]
bay_norm=[]
bay_lrp_pxl_idxs=[]
for samp_idx, n_samples in enumerate(n_samples_list):
preds, atk_preds, softmax_rob, succ_idxs, failed_idxs = evaluate_attack(net=bayesnet, x_test=images,
x_attack=bay_attack[samp_idx], y_test=y_test, device=args.device,
n_samples=n_samples, return_classification_idxs=True)
bay_preds.append(preds)
bay_atk_preds.append(atk_preds)
bay_softmax_robustness.append(softmax_rob.detach().cpu().numpy())
bay_successful_idxs.append(succ_idxs)
bay_failed_idxs.append(failed_idxs)
robustness, pxl_idxs = lrp_robustness(original_heatmaps=bay_lrp[samp_idx],
adversarial_heatmaps=bay_attack_lrp[samp_idx],
topk=topk, method=lrp_robustness_method)
bay_lrp_robustness.append(robustness)
bay_lrp_pxl_idxs.append(pxl_idxs)
bay_norm.append(lrp_distances(bay_lrp[samp_idx],
bay_attack_lrp[samp_idx],
axis_norm=1).detach().cpu().numpy())
det_lrp_robustness_layers.append(det_lrp_robustness)
adv_lrp_robustness_layers.append(adv_lrp_robustness)
bay_lrp_robustness_layers.append(bay_lrp_robustness)
det_failed_idxs_layers.append(det_failed_idxs)
adv_failed_idxs_layers.append(adv_failed_idxs)
bay_failed_idxs_layers.append(bay_failed_idxs)
det_norm_layers.append(det_norm)
adv_norm_layers.append(adv_norm)
bay_norm_layers.append(bay_norm)
det_softmax_robustness_layers.append(det_softmax_robustness)
adv_softmax_robustness_layers.append(adv_softmax_robustness)
bay_softmax_robustness_layers.append(bay_softmax_robustness)
### plot last layer atk explanations
savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
rule=args.rule, layer_idx=layer_idx)
plot_attacks_explanations(images=images,
explanations=det_lrp,
attacks=det_attack,
attacks_explanations=det_attack_lrp,
predictions=det_preds.argmax(-1),
attacks_predictions=det_atk_preds.argmax(-1),
successful_attacks_idxs=det_successful_idxs,
failed_attacks_idxs=det_failed_idxs,
labels=labels, lrp_rob_method=lrp_robustness_method,
rule=args.rule,
savedir=os.path.join(TESTS,'figures/attacks_explanations'),
pxl_idxs=det_lrp_pxl_idxs,
filename="det_lrp_attacks_"+filename,
layer_idx=layer_idx)
for samp_idx, n_samples in enumerate(n_samples_list):
plot_attacks_explanations(images=images,
explanations=bay_lrp[samp_idx],
attacks=bay_attack[samp_idx],
attacks_explanations=bay_attack_lrp[samp_idx],
predictions=bay_preds[samp_idx].argmax(-1),
attacks_predictions=bay_atk_preds[samp_idx].argmax(-1),
successful_attacks_idxs=bay_successful_idxs[samp_idx],
failed_attacks_idxs=bay_failed_idxs[samp_idx],
labels=labels, lrp_rob_method=lrp_robustness_method,
rule=args.rule,
savedir=os.path.join(TESTS,'figures/attacks_explanations'),
pxl_idxs=bay_lrp_pxl_idxs[samp_idx],
filename="bay_lrp_attacks_samp="+str(n_samples)+"_"+filename,
layer_idx=layer_idx)
det_lrp_robustness_topk.append(det_lrp_robustness_layers)
adv_lrp_robustness_topk.append(adv_lrp_robustness_layers)
bay_lrp_robustness_topk.append(bay_lrp_robustness_layers)
det_failed_idxs_topk.append(det_failed_idxs_layers)
adv_failed_idxs_topk.append(adv_failed_idxs_layers)
bay_failed_idxs_topk.append(bay_failed_idxs_layers)
det_norm_topk.append(det_norm_layers)
adv_norm_topk.append(adv_norm_layers)
bay_norm_topk.append(bay_norm_layers)
det_softmax_robustness_topk.append(det_softmax_robustness_layers)
adv_softmax_robustness_topk.append(adv_softmax_robustness_layers)
bay_softmax_robustness_topk.append(bay_softmax_robustness_layers)
### Plots
if args.normalize:
filename+="_norm"
plot_lrp.lrp_layers_robustness_differences(
det_lrp_robustness=det_lrp_robustness_topk,
adv_lrp_robustness=adv_lrp_robustness_topk,
bay_lrp_robustness=bay_lrp_robustness_topk,
n_samples_list=n_samples_list,
topk_list=topk_list,
n_original_images=len(images),
learnable_layers_idxs=detnet.learnable_layers_idxs,
savedir=os.path.join(TESTS,'figures/layers_robustness'),
filename="diff_"+filename+"_layers")
| 12,050 | 38 | 119 | py |
BayesianRelevance | BayesianRelevance-master/src/compute_lrp.py | import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.nn.functional as nnf
import torch.optim as torchopt
import torchvision
from networks.advNN import *
from networks.baseNN import *
from networks.fullBNN import *
from utils.data import *
from utils.model_settings import *
from utils.savedir import *
from utils.seeding import *
import plot.lrp_distributions as plot_lrp
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
from utils.lrp import *
parser = argparse.ArgumentParser()
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--model", default="baseNN", type=str, help="baseNN, fullBNN, advNN")
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--n_samples", default=100, type=int)
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation: epsilon, gamma, alpha1beta0")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()
n_inputs=100 if args.debug else args.n_inputs
n_samples=5 if args.debug else args.n_samples
print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
if args.device=="cuda":
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if args.model in ["baseNN", "advNN"]:
if args.model=="baseNN":
model = baseNN_settings["model_"+str(args.model_idx)]
x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=model["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
det_model_savedir = get_model_savedir(model="baseNN", dataset=model["dataset"], architecture=model["architecture"],
debug=args.debug, model_idx=args.model_idx)
detnet = baseNN(inp_shape, num_classes, *list(model.values()))
detnet.load(savedir=det_model_savedir, device=args.device)
det_attack = load_attack(method=args.attack_method, model_savedir=det_model_savedir)
elif args.model=="advNN":
model = baseNN_settings["model_"+str(args.model_idx)]
x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=model["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
det_model_savedir = get_model_savedir(model=args.model, dataset=model["dataset"], architecture=model["architecture"],
debug=args.debug, model_idx=args.model_idx, attack_method='fgsm')
detnet = advNN(inp_shape, num_classes, *list(model.values()), attack_method='fgsm')
detnet.load(savedir=det_model_savedir, device=args.device)
det_attack = load_attack(method=args.attack_method, model_savedir=det_model_savedir)
### Deterministic explanations
images = x_test.to(args.device).detach()
labels = y_test.argmax(-1).to(args.device)
for layer_idx in detnet.learnable_layers_idxs:
savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method,
layer_idx=layer_idx, rule=args.rule)
det_lrp = compute_explanations(images, detnet, layer_idx=layer_idx, rule=args.rule, device=args.device,
method=args.lrp_method)
det_attack_lrp = compute_explanations(det_attack, detnet, layer_idx=layer_idx, rule=args.rule, device=args.device,
method=args.lrp_method)
save_to_pickle(det_lrp, path=savedir, filename="det_lrp")
save_to_pickle(det_attack_lrp, path=savedir, filename="det_attack_lrp")
elif args.model=="fullBNN":
m = fullBNN_settings["model_"+str(args.model_idx)]
x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=m["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
bay_model_savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
model_idx=args.model_idx, debug=args.debug)
bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
bayesnet.load(savedir=bay_model_savedir, device=args.device)
bay_attack = load_attack(method=args.attack_method, model_savedir=bay_model_savedir, n_samples=n_samples)
### Bayesian explanations
images = x_test.to(args.device).detach()
labels = y_test.argmax(-1).to(args.device)
for layer_idx in bayesnet.basenet.learnable_layers_idxs:
savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
layer_idx=layer_idx, rule=args.rule, lrp_method=args.lrp_method)
bay_lrp = compute_explanations(images, bayesnet, rule=args.rule, layer_idx=layer_idx,
n_samples=n_samples, method=args.lrp_method, device=args.device)
bay_attack_lrp = compute_explanations(bay_attack, bayesnet, layer_idx=layer_idx,
device=args.device, rule=args.rule, n_samples=n_samples, method=args.lrp_method)
save_to_pickle(bay_lrp, path=savedir, filename="bay_lrp_samp="+str(n_samples))
save_to_pickle(bay_attack_lrp, path=savedir, filename="bay_attack_lrp_samp="+str(n_samples))
else:
raise NotImplementedError | 5,683 | 45.211382 | 130 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp_robustness_scatterplot.py | import os
import argparse
import numpy as np
import torch
import torchvision
from torch import nn
import torch.nn.functional as nnf
import torch.optim as torchopt
import torch.nn.functional as F
from utils.data import *
from utils.networks import *
from utils.savedir import *
from utils.seeding import *
from networks.baseNN import *
from networks.advNN import *
from networks.fullBNN import *
from utils.lrp import *
from plot.lrp_heatmaps import *
import plot.lrp_distributions as plot_lrp
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
parser = argparse.ArgumentParser()
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings")
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation.")
parser.add_argument("--normalize", default=False, type=eval, help="Normalize lrp heatmaps.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()
lrp_robustness_method = "imagewise"
n_samples_list=[5] if args.debug else [10,50,100]
topk_list = [20]
n_inputs=100 if args.debug else args.n_inputs
print("PyTorch Version: ", torch.__version__)
print("Torchvision Version: ", torchvision.__version__)
if args.device=="cuda":
torch.set_default_tensor_type('torch.cuda.FloatTensor')
### Load models and attacks
## baseNN
model = baseNN_settings["model_"+str(args.model_idx)]
x_test, y_test, inp_shape, num_classes = load_dataset(dataset_name=model["dataset"],
shuffle=False, n_inputs=n_inputs)[2:]
det_model_savedir = get_model_savedir(model="baseNN", dataset=model["dataset"], architecture=model["architecture"],
debug=args.debug, model_idx=args.model_idx)
detnet = baseNN(inp_shape, num_classes, *list(model.values()))
detnet.load(savedir=det_model_savedir, device=args.device)
det_attack = load_attack(method=args.attack_method, model_savedir=det_model_savedir)
## advNN
adv_model_savedir = get_model_savedir(model="advNN", dataset=model["dataset"], architecture=model["architecture"],
debug=args.debug, model_idx=args.model_idx, attack_method='fgsm')
advnet = advNN(inp_shape, num_classes, *list(model.values()), attack_method='fgsm')
advnet.load(savedir=adv_model_savedir, device=args.device)
adv_attack = load_attack(method=args.attack_method, model_savedir=adv_model_savedir)
## fullBNN
m = fullBNN_settings["model_"+str(args.model_idx)]
bay_model_savedir = get_model_savedir(model="fullBNN", dataset=m["dataset"], architecture=m["architecture"],
model_idx=args.model_idx, debug=args.debug)
bayesnet = BNN(m["dataset"], *list(m.values())[1:], inp_shape, num_classes)
bayesnet.load(savedir=bay_model_savedir, device=args.device)
bay_attack=[]
for n_samples in n_samples_list:
bay_attack.append(load_attack(method=args.attack_method, model_savedir=bay_model_savedir,
n_samples=n_samples))
### plot
images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)
det_lrp_robustness_topk=[]
adv_lrp_robustness_topk=[]
bay_lrp_robustness_topk=[]
det_failed_idxs_topk=[]
adv_failed_idxs_topk=[]
bay_failed_idxs_topk=[]
det_norm_topk=[]
adv_norm_topk=[]
bay_norm_topk=[]
det_softmax_robustness_topk=[]
adv_softmax_robustness_topk=[]
bay_softmax_robustness_topk=[]
for topk in topk_list:
det_lrp_robustness_layers=[]
adv_lrp_robustness_layers=[]
bay_lrp_robustness_layers=[]
det_failed_idxs_layers=[]
adv_failed_idxs_layers=[]
bay_failed_idxs_layers=[]
det_norm_layers=[]
adv_norm_layers=[]
bay_norm_layers=[]
det_softmax_robustness_layers=[]
adv_softmax_robustness_layers=[]
bay_softmax_robustness_layers=[]
for layer_idx in detnet.learnable_layers_idxs:
### Load explanations
savedir = get_lrp_savedir(model_savedir=det_model_savedir, attack_method=args.attack_method,
rule=args.rule, layer_idx=layer_idx)
det_lrp = load_from_pickle(path=savedir, filename="det_lrp")
det_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")
savedir = get_lrp_savedir(model_savedir=adv_model_savedir, attack_method=args.attack_method,
rule=args.rule, layer_idx=layer_idx)
adv_lrp = load_from_pickle(path=savedir, filename="det_lrp")
adv_attack_lrp = load_from_pickle(path=savedir, filename="det_attack_lrp")
savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
rule=args.rule, layer_idx=layer_idx, lrp_method=args.lrp_method)
bay_lrp=[]
bay_attack_lrp=[]
for n_samples in n_samples_list:
bay_lrp.append(load_from_pickle(path=savedir, filename="bay_lrp_samp="+str(n_samples)))
bay_attack_lrp.append(load_from_pickle(path=savedir, filename="bay_attack_lrp_samp="+str(n_samples)))
n_images = det_lrp.shape[0]
if det_attack_lrp.shape[0]!=n_images or bay_lrp[0].shape[0]!=n_inputs or bay_attack_lrp[0].shape[0]!=n_inputs:
print("det_lrp.shape[0] =", det_lrp.shape[0])
print("det_attack_lrp.shape[0] =", det_attack_lrp.shape[0])
print("bay_lrp[0].shape[0] =", bay_lrp[0].shape[0])
print("bay_attack_lrp[0].shape[0] =", bay_attack_lrp[0].shape[0])
raise ValueError("Inconsistent n_inputs")
### Normalize heatmaps
if args.normalize:
for im_idx in range(det_lrp.shape[0]):
det_lrp[im_idx] = normalize(det_lrp[im_idx])
det_attack_lrp[im_idx] = normalize(det_attack_lrp[im_idx])
adv_lrp[im_idx] = normalize(adv_lrp[im_idx])
adv_attack_lrp[im_idx] = normalize(adv_attack_lrp[im_idx])
for samp_idx in range(len(n_samples_list)):
bay_lrp[samp_idx][im_idx] = normalize(bay_lrp[samp_idx][im_idx])
bay_attack_lrp[samp_idx][im_idx] = normalize(bay_attack_lrp[samp_idx][im_idx])
### Evaluate explanations
det_preds, det_atk_preds, det_softmax_robustness, det_successful_idxs, det_failed_idxs = evaluate_attack(net=detnet,
x_test=images, x_attack=det_attack, y_test=y_test, device=args.device, return_classification_idxs=True)
det_softmax_robustness = det_softmax_robustness.detach().cpu().numpy()
det_lrp_robustness, det_lrp_pxl_idxs = lrp_robustness(original_heatmaps=det_lrp,
adversarial_heatmaps=det_attack_lrp,
topk=topk, method=lrp_robustness_method)
det_norm = lrp_distances(det_lrp, det_attack_lrp, axis_norm=1).detach().cpu().numpy()
adv_preds, adv_atk_preds, adv_softmax_robustness, adv_successful_idxs, adv_failed_idxs = evaluate_attack(net=advnet,
x_test=images, x_attack=adv_attack, y_test=y_test, device=args.device, return_classification_idxs=True)
adv_softmax_robustness = adv_softmax_robustness.detach().cpu().numpy()
adv_lrp_robustness, adv_lrp_pxl_idxs = lrp_robustness(original_heatmaps=adv_lrp,
adversarial_heatmaps=adv_attack_lrp,
topk=topk, method=lrp_robustness_method)
adv_norm = lrp_distances(adv_lrp, adv_attack_lrp, axis_norm=1).detach().cpu().numpy()
bay_preds=[]
bay_atk_preds=[]
bay_softmax_robustness=[]
bay_failed_idxs=[]
bay_lrp_robustness=[]
bay_norm=[]
for samp_idx, n_samples in enumerate(n_samples_list):
preds, atk_preds, softmax_rob, successf_idxs, failed_idxs = evaluate_attack(net=bayesnet, x_test=images,
x_attack=bay_attack[samp_idx], y_test=y_test, device=args.device,
n_samples=n_samples, return_classification_idxs=True)
bay_preds.append(preds)
bay_atk_preds.append(atk_preds)
bay_softmax_robustness.append(softmax_rob.detach().cpu().numpy())
bay_failed_idxs.append(failed_idxs)
robustness, pxl_idxs = lrp_robustness(original_heatmaps=bay_lrp[samp_idx],
adversarial_heatmaps=bay_attack_lrp[samp_idx],
topk=topk, method=lrp_robustness_method)
bay_lrp_robustness.append(robustness)
bay_norm.append(lrp_distances(bay_lrp[samp_idx],
bay_attack_lrp[samp_idx],
axis_norm=1).detach().cpu().numpy())
det_lrp_robustness_layers.append(det_lrp_robustness)
adv_lrp_robustness_layers.append(adv_lrp_robustness)
bay_lrp_robustness_layers.append(bay_lrp_robustness)
det_failed_idxs_layers.append(det_failed_idxs)
adv_failed_idxs_layers.append(adv_failed_idxs)
bay_failed_idxs_layers.append(bay_failed_idxs)
det_norm_layers.append(det_norm)
adv_norm_layers.append(adv_norm)
bay_norm_layers.append(bay_norm)
det_softmax_robustness_layers.append(det_softmax_robustness)
adv_softmax_robustness_layers.append(adv_softmax_robustness)
bay_softmax_robustness_layers.append(bay_softmax_robustness)
det_lrp_robustness_topk.append(det_lrp_robustness_layers)
adv_lrp_robustness_topk.append(adv_lrp_robustness_layers)
bay_lrp_robustness_topk.append(bay_lrp_robustness_layers)
det_failed_idxs_topk.append(det_failed_idxs_layers)
adv_failed_idxs_topk.append(adv_failed_idxs_layers)
bay_failed_idxs_topk.append(bay_failed_idxs_layers)
det_norm_topk.append(det_norm_layers)
adv_norm_topk.append(adv_norm_layers)
bay_norm_topk.append(bay_norm_layers)
det_softmax_robustness_topk.append(det_softmax_robustness_layers)
adv_softmax_robustness_topk.append(adv_softmax_robustness_layers)
bay_softmax_robustness_topk.append(bay_softmax_robustness_layers)
### Plots
savedir = get_lrp_savedir(model_savedir=bay_model_savedir, attack_method=args.attack_method,
rule=args.rule, lrp_method=args.lrp_method)
filename="lrp_robustness_"+m["dataset"]+"_"+str(bayesnet.inference)+"_images="+str(n_inputs)+"_rule="+str(args.rule)\
+"_samples="+str(n_samples)+"_atk="+str(args.attack_method)+"_model_idx="+str(args.model_idx)
if args.normalize:
filename+="_norm"
plot_lrp.lrp_layers_robustness_scatterplot(
det_lrp_robustness=det_lrp_robustness_topk,
adv_lrp_robustness=adv_lrp_robustness_topk,
bay_lrp_robustness=bay_lrp_robustness_topk,
det_softmax_robustness=det_softmax_robustness_topk,
adv_softmax_robustness=adv_softmax_robustness_topk,
bay_softmax_robustness=bay_softmax_robustness_topk,
n_samples_list=n_samples_list,
topk_list=topk_list,
n_original_images=len(images),
learnable_layers_idxs=detnet.learnable_layers_idxs,
savedir=os.path.join(TESTS,'figures/layers_robustness'),
filename="scatterplot_"+filename+"_layers_topk="+str(topk_list[-1])) | 10,729 | 38.304029 | 119 | py |
BayesianRelevance | BayesianRelevance-master/src/attack_explanations.py | import argparse
import numpy as np
import os
import torch
from attacks.gradient_based import evaluate_attack
from attacks.run_attacks import *
from networks.advNN import *
from networks.baseNN import *
from networks.fullBNN import *
from utils import savedir
from utils.data import *
from utils.seeding import *
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="baseNN", type=str, help="baseNN, fullBNN, advNN")
parser.add_argument("--model_idx", default=0, type=int, help="Choose model idx from pre defined settings.")
parser.add_argument("--lrp_method", default="avg_heatmap", type=str, help="avg_prediction, avg_heatmap")
parser.add_argument("--rule", default="epsilon", type=str, help="Rule for LRP computation: epsilon, gamma, alpha1beta0")
parser.add_argument("--attack_method", default="fgsm", type=str, help="fgsm, pgd")
parser.add_argument("--epsilon", default=0.2, type=int, help="Strenght of a perturbation.")
parser.add_argument("--n_inputs", default=500, type=int, help="Number of test points to be attacked.")
parser.add_argument("--debug", default=False, type=eval, help="Run script in debugging mode.")
parser.add_argument("--device", default='cuda', type=str, help="cpu, cuda")
args = parser.parse_args()
MODE_ATKS = False
n_inputs=100 if args.debug else args.n_inputs
bayesian_attack_samples=[100]
hyperparams={'epsilon':args.epsilon}
print("PyTorch Version: ", torch.__version__)
if args.attack_method=="deepfool":
args.device="cpu"
if args.device=="cuda":
torch.set_default_tensor_type('torch.cuda.FloatTensor')
model = baseNN_settings["model_"+str(args.model_idx)]
x_test, y_test, inp_shape, out_size = load_dataset(dataset_name=model["dataset"], shuffle=False, n_inputs=n_inputs)[2:]
images = x_test.to(args.device)
labels = y_test.argmax(-1).to(args.device)
if args.model=="baseNN":
model = baseNN_settings["model_"+str(args.model_idx)]
model_savedir = get_model_savedir(model=args.model, dataset=model["dataset"], architecture=model["architecture"],
debug=args.debug, model_idx=args.model_idx)
net = baseNN(inp_shape, out_size, *list(model.values()))
net.load(savedir=model_savedir, device=args.device)
for layer_idx in net.learnable_layers_idxs:
print("\nlayer_idx =", layer_idx)
lrp_savedir = get_lrp_savedir(model_savedir=model_savedir, attack_method=args.attack_method,
layer_idx=layer_idx, rule=args.rule)
lrp = load_from_pickle(path=lrp_savedir, filename="det_lrp")
lrp_attack = attack(net=net, x_test=lrp, y_test=y_test, hyperparams=hyperparams,
device=args.device, method=args.attack_method)
save_to_pickle(lrp_attack, path=lrp_savedir, filename="det_lrp_attack")
elif args.model=="advNN":
model = baseNN_settings["model_"+str(args.model_idx)]
model_savedir = get_model_savedir(model=args.model, dataset=model["dataset"], architecture=model["architecture"],
debug=args.debug, model_idx=args.model_idx, attack_method='fgsm')
net = advNN(inp_shape, out_size, *list(model.values()), attack_method='fgsm')
net.load(savedir=model_savedir, device=args.device)
for layer_idx in net.learnable_layers_idxs:
print("\nlayer_idx =", layer_idx)
lrp_savedir = get_lrp_savedir(model_savedir=model_savedir, attack_method=args.attack_method,
layer_idx=layer_idx, rule=args.rule)
lrp = load_from_pickle(path=lrp_savedir, filename="det_lrp")
lrp_attack = attack(net=net, x_test=lrp, y_test=y_test, hyperparams=hyperparams,
device=args.device, method=args.attack_method)
save_to_pickle(lrp_attack, path=lrp_savedir, filename="det_lrp_attack")
elif args.model=="fullBNN":
m = fullBNN_settings["model_"+str(args.model_idx)]
model_savedir = get_model_savedir(model=args.model, dataset=m["dataset"], architecture=m["architecture"],
debug=args.debug, model_idx=args.model_idx)
net = BNN(m["dataset"], *list(m.values())[1:], inp_shape, out_size)
net.load(savedir=model_savedir, device=args.device)
batch_size = 4000 if m["inference"] == "hmc" else 128
num_workers = 0 if args.device=="cuda" else 4
for layer_idx in net.basenet.learnable_layers_idxs:
lrp_savedir = get_lrp_savedir(model_savedir=model_savedir, attack_method=args.attack_method,
layer_idx=layer_idx, rule=args.rule, lrp_method=args.lrp_method)
for n_samples in bayesian_attack_samples:
print(f"\n--- Layer_idx = {layer_idx} samp = {n_samples} ---")
lrp = load_from_pickle(path=lrp_savedir, filename="bay_lrp_samp="+str(n_samples))
lrp_attack = attack(net=net, x_test=lrp, y_test=y_test, device=args.device, hyperparams=hyperparams,
method=args.attack_method, n_samples=n_samples)
save_to_pickle(lrp_attack, path=lrp_savedir, filename="bay_lrp_attack_samp="+str(n_samples))
else:
raise NotImplementedError
| 4,775 | 40.530435 | 120 | py |
BayesianRelevance | BayesianRelevance-master/src/networks/redBNN.py | """
Neural network with one bayesian layer.
"""
import argparse
import copy
import numpy as np
import os
import pandas as pd
from collections import OrderedDict
import torch
import torch.distributions.constraints as constraints
import torch.nn.functional as nnf
import torch.optim as torchopt
from torch import nn
softplus = torch.nn.Softplus()
import pyro
import pyro.optim as pyroopt
from pyro import poutine
from pyro.distributions import Categorical, Normal, OneHotCategorical, Uniform
from pyro.infer import Predictive, SVI, TraceMeanField_ELBO, Trace_ELBO
from pyro.infer.mcmc import HMC, MCMC, NUTS
from pyro.nn import PyroModule
from networks.baseNN import baseNN
from utils.data import *
from utils.savedir import *
DEBUG=False
# Predefined redBNN configurations, keyed by "model_<idx>". Each entry is a
# plain dict of training settings consumed by the experiment scripts together
# with `get_hyperparams`; "baseNN_idx" selects the pre-trained deterministic
# network the redBNN is built on.
redBNN_settings = {"model_0":{"dataset":"mnist", "inference":"svi", "hidden_size":512, 
			"n_inputs":60000, "epochs":5, "lr":0.01, 
			"activation":"leaky", "architecture":"conv", "baseNN_idx":0},
		 "model_1":{"dataset":"fashion_mnist", "inference":"svi", "hidden_size":1024, 
			"n_inputs":60000, "epochs":5, "lr":0.01, 
			"activation":"leaky", "architecture":"conv", "baseNN_idx":1},
		 }
def get_hyperparams(model_dict):
	"""Extract the inference-specific training hyperparameters from a settings dict.

	Args:
		model_dict (dict): one entry of `redBNN_settings`; must contain an
			"inference" key ("svi" or "hmc") plus the matching entries
			("epochs"/"lr" for SVI, "hmc_samples"/"warmup" for HMC).

	Returns:
		dict: the hyperparameters for the chosen inference method.

	Raises:
		ValueError: if "inference" is neither "svi" nor "hmc" (the previous
			version silently returned None in that case).
	"""
	inference = model_dict["inference"]
	if inference == "svi":
		return {"epochs": model_dict["epochs"], "lr": model_dict["lr"]}
	elif inference == "hmc":
		return {"hmc_samples": model_dict["hmc_samples"], "warmup": model_dict["warmup"]}
	else:
		raise ValueError(f"Unknown inference method: {inference}")
class redBNN(PyroModule):
	"""Reduced Bayesian network.

	Wraps a pre-trained deterministic `baseNN` and makes a single learnable
	layer (selected by `layer_idx`) Bayesian: a Normal(0, 1) prior is placed on
	that layer's weight and bias, and the posterior is inferred with SVI
	(HMC training is not implemented yet). All other layers keep their
	pre-trained deterministic values.
	"""

	def __init__(self, dataset_name, inference, hyperparams, base_net, layer_idx):
		"""
		Args:
			dataset_name (str): dataset identifier, used in the model name.
			inference (str): "svi" or "hmc".
			hyperparams (dict): output of `get_hyperparams`.
			base_net (baseNN): pre-trained deterministic network.
			layer_idx (int): 1-based index of the learnable layer to make
				Bayesian; negative values count from the end.
		"""
		super(redBNN, self).__init__()
		self.dataset_name = dataset_name
		self.hidden_size = base_net.hidden_size
		self.architecture = base_net.architecture
		self.activation = base_net.activation
		self.inference = inference
		self.basenet = base_net
		self.hyperparams = hyperparams
		self.layer_idx = layer_idx
		# Resolve the Bayesian layer now, both to validate layer_idx early and
		# to report the number of random weights.
		w, b, w_name, b_name = self._bayesian_layer(layer_idx)
		self.name = self._set_name()
		print("\nBayesian layer:", w_name, b_name)
		print("redBNN n. of learnable weights = ", sum(p.numel() for p in [w,b]))
		self.n_layers = self.basenet.n_layers

	def _set_name(self):
		"""Build the unique model name used for checkpoint filenames."""
		name = str(self.basenet.dataset_name)+"_redBNN_idx="+str(self.layer_idx)+"_hid="+str(self.hidden_size)+\
			   "_arch="+str(self.architecture)+"_act="+str(self.activation)
		if self.inference == "svi":
			return name+"_ep="+str(self.hyperparams["epochs"])+"_lr="+\
				   str(self.hyperparams["lr"])+"_"+str(self.inference)
		elif self.inference == "hmc":
			return name+"_samp="+str(self.hyperparams["hmc_samples"])+\
				   "_warm="+str(self.hyperparams["warmup"])+"_"+str(self.inference)

	def _bayesian_layer(self, layer_idx):
		"""Return (weight, bias, weight_name, bias_name) for the chosen layer.

		Assumes the state_dict holds (weight, bias) pairs in layer order, so
		learnable layer `i` (1-based) owns state_dict entries 2(i-1), 2(i-1)+1.
		Negative `layer_idx` counts from the last learnable layer.
		"""
		learnable_params = self.basenet.model.state_dict()
		n_learnable_layers = int(len(learnable_params)/2)
		layer_idx = layer_idx+n_learnable_layers+1 if layer_idx<0 else layer_idx
		if layer_idx > len(learnable_params)/2:
			raise ValueError(f"\n\nThere are only {n_learnable_layers} learnable layers.\n")
		w_name, w = list(learnable_params.items())[2*(layer_idx-1)]
		b_name, b = list(learnable_params.items())[2*(layer_idx-1)+1]
		return w, b, w_name, b_name

	def model(self, x_data, y_data):
		"""Pyro model: Normal(0,1) prior on the Bayesian layer only.

		Any param-store entries for the non-Bayesian layers are removed so that
		only the chosen layer is treated as random.
		"""
		w, b, w_name, b_name = self._bayesian_layer(self.layer_idx)
		model = self.basenet.model
		for param_name in self.basenet.model.state_dict().keys():
			if param_name not in [w_name, b_name]:
				pyro.get_param_store().__delitem__('module$$$'+param_name)
		w_prior = Normal(loc=torch.zeros_like(w), scale=torch.ones_like(w))
		b_prior = Normal(loc=torch.zeros_like(b), scale=torch.ones_like(b))
		priors = {w_name: w_prior, b_name: b_prior}
		lifted_module = pyro.random_module("module", model, priors)()
		with pyro.plate("data", len(x_data)):
			logits = lifted_module(x_data)
			lhat = nnf.log_softmax(logits, dim=-1)
			cond_model = pyro.sample("obs", Categorical(logits=lhat), obs=y_data)

	def guide(self, x_data, y_data=None):
		"""Pyro guide: independent Normal variational posteriors for the
		Bayesian layer's weight and bias; returns the sampled logits."""
		w, b, w_name, b_name = self._bayesian_layer(self.layer_idx)
		model = self.basenet.model
		w_loc = pyro.param(w_name+"_loc", torch.randn_like(w))
		w_scale = pyro.param(w_name+"_scale", torch.randn_like(w))
		w_dist = Normal(loc=w_loc, scale=w_scale)
		b_loc = pyro.param(b_name+"_loc", torch.randn_like(b))
		b_scale = pyro.param(b_name+"_scale", torch.randn_like(b))
		b_dist = Normal(loc=b_loc, scale=b_scale)
		dists = {w_name: w_dist, b_name: b_dist}
		lifted_module = pyro.random_module("module", model, dists)()
		with pyro.plate("data", len(x_data)):
			logits = lifted_module(x_data)
		return logits

	def save(self, savedir):
		"""Save learned parameters: the Pyro param store (SVI) or one
		state_dict file per posterior sample (HMC)."""
		filename = self.name+"_weights"
		if self.inference == "svi":
			os.makedirs(savedir, exist_ok=True)
			self.basenet.to("cpu")
			self.to("cpu")
			param_store = pyro.get_param_store()
			print(f"\nlearned params = {param_store.keys()}")
			fullpath = os.path.join(savedir, filename+".pt")
			print("\nSaving: ", fullpath)
			param_store.save(fullpath)
		elif self.inference == "hmc":
			savedir = os.path.join(savedir, "weights")
			os.makedirs(savedir, exist_ok=True)
			self.basenet.to("cpu")
			self.to("cpu")
			for idx, weights in enumerate(self.posterior_samples):
				fullpath = os.path.join(savedir, filename+"_"+str(idx)+".pt")
				torch.save(weights.state_dict(), fullpath)

	def load(self, savedir, device):
		"""Load previously saved parameters and move the model to `device`."""
		filename = self.name+"_weights"
		if self.inference == "svi":
			os.makedirs(savedir, exist_ok=True)
			param_store = pyro.get_param_store()
			param_store.load(os.path.join(savedir, filename + ".pt"))
			for key, value in param_store.items():
				param_store.replace_param(key, value.to(device), value)
			print("\nLoading ", os.path.join(savedir, filename + ".pt"))
		elif self.inference == "hmc":
			savedir = os.path.join(savedir, "weights")
			os.makedirs(savedir, exist_ok=True)
			self.posterior_samples = []
			# NOTE(review): relies on `self.n_samples` being set by the caller
			# before load(); it is never set in __init__ — confirm usage.
			for idx in range(self.n_samples):
				net_copy = copy.deepcopy(self.basenet)
				fullpath = os.path.join(savedir, filename+"_"+str(idx)+".pt")
				net_copy.load_state_dict(torch.load(fullpath))
				self.posterior_samples.append(net_copy)
			if len(self.posterior_samples) != self.n_samples:
				raise AttributeError("wrong number of posterior models")
		self.to(device)
		self.basenet.to(device)
		self.device = device

	def forward(self, inputs, n_samples=10, avg_posterior=False, sample_idxs=None, training=False,
				expected_out=True, layer_idx=-1, *args, **kwargs):
		"""Posterior-predictive forward pass.

		Draws `n_samples` networks from the posterior (seeded by
		`sample_idxs` for reproducibility) and evaluates them on `inputs` up
		to `layer_idx`. Returns the mean over samples if `expected_out`, else
		the stacked per-sample outputs. `avg_posterior` is currently unused.
		Extra *args/**kwargs (e.g. `explain`, `rule` for LRP) are forwarded
		to the underlying baseNN.
		"""
		if sample_idxs:
			if len(sample_idxs) != n_samples:
				raise ValueError("Number of sample_idxs should match number of samples.")
		else:
			sample_idxs = list(range(n_samples))

		if self.inference == "svi":
			if DEBUG:
				print("\nguide_trace =",
					  list(poutine.trace(self.guide).get_trace(inputs).nodes.keys()))
			preds = []
			if training:
				# During training just replay the guide (keeps the graph
				# attached for the ELBO's accuracy bookkeeping).
				for _ in range(n_samples):
					guide_trace = poutine.trace(self.guide).get_trace(inputs)
					preds.append(guide_trace.nodes['_RETURN']['value'])
			else:
				# At test time rebuild a deterministic copy of the basenet with
				# the sampled Bayesian-layer weights, so layer_idx/LRP kwargs
				# can be honoured by baseNN.forward.
				w, b, w_name, b_name = self._bayesian_layer(self.layer_idx)
				for seed in sample_idxs:
					pyro.set_rng_seed(seed)
					guide_trace = poutine.trace(self.guide).get_trace(inputs)
					weights = {}
					for key, value in self.basenet.model.state_dict().items():
						weights.update({str(key):value})
						if key in [w_name, b_name]:
							w = guide_trace.nodes[str(f"module$$${key}")]["value"]
							weights.update({str(key):w})
					basenet_copy = copy.deepcopy(self.basenet)
					basenet_copy.model.load_state_dict(weights)
					preds.append(basenet_copy.forward(inputs, layer_idx=layer_idx, *args, **kwargs))

		elif self.inference == "hmc":
			if n_samples > len(self.posterior_samples):
				raise ValueError("Too many samples. Max available samples =", len(self.posterior_samples))
			# Fix: `explain` and `rule` were referenced here without ever
			# being defined (NameError on every HMC forward pass); they are
			# optional LRP kwargs, so read them from **kwargs with defaults.
			explain = kwargs.get("explain", False)
			rule = kwargs.get("rule", None)
			preds = []
			posterior_predictive = self.posterior_samples
			if explain:
				for seed in sample_idxs:
					net = posterior_predictive[seed]
					preds.append(net.forward(inputs, explain=explain, rule=rule))
			else:
				for seed in sample_idxs:
					net = posterior_predictive[seed]
					preds.append(net.forward(inputs))

		logits = torch.stack(preds)
		return logits.mean(0) if expected_out else logits

	def _train_hmc(self, train_loader, savedir, device):
		# todo: refactor + check inferred weights — HMC training for the
		# reduced model is not implemented yet.
		raise NotImplementedError

	def _train_svi(self, train_loader, savedir, device):
		"""Train the Bayesian layer with SVI; saves weights and a
		loss/accuracy plot under `savedir`."""
		print("\n == redBNN SVI training ==")
		epochs, lr = (self.hyperparams["epochs"], self.hyperparams["lr"])
		optimizer = pyro.optim.Adam({"lr":lr})
		elbo = Trace_ELBO()
		svi = SVI(self.model, self.guide, optimizer, loss=elbo)
		loss_list = []
		accuracy_list = []
		start = time.time()
		for epoch in range(epochs):
			loss = 0.0
			correct_predictions = 0.0
			for x_batch, y_batch in train_loader:
				x_batch = x_batch.to(device)
				y_batch = y_batch.to(device)
				loss += svi.step(x_data=x_batch, y_data=y_batch.argmax(dim=-1))
				outputs = self.forward(x_batch, n_samples=10, training=True, avg_posterior=False).to(device)
				predictions = outputs.argmax(-1)
				labels = y_batch.argmax(-1)
				correct_predictions += (predictions == labels).sum().item()
			total_loss = loss / len(train_loader.dataset)
			accuracy = 100 * correct_predictions / len(train_loader.dataset)
			print(f"\n[Epoch {epoch + 1}]\t loss: {total_loss:.2f} \t accuracy: {accuracy:.2f}",
				  end="\t")
			loss_list.append(loss)
			accuracy_list.append(accuracy)
		execution_time(start=start, end=time.time())
		self.save(savedir)
		plot_loss_accuracy(dict={'loss':loss_list, 'accuracy':accuracy_list},
						   path=os.path.join(savedir, self.name+"_training.png"))

	def train(self, train_loader, savedir, device):
		"""Dispatch training to the chosen inference method."""
		self.to(device)
		self.basenet.to(device)
		self.device = device
		if self.inference == "svi":
			self._train_svi(train_loader, savedir, device)
		elif self.inference == "hmc":
			self._train_hmc(train_loader, savedir, device)

	def evaluate(self, test_loader, device, n_samples):
		"""Return posterior-predictive accuracy (%) on `test_loader`."""
		self.to(device)
		self.basenet.to(device)
		with torch.no_grad():
			correct_predictions = 0.0
			for x_batch, y_batch in test_loader:
				x_batch = x_batch.to(device)
				outputs = self.forward(x_batch, n_samples=n_samples)
				predictions = outputs.to(device).argmax(-1)
				labels = y_batch.to(device).argmax(-1)
				correct_predictions += (predictions == labels).sum().item()
			accuracy = 100 * correct_predictions / len(test_loader.dataset)
			print("Accuracy: %.2f%%" % (accuracy))
			return accuracy
| 14,109 | 35.840731 | 112 | py |
BayesianRelevance | BayesianRelevance-master/src/networks/advNN.py | """
Deterministic Neural Network model with adversarial training.
"""
import argparse
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.nn.functional as nnf
import torch.optim as torchopt
from torch import nn
from tqdm import tqdm
from utils.data import *
from utils.model_settings import baseNN_settings
from utils.savedir import *
from utils.seeding import *
from lrp.conv import Conv2d
from lrp.linear import Linear
from lrp.maxpool import MaxPool2d
from lrp.sequential import Sequential
from attacks.run_attacks import attack
from networks.baseNN import baseNN
DEBUG = False
class advNN(baseNN):
	"""Deterministic network trained adversarially: each training batch is
	first perturbed with the chosen attack (`attack_method`), and the loss is
	computed on the perturbed inputs."""

	def __init__(self, input_shape, output_size, dataset_name, hidden_size, activation,
							 architecture, epochs, lr, attack_method):
		"""Same settings as baseNN plus `attack_method` ("fgsm", "pgd", ...),
		which is used both during training and in the model name."""
		super(advNN, self).__init__(input_shape, output_size, dataset_name, hidden_size, activation,
							 architecture, epochs, lr)
		if math.log(hidden_size, 2).is_integer() is False or hidden_size<16:
			raise ValueError("\nhidden size should be a power of 2 greater than 16.")
		# NOTE(review): the attributes below (except attack_method, name) are
		# already set by baseNN.__init__ — the re-assignment is redundant but
		# harmless.
		self.dataset_name = dataset_name
		self.architecture = architecture
		self.hidden_size = hidden_size
		self.activation = activation
		self.attack_method = attack_method
		self.lr, self.epochs = lr, epochs
		self.loss_func = nn.CrossEntropyLoss()
		self.set_model(architecture, activation, input_shape, output_size, hidden_size)
		self.name = str(dataset_name)+"_advNN_hid="+str(hidden_size)+\
					"_arch="+str(self.architecture)+"_act="+str(self.activation)+\
					"_ep="+str(self.epochs)+"_lr="+str(self.lr)+"_atk="+str(attack_method)
		print("\nadvNN total number of weights =", sum(p.numel() for p in self.parameters()))
		self.n_layers = len(list(self.model.children()))
		learnable_params = self.model.state_dict()
		self.n_learnable_layers = int(len(learnable_params)/2)

	# NOTE(review): mutable default argument `hyperparams={}` — safe here only
	# because it is never mutated; consider `hyperparams=None`.
	def train(self, train_loader, savedir, device, hyperparams={}):
		"""Adversarial training loop: attack each batch in eval mode, then take
		an optimizer step on the perturbed batch. Saves weights to `savedir`."""
		print("\n == advNN training ==")
		self.to(device)
		optimizer = torchopt.Adam(params=self.parameters(), lr=self.lr)
		start = time.time()
		for epoch in tqdm(range(self.epochs)):
			total_loss = 0.0
			correct_predictions = 0.0
			for x_batch, y_batch in train_loader:
				x_batch = x_batch.to(device)
				y_batch = y_batch.to(device)
				# Craft adversarial examples in eval mode (no dropout/BN
				# updates while computing attack gradients).
				self.model.eval()
				x_attack = attack(net=self, x_test=x_batch, y_test=y_batch, device=device, hyperparams=hyperparams,
								  method=self.attack_method, verbose=False)
				outputs = self.forward(x_attack)
				# Back to train mode for the optimization step.
				self.model.train()
				optimizer.zero_grad()
				y_batch = y_batch.argmax(-1)
				loss = self.loss_func(outputs, y_batch)
				loss.backward()
				optimizer.step()
				predictions = outputs.argmax(dim=1)
				correct_predictions += (predictions == y_batch).sum()
				total_loss += loss.data.item() / len(train_loader.dataset)
			accuracy = 100 * correct_predictions / len(train_loader.dataset)
			print(f"\n[Epoch {epoch + 1}]\t loss: {total_loss:.8f} \t accuracy: {accuracy:.2f}",
				  end="\t")
		execution_time(start=start, end=time.time())
		self.model.eval()
		print(self.state_dict().keys())
		self.save(savedir)
| 3,593 | 35.30303 | 115 | py |
BayesianRelevance | BayesianRelevance-master/src/networks/baseNN.py | """
Deterministic Neural Network model.
Last layer is separated from the others.
"""
import argparse
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.nn.functional as nnf
import torch.optim as torchopt
from torch import nn
from utils.data import *
from utils.model_settings import baseNN_settings
from utils.savedir import *
from utils.seeding import *
from lrp.conv import Conv2d
from lrp.linear import Linear
from lrp.maxpool import MaxPool2d
from lrp.sequential import Sequential
DEBUG = False
class baseNN(nn.Module):
	"""Deterministic classifier with selectable architecture ("fc2", "fc3",
	"conv", "alexnet") and activation. `forward` can stop at an intermediate
	layer via `layer_idx`, which is used by the LRP machinery."""

	def __init__(self, input_shape, output_size, dataset_name, hidden_size, activation, 
							 architecture, epochs, lr):
		"""
		Args:
			input_shape (tuple): (channels, height, width).
			output_size (int): number of classes.
			dataset_name (str): dataset identifier, used in the model name.
			hidden_size (int): power of two >= 16.
			activation (str): "relu", "leaky", "sigm", "tanh" or "softplus".
			architecture (str): "fc2", "fc3", "conv" or "alexnet".
			epochs (int), lr (float): training settings (also in the name).
		"""
		super(baseNN, self).__init__()
		if math.log(hidden_size, 2).is_integer() is False or hidden_size<16:
			raise ValueError("\nhidden size should be a power of 2 greater than 16.")
		self.dataset_name = dataset_name
		self.architecture = architecture
		self.hidden_size = hidden_size
		self.activation = activation
		self.lr, self.epochs = lr, epochs
		self.loss_func = nn.CrossEntropyLoss()
		self.set_model(architecture, activation, input_shape, output_size, hidden_size)
		self.name = str(dataset_name)+"_baseNN_hid="+str(hidden_size)+\
					"_arch="+str(self.architecture)+"_act="+str(self.activation)+\
					"_ep="+str(self.epochs)+"_lr="+str(self.lr)
		print("\nbaseNN total number of weights =", sum(p.numel() for p in self.parameters()))
		self.n_layers = len(list(self.model.children()))
		learnable_params = self.model.state_dict()
		# Assumes every learnable layer contributes a (weight, bias) pair.
		self.n_learnable_layers = int(len(learnable_params)/2)

	def set_model(self, architecture, activation, input_shape, output_size, hidden_size):
		"""Build `self.model` (an nn.Sequential of LRP-aware layers) and
		`self.learnable_layers_idxs` (positions of layers with weights)."""
		input_size = input_shape[0]*input_shape[1]*input_shape[2]
		in_channels = input_shape[0]
		if activation == "relu":
			activ = nn.ReLU
		elif activation == "leaky":
			activ = nn.LeakyReLU
		elif activation == "sigm":
			activ = nn.Sigmoid
		elif activation == "tanh":
			activ = nn.Tanh
		elif activation == "softplus":
			activ = nn.Softplus
		else:
			raise AssertionError("\nWrong activation name.")
		if architecture == "fc2":
			self.model = nn.Sequential(
				nn.Flatten(),
				Linear(input_size, hidden_size),
				activ(),
				Linear(hidden_size, hidden_size),
				activ(),
				Linear(hidden_size, output_size)
				)
			self.learnable_layers_idxs = [1, 3, 5]
		elif architecture == "fc3":
			self.model = nn.Sequential(
				nn.Flatten(),
				Linear(input_size, hidden_size),
				activ(),
				Linear(hidden_size, hidden_size),
				activ(),
				Linear(hidden_size, hidden_size),
				activ(),
				Linear(hidden_size, hidden_size),
				activ(),
				Linear(hidden_size, output_size))
			# NOTE(review): lists 6 indexes but fc3 has only 5 Linear layers;
			# idx 11 is out of range — confirm intended value.
			self.learnable_layers_idxs = [1, 3, 5, 7, 9, 11]
		elif architecture == "conv":
			if self.dataset_name in ["mnist","fashion_mnist"]:
				self.model = nn.Sequential(
					Conv2d(in_channels=in_channels, out_channels=16, kernel_size=5, padding=0),
					activ(),
					MaxPool2d(kernel_size=2),
					Conv2d(in_channels=16, out_channels=hidden_size, kernel_size=5, padding=0),
					activ(),
					MaxPool2d(kernel_size=2, stride=1),
					nn.Flatten(),
					Linear(int(hidden_size/(4*4))*input_size, output_size))
				self.learnable_layers_idxs = [0, 3, 7]
			elif self.dataset_name in ["cifar"]:
				self.model = nn.Sequential(
					Conv2d(in_channels=in_channels, out_channels=32, kernel_size=5, padding=0),
					activ(),
					MaxPool2d(kernel_size=2),
					Conv2d(in_channels=32, out_channels=hidden_size, kernel_size=5, padding=0),
					activ(),
					Conv2d(in_channels=hidden_size, out_channels=hidden_size, kernel_size=5, padding=0),
					activ(),
					MaxPool2d(kernel_size=2, stride=1),
					nn.Flatten(),
					Linear(41472, output_size))
				# NOTE(review): idx 12 exceeds the 10 modules above, and the
				# third Conv2d sits at idx 5 (not 7) — confirm these indexes.
				self.learnable_layers_idxs = [0, 4, 7, 12]
		elif architecture == "alexnet":
			self.model = nn.Sequential(
				Conv2d(in_channels=in_channels, out_channels=64, kernel_size=11, stride=4, padding=5, bias=True),
				activ(),
				MaxPool2d(kernel_size=2, stride=2),
				Conv2d(in_channels=64, out_channels=192, kernel_size=5, padding=2, bias=True),
				activ(),
				MaxPool2d(kernel_size=2, stride=2),
				Conv2d(in_channels=192, out_channels=384, kernel_size=3, padding=1, bias=True),
				activ(),
				Conv2d(in_channels=384, out_channels=hidden_size, kernel_size=3, padding=1, bias=True),
				activ(),
				Conv2d(in_channels=hidden_size, out_channels=128, kernel_size=3, padding=1, bias=True),
				activ(),
				MaxPool2d(kernel_size=2, stride=2),
				nn.Flatten(),
				Linear(1 * 1 * 128, output_size, bias=True)
				)
			# NOTE(review): `learnable_layers_idxs` is not set for alexnet;
			# code iterating over it will raise AttributeError.
		else:
			raise NotImplementedError()

	def num_flat_features(self, x):
		"""Number of features per sample when flattening all non-batch dims.

		NOTE(review): returns the product plus 1, not the product — the `+ 1`
		looks like a bug (or deliberate bias slot?); confirm before relying
		on this helper.
		"""
		size = x.size()[1:]
		num_features = 1
		for s in size:
			num_features *= s
		return num_features + 1

	def train(self, train_loader, savedir, device):
		"""Standard supervised training with Adam; saves weights to `savedir`.
		Expects one-hot labels (argmax is taken before the loss)."""
		print("\n == baseNN training ==")
		self.to(device)
		optimizer = torchopt.Adam(params=self.parameters(), lr=self.lr)
		start = time.time()
		for epoch in range(self.epochs):
			total_loss = 0.0
			correct_predictions = 0.0
			for x_batch, y_batch in train_loader:
				x_batch = x_batch.to(device)
				y_batch = y_batch.to(device).argmax(-1)
				outputs = self.forward(x_batch)
				optimizer.zero_grad()
				loss = self.loss_func(outputs, y_batch)
				loss.backward()
				optimizer.step()
				predictions = outputs.argmax(dim=1)
				correct_predictions += (predictions == y_batch).sum()
				total_loss += loss.data.item() / len(train_loader.dataset)
			accuracy = 100 * correct_predictions / len(train_loader.dataset)
			print(f"\n[Epoch {epoch + 1}]\t loss: {total_loss:.8f} \t accuracy: {accuracy:.2f}",
				  end="\t")
		execution_time(start=start, end=time.time())
		self.save(savedir)

	def _get_learnable_layer_idx(self, layer_idx):
		"""Map a learnable-layer index to its position in the Sequential.

		NOTE(review): both branches are identical — negative indexes are
		handled by Python list indexing, so the if/else is redundant.
		"""
		if abs(layer_idx)>self.n_layers:
			raise ValueError(f"Max number of available layers is {self.n_layers}")
		if layer_idx<0:
			layer_idx = self.learnable_layers_idxs[layer_idx]
		else:
			layer_idx = self.learnable_layers_idxs[layer_idx]
		return layer_idx

	def _set_correct_layer_idx(self, layer_idx):
		"""
		-1 = n_learnable_layers-1 = last learnable layer idx
		0 = -n_learnable_layers = firsy learnable layer idx
		"""
		# Translates a user-facing layer index into a slice bound for
		# `self.model.children()`: -1 means "run the full network" (None),
		# other values are shifted by one so the slice includes that layer.
		# if layer_idx is not None:
		if abs(layer_idx)>self.n_layers:
			raise ValueError(f"Max number of available layers is {self.n_layers}")
		if layer_idx==-1:
			layer_idx=None
		else:
			if layer_idx<0:
				layer_idx+=self.n_layers+1
			else:
				layer_idx+=1
		return layer_idx

	def forward(self, inputs, layer_idx=-1, softmax=False, *args, **kwargs):
		"""Forward pass up to `layer_idx` (default: full network), routed
		through the LRP-aware Sequential so `explain`/`rule` kwargs work.
		Applies a final softmax if requested."""
		layer_idx = self._set_correct_layer_idx(layer_idx)
		# print(self.model(inputs).shape)
		# preds = nn.Sequential(*list(self.model.children())[:layer_idx])(inputs)
		model = Sequential(*list(self.model.children())[:layer_idx])
		preds = model.forward(inputs, *args, **kwargs)
		if softmax:
			preds = nnf.softmax(preds, dim=-1)
		return preds

	def get_logits(self, *args, **kwargs):
		"""Full-network forward pass (logits, no softmax by default)."""
		return self.forward(layer_idx=-1, *args, **kwargs)

	def save(self, savedir):
		"""Save the state_dict to <savedir>/<name>_weights.pt (on CPU)."""
		filename=self.name+"_weights.pt"
		os.makedirs(savedir, exist_ok=True)
		self.to("cpu")
		torch.save(self.state_dict(), os.path.join(savedir, filename))
		if DEBUG:
			# NOTE(review): these keys ('l2.0.weight', 'out.weight') do not
			# match the current Sequential naming ('model.N.weight') and would
			# raise KeyError if DEBUG were enabled.
			print("\nCheck saved weights:")
			print("\nstate_dict()['l2.0.weight'] =", self.state_dict()["l2.0.weight"][0,0,:3])
			print("\nstate_dict()['out.weight'] =",self.state_dict()["out.weight"][0,:3])

	def load(self, device, savedir):
		"""Load the state_dict from <savedir>/<name>_weights.pt.

		NOTE(review): argument order (device, savedir) is the reverse of
		redBNN.load/BNN.load(savedir, device); callers use keywords, so do
		not reorder without auditing positional call sites.
		"""
		filename=self.name+"_weights.pt"
		self.load_state_dict(torch.load(os.path.join(savedir, filename)))
		self.to(device)
		if DEBUG:
			# NOTE(review): same stale state_dict keys as in save().
			print("\nCheck loaded weights:")
			print("\nstate_dict()['l2.0.weight'] =", self.state_dict()["l2.0.weight"][0,0,:3])
			print("\nstate_dict()['out.weight'] =",self.state_dict()["out.weight"][0,:3])

	def evaluate(self, test_loader, device, *args, **kwargs):
		"""Return accuracy (%) on `test_loader` (expects one-hot labels)."""
		self.to(device)
		with torch.no_grad():
			correct_predictions = 0.0
			for x_batch, y_batch in test_loader:
				x_batch = x_batch.to(device)
				y_batch = y_batch.to(device).argmax(-1)
				outputs = self(x_batch)
				predictions = outputs.argmax(dim=1)
				correct_predictions += (predictions == y_batch).sum()
			accuracy = 100 * correct_predictions / len(test_loader.dataset)
			print("\nAccuracy: %.2f%%" % (accuracy))
			return accuracy
BayesianRelevance | BayesianRelevance-master/src/networks/fullBNN.py | """
Bayesian Neural Network model
"""
import argparse
import copy
import keras
import numpy as np
import os
import pandas as pd
from collections import OrderedDict
import torch
import torch.distributions.constraints as constraints
import torch.nn.functional as nnf
import torch.optim as torchopt
from torch import nn
softplus = torch.nn.Softplus()
import pyro
import pyro.optim as pyroopt
from pyro import poutine
from pyro.distributions import Categorical, Normal, OneHotCategorical, Uniform
from pyro.infer import Predictive, SVI, TraceMeanField_ELBO, Trace_ELBO
from pyro.infer.mcmc import HMC, MCMC, NUTS
from pyro.nn import PyroModule
from networks.baseNN import baseNN
from utils.data import *
from utils.model_settings import fullBNN_settings
from utils.savedir import *
DEBUG=False
class BNN(PyroModule):
	"""Fully Bayesian network: a Normal(0,1) prior on every weight of the
	underlying baseNN, with posterior inference via SVI (diagonal Normal
	guide) or HMC/NUTS (one basenet copy per posterior sample)."""

	def __init__(self, dataset_name, hidden_size, activation, architecture, inference, 
				 epochs, lr, hmc_samples, warmup, input_shape, output_size):
		"""Build the deterministic skeleton (`self.basenet`) and store the
		inference settings; DEBUG shrinks the HMC run for quick tests."""
		super(BNN, self).__init__()
		self.dataset_name = dataset_name
		self.inference = inference
		self.architecture = architecture
		self.epochs = epochs
		self.lr = lr
		self.hmc_samples = 20 if DEBUG else hmc_samples
		self.warmup = 5 if DEBUG else warmup
		# HMC kernel settings (only used for naming; NUTS adapts step size).
		self.step_size = 0.5
		self.num_steps = 10
		self.basenet = baseNN(dataset_name=dataset_name, input_shape=input_shape, 
							  output_size=output_size, hidden_size=hidden_size, 
							  activation=activation, architecture=architecture, 
							  epochs=epochs, lr=lr)
		self.name = self.get_name()
		self.n_layers = self.basenet.n_layers

	def get_name(self, n_inputs=None):
		"""Build the unique model name used for checkpoint filenames."""
		name = str(self.dataset_name)+"_fullBNN_"+str(self.inference)+"_hid="+\
			   str(self.basenet.hidden_size)+"_act="+str(self.basenet.activation)+\
			   "_arch="+str(self.basenet.architecture)
		if n_inputs:
			name = name+"_inp="+str(n_inputs)
		if self.inference == "svi":
			return name+"_ep="+str(self.epochs)+"_lr="+str(self.lr)
		elif self.inference == "hmc":
			return name+"_samp="+str(self.hmc_samples)+"_warm="+str(self.warmup)+\
				   "_stepsize="+str(self.step_size)+"_numsteps="+str(self.num_steps)

	def model(self, x_data, y_data):
		"""Pyro model: standard-Normal prior on every basenet parameter,
		Categorical likelihood on the log-softmax logits."""
		priors = {}
		for key, value in self.basenet.state_dict().items():
			loc = torch.zeros_like(value)
			scale = torch.ones_like(value)
			prior = Normal(loc=loc, scale=scale)
			priors.update({str(key):prior})
		lifted_module = pyro.random_module("module", self.basenet, priors)()
		with pyro.plate("data", len(x_data)):
			logits = lifted_module(x_data)
			logits = nnf.log_softmax(logits, dim=-1)
			obs = pyro.sample("obs", Categorical(logits=logits), obs=y_data)

	def guide(self, x_data, y_data=None):
		"""Pyro guide: independent Normal posterior per parameter (softplus
		keeps the learned scales positive); returns the sampled logits."""
		dists = {}
		for key, value in self.basenet.state_dict().items():
			loc = pyro.param(str(f"{key}_loc"), torch.randn_like(value, dtype=torch.float32))
			scale = pyro.param(str(f"{key}_scale"), torch.randn_like(value, dtype=torch.float32))
			distr = Normal(loc=loc, scale=softplus(scale))
			dists.update({str(key):distr})
		lifted_module = pyro.random_module("module", self.basenet, dists)()
		with pyro.plate("data", len(x_data)):
			logits = lifted_module(x_data)
		return logits

	def save(self, savedir):
		"""Save learned parameters: Pyro param store (SVI) or one state_dict
		file per posterior sample (HMC)."""
		filename=self.name+"_weights"
		if self.inference == "svi":
			os.makedirs(savedir, exist_ok=True)
			self.basenet.to("cpu")
			self.to("cpu")
			param_store = pyro.get_param_store()
			print(f"\nlearned params = {param_store.get_all_param_names()}")
			fullpath=os.path.join(savedir, filename+".pt")
			print("\nSaving: ", fullpath)
			param_store.save(fullpath)
		elif self.inference == "hmc":
			savedir=os.path.join(savedir, "weights")
			os.makedirs(savedir, exist_ok=True)
			self.basenet.to("cpu")
			self.to("cpu")
			for idx, weights in enumerate(self.posterior_samples):
				fullpath=os.path.join(savedir, filename+"_"+str(idx)+".pt")
				torch.save(weights.state_dict(), fullpath)

	def load(self, savedir, device):
		"""Load previously saved parameters and move the model to `device`."""
		filename=self.name+"_weights"
		if self.inference == "svi":
			os.makedirs(savedir, exist_ok=True)
			param_store = pyro.get_param_store()
			param_store.load(os.path.join(savedir, filename + ".pt"))
			for key, value in param_store.items():
				param_store.replace_param(key, value.to(device), value)
			print("\nLoading ", os.path.join(savedir, filename + ".pt"))
		elif self.inference == "hmc":
			savedir=os.path.join(savedir, "weights")
			os.makedirs(savedir, exist_ok=True)
			self.posterior_samples=[]
			for idx in range(self.hmc_samples):
				net_copy = copy.deepcopy(self.basenet)
				fullpath=os.path.join(savedir, filename+"_"+str(idx)+".pt")
				net_copy.load_state_dict(torch.load(fullpath))
				self.posterior_samples.append(net_copy)
			if len(self.posterior_samples)!=self.hmc_samples:
				raise AttributeError("wrong number of posterior models")
		self.to(device)
		self.basenet.to(device)

	def _set_correct_layer_idx(self, layer_idx):
		# Delegates layer-index translation to the deterministic skeleton.
		return self.basenet._set_correct_layer_idx(layer_idx)

	def forward(self, inputs, n_samples=10, sample_idxs=None, training=False, 
				expected_out=True, softmax=False, layer_idx=-1, *args, **kwargs):
		"""Posterior-predictive forward pass.

		Draws `n_samples` networks from the posterior (seeded via
		`sample_idxs` for reproducibility) and evaluates them up to
		`layer_idx`; returns the mean over samples if `expected_out`, else
		the stacked per-sample outputs.
		"""
		# change external attack libraries behavior #
		# Instance attributes, when set by an attack wrapper, override the
		# call arguments so third-party attack code that only calls
		# net(inputs) still uses the intended sampling configuration.
		n_samples = self.n_samples if hasattr(self, "n_samples") else n_samples
		sample_idxs = self.sample_idxs if hasattr(self, "sample_idxs") else sample_idxs
		# avg_posterior = self.avg_posterior if hasattr(self, "avg_posterior") else avg_posterior
		layer_idx = self.layer_idx if hasattr(self, "layer_idx") else layer_idx
		#############################################
		if sample_idxs:
			if len(sample_idxs) != n_samples:
				raise ValueError("Number of sample_idxs should match number of samples.")
		else:
			sample_idxs = list(range(n_samples))
		if self.inference == "svi":
			preds = []
			if training:
				# Single guide replay during training keeps the graph attached
				# for the accuracy bookkeeping in _train_svi.
				guide_trace = poutine.trace(self.guide).get_trace(inputs)
				out = guide_trace.nodes['_RETURN']['value']
				preds.append(out)
			else:
				# for seed in sample_idxs:
				#	  pyro.set_rng_seed(seed)
				#	  guide_trace = poutine.trace(self.guide).get_trace(inputs)
				#	  sampled_dict = {}
				#	  for key in self.basenet.state_dict().keys():
				#		  dist = Normal(loc=guide_trace.nodes[key+"_loc"]["value"],
				#						scale=softplus(guide_trace.nodes[key+"_scale"]["value"]))
				#		  weights = pyro.sample(key, dist)
				#		  sampled_dict.update({key:weights})
				#	  basenet_copy = copy.deepcopy(self.basenet)
				#	  basenet_copy.load_state_dict(sampled_dict)
				#	  out = basenet_copy.forward(inputs, layer_idx=layer_idx, *args, **kwargs)
				#	  if softmax:
				#		  out = nnf.softmax(out, dim=-1)
				#	  preds.append(out)
				# At test time rebuild a deterministic basenet copy with the
				# sampled weights so layer_idx / LRP kwargs are honoured.
				for seed in sample_idxs:
					pyro.set_rng_seed(seed)
					guide_trace = poutine.trace(self.guide).get_trace(inputs)
					weights = {}
					for key, value in self.basenet.state_dict().items():
						w = guide_trace.nodes[str(f"module$$${key}")]["value"]
						weights.update({str(key):w})
					basenet_copy = copy.deepcopy(self.basenet)
					basenet_copy.load_state_dict(weights)
					out = basenet_copy.forward(inputs, layer_idx=layer_idx, *args, **kwargs)
					if softmax:
						out = nnf.softmax(out, dim=-1)
					preds.append(out)
		elif self.inference == "hmc":
			if n_samples>len(self.posterior_samples):
				raise ValueError("Too many samples. Max available samples =", len(self.posterior_samples))
			preds = []
			posterior_predictive = self.posterior_samples
			for seed in sample_idxs:
				net = posterior_predictive[seed]
				out = net.forward(inputs, layer_idx=layer_idx, *args, **kwargs)
				if softmax:
					out = nnf.softmax(out, dim=-1)
				preds.append(out)
		preds = torch.stack(preds)
		return preds.mean(0) if expected_out else preds

	def _train_hmc(self, train_loader, n_samples, warmup, step_size, num_steps, savedir, device):
		"""Collect posterior samples with NUTS, a few samples per batch, and
		store each as a deterministic basenet copy."""
		print("\n == fullBNN HMC training ==")
		pyro.clear_param_store()
		if DEBUG:
			num_batches = 1
			batch_samples = 2
			warmup = 2
		else:
			num_batches = int(len(train_loader.dataset)/train_loader.batch_size)
			batch_samples = int(n_samples/num_batches)+1
		print("\nn_batches =", num_batches,"\tbatch_samples =", batch_samples)
		# kernel = HMC(self.model, step_size=step_size, num_steps=num_steps)
		kernel = NUTS(self.model, adapt_step_size=True)
		mcmc = MCMC(kernel=kernel, num_samples=batch_samples, warmup_steps=warmup, num_chains=1)
		self.posterior_samples=[]
		state_dict_keys = list(self.basenet.state_dict().keys())
		start = time.time()
		for x_batch, y_batch in train_loader:
			x_batch = x_batch.to(device)
			y_batch = y_batch.to(device).argmax(-1)
			mcmc_run = mcmc.run(x_batch, y_batch)
			posterior_samples = mcmc.get_samples(batch_samples)
			# print('module$$$model.1.weight:\n', posterior_samples['module$$$model.1.weight'][:,0,:5])
			for sample_idx in range(batch_samples):
				net_copy = copy.deepcopy(self.basenet)
				model_dict=OrderedDict({})
				# Relies on get_samples() preserving state_dict key order.
				for weight_idx, weights in enumerate(posterior_samples.values()):
					model_dict.update({state_dict_keys[weight_idx]:weights[sample_idx]})
				net_copy.load_state_dict(model_dict)
				self.posterior_samples.append(net_copy)
		execution_time(start=start, end=time.time())
		self.save(savedir)

	def _train_svi(self, train_loader, epochs, lr, savedir, device):
		"""Train the variational posterior with SVI; saves weights and a
		loss/accuracy plot under `savedir`."""
		print("\n == fullBNN SVI training ==")
		optimizer = pyro.optim.Adam({"lr":lr})
		elbo = Trace_ELBO()
		svi = SVI(self.model, self.guide, optimizer, loss=elbo)
		loss_list = []
		accuracy_list = []
		start = time.time()
		for epoch in range(epochs):
			loss = 0.0
			correct_predictions = 0.0
			for x_batch, y_batch in train_loader:
				x_batch = x_batch.to(device)
				y_batch = y_batch.to(device)
				loss += svi.step(x_data=x_batch, y_data=y_batch.argmax(dim=-1))
				outputs = self.forward(x_batch, training=True).to(device)
				predictions = outputs.argmax(-1)
				labels = y_batch.argmax(-1)
				correct_predictions += (predictions == labels).sum().item()
				if DEBUG:
					print("\n", pyro.get_param_store()["model.0.weight_loc"][0][:5])
					print("\n",predictions[:10],"\n", labels[:10])
			total_loss = loss / len(train_loader.dataset)
			accuracy = 100 * correct_predictions / len(train_loader.dataset)
			print(f"\n[Epoch {epoch + 1}]\t loss: {total_loss:.2f} \t accuracy: {accuracy:.2f}", 
				  end="\t")
			loss_list.append(loss)
			accuracy_list.append(accuracy)
		execution_time(start=start, end=time.time())
		self.save(savedir)
		plot_loss_accuracy(dict={'loss':loss_list, 'accuracy':accuracy_list},
						   path=os.path.join(savedir, self.name+"_training.png"))

	def train(self, train_loader, savedir, device):
		"""Dispatch training to the chosen inference method."""
		self.to(device)
		self.basenet.to(device)
		if self.inference == "svi":
			self._train_svi(train_loader, self.epochs, self.lr, savedir, device)
		elif self.inference == "hmc":
			self._train_hmc(train_loader, self.hmc_samples, self.warmup,
							self.step_size, self.num_steps, savedir, device)

	def evaluate(self, test_loader, device, avg_posterior=False, n_samples=10, sample_idxs=None):
		"""Return posterior-predictive accuracy (%) on `test_loader`.

		NOTE(review): `avg_posterior` is accepted but never used.
		"""
		self.to(device)
		self.basenet.to(device)
		with torch.no_grad():
			correct_predictions = 0.0
			for x_batch, y_batch in test_loader:
				x_batch = x_batch.to(device)
				outputs = self.forward(x_batch, n_samples=n_samples, sample_idxs=sample_idxs)
				predictions = outputs.to(device).argmax(-1)
				labels = y_batch.to(device).argmax(-1)
				correct_predictions += (predictions == labels).sum().item()
			accuracy = 100 * correct_predictions / len(test_loader.dataset)
			print("Accuracy: %.2f%%" % (accuracy))
			return accuracy

	def get_logits(self, *args, **kwargs):
		"""Full-network posterior-predictive logits."""
		return self.forward(layer_idx=-1, *args, **kwargs)
BayesianRelevance | BayesianRelevance-master/src/attacks/deepfool.py | ### CODE TAKEN FROM: https://github.com/aminul-huq/DeepFool/tree/master
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data_utils
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
import numpy as np
import math
import copy
import os
def DeepFool(image, net, num_classes=10, overshoot=0.02, max_iter=10):
    """Minimal-perturbation DeepFool attack (Moosavi-Dezfooli et al.).

    Iteratively pushes `image` across the nearest linearized decision
    boundary among the top `num_classes` classes until the predicted label
    flips or `max_iter` is reached.

    Returns (r_tot, loop_i, label, k_i, pert_image):
    total perturbation, iterations used, original label, final label,
    and the perturbed image.
    """
    # Forward pass on CPU numpy; assumes `image` and `net` live on CPU — TODO confirm.
    f_image = net.forward(image).data.numpy().flatten()
    # Class indices sorted by descending score; keep the top `num_classes`.
    I = (np.array(f_image)).flatten().argsort()[::-1]
    I = I[0:num_classes]
    label = I[0]  # original (highest-scoring) class
    input_shape = image.detach().numpy().shape
    pert_image = copy.deepcopy(image)
    w = np.zeros(input_shape)      # best boundary normal found so far
    r_tot = np.zeros(input_shape)  # accumulated perturbation
    loop_i = 0
    x = torch.tensor(pert_image[None, :],requires_grad=True)
    fs = net.forward(x[0])
    fs_list = [fs[0,I[k]] for k in range(num_classes)]  # NOTE(review): unused
    k_i = label
    while k_i == label and loop_i < max_iter:
        pert = np.inf
        fs[0, I[0]].backward(retain_graph=True)
        grad_orig = x.grad.data.numpy().copy()
        for k in range(1, num_classes):
            # NOTE(review): x.grad is never zeroed between backward() calls,
            # so `cur_grad` accumulates gradients of classes I[0..k] — the
            # commented-out zeroing below suggests this is a known issue; confirm.
            #x.zero_grad()
            fs[0, I[k]].backward(retain_graph=True)
            cur_grad = x.grad.data.numpy().copy()
            # set new w_k and new f_k
            w_k = cur_grad - grad_orig
            f_k = (fs[0, I[k]] - fs[0, I[0]]).data.numpy()
            pert_k = abs(f_k)/np.linalg.norm(w_k.flatten())
            # determine which w_k to use
            if pert_k < pert:
                pert = pert_k
                w = w_k
        # compute r_i and r_tot
        # Added 1e-4 for numerical stability
        r_i = (pert+1e-4) * w / np.linalg.norm(w)
        r_tot = np.float32(r_tot + r_i)
        # Overshoot pushes the point slightly past the boundary.
        pert_image = image + (1+overshoot)*torch.from_numpy(r_tot)
        x = torch.tensor(pert_image, requires_grad=True)
        fs = net.forward(x[0])
        k_i = np.argmax(fs.data.numpy().flatten())
        loop_i += 1
    r_tot = (1+overshoot)*r_tot
    return r_tot, loop_i, label, k_i, pert_image | 2,081 | 26.394737 | 72 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/beta.py | import copy
import math
import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as nnf
import torch.optim as optim
import torch.utils.data as data_utils
import torchvision
import torchvision.models as models
import torchvision.transforms as transforms
from utils.networks import change_beta
def get_beta(current_iter, iters):
    """Exponentially interpolate the softplus beta from 10 to 100 over `iters` steps."""
    low, high = 10.0, 100.0
    progress = current_iter / iters
    return low * (high / low) ** progress
def clamp_image(x, mean, std):
    """
    Helper method for clamping the adversarial example in order to ensure that it is a valid image.

    `mean`/`std` are the normalization statistics: per-channel indexables for
    3-channel images, or plain scalars for single-channel images (the callers
    in this repo compute scalar stats with `.item()`).
    """
    upper = (1.0 - mean) / std
    lower = (0.0 - mean) / std

    if x.shape[1] == 3: # 3-channel image
        # NOTE(review): only the first element of the batch is clamped here;
        # callers pass single images (batch size 1) — confirm before batching.
        for i in [0, 1, 2]:
            x[0][i] = torch.clamp(x[0][i], min=lower[i], max=upper[i])
    else:
        # Fix: the original unconditionally indexed lower[0]/upper[0], which
        # raises TypeError when mean/std are plain Python scalars.
        try:
            lo, hi = lower[0], upper[0]
        except TypeError:
            lo, hi = lower, upper
        x = torch.clamp(x, min=lo, max=hi)
    return x
def Beta(image, model, target_image, data_mean, data_std, lrp_rule, iters,
        delta=1, gamma=1, lr=0.01, beta_growth=False):
    """Explanation-manipulation attack: optimize an adversarial image whose
    LRP explanation matches `target_image`'s explanation while its softmax
    output stays close to the original image's output.

    delta / gamma weight the explanation loss and the output loss.
    With `beta_growth` the softplus beta is annealed from 10 to 100.
    Returns the optimized adversarial image (requires_grad leaf tensor).
    """
    x = image.detach()
    x_target = target_image.detach()
    x_adv = x.clone().detach()
    # produce explanations
    x.requires_grad=True
    x_target.requires_grad=True
    x_adv.requires_grad=True
    # LRP relevance is obtained as the input gradient of the winning-class
    # probability under the model's explain mode.
    x_probs = nnf.softmax(model.forward(x, explain=True, rule=lrp_rule), dim=-1)
    y_hat = x_probs[torch.arange(x.shape[0]), x_probs.max(1)[1]].sum()
    y_hat.backward(retain_graph=True)
    x_expl = x.grad.detach()
    x_target_probs = nnf.softmax(model.forward(x_target, explain=True, rule=lrp_rule), dim=-1)
    y_hat = x_target_probs[torch.arange(x_target.shape[0]), x_target_probs.max(1)[1]].sum()
    y_hat.backward(retain_graph=True)
    x_target_expl = x_target.grad.detach()
    # optimize
    optimizer = torch.optim.Adam([x_adv], lr=lr)
    for i in range(iters):
        if beta_growth:
            # Anneal softplus sharpness on either the Bayesian wrapper's
            # basenet or the plain model.
            if hasattr(model, "basenet"):
                model.basenet = change_beta(model.basenet, get_beta(i, iters))
            else:
                model.model = change_beta(model.model, get_beta(i, iters))
        optimizer.zero_grad()
        # calculate loss
        x_adv_probs = nnf.softmax(model.forward(x_adv, explain=True, rule=lrp_rule), dim=-1)
        y_hat = x_adv_probs[torch.arange(x_adv.shape[0]), x_adv_probs.max(1)[1]].sum()
        y_hat.backward(retain_graph=True)
        x_adv_expl = x_adv.grad.detach()
        loss_expl = nnf.mse_loss(x_adv_expl, x_target_expl)
        loss_output = nnf.mse_loss(x_adv_probs, x_probs.detach())
        total_loss = delta*loss_expl + gamma*loss_output
        # update adversarial example
        total_loss.backward()
        optimizer.step()
        # clamp adversarial example
        # Note: x_adv.data returns tensor which shares data with x_adv but requires
        #       no gradient. Since we do not want to differentiate the clamping,
        #       this is what we need
        # NOTE(review): clamping to [data_mean, data_std] looks suspicious —
        # reference implementations clamp to the valid pixel range instead.
        # Confirm intended bounds.
        x_adv.data = torch.clamp(x_adv.data, data_mean, data_std)
        # print("Iteration {}: Total Loss: {}, Expl Loss: {}, Output Loss: {}".format(i, total_loss.item(), loss_expl.item(), loss_output.item()))
    return x_adv
| 2,868 | 29.849462 | 140 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/topk.py | import torch
import torch.nn.functional as nnf
from utils.lrp import select_informative_pixels
def Topk(image, model, epsilon, lrp_rule, iters, k=20, step_size=0.5, lr=0.01):
    """Top-k LRP attack: perturb the image so that the relevance of the `k`
    most informative pixels (chosen on the clean image) is minimized.

    NOTE(review): `lr` is accepted but never used.
    """
    # Relevance of the clean image = input gradient of the winning-class
    # probability under explain mode; pick the top-k pixel indices from it.
    x_orig = image.clone().detach()
    x_orig.requires_grad=True
    probs = nnf.softmax(model.forward(x_orig, explain=True, rule=lrp_rule), dim=-1)
    y_hat = probs[torch.arange(x_orig.shape[0]), probs.max(1)[1]].sum()
    y_hat.backward(retain_graph=True)
    x_orig_lrp = x_orig.grad.detach()
    topk_pxl_idxs = select_informative_pixels(x_orig_lrp, topk=k)[1]
    x_adv = image.clone().detach()
    x_adv.requires_grad = True
    for i in range(iters):
        probs = nnf.softmax(model.forward(x_adv, explain=True, rule=lrp_rule), dim=-1)
        y_hat = probs[torch.arange(x_adv.shape[0]), probs.max(1)[1]].sum()
        y_hat.backward(retain_graph=True)
        x_adv_lrp = x_adv.grad.detach()
        x_adv_lrp.requires_grad = True
        # Maximize (negative of) relevance mass on the selected pixels.
        loss = - torch.sum(x_adv_lrp.flatten()[topk_pxl_idxs])
        loss.backward()
        # Ascend along the sign of the gradient of y_hat (populated above).
        x_adv = x_adv + step_size * x_adv.grad.data.sign()
        # NOTE(review): this clamps the *image* to [-epsilon, epsilon] rather
        # than the perturbation — confirm intended projection.
        x_adv = torch.clamp(x_adv, -epsilon, epsilon)
        x_adv = x_adv.detach()
        x_adv.requires_grad = True
        # print("iteration {:.0f}, loss:{:.4f}".format(i,loss))
    return x_adv
| 1,175 | 27 | 80 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/run_attacks.py | import copy
import numpy as np
import torch
from torch import autograd
from tqdm import tqdm
# from attacks.robustness_measures import softmax_robustness
from plot.attacks import plot_grid_attacks
from torch.autograd.gradcheck import zero_gradients
from utils.data import *
from utils.savedir import *
from utils.seeding import *
from attacks.beta import Beta
from attacks.deepfool import DeepFool
from attacks.deeprobust.cw import CarliniWagner
from attacks.deeprobust.fgsm import FGSM
from attacks.deeprobust.pgd import PGD
from attacks.region import TargetRegion
from attacks.topk import Topk
# from deeprobust.image.attack.Nattack import NATTACK
# from deeprobust.image.attack.YOPOpgd import FASTPGD
from utils.networks import relu_to_softplus
def _set_attack_hyperparams(x_test, y_test, idx, method, hyperparams, data_mean, data_std):
    """Fill the per-image entries of `hyperparams` (num_classes, beta targets)."""
    hyperparams['num_classes'] = len(y_test[idx])
    if method == 'beta':
        # Deterministic per-image target: seeding with idx makes the choice
        # reproducible across calls and posterior samples.
        random.seed(idx)
        target_image = random.choice(x_test[idx+1:]) if idx < len(x_test)/2 else random.choice(x_test[:idx-1])
        hyperparams['target_image'] = target_image.unsqueeze(0)
        hyperparams['data_mean'] = data_mean
        hyperparams['data_std'] = data_std


def attack(net, x_test, y_test, device, method, hyperparams=None, n_samples=None, sample_idxs=None, avg_posterior=False,
        verbose=True):
    """Craft one adversarial example per test point with the chosen `method`.

    Deterministic nets (or posterior mode) attack directly; Bayesian nets
    craft one attack per posterior sample and average them.
    Returns a tensor of perturbed images clamped to [0, 1].

    Fixes vs. original: (1) `data_std` was never initialized in the
    3-channel branch (NameError); (2) the mutable default `hyperparams={}`
    was mutated in-body, leaking entries across calls; (3) the posterior
    sample loop shadowed the image index `idx`.
    """
    hyperparams = {} if hyperparams is None else hyperparams

    if verbose:
        print(f"\n\nCrafting {method} attacks")

    net.to(device)
    x_test, y_test = x_test.to(device), y_test.to(device)

    if x_test.shape[1] == 3: # todo: test on 3-channels
        data_mean = torch.empty(3)
        data_std = torch.empty(3)  # fix: was missing -> NameError below
        for i in [0, 1, 2]:
            data_mean[i], data_std[i] = x_test[:,i].mean().item(), x_test[:,i].std().item()
    else:
        data_mean, data_std = x_test.mean().item(), x_test.std().item()

    if method=='region':
        random.seed(0)
        total_n_pxls = len(x_test[0].flatten())
        hyperparams['target_pxls'] = random.sample(list(range(total_n_pxls)), int(total_n_pxls*0.2))

    adversarial_attacks = []

    if n_samples is None or avg_posterior is True:
        # Deterministic forward (or posterior-mode) attack.
        net.avg_posterior = avg_posterior
        iterable_x_test = tqdm(range(len(x_test))) if verbose else range(len(x_test))
        for idx in iterable_x_test:
            image = x_test[idx].unsqueeze(0)
            label = y_test[idx].argmax(-1).unsqueeze(0)
            _set_attack_hyperparams(x_test, y_test, idx, method, hyperparams, data_mean, data_std)
            perturbed_image = run_attack(net=net, image=image, label=label, method=method,
                                         device=device, hyperparams=hyperparams)
            adversarial_attacks.append(torch.clamp(perturbed_image, 0., 1.))
        del net.avg_posterior
    else:
        if sample_idxs is not None:
            if len(sample_idxs) != n_samples:
                raise ValueError("Number of sample_idxs should match number of samples.")
        else:
            sample_idxs = list(range(n_samples))

        iterable_x_test = tqdm(range(len(x_test))) if verbose else range(len(x_test))
        for idx in iterable_x_test:
            image = x_test[idx].unsqueeze(0)
            label = y_test[idx].argmax(-1).unsqueeze(0)
            _set_attack_hyperparams(x_test, y_test, idx, method, hyperparams, data_mean, data_std)
            samples_attacks = []
            for sample_idx in sample_idxs:  # fix: previously shadowed `idx`
                net.n_samples, net.sample_idxs = (1, [sample_idx])
                perturbed_image = run_attack(net=net, image=image, label=label, method=method,
                                             device=device, hyperparams=hyperparams)
                samples_attacks.append(torch.clamp(perturbed_image, 0., 1.))
            adversarial_attacks.append(torch.stack(samples_attacks).mean(0))
        del net.n_samples, net.sample_idxs

    return torch.cat(adversarial_attacks)
def run_attack(net, image, label, method, device, hyperparams=None):
    """Dispatch a single-image attack to the chosen `method` and return the
    perturbed image.

    Supported methods: fgsm, pgd, cw, deepfool, beta, topk, region.
    `hyperparams` must at least contain 'epsilon' (some methods also read
    'iters', 'lrp_rule', beta targets, region pixels).

    Fix vs. original: an unknown `method` previously fell through and raised
    an opaque NameError on the return statement; now raises ValueError.
    """
    assert 'epsilon' in hyperparams

    if method == "fgsm":
        adversary = FGSM
        adversary_params = {'epsilon':hyperparams['epsilon'], 'order': np.inf, 'clip_max':None, 'clip_min':None}
        adv = adversary(net, device)
        perturbed_image = adv.generate(image, label, **adversary_params)

    elif method == "pgd":
        adversary = PGD
        adversary_params = {'epsilon':hyperparams['epsilon'], 'clip_max': 1.0, 'clip_min': 0.0, 'print_process': False}
        adv = adversary(net, device)
        perturbed_image = adv.generate(image, label, **adversary_params)

    elif method == "cw":
        adversary = CarliniWagner
        adversary_params = {'confidence': 1e-4, 'clip_max': 1, 'clip_min': 0, 'max_iterations': 1000,
                            'initial_const': 1e-2, 'binary_search_steps': 5, 'learning_rate': 5e-3,
                            'abort_early': True}
        adv = adversary(net, device)
        target_label = 1 # todo: set to random class different from true label
        perturbed_image = adv.generate(image, label, target_label=target_label, **adversary_params)

    elif method == "deepfool":
        perturbed_image=DeepFool(image, net=net, num_classes=10, overshoot=0.02, max_iter=10)[-1].squeeze(0)

    elif method == "beta":
        # Beta needs smooth activations, so swap ReLUs for sharp softplus.
        if hasattr(net, "basenet"):
            net.basenet = relu_to_softplus(net.basenet, beta=100)
        else:
            net.model = relu_to_softplus(net.model, beta=100)
        perturbed_image = Beta(image, model=net, target_image=hyperparams['target_image'],
                               iters=hyperparams['iters'], lrp_rule=hyperparams['lrp_rule'],
                               data_mean=hyperparams['data_mean'], data_std=hyperparams['data_std'])

    elif method == "topk":
        perturbed_image = Topk(image, model=net, epsilon=hyperparams['epsilon'], iters=hyperparams['iters'],
                               lrp_rule=hyperparams['lrp_rule'])

    elif method == "region":
        perturbed_image = TargetRegion(image, model=net, epsilon=hyperparams['epsilon'], iters=hyperparams['iters'],
                                       lrp_rule=hyperparams['lrp_rule'], target_pxls=hyperparams['target_pxls'])

    else:
        raise ValueError(f"Unknown attack method: {method}")

    return perturbed_image
def save_attack(inputs, attacks, method, model_savedir, atk_mode=False, n_samples=None):
    """Pickle the crafted attacks and save a grid plot of ten random examples."""
    filename, savedir = get_atk_filename_savedir(attack_method=method, model_savedir=model_savedir,
                                                 atk_mode=atk_mode, n_samples=n_samples)
    save_to_pickle(data=attacks, path=savedir, filename=filename)

    set_seed(0)
    chosen = np.random.choice(len(inputs), 10, replace=False)
    originals = torch.stack([inputs[i].squeeze() for i in chosen])
    perturbed = torch.stack([attacks[i].squeeze() for i in chosen])
    plot_grid_attacks(original_images=originals.detach().cpu(),
                      perturbed_images=perturbed.detach().cpu(),
                      filename=filename, savedir=savedir)
def load_attack(method, model_savedir, atk_mode=False, n_samples=None):
    """Load previously saved adversarial attacks from their pickle file."""
    fname, outdir = get_atk_filename_savedir(attack_method=method, model_savedir=model_savedir,
                                             atk_mode=atk_mode, n_samples=n_samples)
    return load_from_pickle(path=outdir, filename=fname)
| 6,699 | 35.216216 | 118 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/gradient_based.py | """
FGSM and PGD classic & bayesian adversarial attacks
"""
import os
import sys
import copy
import torch
import numpy as np
from tqdm import tqdm
import torch.nn.functional as nnf
from torch.utils.data import DataLoader
from utils.data import *
from utils.seeding import *
from utils.savedir import *
from utils.networks import *
from attacks.robustness_measures import *
from plot.attacks import plot_grid_attacks
DEBUG=False
def loss_gradient_sign(net, n_samples, image, label, avg_posterior, sample_idxs=None):
    """Sign of the cross-entropy loss gradient w.r.t. the input image.

    Deterministic mode (n_samples is None or avg_posterior): a single
    backward pass. Bayesian mode: one backward pass per posterior sample,
    then the per-sample signs are averaged (so the result is in [-1, 1],
    not strictly a sign tensor).
    """
    if n_samples is None or avg_posterior is True:

        image.requires_grad = True
        output = net.forward(inputs=image, avg_posterior=avg_posterior)

        loss = torch.nn.CrossEntropyLoss()(output, label)
        net.zero_grad()
        loss.backward()
        gradient_sign = image.grad.data.sign()

    else:

        if sample_idxs is not None:
            if len(sample_idxs) != n_samples:
                raise ValueError("Number of sample_idxs should match number of samples.")
        else:
            sample_idxs = list(range(n_samples))

        loss_gradients=[]

        for idx in sample_idxs:

            x_copy = copy.deepcopy(image)
            x_copy.requires_grad = True
            output = net.forward(inputs=x_copy, n_samples=1, sample_idxs=[idx], avg_posterior=avg_posterior)

            # NOTE(review): the double cast here is asymmetric with the
            # deterministic branch above — confirm it is needed.
            loss = torch.nn.CrossEntropyLoss()(output.to(dtype=torch.double), label)
            net.zero_grad()
            loss.backward()
            # NOTE(review): `[0]` drops the batch dimension — assumes batch
            # size 1 (true for the callers in this file); confirm.
            loss_gradient = copy.deepcopy(x_copy.grad.data[0].sign())
            loss_gradients.append(loss_gradient)

        gradient_sign = torch.stack(loss_gradients,0).mean(0)

    return gradient_sign
def fgsm_attack(net, image, label, hyperparams=None, n_samples=None, sample_idxs=None, avg_posterior=False):
    """Single-step FGSM: step of size epsilon along the loss-gradient sign,
    clamped back to the valid [0, 1] image range."""
    eps = 0.25 if hyperparams is None else hyperparams["epsilon"]
    grad_sign = loss_gradient_sign(net=net, n_samples=n_samples, image=image, label=label,
                                   avg_posterior=avg_posterior, sample_idxs=sample_idxs)
    return torch.clamp(image + eps * grad_sign, 0, 1)
def pgd_attack(net, image, label, hyperparams=None, n_samples=None, sample_idxs=None, avg_posterior=False):
    """Iterative PGD attack (40 steps): repeated signed-gradient steps,
    each projected back into the epsilon-ball around the original image
    and clamped to the valid [0, 1] range.
    """
    if hyperparams is not None:
        # NOTE(review): alpha depends on the image's max value here, while the
        # fallback below uses 2/225 — possibly a typo for the common 2/255.
        # Confirm intended step sizes.
        epsilon, alpha, iters = (hyperparams["epsilon"], 2/image.max(), 40)
    else:
        epsilon, alpha, iters = (0.25, 2/225, 40)

    original_image = copy.deepcopy(image)

    for i in range(iters):

        gradient_sign = loss_gradient_sign(net=net, n_samples=n_samples, image=image, label=label,
                                           avg_posterior=avg_posterior, sample_idxs=sample_idxs)
        perturbed_image = image + alpha * gradient_sign
        # Project onto the epsilon-ball, then back into the image range.
        eta = torch.clamp(perturbed_image - original_image, min=-epsilon, max=epsilon)
        image = torch.clamp(original_image + eta, min=0, max=1)

    perturbed_image = image.detach()
    return perturbed_image
def attack(net, x_test, y_test, device, method,
           hyperparams=None, n_samples=None, sample_idxs=None, avg_posterior=False):
    """Craft one gradient-based adversarial example ("fgsm" or "pgd") per
    test point and return them as a single tensor.

    Fix vs. original: an unrecognized `method` previously left
    `perturbed_image` undefined (NameError) or silently re-appended the
    previous image; it now raises ValueError.
    """
    print(f"\n\nProducing {method} attacks", end="\t")
    if n_samples:
        print(f"with {n_samples} attack samples", end="\t")
    if avg_posterior:
        print(f"on the posterior mode")

    net.to(device)
    x_test, y_test = x_test.to(device), y_test.to(device)

    adversarial_attack = []

    for idx in tqdm(range(len(x_test))):
        image = x_test[idx].unsqueeze(0)
        label = y_test[idx].argmax(-1).unsqueeze(0)

        if method == "fgsm":
            perturbed_image = fgsm_attack(net=net, image=image, label=label,
                                          hyperparams=hyperparams, n_samples=n_samples,
                                          avg_posterior=avg_posterior, sample_idxs=sample_idxs)
        elif method == "pgd":
            perturbed_image = pgd_attack(net=net, image=image, label=label,
                                         hyperparams=hyperparams, n_samples=n_samples,
                                         avg_posterior=avg_posterior, sample_idxs=sample_idxs)
        else:
            raise ValueError(f"Unknown attack method: {method}")

        adversarial_attack.append(perturbed_image)

    adversarial_attack = torch.cat(adversarial_attack)
    return adversarial_attack
def save_attack(inputs, attacks, method, model_savedir, atk_mode=False, n_samples=None):
    """Serialize the attacks to pickle and plot ten random clean/adversarial pairs."""
    filename, savedir = get_atk_filename_savedir(attack_method=method, model_savedir=model_savedir,
                                                 atk_mode=atk_mode, n_samples=n_samples)
    save_to_pickle(data=attacks, path=savedir, filename=filename)

    set_seed(0)
    sample_idxs = np.random.choice(len(inputs), 10, replace=False)
    clean_grid = torch.stack([inputs[i].squeeze() for i in sample_idxs])
    adv_grid = torch.stack([attacks[i].squeeze() for i in sample_idxs])
    plot_grid_attacks(original_images=clean_grid.detach().cpu(),
                      perturbed_images=adv_grid.detach().cpu(),
                      filename=filename, savedir=savedir)
def load_attack(method, model_savedir, atk_mode=False, n_samples=None):
    """Load previously saved attacks for the given method/model directory."""
    fname, outdir = get_atk_filename_savedir(attack_method=method, model_savedir=model_savedir,
                                             atk_mode=atk_mode, n_samples=n_samples)
    return load_from_pickle(path=outdir, filename=fname)
def evaluate_attack(net, x_test, x_attack, y_test, device, n_samples=None, sample_idxs=None,
                    avg_posterior=False, return_classification_idxs=False):
    """ Evaluates the network on the original data and its adversarially perturbed version.
    When using a Bayesian network `n_samples` should be specified for the evaluation.

    Returns (original_outputs, adversarial_outputs, softmax_rob) and, when
    `return_classification_idxs` is True, also the indices of successful and
    failed attacks among the correctly-classified points.
    """
    x_test = x_test.clone()
    x_attack = x_attack.clone()

    print(f"\nEvaluating against the attacks", end="")
    if avg_posterior:
        print(" with the posterior mode")
    else:
        if n_samples:
            print(f" with {n_samples} defense samples")

    x_test, x_attack, y_test = x_test.to(device), x_attack.to(device), y_test.to(device)
    test_loader = DataLoader(dataset=list(zip(x_test, y_test)), batch_size=128, shuffle=False)
    attack_loader = DataLoader(dataset=list(zip(x_attack, y_test)), batch_size=128, shuffle=False)

    with torch.no_grad():

        # --- clean data pass ---
        original_outputs = []
        original_correct = 0.0
        correct_class_idxs = []
        batch_size=0
        for batch_idx, (images, labels) in enumerate(test_loader):
            out = net.forward(images, n_samples=n_samples, sample_idxs=sample_idxs, avg_posterior=avg_posterior,
                              softmax=True)
            original_correct += ((out.argmax(-1) == labels.argmax(-1)).sum().item())
            original_outputs.append(out)
            # NOTE(review): global index offset = len(previous batch) * batch_idx;
            # correct only while all non-final batches share the same size
            # (true for batch_size=128 with shuffle=False). Confirm if changed.
            correct_idxs = np.where(out.argmax(-1).cpu() == labels.argmax(-1).cpu())[0]
            correct_class_idxs.extend(correct_idxs+batch_size*batch_idx)
            batch_size = len(images)

        if DEBUG:
            print("\nlabels", labels.argmax(-1))
            print("det out", out.argmax(-1))
            print("correct_class_idxs", correct_class_idxs)

        # --- adversarial data pass ---
        adversarial_outputs = []
        adversarial_correct = 0.0
        wrong_atk_class_idxs = []
        correct_atk_class_idxs = []
        batch_size=0
        for batch_idx, (attacks, labels) in enumerate(attack_loader):
            out = net.forward(attacks, n_samples=n_samples, sample_idxs=sample_idxs, avg_posterior=avg_posterior,
                              softmax=True)
            adversarial_correct += ((out.argmax(-1) == labels.argmax(-1)).sum().item())
            adversarial_outputs.append(out)
            wrong_idxs = np.where(out.argmax(-1).cpu() != labels.argmax(-1).cpu())[0]
            wrong_atk_class_idxs.extend(wrong_idxs+batch_size*batch_idx)
            correct_idxs = np.where(out.argmax(-1).cpu() == labels.argmax(-1).cpu())[0]
            correct_atk_class_idxs.extend(correct_idxs+batch_size*batch_idx)
            batch_size = len(attacks)

        # Successful attacks: correctly classified clean, misclassified adversarial.
        successful_atk_idxs = np.intersect1d(correct_class_idxs, wrong_atk_class_idxs)
        failed_atk_idxs = np.intersect1d(correct_class_idxs, correct_atk_class_idxs)

        if DEBUG:
            print("bay out", out.argmax(-1))
            print("wrong_atk_class_idxs", wrong_atk_class_idxs)
            print("successful_atk_idxs", successful_atk_idxs)

        original_accuracy = 100 * original_correct / len(x_test)
        adversarial_accuracy = 100 * adversarial_correct / len(x_test)
        print(f"\ntest accuracy = {original_accuracy}\tadversarial accuracy = {adversarial_accuracy}",
              end="\t")

        original_outputs = torch.cat(original_outputs)
        adversarial_outputs = torch.cat(adversarial_outputs)
        softmax_rob = softmax_robustness(original_outputs, adversarial_outputs)

        if return_classification_idxs:
            return original_outputs, adversarial_outputs, softmax_rob, successful_atk_idxs, failed_atk_idxs
        else:
            return original_outputs, adversarial_outputs, softmax_rob
| 8,049 | 34.307018 | 108 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/robustness_measures.py | import torch
import torch.nn.functional as nnf
DEBUG=False
def softmax_difference(original_predictions, adversarial_predictions):
    """
    Compute the element-wise absolute difference between clean and adversarial
    softmax predictions.

    Both arguments must be softmax outputs (values within [-1, 1] by magnitude)
    of the same length; a ValueError is raised otherwise.

    Fix vs. original: the range check tested `original_predictions` twice and
    never validated `adversarial_predictions`.
    """
    # original_predictions = nnf.softmax(original_predictions, dim=-1)
    # adversarial_predictions = nnf.softmax(adversarial_predictions, dim=-1)

    if original_predictions.abs().max()>1 or adversarial_predictions.abs().max()>1:
        raise ValueError("Pass softmax outputs")

    if len(original_predictions) != len(adversarial_predictions):
        raise ValueError("Input arrays should have the same length.")

    if DEBUG:
        print("\n\n", original_predictions[0], "\t", adversarial_predictions[0], end="\n\n")

    abs_diff = (original_predictions-adversarial_predictions).abs()
    return abs_diff
def softmax_robustness(original_outputs, adversarial_outputs, norm="linf"):
    """
    Robustness = 1 - norm(softmax difference between predictions).
    This robustness measure is strictly dependent on the epsilon chosen for the
    perturbations.
    """
    diffs = softmax_difference(original_outputs, adversarial_outputs)

    if norm == "l2":
        diffs = torch.norm(diffs, dim=-1)
    elif norm == "linf":
        diffs = diffs.max(dim=-1)[0]

    robustness = torch.ones_like(diffs) - diffs
    print(f"avg softmax robustness = {robustness.mean().item():.2f}", end="\t")
    print(f"(min = {robustness.min().item():.2f} max = {robustness.max().item():.2f})")
    return robustness
| 1,637 | 33.125 | 92 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/region.py | import torch
import torch.nn.functional as nnf
def TargetRegion(image, model, epsilon, lrp_rule, iters, target_pxls, step_size=0.5, lr=0.01):
    """Target-region LRP attack: perturb the image so that relevance mass is
    pushed onto the pixels listed in `target_pxls`.

    NOTE(review): `lr` is accepted but never used.
    """
    x_adv = image.clone().detach()
    x_adv.requires_grad = True
    for i in range(iters):
        probs = nnf.softmax(model.forward(x_adv, explain=True, rule=lrp_rule), dim=-1)
        y_hat = probs[torch.arange(x_adv.shape[0]), probs.max(1)[1]].sum()
        y_hat.backward(retain_graph=True)
        x_adv_lrp = x_adv.grad.detach()
        x_adv_lrp.requires_grad = True
        # Maximize relevance on the target region.
        loss = torch.sum(x_adv_lrp.flatten()[target_pxls])
        loss.backward()
        # Step along the sign of the gradient of y_hat (populated above).
        x_adv = x_adv + step_size * x_adv.grad.data.sign()
        # NOTE(review): clamps the *image* to [-epsilon, epsilon], not the
        # perturbation — confirm intended projection (same pattern as Topk).
        x_adv = torch.clamp(x_adv, -epsilon, epsilon)
        x_adv = x_adv.detach()
        x_adv.requires_grad = True
        # print("iteration {:.0f}, loss:{:.4f}".format(i,loss))
    return x_adv
| 872 | 27.16129 | 94 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/pgd.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from attacks.deeprobust.base_attack import BaseAttack
class PGD(BaseAttack):
    """
    This is the multi-step version of FGSM attack.
    Thin adapter around `pgd_attack`: validates inputs, stores the
    hyperparameters on `self`, then runs the PGD loop.
    """

    def __init__(self, model, device = 'cuda'):
        super(PGD, self).__init__(model, device)

    def generate(self, image, label, **kwargs):
        """
        Call this function to generate PGD adversarial examples.

        Parameters
        ----------
        image :
            original image
        label :
            target label
        kwargs :
            user defined parameters (see `parse_params`)
        """
        ## check and parse parameters for attack
        label = label.type(torch.FloatTensor)

        # NOTE(review): self.image / self.label are presumably stored by
        # BaseAttack.check_type_device — confirm in base class.
        assert self.check_type_device(image, label)
        assert self.parse_params(**kwargs)

        return pgd_attack(self.model,
                   self.image,
                   self.label,
                   self.epsilon,
                   self.clip_max,
                   self.clip_min,
                   self.num_steps,
                   self.step_size,
                   self.print_process)

    ##default parameter for mnist data set.
    def parse_params(self,
                     epsilon = 0.03,
                     num_steps = 40,
                     step_size = 0.01,
                     clip_max = 1.0,
                     clip_min = 0.0,
                     print_process = False
                     ):
        """parse_params.

        Parameters
        ----------
        epsilon :
            perturbation constraint
        num_steps :
            iteration step
        step_size :
            step size
        clip_max :
            maximum pixel value
        clip_min :
            minimum pixel value
        print_process :
            whether to print out the log during optimization process, True or False.
        """
        self.epsilon = epsilon
        self.num_steps = num_steps
        self.step_size = step_size
        self.clip_max = clip_max
        self.clip_min = clip_min
        self.print_process = print_process
        return True
def pgd_attack(model,
               X,
               y,
               epsilon,
               clip_max,
               clip_min,
               num_steps,
               step_size,
               print_process,
               mean = 0.0,
               std = 1.0,
               bound = 'linf'):
    """Core PGD loop: random start inside the epsilon-ball, then `num_steps`
    signed-gradient steps, each projected back onto the ball and clamped to
    the valid (de-normalized) pixel range [clip_min, clip_max].

    Fix vs. original: unwrapping model output with `len(out) > 1` also matched
    plain tensors whose batch dimension exceeds 1 (len(tensor) is its first
    dimension), wrongly stripping the batch; only genuine tuples/lists (e.g.
    bayesian_torch forward results) are unwrapped now.
    """
    out = model(X)
    if isinstance(out, (tuple, list)):  # handle bayesian_torch fwd
        out = out[0]
    err = (out.data.max(1)[1] != y.data).float().sum()  # initial error count (kept for parity; unused)

    #TODO: find a other way
    device = X.device
    imageArray = X.detach().cpu().numpy()
    # Random start uniformly inside the epsilon-ball, clipped to [0, 1].
    X_random = np.random.uniform(-epsilon, epsilon, X.shape)
    imageArray = np.clip(imageArray + X_random, 0, 1.0)

    X_pgd = torch.tensor(imageArray).to(device).float()
    X_pgd.requires_grad = True

    for i in range(num_steps):

        pred = model(X_pgd)
        if isinstance(pred, (tuple, list)):  # handle bayesian_torch fwd
            pred = pred[0]
        loss = nn.CrossEntropyLoss()(pred, y)

        if print_process:
            print("iteration {:.0f}, loss:{:.4f}".format(i,loss))

        loss.backward()

        if bound == 'linf':
            eta = step_size * X_pgd.grad.data.sign()
            X_pgd = X_pgd + eta
            # Project onto the epsilon-ball around X, then clamp to the
            # valid pixel range in de-normalized space.
            eta = torch.clamp(X_pgd.data - X.data, -epsilon, epsilon)
            X_pgd = X.data + eta
            X_pgd = (torch.clamp(X_pgd * std + mean, clip_min, clip_max) - mean) / std
            X_pgd = X_pgd.detach()
            X_pgd.requires_grad_()
            X_pgd.retain_grad()

        if bound == 'l2':
            # NOTE(review): this branch references `delta`, `alpha` and
            # `norms`, none of which are defined in this function — taking
            # it raises NameError. Left untouched pending a real l2
            # implementation.
            output = model(X+delta)
            output = output[0] if len(output)>1 else output # handle bayesian_torch fwd
            incorrect = output.max(1)[1] != y
            correct = (~incorrect).unsqueeze(1).unsqueeze(1).unsqueeze(1).float()
            #Finding the correct examples so as to attack only them
            loss = nn.CrossEntropyLoss()(model(X + delta), y)
            loss.backward()

            delta.data += correct*alpha*delta.grad.detach() / norms(delta.grad.detach())
            delta.data *= epsilon / norms(delta.detach()).clamp(min=epsilon)
            delta.data = torch.min(torch.max(delta.detach(), -X), 1-X) # clip X+delta to [0,1]
            delta.grad.zero_()

    return X_pgd
| 4,514 | 29.924658 | 145 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/evaluation_attack.py | import requests
import torch
from torchvision import datasets,models,transforms
import torch.nn.functional as F
import os
import numpy as np
import argparse
import matplotlib.pyplot as plt
import random
from attacks.deeprobust.image import utils
def run_attack(attackmethod, batch_size, batch_num, device, test_loader, random_targeted = False, target_label = -1, **kwargs):
    """Run `attackmethod.generate` over up to `batch_num` batches and print
    the resulting adversarial loss/accuracy.

    NOTE(review): relies on the module-global `model` for evaluating the
    adversarial examples (set in the __main__ block) — this function cannot
    be imported and used standalone.
    """
    test_loss = 0
    correct = 0
    samplenum = 1000  # NOTE(review): unused
    count = 0
    classnum = 10
    for count, (data, target) in enumerate(test_loader):
        if count == batch_num:
            break
        print('batch:{}'.format(count))
        data, target = data.to(device), target.to(device)

        if(random_targeted == True):
            # Random target label different from the true class.
            # NOTE(review): assumes batch size 1 (target used as a scalar).
            r = list(range(0, target)) + list(range(target+1, classnum))
            target_label = random.choice(r)
            adv_example = attackmethod.generate(data, target, target_label = target_label, **kwargs)
        elif(target_label >= 0):
            adv_example = attackmethod.generate(data, target, target_label = target_label, **kwargs)
        else:
            adv_example = attackmethod.generate(data, target, **kwargs)

        output = model(adv_example)

        test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
        pred = output.argmax(dim = 1, keepdim = True) # get the index of the max log-probability.
        correct += pred.eq(target.view_as(pred)).sum().item()

    batch_num = count+1
    test_loss /= len(test_loader.dataset)

    print("===== ACCURACY =====")
    print('Attack Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
          test_loss, correct, batch_num * batch_size,
          100. * correct / (batch_num * batch_size)))
def load_net(attack_model, filename, path):
    """Instantiate the requested architecture ("CNN" or "ResNet18"), load its
    saved weights from path + filename, and return it in eval mode."""
    if attack_model == "CNN":
        from deeprobust.image.netmodels.CNN import Net
        net = Net()
    elif attack_model == "ResNet18":
        import deeprobust.image.netmodels.resnet as Net
        net = Net.ResNet18()
    net.load_state_dict(torch.load(path + filename))
    net.eval()
    return net
def generate_dataloader(dataset, batch_size):
    """Build a shuffled test-set DataLoader for the requested dataset.

    Fix vs. original: the function ignored its own parameters and read the
    module-global `args` (args.batch_size, args.dataset), so importing and
    calling it from another module raised NameError.
    """
    if dataset == "MNIST":
        test_loader = torch.utils.data.DataLoader(
                 datasets.MNIST('deeprobust/image/data', train = False,
                                download = True,
                                transform = transforms.Compose([transforms.ToTensor()])),
                 batch_size = batch_size,
                 shuffle = True)
        print("Loading MNIST dataset.")

    elif dataset == "CIFAR" or dataset == 'CIFAR10':
        test_loader = torch.utils.data.DataLoader(
                 datasets.CIFAR10('deeprobust/image/data', train = False,
                                  download = True,
                                  transform = transforms.Compose([transforms.ToTensor()])),
                 batch_size = batch_size,
                 shuffle = True)
        print("Loading CIFAR10 dataset.")

    elif dataset == "ImageNet":
        # NOTE(review): this branch loads CIFAR10, not ImageNet — kept as in
        # the original; confirm intended dataset.
        test_loader = torch.utils.data.DataLoader(
                 datasets.CIFAR10('deeprobust/image/data', train=False,
                                  download = True,
                                  transform = transforms.Compose([transforms.ToTensor()])),
                 batch_size = batch_size,
                 shuffle = True)
        print("Loading ImageNet dataset.")

    else:
        # Previously an unknown dataset fell through to an UnboundLocalError.
        raise ValueError(f"Unknown dataset: {dataset}")

    return test_loader
def parameter_parser():
    """Build the CLI argument parser for the attack runner and parse sys.argv."""
    cli = argparse.ArgumentParser(description="Run attack algorithms.", usage='Use -h for more information.')

    cli.add_argument("--attack_method",
                     default='PGD',
                     help="Choose a attack algorithm from: PGD(default), FGSM, LBFGS, CW, deepfool, onepixel, Nattack")
    cli.add_argument("--attack_model",
                     default="CNN",
                     help="Choose network structure from: CNN, ResNet")
    cli.add_argument("--path",
                     default="./trained_models/",
                     help="Type the path where the model is saved.")
    cli.add_argument("--file_name",
                     default='MNIST_CNN_epoch_20.pt',
                     help="Type the file_name of the model that is to be attack. The model structure should be matched with the ATTACK_MODEL parameter.")
    cli.add_argument("--dataset",
                     default='MNIST',
                     help="Choose a dataset from: MNIST(default), CIFAR(or CIFAR10), ImageNet")

    cli.add_argument("--epsilon", type=float, default=0.3)
    cli.add_argument("--batch_num", type=int, default=1000)
    cli.add_argument("--batch_size", type=int, default=1000)
    cli.add_argument("--num_steps", type=int, default=40)
    cli.add_argument("--step_size", type=float, default=0.01)

    cli.add_argument("--random_targeted", type=bool, default=False,
                     help="default: False. By setting this parameter be True, the program would random generate target labels for the input samples.")
    cli.add_argument("--target_label", type=int, default=-1,
                     help="default: -1. Generate all attack Fixed target label.")
    cli.add_argument("--device", default='cuda',
                     help="Choose the device.")

    return cli.parse_args()
if __name__ == "__main__":
    # read arguments
    args = parameter_parser() # read argument and create an argparse object

    # download example model (pretrained MNIST CNN) if the local cache is absent
    example_model_path = './trained_models/MNIST_CNN_epoch_20.pt'
    if not (os.path.exists('./trained_models')):
        os.mkdir('./trained_models')
        print('create path: ./trained_models')
        model_url = "https://github.com/I-am-Bot/deeprobust_trained_model/blob/master/MNIST_CNN_epoch_20.pt?raw=true"
        r = requests.get(model_url)
        print('Downloading example model...')
        with open(example_model_path,'wb') as f:
            f.write(r.content)
        print('Downloaded.')

    # load model (module-global `model` is also read by run_attack above)
    model = load_net(args.attack_model, args.file_name, args.path)

    print("===== START ATTACK =====")
    # NOTE(review): the try/raise/except ValueError pairs below are used as
    # control flow to coerce invalid argument combinations to safe defaults.
    if(args.attack_method == "PGD"):
        from deeprobust.image.attack.pgd import PGD
        test_loader = generate_dataloader(args.dataset, args.batch_size)
        attack_method = PGD(model, args.device)
        utils.tab_printer(args)
        run_attack(attack_method, args.batch_size, args.batch_num, args.device, test_loader, epsilon = args.epsilon)

    elif(args.attack_method == "FGSM"):
        from deeprobust.image.attack.fgsm import FGSM
        test_loader = generate_dataloader(args.dataset, args.batch_size)
        attack_method = FGSM(model, args.device)
        utils.tab_printer(args)
        run_attack(attack_method, args.batch_size, args.batch_num, args.device, test_loader, epsilon = args.epsilon)

    elif(args.attack_method == "LBFGS"):
        from deeprobust.image.attack.lbfgs import LBFGS
        # LBFGS operates on single samples: force batch_size to 1.
        try:
            if (args.batch_size >1):
                raise ValueError("batch_size shouldn't be larger than 1.")
        except ValueError:
            args.batch_size = 1
        # Targeted attack requires a target: fall back to random targets.
        try:
            if (args.random_targeted == 0 and args.target_label == -1):
                raise ValueError("No target label assigned. Random generate target for each input.")
        except ValueError:
            args.random_targeted = True

        utils.tab_printer(args)
        test_loader = generate_dataloader(args.dataset, args.batch_size)
        attack_method = LBFGS(model, args.device)
        run_attack(attack_method, 1, args.batch_num, args.device, test_loader, random_targeted = args.random_targeted, target_label = args.target_label)

    elif(args.attack_method == "CW"):
        from deeprobust.image.attack.cw import CarliniWagner
        attack_method = CarliniWagner(model, args.device)
        try:
            if (args.batch_size > 1):
                raise ValueError("batch_size shouldn't be larger than 1.")
        except ValueError:
            args.batch_size = 1
        try:
            if (args.random_targeted == 0 and args.target_label == -1):
                raise ValueError("No target label assigned. Random generate target for each input.")
        except ValueError:
            args.random_targeted = True

        utils.tab_printer(args)
        test_loader = generate_dataloader(args.dataset, args.batch_size)
        run_attack(attack_method, 1, args.batch_num, args.device, test_loader, random_targeted = args.random_targeted, target_label = args.target_label)

    elif(args.attack_method == "deepfool"):
        from deeprobust.image.attack.deepfool import DeepFool
        attack_method = DeepFool(model, args.device)
        try:
            if (args.batch_size > 1):
                raise ValueError("batch_size shouldn't be larger than 1.")
        except ValueError:
            args.batch_size = 1
        utils.tab_printer(args)
        test_loader = generate_dataloader(args.dataset, args.batch_size)
        run_attack(attack_method, args.batch_size, args.batch_num, args.device, test_loader)

    elif(args.attack_method == "onepixel"):
        from deeprobust.image.attack.onepixel import Onepixel
        attack_method = Onepixel(model, args.device)
        try:
            if (args.batch_size > 1):
                raise ValueError("batch_size shouldn't be larger than 1.")
        except ValueError:
            args.batch_size = 1
        utils.tab_printer(args)
        test_loader = generate_dataloader(args.dataset, args.batch_size)
        run_attack(attack_method, args.batch_size, args.batch_num, args.device, test_loader)

    elif(args.attack_method == "Nattack"):
        # Not implemented in this runner.
        pass
| 9,853 | 42.409692 | 158 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/deepfool.py | import numpy as np
from torch.autograd import Variable
import torch as torch
import copy
from torch.autograd.gradcheck import zero_gradients
from attacks.deeprobust.base_attack import BaseAttack
class DeepFool(BaseAttack):
    """Wrapper exposing the DeepFool attack through the BaseAttack interface."""

    def __init__(self, model, device = 'cuda' ):
        super(DeepFool, self).__init__(model, device)
        self.model = model
        self.device = device

    def generate(self, image, label, **kwargs):
        """
        Call this function to generate adversarial examples.

        Parameters
        ----------
        image : 1*H*W*3
            original image
        label : int
            target label
        kwargs :
            user defined paremeters

        Returns
        -------
        adv_img :
            adversarial examples
        """
        # validate inputs and move them to the configured device
        assert self.check_type_device(image, label)
        use_cuda = torch.cuda.is_available() and self.device == 'cuda'
        if use_cuda:
            self.image = image.cuda()
            self.model = self.model.cuda()
        else:
            self.image = image

        assert self.parse_params(**kwargs)
        # run the attack; keep perturbation and iteration count for getpert()
        adv_img, self.r, self.ite = deepfool(
            self.model,
            self.image,
            self.num_classes,
            self.overshoot,
            self.max_iteration,
            self.device)
        return adv_img

    def getpert(self):
        """Return the final perturbation and the iteration count of the last run."""
        return self.r, self.ite

    def parse_params(self,
                     num_classes = 10,
                     overshoot = 0.02,
                     max_iteration = 50):
        """
        Parse the user defined parameters

        Parameters
        ----------
        num_classes : int
            limits the number of classes to test against. (default = 10)
        overshoot : float
            used as a termination criterion to prevent vanishing updates (default = 0.02).
        max_iteration : int
            maximum number of iteration for deepfool (default = 50)
        """
        self.num_classes = num_classes
        self.overshoot = overshoot
        self.max_iteration = max_iteration
        return True
def deepfool(model, image, num_classes, overshoot, max_iter, device):
    """Run the DeepFool attack on a single image.

    Iteratively estimates the smallest perturbation (scaled by
    ``1 + overshoot``) that pushes ``image`` across the nearest linearized
    decision boundary among the top ``num_classes`` predicted classes.

    Returns ``(pert_image, r_tot, i)``: perturbed image, accumulated
    perturbation, and the number of completed iterations.

    NOTE(review): ``zero_gradients`` was removed from torch >= 1.9 — confirm
    the pinned torch version. If ``max_iter == 0`` the function references
    ``pert_image`` before assignment.
    """
    # rank classes by logit and keep only the top `num_classes`
    f_image = model.forward(image).data.cpu().numpy().flatten()
    output = (np.array(f_image)).flatten().argsort()[::-1]
    output = output[0:num_classes]
    label = output[0]  # current (clean) prediction
    input_shape = image.cpu().numpy().shape
    x = copy.deepcopy(image).requires_grad_(True)
    w = np.zeros(input_shape)       # best boundary direction found so far
    r_tot = np.zeros(input_shape)   # accumulated perturbation
    fs = model.forward(x)
    fs_list = [fs[0,output[k]] for k in range(num_classes)]
    current_pred_label = label
    for i in range(max_iter):
        pert = np.inf
        # gradient of the currently-predicted class logit
        fs[0, output[0]].backward(retain_graph = True)
        grad_orig = x.grad.data.cpu().numpy().copy()
        for k in range(1, num_classes):
            zero_gradients(x)
            fs[0, output[k]].backward(retain_graph=True)
            cur_grad = x.grad.data.cpu().numpy().copy()
            # set new w_k and new f_k
            w_k = cur_grad - grad_orig
            f_k = (fs[0, output[k]] - fs[0, output[0]]).data.cpu().numpy()
            # distance to the k-th linearized decision boundary
            pert_k = abs(f_k)/np.linalg.norm(w_k.flatten())
            # determine which w_k to use
            if pert_k < pert:
                pert = pert_k
                w = w_k
        # compute r_i and r_tot
        # Added 1e-4 for numerical stability
        r_i = (pert+1e-4) * w / np.linalg.norm(w)
        r_tot = np.float32(r_tot + r_i)
        # apply the (overshot) total perturbation and re-classify
        pert_image = image + (1+overshoot)*torch.from_numpy(r_tot).to(device)
        x = pert_image.detach().requires_grad_(True)
        fs = model.forward(x)
        # stop as soon as the prediction flips away from the clean label
        if (not np.argmax(fs.data.cpu().numpy().flatten()) == label):
            break
    r_tot = (1+overshoot)*r_tot
    return pert_image, r_tot, i
| 3,969 | 27.561151 | 90 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/base_attack.py | from abc import ABCMeta
import torch
class BaseAttack(object):
    """
    Attack base class.

    Subclasses implement `generate` / `parse_params`; this class provides
    shared input validation and device handling.
    """

    __metaclass__ = ABCMeta

    def __init__(self, model, device = 'cuda'):
        self.model = model
        self.device = device

    def generate(self, image, label, **kwargs):
        """
        Override this function for the main body of attack algorithm.

        Parameters
        ----------
        image :
            original image
        label :
            original label
        kwargs :
            user defined parameters
        """
        # NOTE(review): base implementation is a stub; it returns the `input`
        # builtin, which callers should never rely on. Subclasses override it.
        return input

    def parse_params(self, **kwargs):
        """
        Parse user defined parameters.
        """
        return True

    def check_type_device(self, image, label):
        """
        Normalize `image`/`label` to torch tensors, move them (and the model)
        to `self.device`, and store them on the attack object.

        Fixes over the original implementation:
        * the ndarray branches referenced undefined names (`x`, `y`) and
          could never run anyway because `.cuda()`/`.cpu()` was called on the
          raw numpy arrays first; type conversion now happens before the
          device move;
        * `requires_grad` is set after the device move so `self.image` is a
          proper leaf tensor on the target device.

        Returns
        -------
        bool
            True on success; raises ValueError on bad device/type.
        """
        ################## data type
        if type(image).__name__ == 'Tensor':
            image = image.float()
        elif type(image).__name__ == 'ndarray':
            image = torch.tensor(image).float()
        else:
            raise ValueError('Input values only take numpy arrays or torch tensors')

        if type(label).__name__ == 'Tensor':
            label = label.long()
        elif type(label).__name__ == 'ndarray':
            label = torch.tensor(label).long()
        else:
            raise ValueError('Input labels only take numpy arrays or torch tensors')

        ################## devices
        if self.device == 'cuda':
            image = image.cuda()
            label = label.cuda()
            self.model = self.model.cuda()
        elif self.device == 'cpu':
            image = image.cpu()
            label = label.cpu()
            self.model = self.model.cpu()
        else:
            raise ValueError('Please input cpu or cuda')

        # clone+detach last so the stored image is a leaf tensor (with
        # gradients enabled) on the target device
        image = image.clone().detach().requires_grad_(True)

        #################### set init attributes
        self.image = image
        self.label = label
        return True

    def get_or_predict_lable(self, image):
        """Return the model's argmax prediction for `image` (shape N x 1)."""
        output = self.model(image)
        pred = output.argmax(dim=1, keepdim=True)
        return(pred)
| 2,341 | 25.314607 | 84 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/cw.py | import torch
from torch import optim
import torch.nn as nn
import numpy as np
import logging
from attacks.deeprobust.base_attack import BaseAttack
from attacks.deeprobust.utils import onehot_like
from attacks.deeprobust.optimizer import AdamOptimizer
class CarliniWagner(BaseAttack):
    """
    C&W attack is an effective method to calcuate high-confidence adversarial examples.
    References
    ----------
    .. [1] Carlini, N., & Wagner, D. (2017, May). Towards evaluating the robustness of neural networks. https://arxiv.org/pdf/1608.04644.pdf
    This reimplementation is based on https://github.com/kkew3/pytorch-cw2
    Copyright 2018 Kaiwen Wu
    Examples
    --------
    >>> from deeprobust.image.attack.cw import CarliniWagner
    >>> from deeprobust.image.netmodels.CNN import Net
    >>> from deeprobust.image.config import attack_params
    >>> model = Net()
    >>> model.load_state_dict(torch.load("./trained_models/MNIST_CNN_epoch_20.pt", map_location = torch.device('cuda')))
    >>> model.eval()
    >>> x,y = datasets.MNIST()
    >>> attack = CarliniWagner(model, device='cuda')
    >>> AdvExArray = attack.generate(x, y, target_label = 1, classnum = 10, **attack_params['CW_MNIST])
    """

    def __init__(self, model, device = 'cuda'):
        super(CarliniWagner, self).__init__(model, device)
        self.model = model
        self.device = device

    def generate(self, image, label, target_label, **kwargs):
        """
        Call this function to generate adversarial examples.
        Parameters
        ----------
        image :
            original image
        label :
            target label
        target_label :
            class the adversarial example should be classified as
        kwargs :
            user defined paremeters
        """
        # validate/convert inputs, parse hyper-parameters, then run the attack
        assert self.check_type_device(image, label)
        assert self.parse_params(**kwargs)
        self.target = target_label
        return self.cw(self.model,
                   self.image,
                   self.label,
                   self.target,
                   self.confidence,
                   self.clip_max,
                   self.clip_min,
                   self.max_iterations,
                   self.initial_const,
                   self.binary_search_steps,
                   self.learning_rate
                   )

    def parse_params(self,
                     classnum = 10,
                     confidence = 1e-4,
                     clip_max = 1,
                     clip_min = 0,
                     max_iterations = 1000,
                     initial_const = 1e-2,
                     binary_search_steps = 5,
                     learning_rate = 0.00001,
                     abort_early = True):
        """
        Parse the user defined parameters.
        Parameters
        ----------
        classnum :
            number of class
        confidence :
            confidence
        clip_max :
            maximum pixel value
        clip_min :
            minimum pixel value
        max_iterations :
            maximum number of iterations
        initial_const :
            initialization of binary search
        binary_search_steps :
            step number of binary search
        learning_rate :
            learning rate
        abort_early :
            Set abort_early = True to allow early stop
        """
        self.classnum = classnum
        self.confidence = confidence
        self.clip_max = clip_max
        self.clip_min = clip_min
        self.max_iterations = max_iterations
        self.initial_const = initial_const
        self.binary_search_steps = binary_search_steps
        self.learning_rate = learning_rate
        self.abort_early = abort_early
        return True

    def cw(self, model, image, label, target, confidence, clip_max, clip_min, max_iterations, initial_const, binary_search_steps, learning_rate):
        """Core C&W L2 loop: binary search over the trade-off constant `c`,
        with an inner Adam-driven optimization in tanh space."""
        #change the input image
        img_tanh = self.to_attack_space(image.cpu())
        img_ori ,_ = self.to_model_space(img_tanh)
        img_ori = img_ori.to(self.device)

        #binary search initialization
        c = initial_const
        c_low = 0
        c_high = np.inf
        found_adv = False
        last_loss = np.inf

        for step in range(binary_search_steps):
            #initialize w : perturbed image in tanh space
            w = torch.from_numpy(img_tanh.numpy())
            optimizer = AdamOptimizer(img_tanh.shape)
            is_adversarial = False
            for iteration in range(max_iterations):
                # adversary example
                img_adv, adv_grid = self.to_model_space(w)
                img_adv = img_adv.to(self.device)
                img_adv.requires_grad = True
                #output of the layer before softmax
                output = model.get_logits(img_adv)
                #pending success
                is_adversarial = self.pending_f(img_adv)
                #calculate loss function and gradient of loss funcition on x
                loss, loss_grad = self.loss_function(
                    img_adv, c, self.target, img_ori, self.confidence, self.clip_min, self.clip_max
                )
                #calculate gradient of loss function on w
                # chain rule: d(loss)/dw = d(model-space x)/dw * d(loss)/dx
                gradient = adv_grid.to(self.device) * loss_grad.to(self.device)
                w = w + torch.from_numpy(optimizer(gradient.cpu().detach().numpy(), learning_rate)).float()
                if is_adversarial:
                    found_adv = True
            #do binary search on c
            # NOTE(review): `found_adv` is never reset between binary-search
            # steps, so once any step succeeds c_high keeps shrinking even if
            # later steps fail — confirm this is intended.
            if found_adv:
                c_high = c
            else:
                c_low = c
            if c_high == np.inf:
                c *= 10
            else:
                c = (c_high + c_low) / 2

            if (step % 10 == 0):
                print("iteration:{:.0f},loss:{:.4f}".format(step,loss))
            # if (step == 50):
            #     learning_rate = learning_rate/100
            #abort early
            # NOTE(review): this tests `step` (binary-search index) > 100
            # although binary_search_steps defaults to 5, so with defaults the
            # early abort never fires — confirm intended.
            if(self.abort_early == True and (step % 10) == 0 and step > 100) :
                print("early abortion?", loss, last_loss)
                if not (loss <= 0.9999 * last_loss):
                    break
                last_loss = loss
        return img_adv.detach()

    def loss_function(
            self, x_p, const, target, reconstructed_original, confidence, min_, max_):
        """Returns the loss and the gradient of the loss w.r.t. x,
        assuming that logits = model(x)."""
        ## get the output of model before softmax
        x_p.requires_grad = True
        logits = self.model.get_logits(x_p).to(self.device)

        ## find the largest class except the target class
        targetlabel_mask = (torch.from_numpy(onehot_like(np.zeros(self.classnum), target))).double()
        secondlargest_mask = (torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask).to(self.device)
        secondlargest = np.argmax((logits.double() * secondlargest_mask).cpu().detach().numpy())

        # hinge term: positive while the target logit trails the runner-up
        is_adv_loss = logits[0][secondlargest] - logits[0][target]
        # is_adv is True as soon as the is_adv_loss goes below 0
        # but sometimes we want additional confidence
        is_adv_loss += confidence

        if is_adv_loss == 0:
            is_adv_loss_grad = 0
        else:
            is_adv_loss.backward()
            is_adv_loss_grad = x_p.grad

        is_adv_loss = max(0, is_adv_loss)

        # normalized squared L2 distance to the (reconstructed) original image
        s = max_ - min_
        squared_l2_distance = np.sum( ((x_p - reconstructed_original) ** 2).cpu().detach().numpy() ) / s ** 2
        total_loss = squared_l2_distance + const * is_adv_loss

        squared_l2_distance_grad = (2 / s ** 2) * (x_p - reconstructed_original)
        #print(is_adv_loss_grad)
        total_loss_grad = squared_l2_distance_grad + const * is_adv_loss_grad
        return total_loss, total_loss_grad

    def pending_f(self, x_p):
        """Pending is the loss function is less than 0

        Returns True once the target logit beats every other class by more
        than `self.confidence`.
        """
        targetlabel_mask = torch.from_numpy(onehot_like(np.zeros(self.classnum), self.target))
        secondlargest_mask = torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask
        targetlabel_mask = targetlabel_mask.to(self.device)
        secondlargest_mask = secondlargest_mask.to(self.device)

        Zx_i = np.max((self.model.get_logits(x_p).double().to(self.device) * secondlargest_mask).cpu().detach().numpy())
        Zx_t = np.max((self.model.get_logits(x_p).double().to(self.device) * targetlabel_mask).cpu().detach().numpy())
        if ( Zx_i - Zx_t < - self.confidence):
            return True
        else:
            return False

    def to_attack_space(self, x):
        """Map an image from model space [clip_min, clip_max] to the
        unconstrained tanh (attack) space."""
        x = x.detach()
        # map from [min_, max_] to [-1, +1]
        # x'=(x- 0.5 * (max+min) / 0.5 * (max-min))
        a = (self.clip_min + self.clip_max) / 2
        b = (self.clip_max - self.clip_min) / 2
        x = (x - a) / b

        # from [-1, +1] to approx. (-1, +1)
        x = x * 0.999999

        # from (-1, +1) to (-inf, +inf)
        return np.arctanh(x)

    def to_model_space(self, x):
        """Transforms an input from the attack space
        to the model space. This transformation and
        the returned gradient are elementwise."""
        # from (-inf, +inf) to (-1, +1)
        x = np.tanh(x)
        grad = 1 - np.square(x)

        # map from (-1, +1) to (min_, max_)
        a = (self.clip_min + self.clip_max) / 2
        b = (self.clip_max - self.clip_min) / 2
        x = x * b + a
        grad = grad * b
        return x, grad
| 9,404 | 31.884615 | 145 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/fgsm.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from numpy import linalg as LA
from attacks.deeprobust.base_attack import BaseAttack
class FGSM(BaseAttack):
    """
    FGSM attack is an one step gradient descent method.
    """

    def __init__(self, model, device = 'cuda'):
        super(FGSM, self).__init__(model, device)

    def generate(self, image, label, **kwargs):
        """"
        Call this function to generate FGSM adversarial examples.

        Parameters
        ----------
        image :
            original image
        label :
            target label
        kwargs :
            user defined paremeters
        """
        label = label.type(torch.FloatTensor)

        ## check and parse parameters for attack
        assert self.check_type_device(image, label)
        assert self.parse_params(**kwargs)

        return fgm(self.model,
                   self.image,
                   self.label,
                   self.epsilon,
                   self.order,
                   self.clip_min,
                   self.clip_max,
                   self.device)

    def parse_params(self,
                     epsilon = 0.2,
                     order = np.inf,
                     clip_max = None,
                     clip_min = None):
        """
        Store the user defined attack parameters on the attack object.

        :param epsilon: perturbation constraint
        :param order: constraint type (np.inf or 2)
        :param clip_max: maximum pixel value (None disables clipping)
        :param clip_min: minimum pixel value (None disables clipping)
        :return: True on success
        """
        self.epsilon = epsilon
        self.order = order
        self.clip_max = clip_max
        self.clip_min = clip_min
        return True
def fgm(model, image, label, epsilon, order, clip_min, clip_max, device):
    """
    One-step fast gradient attack.

    :param model: victim model
    :param image: original images [N*C*H*W]
    :param label: true/target labels (long tensor)
    :param epsilon: perturbation constraint
    :param order: np.inf for FGSM, 2 for the L2 variant
    :param clip_min/clip_max: pixel bounds (both None disables clipping)
    :param device: 'cpu' or 'cuda'
    :return: perturbed images, same shape as `image`
    """
    imageArray = image.cpu().detach().numpy()
    X_fgsm = torch.tensor(imageArray).to(device)

    X_fgsm.requires_grad = True

    opt = optim.SGD([X_fgsm], lr=1e-3)
    opt.zero_grad()

    out = model(X_fgsm)
    # Bayesian wrappers (bayesian_torch) return a (logits, kl) tuple; unwrap
    # only genuine tuples/lists. Fix: the original tested `len(out) > 1`,
    # which for a plain tensor equals the batch size, so any batch larger
    # than 1 was wrongly truncated to `out[0]`.
    if isinstance(out, (tuple, list)):
        out = out[0]
    loss = nn.CrossEntropyLoss()(out, label)
    loss.backward()

    if order == np.inf:
        # FGSM: step along the sign of the gradient
        d = epsilon * X_fgsm.grad.data.sign()
    elif order == 2:
        # L2 variant: per-sample normalized gradient scaled to epsilon
        gradient = X_fgsm.grad
        d = torch.zeros(gradient.shape, device = device)
        for i in range(gradient.shape[0]):
            norm_grad = gradient[i].data/LA.norm(gradient[i].data.cpu().numpy())
            d[i] = norm_grad * epsilon
    else:
        raise ValueError('Other p norms may need other algorithms')

    x_adv = X_fgsm + d
    if clip_max == None and clip_min == None:
        clip_max = np.inf
        clip_min = -np.inf
    x_adv = torch.clamp(x_adv, clip_min, clip_max)
    return x_adv
| 3,309 | 25.269841 | 80 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/utils.py | import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import urllib.request
import os
def create_train_dataset(batch_size = 128, root = '../data'):
    """Build a shuffled DataLoader over the MNIST training split."""
    to_tensor = transforms.Compose([transforms.ToTensor()])
    train_split = torchvision.datasets.MNIST(root=root, train=True,
                                             download=True, transform=to_tensor)
    return torch.utils.data.DataLoader(train_split, batch_size=batch_size,
                                       shuffle=True, num_workers=2)
def create_test_dataset(batch_size = 128, root = '../data'):
    """Build an unshuffled DataLoader over the MNIST test split."""
    to_tensor = transforms.Compose([transforms.ToTensor()])
    test_split = torchvision.datasets.MNIST(root=root, train=False,
                                            download=True, transform=to_tensor)
    return torch.utils.data.DataLoader(test_split, batch_size=batch_size,
                                       shuffle=False, num_workers=2)
def download_model(url, file):
    """Download `url` to local path `file`.

    Raises
    ------
    Exception
        If the download fails; the original error is chained as the cause.
        (Fix: the original used a bare `except:`, which also swallowed
        KeyboardInterrupt/SystemExit and discarded the underlying error.)
    """
    print('Downloading from {} to {}'.format(url, file))
    try:
        urllib.request.urlretrieve(url, file)
    except Exception as e:
        raise Exception("Download failed! Make sure you have stable Internet connection and enter the right name") from e
def save_checkpoint(now_epoch, net, optimizer, lr_scheduler, file_name):
    """Serialize the epoch plus model/optimizer/scheduler state dicts to `file_name`."""
    if os.path.exists(file_name):
        print('Overwriting {}'.format(file_name))
    state = {
        'epoch': now_epoch,
        'state_dict': net.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'lr_scheduler_state_dict': lr_scheduler.state_dict(),
    }
    torch.save(state, file_name)
def load_checkpoint(file_name, net = None, optimizer = None, lr_scheduler = None):
    """Restore states from `file_name`; return the saved epoch, or None when
    the checkpoint file does not exist."""
    if not os.path.isfile(file_name):
        print("=> no checkpoint found at '{}'".format(file_name))
        return
    print("=> loading checkpoint '{}'".format(file_name))
    check_point = torch.load(file_name)
    if net is not None:
        print('Loading network state dict')
        net.load_state_dict(check_point['state_dict'])
    if optimizer is not None:
        print('Loading optimizer state dict')
        optimizer.load_state_dict(check_point['optimizer_state_dict'])
    if lr_scheduler is not None:
        print('Loading lr_scheduler state dict')
        lr_scheduler.load_state_dict(check_point['lr_scheduler_state_dict'])
    return check_point['epoch']
def make_symlink(source, link_name):
    """
    Create a symlink `link_name` pointing at `source`.
    Note: overwriting enabled!
    """
    if os.path.exists(link_name):
        print("Link name already exist! Removing '{}' and overwriting".format(link_name))
        os.remove(link_name)
    if not os.path.exists(source):
        print('Source path not exists')
        return
    os.symlink(source, link_name)
from texttable import Texttable
def tab_printer(args):
    """
    Print the parameters of an argparse namespace as a two-column table.

    input:
        param args: Parameters used for the model.
    """
    params = vars(args)
    rows = [["Parameter", "Value"]]
    for key in sorted(params.keys()):
        rows.append([key.replace("_", " ").capitalize(), params[key]])
    table = Texttable()
    table.add_rows(rows)
    print(table.draw())
def onehot_like(a, index, value=1):
    """Creates an array like a, with all values
    set to 0 except one.

    Parameters
    ----------
    a : array_like
        The returned one-hot array will have the same shape
        and dtype as this array
    index : int
        The index that should be set to `value`
    value : single value compatible with a.dtype
        The value to set at the given index

    Returns
    -------
    `numpy.ndarray`
        One-hot array with the given value at the given
        location and zeros everywhere else.
    """
    onehot = np.zeros_like(a)
    onehot[index] = value
    return onehot
def reduce_sum(x, keepdim=True):
    """Sum `x` over every dimension except dim 0, one trailing axis at a time."""
    axis = x.dim() - 1
    while axis >= 1:
        x = x.sum(axis, keepdim=keepdim)
        axis -= 1
    return x
def arctanh(x, eps=1e-6):
    """
    Calculate arctanh(x), with the input scaled by (1 - eps) to keep the
    log arguments away from the poles at +/-1.

    Fix: the original used `x *= (1. - eps)`, which mutated the caller's
    array in place; the scaling now happens on a fresh array.
    """
    x = x * (1. - eps)
    return (np.log((1 + x) / (1 - x))) * 0.5
def l2r_dist(x, y, keepdim=True, eps=1e-8):
    """Regularized L2 distance sqrt(sum((x-y)^2) + eps) over non-batch dims."""
    sq_diff = (x - y) ** 2
    sq_diff = reduce_sum(sq_diff, keepdim=keepdim)
    sq_diff = sq_diff + eps  # to prevent infinite gradient at 0
    return sq_diff.sqrt()
def l2_dist(x, y, keepdim=True):
    """Squared L2 distance between x and y, summed over non-batch dims."""
    return reduce_sum((x - y) ** 2, keepdim=keepdim)
def l1_dist(x, y, keepdim=True):
    """L1 distance between x and y, summed over non-batch dims."""
    return reduce_sum((x - y).abs(), keepdim=keepdim)
def l2_norm(x, keepdim=True):
    """L2 norm of x over non-batch dims."""
    return reduce_sum(x * x, keepdim=keepdim).sqrt()
def l1_norm(x, keepdim=True):
    """L1 norm of x over non-batch dims."""
    return reduce_sum(x.abs(), keepdim=keepdim)
def adjust_learning_rate(optimizer, epoch, learning_rate):
    """Step-decay the learning rate by 10x at epochs 55, 75 and 90."""
    if epoch >= 90:
        lr = learning_rate * 0.001
    elif epoch >= 75:
        lr = learning_rate * 0.01
    elif epoch >= 55:
        lr = learning_rate * 0.1
    else:
        lr = learning_rate
    for group in optimizer.param_groups:
        group['lr'] = lr
    return optimizer
def progress_bar(current, total, msg=None):
    """Render a textual progress bar for step `current` of `total` on stdout.

    NOTE(review): this function depends on several names that are not defined
    in this module as shown (`time`, `sys`, `TOTAL_BAR_LENGTH`, `term_width`,
    `format_time`), and `last_time` is read before it is ever assigned on the
    very first call — confirm these are provided elsewhere before relying on
    this helper.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # Reset for new bar.
    # draw the '[===>....]' bar proportional to progress
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    # per-step and total elapsed timing
    cur_time = time.time()
    step_time = cur_time - last_time
    last_time = cur_time
    tot_time = cur_time - begin_time
    L = []
    L.append(' Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # pad to the end of the terminal line
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    if current < total-1:
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
| 6,457 | 29.462264 | 114 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/optimizer.py | """
This module includes the following optimizers:
1. differential_evolution:
The differential evolution global optimization algorithm
https://github.com/scipy/scipy/blob/70e61dee181de23fdd8d893eaa9491100e2218d7/scipy/optimize/_differentialevolution.py
modified by:
https://github.com/DebangLi/one-pixel-attack-pytorch/blob/master/differential_evolution.py
2. Basic Adam Optimizer
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import OptimizeResult, minimize
from scipy.optimize.optimize import _status_message
from scipy._lib._util import check_random_state
from scipy._lib.six import xrange, string_types
import warnings
__all__ = ['differential_evolution', 'AdamOptimizer']
_MACHEPS = np.finfo(np.float64).eps
def differential_evolution(func, bounds, args=(), strategy='best1bin',
                           maxiter=1000, popsize=15, tol=0.01,
                           mutation=(0.5, 1), recombination=0.7, seed=None,
                           callback=None, disp=False, polish=True,
                           init='latinhypercube', atol=0):
    """Find the global minimum of a multivariate function using differential
    evolution (Storn & Price, 1997) — a stochastic, gradient-free, population
    based optimizer.

    Parameters
    ----------
    func : callable
        Objective ``f(x, *args)`` taking a 1-D array ``x``.
    bounds : sequence
        ``(min, max)`` pair for each element of ``x``; ``len(bounds)``
        determines the number of parameters.
    args : tuple, optional
        Additional fixed arguments passed to `func`.
    strategy : str, optional
        Mutation/crossover strategy: one of 'best1bin' (default), 'best1exp',
        'rand1exp', 'randtobest1exp', 'currenttobest1exp', 'best2exp',
        'rand2exp', 'randtobest1bin', 'currenttobest1bin', 'best2bin',
        'rand2bin', 'rand1bin'.
    maxiter : int, optional
        Maximum number of generations; max function evaluations without
        polishing is ``(maxiter + 1) * popsize * len(x)``.
    popsize : int, optional
        Population multiplier; total population is ``popsize * len(x)``
        unless `init` supplies an explicit population.
    tol, atol : float, optional
        Relative / absolute convergence tolerances: solving stops when
        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``.
    mutation : float or tuple(float, float), optional
        Differential weight F in [0, 2]; a ``(min, max)`` tuple enables
        dithering (F redrawn from U[min, max) each generation).
    recombination : float, optional
        Crossover probability CR in [0, 1].
    seed : int or `np.random.RandomState`, optional
        Randomness source for repeatable minimizations.
    callback : callable, optional
        ``callback(xk, convergence=val)``; returning True halts the solve.
    disp : bool, optional
        Display status messages.
    polish : bool, optional
        If True (default), polish the best member with L-BFGS-B at the end.
    init : str or array-like, optional
        'latinhypercube' (default), 'random', or an explicit initial
        population of shape ``(M, len(x))`` (clipped to `bounds`).

    Returns
    -------
    res : OptimizeResult
        Scipy result object; key attributes are ``x``, ``success`` and
        ``message`` (plus ``jac`` when polishing improved the minimum).

    References
    ----------
    .. [1] Storn, R and Price, K, Differential Evolution - a Simple and
           Efficient Heuristic for Global Optimization over Continuous Spaces,
           Journal of Global Optimization, 1997, 11, 341 - 359.
    .. [2] http://www1.icsi.berkeley.edu/~storn/code.html
    .. [3] http://en.wikipedia.org/wiki/Differential_evolution
    """
    solver_options = dict(
        args=args, strategy=strategy, maxiter=maxiter, popsize=popsize,
        tol=tol, mutation=mutation, recombination=recombination, seed=seed,
        polish=polish, callback=callback, disp=disp, init=init, atol=atol,
    )
    return DifferentialEvolutionSolver(func, bounds, **solver_options).solve()
class DifferentialEvolutionSolver(object):
    """This class implements the differential evolution solver
    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the form
        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
        and ``args`` is a tuple of any additional fixed parameters needed to
        completely specify the function.
    bounds : sequence
        Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
        defining the lower and upper bounds for the optimizing argument of
        `func`. It is required to have ``len(bounds) == len(x)``.
        ``len(bounds)`` is used to determine the number of parameters in ``x``.
    args : tuple, optional
        Any additional fixed parameters needed to
        completely specify the objective function.
    strategy : str, optional
        The differential evolution strategy to use. Should be one of:
            - 'best1bin'
            - 'best1exp'
            - 'rand1exp'
            - 'randtobest1exp'
            - 'currenttobest1exp'
            - 'best2exp'
            - 'rand2exp'
            - 'randtobest1bin'
            - 'currenttobest1bin'
            - 'best2bin'
            - 'rand2bin'
            - 'rand1bin'
        The default is 'best1bin'
    maxiter : int, optional
        The maximum number of generations over which the entire population is
        evolved. The maximum number of function evaluations (with no polishing)
        is: ``(maxiter + 1) * popsize * len(x)``
    popsize : int, optional
        A multiplier for setting the total population size. The population has
        ``popsize * len(x)`` individuals (unless the initial population is
        supplied via the `init` keyword).
    tol : float, optional
        Relative tolerance for convergence, the solving stops when
        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
        where and `atol` and `tol` are the absolute and relative tolerance
        respectively.
    mutation : float or tuple(float, float), optional
        The mutation constant. In the literature this is also known as
        differential weight, being denoted by F.
        If specified as a float it should be in the range [0, 2].
        If specified as a tuple ``(min, max)`` dithering is employed. Dithering
        randomly changes the mutation constant on a generation by generation
        basis. The mutation constant for that generation is taken from
        U[min, max). Dithering can help speed convergence significantly.
        Increasing the mutation constant increases the search radius, but will
        slow down convergence.
    recombination : float, optional
        The recombination constant, should be in the range [0, 1]. In the
        literature this is also known as the crossover probability, being
        denoted by CR. Increasing this value allows a larger number of mutants
        to progress into the next generation, but at the risk of population
        stability.
    seed : int or `np.random.RandomState`, optional
        If `seed` is not specified the `np.random.RandomState` singleton is
        used.
        If `seed` is an int, a new `np.random.RandomState` instance is used,
        seeded with `seed`.
        If `seed` is already a `np.random.RandomState` instance, then that
        `np.random.RandomState` instance is used.
        Specify `seed` for repeatable minimizations.
    disp : bool, optional
        Display status messages
    callback : callable, `callback(xk, convergence=val)`, optional
        A function to follow the progress of the minimization. ``xk`` is
        the current value of ``x0``. ``val`` represents the fractional
        value of the population convergence.  When ``val`` is greater than one
        the function halts. If callback returns `True`, then the minimization
        is halted (any polishing is still carried out).
    polish : bool, optional
        If True, then `scipy.optimize.minimize` with the `L-BFGS-B` method
        is used to polish the best population member at the end. This requires
        a few more function evaluations.
    maxfun : int, optional
        Set the maximum number of function evaluations. However, it probably
        makes more sense to set `maxiter` instead.
    init : str or array-like, optional
        Specify which type of population initialization is performed. Should be
        one of:
            - 'latinhypercube'
            - 'random'
            - array specifying the initial population. The array should have
              shape ``(M, len(x))``, where len(x) is the number of parameters.
              `init` is clipped to `bounds` before use.
        The default is 'latinhypercube'. Latin Hypercube sampling tries to
        maximize coverage of the available parameter space. 'random'
        initializes the population randomly - this has the drawback that
        clustering can occur, preventing the whole of parameter space being
        covered. Use of an array to specify a population could be used, for
        example, to create a tight bunch of initial guesses in an location
        where the solution is known to exist, thereby reducing time for
        convergence.
    atol : float, optional
        Absolute tolerance for convergence, the solving stops when
        ``np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))``,
        where and `atol` and `tol` are the absolute and relative tolerance
        respectively.
    """
    # Dispatch of mutation strategy method (binomial or exponential).
    # Maps the user-facing strategy name to the bound method that builds the
    # donor vector `b'`; the 'bin'/'exp' suffix selects the crossover scheme
    # applied later in `_mutate`.
    _binomial = {'best1bin': '_best1',
                 'randtobest1bin': '_randtobest1',
                 'currenttobest1bin': '_currenttobest1',
                 'best2bin': '_best2',
                 'rand2bin': '_rand2',
                 'rand1bin': '_rand1'}
    _exponential = {'best1exp': '_best1',
                    'rand1exp': '_rand1',
                    'randtobest1exp': '_randtobest1',
                    'currenttobest1exp': '_currenttobest1',
                    'best2exp': '_best2',
                    'rand2exp': '_rand2'}
    __init_error_msg = ("The population initialization method must be one of "
                        "'latinhypercube' or 'random', or an array of shape "
                        "(M, N) where N is the number of parameters and M>5")
    def __init__(self, func, bounds, args=(),
                 strategy='best1bin', maxiter=1000, popsize=15,
                 tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
                 maxfun=np.inf, callback=None, disp=False, polish=True,
                 init='latinhypercube', atol=0):
        # Resolve the strategy name to a mutation method; an unknown name is
        # rejected here, so `_mutate` can assume one of the two dicts matches.
        if strategy in self._binomial:
            self.mutation_func = getattr(self, self._binomial[strategy])
        elif strategy in self._exponential:
            self.mutation_func = getattr(self, self._exponential[strategy])
        else:
            raise ValueError("Please select a valid mutation strategy")
        self.strategy = strategy
        self.callback = callback
        self.polish = polish
        # relative and absolute tolerances for convergence
        self.tol, self.atol = tol, atol
        # Mutation constant should be in [0, 2). If specified as a sequence
        # then dithering is performed.
        self.scale = mutation
        if (not np.all(np.isfinite(mutation)) or
                np.any(np.array(mutation) >= 2) or
                np.any(np.array(mutation) < 0)):
            raise ValueError('The mutation constant must be a float in '
                             'U[0, 2), or specified as a tuple(min, max)'
                             ' where min < max and min, max are in U[0, 2).')
        self.dither = None
        if hasattr(mutation, '__iter__') and len(mutation) > 1:
            self.dither = [mutation[0], mutation[1]]
            self.dither.sort()
        self.cross_over_probability = recombination
        self.func = func
        self.args = args
        # convert tuple of lower and upper bounds to limits
        # [(low_0, high_0), ..., (low_n, high_n]
        #     -> [[low_0, ..., low_n], [high_0, ..., high_n]]
        self.limits = np.array(bounds, dtype='float').T
        if (np.size(self.limits, 0) != 2 or not
                np.all(np.isfinite(self.limits))):
            raise ValueError('bounds should be a sequence containing '
                             'real valued (min, max) pairs for each value'
                             ' in x')
        if maxiter is None:  # the default used to be None
            maxiter = 1000
        self.maxiter = maxiter
        if maxfun is None:  # the default used to be None
            maxfun = np.inf
        self.maxfun = maxfun
        # population is scaled to between [0, 1].
        # We have to scale between parameter <-> population
        # save these arguments for _scale_parameter and
        # _unscale_parameter. This is an optimization
        self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
        self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
        self.parameter_count = np.size(self.limits, 1)
        self.random_number_generator = check_random_state(seed)
        # default population initialization is a latin hypercube design, but
        # there are other population initializations possible.
        # the minimum is 5 because 'best2bin' requires a population that's at
        # least 5 long
        self.num_population_members = max(5, popsize * self.parameter_count)
        self.population_shape = (self.num_population_members,
                                 self.parameter_count)
        self._nfev = 0
        # NOTE(review): `string_types` is assumed to come from a py2/py3
        # compatibility import elsewhere in this file (e.g. six) — confirm.
        if isinstance(init, string_types):
            if init == 'latinhypercube':
                self.init_population_lhs()
            elif init == 'random':
                self.init_population_random()
            else:
                raise ValueError(self.__init_error_msg)
        else:
            self.init_population_array(init)
        self.disp = disp
    def init_population_lhs(self):
        """
        Initializes the population with Latin Hypercube Sampling.
        Latin Hypercube Sampling ensures that each parameter is uniformly
        sampled over its range.
        """
        rng = self.random_number_generator
        # Each parameter range needs to be sampled uniformly. The scaled
        # parameter range ([0, 1)) needs to be split into
        # `self.num_population_members` segments, each of which has the following
        # size:
        segsize = 1.0 / self.num_population_members
        # Within each segment we sample from a uniform random distribution.
        # We need to do this sampling for each parameter.
        samples = (segsize * rng.random_sample(self.population_shape)
        # Offset each segment to cover the entire parameter range [0, 1)
                   + np.linspace(0., 1., self.num_population_members,
                                 endpoint=False)[:, np.newaxis])
        # Create an array for population of candidate solutions.
        self.population = np.zeros_like(samples)
        # Initialize population of candidate solutions by permutation of the
        # random samples.
        for j in range(self.parameter_count):
            order = rng.permutation(range(self.num_population_members))
            self.population[:, j] = samples[order, j]
        # reset population energies
        self.population_energies = (np.ones(self.num_population_members) *
                                    np.inf)
        # reset number of function evaluations counter
        self._nfev = 0
    def init_population_random(self):
        """
        Initialises the population at random.  This type of initialization
        can possess clustering, Latin Hypercube sampling is generally better.
        """
        rng = self.random_number_generator
        self.population = rng.random_sample(self.population_shape)
        # reset population energies
        self.population_energies = (np.ones(self.num_population_members) *
                                    np.inf)
        # reset number of function evaluations counter
        self._nfev = 0
    def init_population_array(self, init):
        """
        Initialises the population with a user specified population.
        Parameters
        ----------
        init : np.ndarray
            Array specifying subset of the initial population. The array should
            have shape (M, len(x)), where len(x) is the number of parameters.
            The population is clipped to the lower and upper `bounds`.
        """
        # make sure you're using a float array
        popn = np.asfarray(init)
        if (np.size(popn, 0) < 5 or
                popn.shape[1] != self.parameter_count or
                len(popn.shape) != 2):
            raise ValueError("The population supplied needs to have shape"
                             " (M, len(x)), where M > 4.")
        # scale values and clip to bounds, assigning to population
        self.population = np.clip(self._unscale_parameters(popn), 0, 1)
        self.num_population_members = np.size(self.population, 0)
        self.population_shape = (self.num_population_members,
                                 self.parameter_count)
        # reset population energies
        self.population_energies = (np.ones(self.num_population_members) *
                                    np.inf)
        # reset number of function evaluations counter
        self._nfev = 0
    @property
    def x(self):
        """
        The best solution from the solver
        Returns
        -------
        x : ndarray
            The best solution from the solver.
        """
        # population[0] is always kept as the best member (see the swap at the
        # end of _calculate_population_energies and the update in __next__).
        return self._scale_parameters(self.population[0])
    @property
    def convergence(self):
        """
        The standard deviation of the population energies divided by their
        mean.
        """
        return (np.std(self.population_energies) /
                np.abs(np.mean(self.population_energies) + _MACHEPS))
    def solve(self):
        """
        Runs the DifferentialEvolutionSolver.
        Returns
        -------
        res : OptimizeResult
            The optimization result represented as a ``OptimizeResult`` object.
            Important attributes are: ``x`` the solution array, ``success`` a
            Boolean flag indicating if the optimizer exited successfully and
            ``message`` which describes the cause of the termination. See
            `OptimizeResult` for a description of other attributes.  If `polish`
            was employed, and a lower minimum was obtained by the polishing,
            then OptimizeResult also contains the ``jac`` attribute.
        """
        nit, warning_flag = 0, False
        status_message = _status_message['success']
        # The population may have just been initialized (all entries are
        # np.inf). If it has you have to calculate the initial energies.
        # Although this is also done in the evolve generator it's possible
        # that someone can set maxiter=0, at which point we still want the
        # initial energies to be calculated (the following loop isn't run).
        if np.all(np.isinf(self.population_energies)):
            self._calculate_population_energies()
        # do the optimisation.
        # NOTE(review): `xrange` presumably comes from a py2 compatibility
        # import at file level — confirm.
        for nit in xrange(1, self.maxiter + 1):
            # evolve the population by a generation
            try:
                next(self)
            except StopIteration:
                warning_flag = True
                status_message = _status_message['maxfev']
                break
            if self.disp:
                print("differential_evolution step %d: f(x)= %g"
                      % (nit,
                         self.population_energies[0]))
            # should the solver terminate?
            convergence = self.convergence
            if (self.callback and
                    self.callback(self._scale_parameters(self.population[0]),
                                  convergence=self.tol / convergence) is True):
                warning_flag = True
                status_message = ('callback function requested stop early '
                                  'by returning True')
                break
            intol = (np.std(self.population_energies) <=
                     self.atol +
                     self.tol * np.abs(np.mean(self.population_energies)))
            if warning_flag or intol:
                break
        else:
            # loop ran to completion without converging
            status_message = _status_message['maxiter']
            warning_flag = True
        DE_result = OptimizeResult(
            x=self.x,
            fun=self.population_energies[0],
            nfev=self._nfev,
            nit=nit,
            message=status_message,
            success=(warning_flag is not True))
        if self.polish:
            result = minimize(self.func,
                              np.copy(DE_result.x),
                              method='L-BFGS-B',
                              bounds=self.limits.T,
                              args=self.args)
            self._nfev += result.nfev
            DE_result.nfev = self._nfev
            if result.fun < DE_result.fun:
                DE_result.fun = result.fun
                DE_result.x = result.x
                DE_result.jac = result.jac
                # to keep internal state consistent
                self.population_energies[0] = result.fun
                self.population[0] = self._unscale_parameters(result.x)
        return DE_result
    def _calculate_population_energies(self):
        """
        Calculate the energies of all the population members at the same time.
        Puts the best member in first place. Useful if the population has just
        been initialised.
        """
        ##############
        ## CHANGES: self.func operates on the entire parameters array
        ##############
        # Batched evaluation: `self.func` is called ONCE with an array of
        # shape (itersize, len(x)) and must return one energy per row.
        # itersize truncates the batch so that _nfev never exceeds maxfun.
        itersize = max(0, min(len(self.population), self.maxfun - self._nfev + 1))
        candidates = self.population[:itersize]
        parameters = np.array([self._scale_parameters(c) for c in candidates]) # TODO: vectorize
        energies = self.func(parameters, *self.args)
        # NOTE(review): if itersize < len(self.population) this replaces the
        # full energies array with a shorter one — assumed not to happen in
        # practice (maxfun is normally np.inf here); confirm.
        self.population_energies = energies
        self._nfev += itersize
        # for index, candidate in enumerate(self.population):
        #     if self._nfev > self.maxfun:
        #         break
        #     parameters = self._scale_parameters(candidate)
        #     self.population_energies[index] = self.func(parameters,
        #                                                 *self.args)
        #     self._nfev += 1
        ##############
        ##############
        minval = np.argmin(self.population_energies)
        # put the lowest energy into the best solution position.
        lowest_energy = self.population_energies[minval]
        self.population_energies[minval] = self.population_energies[0]
        self.population_energies[0] = lowest_energy
        self.population[[0, minval], :] = self.population[[minval, 0], :]
    def __iter__(self):
        return self
    def __next__(self):
        """
        Evolve the population by a single generation
        Returns
        -------
        x : ndarray
            The best solution from the solver.
        fun : float
            Value of objective function obtained from the best solution.
        """
        # the population may have just been initialized (all entries are
        # np.inf). If it has you have to calculate the initial energies
        if np.all(np.isinf(self.population_energies)):
            self._calculate_population_energies()
        if self.dither is not None:
            # dithering: resample the mutation constant each generation
            self.scale = (self.random_number_generator.rand()
                          * (self.dither[1] - self.dither[0]) + self.dither[0])
        ##############
        ## CHANGES: self.func operates on the entire parameters array
        ##############
        # Batched trial evaluation, mirroring _calculate_population_energies.
        # NOTE(review): unlike the original per-candidate loop (commented out
        # below), this path never raises StopIteration when maxfun is hit —
        # it just shrinks the batch; confirm this is intended.
        itersize = max(0, min(self.num_population_members, self.maxfun - self._nfev + 1))
        trials = np.array([self._mutate(c) for c in range(itersize)]) # TODO: vectorize
        for trial in trials: self._ensure_constraint(trial)
        parameters = np.array([self._scale_parameters(trial) for trial in trials])
        energies = self.func(parameters, *self.args)
        self._nfev += itersize
        for candidate,(energy,trial) in enumerate(zip(energies, trials)):
            # if the energy of the trial candidate is lower than the
            # original population member then replace it
            if energy < self.population_energies[candidate]:
                self.population[candidate] = trial
                self.population_energies[candidate] = energy
                # if the trial candidate also has a lower energy than the
                # best solution then replace that as well
                if energy < self.population_energies[0]:
                    self.population_energies[0] = energy
                    self.population[0] = trial
        # for candidate in range(self.num_population_members):
        #     if self._nfev > self.maxfun:
        #         raise StopIteration
        #     # create a trial solution
        #     trial = self._mutate(candidate)
        #     # ensuring that it's in the range [0, 1)
        #     self._ensure_constraint(trial)
        #     # scale from [0, 1) to the actual parameter value
        #     parameters = self._scale_parameters(trial)
        #     # determine the energy of the objective function
        #     energy = self.func(parameters, *self.args)
        #     self._nfev += 1
        #     # if the energy of the trial candidate is lower than the
        #     # original population member then replace it
        #     if energy < self.population_energies[candidate]:
        #         self.population[candidate] = trial
        #         self.population_energies[candidate] = energy
        #         # if the trial candidate also has a lower energy than the
        #         # best solution then replace that as well
        #         if energy < self.population_energies[0]:
        #             self.population_energies[0] = energy
        #             self.population[0] = trial
        ##############
        ##############
        return self.x, self.population_energies[0]
    def next(self):
        """
        Evolve the population by a single generation
        Returns
        -------
        x : ndarray
            The best solution from the solver.
        fun : float
            Value of objective function obtained from the best solution.
        """
        # next() is required for compatibility with Python2.7.
        return self.__next__()
    def _scale_parameters(self, trial):
        """
        scale from a number between 0 and 1 to parameters.
        """
        return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
    def _unscale_parameters(self, parameters):
        """
        scale from parameters to a number between 0 and 1.
        """
        return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
    def _ensure_constraint(self, trial):
        """
        make sure the parameters lie between the limits
        """
        # out-of-range entries are resampled uniformly rather than clipped,
        # to avoid piling candidates up on the bounds
        for index in np.where((trial < 0) | (trial > 1))[0]:
            trial[index] = self.random_number_generator.rand()
    def _mutate(self, candidate):
        """
        create a trial vector based on a mutation strategy
        """
        trial = np.copy(self.population[candidate])
        rng = self.random_number_generator
        fill_point = rng.randint(0, self.parameter_count)
        if self.strategy in ['currenttobest1exp', 'currenttobest1bin']:
            # current-to-best strategies also need the candidate's own index
            bprime = self.mutation_func(candidate,
                                        self._select_samples(candidate, 5))
        else:
            bprime = self.mutation_func(self._select_samples(candidate, 5))
        # __init__ guarantees the strategy is in exactly one of these dicts,
        # so one of the two branches below always returns.
        if self.strategy in self._binomial:
            crossovers = rng.rand(self.parameter_count)
            crossovers = crossovers < self.cross_over_probability
            # the last one is always from the bprime vector for binomial
            # If you fill in modulo with a loop you have to set the last one to
            # true. If you don't use a loop then you can have any random entry
            # be True.
            crossovers[fill_point] = True
            trial = np.where(crossovers, bprime, trial)
            return trial
        elif self.strategy in self._exponential:
            i = 0
            while (i < self.parameter_count and
                   rng.rand() < self.cross_over_probability):
                trial[fill_point] = bprime[fill_point]
                fill_point = (fill_point + 1) % self.parameter_count
                i += 1
            return trial
    def _best1(self, samples):
        """
        best1bin, best1exp
        """
        r0, r1 = samples[:2]
        return (self.population[0] + self.scale *
                (self.population[r0] - self.population[r1]))
    def _rand1(self, samples):
        """
        rand1bin, rand1exp
        """
        r0, r1, r2 = samples[:3]
        return (self.population[r0] + self.scale *
                (self.population[r1] - self.population[r2]))
    def _randtobest1(self, samples):
        """
        randtobest1bin, randtobest1exp
        """
        r0, r1, r2 = samples[:3]
        bprime = np.copy(self.population[r0])
        bprime += self.scale * (self.population[0] - bprime)
        bprime += self.scale * (self.population[r1] -
                                self.population[r2])
        return bprime
    def _currenttobest1(self, candidate, samples):
        """
        currenttobest1bin, currenttobest1exp
        """
        r0, r1 = samples[:2]
        bprime = (self.population[candidate] + self.scale *
                  (self.population[0] - self.population[candidate] +
                   self.population[r0] - self.population[r1]))
        return bprime
    def _best2(self, samples):
        """
        best2bin, best2exp
        """
        r0, r1, r2, r3 = samples[:4]
        bprime = (self.population[0] + self.scale *
                  (self.population[r0] + self.population[r1] -
                   self.population[r2] - self.population[r3]))
        return bprime
    def _rand2(self, samples):
        """
        rand2bin, rand2exp
        """
        r0, r1, r2, r3, r4 = samples
        bprime = (self.population[r0] + self.scale *
                  (self.population[r1] + self.population[r2] -
                   self.population[r3] - self.population[r4]))
        return bprime
    def _select_samples(self, candidate, number_samples):
        """
        obtain random integers from range(self.num_population_members),
        without replacement. You can't have the original candidate either.
        """
        idxs = list(range(self.num_population_members))
        idxs.remove(candidate)
        self.random_number_generator.shuffle(idxs)
        idxs = idxs[:number_samples]
        return idxs
class AdamOptimizer:
    """Minimal Adam optimizer that minimizes with respect to a single variable.

    Keeps exponentially decaying averages of past gradients (``m``) and past
    squared gradients (``v``) together with a step counter, and on each call
    returns the additive update that should be applied to the variable.

    Parameters
    ----------
    shape : tuple
        shape of the variable w.r.t. which the loss should be minimized
    """
    #TODO Add reference or rewrite the function.
    def __init__(self, shape):
        # First-moment estimate, second-moment estimate, and step counter.
        self.m = np.zeros(shape)
        self.v = np.zeros(shape)
        self.t = 0
    def __call__(self, gradient, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-8):
        """Update the optimizer state and return the change to apply.

        Parameters
        ----------
        gradient : `np.ndarray`
            the gradient of the loss w.r.t. to the variable
        learning_rate: float
            the learning rate in the current iteration
        beta1: float
            decay rate for the exponential average of past gradients
        beta2: float
            decay rate for the exponential average of past squared gradients
        epsilon: float
            small value to avoid division by zero
        """
        self.t += 1
        # Exponential moving averages of the gradient and its square.
        self.m = beta1 * self.m + (1 - beta1) * gradient
        self.v = beta2 * self.v + (1 - beta2) * gradient ** 2
        # Bias-corrected estimates (the raw averages are biased toward zero
        # during the first steps).
        m_hat = self.m / (1 - beta1 ** self.t)
        v_hat = self.v / (1 - beta2 ** self.t)
        # Negative sign: the returned delta moves the variable downhill.
        return -learning_rate * m_hat / (np.sqrt(v_hat) + epsilon)
| 38,893 | 41.460699 | 117 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/other/YOPOpgd.py | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torch.nn.functional as F
from attacks.deeprobust.base_attack import BaseAttack
class FASTPGD(BaseAttack):
    '''
    This module is the adversarial example generation algorithm in YOPO.

    Iterated FGSM (PGD) on inputs that live in a normalized space
    (``(x - mean) / std``): the step and the epsilon-ball clipping are
    applied in pixel space and converted back.

    References
    ----------
    Original code: https://github.com/a1600012888/YOPO-You-Only-Propagate-Once
    '''
    # ImageNet pre-trained mean and std
    # _mean = torch.tensor(np.array([0.485, 0.456, 0.406]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
    # _std = torch.tensor(np.array([0.229, 0.224, 0.225]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
    # _mean = torch.tensor(np.array([0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
    # _std = torch.tensor(np.array([1.0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis])
    def __init__(self, eps = 6 / 255.0, sigma = 3 / 255.0, nb_iter = 20,
                 norm = np.inf, DEVICE = torch.device('cpu'),
                 mean = torch.tensor(np.array([0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]),
                 std = torch.tensor(np.array([1.0]).astype(np.float32)[np.newaxis, :, np.newaxis, np.newaxis]), random_start = True):
        '''
        :param eps: maximum distortion of adversarial examples
        :param sigma: single step size
        :param nb_iter: number of attack iterations
        :param norm: which norm to bound the perturbations
        '''
        self.eps = eps
        self.sigma = sigma
        self.nb_iter = nb_iter
        self.norm = norm
        self.criterion = torch.nn.CrossEntropyLoss().to(DEVICE)
        self.DEVICE = DEVICE
        self._mean = mean.to(DEVICE)
        self._std = std.to(DEVICE)
        self.random_start = random_start
    def single_attack(self, net, inp, label, eta, target = None):
        '''
        Given the original image and the perturbation computed so far, computes
        a new perturbation.
        :param net:
        :param inp: original image (normalized space)
        :param label:
        :param eta: perturbation computed so far (normalized space)
        :param target: if given, ascend the summed logits of this class
            (targeted attack) instead of the cross-entropy loss
        :return: a new perturbation
        '''
        adv_inp = inp + eta
        #net.zero_grad()
        pred = net(adv_inp)
        if target is not None:
            targets = torch.sum(pred[:, target])
            # BUGFIX: was `adv_in` (undefined name) — the targeted branch
            # always raised NameError. Differentiate w.r.t. `adv_inp`.
            grad_sign = torch.autograd.grad(targets, adv_inp, only_inputs=True, retain_graph = False)[0].sign()
        else:
            loss = self.criterion(pred, label)
            grad_sign = torch.autograd.grad(loss, adv_inp,
                                            only_inputs=True, retain_graph = False)[0].sign()
        # FGSM step, scaled into normalized space.
        adv_inp = adv_inp + grad_sign * (self.sigma / self._std)
        # Convert back to pixel space to clip into the valid [0, 1] range.
        tmp_adv_inp = adv_inp * self._std + self._mean
        tmp_inp = inp * self._std + self._mean
        tmp_adv_inp = torch.clamp(tmp_adv_inp, 0, 1) ## clip into 0-1
        #tmp_adv_inp = (tmp_adv_inp - self._mean) / self._std
        tmp_eta = tmp_adv_inp - tmp_inp
        #tmp_eta = clip_eta(tmp_eta, norm=self.norm, eps=self.eps, DEVICE=self.DEVICE)
        # Project the perturbation back onto the l_inf epsilon-ball.
        if self.norm == np.inf:
            tmp_eta = torch.clamp(tmp_eta, -self.eps, self.eps)
        eta = tmp_eta/ self._std
        return eta
    def attack(self, net, inp, label, target = None):
        '''Run `nb_iter` steps and return the adversarial input.'''
        if self.random_start:
            eta = torch.FloatTensor(*inp.shape).uniform_(-self.eps, self.eps)
        else:
            eta = torch.zeros_like(inp)
        eta = eta.to(self.DEVICE)
        # NOTE(review): this normalizes the initial eta even when it is zero,
        # which is only a zero perturbation for mean=0 — confirm intent for
        # non-zero means.
        eta = (eta - self._mean) / self._std
        net.eval()
        inp.requires_grad = True
        eta.requires_grad = True
        for i in range(self.nb_iter):
            eta = self.single_attack(net, inp, label, eta, target)
            #print(i)
        #print(eta.max())
        adv_inp = inp + eta
        # Final clip to the valid pixel range, expressed in normalized space.
        tmp_adv_inp = adv_inp * self._std + self._mean
        tmp_adv_inp = torch.clamp(tmp_adv_inp, 0, 1)
        adv_inp = (tmp_adv_inp - self._mean) / self._std
        return adv_inp
    def to(self, device):
        '''Move the attack's buffers and loss to `device`.'''
        self.DEVICE = device
        self._mean = self._mean.to(device)
        self._std = self._std.to(device)
        self.criterion = self.criterion.to(device)
| 4,234 | 36.149123 | 133 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/other/lbfgs.py | import torch
import torch.nn as nn
import scipy.optimize as so
import numpy as np
import torch.nn.functional as F #233
from attacks.deeprobust.base_attack import BaseAttack
class LBFGS(BaseAttack):
    """
    LBFGS is the first adversarial generating algorithm.
    """
    def __init__(self, model, label, device = 'cuda' ):
        # NOTE(review): `label` is accepted but not used here — presumably
        # kept for interface parity with other attacks; confirm.
        super(LBFGS, self).__init__(model, device)
    def generate(self, image, label, target_label, **kwargs):
        """
        Call this function to generate adversarial examples.
        Parameters
        ----------
        image :
            original image
        label :
            target label
        kwargs :
            user defined paremeters
        """
        # check_type_device / parse_params are assumed (from BaseAttack) to
        # populate self.image / self.label and the attack parameters.
        assert self.check_type_device(image, label)
        assert self.parse_params(**kwargs)
        self.target_label = target_label
        # NOTE(review): assigning self.dist / self.loss here shadows the
        # `distance` return value and the `loss` method below on this
        # instance — after generate(), `self.loss` is a float, not a method.
        adv_img, self.dist, self.loss = optimize(self.model,
                                                 self.image,
                                                 self.label,
                                                 self.target_label,
                                                 self.bounds,
                                                 self.epsilon,
                                                 self.maxiter,
                                                 self.class_num,
                                                 self.device)
        return adv_img
    def distance(self):
        # Distance (squared l2) between the original and adversarial image,
        # valid only after generate() has been called.
        return self.dist
    def loss(self):
        # Final value of the LBFGS objective; only meaningful after
        # generate(). NOTE(review): once generate() runs, the instance
        # attribute `self.loss` replaces this method.
        return self.loss
    def parse_params(self,
                     clip_max = 1,
                     clip_min = 0,
                     class_num = 10,
                     epsilon = 1e-5, #step of finding initial c
                     maxiter = 20, #maximum of iteration in lbfgs optimization
                     ):
        """
        Parse the user defined parameters.
        Parameters
        ----------
        clip_max :
            maximum pixel value
        clip_min :
            minimum pixel value
        class_num :
            total number of class
        epsilon :
            step length for binary seach
        maxiter :
            maximum number of iterations
        """
        self.epsilon = epsilon
        self.maxiter = maxiter
        self.class_num = class_num
        self.bounds = (clip_min, clip_max)
        return True
def optimize(model, image, label, target_label, bounds, epsilon, maxiter, class_num, device):
    """Run the LBFGS targeted attack.

    Minimizes ``c * ||x - x0||^2 + nll_loss(model(x), target_label)`` with
    L-BFGS-B, first doubling ``c`` until the attack fails, then binary
    searching for the largest ``c`` that still yields an adversarial example.

    Parameters
    ----------
    model : the target classifier (callable on a (1, *shape) tensor)
    image : tensor of shape (1, *shape); only image[0] is attacked
    label : original label (unused by the optimization itself)
    target_label : class the adversarial example should be classified as
    bounds : (min, max) valid pixel range
    epsilon : step for the initial-c search and binary-search tolerance
    maxiter : maximum L-BFGS-B iterations per call
    class_num : total number of classes (unused here, kept for interface)
    device : torch device for model evaluation

    Returns
    -------
    (x_new, dis, mintargetfunc) : adversarial image tensor of shape
    (1, *shape), squared l2 distance to the original, and the final
    objective value. Returns None if no adversarial example is found.
    """
    x0 = image[0].to('cpu').detach().numpy()
    min_, max_ = bounds
    target_dist = torch.tensor(target_label)
    target_dist = target_dist.unsqueeze_(0).long().to(device)
    # store the shape for later and operate on the flattened input
    shape = x0.shape
    dtype = x0.dtype
    x0 = x0.flatten().astype(np.float64)
    n = len(x0)
    bounds = [(min_, max_)] * n
    def distance(x, y):
        # squared l2 distance between flattened images
        x = torch.from_numpy(x).double()
        y = torch.from_numpy(y).double()
        dist_squ = torch.norm(x - y)
        return dist_squ ** 2
    def loss(x, c):
        # objective: c * distance(x0, x) + nll_loss toward the target class
        v1 = distance(x0, x)
        x = torch.tensor(x.astype(dtype).reshape(shape))
        x = x.unsqueeze_(0).float().to(device)
        predict = model(x)
        v2 = F.nll_loss(predict, target_dist)
        v = c * v1 + v2
        #print(v)
        return np.float64(v)
    def pending_attack(target_model, adv_exp, target_label):
        # True iff the candidate is classified as the target label
        adv_exp = adv_exp.reshape(shape).astype(dtype)
        adv_exp = torch.from_numpy(adv_exp)
        adv_exp = adv_exp.unsqueeze_(0).float().to(device)
        predict1 = target_model(adv_exp)
        label = predict1.argmax(dim=1, keepdim=True)
        if label == target_label:
            return True
        else:
            return False
    def lbfgs_b(c):
        # one bounded L-BFGS run of the objective at trade-off constant c
        approx_grad_eps = (max_ - min_) / 100
        print('in lbfgs_b:', 'c =', c)
        optimize_output, f, d = so.fmin_l_bfgs_b(
            loss,
            x0,
            args=(c,),
            approx_grad = True,
            bounds = bounds,
            m = 15,
            maxiter = maxiter,
            factr = 1e10, #optimization accuracy
            maxls = 5,
            epsilon = approx_grad_eps)
        print('finish optimization')
        # LBFGS-B does not always exactly respect the boundaries
        if np.amax(optimize_output) > max_ or np.amin(optimize_output) < min_:  # pragma: no coverage
            # BUGFIX: was `logging.info(...)` but `logging` was never
            # imported in this module, so this branch raised NameError.
            print('Input out of bounds (min, max = {}, {}). Performing manual clip.'.format(
                np.amin(optimize_output), np.amax(optimize_output)))
            optimize_output = np.clip(optimize_output, min_, max_)
        is_adversarial = pending_attack(target_model = model, adv_exp = optimize_output, target_label = target_label)
        return optimize_output, is_adversarial
    # finding initial c: double c until the attack fails, giving an upper
    # bound c_high for the binary search
    c = epsilon
    print('finding initial c:')
    for i in range(30):
        c = 2 * c
        x_new, is_adversarial = lbfgs_b(c)
        if is_adversarial == False:
            break
    print('start binary search:')
    if is_adversarial == True:  # pragma: no cover
        # NOTE(review): callers unpack three return values; they will fail
        # on this bare None — kept for behavioral compatibility.
        print('Could not find an adversarial; maybe the model returns wrong gradients')
        return
    print('c_high:', c)
    # binary search for the largest c that still produces an adversarial
    c_low = 0
    c_high = c
    while c_high - c_low >= epsilon:
        print(c_high, ' ', c_low)
        c_half = (c_low + c_high) / 2
        x_new, is_adversarial = lbfgs_b(c_half)
        if is_adversarial:
            c_low = c_half
        else:
            c_high = c_half
    x_new, is_adversarial = lbfgs_b(c_low)
    dis = distance(x_new, x0)
    mintargetfunc = loss(x_new, c_low)
    x_new = x_new.astype(dtype)
    x_new = x_new.reshape(shape)
    x_new = torch.from_numpy(x_new).unsqueeze_(0).float().to(device)
    return x_new, dis, mintargetfunc
| 6,194 | 28.221698 | 117 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/other/Universal.py | """
https://github.com/ferjad/Universal_Adversarial_Perturbation_pytorch
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
"""
from attacks.deeprobust.attack import deepfool
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import numpy as np
import torch
import torch.optim as optim
import torch.utils.data as data_utils
from torch.autograd.gradcheck import zero_gradients
import math
from PIL import Image
import torchvision.models as models
import sys
import random
import time
from tqdm import tqdm
def get_model(model, device):
    """Return a pretrained torchvision classifier in eval mode on `device`.

    Parameters
    ----------
    model : str
        Architecture name, one of 'vgg16' or 'resnet18'.
    device :
        torch device the network is moved to.

    Raises
    ------
    ValueError
        If `model` is not a supported architecture name.
    """
    if model == 'vgg16':
        net = models.vgg16(pretrained=True)
    elif model == 'resnet18':
        net = models.resnet18(pretrained=True)
    else:
        # BUGFIX: previously an unknown name fell through to an
        # UnboundLocalError on `net`; fail fast with a clear message.
        raise ValueError("Unsupported model '{}'; expected 'vgg16' or 'resnet18'".format(model))
    net.eval()
    net = net.to(device)
    return net
def data_input_init(xi):
    """Return ImageNet normalization constants and the standard eval transform.

    Parameters
    ----------
    xi : unused; kept for interface compatibility with callers.

    Returns
    -------
    (mean, std, transform) : the per-channel normalization lists and a
    torchvision transform (resize 256, center-crop 224, to-tensor, normalize).
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=imagenet_mean, std=imagenet_std),
    ])
    return (imagenet_mean, imagenet_std, preprocess)
def proj_lp(v, xi, p):
    """Project `v` onto the l_p ball of radius `xi` centered at the origin.

    For p == inf this is an element-wise clamp; otherwise `v` is rescaled
    (never inflated) so its p-norm does not exceed `xi`.
    """
    if p == np.inf:
        return torch.clamp(v, -xi, xi)
    # small additive constant guards against division by a zero norm
    shrink = min(1, xi / (torch.norm(v, p) + 0.00001))
    return v * shrink
def get_fooling_rate(data_list, v, model, device):
    """Fraction of images in `data_list` whose prediction changes under `v`.

    Args:
        data_list: list of image file paths.
        v: universal perturbation tensor (added to each preprocessed image).
        model: classifier under attack.
        device: torch device.

    Returns:
        (fooling_rate, model): rate in [0, 1] and the model with all
        parameters frozen (requires_grad disabled).
    """
    # Fix: the transform was assigned to `f` but used below as `tf`,
    # which raised NameError on the first image.
    tf = data_input_init(0)[2]
    num_images = len(data_list)
    fooled = 0.0
    for name in tqdm(data_list):
        image = Image.open(name)
        image = tf(image)
        image = image.unsqueeze(0)
        image = image.to(device)
        _, pred = torch.max(model(image), 1)
        _, adv_pred = torch.max(model(image + v), 1)
        if pred != adv_pred:
            fooled += 1
    # Compute the fooling rate
    fooling_rate = fooled / num_images
    print('Fooling Rate = ', fooling_rate)
    # Freeze the model so later optimization only updates the perturbation.
    for param in model.parameters():
        param.requires_grad = False
    return fooling_rate, model
def universal_adversarial_perturbation(dataloader, model, device, xi=10, delta=0.2, max_iter_uni = 10, p=np.inf,
                num_classes=10, overshoot=0.02, max_iter_df=10,t_p = 0.2):
    """universal_adversarial_perturbation.
    Parameters
    ----------
    dataloader :
        dataloader
    model :
        target model
    device :
        device
    xi :
        controls the l_p magnitude of the perturbation
    delta :
        controls the desired fooling rate (default = 80% fooling rate)
    max_iter_uni :
        maximum number of iteration (default = 10*num_images)
    p :
        norm to be used (default = np.inf)
    num_classes :
        num_classes (default = 10)
    overshoot :
        to prevent vanishing updates (default = 0.02)
    max_iter_df :
        maximum number of iterations for deepfool (default = 10)
    t_p :
        truth percentage, for how many flipped labels in a batch. (default = 0.2)
    Returns
    -------
    the universal perturbation matrix.
    """
    time_start = time.time()
    mean, std,tf = data_input_init(xi)
    # Universal perturbation, hard-coded to 3x224x224 (ImageNet-sized) inputs.
    v = torch.zeros(1,3,224,224).to(device)
    v.requires_grad_()
    fooling_rate = 0.0
    # NOTE(review): `data_list` is never defined in this function or module
    # scope — this line raises NameError; presumably a list of image paths was
    # meant to be derived from `dataloader`. Verify against the upstream repo.
    num_images = len(data_list)
    itr = 0
    # Keep refining v until the desired fooling rate (1 - delta) is reached
    # or the outer iteration budget is exhausted.
    while fooling_rate < 1-delta and itr < max_iter_uni:
        # Iterate over the dataset and compute the purturbation incrementally
        for i,(img, label) in enumerate(dataloader):
            _, pred = torch.max(model(img),1)
            _, adv_pred = torch.max(model(img+v),1)
            # Only update v for images the current perturbation fails to fool.
            if(pred == adv_pred):
                perturb = deepfool(model, device)
                # NOTE(review): `num_classed` is undefined (typo for
                # `num_classes` on both sides of the keyword), and
                # `getpurb()` looks like a typo for a deepfool accessor —
                # confirm the deepfool API before relying on this path.
                _ = perturb.generate(img+v, num_classed = num_classed, overshoot = overshoot, max_iter = max_iter_df)
                dr, iter = perturb.getpurb()
                # Only accept the deepfool step if it converged early.
                if(iter<max_iter_df-1):
                    v = v + torch.from_numpy(dr).to(device)
                    # Re-project onto the l_p ball of radius xi.
                    v = proj_lp(v,xi,p)
            # NOTE(review): `k` is undefined — probably `i` was intended here.
            if(k%10==0):
                print('Norm of v: '+str(torch.norm(v).detach().cpu().numpy()))
        # NOTE(review): `data_list` again — see comment above.
        fooling_rate,model = get_fooling_rate(data_list,v,model, device)
        itr = itr + 1
    return v
| 4,136 | 27.93007 | 117 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/other/Nattack.py | import torch
from torch import optim
import numpy as np
import logging
from attacks.deeprobust.base_attack import BaseAttack
from attacks.deeprobust.utils import onehot_like, arctanh
class NATTACK(BaseAttack):
    """
    Nattack is a black box attack algorithm.
    """

    def __init__(self, model, device = 'cuda'):
        super(NATTACK, self).__init__(model, device)
        self.model = model
        self.device = device

    def generate(self, **kwargs):
        """
        Call this function to generate adversarial examples.

        Parameters
        ----------
        kwargs :
            user defined paremeters

        Returns
        -------
        The result of the module-level `attack` routine run over
        `self.dataloader`.
        """
        assert self.parse_params(**kwargs)
        # Fix: an `assert self.check_type_device(self.dataloader)` used to sit
        # AFTER this return and was therefore dead code; it has been removed
        # rather than enabled, since enabling it would change behavior.
        return attack(self.model, self.dataloader, self.classnum,
                      self.clip_max, self.clip_min, self.epsilon,
                      self.population, self.max_iterations,
                      self.learning_rate, self.sigma, self.target_or_not)

    def parse_params(self,
                     dataloader,
                     classnum,
                     target_or_not = False,
                     clip_max = 1,
                     clip_min = 0,
                     epsilon = 0.2,
                     population = 300,
                     max_iterations = 400,
                     learning_rate = 2,
                     sigma = 0.1
                     ):
        """parse_params.

        Parameters
        ----------
        dataloader :
            dataloader
        classnum :
            classnum
        target_or_not :
            target_or_not
        clip_max :
            maximum pixel value
        clip_min :
            minimum pixel value
        epsilon :
            perturb constraint
        population :
            population
        max_iterations :
            maximum number of iterations
        learning_rate :
            learning rate
        sigma :
            sigma
        """
        self.dataloader = dataloader
        self.classnum = classnum
        self.target_or_not = target_or_not
        self.clip_max = clip_max
        self.clip_min = clip_min
        self.epsilon = epsilon
        self.population = population
        self.max_iterations = max_iterations
        self.learning_rate = learning_rate
        self.sigma = sigma
        return True
def attack(model, loader, classnum, clip_max, clip_min, epsilon, population, max_iterations, learning_rate, sigma, target_or_not):
    """Run the NATTACK black-box attack over every sample in `loader`.

    For each correctly-classified input, a Gaussian search distribution over
    tanh-space perturbations is optimized with NES-style gradient estimates
    until an adversarial example within the epsilon ball is found or the
    iteration budget runs out. Results are printed; nothing is returned.

    NOTE(review): numpy functions (np.tanh, np.clip, np.abs) are applied to
    torch tensors throughout — this relies on numpy/torch interop and should
    be verified for the installed versions. `clip_max`/`clip_min` are accepted
    but never used.
    """
    logging.basicConfig(format = '%(asctime)s - %(levelname)s: %(message)s')
    logger = logging.getLogger('log_nattack')
    logger.setLevel(logging.DEBUG)
    logger.info('Start attack.')
    #initialization
    totalImages = 0
    succImages = 0
    faillist = []
    successlist = []
    printlist = []
    for i, (inputs, targets) in enumerate(loader):
        success = False
        print('attack picture No. ' + str(i))
        c = inputs.size(1) # chanel
        l = inputs.size(2) # length
        w = inputs.size(3) # width
        # Initialize the search mean at the tanh-space image itself.
        mu = arctanh((inputs * 2) - 1)
        #mu = torch.from_numpy(np.random.randn(1, c, l, w) * 0.001).float() # random initialize mean
        predict = model.forward(inputs)
        ## skip wrongly classified samples
        if predict.argmax(dim = 1, keepdim = True) != targets:
            print('skip the wrong example ', i)
            continue
        totalImages += 1
        ## finding most possible mean
        for runstep in range(max_iterations):
            # sample points from normal distribution
            eps = torch.from_numpy(np.random.randn(population, c, l, w)).float()
            z = mu.repeat(population, 1, 1, 1) + sigma * eps
            # calculate g_z
            # g_z maps tanh-space samples back to [0, 1] image space.
            g_z = np.tanh(z) * 1 / 2 + 1 / 2
            # testing whether exists successful attack every 10 iterations.
            if runstep % 10 == 0:
                realdist = g_z - inputs
                realclipdist = np.clip(realdist, -epsilon, epsilon).float()
                realclipinput = realclipdist + inputs
                info = 'inputs.shape__' + str(inputs.shape)
                logging.debug(info)
                predict = model.forward(realclipinput)
                #pending attack
                if (target_or_not == False):
                    # Untargeted: success when any clipped sample changes the label.
                    if sum(predict.argmax(dim = 1, keepdim = True)[0] != targets) > 0 and (np.abs(realclipdist).max() <= epsilon):
                        succImages += 1
                        success = True
                        print('succeed attack Images: '+str(succImages)+' totalImages: '+str(totalImages))
                        print('steps: '+ str(runstep))
                        successlist.append(i)
                        printlist.append(runstep)
                        break
            # calculate distance
            dist = g_z - inputs
            clipdist = np.clip(dist, -epsilon, epsilon)
            proj_g_z = inputs + clipdist
            proj_g_z = proj_g_z.float()
            outputs = model.forward(proj_g_z)
            # get cw loss on sampled images
            target_onehot = np.zeros((1,classnum))
            target_onehot[0][targets]=1.
            # CW margin: true-class logit vs. best other-class logit.
            real = (target_onehot * outputs.detach().numpy()).sum(1)
            other = ((1. - target_onehot) * outputs.detach().numpy() - target_onehot * 10000.).max(1)
            loss1 = np.clip(real - other, a_min= 0, a_max= 1e10)
            Reward = 0.5 * loss1
            # update mean by nes
            # Standardize rewards, then step the mean against the estimated gradient.
            A = ((Reward - np.mean(Reward)) / (np.std(Reward)+1e-7))
            A = np.array(A, dtype= np.float32)
            # NOTE(review): the reshape below hard-codes 1x1x28x28 (MNIST) —
            # this breaks for any other input size; confirm intended scope.
            mu = mu - torch.from_numpy((learning_rate/(population*sigma)) *
                                       ((np.dot(eps.reshape(population,-1).T, A)).reshape(1, 1, 28, 28)))
        if not success:
            faillist.append(i)
            print('failed:',faillist.__len__())
            print('....................................')
        else:
            #print('succeed:',successlist.__len__())
            print('....................................')
| 6,107 | 31.663102 | 130 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/other/BPDA.py | """
https://github.com/lordwarlock/Pytorch-BPDA/blob/master/bpda.py
"""
import torch
import torch.nn as nn
import torchvision.models as models
import numpy as np
def normalize(image, mean, std):
    """Standardize `image`: subtract `mean`, then divide by `std`."""
    shifted = image - mean
    return shifted / std
def preprocess(image):
    """Scale an HWC uint8-range image to [0, 1], reorder to CHW, and apply
    ImageNet mean/std normalization. Returns a numpy array of shape (3, H, W).
    """
    scaled = image / 255
    chw = np.transpose(scaled, (2, 0, 1))
    imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    imagenet_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    # Inlined `normalize`: (x - mean) / std.
    return (chw - imagenet_mean) / imagenet_std
def image2tensor(image):
    """Wrap a numpy image in a batch-of-one float tensor that tracks gradients."""
    batched = torch.Tensor(image).unsqueeze(0)
    batched.requires_grad_()
    return batched
def label2tensor(label):
    """Convert a scalar class label into a 1-element long tensor."""
    as_array = np.array([label])
    return torch.from_numpy(as_array).long()
def get_img_grad_given_label(image, label, model):
    """Return d(cross-entropy)/d(image) for the given label.

    Side effects: clears both the model's parameter gradients and the image's
    own gradient before returning, so repeated calls do not accumulate.

    Args:
        image: input tensor with requires_grad=True.
        label: long tensor of target class indices.
        model: classifier producing logits for `image`.

    Returns:
        A clone of the image gradient tensor.
    """
    logits = model(image)
    ce = nn.CrossEntropyLoss()
    # Fix: the loss previously referenced an undefined name `target`;
    # the function parameter is `label`.
    loss = ce(logits, label)
    loss.backward()
    ret = image.grad.clone()
    model.zero_grad()
    image.grad.data.zero_()
    return ret
def get_cw_grad(adv, origin, label, model):
    """Gradient of a CW-style loss (cross-entropy + relative L2 distance) w.r.t. `adv`.

    Side effects: clears the model's parameter gradients and zeroes the
    accumulated gradients on both `adv` and `origin`.

    NOTE(review): `l2(0, x)` passes the integer 0 as the MSELoss input —
    this leans on broadcasting inside mse_loss; verify it works on the
    installed torch version. Both `adv` and `origin` must already have
    `.grad` populated-able (requires_grad=True), otherwise the final
    `.grad.data.zero_()` calls raise AttributeError.
    """
    logits = model(adv)
    ce = nn.CrossEntropyLoss()
    l2 = nn.MSELoss()
    # Classification loss plus L2 distance normalized by the origin's energy.
    loss = ce(logits, label) + l2(0, origin - adv) / l2(0, origin)
    loss.backward()
    ret = adv.grad.clone()
    model.zero_grad()
    adv.grad.data.zero_()
    origin.grad.data.zero_()
    return ret
def l2_norm(adv, img):
    """Relative squared L2 distance ||adv - img||^2 / ||img||^2 (numpy scalar)."""
    adv_np = adv.detach().numpy()
    img_np = img.detach().numpy()
    diff = adv_np - img_np
    return np.sum(np.square(diff)) / np.sum(np.square(img_np))
def clip_bound(adv):
    """Clip a normalized CHW image so its denormalized pixels lie in [0, 1].

    Denormalizes with ImageNet statistics, clips to the valid pixel range,
    re-normalizes, and returns float32.
    """
    imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    imagenet_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    pixels = adv * imagenet_std + imagenet_mean
    pixels = np.clip(pixels, 0., 1.)
    renormalized = (pixels - imagenet_mean) / imagenet_std
    return renormalized.astype(np.float32)
def identity_transform(x):
    """Return a detached copy of `x` (the default no-op 'defense' for BPDA)."""
    return x.clone().detach()
def BPDA_attack(image,target, model, step_size = 1., iterations = 10, linf=False, transform_func=identity_transform):
    """Backward Pass Differentiable Approximation (BPDA) attack.

    Repeatedly applies `transform_func` (the defense), takes CW-style
    gradients through the transformed image, and steps `adv` against them,
    clipping to the valid (normalized) pixel range each iteration.

    Args:
        image: original input tensor (with requires_grad=True).
        target: scalar class label used in the CW loss.
        model: classifier under attack.
        step_size: gradient step size.
        iterations: number of attack steps.
        linf: if True, use the sign of the gradient (L-inf style steps).
        transform_func: differentiable stand-in for the defense.

    Returns:
        The final adversarial example as a numpy array.
    """
    target = label2tensor(target)
    # Round-trip through numpy to get a fresh leaf tensor for `adv`.
    adv = image.detach().numpy()
    adv = torch.from_numpy(adv)
    adv.requires_grad_()
    for _ in range(iterations):
        adv_def = transform_func(adv)
        adv_def.requires_grad_()
        l2 = nn.MSELoss()
        # Seed .grad on adv_def so get_cw_grad's zeroing calls succeed.
        loss = l2(0, adv_def)
        loss.backward()
        g = get_cw_grad(adv_def, image, target, model)
        if linf:
            g = torch.sign(g)
        print(g.numpy().sum())
        # Gradient-descent step in numpy space, then clip back to valid pixels.
        adv = adv.detach().numpy() - step_size * g.numpy()
        adv = clip_bound(adv)
        adv = torch.from_numpy(adv)
        adv.requires_grad_()
        if linf:
            print('label', torch.argmax(model(adv)), 'linf', torch.max(torch.abs(adv - image)).detach().numpy())
        else:
            print('label', torch.argmax(model(adv)), 'l2', l2_norm(adv, image))
    return adv.detach().numpy()
if __name__ == '__main__':
    # Demo: attack a pretrained ResNet-18 on a local image, untargeted-style
    # toward class 924, first with L2 steps and then with L-inf steps.
    import matplotlib
    matplotlib.use('TkAgg')
    import skimage
    resnet18 = models.resnet18(pretrained=True).eval() # for CPU, remove cuda()
    image = preprocess(skimage.io.imread('test.png'))
    img_t = image2tensor(image)
    BPDA_attack(img_t, 924, resnet18)
    print('L-inf')
    BPDA_attack(img_t, 924, resnet18, step_size = 0.003, linf=True)
| 3,177 | 28.981132 | 117 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/other/onepixel.py | import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from attacks.deeprobust.optimizer import differential_evolution
from attacks.deeprobust.base_attack import BaseAttack
from attacks.deeprobust.utils import progress_bar
class Onepixel(BaseAttack):
    """
    Onepixel attack is an algorithm that allow attacker to only manipulate one (or a few) pixel to mislead classifier.
    This is a re-implementation of One pixel attack.
    Copyright (c) 2018 Debang Li
    References
    ----------
    Akhtar, N., & Mian, A. (2018).Threat of Adversarial Attacks on Deep Learning in Computer Vision: A Survey: A Survey. IEEE Access, 6, 14410-14430.
    Reference code: https://github.com/DebangLi/one-pixel-attack-pytorch
    """

    def __init__(self, model, device = 'cuda'):
        super(Onepixel, self).__init__(model, device)

    def generate(self, image, label, **kwargs):
        """
        Call this function to generate Onepixel adversarial examples.
        Parameters
        ----------
        image :1*3*W*H
            original image
        label :
            target label
        kwargs :
            user defined paremeters
        """
        label = label.type(torch.FloatTensor)
        ## check and parse parameters for attack
        assert self.check_type_device(image, label)
        assert self.parse_params(**kwargs)
        # Fix: pass parameters by keyword. The previous positional call put
        # self.pixels into `target`, self.maxiter into `pixels`, and so on,
        # and never forwarded self.target at all.
        return self.one_pixel(self.image,
                              self.label,
                              targeted_attack=self.targeted_attack,
                              target=self.target,
                              pixels=self.pixels,
                              maxiter=self.maxiter,
                              popsize=self.popsize,
                              print_log=self.print_log)

    def get_pred(self):
        # Fix: `self` was missing from the signature, so calling this method
        # raised TypeError (and `self` inside the body was undefined).
        return self.adv_pred

    def parse_params(self,
                     pixels = 1,
                     maxiter = 100,
                     popsize = 400,
                     samples = 100,
                     targeted_attack = False,
                     print_log = True,
                     target = 0):
        """
        Parse the user-defined params.
        Parameters
        ----------
        pixels :
            maximum number of manipulated pixels
        maxiter :
            maximum number of iteration
        popsize :
            population size
        samples :
            samples
        targeted_attack :
            targeted attack or not
        print_log :
            Set print_log = True to print out details in the searching algorithm
        target :
            target label (if targeted attack is set to be True)
        """
        self.pixels = pixels
        self.maxiter = maxiter
        self.popsize = popsize
        self.samples = samples
        self.targeted_attack = targeted_attack
        self.print_log = print_log
        self.target = target
        return True

    def one_pixel(self, img, label, targeted_attack = False, target = 0, pixels = 1, maxiter = 75, popsize = 400, print_log = False):
        """Search for an adversarial (x, y, r, g, b) pixel perturbation with
        differential evolution. Returns the perturbed image tensor on success,
        or [None] on failure."""
        # label: a number
        target_calss = target if targeted_attack else label
        bounds = [(0,32), (0,32), (0,255), (0,255), (0,255)] * pixels
        # Fix: integer division — a float popmul broke np.zeros below.
        popmul = max(1, popsize // len(bounds))
        # Fix: DE *minimizes* the objective. Untargeted attacks minimize the
        # true-class probability (minimize=True); targeted attacks minimize
        # 1 - target-class probability. The flag was previously inverted.
        predict_fn = lambda xs: predict_classes(
            xs, img, target_calss, self.model, not targeted_attack, self.device)
        callback_fn = lambda x, convergence: attack_success(
            x, img, target_calss, self.model, targeted_attack, print_log, self.device)
        # Random initial population: uniform positions, ~N(128, 127) colors.
        inits = np.zeros([popmul*len(bounds), len(bounds)])
        for init in inits:
            for i in range(pixels):
                init[i*5+0] = np.random.random()*32
                init[i*5+1] = np.random.random()*32
                init[i*5+2] = np.random.normal(128,127)
                init[i*5+3] = np.random.normal(128,127)
                init[i*5+4] = np.random.normal(128,127)
        attack_result = differential_evolution(predict_fn, bounds, maxiter = maxiter, popsize = popmul,
            recombination = 1, atol = -1, callback = callback_fn, polish = False, init = inits)
        attack_image = perturb_image(attack_result.x, img)
        # Fix: use the configured device instead of the deprecated
        # Variable(..., volatile=True).cuda(), which crashed on CPU-only hosts.
        attack_var = attack_image.to(self.device)
        with torch.no_grad():
            predicted_probs = F.softmax(self.model(attack_var)).data.cpu().numpy()[0]
        predicted_class = np.argmax(predicted_probs)
        if (not targeted_attack and predicted_class != label) or (targeted_attack and predicted_class == target_calss):
            self.adv_pred = predicted_class
            return attack_image
        return [None]
def perturb_image(xs, img):
    """Apply one or more (x, y, r, g, b) pixel perturbations to copies of `img`.

    Args:
        xs: array of candidate solutions; each row is `5 * n_pixels` values
            (position x, position y, red, green, blue in [0, 255]). A single
            1-D candidate is accepted and treated as a batch of one.
        img: image tensor of shape (1, 3, H, W), CIFAR-normalized.

    Returns:
        A tensor of shape (len(xs), 3, H, W) with the requested pixels
        overwritten by the CIFAR-normalized RGB values.
    """
    if xs.ndim < 2:
        xs = np.array([xs])
    batch = len(xs)
    imgs = img.repeat(batch, 1, 1, 1)
    xs = xs.astype(int)
    count = 0
    for x in xs:
        # Fix: np.split requires an integer section count; len(x)/5 is a
        # float and raises TypeError.
        pixels = np.split(x, len(x) // 5)
        for pixel in pixels:
            x_pos, y_pos, r, g, b = pixel
            # Normalize the raw RGB values with CIFAR-10 channel statistics.
            imgs[count, 0, x_pos, y_pos] = (r/255.0-0.4914)/0.2023
            imgs[count, 1, x_pos, y_pos] = (g/255.0-0.4822)/0.1994
            imgs[count, 2, x_pos, y_pos] = (b/255.0-0.4465)/0.2010
        count += 1
    return imgs
def predict_classes(xs, img, target_calss, net, minimize=True, device = 'cuda'):
    """Differential-evolution objective: softmax probability of `target_calss`
    for each perturbed copy of `img` (or its complement when minimize=False)."""
    perturbed = perturb_image(xs, img.clone()).to(device)
    probs = F.softmax(net(perturbed)).data.cpu().numpy()[:, target_calss]
    if minimize:
        return probs
    return 1 - probs
def attack_success(x, img, target_calss, net, targeted_attack = False, print_log=False, device = 'cuda'):
    """Differential-evolution callback: True once the candidate `x` fools `net`.

    Untargeted: success when the prediction differs from the true class.
    Targeted: success when the prediction equals `target_calss`.
    """
    attack_image = perturb_image(x, img.clone()).to(device)
    confidence = F.softmax(net(attack_image)).data.cpu().numpy()[0]
    pred = np.argmax(confidence)
    if (print_log):
        print("Confidence: %.4f"%confidence[target_calss])
    if (targeted_attack and pred == target_calss) or (not targeted_attack and pred != target_calss):
        return True
    # Fix: previously fell through and returned None; make the failure
    # branch explicit (callers only rely on truthiness, so this is
    # backward-compatible).
    return False
| 5,935 | 30.743316 | 149 | py |
BayesianRelevance | BayesianRelevance-master/src/attacks/deeprobust/other/l2_attack.py | import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
class CarliniL2:
    """Carlini & Wagner L2 attack performed through a generator network.

    `generate` optimizes a latent `modifier` so that `gan(modifier)` stays
    close in L2 to the original images while the classifier's decision is
    flipped (untargeted) or forced to a target (targeted). The trade-off
    constant CONST is tuned per-example by binary search.
    """
    def __init__(self, model, device):
        # model: classifier producing pre-softmax scores; device: torch device.
        self.model = model
        self.device = device
    def parse_params(self, gan, confidence=0, targeted=False, learning_rate=1e-1,
                     binary_search_steps=5, max_iterations=10000, abort_early=False, initial_const=1,
                     clip_min=0, clip_max=1):
        # Store attack hyper-parameters; `gan` maps latent codes to images.
        # NOTE(review): clip_min/clip_max are stored but never used below.
        self.TARGETED = targeted
        self.LEARNING_RATE = learning_rate
        self.MAX_ITERATIONS = max_iterations
        self.BINARY_SEARCH_STEPS = binary_search_steps
        self.ABORT_EARLY = abort_early
        self.CONFIDENCE = confidence
        self.initial_const = initial_const
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.gan = gan
        self.learning_rate = learning_rate
        # With >= 10 binary-search steps, the last step re-runs at the upper bound.
        self.repeat = binary_search_steps >= 10
    def get_or_guess_labels(self, x, y=None):
        """
        Get the label to use in generating an adversarial example for x.
        The kwargs are fed directly from the kwargs of the attack.
        If 'y' is in kwargs, use that as the label.
        Otherwise, use the model's prediction as the label.
        """
        if y is not None:
            labels = y
        else:
            # Build a one-hot-like mask marking the model's argmax class.
            preds = F.softmax(self.model(x))
            preds_max = torch.max(preds, 1, keepdim=True)[0]
            original_predictions = (preds == preds_max)
            labels = original_predictions
            del preds
        return labels.float()
    def atanh(self, x):
        # Inverse hyperbolic tangent; assumes x strictly inside (-1, 1).
        return 0.5 * torch.log((1 + x) / (1 - x))
    def to_one_hot(self, x):
        # Convert a batch of class indices into one-hot rows.
        # NOTE(review): the class count 10 is hard-coded here.
        one_hot = torch.FloatTensor(x.shape[0], 10).to(x.get_device())
        one_hot.zero_()
        x = x.unsqueeze(1)
        one_hot = one_hot.scatter_(1, x, 1)
        return one_hot
    def generate(self, imgs, y, start):
        # imgs: original images; y: labels (or None to use model predictions);
        # start: initial latent code fed to the GAN.
        batch_size = imgs.shape[0]
        labs = self.get_or_guess_labels(imgs, y)
        def compare(x, y):
            # True when scores x count as a successful attack against class y,
            # with the confidence margin applied.
            if self.TARGETED is None: return True
            if sum(x.shape) != 0:
                x = x.clone()
                if self.TARGETED:
                    x[y] -= self.CONFIDENCE
                else:
                    x[y] += self.CONFIDENCE
                x = torch.argmax(x)
            if self.TARGETED:
                return x == y
            else:
                return x != y
        # set the lower and upper bounds accordingly
        lower_bound = torch.zeros(batch_size).to(self.device)
        CONST = torch.ones(batch_size).to(self.device) * self.initial_const
        upper_bound = (torch.ones(batch_size) * 1e10).to(self.device)
        # the best l2, score, and image attack
        o_bestl2 = [1e10] * batch_size
        o_bestscore = [-1] * batch_size
        o_bestattack = self.gan(start)
        # check if the input label is one-hot, if not, then change it into one-hot vector
        if len(labs.shape) == 1:
            tlabs = self.to_one_hot(labs.long())
        else:
            tlabs = labs
        for outer_step in range(self.BINARY_SEARCH_STEPS):
            # completely reset adam's internal state.
            modifier = nn.Parameter(start)
            optimizer = torch.optim.Adam([modifier, ], lr=self.learning_rate)
            bestl2 = [1e10] * batch_size
            bestscore = -1 * torch.ones(batch_size, dtype=torch.float32).to(self.device)
            # The last iteration (if we run many steps) repeat the search once.
            if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
                CONST = upper_bound
            prev = 1e6
            for i in range(self.MAX_ITERATIONS):
                optimizer.zero_grad()
                nimgs = self.gan(modifier.to(self.device))
                # distance to the input data
                l2dist = torch.sum(torch.sum(torch.sum((nimgs - imgs) ** 2, 1), 1), 1)
                loss2 = torch.sum(l2dist)
                # prediction BEFORE-SOFTMAX of the model
                scores = self.model(nimgs)
                # compute the probability of the label class versus the maximum other
                other = torch.max(((1 - tlabs) * scores - tlabs * 10000), 1)[0]
                real = torch.sum(tlabs * scores, 1)
                if self.TARGETED:
                    # if targeted, optimize for making the other class most likely
                    loss1 = torch.max(torch.zeros_like(other), other - real + self.CONFIDENCE)
                else:
                    # if untargeted, optimize for making this class least likely.
                    loss1 = torch.max(torch.zeros_like(other), real - other + self.CONFIDENCE)
                # sum up the losses
                loss1 = torch.sum(CONST * loss1)
                loss = loss1 + loss2
                # update the modifier
                loss.backward()
                optimizer.step()
                # check if we should abort search if we're getting nowhere.
                if self.ABORT_EARLY and i % ((self.MAX_ITERATIONS // 10) or 1) == 0:
                    if loss > prev * .9999:
                        # print('Stop early')
                        break
                    prev = loss
                # adjust the best result found so far
                for e, (l2, sc, ii) in enumerate(zip(l2dist, scores, nimgs)):
                    lab = torch.argmax(tlabs[e])
                    if l2 < bestl2[e] and compare(sc, lab):
                        bestl2[e] = l2
                        bestscore[e] = torch.argmax(sc)
                    if l2 < o_bestl2[e] and compare(sc, lab):
                        o_bestl2[e] = l2
                        o_bestscore[e] = torch.argmax(sc)
                        o_bestattack[e] = ii
            # adjust the constant as needed
            for e in range(batch_size):
                if compare(bestscore[e], torch.argmax(tlabs[e]).float()) and \
                        bestscore[e] != -1:
                    # success, divide CONST by two
                    upper_bound[e] = min(upper_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                else:
                    # failure, either multiply by 10 if no solution found yet
                    # or do binary search with the known upper bound
                    lower_bound[e] = max(lower_bound[e], CONST[e])
                    if upper_bound[e] < 1e9:
                        CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
                    else:
                        CONST[e] *= 10
        # return the best solution found
        o_bestl2 = np.array(o_bestl2)
return o_bestattack | 6,741 | 37.747126 | 97 | py |
BayesianRelevance | BayesianRelevance-master/src/plot/lrp_heatmaps.py | import os
import lrp
import copy
import torch
import numpy as np
from tqdm import tqdm
import matplotlib
import pandas as pd
import seaborn as sns
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from utils.savedir import *
from utils.seeding import set_seed
from utils.lrp import *
# Diverging colormap for all relevance heatmaps (negative=blue, positive=red).
relevance_cmap = "RdBu_r"
# Module-level debug switch: enables extra shape printing in plotting helpers.
DEBUG=False
def plot_explanations(images, explanations, rule, savedir, filename, layer_idx=-1):
    """Plot a random subset of inputs (top row) with their LRP heatmaps (bottom row).

    Args:
        images: array of input images, shape (n, ...).
        explanations: relevance array with the same shape as `images`.
        rule: LRP rule name (unused here; kept for interface compatibility).
        savedir: base output directory; an LRP subdirectory is appended.
        filename: output file name without extension.
        layer_idx: layer index used to build the LRP subdirectory.

    Raises:
        ValueError: if `images` and `explanations` shapes differ.
    """
    savedir = os.path.join(savedir, lrp_savedir(layer_idx))
    if images.shape != explanations.shape:
        print(images.shape, "!=", explanations.shape)
        raise ValueError
    cmap = plt.cm.get_cmap(relevance_cmap)
    rows = 2
    cols = min(len(explanations), 6)
    fig, axes = plt.subplots(rows, cols, figsize=(12, 4))
    fig.tight_layout()
    set_seed(0)
    idxs = np.random.choice(len(explanations), cols)
    # Fix: iterate over the sampled indices. Previously `idxs` was computed
    # but the loop plotted images[0..cols-1], silently discarding the random
    # selection (sibling plotting functions use their sampled indices).
    for col, idx in enumerate(idxs):
        image = np.squeeze(images[idx])
        expl = np.squeeze(explanations[idx])
        if len(image.shape) == 1:
            # 1-D inputs are rendered as a single-row heatmap.
            image = np.expand_dims(image, axis=0)
            expl = np.expand_dims(expl, axis=0)
        axes[0, col].imshow(image)
        axes[1, col].imshow(expl, cmap=cmap)
    os.makedirs(savedir, exist_ok=True)
    print("\nSaving: ", os.path.join(savedir,filename+".png"))
    plt.savefig(os.path.join(savedir,filename+".png"))
def relevant_subset(images, pxl_idxs, lrp_rob_method):
flat_images = images.reshape(*images.shape[:1], -1)
images_rel = np.zeros(flat_images.shape)
print("\n", len(pxl_idxs[0]), "relevant pixels =", pxl_idxs[0])
if lrp_rob_method=="imagewise":
# different selection of pixels for each image
for image_idx, im_pxl_idxs in enumerate(pxl_idxs):
images_rel[image_idx,im_pxl_idxs] = flat_images[image_idx,im_pxl_idxs]
elif lrp_rob_method=="pixelwise":
# same pxls for all the images
images_rel[:,pxl_idxs] = flat_images[:,pxl_idxs]
else:
raise NotImplementedError
images_rel = images_rel.reshape(images.shape)
return images_rel
def plot_attacks_explanations(images, explanations, attacks, attacks_explanations,
                              predictions, attacks_predictions, successful_attacks_idxs, failed_attacks_idxs,
                              labels, pxl_idxs, lrp_rob_method, rule, savedir, filename, layer_idx=-1):
    """Plot 3 successful and 3 failed attacks with their LRP heatmaps.

    Rows: original images (with relevant pixels overlaid), their LRP maps,
    adversarial images (with relevant pixels overlaid), and the attacks' LRP
    maps. Only the top-k relevant pixels (per `pxl_idxs` / `lrp_rob_method`)
    are shown in the relevance rows. Returns None (and also when there are
    fewer than 3 examples of either kind).
    """
    if DEBUG:
        print(images.shape, explanations.shape, attacks.shape, attacks_explanations.shape)
        print(predictions.shape, attacks_predictions.shape)
        print(successful_attacks_idxs.shape, failed_attacks_idxs.shape)
    # Need at least 3 examples per group to fill the figure.
    if len(successful_attacks_idxs)<3 or len(failed_attacks_idxs)<3:
        return None
    images_cmap='Greys'
    set_seed(0)
    chosen_successful_idxs = np.random.choice(successful_attacks_idxs, 3)
    chosen_failed_idxs = np.random.choice(failed_attacks_idxs, 3)
    im_idxs = np.concatenate([chosen_successful_idxs, chosen_failed_idxs])
    print("im_idxs =", im_idxs)
    # Move everything to numpy for plotting.
    images = images[im_idxs].detach().cpu().numpy()
    explanations = explanations[im_idxs].detach().cpu().numpy()
    attacks = attacks[im_idxs].detach().cpu().numpy()
    attacks_explanations = attacks_explanations[im_idxs].detach().cpu().numpy()
    predictions = predictions[im_idxs].detach().cpu().numpy()
    attacks_predictions = attacks_predictions[im_idxs].detach().cpu().numpy()
    labels = labels[im_idxs].detach().cpu().numpy()
    if images.shape != explanations.shape:
        print(images.shape, "!=", explanations.shape)
        raise ValueError
    # "pixelwise" shares one pixel selection; "imagewise" selects per image.
    selected_pxl_idxs = pxl_idxs if lrp_rob_method=="pixelwise" else pxl_idxs[im_idxs]
    images_rel = relevant_subset(images, selected_pxl_idxs, lrp_rob_method)
    attacks_rel = relevant_subset(attacks, selected_pxl_idxs, lrp_rob_method)
    explanations = relevant_subset(explanations, selected_pxl_idxs, lrp_rob_method)
    attacks_explanations = relevant_subset(attacks_explanations, selected_pxl_idxs, lrp_rob_method)
    # Mask the zeroed-out pixels so imshow leaves them transparent.
    images_rel = np.ma.masked_where(images_rel == 0., images_rel)
    attacks_rel = np.ma.masked_where(attacks_rel == 0., attacks_rel)
    cmap = plt.cm.get_cmap(relevance_cmap)
    # Tiny epsilons keep TwoSlopeNorm valid when all relevances share a sign.
    vmax_expl = max([max(explanations.flatten()), 0.000001])
    vmin_expl = min([min(explanations.flatten()), -0.000001])
    norm_expl = colors.TwoSlopeNorm(vcenter=0., vmax=vmax_expl, vmin=vmin_expl)
    vmax_atk_expl = max([max(attacks_explanations.flatten()), 0.000001])
    vmin_atk_expl = min([min(attacks_explanations.flatten()), -0.000001])
    norm_atk_expl = colors.TwoSlopeNorm(vcenter=0., vmax=vmax_atk_expl, vmin=vmin_atk_expl)
    rows = 4
    cols = min(len(explanations)+1, 7)
    fig, axes = plt.subplots(rows, cols, figsize=(10, 6), dpi=150)
    fig.tight_layout()
    fig.text(0.17, 0.97, "Successful attacks")
    fig.text(0.74, 0.97, "Failed attacks")
    # Column 3 is left empty as a visual separator between the two groups.
    for im_idx, axis_idx in enumerate([0,1,2,4,5,6]):
        # print(f"explanations min={explanations[im_idx].min()} max={explanations[im_idx].max()}", end="\t")
        # print(f"attacks explanations min={attacks_explanations[im_idx].min()} max={attacks_explanations[im_idx].max()}")
        image = np.squeeze(images[im_idx])
        image_rel = np.squeeze(images_rel[im_idx])
        expl = np.squeeze(explanations[im_idx])
        attack = np.squeeze(attacks[im_idx])
        attack_rel = np.squeeze(attacks_rel[im_idx])
        attack_expl = np.squeeze(attacks_explanations[im_idx])
        if len(image.shape) == 1:
            # 1-D inputs are rendered as single-row heatmaps.
            image = np.expand_dims(image, axis=0)
            image_rel = np.expand_dims(image_rel, axis=0)
            expl = np.expand_dims(expl, axis=0)
            attack = np.expand_dims(attack, axis=0)
            attack_rel = np.expand_dims(attack_rel, axis=0)
            attack_expl = np.expand_dims(attack_expl, axis=0)
        axes[0, axis_idx].imshow(image, cmap=images_cmap)
        axes[0, axis_idx].imshow(image_rel)
        axes[0, axis_idx].set_xlabel(f"label={labels[im_idx]}\nprediction={predictions[im_idx]}")
        expl = axes[1, axis_idx].imshow(expl, cmap=cmap, norm=norm_expl)
        axes[2, axis_idx].imshow(attack, cmap=images_cmap)
        axes[2, axis_idx].imshow(attack_rel)
        axes[2, axis_idx].set_xlabel(f"prediction={attacks_predictions[im_idx]}")
        atk_expl = axes[3, axis_idx].imshow(attack_expl, cmap=cmap, norm=norm_atk_expl)
    axes[0,0].set_ylabel("images")
    axes[1,0].set_ylabel("lrp(images)")
    axes[2,0].set_ylabel("im. attacks")
    axes[3,0].set_ylabel("lrp(attacks)")
    for idx in range(4):
        axes[idx,3].set_axis_off()
    # fig.subplots_adjust(right=0.9)
    cbar_ax = fig.add_axes([0.5, 0.56, 0.01, 0.15])
    cbar = fig.colorbar(expl, ax=axes[0, :].ravel().tolist(), cax=cbar_ax)
    cbar.set_label('Relevance', labelpad=-60)
    cbar_ax = fig.add_axes([0.5, 0.07, 0.01, 0.15])
    cbar = fig.colorbar(atk_expl, ax=axes[2, :].ravel().tolist(), cax=cbar_ax)
    cbar.set_label('Relevance', labelpad=-60)
    os.makedirs(savedir, exist_ok=True)
    print("\nSaving: ", os.path.join(savedir,filename+"_layeridx="+str(layer_idx)+".png"))
    plt.savefig(os.path.join(savedir,filename+"_layeridx="+str(layer_idx)+".png"))
def plot_vanishing_explanations(images, samples_explanations, n_samples_list, rule, savedir, filename,
                                layer_idx=-1):
    """Plot images whose LRP heatmaps vanish as the number of posterior samples grows.

    Top row: the chosen input images; each following row: the heatmap computed
    with the corresponding entry of `n_samples_list`. Columns are a random
    subset of the indices flagged by `compute_vanishing_norm_idxs`.

    Raises:
        ValueError: on a shape mismatch, or when fewer than two vanishing
        examples are available.
    """
    savedir = os.path.join(savedir, lrp_savedir(layer_idx))
    if images.shape != samples_explanations[0].shape:
        print(images.shape, "!=", samples_explanations[0].shape)
        raise ValueError
    vanishing_idxs=compute_vanishing_norm_idxs(samples_explanations, n_samples_list, norm="linfty")[0]
    if len(vanishing_idxs)<=1:
        raise ValueError("Not enough examples.")
    rows = min(len(n_samples_list), 5)+1
    cols = min(len(vanishing_idxs), 6)
    set_seed(0)
    chosen_idxs = np.random.choice(vanishing_idxs, cols)
    fig, axes = plt.subplots(rows, cols, figsize=(10, 6))
    fig.tight_layout()
    for col_idx in range(cols):
        cmap = plt.cm.get_cmap(relevance_cmap)
        # One color scale per column, spanning all sample counts for that image.
        vmax = max(samples_explanations[:, chosen_idxs[col_idx]].flatten())
        vmin = min(samples_explanations[:, chosen_idxs[col_idx]].flatten())
        norm = colors.TwoSlopeNorm(vcenter=0., vmax=vmax, vmin=vmin)
        for samples_idx, n_samples in enumerate(n_samples_list):
            image = np.squeeze(images[chosen_idxs[col_idx]])
            image = np.expand_dims(image, axis=0) if len(image.shape) == 1 else image
            expl = np.squeeze(samples_explanations[samples_idx, chosen_idxs[col_idx]])
            expl = np.expand_dims(expl, axis=0) if len(expl.shape) == 1 else expl
            axes[0, col_idx].imshow(image)
            im = axes[samples_idx+1, col_idx].imshow(expl, cmap=cmap, norm=norm)
    # fig.subplots_adjust(right=0.83)
    # cbar_ax = fig.add_axes([0.88, 0.12, 0.02, 0.6])
    # cbar = fig.colorbar(im, ax=axes[samples_idx+1, :].ravel().tolist(), cax=cbar_ax)
    # cbar.set_label('Relevance', labelpad=10)
    os.makedirs(savedir, exist_ok=True)
    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    plt.savefig(os.path.join(savedir, filename+".png"))
def plot_attacks_explanations_layers(images, explanations, attacks, attacks_explanations,
                                     predictions, attacks_predictions, successful_attacks_idxs, failed_attacks_idxs,
                                     labels, pxl_idxs, learnable_layers_idxs, lrp_rob_method, rule,
                                     savedir, filename, layer_idx=-1):
    """Plot one successful attack's LRP heatmaps across several network layers.

    Top row: the original image and its per-layer LRP maps; bottom row: the
    adversarial image and its per-layer LRP maps. Only the selected relevant
    pixels (per `pxl_idxs` and `lrp_rob_method`) are shown.

    NOTE(review): `explanations`/`attacks_explanations` appear to be torch
    tensors, yet slices are overwritten with the numpy output of
    `relevant_subset` and later `.flatten()`/min/max are mixed between the
    two libraries — confirm the dtypes survive this round-trip as intended.
    """
    n_layers = len(pxl_idxs)
    images_cmap='Greys'
    cmap = plt.cm.get_cmap(relevance_cmap)
    set_seed(2)
    # Pick a single successful attack to visualize.
    im_idx = np.random.choice(successful_attacks_idxs, 1)
    print("im_idx =", im_idx)
    image = images[im_idx].detach().cpu().numpy()
    attack = attacks[im_idx].detach().cpu().numpy()
    prediction = predictions[im_idx].argmax().detach().cpu().numpy()
    attack_prediction = attacks_predictions[im_idx].argmax().detach().cpu().numpy()
    label = labels[im_idx].item()
    explanations = explanations[:,im_idx]
    attack_explanations = attacks_explanations[:,im_idx]
    # Restrict each layer's heatmaps to that layer's selected pixels.
    for layer_idx in range(len(learnable_layers_idxs)):
        layer_pxl_idxs = pxl_idxs[layer_idx]
        selected_pxl_idxs = layer_pxl_idxs if lrp_rob_method=="pixelwise" else layer_pxl_idxs[im_idx]
        explanations[layer_idx] = relevant_subset(explanations[layer_idx], selected_pxl_idxs, lrp_rob_method)
        attack_explanations[layer_idx] = relevant_subset(attack_explanations[layer_idx], selected_pxl_idxs, lrp_rob_method)
        print("\nLayer idx=", layer_idx)
        print(f"explanations min={explanations[layer_idx].min()} max={explanations[layer_idx].max()}")
        print(f"attack_explanations min={attack_explanations[layer_idx].min()} max={attack_explanations[layer_idx].max()}")
    # Shared color scales across layers; epsilons keep TwoSlopeNorm valid
    # when all relevances share a sign.
    vmax_expl = max([max(explanations.flatten()), 0.000001])
    vmin_expl = min([min(explanations.flatten()), -0.000001])
    norm_expl = colors.TwoSlopeNorm(vcenter=0., vmax=vmax_expl, vmin=vmin_expl)
    vmax_atk_expl = max([max(attack_explanations.flatten()), 0.000001])
    vmin_atk_expl = min([min(attack_explanations.flatten()), -0.000001])
    norm_atk_expl = colors.TwoSlopeNorm(vcenter=0., vmax=vmax_atk_expl, vmin=vmin_atk_expl)
    # NOTE(review): this loop only keeps the LAST layer's pixel overlay for
    # the image/attack thumbnails — presumably intentional, but verify.
    for layer_idx in range(len(learnable_layers_idxs)):
        layer_pxl_idxs = pxl_idxs[layer_idx]
        selected_pxl_idxs = layer_pxl_idxs if lrp_rob_method=="pixelwise" else layer_pxl_idxs[im_idx]
        image_rel = relevant_subset(image, selected_pxl_idxs, lrp_rob_method)
        attack_rel = relevant_subset(attack, selected_pxl_idxs, lrp_rob_method)
        image_rel = np.ma.masked_where(image_rel == 0., image_rel)
        attack_rel = np.ma.masked_where(attack_rel == 0., attack_rel)
    rows = 2
    cols = n_layers+1
    fig, axes = plt.subplots(rows, cols, figsize=(7, 4), dpi=150)
    fig.tight_layout()
    fig.text(0.035, 0.63, f"Image", ha='center', rotation=90, weight='bold')
    fig.text(0.035, 0.25, f"Attack", ha='center', rotation=90, weight='bold')
    axes[0, 0].imshow(np.squeeze(image), cmap=images_cmap)
    axes[0, 0].imshow(np.squeeze(image_rel))
    fig.text(0.13, 0.5, f"Label={label}\nPrediction={prediction}", ha='center')
    axes[1, 0].imshow(np.squeeze(attack), cmap=images_cmap)
    axes[1, 0].imshow(np.squeeze(attack_rel))
    fig.text(0.13, 0.05, f"Prediction={attack_prediction}", ha='center')
    # Hard-coded x positions for the three layer captions.
    x_positions=[0.365, 0.58, 0.81]
    for col_idx, layer_idx in enumerate(learnable_layers_idxs):
        fig.text(x_positions[col_idx], 0.05, "Layer idx="+str(layer_idx), ha='center', weight='bold')
        expl = np.squeeze(explanations[col_idx])
        attack_expl = np.squeeze(attack_explanations[col_idx])
        expl = axes[0, col_idx+1].imshow(expl, cmap=cmap, norm=norm_expl)
        atk_expl = axes[1, col_idx+1].imshow(attack_expl, cmap=cmap, norm=norm_atk_expl)
    for col_idx in range(len(learnable_layers_idxs)+1):
        axes[0,col_idx].set_axis_off()
        axes[1,col_idx].set_axis_off()
    fig.subplots_adjust(right=0.88)
    cbar_ax = fig.add_axes([0.91, 0.61, 0.01, 0.32])
    cbar = fig.colorbar(expl, ax=axes[0, :].ravel().tolist(), cax=cbar_ax)
    cbar.set_label('LRP', labelpad=-53)
    cbar.outline.set_visible(False)
    cbar_ax = fig.add_axes([0.91, 0.12, 0.01, 0.32])
    cbar = fig.colorbar(atk_expl, ax=axes[1, :].ravel().tolist(), cax=cbar_ax)
    cbar.set_label('LRP', labelpad=-53)
    cbar.outline.set_visible(False)
    os.makedirs(savedir, exist_ok=True)
    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    plt.savefig(os.path.join(savedir,filename+"_layeridx="+str(layer_idx)+".png"))
def plot_heatmaps_det_vs_bay(image, det_attack, bay_attack, det_prediction, bay_prediction, label,
                            det_explanation, det_attack_explanation, bay_explanation, bay_attack_explanation,
                            lrp_rob_method, topk, rule, savedir, filename):
    """Plot a 2x3 comparison of deterministic (top row) vs Bayesian (bottom row)
    adversarial images and their LRP heatmaps, restricted to each heatmap's
    topk most relevant pixels.

    Columns: adversarial input x~, LRP of the clean input R(x), LRP of the
    adversarial input R(x~). The figure is saved to <savedir>/<filename>.png.

    NOTE(review): `image`, `det_prediction`, `bay_prediction`, `label` and
    `rule` are currently unused in the body; they are kept for interface
    compatibility with callers.
    """
    images_cmap = 'Greys'
    cmap = plt.cm.get_cmap(relevance_cmap)

    def _topk_subset(explanation):
        # Keep only the topk most relevant pixels of a heatmap, as a numpy array.
        pxl_idxs = select_informative_pixels(explanation, topk=topk)[1].detach().cpu().numpy()
        return relevant_subset(explanation.detach().cpu().numpy(), [pxl_idxs], lrp_rob_method)

    # The same selection was previously copy-pasted four times.
    det_explanation = _topk_subset(det_explanation)
    det_attack_explanation = _topk_subset(det_attack_explanation)
    bay_explanation = _topk_subset(bay_explanation)
    bay_attack_explanation = _topk_subset(bay_attack_explanation)

    # Shared diverging color scale centered at 0; the +/-1e-6 sentinels keep
    # TwoSlopeNorm valid (vmin < vcenter < vmax) when all values share a sign.
    vmax = max([max(det_explanation.flatten()), max(bay_explanation.flatten()),
                max(det_attack_explanation.flatten()), max(bay_attack_explanation.flatten()), 0.000001])
    vmin = min([min(det_explanation.flatten()), min(bay_explanation.flatten()),
                min(det_attack_explanation.flatten()), min(bay_attack_explanation.flatten()), -0.000001])
    norm = colors.TwoSlopeNorm(vcenter=0., vmax=vmax, vmin=vmin)

    fig, axes = plt.subplots(2, 3, figsize=(5.2, 3), dpi=150, sharex=True, sharey=True)
    fig.tight_layout()

    rc = {"font.family": "serif", "mathtext.fontset": "stix"}
    plt.rcParams.update(rc)
    plt.rcParams["font.serif"] = ["Times New Roman"] + plt.rcParams["font.serif"]

    fig.text(0.035, 0.55, "Deterministic", ha='center', rotation=90, weight='bold', size=12)
    fig.text(0.035, 0.15, "Bayesian", ha='center', rotation=90, weight='bold', size=12)

    # Column 1: adversarial inputs.
    fig.text(0.2, 0.93, r"$\tilde{x}$", ha='center', size=15)
    axes[0, 0].imshow(np.squeeze(det_attack), cmap=images_cmap)
    axes[1, 0].imshow(np.squeeze(bay_attack), cmap=images_cmap)

    # Column 2: LRP heatmaps of the clean input.
    fig.text(0.48, 0.93, r"$R(x)$", ha='center', size=15)
    expl = axes[0, 1].imshow(np.squeeze(det_explanation), cmap=cmap, norm=norm)
    expl = axes[1, 1].imshow(np.squeeze(bay_explanation), cmap=cmap, norm=norm)

    # Column 3: LRP heatmaps of the adversarial input.
    fig.text(0.76, 0.93, r"$R(\tilde{x})$", ha='center', size=15)
    atk_expl = axes[0, 2].imshow(np.squeeze(det_attack_explanation), cmap=cmap, norm=norm)
    axes[1, 2].imshow(np.squeeze(bay_attack_explanation), cmap=cmap, norm=norm)

    for col_idx in range(3):
        axes[0, col_idx].set_axis_off()
        axes[1, col_idx].set_axis_off()

    # Single shared colorbar on the right (all imshows use the same norm).
    fig.subplots_adjust(top=0.9)
    fig.subplots_adjust(right=0.87)
    cbar_ax = fig.add_axes([0.88, 0.2, 0.02, 0.6])
    cbar = fig.colorbar(atk_expl, ax=axes[1, :].ravel().tolist(), cax=cbar_ax)
    cbar.outline.set_visible(False)

    os.makedirs(savedir, exist_ok=True)
    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    plt.savefig(os.path.join(savedir, filename+".png"))
| 17,425 | 41.502439 | 123 | py |
BayesianRelevance | BayesianRelevance-master/src/plot/lrp_distributions.py | import os
import lrp
import copy
import torch
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
from scipy import stats
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from utils.savedir import *
from utils.seeding import set_seed
from utils.lrp import *
def significance_symbol(p):
    """Map a p-value to its conventional significance marker.

    Returns 'n.s.' for p > 0.05, then '*', '**', '***' for each successive
    decade down to 0.0001, and '****' below that.
    """
    thresholds = ((0.05, 'n.s.'), (0.01, '*'), (0.001, '**'), (0.0001, '***'))
    for cutoff, symbol in thresholds:
        if p > cutoff:
            return symbol
    return '****'
def stripplot_lrp_values(lrp_heatmaps_list, n_samples_list, savedir, filename, layer_idx=-1):
    """Strip-plot of all LRP heatmap components, one strip per posterior
    sample count.

    Args:
        lrp_heatmaps_list: sequence of heatmap arrays, aligned with `n_samples_list`.
        n_samples_list: number of posterior samples used for each entry.
        savedir, filename: figure is saved as <savedir>/<filename>.png.
        layer_idx: unused; kept for interface compatibility.
    """
    matplotlib.rc('font', **{'weight': 'bold', 'size': 12})
    fig, ax = plt.subplots(1, 1, figsize=(10, 5), dpi=150, facecolor='w', edgecolor='k')
    sns.set_style("darkgrid")

    # Flatten every heatmap and tag each component with its sample count.
    lrp_heatmaps_components = []
    plot_samples = []
    for samples_idx, n_samples in enumerate(n_samples_list):
        print("\nsamples = ", n_samples, end="\t")
        print(f"min = {lrp_heatmaps_list[samples_idx].min():.4f}", end="\t")
        print(f"max = {lrp_heatmaps_list[samples_idx].max():.4f}")
        flat_lrp_heatmap = np.array(lrp_heatmaps_list[samples_idx]).flatten()
        lrp_heatmaps_components.extend(flat_lrp_heatmap)
        plot_samples.extend(np.repeat(n_samples, len(flat_lrp_heatmap)))

    df = pd.DataFrame(data={"lrp_heatmaps": lrp_heatmaps_components, "samples": plot_samples})
    # NOTE(review): negative linewidth looks like a typo for 0.1; kept as-is
    # to preserve the current rendering — confirm intended.
    sns.stripplot(x="samples", y="lrp_heatmaps", data=df, linewidth=-0.1, ax=ax,
                  jitter=0.2, alpha=0.4, palette="gist_heat")
    ax.set_ylabel("")
    ax.set_xlabel("")
    fig.text(0.5, 0.01, "Samples involved in the expectations ($w \sim p(w|D)$)", ha='center')
    # Fixed doubled word in the axis label ("components components").
    fig.text(0.03, 0.5, r"LRP heatmaps components",
             va='center', rotation='vertical')
    os.makedirs(savedir, exist_ok=True)
    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    fig.savefig(os.path.join(savedir, filename+".png"))
def lrp_labels_distributions(lrp_heatmaps, labels, num_classes, n_samples_list, savedir, filename, topk=None,
                            layer_idx=-1):
    """Histogram LRP components per class label, overlaying the available
    posterior sample counts; saves one figure per class under
    <savedir>/labels_distributions/.

    Args:
        lrp_heatmaps: array indexed as [sample_setting, image, ...pixels...].
        labels: class label of each image.
        num_classes: number of classes (one output figure each).
        n_samples_list: posterior sample counts, first axis of `lrp_heatmaps`.
        topk: if given, restrict to the topk most informative pixels.
        layer_idx: unused; kept for interface compatibility.
    """
    savedir = os.path.join(savedir, "labels_distributions")
    os.makedirs(savedir, exist_ok=True)

    if topk is not None:
        flat_lrp_heatmaps, _ = select_informative_pixels(lrp_heatmaps, topk)
    else:
        # Collapse all spatial dims: (n_sample_settings, n_images, n_pixels).
        flat_lrp_heatmaps = lrp_heatmaps.reshape(*lrp_heatmaps.shape[:2], -1)

    ### dataframe
    lrp_list = []
    labels_list = []
    samples_list = []
    for samples_idx, n_samples in enumerate(n_samples_list):
        print("\nsamples = ", n_samples, end="\t")
        print(f"min = {flat_lrp_heatmaps[samples_idx].min():.4f}", end="\t")
        print(f"max = {flat_lrp_heatmaps[samples_idx].max():.4f}")
        for im_idx, label in enumerate(labels):
            lrp_heatmap = flat_lrp_heatmaps[samples_idx, im_idx, :]
            lrp_list.extend(lrp_heatmap)
            samples_list.extend(np.repeat(n_samples, len(lrp_heatmap)))
            labels_list.extend(np.repeat(label, len(lrp_heatmap)))
    df = pd.DataFrame(data={"lrp": lrp_list, "samples": samples_list, "label": labels_list})
    print(df.head())

    ### plot
    # Renamed from `colors` to avoid shadowing the module-level
    # `matplotlib.colors as colors` alias; cycled so more than three sample
    # settings no longer raise an IndexError.
    palette = ["teal", "skyblue", "red"]
    for label in range(num_classes):
        matplotlib.rc('font', **{'weight': 'bold', 'size': 12})
        fig, ax = plt.subplots(1, 1, figsize=(10, 5), dpi=150, facecolor='w', edgecolor='k')
        sns.set_style("darkgrid")
        for idx, n_samples in enumerate(n_samples_list):
            label_df = df.loc[df['label'] == label]
            label_df = label_df.loc[label_df['samples'] == n_samples]
            sns.distplot(label_df["lrp"], color=palette[idx % len(palette)],
                         label=f"{n_samples} samples", ax=ax, kde=False)
        ax.set_yscale('log')
        plt.legend()
        os.makedirs(savedir, exist_ok=True)
        print("\nSaving: ", os.path.join(savedir, filename+"_label="+str(label)+".png"))
        fig.savefig(os.path.join(savedir, filename+"_label="+str(label)+".png"))
        plt.close(fig)
def lrp_samples_distributions(lrp_heatmaps, labels, num_classes, n_samples_list, savedir,
                              filename, layer_idx=-1):
    """Per-sample-count histograms of LRP components, split by class label.

    Builds a long-format dataframe of all heatmap components, then saves one
    figure per entry of `n_samples_list` under <savedir>/samples_distributions/.
    `layer_idx` is unused and kept for interface compatibility.
    """
    savedir = os.path.join(savedir, "samples_distributions")
    os.makedirs(savedir, exist_ok=True)

    # Collapse spatial dims: (n_sample_settings, n_images, n_pixels).
    flat_maps = lrp_heatmaps.reshape(*lrp_heatmaps.shape[:2], -1)

    ### dataframe
    records = {"lrp": [], "samples": [], "label": []}
    for samp_pos, samp_count in enumerate(n_samples_list):
        print("\nsamples =", samp_count, end="\t")
        print(f"min = {flat_maps[samp_pos].min():.4f}", end=" \t")
        print(f"max = {flat_maps[samp_pos].max():.4f}")
        for px in range(flat_maps.shape[-1]):
            column = flat_maps[samp_pos, :, px]
            records["lrp"].extend(column)
            records["samples"].extend(np.repeat(samp_count, len(column)))
            records["label"].extend(labels)
    df = pd.DataFrame(data=records)

    ### plot
    for samp_pos, samp_count in enumerate(n_samples_list):
        subset = df.loc[df['samples'] == samp_count]
        sns.set_style("darkgrid")
        matplotlib.rc('font', **{'weight': 'bold', 'size': 12})
        fig, ax = plt.subplots(1, 1, figsize=(10, 5), dpi=150, facecolor='w', edgecolor='k')
        for label in range(num_classes):
            class_subset = subset.loc[subset['label'] == label]
            sns.distplot(class_subset["lrp"], label=f"class={label}", ax=ax, kde=False)
        ax.set_yscale('log')
        plt.legend()
        out_path = os.path.join(savedir, filename+"_samp="+str(samp_count)+".png")
        print("\nSaving: ", out_path)
        fig.savefig(out_path)
        plt.close(fig)
def lrp_pixels_distributions(lrp_heatmaps, labels, num_classes, n_samples, savedir, filename, topk=1,
                            layer_idx=-1):
    """For each image, plot the distribution of LRP values at its topk most
    informative pixels; one figure per image is saved under
    <savedir>/pixels_distributions/.

    NOTE(review): the accumulator lists persist across images, so each image's
    dataframe also contains rows from earlier images; rows sharing a pixel
    index across images are mixed in the per-pixel plots — confirm intended.
    `num_classes` and `layer_idx` are unused in the body.
    """
    savedir = os.path.join(savedir, "pixels_distributions")
    os.makedirs(savedir, exist_ok=True)
    ### dataframe
    lrp_list=[]
    labels_list=[]
    samples_list=[]
    pixel_idxs_list=[]
    for im_idx in range(len(lrp_heatmaps)):
        # Treat each image as a batch of size one for pixel selection.
        image_lrp_heatmaps = np.expand_dims(lrp_heatmaps[im_idx,:], axis=1)
        image_label = labels[im_idx]
        flat_lrp_heatmaps, chosen_pxl_idxs = select_informative_pixels(image_lrp_heatmaps, topk)
        for samples_idx in range(n_samples):
            for array_idx, pixel_idx in enumerate(chosen_pxl_idxs):
                pixel_lrp = flat_lrp_heatmaps[samples_idx, array_idx][0]
                lrp_list.append(pixel_lrp)
                # Tags every row with the constant n_samples, not samples_idx.
                samples_list.append(n_samples)
                pixel_idxs_list.append(pixel_idx)
                labels_list.append(image_label)
        df = pd.DataFrame(data={"lrp":lrp_list, "samples":samples_list,
                                "label":labels_list, "pixel_idx":pixel_idxs_list})
        ### plot
        # NOTE(review): with topk > 1, successive pixels of the same image
        # overwrite the same output file (filename keys on im_idx only).
        for array_idx, pixel_idx in enumerate(chosen_pxl_idxs):
            sns.set_style("darkgrid")
            matplotlib.rc('font', **{'weight': 'bold', 'size': 12})
            fig, ax = plt.subplots(1, 1, figsize=(10, 5), dpi=150, facecolor='w', edgecolor='k')
            pxl_df = df.loc[df['pixel_idx'] == pixel_idx]
            for samples_idx in range(n_samples):
                # "samples" column holds the constant n_samples, so this
                # filter selects all rows of pxl_df on each iteration.
                samp_df = pxl_df.loc[pxl_df['samples'] == n_samples]
                sns.distplot(samp_df["lrp"], ax=ax, kde=False)
            ax.set_yscale('log')
            print("\nSaving: ", os.path.join(savedir, filename+"_im_idx="+str(im_idx)+".png"))
            fig.savefig(os.path.join(savedir, filename+"_im_idx="+str(im_idx)+".png"))
            plt.close(fig)
def lrp_imagewise_robustness_distributions(det_lrp_robustness, bay_lrp_robustness, mode_lrp_robustness,
                                           det_successful_lrp_robustness, det_failed_lrp_robustness,
                                           bay_successful_lrp_robustness, bay_failed_lrp_robustness,
                                           mode_successful_lrp_robustness, mode_failed_lrp_robustness,
                                           n_samples_list, n_original_images, savedir, filename):
    """Print attack success/failure percentages and plot image-wise LRP
    robustness histograms for deterministic, Bayesian and mode networks.

    Produces two figures under `savedir`:
    <filename>_succ_vs_failed.png (successful vs failed attacks, one row per
    network/sample setting) and <filename>_all_images.png (all images).

    Each *_lrp_robustness argument holds one robustness value per image;
    bay_*/mode_* are indexed by the entries of `n_samples_list`, and the
    mode_* arrays carry an extra trailing entry used for the mode evaluation.
    """
    # Only the largest sample count is kept and plotted.
    n_samples_list=[n_samples_list[-1]]
    print("\n=== Percentage of successful/failed attacks ===")
    print("\ndeterministic attack:")
    perc_det_successful = 100*len(det_successful_lrp_robustness)/n_original_images
    perc_det_failed = 100*len(det_failed_lrp_robustness)/n_original_images
    print(f"det. eval\t {perc_det_successful} successful \t{perc_det_failed} failed")
    print("\nbayesian attack:")
    perc_bay_successful = []
    perc_bay_failed = []
    for idx, n_samples in enumerate(n_samples_list):
        perc_bay_successful.append(100*len(bay_successful_lrp_robustness[idx])/n_original_images)
        perc_bay_failed.append(100*len(bay_failed_lrp_robustness[idx])/n_original_images)
        print(f"bay. eval samp={n_samples}\t {perc_bay_successful[idx]} successful \t{perc_bay_failed[idx]} failed")
    print("\nmode attack:")
    perc_mode_successful = []
    perc_mode_failed = []
    for idx, n_samples in enumerate(n_samples_list):
        perc_mode_successful.append(100*len(mode_successful_lrp_robustness[idx])/n_original_images)
        perc_mode_failed.append(100*len(mode_failed_lrp_robustness[idx])/n_original_images)
        print(f"bay. eval samp={n_samples} \t {perc_mode_successful[idx]} successful \t{perc_mode_failed[idx]} failed")
    # Trailing entry of the mode_* arrays is the mode-network evaluation.
    perc_mode_successful.append(100*len(mode_successful_lrp_robustness[-1])/n_original_images)
    perc_mode_failed.append(100*len(mode_failed_lrp_robustness[-1])/n_original_images)
    print(f"mode eval \t{perc_mode_successful[-1]} successful \t{perc_mode_failed[-1]} failed")
    os.makedirs(savedir, exist_ok=True)
    # Discrete palettes: warm hues for the deterministic net, cool for Bayesian/mode.
    cmap = cm.get_cmap('rocket', 10)
    det_col = [matplotlib.colors.rgb2hex(cmap(i)) for i in range(cmap.N)]
    cmap = cm.get_cmap('crest', 10)
    bay_col = [matplotlib.colors.rgb2hex(cmap(i)) for i in range(cmap.N)]
    alpha=0.7
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'weight': 'bold', 'size': 10})
    ## Successful vs failed
    # Row 0: deterministic, row 1: mode eval, rows 2+: one per sample count.
    fig, ax = plt.subplots(2+len(n_samples_list), 2, figsize=(6, 6), sharex=True, dpi=150, facecolor='w', edgecolor='k')
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.1)
    ax[0,0].xaxis.set_label_position("top")
    ax[0,0].set_xlabel("Successful attacks", weight='bold', size=10)
    sns.distplot(det_successful_lrp_robustness, ax=ax[0,0],
                 label=f"det atk {perc_det_successful:.1f}%",
                 bins=10, kde=False, color=det_col[7], hist_kws=dict(alpha=alpha))
    sns.distplot(mode_successful_lrp_robustness[-1], bins=10, ax=ax[1,0],
                 label=f"mode atk {perc_mode_successful[-1]:.1f}%",
                 kde=False, color=bay_col[1], hist_kws=dict(alpha=alpha))
    for samp_idx, n_samples in enumerate(n_samples_list):
        sns.distplot(bay_successful_lrp_robustness[samp_idx], ax=ax[2+samp_idx,0],
                     label=f"bay atk {perc_bay_successful[samp_idx]:.1f}%", bins=10, kde=False,color=bay_col[7],
                     hist_kws=dict(alpha=alpha))
        sns.distplot(mode_successful_lrp_robustness[samp_idx], ax=ax[2+samp_idx,0],
                     label=f"mode atk {perc_mode_successful[samp_idx]:.1f}%", bins=10, kde=False,color=bay_col[1],
                     hist_kws=dict(alpha=alpha))
    ax[0,1].xaxis.set_label_position("top")
    ax[0,1].set_xlabel("Failed attacks", weight='bold', size=10)
    # print(mode_successful_lrp_robustness[-1])
    # print(mode_failed_lrp_robustness[-1])
    sns.distplot(det_failed_lrp_robustness, ax=ax[0,1], label=f"det atk {perc_det_failed:.1f}%",
                 bins=10, kde=False, color=det_col[7], hist_kws=dict(alpha=alpha))
    sns.distplot(mode_failed_lrp_robustness[-1], ax=ax[1,1], label=f"mode atk {perc_mode_failed[-1]:.1f}%",
                 bins=10, kde=False, color=bay_col[1], hist_kws=dict(alpha=alpha))
    for samp_idx, n_samples in enumerate(n_samples_list):
        sns.distplot(bay_failed_lrp_robustness[samp_idx], ax=ax[2+samp_idx,1],
                     label=f"bay atk {perc_bay_failed[samp_idx]:.1f}%", bins=10, kde=False, color=bay_col[7],
                     hist_kws=dict(alpha=alpha))
        sns.distplot(mode_failed_lrp_robustness[samp_idx], ax=ax[2+samp_idx,1],
                     label=f"mode atk {perc_mode_failed[samp_idx]:.1f}%", bins=10, kde=False,
                     color=bay_col[1],hist_kws=dict(alpha=alpha))
        ax[2+samp_idx,1].set_ylabel("Bay. Net.\nsamp="+str(n_samples), rotation=270, labelpad=10, weight='bold', size=10)
        ax[2+samp_idx,1].yaxis.set_label_position("right")
    ax[len(n_samples_list)+1,0].set_xlabel("LRP Robustness", weight='bold', size=9)
    ax[len(n_samples_list)+1,1].set_xlabel("LRP Robustness", weight='bold', size=9)
    ax[2,0].set_xlim(-0.01,1.1)
    ax[0,1].set_ylabel("Det. Net.", rotation=270, labelpad=10, weight='bold', size=10)
    ax[0,1].yaxis.set_label_position("right")
    ax[1,1].set_ylabel("Mode Net.", rotation=270, labelpad=10, weight='bold', size=10)
    ax[1,1].yaxis.set_label_position("right")
    for row_idx in range(2+len(n_samples_list)):
        ax[row_idx,0].legend()
        ax[row_idx,1].legend()
        ax[row_idx,0].legend(prop={'size': 8})
        ax[row_idx,1].legend(prop={'size': 8})
    print("\nSaving: ", os.path.join(savedir, filename+"_succ_vs_failed.png"))
    fig.savefig(os.path.join(savedir, filename+"_succ_vs_failed.png"))
    plt.close(fig)
    ### All images
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'weight': 'bold', 'size': 10})
    # NOTE(review): a fixed 3-row figure — the ax[2+samp_idx] indexing below
    # relies on n_samples_list having length 1 (forced at the top).
    fig, ax = plt.subplots(3, 1, figsize=(5, 5), sharex=True, dpi=150, facecolor='w', edgecolor='k')
    fig.tight_layout()
    sns.distplot(det_lrp_robustness, ax=ax[0], label=f"det atk",
                 bins=10, kde=False, color=det_col[7], hist_kws=dict(alpha=alpha))
    sns.distplot(mode_lrp_robustness[-1], ax=ax[1], label=f"mode atk",
                 bins=10, kde=False, color=bay_col[1], hist_kws=dict(alpha=alpha))
    for samp_idx, n_samples in enumerate(n_samples_list):
        sns.distplot(mode_lrp_robustness[samp_idx], ax=ax[2],
                     label=f"mode atk", bins=10, kde=False, color=bay_col[1],hist_kws=dict(alpha=alpha))
        sns.distplot(bay_lrp_robustness[samp_idx], ax=ax[2], label="bay atk",
                     bins=10, kde=False, color=bay_col[7], hist_kws=dict(alpha=alpha))
        ax[2+samp_idx].set_ylabel("Bay. Net.\nsamp="+str(n_samples), rotation=270, labelpad=10, weight='bold', size=10)
        ax[2+samp_idx].yaxis.set_label_position("right")
    ax[2].set_xlabel("LRP Robustness", weight='bold', size=9)
    ax[2].set_xlim(-0.01,1.1)
    ax[0].set_ylabel("Det. Net.", rotation=270, labelpad=10, weight='bold', size=10)
    ax[0].yaxis.set_label_position("right")
    ax[1].set_ylabel("Mode Net.", rotation=270, labelpad=10, weight='bold', size=10)
    ax[1].yaxis.set_label_position("right")
    for row_idx in range(2+len(n_samples_list)):
        ax[row_idx].legend()
        ax[row_idx].legend(prop={'size': 8})
    print("\nSaving: ", os.path.join(savedir, filename+"_all_images.png"))
    fig.savefig(os.path.join(savedir, filename+"_all_images.png"))
    plt.close(fig)
def lrp_robustness_scatterplot(adversarial_robustness, bayesian_adversarial_robustness,
                               lrp_robustness, bayesian_lrp_robustness,
                               n_samples_list, savedir, filename,
                               mode_adversarial_robustness=None, mode_lrp_robustness=None):
    """Scatter LRP robustness against softmax robustness, with marginal
    distributions for the degenerate cases (softmax robustness exactly 0 or 1)
    and for the softmax robustness itself.

    Deterministic (and optional mode) results go in the top half of the grid,
    Bayesian results per sample count in the bottom half. Saves
    <savedir>/<filename>.png. Robustness arguments are per-image numpy arrays
    (np.where is used on them below).
    """
    os.makedirs(savedir, exist_ok=True)
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'weight': 'bold', 'size': 8})
    # 4x3 grid: center column holds the scatterplots, side columns the
    # degenerate (0/1) marginals, top/bottom rows the softmax distributions.
    fig, ax = plt.subplots(4, 3, figsize=(10, 6),
                           gridspec_kw={'width_ratios': [1, 2, 1], 'height_ratios': [1, 3, 3, 1]},
                           sharex=True, sharey=False, dpi=150, facecolor='w', edgecolor='k')
    alpha=0.6
    ### scatterplot
    ax[2,1].set_xlabel('Softmax robustness')
    ax[1,0].set_ylabel('LRP robustness')
    ax[2,0].set_ylabel('LRP robustness')
    tot_num_images = len(adversarial_robustness)
    sns.scatterplot(x=adversarial_robustness, y=lrp_robustness, ax=ax[1,1], label='deterministic', alpha=alpha)
    if mode_adversarial_robustness is not None:
        sns.scatterplot(x=mode_adversarial_robustness, y=mode_lrp_robustness, label='posterior mode',
                        ax=ax[1,1], alpha=alpha)
    for idx, n_samples in enumerate(n_samples_list):
        sns.scatterplot(x=bayesian_adversarial_robustness[idx], y=bayesian_lrp_robustness[idx],
                        label='posterior samp='+str(n_samples), ax=ax[2,1], alpha=alpha)
    ### degenerate softmax robustness
    # Left column: images whose softmax robustness is exactly 0.
    ax[2,0].set_xlabel('Softmax rob. = 0.')
    im_idxs = np.where(adversarial_robustness==0.)[0]
    sns.distplot(lrp_robustness[im_idxs], ax=ax[1,0], vertical=True,
                 label=f"{100*len(lrp_robustness[im_idxs])/tot_num_images}% images")
    if mode_lrp_robustness is not None:
        im_idxs = np.where(mode_adversarial_robustness==0.)[0]
        sns.distplot(mode_lrp_robustness[im_idxs], vertical=True, color="darkorange", ax=ax[1,0],
                     label=f"{100*len(mode_lrp_robustness[im_idxs])/tot_num_images}% images")
    for sample_idx, n_samples in enumerate(n_samples_list):
        im_idxs = np.where(bayesian_adversarial_robustness[sample_idx]==0.)[0]
        sns.distplot(bayesian_lrp_robustness[sample_idx][im_idxs], vertical=True, ax=ax[2,0],
                     label=f"{100*len(bayesian_lrp_robustness[sample_idx][im_idxs])/tot_num_images}% images")
    # Right column: images whose softmax robustness is exactly 1.
    ax[2,2].set_xlabel('Softmax rob. = 1.')
    im_idxs = np.where(adversarial_robustness==1.)[0]
    sns.distplot(lrp_robustness[im_idxs], ax=ax[1,2], vertical=True,
                 label=f"{100*len(lrp_robustness[im_idxs])/tot_num_images}% images")
    if mode_lrp_robustness is not None:
        im_idxs = np.where(mode_adversarial_robustness==1.)[0]
        sns.distplot(mode_lrp_robustness[im_idxs], vertical=True, color="darkorange", ax=ax[1,2],
                     label=f"{100*len(mode_lrp_robustness[im_idxs])/tot_num_images}% images")
    for sample_idx, n_samples in enumerate(n_samples_list):
        im_idxs = np.where(bayesian_adversarial_robustness[sample_idx]==1.)[0]
        sns.distplot(bayesian_lrp_robustness[sample_idx][im_idxs], vertical=True, ax=ax[2,2],
                     label=f"{100*len(bayesian_lrp_robustness[sample_idx][im_idxs])/tot_num_images}% images")
    ### softmax robustness distributions
    sns.distplot(adversarial_robustness, ax=ax[0,1], vertical=False)
    if mode_adversarial_robustness is not None:
        sns.distplot(mode_adversarial_robustness, ax=ax[0,1], vertical=False)
    for idx, n_samples in enumerate(n_samples_list):
        sns.distplot(bayesian_adversarial_robustness[idx], ax=ax[3,1], vertical=False)
    # Robustness values live in [0, 1] on both axes.
    ax[1,0].set_ylim(0,1)
    ax[2,0].set_ylim(0,1)
    ax[1,2].set_ylim(0,1)
    ax[2,2].set_ylim(0,1)
    ax[1,1].set_xlim(0,1)
    ax[2,1].set_xlim(0,1)
    ax[1,0].legend()
    ax[2,0].legend()
    ax[1,2].legend()
    ax[2,2].legend()
    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    fig.savefig(os.path.join(savedir, filename+".png"))
    plt.close(fig)
def lrp_layers_robustness_distributions(
        det_lrp_robustness, det_successful_lrp_robustness, det_failed_lrp_robustness,
        adv_lrp_robustness, adv_successful_lrp_robustness, adv_failed_lrp_robustness,
        bay_lrp_robustness, bay_successful_lrp_robustness, bay_failed_lrp_robustness,
        n_samples_list, topk_list,
        n_original_images, learnable_layers_idxs, savedir, filename):
    """KDE plots of layer-wise LRP robustness for deterministic, adversarially
    trained and Bayesian networks; one subplot row per learnable layer.

    Robustness arrays are indexed [topk][layer] (and [samples] for bay_*).
    Saves <savedir>/<filename>.png.

    NOTE(review): the *_successful_*/*_failed_* arguments and
    `n_original_images` are not used in the body; kept for interface
    compatibility.
    """
    os.makedirs(savedir, exist_ok=True)
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'weight': 'bold', 'size': 10})
    det_col = plt.cm.get_cmap('flare', 100)(np.linspace(0, 1, 10))[6]
    adv_col = plt.cm.get_cmap('rocket', 100)(np.linspace(0, 1, 10))[7]
    bay_col = plt.cm.get_cmap('crest', 100)(np.linspace(0, 1, len(n_samples_list)+1))[1:]
    # Clip the KDE support slightly outside the [0, 1] robustness range.
    clip=(-0.1,1.1)
    alphas = np.linspace(0.6, 0.3, num=len(topk_list))
    # At most the first two topk settings are plotted.
    topk_list=topk_list[:2]
    if len(n_samples_list) > 1 and len(topk_list) > 1: # split cols
        alpha = alphas[0]
        fig, ax = plt.subplots(len(learnable_layers_idxs), len(topk_list), figsize=(5, 5), sharex=True, dpi=150,
                               facecolor='w', edgecolor='k', sharey=True)
        fig.tight_layout()
        fig.subplots_adjust(bottom=0.08)
        for row_idx, layer_idx in enumerate(learnable_layers_idxs):
            ax[row_idx,len(topk_list)-1].yaxis.set_label_position("right")
            ax[row_idx,len(topk_list)-1].set_ylabel("Layer idx="+str(layer_idx), rotation=270, labelpad=10,
                                                    weight='bold', size=9)
            for topk_idx, topk in enumerate(topk_list):
                sns.kdeplot(det_lrp_robustness[topk_idx][row_idx], ax=ax[row_idx, topk_idx], label=f"Det.",
                            color=det_col, alpha=alpha, fill=True, linewidth=0, clip=clip)
                sns.kdeplot(adv_lrp_robustness[topk_idx][row_idx], ax=ax[row_idx, topk_idx], label=f"Adv.",
                            color=adv_col, alpha=alpha, fill=True, linewidth=0, clip=clip)
                for samp_idx, n_samples in enumerate(n_samples_list):
                    sns.kdeplot(bay_lrp_robustness[topk_idx][row_idx][samp_idx], ax=ax[row_idx, topk_idx],
                                color=bay_col[samp_idx],
                                label=f"Bay. samp={n_samples}", alpha=alpha,
                                fill=True, linewidth=0, clip=clip)
                ax[0, topk_idx].set_title('topk='+str(topk),fontdict={'fontsize':10, 'fontweight':'bold'})
                ax[len(learnable_layers_idxs)-1, topk_idx].set_xlabel("LRP Robustness")
        plt.subplots_adjust(hspace=0.05)
        plt.subplots_adjust(wspace=0.05)
        ax[0,0].legend(prop={'size': 8})
        print("\nSaving: ", os.path.join(savedir, filename+".png"))
        fig.savefig(os.path.join(savedir, filename+".png"))
        plt.close(fig)
    else:
        # Single-column layout: all topk settings overlaid with fading alpha.
        fig, ax = plt.subplots(len(learnable_layers_idxs), 1, figsize=(5, 5), sharex=True, dpi=150,
                               facecolor='w', edgecolor='k')
        fig.tight_layout()
        fig.subplots_adjust(bottom=0.1)
        ax[len(learnable_layers_idxs)-1].set_xlabel("LRP Robustness", weight='bold')
        for row_idx, layer_idx in enumerate(learnable_layers_idxs):
            for topk_idx, topk in enumerate(topk_list):
                sns.kdeplot(det_lrp_robustness[topk_idx][row_idx], ax=ax[row_idx], label=f"Det. topk={topk}",
                            color=det_col, alpha=alphas[topk_idx], fill=True, linewidth=0, clip=clip)
                sns.kdeplot(adv_lrp_robustness[topk_idx][row_idx], ax=ax[row_idx], label=f"Adv. topk={topk}",
                            color=adv_col, alpha=alphas[topk_idx], fill=True, linewidth=0, clip=clip)
                for samp_idx, n_samples in enumerate(n_samples_list):
                    sns.kdeplot(bay_lrp_robustness[topk_idx][row_idx][samp_idx], ax=ax[row_idx], color=bay_col[samp_idx],
                                label=f"Bay. samp={n_samples} topk={topk}", alpha=alphas[topk_idx],
                                fill=True, linewidth=0, clip=clip)
            ax[row_idx].yaxis.set_label_position("right")
            ax[row_idx].set_ylabel("Layer idx="+str(layer_idx), rotation=270, labelpad=15, weight='bold', size=9)
        fig.subplots_adjust(top=0.9)
        # ax[0].legend(bbox_to_anchor=(0.7, 1.45))
        # ax[0].legend(loc="upper left")
        # plt.setp(ax[0].get_legend().get_texts(), fontsize='8')
        # plt.legend(frameon=False)
        # plt.subplots_adjust(hspace=0.1)
        print("\nSaving: ", os.path.join(savedir, filename+".png"))
        fig.savefig(os.path.join(savedir, filename+".png"))
        plt.close(fig)
def lrp_layers_robustness_differences(
        det_lrp_robustness, adv_lrp_robustness, bay_lrp_robustness,
        n_samples_list, topk_list,
        n_original_images, learnable_layers_idxs, savedir, filename):
    """KDE plots of per-image LRP robustness *differences* (adversarial minus
    deterministic, Bayesian minus deterministic), one subplot row per
    learnable layer.

    Robustness arrays are indexed [topk][layer] (and [samples] for bay_*).
    Saves <savedir>/<filename>_topk=<topk>.png.

    NOTE(review): `n_original_images` is unused in the body.
    """
    os.makedirs(savedir, exist_ok=True)
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'weight': 'bold', 'size': 8})
    det_col = plt.cm.get_cmap('flare', 100)(np.linspace(0, 1, 10))[6]
    adv_col = plt.cm.get_cmap('rocket', 100)(np.linspace(0, 1, 10))[7]
    bay_col = plt.cm.get_cmap('crest', 100)(np.linspace(0, 1, len(n_samples_list)+1))[1:]
    # Differences can be negative, so the KDE support is unclipped here.
    clip=(None, None)
    alpha = 0.6
    if len(n_samples_list) > 1 and len(topk_list) > 1: # split cols
        fig, ax = plt.subplots(len(learnable_layers_idxs), len(topk_list), figsize=(5, 5), sharex=True, dpi=150,
                               facecolor='w', edgecolor='k', sharey=True)
        fig.tight_layout()
        fig.subplots_adjust(bottom=0.08)
        for row_idx, layer_idx in enumerate(learnable_layers_idxs):
            ax[row_idx,len(topk_list)-1].yaxis.set_label_position("right")
            ax[row_idx,len(topk_list)-1].set_ylabel("Layer idx="+str(layer_idx), rotation=270, labelpad=10,
                                                    weight='bold', size=9)
            for topk_idx, topk in enumerate(topk_list):
                differences = [adv_rob-det_rob for adv_rob, det_rob
                               in zip(adv_lrp_robustness[topk_idx][row_idx], det_lrp_robustness[topk_idx][row_idx])]
                sns.kdeplot(differences, ax=ax[row_idx, topk_idx], label=f"Adv.",
                            color=adv_col, alpha=alpha, fill=True, linewidth=0, clip=clip)
                for samp_idx, n_samples in enumerate(n_samples_list):
                    differences = [bay_rob-det_rob for bay_rob, det_rob
                                   in zip(bay_lrp_robustness[topk_idx][row_idx][samp_idx], det_lrp_robustness[topk_idx][row_idx])]
                    sns.kdeplot(differences, ax=ax[row_idx, topk_idx], color=bay_col[samp_idx],
                                label=f"Bay. samp={n_samples} ", alpha=alpha,
                                fill=True, linewidth=0, clip=clip)
                ax[0, topk_idx].set_title('topk='+str(topk),fontdict={'fontsize':10, 'fontweight':'bold'})
                ax[len(learnable_layers_idxs)-1, topk_idx].set_xlabel("LRP Robustness diff.")
        plt.subplots_adjust(hspace=0.05)
        plt.subplots_adjust(wspace=0.05)
        ax[0,0].legend(prop={'size': 9})
    else:
        # Single-column layout for one sample count or one topk setting.
        fig, ax = plt.subplots(len(learnable_layers_idxs), 1, figsize=(3, 3.8), sharex=True, dpi=150,
                               facecolor='w', edgecolor='k', sharey=True)
        fig.tight_layout()
        fig.subplots_adjust(left=0.25)
        fig.subplots_adjust(bottom=0.1)
        for row_idx, layer_idx in enumerate(learnable_layers_idxs):
            ax[row_idx].yaxis.set_label_position("right")
            ax[row_idx].set_ylabel("Layer idx="+str(layer_idx), rotation=270, labelpad=10,
                                   weight='bold', size=9)
            for topk_idx, topk in enumerate(topk_list):
                differences = [adv_rob-det_rob for adv_rob, det_rob
                               in zip(adv_lrp_robustness[topk_idx][row_idx], det_lrp_robustness[topk_idx][row_idx])]
                sns.kdeplot(differences, ax=ax[row_idx], label=f"Adv - Det",
                            color=adv_col, alpha=alpha, fill=True, linewidth=0, clip=clip)
                for samp_idx, n_samples in enumerate(n_samples_list):
                    differences = [bay_rob-det_rob for bay_rob, det_rob
                                   in zip(bay_lrp_robustness[topk_idx][row_idx][samp_idx], det_lrp_robustness[topk_idx][row_idx])]
                    sns.kdeplot(differences, ax=ax[row_idx], color=bay_col[samp_idx],
                                label=f"Bay - Det\nsamp={n_samples}", alpha=alpha,
                                fill=True, linewidth=0, clip=clip)
        # ax[0].set_title('topk='+str(topk),fontdict={'fontsize':10, 'fontweight':'bold'})
        ax[len(learnable_layers_idxs)-1].set_xlabel("LRP Robustness diff.", size=9)
        plt.subplots_adjust(hspace=0.05)
        # plt.subplots_adjust(wspace=0.05)
        ax[0].legend(prop={'size':8}, bbox_to_anchor=(0.2, 1.1), framealpha=0.9)
    # NOTE(review): `topk` here is the leaked loop variable — the filename
    # suffix reflects only the *last* topk setting; confirm intended.
    filename+="_topk="+str(topk)
    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    fig.savefig(os.path.join(savedir, filename+".png"))
    plt.close(fig)
def lrp_layers_robustness_scatterplot(det_lrp_robustness, adv_lrp_robustness, bay_lrp_robustness,
                                      det_softmax_robustness, adv_softmax_robustness, bay_softmax_robustness,
                                      n_samples_list, topk_list,
                                      n_original_images, learnable_layers_idxs, savedir, filename,
                                      correlation='spearman'):
    """Scatter LRP robustness vs softmax robustness per learnable layer,
    annotating each series with its rank ('spearman') or linear ('pearson')
    correlation coefficient. Only the last topk setting is plotted.

    Robustness arrays are indexed [topk][layer] (and [samples] for bay_*).
    Saves <savedir>/<filename>.png. Raises NotImplementedError for an
    unknown `correlation` value.

    NOTE(review): `n_original_images` is unused in the body.
    """
    os.makedirs(savedir, exist_ok=True)
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'weight': 'bold', 'size': 9})
    fig, ax = plt.subplots(len(learnable_layers_idxs), 1, figsize=(3, 5), sharex=True, dpi=150, facecolor='w', edgecolor='k')
    fig.tight_layout()
    det_col = plt.cm.get_cmap('flare', 100)(np.linspace(0, 1, 10))[6]
    adv_col = plt.cm.get_cmap('rocket', 100)(np.linspace(0, 1, 10))[7]
    bay_col = plt.cm.get_cmap('crest', 100)(np.linspace(0, 1, len(n_samples_list)+1))[1:]
    alpha = 0.5
    # Only the last topk setting is shown.
    topk_idx=len(topk_list)-1
    topk=topk_list[-1]
    for layer_idx, layer in enumerate(learnable_layers_idxs):
        # Draw the legend only on the first subplot.
        legend = 'brief' if layer_idx==0 else False
        if correlation=='spearman':
            rho = stats.spearmanr(det_lrp_robustness[topk_idx][layer_idx],
                                  det_softmax_robustness[topk_idx][layer_idx])[0]
        elif correlation=='pearson':
            rho = np.corrcoef(det_lrp_robustness[topk_idx][layer_idx],
                              det_softmax_robustness[topk_idx][layer_idx])[0,1]
        else:
            raise NotImplementedError
        sns.scatterplot(det_lrp_robustness[topk_idx][layer_idx],
                        det_softmax_robustness[topk_idx][layer_idx],
                        ax=ax[layer_idx], label=r"Det $\rho$="+str(round(rho,2)),
                        alpha=alpha, linewidth=0,
                        legend=legend, color=det_col)
        if correlation=='spearman':
            rho = stats.spearmanr(adv_lrp_robustness[topk_idx][layer_idx],
                                  adv_softmax_robustness[topk_idx][layer_idx])[0]
        elif correlation=='pearson':
            rho = np.corrcoef(adv_lrp_robustness[topk_idx][layer_idx],
                              adv_softmax_robustness[topk_idx][layer_idx])[0,1]
        else:
            raise NotImplementedError
        sns.scatterplot(adv_lrp_robustness[topk_idx][layer_idx],
                        adv_softmax_robustness[topk_idx][layer_idx],
                        ax=ax[layer_idx], label=r"Adv $\rho$="+str(round(rho,2)),
                        alpha=alpha, linewidth=0,
                        legend=legend, color=adv_col)
        for samp_idx, n_samples in enumerate(n_samples_list):
            # NOTE(review): the first sample setting (samp_idx==0) is
            # deliberately skipped here — confirm intended.
            if samp_idx!=0:
                if correlation=='spearman':
                    rho = stats.spearmanr(bay_lrp_robustness[topk_idx][layer_idx][samp_idx],
                                          bay_softmax_robustness[topk_idx][layer_idx][samp_idx])[0]
                elif correlation=='pearson':
                    rho = np.corrcoef(bay_lrp_robustness[topk_idx][layer_idx][samp_idx],
                                      bay_softmax_robustness[topk_idx][layer_idx][samp_idx])[0,1]
                else:
                    raise NotImplementedError
                sns.scatterplot(bay_lrp_robustness[topk_idx][layer_idx][samp_idx],
                                bay_softmax_robustness[topk_idx][layer_idx][samp_idx],
                                ax=ax[layer_idx], label=r"Bay $\rho$="+str(round(rho,2))+"\nsamp="+str(n_samples),
                                alpha=alpha, linewidth=0,
                                legend=legend, color=bay_col[samp_idx])
        ax[layer_idx].yaxis.set_label_position("right")
        ax[layer_idx].set_ylabel("Layer idx="+str(layer), rotation=270, labelpad=10, size=9, weight='bold')
    plt.subplots_adjust(hspace=0.05)
    fig.subplots_adjust(left=0.35)
    fig.subplots_adjust(bottom=0.08)
    ax[len(learnable_layers_idxs)-1].set_xlabel("LRP Robustness", size=9)
    fig.text(0.18, 0.4, "Softmax Robustness", size=9, ha='center', weight='normal', rotation=90)
    # ax[0].legend(prop={'size': 8})
    ax[0].legend(loc='center left', prop={'size':9}, bbox_to_anchor=(-.62, 0.6), framealpha=0.9)
    # ax[int(len(learnable_layers_idxs)/2)].set_ylabel("Softmax robustness")
    # plt.legend(frameon=False)
    # plt.setp(ax[0].get_legend().get_texts(), fontsize='8')
    # plt.setp(ax[0,1].get_legend().get_texts(), fontsize='8')
    # plt.subplots_adjust(wspace=0.05)
    # ax[0,0].legend(bbox_to_anchor=(-0.5, 0.5))
    # ax[0,1].legend(bbox_to_anchor=(-1.6, -0.5))
    print("\nSaving: ", os.path.join(savedir, filename+".png"))
    fig.savefig(os.path.join(savedir, filename+".png"))
    plt.close(fig)
def lrp_layers_mode_robustness(det_lrp_robustness, bay_lrp_robustness, mode_lrp_robustness,
                    n_samples_list, topk_list, learnable_layers_idxs, savedir, filename):
    """Plot per-layer LRP robustness for deterministic, Bayesian and posterior-mode nets.

    Builds a long-format dataframe with one row per (image, layer, net/attack
    combination, topk) and draws one row of line plots per learnable layer:
    deterministic vs mode attacks in the left column, Bayesian nets in the right.

    Args:
        det_lrp_robustness:  indexed by [topk_idx, layer_idx] -> per-image scores.
        bay_lrp_robustness:  indexed by [topk_idx, layer_idx, samp_idx] -> per-image scores.
        mode_lrp_robustness: indexed by [topk_idx, layer_idx, samp_idx]; index -1
                             holds the mode-net/mode-attack scores.
        n_samples_list:      posterior sample counts matching samp_idx.
        topk_list:           numbers of most relevant pixels considered.
        learnable_layers_idxs: indexes of the compared learnable layers.
        savedir, filename:   output location for the png.
    """
    ### dataframe
    df_lrp_robustness = []
    df_layer_idx = []
    df_model_type = []
    df_atk_type = []
    df_n_samples = []
    df_topk = []

    for topk_idx, topk in enumerate(topk_list):
        for layer_idx, layer in enumerate(learnable_layers_idxs):

            # Deterministic net attacked deterministically.
            subset_lrp_rob = det_lrp_robustness[topk_idx, layer_idx]
            df_lrp_robustness.extend(subset_lrp_rob)
            df_layer_idx.extend(np.repeat(layer, len(subset_lrp_rob)))
            df_model_type.extend(np.repeat("deterministic", len(subset_lrp_rob)))
            df_atk_type.extend(np.repeat("deterministic", len(subset_lrp_rob)))
            df_n_samples.extend(np.repeat(None, len(subset_lrp_rob)))
            df_topk.extend(np.repeat(topk, len(subset_lrp_rob)))

            # Posterior-mode net attacked on the mode (stored at samp_idx == -1).
            subset_lrp_rob = mode_lrp_robustness[topk_idx, layer_idx, -1]
            df_lrp_robustness.extend(subset_lrp_rob)
            df_layer_idx.extend(np.repeat(layer, len(subset_lrp_rob)))
            df_model_type.extend(np.repeat("mode", len(subset_lrp_rob)))
            df_atk_type.extend(np.repeat("mode", len(subset_lrp_rob)))
            df_n_samples.extend(np.repeat(None, len(subset_lrp_rob)))
            df_topk.extend(np.repeat(topk, len(subset_lrp_rob)))

            for samp_idx, n_samples in enumerate(n_samples_list):

                # Bayesian net, Bayesian attack.  Fix: record the actual number
                # of posterior samples (previously None, which silently dropped
                # the sample-size information from the "Samp." column).
                subset_lrp_rob = bay_lrp_robustness[topk_idx, layer_idx, samp_idx]
                df_lrp_robustness.extend(subset_lrp_rob)
                df_layer_idx.extend(np.repeat(layer, len(subset_lrp_rob)))
                df_model_type.extend(np.repeat("bayesian", len(subset_lrp_rob)))
                df_atk_type.extend(np.repeat("bayesian", len(subset_lrp_rob)))
                df_n_samples.extend(np.repeat(n_samples, len(subset_lrp_rob)))
                df_topk.extend(np.repeat(topk, len(subset_lrp_rob)))

                # Bayesian net attacked on the mode.
                subset_lrp_rob = mode_lrp_robustness[topk_idx, layer_idx, samp_idx]
                df_lrp_robustness.extend(subset_lrp_rob)
                df_layer_idx.extend(np.repeat(layer, len(subset_lrp_rob)))
                df_model_type.extend(np.repeat("bayesian", len(subset_lrp_rob)))
                df_atk_type.extend(np.repeat("mode", len(subset_lrp_rob)))
                df_n_samples.extend(np.repeat(n_samples, len(subset_lrp_rob)))
                df_topk.extend(np.repeat(topk, len(subset_lrp_rob)))

    df = pd.DataFrame(data={"LRP Rob.":df_lrp_robustness, "Layer idx":df_layer_idx,
                            "Net.":df_model_type, "Atk.":df_atk_type, "Samp.":df_n_samples, "topk":df_topk})

    ### plot
    os.makedirs(savedir, exist_ok=True)
    sns.set_style("darkgrid")
    matplotlib.rc('font', **{'size': 10}) #'weight': 'bold',

    det_col = plt.cm.get_cmap('rocket', 100)(np.linspace(0, 1, 10))
    bay_col = plt.cm.get_cmap('crest', 100)(np.linspace(0, 1, len(n_samples_list)+1))[1:]

    fig, ax = plt.subplots(len(learnable_layers_idxs), 2, figsize=(8, 6), sharex=True, dpi=150,
                           facecolor='w', edgecolor='k', sharey=True)
    fig.tight_layout()
    fig.subplots_adjust(bottom=0.1)
    ax[0, 0].set_title('Deterministic Nets', fontdict={'fontsize':10, 'fontweight':'bold'})
    ax[0, 1].set_title('Bayesian Nets', fontdict={'fontsize':10, 'fontweight':'bold'})

    x="topk"
    y="LRP Rob."

    for row_idx, layer_idx in enumerate(learnable_layers_idxs):
        ax[row_idx,0].set_ylabel("LRP robustness")
        ax[row_idx,1].yaxis.set_label_position("right")
        ax[row_idx,1].set_ylabel("Layer idx="+str(layer_idx), rotation=270, labelpad=10, weight='bold', size=8)

        subset_df = df.loc[(df['Net.'] == "deterministic") & (df['Atk.'] == "deterministic")]
        sns.lineplot(data=subset_df, x=x, y=y, ax=ax[row_idx, 0], style="Atk.",
                     color=det_col[7], ci=100)

        subset_df = df.loc[(df['Net.'] == "mode") & (df['Atk.'] == "mode")]
        g = sns.lineplot(data=subset_df, x=x, y=y, dashes=[(2,2)], style="Atk.",
                         ax=ax[row_idx, 0], color=det_col[4], ci=100)

        # NOTE(review): the same Bayesian subsets are re-plotted for every
        # samp_idx (only the color changes); presumably this was meant to
        # filter on the "Samp." column -- confirm intended behaviour.
        for samp_idx, n_samples in enumerate(n_samples_list):
            subset_df = df.loc[(df['Net.'] == "bayesian") & (df['Atk.'] == "bayesian")]
            sns.lineplot(data=subset_df, x=x, y=y, style="Atk.",
                         ax=ax[row_idx, 1],
                         color=bay_col[samp_idx], ci=100)
            subset_df = df.loc[(df['Net.'] == "bayesian") & (df['Atk.'] == "mode")]
            g = sns.lineplot(data=subset_df, x=x, y=y, dashes=[(2,2)],
                             ax=ax[row_idx, 1], style="Atk.",
                             color=bay_col[samp_idx], ci=100)

        g.set_xticks(topk_list)
        g.set_xticklabels(topk_list)

    fig.subplots_adjust(top=0.9)

    print("\nSaving: ", os.path.join(savedir, filename+"_all_images.png"))
    fig.savefig(os.path.join(savedir, filename+"_all_images.png"))
    plt.close(fig)
| 39,083 | 43.718535 | 126 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/conv_cifar.py | import torch
import torch.nn.functional as F
from lrp.functional.conv_cifar import conv2d_cifar
class Conv2d(torch.nn.Conv2d):
    """torch.nn.Conv2d that can route its forward pass through an LRP rule.

    With explain=False it behaves exactly like torch.nn.Conv2d; with
    explain=True the convolution is dispatched to the autograd Function
    implementing the requested rule (see the conv2d_cifar registry).
    """

    def _conv_forward_explain(self, input, weight, conv2d_fn, **kwargs):
        """Run conv2d_fn with this layer's hyper-parameters, forwarding an optional pattern."""
        if self.padding_mode != 'zeros':
            # Non-zero padding modes are applied explicitly with F.pad, so the
            # convolution itself runs unpadded.  (0, 0) replaces the former
            # _pair(0), which was never imported and raised NameError here.
            return conv2d_fn(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            weight, self.bias, self.stride,
                            (0, 0), self.dilation, self.groups, **kwargs)
        p = kwargs.get('pattern')
        if p is not None:
            return conv2d_fn(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups, p)
        else: return conv2d_fn(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def forward(self, input, explain=False, rule="epsilon", **kwargs):
        """Standard convolution unless explain=True, in which case the LRP rule is used."""
        if not explain: return super(Conv2d, self).forward(input)
        return self._conv_forward_explain(input, self.weight, conv2d_cifar[rule], **kwargs)

    @classmethod
    def from_torch(cls, conv):
        """Create an LRP-aware copy of a plain torch.nn.Conv2d with identical weights."""
        in_channels = conv.weight.shape[1] * conv.groups
        bias = conv.bias is not None
        module = cls(in_channels, conv.out_channels, conv.kernel_size, conv.stride, conv.padding, conv.dilation, conv.groups, bias=bias, padding_mode=conv.padding_mode)
        module.load_state_dict(conv.state_dict())
        return module
| 1,390 | 42.46875 | 168 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/patterns.py | import torch
import torch.nn.functional as F
from .functional.utils import safe_divide
from tqdm import tqdm
__all__ = [
'fit_patternnet',
'fit_patternnet_positive',
]
"""
This implementation is based on the implementation from
https://github.com/albermax/innvestigate/blob/master/innvestigate/analyzer/pattern_based.py
"""
class RunningMean:
    """Streaming weighted mean of a tensor, folded in batch by batch."""

    def __init__(self, shape, device):
        self.value = torch.zeros(shape, device=device)  # current running mean
        self.count = 0                                  # total weight seen so far

    def update(self, mean, cnt):
        """Blend a batch mean computed over `cnt` items into the running mean."""
        total = self.count + cnt
        blend = safe_divide(cnt, total)
        self.value = self.value * (1 - blend) + mean * blend
        self.count += cnt
def _prod(module, x, y, mask):
    """Accumulate the per-layer statistics needed to fit PatternNet patterns.

    For a Linear layer the statistics are computed directly; for a Conv2d layer
    the input is unfolded into patches so the convolution can be treated as a
    batched linear map.

    Args:
        module: the torch.nn.Linear or torch.nn.Conv2d being analysed.
        x:      layer input.
        y:      layer output *without* bias.
        mask:   which outputs to include (e.g. y >= 0 for the positive regime).

    Returns:
        (cnt, cnt_all, x_mean, y_mean, xy_mean, W, W_fn) where W is the layer's
        weight as a 2-D matrix and W_fn maps a pattern matrix back to the
        layer's native weight shape.

    Raises:
        NotImplementedError: for any other layer type.
    """
    y_masked = y * mask
    if isinstance(module, torch.nn.Linear):
        W = module.weight # only for linear layers
        W_fn = lambda w: w.t() # only for linear layers
    elif isinstance(module, torch.nn.Conv2d):
        p1, p2 = module.padding
        s1, s2 = module.stride
        k1, k2 = module.kernel_size
        # F.pad pads the *last* dim first, i.e. (left, right, top, bottom):
        # width must receive p2 and height p1.  (The previous order swapped
        # them, which was wrong for asymmetric padding.)
        x = F.pad(x, (p2, p2, p1, p1)).unfold(2, k1, s1).unfold(3, k2, s2)
        bs, c, h, w, *_ = x.shape # [bs, c, h, w, kh, kw]
        x = x.permute(0, 2, 3, 1, 4, 5).contiguous()
        x = x.view(-1, c*k1*k2) # [ bs*h*w, c*kh*kw ]

        def reshape_output(o):
            # [ bs, c, h, w ] -> [ bs*h*w, out_c ]
            o = o.permute(0, 2, 3, 1).contiguous()
            return o.view(-1, module.out_channels)

        y_masked = reshape_output(y_masked)
        y = reshape_output(y)
        mask = reshape_output(mask)

        W = module.weight.view(module.out_channels, -1)
        def W_fn(w):
            # Reshape a [in, out] pattern back to the conv kernel layout.
            w = w.view(W.t().shape)
            w = w.t().contiguous()
            return w.view(module.weight.shape)
    else:
        # Fixed: the original raised the misspelled `NotImplmentedError`,
        # which itself triggered a NameError at runtime.
        raise NotImplementedError()

    cnt = mask.sum(axis=0, keepdims=True)
    cnt_all = torch.ones_like(mask).sum(axis=0, keepdims=True)

    x_mean = safe_divide(x.t() @ mask, cnt)
    xy_mean = safe_divide(x.t() @ y_masked, cnt)
    y_mean = safe_divide(y.sum(0), cnt_all)

    return cnt, cnt_all, x_mean, y_mean, xy_mean, W, W_fn
def _fit_pattern(model, train_loader, max_iter, device, mask_fn = lambda y: torch.ones_like(y)):
    """Fit one pattern per kernel layer of `model` from data statistics.

    Streams `train_loader` through the (iterable) model, accumulating running
    means of x, y and x*y per Linear/Conv2d layer, then derives each pattern as
    A = cov(x, y) / (w . cov(x, y)).  `mask_fn` selects which outputs enter the
    statistics (all of them by default; y >= 0 for PatternNet-positive).
    Stops after `max_iter` batches when `max_iter` is not None.
    """
    stats_x = []
    stats_y = []
    stats_xy = []
    weights = []
    # NOTE(review): cnt / cnt_all are never used below -- apparently leftovers.
    cnt = []
    cnt_all = []

    first = True
    for b, (x, _) in enumerate(tqdm(train_loader)):
        x = x.to(device)
        i = 0
        for m in model:
            y = m(x) # Note, this includes bias.
            # Non-kernel layers just transform the activations being fed forward.
            if not (isinstance(m, torch.nn.Linear) or isinstance(m, torch.nn.Conv2d)):
                x = y.clone()
                continue

            mask = mask_fn(y).float().to(device)
            # The pattern statistics are defined on the pre-bias output.
            if isinstance(m, torch.nn.Conv2d): y_wo_bias = y - m.bias.view(-1, 1, 1)
            else: y_wo_bias = y - m.bias

            cnt_, cnt_all_, x_, y_, xy_, w, w_fn = _prod(m, x, y_wo_bias, mask)

            # Lazily allocate one running-mean accumulator per kernel layer
            # on the first batch.
            if first:
                stats_x.append(RunningMean(x_.shape, device))
                stats_y.append(RunningMean(y_.shape, device)) # Use all y
                stats_xy.append(RunningMean(xy_.shape, device))
                weights.append((w, w_fn))

            stats_x[i].update(x_, cnt_)
            stats_y[i].update(y_.sum(0), cnt_all_)
            stats_xy[i].update(xy_, cnt_)

            x = y.clone()
            i += 1
        first = False
        if max_iter is not None and b+1 == max_iter: break

    def pattern(x_mean, y_mean, xy_mean, W2d):
        # A = cov(x, y) / (w . cov(x, y)), reshaped to the layer's kernel.
        x_ = x_mean.value
        y_ = y_mean.value
        xy_ = xy_mean.value
        W, w_fn = W2d

        ExEy = x_ * y_
        cov_xy = xy_ - ExEy # [in, out]

        w_cov_xy = torch.diag(W @ cov_xy) # [out,]

        A = safe_divide(cov_xy, w_cov_xy[None, :])
        A = w_fn(A) # Reshape to original kernel size

        return A

    patterns = [pattern(*vars) for vars in zip(stats_x, stats_y, stats_xy, weights)]

    return patterns
@torch.no_grad()
def fit_patternnet(model, train_loader, max_iter=None, device='cpu'):
    """Estimate PatternNet patterns from the full (unmasked) activation statistics."""
    return _fit_pattern(model=model, train_loader=train_loader,
                        max_iter=max_iter, device=device)
@torch.no_grad()
def fit_patternnet_positive(model, train_loader, max_iter=None, device='cpu'):
    """Estimate patterns from the positive-activation regime only (y >= 0 mask)."""
    def positive_mask(y):
        # only units that fired contribute to the statistics
        return y >= 0
    return _fit_pattern(model, train_loader, max_iter, device, positive_mask)
| 4,582 | 29.758389 | 96 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/maxpool.py | import torch
from lrp.functional import maxpool2d
class MaxPool2d(torch.nn.MaxPool2d):
    """torch.nn.MaxPool2d that can dispatch to an LRP rule when explain=True."""

    def forward(self, input, explain=False, rule="epsilon", **kwargs):
        if explain:
            # Route through the functional registry for the requested rule.
            return maxpool2d[rule](input, self.kernel_size, self.stride, self.padding)
        return super().forward(input)
| 311 | 38 | 82 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/sequential.py | import torch
from lrp.linear import Linear
from lrp.conv import Conv2d
from lrp.maxpool import MaxPool2d
from lrp.functional.utils import normalize
def grad_decorator_fn(module):
    """Build a gradient hook that normalizes relevance flowing through `module`.

    Currently not used but can be used for debugging purposes.
    """
    def normalize_grad(x):
        return normalize(x)
    return normalize_grad
# Module-name prefixes (first four characters of str(module), lower-cased)
# whose relevance should NOT be normalized by the pattern rules.
avoid_normalization_on = ['relu', 'maxp']
def do_normalization(rule, module):
    """Decide whether relevance through `module` should be normalized for `rule`.

    Only the pattern-based rules normalize, and activations/pooling layers
    (see avoid_normalization_on) are always exempt.
    """
    if "pattern" not in rule.lower():
        return False
    return str(module)[:4].lower() not in avoid_normalization_on
def is_kernel_layer(module):
    """True for layers with learnable kernels (the LRP Conv2d / Linear wrappers)."""
    return isinstance(module, (Conv2d, Linear))
def is_rule_specific_layer(module):
    """True for layers that implement their own dedicated LRP rule."""
    return isinstance(module, (MaxPool2d,))
class Sequential(torch.nn.Sequential):
    """torch.nn.Sequential that can propagate relevance with a chosen LRP rule.

    With explain=False this is a plain Sequential.  With explain=True, kernel
    layers run their LRP-aware forward (consuming one pattern per kernel layer
    when `pattern` is given), rule-specific layers use their dedicated rule,
    and every other layer falls back to its plain forward (gradient behaviour).
    For pattern-based rules, gradient hooks normalize the relevance around each
    eligible layer.
    """

    def forward(self, input, explain=False, rule="epsilon", pattern=None, *args, **kwargs):
        if not explain:
            return super(Sequential, self).forward(input)

        # Copy so the caller can reuse the pattern list after this call.
        if pattern is not None:
            pattern = list(pattern)

        # (Removed a `first` flag that was set but never read.)
        for module in self:
            if do_normalization(rule, module):
                input.register_hook(grad_decorator_fn(module))

            if is_kernel_layer(module):
                P = None
                if pattern is not None:
                    P = pattern.pop(0)
                input = module.forward(input, explain=True, rule=rule, pattern=P)
            elif is_rule_specific_layer(module):
                input = module.forward(input, explain=True, rule=rule)
            else:
                # Use gradient as default for remaining layer types.
                input = module(input)

            if do_normalization(rule, module):
                input.register_hook(grad_decorator_fn(module))

        return input
| 1,789 | 30.403509 | 91 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/linear.py | import torch
from lrp.functional import linear
class Linear(torch.nn.Linear):
    """torch.nn.Linear that can route its forward pass through an LRP rule."""

    def forward(self, input, explain=False, rule="epsilon", **kwargs):
        if not explain:
            return super().forward(input)
        pattern = kwargs.get('pattern')
        if pattern is None:
            return linear[rule](input, self.weight, self.bias)
        return linear[rule](input, self.weight, self.bias, pattern)

    @classmethod
    def from_torch(cls, lin):
        """Create an LRP-aware copy of a plain torch.nn.Linear with identical weights."""
        module = cls(in_features=lin.in_features, out_features=lin.out_features,
                     bias=lin.bias is not None)
        module.load_state_dict(lin.state_dict())
        return module
| 644 | 32.947368 | 91 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/converter.py | import torch
from .conv import Conv2d
from .linear import Linear
from .sequential import Sequential
# Maps torch layer class names to their LRP-aware replacements (used by convert_vgg).
conversion_table = {
    'Linear': Linear,
    'Conv2d': Conv2d
}
# # # # # Convert torch.models.vggxx to lrp model
def convert_vgg(module, modules=None):
    """Recursively convert a torchvision VGG-style model into an LRP Sequential.

    Walks the module tree, replacing Linear/Conv2d layers via conversion_table
    and copying everything else.  `modules` is the in-place accumulator used by
    the recursion; only the top-level call (modules=None) returns the result.
    """
    # First time
    if modules is None:
        modules = []
        for m in module.children():
            convert_vgg(m, modules=modules)

            # Vgg model has a flatten, which is not represented as a module
            # so this loop doesn't pick it up.
            # This is a hack to make things work.
            if isinstance(m, torch.nn.AdaptiveAvgPool2d):
                modules.append(torch.nn.Flatten())

        sequential = Sequential(*modules)
        return sequential

    # Recursion
    if isinstance(module, torch.nn.Sequential):
        for m in module.children():
            convert_vgg(m, modules=modules)

    elif isinstance(module, torch.nn.Linear) or isinstance(module, torch.nn.Conv2d):
        class_name = module.__class__.__name__
        lrp_module = conversion_table[class_name].from_torch(module)
        modules.append(lrp_module)

    # maxpool is handled with gradient for the moment
    elif isinstance(module, torch.nn.ReLU):
        # avoid inplace operations. They might ruin PatternNet pattern
        # computations
        modules.append(torch.nn.ReLU())
    else:
        modules.append(module)
| 1,436 | 30.23913 | 84 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/conv.py | import torch
import torch.nn.functional as F
from lrp.functional import conv2d
class Conv2d(torch.nn.Conv2d):
    """torch.nn.Conv2d that can route its forward pass through an LRP rule.

    With explain=False it behaves exactly like torch.nn.Conv2d; with
    explain=True the convolution is dispatched to the autograd Function
    implementing the requested rule (see the conv2d registry).
    """

    def _conv_forward_explain(self, input, weight, conv2d_fn, **kwargs):
        """Run conv2d_fn with this layer's hyper-parameters, forwarding an optional pattern."""
        if self.padding_mode != 'zeros':
            # Non-zero padding modes are applied explicitly with F.pad, so the
            # convolution itself runs unpadded.  (0, 0) replaces the former
            # _pair(0), which was never imported and raised NameError here.
            return conv2d_fn(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
                            weight, self.bias, self.stride,
                            (0, 0), self.dilation, self.groups, **kwargs)
        p = kwargs.get('pattern')
        if p is not None:
            return conv2d_fn(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups, p)
        else: return conv2d_fn(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)

    def forward(self, input, explain=False, rule="epsilon", **kwargs):
        """Standard convolution unless explain=True, in which case the LRP rule is used."""
        if not explain: return super(Conv2d, self).forward(input)
        return self._conv_forward_explain(input, self.weight, conv2d[rule], **kwargs)

    @classmethod
    def from_torch(cls, conv):
        """Create an LRP-aware copy of a plain torch.nn.Conv2d with identical weights."""
        in_channels = conv.weight.shape[1] * conv.groups
        bias = conv.bias is not None
        module = cls(in_channels, conv.out_channels, conv.kernel_size, conv.stride, conv.padding, conv.dilation, conv.groups, bias=bias, padding_mode=conv.padding_mode)
        module.load_state_dict(conv.state_dict())
        return module
| 1,367 | 41.75 | 168 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/functional/conv_cifar.py | import torch
import torch.nn.functional as F
from torch.autograd import Function
from .utils import identity_fn, gamma_fn, add_epsilon_fn, normalize
def _forward_rho(rho, incr, ctx, input, weight, bias, stride, padding, dilation, groups):
    """Plain conv2d forward that stashes everything the LRP backward needs on ctx.

    `rho` transforms the weights and `incr` stabilises the denominator in the
    backward pass; both are stored untouched here.
    """
    ctx.save_for_backward(input, weight, bias)
    ctx.rho, ctx.incr = rho, incr
    ctx.stride, ctx.padding = stride, padding
    ctx.dilation, ctx.groups = dilation, groups
    return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
def _backward_rho(ctx, relevance_output):
    """Propagate relevance through the conv according to the (rho, incr) rule.

    Implements R_i = x_i * sum_j w_ij * R_j / z_j: the rule-modified forward
    activations form the denominator and a transposed convolution redistributes
    the normalized relevance onto the input.
    """
    input, weight, bias = ctx.saved_tensors
    weight, bias = ctx.rho(weight, bias)
    Z = ctx.incr(F.conv2d(input, weight, bias, ctx.stride, ctx.padding, ctx.dilation, ctx.groups))

    relevance_output = relevance_output / Z
    # Use the convolution's own padding instead of the former hard-coded
    # padding=1, which only matched 3x3/pad-1 layers.  (Still assumes stride 1:
    # conv_transpose2d would need stride/output_padding handling otherwise.)
    relevance_input = F.conv_transpose2d(relevance_output, weight, None, padding=ctx.padding)
    relevance_input = relevance_input * input

    return relevance_input, None, None, None, None, None, None,
class Conv2DEpsilon(Function):
    """Conv2d LRP-epsilon rule: plain weights, sign-aware epsilon stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        stabilizer = add_epsilon_fn(1e-1)
        return _forward_rho(identity_fn, stabilizer, ctx, input, weight, bias,
                            stride, padding, dilation, groups)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
class Conv2DGamma(Function):
    """Conv2d LRP-gamma rule: positively-boosted weights, tiny stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        rho = gamma_fn(0.1)
        return _forward_rho(rho, add_epsilon_fn(1e-10), ctx, input, weight, bias,
                            stride, padding, dilation, groups)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
class Conv2DGammaEpsilon(Function):
    """Conv2d LRP gamma+epsilon rule: boosted weights and a real epsilon stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        rho = gamma_fn(0.1)
        return _forward_rho(rho, add_epsilon_fn(1e-1), ctx, input, weight, bias,
                            stride, padding, dilation, groups)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
def _conv_alpha_beta_forward(ctx, input, weight, bias, stride, padding, dilation, groups, **kwargs):
    """Plain conv2d forward; saves input, weight, output and bias for the alpha-beta backward."""
    out = F.conv2d(input, weight, bias, stride, padding, dilation, groups)
    ctx.save_for_backward(input, weight, out, bias)
    return out
def _conv_alpha_beta_backward(alpha, beta, ctx, relevance_output):
    """Alpha-beta LRP backward for conv layers: weights positive contributions by
    `alpha` and negative ones by `beta`.

    NOTE(review): the inner convolutions hard-code stride=1, padding=1, so this
    is only valid for 3x3/pad-1/stride-1 convs (the CIFAR architecture) -- the
    forward above does not save the actual stride/padding on ctx.
    """
    input, weights, Z, bias = ctx.saved_tensors
    sel = weights > 0
    zeros = torch.zeros_like(weights)

    # Split weights and inputs by sign so positive/negative pre-activations
    # can be recombined with different factors.
    weights_pos = torch.where(sel, weights, zeros)
    weights_neg = torch.where(~sel, weights, zeros)

    input_pos = torch.where(input > 0, input, torch.zeros_like(input))
    input_neg = torch.where(input <= 0, input, torch.zeros_like(input))

    def f(X1, X2, W1, W2):
        # Same-sign contributions: (X1, W1) and (X2, W2) both yield positive
        # (or both negative) products; 1e-6 guards the zero denominators.
        Z1 = F.conv2d(X1, W1, bias=None, stride=1, padding=1)
        Z2 = F.conv2d(X2, W2, bias=None, stride=1, padding=1)
        Z = Z1 + Z2

        rel_out = relevance_output / (Z + (Z==0).float()* 1e-6)

        t1 = F.conv_transpose2d(rel_out, W1, bias=None, padding=1)
        t2 = F.conv_transpose2d(rel_out, W2, bias=None, padding=1)

        r1 = t1 * X1
        r2 = t2 * X2

        return r1 + r2
    pos_rel = f(input_pos, input_neg, weights_pos, weights_neg)
    neg_rel = f(input_neg, input_pos, weights_pos, weights_neg)

    return pos_rel * alpha - neg_rel * beta, None, None, None, None, None, None
class Conv2DAlpha1Beta0(Function):
    """Conv2d alpha1-beta0 rule: only positive contributions propagate relevance."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        return _conv_alpha_beta_forward(ctx, input, weight, bias, stride, padding, dilation, groups, **kwargs)

    @staticmethod
    def backward(ctx, relevance_output):
        return _conv_alpha_beta_backward(1., 0., ctx, relevance_output)
class Conv2DAlpha2Beta1(Function):
    """Conv2d alpha2-beta1 rule: positive contributions doubled, negatives subtracted once."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        return _conv_alpha_beta_forward(ctx, input, weight, bias, stride, padding, dilation, groups, **kwargs)

    @staticmethod
    def backward(ctx, relevance_output):
        return _conv_alpha_beta_backward(2., 1., ctx, relevance_output)
def _pattern_forward(attribution, ctx, input, weight, bias, stride, padding, dilation, groups, pattern):
    """Plain conv2d forward; remembers input/weight/pattern and the attribution flag."""
    out = F.conv2d(input, weight, bias, stride, padding, dilation, groups)
    ctx.save_for_backward(input, weight, pattern)
    ctx.stride, ctx.padding = stride, padding
    ctx.attribution = attribution
    return out
def _pattern_backward(ctx, relevance_output):
    """Backpropagate relevance through the stored pattern (PatternNet / -Attribution)."""
    input, weight, P = ctx.saved_tensors
    if ctx.attribution:
        # PatternAttribution scales the signal direction by the weights.
        P = P * weight
    relevance_input = F.conv_transpose2d(relevance_output, P, padding=ctx.padding, stride=ctx.stride)
    return relevance_input, None, None, None, None, None, None, None
class Conv2DPatternAttribution(Function):
    """Conv2d PatternAttribution: backward uses pattern * weight."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, pattern=None):
        return _pattern_forward(True, ctx, input, weight, bias, stride, padding, dilation, groups, pattern)

    @staticmethod
    def backward(ctx, relevance_output):
        return _pattern_backward(ctx, relevance_output)
class Conv2DPatternNet(Function):
    """Conv2d PatternNet: backward uses the raw pattern."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, pattern=None):
        return _pattern_forward(False, ctx, input, weight, bias, stride, padding, dilation, groups, pattern)

    @staticmethod
    def backward(ctx, relevance_output):
        return _pattern_backward(ctx, relevance_output)
# Registry mapping LRP rule names to the conv implementation used on CIFAR nets.
conv2d_cifar = {
    "gradient": F.conv2d,
    "epsilon": Conv2DEpsilon.apply,
    "gamma": Conv2DGamma.apply,
    "gamma+epsilon": Conv2DGammaEpsilon.apply,
    "alpha1beta0": Conv2DAlpha1Beta0.apply,
    "alpha2beta1": Conv2DAlpha2Beta1.apply,
    "patternattribution": Conv2DPatternAttribution.apply,
    "patternnet": Conv2DPatternNet.apply,
}
| 6,354 | 37.98773 | 126 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/functional/maxpool.py | import torch
import torch.nn.functional as F
from torch.autograd import Function
class MaxPooling2d(Function):
    """LRP rule for max pooling that redistributes relevance via average pooling.

    NOTE(review): this Function is not wired into the `maxpool2d` registry in
    this file (all rules map to plain F.max_pool2d); kept for experimentation.
    """

    @staticmethod
    def forward(ctx, input, kernel_size=2, stride=None, padding=0):
        ctx.kernel_size = kernel_size
        ctx.stride = stride
        ctx.padding = padding
        ctx.save_for_backward(input)
        return F.max_pool2d(input, kernel_size=kernel_size, stride=stride, padding=padding)

    @staticmethod
    def backward(ctx, relevance_output):
        input, *_ = ctx.saved_tensors
        # Gradient tracking is disabled inside backward and torch.autograd.grad
        # returns a tuple: re-enable grad on a fresh leaf and unpack the single
        # gradient (the original code crashed on both counts).
        with torch.enable_grad():
            input = input.detach().requires_grad_(True)
            Z = F.avg_pool2d(input, kernel_size=ctx.kernel_size, stride=ctx.stride, padding=ctx.padding) + 1e-10
            relevance_input, = torch.autograd.grad(Z, input, relevance_output / Z)
        relevance_input = relevance_input * input
        return relevance_input, None, None, None
# Registry mapping LRP rule names to the max-pool implementation.  All rules
# currently fall back to plain max pooling (winner-take-all); the custom
# MaxPooling2d.apply redistribution is left commented out.
maxpool2d = {
    "gradient": F.max_pool2d,
    "epsilon": F.max_pool2d,# MaxPooling2d.apply,
    "gamma": F.max_pool2d,# MaxPooling2d.apply,
    "gamma+epsilon": F.max_pool2d,# MaxPooling2d.apply,
    "alpha1beta0": F.max_pool2d,# MaxPooling2d.apply,
    "alpha2beta1": F.max_pool2d,# MaxPooling2d.apply,
    "patternattribution": F.max_pool2d,# MaxPooling2d.apply,
    "patternnet": F.max_pool2d,# MaxPooling2d.apply,
}
| 1,398 | 36.810811 | 108 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/functional/utils.py | import torch
# # # rhos
# Default rho: leave weights and bias untouched (used by the epsilon rules).
identity_fn = lambda w, b: (w, b)
def gamma_fn(gamma):
    """Return a rho that boosts positive weights (and biases), controlled by `gamma`.

    NOTE(review): this computes w + gamma * w * relu(w) -- a quadratic boost --
    whereas the standard LRP-gamma rule is w + gamma * relu(w).  Confirm the
    extra multiplication by w is intended.
    """
    def _gamma_fn(w, b):
        w = w + w * torch.max(torch.tensor(0., device=w.device), w) * gamma
        if b is not None: b = b + b * torch.max(torch.tensor(0., device=b.device), b) * gamma
        return w, b
    return _gamma_fn
# # # incrs
# Stabiliser factory: adds +e to positive entries and -e to non-positive ones,
# pushing denominators away from zero while preserving their sign.
add_epsilon_fn = lambda e: lambda x: x + ((x > 0).float()*2-1) * e
# # # Other stuff
def safe_divide(a, b):
    """Element-wise a / b where zero entries of b are treated as one."""
    denominator = b + (b == 0).float()
    return a / denominator
def normalize(x):
    """Scale each sample of a >2-D tensor into [-1, 1] by its max absolute value.

    2-D tensors are returned untouched; this mirrors `innvestigate`
    (https://github.com/albermax/innvestigate/blob/1ed38a377262236981090bb0989d2e1a6892a0b1/innvestigate/layers.py#L321).
    """
    n_dim = len(x.shape)
    if n_dim == 2:
        return x
    # Per-sample max |x|, reshaped to broadcast over all trailing dims.
    peak = torch.abs(x.view(x.shape[0], -1)).max(axis=1)[0]
    peak = peak.view(x.shape[0], *([1] * (n_dim - 1)))
    x = safe_divide(x, peak)
    return x.clamp(-1, 1)
| 981 | 24.842105 | 120 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/functional/linear.py | import torch
import torch.nn.functional as F
from torch.autograd import Function
from .utils import identity_fn, gamma_fn, add_epsilon_fn, normalize
def _forward_rho(rho, incr, ctx, input, weight, bias):
    """Plain linear forward; stashes tensors and the rule (rho, incr) on ctx."""
    ctx.save_for_backward(input, weight, bias)
    ctx.rho, ctx.incr = rho, incr
    return F.linear(input, weight, bias)
def _backward_rho(ctx, relevance_output):
    """Relevance backward for linear layers: R_i = x_i * sum_j w_ij * R_j / z_j."""
    input, weight, bias = ctx.saved_tensors
    weight, bias = ctx.rho(weight, bias)

    denom = ctx.incr(F.linear(input, weight, bias))
    scaled = relevance_output / denom
    relevance_input = F.linear(scaled, weight.t(), bias=None) * input
    return relevance_input, None, None
class LinearEpsilon(Function):
    """Linear LRP-epsilon rule: plain weights, sign-aware epsilon stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None):
        # TODO: make a better way of choosing epsilon
        stabilizer = add_epsilon_fn(0.1)
        return _forward_rho(identity_fn, stabilizer, ctx, input, weight, bias)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
class LinearGamma(Function):
    """Linear LRP-gamma rule: positively-boosted weights, tiny stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None):
        rho = gamma_fn(0.1)
        return _forward_rho(rho, add_epsilon_fn(1e-10), ctx, input, weight, bias)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
class LinearGammaEpsilon(Function):
    """Linear LRP gamma+epsilon rule: boosted weights and a real epsilon stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None):
        rho = gamma_fn(0.1)
        return _forward_rho(rho, add_epsilon_fn(1e-1), ctx, input, weight, bias)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
def _forward_alpha_beta(ctx, input, weight, bias):
    """Plain linear forward; saves tensors for the alpha-beta backward."""
    out = F.linear(input, weight, bias)
    ctx.save_for_backward(input, weight, bias)
    return out
def _backward_alpha_beta(alpha, beta, ctx, relevance_output):
    """
    Alpha-beta LRP backward for linear layers: positive contributions are
    weighted by `alpha`, negative ones by `beta`.

    Inspired by https://github.com/albermax/innvestigate/blob/1ed38a377262236981090bb0989d2e1a6892a0b1/innvestigate/analyzer/relevance_based/relevance_rule.py#L270
    """
    input, weights, bias = ctx.saved_tensors
    sel = weights > 0
    zeros = torch.zeros_like(weights)

    # Split weights and inputs by sign so positive/negative pre-activations
    # can be recombined with different factors.
    weights_pos = torch.where(sel, weights, zeros)
    weights_neg = torch.where(~sel, weights, zeros)

    input_pos = torch.where(input > 0, input, torch.zeros_like(input))
    input_neg = torch.where(input <= 0, input, torch.zeros_like(input))

    def f(X1, X2, W1, W2):
        # Same-sign contributions; 1e-6 guards the zero denominators.
        Z1 = F.linear(X1, W1, bias=None)
        Z2 = F.linear(X2, W2, bias=None)
        Z = Z1 + Z2

        rel_out = relevance_output / (Z + (Z==0).float()* 1e-6)

        t1 = F.linear(rel_out, W1.t(), bias=None)
        t2 = F.linear(rel_out, W2.t(), bias=None)

        r1 = t1 * X1
        r2 = t2 * X2

        return r1 + r2

    pos_rel = f(input_pos, input_neg, weights_pos, weights_neg)
    neg_rel = f(input_neg, input_pos, weights_pos, weights_neg)

    return pos_rel * alpha - neg_rel * beta, None, None
class LinearAlpha1Beta0(Function):
    """Linear alpha1-beta0 rule: only positive contributions propagate relevance."""

    @staticmethod
    def forward(ctx, input, weight, bias=None):
        return _forward_alpha_beta(ctx, input, weight, bias)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_alpha_beta(1., 0., ctx, relevance_output)
class LinearAlpha2Beta1(Function):
    """Linear alpha2-beta1 rule: positives doubled, negatives subtracted once."""

    @staticmethod
    def forward(ctx, input, weight, bias=None):
        return _forward_alpha_beta(ctx, input, weight, bias)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_alpha_beta(2., 1., ctx, relevance_output)
def _forward_pattern(attribution, ctx, input, weight, bias, pattern):
    """Plain linear forward; remembers input/weight/pattern and the attribution flag."""
    ctx.save_for_backward(input, weight, pattern)
    ctx.attribution = attribution
    return F.linear(input, weight, bias)
def _backward_pattern(ctx, relevance_output):
    """Backpropagate relevance through the stored pattern (PatternNet / -Attribution)."""
    input, weight, P = ctx.saved_tensors
    if ctx.attribution:
        # PatternAttribution scales the signal direction by the weights.
        P = P * weight
    relevance_input = F.linear(relevance_output, P.t(), bias=None)
    return relevance_input, None, None, None
class LinearPatternAttribution(Function):
    """Linear PatternAttribution: backward uses pattern * weight."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, pattern=None):
        return _forward_pattern(True, ctx, input, weight, bias, pattern)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_pattern(ctx, relevance_output)
class LinearPatternNet(Function):
    """Linear PatternNet: backward uses the raw pattern."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, pattern=None):
        return _forward_pattern(False, ctx, input, weight, bias, pattern)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_pattern(ctx, relevance_output)
# Registry mapping LRP rule names to the linear-layer implementation.
linear = {
    "gradient": F.linear,
    "epsilon": LinearEpsilon.apply,
    "gamma": LinearGamma.apply,
    "gamma+epsilon": LinearGammaEpsilon.apply,
    "alpha1beta0": LinearAlpha1Beta0.apply,
    "alpha2beta1": LinearAlpha2Beta1.apply,
    "patternattribution": LinearPatternAttribution.apply,
    "patternnet": LinearPatternNet.apply,
}
| 5,193 | 32.509677 | 167 | py |
BayesianRelevance | BayesianRelevance-master/src/lrp/functional/conv.py | import torch
import torch.nn.functional as F
from torch.autograd import Function
from .utils import identity_fn, gamma_fn, add_epsilon_fn, normalize
def _forward_rho(rho, incr, ctx, input, weight, bias, stride, padding, dilation, groups):
    """Plain conv2d forward that stashes everything the LRP backward needs on ctx.

    `rho` transforms the weights and `incr` stabilises the denominator in the
    backward pass; both are stored untouched here.
    """
    ctx.save_for_backward(input, weight, bias)
    ctx.rho, ctx.incr = rho, incr
    ctx.stride, ctx.padding = stride, padding
    ctx.dilation, ctx.groups = dilation, groups
    return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
def _backward_rho(ctx, relevance_output):
    """Propagate relevance through the conv according to the (rho, incr) rule.

    Implements R_i = x_i * sum_j w_ij * R_j / z_j: the rule-modified forward
    activations form the denominator and a transposed convolution redistributes
    the normalized relevance onto the input.
    """
    input, weight, bias = ctx.saved_tensors
    weight, bias = ctx.rho(weight, bias)
    Z = ctx.incr(F.conv2d(input, weight, bias, ctx.stride, ctx.padding, ctx.dilation, ctx.groups))

    relevance_output = relevance_output / Z
    # Use the convolution's own padding instead of the former hard-coded
    # padding=0, which produced wrong shapes for padded convs.  (Still assumes
    # stride 1: conv_transpose2d would need stride/output_padding otherwise.)
    relevance_input = F.conv_transpose2d(relevance_output, weight, None, padding=ctx.padding)
    relevance_input = relevance_input * input
    return relevance_input, None, None, None, None, None, None,
class Conv2DEpsilon(Function):
    """Conv2d LRP-epsilon rule: plain weights, sign-aware epsilon stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        stabilizer = add_epsilon_fn(1e-1)
        return _forward_rho(identity_fn, stabilizer, ctx, input, weight, bias,
                            stride, padding, dilation, groups)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
class Conv2DGamma(Function):
    """Conv2d LRP-gamma rule: positively-boosted weights, tiny stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        rho = gamma_fn(0.1)
        return _forward_rho(rho, add_epsilon_fn(1e-10), ctx, input, weight, bias,
                            stride, padding, dilation, groups)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
class Conv2DGammaEpsilon(Function):
    """Conv2d LRP gamma+epsilon rule: boosted weights and a real epsilon stabiliser."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        rho = gamma_fn(0.1)
        return _forward_rho(rho, add_epsilon_fn(1e-1), ctx, input, weight, bias,
                            stride, padding, dilation, groups)

    @staticmethod
    def backward(ctx, relevance_output):
        return _backward_rho(ctx, relevance_output)
def _conv_alpha_beta_forward(ctx, input, weight, bias, stride, padding, dilation, groups, **kwargs):
    """Plain conv2d forward; saves input, weight, output and bias for the alpha-beta backward."""
    out = F.conv2d(input, weight, bias, stride, padding, dilation, groups)
    ctx.save_for_backward(input, weight, out, bias)
    return out
def _conv_alpha_beta_backward(alpha, beta, ctx, relevance_output):
    """Alpha-beta LRP backward for conv layers: weights positive contributions by
    `alpha` and negative ones by `beta`.

    NOTE(review): the inner convolutions hard-code stride=1, padding=0, so this
    is only valid for unpadded stride-1 convs -- the forward above does not
    save the actual stride/padding on ctx.
    """
    input, weights, Z, bias = ctx.saved_tensors
    sel = weights > 0
    zeros = torch.zeros_like(weights)

    # Split weights and inputs by sign so positive/negative pre-activations
    # can be recombined with different factors.
    weights_pos = torch.where(sel, weights, zeros)
    weights_neg = torch.where(~sel, weights, zeros)

    input_pos = torch.where(input > 0, input, torch.zeros_like(input))
    input_neg = torch.where(input <= 0, input, torch.zeros_like(input))

    def f(X1, X2, W1, W2):
        # Same-sign contributions; 1e-6 guards the zero denominators.
        Z1 = F.conv2d(X1, W1, bias=None, stride=1, padding=0)
        Z2 = F.conv2d(X2, W2, bias=None, stride=1, padding=0)
        Z = Z1 + Z2

        rel_out = relevance_output / (Z + (Z==0).float()* 1e-6)

        t1 = F.conv_transpose2d(rel_out, W1, bias=None, padding=0)
        t2 = F.conv_transpose2d(rel_out, W2, bias=None, padding=0)

        r1 = t1 * X1
        r2 = t2 * X2

        return r1 + r2
    pos_rel = f(input_pos, input_neg, weights_pos, weights_neg)
    neg_rel = f(input_neg, input_pos, weights_pos, weights_neg)

    return pos_rel * alpha - neg_rel * beta, None, None, None, None, None, None
class Conv2DAlpha1Beta0(Function):
    """Conv2d alpha1-beta0 rule: only positive contributions propagate relevance."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        return _conv_alpha_beta_forward(ctx, input, weight, bias, stride, padding, dilation, groups, **kwargs)

    @staticmethod
    def backward(ctx, relevance_output):
        return _conv_alpha_beta_backward(1., 0., ctx, relevance_output)
class Conv2DAlpha2Beta1(Function):
    """Conv2d alpha2-beta1 rule: positives doubled, negatives subtracted once."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, **kwargs):
        return _conv_alpha_beta_forward(ctx, input, weight, bias, stride, padding, dilation, groups, **kwargs)

    @staticmethod
    def backward(ctx, relevance_output):
        return _conv_alpha_beta_backward(2., 1., ctx, relevance_output)
def _pattern_forward(attribution, ctx, input, weight, bias, stride, padding, dilation, groups, pattern):
    """Plain conv2d forward; remembers input/weight/pattern and the attribution flag."""
    out = F.conv2d(input, weight, bias, stride, padding, dilation, groups)
    ctx.save_for_backward(input, weight, pattern)
    ctx.stride, ctx.padding = stride, padding
    ctx.attribution = attribution
    return out
def _pattern_backward(ctx, relevance_output):
    """Backpropagate relevance through the stored pattern (PatternNet / -Attribution)."""
    input, weight, P = ctx.saved_tensors
    if ctx.attribution:
        # PatternAttribution scales the signal direction by the weights.
        P = P * weight
    relevance_input = F.conv_transpose2d(relevance_output, P, padding=ctx.padding, stride=ctx.stride)
    return relevance_input, None, None, None, None, None, None, None
class Conv2DPatternAttribution(Function):
    """Conv2d PatternAttribution: backward uses pattern * weight."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, pattern=None):
        return _pattern_forward(True, ctx, input, weight, bias, stride, padding, dilation, groups, pattern)

    @staticmethod
    def backward(ctx, relevance_output):
        return _pattern_backward(ctx, relevance_output)
class Conv2DPatternNet(Function):
    """Conv2d PatternNet: backward uses the raw pattern."""

    @staticmethod
    def forward(ctx, input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, pattern=None):
        return _pattern_forward(False, ctx, input, weight, bias, stride, padding, dilation, groups, pattern)

    @staticmethod
    def backward(ctx, relevance_output):
        return _pattern_backward(ctx, relevance_output)
conv2d = {
"gradient": F.conv2d,
"epsilon": Conv2DEpsilon.apply,
"gamma": Conv2DGamma.apply,
"gamma+epsilon": Conv2DGammaEpsilon.apply,
"alpha1beta0": Conv2DAlpha1Beta0.apply,
"alpha2beta1": Conv2DAlpha2Beta1.apply,
"patternattribution": Conv2DPatternAttribution.apply,
"patternnet": Conv2DPatternNet.apply,
}
| 6,341 | 37.907975 | 126 | py |
BayesianRelevance | BayesianRelevance-master/src/utils/data.py | import os
import math
import time
import random
import numpy as np
import pickle as pkl
from utils.savedir import *
import torch
import keras
import tensorflow as tf
from keras import backend as K
from keras.datasets import mnist, fashion_mnist
from sklearn.datasets import make_moons
from pandas import DataFrame
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import seaborn as sns
import matplotlib.pyplot as plt
def execution_time(start, end):
hours, rem = divmod(end - start, 3600)
minutes, seconds = divmod(rem, 60)
print("\nExecution time = {:0>2}:{:0>2}:{:0>2}".format(int(hours), int(minutes), int(seconds)))
################
# data loaders #
################
def data_loaders(dataset_name, batch_size, n_inputs=None, channels="first", shuffle=False):
random.seed(0)
# batch_size = 256
if dataset_name == "cifar":
data_dir="../../cifar-10/"
transform_train = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
target_transform = torchvision.transforms.Compose([lambda x:torch.tensor([x]),
lambda x:F.one_hot(x,10),
lambda x:x.squeeze()])
trainset = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=True, transform=transform_train,
target_transform=target_transform)
trainset = torch.utils.data.Subset(trainset, range(0, n_inputs)) if n_inputs else trainset
train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.CIFAR10(root=data_dir, train=False, download=True, transform=transform_test,
target_transform=target_transform)
testset = torch.utils.data.Subset(testset, range(0, n_inputs)) if n_inputs else testset
test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
input_shape = next(iter(train_loader))[0].shape[1:]
# print(next(iter(train_loader))[0].shape)
# print(next(iter(train_loader))[1].shape)
num_classes = 10
else:
x_train, y_train, x_test, y_test, input_shape, num_classes = \
load_dataset(dataset_name=dataset_name, n_inputs=n_inputs, channels=channels)
train_loader = DataLoader(dataset=list(zip(x_train, y_train)), batch_size=batch_size,
shuffle=shuffle)
test_loader = DataLoader(dataset=list(zip(x_test, y_test)), batch_size=batch_size,
shuffle=shuffle)
return train_loader, test_loader, input_shape, num_classes
def classwise_data_loaders(dataset_name, batch_size, n_inputs=None, shuffle=False):
random.seed(0)
x_train, y_train, x_test, y_test, input_shape, num_classes = \
load_dataset(dataset_name=dataset_name)
train_loaders = []
test_loaders = []
for label in range(num_classes):
label_idxs = y_train.argmax(1)==label
x_train_label = x_train[label_idxs]
y_train_label = y_train[label_idxs]
label_idxs = y_test.argmax(1)==label
x_test_label = x_test[label_idxs]
y_test_label = y_test[label_idxs]
if n_inputs:
x_train_label = x_train_label[:n_inputs]
y_train_label = y_train_label[:n_inputs]
x_test_label = x_test_label[:n_inputs]
y_test_label = y_test_label[:n_inputs]
train_loader = DataLoader(dataset=list(zip(x_train_label, y_train_label)),
batch_size=batch_size, shuffle=shuffle)
test_loader = DataLoader(dataset=list(zip(x_test_label, y_test_label)),
batch_size=batch_size, shuffle=shuffle)
train_loaders.append(train_loader)
test_loaders.append(test_loader)
return train_loaders, test_loaders, input_shape, num_classes
def load_half_moons(channels="first", n_samples=30000):
x, y = make_moons(n_samples=n_samples, shuffle=False, noise=0.1, random_state=0)
x, y = (x.astype('float32'), y.astype('float32'))
x = (x-np.min(x))/(np.max(x)-np.min(x))
# train-test split
split_size = int(0.8 * len(x))
x_train, y_train = x[:split_size], y[:split_size]
x_test, y_test = x[split_size:], y[split_size:]
# image-like representation for compatibility with old code
n_channels = 1
n_coords = 2
if channels == "first":
x_train = x_train.reshape(x_train.shape[0], n_channels, n_coords, 1)
x_test = x_test.reshape(x_test.shape[0], n_channels, n_coords, 1)
elif channels == "last":
x_train = x_train.reshape(x_train.shape[0], 1, n_coords, n_channels)
x_test = x_test.reshape(x_test.shape[0], 1, n_coords, n_channels)
input_shape = x_train.shape[1:]
# binary one hot encoding
num_classes = 2
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return x_train, y_train, x_test, y_test, input_shape, num_classes
def load_fashion_mnist(channels, img_rows=28, img_cols=28):
print("\nLoading fashion mnist.")
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
if channels == "first":
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
elif channels == "last":
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = x_train.shape[1:]
num_classes = 10
return x_train, y_train, x_test, y_test, input_shape, num_classes
def load_mnist(channels, img_rows=28, img_cols=28):
print("\nLoading mnist.")
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
if channels == "first":
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
elif channels == "last":
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = x_train.shape[1:]
num_classes = 10
return x_train, y_train, x_test, y_test, input_shape, num_classes
def labels_to_onehot(integer_labels, n_classes=None):
n_rows = len(integer_labels)
n_cols = n_classes if n_classes else integer_labels.max() + 1
onehot = np.zeros((n_rows, n_cols), dtype='uint8')
onehot[np.arange(n_rows), integer_labels] = 1
return onehot
def onehot_to_labels(y):
if type(y) is np.ndarray:
return np.argmax(y, axis=1)
elif type(y) is torch.Tensor:
return torch.max(y, 1)[1]
# def load_cifar(channels, img_rows=32, img_cols=32):
# x_train = None
# y_train = []
# data_dir="../../cifar-10/"
# for batch in range(1, 6):
# data_dic = unpickle(data_dir + "data_batch_{}".format(batch))
# if batch == 1:
# x_train = data_dic['data']
# else:
# x_train = np.vstack((x_train, data_dic['data']))
# y_train += data_dic['labels']
# test_data_dic = unpickle(data_dir + "test_batch")
# x_test = test_data_dic['data']
# y_test = test_data_dic['labels']
# x_train = x_train.reshape((len(x_train), 3, img_rows, img_cols))
# x_train = np.rollaxis(x_train, 1, 4)
# y_train = np.array(y_train)
# x_test = x_test.reshape((len(x_test), 3, img_rows, img_cols))
# x_test = np.rollaxis(x_test, 1, 4)
# y_test = np.array(y_test)
# input_shape = x_train.shape[1:]
# x_train = x_train.astype('float32')
# x_test = x_test.astype('float32')
# x_train /= 255
# x_test /= 255
# if channels == "first":
# x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
# x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
# elif channels == "last":
# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
# y_train = keras.utils.to_categorical(y_train, 10)
# y_test = keras.utils.to_categorical(y_test, 10)
# input_shape = x_train.shape[1:]
# num_classes = 10
# return x_train, y_train, x_test, y_test, input_shape, num_classes
def load_dataset(dataset_name, n_inputs=None, channels="first", shuffle=False):
if dataset_name == "mnist":
x_train, y_train, x_test, y_test, input_shape, num_classes = load_mnist(channels)
# elif dataset_name == "cifar":
# x_train, y_train, x_test, y_test, input_shape, num_classes = load_cifar(channels)
elif dataset_name == "fashion_mnist":
x_train, y_train, x_test, y_test, input_shape, num_classes = load_fashion_mnist(channels)
elif dataset_name == "half_moons":
x_train, y_train, x_test, y_test, input_shape, num_classes = load_half_moons()
else:
raise AssertionError("\nDataset not available.")
x_train, y_train = torch.from_numpy(x_train), torch.from_numpy(y_train)
x_test, y_test = torch.from_numpy(x_test), torch.from_numpy(y_test)
if n_inputs:
x_train, y_train, _ = balanced_subset(x_train, y_train, num_classes, n_inputs)
x_test, y_test, _ = balanced_subset(x_test, y_test, num_classes, n_inputs)
print('x_train shape =', x_train.shape, '\nx_test shape =', x_test.shape)
print('y_train shape =', y_train.shape, '\ny_test shape =', y_test.shape)
if shuffle is True:
idxs = np.random.permutation(len(x_train))
x_train, y_train = (x_train[idxs], y_train[idxs])
idxs = np.random.permutation(len(x_test))
x_test, y_test = (x_test[idxs], y_test[idxs])
return x_train, y_train, x_test, y_test, input_shape, num_classes
def balanced_subset(inputs, labels, num_classes, subset_size):
n_samples = min(subset_size, len(inputs))
samples_per_class = int(n_samples/num_classes)
sampled_idxs = []
for target in range(num_classes+1):
while len(sampled_idxs) < target*samples_per_class:
idx = np.random.randint(0, len(inputs))
if labels[idx].argmax(-1)==(target-1):
sampled_idxs.append(idx)
return inputs[sampled_idxs], labels[sampled_idxs], sampled_idxs
############
# pickling #
############
def save_to_pickle(data, path, filename):
full_path=os.path.join(path, filename+".pkl")
print("\nSaving pickle: ", full_path)
# os.makedirs(os.path.dirname(path), exist_ok=True)
os.makedirs(path, exist_ok=True)
with open(full_path, 'wb') as f:
pkl.dump(data, f)
def load_from_pickle(path, filename):
full_path=os.path.join(path, filename+".pkl")
print("\nLoading from pickle: ", full_path)
with open(full_path, 'rb') as f:
u = pkl._Unpickler(f)
u.encoding = 'latin1'
data = u.load()
return data
def unpickle(file):
""" Load byte data from file"""
with open(file, 'rb') as f:
data = pkl.load(f, encoding='latin-1')
return data
def plot_loss_accuracy(dict, path):
fig, (ax1, ax2) = plt.subplots(2, figsize=(12,8))
ax1.plot(dict['loss'])
ax1.set_title("loss")
ax2.plot(dict['accuracy'])
ax2.set_title("accuracy")
os.makedirs(os.path.dirname(path), exist_ok=True)
fig.savefig(path) | 12,410 | 34.766571 | 116 | py |
BayesianRelevance | BayesianRelevance-master/src/utils/networks.py | import torch
import torch.nn as nn
def relu_to_softplus(model, beta):
for child_name, child in model.named_children():
if isinstance(child, nn.LeakyReLU):
setattr(model, child_name, nn.Softplus(beta=beta))
else:
relu_to_softplus(child, beta)
return model
def change_beta(model, beta):
for child_name, child in model.named_children():
if isinstance(child, nn.Softplus):
setattr(model, child_name, nn.Softplus(beta=beta))
else:
change_beta(child, beta)
return model | 492 | 22.47619 | 53 | py |
BayesianRelevance | BayesianRelevance-master/src/utils/seeding.py | import torch
import numpy as np
import random
import pyro
def set_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
pyro.set_rng_seed(seed)
set_seed(0) | 267 | 14.764706 | 36 | py |
BayesianRelevance | BayesianRelevance-master/src/utils/savedir.py | import os
import sys
import time
DATA = "../data/"
TESTS = "../experiments/"
ATK_DIR = "attacks/"
def get_model_savedir(model, dataset, architecture, iters=None, inference=None, baseiters=None,
model_idx=None, layer_idx=None, debug=False, torchvision=False, attack_method=None):
if torchvision:
model = str(model)+"_torchvision"
savedir = model+"/"+dataset+"_"+architecture
if iters is not None:
savedir+="_iters="+str(iters)
if model_idx is not None:
savedir+="_idx="+str(model_idx)
if inference is not None:
savedir+="_"+inference
if baseiters is not None:
savedir+="_baseiters="+str(baseiters)
if layer_idx:
savedir+="_layeridx="+str(layer_idx)
if attack_method:
savedir+="_atk="+str(attack_method)
if debug:
return os.path.join(TESTS, "debug/", savedir)
else:
return os.path.join(TESTS, savedir)
def get_lrp_savedir(model_savedir, rule, attack_method, lrp_method=None, layer_idx=None):
"""
model_savedir: original model directory.
attack_method: method used for computing the attacks.
rule: chosen LRP rule.
lrp_method: Bayesian method for computing the LRP, by default computes the avg heatmap.
layer_idx: LRP is computed at layer_idx, which is at the last layer by default.
"""
savedir = str(attack_method)+"/"
savedir += str(lrp_method)+"_"+str(rule)+"_lrp/" if lrp_method else str(rule)+"_lrp/"
if layer_idx is not None:
savedir += "pkl_layer_idx="+str(layer_idx)
return os.path.join(model_savedir, savedir)
def get_atk_filename_savedir(attack_method, model_savedir, atk_mode=False, n_samples=None):
if atk_mode:
filename = str(attack_method)+"_mode_attack"
else:
if n_samples:
filename = str(attack_method)+"_attackSamp="+str(n_samples)+"_attack"
else:
filename = str(attack_method)+"_attack"
savedir = os.path.join(model_savedir, str(attack_method)+"/"+ATK_DIR)
return filename, savedir | 2,068 | 28.557143 | 106 | py |
BayesianRelevance | BayesianRelevance-master/src/utils/lrp.py | import os
import lrp
import copy
import torch
import numpy as np
from torch import nn
from tqdm import tqdm
import torch.nn.functional as nnf
from torchvision import transforms
from scipy.stats import wasserstein_distance
from utils.savedir import *
from utils.seeding import set_seed
from utils.data import load_from_pickle, save_to_pickle
cmap_name="RdBu_r"
DEBUG=False
def select_informative_pixels(lrp_heatmaps, topk):
"""
Flattens image shape dimensions and selects the most relevant topk % pixels.
Returns lrp heatmaps on the selected pixels and the chosen pixel indexes.
"""
if DEBUG:
print(f"\nTop {topk}% most informative pixels:", end="\t")
if len(lrp_heatmaps.shape)==3:
if DEBUG:
print(f"(image shape) = {lrp_heatmaps.shape}", end="\t")
squeeze_dim=0
elif len(lrp_heatmaps.shape)==4:
if DEBUG:
print(f"(n. images, image shape) = {lrp_heatmaps.shape[0], lrp_heatmaps.shape[1:]}")
squeeze_dim=1
elif len(lrp_heatmaps.shape)==5:
if DEBUG:
print(f"(samples_list_size, n. images, image shape) = {lrp_heatmaps.shape[0], lrp_heatmaps.shape[1], lrp_heatmaps.shape[2:]}")
squeeze_dim=2
else:
raise ValueError("Wrong array shape.")
flat_lrp_heatmaps = lrp_heatmaps.reshape(*lrp_heatmaps.shape[:squeeze_dim], -1)
if len(flat_lrp_heatmaps.shape)==2:
flat_lrp_heatmap = flat_lrp_heatmaps.sum(0)
elif len(flat_lrp_heatmaps.shape)>2:
flat_lrp_heatmap = flat_lrp_heatmaps.sum(0).sum(0)
else:
flat_lrp_heatmap = flat_lrp_heatmaps
# topk_percentage = int(len(flat_lrp_heatmap)*topk/100)
# chosen_pxl_idxs = torch.argsort(flat_lrp_heatmap)[-topk_percentage:]
half_topk_percentage = int(len(flat_lrp_heatmap)*(topk/2)/100)
sorted_flat_lrp_idxs = torch.argsort(flat_lrp_heatmap)
chosen_pxl_idxs = [sorted_flat_lrp_idxs[-half_topk_percentage:], sorted_flat_lrp_idxs[:half_topk_percentage]]
chosen_pxl_idxs = torch.cat(chosen_pxl_idxs)
chosen_pxls_lrp = flat_lrp_heatmaps[..., chosen_pxl_idxs]
if DEBUG:
print("out shape =", chosen_pxls_lrp.shape)
return chosen_pxls_lrp, chosen_pxl_idxs
def compute_explanations(x_test, network, rule, method, device, n_samples=None, layer_idx=-1, avg_posterior=False):
x_test = x_test.to(device)
print("\nLRP layer idx =", layer_idx)
layer_idx = network._set_correct_layer_idx(layer_idx)
import itertools
if hasattr(network, "basenet"):
print(nn.Sequential(*list(network.basenet.model.children())[:layer_idx]))
else:
print(nn.Sequential(*list(network.model.children())[:layer_idx]))
if n_samples is None or avg_posterior is True:
explanations = []
for x in tqdm(x_test):
x = x.detach()
x.requires_grad=True
# Forward pass
y_hat = network.forward(x.unsqueeze(0), explain=True, rule=rule, layer_idx=layer_idx,
avg_posterior=avg_posterior)
# Choose argmax
y_hat = y_hat[torch.arange(x.shape[0]), y_hat.max(1)[1]].sum()
# Backward pass (compute explanation)
y_hat.backward()
lrp = x.grad
explanations.append(lrp)
else:
if method=="avg_prediction":
explanations = []
for x in tqdm(x_test):
# Forward pass
x_copy = copy.deepcopy(x.detach()).unsqueeze(0)
x_copy.requires_grad = True
y_hat = network.forward(inputs=x_copy, n_samples=n_samples,
explain=True, rule=rule, layer_idx=layer_idx)
# Choose argmax
y_hat = y_hat[torch.arange(x_copy.shape[0]), y_hat.max(1)[1]]
y_hat = y_hat.sum()
# Backward pass (compute explanation)
y_hat.backward()
lrp = x_copy.grad.squeeze(1)
explanations.append(lrp)
elif method=="avg_heatmap":
explanations = []
for x in tqdm(x_test):
post_explanations = []
for j in range(n_samples):
# Forward pass
x_copy = copy.deepcopy(x.detach()).unsqueeze(0)
x_copy.requires_grad = True
y_hat = network.forward(inputs=x_copy, n_samples=1, sample_idxs=[j],
explain=True, rule=rule, layer_idx=layer_idx)
# Choose argmax
y_hat = y_hat[torch.arange(x_copy.shape[0]), y_hat.max(1)[1]]
y_hat = y_hat.sum()
# Backward pass (compute explanation)
y_hat.backward()
lrp = x_copy.grad.squeeze(1)
post_explanations.append(lrp)
explanations.append(torch.stack(post_explanations).mean(0))
explanations = torch.stack(explanations)
return explanations
def normalize(lrp):
return 2*(lrp-lrp.min())/(lrp.max()-lrp.min())-1
def compute_vanishing_norm_idxs(inputs, n_samples_list, norm="linfty"):
if inputs.shape[0] != len(n_samples_list):
raise ValueError("First dimension should equal the length of `n_samples_list`")
inputs=np.transpose(inputs, (1, 0, 2, 3, 4))
vanishing_norm_idxs = []
non_null_idxs = []
print("\nvanishing norms:\n")
count_van_images = 0
count_incr_images = 0
count_null_images = 0
for idx, image in enumerate(inputs):
if norm == "linfty":
inputs_norm = np.max(np.abs(image[0]))
elif norm == "l2":
inputs_norm = np.linalg.norm(image[0])
else:
raise ValueError("Wrong norm name")
if inputs_norm != 0.0:
if DEBUG:
print("idx =",idx, end="\t")
count_samples_idx = 0
for samples_idx, n_samples in enumerate(n_samples_list):
if norm == "linfty":
new_inputs_norm = np.max(np.abs(image[samples_idx]))
elif norm == "l2":
new_inputs_norm = np.linalg.norm(image[samples_idx])
if new_inputs_norm <= inputs_norm:
if DEBUG:
print(new_inputs_norm, end="\t")
inputs_norm = copy.deepcopy(new_inputs_norm)
count_samples_idx += 1
if count_samples_idx == len(n_samples_list):
vanishing_norm_idxs.append(idx)
non_null_idxs.append(idx)
if DEBUG:
print("\tcount=", count_van_images)
count_van_images += 1
else:
non_null_idxs.append(idx)
count_incr_images += 1
if DEBUG:
print("\n")
else:
count_null_images += 1
print(f"vanishing norms = {100*count_van_images/len(inputs)} %")
print(f"increasing norms = {100*count_incr_images/len(inputs)} %")
print(f"null norms = {100*count_null_images/len(inputs)} %")
print("\nvanishing norms idxs = ", vanishing_norm_idxs)
return vanishing_norm_idxs, non_null_idxs
def lrp_distances(original_heatmaps, adversarial_heatmaps, pxl_idxs=None, axis_norm=0):
if original_heatmaps.shape[0]==0:
return torch.empty(0)
original_heatmaps = original_heatmaps.reshape(*original_heatmaps.shape[:1], -1)
adversarial_heatmaps = adversarial_heatmaps.reshape(*adversarial_heatmaps.shape[:1], -1)
if pxl_idxs is not None:
original_heatmaps = original_heatmaps[:, pxl_idxs]
adversarial_heatmaps = adversarial_heatmaps[:, pxl_idxs]
distances = torch.norm(original_heatmaps-adversarial_heatmaps, dim=axis_norm)
return distances
def lrp_robustness(original_heatmaps, adversarial_heatmaps, topk, method):
"""
Point-wise robustness measure. Computes the fraction of common topk relevant pixels between each original
image and adversarial image.
"""
robustness = []
chosen_pxl_idxs=[]
if method=="imagewise":
for im_idx in range(len(original_heatmaps)):
orig_pxl_idxs = select_informative_pixels(original_heatmaps[im_idx], topk=topk)[1]
adv_pxl_idxs = select_informative_pixels(adversarial_heatmaps[im_idx], topk=topk)[1]
pxl_idxs = np.intersect1d(orig_pxl_idxs.detach().cpu().numpy(), adv_pxl_idxs.detach().cpu().numpy())
robustness.append(len(pxl_idxs)/len(orig_pxl_idxs))
chosen_pxl_idxs.append(pxl_idxs)
elif method=="pixelwise":
for im_idx in range(len(original_heatmaps)):
orig_pxl_idxs = select_informative_pixels(original_heatmaps[im_idx], topk=topk)[1]
adv_pxl_idxs = select_informative_pixels(adversarial_heatmaps[im_idx], topk=topk)[1]
pxl_idxs = np.intersect1d(orig_pxl_idxs.detach().cpu().numpy(), adv_pxl_idxs.detach().cpu().numpy())
chosen_pxl_idxs.extend(pxl_idxs)
distances = lrp_distances(original_heatmaps, adversarial_heatmaps, chosen_pxl_idxs)
robustness = -np.array(distances.detach().cpu().numpy())
else:
raise NotImplementedError
if DEBUG:
print("\n", pxl_idxs.shape, np.array(chosen_pxl_idxs).shape, np.array(robustness).shape)
return np.array(robustness), np.array(chosen_pxl_idxs)
| 8,089 | 28.418182 | 129 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/examples/main_bayesian_flipout_cifar.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import bayesian_torch.models.bayesian.resnet_flipout as resnet
import numpy as np
model_names = sorted(
name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet") and callable(resnet.__dict__[name]))
print(model_names)
len_trainset = 50000
len_testset = 10000
num_classes = 10
parser = argparse.ArgumentParser(description='CIFAR10')
parser.add_argument('--arch',
'-a',
metavar='ARCH',
default='resnet20',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet20)')
parser.add_argument('-j',
'--workers',
default=8,
type=int,
metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs',
default=200,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b',
'--batch-size',
default=512,
type=int,
metavar='N',
help='mini-batch size (default: 512)')
parser.add_argument('--lr',
'--learning-rate',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate')
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--weight-decay',
'--wd',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq',
'-p',
default=50,
type=int,
metavar='N',
help='print frequency (default: 20)')
parser.add_argument('--resume',
default='',
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
parser.add_argument('--half',
dest='half',
action='store_true',
help='use half-precision(16-bit) ')
parser.add_argument('--save-dir',
dest='save_dir',
help='The directory used to save the trained models',
default='./checkpoint/bayesian',
type=str)
parser.add_argument(
'--save-every',
dest='save_every',
help='Saves checkpoints at every specified number of epochs',
type=int,
default=10)
parser.add_argument('--mode', type=str, required=True, help='train | test')
parser.add_argument(
'--num_monte_carlo',
type=int,
default=20,
metavar='N',
help='number of Monte Carlo samples to be drawn during inference')
parser.add_argument('--num_mc',
type=int,
default=5,
metavar='N',
help='number of Monte Carlo runs during training')
parser.add_argument(
'--tensorboard',
type=bool,
default=True,
metavar='N',
help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
'--log_dir',
type=str,
default='./logs/cifar/bayesian',
metavar='N',
help='use tensorboard for logging and visualization of training progress')
best_prec1 = 0
def main():
global args, best_prec1
args = parser.parse_args()
# Check the save_dir exists or not
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
model = torch.nn.DataParallel(resnet.__dict__[args.arch]())
if torch.cuda.is_available():
model.cuda()
else:
model.cpu()
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})".format(
args.evaluate, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
tb_writer = None
if args.tensorboard:
logger_dir = os.path.join(args.log_dir, 'tb_logger')
if not os.path.exists(logger_dir):
os.makedirs(logger_dir)
tb_writer = SummaryWriter(logger_dir)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
root='./data',
train=True,
transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]),
download=True),
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
root='./data',
train=False,
transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
if torch.cuda.is_available():
criterion = nn.CrossEntropyLoss().cuda()
else:
criterion = nn.CrossEntropyLoss().cpu()
if args.half:
model.half()
criterion.half()
'''
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
milestones=[100, 150], last_epoch=args.start_epoch - 1)
if args.arch in ['resnet110']:
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr*0.1
'''
if args.evaluate:
validate(val_loader, model, criterion)
return
if args.mode == 'train':
for epoch in range(args.start_epoch, args.epochs):
lr = args.lr
if (epoch >= 80 and epoch < 120):
lr = 0.1 * args.lr
elif (epoch >= 120 and epoch < 160):
lr = 0.01 * args.lr
elif (epoch >= 160 and epoch < 180):
lr = 0.001 * args.lr
elif (epoch >= 180):
lr = 0.0005 * args.lr
optimizer = torch.optim.Adam(model.parameters(), lr)
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
train(args, train_loader, model, criterion, optimizer, epoch,
tb_writer)
#lr_scheduler.step()
prec1 = validate(args, val_loader, model, criterion, epoch,
tb_writer)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if is_best:
save_checkpoint(
{
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
},
is_best,
filename=os.path.join(
args.save_dir,
'bayesian_flipout_{}_cifar.pth'.format(args.arch)))
elif args.mode == 'test':
checkpoint_file = args.save_dir + '/bayesian_flipout_{}_cifar.pth'.format(
args.arch)
if torch.cuda.is_available():
checkpoint = torch.load(checkpoint_file)
else:
checkpoint = torch.load(checkpoint_file,
map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['state_dict'])
evaluate(args, model, val_loader)
def train(args,
train_loader,
model,
criterion,
optimizer,
epoch,
tb_writer=None):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
data_time.update(time.time() - end)
if torch.cuda.is_available():
target = target.cuda()
input_var = input.cuda()
target_var = target
else:
target = target.cpu()
input_var = input.cpu()
target_var = target
if args.half:
input_var = input_var.half()
input_var = torch.cat([input_var for _ in range(args.num_mc)], 0)
output_mc = []
kl_mc = []
output, kl = model(input_var)
output_mc.append(output)
kl_mc.append(kl)
output_ = torch.stack(output_mc)
output_mean = output_.reshape(args.num_mc, -1, num_classes).mean(dim=0)
kl = torch.stack(kl_mc)
cross_entropy_loss = criterion(output_mean, target_var)
scaled_kl = kl / len_trainset
#ELBO loss
loss = cross_entropy_loss + scaled_kl
'''
#another way of computing gradients with multiple MC samples
cross_entropy_loss = 0
scaled_kl = 0
for mc_run in range(args.num_mc):
output, kl = model(input_var)
cross_entropy_loss += criterion(output, target_var)
scaled_kl += (kl/len_trainset)
cross_entropy_loss = cross_entropy_loss/args.num_mc
scaled_kl = scaled_kl/args.num_mc
loss = cross_entropy_loss + scaled_kl
#end
'''
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_mean.float()
loss = loss.float()
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch,
i,
len(train_loader),
batch_time=batch_time,
data_time=data_time,
loss=losses,
top1=top1))
if tb_writer is not None:
tb_writer.add_scalar('train/cross_entropy_loss',
cross_entropy_loss.item(), epoch)
tb_writer.add_scalar('train/kl_div', scaled_kl.item(), epoch)
tb_writer.add_scalar('train/elbo_loss', loss.item(), epoch)
tb_writer.add_scalar('train/accuracy', prec1.item(), epoch)
tb_writer.flush()
def validate(args, val_loader, model, criterion, epoch, tb_writer=None):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
if torch.cuda.is_available():
target = target.cuda()
input_var = input.cuda()
target_var = target.cuda()
else:
target = target.cpu()
input_var = input.cpu()
target_var = target.cpu()
if args.half:
input_var = input_var.half()
input_var = torch.cat([input_var for _ in range(args.num_mc)], 0)
output_mc = []
kl_mc = []
output, kl = model(input_var)
output_mc.append(output)
kl_mc.append(kl)
output_ = torch.stack(output_mc)
output_mean = output_.reshape(args.num_mc, -1,
num_classes).mean(dim=0)
kl = torch.stack(kl_mc)
cross_entropy_loss = criterion(output_mean, target_var)
scaled_kl = kl / len_trainset
#ELBO loss
loss = cross_entropy_loss + scaled_kl
output = output_mean.float()
loss = loss.float()
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i,
len(val_loader),
batch_time=batch_time,
loss=losses,
top1=top1))
if tb_writer is not None:
tb_writer.add_scalar('val/cross_entropy_loss',
cross_entropy_loss.item(), epoch)
tb_writer.add_scalar('val/kl_div', scaled_kl.item(), epoch)
tb_writer.add_scalar('val/elbo_loss', loss.item(), epoch)
tb_writer.add_scalar('val/accuracy', prec1.item(), epoch)
tb_writer.flush()
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def evaluate(args, model, val_loader):
    """Evaluate the flipout model with MC sampling and dump probabilities.

    Each batch is replicated args.num_monte_carlo times so a single forward
    call yields all MC samples; per-sample softmax probabilities are averaged
    to form the predictive mean. Results are saved as .npy files.

    Side effects: prints throughput/accuracy and writes
    ./probs_cifar_mc_flipout.npy and ./cifar_test_labels_mc_flipout.npy.
    """
    pred_probs_mc = []
    test_loss = 0
    correct = 0
    output_list = []
    labels_list = []
    model.eval()
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            # Replicate batch so one forward pass covers all MC samples.
            data = torch.cat([data for _ in range(args.num_monte_carlo)], 0)
            output_mc = []
            output, _ = model.forward(data)
            output_mc.append(output)
            output_ = torch.stack(output_mc)
            # NOTE(review): num_classes and len_testset are module-level
            # globals defined outside this block — confirm they are in scope.
            output_ = output_.reshape(args.num_monte_carlo, -1, num_classes)
            output_list.append(output_)
            labels_list.append(target)
        end = time.time()
        print("inference throughput: ", len_testset / (end - begin),
              " images/s")
        output = torch.stack(output_list)
        # (batches, mc, b, C) -> (mc, batches, b, C) -> (mc, len_testset, C).
        output = output.permute(1, 0, 2, 3)
        output = output.contiguous().view(args.num_monte_carlo, len_testset,
                                          -1)
        output = torch.nn.functional.softmax(output, dim=2)
        labels = torch.cat(labels_list)
        # Predictive mean over MC samples, then argmax class.
        pred_mean = output.mean(dim=0)
        Y_pred = torch.argmax(pred_mean, axis=1)
        print('Test accuracy:',
              (Y_pred.data.cpu().numpy() == labels.data.cpu().numpy()).mean() *
              100)
        np.save('./probs_cifar_mc_flipout.npy', output.data.cpu().numpy())
        np.save('./cifar_test_labels_mc_flipout.npy',
                labels.data.cpu().numpy())
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """
    Save the training model state dict (plus metadata) to `filename`.

    NOTE(review): `is_best` is accepted but unused here — callers gate on it
    before invoking this function, so every call unconditionally overwrites
    `filename`.
    """
    torch.save(state, filename)
class AverageMeter(object):
    """Tracks the most recent value and a weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values for which to report precision@k.

    Returns:
        List of scalar tensors, one per k, each the percentage of samples
        whose true label appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): `correct` is non-contiguous after
        # the transpose above, and view() raises on non-contiguous tensors
        # (for k > 1) in recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
if __name__ == '__main__':
main()
| 18,058 | 32.881801 | 111 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/examples/main_deterministic_cifar.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import bayesian_torch.models.deterministic.resnet as resnet
import numpy as np
model_names = sorted(
name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet") and callable(resnet.__dict__[name]))
print(model_names)
parser = argparse.ArgumentParser(description='CIFAR10')
parser.add_argument('--arch',
'-a',
metavar='ARCH',
default='resnet20',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet20)')
parser.add_argument('-j',
'--workers',
default=8,
type=int,
metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs',
default=200,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b',
'--batch-size',
default=512,
type=int,
metavar='N',
help='mini-batch size (default: 512)')
parser.add_argument('--lr',
'--learning-rate',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate')
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--weight-decay',
'--wd',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq',
'-p',
default=50,
type=int,
metavar='N',
help='print frequency (default: 20)')
parser.add_argument('--resume',
default='',
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
parser.add_argument('--half',
dest='half',
action='store_true',
help='use half-precision(16-bit) ')
parser.add_argument('--save-dir',
dest='save_dir',
help='The directory used to save the trained models',
default='./checkpoint/deterministic',
type=str)
parser.add_argument(
'--save-every',
dest='save_every',
help='Saves checkpoints at every specified number of epochs',
type=int,
default=10)
parser.add_argument('--mode', type=str, required=True, help='train | test')
parser.add_argument(
'--tensorboard',
type=bool,
default=True,
metavar='N',
help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
'--log_dir',
type=str,
default='./logs/cifar/deterministic',
metavar='N',
help='use tensorboard for logging and visualization of training progress')
best_prec1 = 0
def main():
    """Entry point for deterministic ResNet CIFAR-10 training/testing.

    Parses CLI args into the module-level `args`, builds the model and data
    loaders, then either trains (args.mode == 'train', saving the best
    checkpoint) or loads a checkpoint and evaluates (args.mode == 'test').
    """
    global args, best_prec1
    args = parser.parse_args()
    # Check the save_dir exists or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model = torch.nn.DataParallel(resnet.__dict__[args.arch]())
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.evaluate, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    tb_writer = None
    if args.tensorboard:
        logger_dir = os.path.join(args.log_dir, 'tb_logger')
        if not os.path.exists(logger_dir):
            os.makedirs(logger_dir)
        tb_writer = SummaryWriter(logger_dir)
    # Standard ImageNet-style normalization applied to CIFAR-10 here.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='./data',
        train=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ]),
        download=True),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='./data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    if torch.cuda.is_available():
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss().cpu()
    if args.half:
        model.half()
        criterion.half()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=[100, 150], last_epoch=args.start_epoch - 1)
    if args.arch in ['resnet110']:
        # Warm-up: resnet110 starts at 0.1x the base learning rate.
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * 0.1
    if args.evaluate:
        # NOTE(review): validate() is defined as
        # validate(args, val_loader, model, criterion, epoch, ...) — this
        # 3-argument call does not match that signature; confirm/repair
        # before relying on the --evaluate path.
        validate(val_loader, model, criterion)
        return
    if args.mode == 'train':
        for epoch in range(args.start_epoch, args.epochs):
            # train for one epoch
            print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
            train(args, train_loader, model, criterion, optimizer, epoch,
                  tb_writer)
            lr_scheduler.step()
            prec1 = validate(args, val_loader, model, criterion, epoch,
                             tb_writer)
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            # Checkpoint only every save_every epochs, and only when best.
            if epoch > 0 and epoch % args.save_every == 0:
                if is_best:
                    save_checkpoint(
                        {
                            'epoch': epoch + 1,
                            'state_dict': model.state_dict(),
                            'best_prec1': best_prec1,
                        },
                        is_best,
                        filename=os.path.join(
                            args.save_dir, '{}_cifar.pth'.format(args.arch)))
    elif args.mode == 'test':
        checkpoint_file = args.save_dir + '/{}_cifar.pth'.format(args.arch)
        if torch.cuda.is_available():
            checkpoint = torch.load(checkpoint_file)
        else:
            checkpoint = torch.load(checkpoint_file,
                                    map_location=torch.device('cpu'))
        model.load_state_dict(checkpoint['state_dict'])
        evaluate(args, model, val_loader)
def train(args,
          train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          tb_writer=None):
    """Train the deterministic model for one epoch with SGD.

    Args:
        args: parsed CLI namespace (uses args.half, args.print_freq).
        train_loader: DataLoader over the training set.
        model: deterministic network returning logits from forward().
        criterion: classification loss (cross entropy).
        optimizer: SGD optimizer stepping model parameters.
        epoch: current epoch index (for logging only).
        tb_writer: optional SummaryWriter for scalar logging.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if torch.cuda.is_available():
            target = target.cuda()
            input_var = input.cuda()
        else:
            target = target.cpu()
            input_var = input.cpu()
        target_var = target
        if args.half:
            input_var = input_var.half()
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1))
    if tb_writer is not None:
        # Logged once per epoch with the last batch's values.
        tb_writer.add_scalar('train/loss', loss.item(), epoch)
        tb_writer.add_scalar('train/accuracy', prec1.item(), epoch)
        tb_writer.flush()
def validate(args, val_loader, model, criterion, epoch, tb_writer=None):
    """Run one validation pass of the deterministic model.

    Returns:
        Average top-1 precision (percentage) over the validation set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    model.eval()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input_var = input.cuda()
                target_var = target.cuda()
            else:
                target = target.cpu()
                input_var = input.cpu()
                target_var = target.cpu()
            if args.half:
                input_var = input_var.half()
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            output = output.float()
            loss = loss.float()
            # measure accuracy and record loss
            prec1 = accuracy(output.data, target)[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1))
            if tb_writer is not None:
                tb_writer.add_scalar('val/loss', loss.item(), epoch)
                tb_writer.add_scalar('val/accuracy', prec1.item(), epoch)
                tb_writer.flush()
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg
def evaluate(args, model, val_loader):
    """Evaluate the deterministic model and dump softmax probabilities.

    Side effects: prints throughput/accuracy and writes
    ./probs_cifar_det.npy and ./cifar_test_labels.npy.
    """
    model.eval()
    correct = 0
    output_list = []
    labels_list = []
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            output = model(data)
            output = torch.nn.functional.softmax(output, dim=1)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()
            output_list.append(output)
            labels_list.append(target)
        end = time.time()
        # NOTE(review): throughput assumes exactly 10000 test images
        # (CIFAR-10 test-set size); wrong for any other loader.
        print("inference throughput: ", 10000 / (end - begin), " images/s")
        output = torch.cat(output_list)
        target = torch.cat(labels_list)
        print('\nTest Accuracy: {:.2f}%\n'.format(100. * correct /
                                                  len(val_loader.dataset)))
        # NOTE(review): target_labels is computed but never used.
        target_labels = target.cpu().data.numpy()
        np.save('./probs_cifar_det.npy', output.data.cpu().numpy())
        np.save('./cifar_test_labels.npy', target.data.cpu().numpy())
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """
    Save the training model state dict (plus metadata) to `filename`.

    NOTE(review): `is_best` is accepted but unused here — callers gate on it
    before invoking this function, so every call unconditionally overwrites
    `filename`.
    """
    torch.save(state, filename)
class AverageMeter(object):
    """Tracks the most recent value and a weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values for which to report precision@k.

    Returns:
        List of scalar tensors, one per k, each the percentage of samples
        whose true label appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): `correct` is non-contiguous after
        # the transpose above, and view() raises on non-contiguous tensors
        # (for k > 1) in recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
if __name__ == '__main__':
main()
| 15,192 | 32.100218 | 78 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/examples/main_bayesian_cifar.py | import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
# from torch.utils.tensorboard import SummaryWriter
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from tqdm import tqdm
import random
from torch.utils.data import Subset, DataLoader
import bayesian_torch.models.bayesian.resnet_variational as resnet
import sys
sys.path.append(".")
from attacks.run_attacks import run_attack, save_attack, load_attack
from utils.lrp import *
# import bayesian_torch.bayesian_torch.models.bayesian.resnet_variational as resnet
model_names = sorted(
name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet") and callable(resnet.__dict__[name]))
print(model_names)
len_trainset = 50000
len_testset = 10000
parser = argparse.ArgumentParser(description='CIFAR10')
parser.add_argument('--arch',
'-a',
metavar='ARCH',
default='resnet20',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet20)')
parser.add_argument('-j',
'--workers',
default=8,
type=int,
metavar='N',
help='number of data loading workers (default: 8)')
parser.add_argument('--epochs',
default=200,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b',
'--batch-size',
default=512,
type=int,
metavar='N',
help='mini-batch size (default: 512)')
parser.add_argument('--lr',
'--learning-rate',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate')
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--weight-decay',
'--wd',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq',
'-p',
default=50,
type=int,
metavar='N',
help='print frequency (default: 20)')
parser.add_argument('--resume',
default='',
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
parser.add_argument('--half',
dest='half',
action='store_true',
help='use half-precision(16-bit) ')
parser.add_argument('--save-dir',
dest='save_dir',
help='The directory used to save the trained models',
default='./bayesian_torch/checkpoint/bayesian',
type=str)
parser.add_argument(
'--save-every',
dest='save_every',
help='Saves checkpoints at every specified number of epochs',
type=int,
default=10)
parser.add_argument('--mode', type=str, required=True, help='train | test')
parser.add_argument(
'--num_monte_carlo',
type=int,
default=20,
metavar='N',
help='number of Monte Carlo samples to be drawn during inference')
parser.add_argument('--num_mc',
type=int,
default=5,
metavar='N',
help='number of Monte Carlo runs during training')
parser.add_argument(
'--tensorboard',
type=bool,
default=False,
metavar='N',
help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
'--log_dir',
type=str,
default='./bayesian_torch/logs/cifar/bayesian',
metavar='N',
help='use tensorboard for logging and visualization of training progress')
best_prec1 = 0
def MOPED_layer(layer, det_layer, delta):
    """
    Set the priors and initialize surrogate posteriors of Bayesian NN with Empirical Bayes
    MOPED (Model Priors with Empirical Bayes using Deterministic DNN)

    Layers are dispatched by their repr() string (Conv2dReparameterization /
    LinearReparameterization) or by type (nn.Conv2d, BatchNorm*): Bayesian
    layers get both priors and posterior means copied from the deterministic
    layer (rho initialised via get_rho with spread `delta`); plain conv and
    batch-norm layers get a straight parameter copy.

    NOTE(review): get_rho is not imported explicitly in this module —
    presumably re-exported via one of the wildcard imports; confirm.

    Reference:
    [1] Ranganath Krishnan, Mahesh Subedar, Omesh Tickoo.
    Specifying Weight Priors in Bayesian Deep Neural Networks with Empirical Bayes. AAAI 2020.
    """
    if (str(layer) == 'Conv2dReparameterization()'):
        #set the priors
        print(str(layer))
        layer.prior_weight_mu = det_layer.weight.data
        if layer.prior_bias_mu is not None:
            layer.prior_bias_mu = det_layer.bias.data
        #initialize surrogate posteriors
        layer.mu_kernel.data = det_layer.weight.data
        layer.rho_kernel.data = get_rho(det_layer.weight.data, delta)
        if layer.mu_bias is not None:
            layer.mu_bias.data = det_layer.bias.data
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
    elif (isinstance(layer, nn.Conv2d)):
        # Deterministic conv inside the Bayesian model: plain parameter copy.
        print(str(layer))
        layer.weight.data = det_layer.weight.data
        if layer.bias is not None:
            layer.bias.data = det_layer.bias.data
    elif (str(layer) == 'LinearReparameterization()'):
        print(str(layer))
        layer.prior_weight_mu = det_layer.weight.data
        if layer.prior_bias_mu is not None:
            layer.prior_bias_mu = det_layer.bias.data
        #initialize the surrogate posteriors
        layer.mu_weight.data = det_layer.weight.data
        layer.rho_weight.data = get_rho(det_layer.weight.data, delta)
        if layer.mu_bias is not None:
            layer.mu_bias.data = det_layer.bias.data
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
    elif str(layer).startswith('Batch'):
        #initialize parameters
        print(str(layer))
        layer.weight.data = det_layer.weight.data
        if layer.bias is not None:
            layer.bias.data = det_layer.bias.data
        layer.running_mean.data = det_layer.running_mean.data
        layer.running_var.data = det_layer.running_var.data
        layer.num_batches_tracked.data = det_layer.num_batches_tracked.data
def main():
    """Entry point for Bayesian (variational) ResNet CIFAR-10 train/test.

    In 'train' mode runs the ELBO training loop with a manually-staged
    learning-rate schedule. In 'test' mode loads a checkpoint, runs MC
    evaluation, loads precomputed FGSM attacks, evaluates them, and computes
    LRP heatmaps for clean and adversarial inputs.
    """
    global args, best_prec1
    args = parser.parse_args()
    # Check the save_dir exists or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model = torch.nn.DataParallel(resnet.__dict__[args.arch]())
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.evaluate, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    tb_writer = None
    # if args.tensorboard:
    #     logger_dir = os.path.join(args.log_dir, 'tb_logger')
    #     if not os.path.exists(logger_dir):
    #         os.makedirs(logger_dir)
    #     tb_writer = SummaryWriter(logger_dir)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='./bayesian_torch/data',
        train=True,
        transform=transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomCrop(32, 4),
            transforms.ToTensor(),
            normalize,
        ]),
        download=True),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)
    val_loader = torch.utils.data.DataLoader(datasets.CIFAR10(
        root='./bayesian_torch/data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    if torch.cuda.is_available():
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.CrossEntropyLoss().cpu()
    if args.half:
        model.half()
        criterion.half()
    if args.arch in ['resnet110']:
        # NOTE(review): `optimizer` is only created inside the training loop
        # below, so this branch raises NameError if it is reached — confirm.
        for param_group in optimizer.param_groups:
            param_group['lr'] = args.lr * 0.1
    if args.evaluate:
        # NOTE(review): validate() is defined as
        # validate(args, val_loader, model, criterion, epoch, ...) — this
        # 3-argument call does not match that signature; confirm/repair.
        validate(val_loader, model, criterion)
        return
    if args.mode == 'train':
        for epoch in range(args.start_epoch, args.epochs):
            # Manual step-wise learning-rate schedule; a fresh Adam optimizer
            # is created each epoch with the scheduled rate.
            lr = args.lr
            if (epoch >= 80 and epoch < 120):
                lr = 0.1 * args.lr
            elif (epoch >= 120 and epoch < 160):
                lr = 0.01 * args.lr
            elif (epoch >= 160 and epoch < 180):
                lr = 0.001 * args.lr
            elif (epoch >= 180):
                lr = 0.0005 * args.lr
            optimizer = torch.optim.Adam(model.parameters(), lr)
            # train for one epoch
            print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
            train(args, train_loader, model, criterion, optimizer, epoch,
                  tb_writer)
            prec1 = validate(args, val_loader, model, criterion, epoch,
                             tb_writer)
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            if is_best:
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'state_dict': model.state_dict(),
                        'best_prec1': best_prec1,
                    },
                    is_best,
                    filename=os.path.join(
                        args.save_dir,
                        'bayesian_{}_cifar.pth'.format(args.arch)))
    elif args.mode == 'test':
        checkpoint_file = args.save_dir + '/bayesian_{}_cifar.pth'.format(
            args.arch)
        if torch.cuda.is_available():
            checkpoint = torch.load(checkpoint_file)
            device="cuda"
        else:
            checkpoint = torch.load(checkpoint_file,
                                    map_location=torch.device('cpu'))
            device="cpu"
        print(resnet.__dict__[args.arch]())
        print(model.state_dict().keys())
        print(checkpoint['state_dict'].keys())
        # exit()
        model.load_state_dict(checkpoint['state_dict'])
        # print(model)
        evaluate(args, model, val_loader, n_samples=args.num_monte_carlo)
        # Adversarial attacks
        test_inputs = 100
        n_samples_list = [1,5]
        # n_samples_list = [10,50,100]
        dataset = Subset(val_loader.dataset, range(test_inputs))
        images, labels = ([],[])
        for image, label in dataset:
            images.append(image)
            labels.append(label)
        images = torch.stack(images)
        bay_attack=[]
        for n_samples in n_samples_list:
            print(f"\nn_samples = {n_samples}")
            # attacks = attack(model, dataset, n_samples=n_samples)
            # save_attack(inputs=images, attacks=attacks, method='fgsm', model_savedir=args.save_dir, n_samples=n_samples)
            # Attacks are expected to have been precomputed and saved.
            attacks = load_attack(method='fgsm', model_savedir=args.save_dir, n_samples=n_samples)
            evaluate(args, model, DataLoader(dataset=list(zip(attacks, labels))), n_samples=n_samples)
            bay_attack.append(attacks)
        # LRP
        rule="epsilon"
        learnable_layers_idxs=[38]#[0,2,14,26,38]
        for layer_idx in learnable_layers_idxs:
            print(f"\nlayer_idx = {layer_idx}")
            savedir = get_lrp_savedir(model_savedir=args.save_dir, attack_method='fgsm', layer_idx=layer_idx, rule=rule)
            bay_lrp=[]
            bay_attack_lrp=[]
            for samp_idx, n_samples in enumerate(n_samples_list):
                # LRP on clean images and on the matching adversarial set.
                bay_lrp.append(compute_lrp(images, model, rule=rule, n_samples=n_samples, device=device))
                bay_attack_lrp.append(compute_lrp(bay_attack[samp_idx], model,
                                                  device=device, rule=rule, n_samples=n_samples))
                save_to_pickle(bay_lrp[samp_idx], path=savedir, filename="bay_lrp_samp="+str(n_samples))
                save_to_pickle(bay_attack_lrp[samp_idx], path=savedir, filename="bay_attack_lrp_samp="+str(n_samples))
def train(args,
          train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          tb_writer=None):
    """Train the Bayesian model for one epoch on the ELBO loss.

    Each batch is forwarded args.num_mc times; the logits and KL terms are
    averaged over the MC runs before computing
    loss = cross_entropy + kl / len_trainset.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if torch.cuda.is_available():
            target = target.cuda()
            input_var = input.cuda()
            target_var = target
        else:
            target = target.cpu()
            input_var = input.cpu()
            target_var = target
        if args.half:
            input_var = input_var.half()
        # compute output
        output_ = []
        kl_ = []
        for mc_run in range(args.num_mc):
            output, kl = model(input_var)
            output_.append(output)
            kl_.append(kl)
        output = torch.mean(torch.stack(output_), dim=0)
        kl = torch.mean(torch.stack(kl_), dim=0)
        cross_entropy_loss = criterion(output, target_var)
        # NOTE(review): len_trainset is a module-level global (50000).
        scaled_kl = kl / len_trainset
        #ELBO loss
        loss = cross_entropy_loss + scaled_kl
        '''
        #another way of computing gradients with multiple MC samples
        cross_entropy_loss = 0
        scaled_kl = 0
        for mc_run in range(args.num_mc):
            output, kl = model(input_var)
            cross_entropy_loss += criterion(output, target_var)
            scaled_kl += (kl/len_trainset)
        cross_entropy_loss = cross_entropy_loss/args.num_mc
        scaled_kl = scaled_kl/args.num_mc
        loss = cross_entropy_loss + scaled_kl
        #end
        '''
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch,
                      i,
                      len(train_loader),
                      batch_time=batch_time,
                      data_time=data_time,
                      loss=losses,
                      top1=top1))
    if tb_writer is not None:
        # Logged once per epoch with the last batch's values.
        tb_writer.add_scalar('train/cross_entropy_loss',
                             cross_entropy_loss.item(), epoch)
        tb_writer.add_scalar('train/kl_div', scaled_kl.item(), epoch)
        tb_writer.add_scalar('train/elbo_loss', loss.item(), epoch)
        tb_writer.add_scalar('train/accuracy', prec1.item(), epoch)
        tb_writer.flush()
def validate(args, val_loader, model, criterion, epoch, tb_writer=None):
    """Run one validation pass of the Bayesian model on the ELBO loss.

    Performs args.num_mc MC forward passes per batch (as a Python loop, not
    batch replication), averaging logits and KL before computing the loss.

    Returns:
        Average top-1 precision (percentage) over the validation set.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input_var = input.cuda()
                target_var = target.cuda()
            else:
                target = target.cpu()
                input_var = input.cpu()
                target_var = target.cpu()
            if args.half:
                input_var = input_var.half()
            # compute output
            output_ = []
            kl_ = []
            for mc_run in range(args.num_mc):
                output, kl = model(input_var)
                output_.append(output)
                kl_.append(kl)
            output = torch.mean(torch.stack(output_), dim=0)
            kl = torch.mean(torch.stack(kl_), dim=0)
            cross_entropy_loss = criterion(output, target_var)
            # NOTE(review): len_trainset is a module-level global (50000).
            scaled_kl = kl / len_trainset
            #ELBO loss
            loss = cross_entropy_loss + scaled_kl
            output = output.float()
            loss = loss.float()
            # measure accuracy and record loss
            prec1 = accuracy(output.data, target)[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                          i,
                          len(val_loader),
                          batch_time=batch_time,
                          loss=losses,
                          top1=top1))
            if tb_writer is not None:
                tb_writer.add_scalar('val/cross_entropy_loss',
                                     cross_entropy_loss.item(), epoch)
                tb_writer.add_scalar('val/kl_div', scaled_kl.item(), epoch)
                tb_writer.add_scalar('val/elbo_loss', loss.item(), epoch)
                tb_writer.add_scalar('val/accuracy', prec1.item(), epoch)
                tb_writer.flush()
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    return top1.avg
def evaluate(args, model, val_loader, n_samples):
    """MC-evaluate the Bayesian model and dump predictive probabilities.

    Runs n_samples forward passes per batch, softmaxes and averages them to
    a predictive mean, prints accuracy/throughput, and saves per-sample
    probabilities and labels as .npy files under ./bayesian_torch/.
    """
    pred_probs_mc = []
    test_loss = 0
    correct = 0
    output_list = []
    labels_list = []
    model.eval()
    with torch.no_grad():
        begin = time.time()
        for data, target in val_loader:
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            else:
                data, target = data.cpu(), target.cpu()
            output_mc = []
            for mc_run in range(n_samples):
                output, _ = model.forward(data)
                output_mc.append(output)
            output_ = torch.stack(output_mc)
            output_list.append(output_)
            labels_list.append(target)
        end = time.time()
        # print(len(val_loader.dataset))
        print("inference throughput: ", len(val_loader.dataset) / (end - begin),
              " images/s")
        output = torch.stack(output_list)
        # (batches, mc, b, C) -> (mc, batches, b, C) -> (mc, N, C).
        output = output.permute(1, 0, 2, 3)
        output = output.contiguous().view(n_samples, len(val_loader.dataset), -1)
        output = torch.nn.functional.softmax(output, dim=2)
        labels = torch.cat(labels_list)
        pred_mean = output.mean(dim=0)
        Y_pred = torch.argmax(pred_mean, axis=1)
        print('Test accuracy:',
              (Y_pred.data.cpu().numpy() == labels.data.cpu().numpy()).mean() *
              100)
        np.save('./bayesian_torch/probs_cifar_mc.npy', output.data.cpu().numpy())
        np.save('./bayesian_torch/cifar_test_labels_mc.npy', labels.data.cpu().numpy())
def attack(model, dataset, n_samples):
    """Craft FGSM adversarial examples against the Bayesian model.

    For each (image, label) pair, runs the project's run_attack() n_samples
    times and averages the resulting perturbed images over the posterior
    samples; each perturbed image is clamped to [0, 1].

    Returns:
        Tensor stacking one averaged adversarial image per dataset element.
    """
    model.eval()
    adversarial_attacks = []
    for data, target in tqdm(dataset):
        data = data.unsqueeze(0)
        target = torch.tensor(target).unsqueeze(0)
        if torch.cuda.is_available():
            data, target = data.cuda(), target.cuda()
            device = 'cuda'
        else:
            data, target = data.cpu(), target.cpu()
            device = 'cpu'
        samples_attacks=[]
        for idx in list(range(n_samples)):
            # random.seed(idx)
            perturbed_image = run_attack(net=model, image=data, label=target, method='fgsm',
                                         device=device, hyperparams=None).squeeze()
            perturbed_image = torch.clamp(perturbed_image, 0., 1.)
            samples_attacks.append(perturbed_image)
        # Average the perturbed images across posterior samples.
        adversarial_attacks.append(torch.stack(samples_attacks).mean(0))
    return torch.stack(adversarial_attacks)
def compute_lrp(x_test, network, rule, device, n_samples=None, avg_posterior=False):
    """Compute LRP explanations for each input, averaged over MC samples.

    For every input, runs n_samples forward passes with explain=True,
    backpropagates the argmax-class score, and takes the input gradient as
    the relevance map; maps are averaged over the posterior samples.

    NOTE(review): `avg_posterior` is accepted but never used in this body.
    NOTE(review): `copy` is not imported explicitly in this module —
    presumably re-exported via `from utils.lrp import *`; confirm.

    Returns:
        Tensor of stacked per-input mean LRP maps.
    """
    x_test = x_test.to(device)
    explanations = []
    for x in tqdm(x_test):
        post_explanations = []
        for j in range(n_samples):
            # Forward pass
            x_copy = copy.deepcopy(x.detach()).unsqueeze(0)
            x_copy.requires_grad = True
            y_hat = network.forward(x_copy, explain=True, rule=rule)[0]
            # Choose argmax
            y_hat = y_hat[torch.arange(x_copy.shape[0]), y_hat.max(1)[1]]
            y_hat = y_hat.sum()
            # Backward pass (compute explanation)
            y_hat.backward()
            lrp = x_copy.grad.squeeze(1)
            post_explanations.append(lrp)
        explanations.append(torch.stack(post_explanations).mean(0))
    return torch.stack(explanations)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """
    Save the training model state dict (plus metadata) to `filename`.

    NOTE(review): `is_best` is accepted but unused here — callers gate on it
    before invoking this function, so every call unconditionally overwrites
    `filename`.
    """
    torch.save(state, filename)
class AverageMeter(object):
    """Tracks the most recent value and a weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1, )):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) score/logit tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values for which to report precision@k.

    Returns:
        List of scalar tensors, one per k, each the percentage of samples
        whose true label appears in the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # reshape(-1) instead of view(-1): `correct` is non-contiguous after
        # the transpose above, and view() raises on non-contiguous tensors
        # (for k > 1) in recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
if __name__ == '__main__':
main()
| 24,193 | 33.31773 | 122 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/examples/main_bayesian_imagenet.py | '''
code adapted from PyTorch examples
'''
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from torch.nn import functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models.bayesian.resnet_variational_large as resnet
import models.deterministic.resnet_large as det_resnet
from torchsummary import summary
from utils import util
import csv
import numpy as np
from utils.util import get_rho
from torch.utils.tensorboard import SummaryWriter
# Decode images with the accimage backend instead of PIL (requires accimage).
torchvision.set_image_backend('accimage')
# Lowercase, public, callable 'resnet*' constructors exported by the Bayesian
# resnet module -- these become the valid values of --arch.
model_names = sorted(
    name for name in resnet.__dict__
    if name.islower() and not name.startswith("__")
    and name.startswith("resnet") and callable(resnet.__dict__[name]))
# --------------------------- command-line options ---------------------------
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data',
                    metavar='DIR',
                    default='data/imagenet',
                    help='path to dataset')
parser.add_argument('-a',
                    '--arch',
                    metavar='ARCH',
                    default='resnet50',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: resnet50)')
parser.add_argument('-j',
                    '--workers',
                    default=8,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
                    default=90,
                    type=int,
                    metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch',
                    default=0,
                    type=int,
                    metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--val_batch_size', default=1000, type=int)
parser.add_argument('-b',
                    '--batch-size',
                    default=32,
                    type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                    'batch size of all GPUs on the current node when '
                    'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
                    '--learning-rate',
                    default=0.001,
                    type=float,
                    metavar='LR',
                    help='initial learning rate',
                    dest='lr')
parser.add_argument('--momentum',
                    default=0.9,
                    type=float,
                    metavar='M',
                    help='momentum')
parser.add_argument('--wd',
                    '--weight-decay',
                    default=1e-4,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p',
                    '--print-freq',
                    default=10,
                    type=int,
                    metavar='N',
                    help='print frequency (default: 10)')
parser.add_argument('--resume',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
                    '--evaluate',
                    dest='evaluate',
                    action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained',
                    dest='pretrained',
                    action='store_true',
                    default=True,
                    help='use pre-trained model')
# ------------------- distributed-training related options -------------------
parser.add_argument('--world-size',
                    default=-1,
                    type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank',
                    default=-1,
                    type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url',
                    default='tcp://224.66.41.62:23456',
                    type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend',
                    default='nccl',
                    type=str,
                    help='distributed backend')
parser.add_argument('--seed',
                    default=None,
                    type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed',
                    action='store_true',
                    help='Use multi-processing distributed training to launch '
                    'N processes per node, which has N GPUs. This is the '
                    'fastest way to use PyTorch for either single node or '
                    'multi node data parallel training')
parser.add_argument('--mode', type=str, required=True, help='train | test')
parser.add_argument('--save-dir',
                    dest='save_dir',
                    help='The directory used to save the trained models',
                    default='./checkpoint/bayesian',
                    type=str)
# NOTE(review): argparse `type=bool` converts any non-empty string to True
# (bool('False') is True), so `--tensorboard False` / `--moped False` do not
# disable these flags -- confirm intended CLI behavior.
parser.add_argument(
    '--tensorboard',
    type=bool,
    default=True,
    metavar='N',
    help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
    '--log_dir',
    type=str,
    default='./logs/imagenet/bayesian',
    metavar='N',
    help='use tensorboard for logging and visualization of training progress')
parser.add_argument('--num_monte_carlo',
                    type=int,
                    default=20,
                    metavar='N',
                    help='number of Monte Carlo samples')
parser.add_argument(
    '--moped',
    type=bool,
    default=True,
    help='set prior and initialize approx posterior with Empirical Bayes')
parser.add_argument('--delta',
                    type=float,
                    default=0.2,
                    help='delta value for variance scaling in MOPED')
# Shared mutable state used by the training/validation loops below.
best_acc1 = 0
# Default ImageNet-1k split sizes; len_trainset scales the KL term in the
# ELBO loss computed in train()/validate().
len_trainset = 1281167
len_valset = 50000
def MOPED_layer(layer, det_layer, delta):
    """
    Set the priors and initialize surrogate posteriors of Bayesian NN with Empirical Bayes

    MOPED (Model Priors with Empirical Bayes using Deterministic DNN)

    Reference:
    [1] Ranganath Krishnan, Mahesh Subedar, Omesh Tickoo.
        Specifying Weight Priors in Bayesian Deep Neural Networks with Empirical Bayes. AAAI 2020.

    Args:
        layer: Bayesian (or plain torch.nn) layer, modified in place.
        det_layer: corresponding layer of the pretrained deterministic model.
        delta: variance-scaling factor used by get_rho to derive the initial
            posterior scale from the deterministic weights.
    """
    if (str(layer) == 'Conv2dReparameterization()'):
        # Bayesian conv layer: priors from the deterministic weights ...
        print(str(layer))
        layer.prior_weight_mu = det_layer.weight.data
        if layer.prior_bias_mu is not None:
            layer.prior_bias_mu = det_layer.bias.data
        # ... and surrogate posteriors centered on the same weights.
        layer.mu_kernel.data = det_layer.weight.data
        layer.rho_kernel.data = get_rho(det_layer.weight.data, delta)
        if layer.mu_bias is not None:
            layer.mu_bias.data = det_layer.bias.data
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
    elif (isinstance(layer, nn.Conv2d)):
        # Plain (deterministic) conv layer: copy weights/bias directly.
        print(str(layer))
        layer.weight.data = det_layer.weight.data
        if layer.bias is not None:
            # Fixed: was `det_layer.bias.data2`, which raised AttributeError
            # whenever this branch ran with a biased conv layer.
            layer.bias.data = det_layer.bias.data
    elif (str(layer) == 'LinearReparameterization()'):
        # Bayesian linear layer: same treatment as the Bayesian conv above.
        print(str(layer))
        layer.prior_weight_mu = det_layer.weight.data
        if layer.prior_bias_mu is not None:
            layer.prior_bias_mu = det_layer.bias.data
        # initialize the surrogate posteriors
        layer.mu_weight.data = det_layer.weight.data
        layer.rho_weight.data = get_rho(det_layer.weight.data, delta)
        if layer.mu_bias is not None:
            layer.mu_bias.data = det_layer.bias.data
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
    elif str(layer).startswith('Batch'):
        # BatchNorm: copy affine parameters and running statistics verbatim.
        print(str(layer))
        layer.weight.data = det_layer.weight.data
        if layer.bias is not None:
            layer.bias.data = det_layer.bias.data
        layer.running_mean.data = det_layer.running_mean.data
        layer.running_var.data = det_layer.running_var.data
        layer.num_batches_tracked.data = det_layer.num_batches_tracked.data
def main():
    """Parse CLI arguments and launch training: either one spawned process
    per GPU (multiprocessing-distributed mode) or a single worker call."""
    args = parser.parse_args()
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    # Distributed iff more than one node or multiprocessing was requested.
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    if torch.cuda.is_available():
        ngpus_per_node = torch.cuda.device_count()
    # NOTE(review): on a machine without CUDA, ngpus_per_node is never bound,
    # so the main_worker(...) call below raises NameError -- confirm this
    # launcher is intended to be CUDA-only.
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: build model/data, then train or test.

    Args:
        gpu: GPU index for this process (None for the default device).
        ngpus_per_node: number of GPUs on this node (used for ranking).
        args: parsed command-line namespace.
    """
    global best_acc1
    # Fix: without this declaration, the assignment below created a local
    # variable and train()/validate() kept scaling the KL term by the
    # module-level default (1281167) regardless of the actual dataset size.
    global len_trainset
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model = torch.nn.DataParallel(resnet.__dict__[args.arch]())
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    # define loss function (criterion) and optimizer
    if torch.cuda.is_available():
        criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    else:
        criterion = nn.CrossEntropyLoss().cpu()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    tb_writer = None
    if args.tensorboard:
        logger_dir = os.path.join(args.log_dir, 'tb_logger')
        if not os.path.exists(logger_dir):
            os.makedirs(logger_dir)
        tb_writer = SummaryWriter(logger_dir)
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    print('len trainset: ', len(train_dataset))
    print('len valset: ', len(val_dataset))
    len_trainset = len(train_dataset)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.val_batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    if args.evaluate:
        # Fix: validate() takes (loader, model, criterion, epoch, args,
        # tb_writer); the old 4-argument call raised TypeError. epoch and
        # tb_writer are not used by validate's body, so neutral values are
        # passed for a one-off evaluation.
        validate(val_loader, model, criterion, args.start_epoch, args,
                 tb_writer)
        return
    if args.mode == 'train':
        if (args.moped):
            # Initialize priors/posteriors from a pretrained deterministic
            # model (MOPED), walking both module trees in lock-step.
            print("MOPED enabled")
            det_model = torch.nn.DataParallel(
                det_resnet.__dict__[args.arch](pretrained=True))
            det_model.cuda()
            for (idx_1, layer_1), (det_idx_1, det_layer_1) in zip(
                    enumerate(model.children()),
                    enumerate(det_model.children())):
                MOPED_layer(layer_1, det_layer_1, args.delta)
                for (idx_2, layer_2), (det_idx_2, det_layer_2) in zip(
                        enumerate(layer_1.children()),
                        enumerate(det_layer_1.children())):
                    MOPED_layer(layer_2, det_layer_2, args.delta)
                    for (idx_3, layer_3), (det_idx_3, det_layer_3) in zip(
                            enumerate(layer_2.children()),
                            enumerate(det_layer_2.children())):
                        MOPED_layer(layer_3, det_layer_3, args.delta)
                        for (idx_4, layer_4), (det_idx_4, det_layer_4) in zip(
                                enumerate(layer_3.children()),
                                enumerate(det_layer_3.children())):
                            MOPED_layer(layer_4, det_layer_4, args.delta)
                            for (idx_5,
                                 layer_5), (det_idx_5, det_layer_5) in zip(
                                     enumerate(layer_4.children()),
                                     enumerate(det_layer_4.children())):
                                MOPED_layer(layer_5, det_layer_5, args.delta)
                                for (idx_6,
                                     layer_6), (det_idx_6, det_layer_6) in zip(
                                         enumerate(layer_5.children()),
                                         enumerate(det_layer_5.children())):
                                    MOPED_layer(layer_6, det_layer_6,
                                                args.delta)
            model.state_dict()  # no-op; kept from the original flow
            del det_model
        for epoch in range(args.start_epoch, args.epochs):
            if args.distributed:
                train_sampler.set_epoch(epoch)
            adjust_learning_rate(optimizer, epoch, args)
            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, args,
                  tb_writer)
            # evaluate on validation set
            acc1 = validate(val_loader, model, criterion, epoch, args,
                            tb_writer)
            # remember best acc@1 and save checkpoint
            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)
            if is_best:
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'best_acc1': best_acc1,
                        'optimizer': optimizer.state_dict(),
                    },
                    is_best,
                    filename=os.path.join(
                        args.save_dir,
                        'bayesian_{}_imagenet.pth'.format(args.arch)))
    elif args.mode == 'test':
        checkpoint_file = args.save_dir + '/bayesian_{}_imagenet.pth'.format(
            args.arch)
        if torch.cuda.is_available():
            checkpoint = torch.load(checkpoint_file)
        else:
            checkpoint = torch.load(checkpoint_file,
                                    map_location=torch.device('cpu'))
        print('load checkpoint.')
        state_dict = checkpoint['state_dict']
        model.load_state_dict(state_dict)
        # Evaluate on test dataset
        test_acc = evaluate(model, val_loader, args)
        print('******Test data***********\n')
        print('test_acc: ', test_acc)
def train(train_loader, model, criterion, optimizer, epoch, args, tb_writer):
    """Run one training epoch with the ELBO objective
    (cross-entropy + KL / len_trainset), one SGD step per batch, console
    progress every `print_freq` batches and per-epoch TensorBoard logging.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    global opt_th  # NOTE(review): opt_th is never used in this function.
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # compute output; the model returns (logits, kl_divergence)
        output, kl = model(images)
        cross_entropy_loss = criterion(output, target)
        # NOTE(review): kl.data[0] detaches the KL term from the autograd
        # graph, so it contributes to the reported loss but not to the
        # gradients; it also assumes `kl` is at least 1-d (e.g. the
        # DataParallel gather) and uses only its first element -- confirm
        # this is intended.
        scaled_kl = (kl.data[0] / len_trainset)
        elbo_loss = cross_entropy_loss + scaled_kl
        loss = cross_entropy_loss + scaled_kl
        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.mean().backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
    # Logged once per epoch, using the values from the last batch only.
    if tb_writer is not None:
        tb_writer.add_scalar('train/cross_entropy_loss',
                             cross_entropy_loss.item(), epoch)
        tb_writer.add_scalar('train/kl_div', scaled_kl.item(), epoch)
        tb_writer.add_scalar('train/elbo_loss', elbo_loss.item(), epoch)
        tb_writer.add_scalar('train/loss', loss.item(), epoch)
        tb_writer.add_scalar('train/accuracy', acc1.item(), epoch)
        tb_writer.flush()
def validate(val_loader, model, criterion, epoch, args, tb_writer):
    """Evaluate `model` on the validation set and return average top-1
    accuracy. The loss reported is the same ELBO as in train().

    Note: `epoch` and `tb_writer` are accepted for signature symmetry with
    train() but are not used in this body.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    # NOTE(review): these three accumulators are never filled or read.
    preds_list = []
    labels_list = []
    unc_list = []
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # compute output; the model returns (logits, kl_divergence)
            output, kl = model(images)
            cross_entropy_loss = criterion(output, target)
            scaled_kl = (kl.data[0] / len_trainset)
            elbo_loss = cross_entropy_loss + scaled_kl
            loss = cross_entropy_loss + scaled_kl
            output = output.float()
            loss = loss.float()
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                    top5=top5))
    return top1.avg
def evaluate(model, val_loader, args):
    """Monte-Carlo evaluation: average `args.num_monte_carlo` stochastic
    softmax passes per batch, report throughput and accuracy, and dump the
    mean predictive probabilities and labels to `args.log_dir`.

    Returns:
        Fraction of correctly classified validation samples (float).
    """
    # NOTE(review): pred_probs_mc, test_loss and correct are never used.
    pred_probs_mc = []
    test_loss = 0
    correct = 0
    with torch.no_grad():
        pred_probs_mc = []
        output_list = []
        label_list = []
        begin = time.time()
        for batch_idx, (data, target) in enumerate(val_loader):
            #print('Batch idx {}, data shape {}, target shape {}'.format(batch_idx, data.shape, target.shape))
            if torch.cuda.is_available():
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            else:
                # NOTE(review): Tensor.cpu() does not take a non_blocking
                # keyword, so this branch likely raises TypeError on a
                # CUDA-less machine -- confirm whether CPU inference is
                # actually supported.
                data, target = data.cpu(non_blocking=True), target.cpu(
                    non_blocking=True)
            output_mc = []
            output_mc_np = []
            for mc_run in range(args.num_monte_carlo):
                model.eval()
                output, _ = model.forward(data)
                pred_probs = torch.nn.functional.softmax(output, dim=1)
                output_mc_np.append(pred_probs.cpu().data.numpy())
                # NOTE(review): this mean is recomputed on every MC
                # iteration (quadratic work); the final value is the same
                # as computing it once after the loop.
                output_mc = torch.from_numpy(
                    np.mean(np.asarray(output_mc_np), axis=0))
            output_list.append(output_mc)
            label_list.append(target)
        end = time.time()
        # Throughput assumes the full validation set (len_valset) was seen.
        print('inference throughput: ', len_valset / (end - begin),
              ' images/s')
        # NOTE(review): .cuda() is called unconditionally here, which
        # contradicts the CPU branch above -- verify on a CUDA-less setup.
        labels = torch.cat(label_list).cuda()
        probs = torch.cat(output_list).cuda()
        target_labels = labels.data.cpu().numpy()
        pred_mean = probs.data.cpu().numpy()
        Y_pred = np.argmax(pred_mean, axis=1)
        test_acc = (Y_pred == target_labels).mean()
        print('Test accuracy:', test_acc * 100)
        np.save(args.log_dir + '/bayesian_imagenet_probs.npy', pred_mean)
        np.save(args.log_dir + '/bayesian_imagenet_labels.npy', target_labels)
    return test_acc
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist *state* to *filename*; mirror it to 'model_best.pth.tar'
    when this checkpoint is the best seen so far."""
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the current value, sum, count and running average of a metric
    and renders itself as 'name current (average)'."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        # Zero out every statistic.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in *val*, observed *n* times, and refresh the average."""
        self.val = val
        self.sum, self.count = self.sum + val * n, self.count + n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class ProgressMeter(object):
    """Pretty-prints a '[batch/total]' counter followed by a list of
    AverageMeter summaries, tab-separated."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one progress line for batch index *batch*."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        parts.extend(str(meter) for meter in self.meters)
        print('\t'.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        """Build e.g. '[{:3d}/100]' so the counter is right-aligned to the
        width of the total batch count."""
        num_digits = len(str(num_batches // 1))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    decayed = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed
def accuracy(output, target, topk=(1, )):
    """Computes the accuracy over the k top predictions for the specified
    values of k.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, each the top-k accuracy in
        percent (keepdim is preserved for .item()/indexing by callers).
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): slices of the transposed
            # prediction matrix can be non-contiguous, in which case view()
            # raises a RuntimeError for k > 1. reshape() is always safe.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# Script entry point: parse args and launch training/testing.
if __name__ == '__main__':
    main()
| 26,624 | 36.082173 | 110 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/examples/main_bayesian_flipout_imagenet.py | '''
code adapted from PyTorch examples
'''
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from torch.nn import functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models.bayesian.resnet_flipout_large as resnet
import models.deterministic.resnet_large as det_resnet
from torchsummary import summary
from utils import util
import csv
import numpy as np
from utils.util import get_rho
from torch.utils.tensorboard import SummaryWriter
# Decode images with the accimage backend instead of PIL (requires accimage).
torchvision.set_image_backend('accimage')
# Lowercase, public, callable 'resnet*' constructors exported by the Bayesian
# flipout resnet module -- these become the valid values of --arch.
model_names = sorted(
    name for name in resnet.__dict__
    if name.islower() and not name.startswith("__")
    and name.startswith("resnet") and callable(resnet.__dict__[name]))
# --------------------------- command-line options ---------------------------
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data',
                    metavar='DIR',
                    default='data/imagenet',
                    help='path to dataset')
parser.add_argument('-a',
                    '--arch',
                    metavar='ARCH',
                    default='resnet50',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                    ' (default: resnet50)')
parser.add_argument('-j',
                    '--workers',
                    default=8,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
                    default=90,
                    type=int,
                    metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch',
                    default=0,
                    type=int,
                    metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('--val_batch_size', default=1000, type=int)
parser.add_argument('-b',
                    '--batch-size',
                    default=32,
                    type=int,
                    metavar='N',
                    help='mini-batch size (default: 256), this is the total '
                    'batch size of all GPUs on the current node when '
                    'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
                    '--learning-rate',
                    default=0.001,
                    type=float,
                    metavar='LR',
                    help='initial learning rate',
                    dest='lr')
parser.add_argument('--momentum',
                    default=0.9,
                    type=float,
                    metavar='M',
                    help='momentum')
parser.add_argument('--wd',
                    '--weight-decay',
                    default=1e-4,
                    type=float,
                    metavar='W',
                    help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p',
                    '--print-freq',
                    default=10,
                    type=int,
                    metavar='N',
                    help='print frequency (default: 10)')
parser.add_argument('--resume',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
                    '--evaluate',
                    dest='evaluate',
                    action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained',
                    dest='pretrained',
                    action='store_true',
                    default=True,
                    help='use pre-trained model')
# ------------------- distributed-training related options -------------------
parser.add_argument('--world-size',
                    default=-1,
                    type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank',
                    default=-1,
                    type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url',
                    default='tcp://224.66.41.62:23456',
                    type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend',
                    default='nccl',
                    type=str,
                    help='distributed backend')
parser.add_argument('--seed',
                    default=None,
                    type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed',
                    action='store_true',
                    help='Use multi-processing distributed training to launch '
                    'N processes per node, which has N GPUs. This is the '
                    'fastest way to use PyTorch for either single node or '
                    'multi node data parallel training')
parser.add_argument('--mode', type=str, required=True, help='train | test')
parser.add_argument('--save-dir',
                    dest='save_dir',
                    help='The directory used to save the trained models',
                    default='./checkpoint/bayesian',
                    type=str)
# NOTE(review): argparse `type=bool` converts any non-empty string to True
# (bool('False') is True), so `--tensorboard False` / `--moped False` do not
# disable these flags -- confirm intended CLI behavior.
parser.add_argument(
    '--tensorboard',
    type=bool,
    default=True,
    metavar='N',
    help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
    '--log_dir',
    type=str,
    default='./logs/imagenet/bayesian',
    metavar='N',
    help='use tensorboard for logging and visualization of training progress')
parser.add_argument('--num_monte_carlo',
                    type=int,
                    default=50,
                    metavar='N',
                    help='number of Monte Carlo samples')
parser.add_argument(
    '--moped',
    type=bool,
    default=True,
    help='set prior and initialize approx posterior with Empirical Bayes')
parser.add_argument('--delta',
                    type=float,
                    default=0.2,
                    help='delta value for variance scaling in MOPED')
# Shared mutable state used by the training/validation loops below.
best_acc1 = 0
# Default ImageNet-1k split sizes; len_trainset scales the KL term in the
# ELBO loss computed in train()/validate().
len_trainset = 1281167
len_valset = 50000
num_classes = 1000
def MOPED_layer(layer, det_layer, delta):
    """
    Set the priors and initialize surrogate posteriors of Bayesian NN with Empirical Bayes

    MOPED (Model Priors with Empirical Bayes using Deterministic DNN)

    Reference:
    [1] Ranganath Krishnan, Mahesh Subedar, Omesh Tickoo.
        Specifying Weight Priors in Bayesian Deep Neural Networks with Empirical Bayes. AAAI 2020.
    [2] Ranganath Krishnan, Mahesh Subedar, Omesh Tickoo.
        Efficient Priors for Scalable Variational Inference in Bayesian Deep Neural Networks. ICCV workshops 2019.

    Args:
        layer: Bayesian (or plain torch.nn) layer, modified in place.
        det_layer: corresponding layer of the pretrained deterministic model.
        delta: variance-scaling factor used by get_rho to derive the initial
            posterior scale from the deterministic weights.
    """
    if (str(layer) == 'Conv2dFlipout()'
            or str(layer) == 'Conv2dReparameterization()'):
        # Bayesian conv layer: priors from the deterministic weights ...
        print(str(layer))
        layer.prior_weight_mu = det_layer.weight.data
        if layer.prior_bias_mu is not None:
            layer.prior_bias_mu = det_layer.bias.data
        # ... and surrogate posteriors centered on the same weights.
        layer.mu_kernel.data = det_layer.weight.data
        layer.rho_kernel.data = get_rho(det_layer.weight.data, delta)
        if layer.mu_bias is not None:
            layer.mu_bias.data = det_layer.bias.data
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
    elif (isinstance(layer, nn.Conv2d)):
        # Plain (deterministic) conv layer: copy weights/bias directly.
        print(str(layer))
        layer.weight.data = det_layer.weight.data
        if layer.bias is not None:
            # Fixed: was `det_layer.bias.data2`, which raised AttributeError
            # whenever this branch ran with a biased conv layer.
            layer.bias.data = det_layer.bias.data
    elif (str(layer) == 'LinearFlipout()'
          or str(layer) == 'LinearReparameterization()'):
        # Bayesian linear layer: same treatment as the Bayesian conv above.
        print(str(layer))
        layer.prior_weight_mu = det_layer.weight.data
        if layer.prior_bias_mu is not None:
            layer.prior_bias_mu = det_layer.bias.data
        # initialize the surrogate posteriors
        layer.mu_weight.data = det_layer.weight.data
        layer.rho_weight.data = get_rho(det_layer.weight.data, delta)
        if layer.mu_bias is not None:
            layer.mu_bias.data = det_layer.bias.data
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
    elif str(layer).startswith('Batch'):
        # BatchNorm: copy affine parameters and running statistics verbatim.
        print(str(layer))
        layer.weight.data = det_layer.weight.data
        if layer.bias is not None:
            layer.bias.data = det_layer.bias.data
        layer.running_mean.data = det_layer.running_mean.data
        layer.running_var.data = det_layer.running_var.data
        layer.num_batches_tracked.data = det_layer.num_batches_tracked.data
def main():
    """Parse CLI arguments and launch training: either one spawned process
    per GPU (multiprocessing-distributed mode) or a single worker call."""
    args = parser.parse_args()
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    # Distributed iff more than one node or multiprocessing was requested.
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    if torch.cuda.is_available():
        ngpus_per_node = torch.cuda.device_count()
    # NOTE(review): on a machine without CUDA, ngpus_per_node is never bound,
    # so the main_worker(...) call below raises NameError -- confirm this
    # launcher is intended to be CUDA-only.
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: build the flipout model/data, then train or test.

    Args:
        gpu: GPU index for this process (None for the default device).
        ngpus_per_node: number of GPUs on this node (used for ranking).
        args: parsed command-line namespace.
    """
    global best_acc1
    # Fix: without this declaration, the assignment below created a local
    # variable and train()/validate() kept scaling the KL term by the
    # module-level default (1281167) regardless of the actual dataset size.
    global len_trainset
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model = torch.nn.DataParallel(resnet.__dict__[args.arch]())
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    # define loss function (criterion) and optimizer
    if torch.cuda.is_available():
        criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    else:
        criterion = nn.CrossEntropyLoss().cpu()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    tb_writer = None
    if args.tensorboard:
        logger_dir = os.path.join(args.log_dir, 'tb_logger')
        if not os.path.exists(logger_dir):
            os.makedirs(logger_dir)
        tb_writer = SummaryWriter(logger_dir)
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    print('len trainset: ', len(train_dataset))
    print('len valset: ', len(val_dataset))
    len_trainset = len(train_dataset)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.val_batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    if args.evaluate:
        # Fix: validate() takes (loader, model, criterion, epoch, args,
        # tb_writer); the old 4-argument call raised TypeError. epoch and
        # tb_writer are not used by validate's body, so neutral values are
        # passed for a one-off evaluation.
        validate(val_loader, model, criterion, args.start_epoch, args,
                 tb_writer)
        return
    if args.mode == 'train':
        if (args.moped):
            # Initialize priors/posteriors from a pretrained deterministic
            # model (MOPED), walking both module trees in lock-step.
            print("MOPED enabled")
            det_model = torch.nn.DataParallel(
                det_resnet.__dict__[args.arch](pretrained=True))
            det_model.cuda()
            for (idx_1, layer_1), (det_idx_1, det_layer_1) in zip(
                    enumerate(model.children()),
                    enumerate(det_model.children())):
                MOPED_layer(layer_1, det_layer_1, args.delta)
                for (idx_2, layer_2), (det_idx_2, det_layer_2) in zip(
                        enumerate(layer_1.children()),
                        enumerate(det_layer_1.children())):
                    MOPED_layer(layer_2, det_layer_2, args.delta)
                    for (idx_3, layer_3), (det_idx_3, det_layer_3) in zip(
                            enumerate(layer_2.children()),
                            enumerate(det_layer_2.children())):
                        MOPED_layer(layer_3, det_layer_3, args.delta)
                        for (idx_4, layer_4), (det_idx_4, det_layer_4) in zip(
                                enumerate(layer_3.children()),
                                enumerate(det_layer_3.children())):
                            MOPED_layer(layer_4, det_layer_4, args.delta)
                            for (idx_5,
                                 layer_5), (det_idx_5, det_layer_5) in zip(
                                     enumerate(layer_4.children()),
                                     enumerate(det_layer_4.children())):
                                MOPED_layer(layer_5, det_layer_5, args.delta)
                                for (idx_6,
                                     layer_6), (det_idx_6, det_layer_6) in zip(
                                         enumerate(layer_5.children()),
                                         enumerate(det_layer_5.children())):
                                    MOPED_layer(layer_6, det_layer_6,
                                                args.delta)
            model.state_dict()  # no-op; kept from the original flow
            del det_model
        for epoch in range(args.start_epoch, args.epochs):
            if args.distributed:
                train_sampler.set_epoch(epoch)
            adjust_learning_rate(optimizer, epoch, args)
            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, args,
                  tb_writer)
            # evaluate on validation set
            acc1 = validate(val_loader, model, criterion, epoch, args,
                            tb_writer)
            # remember best acc@1 and save checkpoint
            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)
            if is_best:
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'best_acc1': best_acc1,
                        'optimizer': optimizer.state_dict(),
                    },
                    is_best,
                    filename=os.path.join(
                        args.save_dir,
                        'bayesian_flipout_{}_imagenet.pth'.format(args.arch)))
    elif args.mode == 'test':
        checkpoint_file = args.save_dir + '/bayesian_flipout_{}_imagenet.pth'.format(
            args.arch)
        checkpoint = torch.load(checkpoint_file)
        model.load_state_dict(checkpoint['state_dict'])
        evaluate(model, val_loader, args)
def train(train_loader, model, criterion, optimizer, epoch, args, tb_writer):
    """Run one training epoch for the Bayesian (variational) model.

    Minimizes the ELBO: cross-entropy plus the KL divergence returned by the
    model, scaled by the training-set size. Tracks timing, loss and
    top-1/top-5 accuracy; mirrors the last batch's scalars to TensorBoard
    when ``tb_writer`` is given.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    global opt_th  # NOTE(review): declared global but never read/assigned here
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        '''
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)
        '''
        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # compute output; the variational model returns (logits, kl)
        output, kl = model(images)
        cross_entropy_loss = criterion(output, target)
        # scale KL by the training-set size so the ELBO is per-example
        scaled_kl = (kl.data[0] / len_trainset)
        elbo_loss = cross_entropy_loss + scaled_kl  # logged to TensorBoard below
        loss = cross_entropy_loss + scaled_kl
        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.mean().backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
    # log once per epoch, using values from the final batch
    if tb_writer is not None:
        tb_writer.add_scalar('train/cross_entropy_loss',
                             cross_entropy_loss.item(), epoch)
        tb_writer.add_scalar('train/kl_div', scaled_kl.item(), epoch)
        tb_writer.add_scalar('train/elbo_loss', elbo_loss.item(), epoch)
        tb_writer.add_scalar('train/loss', loss.item(), epoch)
        tb_writer.add_scalar('train/accuracy', acc1.item(), epoch)
        tb_writer.flush()
def validate(val_loader, model, criterion, epoch, args, tb_writer):
    """Evaluate the Bayesian model on the validation set.

    Computes the ELBO-style loss (cross-entropy + KL / len_trainset) and
    top-1/top-5 accuracy over ``val_loader`` and returns the average top-1
    accuracy.

    NOTE(review): ``epoch`` and ``tb_writer`` are accepted but unused here.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    preds_list = []   # NOTE(review): never appended to below
    labels_list = []  # NOTE(review): never appended to below
    unc_list = []     # NOTE(review): never appended to below
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # compute output; the variational model returns (logits, kl)
            output, kl = model(images)
            cross_entropy_loss = criterion(output, target)
            scaled_kl = (kl.data[0] / len_trainset)
            elbo_loss = cross_entropy_loss + scaled_kl  # NOTE(review): unused
            loss = cross_entropy_loss + scaled_kl
            output = output.float()
            loss = loss.float()
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                top5=top5))
    return top1.avg
def evaluate(model, val_loader, args):
    """Monte-Carlo evaluation of the Bayesian model.

    Replicates each batch ``args.num_monte_carlo`` times, runs one stochastic
    forward pass over the replicated batch, then averages the softmax outputs
    over the MC dimension and prints test accuracy.

    NOTE(review): relies on module-level globals ``num_classes`` and
    ``len_valset`` defined elsewhere in this file — confirm they match the
    loaded dataset.
    """
    pred_probs_mc = []
    test_loss = 0  # NOTE(review): never updated below
    correct = 0    # NOTE(review): never updated below
    with torch.no_grad():
        pred_probs_mc = []
        output_list = []
        labels_list = []
        model.eval()
        begin = time.time()
        for batch_idx, (data, target) in enumerate(val_loader):
            #print('Batch idx {}, data shape {}, target shape {}'.format(batch_idx, data.shape, target.shape))
            if torch.cuda.is_available():
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            else:
                # NOTE(review): Tensor.cpu() takes no non_blocking kwarg in
                # current PyTorch — this CPU fallback may raise; verify.
                data, target = data.cpu(non_blocking=True), target.cpu(
                    non_blocking=True)
            # replicate the batch so a single forward pass yields
            # num_monte_carlo stochastic samples
            data = torch.cat([data for _ in range(args.num_monte_carlo)], 0)
            output_mc = []
            output, _ = model.forward(data)
            output_mc.append(output)
            output_ = torch.stack(output_mc)
            output_ = output_.reshape(args.num_monte_carlo, -1, num_classes)
            output_list.append(output_)
            labels_list.append(target)
        end = time.time()
        print("inference throughput: ", len_valset / (end - begin),
              " images/s")
        # (num_mc, len_valset, num_classes) predictive distribution
        output = torch.stack(output_list)
        output = output.permute(1, 0, 2, 3)
        output = output.contiguous().view(args.num_monte_carlo, len_valset, -1)
        output = torch.nn.functional.softmax(output, dim=2)
        labels = torch.cat(labels_list)
        # average over MC samples, then pick the arg-max class
        pred_mean = output.mean(dim=0)
        Y_pred = torch.argmax(pred_mean, axis=1)
        print('Test accuracy:',
              (Y_pred.data.cpu().numpy() == labels.data.cpu().numpy()).mean() *
              100)
        #np.save(args.log_dir+'/bayesian_flipout_imagenet_probs.npy', output.data.cpu().numpy())
        #np.save(args.log_dir+'/bayesian_flipout_imagenet_labels.npy', labels.data.cpu().numpy())
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; when *is_best* is true, also mirror
    it to ``model_best.pth.tar`` in the current working directory."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value and a running, count-weighted average."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count

    def __str__(self):
        template = ''.join(['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
        return template.format(**vars(self))
class ProgressMeter(object):
    """Pretty-prints a ``[batch/total]`` prefix followed by its meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for *batch*."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # Field width = number of digits in the total batch count. The
        # original computed len(str(num_batches // 1)); '// 1' is a no-op
        # (and would break on a float total anyway), so it is dropped.
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the base learning rate ``args.lr`` by a factor of 10 every 30
    epochs and write the result into every parameter group of *optimizer*."""
    decay_steps = epoch // 30
    new_lr = args.lr * (0.1 ** decay_steps)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def accuracy(output, target, topk=(1, )):
    """Compute top-k accuracy (in percent) for each k in *topk*.

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, holding the percentage of
        samples whose true label is among the top-k predictions.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): row slices of this transposed
            # layout may be non-contiguous, and .view() raises on such
            # tensors (the well-known pytorch/examples fix); reshape behaves
            # identically on contiguous input.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# Script entry point: run training/testing only when executed directly.
if __name__ == '__main__':
    main()
| 26,792 | 36.472727 | 114 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/examples/main_deterministic_imagenet.py | '''
code adapted from PyTorch examples
'''
import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
from torch.nn import functional as F
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models.deterministic.resnet_large as resnet
from torchsummary import summary
from utils import util
import csv
import numpy as np
from utils.util import get_rho
from torch.utils.tensorboard import SummaryWriter
torchvision.set_image_backend('accimage')
model_names = sorted(
name for name in resnet.__dict__
if name.islower() and not name.startswith("__")
and name.startswith("resnet") and callable(resnet.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data',
metavar='DIR',
default='data/imagenet',
help='path to dataset')
parser.add_argument('-a',
'--arch',
metavar='ARCH',
default='resnet50',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j',
'--workers',
default=8,
type=int,
metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
default=90,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--val_batch_size', default=1000, type=int)
parser.add_argument('-b',
'--batch-size',
default=32,
type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
'--learning-rate',
default=0.001,
type=float,
metavar='LR',
help='initial learning rate',
dest='lr')
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--wd',
'--weight-decay',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p',
'--print-freq',
default=10,
type=int,
metavar='N',
help='print frequency (default: 10)')
parser.add_argument('--resume',
default='',
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained',
dest='pretrained',
action='store_true',
default=True,
help='use pre-trained model')
parser.add_argument('--world-size',
default=-1,
type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank',
default=-1,
type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url',
default='tcp://224.66.41.62:23456',
type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend',
default='nccl',
type=str,
help='distributed backend')
parser.add_argument('--seed',
default=None,
type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed',
action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
parser.add_argument('--mode', type=str, required=True, help='train | test')
parser.add_argument('--save-dir',
dest='save_dir',
help='The directory used to save the trained models',
default='./checkpoint/deterministic',
type=str)
parser.add_argument(
'--tensorboard',
type=bool,
default=True,
metavar='N',
help='use tensorboard for logging and visualization of training progress')
parser.add_argument(
'--log_dir',
type=str,
default='./logs/imagenet/deterministic',
metavar='N',
help='use tensorboard for logging and visualization of training progress')
best_acc1 = 0
len_trainset = 1281167
len_valset = 50000
def main():
    """Entry point: parse CLI args, optionally seed RNGs for determinism,
    then launch one worker per GPU (multiprocessing-distributed) or a
    single ``main_worker`` call."""
    args = parser.parse_args()
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')
    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')
    if args.dist_url == "env://" and args.world_size == -1:
        args.world_size = int(os.environ["WORLD_SIZE"])
    args.distributed = args.world_size > 1 or args.multiprocessing_distributed
    if torch.cuda.is_available():
        ngpus_per_node = torch.cuda.device_count()
    # NOTE(review): without CUDA, ngpus_per_node is unbound below — confirm
    # this script is only run on GPU hosts.
    if args.multiprocessing_distributed:
        # Since we have ngpus_per_node processes per node, the total world_size
        # needs to be adjusted accordingly
        args.world_size = ngpus_per_node * args.world_size
        # Use torch.multiprocessing.spawn to launch distributed processes: the
        # main_worker process function
        mp.spawn(main_worker,
                 nprocs=ngpus_per_node,
                 args=(ngpus_per_node, args))
    else:
        # Simply call main_worker function
        main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker: builds the (DataParallel) ResNet, optionally
    restores a checkpoint, sets up the ImageNet loaders, then trains or
    tests according to ``args.mode``."""
    global best_acc1
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model = torch.nn.DataParallel(resnet.__dict__[args.arch](pretrained=True))
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    # define loss function (criterion) and optimizer
    if torch.cuda.is_available():
        criterion = nn.CrossEntropyLoss().cuda(args.gpu)
    else:
        criterion = nn.CrossEntropyLoss().cpu()
    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    tb_writer = None
    if args.tensorboard:
        logger_dir = os.path.join(args.log_dir, 'tb_logger')
        if not os.path.exists(logger_dir):
            os.makedirs(logger_dir)
        tb_writer = SummaryWriter(logger_dir)
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))
    print('len trainset: ', len(train_dataset))
    print('len valset: ', len(val_dataset))
    # NOTE(review): this binds a *local* len_trainset; the module-level
    # constant used elsewhere in this file is not updated — confirm intent.
    len_trainset = len(train_dataset)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None
    # NOTE(review): train_sampler is never passed to the DataLoader below,
    # so the distributed sampler (if any) goes unused.
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.val_batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)
    if args.evaluate:
        # NOTE(review): validate() is defined with 6 parameters; this 4-arg
        # call would raise TypeError — verify the --evaluate path.
        validate(val_loader, model, criterion, args)
        return
    if args.mode == 'train':
        for epoch in range(args.start_epoch, args.epochs):
            if args.distributed:
                train_sampler.set_epoch(epoch)
            adjust_learning_rate(optimizer, epoch, args)
            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, args,
                  tb_writer)
            # evaluate on validation set
            acc1 = validate(val_loader, model, criterion, epoch, args,
                            tb_writer)
            # remember best acc@1 and save checkpoint
            is_best = acc1 > best_acc1
            best_acc1 = max(acc1, best_acc1)
            if is_best:
                save_checkpoint(
                    {
                        'epoch': epoch + 1,
                        'arch': args.arch,
                        'state_dict': model.state_dict(),
                        'best_acc1': best_acc1,
                        'optimizer': optimizer.state_dict(),
                    },
                    is_best,
                    filename=os.path.join(args.save_dir,
                                          '{}_imagenet.pth'.format(args.arch)))
    elif args.mode == 'test':
        checkpoint_file = args.save_dir + '/{}_imagenet.pth'.format(args.arch)
        if os.path.exists(checkpoint_file):
            checkpoint = torch.load(checkpoint_file)
            print('load checkpoint.')
            model.load_state_dict(checkpoint['state_dict'])
        #Evaluate on test dataset
        test_acc = evaluate(model, val_loader, args)
        print('******Test data***********\n')
        print('test_acc: ', test_acc)
def train(train_loader, model, criterion, optimizer, epoch, args, tb_writer):
    """Run one standard (deterministic) training epoch with cross-entropy,
    tracking timing, loss and top-1/top-5 accuracy; logs the epoch's last
    batch scalars to TensorBoard when ``tb_writer`` is given."""
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    global opt_th  # NOTE(review): declared global but never used in this body
    progress = ProgressMeter(len(train_loader),
                             [batch_time, data_time, losses, top1, top5],
                             prefix="Epoch: [{}]".format(epoch))
    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)
        # compute output
        output = model(images)
        loss = criterion(output, target)
        output = output.float()
        loss = loss.float()
        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.mean().backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            progress.display(i)
    # log once per epoch, using values from the final batch
    if tb_writer is not None:
        tb_writer.add_scalar('train/loss', loss.item(), epoch)
        tb_writer.add_scalar('train/accuracy', acc1.item(), epoch)
        tb_writer.flush()
def validate(val_loader, model, criterion, epoch, args, tb_writer):
    """Evaluate the deterministic model on the validation set and return
    the average top-1 accuracy.

    NOTE(review): ``epoch`` and ``tb_writer`` are accepted but unused here.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, losses, top1, top5],
                             prefix='Test: ')
    # switch to evaluate mode
    model.eval()
    preds_list = []   # NOTE(review): never appended to below
    labels_list = []  # NOTE(review): never appended to below
    unc_list = []     # NOTE(review): never appended to below
    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
            # compute output
            output = model(images)
            loss = criterion(output, target)
            output = output.float()
            loss = loss.float()
            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                progress.display(i)
    print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1,
                                                                top5=top5))
    return top1.avg
def evaluate(model, val_loader, args):
    """Single-pass deterministic evaluation: saves softmax probabilities and
    labels as .npy files under ``args.log_dir`` and returns the test accuracy
    as a fraction in [0, 1]."""
    pred_probs_mc = []
    test_loss = 0  # NOTE(review): never updated below
    correct = 0    # NOTE(review): never updated below
    with torch.no_grad():
        output_list = []
        label_list = []
        model.eval()
        begin = time.time()
        for batch_idx, (data, target) in enumerate(val_loader):
            #print('Batch idx {}, data shape {}, target shape {}'.format(batch_idx, data.shape, target.shape))
            if torch.cuda.is_available():
                data, target = data.cuda(non_blocking=True), target.cuda(
                    non_blocking=True)
            else:
                # NOTE(review): Tensor.cpu() takes no non_blocking kwarg in
                # current PyTorch — this CPU fallback may raise; verify.
                data, target = data.cpu(non_blocking=True), target.cpu(
                    non_blocking=True)
            output = model.forward(data)
            pred_probs = torch.nn.functional.softmax(output, dim=1)
            output_list.append(pred_probs)
            label_list.append(target)
        end = time.time()
        print("inference throughput: ", len_valset / (end - begin),
              " images/s")
        labels = torch.cat(label_list).cuda()
        probs = torch.cat(output_list).cuda()
        target_labels = labels.data.cpu().numpy()
        pred_mean = probs.data.cpu().numpy()
        Y_pred = np.argmax(pred_mean, axis=1)
        test_acc = (Y_pred == target_labels).mean()
        print('Test accuracy:', test_acc * 100)
        np.save(args.log_dir + '/deterministic_imagenet_probs.npy', pred_mean)
        np.save(args.log_dir + '/deterministic_imagenet_labels.npy',
                target_labels)
    return test_acc
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; when *is_best* is true, also mirror
    it to ``model_best.pth.tar`` in the current working directory."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value and a running, count-weighted average."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Discard all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count

    def __str__(self):
        template = ''.join(['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
        return template.format(**vars(self))
class ProgressMeter(object):
    """Pretty-prints a ``[batch/total]`` prefix followed by its meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for *batch*."""
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        # Field width = number of digits in the total batch count. The
        # original computed len(str(num_batches // 1)); '// 1' is a no-op,
        # so it is dropped.
        num_digits = len(str(num_batches))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
    """Decay the base learning rate ``args.lr`` by a factor of 10 every 30
    epochs and write the result into every parameter group of *optimizer*."""
    decay_steps = epoch // 30
    new_lr = args.lr * (0.1 ** decay_steps)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def accuracy(output, target, topk=(1, )):
    """Compute top-k accuracy (in percent) for each k in *topk*.

    Args:
        output: (batch, num_classes) tensor of class scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors, one per k, holding the percentage of
        samples whose true label is among the top-k predictions.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape(-1) instead of view(-1): row slices of this transposed
            # layout may be non-contiguous and .view() raises on such
            # tensors (the well-known pytorch/examples fix); reshape behaves
            # identically on contiguous input.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
# Script entry point: run training/testing only when executed directly.
if __name__ == '__main__':
    main()
| 20,770 | 34.264856 | 110 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/examples/main_deterministic_mnist.py | from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import bayesian_torch.models.deterministic.simple_cnn as simple_cnn
def train(args, model, device, train_loader, optimizer, epoch, tb_writer=None):
    """Train *model* for one epoch with NLL loss, printing progress every
    ``args.log_interval`` batches and logging the epoch's final loss to
    TensorBoard when *tb_writer* is given."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            seen = batch_idx * len(data)
            total = len(train_loader.dataset)
            pct = 100. * batch_idx / len(train_loader)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, total, pct, loss.item()))
    if tb_writer is not None:
        tb_writer.add_scalar('train/loss', loss.item(), epoch)
        tb_writer.flush()
def test(args, model, device, test_loader, epoch, tb_writer=None):
    """Evaluate on the test set: average NLL loss and accuracy, printed and
    optionally logged to TensorBoard under the 'val/' namespace."""
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(
                output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(
                dim=1,
                keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
    val_accuracy = correct / len(test_loader.dataset)
    if tb_writer is not None:
        tb_writer.add_scalar('val/loss', test_loss, epoch)
        tb_writer.add_scalar('val/accuracy', val_accuracy, epoch)
        tb_writer.flush()
def evaluate(args, model, device, test_loader):
    """Report test accuracy and dump predictions to .npy files.

    NOTE(review): only the *final* batch's outputs/labels survive the loop,
    so the saved files cover just that batch — with the default test batch
    size (10000) that happens to be the entire MNIST test set; confirm.
    """
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            output = torch.exp(output)  # log-probabilities -> probabilities
            pred = output.argmax(
                dim=1,
                keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
        print('\nTest set: Accuracy: {:.2f}%\n'.format(100. * correct /
                                                       len(test_loader.dataset)))
        target_labels = target.cpu().data.numpy()
        np.save('./probs_mnist.npy', output.cpu().data.numpy())
        np.save('./mnist_test_labels.npy', target_labels)
def main():
    """CLI entry point for the deterministic MNIST example: parse options,
    build the MNIST loaders and the simple CNN, then train (saving final
    weights) or evaluate a saved checkpoint depending on --mode."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 10000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=14,
                        metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr',
                        type=float,
                        default=1.0,
                        metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--mode', type=str, required=True, help='train | test')
    parser.add_argument('--save_dir',
                        type=str,
                        default='./checkpoint/deterministic')
    parser.add_argument(
        '--tensorboard',
        type=bool,
        default=True,
        metavar='N',
        help=
        'use tensorboard for logging and visualization of training progress')
    parser.add_argument(
        '--log_dir',
        type=str,
        default='./logs/mnist/deterministic',
        metavar='N',
        help=
        'use tensorboard for logging and visualization of training progress')
    args = parser.parse_args()
    # NOTE(review): unlike the Bayesian example, --no-cuda is ignored here.
    use_cuda = torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    tb_writer = None
    if args.tensorboard:
        logger_dir = os.path.join(args.log_dir, 'tb_logger')
        if not os.path.exists(logger_dir):
            os.makedirs(logger_dir)
        tb_writer = SummaryWriter(logger_dir)
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model = simple_cnn.SCNN()
    model = model.to(device)
    if args.mode == 'train':
        for epoch in range(1, args.epochs + 1):
            # drop the LR by 10x for the second half of training
            # NOTE(review): optimizer/scheduler are recreated every epoch,
            # so StepLR's decay never compounds — confirm intent.
            if (epoch < args.epochs / 2):
                optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
            else:
                optimizer = optim.Adadelta(model.parameters(), lr=args.lr / 10)
            scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
            train(args, model, device, train_loader, optimizer, epoch,
                  tb_writer)
            test(args, model, device, test_loader, epoch, tb_writer)
            scheduler.step()
        torch.save(model.state_dict(), args.save_dir + "/mnist_scnn.pth")
    elif args.mode == 'test':
        checkpoint = args.save_dir + '/mnist_scnn.pth'
        model.load_state_dict(torch.load(checkpoint))
        evaluate(args, model, device, test_loader)
# Script entry point: run training/testing only when executed directly.
if __name__ == '__main__':
    main()
| 7,802 | 36.157143 | 79 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/examples/main_bayesian_mnist.py | from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import scipy
from scipy.special import softmax
import bayesian_torch.models.bayesian.simple_cnn_variational as simple_cnn
len_trainset = 60000
len_testset = 10000
def train(args, model, device, train_loader, optimizer, epoch, tb_writer=None):
    """One training epoch for the variational MNIST CNN.

    For every batch, draws ``args.num_mc`` Monte-Carlo forward passes,
    averages the outputs and KL terms, and minimizes the ELBO
    (NLL + KL / len_trainset).
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output_ = []
        kl_ = []
        for mc_run in range(args.num_mc):
            output, kl = model(data)
            output_.append(output)
            kl_.append(kl)
        # average predictions and KL over the MC samples
        output = torch.mean(torch.stack(output_), dim=0)
        kl = torch.mean(torch.stack(kl_), dim=0)
        nll_loss = F.nll_loss(output, target)
        #ELBO loss
        loss = nll_loss + (kl / len_trainset)
        '''
        #another way of computing gradients with multiple MC samples
        nll_loss = 0
        scaled_kl = 0
        for mc_run in range(args.num_mc):
            output, kl = model(data)
            nll_loss += F.nll_loss(output, target)
            scaled_kl += (kl/len_trainset)
        loss = (nll_loss + scaled_kl)/args.num_mc
        #end
        '''
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
    if tb_writer is not None:
        tb_writer.add_scalar('train/loss', loss.item(), epoch)
        tb_writer.flush()
def test(args, model, device, test_loader, epoch, tb_writer=None):
    """Validate the Bayesian MNIST model: one stochastic forward pass per
    batch; reports ELBO-style loss and accuracy (printed / TensorBoard)."""
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output, kl = model(data)
            # NOTE(review): adding the KL tensor makes test_loss a 0-d
            # tensor; the {:.4f} formatting below still works, but confirm
            # the per-batch KL scaling is intended.
            test_loss += F.nll_loss(output, target, reduction='sum').item() + (
                kl / len_testset)  # sum up batch loss
            pred = output.argmax(
                dim=1,
                keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print(
        '\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
            test_loss, correct, len(test_loader.dataset),
            100. * correct / len(test_loader.dataset)))
    val_accuracy = correct / len(test_loader.dataset)
    if tb_writer is not None:
        tb_writer.add_scalar('val/loss', test_loss, epoch)
        tb_writer.add_scalar('val/accuracy', val_accuracy, epoch)
        tb_writer.flush()
def evaluate(args, model, device, test_loader):
    """Monte-Carlo evaluation: draws ``args.num_monte_carlo`` stochastic
    forward passes per batch, averages the predictive probabilities, prints
    accuracy and dumps per-sample MC probabilities to .npy files.

    NOTE(review): target_labels keeps only the last batch's labels; with the
    default test batch size (10000) that is the full MNIST test set.
    """
    pred_probs_mc = []
    test_loss = 0  # NOTE(review): never updated below
    correct = 0    # NOTE(review): never updated below
    with torch.no_grad():
        pred_probs_mc = []
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            for mc_run in range(args.num_monte_carlo):
                model.eval()
                output, _ = model.forward(data)
                #get probabilities from log-prob
                pred_probs = torch.exp(output)
                pred_probs_mc.append(pred_probs.cpu().data.numpy())
        target_labels = target.cpu().data.numpy()
        # mean over the MC sample axis gives the predictive distribution
        pred_mean = np.mean(pred_probs_mc, axis=0)
        Y_pred = np.argmax(pred_mean, axis=1)
        print('Test accuracy:', (Y_pred == target_labels).mean() * 100)
        np.save('./probs_mnist_mc.npy', pred_probs_mc)
        np.save('./mnist_test_labels_mc.npy', target_labels)
def main():
    """CLI entry point.

    Parses arguments, builds MNIST train/test loaders and the Bayesian
    SCNN, then either trains (with per-epoch validation and checkpointing)
    or runs Monte Carlo evaluation from a saved checkpoint, depending on
    the required ``--mode`` flag (``train`` | ``test``).
    """
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=10000,
                        metavar='N',
                        help='input batch size for testing (default: 10000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=14,
                        metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr',
                        type=float,
                        default=1.0,
                        metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.7,
                        metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save_dir',
                        type=str,
                        default='./checkpoint/bayesian')
    parser.add_argument('--mode', type=str, required=True, help='train | test')
    parser.add_argument(
        '--num_monte_carlo',
        type=int,
        default=20,
        metavar='N',
        help='number of Monte Carlo samples to be drawn for inference')
    parser.add_argument('--num_mc',
                        type=int,
                        default=5,
                        metavar='N',
                        help='number of Monte Carlo runs during training')
    parser.add_argument(
        '--tensorboard',
        action="store_true",
        help=
        'use tensorboard for logging and visualization of training progress')
    parser.add_argument(
        '--log_dir',
        type=str,
        default='./logs/mnist/bayesian',
        metavar='N',
        help=
        'use tensorboard for logging and visualization of training progress')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    # pin_memory/workers only help when transferring to a CUDA device.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    tb_writer = None
    if args.tensorboard:
        logger_dir = os.path.join(args.log_dir, 'tb_logger')
        print("yee")  # NOTE(review): leftover debug print — consider removing.
        if not os.path.exists(logger_dir):
            os.makedirs(logger_dir)
        tb_writer = SummaryWriter(logger_dir)
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        '../data',
        train=False,
        transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, ))
        ])),
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              **kwargs)
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    model = simple_cnn.SCNN()
    model = model.to(device)
    print(args.mode)
    if args.mode == 'train':
        for epoch in range(1, args.epochs + 1):
            # LR drops 10x for the second half of training.
            # NOTE(review): the optimizer AND StepLR scheduler are re-created
            # every epoch, so scheduler.step() never compounds across epochs —
            # gamma effectively applies once per epoch at most. Confirm
            # whether this reset is intentional.
            if (epoch < args.epochs / 2):
                optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
            else:
                optimizer = optim.Adadelta(model.parameters(), lr=args.lr / 10)
            scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
            train(args, model, device, train_loader, optimizer, epoch,
                  tb_writer)
            test(args, model, device, test_loader, epoch, tb_writer)
            scheduler.step()
            # Checkpoint after every epoch (overwrites previous file).
            torch.save(model.state_dict(),
                       args.save_dir + "/mnist_bayesian_scnn.pth")
    elif args.mode == 'test':
        checkpoint = args.save_dir + '/mnist_bayesian_scnn.pth'
        model.load_state_dict(torch.load(checkpoint))
        evaluate(args, model, device, test_loader)


if __name__ == '__main__':
    main()
| 9,196 | 35.208661 | 79 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/flipout/simple_cnn.py | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from bayesian_torch.layers import Conv2dFlipout
from bayesian_torch.layers import LinearFlipout
prior_mu = 0
prior_sigma = 0.05
posterior_mu_init = 0
posterior_rho_init = -7.0 #-6.0
class SCNN(nn.Module):
    """Bayesian (Flipout) CNN for MNIST.

    Same topology as the deterministic SCNN, but every conv/linear layer is
    a Flipout variational layer returning ``(output, kl)``. ``forward``
    returns ``(log_probs, kl_sum)`` where ``kl_sum`` is the total KL
    divergence across all variational layers.
    """

    def __init__(self):
        super(SCNN, self).__init__()
        # Prior/posterior settings come from the module-level constants.
        self.conv1 = Conv2dFlipout(
            in_channels=1,
            out_channels=32,
            kernel_size=3,
            stride=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )
        self.conv2 = Conv2dFlipout(
            in_channels=32,
            out_channels=64,
            kernel_size=3,
            stride=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )
        # NOTE(review): both dropout layers are constructed but their use is
        # commented out in forward() — dead parameters unless re-enabled.
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        # 9216 = 64 channels * 12 * 12 spatial positions after pooling.
        self.fc1 = LinearFlipout(
            in_features=9216,
            out_features=128,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )
        self.fc2 = LinearFlipout(in_features=128,
                                 out_features=10,
                                 prior_mean=prior_mu,
                                 prior_variance=prior_sigma,
                                 posterior_mu_init=posterior_mu_init,
                                 posterior_rho_init=posterior_rho_init)

    def forward(self, x):
        """Return ``(log_probs, kl_sum)`` for input batch ``x``."""
        kl_sum = 0
        x, kl = self.conv1(x)
        kl_sum += kl
        x = F.relu(x)
        x, kl = self.conv2(x)
        kl_sum += kl
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        #x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x, kl = self.fc1(x)
        kl_sum += kl
        x = F.relu(x)
        #x = self.dropout2(x)
        x, kl = self.fc2(x)
        kl_sum += kl
        output = F.log_softmax(x, dim=1)
        return output, kl_sum
| 2,267 | 28.842105 | 71 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/deterministic/resnet.py | '''
ResNet for CIFAR10.
Ref:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from lrp.linear import Linear
from lrp.conv_cifar import Conv2d
from lrp.sequential import Sequential
__all__ = [
'ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110',
'resnet1202'
]
def _weights_init(m):
    """Kaiming-initialize the weights of linear/conv modules (both the plain
    torch versions and the LRP-wrapped ``Linear``/``Conv2d``); other module
    types are left untouched.

    Fix: removed the unused local ``classname`` and collapsed the four-way
    ``isinstance`` chain into a single tuple check.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d, Linear, Conv2d)):
        init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module`` (used for the
    parameter-free option-A shortcut in the residual blocks)."""

    def __init__(self, lambd):
        super().__init__()
        self.lambd = lambd

    def forward(self, x):
        # Delegate straight to the stored callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Two-conv residual block for the CIFAR ResNet, built on LRP-capable
    ``Conv2d``/``Sequential`` wrappers so relevance can be propagated.

    ``forward`` threads ``explain``/``rule`` through both conv layers.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        # LRP-aware conv wrappers (accept (x, explain, rule) in forward).
        self.conv1 = Conv2d(in_planes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = Conv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                # Option A: parameter-free shortcut — subsample spatially
                # and zero-pad the channel dimension.
                self.shortcut = LambdaLayer(lambda x: F.pad(
                    x[:, :, ::2, ::2],
                    (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
            elif option == 'B':
                # Option B: learned 1x1 projection shortcut.
                self.shortcut = Sequential(
                    Conv2d(in_planes,
                           self.expansion * planes,
                           kernel_size=1,
                           stride=stride,
                           bias=False),
                    nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x, explain=False, rule='epsilon'):
        out = F.relu(self.bn1(self.conv1(x, explain, rule)))
        out = self.bn2(self.conv2(out, explain, rule))
        # NOTE(review): the shortcut is invoked without explain/rule, so an
        # option-B projection conv runs with its defaults — confirm this is
        # intended for LRP passes.
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet (16/32/64 channels, three stages) built on
    LRP-capable ``Conv2d``/``Linear``/``Sequential`` wrappers."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = Conv2d(3,
                            16,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = Linear(64, num_classes)
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return Sequential(*layers)

    def forward(self, x, explain=False, rule="epsilon"):
        out = F.relu(self.bn1(self.conv1(x, explain, rule)))
        # NOTE(review): the stage Sequentials are called without
        # explain/rule, so interior blocks run with their defaults —
        # verify this is the intended LRP behavior.
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool over the full spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.linear(out, explain, rule)
        return out
def resnet20():
    """Depth-20 CIFAR ResNet: 3 BasicBlocks per stage."""
    return ResNet(BasicBlock, num_blocks=[3, 3, 3])


def resnet32():
    """Depth-32 CIFAR ResNet: 5 BasicBlocks per stage."""
    return ResNet(BasicBlock, num_blocks=[5, 5, 5])


def resnet44():
    """Depth-44 CIFAR ResNet: 7 BasicBlocks per stage."""
    return ResNet(BasicBlock, num_blocks=[7, 7, 7])


def resnet56():
    """Depth-56 CIFAR ResNet: 9 BasicBlocks per stage."""
    return ResNet(BasicBlock, num_blocks=[9, 9, 9])


def resnet110():
    """Depth-110 CIFAR ResNet: 18 BasicBlocks per stage."""
    return ResNet(BasicBlock, num_blocks=[18, 18, 18])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print(
"Total layers",
len(
list(
filter(lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters()))))
if __name__ == "__main__":
    # Smoke test: instantiate every exported resnet* factory and print its
    # parameter/layer statistics.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 4,977 | 29.539877 | 76 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/deterministic/resnet_large.py | # ResNet for ImageNet
# ResNet architecture ref:
# https://arxiv.org/abs/1512.03385
# Code from torchvision package
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
__all__ = [
'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
]
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a padded 3x3 convolution without bias, as used throughout
    the ResNet blocks."""
    conv_kwargs = {
        "kernel_size": 3,
        "stride": stride,
        "padding": 1,
        "bias": False,
    }
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
class BasicBlock(nn.Module):
    """Standard two-conv residual block (ResNet-18/34 style).

    ``downsample``, when provided, projects the identity branch so its
    shape matches the main branch before the residual addition.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Main branch: conv-bn-relu-conv-bn.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(main_branch(x) + identity(x))."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        identity = x if self.downsample is None else self.downsample(x)
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style);
    the output channel count is ``planes * expansion``."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Reduce -> process (possibly strided) -> expand.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Return relu(bottleneck(x) + identity(x))."""
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        identity = x if self.downsample is None else self.downsample(x)
        return self.relu(out + identity)
class ResNet(nn.Module):
    """Deterministic ResNet for ImageNet-sized inputs (torchvision-style).

    NOTE(review): ``AvgPool2d(7)`` matches the 7x7 feature map produced by
    224x224 inputs — other input sizes will break at the ``view``. Confirm
    callers only feed 224x224 images.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 max-pool.
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # He-style init for convs; BatchNorm starts at identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample via a
        learned 1x1 projection on the identity branch."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state_dict)
    return model


def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet34'])
        model.load_state_dict(state_dict)
    return model


def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet50'])
        model.load_state_dict(state_dict)
    return model


def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet101'])
        model.load_state_dict(state_dict)
    return model


def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet152'])
        model.load_state_dict(state_dict)
    return model
| 7,104 | 29.625 | 78 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/deterministic/simple_cnn.py | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
class SCNN(nn.Module):
    """Small deterministic CNN for MNIST: two 3x3 conv layers, max-pooling,
    one hidden fully-connected layer, dropout, and a log-softmax output
    over 10 classes."""

    def __init__(self):
        super(SCNN, self).__init__()
        # Feature extractor: 1 -> 32 -> 64 channels, 3x3 kernels, stride 1.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        # 9216 = 64 channels * 12 * 12 spatial positions after pooling.
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, 10)."""
        features = F.relu(self.conv1(x))
        features = F.relu(self.conv2(features))
        features = self.dropout1(F.max_pool2d(features, 2))
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        return F.log_softmax(self.fc2(hidden), dim=1)
| 836 | 25.15625 | 44 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/bayesian/resnet_flipout.py | '''
Bayesian ResNet with Flipout Monte Carlo estimator for CIFAR10.
Ref:
ResNet architecture:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Flipout:
[2] Wen, Yeming, et al. "Flipout: Efficient Pseudo-Independent
Weight Perturbations on Mini-Batches." International Conference
on Learning Representations. 2018.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from bayesian_torch.layers import Conv2dFlipout
from bayesian_torch.layers import LinearFlipout
__all__ = [
'ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110'
]
prior_mu = 0.0
prior_sigma = 1.0
posterior_mu_init = 0.0
posterior_rho_init = -3.0
def _weights_init(m):
classname = m.__class__.__name__
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module`` (used for the
    parameter-free option-A shortcut in the residual blocks)."""

    def __init__(self, lambd):
        super().__init__()
        self.lambd = lambd

    def forward(self, x):
        # Delegate straight to the stored callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Two-conv Flipout residual block for the CIFAR ResNet.

    ``forward`` returns ``(out, kl_sum)``: the Flipout conv layers each
    return ``(tensor, kl)`` and the KL terms are accumulated; the shortcut
    is deterministic (plain ``nn.Sequential``/``LambdaLayer``) and returns
    only a tensor.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = Conv2dFlipout(in_planes,
                                   planes,
                                   kernel_size=3,
                                   stride=stride,
                                   padding=1,
                                   bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = Conv2dFlipout(planes,
                                   planes,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Parameter-free shortcut: subsample spatially, zero-pad
                # the channel dimension.
                self.shortcut = LambdaLayer(lambda x: F.pad(
                    x[:, :, ::2, ::2],
                    (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
            elif option == 'B':
                # Learned 1x1 Flipout projection shortcut.
                # NOTE(review): a Flipout conv inside nn.Sequential returns
                # a (tensor, kl) tuple — confirm option B is exercised and
                # handled, since forward() treats the shortcut output as a
                # plain tensor.
                self.shortcut = nn.Sequential(
                    Conv2dFlipout(in_planes,
                                  self.expansion * planes,
                                  kernel_size=1,
                                  stride=stride,
                                  bias=False),
                    nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        kl_sum = 0
        out, kl = self.conv1(x)
        kl_sum += kl
        out = self.bn1(out)
        out = F.relu(out)
        out, kl = self.conv2(out)
        kl_sum += kl
        out = self.bn2(out)
        out += self.shortcut(x)
        out = F.relu(out)
        return out, kl_sum
class ResNet(nn.Module):
    """CIFAR-style Bayesian (Flipout) ResNet.

    ``forward`` returns ``(logits, kl_sum)``; the stages are iterated block
    by block (rather than called as Sequentials) so the per-block KL terms
    can be collected.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = Conv2dFlipout(3,
                                   16,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = LinearFlipout(in_features=64, out_features=num_classes)
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        kl_sum = 0
        out, kl = self.conv1(x)
        kl_sum += kl
        out = self.bn1(out)
        out = F.relu(out)
        # Each block returns (tensor, kl); accumulate KL across all blocks.
        for l in self.layer1:
            out, kl = l(out)
            kl_sum += kl
        for l in self.layer2:
            out, kl = l(out)
            kl_sum += kl
        for l in self.layer3:
            out, kl = l(out)
            kl_sum += kl
        # Global average pool over the full spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out, kl = self.linear(out)
        kl_sum += kl
        return out, kl_sum
def resnet20():
    """Depth-20 Flipout CIFAR ResNet: 3 blocks per stage."""
    return ResNet(BasicBlock, num_blocks=[3, 3, 3])


def resnet32():
    """Depth-32 Flipout CIFAR ResNet: 5 blocks per stage."""
    return ResNet(BasicBlock, num_blocks=[5, 5, 5])


def resnet44():
    """Depth-44 Flipout CIFAR ResNet: 7 blocks per stage."""
    return ResNet(BasicBlock, num_blocks=[7, 7, 7])


def resnet56():
    """Depth-56 Flipout CIFAR ResNet: 9 blocks per stage."""
    return ResNet(BasicBlock, num_blocks=[9, 9, 9])


def resnet110():
    """Depth-110 Flipout CIFAR ResNet: 18 blocks per stage."""
    return ResNet(BasicBlock, num_blocks=[18, 18, 18])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print(
"Total layers",
len(
list(
filter(lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters()))))
if __name__ == "__main__":
    # Smoke test: instantiate every exported resnet* factory and print its
    # parameter/layer statistics.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 5,606 | 28.356021 | 77 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/bayesian/resnet_variational.py | '''
Bayesian ResNet for CIFAR10.
ResNet architecture ref:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from bayesian_torch.bayesian_torch.layers import Conv2dReparameterization
from bayesian_torch.bayesian_torch.layers import LinearReparameterization
from lrp.sequential import Sequential
from lrp.linear import Linear
from lrp.conv import Conv2d
__all__ = [
'ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110'
]
prior_mu = 0.0
prior_sigma = 1.0
posterior_mu_init = 0.0
posterior_rho_init = -2.0
def _weights_init(m):
    """Kaiming-initialize the weights of linear/conv modules (both the plain
    torch versions and the LRP-wrapped ``Linear``/``Conv2d``); other module
    types are left untouched.

    Fix: removed the unused local ``classname`` and collapsed the four-way
    ``isinstance`` chain into a single tuple check.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d, Linear, Conv2d)):
        init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module`` (used for the
    parameter-free option-A shortcut in the residual blocks)."""

    def __init__(self, lambd):
        super().__init__()
        self.lambd = lambd

    def forward(self, x):
        # Delegate straight to the stored callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Two-conv variational (reparameterization) residual block for the
    CIFAR ResNet, built on LRP-capable wrappers.

    ``forward`` returns ``(out, kl_sum)``: both variational conv layers
    return ``(tensor, kl)`` and the KL terms are accumulated. The shortcut
    is either an empty LRP ``Sequential``, a parameter-free pad/subsample
    (option A), or a learned variational 1x1 projection (option B).
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        # Prior/posterior settings come from the module-level constants.
        self.conv1 = Conv2dReparameterization(
            in_channels=in_planes,
            out_channels=planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = Conv2dReparameterization(
            in_channels=planes,
            out_channels=planes,
            kernel_size=3,
            stride=1,
            padding=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                self.shortcut = LambdaLayer(lambda x: F.pad(
                    x[:, :, ::2, ::2],
                    (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
            elif option == 'B':
                # NOTE(review): a reparameterization conv returns a
                # (tensor, kl) tuple — confirm option B composes correctly
                # inside Sequential, since forward() treats the shortcut
                # output as a plain tensor.
                self.shortcut = Sequential(
                    Conv2dReparameterization(
                        in_channels=in_planes,
                        out_channels=self.expansion * planes,
                        kernel_size=1,
                        stride=stride,
                        prior_mean=prior_mu,
                        prior_variance=prior_sigma,
                        posterior_mu_init=posterior_mu_init,
                        posterior_rho_init=posterior_rho_init,
                        bias=False), nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x, explain=False, rule='epsilon'):
        kl_sum = 0
        out, kl = self.conv1(x, explain, rule)
        kl_sum += kl
        out = self.bn1(out)
        out = F.relu(out)
        out, kl = self.conv2(out, explain, rule)
        kl_sum += kl
        out = self.bn2(out)
        # NOTE(review): shortcut is invoked without explain/rule.
        out += self.shortcut(x)
        out = F.relu(out)
        return out, kl_sum
class ResNet(nn.Module):
    """CIFAR-style Bayesian (reparameterization) ResNet with LRP support.

    ``forward`` returns ``(logits, kl_sum)``; stages are iterated block by
    block so per-block KL terms can be collected.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = Conv2dReparameterization(
            in_channels=3,
            out_channels=16,
            kernel_size=3,
            stride=1,
            padding=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.linear = LinearReparameterization(
            in_features=64,
            out_features=num_classes,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init
        )
        self.apply(_weights_init)

    def _make_layer(self, block, planes, num_blocks, stride):
        # First block of a stage may downsample; the rest keep stride 1.
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return Sequential(*layers)

    def forward(self, x, explain=False, rule="epsilon"):
        kl_sum = 0
        out, kl = self.conv1(x, explain, rule)
        kl_sum += kl
        out = self.bn1(out)
        out = F.relu(out)
        # NOTE(review): interior blocks are called without explain/rule, so
        # they run with their defaults during an LRP pass — confirm intended.
        for l in self.layer1:
            out, kl = l(out)
            kl_sum += kl
        for l in self.layer2:
            out, kl = l(out)
            kl_sum += kl
        for l in self.layer3:
            out, kl = l(out)
            kl_sum += kl
        # Global average pool over the full spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out, kl = self.linear(out, explain, rule)
        kl_sum += kl
        return out, kl_sum
def resnet20():
    """Depth-20 variational CIFAR ResNet: 3 blocks per stage."""
    print("resnet20")
    return ResNet(BasicBlock, num_blocks=[3, 3, 3])


def resnet32():
    """Depth-32 variational CIFAR ResNet: 5 blocks per stage."""
    print("resnet32")
    return ResNet(BasicBlock, num_blocks=[5, 5, 5])


def resnet44():
    """Depth-44 variational CIFAR ResNet: 7 blocks per stage."""
    print("resnet44")
    return ResNet(BasicBlock, num_blocks=[7, 7, 7])


def resnet56():
    """Depth-56 variational CIFAR ResNet: 9 blocks per stage."""
    print("resnet56")
    return ResNet(BasicBlock, num_blocks=[9, 9, 9])


def resnet110():
    """Depth-110 variational CIFAR ResNet: 18 blocks per stage."""
    print("resnet110")
    return ResNet(BasicBlock, num_blocks=[18, 18, 18])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print(
"Total layers",
len(
list(
filter(lambda p: p.requires_grad and len(p.data.size()) > 1,
net.parameters()))))
if __name__ == "__main__":
    # Smoke test: instantiate every exported resnet* factory and print its
    # parameter/layer statistics.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 6,935 | 30.103139 | 116 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/bayesian/resnet_flipout_large.py | # Bayesian ResNet for ImageNet
# ResNet architecture ref:
# https://arxiv.org/abs/1512.03385
# Code adapted from torchvision package to build Bayesian model from deterministic model
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from bayesian_torch.layers import Conv2dFlipout
from bayesian_torch.layers import LinearFlipout
from bayesian_torch.layers import BatchNorm2dLayer
__all__ = [
'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
]
prior_mu = 0.0
prior_sigma = 0.1
posterior_mu_init = 0.0
posterior_rho_init = -9.0
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a padded 3x3 Flipout convolution without bias, using the
    module-level prior/posterior settings."""
    flipout_kwargs = {
        "kernel_size": 3,
        "stride": stride,
        "padding": 1,
        "prior_mean": prior_mu,
        "prior_variance": prior_sigma,
        "posterior_mu_init": posterior_mu_init,
        "posterior_rho_init": posterior_rho_init,
        "bias": False,
    }
    return Conv2dFlipout(in_channels=in_planes,
                         out_channels=out_planes,
                         **flipout_kwargs)
class BasicBlock(nn.Module):
    """Two-conv Flipout residual block (ImageNet ResNet-18/34 style).

    ``forward`` returns ``(out, kl_sum)``; the optional ``downsample``
    module is also expected to return ``(tensor, kl)``.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        kl_sum = 0
        out, kl = self.conv1(x)
        kl_sum += kl
        out = self.bn1(out)
        out = self.relu(out)
        out, kl = self.conv2(out)
        kl_sum += kl
        out = self.bn2(out)
        if self.downsample is not None:
            # Downsample branch contributes its own KL term.
            residual, kl = self.downsample(x)
            kl_sum += kl
        out += residual
        out = self.relu(out)
        return out, kl_sum
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 Flipout bottleneck residual block (ImageNet
    ResNet-50+ style); output channels are ``planes * expansion``.

    ``forward`` returns ``(out, kl_sum)``; the optional ``downsample``
    module is also expected to return ``(tensor, kl)``.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Reduce -> process (possibly strided) -> expand; priors come from
        # the module-level constants.
        self.conv1 = Conv2dFlipout(in_channels=inplanes,
                                   out_channels=planes,
                                   kernel_size=1,
                                   prior_mean=prior_mu,
                                   prior_variance=prior_sigma,
                                   posterior_mu_init=posterior_mu_init,
                                   posterior_rho_init=posterior_rho_init,
                                   bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = Conv2dFlipout(in_channels=planes,
                                   out_channels=planes,
                                   kernel_size=3,
                                   stride=stride,
                                   padding=1,
                                   prior_mean=prior_mu,
                                   prior_variance=prior_sigma,
                                   posterior_mu_init=posterior_mu_init,
                                   posterior_rho_init=posterior_rho_init,
                                   bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = Conv2dFlipout(in_channels=planes,
                                   out_channels=planes * 4,
                                   kernel_size=1,
                                   prior_mean=prior_mu,
                                   prior_variance=prior_sigma,
                                   posterior_mu_init=posterior_mu_init,
                                   posterior_rho_init=posterior_rho_init,
                                   bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        kl_sum = 0
        out, kl = self.conv1(x)
        kl_sum += kl
        out = self.bn1(out)
        out = self.relu(out)
        out, kl = self.conv2(out)
        kl_sum += kl
        out = self.bn2(out)
        out = self.relu(out)
        out, kl = self.conv3(out)
        kl_sum += kl
        out = self.bn3(out)
        if self.downsample is not None:
            # Downsample branch contributes its own KL term.
            residual, kl = self.downsample(x)
            kl_sum += kl
        out += residual
        out = self.relu(out)
        return out, kl_sum
class ResNet(nn.Module):
    """Bayesian (Flipout) ResNet for ImageNet-sized inputs.

    ``forward`` returns ``(logits, kl_sum)`` where ``kl_sum`` accumulates
    the KL divergence contributions of every Flipout layer, for use in the
    ELBO loss.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 Flipout conv + BN + ReLU + 3x3 max-pool.
        self.conv1 = Conv2dFlipout(in_channels=3,
                                   out_channels=64,
                                   kernel_size=7,
                                   stride=2,
                                   padding=3,
                                   prior_mean=prior_mu,
                                   prior_variance=prior_sigma,
                                   posterior_mu_init=posterior_mu_init,
                                   posterior_rho_init=posterior_rho_init,
                                   bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # AvgPool2d(7) assumes a 7x7 final feature map (224x224 inputs).
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = LinearFlipout(in_features=512 * block.expansion,
                                out_features=num_classes,
                                prior_mean=prior_mu,
                                prior_variance=prior_sigma,
                                posterior_mu_init=posterior_mu_init,
                                posterior_rho_init=posterior_rho_init)
        # He-style init for any deterministic convs; BN starts at identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample via a
        learned 1x1 Flipout projection on the identity branch."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                Conv2dFlipout(in_channels=self.inplanes,
                              out_channels=planes * block.expansion,
                              kernel_size=1,
                              stride=stride,
                              prior_mean=prior_mu,
                              prior_variance=prior_sigma,
                              posterior_mu_init=posterior_mu_init,
                              posterior_rho_init=posterior_rho_init,
                              bias=False),
                BatchNorm2dLayer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        kl_sum = 0
        x, kl = self.conv1(x)
        kl_sum += kl
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # All four stages share the same KL-collecting traversal; the
        # original duplicated this loop four times, once per stage.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            for layer in stage:
                if 'Flipout' in str(layer):
                    x, kl = layer(x)
                    # BUGFIX: the original tested `if kl is None` here, which
                    # both dropped every real KL contribution from the
                    # residual stages and would have crashed (None += into
                    # kl_sum) had kl actually been None. Accumulate when a
                    # KL term is present.
                    if kl is not None:
                        kl_sum += kl
                else:
                    x = layer(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x, kl = self.fc(x)
        kl_sum += kl
        return x, kl_sum
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return net
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return net
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        net.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return net
| 10,869 | 33.507937 | 88 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/bayesian/resnet_variational_large.py | # Bayesian ResNet for ImageNet
# ResNet architecture ref:
# https://arxiv.org/abs/1512.03385
# Code adapted from torchvision package to build Bayesian model from deterministic model
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from bayesian_torch.layers import Conv2dReparameterization
from bayesian_torch.layers import LinearReparameterization
from bayesian_torch.layers import BatchNorm2dLayer
__all__ = [
'ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
]
prior_mu = 0.0
prior_sigma = 0.1
posterior_mu_init = 0.0
posterior_rho_init = -9.0
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    # Bayesian conv configured with the module-level prior/posterior
    # hyper-parameters; no additive bias (batch norm follows).
    conv_kwargs = dict(in_channels=in_planes,
                       out_channels=out_planes,
                       kernel_size=3,
                       stride=stride,
                       padding=1,
                       prior_mean=prior_mu,
                       prior_variance=prior_sigma,
                       posterior_mu_init=posterior_mu_init,
                       posterior_rho_init=posterior_rho_init,
                       bias=False)
    return Conv2dReparameterization(**conv_kwargs)
class BasicBlock(nn.Module):
    """Two-conv residual block built from Bayesian (Reparameterization) convs.

    forward() takes a tensor and returns ``(out, kl_sum)``, where kl_sum is
    the total KL divergence contributed by the block's Bayesian layers.
    """

    # Output channels = planes * expansion (1 for the basic block).
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        # `downsample`, when given, projects the identity branch so shapes
        # match; it is expected to return (tensor, kl) like the conv layers.
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        kl_sum = 0
        out, kl = self.conv1(x)  # Bayesian conv returns (activation, kl)
        kl_sum += kl
        out = self.bn1(out)
        out = self.relu(out)
        out, kl = self.conv2(out)
        kl_sum += kl
        out = self.bn2(out)
        if self.downsample is not None:
            residual, kl = self.downsample(x)
            kl_sum += kl
        out += residual
        out = self.relu(out)
        return out, kl_sum
class Bottleneck(nn.Module):
    """Bottleneck residual block (1x1 -> 3x3 -> 1x1) with Bayesian convs.

    forward() returns ``(out, kl_sum)``; kl_sum accumulates the KL terms of
    the three Bayesian convolutions (plus the downsample branch, if any).
    """

    # Output channels = planes * expansion (4 for the bottleneck block).
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # 1x1 reduction conv.
        self.conv1 = Conv2dReparameterization(
            in_channels=inplanes,
            out_channels=planes,
            kernel_size=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # 3x3 spatial conv; carries the block's stride.
        self.conv2 = Conv2dReparameterization(
            in_channels=planes,
            out_channels=planes,
            kernel_size=3,
            stride=stride,
            padding=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # 1x1 expansion conv back to planes * 4 channels.
        self.conv3 = Conv2dReparameterization(
            in_channels=planes,
            out_channels=planes * 4,
            kernel_size=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        kl_sum = 0
        out, kl = self.conv1(x)  # each Bayesian conv returns (tensor, kl)
        kl_sum += kl
        out = self.bn1(out)
        out = self.relu(out)
        out, kl = self.conv2(out)
        kl_sum += kl
        out = self.bn2(out)
        out = self.relu(out)
        out, kl = self.conv3(out)
        kl_sum += kl
        out = self.bn3(out)
        if self.downsample is not None:
            residual, kl = self.downsample(x)
            kl_sum += kl
        out += residual
        out = self.relu(out)
        return out, kl_sum
class ResNet(nn.Module):
    """Bayesian ResNet backbone built from Reparameterization conv/linear layers.

    forward() returns ``(logits, kl_sum)`` where kl_sum is the total KL
    divergence of every Bayesian layer, as required for the ELBO loss.
    """

    def __init__(self, block, layers, num_classes=1000):
        """
        Args:
            block: residual unit class (BasicBlock or Bottleneck).
            layers: list of four ints — number of units per stage.
            num_classes: size of the final classification layer.
        """
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = Conv2dReparameterization(
            in_channels=3,
            out_channels=64,
            kernel_size=7,
            stride=2,
            padding=3,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = LinearReparameterization(
            in_features=512 * block.expansion,
            out_features=num_classes,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init)
        # He-style init for deterministic convs / batch norms; the Bayesian
        # layers initialise themselves in their own constructors.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` units of `block`; first unit may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut keeps the residual addition shape-compatible.
            downsample = nn.Sequential(
                Conv2dReparameterization(in_channels=self.inplanes,
                                         out_channels=planes * block.expansion,
                                         kernel_size=1,
                                         stride=stride,
                                         prior_mean=prior_mu,
                                         prior_variance=prior_sigma,
                                         posterior_mu_init=posterior_mu_init,
                                         posterior_rho_init=posterior_rho_init,
                                         bias=False),
                BatchNorm2dLayer(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return ``(logits, kl_sum)`` for input batch `x`."""
        kl_sum = 0
        x, kl = self.conv1(x)
        kl_sum += kl
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Residual stages: Bayesian sub-modules return (tensor, kl); plain
        # modules return just the tensor.
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            for layer in stage:
                if 'Reparameterization' in str(layer):
                    x, kl = layer(x)
                    # BUG FIX: original read `if kl is None: kl_sum += kl`,
                    # which dropped every per-block KL term (and would have
                    # crashed had kl actually been None).
                    if kl is not None:
                        kl_sum += kl
                else:
                    x = layer(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x, kl = self.fc(x)
        kl_sum += kl
        return x, kl_sum
def resnet18(pretrained=False, **kwargs):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state_dict)
    return model
def resnet34(pretrained=False, **kwargs):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet34'])
        model.load_state_dict(state_dict)
    return model
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet50'])
        model.load_state_dict(state_dict)
    return model
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet101'])
        model.load_state_dict(state_dict)
    return model
def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet152'])
        model.load_state_dict(state_dict)
    return model
| 10,428 | 31.590625 | 88 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/models/bayesian/simple_cnn_variational.py | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from bayesian_torch.layers import Conv2dReparameterization
from bayesian_torch.layers import LinearReparameterization
prior_mu = 0.0
prior_sigma = 1.0
posterior_mu_init = 0.0
posterior_rho_init = -3.0
class SCNN(nn.Module):
    """Simple Bayesian CNN with reparameterization conv/linear layers.

    forward() returns ``(log_probs, kl_sum)``: log-softmax class scores and
    the summed KL divergence of all Bayesian layers, for ELBO training.
    The fc1 in_features of 9216 implies a 28x28 single-channel input
    (e.g. MNIST) — TODO confirm against the training script.
    """

    def __init__(self):
        super(SCNN, self).__init__()
        self.conv1 = Conv2dReparameterization(
            in_channels=1,
            out_channels=32,
            kernel_size=3,
            stride=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )
        self.conv2 = Conv2dReparameterization(
            in_channels=32,
            out_channels=64,
            kernel_size=3,
            stride=1,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = LinearReparameterization(
            in_features=9216,
            out_features=128,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )
        self.fc2 = LinearReparameterization(
            in_features=128,
            out_features=10,
            prior_mean=prior_mu,
            prior_variance=prior_sigma,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
        )

    def forward(self, x):
        # Accumulate the KL contribution of every Bayesian layer.
        kl_sum = 0
        x, kl = self.conv1(x)
        kl_sum += kl
        x = F.relu(x)
        x, kl = self.conv2(x)
        kl_sum += kl
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x, kl = self.fc1(x)
        kl_sum += kl
        x = F.relu(x)
        x = self.dropout2(x)
        x, kl = self.fc2(x)
        kl_sum += kl
        output = F.log_softmax(x, dim=1)
        return output, kl_sum
| 2,246 | 27.443038 | 58 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/batchnorm.py | '''
wrapper for Batch Normalization layers
'''
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
class BatchNorm2dLayer(nn.Module):
    """BatchNorm2d wrapper for (tensor, kl) pipelines.

    Mirrors torch.nn.BatchNorm2d, but forward() consumes the
    (activation, kl) tuple that Bayesian layers pass along and returns
    (normalized, 0) so KL-accumulation code can treat it uniformly.
    """

    def __init__(self,
                 num_features,
                 eps=1e-5,
                 momentum=0.1,
                 affine=True,
                 track_running_stats=True):
        super(BatchNorm2dLayer, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # Learnable per-channel scale and shift.
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        """Zero the running mean, reset variance to one, clear the counter."""
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        """Reset stats and, if affine, re-draw scale (U[0,1)) / zero shift."""
        self.reset_running_stats()
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()

    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(
                input.dim()))

    def forward(self, input):
        """input is a (tensor, kl) pair; returns (normalized, 0)."""
        x = input[0]
        self._check_input_dim(x)
        avg_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:
                # Cumulative moving average.
                avg_factor = 1.0 / self.num_batches_tracked.item()
            else:
                # Exponential moving average.
                avg_factor = self.momentum
        out = F.batch_norm(x, self.running_mean, self.running_var,
                           self.weight, self.bias,
                           self.training or not self.track_running_stats,
                           avg_factor, self.eps)
        return out, 0
class BatchNorm1dLayer(nn.Module):
    """BatchNorm1d wrapper for (tensor, kl) pipelines.

    forward() consumes the (activation, kl) tuple that Bayesian layers pass
    along and returns (normalized, 0). Unlike torch.nn.BatchNorm1d, this
    wrapper accepts 3D input only (see _check_input_dim).
    """

    def __init__(self,
                 num_features,
                 eps=1e-5,
                 momentum=0.1,
                 affine=True,
                 track_running_stats=True):
        super(BatchNorm1dLayer, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # Learnable per-channel scale and shift.
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        # Zero the running mean, reset variance to one, clear the counter.
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        # Reset stats and, if affine, re-draw scale (U[0,1)) / zero shift.
        self.reset_running_stats()
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()

    def _check_input_dim(self, input):
        if input.dim() != 3:
            raise ValueError('expected 3D input (got {}D input)'.format(
                input.dim()))

    def forward(self, input):
        # input is a (tensor, kl) pair; upstream kl is ignored, 0 returned.
        self._check_input_dim(input[0])
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:  # use cumulative moving average
                exponential_average_factor = 1.0 / self.num_batches_tracked.item()
            else:  # use exponential moving average
                exponential_average_factor = self.momentum
        out = F.batch_norm(input[0], self.running_mean, self.running_var,
                           self.weight, self.bias, self.training
                           or not self.track_running_stats,
                           exponential_average_factor, self.eps)
        kl = 0
        return out, kl
class BatchNorm3dLayer(nn.Module):
    """BatchNorm3d wrapper for (tensor, kl) pipelines.

    forward() consumes the (activation, kl) tuple that Bayesian layers pass
    along and returns (normalized, 0). Input must be 5D (N, C, D, H, W).
    """

    def __init__(self,
                 num_features,
                 eps=1e-5,
                 momentum=0.1,
                 affine=True,
                 track_running_stats=True):
        super(BatchNorm3dLayer, self).__init__()
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # Learnable per-channel scale and shift.
            self.weight = Parameter(torch.Tensor(num_features))
            self.bias = Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('weight', None)
            self.register_parameter('bias', None)
        if self.track_running_stats:
            self.register_buffer('running_mean', torch.zeros(num_features))
            self.register_buffer('running_var', torch.ones(num_features))
            self.register_buffer('num_batches_tracked',
                                 torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('running_mean', None)
            self.register_parameter('running_var', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        # Zero the running mean, reset variance to one, clear the counter.
        if self.track_running_stats:
            self.running_mean.zero_()
            self.running_var.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        # Reset stats and, if affine, re-draw scale (U[0,1)) / zero shift.
        self.reset_running_stats()
        if self.affine:
            self.weight.data.uniform_()
            self.bias.data.zero_()

    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'.format(
                input.dim()))

    def forward(self, input):
        # input is a (tensor, kl) pair; upstream kl is ignored, 0 returned.
        self._check_input_dim(input[0])
        exponential_average_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:  # use cumulative moving average
                exponential_average_factor = 1.0 / self.num_batches_tracked.item()
            else:  # use exponential moving average
                exponential_average_factor = self.momentum
        out = F.batch_norm(input[0], self.running_mean, self.running_var,
                           self.weight, self.bias, self.training
                           or not self.track_running_stats,
                           exponential_average_factor, self.eps)
        kl = 0
        return out, kl
| 7,672 | 37.365 | 78 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/base_variational_layer.py | # Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ===============================================================================================
import torch
import torch.nn as nn
import torch.distributions as distributions
class BaseVariationalLayer_(nn.Module):
    """Common base for variational layers; provides the Gaussian KL term."""

    def __init__(self):
        super().__init__()

    def kl_div(self, mu_q, sigma_q, mu_p, sigma_p):
        """
        Calculates kl divergence between two gaussians (Q || P)

        Parameters:
             * mu_q: torch.Tensor -> mu parameter of distribution Q
             * sigma_q: torch.Tensor -> sigma parameter of distribution Q
             * mu_p: float -> mu parameter of distribution P
             * sigma_p: float -> sigma parameter of distribution P

        returns torch.Tensor of shape 0
        """
        # Closed-form KL between diagonal Gaussians, summed over elements:
        # log(sigma_p/sigma_q) + (sigma_q^2 + (mu_q - mu_p)^2)/(2 sigma_p^2) - 1/2
        log_ratio = torch.log(sigma_p) - torch.log(sigma_q)
        quad_term = (sigma_q ** 2 + (mu_q - mu_p) ** 2) / (2 * (sigma_p ** 2))
        return (log_ratio + quad_term - 0.5).sum()
| 2,497 | 45.259259 | 97 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/dropout.py | '''
wrapper for Dropout
'''
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
class Dropout(nn.Module):
    """Dropout wrapper for (tensor, kl) pipelines: returns (dropped, 0)."""

    __constants__ = ['p', 'inplace']

    def __init__(self, p=0.5, inplace=False):
        super(Dropout, self).__init__()
        if p < 0 or p > 1:
            raise ValueError(
                "dropout probability has to be between 0 and 1, but got {}".
                format(p))
        self.p = p
        self.inplace = inplace

    def forward(self, input):
        """input is a (tensor, kl) pair; upstream kl is ignored."""
        dropped = F.dropout(input[0], self.p, self.training, self.inplace)
        return dropped, 0

    def extra_repr(self):
        return 'p={}, inplace={}'.format(self.p, self.inplace)
| 703 | 23.275862 | 76 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/relu.py | '''
wrapper for ReLU
'''
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.nn.functional as F
class ReLU(nn.Module):
    """ReLU wrapper for (tensor, kl) pipelines: returns (activated, 0)."""

    __constants__ = ['inplace']

    def __init__(self, inplace=False):
        super(ReLU, self).__init__()
        self.inplace = inplace

    def forward(self, input):
        """input is a (tensor, kl) pair; upstream kl is ignored."""
        activated = F.relu(input[0], inplace=self.inplace)
        return activated, 0

    def extra_repr(self):
        return 'inplace=True' if self.inplace else ''
| 508 | 19.36 | 60 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/variational_layers/linear_variational.py | # Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Linear Reparameterization Layers with reparameterization estimator to perform
# variational inference in Bayesian neural networks. Reparameterization layers
# enables Monte Carlo approximation of the distribution over 'kernel' and 'bias'.
#
# Kullback-Leibler divergence between the surrogate posterior and prior is computed
# and returned along with the tensors of outputs after linear opertaion, which is
# required to compute Evidence Lower Bound (ELBO).
#
# @authors: Ranganath Krishnan
# ======================================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module, Parameter
from ..base_variational_layer import BaseVariationalLayer_
import math
from lrp.functional.linear import linear # <------------------------------
from lrp.linear import Linear # <------------------------------
class LinearReparameterization(BaseVariationalLayer_):
    def __init__(self,
                 in_features,
                 out_features,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements Linear layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_features: int -> size of each input sample,
            out_features: int -> size of each output sample,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super(LinearReparameterization, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # NOTE: the trailing commas make these 1-tuples; init_parameters()
        # indexes them with [0].
        self.posterior_mu_init = posterior_mu_init,  # mean of weight
        # variance of weight --> sigma = log (1 + exp(rho))
        self.posterior_rho_init = posterior_rho_init,
        self.bias = bias

        # Trainable posterior parameters for the weight matrix.
        self.mu_weight = Parameter(torch.Tensor(out_features, in_features))
        self.rho_weight = Parameter(torch.Tensor(out_features, in_features))
        # Non-trainable buffers: the sampling noise and the (fixed) prior.
        self.register_buffer('eps_weight',
                             torch.Tensor(out_features, in_features))
        self.register_buffer('prior_weight_mu',
                             torch.Tensor(out_features, in_features))
        self.register_buffer('prior_weight_sigma',
                             torch.Tensor(out_features, in_features))
        if bias:
            self.mu_bias = Parameter(torch.Tensor(out_features))
            self.rho_bias = Parameter(torch.Tensor(out_features))
            self.register_buffer('eps_bias', torch.Tensor(out_features))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_features))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_features))
        else:
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)

        self.init_parameters()

    def init_parameters(self):
        # Prior is a fixed Gaussian; posterior mu/rho are drawn around the
        # configured init values with std 0.1.
        self.prior_weight_mu.fill_(self.prior_mean)
        self.prior_weight_sigma.fill_(self.prior_variance)

        self.mu_weight.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
        self.rho_weight.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
        if self.mu_bias is not None:
            self.prior_bias_mu.fill_(self.prior_mean)
            self.prior_bias_sigma.fill_(self.prior_variance)
            self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
                                       std=0.1)

    def forward(self, input, explain=False, rule="epsilon"):
        """Sample a weight/bias, apply the linear map, return (out, kl).

        When `explain` is True the computation is routed through the LRP
        `linear[rule]` functional (for relevance propagation) instead of
        F.linear; `rule` selects the LRP propagation rule.
        """
        # sigma = softplus(rho) keeps the std positive; a fresh epsilon is
        # drawn on every forward call (reparameterization trick).
        sigma_weight = torch.log1p(torch.exp(self.rho_weight))
        weight = self.mu_weight + \
            (sigma_weight * self.eps_weight.data.normal_())
        kl_weight = self.kl_div(self.mu_weight, sigma_weight,
                                self.prior_weight_mu, self.prior_weight_sigma)
        bias = None

        # Move the sampled tensors to the input's device (buffers may live
        # elsewhere when the model and data devices differ).
        weight = weight.to(input.device)

        if self.mu_bias is not None:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            bias = self.mu_bias + (sigma_bias * self.eps_bias.data.normal_())
            kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)
            bias = bias.to(input.device)

        if explain is True:
            # LRP-aware linear op from the lrp package.
            out = linear[rule](input, weight, bias)
            # out = Linear(input, weight, bias)
        else:
            out = F.linear(input, weight, bias)

        if self.mu_bias is not None:
            kl = kl_weight + kl_bias
        else:
            kl = kl_weight

        return out, kl
| 7,337 | 46.341935 | 148 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/variational_layers/conv_variational.py | # Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Convolutional Layers with reparameterization estimator to perform variational
# inference in Bayesian neural networks. Reparameterization layers
# enables Monte Carlo approximation of the distribution over 'kernel' and 'bias'.
#
# Kullback-Leibler divergence between the surrogate posterior and prior is computed
# and returned along with the tensors of outputs after convolution operation, which is
# required to compute Evidence Lower Bound (ELBO).
#
# @authors: Ranganath Krishnan
#
# ======================================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from ..base_variational_layer import BaseVariationalLayer_
import math
from lrp.functional.conv_cifar import conv2d_cifar # <------------------------------
from lrp.conv import Conv2d # <------------------------------
__all__ = [
'Conv1dReparameterization',
'Conv2dReparameterization',
'Conv3dReparameterization',
'ConvTranspose1dReparameterization',
'ConvTranspose2dReparameterization',
'ConvTranspose3dReparameterization',
]
class Conv1dReparameterization(BaseVariationalLayer_):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements Conv1d layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel,
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super(Conv1dReparameterization, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('invalid in_channels size')
        if out_channels % groups != 0:
            raise ValueError('invalid in_channels size')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # NOTE: the trailing commas make these 1-tuples; init_parameters()
        # indexes them with [0].
        self.posterior_mu_init = posterior_mu_init,  # mean of weight
        # variance of weight --> sigma = log (1 + exp(rho))
        self.posterior_rho_init = posterior_rho_init,
        self.bias = bias

        # Trainable posterior parameters for the kernel.
        self.mu_kernel = Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size))
        self.rho_kernel = Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size))
        # Non-trainable buffers: sampling noise and the (fixed) prior.
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(out_channels, in_channels // groups, kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(out_channels, in_channels // groups, kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(out_channels, in_channels // groups, kernel_size))

        if self.bias:
            self.mu_bias = Parameter(torch.Tensor(out_channels))
            self.rho_bias = Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        # Prior is a fixed Gaussian; posterior mu/rho are drawn around the
        # configured init values with std 0.1.
        self.prior_weight_mu.data.fill_(self.prior_mean)
        self.prior_weight_sigma.fill_(self.prior_variance)

        self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)

        if self.bias:
            self.prior_bias_mu.data.fill_(self.prior_mean)
            self.prior_bias_sigma.fill_(self.prior_variance)
            self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
                                       std=0.1)

    def forward(self, input):
        """Sample a kernel/bias, apply conv1d, return (out, kl)."""
        # sigma = softplus(rho); a fresh epsilon is drawn on every forward
        # call (reparameterization trick).
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        eps_kernel = self.eps_kernel.data.normal_()
        weight = self.mu_kernel + (sigma_weight * eps_kernel)
        kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
                                self.prior_weight_mu, self.prior_weight_sigma)
        bias = None

        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = self.mu_bias + (sigma_bias * eps_bias)
            kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)

        out = F.conv1d(input, weight, bias, self.stride, self.padding,
                       self.dilation, self.groups)
        if self.bias:
            kl = kl_weight + kl_bias
        else:
            kl = kl_weight

        return out, kl
class Conv2dReparameterization(BaseVariationalLayer_):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements Conv2d layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel,
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super(Conv2dReparameterization, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('invalid in_channels size')
        if out_channels % groups != 0:
            # Bug fix: this check concerns out_channels; the message
            # previously (wrongly) blamed in_channels.
            raise ValueError('invalid out_channels size')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # Kept as 1-tuples (trailing comma) for backward compatibility:
        # init_parameters() indexes them with [0].
        self.posterior_mu_init = posterior_mu_init,  # mean of weight
        # variance of weight --> sigma = log (1 + exp(rho))
        self.posterior_rho_init = posterior_rho_init,
        self.bias = bias

        # Trainable variational posterior parameters of the kernel.
        self.mu_kernel = Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))
        self.rho_kernel = Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))
        # Noise and constant prior tensors are buffers: not trained, but
        # saved with state_dict and moved across devices with the module.
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))

        if self.bias:
            self.mu_bias = Parameter(torch.Tensor(out_channels))
            self.rho_bias = Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill prior buffers with constants; sample mu/rho from N(init, 0.1**2)."""
        self.prior_weight_mu.fill_(self.prior_mean)
        self.prior_weight_sigma.fill_(self.prior_variance)

        std = 0.1
        self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=std)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=std)
        if self.bias:
            self.prior_bias_mu.fill_(self.prior_mean)
            self.prior_bias_sigma.fill_(self.prior_variance)
            self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=std)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init[0], std=std)

    def forward(self, input, explain=False, rule="epsilon"):
        """Sample weights via the reparameterization trick and convolve.

        When `explain` is True the convolution is routed through the
        LRP-aware implementation selected by `rule` from `conv2d_cifar`
        instead of F.conv2d.

        Returns:
            (output, kl): convolution output and KL divergence between
            the variational posterior and the prior.
        """
        # sigma = log(1 + exp(rho)) keeps the posterior scale positive.
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        # In-place refresh of the noise buffer: one weight sample per call.
        eps_kernel = self.eps_kernel.data.normal_()
        weight = self.mu_kernel + (sigma_weight * eps_kernel)
        kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
                                self.prior_weight_mu, self.prior_weight_sigma)
        weight = weight.to(input.device)
        bias = None

        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = self.mu_bias + (sigma_bias * eps_bias)
            kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)
            bias = bias.to(input.device)

        if explain is True:
            out = conv2d_cifar[rule](input, weight, bias, self.stride,
                                     self.padding, self.dilation, self.groups)
        else:
            out = F.conv2d(input, weight, bias, self.stride, self.padding,
                           self.dilation, self.groups)

        if self.bias:
            kl = kl_weight + kl_bias
        else:
            kl = kl_weight

        return out, kl
class Conv3dReparameterization(BaseVariationalLayer_):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 prior_mean,
                 prior_variance,
                 posterior_mu_init,
                 posterior_rho_init,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True):
        """
        Implements Conv3d layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel,
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super(Conv3dReparameterization, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('invalid in_channels size')
        if out_channels % groups != 0:
            # Bug fix: this check concerns out_channels; the message
            # previously (wrongly) blamed in_channels.
            raise ValueError('invalid out_channels size')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # Kept as 1-tuples (trailing comma) for backward compatibility:
        # init_parameters() indexes them with [0].
        self.posterior_mu_init = posterior_mu_init,  # mean of weight
        # variance of weight --> sigma = log (1 + exp(rho))
        self.posterior_rho_init = posterior_rho_init,
        self.bias = bias

        # Trainable variational posterior parameters of the kernel.
        self.mu_kernel = Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.rho_kernel = Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        # Noise and constant prior tensors are buffers: not trained, but
        # saved with state_dict and moved across devices with the module.
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))

        if self.bias:
            self.mu_bias = Parameter(torch.Tensor(out_channels))
            self.rho_bias = Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill prior buffers with constants; sample mu/rho from N(init, 0.1**2)."""
        self.prior_weight_mu.fill_(self.prior_mean)
        self.prior_weight_sigma.fill_(self.prior_variance)

        self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
        if self.bias:
            self.prior_bias_mu.fill_(self.prior_mean)
            self.prior_bias_sigma.fill_(self.prior_variance)
            self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
                                       std=0.1)

    def forward(self, input):
        """Sample weights via the reparameterization trick and apply conv3d.

        Returns:
            (output, kl): convolution output and KL divergence between
            the variational posterior and the prior.
        """
        # sigma = log(1 + exp(rho)) keeps the posterior scale positive.
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        # In-place refresh of the noise buffer: one weight sample per call.
        eps_kernel = self.eps_kernel.data.normal_()
        weight = self.mu_kernel + (sigma_weight * eps_kernel)
        kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
                                self.prior_weight_mu, self.prior_weight_sigma)
        bias = None

        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = self.mu_bias + (sigma_bias * eps_bias)
            kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)

        out = F.conv3d(input, weight, bias, self.stride, self.padding,
                       self.dilation, self.groups)

        if self.bias:
            kl = kl_weight + kl_bias
        else:
            kl = kl_weight

        return out, kl
class ConvTranspose1dReparameterization(BaseVariationalLayer_):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 output_padding=0,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements ConvTranspose1d layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel,
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            output_padding: int -> additional size added to one side of the output shape. Default: 0,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super(ConvTranspose1dReparameterization, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('invalid in_channels size')
        if out_channels % groups != 0:
            # Bug fix: this check concerns out_channels; the message
            # previously (wrongly) blamed in_channels.
            raise ValueError('invalid out_channels size')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.output_padding = output_padding
        self.dilation = dilation
        self.groups = groups
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # Kept as 1-tuples (trailing comma) for backward compatibility:
        # init_parameters() indexes them with [0].
        self.posterior_mu_init = posterior_mu_init,  # mean of weight
        # variance of weight --> sigma = log (1 + exp(rho))
        self.posterior_rho_init = posterior_rho_init,
        self.bias = bias

        # Transposed convolution kernels are laid out
        # (in_channels, out_channels // groups, kernel_size).
        self.mu_kernel = Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size))
        self.rho_kernel = Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size))
        # Noise and constant prior tensors are buffers: not trained, but
        # saved with state_dict and moved across devices with the module.
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(in_channels, out_channels // groups, kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(in_channels, out_channels // groups, kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(in_channels, out_channels // groups, kernel_size))

        if self.bias:
            self.mu_bias = Parameter(torch.Tensor(out_channels))
            self.rho_bias = Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill prior buffers with constants; sample mu/rho from N(init, 0.1**2)."""
        self.prior_weight_mu.fill_(self.prior_mean)
        self.prior_weight_sigma.fill_(self.prior_variance)

        self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
        if self.bias:
            self.prior_bias_mu.fill_(self.prior_mean)
            self.prior_bias_sigma.fill_(self.prior_variance)
            self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
                                       std=0.1)

    def forward(self, input):
        """Sample weights via the reparameterization trick and apply a
        1d transposed convolution.

        Returns:
            (output, kl): output tensor and KL divergence between the
            variational posterior and the prior.
        """
        # sigma = log(1 + exp(rho)) keeps the posterior scale positive.
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        # In-place refresh of the noise buffer: one weight sample per call.
        eps_kernel = self.eps_kernel.data.normal_()
        weight = self.mu_kernel + (sigma_weight * eps_kernel)
        kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
                                self.prior_weight_mu, self.prior_weight_sigma)
        bias = None

        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = self.mu_bias + (sigma_bias * eps_bias)
            kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)

        # Bug fix: F.conv_transpose1d's positional order is
        # (..., output_padding, groups, dilation); the previous positional
        # call passed dilation into the groups slot and vice versa.
        # Keyword arguments make the mapping explicit and correct.
        out = F.conv_transpose1d(input, weight, bias, self.stride,
                                 self.padding, self.output_padding,
                                 groups=self.groups, dilation=self.dilation)

        if self.bias:
            kl = kl_weight + kl_bias
        else:
            kl = kl_weight

        return out, kl
class ConvTranspose2dReparameterization(BaseVariationalLayer_):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 output_padding=0,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements ConvTranspose2d layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel,
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            output_padding: int -> additional size added to one side of the output shape. Default: 0,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super(ConvTranspose2dReparameterization, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('invalid in_channels size')
        if out_channels % groups != 0:
            # Bug fix: this check concerns out_channels; the message
            # previously (wrongly) blamed in_channels.
            raise ValueError('invalid out_channels size')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.output_padding = output_padding
        self.dilation = dilation
        self.groups = groups
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # Kept as 1-tuples (trailing comma) for backward compatibility:
        # init_parameters() indexes them with [0].
        self.posterior_mu_init = posterior_mu_init,  # mean of weight
        # variance of weight --> sigma = log (1 + exp(rho))
        self.posterior_rho_init = posterior_rho_init,
        self.bias = bias

        # Transposed convolution kernels are laid out
        # (in_channels, out_channels // groups, kernel_size, kernel_size).
        self.mu_kernel = Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size))
        self.rho_kernel = Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size))
        # Noise and constant prior tensors are buffers: not trained, but
        # saved with state_dict and moved across devices with the module.
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size))

        if self.bias:
            self.mu_bias = Parameter(torch.Tensor(out_channels))
            self.rho_bias = Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill prior buffers with constants; sample mu/rho from N(init, 0.1**2)."""
        self.prior_weight_mu.fill_(self.prior_mean)
        self.prior_weight_sigma.fill_(self.prior_variance)

        self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
        if self.bias:
            self.prior_bias_mu.fill_(self.prior_mean)
            self.prior_bias_sigma.fill_(self.prior_variance)
            self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
                                       std=0.1)

    def forward(self, input):
        """Sample weights via the reparameterization trick and apply a
        2d transposed convolution.

        Returns:
            (output, kl): output tensor and KL divergence between the
            variational posterior and the prior.
        """
        # sigma = log(1 + exp(rho)) keeps the posterior scale positive.
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        # In-place refresh of the noise buffer: one weight sample per call.
        eps_kernel = self.eps_kernel.data.normal_()
        weight = self.mu_kernel + (sigma_weight * eps_kernel)
        kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
                                self.prior_weight_mu, self.prior_weight_sigma)
        bias = None

        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = self.mu_bias + (sigma_bias * eps_bias)
            kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)

        # Bug fix: F.conv_transpose2d's positional order is
        # (..., output_padding, groups, dilation); the previous positional
        # call passed dilation into the groups slot and vice versa.
        # Keyword arguments make the mapping explicit and correct.
        out = F.conv_transpose2d(input, weight, bias, self.stride,
                                 self.padding, self.output_padding,
                                 groups=self.groups, dilation=self.dilation)

        if self.bias:
            kl = kl_weight + kl_bias
        else:
            kl = kl_weight

        return out, kl
class ConvTranspose3dReparameterization(BaseVariationalLayer_):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 output_padding=0,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements ConvTranspose3d layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel,
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            output_padding: int -> additional size added to one side of the output shape. Default: 0,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super(ConvTranspose3dReparameterization, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('invalid in_channels size')
        if out_channels % groups != 0:
            # Bug fix: this check concerns out_channels; the message
            # previously (wrongly) blamed in_channels.
            raise ValueError('invalid out_channels size')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.output_padding = output_padding
        self.dilation = dilation
        self.groups = groups
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # Kept as 1-tuples (trailing comma) for backward compatibility:
        # init_parameters() indexes them with [0].
        self.posterior_mu_init = posterior_mu_init,  # mean of weight
        # variance of weight --> sigma = log (1 + exp(rho))
        self.posterior_rho_init = posterior_rho_init,
        self.bias = bias

        # Transposed convolution kernels are laid out
        # (in_channels, out_channels // groups, k, k, k).
        self.mu_kernel = Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.rho_kernel = Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        # Noise and constant prior tensors are buffers: not trained, but
        # saved with state_dict and moved across devices with the module.
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))

        if self.bias:
            self.mu_bias = Parameter(torch.Tensor(out_channels))
            self.rho_bias = Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill prior buffers with constants; sample mu/rho from N(init, 0.1**2)."""
        self.prior_weight_mu.fill_(self.prior_mean)
        self.prior_weight_sigma.fill_(self.prior_variance)

        self.mu_kernel.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init[0], std=0.1)
        if self.bias:
            self.prior_bias_mu.fill_(self.prior_mean)
            self.prior_bias_sigma.fill_(self.prior_variance)
            self.mu_bias.data.normal_(mean=self.posterior_mu_init[0], std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init[0],
                                       std=0.1)

    def forward(self, input):
        """Sample weights via the reparameterization trick and apply a
        3d transposed convolution.

        Returns:
            (output, kl): output tensor and KL divergence between the
            variational posterior and the prior.
        """
        # sigma = log(1 + exp(rho)) keeps the posterior scale positive.
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        # In-place refresh of the noise buffer: one weight sample per call.
        eps_kernel = self.eps_kernel.data.normal_()
        weight = self.mu_kernel + (sigma_weight * eps_kernel)
        kl_weight = self.kl_div(self.mu_kernel, sigma_weight,
                                self.prior_weight_mu, self.prior_weight_sigma)
        bias = None

        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = self.mu_bias + (sigma_bias * eps_bias)
            kl_bias = self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)

        # Bug fix: F.conv_transpose3d's positional order is
        # (..., output_padding, groups, dilation); the previous positional
        # call passed dilation into the groups slot and vice versa.
        # Keyword arguments make the mapping explicit and correct.
        out = F.conv_transpose3d(input, weight, bias, self.stride,
                                 self.padding, self.output_padding,
                                 groups=self.groups, dilation=self.dilation)

        if self.bias:
            kl = kl_weight + kl_bias
        else:
            kl = kl_weight

        return out, kl
| 38,039 | 44.231867 | 148 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/variational_layers/rnn_variational.py | # Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# LSTM Reparameterization Layer with reparameterization estimator to perform
# variational inference in Bayesian neural networks. Reparameterization layers
# enables Monte Carlo approximation of the distribution over 'kernel' and 'bias'.
#
# Kullback-Leibler divergence between the surrogate posterior and prior is computed
# and returned along with the tensors of outputs after linear opertaion, which is
# required to compute Evidence Lower Bound (ELBO).
#
# @authors: Piero Esposito
#
# ======================================================================================
from .linear_variational import LinearReparameterization
from ..base_variational_layer import BaseVariationalLayer_
import torch
class LSTMReparameterization(BaseVariationalLayer_):
    def __init__(self,
                 in_features,
                 out_features,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements LSTM layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init std for the trainable mu parameter, sampled from N(0, posterior_mu_init),
            posterior_rho_init: float -> init std for the trainable rho parameter, sampled from N(0, posterior_rho_init),
            in_features: int -> size of each input sample,
            out_features: int -> size of each output sample,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # Stored as 1-tuples (note the trailing commas); downstream code
        # reads them with [0].
        self.posterior_mu_init = posterior_mu_init,  # mean of weight
        # variance of weight --> sigma = log (1 + exp(rho))
        self.posterior_rho_init = posterior_rho_init,
        self.bias = bias

        # Input-to-hidden and hidden-to-hidden Bayesian linear maps; each
        # emits the 4 stacked LSTM gate pre-activations (i, f, g, o),
        # hence out_features * 4.
        self.ih = LinearReparameterization(
            prior_mean=prior_mean,
            prior_variance=prior_variance,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            in_features=in_features,
            out_features=out_features * 4,
            bias=bias)

        self.hh = LinearReparameterization(
            prior_mean=prior_mean,
            prior_variance=prior_variance,
            posterior_mu_init=posterior_mu_init,
            posterior_rho_init=posterior_rho_init,
            in_features=out_features,
            out_features=out_features * 4,
            bias=bias)

    def forward(self, X, hidden_states=None):
        # X is batch-first: (batch, seq, in_features).
        batch_size, seq_size, _ = X.size()

        hidden_seq = []
        c_ts = []

        if hidden_states is None:
            # Zero initial hidden/cell states, placed on the input's device.
            h_t, c_t = (torch.zeros(batch_size,
                                    self.out_features).to(X.device),
                        torch.zeros(batch_size,
                                    self.out_features).to(X.device))
        else:
            h_t, c_t = hidden_states

        HS = self.out_features
        kl = 0
        for t in range(seq_size):
            x_t = X[:, t, :]

            # Weights are re-sampled by self.ih/self.hh at every timestep,
            # and their KL terms accumulate, so `kl` grows with seq length.
            ff_i, kl_i = self.ih(x_t)
            ff_h, kl_h = self.hh(h_t)
            gates = ff_i + ff_h

            kl += kl_i + kl_h
            # Slice the stacked pre-activations into the four LSTM gates.
            i_t, f_t, g_t, o_t = (
                torch.sigmoid(gates[:, :HS]),  # input
                torch.sigmoid(gates[:, HS:HS * 2]),  # forget
                torch.tanh(gates[:, HS * 2:HS * 3]),  # cell candidate
                torch.sigmoid(gates[:, HS * 3:]),  # output
            )

            # Standard LSTM state update.
            c_t = f_t * c_t + i_t * g_t
            h_t = o_t * torch.tanh(c_t)

            hidden_seq.append(h_t.unsqueeze(0))
            c_ts.append(c_t.unsqueeze(0))

        hidden_seq = torch.cat(hidden_seq, dim=0)
        c_ts = torch.cat(c_ts, dim=0)
        # reshape from shape (sequence, batch, feature) to (batch, sequence, feature)
        hidden_seq = hidden_seq.transpose(0, 1).contiguous()
        c_ts = c_ts.transpose(0, 1).contiguous()

        # NOTE(review): unlike nn.LSTM, the middle element of the return is
        # the full per-step sequences (hidden_seq, c_ts), not the final
        # (h_t, c_t) pair — confirm downstream callers expect sequences.
        return hidden_seq, (hidden_seq, c_ts), kl
| 5,973 | 40.486111 | 121 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/flipout_layers/linear_flipout.py | # Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Linear Flipout Layers with flipout weight estimator to perform
# variational inference in Bayesian neural networks. Variational layers
# enables Monte Carlo approximation of the distribution over the weights
#
# @authors: Ranganath Krishnan, Piero Esposito
#
# ======================================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.normal import Normal
from torch.distributions.uniform import Uniform
from ..base_variational_layer import BaseVariationalLayer_
__all__ = ["LinearFlipout"]
class LinearFlipout(BaseVariationalLayer_):
    """Bayesian fully-connected layer using the Flipout estimator.

    Ref: https://arxiv.org/abs/1803.04386
    Inherits from bayesian_torch.layers.BaseVariationalLayer_

    Parameters:
        in_features: int -> size of each input sample,
        out_features: int -> size of each output sample,
        prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
        prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
        posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
        posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
        bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
    """

    def __init__(self,
                 in_features,
                 out_features,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init

        # Variational posterior parameters: mean and rho, where the
        # posterior scale is sigma = log1p(exp(rho)) (softplus).
        weight_shape = (out_features, in_features)
        self.mu_weight = nn.Parameter(torch.Tensor(*weight_shape))
        self.rho_weight = nn.Parameter(torch.Tensor(*weight_shape))
        # Non-trainable state: noise scratch buffer and the (constant) prior.
        for buf_name in ('eps_weight', 'prior_weight_mu', 'prior_weight_sigma'):
            self.register_buffer(buf_name, torch.Tensor(*weight_shape))

        if bias:
            self.mu_bias = nn.Parameter(torch.Tensor(out_features))
            self.rho_bias = nn.Parameter(torch.Tensor(out_features))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_features))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_features))
            self.register_buffer('eps_bias', torch.Tensor(out_features))
        else:
            # Register the slots as None so attribute access stays uniform.
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill the prior buffers and randomly initialize posterior params."""
        self.prior_weight_mu.fill_(self.prior_mean)
        self.prior_weight_sigma.fill_(self.prior_variance)

        # Posterior mu/rho start as small Gaussian perturbations around the
        # configured init values.
        self.mu_weight.data.normal_(mean=self.posterior_mu_init, std=0.1)
        self.rho_weight.data.normal_(mean=self.posterior_rho_init, std=0.1)

        if self.mu_bias is not None:
            self.prior_bias_mu.fill_(self.prior_mean)
            self.prior_bias_sigma.fill_(self.prior_variance)
            self.mu_bias.data.normal_(mean=self.posterior_mu_init, std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init, std=0.1)

    def forward(self, x):
        """Apply the layer; returns (output, kl).

        output is mean-linear(x) plus a Flipout sign-flipped perturbation;
        kl is the KL divergence between posterior and prior for this sample.
        """
        # Sample the weight perturbation delta_W = sigma * eps.
        weight_sigma = torch.log1p(torch.exp(self.rho_weight))
        weight_delta = weight_sigma * self.eps_weight.data.normal_()

        kl = self.kl_div(self.mu_weight, weight_sigma, self.prior_weight_mu,
                         self.prior_weight_sigma)

        bias_delta = None
        if self.mu_bias is not None:
            bias_sigma = torch.log1p(torch.exp(self.rho_bias))
            bias_delta = bias_sigma * self.eps_bias.data.normal_()
            kl = kl + self.kl_div(self.mu_bias, bias_sigma,
                                  self.prior_bias_mu, self.prior_bias_sigma)

        # Deterministic part computed from the posterior means.
        mean_out = F.linear(x, self.mu_weight, self.mu_bias)

        # Flipout: random sign flips give pseudo-independent perturbations
        # per example while sharing one sampled delta_W.
        in_sign = x.clone().uniform_(-1, 1).sign()
        out_sign = mean_out.clone().uniform_(-1, 1).sign()
        perturbation = F.linear(x * in_sign, weight_delta,
                                bias_delta) * out_sign

        return mean_out + perturbation, kl
| 6,701 | 43.979866 | 148 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/flipout_layers/rnn_flipout.py | # Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# LSTM Flipout Layer with reparameterization estimator to perform
# variational inference in Bayesian neural networks. Reparameterization layers
# enables Monte Carlo approximation of the distribution over 'kernel' and 'bias'.
#
# Kullback-Leibler divergence between the surrogate posterior and prior is computed
# and returned along with the tensors of outputs after linear opertaion, which is
# required to compute Evidence Lower Bound (ELBO).
#
# @authors: Piero Esposito
#
# ======================================================================================
from .linear_flipout import LinearFlipout
from ..base_variational_layer import BaseVariationalLayer_
import torch
class LSTMFlipout(BaseVariationalLayer_):
    def __init__(self,
                 in_features,
                 out_features,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements LSTM layer with reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_features: int -> size of each input sample,
            out_features: int -> size of each output sample,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        # Fixed: the original assignments ended with a stray trailing comma,
        # which silently stored 1-tuples (e.g. (0,)) instead of scalars.
        self.posterior_mu_init = posterior_mu_init  # mean of weight
        self.posterior_rho_init = posterior_rho_init  # sigma = log(1 + exp(rho))
        self.bias = bias

        # Input->hidden and hidden->hidden variational projections; each
        # emits the four stacked gate pre-activations (i, f, g, o).
        self.ih = LinearFlipout(prior_mean=prior_mean,
                                prior_variance=prior_variance,
                                posterior_mu_init=posterior_mu_init,
                                posterior_rho_init=posterior_rho_init,
                                in_features=in_features,
                                out_features=out_features * 4,
                                bias=bias)

        self.hh = LinearFlipout(prior_mean=prior_mean,
                                prior_variance=prior_variance,
                                posterior_mu_init=posterior_mu_init,
                                posterior_rho_init=posterior_rho_init,
                                in_features=out_features,
                                out_features=out_features * 4,
                                bias=bias)

    def forward(self, X, hidden_states=None):
        """Run the LSTM over a batch-first sequence.

        Parameters:
            X: tensor of shape (batch, seq, in_features).
            hidden_states: optional (h_0, c_0) pair, each of shape
                (batch, out_features); zeros are used when omitted.

        Returns:
            hidden_seq: (batch, seq, out_features) hidden state per step.
            (hidden_seq, c_ts): the full hidden- and cell-state sequences.
                NOTE(review): unlike nn.LSTM, this returns whole sequences
                rather than the final (h_t, c_t) — confirm callers expect it.
            kl: KL divergence accumulated from both variational linears
                over all time steps.
        """
        batch_size, seq_size, _ = X.size()

        hidden_seq = []
        c_ts = []

        if hidden_states is None:
            h_t, c_t = (torch.zeros(batch_size,
                                    self.out_features).to(X.device),
                        torch.zeros(batch_size,
                                    self.out_features).to(X.device))
        else:
            h_t, c_t = hidden_states

        HS = self.out_features
        kl = 0
        for t in range(seq_size):
            x_t = X[:, t, :]

            ff_i, kl_i = self.ih(x_t)
            ff_h, kl_h = self.hh(h_t)
            gates = ff_i + ff_h
            kl += kl_i + kl_h

            i_t, f_t, g_t, o_t = (
                torch.sigmoid(gates[:, :HS]),  # input gate
                torch.sigmoid(gates[:, HS:HS * 2]),  # forget gate
                torch.tanh(gates[:, HS * 2:HS * 3]),  # candidate cell
                torch.sigmoid(gates[:, HS * 3:]),  # output gate
            )

            c_t = f_t * c_t + i_t * g_t
            h_t = o_t * torch.tanh(c_t)

            hidden_seq.append(h_t.unsqueeze(0))
            c_ts.append(c_t.unsqueeze(0))

        hidden_seq = torch.cat(hidden_seq, dim=0)
        c_ts = torch.cat(c_ts, dim=0)
        # reshape from (sequence, batch, feature) to (batch, sequence, feature)
        hidden_seq = hidden_seq.transpose(0, 1).contiguous()
        c_ts = c_ts.transpose(0, 1).contiguous()
        return hidden_seq, (hidden_seq, c_ts), kl
| 6,145 | 42.588652 | 148 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/layers/flipout_layers/conv_flipout.py | # Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Convolutional layers with flipout Monte Carlo weight estimator to perform
# variational inference in Bayesian neural networks. Variational layers
# enables Monte Carlo approximation of the distribution over the kernel
#
# @authors: Ranganath Krishnan, Piero Esposito
#
# ======================================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..base_variational_layer import BaseVariationalLayer_
from torch.distributions.normal import Normal
from torch.distributions.uniform import Uniform
__all__ = [
'Conv1dFlipout',
'Conv2dFlipout',
'Conv3dFlipout',
'ConvTranspose1dFlipout',
'ConvTranspose2dFlipout',
'ConvTranspose3dFlipout',
]
class Conv1dFlipout(BaseVariationalLayer_):
    """Bayesian Conv1d layer using the Flipout Monte Carlo estimator.

    Inherits from bayesian_torch.layers.BaseVariationalLayer_

    Parameters:
        in_channels: int -> number of channels in the input image,
        out_channels: int -> number of channels produced by the convolution,
        kernel_size: int -> size of the convolving kernel,
        stride: int -> stride of the convolution. Default: 1,
        padding: int -> zero-padding added to both sides of the input. Default: 0,
        dilation: int -> spacing between kernel elements. Default: 1,
        groups: int -> number of blocked connections from input channels to output channels,
        prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
        prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
        posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
        posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
        bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        super().__init__()
        # Convolution hyper-parameters.
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        # Prior / posterior-initialization hyper-parameters.
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init
        self.bias = bias

        # Posterior parameters (kernel) and non-trainable buffers: noise
        # scratch space plus the constant prior mean/sigma.
        kernel_shape = (out_channels, in_channels // groups, kernel_size)
        self.mu_kernel = nn.Parameter(torch.Tensor(*kernel_shape))
        self.rho_kernel = nn.Parameter(torch.Tensor(*kernel_shape))
        for buf_name in ('eps_kernel', 'prior_weight_mu', 'prior_weight_sigma'):
            self.register_buffer(buf_name, torch.Tensor(*kernel_shape))

        if self.bias:
            self.mu_bias = nn.Parameter(torch.Tensor(out_channels))
            self.rho_bias = nn.Parameter(torch.Tensor(out_channels))
            for buf_name in ('eps_bias', 'prior_bias_mu', 'prior_bias_sigma'):
                self.register_buffer(buf_name, torch.Tensor(out_channels))
        else:
            # Keep the slots present (as None) so attribute access is uniform.
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            for buf_name in ('eps_bias', 'prior_bias_mu', 'prior_bias_sigma'):
                self.register_buffer(buf_name, None)

        self.init_parameters()

    def init_parameters(self):
        """Fill prior buffers and randomly initialize posterior parameters."""
        self.prior_weight_mu.data.fill_(self.prior_mean)
        self.prior_weight_sigma.data.fill_(self.prior_variance)

        # Posterior mu/rho start as small Gaussian perturbations.
        self.mu_kernel.data.normal_(mean=self.posterior_mu_init, std=.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init, std=.1)

        if self.bias:
            self.mu_bias.data.normal_(mean=self.posterior_mu_init, std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init, std=0.1)
            self.prior_bias_mu.data.fill_(self.prior_mean)
            self.prior_bias_sigma.data.fill_(self.prior_variance)

    def forward(self, x):
        """Apply the layer; returns (output, kl)."""
        # Deterministic convolution with the posterior means.
        mean_out = F.conv1d(x,
                            weight=self.mu_kernel,
                            bias=self.mu_bias,
                            stride=self.stride,
                            padding=self.padding,
                            dilation=self.dilation,
                            groups=self.groups)

        # Flipout sign flips decorrelate the shared weight perturbation.
        in_sign = x.clone().uniform_(-1, 1).sign()
        out_sign = mean_out.clone().uniform_(-1, 1).sign()

        # Sample the kernel perturbation and accumulate its KL term.
        kernel_sigma = torch.log1p(torch.exp(self.rho_kernel))
        kernel_delta = kernel_sigma * self.eps_kernel.data.normal_()
        kl = self.kl_div(self.mu_kernel, kernel_sigma,
                         self.prior_weight_mu, self.prior_weight_sigma)

        bias_delta = None
        if self.bias:
            bias_sigma = torch.log1p(torch.exp(self.rho_bias))
            bias_delta = bias_sigma * self.eps_bias.data.normal_()
            kl = kl + self.kl_div(self.mu_bias, bias_sigma,
                                  self.prior_bias_mu, self.prior_bias_sigma)

        perturbation = F.conv1d(x * in_sign,
                                weight=kernel_delta,
                                bias=bias_delta,
                                stride=self.stride,
                                padding=self.padding,
                                dilation=self.dilation,
                                groups=self.groups) * out_sign

        return mean_out + perturbation, kl
class Conv2dFlipout(BaseVariationalLayer_):
    # Bayesian 2d convolution; forward returns (output, kl).
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements Conv2d layer with Flipout reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel (the same int is
                used for both spatial dimensions, i.e. square kernels only),
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init
        self.bias = bias

        # Variational posterior parameters for the kernel:
        # mean (mu) and rho, with sigma = log1p(exp(rho)) in forward().
        self.mu_kernel = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))
        self.rho_kernel = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))
        # Non-trainable buffers: eps_kernel is reused as noise scratch space
        # in forward(); the prior buffers are constants set by init_parameters().
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size))

        if self.bias:
            self.mu_bias = nn.Parameter(torch.Tensor(out_channels))
            self.rho_bias = nn.Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            # Register the slots as None so attribute access stays uniform.
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill the prior buffers and randomly initialize posterior params."""
        # prior values
        self.prior_weight_mu.data.fill_(self.prior_mean)
        self.prior_weight_sigma.data.fill_(self.prior_variance)

        # init our weights for the deterministic and perturbated weights
        self.mu_kernel.data.normal_(mean=self.posterior_mu_init, std=.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init, std=.1)

        if self.bias:
            self.mu_bias.data.normal_(mean=self.posterior_mu_init, std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init, std=0.1)
            self.prior_bias_mu.data.fill_(self.prior_mean)
            self.prior_bias_sigma.data.fill_(self.prior_variance)

    def forward(self, x):
        """Apply the layer; returns (output, kl).

        output = mean-conv(x) + Flipout sign-flipped perturbation conv;
        kl is the KL divergence between posterior and prior for this sample.
        """
        # Deterministic convolution with the posterior means.
        outputs = F.conv2d(x,
                           weight=self.mu_kernel,
                           bias=self.mu_bias,
                           stride=self.stride,
                           padding=self.padding,
                           dilation=self.dilation,
                           groups=self.groups)

        # Flipout: per-example random sign flips decorrelate the shared
        # sampled weight perturbation across the batch.
        sign_input = x.clone().uniform_(-1, 1).sign()
        sign_output = outputs.clone().uniform_(-1, 1).sign()

        # Sample the kernel perturbation delta = sigma * eps.
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        eps_kernel = self.eps_kernel.data.normal_()
        delta_kernel = (sigma_weight * eps_kernel)

        kl = self.kl_div(self.mu_kernel, sigma_weight, self.prior_weight_mu,
                         self.prior_weight_sigma)

        bias = None
        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = (sigma_bias * eps_bias)
            kl = kl + self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)

        # Perturbed feedforward pass using the sampled deltas.
        perturbed_outputs = F.conv2d(x * sign_input,
                                     weight=delta_kernel,
                                     bias=bias,
                                     stride=self.stride,
                                     padding=self.padding,
                                     dilation=self.dilation,
                                     groups=self.groups) * sign_output

        # returning outputs + perturbations
        return outputs + perturbed_outputs, kl
class Conv3dFlipout(BaseVariationalLayer_):
    # Bayesian 3d convolution; forward returns (output, kl).
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements Conv3d layer with Flipout reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel (the same int is
                used for all three spatial dimensions, i.e. cubic kernels only),
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init

        # Variational posterior parameters for the kernel:
        # mean (mu) and rho, with sigma = log1p(exp(rho)) in forward().
        self.mu_kernel = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.rho_kernel = nn.Parameter(
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        # Non-trainable buffers: eps_kernel is noise scratch space; the prior
        # buffers are constants filled by init_parameters().
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(out_channels, in_channels // groups, kernel_size,
                         kernel_size, kernel_size))

        if self.bias:
            self.mu_bias = nn.Parameter(torch.Tensor(out_channels))
            self.rho_bias = nn.Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            # Register the slots as None so attribute access stays uniform.
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill the prior buffers and randomly initialize posterior params."""
        # prior values
        self.prior_weight_mu.data.fill_(self.prior_mean)
        self.prior_weight_sigma.data.fill_(self.prior_variance)

        # init our weights for the deterministic and perturbated weights
        self.mu_kernel.data.normal_(mean=self.posterior_mu_init, std=.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init, std=.1)

        if self.bias:
            self.mu_bias.data.normal_(mean=self.posterior_mu_init, std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init, std=0.1)
            self.prior_bias_mu.data.fill_(self.prior_mean)
            self.prior_bias_sigma.data.fill_(self.prior_variance)

    def forward(self, x):
        """Apply the layer; returns (output, kl).

        output = mean-conv(x) + Flipout sign-flipped perturbation conv;
        kl is the KL divergence between posterior and prior for this sample.
        """
        # Deterministic convolution with the posterior means.
        outputs = F.conv3d(x,
                           weight=self.mu_kernel,
                           bias=self.mu_bias,
                           stride=self.stride,
                           padding=self.padding,
                           dilation=self.dilation,
                           groups=self.groups)

        # Flipout: per-example random sign flips decorrelate the shared
        # sampled weight perturbation across the batch.
        sign_input = x.clone().uniform_(-1, 1).sign()
        sign_output = outputs.clone().uniform_(-1, 1).sign()

        # Sample the kernel perturbation delta = sigma * eps.
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        eps_kernel = self.eps_kernel.data.normal_()
        delta_kernel = (sigma_weight * eps_kernel)

        kl = self.kl_div(self.mu_kernel, sigma_weight, self.prior_weight_mu,
                         self.prior_weight_sigma)

        bias = None
        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = (sigma_bias * eps_bias)
            kl = kl + self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)

        # Perturbed feedforward pass using the sampled deltas.
        perturbed_outputs = F.conv3d(x * sign_input,
                                     weight=delta_kernel,
                                     bias=bias,
                                     stride=self.stride,
                                     padding=self.padding,
                                     dilation=self.dilation,
                                     groups=self.groups) * sign_output

        # returning outputs + perturbations
        return outputs + perturbed_outputs, kl
class ConvTranspose1dFlipout(BaseVariationalLayer_):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements ConvTranspose1d layer with Flipout reparameterization trick.

        Inherits from bayesian_torch.layers.BaseVariationalLayer_

        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel,
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init

        # Transposed-conv kernels are laid out
        # (in_channels, out_channels // groups, kernel_size).
        self.mu_kernel = nn.Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size))
        self.rho_kernel = nn.Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size))
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(in_channels, out_channels // groups, kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(in_channels, out_channels // groups, kernel_size))
        # Fixed: this buffer was registered with shape
        # (out_channels, in_channels // groups, kernel_size), which does not
        # match the kernel and breaks kl_div whenever
        # in_channels != out_channels or groups > 1.
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(in_channels, out_channels // groups, kernel_size))

        if self.bias:
            self.mu_bias = nn.Parameter(torch.Tensor(out_channels))
            self.rho_bias = nn.Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            # Register the slots as None so attribute access stays uniform.
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)

        self.init_parameters()

    def init_parameters(self):
        """Fill the prior buffers and randomly initialize posterior params."""
        # prior values
        self.prior_weight_mu.data.fill_(self.prior_mean)
        # Fixed: this call was split across two statements
        # (`...data.fill_` on one line, `(self.prior_variance)` on the next),
        # so the prior sigma buffer was never written and kl_div consumed
        # uninitialized memory.
        self.prior_weight_sigma.data.fill_(self.prior_variance)

        # init our weights for the deterministic and perturbated weights
        self.mu_kernel.data.normal_(mean=self.posterior_mu_init, std=.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init, std=.1)

        if self.bias:
            self.mu_bias.data.normal_(mean=self.posterior_mu_init, std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init, std=0.1)
            self.prior_bias_mu.data.fill_(self.prior_mean)
            self.prior_bias_sigma.data.fill_(self.prior_variance)

    def forward(self, x):
        """Apply the layer; returns (output, kl).

        output = mean-transposed-conv(x) + Flipout sign-flipped perturbation;
        kl is the KL divergence between posterior and prior for this sample.
        """
        # Deterministic transposed convolution with the posterior means.
        outputs = F.conv_transpose1d(x,
                                     weight=self.mu_kernel,
                                     bias=self.mu_bias,
                                     stride=self.stride,
                                     padding=self.padding,
                                     dilation=self.dilation,
                                     groups=self.groups)

        # Flipout: per-example random sign flips decorrelate the shared
        # sampled weight perturbation across the batch.
        sign_input = x.clone().uniform_(-1, 1).sign()
        sign_output = outputs.clone().uniform_(-1, 1).sign()

        # Sample the kernel perturbation delta = sigma * eps.
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        eps_kernel = self.eps_kernel.data.normal_()
        delta_kernel = (sigma_weight * eps_kernel)

        kl = self.kl_div(self.mu_kernel, sigma_weight, self.prior_weight_mu,
                         self.prior_weight_sigma)

        bias = None
        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = (sigma_bias * eps_bias)
            kl = kl + self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)

        # Perturbed feedforward pass using the sampled deltas.
        perturbed_outputs = F.conv_transpose1d(
            x * sign_input,
            weight=delta_kernel,
            bias=bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups) * sign_output

        # returning outputs + perturbations
        return outputs + perturbed_outputs, kl
class ConvTranspose2dFlipout(BaseVariationalLayer_):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
prior_mean=0,
prior_variance=1,
posterior_mu_init=0,
posterior_rho_init=-3.0,
bias=True):
"""
Implements ConvTranspose2d layer with Flipout reparameterization trick.
Inherits from bayesian_torch.layers.BaseVariationalLayer_
Parameters:
in_channels: int -> number of channels in the input image,
out_channels: int -> number of channels produced by the convolution,
kernel_size: int -> size of the convolving kernel,
stride: int -> stride of the convolution. Default: 1,
padding: int -> zero-padding added to both sides of the input. Default: 0,
dilation: int -> spacing between kernel elements. Default: 1,
groups: int -> number of blocked connections from input channels to output channels,
prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
"""
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.bias = bias
self.prior_mean = prior_mean
self.prior_variance = prior_variance
self.posterior_mu_init = posterior_mu_init
self.posterior_rho_init = posterior_rho_init
self.mu_kernel = nn.Parameter(
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size))
self.rho_kernel = nn.Parameter(
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size))
self.register_buffer(
'eps_kernel',
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size))
self.register_buffer(
'prior_weight_mu',
torch.Tensor(in_channels, out_channels // groups, kernel_size,
kernel_size))
self.register_buffer(
'prior_weight_sigma',
torch.Tensor(out_channels, in_channels // groups, kernel_size,
kernel_size))
if self.bias:
self.mu_bias = nn.Parameter(torch.Tensor(out_channels))
self.rho_bias = nn.Parameter(torch.Tensor(out_channels))
self.register_buffer('eps_bias', torch.Tensor(out_channels))
self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
self.register_buffer('prior_bias_sigma',
torch.Tensor(out_channels))
else:
self.register_parameter('mu_bias', None)
self.register_parameter('rho_bias', None)
self.register_buffer('eps_bias', None)
self.register_buffer('prior_bias_mu', None)
self.register_buffer('prior_bias_sigma', None)
self.init_parameters()
def init_parameters(self):
# prior values
self.prior_weight_mu.data.fill_(self.prior_mean)
self.prior_weight_sigma.data.fill_(self.prior_variance)
# init our weights for the deterministic and perturbated weights
self.mu_kernel.data.normal_(mean=self.posterior_mu_init, std=.1)
self.rho_kernel.data.normal_(mean=self.posterior_rho_init, std=.1)
if self.bias:
self.mu_bias.data.normal_(mean=self.posterior_mu_init, std=0.1)
self.rho_bias.data.normal_(mean=self.posterior_rho_init, std=0.1)
self.prior_bias_mu.data.fill_(self.prior_mean)
self.prior_bias_sigma.data.fill_(self.prior_variance)
    def forward(self, x):
        """Flipout forward pass for the 2-D transposed convolution.

        Returns a tuple ``(outputs, kl)``: the stochastic layer output and
        the KL divergence of the weight (and bias) posterior vs. the prior.
        """
        # Deterministic path: transposed conv with the posterior means.
        outputs = F.conv_transpose2d(x,
                                     bias=self.mu_bias,
                                     weight=self.mu_kernel,
                                     stride=self.stride,
                                     padding=self.padding,
                                     dilation=self.dilation,
                                     groups=self.groups)
        # Random +/-1 sign flips for input and output (the Flipout trick:
        # decorrelates the weight perturbation across examples in a batch).
        sign_input = x.clone().uniform_(-1, 1).sign()
        sign_output = outputs.clone().uniform_(-1, 1).sign()
        # Sample the weight perturbation: sigma = softplus(rho), eps ~ N(0, 1).
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        eps_kernel = self.eps_kernel.data.normal_()
        delta_kernel = (sigma_weight * eps_kernel)
        kl = self.kl_div(self.mu_kernel, sigma_weight, self.prior_weight_mu,
                         self.prior_weight_sigma)
        bias = None
        if self.bias:
            # Bias perturbation is sampled the same way; its KL is added on.
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = (sigma_bias * eps_bias)
            kl = kl + self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)
        # Perturbed path: conv of the sign-flipped input with the sampled
        # delta weights, sign-flipped again on the output side.
        perturbed_outputs = F.conv_transpose2d(
            x * sign_input,
            bias=bias,
            weight=delta_kernel,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups) * sign_output
        # Mean path + perturbation path, plus the accumulated KL.
        return outputs + perturbed_outputs, kl
class ConvTranspose3dFlipout(BaseVariationalLayer_):
    """Bayesian 3-D transposed convolution using the Flipout estimator.

    ``forward`` returns ``(output, kl)``: the stochastic activations plus the
    KL divergence between the variational posterior and the prior.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 prior_mean=0,
                 prior_variance=1,
                 posterior_mu_init=0,
                 posterior_rho_init=-3.0,
                 bias=True):
        """
        Implements ConvTranspose3d layer with Flipout reparameterization trick.
        Inherits from bayesian_torch.layers.BaseVariationalLayer_
        Parameters:
            in_channels: int -> number of channels in the input image,
            out_channels: int -> number of channels produced by the convolution,
            kernel_size: int -> size of the convolving kernel,
            stride: int -> stride of the convolution. Default: 1,
            padding: int -> zero-padding added to both sides of the input. Default: 0,
            dilation: int -> spacing between kernel elements. Default: 1,
            groups: int -> number of blocked connections from input channels to output channels,
            prior_mean: float -> mean of the prior arbitrary distribution to be used on the complexity cost,
            prior_variance: float -> variance of the prior arbitrary distribution to be used on the complexity cost,
            posterior_mu_init: float -> init trainable mu parameter representing mean of the approximate posterior,
            posterior_rho_init: float -> init trainable rho parameter representing the sigma of the approximate posterior through softplus function,
            bias: bool -> if set to False, the layer will not learn an additive bias. Default: True,
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.prior_mean = prior_mean
        self.prior_variance = prior_variance
        self.posterior_mu_init = posterior_mu_init
        self.posterior_rho_init = posterior_rho_init
        self.bias = bias
        # Trainable posterior parameters (mu, rho) with the transposed-conv
        # weight layout (in_channels, out_channels // groups, k, k, k).
        self.mu_kernel = nn.Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.rho_kernel = nn.Parameter(
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        # Non-trainable buffers: the noise sample and the (constant) prior.
        self.register_buffer(
            'eps_kernel',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.register_buffer(
            'prior_weight_mu',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        self.register_buffer(
            'prior_weight_sigma',
            torch.Tensor(in_channels, out_channels // groups, kernel_size,
                         kernel_size, kernel_size))
        if self.bias:
            self.mu_bias = nn.Parameter(torch.Tensor(out_channels))
            self.rho_bias = nn.Parameter(torch.Tensor(out_channels))
            self.register_buffer('eps_bias', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_mu', torch.Tensor(out_channels))
            self.register_buffer('prior_bias_sigma',
                                 torch.Tensor(out_channels))
        else:
            # Register None placeholders so attribute access stays uniform.
            self.register_parameter('mu_bias', None)
            self.register_parameter('rho_bias', None)
            self.register_buffer('eps_bias', None)
            self.register_buffer('prior_bias_mu', None)
            self.register_buffer('prior_bias_sigma', None)
        self.init_parameters()

    def init_parameters(self):
        # prior values
        self.prior_weight_mu.data.fill_(self.prior_mean)
        self.prior_weight_sigma.data.fill_(self.prior_variance)
        # init our weights for the deterministic and perturbated weights
        self.mu_kernel.data.normal_(mean=self.posterior_mu_init, std=.1)
        self.rho_kernel.data.normal_(mean=self.posterior_rho_init, std=.1)
        if self.bias:
            self.mu_bias.data.normal_(mean=self.posterior_mu_init, std=0.1)
            self.rho_bias.data.normal_(mean=self.posterior_rho_init, std=0.1)
            self.prior_bias_mu.data.fill_(self.prior_mean)
            self.prior_bias_sigma.data.fill_(self.prior_variance)

    def forward(self, x):
        """Flipout forward pass: returns ``(output, kl)``."""
        # Deterministic path using the posterior means.
        outputs = F.conv_transpose3d(x,
                                     weight=self.mu_kernel,
                                     bias=self.mu_bias,
                                     stride=self.stride,
                                     padding=self.padding,
                                     dilation=self.dilation,
                                     groups=self.groups)
        # Random +/-1 sign flips on input and output (Flipout trick).
        sign_input = x.clone().uniform_(-1, 1).sign()
        sign_output = outputs.clone().uniform_(-1, 1).sign()
        # Sample the weight perturbation: sigma = softplus(rho), eps ~ N(0, 1).
        sigma_weight = torch.log1p(torch.exp(self.rho_kernel))
        eps_kernel = self.eps_kernel.data.normal_()
        delta_kernel = (sigma_weight * eps_kernel)
        kl = self.kl_div(self.mu_kernel, sigma_weight, self.prior_weight_mu,
                         self.prior_weight_sigma)
        bias = None
        if self.bias:
            sigma_bias = torch.log1p(torch.exp(self.rho_bias))
            eps_bias = self.eps_bias.data.normal_()
            bias = (sigma_bias * eps_bias)
            kl = kl + self.kl_div(self.mu_bias, sigma_bias, self.prior_bias_mu,
                                  self.prior_bias_sigma)
        # Perturbed path: sign-flipped input convolved with sampled deltas.
        perturbed_outputs = F.conv_transpose3d(
            x * sign_input,
            weight=delta_kernel,
            bias=bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups) * sign_output
        # returning outputs + perturbations
        return outputs + perturbed_outputs, kl
| 39,426 | 42.042576 | 148 | py |
BayesianRelevance | BayesianRelevance-master/src/bayesian_torch/bayesian_torch/utils/util.py | # Copyright (C) 2021 Intel Labs
#
# BSD-3-Clause License
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Utily functions for variational inference in Bayesian deep neural networks
#
# @authors: Ranganath Krishnan
#
# ===============================================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import torch
import torch.nn.functional as F
def entropy(prob):
    """Shannon entropy along the last axis (natural log, 1e-15 smoothing)."""
    log_prob = np.log(prob + 1e-15)
    return -np.sum(prob * log_prob, axis=-1)
def predictive_entropy(mc_preds):
    """
    Compute the entropy of the mean of the predictive distribution
    obtained from Monte Carlo sampling during prediction phase.
    """
    mean_pred = np.mean(mc_preds, axis=0)
    return entropy(mean_pred)
def mutual_information(mc_preds):
    """
    Compute the difference between the entropy of the mean of the
    predictive distribution and the mean of the entropy (BALD score).
    """
    entropy_of_mean = entropy(np.mean(mc_preds, axis=0))
    mean_of_entropy = np.mean(entropy(mc_preds), axis=0)
    return entropy_of_mean - mean_of_entropy
def ELBO_loss(out, y, kl_loss, num_data_samples, batch_size):
    """Negative ELBO: cross-entropy NLL plus KL scaled by 1/num_data_samples.

    Note: ``batch_size`` is accepted for API compatibility but unused here.
    """
    nll = F.cross_entropy(out, y)
    kl_term = (1.0 / num_data_samples) * kl_loss
    return nll + kl_term
def get_rho(sigma, delta):
    """
    Invert the softplus parameterization 'sigma = log(1 + exp(rho))':
    return the rho whose softplus equals delta * |sigma| (1e-20 guards log(0)),
    so the non-transformed 'rho' can be updated during backprop.
    """
    scaled = delta * torch.abs(sigma)
    return torch.log(torch.expm1(scaled) + 1e-20)
def MOPED(model, det_model, det_checkpoint, delta):
    """
    Set the priors and initialize surrogate posteriors of Bayesian NN with Empirical Bayes
    MOPED (Model Priors with Empirical Bayes using Deterministic DNN)
    Example implementation for Bayesian model with variational layers.

    Parameters:
        model: Bayesian model whose priors/posteriors are initialized in place.
        det_model: deterministic model with the same module order as `model`.
        det_checkpoint: path to the deterministic model's state_dict file.
        delta: scale used by get_rho to derive rho from |weight|.

    Reference:
    [1] Ranganath Krishnan, Mahesh Subedar, Omesh Tickoo.
        Specifying Weight Priors in Bayesian Deep Neural Networks with Empirical Bayes. AAAI 2020.
    """
    det_model.load_state_dict(torch.load(det_checkpoint))
    # Walk both module trees in lockstep; assumes identical architecture order.
    for (idx, layer), (det_idx,
                       det_layer) in zip(enumerate(model.modules()),
                                         enumerate(det_model.modules())):
        # Layer kind is detected via its repr string (the variational layers'
        # extra_repr is empty, so str(layer) is exactly 'XxxVariational()').
        if (str(layer) == 'Conv1dVariational()'
                or str(layer) == 'Conv2dVariational()'
                or str(layer) == 'Conv3dVariational()'
                or str(layer) == 'ConvTranspose1dVariational()'
                or str(layer) == 'ConvTranspose2dVariational()'
                or str(layer) == 'ConvTranspose3dVariational()'):
            # set the priors to the deterministic weights
            layer.prior_weight_mu.data = det_layer.weight
            layer.prior_bias_mu.data = det_layer.bias
            # initialize surrogate posteriors: mu = weight, rho from |weight|
            layer.mu_kernel.data = det_layer.weight
            layer.rho_kernel.data = get_rho(det_layer.weight.data, delta)
            layer.mu_bias.data = det_layer.bias
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
        elif (str(layer) == 'LinearVariational()'):
            # set the priors
            layer.prior_weight_mu.data = det_layer.weight
            layer.prior_bias_mu.data = det_layer.bias
            # initialize the surrogate posteriors
            layer.mu_weight.data = det_layer.weight
            layer.rho_weight.data = get_rho(det_layer.weight.data, delta)
            layer.mu_bias.data = det_layer.bias
            layer.rho_bias.data = get_rho(det_layer.bias.data, delta)
        elif str(layer).startswith('Batch'):
            # BatchNorm layers: copy affine params and running statistics
            layer.weight.data = det_layer.weight
            layer.bias.data = det_layer.bias
            layer.running_mean.data = det_layer.running_mean
            layer.running_var.data = det_layer.running_var
            layer.num_batches_tracked.data = det_layer.num_batches_tracked
    # NOTE(review): this state_dict() call discards its result — presumably a
    # leftover; it has no effect. The model was mutated in place above.
    model.state_dict()
    return model
| 5,400 | 41.195313 | 98 | py |
ssl-torch | ssl-torch-main/transform.py | import numpy as np
import torch
from scipy import signal
import math
import cv2
import random
class Transform:
    """Collection of 1-D signal augmentations for self-supervised learning.

    Methods take numpy arrays shaped (1, L) — a single-channel signal —
    and several internally transpose to (L, 1) to operate column-wise.
    NOTE(review): the resize-based methods hard-code output length 3072;
    confirm inputs are always length 3072 upstream.
    """

    def __init__(self):
        pass

    def add_noise(self, signal, noise_amount):
        """
        Add Gaussian noise: one N(1, noise_amount) draw per time step,
        scaled by sqrt(0.4), broadcast across channels.
        """
        signal = signal.T
        noise = (0.4 ** 0.5) * np.random.normal(1, noise_amount, np.shape(signal)[0])
        noise = noise[:,None]
        noised_signal = signal + noise
        noised_signal = noised_signal.T
        # print(noised_signal.shape)
        return noised_signal

    def add_noise_with_SNR(self, signal, noise_amount):
        """
        Add white noise at a target signal-to-noise ratio (dB).
        created using: https://stackoverflow.com/a/53688043/10700812
        """
        signal = signal[0]
        target_snr_db = noise_amount  # 20
        x_watts = signal ** 2  # Calculate signal power and convert to dB
        sig_avg_watts = np.mean(x_watts)
        sig_avg_db = 10 * np.log10(sig_avg_watts)  # Calculate noise then convert to watts
        noise_avg_db = sig_avg_db - target_snr_db
        noise_avg_watts = 10 ** (noise_avg_db / 10)
        mean_noise = 0
        noise_volts = np.random.normal(mean_noise, np.sqrt(noise_avg_watts),
                                       len(x_watts))  # Generate an sample of white noise
        noised_signal = signal + noise_volts  # noise added signal
        noised_signal = noised_signal[None,:]
        # print(noised_signal.shape)
        return noised_signal

    def scaled(self, signal, factor_list):
        """
        Nominally scale the signal by a random factor in factor_list range.

        NOTE(review): `factor` is drawn but never used — the body applies a
        sigmoid to the signal (in place) instead of multiplying by the
        factor. Confirm whether this is the intended behavior.
        """
        factor = round(np.random.uniform(factor_list[0],factor_list[1]),2)
        signal[0] = 1 / (1 + np.exp(-signal[0]))
        # print(signal.max())
        return signal

    def negate(self, signal):
        """
        Negate the signal (in place on channel 0).
        """
        signal[0] = signal[0] * (-1)
        return signal

    def hor_filp(self, signal):
        """
        Flip the signal horizontally (reverse time axis).
        """
        hor_flipped = np.flip(signal,axis=1)
        return hor_flipped

    def permute(self, signal, pieces):
        """
        Randomly shuffle equal-length segments of the signal.
        signal: numpy array (batch x window)
        pieces: number of segments along time
        """
        signal = signal.T
        pieces = int(np.ceil(np.shape(signal)[0] / (np.shape(signal)[0] // pieces)).tolist())  # round up
        piece_length = int(np.shape(signal)[0] // pieces)
        sequence = list(range(0, pieces))
        np.random.shuffle(sequence)
        permuted_signal = np.reshape(signal[:(np.shape(signal)[0] // pieces * pieces)],
                                     (pieces, piece_length)).tolist()
        # Leftover samples that don't fill a whole piece are re-appended at the end.
        tail = signal[(np.shape(signal)[0] // pieces * pieces):]
        permuted_signal = np.asarray(permuted_signal)[sequence]
        permuted_signal = np.concatenate(permuted_signal, axis=0)
        permuted_signal = np.concatenate((permuted_signal,tail[:,0]), axis=0)
        permuted_signal = permuted_signal[:,None]
        permuted_signal = permuted_signal.T
        return permuted_signal

    def cutout_resize(self, signal, pieces):
        """
        signal: numpy array (batch x window)
        pieces: number of segments along time
        Cut out one random piece, then resize the remainder back to 3072.
        """
        signal = signal.T
        pieces = int(np.ceil(np.shape(signal)[0] / (np.shape(signal)[0] // pieces)).tolist())  # round up
        piece_length = int(np.shape(signal)[0] // pieces)
        import random
        sequence = []
        # NOTE(review): randint(0, pieces) is inclusive of `pieces`, an index
        # one past the last piece — in that case nothing is cut out.
        cutout = random.randint(0, pieces)
        # print(cutout)
        # sequence1 = list(range(0, cutout))
        # sequence2 = list(range(int(cutout + 1), pieces))
        # sequence = np.hstack((sequence1, sequence2))
        for i in range(pieces):
            if i == cutout:
                pass
            else:
                sequence.append(i)
        # print(sequence)
        cutout_signal = np.reshape(signal[:(np.shape(signal)[0] // pieces * pieces)],
                                   (pieces, piece_length)).tolist()
        tail = signal[(np.shape(signal)[0] // pieces * pieces):]
        cutout_signal = np.asarray(cutout_signal)[sequence]
        cutout_signal = np.hstack(cutout_signal)
        cutout_signal = np.concatenate((cutout_signal, tail[:, 0]), axis=0)
        cutout_signal = cv2.resize(cutout_signal, (1, 3072), interpolation=cv2.INTER_LINEAR)
        cutout_signal = cutout_signal.T
        return cutout_signal

    def cutout_zero(self, signal, pieces):
        """
        signal: numpy array (batch x window)
        pieces: number of segments along time
        Zero out one random piece, keeping the original length.
        """
        signal = signal.T
        ones = np.ones((np.shape(signal)[0],np.shape(signal)[1]))
        # print(ones.shape)
        # assert False
        pieces = int(np.ceil(np.shape(signal)[0] / (np.shape(signal)[0] // pieces)).tolist())  # round up
        piece_length = int(np.shape(signal)[0] // pieces)
        cutout = random.randint(1, pieces)
        cutout_signal = np.reshape(signal[:(np.shape(signal)[0] // pieces * pieces)],
                                   (pieces, piece_length)).tolist()
        ones_pieces = np.reshape(ones[:(np.shape(signal)[0] // pieces * pieces)],
                                 (pieces, piece_length)).tolist()
        tail = signal[(np.shape(signal)[0] // pieces * pieces):]
        cutout_signal = np.asarray(cutout_signal)
        ones_pieces = np.asarray(ones_pieces)
        # Build a 0/1 mask that is zero on the chosen piece, then apply it.
        for i in range(pieces):
            if i == cutout:
                ones_pieces[i]*=0
        cutout_signal = cutout_signal * ones_pieces
        cutout_signal = np.hstack(cutout_signal)
        cutout_signal = np.concatenate((cutout_signal, tail[:, 0]), axis=0)
        cutout_signal = cutout_signal[:,None]
        cutout_signal = cutout_signal.T
        return cutout_signal

    # mic
    def crop_resize(self, signal, size):
        """Crop a random window of `size` (fraction of length), resize to 3072."""
        signal = signal.T
        size = signal.shape[0] * size
        size = int(size)
        start = random.randint(0, signal.shape[0]-size)
        crop_signal = signal[start:start + size,:]
        # print(crop_signal.shape)
        crop_signal = cv2.resize(crop_signal, (1, 3072), interpolation=cv2.INTER_LINEAR)
        # print(crop_signal.shape)
        crop_signal = crop_signal.T
        return crop_signal

    def move_avg(self, a, n, mode="same"):
        """Moving average with window n (via convolution, same-length output)."""
        # a = a.T
        result = np.convolve(a[0], np.ones((n,)) / n, mode=mode)
        return result[None,:]

    def bandpass_filter(self, x, order, cutoff, fs=100):
        """Zero-phase Butterworth band-pass; cutoff = [low_hz, high_hz]."""
        result = np.zeros((x.shape[0], x.shape[1]))
        w1 = 2 * cutoff[0] / int(fs)
        w2 = 2 * cutoff[1] / int(fs)
        b, a = signal.butter(order, [w1, w2], btype='bandpass')  # design filter; `order` is the filter order
        result = signal.filtfilt(b, a, x, axis=1)
        # print(result.shape)
        return result

    def lowpass_filter(self, x, order, cutoff, fs=100):
        """Zero-phase Butterworth low-pass; cutoff = [cutoff_hz]."""
        result = np.zeros((x.shape[0], x.shape[1]))
        w1 = 2 * cutoff[0] / int(fs)
        # w2 = 2 * cutoff[1] / fs
        b, a = signal.butter(order, w1, btype='lowpass')  # design filter; `order` is the filter order
        result = signal.filtfilt(b, a, x, axis=1)
        # print(result.shape)
        return result

    def highpass_filter(self, x, order, cutoff, fs=100):
        """Zero-phase Butterworth high-pass; cutoff = [cutoff_hz]."""
        result = np.zeros((x.shape[0], x.shape[1]))
        w1 = 2 * cutoff[0] / int(fs)
        # w2 = 2 * cutoff[1] / fs
        b, a = signal.butter(order, w1, btype='highpass')  # design filter; `order` is the filter order
        result = signal.filtfilt(b, a, x, axis=1)
        # print(result.shape)
        return result

    def time_warp(self, signal, sampling_freq, pieces, stretch_factor, squeeze_factor):
        """
        Randomly stretch half the segments and squeeze the rest, then
        resize the warped signal back to length 3072.
        signal: numpy array (batch x window)
        sampling freq
        pieces: number of segments along time
        stretch factor
        squeeze factor
        """
        signal = signal.T
        total_time = np.shape(signal)[0] // sampling_freq
        segment_time = total_time / pieces
        sequence = list(range(0, pieces))
        # Randomly pick half the segments to stretch; the rest are squeezed.
        stretch = np.random.choice(sequence, math.ceil(len(sequence) / 2), replace=False)
        squeeze = list(set(sequence).difference(set(stretch)))
        initialize = True
        for i in sequence:
            orig_signal = signal[int(i * np.floor(segment_time * sampling_freq)):int(
                (i + 1) * np.floor(segment_time * sampling_freq))]
            orig_signal = orig_signal.reshape(np.shape(orig_signal)[0], 1)
            if i in stretch:
                output_shape = int(np.ceil(np.shape(orig_signal)[0] * stretch_factor))
                new_signal = cv2.resize(orig_signal, (1, output_shape), interpolation=cv2.INTER_LINEAR)
                if initialize == True:
                    time_warped = new_signal
                    initialize = False
                else:
                    time_warped = np.vstack((time_warped, new_signal))
            elif i in squeeze:
                output_shape = int(np.ceil(np.shape(orig_signal)[0] * squeeze_factor))
                new_signal = cv2.resize(orig_signal, (1, output_shape), interpolation=cv2.INTER_LINEAR)
                if initialize == True:
                    time_warped = new_signal
                    initialize = False
                else:
                    time_warped = np.vstack((time_warped, new_signal))
        time_warped = cv2.resize(time_warped, (1,3072), interpolation=cv2.INTER_LINEAR)
        time_warped = time_warped.T
        return time_warped
if __name__ == '__main__':
    # Smoke test: add noise to a flat signal, low-pass filter it, and save
    # both traces to filter.png for visual inspection.
    from transform import Transform
    import matplotlib.pyplot as plt
    Trans = Transform()
    sig = np.zeros((1, 3072))  # renamed from `input` to avoid shadowing the builtin
    sig = Trans.add_noise(sig, 10)
    plt.subplot(211)
    plt.plot(sig[0])
    order = random.randint(3, 10)
    cutoff = random.uniform(5, 20)
    # Fix: Transform has no `filter` method (the original call raised
    # AttributeError); the intended call is lowpass_filter(x, order, cutoff).
    output = Trans.lowpass_filter(sig, order, [2, 15])
    plt.subplot(212)
    plt.plot(output[0])
    plt.savefig('filter.png')
    # print(output.shape)
| 9,975 | 34.884892 | 103 | py |
ssl-torch | ssl-torch-main/contrast.py | from net import resnet18, resnet34, resnet50, resnet101, resnet152
import torch
import torch.nn as nn
import numpy as np
# import pandas as pd
import tqdm
import mit_utils as utils
# import analytics
import time
import os, shutil
from mail import mail_it
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import random
from torch.optim.lr_scheduler import CosineAnnealingLR
from warmup_scheduler import GradualWarmupScheduler
import argparse
# CLI: the two augmentation names used to build the two contrastive views.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('-d', '--dataset', type=int)
# parser.add_argument('-g', '--gpu_id', type=str, default=0)
parser.add_argument('-F1', '--transform_function_1', type=str)
parser.add_argument('-F2', '--transform_function_2', type=str)
# parser.add_argument('-e', '--epoch', type=int, default=60)
arg = parser.parse_args()
torch.set_default_tensor_type(torch.FloatTensor)
device = "cuda"
# Checkpoints go to logs/<model>_<MMDDHHMM>/.
log_dir = "logs"
model_name = 'resnet17'
model_save_dir = '%s/%s_%s' % (log_dir, model_name, time.strftime("%m%d%H%M"))
os.makedirs(model_save_dir, exist_ok=True)
log_file = "%s_%s_%s.log" % (arg.transform_function_1, arg.transform_function_2, time.strftime("%m%d%H%M"))
# Template for the final metrics summary, filled in at the end of the script.
log_templete = {"acc": None,
                "cm": None,
                "f1": None,
                "per F1":None,
                "epoch":None,
                }
# Load the raw signals and zero-pad each 3000-sample epoch to length 3072
# (36 zeros on each side), then add a channel axis -> (N, 1, 3072).
data = np.load('data.npz')
orig_x = data['x']
x = np.zeros((orig_x.shape[0],3072))
x[:,36:3036] = orig_x
x = x[:,None,:]
y = data['y']
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = \
    train_test_split(x, y, test_size=0.3)
x_train = torch.tensor(x_train, dtype=torch.float).to(device)
x_test = torch.tensor(x_test, dtype=torch.float).to(device)
y_train = torch.tensor(y_train, dtype=torch.long).to(device)
y_test = torch.tensor(y_test, dtype=torch.long).to(device)
print(x_train.shape)
import torch.nn.functional as F
from transform import Transform
def save_ckpt(state, is_best, model_save_dir, message='best_w.pth'):
    """Save `state` as latest_w.pth; if `is_best`, also copy it to `message`."""
    latest_path = os.path.join(model_save_dir, 'latest_w.pth')
    best_path = os.path.join(model_save_dir, message)
    torch.save(state, latest_path)
    if is_best:
        shutil.copyfile(latest_path, best_path)
def transform(x, mode):
    """Apply one named augmentation to a single signal.

    x: torch tensor of shape (1, L); returns a numpy array of shape
    (1, 1, L) — an extra leading axis is added for later concatenation.
    mode: augmentation name; an unknown name prints "Error" and returns
    the signal unchanged.
    """
    sig = x.cpu().numpy()
    aug = Transform()
    if mode == 'time_warp':
        sig = aug.time_warp(sig, 100, random.randint(5, 20),
                            random.uniform(1.5, 4), random.uniform(0.25, 0.67))
    elif mode == 'noise':
        sig = aug.add_noise_with_SNR(sig, random.uniform(10, 20))
    elif mode == 'scale':
        sig = aug.scaled(sig, [0.3, 3])
    elif mode == 'negate':
        sig = aug.negate(sig)
    elif mode == 'hor_flip':
        sig = aug.hor_filp(sig)
    elif mode == 'permute':
        sig = aug.permute(sig, random.randint(5, 20))
    elif mode == 'cutout_resize':
        sig = aug.cutout_resize(sig, random.randint(5, 20))
    elif mode == 'cutout_zero':
        sig = aug.cutout_zero(sig, random.randint(5, 20))
    elif mode == 'crop_resize':
        sig = aug.crop_resize(sig, random.uniform(0.25, 0.75))
    elif mode == 'move_avg':
        sig = aug.move_avg(sig, random.randint(3, 10), mode="same")
    # to test
    elif mode == 'lowpass':
        sig = aug.lowpass_filter(sig, random.randint(3, 10),
                                 [random.uniform(5, 20)])
    elif mode == 'highpass':
        sig = aug.highpass_filter(sig, random.randint(3, 10),
                                  [random.uniform(5, 10)])
    elif mode == 'bandpass':
        order = random.randint(3, 10)
        band = [random.uniform(1, 5), random.uniform(20, 40)]
        sig = aug.bandpass_filter(sig, order, band)
    else:
        print("Error")
    sig = sig.copy()
    return sig[:, None, :]
def comtrast_loss(x, criterion):
    """NT-Xent (SimCLR-style) contrastive loss.

    x: stacked projections of shape (2N, D) — the first N rows and last N
       rows are the two augmented views of the same N examples.
    criterion: a CrossEntropyLoss-like callable.
    Returns (loss, labels, logits_ab): the scalar loss, the positive-pair
    target indices, and the view1-vs-view2 similarity logits.
    """
    LARGE_NUM = 1e9  # subtracted on the diagonal to mask self-similarity
    temperature = 0.1
    x = F.normalize(x, dim=-1)
    num = int(x.shape[0] / 2)
    hidden1, hidden2 = torch.split(x, num)
    hidden1_large = hidden1
    hidden2_large = hidden2
    # Fix/generalization: build labels/masks on the input's device instead of
    # hard-coding 'cuda', so the loss also works on CPU tensors; behavior is
    # unchanged for the existing CUDA callers.
    labels = torch.arange(0, num, device=x.device)
    masks = F.one_hot(torch.arange(0, num, device=x.device), num)
    # Within-view similarities, with the self-pair masked out.
    logits_aa = torch.matmul(hidden1, hidden1_large.T) / temperature
    logits_aa = logits_aa - masks * LARGE_NUM
    logits_bb = torch.matmul(hidden2, hidden2_large.T) / temperature
    logits_bb = logits_bb - masks * LARGE_NUM
    # Cross-view similarities: the diagonal holds the positive pairs.
    logits_ab = torch.matmul(hidden1, hidden2_large.T) / temperature
    logits_ba = torch.matmul(hidden2, hidden1_large.T) / temperature
    # Each view classifies its positive among [cross-view, within-view] logits.
    loss_a = criterion(torch.cat([logits_ab, logits_aa], 1),
                       labels)
    loss_b = criterion(torch.cat([logits_ba, logits_bb], 1),
                       labels)
    loss = torch.mean(loss_a + loss_b)
    return loss, labels, logits_ab
# Pretraining setup: encoder-only ResNet (projection head, no classifier).
net = resnet18(classification=False).to(device)
net = nn.DataParallel(net)
criterion = nn.CrossEntropyLoss().to(device)
batch_size = 512
# Linear LR scaling with batch size (base lr 0.1 at batch 64).
optimizer = torch.optim.SGD(net.parameters(), lr=0.1 * (batch_size / 64), momentum=0.9, weight_decay=0.00001)
epochs = 70
# Cosine decay after a 10-epoch linear warmup.
lr_schduler = CosineAnnealingLR(optimizer, T_max=epochs - 10, eta_min=0.05)#default =0.07
scheduler_warmup = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=10, after_scheduler=lr_schduler)
# Dummy step so the warmup scheduler starts from the correct state.
optimizer.zero_grad()
optimizer.step()
scheduler_warmup.step()
train_dataset = torch.utils.data.TensorDataset(x_train, y_train)
train_iter = torch.utils.data.DataLoader(train_dataset, batch_size, shuffle=True)
test_dataset = torch.utils.data.TensorDataset(x_test, y_test)
test_iter = torch.utils.data.DataLoader(test_dataset, batch_size, shuffle=True)
target_class = ['W', 'N1', 'N2', 'N3', 'REM']  # sleep-stage labels
val_acc_list = []
n_train_samples = x_train.shape[0]
iter_per_epoch = n_train_samples // batch_size + 1
best_acc = -1
err = []
best_err = 1
margin = 1
# Contrastive pretraining loop: two augmented views per sample, NT-Xent loss.
for epoch in range(epochs):
    net.train()
    loss_sum = 0
    evaluation = []
    iter = 0
    with tqdm.tqdm(total=iter_per_epoch) as pbar:
        error_counter = 0
        for X, y in train_iter:
            # Build the 2B-sized batch: first B rows are view 1, last B view 2.
            trans = []
            for i in range(X.shape[0]):
                t1 = transform(X[i], arg.transform_function_1)
                trans.append(t1)
            for i in range(X.shape[0]):
                t2 = transform(X[i], arg.transform_function_2)
                trans.append(t2)
            trans = np.concatenate(trans)
            trans = torch.tensor(trans, dtype=torch.float, device="cuda")
            output = net(trans)
            optimizer.zero_grad()
            l, lab_con, log_con = comtrast_loss(output, criterion)
            # "Accuracy" here = fraction of samples whose positive pair has
            # the highest cross-view similarity.
            _, log_p = torch.max(log_con.data,1)
            evaluation.append((log_p == lab_con).tolist())
            l.backward()
            optimizer.step()
            loss_sum += l
            iter += 1
            pbar.set_description("Epoch %d, loss = %.2f" % (epoch, l.data))
            pbar.update(1)
            err = l.data
    evaluation = [item for sublist in evaluation for item in sublist]
    train_acc = sum(evaluation) / len(evaluation)
    error = 1 - train_acc
    current_lr = optimizer.param_groups[0]['lr']
    print("Epoch:", epoch,"lr:", current_lr, "error:", error, " train_loss =", loss_sum.data)
    scheduler_warmup.step()
    # Checkpoint; the lowest-contrastive-error weights become best_w.pth.
    state = {"state_dict": net.state_dict(), "epoch": epoch}
    save_ckpt(state, best_err > error, model_save_dir)
    best_err = min(best_err, error)
#========================= fine-tuning: attach a classifier head and train on labels
net = resnet18(classification=True).to('cuda')
net = nn.DataParallel(net)
# Load the pretrained encoder weights; strict=False skips the new head params.
checkpoint = torch.load(os.path.join(model_save_dir,'best_w.pth'))
net.load_state_dict(checkpoint['state_dict'], strict=False)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=0.00001)
epochs_t = 70
lr_schduler = CosineAnnealingLR(optimizer, T_max=epochs_t - 10, eta_min=0.09)#default =0.07
scheduler_warmup = GradualWarmupScheduler(optimizer, multiplier=1, total_epoch=10, after_scheduler=lr_schduler)
# Dummy step so the warmup scheduler starts from the correct state.
optimizer.zero_grad()
optimizer.step()
scheduler_warmup.step()
# NOTE(review): batch_size is reassigned here but train_iter/test_iter were
# built with the pretraining batch size (512); iter_per_epoch below therefore
# over-counts progress-bar steps. Confirm whether loaders should be rebuilt.
batch_size = 256
val_acc_list = []
n_train_samples = x_train.shape[0]
iter_per_epoch = n_train_samples // batch_size + 1
best_acc = -1
for epoch in range(epochs_t):
    net.train()
    loss_sum = 0
    evaluation = []
    iter = 0
    with tqdm.tqdm(total=iter_per_epoch) as pbar:
        for X, y in train_iter:
            output = net(X)
            _, predicted = torch.max(output.data, 1)
            evaluation.append((predicted == y).tolist())
            optimizer.zero_grad()
            l = criterion(output, y)
            l.backward()
            optimizer.step()
            loss_sum += l
            iter += 1
            pbar.set_description("Epoch %d, loss = %.2f" % (epoch, l.data))
            pbar.update(1)
    evaluation = [item for sublist in evaluation for item in sublist]
    train_acc = sum(evaluation) / len(evaluation)
    current_lr = optimizer.param_groups[0]['lr']
    print("Epoch:", epoch,"lr:", current_lr," train_loss =", loss_sum.data, " train_acc =", train_acc)
    # scheduler.step()
    scheduler_warmup.step()
    # Per-epoch validation on the held-out split.
    val_loss = 0
    evaluation = []
    pred_v = []
    true_v = []
    with torch.no_grad():
        net.eval()
        for X, y in test_iter:
            output = net(X)
            _, predicted = torch.max(output.data, 1)
            evaluation.append((predicted == y).tolist())
            l = criterion(output, y)
            val_loss += l
            pred_v.append(predicted.tolist())
            true_v.append(y.tolist())
    evaluation = [item for sublist in evaluation for item in sublist]
    pred_v = [item for sublist in pred_v for item in sublist]
    true_v = [item for sublist in true_v for item in sublist]
    running_acc = sum(evaluation) / len(evaluation)
    val_acc_list.append(running_acc)
    print("val_loss =", val_loss, "val_acc =", running_acc)
    # Checkpoint; the highest-val-accuracy weights become best_cls.pth.
    state = {"state_dict": net.state_dict(), "epoch": epoch}
    save_ckpt(state, best_acc < running_acc, model_save_dir, 'best_cls.pth')
    best_acc = max(best_acc, running_acc)
print("Highest acc:", max(val_acc_list))
# =========================test: reload the best classifier and evaluate once
# NOTE(review): training used nn.DataParallel (keys prefixed 'module.') but
# this model is bare; strict=True may fail on key mismatch — verify.
model = resnet18(classification=True).to('cuda')
checkpoint = torch.load(os.path.join(model_save_dir,'best_cls.pth'))
model.load_state_dict(checkpoint['state_dict'], strict=True)
epoch_b = checkpoint['epoch']
# model.train()
model.eval()
val_loss = 0
evaluation = []
pred_v = []
true_v = []
with torch.no_grad():
    for X, y in test_iter:
        output = model(X)
        _, predicted = torch.max(output.data, 1)
        evaluation.append((predicted == y).tolist())
        l = criterion(output, y)
        val_loss += l
        pred_v.append(predicted.tolist())
        true_v.append(y.tolist())
# Flatten the per-batch lists into flat prediction/label lists.
evaluation = [item for sublist in evaluation for item in sublist]
pred_v = [item for sublist in pred_v for item in sublist]
true_v = [item for sublist in true_v for item in sublist]
highest_acc = sum(evaluation) / len(evaluation)
print("epoch=" , epoch_b, "val_acc =", highest_acc)
def calculate_all_prediction(confMatrix):
    '''
    Overall accuracy (%) of a confusion matrix: diagonal sum over total,
    rounded to 2 decimal places.
    '''
    total = confMatrix.sum()
    correct = (np.diag(confMatrix)).sum()
    return round(100 * float(correct) / float(total), 2)
def calculate_label_prediction(confMatrix, labelidx):
    '''
    Precision (%) for one class: correctly predicted count divided by the
    column total (number of samples predicted as that class); 0 if the
    class was never predicted.
    '''
    predicted_total = confMatrix.sum(axis=0)[labelidx]
    if predicted_total == 0:
        return 0
    correct = confMatrix[labelidx][labelidx]
    return round(100 * float(correct) / float(predicted_total), 2)
def calculate_label_recall(confMatrix, labelidx):
    '''
    Recall (%) for one class: correctly predicted count divided by the row
    total (number of true samples of that class); 0 if the class is absent.
    '''
    true_total = confMatrix.sum(axis=1)[labelidx]
    if true_total == 0:
        return 0
    correct = confMatrix[labelidx][labelidx]
    return round(100 * float(correct) / float(true_total), 2)
def calculate_f1(prediction, recall):
    """F1 score (harmonic mean of precision and recall, 2 dp); 0 when both are 0."""
    denom = prediction + recall
    if denom == 0:
        return 0
    return round(2 * prediction * recall / denom, 2)
# Final metrics: confusion matrix, macro F1, and per-class F1 for 5 classes.
cm = confusion_matrix(true_v, pred_v)
f1_macro = f1_score(true_v, pred_v, average='macro')
i=0
f1 = []
for i in range(5):
    r = calculate_label_recall(cm,i)
    p = calculate_label_prediction(cm,i)
    f = calculate_f1(p,r)
    f1.append(f)
# Fill the summary template (strings so it can be serialized/logged as-is).
log_templete["acc"] = '{:.3%}'.format(highest_acc)
log_templete["epoch"] = epoch_b
log_templete["cm"] = str(cm)
log_templete["f1"] = str(f1_macro)
log_templete["per F1"] = str(f1)
log = log_templete
| 12,884 | 30.274272 | 111 | py |
ssl-torch | ssl-torch-main/net.py | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
# Public API of this module (torchvision-style ResNet constructors).
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152']

# Pretrained 2-D ResNet checkpoints from torchvision. NOTE(review): these are
# 2-D image weights while the layers below are Conv1d — presumably unused here.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed1d.pth',
}

# Dropout probability used inside the residual blocks (0 = disabled).
dp_rate = 0
def conv3x3(in_planes, out_planes, stride=1):
    """1-D convolution with a 33-tap kernel and 'same' padding (16), no bias.

    Note: the name is inherited from the torchvision 2-D ResNet template;
    the previous docstring ("3x3 convolution") was misleading — the actual
    kernel here is a 1-D kernel of size 33.
    """
    return nn.Conv1d(in_planes, out_planes, kernel_size=33, stride=stride,
                     padding=16, bias=False)
class BasicBlock(nn.Module):
    """Pre-activation 1-D residual block (BN -> ReLU -> conv ordering).

    conv2 doubles the channel count (planes -> planes*2, hence expansion=2);
    when `downsample` is None the input must already have planes*2 channels
    for the residual addition to be shape-compatible.
    """
    # Output channel multiplier relative to `planes`.
    expansion = 2

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.bn0 = nn.BatchNorm1d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm1d(planes)
        self.conv2 = conv3x3(planes, planes*2)
        self.downsample = downsample
        self.stride = stride
        self.dropout = nn.Dropout(dp_rate)

    def forward(self, x):
        residual = x
        # Pre-activation: normalize and activate before the first conv.
        out = self.bn0(x)
        out = self.relu(out)
        # out = self.dropout(out)
        out = self.conv1(out)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.dropout(out)
        # NOTE(review): conv2's output is added to the residual without a
        # following BatchNorm — confirm this asymmetry is intentional.
        out = self.conv2(out)
        if self.downsample is not None:
            # Projection shortcut applied to the ORIGINAL input x.
            residual = self.downsample(x)
            # residual = torch.cat((residual,residual),1)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    # Pre-activation bottleneck block for 1-D signals:
    # 1x33 reduce -> 1x65 (strided) -> 1x1 expand (x4 channels).
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.bn0 = nn.BatchNorm1d(inplanes)
        self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=33, bias=False, padding=16)
        self.bn1 = nn.BatchNorm1d(planes)
        self.conv2 = nn.Conv1d(planes, planes, kernel_size=65, stride=stride,
                               padding=32, bias=False)
        self.bn2 = nn.BatchNorm1d(planes)
        self.conv3 = nn.Conv1d(planes, planes * 4, kernel_size=1, bias=False, padding=0)
        # bn3 is created but unused in forward (its call is commented out below).
        self.bn3 = nn.BatchNorm1d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # dp_rate is a module-level constant (0 in this file).
        self.dropout = nn.Dropout(dp_rate)

    def forward(self, x):
        residual = x
        out = self.bn0(x)
        out = self.relu(out)
        # Bug fix: the original called self.conv1(x), silently discarding the
        # bn0/relu pre-activation computed into `out` just above.
        out = self.conv1(out)

        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)

        out = self.bn2(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.conv3(out)
        # out = self.bn3(out)

        # Project the identity path when stride/channel shape changes.
        if self.downsample is not None:
            residual = self.downsample(x)
        # residual = torch.cat((residual, residual), 1)
        out += residual
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    # 1-D ResNet backbone with an optional fully-connected classification head.
    # When `classification` is False, forward() returns the pooled feature
    # vector instead of class logits.

    def __init__(self, block, layers, classification, num_classes=5):
        # NOTE(review): num_classes is accepted but the final layer below
        # hard-codes 5 outputs (self.fc3) — confirm before changing class count.
        self.inplanes = 12
        self.classification = classification
        super(ResNet, self).__init__()
        # Stem: conv + BN + ReLU, followed by a hand-rolled residual stage
        # (conv2/conv3 with a max-pooled identity path) built in forward().
        self.conv1 = nn.Conv1d(1, self.inplanes, kernel_size=33, stride=1, padding=16,
                               bias=False)
        self.bn1 = nn.BatchNorm1d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)
        self.conv2 = nn.Conv1d(self.inplanes, self.inplanes, kernel_size=33, stride=2, padding=16,
                               bias=False)
        self.bn2 = nn.BatchNorm1d(self.inplanes)
        self.downsample = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv3 = nn.Conv1d(self.inplanes, self.inplanes, kernel_size=33, stride=1, padding=16,
                               bias=False)
        # dp_rate is a module-level constant (0 in this file).
        self.dropout = nn.Dropout(dp_rate)
        # Four residual stages, widths 12/24/48/96, each halving the length.
        self.layer1 = self._make_layer(block, 12, layers[0], stride=2)
        self.layer2 = self._make_layer(block, 24, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 48, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 96, layers[3], stride=2)
        # self.layer5 = self._make_layer(block, self.inplanes, layers[4], stride=2)
        # NOTE(review): bn_final/fc1 sizes (96*2 channels, 96*4 flattened)
        # assume block.expansion == 2 (BasicBlock) — confirm for Bottleneck.
        self.bn_final = nn.BatchNorm1d(96*2)
        self.avgpool = nn.AdaptiveAvgPool1d(2)
        # Classification head (used only when self.classification is True).
        self.fc1 = nn.Linear(96*4, 384)
        self.bn3 = nn.BatchNorm1d(384)
        self.fc2 = nn.Linear(384, 192)
        self.bn4 = nn.BatchNorm1d(192)
        self.fc3 = nn.Linear(192, 5)
        self.softmax = nn.Softmax(1)

        # Weight init: He init for convs, unit-gamma BN, small-normal linear.
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight.data, mode='fan_in', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm1d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one stage of `blocks` residual blocks; only the first block
        # gets the stride and a (1x1 conv + BN) projection for the identity.
        downsample = None
        if stride != 1:
            downsample = nn.Sequential(
                nn.Conv1d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        # x = self.maxpool(x)
        # Stem residual stage: conv2/conv3 path with a max-pooled shortcut.
        out = self.conv2(x)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.dropout(out)
        out = self.conv3(out)
        residual = self.downsample(x)
        out += residual
        x = self.relu(out)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # x = self.layer5(x)
        x = self.bn_final(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        if self.classification:
            x = self.fc1(x)
            x = self.bn3(x)
            x = self.relu(x)
            x = self.dropout(x)
            x = self.fc2(x)
            x = self.bn4(x)
            x = self.relu(x)
            x = self.dropout(x)
            x = self.fc3(x)
        # x = self.softmax(x)
        return x
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 style 1-D network.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state_dict)
    return net
def resnet34(pretrained=False, **kwargs):
    """Construct a ResNet-34 style 1-D network.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(state_dict)
    return net
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 style 1-D network (bottleneck blocks).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet50'])
        net.load_state_dict(state_dict)
    return net
def resnet101(pretrained=False, **kwargs):
    """Construct a ResNet-101 style 1-D network (bottleneck blocks).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet101'])
        net.load_state_dict(state_dict)
    return net
def resnet152(pretrained=False, **kwargs):
    """Construct a ResNet-152 style 1-D network (bottleneck blocks).

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet152'])
        net.load_state_dict(state_dict)
    return net
| 8,532 | 32.073643 | 98 | py |
ssl-torch | ssl-torch-main/mit_utils.py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 14 23:47:38 2019
@author: Winham
辅助函数
"""
import warnings
import numpy as np
from scipy.signal import resample
# import pywt
from sklearn.preprocessing import scale
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.utils.multiclass import unique_labels
import matplotlib.pyplot as plt
# ===========================================
warnings.filterwarnings("ignore")
import torch
import numpy as np
import time,os
from sklearn.metrics import f1_score
from torch import nn
def mkdirs(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of the original exists()-then-makedirs()
    sequence, which was vulnerable to a check-then-create race when two
    processes created the same directory concurrently.
    """
    os.makedirs(path, exist_ok=True)
def calc_f1(y_true, y_pre, threshold=0.5):
    """Macro-averaged F1 between integer labels and per-class score predictions.

    :param y_true: tensor of integer class labels (any shape; flattened).
    :param y_pre: tensor of per-class scores; argmax over the last dim picks the class.
    :param threshold: unused; kept for backward compatibility with callers.
    :return: macro F1 score (float).
    """
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    y_true = y_true.view(-1).cpu().detach().numpy().astype(int)
    y_pre = y_pre.cpu().detach().numpy()
    y_pre = np.argmax(y_pre, axis=-1)
    return f1_score(y_true, y_pre, average='macro')
def print_time_cost(since):
    """Format the wall-clock time elapsed since `since` as 'XmYs\\n'."""
    elapsed = time.time() - since
    minutes = elapsed // 60
    seconds = elapsed % 60
    return '{:.0f}m{:.0f}s\n'.format(minutes, seconds)
def adjust_learning_rate(optimizer, lr):
    """Set `lr` on every parameter group of `optimizer` and return it."""
    for group in optimizer.param_groups:
        group['lr'] = lr
    return lr
class WeightedMultilabel(nn.Module):
    """BCE-with-logits loss where each label's loss is scaled by a fixed weight vector."""

    def __init__(self, weights: torch.Tensor):
        super(WeightedMultilabel, self).__init__()
        # Per-element loss; weighting and reduction happen in forward().
        self.cerition = nn.BCEWithLogitsLoss(reduction='none')
        self.weights = weights

    def forward(self, outputs, targets):
        per_element = self.cerition(outputs, targets)
        weighted = per_element * self.weights
        return weighted.mean()
# =======================================
def sig_wt_filt(sig):
    """
    Wavelet-filter a signal: decompose with a level-9 'db6' wavelet, zero the
    approximation band and the two finest detail bands, then reconstruct.

    :param sig: input signal, 1-d array
    :return: wavelet-filtered signal, 1-d array

    NOTE(review): `import pywt` is commented out at the top of this module,
    so calling this function currently raises NameError — confirm pywt is
    installed and re-enable the import before use.
    """
    coeffs = pywt.wavedec(sig, 'db6', level=9)
    coeffs[-1] = np.zeros(len(coeffs[-1]))
    coeffs[-2] = np.zeros(len(coeffs[-2]))
    coeffs[0] = np.zeros(len(coeffs[0]))
    sig_filt = pywt.waverec(coeffs, 'db6')
    return sig_filt
def multi_prep(sig, target_point_num=1280):
    """
    Signal preprocessing: resample each channel to a fixed length,
    wavelet-filter it, then z-score standardize per channel.

    :param sig: raw signals, 2-d array (channels x samples)
    :param target_point_num: target signal length, int
    :return: resampled, filtered, z-score standardized signals, 2-d array

    NOTE(review): relies on sig_wt_filt, which needs the (currently
    commented-out) pywt import at the top of this module.
    """
    assert len(sig.shape) == 2, 'Not for 1-D data.Use 2-D data.'
    sig = resample(sig, target_point_num, axis=1)
    for i in range(sig.shape[0]):
        sig[i] = sig_wt_filt(sig[i])
    sig = scale(sig, axis=1)
    return sig
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    Compute (and formerly plot) a confusion matrix. Adapted from:
    https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py

    Despite the name, all plotting code is commented out below; the function
    only prints and returns the (optionally row-normalized) matrix.
    `title` and `cmap` are kept for interface compatibility with the original.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'

    cm = confusion_matrix(y_true, y_pred)
    # Keep only the class names for labels that actually appear in the data.
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # Row-normalize: each row sums to 1 (per-true-class proportions).
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)
    # fig, ax = plt.subplots()
    # # for i in range(5):
    # #     cm[i,i] = 0
    # im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    # ax.figure.colorbar(im, ax=ax)
    # ax.set(xticks=np.arange(cm.shape[1]),
    #        yticks=np.arange(cm.shape[0]),
    #        xticklabels=classes, yticklabels=classes,
    #        title=title,
    #        ylabel='True label',
    #        xlabel='Predicted label')
    #
    # plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
    #          rotation_mode="anchor")
    #
    # fmt = '.2f' if normalize else 'd'
    # thresh = cm.max() / 2.
    # for i in range(cm.shape[0]):
    #     for j in range(cm.shape[1]):
    #         ax.text(j, i, format(cm[i, j], fmt),
    #                 ha="center", va="center",
    #                 color="white" if cm[i, j] > thresh else "black")
    # fig.tight_layout()
    return cm
def print_results(y_true, y_pred, target_names):
    """
    Print per-class evaluation results.

    :param y_true: expected outputs, 1-d array
    :param y_pred: actual outputs, 1-d array
    :param target_names: names of the classes
    :return: None (prints the results)
    """
    overall_accuracy = accuracy_score(y_true, y_pred)
    print('\n----- overall_accuracy: {0:f} -----'.format(overall_accuracy))
    cm = confusion_matrix(y_true, y_pred)
    for i in range(len(target_names)):
        print(target_names[i] + ':')
        # Se: sensitivity/recall of class i; P+: positive predictivity (precision).
        Se = cm[i][i]/np.sum(cm[i])
        Pp = cm[i][i]/np.sum(cm[:, i])
        print(' Se = ' + str(Se))
        print(' P+ = ' + str(Pp))
    print('--------------------------------------')
| 4,714 | 29.031847 | 156 | py |
bert-extractive-summarizer | bert-extractive-summarizer-master/setup.py | from setuptools import setup
from setuptools import find_packages
# Distribution metadata for PyPI; the version here should match the tag in
# download_url below when cutting a release.
setup(name='bert-extractive-summarizer',
      version='0.10.1',
      description='Extractive Text Summarization with BERT',
      keywords=['bert', 'pytorch', 'machine learning',
                'deep learning', 'extractive summarization', 'summary'],
      long_description=open("README.md", "r", encoding='utf-8').read(),
      long_description_content_type="text/markdown",
      url='https://github.com/dmmiller612/bert-extractive-summarizer',
      download_url='https://github.com/dmmiller612/bert-extractive-summarizer/archive/0.10.1.tar.gz',
      author='Derek Miller',
      author_email='dmmiller612@gmail.com',
      install_requires=['transformers', 'scikit-learn', 'spacy'],
      license='MIT',
      packages=find_packages(),
      zip_safe=False)
| 833 | 42.894737 | 101 | py |
bert-extractive-summarizer | bert-extractive-summarizer-master/summarizer/transformer_embeddings/bert_embedding.py | from typing import List, Union
import numpy as np
import torch
from numpy import ndarray
from transformers import (AlbertModel, AlbertTokenizer, BertModel,
BertTokenizer, DistilBertModel, DistilBertTokenizer,
PreTrainedModel, PreTrainedTokenizer, XLMModel,
XLMTokenizer, XLNetModel, XLNetTokenizer)
class BertEmbedding:
    """Bert Embedding Handler for BERT models.

    Wraps a Hugging Face transformer + tokenizer pair and exposes
    sentence-level embedding extraction via hidden-state pooling.
    """

    # Keyword -> (model class, tokenizer class) pairs resolvable by name.
    MODELS = {
        'bert-base-uncased': (BertModel, BertTokenizer),
        'bert-large-uncased': (BertModel, BertTokenizer),
        'xlnet-base-cased': (XLNetModel, XLNetTokenizer),
        'xlm-mlm-enfr-1024': (XLMModel, XLMTokenizer),
        'distilbert-base-uncased': (DistilBertModel, DistilBertTokenizer),
        'albert-base-v1': (AlbertModel, AlbertTokenizer),
        'albert-large-v1': (AlbertModel, AlbertTokenizer)
    }

    def __init__(
        self,
        model: str,
        custom_model: PreTrainedModel = None,
        custom_tokenizer: PreTrainedTokenizer = None,
        gpu_id: int = 0,
    ):
        """
        Bert Embedding Constructor. Source for Bert embedding processing.

        :param model: Model is the string path for the bert weights. If given a keyword, the s3 path will be used.
        :param custom_model: This is optional if a custom bert model is used.
        :param custom_tokenizer: Place to use custom tokenizer.
        :param gpu_id: CUDA device index to use when a GPU is available.
        """
        base_model, base_tokenizer = self.MODELS.get(model, (None, None))

        self.device = torch.device("cpu")
        if torch.cuda.is_available():
            assert (
                isinstance(gpu_id, int) and (0 <= gpu_id and gpu_id < torch.cuda.device_count())
            ), f"`gpu_id` must be an integer between 0 to {torch.cuda.device_count() - 1}. But got: {gpu_id}"

            self.device = torch.device(f"cuda:{gpu_id}")

        if custom_model:
            self.model = custom_model.to(self.device)
        else:
            # NOTE(review): base_model is None for keywords not in MODELS,
            # which fails here with AttributeError rather than a clear message.
            self.model = base_model.from_pretrained(
                model, output_hidden_states=True).to(self.device)

        if custom_tokenizer:
            self.tokenizer = custom_tokenizer
        else:
            self.tokenizer = base_tokenizer.from_pretrained(model)

        # Inference only: disable dropout etc.
        self.model.eval()

    def tokenize_input(self, text: str) -> torch.tensor:
        """
        Tokenizes the text input.

        Note: no special tokens ([CLS]/[SEP]) or attention mask are added here;
        the raw token ids are wrapped in a single-item batch.

        :param text: Text to tokenize.
        :return: Returns a torch tensor.
        """
        tokenized_text = self.tokenizer.tokenize(text)
        indexed_tokens = self.tokenizer.convert_tokens_to_ids(tokenized_text)
        return torch.tensor([indexed_tokens]).to(self.device)

    def _pooled_handler(self, hidden: torch.Tensor,
                        reduce_option: str) -> torch.Tensor:
        """
        Reduces a hidden-state tensor over the token dimension.

        :param hidden: The hidden torch tensor to process.
        :param reduce_option: The reduce option to use: 'max', 'median', or mean (default).
        :return: Returns a torch tensor.
        """
        if reduce_option == 'max':
            return hidden.max(dim=1)[0].squeeze()

        elif reduce_option == 'median':
            return hidden.median(dim=1)[0].squeeze()

        return hidden.mean(dim=1).squeeze()

    def extract_embeddings(
        self,
        text: str,
        hidden: Union[List[int], int] = -2,
        reduce_option: str = 'mean',
        hidden_concat: bool = False,
    ) -> torch.Tensor:
        """
        Extracts the embeddings for the given text.

        :param text: The text to extract embeddings for.
        :param hidden: The hidden layer(s) to use for a readout handler.
        :param reduce_option: How we should reduce the items.
        :param hidden_concat: Whether or not to concat multiple hidden layers.
        :return: A torch vector.
        """
        tokens_tensor = self.tokenize_input(text)
        # NOTE(review): assumes tuple-style model outputs whose last two items
        # are (pooled output, hidden states) — confirm against the installed
        # transformers version's return convention.
        pooled, hidden_states = self.model(tokens_tensor)[-2:]

        # deprecated temporary keyword functions.
        if reduce_option == 'concat_last_4':
            last_4 = [hidden_states[i] for i in (-1, -2, -3, -4)]
            cat_hidden_states = torch.cat(tuple(last_4), dim=-1)
            return torch.mean(cat_hidden_states, dim=1).squeeze()

        elif reduce_option == 'reduce_last_4':
            last_4 = [hidden_states[i] for i in (-1, -2, -3, -4)]
            return torch.cat(tuple(last_4), dim=1).mean(axis=1).squeeze()

        elif type(hidden) == int:
            hidden_s = hidden_states[hidden]
            return self._pooled_handler(hidden_s, reduce_option)

        elif hidden_concat:
            last_states = [hidden_states[i] for i in hidden]
            cat_hidden_states = torch.cat(tuple(last_states), dim=-1)
            return torch.mean(cat_hidden_states, dim=1).squeeze()

        # Default multi-layer path: stack selected layers along the token dim.
        last_states = [hidden_states[i] for i in hidden]
        hidden_s = torch.cat(tuple(last_states), dim=1)

        return self._pooled_handler(hidden_s, reduce_option)

    def create_matrix(
        self,
        content: List[str],
        hidden: Union[List[int], int] = -2,
        reduce_option: str = 'mean',
        hidden_concat: bool = False,
    ) -> ndarray:
        """
        Create matrix from the embeddings.

        :param content: The list of sentences.
        :param hidden: Which hidden layer to use.
        :param reduce_option: The reduce option to run.
        :param hidden_concat: Whether or not to concat multiple hidden layers.
        :return: A numpy array matrix of the given content.
        """
        return np.asarray([
            np.squeeze(self.extract_embeddings(
                t, hidden=hidden, reduce_option=reduce_option, hidden_concat=hidden_concat
            ).data.cpu().numpy()) for t in content
        ])

    def __call__(
        self,
        content: List[str],
        hidden: Union[List[int], int] = -2,
        reduce_option: str = 'mean',
        hidden_concat: bool = False,
    ) -> ndarray:
        """
        Create matrix from the embeddings (alias for create_matrix).

        :param content: The list of sentences.
        :param hidden: Which hidden layer to use.
        :param reduce_option: The reduce option to run.
        :param hidden_concat: Whether or not to concat multiple hidden layers.
        :return: A numpy array matrix of the given content.
        """
        return self.create_matrix(content, hidden, reduce_option, hidden_concat)
| 6,387 | 35.712644 | 114 | py |
bert-extractive-summarizer | bert-extractive-summarizer-master/summarizer/transformer_embeddings/sbert_embedding.py | from typing import List
import numpy as np
import torch
from sentence_transformers import SentenceTransformer
class SBertEmbedding:
    """Sentence-embedding wrapper around the SentenceTransformer package."""

    def __init__(self, model: str):
        """
        SBert Parent Handler.

        :param model: The model string passed straight to SentenceTransformer.
        """
        self.sbert_model = SentenceTransformer(model)
        use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.sbert_model.to(self.device)

    def extract_embeddings(self, sentences: List[str]) -> np.ndarray:
        """
        Calculates sentence embeddings.

        :param sentences: The sentences to summarize.
        :return Numpy array of sentence embeddings.
        """
        return self.sbert_model.encode(sentences)

    def __call__(self, sentences: List[str]) -> np.ndarray:
        """
        Calculates sentence embeddings (alias for extract_embeddings).

        :param sentences: The sentences to summarize.
        :return Numpy array of sentence embeddings.
        """
        return self.extract_embeddings(sentences)
| 1,129 | 27.974359 | 82 | py |
bert-extractive-summarizer | bert-extractive-summarizer-master/tests/test_summary_items.py | import pytest
import torch
from transformers import AlbertTokenizer, AlbertModel
from summarizer import Summarizer, TransformerSummarizer
@pytest.fixture()
def custom_summarizer():
    # Summarizer driven by a caller-supplied ALBERT model/tokenizer pair.
    albert_model = AlbertModel.from_pretrained('albert-base-v2', output_hidden_states=True)
    albert_tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
    return Summarizer(custom_model=albert_model, custom_tokenizer=albert_tokenizer)


@pytest.fixture()
def summarizer():
    # Default DistilBERT summarizer, pinned to the last visible GPU index.
    gpu_id = torch.cuda.device_count() - 1  # Use the last GPU
    return Summarizer('distilbert-base-uncased', gpu_id=gpu_id)


@pytest.fixture()
def summarizer_multi_hidden():
    # Summarizer configured to pool over the last two hidden layers.
    return Summarizer('distilbert-base-uncased', hidden=[-1,-2])


@pytest.fixture()
def transformer_summarizer():
    # Generic TransformerSummarizer configured for DistilBERT.
    return TransformerSummarizer('DistilBert', 'distilbert-base-uncased')
@pytest.fixture()
def passage():
    # Long multi-sentence news article used as the summarization input for
    # every test below. The text is a runtime value and must stay unchanged.
    return '''
    The Chrysler Building, the famous art deco New York skyscraper, will be sold for a small fraction of its previous sales price.
    The deal, first reported by The Real Deal, was for $150 million, according to a source familiar with the deal.
    Mubadala, an Abu Dhabi investment fund, purchased 90% of the building for $800 million in 2008.
    Real estate firm Tishman Speyer had owned the other 10%.
    The buyer is RFR Holding, a New York real estate company.
    Officials with Tishman and RFR did not immediately respond to a request for comments.
    It's unclear when the deal will close.
    The building sold fairly quickly after being publicly placed on the market only two months ago.
    The sale was handled by CBRE Group.
    The incentive to sell the building at such a huge loss was due to the soaring rent the owners pay to Cooper Union, a New York college, for the land under the building.
    The rent is rising from $7.75 million last year to $32.5 million this year to $41 million in 2028.
    Meantime, rents in the building itself are not rising nearly that fast.
    While the building is an iconic landmark in the New York skyline, it is competing against newer office towers with large floor-to-ceiling windows and all the modern amenities.
    Still the building is among the best known in the city, even to people who have never been to New York.
    It is famous for its triangle-shaped, vaulted windows worked into the stylized crown, along with its distinctive eagle gargoyles near the top.
    It has been featured prominently in many films, including Men in Black 3, Spider-Man, Armageddon, Two Weeks Notice and Independence Day.
    The previous sale took place just before the 2008 financial meltdown led to a plunge in real estate prices.
    Still there have been a number of high profile skyscrapers purchased for top dollar in recent years, including the Waldorf Astoria hotel, which Chinese firm Anbang Insurance purchased in 2016 for nearly $2 billion, and the Willis Tower in Chicago, which was formerly known as Sears Tower, once the world's tallest.
    Blackstone Group (BX) bought it for $1.3 billion 2015.
    The Chrysler Building was the headquarters of the American automaker until 1953, but it was named for and owned by Chrysler chief Walter Chrysler, not the company itself.
    Walter Chrysler had set out to build the tallest building in the world, a competition at that time with another Manhattan skyscraper under construction at 40 Wall Street at the south end of Manhattan. He kept secret the plans for the spire that would grace the top of the building, building it inside the structure and out of view of the public until 40 Wall Street was complete.
    Once the competitor could rise no higher, the spire of the Chrysler building was raised into view, giving it the title.
    '''
def test_space_in_sentences(summarizer, passage):
    # End-to-end check: the exact summary text is pinned to catch model or
    # sentence-selection drift.
    result = summarizer(passage, num_sentences=3)
    assert result == 'The Chrysler Building, the famous art deco New York skyscraper, will be sold for a small fraction of its previous sales price. Mubadala, an Abu Dhabi investment fund, purchased 90% of the building for $800 million in 2008. He kept secret the plans for the spire that would grace the top of the building, building it inside the structure and out of view of the public until 40 Wall Street was complete.'


def test_transformer_summarizer(transformer_summarizer, passage):
    result = transformer_summarizer(passage, num_sentences=2, return_as_list=True)
    assert len(result) == 2


def test_single_with_use_first(transformer_summarizer, passage):
    # With use_first (default) a one-sentence summary is the opening sentence.
    result = transformer_summarizer(passage, num_sentences=1, return_as_list=True)
    assert result == ['The Chrysler Building, the famous art deco New York skyscraper, will be sold for a small fraction of its previous sales price.']


def test_single_without_use_first(transformer_summarizer, passage):
    result = transformer_summarizer(passage, num_sentences=1, return_as_list=True, use_first=False)
    assert len(result) == 1


def test_num_sentences(summarizer, passage):
    result = summarizer(passage, num_sentences=3, return_as_list=True)
    assert len(result) == 3


def test_elbow_calculation(summarizer, passage):
    # k_max=5 yields inertia values for k = 1..4.
    res = summarizer.calculate_elbow(passage, k_max=5)
    assert len(res) == 4


def test_optimal_elbow_calculation(summarizer, passage):
    res = summarizer.calculate_optimal_k(passage, k_max=6)
    assert type(res) == int


def test_list_handling(summarizer, passage):
    res = summarizer(passage, num_sentences=3, return_as_list=True)
    assert type(res) == list
    assert len(res) == 3


def test_multi_hidden(summarizer_multi_hidden, passage):
    res = summarizer_multi_hidden(passage, num_sentences=5, min_length=40, max_length=500)
    assert len(res) > 10


def test_multi_hidden_concat(summarizer_multi_hidden: Summarizer, passage):
    # Mutating the fixture is fine: a fresh instance is built per test.
    summarizer_multi_hidden.hidden_concat = True
    res = summarizer_multi_hidden(passage, num_sentences=5, min_length=40, max_length=500)
    assert len(res) > 10


def test_summary_creation(summarizer, passage):
    res = summarizer(passage, ratio=0.15, min_length=40, max_length=500)
    assert len(res) > 10


def test_summary_embeddings(summarizer, passage):
    # DistilBERT hidden size is 768, hence the column count.
    embeddings = summarizer.run_embeddings(passage, ratio=0.15, min_length=25, max_length=500)
    assert embeddings.shape[1] == 768
    assert embeddings.shape[0] > 1


def test_summary_larger_ratio(summarizer, passage):
    res = summarizer(passage, ratio=0.5)
    assert len(res) > 10


def test_cluster_algorithm(summarizer, passage):
    # Gaussian mixture clustering instead of the default kmeans.
    res = summarizer(passage, algorithm='gmm')
    assert len(res) > 10


def test_do_not_use_first(summarizer, passage):
    res = summarizer(passage, ratio=0.1, use_first=False)
    assert res is not None


def test_albert(custom_summarizer, passage):
    res = custom_summarizer(passage)
    assert len(res) > 10


def test_num_sentences_embeddings(summarizer, passage):
    result = summarizer.run_embeddings(passage, num_sentences=4)
    assert result.shape == (4, 768)


def test_aggregate_embeddings(summarizer, passage):
    # aggregate='mean' collapses the sentence axis into one 768-d vector.
    result = summarizer.run_embeddings(passage, num_sentences=4, aggregate='mean')
    assert result.shape == (768,)
| 7,139 | 46.6 | 424 | py |
probdet | probdet-master/src/single_image_inference.py | """
Probabilistic Detectron Single Image Inference Script
"""
import core
import cv2
import json
import os
import sys
import torch
import tqdm
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), 'src', 'detr'))
# Detectron imports
from detectron2.engine import launch
from detectron2.data import MetadataCatalog
from detectron2.data.transforms import ResizeShortestEdge
# Project imports
from core.evaluation_tools.evaluation_utils import get_train_contiguous_id_to_test_thing_dataset_id_dict
from core.setup import setup_config, setup_arg_parser
from probabilistic_inference.inference_utils import instances_to_json, build_predictor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
    """Run probabilistic detector inference on every image in args.image_dir
    and write the accumulated detections to <args.output_dir>/results.json."""
    # Setup config
    cfg = setup_config(args,
                       random_seed=args.random_seed,
                       is_testing=True)

    # Make sure only 1 data point is processed at a time. This simulates
    # deployment.
    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 32
    cfg.SOLVER.IMS_PER_BATCH = 1

    cfg.MODEL.DEVICE = device.type

    # Set up number of cpu threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)

    # Create inference output directory
    inference_output_dir = os.path.expanduser(args.output_dir)
    os.makedirs(inference_output_dir, exist_ok=True)

    # Get category mapping dictionary. Mapping here is from coco-->coco
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]).thing_dataset_id_to_contiguous_id

    # If both dicts are equal or if we are performing out of distribution
    # detection, just flip the test dict.
    cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id)

    # Build predictor
    cfg.MODEL.WEIGHTS = os.path.expanduser(args.model_ckpt)
    predictor = build_predictor(cfg)

    # List images in image folder
    image_folder = os.path.expanduser(args.image_dir)
    image_list = os.listdir(image_folder)

    # Construct image resizer
    resizer = ResizeShortestEdge(cfg.INPUT.MIN_SIZE_TEST, max_size=cfg.INPUT.MAX_SIZE_TEST)

    final_output_list = []
    with torch.no_grad():
        with tqdm.tqdm(total=len(image_list)) as pbar:
            for idx, input_file_name in enumerate(image_list):
                # Read image, apply shortest edge resize, and change to
                # channel first position.
                cv2_image = cv2.imread(os.path.join(image_folder, input_file_name))
                # Bug fix: cv2.imread returns None (not an empty array) when a
                # file cannot be read, so the original `cv2_image.size` check
                # raised AttributeError instead of skipping the file.
                if cv2_image is not None and cv2_image.size != 0:
                    shape = cv2_image.shape
                    height = shape[0]
                    width = shape[1]
                    output_transform = resizer.get_transform(cv2_image)
                    cv2_image = output_transform.apply_image(cv2_image)
                    # Bug fix: the original called .to() with no argument,
                    # which leaves the tensor where it is; move it to the
                    # configured inference device instead.
                    input_im_tensor = torch.tensor(cv2_image).to(
                        device).permute(2, 0, 1)
                    input_im = [dict({'filename': input_file_name,
                                      'image_id': input_file_name,
                                      'height': height,
                                      'width': width,
                                      'image': input_im_tensor})]

                    # Perform inference
                    outputs = predictor(input_im)

                    # predictor.visualize_inference(input_im, outputs)

                    final_output_list.extend(instances_to_json(
                        outputs,
                        input_im[0]['image_id'],
                        cat_mapping_dict))
                    pbar.update(1)
                else:
                    print('Failed to read image {}'.format(input_file_name))

    with open(os.path.join(inference_output_dir, 'results.json'), 'w') as fp:
        json.dump(final_output_list, fp, indent=4,
                  separators=(',', ': '))
if __name__ == "__main__":
    # Create arg parser
    arg_parser = setup_arg_parser()

    args = arg_parser.parse_args()
    # Support single gpu inference only.
    args.num_gpus = 1
    print("Command Line Args:", args)
    # detectron2's launch handles process/distributed setup (single GPU here).
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 4,579 | 34.78125 | 104 | py |
probdet | probdet-master/src/apply_net.py | """
Probabilistic Detectron Inference Script
"""
import core
import json
import os
import sys
import torch
import tqdm
from shutil import copyfile
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), 'src', 'detr'))
# Detectron imports
from detectron2.engine import launch
from detectron2.data import build_detection_test_loader, MetadataCatalog
# Project imports
from core.evaluation_tools.evaluation_utils import get_train_contiguous_id_to_test_thing_dataset_id_dict
from core.setup import setup_config, setup_arg_parser
from offline_evaluation import compute_average_precision, compute_probabilistic_metrics, compute_ood_probabilistic_metrics, compute_calibration_errors
from probabilistic_inference.inference_utils import instances_to_json, get_inference_output_dir, build_predictor
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
    """Run probabilistic detector inference over a registered test dataset,
    write COCO-format detections, then run the offline evaluation suite."""
    # Setup config
    cfg = setup_config(args,
                       random_seed=args.random_seed,
                       is_testing=True)

    # Make sure only 1 data point is processed at a time. This simulates
    # deployment.
    cfg.defrost()
    cfg.DATALOADER.NUM_WORKERS = 32
    cfg.SOLVER.IMS_PER_BATCH = 1

    cfg.MODEL.DEVICE = device.type

    # Set up number of cpu threads
    torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)

    # Create inference output directory and copy inference config file to keep
    # track of experimental settings
    inference_output_dir = get_inference_output_dir(
        cfg['OUTPUT_DIR'],
        args.test_dataset,
        args.inference_config,
        args.image_corruption_level)
    os.makedirs(inference_output_dir, exist_ok=True)
    copyfile(args.inference_config, os.path.join(
        inference_output_dir, os.path.split(args.inference_config)[-1]))

    # Get category mapping dictionary:
    train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0]).thing_dataset_id_to_contiguous_id
    test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
        args.test_dataset).thing_dataset_id_to_contiguous_id

    # If both dicts are equal or if we are performing out of distribution
    # detection, just flip the test dict.
    cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
        cfg,
        args,
        train_thing_dataset_id_to_contiguous_id,
        test_thing_dataset_id_to_contiguous_id)

    # Build predictor
    predictor = build_predictor(cfg)

    test_data_loader = build_detection_test_loader(
        cfg, dataset_name=args.test_dataset)

    final_output_list = []
    # Skip the (slow) inference loop entirely when only re-running evaluation.
    if not args.eval_only:
        with torch.no_grad():
            with tqdm.tqdm(total=len(test_data_loader)) as pbar:
                for idx, input_im in enumerate(test_data_loader):
                    # Apply corruption
                    outputs = predictor(input_im)

                    # predictor.visualize_inference(input_im, outputs)

                    final_output_list.extend(
                        instances_to_json(
                            outputs,
                            input_im[0]['image_id'],
                            cat_mapping_dict))
                    pbar.update(1)

        with open(os.path.join(inference_output_dir, 'coco_instances_results.json'), 'w') as fp:
            json.dump(final_output_list, fp, indent=4,
                      separators=(',', ': '))

    # OOD datasets get the OOD-specific probabilistic metrics; otherwise run
    # AP, probabilistic, and calibration evaluations.
    if 'ood' in args.test_dataset:
        compute_ood_probabilistic_metrics.main(args, cfg)
    else:
        compute_average_precision.main(args, cfg)
        compute_probabilistic_metrics.main(args, cfg)
        compute_calibration_errors.main(args, cfg)
if __name__ == "__main__":
    # Create arg parser
    arg_parser = setup_arg_parser()

    args = arg_parser.parse_args()
    # Support single gpu inference only.
    args.num_gpus = 1
    print("Command Line Args:", args)
    # detectron2's launch handles process/distributed setup (single GPU here).
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
| 4,133 | 33.45 | 150 | py |
probdet | probdet-master/src/core/setup.py | import numpy as np
import os
import random
import torch
from shutil import copyfile
# Detectron imports
import detectron2.utils.comm as comm
from detectron2.config import get_cfg, CfgNode as CN
from detectron2.engine import default_argument_parser, default_setup
from detectron2.utils.logger import setup_logger
# Detr imports
from d2.detr.config import add_detr_config
# Project imports
import core
from core.datasets.setup_datasets import setup_all_datasets
from probabilistic_modeling.probabilistic_retinanet import ProbabilisticRetinaNet
from probabilistic_modeling.probabilistic_generalized_rcnn import ProbabilisticGeneralizedRCNN, \
DropoutFastRCNNConvFCHead, ProbabilisticROIHeads
from probabilistic_modeling.probabilistic_detr import ProbabilisticDetr
def setup_arg_parser():
    """
    Set up the argument parser for the project's python scripts.

    Starts from detectron2's default parser and registers the extra
    probabilistic-detectron options: dataset/seed settings, inference-only
    parameters, evaluation thresholds, and single-image inference paths.

    Returns:
        arg_parser (ArgumentParser): Argument parser updated with probabilistic detectron args.
    """
    parser = default_argument_parser()

    # (flag, type, default, help) for each extra option, in the order they
    # should appear in --help output.
    extra_options = [
        ("--dataset-dir", str, "",
         "path to dataset directory."),
        ("--random-seed", int, 0,
         "random seed to be used for all scientific computing libraries"),
        # Inference arguments, will not be used during training.
        ("--inference-config", str, "",
         "Inference parameter: Path to the inference config, which is different from training config. Check readme for more information."),
        ("--test-dataset", str, "",
         "Inference parameter: Dataset used for testing. Can be one of the following: 'coco_2017_custom_val', 'openimages_val', 'openimages_ood_val' "),
        ("--image-corruption-level", int, 0,
         "Inference parameter: Image corruption level between 0-5. Default is no corruption, level 0."),
        # Evaluation arguments, will not be used during training.
        ("--iou-min", float, 0.1,
         "Evaluation parameter: IOU threshold bellow which a detection is considered a false positive."),
        ("--iou-correct", float, 0.5,
         "Evaluation parameter: IOU threshold above which a detection is considered a true positive."),
        ("--min-allowed-score", float, 0.0,
         "Evaluation parameter:Minimum classification score for which a detection is considered in the evaluation."),
        # Single image inference parameters.
        ("--model-ckpt", str, "",
         "Single image inference parameter: path to model checkpoint used for inference."),
        ("--image-dir", str, "",
         "Single image inference parameter: path to image directory"),
        ("--output-dir", str, "",
         "Single image inference parameter: path to save results"),
    ]
    for flag, option_type, default, help_text in extra_options:
        parser.add_argument(flag, type=option_type, default=default, help=help_text)

    return parser
def add_probabilistic_config(cfg):
    """
    Add configuration elements specific to probabilistic detectron.

    Extends the given node in place with the MODEL.PROBABILISTIC_MODELING
    (training-time) and PROBABILISTIC_INFERENCE (test-time) sub-trees,
    populated with their default values.

    Args:
        cfg (CfgNode): detectron2 configuration node.
    """
    # --- Probabilistic modeling (training-time) setup ---
    cfg.MODEL.PROBABILISTIC_MODELING = CN()
    cfg.MODEL.PROBABILISTIC_MODELING.MC_DROPOUT = CN()
    cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS = CN()
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS = CN()

    # Annealing step for losses that require some form of annealing.
    cfg.MODEL.PROBABILISTIC_MODELING.ANNEALING_STEP = 0

    # Monte-Carlo dropout settings.
    cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE = 0.0

    # Loss configs.
    cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NAME = 'none'
    cfg.MODEL.PROBABILISTIC_MODELING.CLS_VAR_LOSS.NUM_SAMPLES = 3
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NAME = 'none'
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.COVARIANCE_TYPE = 'diagonal'
    cfg.MODEL.PROBABILISTIC_MODELING.BBOX_COV_LOSS.NUM_SAMPLES = 1000

    # --- Probabilistic inference setup ---
    cfg.PROBABILISTIC_INFERENCE = CN()
    cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT = CN()
    cfg.PROBABILISTIC_INFERENCE.BAYES_OD = CN()
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES_DROPOUT = CN()
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES = CN()

    # General inference configs.
    cfg.PROBABILISTIC_INFERENCE.INFERENCE_MODE = 'standard_nms'
    cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT.ENABLE = False
    cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT.NUM_RUNS = 1
    cfg.PROBABILISTIC_INFERENCE.AFFINITY_THRESHOLD = 0.7

    # Bayes OD configs.
    cfg.PROBABILISTIC_INFERENCE.BAYES_OD.BOX_MERGE_MODE = 'bayesian_inference'
    cfg.PROBABILISTIC_INFERENCE.BAYES_OD.CLS_MERGE_MODE = 'bayesian_inference'
    cfg.PROBABILISTIC_INFERENCE.BAYES_OD.DIRCH_PRIOR = 'uniform'

    # Ensembles configs.
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_MERGE_MODE = 'pre_nms'
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.RANDOM_SEED_NUMS = [
        0, 1000, 2000, 3000, 4000]
    # 'mixture_of_gaussian' or 'bayesian_inference'
    cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.BOX_FUSION_MODE = 'mixture_of_gaussians'
def setup_config(args, random_seed=None, is_testing=False):
    """
    Sets up config node with probabilistic detectron elements. Also sets up a fixed random seed for all scientific
    computing libraries, and sets up all supported datasets as instances of coco.

    Side effects: mutates ``args.config_file`` / ``args.inference_config``
    into absolute paths, creates (or requires) the output directory, copies
    the config file into it, seeds torch/numpy/random globally, and
    registers all datasets with detectron2.

    Args:
        args (Namespace): args from argument parser
        random_seed (int): set a fixed random seed throughout torch, numpy, and python
        is_testing (bool): set to true if inference. If true function will return an error if checkpoint directory not
        already existing.

    Returns:
        (CfgNode) detectron2 config object
    """
    # Get default detectron config file
    cfg = get_cfg()
    # Layer the project-specific config trees onto the detectron2 defaults
    # before merging any user file, so the user file's keys already exist.
    add_detr_config(cfg)
    add_probabilistic_config(cfg)

    # Update default config file with custom config file
    configs_dir = core.configs_dir()
    # NOTE: rewrites args.config_file in place to an absolute path.
    args.config_file = os.path.join(configs_dir, args.config_file)
    cfg.merge_from_file(args.config_file)

    # Add dropout rate for faster RCNN box head
    cfg.MODEL.ROI_BOX_HEAD.DROPOUT_RATE = cfg.MODEL.PROBABILISTIC_MODELING.DROPOUT_RATE

    # Update config with inference configurations. Only applicable for when in
    # probabilistic inference mode.
    if args.inference_config != "":
        args.inference_config = os.path.join(
            configs_dir, args.inference_config)
        cfg.merge_from_file(args.inference_config)

    # Create output directory laid out as:
    # <data_dir>/<dataset>/<model>/<config-stem>/random_seed_<seed>
    # where dataset/model come from the config file's parent directories.
    model_name = os.path.split(os.path.split(args.config_file)[0])[-1]
    dataset_name = os.path.split(os.path.split(
        os.path.split(args.config_file)[0])[0])[-1]

    # NOTE(review): [:-5] strips a 5-char extension — assumes the config file
    # ends in ".yaml"; other extensions would be truncated wrongly. Confirm.
    cfg['OUTPUT_DIR'] = os.path.join(core.data_dir(),
                                     dataset_name,
                                     model_name,
                                     os.path.split(args.config_file)[-1][:-5],
                                     'random_seed_' + str(random_seed))
    # At inference time the checkpoint directory must already exist (it holds
    # the trained weights); at training time it is created if missing.
    if is_testing:
        if not os.path.isdir(cfg['OUTPUT_DIR']):
            raise NotADirectoryError(
                "Checkpoint directory {} does not exist.".format(
                    cfg['OUTPUT_DIR']))

    os.makedirs(cfg['OUTPUT_DIR'], exist_ok=True)

    # copy config file to output directory
    copyfile(args.config_file, os.path.join(
        cfg['OUTPUT_DIR'], os.path.split(args.config_file)[-1]))

    # Freeze config file
    cfg['SEED'] = random_seed
    cfg.freeze()

    # Initiate default setup
    default_setup(cfg, args)

    # Setup logger for probabilistic detectron module
    setup_logger(
        output=cfg.OUTPUT_DIR,
        distributed_rank=comm.get_rank(),
        name="Probabilistic Detectron")

    # Set a fixed random seed for all numerical libraries
    if random_seed is not None:
        torch.manual_seed(random_seed)
        np.random.seed(random_seed)
        random.seed(random_seed)

    # Setup datasets
    if args.image_corruption_level != 0:
        image_root_corruption_prefix = '_' + str(args.image_corruption_level)
    else:
        image_root_corruption_prefix = None

    dataset_dir = os.path.expanduser(args.dataset_dir)

    # Handle cases when this function has been called multiple times. In that case skip fully.
    # Todo this is very bad practice, should fix.
    try:
        setup_all_datasets(
            dataset_dir,
            image_root_corruption_prefix=image_root_corruption_prefix)
        return cfg
    except AssertionError:
        # Dataset registration raises AssertionError on duplicate
        # registration; deliberately swallowed so repeated calls are no-ops.
        return cfg
| 8,904 | 33.649805 | 155 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.