Yaning1001 commited on
Commit
4113c4d
·
verified ·
1 Parent(s): c91d7b1

Add files using upload-large-folder tool

Browse files
Files changed (34) hide show
  1. deeprobust/image/adversary_examples/advexample_fgsm.png +0 -0
  2. deeprobust/image/adversary_examples/advexample_pgd.png +0 -0
  3. deeprobust/image/adversary_examples/test.png +0 -0
  4. deeprobust/image/attack/__init__.py +15 -0
  5. deeprobust/image/attack/cw.py +285 -0
  6. deeprobust/image/attack/deepfool.py +147 -0
  7. deeprobust/image/defense/advexample_pgd.png +0 -0
  8. deeprobust/image/netmodels/__init__.py +7 -0
  9. deeprobust/image/netmodels/densenet.py +185 -0
  10. deeprobust/image/netmodels/preact_resnet.py +101 -0
  11. docs/source/deeprobust.graph.defense.rst +54 -0
  12. docs/source/deeprobust.graph.global_attack.rst +48 -0
  13. docs/source/deeprobust.graph.rst +35 -0
  14. docs/source/deeprobust.rst +16 -0
  15. examples/graph/cgscore_datasets_batch.py +223 -0
  16. examples/graph/cgscore_experiments/huggingface/download.py +0 -0
  17. examples/graph/cgscore_experiments/huggingface/upload.py +17 -0
  18. examples/graph/perturbated_data/metattack.py +0 -0
  19. examples/graph/read_score.py +78 -0
  20. examples/graph/test_airgnn.py +103 -0
  21. examples/graph/test_dice.py +80 -0
  22. examples/graph/test_fga.py +188 -0
  23. examples/graph/test_gcn_adj_cgscore.py +328 -0
  24. examples/graph/test_gcn_svd.py +52 -0
  25. examples/graph/test_ig.py +192 -0
  26. examples/graph/test_prbcd_cora.py +47 -0
  27. examples/graph/test_rgcn.py +52 -0
  28. examples/graph/test_sgc.py +52 -0
  29. examples/graph/test_simpgcn.py +48 -0
  30. examples/image/test1.py +122 -0
  31. examples/image/test_ImageNet.py +33 -0
  32. examples/image/test_onepixel.py +80 -0
  33. examples/image/test_pgdtraining.py +38 -0
  34. examples/image/test_train.py +2 -0
deeprobust/image/adversary_examples/advexample_fgsm.png ADDED
deeprobust/image/adversary_examples/advexample_pgd.png ADDED
deeprobust/image/adversary_examples/test.png ADDED
deeprobust/image/attack/__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #__init__.py
2
+ import logging
3
+
4
+ from deeprobust.image.attack import base_attack
5
+ from deeprobust.image.attack import pgd
6
+ from deeprobust.image.attack import deepfool
7
+ from deeprobust.image.attack import fgsm
8
+ from deeprobust.image.attack import lbfgs
9
+ from deeprobust.image.attack import cw
10
+
11
+ from deeprobust.image.attack import onepixel
12
+
13
+ __all__ = ['base_attack', 'pgd', 'lbfgs', 'fgsm', 'deepfool','cw', 'onepixel']
14
+
15
+ logging.info("import base_attack from attack")
deeprobust/image/attack/cw.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import optim
3
+ import torch.nn as nn
4
+ import numpy as np
5
+ import logging
6
+
7
+ from deeprobust.image.attack.base_attack import BaseAttack
8
+ from deeprobust.image.utils import onehot_like
9
+ from deeprobust.image.optimizer import AdamOptimizer
10
+
11
+ class CarliniWagner(BaseAttack):
12
+ """
13
+ C&W attack is an effective method to calculate high-confidence adversarial examples.
14
+
15
+ References
16
+ ----------
17
+ .. [1] Carlini, N., & Wagner, D. (2017, May). Towards evaluating the robustness of neural networks. https://arxiv.org/pdf/1608.04644.pdf
18
+
19
+ This reimplementation is based on https://github.com/kkew3/pytorch-cw2
20
+ Copyright 2018 Kaiwen Wu
21
+
22
+ Examples
23
+ --------
24
+
25
+ >>> from deeprobust.image.attack.cw import CarliniWagner
26
+ >>> from deeprobust.image.netmodels.CNN import Net
27
+ >>> from deeprobust.image.config import attack_params
28
+
29
+ >>> model = Net()
30
+ >>> model.load_state_dict(torch.load("./trained_models/MNIST_CNN_epoch_20.pt", map_location = torch.device('cuda')))
31
+ >>> model.eval()
32
+
33
+ >>> x,y = datasets.MNIST()
34
+ >>> attack = CarliniWagner(model, device='cuda')
35
+ >>> AdvExArray = attack.generate(x, y, target_label = 1, classnum = 10, **attack_params['CW_MNIST'])
36
+
37
+ """
38
+
39
+
40
+ def __init__(self, model, device = 'cuda'):
41
+ super(CarliniWagner, self).__init__(model, device)
42
+ self.model = model
43
+ self.device = device
44
+
45
+ def generate(self, image, label, target_label, **kwargs):
46
+ """
47
+ Call this function to generate adversarial examples.
48
+
49
+ Parameters
50
+ ----------
51
+ image :
52
+ original image
53
+ label :
54
+ target label
55
+ kwargs :
56
+ user defined parameters
57
+ """
58
+
59
+ assert self.check_type_device(image, label)
60
+ assert self.parse_params(**kwargs)
61
+ self.target = target_label
62
+ return self.cw(self.model,
63
+ self.image,
64
+ self.label,
65
+ self.target,
66
+ self.confidence,
67
+ self.clip_max,
68
+ self.clip_min,
69
+ self.max_iterations,
70
+ self.initial_const,
71
+ self.binary_search_steps,
72
+ self.learning_rate
73
+ )
74
+
75
+ def parse_params(self,
76
+ classnum = 10,
77
+ confidence = 1e-4,
78
+ clip_max = 1,
79
+ clip_min = 0,
80
+ max_iterations = 1000,
81
+ initial_const = 1e-2,
82
+ binary_search_steps = 5,
83
+ learning_rate = 0.00001,
84
+ abort_early = True):
85
+ """
86
+ Parse the user defined parameters.
87
+
88
+ Parameters
89
+ ----------
90
+ classnum :
91
+ number of class
92
+ confidence :
93
+ confidence
94
+ clip_max :
95
+ maximum pixel value
96
+ clip_min :
97
+ minimum pixel value
98
+ max_iterations :
99
+ maximum number of iterations
100
+ initial_const :
101
+ initialization of binary search
102
+ binary_search_steps :
103
+ step number of binary search
104
+ learning_rate :
105
+ learning rate
106
+ abort_early :
107
+ Set abort_early = True to allow early stop
108
+ """
109
+
110
+ self.classnum = classnum
111
+ self.confidence = confidence
112
+ self.clip_max = clip_max
113
+ self.clip_min = clip_min
114
+ self.max_iterations = max_iterations
115
+ self.initial_const = initial_const
116
+ self.binary_search_steps = binary_search_steps
117
+ self.learning_rate = learning_rate
118
+ self.abort_early = abort_early
119
+ return True
120
+
121
+ def cw(self, model, image, label, target, confidence, clip_max, clip_min, max_iterations, initial_const, binary_search_steps, learning_rate):
122
+ #change the input image
123
+ img_tanh = self.to_attack_space(image.cpu())
124
+ img_ori ,_ = self.to_model_space(img_tanh)
125
+ img_ori = img_ori.to(self.device)
126
+
127
+ #binary search initialization
128
+ c = initial_const
129
+ c_low = 0
130
+ c_high = np.inf
131
+ found_adv = False
132
+ last_loss = np.inf
133
+
134
+ for step in range(binary_search_steps):
135
+
136
+ #initialize w : perturbed image in tanh space
137
+ w = torch.from_numpy(img_tanh.numpy())
138
+
139
+ optimizer = AdamOptimizer(img_tanh.shape)
140
+
141
+ is_adversarial = False
142
+
143
+ for iteration in range(max_iterations):
144
+
145
+ # adversary example
146
+ img_adv, adv_grid = self.to_model_space(w)
147
+ img_adv = img_adv.to(self.device)
148
+ img_adv.requires_grad = True
149
+
150
+ #output of the layer before softmax
151
+ output = model.get_logits(img_adv)
152
+
153
+ #pending success
154
+ is_adversarial = self.pending_f(img_adv)
155
+
156
+ #calculate loss function and gradient of loss function on x
157
+ loss, loss_grad = self.loss_function(
158
+ img_adv, c, self.target, img_ori, self.confidence, self.clip_min, self.clip_max
159
+ )
160
+
161
+
162
+ #calculate gradient of loss function on w
163
+ gradient = adv_grid.to(self.device) * loss_grad.to(self.device)
164
+ w = w + torch.from_numpy(optimizer(gradient.cpu().detach().numpy(), learning_rate)).float()
165
+
166
+ if is_adversarial:
167
+ found_adv = True
168
+
169
+ #do binary search on c
170
+ if found_adv:
171
+ c_high = c
172
+ else:
173
+ c_low = c
174
+
175
+ if c_high == np.inf:
176
+ c *= 10
177
+ else:
178
+ c = (c_high + c_low) / 2
179
+
180
+ if (step % 10 == 0):
181
+ print("iteration:{:.0f},loss:{:.4f}".format(step,loss))
182
+
183
+ # if (step == 50):
184
+ # learning_rate = learning_rate/100
185
+
186
+ #abort early
187
+ if(self.abort_early == True and (step % 10) == 0 and step > 100) :
188
+ print("early abortion?", loss, last_loss)
189
+ if not (loss <= 0.9999 * last_loss):
190
+ break
191
+ last_loss = loss
192
+
193
+
194
+ return img_adv.detach()
195
+
196
+ def loss_function(
197
+ self, x_p, const, target, reconstructed_original, confidence, min_, max_):
198
+ """Returns the loss and the gradient of the loss w.r.t. x,
199
+ assuming that logits = model(x)."""
200
+
201
+ ## get the output of model before softmax
202
+ x_p.requires_grad = True
203
+ logits = self.model.get_logits(x_p).to(self.device)
204
+
205
+ ## find the largest class except the target class
206
+ targetlabel_mask = (torch.from_numpy(onehot_like(np.zeros(self.classnum), target))).double()
207
+ secondlargest_mask = (torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask).to(self.device)
208
+
209
+ secondlargest = np.argmax((logits.double() * secondlargest_mask).cpu().detach().numpy(), axis = 1)
210
+
211
+ is_adv_loss = logits[0][secondlargest] - logits[0][target]
212
+
213
+ # is_adv is True as soon as the is_adv_loss goes below 0
214
+ # but sometimes we want additional confidence
215
+ is_adv_loss += confidence
216
+
217
+ if is_adv_loss == 0:
218
+ is_adv_loss_grad = 0
219
+ else:
220
+ is_adv_loss.backward()
221
+ is_adv_loss_grad = x_p.grad
222
+
223
+ is_adv_loss = max(0, is_adv_loss)
224
+
225
+ s = max_ - min_
226
+ squared_l2_distance = np.sum( ((x_p - reconstructed_original) ** 2).cpu().detach().numpy() ) / s ** 2
227
+ total_loss = squared_l2_distance + const * is_adv_loss
228
+
229
+
230
+ squared_l2_distance_grad = (2 / s ** 2) * (x_p - reconstructed_original)
231
+
232
+ #print(is_adv_loss_grad)
233
+ total_loss_grad = squared_l2_distance_grad + const * is_adv_loss_grad
234
+ return total_loss, total_loss_grad
235
+
236
+ def pending_f(self, x_p):
237
+ """Pending is the loss function is less than 0
238
+ """
239
+ targetlabel_mask = torch.from_numpy(onehot_like(np.zeros(self.classnum), self.target))
240
+ secondlargest_mask = torch.from_numpy(np.ones(self.classnum)) - targetlabel_mask
241
+ targetlabel_mask = targetlabel_mask.to(self.device)
242
+ secondlargest_mask = secondlargest_mask.to(self.device)
243
+
244
+ Zx_i = np.max((self.model.get_logits(x_p).double().to(self.device) * secondlargest_mask).cpu().detach().numpy())
245
+ Zx_t = np.max((self.model.get_logits(x_p).double().to(self.device) * targetlabel_mask).cpu().detach().numpy())
246
+
247
+ if ( Zx_i - Zx_t < - self.confidence):
248
+ return True
249
+ else:
250
+ return False
251
+
252
+ def to_attack_space(self, x):
253
+ x = x.detach()
254
+ # map from [min_, max_] to [-1, +1]
255
+ # x'=(x- 0.5 * (max+min) / 0.5 * (max-min))
256
+ a = (self.clip_min + self.clip_max) / 2
257
+ b = (self.clip_max - self.clip_min) / 2
258
+ x = (x - a) / b
259
+
260
+ # from [-1, +1] to approx. (-1, +1)
261
+ x = x * 0.999999
262
+
263
+ # from (-1, +1) to (-inf, +inf)
264
+ return np.arctanh(x)
265
+
266
+ def to_model_space(self, x):
267
+ """Transforms an input from the attack space
268
+ to the model space. This transformation and
269
+ the returned gradient are elementwise."""
270
+
271
+ # from (-inf, +inf) to (-1, +1)
272
+ x = np.tanh(x)
273
+
274
+ grad = 1 - np.square(x)
275
+
276
+ # map from (-1, +1) to (min_, max_)
277
+ a = (self.clip_min + self.clip_max) / 2
278
+ b = (self.clip_max - self.clip_min) / 2
279
+ x = x * b + a
280
+
281
+ grad = grad * b
282
+ return x, grad
283
+
284
+
285
+
deeprobust/image/attack/deepfool.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from torch.autograd import Variable
3
+ import torch as torch
4
+ import copy
5
+ #from torch.autograd.gradcheck import zero_gradients
6
+
7
+ from deeprobust.image.attack.base_attack import BaseAttack
8
+
9
+ def zero_gradients(x):
10
+ if isinstance(x, torch.Tensor):
11
+ if x.grad is not None:
12
+ x.grad.detach_()
13
+ x.grad.zero_()
14
+ elif isinstance(x, collections.abc.Iterable):
15
+ for elem in x:
16
+ zero_gradients(elem)
17
+
18
+ class DeepFool(BaseAttack):
19
+ """DeepFool attack.
20
+ """
21
+
22
+ def __init__(self, model, device = 'cuda' ):
23
+ super(DeepFool, self).__init__(model, device)
24
+ self.model = model
25
+ self.device = device
26
+
27
+ def generate(self, image, label, **kwargs):
28
+ """
29
+ Call this function to generate adversarial examples.
30
+
31
+ Parameters
32
+ ----------
33
+ image : 1*H*W*3
34
+ original image
35
+ label : int
36
+ target label
37
+ kwargs :
38
+ user defined paremeters
39
+
40
+ Returns
41
+ -------
42
+ adv_img :
43
+ adversarial examples
44
+ """
45
+
46
+
47
+ #check type device
48
+ assert self.check_type_device(image, label)
49
+ is_cuda = torch.cuda.is_available()
50
+
51
+ if (is_cuda and self.device == 'cuda'):
52
+ self.image = image.cuda()
53
+ self.model = self.model.cuda()
54
+ else:
55
+ self.image = image
56
+
57
+ assert self.parse_params(**kwargs)
58
+
59
+ adv_img, self.r, self.ite = deepfool(self.model,
60
+ self.image,
61
+ self.num_classes,
62
+ self.overshoot,
63
+ self.max_iteration,
64
+ self.device)
65
+ return adv_img
66
+
67
+ def getpert(self):
68
+ return self.r, self.ite
69
+
70
+ def parse_params(self,
71
+ num_classes = 10,
72
+ overshoot = 0.02,
73
+ max_iteration = 50):
74
+ """
75
+ Parse the user defined parameters
76
+
77
+ Parameters
78
+ ----------
79
+ num_classes : int
80
+ limits the number of classes to test against. (default = 10)
81
+ overshoot : float
82
+ used as a termination criterion to prevent vanishing updates (default = 0.02).
83
+ max_iteration : int
84
+ maximum number of iteration for deepfool (default = 50)
85
+ """
86
+ self.num_classes = num_classes
87
+ self.overshoot = overshoot
88
+ self.max_iteration = max_iteration
89
+ return True
90
+
91
+ def deepfool(model, image, num_classes, overshoot, max_iter, device):
92
+ f_image = model.forward(image).data.cpu().numpy().flatten()
93
+ output = (np.array(f_image)).flatten().argsort()[::-1]
94
+
95
+ output = output[0:num_classes]
96
+ label = output[0]
97
+
98
+ input_shape = image.cpu().numpy().shape
99
+ x = copy.deepcopy(image).requires_grad_(True)
100
+ w = np.zeros(input_shape)
101
+ r_tot = np.zeros(input_shape)
102
+
103
+ fs = model.forward(x)
104
+ fs_list = [fs[0,output[k]] for k in range(num_classes)]
105
+ current_pred_label = label
106
+
107
+ for i in range(max_iter):
108
+
109
+ pert = np.inf
110
+ fs[0, output[0]].backward(retain_graph = True)
111
+ grad_orig = x.grad.data.cpu().numpy().copy()
112
+
113
+ for k in range(1, num_classes):
114
+ zero_gradients(x)
115
+
116
+ fs[0, output[k]].backward(retain_graph=True)
117
+ cur_grad = x.grad.data.cpu().numpy().copy()
118
+
119
+ # set new w_k and new f_k
120
+ w_k = cur_grad - grad_orig
121
+ f_k = (fs[0, output[k]] - fs[0, output[0]]).data.cpu().numpy()
122
+
123
+ pert_k = abs(f_k)/np.linalg.norm(w_k.flatten())
124
+
125
+ # determine which w_k to use
126
+ if pert_k < pert:
127
+ pert = pert_k
128
+ w = w_k
129
+
130
+ # compute r_i and r_tot
131
+ # Added 1e-4 for numerical stability
132
+ r_i = (pert+1e-4) * w / np.linalg.norm(w)
133
+ r_tot = np.float32(r_tot + r_i)
134
+
135
+ pert_image = image + (1+overshoot)*torch.from_numpy(r_tot).to(device)
136
+
137
+ x = pert_image.detach().requires_grad_(True)
138
+ fs = model.forward(x)
139
+
140
+ if (not np.argmax(fs.data.cpu().numpy().flatten()) == label):
141
+ break
142
+
143
+
144
+ r_tot = (1+overshoot)*r_tot
145
+
146
+ return pert_image, r_tot, i
147
+
deeprobust/image/defense/advexample_pgd.png ADDED
deeprobust/image/netmodels/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ #__init__.py
2
+ from deeprobust.image.netmodels import CNN
3
+ from deeprobust.image.netmodels import resnet
4
+ from deeprobust.image.netmodels import YOPOCNN
5
+ from deeprobust.image.netmodels import train_model
6
+
7
+ __all__ = ['CNNmodel','resnet','YOPOCNN','train_model']
deeprobust/image/netmodels/densenet.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This is an implementation of DenseNet model.
3
+
4
+ Reference
5
+ ---------
6
+ ..[1]Huang, Gao, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q. Weinberger. "Densely connected convolutional networks." In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4700-4708. 2017.
7
+ ..[2]Original implementation: https://github.com/kuangliu/pytorch-cifar
8
+ """
9
+ import math
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+
15
+
16
+ class Bottleneck(nn.Module):
17
+ def __init__(self, in_planes, growth_rate):
18
+ super(Bottleneck, self).__init__()
19
+ self.bn1 = nn.BatchNorm2d(in_planes)
20
+ self.conv1 = nn.Conv2d(in_planes, 4*growth_rate, kernel_size=1, bias=False)
21
+ self.bn2 = nn.BatchNorm2d(4*growth_rate)
22
+ self.conv2 = nn.Conv2d(4*growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)
23
+
24
+ def forward(self, x):
25
+ out = self.conv1(F.relu(self.bn1(x)))
26
+ out = self.conv2(F.relu(self.bn2(out)))
27
+ out = torch.cat([out,x], 1)
28
+ return out
29
+
30
+
31
+ class Transition(nn.Module):
32
+ def __init__(self, in_planes, out_planes):
33
+ super(Transition, self).__init__()
34
+ self.bn = nn.BatchNorm2d(in_planes)
35
+ self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
36
+
37
+ def forward(self, x):
38
+ out = self.conv(F.relu(self.bn(x)))
39
+ out = F.avg_pool2d(out, 2)
40
+ return out
41
+
42
+
43
+ class DenseNet(nn.Module):
44
+ """DenseNet.
45
+
46
+ """
47
+
48
+ def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
49
+ super(DenseNet, self).__init__()
50
+ self.growth_rate = growth_rate
51
+
52
+ num_planes = 2*growth_rate
53
+ self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
54
+
55
+ self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
56
+ num_planes += nblocks[0]*growth_rate
57
+ out_planes = int(math.floor(num_planes*reduction))
58
+ self.trans1 = Transition(num_planes, out_planes)
59
+ num_planes = out_planes
60
+
61
+ self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
62
+ num_planes += nblocks[1]*growth_rate
63
+ out_planes = int(math.floor(num_planes*reduction))
64
+ self.trans2 = Transition(num_planes, out_planes)
65
+ num_planes = out_planes
66
+
67
+ self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
68
+ num_planes += nblocks[2]*growth_rate
69
+ out_planes = int(math.floor(num_planes*reduction))
70
+ self.trans3 = Transition(num_planes, out_planes)
71
+ num_planes = out_planes
72
+
73
+ self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
74
+ num_planes += nblocks[3]*growth_rate
75
+
76
+ self.bn = nn.BatchNorm2d(num_planes)
77
+ self.linear = nn.Linear(num_planes, num_classes)
78
+
79
+ def _make_dense_layers(self, block, in_planes, nblock):
80
+ layers = []
81
+ for i in range(nblock):
82
+ layers.append(block(in_planes, self.growth_rate))
83
+ in_planes += self.growth_rate
84
+ return nn.Sequential(*layers)
85
+
86
+ def forward(self, x):
87
+ out = self.conv1(x)
88
+ out = self.trans1(self.dense1(out))
89
+ out = self.trans2(self.dense2(out))
90
+ out = self.trans3(self.dense3(out))
91
+ out = self.dense4(out)
92
+ out = F.avg_pool2d(F.relu(self.bn(out)), 4)
93
+ out = out.view(out.size(0), -1)
94
+ out = self.linear(out)
95
+ return out
96
+
97
+ def DenseNet121():
98
+ """DenseNet121.
99
+ """
100
+ return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32)
101
+
102
+ def DenseNet169():
103
+ """DenseNet169.
104
+ """
105
+ return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32)
106
+
107
+ def DenseNet201():
108
+ """DenseNet201.
109
+ """
110
+ return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32)
111
+
112
+ def DenseNet161():
113
+ """DenseNet161.
114
+ """
115
+ return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48)
116
+
117
+ def densenet_cifar():
118
+ """densenet_cifar.
119
+ """
120
+ return DenseNet(Bottleneck, [6,12,24,16], growth_rate=12)
121
+
122
+ def test(model, device, test_loader):
123
+ """test.
124
+
125
+ Parameters
126
+ ----------
127
+ model :
128
+ model
129
+ device :
130
+ device
131
+ test_loader :
132
+ test_loader
133
+ """
134
+ model.eval()
135
+
136
+ test_loss = 0
137
+ correct = 0
138
+ with torch.no_grad():
139
+ for data, target in test_loader:
140
+ data, target = data.to(device), target.to(device)
141
+ output = model(data)
142
+ #test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
143
+
144
+ test_loss += F.cross_entropy(output, target)
145
+ pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
146
+ correct += pred.eq(target.view_as(pred)).sum().item()
147
+
148
+ test_loss /= len(test_loader.dataset)
149
+
150
+ print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
151
+ test_loss, correct, len(test_loader.dataset),
152
+ 100. * correct / len(test_loader.dataset)))
153
+
154
+ def train(model, device, train_loader, optimizer, epoch):
155
+ """train.
156
+
157
+ Parameters
158
+ ----------
159
+ model :
160
+ model
161
+ device :
162
+ device
163
+ train_loader :
164
+ train_loader
165
+ optimizer :
166
+ optimizer
167
+ epoch :
168
+ epoch
169
+ """
170
+ model.train()
171
+
172
+ # lr = util.adjust_learning_rate(optimizer, epoch, args) # don't need it if we use Adam
173
+
174
+ for batch_idx, (data, target) in enumerate(train_loader):
175
+ data, target = torch.tensor(data).to(device), torch.tensor(target).to(device)
176
+ optimizer.zero_grad()
177
+ output = model(data)
178
+ # loss = F.nll_loss(output, target)
179
+ loss = F.cross_entropy(output, target)
180
+ loss.backward()
181
+ optimizer.step()
182
+ if batch_idx % 10 == 0:
183
+ print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
184
+ epoch, batch_idx * len(data), len(train_loader.dataset),
185
+ 100. * batch_idx / len(train_loader), loss.item()/data.shape[0]))
deeprobust/image/netmodels/preact_resnet.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This is a reimplementation of Pre-activation ResNet.
3
+ """
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+
8
+
9
+ class PreActBlock(nn.Module):
10
+ '''Pre-activation version of the BasicBlock.'''
11
+ expansion = 1
12
+
13
+ def __init__(self, in_planes, planes, stride=1):
14
+ super(PreActBlock, self).__init__()
15
+ self.bn1 = nn.BatchNorm2d(in_planes)
16
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
17
+ self.bn2 = nn.BatchNorm2d(planes)
18
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
19
+
20
+ if stride != 1 or in_planes != self.expansion*planes:
21
+ self.shortcut = nn.Sequential(
22
+ nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
23
+ )
24
+
25
+ def forward(self, x):
26
+ out = F.relu(self.bn1(x))
27
+ shortcut = self.shortcut(x) if hasattr(self, 'shortcut') else x
28
+ out = self.conv1(out)
29
+ out = self.conv2(F.relu(self.bn2(out)))
30
+ out += shortcut
31
+ return out
32
+
33
+
34
+ class PreActBottleneck(nn.Module):
35
+ '''Pre-activation version of the original Bottleneck module.'''
36
+ expansion = 4
37
+
38
+ def __init__(self, in_planes, planes, stride=1):
39
+ super(PreActBottleneck, self).__init__()
40
+ self.bn1 = nn.BatchNorm2d(in_planes)
41
+ self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
42
+ self.bn2 = nn.BatchNorm2d(planes)
43
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
44
+ self.bn3 = nn.BatchNorm2d(planes)
45
+ self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
46
+
47
+ if stride != 1 or in_planes != self.expansion*planes:
48
+ self.shortcut = nn.Sequential(
49
+ nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
50
+ )
51
+
52
+ def forward(self, x):
53
+ out = F.relu(self.bn1(x))
54
+ shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
55
+ out = self.conv1(out)
56
+ out = self.conv2(F.relu(self.bn2(out)))
57
+ out = self.conv3(F.relu(self.bn3(out)))
58
+ out += shortcut
59
+ return out
60
+
61
+
62
+ class PreActResNet(nn.Module):
63
+ """PreActResNet.
64
+ """
65
+
66
+ def __init__(self, block, num_blocks, num_classes=10):
67
+ super(PreActResNet, self).__init__()
68
+ self.in_planes = 64
69
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
70
+ self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
71
+ self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
72
+ self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
73
+ self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
74
+ self.bn = nn.BatchNorm2d(512 * block.expansion)
75
+ self.linear = nn.Linear(512 * block.expansion, num_classes)
76
+
77
+ def _make_layer(self, block, planes, num_blocks, stride):
78
+ strides = [stride] + [1]*(num_blocks-1)
79
+ layers = []
80
+ for stride in strides:
81
+ layers.append(block(self.in_planes, planes, stride))
82
+ self.in_planes = planes * block.expansion
83
+ return nn.Sequential(*layers)
84
+
85
+ def forward(self, x):
86
+ out = self.conv1(x)
87
+ out = self.layer1(out)
88
+ out = self.layer2(out)
89
+ out = self.layer3(out)
90
+ out = self.layer4(out)
91
+ out = F.relu(self.bn(out))
92
+ out = F.avg_pool2d(out, 4)
93
+ out = out.view(out.size(0), -1)
94
+ out = self.linear(out)
95
+ return out
96
+
97
+
98
+ def PreActResNet18():
99
+ """PreActResNet18.
100
+ """
101
+ return PreActResNet(PreActBlock, [2,2,2,2])
docs/source/deeprobust.graph.defense.rst ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ deeprobust.graph.defense package
2
+ ================================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ deeprobust.graph.defense.adv\_training module
8
+ ---------------------------------------------
9
+
10
+ .. automodule:: deeprobust.graph.defense.adv_training
11
+ :members:
12
+
13
+ deeprobust.graph.defense.gcn module
14
+ -----------------------------------
15
+
16
+ .. automodule:: deeprobust.graph.defense.gcn
17
+ :members:
18
+
19
+ deeprobust.graph.defense.gcn\_preprocess module
20
+ -----------------------------------------------
21
+
22
+ .. automodule:: deeprobust.graph.defense.gcn_preprocess
23
+ :members:
24
+
25
+ deeprobust.graph.defense.pgd module
26
+ -----------------------------------
27
+
28
+ .. automodule:: deeprobust.graph.defense.pgd
29
+ :members:
30
+
31
+ deeprobust.graph.defense.prognn module
32
+ --------------------------------------
33
+
34
+ .. automodule:: deeprobust.graph.defense.prognn
35
+ :members:
36
+
37
+ deeprobust.graph.defense.r\_gcn module
38
+ --------------------------------------
39
+
40
+ .. automodule:: deeprobust.graph.defense.r_gcn
41
+ :members:
42
+
43
+ deeprobust.graph.defense.median_gcn module
44
+ ------------------------------------------
45
+
46
+ .. automodule:: deeprobust.graph.defense.median_gcn
47
+ :members:
48
+
49
+
50
+ Module contents
51
+ ---------------
52
+
53
+ .. automodule:: deeprobust.graph.defense
54
+ :members:
docs/source/deeprobust.graph.global_attack.rst ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ deeprobust.graph.global\_attack package
2
+ =======================================
3
+
4
+ Submodules
5
+ ----------
6
+
7
+ deeprobust.graph.global\_attack.base\_attack module
8
+ ---------------------------------------------------
9
+
10
+ .. automodule:: deeprobust.graph.global_attack.base_attack
11
+ :members:
12
+
13
+ deeprobust.graph.global\_attack.dice module
14
+ -------------------------------------------
15
+
16
+ .. automodule:: deeprobust.graph.global_attack.dice
17
+ :members:
18
+
19
+ deeprobust.graph.global\_attack.mettack module
20
+ ----------------------------------------------
21
+
22
+ .. automodule:: deeprobust.graph.global_attack.mettack
23
+ :members:
24
+
25
+ deeprobust.graph.global\_attack.nipa module
26
+ -------------------------------------------
27
+
28
+ .. automodule:: deeprobust.graph.global_attack.nipa
29
+ :members:
30
+
31
+ deeprobust.graph.global\_attack.random_attack module
32
+ ----------------------------------------------------
33
+
34
+ .. automodule:: deeprobust.graph.global_attack.random_attack
35
+ :members:
36
+
37
+ deeprobust.graph.global\_attack.topology\_attack module
38
+ -------------------------------------------------------
39
+
40
+ .. automodule:: deeprobust.graph.global_attack.topology_attack
41
+ :members:
42
+
43
+
44
+ Module contents
45
+ ---------------
46
+
47
+ .. automodule:: deeprobust.graph.global_attack
48
+ :members:
docs/source/deeprobust.graph.rst ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ deeprobust.graph package
2
+ ========================
3
+
4
+ Subpackages
5
+ -----------
6
+
7
+ .. toctree::
8
+
9
+ deeprobust.graph.data
10
+ deeprobust.graph.defense
11
+ deeprobust.graph.global_attack
12
+ deeprobust.graph.rl
13
+ deeprobust.graph.targeted_attack
14
+
15
+ Submodules
16
+ ----------
17
+
18
+ deeprobust.graph.black\_box module
19
+ ----------------------------------
20
+
21
+ .. automodule:: deeprobust.graph.black_box
22
+ :members:
23
+
24
+ deeprobust.graph.utils module
25
+ -----------------------------
26
+
27
+ .. automodule:: deeprobust.graph.utils
28
+ :members:
29
+
30
+
31
+ Module contents
32
+ ---------------
33
+
34
+ .. automodule:: deeprobust.graph
35
+ :members:
docs/source/deeprobust.rst ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ deeprobust package
2
+ ==================
3
+
4
+ Subpackages
5
+ -----------
6
+
7
+ .. toctree::
8
+
9
+ deeprobust.graph
10
+ deeprobust.image
11
+
12
+ Module contents
13
+ ---------------
14
+
15
+ .. automodule:: deeprobust
16
+ :members:
examples/graph/cgscore_datasets_batch.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Compute CG-scores for a GCN on pre-perturbed graphs (edge-batched variant).
# author: Yaning
import torch
import numpy as np
import torch.nn.functional as Fd
from deeprobust.graph.defense import GCNJaccard, GCN
from deeprobust.graph.defense import GCNScore
from deeprobust.graph.utils import *
from deeprobust.graph.data import Dataset, PrePtbDataset
from scipy.sparse import csr_matrix
import argparse
import pickle
from deeprobust.graph import utils
from collections import defaultdict
from tqdm import tqdm

parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--dataset', type=str, default='pubmed',
                    choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'],
                    help='dataset')
# Fixed typo in help text: "pertubation" -> "perturbation".
parser.add_argument('--ptb_rate', type=float, default=0.05, help='perturbation rate')

args = parser.parse_args()
args.cuda = torch.cuda.is_available()
print('cuda: %s' % args.cuda)
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

# The random seed controls the train/val/test split, so it must match the
# seed used when the perturbed graph was generated.
np.random.seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# setting='prognn' loads the fixed data splits released with Pro-GNN.
# (Alternative: Dataset(root='/tmp/', name=args.dataset, setting='nettack', seed=15))
data = Dataset(root='/tmp/', name=args.dataset, setting='prognn')
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test

# Load the pre-computed meta-attack perturbed adjacency at the requested rate.
perturbed_data = PrePtbDataset(root='/tmp/',
                               name=args.dataset,
                               attack_method='meta',
                               ptb_rate=args.ptb_rate)

perturbed_adj = perturbed_data.adj
# perturbed_adj = adj
49
def save_cg_scores(cg_scores, filename="cg_scores.npy"):
    """Persist CG-scores to *filename* with ``np.save``."""
    np.save(filename, cg_scores)
    # Bug fix: the message previously printed the literal "(unknown)" instead
    # of interpolating the target path.
    print(f"CG-scores saved to {filename}")
52
+
53
def load_cg_scores_numpy(filename="cg_scores.npy"):
    """Load CG-scores previously written by ``save_cg_scores``."""
    cg_scores = np.load(filename, allow_pickle=True)
    # Bug fix: interpolate the actual path instead of printing "(unknown)".
    print(f"CG-scores loaded from {filename}")
    return cg_scores
57
+
58
+
59
+ import torch
60
+ import numpy as np
61
+ from collections import defaultdict
62
+ from tqdm import tqdm
63
+
64
+
65
def calc_cg_score_gnn_with_sampling(
    A, X, labels, device, rep_num=1, unbalance_ratio=1, sub_term=False, batch_size=64
):
    """
    Optimized CG-score calculation with edge batching and GPU acceleration.

    Args:
        A (torch.Tensor): dense (normalized) adjacency matrix, N x N.
        X (torch.Tensor): node feature matrix, N x F.
        labels (torch.Tensor): integer node labels, length N.
        device (torch.device): device all tensors are moved to.
        rep_num (int): number of Monte-Carlo repetitions.
        unbalance_ratio (float): negative-to-positive sampling ratio.
        sub_term (bool): if True return the dict of all score terms,
            otherwise only the symmetric "vi" matrix.
        batch_size (int): number of candidate edges per batched update.

    Returns:
        dict or np.ndarray: CG-score terms, or just the "vi" matrix.
    """

    N = A.shape[0]
    cg_scores = {
        "vi": np.zeros((N, N)),
        "ab": np.zeros((N, N)),
        "a2": np.zeros((N, N)),
        "b2": np.zeros((N, N)),
        "times": np.zeros((N, N)),
    }

    A = A.to(device)
    X = X.to(device)
    labels = labels.to(device)

    @torch.no_grad()
    def normalize(tensor):
        # Row-normalize; the epsilon guards against all-zero rows.
        return tensor / (torch.norm(tensor, dim=1, keepdim=True) + 1e-8)

    for _ in range(rep_num):
        AX = torch.matmul(A, X)
        norm_AX = normalize(AX)

        # Step 1: group node representations by label (vectorized, on GPU).
        unique_labels = torch.unique(labels)
        label_to_indices = {
            label.item(): (labels == label).nonzero(as_tuple=True)[0] for label in unique_labels
        }
        dataset = {label: norm_AX[indices] for label, indices in label_to_indices.items()}

        # Step 2: pre-compute negative pools (all nodes of every other label).
        neg_samples_dict = {}
        neg_indices_dict = {}
        for label in unique_labels:
            label = label.item()
            mask = labels != label
            neg_samples_dict[label] = norm_AX[mask]
            neg_indices_dict[label] = mask.nonzero(as_tuple=True)[0]

        for curr_label in tqdm(unique_labels.tolist(), desc="Label groups"):
            curr_samples = dataset[curr_label]
            curr_indices = label_to_indices[curr_label]
            curr_num = len(curr_samples)

            # Shuffle the positive samples of the current label.
            chosen_curr_idx = torch.randperm(curr_num, device=device)
            chosen_curr_samples = curr_samples[chosen_curr_idx]
            chosen_curr_indices = curr_indices[chosen_curr_idx]

            # Draw negatives according to the requested unbalance ratio.
            neg_samples = neg_samples_dict[curr_label]
            neg_indices = neg_indices_dict[curr_label]
            neg_num = min(int(curr_num * unbalance_ratio), len(neg_samples))
            rand_idx = torch.randperm(len(neg_samples), device=device)[:neg_num]
            chosen_neg_samples = neg_samples[rand_idx]
            chosen_neg_indices = neg_indices[rand_idx]

            combined_samples = torch.cat([chosen_curr_samples, chosen_neg_samples], dim=0)
            y = torch.cat([torch.ones(len(chosen_curr_samples)), -torch.ones(neg_num)], dim=0).to(device)

            # Reference error with the full graph.
            H_inner = torch.matmul(combined_samples, combined_samples.T)
            H_inner = torch.clamp(H_inner, min=-1.0, max=1.0)  # keep acos in domain
            H = H_inner * (np.pi - torch.acos(H_inner)) / (2 * np.pi)
            H.fill_diagonal_(0.5)
            H += 1e-6 * torch.eye(H.size(0), device=device)  # ridge for invertibility
            invH = torch.inverse(H)
            original_error = y @ (invH @ y)

            # Step 3: collect candidate edges incident to the positive nodes.
            edge_batch = []
            for idx_i in chosen_curr_indices.tolist():
                # Vectorized neighbor lookup instead of scanning every j > idx_i.
                neighbors = (A[idx_i] != 0).nonzero(as_tuple=True)[0]
                edge_batch.extend((idx_i, j) for j in neighbors[neighbors > idx_i].tolist())

            # Step 4: batched leave-one-edge-out updates.
            for k in tqdm(range(0, len(edge_batch), batch_size), desc="Edge batches", leave=False):
                batch = edge_batch[k : k + batch_size]
                B = len(batch)

                norm_AX1_batch = norm_AX.repeat(B, 1, 1).clone()
                for b, (i, j) in enumerate(batch):
                    # Remove edge (i, j)'s contribution from both endpoints.
                    AX1_i = AX[i] - A[i, j] * X[j]
                    AX1_j = AX[j] - A[j, i] * X[i]
                    norm_AX1_batch[b, i] = AX1_i / (torch.norm(AX1_i) + 1e-8)
                    norm_AX1_batch[b, j] = AX1_j / (torch.norm(AX1_j) + 1e-8)

                sample_idx = chosen_curr_indices.tolist() + chosen_neg_indices.tolist()
                sample_batch = norm_AX1_batch[:, sample_idx, :]  # [B, M, D]

                H_inner = torch.matmul(sample_batch, sample_batch.transpose(1, 2))
                H_inner = torch.clamp(H_inner, min=-1.0, max=1.0)
                H = H_inner * (np.pi - torch.acos(H_inner)) / (2 * np.pi)
                # Bug fix: Tensor.copy_ requires a tensor argument, so the
                # original `H.diagonal(...).copy_(0.5)` raised a TypeError.
                # Also set the diagonal *before* adding the ridge, matching
                # the reference computation above (the order was reversed).
                H.diagonal(dim1=-2, dim2=-1).fill_(0.5)
                eye = torch.eye(H.size(-1), device=device).unsqueeze(0).expand_as(H)
                H = H + 1e-6 * eye

                invH = torch.inverse(H)
                y_expanded = y.unsqueeze(0).expand(B, -1)
                error_A1 = torch.einsum("bi,bij,bj->b", y_expanded, invH, y_expanded)

                for b, (i, j) in enumerate(batch):
                    score = (original_error - error_A1[b]).item()
                    # Bug fix: accumulate symmetrically — the original used
                    # `+=` for [i, j] but plain `=` for [j, i].
                    cg_scores["vi"][i, j] += score
                    cg_scores["vi"][j, i] += score
                    cg_scores["times"][i, j] += 1
                    cg_scores["times"][j, i] += 1

    # Average each accumulated term by the visit count (avoid division by zero).
    for key in cg_scores:
        if key != "times":
            cg_scores[key] = cg_scores[key] / np.where(cg_scores["times"] > 0, cg_scores["times"], 1)

    return cg_scores if sub_term else cg_scores["vi"]
184
+
185
+
186
+
187
def is_symmetric_sparse(adj):
    """Return True iff the sparse matrix *adj* equals its transpose."""
    # (adj != adj.T) is itself a sparse matrix whose stored entries are the
    # mismatching positions, so symmetry means it has zero nonzeros.
    mismatches = adj != adj.transpose()
    return mismatches.nnz == 0
193
+
194
def make_symmetric_sparse(adj):
    """Symmetrize a sparse adjacency matrix by averaging it with its transpose."""
    return (adj + adj.transpose()) / 2
201
+
202
# Symmetrize the perturbed adjacency before scoring.
perturbed_adj = make_symmetric_sparse(perturbed_adj)

# Convert scipy matrices to torch tensors if needed; otherwise just move
# the existing tensors onto the target device.
if type(perturbed_adj) is not torch.Tensor:
    features, perturbed_adj, labels = utils.to_tensor(features, perturbed_adj, labels)
else:
    features = features.to(device)
    perturbed_adj = perturbed_adj.to(device)
    labels = labels.to(device)

# Normalize the adjacency, taking the sparse path when applicable.
if utils.is_sparse_tensor(perturbed_adj):
    adj_norm = utils.normalize_adj_tensor(perturbed_adj, sparse=True)
else:
    adj_norm = utils.normalize_adj_tensor(perturbed_adj)

# The CG-score routine works on dense tensors.
features = features.to_dense()
perturbed_adj = adj_norm.to_dense()

# Compute and persist the CG-scores for this dataset/perturbation setting.
calc_cg_score = calc_cg_score_gnn_with_sampling(perturbed_adj, features, labels, device,
                                                rep_num=1, unbalance_ratio=3,
                                                sub_term=False, batch_size=512)
save_cg_scores(calc_cg_score, filename="pubmed_0.05.npy")
# print("completed")
examples/graph/cgscore_experiments/huggingface/download.py ADDED
File without changes
examples/graph/cgscore_experiments/huggingface/upload.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from huggingface_hub import HfApi

# Initialize the Hugging Face API client.
api = HfApi()

# Upload parameters.
repo_id = "Yaning1001/CGSCORE"
repo_type = "model"
folder_path = "/home/yiren/new_ssd2/chunhui/yaning/project/cgscore/"
# NOTE(review): a hard-coded API token was previously committed here in a
# comment; credentials must never be checked in — authenticate with
# `huggingface-cli login` (or the HF_TOKEN environment variable) instead,
# and revoke the leaked token.

# Upload the whole folder in a single resumable call.
api.upload_large_folder(
    repo_id=repo_id,
    repo_type=repo_type,
    folder_path=folder_path,
)
+ )
examples/graph/perturbated_data/metattack.py ADDED
File without changes
examples/graph/read_score.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import torch
3
+
4
def remove_least_important_edges(adj, cgscore, remove_ratio=0.8):
    """
    Remove the least important edges based on CGScore.

    Args:
        adj (torch.Tensor): Original adjacency matrix (N x N).
        cgscore (np.ndarray): CGScore matrix (N x N).
        remove_ratio (float): Ratio of existing edges to remove (default: 0.8).
            (Docstring fix: this was previously documented as ``keep_ratio``.)

    Returns:
        torch.Tensor: Adjusted adjacency matrix after removing edges; the
        input ``adj`` is not modified.
    """
    # Convert CGScore from numpy to a torch tensor for fancy indexing.
    cgscore = torch.tensor(cgscore, dtype=torch.float32)

    assert adj.shape == cgscore.shape, "adj and cgscore must have the same shape"
    N = adj.shape[0]

    # Upper-triangular (i < j) candidate positions, diagonal excluded.
    triu_indices = torch.triu_indices(N, N, offset=1)
    triu_scores = cgscore[triu_indices[0], triu_indices[1]]
    triu_adj = adj[triu_indices[0], triu_indices[1]]

    # Keep only positions where an edge actually exists.
    mask = triu_adj > 0
    triu_scores = triu_scores[mask]
    triu_indices = triu_indices[:, mask]

    # Ascending sort: lowest CG-scores (least important edges) come first.
    sorted_indices = torch.argsort(triu_scores)

    num_edges_to_remove = int(len(sorted_indices) * remove_ratio)
    print("len(sorted_indices)", len(sorted_indices))
    print("remove_ratio:", remove_ratio)  # typo fix: was "remove_radio"
    print("num_edges_to_remove", num_edges_to_remove)
    edges_to_remove = sorted_indices[:num_edges_to_remove]

    # Zero out the selected edges symmetrically on a copy (vectorized
    # instead of the original per-edge Python loop).
    adj_new = adj.clone()
    rows = triu_indices[0, edges_to_remove]
    cols = triu_indices[1, edges_to_remove]
    adj_new[rows, cols] = 0
    adj_new[cols, rows] = 0

    return adj_new
53
+
54
+
55
# Toy adjacency and CGScore matrices used to demo the edge-removal routine.
adj = torch.tensor([
    [0, 1, 1, 0],
    [1, 0, 1, 1],
    [1, 1, 0, 1],
    [0, 1, 1, 0]
], dtype=torch.float32)

cgscore = np.array([
    [0.0, 0.8, 0.6, 0.0],
    [0.8, 0.0, 0.1, 1.2],
    [0.6, 0.7, 0.0, 1.9],
    [0.0, 1.2, 1.1, 0.0]
], dtype=np.float32)

# Remove the 20% lowest-scoring edges from the toy graph.
adj_new = remove_least_important_edges(adj, cgscore, remove_ratio=0.2)

# Show before/after (original output strings preserved).
print("原始邻接矩阵:")
print(adj)
print("调整后的邻接矩阵:")
print(adj_new)
examples/graph/test_airgnn.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """"test different models on noise features"""
2
+ import argparse
3
+ import numpy as np
4
+ from torch_geometric.datasets import Planetoid
5
+ import torch_geometric.transforms as T
6
+ from deeprobust.graph.defense_pyg import AirGNN, GCN, APPNP, GAT, SAGE, GPRGNN
7
+ import torch
8
+ import random
9
+ import os.path as osp
10
+ from deeprobust.graph.utils import add_feature_noise, add_feature_noise_test, get_perf
11
+ import torch.nn.functional as F
12
+
13
+ parser = argparse.ArgumentParser()
14
+ parser.add_argument('--gpu_id', type=int, default=0, help='gpu id')
15
+ parser.add_argument('--dataset', type=str, default='cora')
16
+ parser.add_argument('--epochs', type=int, default=10)
17
+ parser.add_argument('--lr', type=float, default=0.01)
18
+ parser.add_argument('--hidden', type=int, default=64)
19
+ parser.add_argument('--weight_decay', type=float, default=5e-4)
20
+ parser.add_argument('--with_bn', type=int, default=0)
21
+ parser.add_argument('--seed', type=int, default=0, help='Random seed.')
22
+ parser.add_argument('--nlayers', type=int, default=2)
23
+ parser.add_argument('--model', type=str, default='AirGNN')
24
+ parser.add_argument('--debug', type=float, default=0)
25
+ parser.add_argument('--dropout', type=float, default=0.5)
26
+ parser.add_argument('--noise_feature', type=float, default=0.3)
27
+ parser.add_argument('--lambda_', type=float, default=0)
28
+ args = parser.parse_args()
29
+
30
+ torch.cuda.set_device(args.gpu_id)
31
+
32
+ print('===========')
33
+
34
+ # random seed setting
35
+ random.seed(args.seed)
36
+ np.random.seed(args.seed)
37
+ torch.manual_seed(args.seed)
38
+ torch.cuda.manual_seed(args.seed)
39
+
40
def get_dataset(name, normalize_features=True, transform=None, if_dpr=True):
    """
    Load a Planetoid dataset by name.

    Args:
        name (str): one of 'cora', 'citeseer', 'pubmed'.
        normalize_features (bool): if True, attach a NormalizeFeatures transform.
        transform: optional extra transform, composed after normalization.
        if_dpr (bool): unused here; kept for signature compatibility.

    Raises:
        NotImplementedError: for any dataset outside the Planetoid trio.
    """
    path = osp.join(osp.dirname(osp.realpath(__file__)), 'data', name)
    if name in ['cora', 'citeseer', 'pubmed']:
        dataset = Planetoid(path, name)
    else:
        raise NotImplementedError
    # Bug fix: `normalize_features` and `transform` were previously ignored;
    # defaults reproduce the old behavior (NormalizeFeatures only).
    if normalize_features and transform is not None:
        dataset.transform = T.Compose([T.NormalizeFeatures(), transform])
    elif normalize_features:
        dataset.transform = T.NormalizeFeatures()
    elif transform is not None:
        dataset.transform = transform
    return dataset

dataset = get_dataset(args.dataset)
data = dataset[0]
51
+
52
def pretrain_model():
    """Build the model selected by ``args.model``, train it on ``data``,
    print its test-set accuracy, and return the trained model."""
    feat, labels = data.x, data.y
    nclass = max(labels).item() + 1

    if args.model == "AirGNN":
        # AirGNN-specific hyper-parameters.
        args.dropout = 0.2
        args.lambda_amp = 0.5
        args.alpha = 0.1
        model = AirGNN(nfeat=feat.shape[1], nhid=args.hidden, dropout=args.dropout, with_bn=args.with_bn,
                       K=10, weight_decay=args.weight_decay, args=args, nlayers=args.nlayers,
                       nclass=nclass, device=device).to(device)
    elif args.model == "GCN":
        model = GCN(nfeat=feat.shape[1], nhid=args.hidden, dropout=args.dropout,
                    nlayers=args.nlayers, with_bn=args.with_bn,
                    weight_decay=args.weight_decay, nclass=nclass,
                    device=device).to(device)
    elif args.model == "GAT":
        # GAT-specific hyper-parameters.
        args.dropout = 0.5
        args.hidden = 8
        model = GAT(nfeat=feat.shape[1], nhid=args.hidden, heads=8, lr=0.005, nlayers=args.nlayers,
                    nclass=nclass, with_bn=args.with_bn, weight_decay=args.weight_decay,
                    dropout=args.dropout, device=device).to(device)
    elif args.model == "SAGE":
        model = SAGE(feat.shape[1], 32, nclass, num_layers=5,
                     dropout=0.0, lr=0.01, weight_decay=0, device=device).to(device)
    elif args.model == "GPR":
        model = GPRGNN(feat.shape[1], 32, nclass, dropout=0.0,
                       lr=0.01, weight_decay=0, device=device).to(device)
    else:
        raise NotImplementedError

    print(model)
    model.fit(data, train_iters=1000, patience=1000, verbose=True)

    model.eval()
    model.data = data.to(device)
    output = model.predict()
    labels = labels.to(device)
    print("Test set results:", get_perf(output, labels, data.test_mask, verbose=0)[1])
    return model

device = 'cuda'
model = pretrain_model()

# Inject feature noise into a fraction of the test nodes, then re-evaluate.
if args.noise_feature > 0:
    feat_noise, noisy_nodes = add_feature_noise_test(data,
                    args.noise_feature, args.seed)

output = model.predict()
labels = data.y.to(device)
print("After noise, test set results:", get_perf(output, labels, data.test_mask, verbose=0)[1])
print('Validation:', get_perf(output, labels, data.val_mask, verbose=0)[1])
# Typo fix in log label: was "Abnomral".
print('Abnormal test nodes:', get_perf(output, labels, noisy_nodes, verbose=0)[1])
print('Normal test nodes:', get_perf(output, labels, data.test_mask & (~noisy_nodes), verbose=0)[1])
102
+
103
+
examples/graph/test_dice.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from deeprobust.graph.defense import GCN
from deeprobust.graph.global_attack import DICE
from deeprobust.graph.utils import *
from deeprobust.graph.data import Dataset

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--dataset', type=str, default='citeseer', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
# Fixed typo in help text: "pertubation" -> "perturbation".
parser.add_argument('--ptb_rate', type=float, default=0.05, help='perturbation rate')

args = parser.parse_args()
args.cuda = torch.cuda.is_available()
print('cuda: %s' % args.cuda)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Seed all RNGs for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

data = Dataset(root='/tmp/', name=args.dataset)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
idx_unlabeled = np.union1d(idx_val, idx_test)

# Setup attack model: DICE deletes intra-class and inserts inter-class edges.
model = DICE()

# Perturbation budget: a fraction of the undirected edge count.
n_perturbations = int(args.ptb_rate * (adj.sum() // 2))

model.attack(adj, labels, n_perturbations)
modified_adj = model.modified_adj

adj, features, labels = preprocess(adj, features, labels, preprocess_adj=False, sparse=True, device=device)

modified_adj = normalize_adj(modified_adj)
modified_adj = sparse_mx_to_torch_sparse_tensor(modified_adj)
modified_adj = modified_adj.to(device)

def test(adj):
    """Train a fresh 2-layer GCN on *adj* and report test loss/accuracy."""
    # adj = normalize_adj_tensor(adj)
    gcn = GCN(nfeat=features.shape[1],
              nhid=16,
              nclass=labels.max().item() + 1,
              dropout=0.5, device=device)
    gcn = gcn.to(device)

    # Cleanup: the original created an unused Adam optimizer here;
    # gcn.fit manages its own optimizer internally.
    gcn.fit(features, adj, labels, idx_train)  # train without model picking
    # gcn.fit(features, adj, labels, idx_train, idx_val)  # train with validation model picking
    output = gcn.output
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))

    return acc_test.item()

def main():
    """Compare GCN accuracy on the clean vs. DICE-perturbed graph."""
    print('=== testing GCN on original(clean) graph ===')
    test(adj)
    print('=== testing GCN on perturbed graph ===')
    test(modified_adj)


if __name__ == '__main__':
    main()
80
+
examples/graph/test_fga.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import numpy as np
import torch.nn.functional as F
import torch.optim as optim
from deeprobust.graph.defense import GCN
from deeprobust.graph.targeted_attack import FGA
from deeprobust.graph.utils import *
from deeprobust.graph.data import Dataset
from tqdm import tqdm
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--dataset', type=str, default='citeseer', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
# Fixed typo in help text: "pertubation" -> "perturbation".
parser.add_argument('--ptb_rate', type=float, default=0.05, help='perturbation rate')

args = parser.parse_args()
args.cuda = torch.cuda.is_available()
print('cuda: %s' % args.cuda)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

data = Dataset(root='/tmp/', name=args.dataset)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test

idx_unlabeled = np.union1d(idx_val, idx_test)

# Surrogate model used by FGA to compute attack gradients.
surrogate = GCN(nfeat=features.shape[1], nclass=labels.max().item()+1,
                nhid=16, device=device)

surrogate = surrogate.to(device)
surrogate.fit(features, adj, labels, idx_train, idx_val)

# Attack model targeting a single node.
target_node = 0
model = FGA(surrogate, nnodes=adj.shape[0], device=device)
model = model.to(device)

def main():
    """Attack `target_node` and compare GCN accuracy before/after."""
    # Consistency fix: the original asserted on a separate variable `u`
    # while attacking the module-level `target_node`; both were 0, so
    # behavior is unchanged, but we now use `target_node` throughout.
    assert target_node in idx_unlabeled

    degrees = adj.sum(0).A1
    # Perturbation budget defaults to the node's degree.
    n_perturbations = int(degrees[target_node])

    model.attack(features, adj, labels, idx_train, target_node, n_perturbations)

    print('=== testing GCN on original(clean) graph ===')
    test(adj, features, target_node)

    print('=== testing GCN on perturbed graph ===')
    test(model.modified_adj, features, target_node)

def test(adj, features, target_node):
    """Train a fresh GCN on *adj*, print the target node's class
    probabilities and the overall test accuracy."""
    gcn = GCN(nfeat=features.shape[1],
              nhid=16,
              nclass=labels.max().item() + 1,
              dropout=0.5, device=device)

    if args.cuda:
        gcn = gcn.to(device)

    gcn.fit(features, adj, labels, idx_train)

    gcn.eval()
    output = gcn.predict()
    probs = torch.exp(output[[target_node]])[0]
    print('probs: {}'.format(probs.detach().cpu().numpy()))
    acc_test = accuracy(output[idx_test], labels[idx_test])

    print("Test set results:",
          "accuracy= {:.4f}".format(acc_test.item()))

    return acc_test.item()


def select_nodes(target_gcn=None):
    '''
    selecting nodes as reported in nettack paper:
    (i) the 10 nodes with highest margin of classification, i.e. they are clearly correctly classified,
    (ii) the 10 nodes with lowest margin (but still correctly classified) and
    (iii) 20 more nodes randomly
    '''

    if target_gcn is None:
        target_gcn = GCN(nfeat=features.shape[1],
                         nhid=16,
                         nclass=labels.max().item() + 1,
                         dropout=0.5, device=device)
        target_gcn = target_gcn.to(device)
        target_gcn.fit(features, adj, labels, idx_train, idx_val, patience=30)
    target_gcn.eval()
    output = target_gcn.predict()

    # Keep only test nodes the model currently classifies correctly.
    margin_dict = {}
    for idx in idx_test:
        margin = classification_margin(output[idx], labels[idx])
        if margin < 0:
            continue
        margin_dict[idx] = margin
    sorted_margins = sorted(margin_dict.items(), key=lambda x: x[1], reverse=True)
    high = [x for x, y in sorted_margins[:10]]
    low = [x for x, y in sorted_margins[-10:]]
    other = [x for x, y in sorted_margins[10:-10]]
    other = np.random.choice(other, 20, replace=False).tolist()

    return high + low + other

def multi_test_poison():
    """Poisoning attack on the 40 selected nodes; print misclassification rate."""
    cnt = 0
    degrees = adj.sum(0).A1
    node_list = select_nodes()
    num = len(node_list)
    print('=== [Poisoning] Attacking %s nodes respectively ===' % num)
    for target_node in tqdm(node_list):
        n_perturbations = int(degrees[target_node])
        model = FGA(surrogate, nnodes=adj.shape[0], device=device)
        model = model.to(device)
        model.attack(features, adj, labels, idx_train, target_node, n_perturbations)
        modified_adj = model.modified_adj
        acc = single_test(modified_adj, features, target_node)
        if acc == 0:
            cnt += 1
    print('misclassification rate : %s' % (cnt/num))

def single_test(adj, features, target_node, gcn=None):
    """Return 1 if *target_node* is still classified correctly, else 0.

    With ``gcn=None`` a fresh GCN is retrained on *adj* (poisoning setting);
    otherwise the given trained model is reused (evasion setting)."""
    if gcn is None:
        gcn = GCN(nfeat=features.shape[1],
                  nhid=16,
                  nclass=labels.max().item() + 1,
                  dropout=0.5, device=device)

        gcn = gcn.to(device)

        gcn.fit(features, adj, labels, idx_train, idx_val, patience=30)
        gcn.eval()
        output = gcn.predict()
    else:
        output = gcn.predict(features, adj)
    # Cleanup: removed the unused `probs` local that was computed here.
    acc_test = (output.argmax(1)[target_node] == labels[target_node])
    return acc_test.item()

def multi_test_evasion():
    """Evasion attack (reusing the surrogate) on the 40 selected nodes;
    print the misclassification rate."""
    target_gcn = surrogate
    cnt = 0
    degrees = adj.sum(0).A1
    node_list = select_nodes(target_gcn)
    num = len(node_list)

    print('=== [Evasion] Attacking %s nodes respectively ===' % num)
    for target_node in tqdm(node_list):
        n_perturbations = int(degrees[target_node])
        model = FGA(surrogate, nnodes=adj.shape[0], device=device)
        model = model.to(device)
        model.attack(features, adj, labels, idx_train, target_node, n_perturbations)
        modified_adj = model.modified_adj

        acc = single_test(modified_adj, features, target_node, gcn=target_gcn)
        if acc == 0:
            cnt += 1
    print('misclassification rate : %s' % (cnt/num))

if __name__ == '__main__':
    main()
    multi_test_evasion()
    multi_test_poison()
examples/graph/test_gcn_adj_cgscore.py ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Compute CG-scores for a GCN on pre-perturbed graphs (per-edge variant).
# author: Yaning
import torch
import numpy as np
import torch.nn.functional as Fd
from deeprobust.graph.defense import GCNJaccard, GCN
from deeprobust.graph.defense import GCNScore
from deeprobust.graph.utils import *
from deeprobust.graph.data import Dataset, PrePtbDataset
from scipy.sparse import csr_matrix
import argparse
import pickle
from deeprobust.graph import utils
from collections import defaultdict

parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--dataset', type=str, default='pubmed', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
# Fixed typo in help text: "pertubation" -> "perturbation".
parser.add_argument('--ptb_rate', type=float, default=0.25, help='perturbation rate')

args = parser.parse_args()
args.cuda = torch.cuda.is_available()
print('cuda: %s' % args.cuda)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# The split seed must match the one used when the perturbed graph was generated.
np.random.seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# setting='prognn' loads the fixed Pro-GNN data splits.
# (Alternative: Dataset(root='/tmp/', name=args.dataset, setting='nettack', seed=15))
data = Dataset(root='/tmp/', name=args.dataset, setting='prognn')
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test

# NOTE(review): the sibling batch script passes attack_method='meta';
# confirm that 'metattack' is an accepted value for PrePtbDataset.
perturbed_data = PrePtbDataset(root='/tmp/',
                               name=args.dataset,
                               attack_method='metattack',
                               ptb_rate=args.ptb_rate)

perturbed_adj = perturbed_data.adj
# idx_test = perturbed_data.target_nodes
# perturbed_adj = adj
48
+
49
def save_cg_scores(cg_scores, filename="cg_scores.npy"):
    """Persist CG-scores to *filename* with ``np.save``."""
    np.save(filename, cg_scores)
    # Bug fix: the message previously printed the literal "(unknown)" instead
    # of interpolating the target path.
    print(f"CG-scores saved to {filename}")
52
+
53
def load_cg_scores_numpy(filename="cg_scores.npy"):
    """Load CG-scores previously written by ``save_cg_scores``."""
    cg_scores = np.load(filename, allow_pickle=True)
    # Bug fix: interpolate the actual path instead of printing "(unknown)".
    print(f"CG-scores loaded from {filename}")
    return cg_scores
57
+
58
+ def calc_cg_score_gnn_with_sampling(
59
+ A, X, labels, device, rep_num=1, unbalance_ratio=1, sub_term=False
60
+ ):
61
+ """
62
+ Calculate CG-score for each edge in a graph with node labels and random sampling.
63
+
64
+ Args:
65
+ A: torch.Tensor
66
+ Adjacency matrix of the graph (size: N x N).
67
+ X: torch.Tensor
68
+ Node features matrix (size: N x F).
69
+ labels: torch.Tensor
70
+ Node labels (size: N).
71
+ device: torch.device
72
+ Device to perform calculations.
73
+ rep_num: int
74
+ Number of repetitions for Monte Carlo sampling.
75
+ unbalance_ratio: float
76
+ Ratio of unbalanced data (1:unbalance_ratio).
77
+ sub_term: bool
78
+ If True, calculate and return sub-terms.
79
+
80
+ Returns:
81
+ cg_scores: dict
82
+ Dictionary containing CG-scores for edges and optionally sub-terms.
83
+ """
84
+ N = A.shape[0]
85
+ cg_scores = {
86
+ "vi": np.zeros((N, N)),
87
+ "ab": np.zeros((N, N)),
88
+ "a2": np.zeros((N, N)),
89
+ "b2": np.zeros((N, N)),
90
+ "times": np.zeros((N, N)),
91
+ }
92
+
93
+ with torch.no_grad():
94
+ for _ in range(rep_num):
95
+ # Compute AX (node representations)
96
+ AX = torch.matmul(A, X).to(device)
97
+ norm_AX = AX / torch.norm(AX, dim=1, keepdim=True)
98
+
99
+ # Group nodes by their labels
100
+ dataset = defaultdict(list)
101
+ data_idx = defaultdict(list)
102
+ for i, label in enumerate(labels):
103
+ dataset[label.item()].append(norm_AX[i].unsqueeze(0)) # Store normalized data
104
+ data_idx[label.item()].append(i) # Store indices
105
+
106
+ # Convert to tensors
107
+ for label, data_list in dataset.items():
108
+ dataset[label] = torch.cat(data_list, dim=0)
109
+ data_idx[label] = torch.tensor(data_idx[label], dtype=torch.long, device=device)
110
+
111
+ # Calculate CG-scores for each label group
112
+ for curr_label, curr_samples in dataset.items():
113
+ curr_indices = data_idx[curr_label]
114
+ curr_num = len(curr_samples)
115
+
116
+ # Randomly sample a subset of current label examples
117
+ chosen_curr_idx = np.random.choice(range(curr_num), curr_num, replace=False)
118
+ chosen_curr_samples = curr_samples[chosen_curr_idx]
119
+ chosen_curr_indices = curr_indices[chosen_curr_idx]
120
+
121
+ # Sample negative examples from other classes
122
+ neg_samples = torch.cat(
123
+ [dataset[l] for l in dataset if l != curr_label], dim=0
124
+ )
125
+ neg_indices = torch.cat(
126
+ [data_idx[l] for l in data_idx if l != curr_label], dim=0
127
+ )
128
+ neg_num = min(int(curr_num * unbalance_ratio), len(neg_samples))
129
+ chosen_neg_samples = neg_samples[
130
+ torch.randperm(len(neg_samples))[:neg_num]
131
+ ]
132
+
133
+ # Combine positive and negative samples
134
+ combined_samples = torch.cat([chosen_curr_samples, chosen_neg_samples], dim=0)
135
+ y = torch.cat(
136
+ [torch.ones(len(chosen_curr_samples)), -torch.ones(neg_num)], dim=0
137
+ ).to(device)
138
+
139
+ # Compute the Gram matrix H^\infty
140
+ H_inner = torch.matmul(combined_samples, combined_samples.T)
141
+ del combined_samples
142
+ ###
143
+ H_inner = torch.clamp(H_inner, min=-1.0, max=1.0)
144
+ ###
145
+ H = H_inner * (np.pi - torch.acos(H_inner)) / (2 * np.pi)
146
+ del H_inner
147
+
148
+ H.fill_diagonal_(0.5)
149
+ ##
150
+ epsilon = 1e-6
151
+ H = H + epsilon * torch.eye(H.size(0), device=H.device)
152
+ ##
153
+ invH = torch.inverse(H)
154
+ del H
155
+ original_error = y @ (invH @ y)
156
+
157
+ # Compute CG-scores for each edge
158
+ for i in chosen_curr_indices:
159
+ print("the node index:", i)
160
+ for j in range(i + 1, N): # Upper triangular traversal
161
+ # print(j)
162
+ if A[i, j] == 0: # Skip if no edge exists
163
+ continue
164
+
165
+ # Remove edge (i, j) to create A1
166
+ A1 = A.clone()
167
+ A1[i, j] = A1[j, i] = 0
168
+
169
+ # Recompute AX with A1
170
+ AX1 = torch.matmul(A1, X).to(device)
171
+ norm_AX1 = AX1 / torch.norm(AX1, dim=1, keepdim=True)
172
+
173
+ # Repeat error calculation with A1
174
+ curr_samples_A1 = norm_AX1[chosen_curr_indices]
175
+ neg_samples_A1 = norm_AX1[neg_indices]
176
+ chosen_neg_samples_A1 = neg_samples_A1[
177
+ torch.randperm(len(neg_samples_A1))[:neg_num]
178
+ ]
179
+ combined_samples_A1 = torch.cat(
180
+ [curr_samples_A1, chosen_neg_samples_A1], dim=0
181
+ )
182
+ H_inner_A1 = torch.matmul(combined_samples_A1, combined_samples_A1.T)
183
+
184
+ del combined_samples_A1
185
+
186
+ ### trick1
187
+ H_inner_A1 = torch.clamp(H_inner_A1, min=-1.0, max=1.0)
188
+ ###
189
+
190
+ H_A1 = H_inner_A1 * (np.pi - torch.acos(H_inner_A1)) / (2 * np.pi)
191
+ del H_inner_A1
192
+ H_A1.fill_diagonal_(0.5)
193
+
194
+ ### trick2
195
+ epsilon = 1e-6
196
+ H_A1= H_A1 + epsilon * torch.eye(H_A1.size(0), device=H_A1.device)
197
+ ###
198
+ invH_A1 = torch.inverse(H_A1)
199
+ del H_A1
200
+
201
+ error_A1 = y @ (invH_A1 @ y)
202
+
203
+ print("i:", i)
204
+ print("j:", j)
205
+ print("current score:", (original_error - error_A1).item())
206
+ # Compute the difference in error (CG-score)
207
+ cg_scores["vi"][i, j] += (original_error - error_A1).item()
208
+ cg_scores["vi"][j, i] = cg_scores["vi"][i, j] # Symmetric
209
+ cg_scores["times"][i, j] += 1
210
+ cg_scores["times"][j, i] += 1
211
+
212
+ # Normalize CG-scores by repetition count
213
+ for key, values in cg_scores.items():
214
+ if key == "times":
215
+ continue
216
+ cg_scores[key] = values / np.where(cg_scores["times"] > 0, cg_scores["times"], 1)
217
+
218
+ return cg_scores if sub_term else cg_scores["vi"]
219
+
220
+ def remove_least_important_edges(adj, cgscore, remove_ratio=0.8):
221
+ """
222
+ Remove the least important edges based on CGScore.
223
+
224
+ Args:
225
+ adj (torch.Tensor): Original adjacency matrix (N x N).
226
+ cgscore (np.ndarray): CGScore matrix (N x N).
227
+ keep_ratio (float): Ratio of edges to keep (default: 0.8).
228
+
229
+ Returns:
230
+ adj (torch.Tensor): Adjusted adjacency matrix after removing edges.
231
+ """
232
+ # Convert CGScore from numpy to PyTorch tensor
233
+ cgscore = torch.tensor(cgscore, dtype=torch.float32)
234
+
235
+ assert adj.shape == cgscore.shape, "adj and cgscore must have the same shape"
236
+ N = adj.shape[0]
237
+
238
+ # Extract upper triangular non-zero elements (excluding diagonal)
239
+ triu_indices = torch.triu_indices(N, N, offset=1) # Upper triangle indices
240
+ triu_scores = cgscore[triu_indices[0], triu_indices[1]]
241
+ triu_adj = adj[triu_indices[0], triu_indices[1]]
242
+
243
+ # Mask to ignore zero elements in adj
244
+ mask = triu_adj > 0
245
+ triu_scores = triu_scores[mask]
246
+ triu_indices = triu_indices[:, mask]
247
+
248
+ # Sort by CGScore in ascending order
249
+ sorted_indices = torch.argsort(triu_scores) # Indices of sorted CGScores
250
+
251
+
252
+ # Determine the cutoff for edges to remove
253
+ num_edges_to_remove = int(len(sorted_indices) * (remove_ratio)) # Edges to remove
254
+ print("len(sorted_indices)", len(sorted_indices))
255
+ print("remove_radio:", remove_ratio)
256
+ print("num_edges_to_remove", num_edges_to_remove)
257
+ edges_to_remove = sorted_indices[:num_edges_to_remove] # First 20% (lowest CGScores)
258
+
259
+ # Create a copy of the adjacency matrix
260
+ adj_new = adj.clone()
261
+
262
+ # Remove the least important edges
263
+ for idx in edges_to_remove:
264
+ i, j = triu_indices[:, idx]
265
+ adj_new[i, j] = 0
266
+ adj_new[j, i] = 0 # Ensure symmetry
267
+
268
+ return adj_new
269
+
270
+ def is_symmetric_sparse(adj):
271
+ """
272
+ Check if a sparse matrix is symmetric.
273
+ """
274
+ # Check symmetry
275
+ return (adj != adj.transpose()).nnz == 0 # .nnz is the number of non-zero elements
276
+
277
+ def make_symmetric_sparse(adj):
278
+ """
279
+ Ensure the sparse adjacency matrix is symmetrical.
280
+ """
281
+ # Make the matrix symmetric
282
+ sym_adj = (adj + adj.transpose()) / 2
283
+ return sym_adj
284
+
285
+ perturbed_adj = make_symmetric_sparse(perturbed_adj)
286
+
287
+ if type(perturbed_adj) is not torch.Tensor:
288
+ features, perturbed_adj, _ = utils.to_tensor(features, perturbed_adj, labels)
289
+ else:
290
+ features = features.to(device)
291
+ perturbed_adj = perturbed_adj.to(device)
292
+ # labels = labels.to(device)
293
+
294
+ if utils.is_sparse_tensor(perturbed_adj):
295
+
296
+ adj_norm = utils.normalize_adj_tensor(perturbed_adj, sparse=True)
297
+ else:
298
+ adj_norm = utils.normalize_adj_tensor(perturbed_adj)
299
+
300
+ features = features.to_dense()
301
+ perturbed_adj = perturbed_adj.to_dense()
302
+
303
+ # save_cg_scores(calc_cg_score, filename="cg_scores.npy")
304
+ # calc_cg_score = calc_cg_score_gnn_with_sampling(perturbed_adj, features, labels, device, rep_num=1, unbalance_ratio=1, sub_term=False)
305
+ # print("cgscore:", calc_cg_score_gnn_with_sampling)
306
+
307
+
308
+ cg_scores = load_cg_scores_numpy(filename="pubmed_0.05.npy")
309
+ cg_scores_abs = np.abs(cg_scores)
310
+ perturbed_adj = remove_least_important_edges(perturbed_adj, cg_scores_abs, remove_ratio=0.2)
311
+ perturbed_adj = sp.csr_matrix(perturbed_adj.numpy())
312
+ features = sp.csr_matrix(features.numpy())
313
+
314
+ # Setup Defense Model
315
+ # model = GCNJaccard(nfeat=features.shape[1], nclass=labels.max()+1,
316
+ # nhid=16, device=device)
317
+
318
+ model = GCN(nfeat=features.shape[1], nclass=labels.max()+1,
319
+ nhid=16, device=device)
320
+
321
+ model = model.to(device)
322
+
323
+ print("labels:", labels)
324
+ print('=== testing GCN-Jaccard on perturbed graph ===')
325
+ model.fit(features, perturbed_adj, labels, idx_train, idx_val, train_iters=200, verbose=True)
326
+ model.eval()
327
+ # You can use the inner function of model to test
328
+ model.test(idx_test)
examples/graph/test_gcn_svd.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import torch.nn.functional as F
4
+ import torch.optim as optim
5
+ from deeprobust.graph.defense import GCNSVD
6
+ from deeprobust.graph.utils import *
7
+ from deeprobust.graph.data import Dataset, PrePtbDataset
8
+ import argparse
9
+
10
+ parser = argparse.ArgumentParser()
11
+ parser.add_argument('--seed', type=int, default=15, help='Random seed.')
12
+ parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
13
+ parser.add_argument('--ptb_rate', type=float, default=0.05, help='pertubation rate')
14
+ parser.add_argument('--k', type=int, default=15, help='Truncated Components.')
15
+
16
+ args = parser.parse_args()
17
+ args.cuda = torch.cuda.is_available()
18
+ print('cuda: %s' % args.cuda)
19
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
20
+
21
+ # make sure you use the same data splits as you generated attacks
22
+ np.random.seed(args.seed)
23
+ if args.cuda:
24
+ torch.cuda.manual_seed(args.seed)
25
+
26
+ # Here the random seed is to split the train/val/test data,
27
+ # we need to set the random seed to be the same as that when you generate the perturbed graph
28
+ # data = Dataset(root='/tmp/', name=args.dataset, setting='nettack', seed=15)
29
+ # Or we can just use setting='prognn' to get the splits
30
+ data = Dataset(root='/tmp/', name=args.dataset, setting='prognn')
31
+ adj, features, labels = data.adj, data.features, data.labels
32
+ idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
33
+
34
+
35
+ # load pre-attacked graph
36
+ perturbed_data = PrePtbDataset(root='/tmp/',
37
+ name=args.dataset,
38
+ attack_method='meta',
39
+ ptb_rate=args.ptb_rate)
40
+ perturbed_adj = perturbed_data.adj
41
+
42
+ # Setup Defense Model
43
+ model = GCNSVD(nfeat=features.shape[1], nclass=labels.max()+1,
44
+ nhid=16, device=device)
45
+
46
+ model = model.to(device)
47
+
48
+ print('=== testing GCN-SVD on perturbed graph ===')
49
+ model.fit(features, perturbed_adj, labels, idx_train, idx_val, k=args.k, verbose=True)
50
+ model.eval()
51
+ output = model.test(idx_test)
52
+
examples/graph/test_ig.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import torch.nn.functional as F
4
+ import torch.optim as optim
5
+ from deeprobust.graph.defense import GCN
6
+ from deeprobust.graph.targeted_attack import IGAttack
7
+ from deeprobust.graph.utils import *
8
+ from deeprobust.graph.data import Dataset
9
+ import argparse
10
+ from tqdm import tqdm
11
+
12
+ parser = argparse.ArgumentParser()
13
+ parser.add_argument('--seed', type=int, default=15, help='Random seed.')
14
+ parser.add_argument('--dataset', type=str, default='citeseer', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
15
+ parser.add_argument('--ptb_rate', type=float, default=0.05, help='pertubation rate')
16
+
17
+ args = parser.parse_args()
18
+ args.cuda = torch.cuda.is_available()
19
+ print('cuda: %s' % args.cuda)
20
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
21
+
22
+ np.random.seed(args.seed)
23
+ torch.manual_seed(args.seed)
24
+ if args.cuda:
25
+ torch.cuda.manual_seed(args.seed)
26
+
27
+ data = Dataset(root='/tmp/', name=args.dataset)
28
+ adj, features, labels = data.adj, data.features, data.labels
29
+ idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
30
+
31
+ idx_unlabeled = np.union1d(idx_val, idx_test)
32
+
33
+ # Setup Surrogate model
34
+ surrogate = GCN(nfeat=features.shape[1], nclass=labels.max().item()+1,
35
+ nhid=16, device=device)
36
+
37
+ surrogate = surrogate.to(device)
38
+ surrogate.fit(features, adj, labels, idx_train, idx_val)
39
+
40
+ # Setup Attack Model
41
+ target_node = 0
42
+ assert target_node in idx_unlabeled
43
+
44
+ model = IGAttack(surrogate, nnodes=adj.shape[0], attack_structure=True, attack_features=True, device=device)
45
+ model = model.to(device)
46
+
47
+ def main():
48
+ degrees = adj.sum(0).A1
49
+ # How many perturbations to perform. Default: Degree of the node
50
+ n_perturbations = int(degrees[target_node])
51
+
52
+ model.attack(features, adj, labels, idx_train, target_node, n_perturbations, steps=20)
53
+ modified_adj = model.modified_adj
54
+ modified_features = model.modified_features
55
+
56
+ print('=== testing GCN on original(clean) graph ===')
57
+ test(adj, features, target_node)
58
+
59
+ print('=== testing GCN on perturbed graph ===')
60
+ test(modified_adj, modified_features, target_node)
61
+
62
+ def test(adj, features, target_node):
63
+ ''' test on GCN '''
64
+ gcn = GCN(nfeat=features.shape[1],
65
+ nhid=16,
66
+ nclass=labels.max().item() + 1,
67
+ dropout=0.5, device=device)
68
+
69
+ gcn = gcn.to(device)
70
+
71
+ gcn.fit(features, adj, labels, idx_train)
72
+
73
+ gcn.eval()
74
+ output = gcn.predict()
75
+ probs = torch.exp(output[[target_node]])[0]
76
+ print('probs: {}'.format(probs.detach().cpu().numpy()))
77
+ acc_test = accuracy(output[idx_test], labels[idx_test])
78
+
79
+ print("Test set results:",
80
+ "accuracy= {:.4f}".format(acc_test.item()))
81
+
82
+ return acc_test.item()
83
+
84
+ def select_nodes(target_gcn=None):
85
+ '''
86
+ selecting nodes as reported in nettack paper:
87
+ (i) the 10 nodes with highest margin of classification, i.e. they are clearly correctly classified,
88
+ (ii) the 10 nodes with lowest margin (but still correctly classified) and
89
+ (iii) 20 more nodes randomly
90
+ '''
91
+
92
+ if target_gcn is None:
93
+ target_gcn = GCN(nfeat=features.shape[1],
94
+ nhid=16,
95
+ nclass=labels.max().item() + 1,
96
+ dropout=0.5, device=device)
97
+ target_gcn = target_gcn.to(device)
98
+ target_gcn.fit(features, adj, labels, idx_train, idx_val, patience=30)
99
+ target_gcn.eval()
100
+ output = target_gcn.predict()
101
+
102
+ margin_dict = {}
103
+ for idx in idx_test:
104
+ margin = classification_margin(output[idx], labels[idx])
105
+ if margin < 0: # only keep the nodes correctly classified
106
+ continue
107
+ margin_dict[idx] = margin
108
+ sorted_margins = sorted(margin_dict.items(), key=lambda x:x[1], reverse=True)
109
+ high = [x for x, y in sorted_margins[: 10]]
110
+ low = [x for x, y in sorted_margins[-10: ]]
111
+ other = [x for x, y in sorted_margins[10: -10]]
112
+ other = np.random.choice(other, 20, replace=False).tolist()
113
+
114
+ return high + low + other
115
+
116
+ def multi_test_poison():
117
+ # test on 40 nodes on poisoining attack
118
+ cnt = 0
119
+ degrees = adj.sum(0).A1
120
+ node_list = select_nodes()
121
+ num = len(node_list)
122
+ print('=== [Poisoning] Attacking %s nodes respectively ===' % num)
123
+ for target_node in tqdm(node_list):
124
+ n_perturbations = int(degrees[target_node])
125
+ model = IGAttack(surrogate, nnodes=adj.shape[0], attack_structure=True, attack_features=True, device=device)
126
+ model = model.to(device)
127
+ model.attack(features, adj, labels, idx_train, target_node, n_perturbations, steps=20)
128
+ modified_adj = model.modified_adj
129
+ modified_features = model.modified_features
130
+ acc = single_test(modified_adj, modified_features, target_node)
131
+ if acc == 0:
132
+ cnt += 1
133
+ print('misclassification rate : %s' % (cnt/num))
134
+
135
+ def single_test(adj, features, target_node, gcn=None):
136
+ if gcn is None:
137
+ # test on GCN (poisoning attack)
138
+ gcn = GCN(nfeat=features.shape[1],
139
+ nhid=16,
140
+ nclass=labels.max().item() + 1,
141
+ dropout=0.5, device=device)
142
+
143
+ gcn = gcn.to(device)
144
+
145
+ gcn.fit(features, adj, labels, idx_train, idx_val, patience=30)
146
+ gcn.eval()
147
+ output = gcn.predict()
148
+ else:
149
+ # test on GCN (evasion attack)
150
+ output = gcn.predict(features, adj)
151
+ probs = torch.exp(output[[target_node]])
152
+
153
+ # acc_test = accuracy(output[[target_node]], labels[target_node])
154
+ acc_test = (output.argmax(1)[target_node] == labels[target_node])
155
+ return acc_test.item()
156
+
157
+ def multi_test_evasion():
158
+ # test on 40 nodes on evasion attack
159
+ # target_gcn = GCN(nfeat=features.shape[1],
160
+ # nhid=16,
161
+ # nclass=labels.max().item() + 1,
162
+ # dropout=0.5, device=device)
163
+ # target_gcn = target_gcn.to(device)
164
+ # target_gcn.fit(features, adj, labels, idx_train, idx_val, patience=30)
165
+
166
+ target_gcn = surrogate
167
+
168
+ cnt = 0
169
+ degrees = adj.sum(0).A1
170
+ node_list = select_nodes(target_gcn)
171
+ num = len(node_list)
172
+
173
+ print('=== [Evasion] Attacking %s nodes respectively ===' % num)
174
+ for target_node in tqdm(node_list):
175
+ n_perturbations = int(degrees[target_node])
176
+ model = IGAttack(surrogate, nnodes=adj.shape[0], attack_structure=True, attack_features=True, device=device)
177
+ model = model.to(device)
178
+ model.attack(features, adj, labels, idx_train, target_node, n_perturbations, steps=20)
179
+ modified_adj = model.modified_adj
180
+ modified_features = model.modified_features
181
+
182
+ acc = single_test(modified_adj, modified_features, target_node, gcn=target_gcn)
183
+ if acc == 0:
184
+ cnt += 1
185
+ print('misclassification rate : %s' % (cnt/num))
186
+
187
+ if __name__ == '__main__':
188
+ main()
189
+ # multi_test_evasion()
190
+ multi_test_poison()
191
+
192
+
examples/graph/test_prbcd_cora.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch_geometric.datasets import Planetoid
2
+ from torch_geometric.utils import to_undirected
3
+ import torch_geometric.transforms as T
4
+ import argparse
5
+ import torch
6
+ import deeprobust.graph.utils as utils
7
+ from deeprobust.graph.global_attack import PRBCD
8
+ from deeprobust.graph.defense_pyg import GCN, SAGE, GAT
9
+
10
+ parser = argparse.ArgumentParser()
11
+ parser.add_argument('--ptb_rate', type=float, default=0.1, help='perturbation rate.')
12
+ args = parser.parse_args()
13
+
14
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
15
+ dataset = Planetoid('./', 'cora')
16
+ dataset.transform = T.NormalizeFeatures()
17
+ data = dataset[0]
18
+
19
+ ### we can also attack other models such as GCN, GAT, SAGE or GPRGNN
20
+ ### (models in deeprobust.graph.defense_pyg), see below
21
+ print('now we choose to attack GCN model')
22
+ model = GCN(nfeat=data.x.shape[1], nhid=32, nclass=dataset.num_classes,
23
+ nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4,
24
+ device=device).to(device)
25
+ agent = PRBCD(data, model=model, device=device, epochs=50) # by default, we are attacking the GCN model
26
+ agent.pretrain_model(model) # use the function to pretrain the provided model
27
+ edge_index, edge_weight = agent.attack(ptb_rate=args.ptb_rate)
28
+
29
+ print('now we choose to attack SAGE model')
30
+ model = SAGE(nfeat=data.x.shape[1], nhid=32, nclass=dataset.num_classes,
31
+ nlayers=2, dropout=0.5, lr=0.01, weight_decay=5e-4,
32
+ device=device).to(device)
33
+ agent = PRBCD(data, model=model, device=device, epochs=50) # by default, we are attacking the GCN model
34
+ agent.pretrain_model(model) # use the function to pretrain the provided model
35
+ edge_index, edge_weight = agent.attack(ptb_rate=args.ptb_rate)
36
+
37
+
38
+ print('now we choose to attack GAT model')
39
+ model = GAT(nfeat=data.x.shape[1], nhid=8, heads=8, weight_decay=5e-4,
40
+ lr=0.005, nlayers=2, nclass=dataset.num_classes,
41
+ dropout=0.5, device=device).to(device)
42
+
43
+ agent = PRBCD(data, model=model, device=device, epochs=50) # by default, we are attacking the GCN model
44
+ agent.pretrain_model(model) # use the function to pretrain the provided model
45
+ edge_index, edge_weight = agent.attack(ptb_rate=args.ptb_rate)
46
+
47
+
examples/graph/test_rgcn.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import torch.nn.functional as F
4
+ from deeprobust.graph.defense import RGCN
5
+ from deeprobust.graph.utils import *
6
+ from deeprobust.graph.data import Dataset
7
+ from deeprobust.graph.data import PrePtbDataset
8
+
9
+ import argparse
10
+ parser = argparse.ArgumentParser()
11
+ parser.add_argument('--seed', type=int, default=15, help='Random seed.')
12
+ parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
13
+ parser.add_argument('--ptb_rate', type=float, default=0.05, help='pertubation rate')
14
+
15
+ args = parser.parse_args()
16
+ args.cuda = torch.cuda.is_available()
17
+ print('cuda: %s' % args.cuda)
18
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
19
+
20
+ # Here the random seed is to split the train/val/test data,
21
+ # we need to set the random seed to be the same as that when you generate the perturbed graph
22
+ # data = Dataset(root='/tmp/', name=args.dataset, setting='nettack', seed=15)
23
+ # Or we can just use setting='prognn' to get the splits
24
+ data = Dataset(root='/tmp/', name=args.dataset, setting='prognn')
25
+ adj, features, labels = data.adj, data.features, data.labels
26
+ idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
27
+
28
+ np.random.seed(args.seed)
29
+ torch.manual_seed(args.seed)
30
+ if args.cuda:
31
+ torch.cuda.manual_seed(args.seed)
32
+
33
+ # load pre-attacked graph by Zugner: https://github.com/danielzuegner/gnn-meta-attack
34
+ print('==================')
35
+ print('=== load graph perturbed by Zugner metattack (under prognn splits) ===')
36
+
37
+ perturbed_data = PrePtbDataset(root='/tmp/',
38
+ name=args.dataset,
39
+ attack_method='meta',
40
+ ptb_rate=args.ptb_rate)
41
+ perturbed_adj = perturbed_data.adj
42
+
43
+ # Setup RGCN Model
44
+ model = RGCN(nnodes=perturbed_adj.shape[0], nfeat=features.shape[1], nclass=labels.max()+1,
45
+ nhid=32, device=device)
46
+
47
+ model = model.to(device)
48
+
49
+ model.fit(features, perturbed_adj, labels, idx_train, idx_val, train_iters=200, verbose=True)
50
+ # You can use the inner function of model to test
51
+ model.test(idx_test)
52
+
examples/graph/test_sgc.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import argparse
3
+ from deeprobust.graph.data import Dataset, Dpr2Pyg
4
+ from deeprobust.graph.defense import SGC
5
+ from deeprobust.graph.data import Dataset
6
+ from deeprobust.graph.data import PrePtbDataset
7
+
8
+ parser = argparse.ArgumentParser()
9
+ parser.add_argument('--seed', type=int, default=15, help='Random seed.')
10
+ parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
11
+ parser.add_argument('--ptb_rate', type=float, default=0.05, help='perturbation rate')
12
+
13
+ args = parser.parse_args()
14
+ args.cuda = torch.cuda.is_available()
15
+ print('cuda: %s' % args.cuda)
16
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
17
+
18
+ # use data splist provided by prognn
19
+ data = Dataset(root='/tmp/', name=args.dataset, setting='prognn')
20
+ adj, features, labels = data.adj, data.features, data.labels
21
+ idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
22
+
23
+ sgc = SGC(nfeat=features.shape[1],
24
+ nclass=labels.max().item() + 1,
25
+ lr=0.1, device=device)
26
+ sgc = sgc.to(device)
27
+
28
+
29
+ # test on clean graph
30
+ print('==================')
31
+ print('=== train on clean graph ===')
32
+
33
+ pyg_data = Dpr2Pyg(data)
34
+ sgc.fit(pyg_data, verbose=True) # train with earlystopping
35
+ sgc.test()
36
+
37
+ # load pre-attacked graph by Zugner: https://github.com/danielzuegner/gnn-meta-attack
38
+ print('==================')
39
+ print('=== load graph perturbed by Zugner metattack (under prognn splits) ===')
40
+
41
+ perturbed_data = PrePtbDataset(root='/tmp/',
42
+ name=args.dataset,
43
+ attack_method='meta',
44
+ ptb_rate=args.ptb_rate)
45
+ perturbed_adj = perturbed_data.adj
46
+ pyg_data.update_edge_index(perturbed_adj) # inplace operation
47
+ sgc.fit(pyg_data, verbose=True) # train with earlystopping
48
+ sgc.test()
49
+
50
+
51
+
52
+
examples/graph/test_simpgcn.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import numpy as np
3
+ import torch.nn.functional as F
4
+ from deeprobust.graph.utils import *
5
+ from deeprobust.graph.data import Dataset
6
+ from deeprobust.graph.data import PtbDataset, PrePtbDataset
7
+ from deeprobust.graph.defense import SimPGCN
8
+ import argparse
9
+
10
+ parser = argparse.ArgumentParser()
11
+ parser.add_argument('--seed', type=int, default=15, help='Random seed.')
12
+ parser.add_argument('--dataset', type=str, default='cora', choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed'], help='dataset')
13
+ parser.add_argument('--ptb_rate', type=float, default=0.05, help='pertubation rate')
14
+
15
+ args = parser.parse_args()
16
+ args.cuda = torch.cuda.is_available()
17
+ print('cuda: %s' % args.cuda)
18
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
19
+
20
+ # use data splist provided by prognn
21
+ data = Dataset(root='/tmp/', name=args.dataset, setting='prognn')
22
+ adj, features, labels = data.adj, data.features, data.labels
23
+ idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
24
+
25
+
26
+ print('==================')
27
+ print('=== load graph perturbed by Zugner metattack (under prognn splits) ===')
28
+ # load pre-attacked graph by Zugner: https://github.com/danielzuegner/gnn-meta-attack
29
+ perturbed_data = PrePtbDataset(root='/tmp/',
30
+ name=args.dataset,
31
+ attack_method='meta',
32
+ ptb_rate=args.ptb_rate)
33
+ perturbed_adj = perturbed_data.adj
34
+
35
+ np.random.seed(args.seed)
36
+ torch.manual_seed(args.seed)
37
+ if args.cuda:
38
+ torch.cuda.manual_seed(args.seed)
39
+
40
+ # Setup Defense Model
41
+ model = SimPGCN(nnodes=features.shape[0], nfeat=features.shape[1], nhid=16, nclass=labels.max()+1, device=device)
42
+ model = model.to(device)
43
+
44
+ # using validation to pick model
45
+ model.fit(features, perturbed_adj, labels, idx_train, idx_val, train_iters=200, verbose=True)
46
+ # You can use the inner function of model to test
47
+ model.test(idx_test)
48
+
examples/image/test1.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import print_function
2
+ import argparse
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ import torch.optim as optim
7
+ from torchvision import datasets, transforms
8
+ import numpy as np
9
+
10
+
11
+ class Net(nn.Module):
12
+ def __init__(self):
13
+ super(Net, self).__init__()
14
+ self.conv1 = nn.Conv2d(1, 20, 5, 1)
15
+ self.conv2 = nn.Conv2d(20, 50, 5, 1)
16
+ self.fc1 = nn.Linear( 4 * 4 *50, 500)
17
+ self.fc2 = nn.Linear(500, 10)
18
+
19
+ def forward(self, x):
20
+ x = F.relu(self.conv1(x))
21
+ x = F.max_pool2d(x, 2, 2)
22
+ x = F.relu(self.conv2(x))
23
+ x = F.max_pool2d(x, 2, 2)
24
+ x = x.view(-1, 4* 4 * 50)
25
+ x = F.relu(self.fc1(x))
26
+ x = self.fc2(x)
27
+ return F.log_softmax(x, dim=1)
28
+
29
+
30
+ def train(model, device, train_loader, optimizer, epoch):
31
+ model.train()
32
+ for batch_idx, (data, target) in enumerate(train_loader):
33
+ data, target = data.to(device), target.to(device)
34
+ optimizer.zero_grad()
35
+ output = model(data)
36
+ loss = F.nll_loss(output, target)
37
+ loss.backward()
38
+ optimizer.step()
39
+ if batch_idx % 10 == 0:
40
+ print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
41
+ epoch, batch_idx * len(data), len(train_loader.dataset),
42
+ 100. * batch_idx / len(train_loader), loss.item()))
43
+
44
+
45
+ def test(model, device, test_loader):
46
+ model.eval()
47
+
48
+ test_loss = 0
49
+ correct = 0
50
+ with torch.no_grad():
51
+ for data, target in test_loader:
52
+ data, target = data.to(device), target.to(device)
53
+ output = model(data)
54
+ test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
55
+ pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
56
+ correct += pred.eq(target.view_as(pred)).sum().item()
57
+
58
+ test_loss /= len(test_loader.dataset)
59
+
60
+ print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
61
+ test_loss, correct, len(test_loader.dataset),
62
+ 100. * correct / len(test_loader.dataset)))
63
+
64
+
65
+
66
+
67
+ torch.manual_seed(100)
68
+ device = torch.device("cuda")
69
+
70
+
71
+ train_loader = torch.utils.data.DataLoader(
72
+ datasets.MNIST('../data', train=True, download=True,
73
+ transform=transforms.Compose([transforms.ToTensor(),
74
+ transforms.Normalize((0.1307,), (0.3081,))])),
75
+ batch_size=64,
76
+ shuffle=True)
77
+
78
+ test_loader = torch.utils.data.DataLoader(
79
+ datasets.MNIST('../data', train=False,
80
+ transform=transforms.Compose([transforms.ToTensor(),
81
+ transforms.Normalize((0.1307,), (0.3081,))])),
82
+ batch_size=1000,
83
+ shuffle=True)
84
+
85
+ model = Net().to(device)
86
+ optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
87
+
88
+
89
+ save_model = True
90
+ for epoch in range(1, 5 + 1): ## 5 batches
91
+ train( model, device, train_loader, optimizer, epoch)
92
+ test( model, device, test_loader)
93
+
94
+ if (save_model):
95
+ torch.save(model.state_dict(), "mnist_cnn.pt")
96
+
97
+
98
+
99
+
100
+ ############################################################## test
101
+
102
+ xx = datasets.MNIST('../data').data[0:10]
103
+ xx = xx.unsqueeze_(1).float()/255
104
+
105
+ yy = datasets.MNIST('../data', download=True).targets[0:10]
106
+
107
+
108
+ from fgsm import FGM
109
+
110
+
111
+ fgsm_params = {
112
+ 'epsilon': 0.1,
113
+ 'order': np.inf,
114
+ 'clip_max': None,
115
+ 'clip_min': None
116
+ }
117
+
118
+ F1 = FGM(model, device = "cpu") ### or cuda
119
+ aa = F1.generate(x=xx, y=yy, **fgsm_params)
120
+
121
+ import matplotlib.pyplot as plt
122
+ plt.imsave('test.jpg', aa[0,0])
examples/image/test_ImageNet.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torchvision
2
+ import torch
3
+ from torchvision import datasets
4
+ from torchvision import transforms
5
+ import os
6
+ from deeprobust.image.attack.pgd import PGD
7
+ from deeprobust.image.config import attack_params
8
+
9
+ val_root = '/mnt/home/liyaxin1/Documents/data/ImageNet'
10
+ #Imagenet_data = torchvision.datasets.ImageNet(val_root, split = 'val')
11
+ test_loader = torch.utils.data.DataLoader(datasets.ImageFolder('~/Documents/data/ImageNet/val', transforms.Compose([
12
+ transforms.Scale(256), transforms.CenterCrop(224), transforms.ToTensor(),
13
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])), batch_size=1, shuffle=False)
14
+
15
+ #import torchvision.models as models
16
+ #model = models.resnet50(pretrained=True).to('cuda')
17
+
18
+ import pretrainedmodels
19
+ model = pretrainedmodels.resnet50(num_classes=1000, pretrained='imagenet').to('cuda')
20
+
21
+ for i, (input, y) in enumerate(test_loader):
22
+
23
+ import ipdb
24
+ ipdb.set_trace()
25
+
26
+ input, y = input.to('cuda'), y.to('cuda')
27
+ pred = model(input)
28
+ print(pred.argmax(dim=1, keepdim = True))
29
+
30
+ adversary = PGD(model)
31
+ AdvExArray = adversary.generate(input, y, **attack_params['PGD_CIFAR10']).float()
32
+
33
+
examples/image/test_onepixel.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F #233
import torch.optim as optim
from torchvision import datasets,models,transforms
from PIL import Image
import argparse

from deeprobust.image.attack.onepixel import Onepixel
from deeprobust.image.netmodels import resnet
from deeprobust.image.config import attack_params
from deeprobust.image.utils import download_model

def parameter_parser():
    """Build the CLI parser for locating a pretrained checkpoint.

    Returns
    -------
    argparse.Namespace
        Parsed arguments with ``destination`` and ``filename`` fields.
    """
    parser = argparse.ArgumentParser(description = "Run attack algorithms.")

    parser.add_argument("--destination",
                        default = './trained_models/',
                        help = "choose destination to load the pretrained models.")

    parser.add_argument("--filename",
                        default = "MNIST_CNN_epoch_20.pt")

    return parser.parse_args()

args = parameter_parser()  # read arguments and create an argparse object

model = resnet.ResNet18().to('cuda')
print("Load network")

# NOTE(review): args.destination / args.filename are parsed but ignored here —
# the checkpoint path is hard-coded, and the --filename default names an MNIST
# model while a CIFAR-10 ResNet is loaded. Confirm which path is intended.
model.load_state_dict(torch.load("./trained_models/CIFAR10_ResNet18_epoch_20.pt"))
model.eval()

transform_val = transforms.Compose([
                transforms.ToTensor(),
                ])

test_loader = torch.utils.data.DataLoader(
                datasets.CIFAR10('deeprobust/image/data', train = False, download=True,
                transform = transform_val),
                batch_size = 1, shuffle=True) #, **kwargs)

classes = np.array(('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'))

# Grab a single random test image to attack.
xx, yy = next(iter(test_loader))
xx = xx.to('cuda').float()

"""
Generate adversarial examples
"""

F1 = Onepixel(model, device = "cuda") ### or cuda
AdvExArray = F1.generate(xx, yy)

# Compare the model's prediction before and after the attack.
predict0 = model(xx)
predict0= predict0.argmax(dim=1, keepdim=True)

predict1 = model(AdvExArray)
predict1= predict1.argmax(dim=1, keepdim=True)

print("original prediction:")
print(predict0)

print("attack prediction:")
print(predict1)

xx = xx.cpu().detach().numpy()
AdvExArray = AdvExArray.cpu().detach().numpy()

import matplotlib.pyplot as plt
xx = xx[0].transpose(1, 2, 0)
AdvExArray = AdvExArray[0].transpose(1, 2, 0)

# FIX: these are float RGB arrays in [0, 1]; matplotlib ignores vmin/vmax for
# RGB(A) input, so the previous vmin=0, vmax=255 arguments were dead and
# misleading — removed.
plt.imshow(xx)
plt.savefig('./adversary_examples/cifar10_advexample_ori.png')

plt.imshow(AdvExArray)
plt.savefig('./adversary_examples/cifar10_advexample_onepixel_adv.png')
examples/image/test_pgdtraining.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from deeprobust.image.defense.pgdtraining import PGDtraining
from deeprobust.image.attack.pgd import PGD
import torch
from torchvision import datasets, transforms
from deeprobust.image.netmodels.CNN import Net
from deeprobust.image.config import defense_params

# Adversarially train a small CNN on MNIST via PGD-generated examples.

to_tensor = transforms.Compose([transforms.ToTensor()])

# MNIST loaders: download on first use, batch size 256, shuffled.
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('deeprobust/image/defense/data', train=True, download=True,
                   transform=to_tensor),
    batch_size=256,
    shuffle=True)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('deeprobust/image/defense/data', train=False,
                   transform=to_tensor),
    batch_size=256,
    shuffle=True)

# Run PGD adversarial training with the library's MNIST defaults.
print('====== START TRAINING =====')

model = Net()
defense = PGDtraining(model, 'cuda')
defense.generate(train_loader, test_loader, **defense_params["PGDtraining_MNIST"])

print('====== FINISH TRAINING =====')
examples/image/test_train.py ADDED
@@ -0,0 +1,2 @@
 
 
 
# Train the library's CNN on MNIST for 20 epochs on GPU.
from deeprobust.image.netmodels import train_model as trainmodel

trainmodel.train('CNN','MNIST','cuda', 20)