|
|
import torch |
|
|
import numpy as np |
|
|
import torch.nn.functional as F |
|
|
from deeprobust.graph.defense import GCNJaccard, GCN, ProGNN |
|
|
from deeprobust.graph.utils import * |
|
|
from deeprobust.graph.data import Dataset, PrePtbDataset |
|
|
import argparse |
|
|
import os |
|
|
import csv |
|
|
|
|
|
# ---------------------------------------------------------------------------
# Command-line arguments
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()

# Reproducibility and data selection
parser.add_argument('--seed', type=int, default=15, help='Random seed.')
parser.add_argument('--dataset', type=str, default='pubmed',
                    choices=['cora', 'cora_ml', 'citeseer', 'polblogs', 'pubmed', 'Flickr'],
                    help='dataset')
parser.add_argument('--ptb_type', type=str, default='clean',
                    choices=['clean', 'meta', 'dice', 'minmax', 'pgd', 'random'],
                    help='attack type')
# FIX: typo "pertubation" -> "perturbation" in the help text.
parser.add_argument('--ptb_rate', type=float, default=0.0, help='perturbation rate')

# GCN architecture / training schedule
parser.add_argument('--epochs', type=int, default=200, help='Number of epochs to train.')
parser.add_argument('--hidden', type=int, default=16, help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--debug', action='store_true', default=False, help='debug mode')
parser.add_argument('--only_gcn', action='store_true', default=False,
                    help='test the performance of gcn without other components')

# ProGNN optimization hyper-parameters (regularization weights and step counts)
parser.add_argument('--lr', type=float, default=0.01, help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--alpha', type=float, default=5e-4, help='weight of l1 norm')
parser.add_argument('--beta', type=float, default=1.5, help='weight of nuclear norm')
parser.add_argument('--gamma', type=float, default=1, help='weight of l2 norm')
parser.add_argument('--lambda_', type=float, default=0, help='weight of feature smoothing')
parser.add_argument('--phi', type=float, default=0, help='weight of symmetric loss')
parser.add_argument('--inner_steps', type=int, default=2, help='steps for inner optimization')
parser.add_argument('--outer_steps', type=int, default=1, help='steps for outer optimization')
parser.add_argument('--lr_adj', type=float, default=0.01, help='lr for training adj')
parser.add_argument('--symmetric', action='store_true', default=False,
                    help='whether use symmetric matrix')

# Hardware selection.
# FIX: the help text claimed "(default: 0)" while the actual default is 6;
# the message now states the real default.
parser.add_argument('--gpu', type=int, default=6, help='GPU device ID (default: 6)')

args = parser.parse_args()

args.cuda = torch.cuda.is_available()
# Use the requested GPU when CUDA is available, otherwise fall back to CPU.
# (Reuse args.cuda instead of calling torch.cuda.is_available() a second time.)
device = torch.device(f"cuda:{args.gpu}" if args.cuda else "cpu")
print('Using device:', device)
|
|
|
|
|
|
|
|
# Seed every RNG the experiment uses so runs are reproducible.
np.random.seed(args.seed)
# FIX: the CPU torch RNG was never seeded, so weight initialization and
# dropout were non-deterministic even with --seed fixed.
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Load the clean dataset (downloaded/cached under /tmp/ by DeepRobust).
data = Dataset(root='/tmp/', name=args.dataset)
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
# Unlabeled nodes = validation + test nodes (used by some attack baselines).
idx_unlabeled = np.union1d(idx_val, idx_test)
print(type(adj))

# Load the pre-computed attacked adjacency matrix for this dataset /
# attack type / perturbation rate combination.
ptb_path = f"../attacked_adj/{args.dataset}/{args.ptb_type}_{args.dataset}_{args.ptb_rate}.pt"
# FIX: map_location='cpu' lets a tensor that was saved on a GPU load even
# when that device is unavailable here; it is converted to a scipy CSR
# matrix on CPU immediately afterwards anyway.
perturbed_adj = torch.load(ptb_path, map_location='cpu')
# NOTE: `sp` comes from the `deeprobust.graph.utils` star import
# (scipy.sparse) — verify that alias if the star import ever changes.
perturbed_adj = sp.csr_matrix(perturbed_adj.cpu().numpy())
|
|
|
|
|
def test_prognn(features, adj, labels):
    """Train ProGNN on the given (possibly perturbed) graph and return test accuracy.

    Uses the module-level `args`, `device`, and train/val/test index splits.
    The adjacency is passed through unnormalized (preprocess_adj=False);
    ProGNN learns/cleans the graph structure itself during fitting.
    """
    # Convert scipy/numpy inputs to torch tensors on the target device.
    adj, features, labels = preprocess(adj, features, labels,
                                       preprocess_adj=False, device=device)

    num_features = features.shape[1]
    num_classes = labels.max().item() + 1

    # Backbone GCN that ProGNN trains jointly with the learned adjacency.
    backbone = GCN(nfeat=num_features,
                   nhid=args.hidden,
                   nclass=num_classes,
                   dropout=args.dropout,
                   device=device).to(device)

    prognn = ProGNN(backbone, args, device)
    prognn.fit(features, adj, labels, idx_train, idx_val)
    return prognn.test(features, labels, idx_test)
|
|
|
|
|
|
|
|
def main():
    """Run one ProGNN experiment and append its accuracy to a per-setting CSV."""
    acc = test_prognn(features, perturbed_adj, labels)

    result_dir = "../result"
    os.makedirs(result_dir, exist_ok=True)

    out_path = os.path.join(
        result_dir, f"ProGNN_{args.dataset}_{args.ptb_type}_{args.ptb_rate}.csv")
    record = [f"{args.dataset} ", f" {args.ptb_type} ", f" {args.ptb_rate} ", f" {acc}"]

    try:
        # Decide on the header before opening in append mode.
        needs_header = not os.path.isfile(out_path)
        with open(out_path, 'a', newline='') as fh:
            writer = csv.writer(fh)
            if needs_header:
                writer.writerow(["dataset ", "ptb_type ", "ptb_rate ", "accuracy"])
            writer.writerow(record)
    except Exception as e:
        # Best-effort logging: a failed write should not crash the experiment.
        print(f"[Error] Failed to write CSV: {e}")


if __name__ == '__main__':
    main()