import numpy as np
import scipy.sparse as sp
import os.path as osp
import urllib.request
import sys
import pickle as pkl
import networkx as nx
from deeprobust.graph.utils import get_train_val_test, get_train_val_test_gcn
import zipfile
import json
import platform


class Dataset():
"""Dataset class contains four citation network datasets "cora", "cora-ml", "citeseer" and "pubmed",
and one blog dataset "Polblogs". Datasets "ACM", "BlogCatalog", "Flickr", "UAI",
"Flickr" are also available. See more details in https://github.com/DSE-MSU/DeepRobust/tree/master/deeprobust/graph#supported-datasets.
The 'cora', 'cora-ml', 'polblogs' and 'citeseer' are downloaded from https://github.com/danielzuegner/gnn-meta-attack/tree/master/data, and 'pubmed' is from https://github.com/tkipf/gcn/tree/master/gcn/data.
Parameters
----------
root : string
root directory where the dataset should be saved.
name : string
dataset name, it can be chosen from ['cora', 'citeseer', 'cora_ml', 'polblogs',
'pubmed', 'acm', 'blogcatalog', 'uai', 'flickr']
setting : string
        there are three data split settings. It can be chosen from ['nettack', 'gcn', 'prognn'].
        The 'nettack' setting follows the nettack paper, where the largest connected
        component of the graph is selected and 10%/10%/80% of the nodes are used for
        training/validation/test. The 'gcn' setting follows the gcn paper, where the
        full graph is used with 20 samples per class for training, 500 nodes for
        validation, and 1000 nodes for test. (Note that the 'nettack' and 'gcn' settings
        do not provide fixed splits, i.e., different random seeds return different data splits.)
seed : int
random seed for splitting training/validation/test.
require_mask : bool
        set require_mask to True to get the training, validation and test masks
        (self.train_mask, self.val_mask, self.test_mask)
Examples
--------
We can first create an instance of the Dataset class and then take out its attributes.
>>> from deeprobust.graph.data import Dataset
>>> data = Dataset(root='/tmp/', name='cora', seed=15)
>>> adj, features, labels = data.adj, data.features, data.labels
>>> idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
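
    The fixed ProGNN splits and the mask interface work the same way (a minimal
    sketch; the 'prognn' setting uses fixed splits, so no seed is needed):

    >>> data = Dataset(root='/tmp/', name='cora', setting='prognn', require_mask=True)
    >>> train_mask, val_mask, test_mask = data.train_mask, data.val_mask, data.test_mask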
"""
def __init__(self, root, name, setting='nettack', seed=None, require_mask=False):
self.name = name.lower()
self.setting = setting.lower()
        assert self.name in ['cora', 'citeseer', 'cora_ml', 'polblogs',
                             'pubmed', 'acm', 'blogcatalog', 'uai', 'flickr'], \
            'Currently only support cora, citeseer, cora_ml, ' + \
            'polblogs, pubmed, acm, blogcatalog, uai, flickr'
        assert self.setting in ['gcn', 'nettack', 'prognn'], "Settings should be" + \
            " chosen from ['gcn', 'nettack', 'prognn']"
self.seed = seed
        self.url = 'https://raw.githubusercontent.com/danielzuegner/gnn-meta-attack/master/data/%s.npz' % self.name
if platform.system() == 'Windows':
self.root = root
else:
self.root = osp.expanduser(osp.normpath(root))
        self.data_folder = osp.join(self.root, self.name)
self.data_filename = self.data_folder + '.npz'
self.require_mask = require_mask
        self.require_lcc = (self.setting != 'gcn')
self.adj, self.features, self.labels = self.load_data()
        if self.setting == 'prognn':
            assert self.name in ['cora', 'citeseer', 'pubmed', 'cora_ml', 'polblogs', 'flickr'], \
                "ProGNN splits are only available for cora, citeseer, pubmed, cora_ml, polblogs and flickr"
self.idx_train, self.idx_val, self.idx_test = self.get_prognn_splits()
else:
self.idx_train, self.idx_val, self.idx_test = self.get_train_val_test()
if self.require_mask:
self.get_mask()
def get_train_val_test(self):
"""Get training, validation, test splits according to self.setting (either 'nettack' or 'gcn').
"""
if self.setting == 'nettack':
return get_train_val_test(nnodes=self.adj.shape[0], val_size=0.1, test_size=0.8, stratify=self.labels, seed=self.seed)
if self.setting == 'gcn':
return get_train_val_test_gcn(self.labels, seed=self.seed)
def get_prognn_splits(self):
"""Get target nodes incides, which is the nodes with degree > 10 in the test set."""
url = 'https://raw.githubusercontent.com/ChandlerBang/Pro-GNN/' + \
'master/splits/{}_prognn_splits.json'.format(self.name)
json_file = osp.join(self.root,
'{}_prognn_splits.json'.format(self.name))
if not osp.exists(json_file):
self.download_file(url, json_file)
with open(json_file, 'r') as f:
            idx = json.load(f)
return np.array(idx['idx_train']), \
np.array(idx['idx_val']), np.array(idx['idx_test'])
def load_data(self):
print('Loading {} dataset...'.format(self.name))
if self.name == 'pubmed':
return self.load_pubmed()
if self.name in ['acm', 'blogcatalog', 'uai', 'flickr']:
return self.load_zip()
if not osp.exists(self.data_filename):
self.download_npz()
adj, features, labels = self.get_adj()
return adj, features, labels
def download_file(self, url, file):
        print('Downloading from {} to {}'.format(url, file))
        try:
            urllib.request.urlretrieve(url, file)
        except Exception:
            raise Exception("Download failed! Make sure you have a stable "
                            "Internet connection and enter the right name")
def download_npz(self):
"""Download adjacen matrix npz file from self.url.
"""
print('Downloading from {} to {}'.format(self.url, self.data_filename))
try:
urllib.request.urlretrieve(self.url, self.data_filename)
print('Done!')
        except Exception:
            raise Exception('Download failed! Make sure you have a stable '
                            'Internet connection and enter the right name')
def download_pubmed(self, name):
url = 'https://raw.githubusercontent.com/tkipf/gcn/master/gcn/data/'
try:
print('Downloading', url)
urllib.request.urlretrieve(url + name, osp.join(self.root, name))
print('Done!')
        except Exception:
            raise Exception('Download failed! Make sure you have a stable '
                            'Internet connection and enter the right name')
def download_zip(self, name):
url = 'https://raw.githubusercontent.com/ChandlerBang/Pro-GNN/master/other_datasets/{}.zip'.\
format(name)
try:
            print('Downloading', url)
            urllib.request.urlretrieve(url, osp.join(self.root, name+'.zip'))
            print('Done!')
        except Exception:
            raise Exception('Download failed! Make sure you have a stable '
                            'Internet connection and enter the right name')
def load_zip(self):
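        """Load the acm/blogcatalog/uai/flickr datasets, which ship as a zip of three
        plain-text files: '<name>.feature' (dense feature matrix), '<name>.label'
        (one label per node) and '<name>.edge' (edge list).
        """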
data_filename = self.data_folder + '.zip'
name = self.name
if not osp.exists(data_filename):
self.download_zip(name)
with zipfile.ZipFile(data_filename, 'r') as zip_ref:
zip_ref.extractall(self.root)
feature_path = osp.join(self.data_folder, '{0}.feature'.format(name))
label_path = osp.join(self.data_folder, '{0}.label'.format(name))
graph_path = osp.join(self.data_folder, '{0}.edge'.format(name))
        features = sp.csr_matrix(np.loadtxt(feature_path, dtype=float), dtype=np.float32)
        labels = np.loadtxt(label_path, dtype=int)
        edges = np.genfromtxt(graph_path, dtype=np.int32)
        n = features.shape[0]
        adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                            shape=(n, n), dtype=np.float32)
        # symmetrize: for each pair (i, j), keep the larger of a_ij and a_ji
        adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
        return adj, features, labels
def load_pubmed(self):
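        """Load pubmed in the Planetoid format used by https://github.com/tkipf/gcn
        ('ind.pubmed.{x,y,tx,ty,allx,ally,graph}' pickles plus a test-index file).
        """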
dataset = 'pubmed'
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
        for suffix in names:
            name = "ind.{}.{}".format(dataset, suffix)
data_filename = osp.join(self.root, name)
if not osp.exists(data_filename):
self.download_pubmed(name)
            with open(data_filename, 'rb') as f:
                objects.append(pkl.load(f, encoding='latin1'))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_file = "ind.{}.test.index".format(dataset)
if not osp.exists(osp.join(self.root, test_idx_file)):
self.download_pubmed(test_idx_file)
test_idx_reorder = parse_index_file(osp.join(self.root, test_idx_file))
test_idx_range = np.sort(test_idx_reorder)
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
labels = np.where(labels)[1]
return adj, features, labels
def get_adj(self):
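        """Build a symmetric, unweighted adjacency matrix (self-loops removed) from
        the downloaded .npz file; if self.require_lcc is set, first restrict the
        graph to its largest connected component.
        """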
adj, features, labels = self.load_npz(self.data_filename)
adj = adj + adj.T
adj = adj.tolil()
adj[adj > 1] = 1
if self.require_lcc:
lcc = self.largest_connected_components(adj)
adj = adj[lcc][:, lcc]
features = features[lcc]
labels = labels[lcc]
assert adj.sum(0).A1.min() > 0, "Graph contains singleton nodes"
        # remove self-loops
        adj.setdiag(0)
adj = adj.astype("float32").tocsr()
adj.eliminate_zeros()
assert np.abs(adj - adj.T).sum() == 0, "Input graph is not symmetric"
assert adj.max() == 1 and len(np.unique(adj[adj.nonzero()].A1)) == 1, "Graph must be unweighted"
return adj, features, labels
def load_npz(self, file_name, is_sparse=True):
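        """Load a graph from an .npz file in the format used by the
        gnn-meta-attack/nettack repositories (CSR components stored under
        'adj_*' and 'attr_*' keys); falls back to identity features when the
        file carries no node attributes.
        """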
with np.load(file_name) as loader:
if is_sparse:
adj = sp.csr_matrix((loader['adj_data'], loader['adj_indices'],
loader['adj_indptr']), shape=loader['adj_shape'])
if 'attr_data' in loader:
features = sp.csr_matrix((loader['attr_data'], loader['attr_indices'],
loader['attr_indptr']), shape=loader['attr_shape'])
else:
features = None
labels = loader.get('labels')
else:
adj = loader['adj_data']
if 'attr_data' in loader:
features = loader['attr_data']
else:
features = None
labels = loader.get('labels')
if features is None:
features = np.eye(adj.shape[0])
features = sp.csr_matrix(features, dtype=np.float32)
return adj, features, labels
def largest_connected_components(self, adj, n_components=1):
"""Select k largest connected components.
Parameters
----------
adj : scipy.sparse.csr_matrix
input adjacency matrix
n_components : int
n largest connected components we want to select
"""
_, component_indices = sp.csgraph.connected_components(adj)
component_sizes = np.bincount(component_indices)
components_to_keep = np.argsort(component_sizes)[::-1][:n_components] # reverse order to sort descending
nodes_to_keep = [
idx for (idx, component) in enumerate(component_indices) if component in components_to_keep]
print("Selecting {0} largest connected components".format(n_components))
return nodes_to_keep
def __repr__(self):
return '{0}(adj_shape={1}, feature_shape={2})'.format(self.name, self.adj.shape, self.features.shape)
def get_mask(self):
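        """Build boolean train/val/test masks (self.train_mask, self.val_mask,
        self.test_mask) and the corresponding one-hot label matrices
        (self.y_train, self.y_val, self.y_test).
        """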
idx_train, idx_val, idx_test = self.idx_train, self.idx_val, self.idx_test
labels = self.onehot(self.labels)
        def build_mask(idx):
            # np.bool was removed in NumPy 1.24+; use the builtin bool dtype
            mask = np.zeros(labels.shape[0], dtype=bool)
            mask[idx] = True
            return mask
def get_y(idx):
mx = np.zeros(labels.shape)
mx[idx] = labels[idx]
return mx
        self.train_mask = build_mask(idx_train)
        self.val_mask = build_mask(idx_val)
        self.test_mask = build_mask(idx_test)
self.y_train, self.y_val, self.y_test = get_y(idx_train), get_y(idx_val), get_y(idx_test)
def onehot(self, labels):
eye = np.identity(labels.max() + 1)
onehot_mx = eye[labels]
return onehot_mx


def parse_index_file(filename):
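    """Parse the test-index file that accompanies the Planetoid-format pubmed data."""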
index = []
for line in open(filename):
index.append(int(line.strip()))
return index


if __name__ == '__main__':
from deeprobust.graph.data import Dataset
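    # sanity check: the fixed ProGNN splits should coincide with the
    # 'nettack' splits generated with seed 15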
for name in ['cora', 'citeseer', 'pubmed', 'cora_ml']:
data = Dataset(root='/tmp/', name=name, setting="prognn")
idx_train = data.idx_train
data2 = Dataset(root='/tmp/', name=name, setting="nettack", seed=15)
idx_train2 = data2.idx_train
assert (idx_train != idx_train2).sum() == 0
data = Dataset(root='/tmp/', name='flickr')
adj, features, labels = data.adj, data.features, data.labels
idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test