code stringlengths 17 6.64M |
|---|
def train(train_loader, optimizer):
    """Run one DGMC training epoch and return the mean per-graph loss.

    Relies on the module-level ``model``, ``device`` and ``generate_y``.
    """
    model.train()
    loss_sum = 0
    for batch in train_loader:
        optimizer.zero_grad()
        batch = batch.to(device)
        S_0, S_L = model(batch.x_s, batch.edge_index_s, batch.edge_attr_s,
                         batch.x_s_batch, batch.x_t, batch.edge_index_t,
                         batch.edge_attr_t, batch.x_t_batch)
        num_graphs = batch.x_s_batch.max().item() + 1
        y = generate_y(num_nodes=10, batch_size=num_graphs)
        # Initial-correspondence loss; add the refined-correspondence loss
        # whenever refinement steps are enabled.
        loss = model.loss(S_0, y)
        if model.num_steps > 0:
            loss = model.loss(S_L, y) + loss
        loss.backward()
        optimizer.step()
        loss_sum += loss.item() * num_graphs
    return loss_sum / len(train_loader.dataset)
|
@torch.no_grad()
def test(test_dataset):
    """Estimate matching accuracy on random graph pairs from ``test_dataset``.

    Two independently shuffled loaders over the same dataset are zipped so
    that source and target graphs are paired at random.  Sampling continues
    until at least ``args.test_samples`` node correspondences have been
    evaluated; returns ``correct / num_examples``.
    """
    model.eval()
    # Independent shuffles yield random (source, target) graph pairings.
    test_loader1 = DataLoader(test_dataset, args.batch_size, shuffle=True)
    test_loader2 = DataLoader(test_dataset, args.batch_size, shuffle=True)
    correct = num_examples = 0
    while (num_examples < args.test_samples):
        for (data_s, data_t) in zip(test_loader1, test_loader2):
            (data_s, data_t) = (data_s.to(device), data_t.to(device))
            (_, S_L) = model(data_s.x, data_s.edge_index, data_s.edge_attr, data_s.batch, data_t.x, data_t.edge_index, data_t.edge_attr, data_t.batch)
            y = generate_y(num_nodes=10, batch_size=data_t.num_graphs)
            correct += model.acc(S_L, y, reduction='sum')
            num_examples += y.size(1)
            # Stop as soon as enough correspondences have been scored.
            if (num_examples >= args.test_samples):
                return (correct / num_examples)
|
def run(i, datasets):
    """Train on the first 20 graphs of each category, then report per-category
    test accuracies (in percent) for run ``i``."""
    shuffled = [ds.shuffle() for ds in datasets]
    train_splits = [ds[:20] for ds in shuffled]
    test_datasets = [ds[20:] for ds in shuffled]
    # Pair every training graph with itself (full cross product, no sampling).
    pair_sets = [PairDataset(split, split, sample=False) for split in train_splits]
    train_loader = DataLoader(torch.utils.data.ConcatDataset(pair_sets),
                              args.batch_size, shuffle=True,
                              follow_batch=['x_s', 'x_t'])
    # Restart from identical initial weights for every run.
    model.load_state_dict(state_dict)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    for epoch in range(1, args.epochs + 1):
        train(train_loader, optimizer)
    accs = [100 * test(ds) for ds in test_datasets]
    print(f'Run {i:02d}:')
    print(' '.join([category.ljust(13) for category in WILLOW.categories]))
    print(' '.join([f'{acc:.2f}'.ljust(13) for acc in accs]))
    return accs
|
def set_seed():
    """Seed PyTorch's RNG with a fixed value so test runs are reproducible."""
    torch.manual_seed(12345)
|
def test_dgmc_repr():
    """repr() lists both message-passing networks and the DGMC settings."""
    model = DGMC(psi_1, psi_2, num_steps=1)
    expected = 'DGMC(\n psi_1=GIN(32, 16, num_layers=2, batch_norm=False, cat=True, lin=True),\n psi_2=GIN(8, 8, num_layers=2, batch_norm=False, cat=True, lin=True),\n num_steps=1, k=-1\n)'
    assert repr(model) == expected
    model.reset_parameters()
|
def test_dgmc_on_single_graphs():
    """Dense (k = -1) and sparse (k = num_nodes) matching must agree.

    Runs the same model twice under identical seeds — once dense, once with
    top-k sparsification wide enough to keep everything — and checks that
    correspondences, losses and metrics coincide.
    """
    set_seed()
    model = DGMC(psi_1, psi_2, num_steps=1)
    (x, e) = (data.x, data.edge_index)
    # Ground truth: identity matching of the graph with itself.
    y = torch.arange(data.num_nodes)
    y = torch.stack([y, y], dim=0)
    # Dense run (default k = -1).  Reseed so both runs share randomness.
    set_seed()
    (S1_0, S1_L) = model(x, e, None, None, x, e, None, None)
    loss1 = model.loss(S1_0, y)
    loss1.backward()
    acc1 = model.acc(S1_0, y)
    hits1_1 = model.hits_at_k(1, S1_0, y)
    hits1_10 = model.hits_at_k(10, S1_0, y)
    hits1_all = model.hits_at_k(data.num_nodes, S1_0, y)
    # Sparse run: k = num_nodes means no candidate is actually pruned.
    set_seed()
    model.k = data.num_nodes
    y = torch.arange(data.num_nodes)
    y = torch.stack([y, y], dim=0)
    (S2_0, S2_L) = model(x, e, None, None, x, e, None, None, y)
    loss2 = model.loss(S2_0, y)
    loss2.backward()
    acc2 = model.acc(S2_0, y)
    hits2_1 = model.hits_at_k(1, S2_0, y)
    hits2_10 = model.hits_at_k(10, S2_0, y)
    hits2_all = model.hits_at_k(data.num_nodes, S2_0, y)
    assert (S1_0.size() == (data.num_nodes, data.num_nodes))
    assert (S1_L.size() == (data.num_nodes, data.num_nodes))
    # Sparse results densify to exactly the dense results.
    assert torch.allclose(S1_0, S2_0.to_dense())
    assert torch.allclose(S1_L, S2_L.to_dense())
    assert torch.allclose(loss1, loss2)
    assert (acc1 == acc2 == hits1_1 == hits2_1)
    assert (hits1_1 <= hits1_10 == hits2_10 <= hits1_all)
    assert (hits1_all == hits2_all == 1.0)
|
def test_dgmc_on_multiple_graphs():
    """Dense and sparse matching must also agree on a batched pair of graphs."""
    set_seed()
    model = DGMC(psi_1, psi_2, num_steps=1)
    # Batch the same graph twice; correspondences go against one target graph.
    batch = Batch.from_data_list([data, data])
    (x, e, b) = (batch.x, batch.edge_index, batch.batch)
    set_seed()  # reseed so both runs see identical randomness
    (S1_0, S1_L) = model(x, e, None, b, x, e, None, b)
    assert (S1_0.size() == (batch.num_nodes, data.num_nodes))
    assert (S1_L.size() == (batch.num_nodes, data.num_nodes))
    # Sparse run with k = num_nodes (no actual pruning) must densify to the
    # dense results.
    set_seed()
    model.k = data.num_nodes
    (S2_0, S2_L) = model(x, e, None, b, x, e, None, b)
    assert torch.allclose(S1_0, S2_0.to_dense())
    assert torch.allclose(S1_L, S2_L.to_dense())
|
def test_dgmc_include_gt():
    """__include_gt__ injects missing ground-truth pairs into the top-k index."""
    model = DGMC(psi_1, psi_2, num_steps=1)
    S_idx = torch.tensor([[[0, 1], [1, 2]], [[1, 2], [0, 1]]])
    s_mask = torch.tensor([[True, False], [True, True]])
    y = torch.tensor([[0, 1], [0, 0]])
    out = model.__include_gt__(S_idx, s_mask, y)
    expected = [[[0, 1], [1, 2]], [[1, 0], [0, 1]]]
    assert out.tolist() == expected
|
def test_gin():
    """GIN output width depends on the `cat` and `lin` flags."""
    model = GIN(16, 32, num_layers=2, batch_norm=True, cat=True, lin=True)
    assert repr(model) == 'GIN(16, 32, num_layers=2, batch_norm=True, cat=True, lin=True)'
    x = torch.randn(100, 16)
    edge_index = torch.randint(100, (2, 400), dtype=torch.long)
    for cat, lin in product([False, True], repeat=2):
        model = GIN(16, 32, 2, True, cat, lin)
        out = model(x, edge_index)
        # Without a final projection, concatenation keeps input + all layers.
        expected_dim = 16 + 2 * 32 if cat and not lin else 32
        assert out.size() == (100, expected_dim)
        assert out.size() == (100, model.out_channels)
|
def test_mlp():
    """MLP maps 16-dimensional inputs to 32-dimensional outputs."""
    model = MLP(16, 32, num_layers=2, batch_norm=True, dropout=0.5)
    assert repr(model) == 'MLP(16, 32, num_layers=2, batch_norm=True, dropout=0.5)'
    out = model(torch.randn(100, 16))
    assert out.size() == (100, 32)
|
def test_rel():
    """RelCNN output width depends on the `cat` and `lin` flags."""
    model = RelCNN(16, 32, num_layers=2, batch_norm=True, cat=True, lin=True, dropout=0.5)
    assert repr(model) == 'RelCNN(16, 32, num_layers=2, batch_norm=True, cat=True, lin=True, dropout=0.5)'
    assert repr(model.convs[0]) == 'RelConv(16, 32)'
    x = torch.randn(100, 16)
    edge_index = torch.randint(100, (2, 400), dtype=torch.long)
    for cat, lin in product([False, True], repeat=2):
        model = RelCNN(16, 32, 2, True, cat, lin, 0.5)
        out = model(x, edge_index)
        # Without a final projection, concatenation keeps input + all layers.
        expected_dim = 16 + 2 * 32 if cat and not lin else 32
        assert out.size() == (100, expected_dim)
        assert out.size() == (100, model.out_channels)
|
def test_spline():
    """SplineCNN output width depends on the `cat` and `lin` flags."""
    model = SplineCNN(16, 32, dim=3, num_layers=2, cat=True, lin=True, dropout=0.5)
    assert repr(model) == 'SplineCNN(16, 32, dim=3, num_layers=2, cat=True, lin=True, dropout=0.5)'
    x = torch.randn(100, 16)
    edge_index = torch.randint(100, (2, 400), dtype=torch.long)
    edge_attr = torch.rand((400, 3))
    for cat, lin in product([False, True], repeat=2):
        model = SplineCNN(16, 32, 3, 2, cat, lin, 0.5)
        out = model(x, edge_index, edge_attr)
        # Without a final projection, concatenation keeps input + all layers.
        expected_dim = 16 + 2 * 32 if cat and not lin else 32
        assert out.size() == (100, expected_dim)
        assert out.size() == (100, model.out_channels)
|
def test_pair_dataset():
    """`sample=True` draws one partner per source graph (len == len_s);
    `sample=False` enumerates the full cross product (len == len_s * len_t)."""
    x = torch.randn(10, 16)
    edge_index = torch.randint(x.size(0), (2, 30), dtype=torch.long)
    data = Data(x=x, edge_index=edge_index)
    dataset = PairDataset([data, data], [data, data], sample=True)
    assert (dataset.__repr__() == 'PairDataset([Data(edge_index=[2, 30], x=[10, 16]), Data(edge_index=[2, 30], x=[10, 16])], [Data(edge_index=[2, 30], x=[10, 16]), Data(edge_index=[2, 30], x=[10, 16])], sample=True)')
    assert (len(dataset) == 2)
    # A pair exposes the source graph as *_s and the target graph as *_t.
    pair = dataset[0]
    assert (len(pair) == 4)
    assert torch.allclose(pair.x_s, x)
    assert (pair.edge_index_s.tolist() == edge_index.tolist())
    assert torch.allclose(pair.x_t, x)
    assert (pair.edge_index_t.tolist() == edge_index.tolist())
    # sample=False: cross product of 2 x 2 graphs -> 4 pairs.
    dataset = PairDataset([data, data], [data, data], sample=False)
    assert (dataset.__repr__() == 'PairDataset([Data(edge_index=[2, 30], x=[10, 16]), Data(edge_index=[2, 30], x=[10, 16])], [Data(edge_index=[2, 30], x=[10, 16]), Data(edge_index=[2, 30], x=[10, 16])], sample=False)')
    assert (len(dataset) == 4)
    pair = dataset[0]
    assert (len(pair) == 4)
    assert torch.allclose(pair.x_s, x)
    assert (pair.edge_index_s.tolist() == edge_index.tolist())
    assert torch.allclose(pair.x_t, x)
    assert (pair.edge_index_t.tolist() == edge_index.tolist())
|
def test_valid_pair_dataset():
    """Like `test_pair_dataset`, but pairs carry a ground-truth correspondence
    `y`; pairing a graph with itself must yield the identity correspondence."""
    x = torch.randn(10, 16)
    edge_index = torch.randint(x.size(0), (2, 30), dtype=torch.long)
    y = torch.randperm(x.size(0))
    data = Data(x=x, edge_index=edge_index, y=y)
    dataset = ValidPairDataset([data, data], [data, data], sample=True)
    assert (dataset.__repr__() == 'ValidPairDataset([Data(edge_index=[2, 30], x=[10, 16], y=[10]), Data(edge_index=[2, 30], x=[10, 16], y=[10])], [Data(edge_index=[2, 30], x=[10, 16], y=[10]), Data(edge_index=[2, 30], x=[10, 16], y=[10])], sample=True)')
    assert (len(dataset) == 2)
    # A pair additionally exposes `y`, hence length 5.
    pair = dataset[0]
    assert (len(pair) == 5)
    assert torch.allclose(pair.x_s, x)
    assert (pair.edge_index_s.tolist() == edge_index.tolist())
    assert torch.allclose(pair.x_t, x)
    assert (pair.edge_index_t.tolist() == edge_index.tolist())
    # Both graphs share the same permutation, so the resolved matching is the
    # identity.
    assert (pair.y.tolist() == torch.arange(x.size(0)).tolist())
    dataset = ValidPairDataset([data, data], [data, data], sample=False)
    assert (dataset.__repr__() == 'ValidPairDataset([Data(edge_index=[2, 30], x=[10, 16], y=[10]), Data(edge_index=[2, 30], x=[10, 16], y=[10])], [Data(edge_index=[2, 30], x=[10, 16], y=[10]), Data(edge_index=[2, 30], x=[10, 16], y=[10])], sample=False)')
    assert (len(dataset) == 4)
    pair = dataset[0]
    assert (len(pair) == 5)
    assert torch.allclose(pair.x_s, x)
    assert (pair.edge_index_s.tolist() == edge_index.tolist())
    assert torch.allclose(pair.x_t, x)
    assert (pair.edge_index_t.tolist() == edge_index.tolist())
    assert (pair.y.tolist() == torch.arange(x.size(0)).tolist())
|
def train(model, loader, optimizer):
    """One training epoch over mini-batched subgraphs, with gradient clipping.

    Relies on the module-level ``criterion``.
    """
    model.train()
    for batch, *extras in loader:
        batch = batch.to(model.device)
        optimizer.zero_grad()
        out = model(batch.x, batch.adj_t, *extras)
        # Only in-mini-batch nodes (the first out.size(0) rows) carry a loss.
        mask = batch.train_mask[:out.size(0)]
        loss = criterion(out[mask], batch.y[:out.size(0)][mask])
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
|
@torch.no_grad()
def test(model, data):
    """Full-batch evaluation; returns (train, val, test) micro-F1 scores."""
    model.eval()
    logits = model(data.x.to(model.device), data.adj_t.to(model.device)).cpu()
    masks = (data.train_mask, data.val_mask, data.test_mask)
    return tuple(compute_micro_f1(logits, data.y, mask) for mask in masks)
|
def train(model, loader, optimizer):
    """One training epoch over mini-batched subgraphs.

    Relies on the module-level ``criterion``.
    """
    model.train()
    for batch, *extras in loader:
        batch = batch.to(model.device)
        optimizer.zero_grad()
        out = model(batch.x, batch.adj_t, *extras)
        # Only in-mini-batch nodes (the first out.size(0) rows) carry a loss.
        n = out.size(0)
        mask = batch.train_mask[:n]
        loss = criterion(out[mask], batch.y[:n][mask])
        loss.backward()
        optimizer.step()
|
@torch.no_grad()
def test(model, data):
    """Full-batch evaluation; returns (train, val, test) micro-F1 scores."""
    model.eval()
    device = model.device
    out = model(data.x.to(device), data.adj_t.to(device)).cpu()
    return (compute_micro_f1(out, data.y, data.train_mask),
            compute_micro_f1(out, data.y, data.val_mask),
            compute_micro_f1(out, data.y, data.test_mask))
|
class GIN(ScalableGNN):
    """GIN with historical embeddings and a noise-consistency regularizer.

    Input/output linear layers sandwich ``num_layers`` GIN convolutions.
    During training, a noise-perturbed copy of each hidden state is passed
    through the same MLP and the deviation is accumulated into ``reg``.
    """

    def __init__(self, num_nodes: int, in_channels: int, hidden_channels: int, out_channels: int, num_layers: int):
        super().__init__(num_nodes, hidden_channels, num_layers, pool_size=2, buffer_size=60000)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.lins = torch.nn.ModuleList()
        self.lins.append(Linear(in_channels, hidden_channels))
        self.lins.append(Linear(hidden_channels, out_channels))
        # GINConv with an Identity nn: the learnable transform is applied
        # separately via `self.mlps` after aggregation.
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.convs.append(GINConv(Identity(), train_eps=True))
        self.mlps = torch.nn.ModuleList()
        for _ in range(num_layers):
            mlp = Sequential(Linear(hidden_channels, hidden_channels), BatchNorm1d(hidden_channels, track_running_stats=False), ReLU(), Linear(hidden_channels, hidden_channels), ReLU())
            self.mlps.append(mlp)

    def forward(self, x: Tensor, adj_t: SparseTensor, *args):
        """Return ``(logits, reg)``; ``reg`` is the noise-consistency term."""
        x = self.lins[0](x).relu_()
        reg = 0
        # All but the last layer push/pull through the histories.
        it = zip(self.convs[:(- 1)], self.mlps[:(- 1)], self.histories)
        for (i, (conv, mlp, history)) in enumerate(it):
            # Bipartite conv: target nodes are the first adj_t.size(0) rows.
            h = conv((x, x[:adj_t.size(0)]), adj_t)
            if ((i > 0) and self.training):
                # MLP output on a noise-perturbed input, for the regularizer.
                approx = mlp((h + (0.1 * torch.randn_like(h))))
            h = mlp(h)
            if ((i > 0) and self.training):
                diff = (h - approx).norm(dim=(- 1))
                reg += (diff.mean() / len(self.histories))
            h += x[:h.size(0)]  # residual connection
            x = self.push_and_pull(history, h, *args)
        h = self.convs[(- 1)]((x, x[:adj_t.size(0)]), adj_t)
        h = self.mlps[(- 1)](h)
        h += x[:h.size(0)]
        x = self.lins[1](h)
        return (x, reg)

    @torch.no_grad()
    def forward_layer(self, layer: int, x: Tensor, adj_t: SparseTensor, state):
        """Compute a single layer's output for layer-wise inference."""
        if (layer == 0):
            x = self.lins[0](x).relu_()
        h = self.convs[layer]((x, x[:adj_t.size(0)]), adj_t)
        h = self.mlps[layer](h)
        h += x[:h.size(0)]
        if (layer == (self.num_layers - 1)):
            h = self.lins[1](h)
        return h
|
def train(model, loader, optimizer):
    """One epoch; returns the example-weighted mean training loss.

    The model returns ``(logits, reg)``; the regularizer is added to the
    criterion loss.  Relies on the module-level ``criterion``.
    """
    model.train()
    loss_sum = example_sum = 0
    for batch, *extras in loader:
        batch = batch.to(model.device)
        optimizer.zero_grad()
        out, reg = model(batch.x, batch.adj_t, *extras)
        loss = criterion(out, batch.y[:out.size(0)]) + reg
        loss.backward()
        optimizer.step()
        n = int(out.size(0))
        loss_sum += float(loss) * n
        example_sum += n
    return loss_sum / example_sum
|
@torch.no_grad()
def mini_test(model, loader, y):
    """Layer-wise inference via the loader; returns plain accuracy against y."""
    model.eval()
    pred = model(loader=loader).argmax(dim=-1)
    return int((pred == y).sum()) / y.size(0)
|
@torch.no_grad()
def full_test(model, loader):
    """Accuracy over all mini-batches using direct forward passes.

    Relies on the module-level ``device``.
    """
    model.eval()
    n_correct = n_total = 0
    for batch in loader:
        batch = batch.to(device)
        out, _ = model(batch.x, batch.adj_t)
        n_correct += int((out.argmax(dim=-1) == batch.y).sum())
        n_total += out.size(0)
    return n_correct / n_total
|
def mini_train(model, loader, criterion, optimizer, max_steps, grad_norm=None, edge_dropout=0.0):
    """One epoch of mini-batch training; returns the mean loss per trained node.

    Args:
        model: The scalable GNN; must expose ``.device``.
        loader: Yields ``(batch, batch_size, *args)`` tuples.
        criterion: Loss on ``(logits, targets)``.
        optimizer: Optimizer over ``model.parameters()``.
        max_steps: Stop the epoch early after this many batches.
        grad_norm: Optional gradient-norm clipping threshold.
        edge_dropout: Probability of dropping edges from the adjacency.

    NOTE(review): if every batch is skipped (no training nodes at all) the
    final division raises ZeroDivisionError — presumably never happens with
    real splits; verify against the callers.
    """
    model.train()
    total_loss = total_examples = 0
    for (i, (batch, batch_size, *args)) in enumerate(loader):
        x = batch.x.to(model.device)
        adj_t = batch.adj_t.to(model.device)
        # Only the first `batch_size` nodes are in-mini-batch; the remainder
        # are 1-hop neighbors and are not trained on here.
        y = batch.y[:batch_size].to(model.device)
        train_mask = batch.train_mask[:batch_size].to(model.device)
        if (train_mask.sum() == 0):
            continue  # nothing to learn from in this batch
        adj_t = dropout(adj_t, p=edge_dropout)
        optimizer.zero_grad()
        out = model(x, adj_t, batch_size, *args)
        loss = criterion(out[train_mask], y[train_mask])
        loss.backward()
        if (grad_norm is not None):
            torch.nn.utils.clip_grad_norm_(model.parameters(), grad_norm)
        optimizer.step()
        total_loss += (float(loss) * int(train_mask.sum()))
        total_examples += int(train_mask.sum())
        # Early exit after `max_steps` batches, but only when the loader
        # actually has batches left (otherwise finish the epoch normally).
        if (((i + 1) >= max_steps) and ((i + 1) < len(loader))):
            break
    return (total_loss / total_examples)
|
@torch.no_grad()
def full_test(model, data):
    """Return CPU logits from a single full-batch forward pass."""
    model.eval()
    x = data.x.to(model.device)
    adj_t = data.adj_t.to(model.device)
    return model(x, adj_t).cpu()
|
@torch.no_grad()
def mini_test(model, loader):
    """Run layer-wise inference driven by `loader` and return the model output."""
    model.eval()
    out = model(loader=loader)
    return out
|
@hydra.main(config_path='conf', config_name='config')
def main(conf):
    """End-to-end training of a scalable GNN, driven by a hydra config.

    Loads and METIS-partitions the dataset, optionally adds self-loops and
    GCN normalization, trains with ``mini_train``, evaluates layer-wise with
    ``mini_test`` (plus full-batch passes on the dedicated PPI splits), and
    reports the test accuracy at the best validation epoch.
    """
    # Select the per-dataset hyper-parameter group.
    conf.model.params = conf.model.params[conf.dataset.name]
    params = conf.model.params
    print(OmegaConf.to_yaml(conf))
    # `edge_dropout` is optional in the config; default to disabled.
    # Narrowed from a bare `except:` so that KeyboardInterrupt/SystemExit are
    # no longer swallowed here.
    try:
        edge_dropout = params.edge_dropout
    except Exception:
        edge_dropout = 0.0
    # A string value (e.g. 'None') in the config disables gradient clipping.
    grad_norm = (None if isinstance(params.grad_norm, str) else params.grad_norm)
    device = (f'cuda:{conf.device}' if torch.cuda.is_available() else 'cpu')

    t = time.perf_counter()
    print('Loading data...', end=' ', flush=True)
    (data, in_channels, out_channels) = get_data(conf.root, conf.dataset.name)
    print(f'Done! [{(time.perf_counter() - t):.2f}s]')

    # Cluster the graph and reorder nodes so each cluster is contiguous.
    (perm, ptr) = metis(data.adj_t, num_parts=params.num_parts, log=True)
    data = permute(data, perm, log=True)

    if conf.model.loop:
        t = time.perf_counter()
        print('Adding self-loops...', end=' ', flush=True)
        data.adj_t = data.adj_t.set_diag()
        print(f'Done! [{(time.perf_counter() - t):.2f}s]')
    if conf.model.norm:
        t = time.perf_counter()
        print('Normalizing data...', end=' ', flush=True)
        data.adj_t = gcn_norm(data.adj_t, add_self_loops=False)
        print(f'Done! [{(time.perf_counter() - t):.2f}s]')

    # Multi-label targets (2-D `y`) need BCE; single-label uses cross-entropy.
    if (data.y.dim() == 1):
        criterion = torch.nn.CrossEntropyLoss()
    else:
        criterion = torch.nn.BCEWithLogitsLoss()

    train_loader = SubgraphLoader(data, ptr, batch_size=params.batch_size, shuffle=True, num_workers=params.num_workers, persistent_workers=(params.num_workers > 0))
    # Use attribute access for consistency with every other `params` read.
    eval_loader = EvalSubgraphLoader(data, ptr, batch_size=params.batch_size)

    # PPI ships dedicated validation/test graphs; preprocess them identically.
    if (conf.dataset.name == 'ppi'):
        (val_data, _, _) = get_ppi(conf.root, split='val')
        (test_data, _, _) = get_ppi(conf.root, split='test')
        if conf.model.loop:
            val_data.adj_t = val_data.adj_t.set_diag()
            test_data.adj_t = test_data.adj_t.set_diag()
        if conf.model.norm:
            val_data.adj_t = gcn_norm(val_data.adj_t, add_self_loops=False)
            test_data.adj_t = gcn_norm(test_data.adj_t, add_self_loops=False)

    t = time.perf_counter()
    print('Calculating buffer size...', end=' ', flush=True)
    # Largest number of nodes pulled at once bounds the pinned-buffer size.
    buffer_size = max([n_id.numel() for (_, _, n_id, _, _) in eval_loader])
    print(f'Done! [{(time.perf_counter() - t):.2f}s] -> {buffer_size}')

    kwargs = {}
    if (conf.model.name[:3] == 'PNA'):
        # PNA-style models need the in-degree histogram of the full graph.
        kwargs['deg'] = data.adj_t.storage.rowcount()
    GNN = getattr(models, conf.model.name)
    model = GNN(num_nodes=data.num_nodes, in_channels=in_channels, out_channels=out_channels, pool_size=params.pool_size, buffer_size=buffer_size, **params.architecture, **kwargs).to(device)
    # Separate weight-decay groups for regularized vs. non-regularized modules.
    optimizer = torch.optim.Adam([dict(params=model.reg_modules.parameters(), weight_decay=params.reg_weight_decay), dict(params=model.nonreg_modules.parameters(), weight_decay=params.nonreg_weight_decay)], lr=params.lr)

    # One inference pass to populate the historical embeddings.
    t = time.perf_counter()
    print('Fill history...', end=' ', flush=True)
    mini_test(model, eval_loader)
    print(f'Done! [{(time.perf_counter() - t):.2f}s]')

    best_val_acc = test_acc = 0
    for epoch in range(1, (params.epochs + 1)):
        loss = mini_train(model, train_loader, criterion, optimizer, params.max_steps, grad_norm, edge_dropout)
        out = mini_test(model, eval_loader)
        train_acc = compute_micro_f1(out, data.y, data.train_mask)
        if (conf.dataset.name != 'ppi'):
            val_acc = compute_micro_f1(out, data.y, data.val_mask)
            tmp_test_acc = compute_micro_f1(out, data.y, data.test_mask)
        else:
            val_acc = compute_micro_f1(full_test(model, val_data), val_data.y)
            tmp_test_acc = compute_micro_f1(full_test(model, test_data), test_data.y)
        # Track the test accuracy at the best validation epoch.
        if (val_acc > best_val_acc):
            best_val_acc = val_acc
            test_acc = tmp_test_acc
        if ((epoch % conf.log_every) == 0):
            print(f'Epoch: {epoch:04d}, Loss: {loss:.4f}, Train: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {tmp_test_acc:.4f}, Final: {test_acc:.4f}')
    print('=========================')
    print(f'Val: {best_val_acc:.4f}, Test: {test_acc:.4f}')
|
def get_extensions():
    """Build the list of C++/CUDA extension modules found under ``csrc/``."""
    extension_cls = CppExtension
    define_macros = []
    libraries = []
    compile_args = {'cxx': []}
    link_args = []
    info = parallel_info()
    if 'parallel backend: OpenMP' in info and 'OpenMP not found' not in info:
        compile_args['cxx'].append('-DAT_PARALLEL_OPENMP')
        compile_args['cxx'].append('/openmp' if sys.platform == 'win32' else '-fopenmp')
    else:
        print('Compiling without OpenMP...')
    if WITH_CUDA:
        extension_cls = CUDAExtension
        define_macros.append(('WITH_CUDA', None))
        nvcc_flags = os.getenv('NVCC_FLAGS', '')
        nvcc_flags = nvcc_flags.split(' ') if nvcc_flags else []
        nvcc_flags += ['-arch=sm_35', '--expt-relaxed-constexpr']
        compile_args['nvcc'] = nvcc_flags
    extensions_dir = osp.join('csrc')
    extensions = []
    for main in glob.glob(osp.join(extensions_dir, '*.cpp')):
        # Strip directory and the '.cpp' suffix to get the extension name.
        name = main.split(os.sep)[-1][:-4]
        sources = [main]
        # Pull in optional CPU- and CUDA-specific companion sources.
        cpu_path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
        if osp.exists(cpu_path):
            sources.append(cpu_path)
        cuda_path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
        if WITH_CUDA and osp.exists(cuda_path):
            sources.append(cuda_path)
        extensions.append(extension_cls(
            'torch_geometric_autoscale._' + name, sources,
            include_dirs=[extensions_dir], define_macros=define_macros,
            extra_compile_args=compile_args, extra_link_args=link_args,
            libraries=libraries))
    return extensions
|
def get_planetoid(root: str, name: str) -> Tuple[Data, int, int]:
    """Load a Planetoid dataset; returns (data, num_features, num_classes)."""
    transform = T.Compose([T.NormalizeFeatures(), T.ToSparseTensor()])
    dataset = Planetoid(f'{root}/Planetoid', name, transform=transform)
    data = dataset[0]
    return data, dataset.num_features, dataset.num_classes
|
def get_wikics(root: str) -> Tuple[Data, int, int]:
    """Load WikiCS with a symmetrized adjacency matrix."""
    dataset = WikiCS(f'{root}/WIKICS', transform=T.ToSparseTensor())
    data = dataset[0]
    data.adj_t = data.adj_t.to_symmetric()
    # Repurpose the early-stopping split as the validation split.
    data.val_mask, data.stopping_mask = data.stopping_mask, None
    return data, dataset.num_features, dataset.num_classes
|
def get_coauthor(root: str, name: str) -> Tuple[Data, int, int]:
    """Load a Coauthor graph with deterministic random 20/30-per-class splits."""
    dataset = Coauthor(f'{root}/Coauthor', name, transform=T.ToSparseTensor())
    data = dataset[0]
    torch.manual_seed(12345)  # fixed seed -> reproducible split masks
    masks = gen_masks(data.y, 20, 30, 20)
    data.train_mask, data.val_mask, data.test_mask = masks
    return data, dataset.num_features, dataset.num_classes
|
def get_amazon(root: str, name: str) -> Tuple[Data, int, int]:
    """Load an Amazon graph with deterministic random 20/30-per-class splits."""
    dataset = Amazon(f'{root}/Amazon', name, transform=T.ToSparseTensor())
    data = dataset[0]
    torch.manual_seed(12345)  # fixed seed -> reproducible split masks
    masks = gen_masks(data.y, 20, 30, 20)
    data.train_mask, data.val_mask, data.test_mask = masks
    return data, dataset.num_features, dataset.num_classes
|
def get_arxiv(root: str) -> Tuple[Data, int, int]:
    """Load ogbn-arxiv; symmetrizes edges and converts split indices to masks."""
    dataset = PygNodePropPredDataset('ogbn-arxiv', f'{root}/OGB',
                                     pre_transform=T.ToSparseTensor())
    data = dataset[0]
    data.adj_t = data.adj_t.to_symmetric()
    data.node_year = None  # drop unused metadata
    data.y = data.y.view(-1)
    split = dataset.get_idx_split()
    for attr, key in [('train_mask', 'train'), ('val_mask', 'valid'),
                      ('test_mask', 'test')]:
        data[attr] = index2mask(split[key], data.num_nodes)
    return data, dataset.num_features, dataset.num_classes
|
def get_products(root: str) -> Tuple[Data, int, int]:
    """Load ogbn-products and convert the official split indices to masks."""
    dataset = PygNodePropPredDataset('ogbn-products', f'{root}/OGB',
                                     pre_transform=T.ToSparseTensor())
    data = dataset[0]
    data.y = data.y.view(-1)
    split = dataset.get_idx_split()
    for attr, key in [('train_mask', 'train'), ('val_mask', 'valid'),
                      ('test_mask', 'test')]:
        data[attr] = index2mask(split[key], data.num_nodes)
    return data, dataset.num_features, dataset.num_classes
|
def get_yelp(root: str) -> Tuple[Data, int, int]:
    """Load Yelp with standardized (zero-mean, unit-variance) node features."""
    dataset = Yelp(f'{root}/YELP', pre_transform=T.ToSparseTensor())
    data = dataset[0]
    mean, std = data.x.mean(dim=0), data.x.std(dim=0)
    data.x = (data.x - mean) / std
    return data, dataset.num_features, dataset.num_classes
|
def get_flickr(root: str) -> Tuple[Data, int, int]:
    """Load Flickr; returns (data, num_features, num_classes)."""
    dataset = Flickr(f'{root}/Flickr', pre_transform=T.ToSparseTensor())
    data = dataset[0]
    return data, dataset.num_features, dataset.num_classes
|
def get_reddit(root: str) -> Tuple[Data, int, int]:
    """Load Reddit2 with standardized (zero-mean, unit-variance) features."""
    dataset = Reddit2(f'{root}/Reddit2', pre_transform=T.ToSparseTensor())
    data = dataset[0]
    mean, std = data.x.mean(dim=0), data.x.std(dim=0)
    data.x = (data.x - mean) / std
    return data, dataset.num_features, dataset.num_classes
|
def get_ppi(root: str, split: str = 'train') -> Tuple[Data, int, int]:
    """Load one PPI split merged into a single graph.

    Every node is marked as belonging to the requested split.
    """
    dataset = PPI(f'{root}/PPI', split=split, pre_transform=T.ToSparseTensor())
    merged = Batch.from_data_list(dataset)
    # Drop batching bookkeeping: downstream code expects one plain graph.
    merged.batch = None
    merged.ptr = None
    merged[f'{split}_mask'] = torch.ones(merged.num_nodes, dtype=torch.bool)
    return merged, dataset.num_features, dataset.num_classes
|
def get_sbm(root: str, name: str) -> Tuple[Data, int, int]:
    """Load a GNN-Benchmark SBM dataset (train split) merged into one graph."""
    dataset = GNNBenchmarkDataset(f'{root}/SBM', name, split='train',
                                  pre_transform=T.ToSparseTensor())
    merged = Batch.from_data_list(dataset)
    # Drop batching bookkeeping: downstream code expects one plain graph.
    merged.batch = None
    merged.ptr = None
    return merged, dataset.num_features, dataset.num_classes
|
def get_data(root: str, name: str) -> 'Tuple[Data, int, int]':
    """Dispatch to the dataset-specific loader for ``name``.

    Args:
        root: Directory under which datasets are stored/downloaded.
        name: Case-insensitive dataset identifier.

    Returns:
        ``(data, num_features, num_classes)``.

    Raises:
        NotImplementedError: If ``name`` is not a known dataset.
    """
    # Lower-case once instead of on every comparison.
    key = name.lower()
    if key in ['cora', 'citeseer', 'pubmed']:
        return get_planetoid(root, name)
    elif key in ['coauthorcs', 'coauthorphysics']:
        return get_coauthor(root, name[8:])
    elif key in ['amazoncomputers', 'amazonphoto']:
        return get_amazon(root, name[6:])
    elif key == 'wikics':
        return get_wikics(root)
    elif key in ['cluster', 'pattern']:
        return get_sbm(root, name)
    elif key == 'reddit':
        return get_reddit(root)
    elif key == 'ppi':
        return get_ppi(root)
    elif key == 'flickr':
        return get_flickr(root)
    elif key == 'yelp':
        return get_yelp(root)
    elif key in ['ogbn-arxiv', 'arxiv']:
        return get_arxiv(root)
    elif key in ['ogbn-products', 'products']:
        return get_products(root)
    else:
        # Include the offending name so config typos are easy to diagnose.
        raise NotImplementedError(f"Unknown dataset '{name}'")
|
class History(torch.nn.Module):
    """A historical embedding storage module."""

    def __init__(self, num_embeddings: int, embedding_dim: int, device=None):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # Pin host memory when the embeddings live on the CPU so that later
        # host-to-device copies can run asynchronously.
        pin_memory = ((device is None) or (str(device) == 'cpu'))
        self.emb = torch.empty(num_embeddings, embedding_dim, device=device, pin_memory=pin_memory)
        # Target device for `pull` results; updated through `_apply` when the
        # module is moved (the embedding buffer itself stays where it is).
        self._device = torch.device('cpu')
        self.reset_parameters()

    def reset_parameters(self):
        self.emb.fill_(0)

    def _apply(self, fn):
        # Intercept `.to()`/`.cuda()`: only record the destination device
        # instead of moving `self.emb` (a plain tensor, not a parameter).
        self._device = fn(torch.zeros(1)).device
        return self

    @torch.no_grad()
    def pull(self, n_id: Optional[Tensor]=None) -> Tensor:
        """Return (a row subset of) the stored embeddings on `self._device`."""
        out = self.emb
        if (n_id is not None):
            assert (n_id.device == self.emb.device)
            out = out.index_select(0, n_id)
        return out.to(device=self._device)

    @torch.no_grad()
    def push(self, x, n_id: Optional[Tensor]=None, offset: Optional[Tensor]=None, count: Optional[Tensor]=None):
        """Write `x` into the history.

        Three modes: full replacement (`n_id is None`), scattered writes via
        `n_id`, or contiguous-chunk writes via `offset`/`count`.
        """
        if ((n_id is None) and (x.size(0) != self.num_embeddings)):
            raise ValueError
        elif ((n_id is None) and (x.size(0) == self.num_embeddings)):
            self.emb.copy_(x)
        elif ((offset is None) or (count is None)):
            assert (n_id.device == self.emb.device)
            self.emb[n_id] = x.to(self.emb.device)
        else:
            # Copy chunks: rows [src_o, src_o+c) of `x` land at rows
            # [dst_o, dst_o+c) of the storage.
            src_o = 0
            x = x.to(self.emb.device)
            for (dst_o, c) in zip(offset.tolist(), count.tolist()):
                self.emb[dst_o:(dst_o + c)] = x[src_o:(src_o + c)]
                src_o += c

    def forward(self, *args, **kwargs):
        """Not supported; `History` is storage only."""
        raise NotImplementedError

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.num_embeddings}, {self.embedding_dim}, emb_device={self.emb.device}, device={self._device})'
|
class SubData(NamedTuple):
    """A subgraph mini-batch plus its global node ids and the per-partition
    offsets/counts used when writing embeddings back to history storage."""
    data: Data
    batch_size: int
    n_id: Tensor
    offset: Tensor
    count: Tensor

    def to(self, *args, **kwargs):
        # Only the graph payload changes device; the index tensors stay put.
        return self._replace(data=self.data.to(*args, **kwargs))
|
class SubgraphLoader(DataLoader):
    """A simple subgraph loader that, given a pre-partioned :obj:`data` object,
    generates subgraphs from mini-batches in :obj:`ptr` (including their 1-hop
    neighbors).
    """

    def __init__(self, data: Data, ptr: Tensor, batch_size: int=1, bipartite: bool=True, log: bool=True, **kwargs):
        self.data = data
        # Cluster boundaries: nodes of partition i are ptr[i]:ptr[i+1].
        self.ptr = ptr
        self.bipartite = bipartite
        self.log = log
        n_id = torch.arange(data.num_nodes)
        # One (partition_id, node_ids) entry per METIS partition.
        batches = n_id.split((ptr[1:] - ptr[:(- 1)]).tolist())
        batches = [(i, batches[i]) for i in range(len(batches))]
        if (batch_size > 1):
            # Subgraphs are built on the fly in the collate function.
            super().__init__(batches, batch_size=batch_size, collate_fn=self.compute_subgraph, **kwargs)
        else:
            # With one partition per batch the subgraphs are static, so
            # pre-compute them once and serve the cached list.
            if log:
                t = time.perf_counter()
                print('Pre-processing subgraphs...', end=' ', flush=True)
            data_list = list(DataLoader(batches, collate_fn=self.compute_subgraph, batch_size=batch_size, **kwargs))
            if log:
                print(f'Done! [{(time.perf_counter() - t):.2f}s]')
            super().__init__(data_list, batch_size=batch_size, collate_fn=(lambda x: x[0]), **kwargs)

    def compute_subgraph(self, batches: List[Tuple[(int, Tensor)]]) -> SubData:
        """Merge the given partitions into one :class:`SubData`, including
        their 1-hop neighborhood."""
        (batch_ids, n_ids) = zip(*batches)
        n_id = torch.cat(n_ids, dim=0)
        batch_id = torch.tensor(batch_ids)
        # In-mini-batch node count (before neighbor expansion below).
        batch_size = n_id.numel()
        # Global offsets/counts of each partition, used later when pushing
        # embeddings back into contiguous history storage.
        offset = self.ptr[batch_id]
        count = self.ptr[batch_id.add_(1)].sub_(offset)
        (rowptr, col, value) = self.data.adj_t.csr()
        # Relabel to local indices and append the 1-hop neighbors to `n_id`.
        (rowptr, col, value, n_id) = relabel_fn(rowptr, col, value, n_id, self.bipartite)
        adj_t = SparseTensor(rowptr=rowptr, col=col, value=value, sparse_sizes=((rowptr.numel() - 1), n_id.numel()), is_sorted=True)
        data = self.data.__class__(adj_t=adj_t)
        for (k, v) in self.data:
            # Slice every node-level attribute down to the subgraph's nodes.
            if (isinstance(v, Tensor) and (v.size(0) == self.data.num_nodes)):
                data[k] = v.index_select(0, n_id)
        return SubData(data, batch_size, n_id, offset, count)

    def __repr__(self):
        return f'{self.__class__.__name__}()'
|
class EvalSubgraphLoader(SubgraphLoader):
    """A simple subgraph loader that, given a pre-partioned :obj:`data` object,
    generates subgraphs from mini-batches in :obj:`ptr` (including their 1-hop
    neighbors).
    In contrast to :class:`SubgraphLoader`, this loader does not generate
    subgraphs from randomly sampled mini-batches, and should therefore only be
    used for evaluation.
    """

    def __init__(self, data: Data, ptr: Tensor, batch_size: int=1, bipartite: bool=True, log: bool=True, **kwargs):
        # Merge `batch_size` consecutive partitions by keeping every
        # `batch_size`-th boundary; re-append the final boundary if dropped.
        ptr = ptr[::batch_size]
        if (int(ptr[(- 1)]) != data.num_nodes):
            ptr = torch.cat([ptr, torch.tensor([data.num_nodes])], dim=0)
        # Deterministic, single-process iteration (no shuffling, no workers).
        super().__init__(data=data, ptr=ptr, batch_size=1, bipartite=bipartite, log=log, shuffle=False, num_workers=0, **kwargs)
|
def metis(adj_t: SparseTensor, num_parts: int, recursive: bool=False, log: bool=True) -> Tuple[(Tensor, Tensor)]:
    """Computes the METIS partition of a given sparse adjacency matrix
    :obj:`adj_t`, returning its "clustered" permutation :obj:`perm` and
    corresponding cluster slices :obj:`ptr`.
    """
    if log:
        t = time.perf_counter()
        print(f'Computing METIS partitioning with {num_parts} parts...', end=' ', flush=True)
    num_nodes = adj_t.size(0)
    if (num_parts <= 1):
        # Degenerate case: a single cluster with the identity permutation.
        (perm, ptr) = (torch.arange(num_nodes), torch.tensor([0, num_nodes]))
    else:
        (rowptr, col, _) = adj_t.csr()
        cluster = partition_fn(rowptr, col, None, num_parts, recursive)
        # Sorting the cluster assignment yields the node permutation; `ptr`
        # marks where each cluster starts in the permuted ordering.
        (cluster, perm) = cluster.sort()
        ptr = torch.ops.torch_sparse.ind2ptr(cluster, num_parts)
    if log:
        print(f'Done! [{(time.perf_counter() - t):.2f}s]')
    return (perm, ptr)
|
def permute(data: Data, perm: Tensor, log: bool=True) -> Data:
    """Permutes a :obj:`data` object according to a given permutation
    :obj:`perm`."""
    if log:
        t = time.perf_counter()
        print('Permuting data...', end=' ', flush=True)
    # Shallow copy so the caller's object is left untouched.
    result = copy.copy(data)
    for key, value in result:
        if isinstance(value, Tensor) and value.size(0) == result.num_nodes:
            result[key] = value[perm]
        elif isinstance(value, Tensor) and value.size(0) == result.num_edges:
            # Edge-level attributes would need their own reindexing scheme.
            raise NotImplementedError
        elif isinstance(value, SparseTensor):
            result[key] = value.permute(perm)
    if log:
        print(f'Done! [{(time.perf_counter() - t):.2f}s]')
    return result
|
class APPNP(ScalableGNN):
    """APPNP with historical embeddings.

    A two-layer MLP produces predictions which are then propagated with
    personalized PageRank; every propagation step except the last is backed
    by a :class:`History`.
    """

    def __init__(self, num_nodes: int, in_channels, hidden_channels: int, out_channels: int, num_layers: int, alpha: float, dropout: float=0.0, pool_size: Optional[int]=None, buffer_size: Optional[int]=None, device=None):
        # Histories store `out_channels`-sized states, since propagation
        # happens after the output projection.
        super().__init__(num_nodes, out_channels, num_layers, pool_size, buffer_size, device)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.alpha = alpha  # teleport (restart) probability
        self.dropout = dropout
        self.lins = ModuleList()
        self.lins.append(Linear(in_channels, hidden_channels))
        self.lins.append(Linear(hidden_channels, out_channels))
        # Weight decay is applied to the first linear layer only.
        self.reg_modules = self.lins[:1]
        self.nonreg_modules = self.lins[1:]

    def reset_parameters(self):
        super().reset_parameters()
        for lin in self.lins:
            lin.reset_parameters()

    def forward(self, x: Tensor, adj_t: SparseTensor, *args) -> Tensor:
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lins[0](x)
        x = x.relu()
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lins[1](x)
        # Teleport target: predictions of the in-mini-batch nodes.
        x_0 = x[:adj_t.size(0)]
        for history in self.histories:
            # One personalized-PageRank step, then sync with the history.
            x = (((1 - self.alpha) * (adj_t @ x)) + (self.alpha * x_0))
            x = self.push_and_pull(history, x, *args)
        x = (((1 - self.alpha) * (adj_t @ x)) + (self.alpha * x_0))
        return x

    @torch.no_grad()
    def forward_layer(self, layer, x, adj_t, state):
        """Single propagation step for layer-wise inference.

        Caches the teleport target in ``state['x_0']`` on the first layer.
        """
        if (layer == 0):
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.lins[0](x)
            x = x.relu()
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = x_0 = self.lins[1](x)
            state['x_0'] = x_0[:adj_t.size(0)]
        x = (((1 - self.alpha) * (adj_t @ x)) + (self.alpha * state['x_0']))
        return x
|
class ScalableGNN(torch.nn.Module):
    """An abstract class for implementing scalable GNNs via historical
    embeddings.

    This class takes care of initializing :obj:`num_layers - 1` historical
    embeddings, and provides a convenient interface to push recent node
    embeddings to the history, and to pull previous embeddings from the
    history.
    In case historical embeddings are stored on the CPU, they will reside
    inside pinned memory, which allows for asynchronous memory transfers of
    historical embeddings.
    For this, this class maintains an :class:`AsyncIOPool` object that
    implements the underlying mechanisms of asynchronous memory transfers.

    Args:
        num_nodes (int): The number of nodes in the graph.
        hidden_channels (int): The number of hidden channels of the model.
            As a current restriction, all intermediate node embeddings need
            to utilize the same number of features.
        num_layers (int): The number of layers of the model.
        pool_size (int, optional): The number of pinned CPU buffers for
            pulling histories and transferring them to GPU.
            Needs to be set in order to make use of asynchronous memory
            transfers. (default: :obj:`None`)
        buffer_size (int, optional): The size of pinned CPU buffers, i.e.
            the maximum number of out-of-mini-batch nodes pulled at once.
            Needs to be set in order to make use of asynchronous memory
            transfers. (default: :obj:`None`)
    """

    def __init__(self, num_nodes: int, hidden_channels: int, num_layers: int,
                 pool_size: Optional[int] = None,
                 buffer_size: Optional[int] = None, device=None):
        super().__init__()
        self.num_nodes = num_nodes
        self.hidden_channels = hidden_channels
        self.num_layers = num_layers
        # Default to one pinned buffer per history when not specified.
        self.pool_size = ((num_layers - 1) if (pool_size is None) else pool_size)
        self.buffer_size = buffer_size
        # One history per *intermediate* layer; the final layer output is
        # written to `self._out` instead.
        self.histories = torch.nn.ModuleList(
            [History(num_nodes, hidden_channels, device)
             for _ in range((num_layers - 1))])
        self.pool: Optional[AsyncIOPool] = None
        self._async = False
        self.__out: Optional[Tensor] = None

    @property
    def emb_device(self):
        # Device on which the historical embeddings are stored.
        return self.histories[0].emb.device

    @property
    def device(self):
        # Device on which the model itself computes.
        return self.histories[0]._device

    def _apply(self, fn: Callable) -> 'ScalableGNN':
        # NOTE(review): upstream annotated this `-> None`, but it returns
        # `self` (mirroring torch.nn.Module._apply); annotation fixed here.
        super()._apply(fn)
        # Asynchronous transfers only make sense when the histories live in
        # (pinned) CPU memory while the model runs on a CUDA device, and
        # both pool sizes are configured.
        if ((str(self.emb_device) == 'cpu') and
                (str(self.device)[:4] == 'cuda') and
                (self.pool_size is not None) and
                (self.buffer_size is not None)):
            self.pool = AsyncIOPool(self.pool_size, self.buffer_size,
                                    self.histories[0].embedding_dim)
            self.pool.to(self.device)
        return self

    def reset_parameters(self):
        for history in self.histories:
            history.reset_parameters()

    def __call__(self, x: Optional[Tensor] = None,
                 adj_t: Optional[SparseTensor] = None,
                 batch_size: Optional[int] = None,
                 n_id: Optional[Tensor] = None,
                 offset: Optional[Tensor] = None,
                 count: Optional[Tensor] = None,
                 loader: Optional[EvalSubgraphLoader] = None,
                 **kwargs) -> Tensor:
        """Enhances the call of forward propagation by immediately starting
        to pull historical embeddings for all layers asynchronously.
        After forward propagation is completed, the push of node embeddings
        to the histories will be synchronized.

        For example, given a mini-batch with node indices
        :obj:`n_id = [0, 1, 5, 6, 7, 3, 4]`, where the first 5 nodes
        represent the mini-batched nodes, and nodes :obj:`3` and :obj:`4`
        denote out-of-mini-batch nodes (i.e. the 1-hop neighbors of the
        mini-batch that are not included in the current mini-batch), then
        other input arguments should be given as:

        .. code-block:: python

            batch_size = 5
            offset = [0, 5]
            count = [2, 3]

        Args:
            x (Tensor, optional): Node feature matrix. (default: :obj:`None`)
            adj_t (SparseTensor, optional): The sparse adjacency matrix.
                (default: :obj:`None`)
            batch_size (int, optional): The in-mini-batch size of nodes.
                (default: :obj:`None`)
            n_id (Tensor, optional): The global indices of mini-batched and
                out-of-mini-batch nodes. (default: :obj:`None`)
            offset (Tensor, optional): The offset of mini-batched nodes
                inside a contiguous memory layout. (default: :obj:`None`)
            count (Tensor, optional): The number of mini-batched nodes inside
                a contiguous memory layout. (default: :obj:`None`)
            loader (EvalSubgraphLoader, optional): A subgraph loader used for
                evaluating the given GNN in a layer-wise fashion.
        """
        # Layer-wise inference bypasses the regular forward path entirely.
        if (loader is not None):
            return self.mini_inference(loader)
        # Async mode requires the pool plus full mini-batch metadata.
        self._async = ((self.pool is not None) and
                       (batch_size is not None) and (n_id is not None) and
                       (offset is not None) and (count is not None))
        if ((batch_size is not None) and (not self._async) and
                (str(self.emb_device) == 'cpu') and
                (str(self.device)[:4] == 'cuda')):
            warnings.warn('Asynchronous I/O disabled, although history and model sit on different devices.')
        if self._async:
            # Kick off pulls of out-of-mini-batch embeddings for every layer
            # before the forward pass needs them.
            for hist in self.histories:
                self.pool.async_pull(hist.emb, None, None, n_id[batch_size:])
        out = self.forward(x, adj_t, batch_size, n_id, offset, count, **kwargs)
        if self._async:
            # Wait until all pending pushes into the histories completed.
            for hist in self.histories:
                self.pool.synchronize_push()
        self._async = False
        return out

    def push_and_pull(self, history, x: Tensor,
                      batch_size: Optional[int] = None,
                      n_id: Optional[Tensor] = None,
                      offset: Optional[Tensor] = None,
                      count: Optional[Tensor] = None) -> Tensor:
        """Pushes and pulls information from :obj:`x` to :obj:`history` and
        vice versa.

        Returns the embedding matrix for all :obj:`n_id` nodes, where the
        first :obj:`batch_size` rows are freshly computed and the remainder
        is filled from the history.
        """
        if ((n_id is None) and (x.size(0) != self.num_nodes)):
            # Partial input without index information: nothing to sync.
            return x
        if ((n_id is None) and (x.size(0) == self.num_nodes)):
            # Full-batch execution: simply refresh the whole history.
            history.push(x)
            return x
        assert (n_id is not None)
        if (batch_size is None):
            history.push(x, n_id)
            return x
        if (not self._async):
            # Synchronous path: push in-batch rows, pull the rest.
            history.push(x[:batch_size], n_id[:batch_size], offset, count)
            h = history.pull(n_id[batch_size:])
            return torch.cat([x[:batch_size], h], dim=0)
        else:
            # Asynchronous path: the pull was started in `__call__`; grab the
            # buffer, then start pushing the fresh in-batch embeddings.
            out = self.pool.synchronize_pull()[:(n_id.numel() - batch_size)]
            self.pool.async_push(x[:batch_size], offset, count, history.emb)
            out = torch.cat([x[:batch_size], out], dim=0)
            self.pool.free_pull()
            return out

    @property
    def _out(self):
        # Lazily-allocated pinned CPU buffer for the final-layer output.
        # NOTE(review): relies on the subclass defining `self.out_channels`.
        if (self.__out is None):
            self.__out = torch.empty(self.num_nodes, self.out_channels,
                                     pin_memory=True)
        return self.__out

    @torch.no_grad()
    def mini_inference(self, loader: SubgraphLoader) -> Tensor:
        """An implementation of layer-wise evaluation of GNNs.

        For each individual layer and mini-batch, :meth:`forward_layer`
        computes the next state of node embeddings.
        Additional state (such as residual connections) can be stored in a
        per-batch `state` dictionary.

        NOTE(review): assumes `self.pool` is initialized (i.e. histories on
        CPU, model on CUDA) — confirm before calling on a CPU-only setup.
        """
        # Attach an empty per-batch state dict to every loader entry.
        loader = [(sub_data + ({},)) for sub_data in loader]
        # Layer 0: compute from raw features and push to the first history.
        for (data, batch_size, n_id, offset, count, state) in loader:
            x = data.x.to(self.device)
            adj_t = data.adj_t.to(self.device)
            out = self.forward_layer(0, x, adj_t, state)[:batch_size]
            self.pool.async_push(out, offset, count, self.histories[0].emb)
        self.pool.synchronize_push()
        # Intermediate layers: pull layer (i-1) embeddings, compute layer i,
        # and push the results into history i.
        for i in range(1, len(self.histories)):
            for (_, batch_size, n_id, offset, count, _) in loader:
                self.pool.async_pull(self.histories[(i - 1)].emb, offset,
                                     count, n_id[batch_size:])
            for (batch, batch_size, n_id, offset, count, state) in loader:
                adj_t = batch.adj_t.to(self.device)
                x = self.pool.synchronize_pull()[:n_id.numel()]
                out = self.forward_layer(i, x, adj_t, state)[:batch_size]
                self.pool.async_push(out, offset, count, self.histories[i].emb)
                self.pool.free_pull()
            self.pool.synchronize_push()
        # Final layer: pull from the last history and write the outputs into
        # the private pinned output matrix.
        for (_, batch_size, n_id, offset, count, _) in loader:
            self.pool.async_pull(self.histories[(- 1)].emb, offset, count,
                                 n_id[batch_size:])
        for (batch, batch_size, n_id, offset, count, state) in loader:
            adj_t = batch.adj_t.to(self.device)
            x = self.pool.synchronize_pull()[:n_id.numel()]
            out = self.forward_layer((self.num_layers - 1), x, adj_t,
                                     state)[:batch_size]
            self.pool.async_push(out, offset, count, self._out)
            self.pool.free_pull()
        self.pool.synchronize_push()
        return self._out

    @torch.no_grad()
    def forward_layer(self, layer: int, x: Tensor, adj_t: SparseTensor,
                      state: Dict[str, Any]) -> Tensor:
        """Compute the output of a single layer; implemented by subclasses."""
        raise NotImplementedError
|
class GAT(ScalableGNN):
    """GAT with historical embeddings (see :class:`ScalableGNN`)."""

    def __init__(self, num_nodes: int, in_channels, hidden_channels: int,
                 hidden_heads: int, out_channels: int, out_heads: int,
                 num_layers: int, dropout: float = 0.0,
                 pool_size: Optional[int] = None,
                 buffer_size: Optional[int] = None, device=None):
        # Histories store the concatenated multi-head hidden embeddings.
        super().__init__(num_nodes, hidden_channels * hidden_heads,
                         num_layers, pool_size, buffer_size, device)
        self.in_channels = in_channels
        self.hidden_heads = hidden_heads
        self.out_channels = out_channels
        self.out_heads = out_heads
        self.dropout = dropout

        self.convs = ModuleList()
        # Hidden layers concatenate heads; the output layer averages them.
        for layer in range(num_layers - 1):
            src_dim = (in_channels if layer == 0
                       else hidden_channels * hidden_heads)
            self.convs.append(
                GATConv(src_dim, hidden_channels, hidden_heads, concat=True,
                        dropout=dropout, add_self_loops=False))
        self.convs.append(
            GATConv(hidden_channels * hidden_heads, out_channels, out_heads,
                    concat=False, dropout=dropout, add_self_loops=False))

        self.reg_modules = self.convs
        self.nonreg_modules = ModuleList()

    def reset_parameters(self):
        super().reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()

    def forward(self, x: Tensor, adj_t: SparseTensor, *args) -> Tensor:
        # Hidden layers: dropout -> GAT -> ELU -> history sync.
        for conv, history in zip(self.convs[:-1], self.histories):
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = F.elu(conv((x, x[:adj_t.size(0)]), adj_t))
            x = self.push_and_pull(history, x, *args)
        # Output layer: no non-linearity, no history.
        x = F.dropout(x, p=self.dropout, training=self.training)
        return self.convs[-1]((x, x[:adj_t.size(0)]), adj_t)

    @torch.no_grad()
    def forward_layer(self, layer, x, adj_t, state):
        x = F.dropout(x, p=self.dropout, training=self.training)
        out = self.convs[layer]((x, x[:adj_t.size(0)]), adj_t)
        # ELU on all but the final layer.
        return out.elu() if layer < self.num_layers - 1 else out
|
class GCN(ScalableGNN):
    """GCN with historical embeddings (see :class:`ScalableGNN`).

    Supports optional input dropout, batch normalization, residual
    connections, and an optional input/output linear projection wrapping
    the convolution stack (:obj:`linear=True`).
    """

    def __init__(self, num_nodes: int, in_channels, hidden_channels: int,
                 out_channels: int, num_layers: int, dropout: float = 0.0,
                 drop_input: bool = True, batch_norm: bool = False,
                 residual: bool = False, linear: bool = False,
                 pool_size: Optional[int] = None,
                 buffer_size: Optional[int] = None, device=None):
        super().__init__(num_nodes, hidden_channels, num_layers, pool_size,
                         buffer_size, device)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.drop_input = drop_input
        self.batch_norm = batch_norm
        self.residual = residual
        self.linear = linear

        self.lins = ModuleList()
        if linear:
            # Optional projections before/after the convolution stack.
            self.lins.append(Linear(in_channels, hidden_channels))
            self.lins.append(Linear(hidden_channels, out_channels))

        self.convs = ModuleList()
        for i in range(num_layers):
            in_dim = out_dim = hidden_channels
            # Without the linear wrappers, the first/last conv adapt the
            # input/output dimensionality themselves.
            if ((i == 0) and (not linear)):
                in_dim = in_channels
            if ((i == (num_layers - 1)) and (not linear)):
                out_dim = out_channels
            # normalize=False — presumably the adjacency is pre-normalized
            # by the data pipeline; TODO confirm.
            conv = GCNConv(in_dim, out_dim, normalize=False)
            self.convs.append(conv)

        self.bns = ModuleList()
        for i in range(num_layers):
            bn = BatchNorm1d(hidden_channels)
            self.bns.append(bn)

    @property
    def reg_modules(self):
        # Modules subject to weight decay (all convs + BNs, except the final
        # conv when it acts as the output layer).
        if self.linear:
            return ModuleList((list(self.convs) + list(self.bns)))
        else:
            return ModuleList((list(self.convs[:(- 1)]) + list(self.bns)))

    @property
    def nonreg_modules(self):
        # Output modules excluded from weight decay.
        return (self.lins if self.linear else self.convs[(- 1):])

    def reset_parameters(self):
        super().reset_parameters()
        for lin in self.lins:
            lin.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

    def forward(self, x: Tensor, adj_t: SparseTensor, *args) -> Tensor:
        if self.drop_input:
            x = F.dropout(x, p=self.dropout, training=self.training)
        if self.linear:
            x = self.lins[0](x).relu_()
            x = F.dropout(x, p=self.dropout, training=self.training)
        # Hidden layers: conv -> (BN) -> (residual) -> ReLU -> history sync.
        for (conv, bn, hist) in zip(self.convs[:(- 1)], self.bns,
                                    self.histories):
            h = conv(x, adj_t)
            if self.batch_norm:
                h = bn(h)
            # Residual only when the dimensions actually match (e.g. not on
            # the first layer without the linear wrapper).
            if (self.residual and (h.size((- 1)) == x.size((- 1)))):
                h += x[:h.size(0)]
            x = h.relu_()
            x = self.push_and_pull(hist, x, *args)
            x = F.dropout(x, p=self.dropout, training=self.training)
        h = self.convs[(- 1)](x, adj_t)
        # Without the output projection, the last conv output *is* the
        # prediction.
        if (not self.linear):
            return h
        if self.batch_norm:
            h = self.bns[(- 1)](h)
        if (self.residual and (h.size((- 1)) == x.size((- 1)))):
            h += x[:h.size(0)]
        h = h.relu_()
        h = F.dropout(h, p=self.dropout, training=self.training)
        return self.lins[1](h)

    @torch.no_grad()
    def forward_layer(self, layer, x, adj_t, state):
        """Single-layer evaluation mirroring one iteration of `forward`."""
        if (layer == 0):
            if self.drop_input:
                x = F.dropout(x, p=self.dropout, training=self.training)
            if self.linear:
                x = self.lins[0](x).relu_()
                x = F.dropout(x, p=self.dropout, training=self.training)
        else:
            x = F.dropout(x, p=self.dropout, training=self.training)
        h = self.convs[layer](x, adj_t)
        if ((layer < (self.num_layers - 1)) or self.linear):
            if self.batch_norm:
                h = self.bns[layer](h)
            if (self.residual and (h.size((- 1)) == x.size((- 1)))):
                h += x[:h.size(0)]
            h = h.relu_()
        if self.linear:
            h = F.dropout(h, p=self.dropout, training=self.training)
            h = self.lins[1](h)
        return h
|
class GCN2(ScalableGNN):
    """GCNII (GCN2Conv) with historical embeddings (see :class:`ScalableGNN`).

    All convolutions operate at :obj:`hidden_channels`; linear projections
    map from/to the input/output dimensionality, and every layer receives
    the initial embedding :obj:`x_0` for the initial-residual term.
    """

    def __init__(self, num_nodes: int, in_channels, hidden_channels: int,
                 out_channels: int, num_layers: int, alpha: float,
                 theta: float, shared_weights: bool = True,
                 dropout: float = 0.0, drop_input: bool = True,
                 batch_norm: bool = False, residual: bool = False,
                 pool_size: Optional[int] = None,
                 buffer_size: Optional[int] = None, device=None):
        super().__init__(num_nodes, hidden_channels, num_layers, pool_size,
                         buffer_size, device)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.drop_input = drop_input
        self.batch_norm = batch_norm
        self.residual = residual

        self.lins = ModuleList()
        self.lins.append(Linear(in_channels, hidden_channels))
        self.lins.append(Linear(hidden_channels, out_channels))

        self.convs = ModuleList()
        for i in range(num_layers):
            # `layer=i+1` controls the identity-mapping strength via theta.
            conv = GCN2Conv(hidden_channels, alpha=alpha, theta=theta,
                            layer=(i + 1), shared_weights=shared_weights,
                            normalize=False)
            self.convs.append(conv)

        self.bns = ModuleList()
        for i in range(num_layers):
            bn = BatchNorm1d(hidden_channels)
            self.bns.append(bn)

    @property
    def reg_modules(self):
        # Modules subject to weight decay.
        return ModuleList((list(self.convs) + list(self.bns)))

    @property
    def nonreg_modules(self):
        # Input/output projections, excluded from weight decay.
        return self.lins

    def reset_parameters(self):
        super().reset_parameters()
        for lin in self.lins:
            lin.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

    def forward(self, x: Tensor, adj_t: SparseTensor, *args) -> Tensor:
        if self.drop_input:
            x = F.dropout(x, p=self.dropout, training=self.training)
        # `x_0` is shared with every GCN2Conv for the initial residual.
        x = x_0 = self.lins[0](x).relu_()
        x = F.dropout(x, p=self.dropout, training=self.training)
        for (conv, bn, hist) in zip(self.convs[:(- 1)], self.bns[:(- 1)],
                                    self.histories):
            h = conv(x, x_0, adj_t)
            if self.batch_norm:
                h = bn(h)
            # NOTE(review): unlike `forward_layer`, no dimension guard here —
            # all hidden layers share `hidden_channels`, so sizes match by
            # construction.
            if self.residual:
                h += x[:h.size(0)]
            x = h.relu_()
            x = self.push_and_pull(hist, x, *args)
            x = F.dropout(x, p=self.dropout, training=self.training)
        h = self.convs[(- 1)](x, x_0, adj_t)
        if self.batch_norm:
            h = self.bns[(- 1)](h)
        if self.residual:
            h += x[:h.size(0)]
        x = h.relu_()
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lins[1](x)
        return x

    @torch.no_grad()
    def forward_layer(self, layer, x, adj_t, state):
        """Single-layer evaluation; caches `x_0` in `state` on layer 0."""
        if (layer == 0):
            if self.drop_input:
                x = F.dropout(x, p=self.dropout, training=self.training)
            x = x_0 = self.lins[0](x).relu_()
            state['x_0'] = x_0[:adj_t.size(0)]
        x = F.dropout(x, p=self.dropout, training=self.training)
        h = self.convs[layer](x, state['x_0'], adj_t)
        if self.batch_norm:
            h = self.bns[layer](h)
        if (self.residual and (h.size((- 1)) == x.size((- 1)))):
            h += x[:h.size(0)]
        x = h.relu_()
        # Final layer additionally applies the output projection.
        if (layer == (self.num_layers - 1)):
            x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.lins[1](x)
        return x
|
class PNAConv(MessagePassing):
    """A Principal Neighbourhood Aggregation style convolution.

    Each (aggregator, scaler) pair owns its own pre-/post-linear transform;
    the scaled aggregations are summed and combined with a root
    transformation of the node itself.
    """

    def __init__(self, in_channels: int, out_channels: int,
                 aggregators: List[str], scalers: List[str], deg: Tensor,
                 **kwargs):
        # aggr=None: aggregation is fully handled in
        # `message_and_aggregate` below.
        super().__init__(aggr=None, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aggregators = aggregators
        self.scalers = scalers
        deg = deg.to(torch.float)
        # Average (log-)degree statistics used to normalize the scalers.
        self.avg_deg = {'lin': deg.mean().item(),
                        'log': (deg + 1).log().mean().item()}
        # One pre-/post-transform per (aggregator, scaler) combination.
        self.pre_lins = torch.nn.ModuleList(
            [Linear(in_channels, out_channels)
             for _ in range((len(aggregators) * len(scalers)))])
        self.post_lins = torch.nn.ModuleList(
            [Linear(out_channels, out_channels)
             for _ in range((len(aggregators) * len(scalers)))])
        # Root/self transformation added on top of the aggregated messages.
        self.lin = Linear(in_channels, out_channels)
        self.reset_parameters()

    def reset_parameters(self):
        for lin in self.pre_lins:
            lin.reset_parameters()
        for lin in self.post_lins:
            lin.reset_parameters()
        self.lin.reset_parameters()

    def forward(self, x: Tensor, adj_t):
        out = self.propagate(adj_t, x=x)
        # Add the root transformation, truncated to the output rows (adj_t
        # may be rectangular in mini-batch mode).
        out += self.lin(x)[:out.size(0)]
        return out

    def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
        # In-degree of each target node, used by the degree-based scalers.
        deg = adj_t.storage.rowcount().to(x.dtype).view((- 1), 1)
        out = 0
        # `product` order matches the construction order of pre_/post_lins.
        for ((aggr, scaler), pre_lin, post_lin) in zip(
                product(self.aggregators, self.scalers),
                self.pre_lins, self.post_lins):
            h = pre_lin(x).relu_()
            h = adj_t.matmul(h, reduce=aggr)
            h = post_lin(h)
            if (scaler == 'amplification'):
                h *= ((deg + 1).log() / self.avg_deg['log'])
            elif (scaler == 'attenuation'):
                # EPS guards against division by zero for isolated nodes.
                h *= (self.avg_deg['log'] / ((deg + 1).log() + EPS))
            out += h
        return out
|
class PNA(ScalableGNN):
    """PNA with historical embeddings (see :class:`ScalableGNN`)."""

    # NOTE(review): aggregators/scalers annotations corrected from List[int]
    # to List[str] — PNAConv consumes string names ('amplification', ...).
    def __init__(self, num_nodes: int, in_channels: int,
                 hidden_channels: int, out_channels: int, num_layers: int,
                 aggregators: List[str], scalers: List[str], deg: Tensor,
                 dropout: float = 0.0, drop_input: bool = True,
                 batch_norm: bool = False, residual: bool = False,
                 pool_size: Optional[int] = None,
                 buffer_size: Optional[int] = None, device=None):
        super().__init__(num_nodes, hidden_channels, num_layers, pool_size,
                         buffer_size, device)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.dropout = dropout
        self.drop_input = drop_input
        self.batch_norm = batch_norm
        self.residual = residual

        self.convs = ModuleList()
        for i in range(num_layers):
            # First/last conv adapt the input/output dimensionality.
            in_dim = (in_channels if (i == 0) else hidden_channels)
            out_dim = (out_channels if (i == (num_layers - 1))
                       else hidden_channels)
            conv = PNAConv(in_dim, out_dim, aggregators=aggregators,
                           scalers=scalers, deg=deg)
            self.convs.append(conv)

        self.bns = ModuleList()
        for i in range((num_layers - 1)):
            bn = BatchNorm1d(hidden_channels)
            self.bns.append(bn)

    @property
    def reg_modules(self):
        # Modules subject to weight decay (all but the output conv).
        return ModuleList((list(self.convs[:(- 1)]) + list(self.bns)))

    @property
    def nonreg_modules(self):
        return self.convs[(- 1):]

    def reset_parameters(self):
        super().reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

    def forward(self, x: Tensor, adj_t: SparseTensor, *args) -> Tensor:
        if self.drop_input:
            x = F.dropout(x, p=self.dropout, training=self.training)
        # Hidden layers: conv -> (BN) -> (residual) -> ReLU -> history sync.
        for (conv, bn, hist) in zip(self.convs[:(- 1)], self.bns,
                                    self.histories):
            h = conv(x, adj_t)
            if self.batch_norm:
                h = bn(h)
            # Residual only when dimensions match (skips the first layer
            # when in_channels != hidden_channels).
            if (self.residual and (h.size((- 1)) == x.size((- 1)))):
                h += x[:h.size(0)]
            x = h.relu_()
            x = self.push_and_pull(hist, x, *args)
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.convs[(- 1)](x, adj_t)
        return x

    @torch.no_grad()
    def forward_layer(self, layer, x, adj_t, state):
        """Single-layer evaluation mirroring one iteration of `forward`."""
        if ((layer == 0) and self.drop_input):
            x = F.dropout(x, p=self.dropout, training=self.training)
        h = self.convs[layer](x, adj_t)
        if (layer < (self.num_layers - 1)):
            if self.batch_norm:
                h = self.bns[layer](h)
            if (self.residual and (h.size((- 1)) == x.size((- 1)))):
                h += x[:h.size(0)]
            h = h.relu_()
            h = F.dropout(h, p=self.dropout, training=self.training)
        return h
|
class PNA_JK(ScalableGNN):
    """PNA with jumping-knowledge style concatenation of all layer outputs
    (see :class:`ScalableGNN` for the history machinery).

    All convolutions operate at :obj:`hidden_channels`; the input MLP maps
    into the hidden space and the output projection consumes the
    concatenation of the input embedding plus every layer output.
    """

    # NOTE(review): aggregators/scalers annotations corrected from List[int]
    # to List[str] — PNAConv consumes string names.
    def __init__(self, num_nodes: int, in_channels: int,
                 hidden_channels: int, out_channels: int, num_layers: int,
                 aggregators: List[str], scalers: List[str], deg: Tensor,
                 dropout: float = 0.0, drop_input: bool = True,
                 batch_norm: bool = False, residual: bool = False,
                 pool_size: Optional[int] = None,
                 buffer_size: Optional[int] = None, device=None):
        super().__init__(num_nodes, hidden_channels, num_layers, pool_size,
                         buffer_size, device)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # BUG FIX: this line previously read `(self.num_layers == num_layers)`
        # — a no-op comparison instead of an assignment. The base class
        # already sets the attribute, but the intent is made explicit here.
        self.num_layers = num_layers
        self.dropout = dropout
        self.drop_input = drop_input
        self.batch_norm = batch_norm
        self.residual = residual

        self.lins = ModuleList()
        # Input block: linear -> (BN) -> ReLU.
        self.lins.append(Sequential(
            Linear(in_channels, hidden_channels),
            (BatchNorm1d(hidden_channels) if batch_norm else Identity()),
            ReLU(inplace=True)))
        # Output projection over the jumping-knowledge concatenation
        # (input embedding + one embedding per layer).
        self.lins.append(Linear(((num_layers + 1) * hidden_channels),
                                out_channels))

        self.convs = ModuleList()
        for _ in range(num_layers):
            conv = PNAConv(hidden_channels, hidden_channels,
                           aggregators=aggregators, scalers=scalers, deg=deg)
            self.convs.append(conv)

        self.bns = ModuleList()
        for _ in range(num_layers):
            bn = BatchNorm1d(hidden_channels)
            self.bns.append(bn)

    @property
    def reg_modules(self):
        # Modules subject to weight decay.
        return ModuleList((list(self.convs) + list(self.bns)))

    @property
    def nonreg_modules(self):
        return self.lins

    def reset_parameters(self):
        super().reset_parameters()
        for lin in self.lins:
            lin.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        for bn in self.bns:
            bn.reset_parameters()

    def forward(self, x: Tensor, adj_t: SparseTensor, *args) -> Tensor:
        if self.drop_input:
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.lins[0](x)
        # Collect per-layer embeddings (restricted to output rows) for the
        # final jumping-knowledge concatenation.
        xs = [x[:adj_t.size(0)]]
        for (conv, bn, hist) in zip(self.convs[:(- 1)], self.bns[:(- 1)],
                                    self.histories):
            h = conv(x, adj_t)
            if self.batch_norm:
                h = bn(h)
            if self.residual:
                h += x[:h.size(0)]
            x = h.relu_()
            xs += [x]
            x = self.push_and_pull(hist, x, *args)
            x = F.dropout(x, p=self.dropout, training=self.training)
        h = self.convs[(- 1)](x, adj_t)
        if self.batch_norm:
            h = self.bns[(- 1)](h)
        if self.residual:
            h += x[:h.size(0)]
        x = h.relu_()
        xs += [x]
        x = torch.cat(xs, dim=(- 1))
        x = F.dropout(x, p=self.dropout, training=self.training)
        return self.lins[1](x)

    @torch.no_grad()
    def forward_layer(self, layer, x, adj_t, state):
        """Single-layer evaluation; accumulates JK inputs in `state['xs']`."""
        if (layer == 0):
            if self.drop_input:
                x = F.dropout(x, p=self.dropout, training=self.training)
            x = self.lins[0](x)
            state['xs'] = [x[:adj_t.size(0)]]
        h = self.convs[layer](x, adj_t)
        if self.batch_norm:
            h = self.bns[layer](h)
        if self.residual:
            h += x[:h.size(0)]
        h = h.relu_()
        state['xs'] += [h]
        h = F.dropout(h, p=self.dropout, training=self.training)
        if (layer == (self.num_layers - 1)):
            # Final layer: project the jumping-knowledge concatenation.
            h = torch.cat(state['xs'], dim=(- 1))
            h = F.dropout(h, p=self.dropout, training=self.training)
            h = self.lins[1](h)
        return h
|
class AsyncIOPool(torch.nn.Module):
    """A pool of pinned CPU buffers and CUDA streams for asynchronous
    push/pull of historical embeddings between CPU and GPU.

    Up to :obj:`pool_size` pulls/pushes can be in flight at once; each slot
    owns a pinned CPU buffer, a CUDA buffer, and a dedicated stream pair
    (all allocated lazily on first use).
    """

    def __init__(self, pool_size: int, buffer_size: int, embedding_dim: int):
        super().__init__()
        self.pool_size = pool_size
        self.buffer_size = buffer_size
        self.embedding_dim = embedding_dim
        self._device = torch.device('cpu')
        # FIFO of pending pull requests: (slot, src, offset, count, index).
        self._pull_queue = []
        # Keep pushed tensors alive until their stream finished writing.
        self._push_cache = ([None] * pool_size)
        self._push_streams = ([None] * pool_size)
        self._pull_streams = ([None] * pool_size)
        self._cpu_buffers = ([None] * pool_size)
        self._cuda_buffers = ([None] * pool_size)
        self._pull_index = (- 1)
        self._push_index = (- 1)

    def _apply(self, fn: Callable) -> 'AsyncIOPool':
        # NOTE(review): upstream annotated this `-> None`, but it returns
        # `self`; annotation fixed here. Track only the target device — the
        # buffers themselves are allocated lazily.
        self._device = fn(torch.zeros(1)).device
        return self

    def _pull_stream(self, idx: int) -> Stream:
        # Lazily create the CUDA stream for pull slot `idx`.
        if (self._pull_streams[idx] is None):
            assert (str(self._device)[:4] == 'cuda')
            self._pull_streams[idx] = torch.cuda.Stream(self._device)
        return self._pull_streams[idx]

    def _push_stream(self, idx: int) -> Stream:
        # Lazily create the CUDA stream for push slot `idx`.
        if (self._push_streams[idx] is None):
            assert (str(self._device)[:4] == 'cuda')
            self._push_streams[idx] = torch.cuda.Stream(self._device)
        return self._push_streams[idx]

    def _cpu_buffer(self, idx: int) -> Tensor:
        # Lazily allocate the pinned CPU staging buffer for slot `idx`.
        if (self._cpu_buffers[idx] is None):
            self._cpu_buffers[idx] = torch.empty(
                self.buffer_size, self.embedding_dim, pin_memory=True)
        return self._cpu_buffers[idx]

    def _cuda_buffer(self, idx: int) -> Tensor:
        # Lazily allocate the CUDA destination buffer for slot `idx`.
        if (self._cuda_buffers[idx] is None):
            assert (str(self._device)[:4] == 'cuda')
            self._cuda_buffers[idx] = torch.empty(
                self.buffer_size, self.embedding_dim, device=self._device)
        return self._cuda_buffers[idx]

    @torch.no_grad()
    def async_pull(self, src: Tensor, offset: Optional[Tensor],
                   count: Optional[Tensor], index: Tensor) -> None:
        """Queue an asynchronous pull of rows from `src` into a buffer slot.

        Only up to `pool_size` pulls run concurrently; excess requests are
        queued and started by :meth:`free_pull` as slots free up.
        """
        self._pull_index = ((self._pull_index + 1) % self.pool_size)
        data = (self._pull_index, src, offset, count, index)
        self._pull_queue.append(data)
        if (len(self._pull_queue) <= self.pool_size):
            self._async_pull(self._pull_index, src, offset, count, index)

    @torch.no_grad()
    def _async_pull(self, idx: int, src: Tensor, offset: Optional[Tensor],
                    count: Optional[Tensor], index: Tensor) -> None:
        # Issue the actual transfer on the slot's dedicated stream.
        with torch.cuda.stream(self._pull_stream(idx)):
            read_async(src, offset, count, index, self._cuda_buffer(idx),
                       self._cpu_buffer(idx))

    @torch.no_grad()
    def synchronize_pull(self) -> Tensor:
        """Wait for the oldest queued pull and return its CUDA buffer.

        The buffer stays owned by the pool — call :meth:`free_pull` after
        consuming it.
        """
        idx = self._pull_queue[0][0]
        synchronize()
        torch.cuda.synchronize(self._pull_stream(idx))
        return self._cuda_buffer(idx)

    @torch.no_grad()
    def free_pull(self) -> None:
        """Release the oldest pull slot and start the next queued pull."""
        self._pull_queue.pop(0)
        if (len(self._pull_queue) >= self.pool_size):
            # A request was waiting for this slot; start it now.
            data = self._pull_queue[(self.pool_size - 1)]
            (idx, src, offset, count, index) = data
            self._async_pull(idx, src, offset, count, index)
        elif (len(self._pull_queue) == 0):
            self._pull_index = (- 1)

    @torch.no_grad()
    def async_push(self, src: Tensor, offset: Tensor, count: Tensor,
                   dst: Tensor) -> None:
        """Asynchronously write rows of `src` into `dst` on a push stream."""
        self._push_index = ((self._push_index + 1) % self.pool_size)
        # Make sure the slot's previous push finished before reusing it.
        self.synchronize_push(self._push_index)
        # Keep `src` alive until the stream has consumed it.
        self._push_cache[self._push_index] = src
        with torch.cuda.stream(self._push_stream(self._push_index)):
            write_async(src, offset, count, dst)

    @torch.no_grad()
    def synchronize_push(self, idx: Optional[int] = None) -> None:
        """Wait for one push slot (or all of them when `idx` is None)."""
        if (idx is None):
            for idx in range(self.pool_size):
                self.synchronize_push(idx)
            self._push_index = (- 1)
        else:
            torch.cuda.synchronize(self._push_stream(idx))
            self._push_cache[idx] = None

    def forward(self, *args, **kwargs):
        """Not a real module — exists only to carry buffers/streams."""
        raise NotImplementedError

    def __repr__(self):
        return f'{self.__class__.__name__}(pool_size={self.pool_size}, buffer_size={self.buffer_size}, embedding_dim={self.embedding_dim}, device={self._device})'
|
def index2mask(idx: Tensor, size: int) -> Tensor:
    """Convert a tensor of indices into a boolean mask of length ``size``."""
    out = torch.zeros(size, dtype=torch.bool, device=idx.device)
    out[idx] = True
    return out
|
def compute_micro_f1(logits: Tensor, y: Tensor,
                     mask: Optional[Tensor] = None) -> float:
    """Compute the micro-averaged F1 score of ``logits`` against ``y``.

    For single-label targets (1-D ``y``) micro-F1 reduces to plain accuracy.
    For multi-label targets, predictions are thresholded at logit 0 and
    targets at 0.5. An optional boolean ``mask`` restricts the evaluation.
    """
    if mask is not None:
        logits, y = logits[mask], y[mask]

    if y.dim() == 1:
        # Single-label case: micro-F1 == accuracy.
        pred = logits.argmax(dim=-1)
        return int((pred == y).sum()) / y.size(0)

    # Multi-label case.
    pred = logits > 0
    truth = y > 0.5
    tp = int((truth & pred).sum())
    fp = int((pred & ~truth).sum())
    fn = int((truth & ~pred).sum())
    try:
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        return 2 * precision * recall / (precision + recall)
    except ZeroDivisionError:
        # No positive predictions and/or no positive targets.
        return 0.0
|
def gen_masks(y: Tensor, train_per_class: int = 20, val_per_class: int = 30,
              num_splits: int = 20) -> Tuple[Tensor, Tensor, Tensor]:
    """Generate ``num_splits`` random train/val/test node splits.

    For every class, each split samples ``train_per_class`` training nodes
    and ``val_per_class`` validation nodes; everything else becomes test.
    Returns boolean masks of shape ``(num_nodes, num_splits)``.
    """
    num_classes = int(y.max()) + 1
    num_nodes = y.size(0)
    train_mask = torch.zeros(num_nodes, num_splits, dtype=torch.bool)
    val_mask = torch.zeros(num_nodes, num_splits, dtype=torch.bool)

    for c in range(num_classes):
        class_idx = (y == c).nonzero(as_tuple=False).view(-1)
        # One independent permutation per split, stacked column-wise.
        perms = torch.stack(
            [torch.randperm(class_idx.size(0)) for _ in range(num_splits)],
            dim=1)
        shuffled = class_idx[perms]  # (class_size, num_splits)
        train_mask.scatter_(0, shuffled[:train_per_class], True)
        val_slice = shuffled[train_per_class:(train_per_class + val_per_class)]
        val_mask.scatter_(0, val_slice, True)

    return train_mask, val_mask, ~(train_mask | val_mask)
|
def dropout(adj_t: SparseTensor, p: float, training: bool=True):
if ((not training) or (p == 0.0)):
return adj_t
if (adj_t.storage.value() is not None):
value = F.dropout(adj_t.storage.value(), p=p)
adj_t = adj_t.set_value(value, layout='coo')
else:
mask = (torch.rand(adj_t.nnz(), device=adj_t.storage.row().device) > p)
adj_t = adj_t.masked_select_nnz(mask, layout='coo')
return adj_t
|
def train(epoch):
    """Run one training epoch; return the mean L1 loss per graph.

    Relies on module-level ``model``, ``train_loader``, ``device`` and
    ``optimizer``.
    """
    model.train()
    loss_sum = 0.0
    for batch in train_loader:
        batch = batch.to(device)
        optimizer.zero_grad()
        loss = (model(batch).squeeze() - batch.y).abs().mean()
        loss.backward()
        loss_sum += loss.item() * batch.num_graphs
        optimizer.step()
    return loss_sum / len(train_loader.dataset)
|
@torch.no_grad()
def test(loader):
    """Return the mean absolute error of ``model`` over ``loader``."""
    model.eval()
    error_sum = 0.0
    for batch in loader:
        batch = batch.to(device)
        error_sum += (model(batch).squeeze() - batch.y).abs().sum().item()
    return error_sum / len(loader.dataset)
|
def train(epoch):
    """Run one training epoch and return the dataset-averaged L1 loss.

    Uses the module-level ``model``, ``train_loader``, ``device`` and
    ``optimizer`` objects.
    """
    model.train()
    running = 0.0
    for data in train_loader:
        data = data.to(device)
        optimizer.zero_grad()
        l1 = (model(data).squeeze() - data.y).abs().mean()
        l1.backward()
        running += l1.item() * data.num_graphs
        optimizer.step()
    return running / len(train_loader.dataset)
|
@torch.no_grad()
def test(loader):
    """Evaluate ``model`` on ``loader`` and return the mean absolute error."""
    model.eval()
    running = 0.0
    for data in loader:
        data = data.to(device)
        running += (model(data).squeeze() - data.y).abs().sum().item()
    return running / len(loader.dataset)
|
def get_extensions():
    """Build the list of C++/CUDA extension modules found under ``csrc``.

    For every main ``*.cpp`` under ``csrc`` and every entry in the
    module-level ``suffices`` (presumably ('cpu', 'cuda') — confirm against
    the surrounding setup script), one Extension is configured with
    platform-specific compile and link flags.
    """
    extensions = []
    extensions_dir = osp.join('csrc')
    main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))
    # Skip HIP-generated sources picked up by the glob.
    main_files = [path for path in main_files if ('hip' not in path)]
    for (main, suffix) in product(main_files, suffices):
        define_macros = [('WITH_PYTHON', None)]
        undef_macros = []
        if (sys.platform == 'win32'):
            # Export symbols from the extension DLL on Windows.
            define_macros += [('torchcluster_EXPORTS', None)]
        extra_compile_args = {'cxx': ['-O2']}
        if (not (os.name == 'nt')):
            extra_compile_args['cxx'] += ['-Wno-sign-compare']
        extra_link_args = ['-s']
        # Enable OpenMP when the local torch build reports support for it.
        info = parallel_info()
        if (('backend: OpenMP' in info) and ('OpenMP not found' not in info) and (sys.platform != 'darwin')):
            extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']
            if (sys.platform == 'win32'):
                extra_compile_args['cxx'] += ['/openmp']
            else:
                extra_compile_args['cxx'] += ['-fopenmp']
        else:
            print('Compiling without OpenMP...')
        # Apple Silicon cross-compilation flags.
        if ((sys.platform == 'darwin') and (platform.machine() == 'arm64')):
            extra_compile_args['cxx'] += ['-arch', 'arm64']
            extra_link_args += ['-arch', 'arm64']
        if (suffix == 'cuda'):
            define_macros += [('WITH_CUDA', None)]
            nvcc_flags = os.getenv('NVCC_FLAGS', '')
            nvcc_flags = ([] if (nvcc_flags == '') else nvcc_flags.split(' '))
            nvcc_flags += ['-O2']
            extra_compile_args['nvcc'] = nvcc_flags
            # ROCm builds define USE_ROCM; CUDA builds get extra nvcc flags
            # (nvcc_flags is aliased into extra_compile_args above, so later
            # appends still take effect).
            if torch.version.hip:
                define_macros += [('USE_ROCM', None)]
                undef_macros += ['__HIP_NO_HALF_CONVERSIONS__']
            else:
                nvcc_flags += ['--expt-relaxed-constexpr']
        # Derive the module name from the main source file (strip '.cpp').
        name = main.split(os.sep)[(- 1)][:(- 4)]
        sources = [main]
        # Add the matching per-device implementation files when present.
        path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
        if osp.exists(path):
            sources += [path]
        path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
        if ((suffix == 'cuda') and osp.exists(path)):
            sources += [path]
        Extension = (CppExtension if (suffix == 'cpu') else CUDAExtension)
        extension = Extension(f'torch_cluster._{name}_{suffix}', sources, include_dirs=[extensions_dir], define_macros=define_macros, undef_macros=undef_macros, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args)
        extensions += [extension]
    return extensions
|
@torch.jit.script
def fps2(x: Tensor, ratio: Tensor) -> Tensor:
    # TorchScript wrapper around `fps` with a tensor-valued ratio,
    # no batch vector, and a deterministic start (random_start=False).
    return fps(x, None, ratio, False)
|
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_fps(dtype, device):
    """Exercise `fps` across all supported call signatures (batch vector,
    ptr list/tensor, scalar/tensor/per-batch ratio) with a fixed start so
    the selected indices are deterministic."""
    # Two batches of four points each: an inner and an outer square.
    x = tensor([[(- 1), (- 1)], [(- 1), (+ 1)], [(+ 1), (+ 1)], [(+ 1), (- 1)], [(- 2), (- 2)], [(- 2), (+ 2)], [(+ 2), (+ 2)], [(+ 2), (- 2)]], dtype, device)
    batch = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
    ptr_list = [0, 4, 8]
    ptr = torch.tensor(ptr_list, device=device)
    # Batched sampling: expect the first point plus its farthest companion
    # from each batch, for every equivalent way of specifying the ratio.
    out = fps(x, batch, random_start=False)
    assert (out.tolist() == [0, 2, 4, 6])
    out = fps(x, batch, ratio=0.5, random_start=False)
    assert (out.tolist() == [0, 2, 4, 6])
    ratio = torch.tensor(0.5, device=device)
    out = fps(x, batch, ratio=ratio, random_start=False)
    assert (out.tolist() == [0, 2, 4, 6])
    # `ptr` may be given as a plain Python list or a tensor.
    out = fps(x, ptr=ptr_list, ratio=0.5, random_start=False)
    assert (out.tolist() == [0, 2, 4, 6])
    out = fps(x, ptr=ptr, ratio=0.5, random_start=False)
    assert (out.tolist() == [0, 2, 4, 6])
    # Per-batch ratio tensor.
    ratio = torch.tensor([0.5, 0.5], device=device)
    out = fps(x, batch, ratio=ratio, random_start=False)
    assert (out.tolist() == [0, 2, 4, 6])
    # Unbatched sampling over all eight points.
    out = fps(x, random_start=False)
    assert (out.sort()[0].tolist() == [0, 5, 6, 7])
    out = fps(x, ratio=0.5, random_start=False)
    assert (out.sort()[0].tolist() == [0, 5, 6, 7])
    out = fps(x, ratio=torch.tensor(0.5, device=device), random_start=False)
    assert (out.sort()[0].tolist() == [0, 5, 6, 7])
    out = fps(x, ratio=torch.tensor([0.5], device=device), random_start=False)
    assert (out.sort()[0].tolist() == [0, 5, 6, 7])
    # TorchScript wrapper behaves identically.
    out = fps2(x, torch.tensor([0.5], device=device))
    assert (out.sort()[0].tolist() == [0, 5, 6, 7])
|
@pytest.mark.parametrize('device', devices)
def test_random_fps(device):
    """`fps` with a random start must always return in-range indices."""
    N = 1024
    for _ in range(5):
        pos = torch.randn((2 * N, 3), device=device)
        # Two equally-sized batches.
        batch = torch.cat([
            torch.zeros(N, dtype=torch.long, device=device),
            torch.ones(N, dtype=torch.long, device=device),
        ])
        idx = fps(pos, batch, ratio=0.5)
        assert idx.min() >= 0 and idx.max() < 2 * N
|
def assert_correct(row, col, cluster):
    """Sanity-check a graclus matching: cluster ids are non-negative, each
    cluster contains at most two nodes, every cluster is rooted at its
    smallest member, and any matched partner is an actual neighbor."""
    row = row.to('cpu')
    col = col.to('cpu')
    cluster = cluster.to('cpu')
    num_nodes = cluster.size(0)

    # No negative cluster ids.
    assert cluster.min() >= 0

    # Every cluster holds at most two nodes.
    _, inv = torch.unique(cluster, return_inverse=True)
    sizes = torch.zeros_like(cluster)
    sizes.scatter_add_(0, inv, torch.ones_like(cluster))
    assert (sizes > 2).max() == 0

    # Cluster ids never exceed the node index (root = smallest member).
    in_range = cluster <= torch.arange(num_nodes, dtype=cluster.dtype)
    assert in_range.sum() == num_nodes

    # Any other member of a node's cluster must be one of its neighbors.
    for node in range(num_nodes):
        via_edge = cluster[col[row == node]] == cluster[node]
        same_cluster = cluster == cluster[node]
        same_cluster[node] = 0
        assert via_edge.sum() == same_cluster.sum()
|
@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
def test_graclus_cluster(test, dtype, device):
    """Run graclus on each fixture and validate the matching, both eagerly
    and through TorchScript."""
    # bfloat16 is not supported on CUDA for this op.
    if ((dtype == torch.bfloat16) and (device == torch.device('cuda:0'))):
        return
    row = tensor(test['row'], torch.long, device)
    col = tensor(test['col'], torch.long, device)
    weight = tensor(test.get('weight'), dtype, device)
    cluster = graclus_cluster(row, col, weight)
    assert_correct(row, col, cluster)
    # The scripted variant must also produce a valid matching.
    jit = torch.jit.script(graclus_cluster)
    cluster = jit(row, col, weight)
    assert_correct(row, col, cluster)
|
@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
def test_grid_cluster(test, dtype, device):
    """Check `grid_cluster` against fixture-provided expected clusters, both
    eagerly and through TorchScript."""
    # bfloat16 is not supported on CUDA for this op.
    if ((dtype == torch.bfloat16) and (device == torch.device('cuda:0'))):
        return
    pos = tensor(test['pos'], dtype, device)
    size = tensor(test['size'], dtype, device)
    start = tensor(test.get('start'), dtype, device)
    end = tensor(test.get('end'), dtype, device)
    cluster = grid_cluster(pos, size, start, end)
    assert (cluster.tolist() == test['cluster'])
    # The scripted variant must match the eager result exactly.
    jit = torch.jit.script(grid_cluster)
    assert torch.equal(jit(pos, size, start, end), cluster)
|
def to_set(edge_index):
    """Convert a `[2, E]` edge index tensor into a set of `(i, j)` tuples."""
    return {tuple(pair) for pair in edge_index.t().tolist()}
|
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_knn(dtype, device):
    """k-NN search: eager, scripted, batched, and with gaps in batch ids."""
    x = tensor([
        [-1, -1], [-1, +1], [+1, +1], [+1, -1],
        [-1, -1], [-1, +1], [+1, +1], [+1, -1],
    ], dtype, device)
    y = tensor([[1, 0], [-1, 0]], dtype, device)

    expected_flat = {(0, 2), (0, 3), (1, 0), (1, 1)}
    assert to_set(knn(x, y, 2)) == expected_flat

    scripted = torch.jit.script(knn)
    assert to_set(scripted(x, y, 2)) == expected_flat

    batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
    batch_y = tensor([0, 1], torch.long, device)
    expected_batched = {(0, 2), (0, 3), (1, 4), (1, 5)}
    assert to_set(knn(x, y, 2, batch_x, batch_y)) == expected_batched

    if x.is_cuda:
        out = knn(x, y, 2, batch_x, batch_y, cosine=True)
        assert to_set(out) == expected_batched

    # Skipping a batch id (jumping from 0 straight to 2) must still work.
    batch_x = tensor([0, 0, 0, 0, 2, 2, 2, 2], torch.long, device)
    batch_y = tensor([0, 2], torch.long, device)
    assert to_set(knn(x, y, 2, batch_x, batch_y)) == expected_batched
|
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_knn_graph(dtype, device):
    """Each corner of a square links to its two adjacent corners."""
    x = tensor([[-1, -1], [-1, +1], [+1, +1], [+1, -1]], dtype, device)

    adjacency = {(0, 1), (0, 3), (1, 0), (1, 2),
                 (2, 1), (2, 3), (3, 0), (3, 2)}

    assert to_set(knn_graph(x, k=2, flow='target_to_source')) == adjacency
    # Flipping the flow transposes every edge; the square's adjacency is
    # symmetric, so the resulting edge set is identical.
    assert to_set(knn_graph(x, k=2, flow='source_to_target')) == adjacency

    scripted = torch.jit.script(knn_graph)
    assert to_set(scripted(x, k=2, flow='source_to_target')) == adjacency
|
@pytest.mark.parametrize('dtype,device', product([torch.float], devices))
def test_knn_graph_large(dtype, device):
    """Cross-check `knn_graph` against scipy's KD-tree on random points."""
    x = torch.randn(1000, 3, dtype=dtype, device=device)

    edge_index = knn_graph(x, k=5, flow='target_to_source', loop=True)

    tree = scipy.spatial.cKDTree(x.cpu().numpy())
    _, neighbors = tree.query(x.cpu(), k=5)
    expected = {(i, j) for i, ns in enumerate(neighbors) for j in ns}

    assert to_set(edge_index.cpu()) == expected
|
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_nearest(dtype, device):
    """`nearest` assigns points to query centers and rejects bad batches."""
    x = tensor([
        [-1, -1], [-1, +1], [+1, +1], [+1, -1],
        [-2, -2], [-2, +2], [+2, +2], [+2, -2],
    ], dtype, device)
    y = tensor([[-1, 0], [+1, 0], [-2, 0], [+2, 0]], dtype, device)

    batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
    batch_y = tensor([0, 0, 1, 1], torch.long, device)
    assert nearest(x, y, batch_x, batch_y).tolist() == [0, 0, 1, 1, 2, 2, 3, 3]

    # Without batch vectors the assignment happens to be identical here.
    assert nearest(x, y).tolist() == [0, 0, 1, 1, 2, 2, 3, 3]

    # Invalid: example 1 exists in 'batch_x' but not in 'batch_y'.
    batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
    batch_y = tensor([0, 0, 0, 0], torch.long, device)
    with pytest.raises(ValueError):
        nearest(x, y, batch_x, batch_y)
    with pytest.raises(ValueError):
        nearest(x, y, batch_x, batch_y=None)

    # Invalid: example 2 exists in 'batch_x' but not in 'batch_y'.
    batch_x = tensor([0, 0, 1, 1, 2, 2, 3, 3], torch.long, device)
    batch_y = tensor([0, 1, 3, 3], torch.long, device)
    with pytest.raises(ValueError):
        nearest(x, y, batch_x, batch_y)

    # Invalid: 'batch_x' is not sorted.
    batch_x = tensor([0, 0, 1, 0, 0, 0, 0], torch.long, device)
    batch_y = tensor([0, 0, 1, 1], torch.long, device)
    with pytest.raises(ValueError):
        nearest(x, y, batch_x, batch_y)

    # Invalid: 'batch_y' is not sorted.
    batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
    batch_y = tensor([0, 0, 1, 0], torch.long, device)
    with pytest.raises(ValueError):
        nearest(x, y, batch_x, batch_y)
|
def to_set(edge_index):
    """Turn a `[2, E]` edge index tensor into a set of `(src, dst)` pairs."""
    return set(map(tuple, edge_index.t().tolist()))
|
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_radius(dtype, device):
    """Radius search: eager, scripted, batched, and with batch-id gaps."""
    x = tensor([
        [-1, -1], [-1, +1], [+1, +1], [+1, -1],
        [-1, -1], [-1, +1], [+1, +1], [+1, -1],
    ], dtype, device)
    y = tensor([[0, 0], [0, 1]], dtype, device)

    expected_flat = {(0, 0), (0, 1), (0, 2), (0, 3),
                     (1, 1), (1, 2), (1, 5), (1, 6)}
    assert to_set(radius(x, y, 2, max_num_neighbors=4)) == expected_flat

    scripted = torch.jit.script(radius)
    assert to_set(scripted(x, y, 2, max_num_neighbors=4)) == expected_flat

    batch_x = tensor([0, 0, 0, 0, 1, 1, 1, 1], torch.long, device)
    batch_y = tensor([0, 1], torch.long, device)
    expected_batched = {(0, 0), (0, 1), (0, 2), (0, 3), (1, 5), (1, 6)}
    out = radius(x, y, 2, batch_x, batch_y, max_num_neighbors=4)
    assert to_set(out) == expected_batched

    # Skipping a batch id (jumping from 0 straight to 2) must still work.
    batch_x = tensor([0, 0, 0, 0, 2, 2, 2, 2], torch.long, device)
    batch_y = tensor([0, 2], torch.long, device)
    out = radius(x, y, 2, batch_x, batch_y, max_num_neighbors=4)
    assert to_set(out) == expected_batched
|
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_radius_graph(dtype, device):
    """Within r=2.5, each square corner connects to its two neighbors."""
    x = tensor([[-1, -1], [-1, +1], [+1, +1], [+1, -1]], dtype, device)

    adjacency = {(0, 1), (0, 3), (1, 0), (1, 2),
                 (2, 1), (2, 3), (3, 0), (3, 2)}

    assert to_set(radius_graph(x, r=2.5, flow='target_to_source')) == adjacency
    # The square's adjacency is symmetric, so flipping the flow direction
    # yields the same edge set.
    assert to_set(radius_graph(x, r=2.5, flow='source_to_target')) == adjacency

    scripted = torch.jit.script(radius_graph)
    assert to_set(scripted(x, r=2.5, flow='source_to_target')) == adjacency
|
@pytest.mark.parametrize('dtype,device', product([torch.float], devices))
def test_radius_graph_large(dtype, device):
    """Cross-check `radius_graph` against scipy's KD-tree ball query."""
    x = torch.randn(1000, 3, dtype=dtype, device=device)

    edge_index = radius_graph(x, r=0.5, flow='target_to_source', loop=True,
                              max_num_neighbors=2000)

    tree = scipy.spatial.cKDTree(x.cpu().numpy())
    neighbors = tree.query_ball_point(x.cpu(), r=0.5)
    expected = {(i, j) for i, ns in enumerate(neighbors) for j in ns}

    assert to_set(edge_index.cpu()) == expected
|
def test_neighbor_sampler():
    """Sampled edge ids are reproducible under a fixed RNG seed."""
    torch.manual_seed(1234)
    start = torch.tensor([0, 1])
    cumdeg = torch.tensor([0, 3, 7])

    # Float size: interpreted as a sampling ratio (1.0 keeps everything).
    assert neighbor_sampler(start, cumdeg, size=1.0).tolist() == [0, 2, 1, 5, 6, 3, 4]
    # Int size: sample at most three neighbors per start node.
    assert neighbor_sampler(start, cumdeg, size=3).tolist() == [1, 0, 2, 4, 5, 6]
|
@torch.jit._overload
def fps(src, batch, ratio, random_start, batch_size, ptr):
    # TorchScript overload stub: declares one accepted call signature of
    # `fps` for the JIT compiler. The body is intentionally empty — the
    # actual implementation is registered elsewhere.
    pass
|
def graclus_cluster(row: torch.Tensor, col: torch.Tensor, weight: Optional[torch.Tensor]=None, num_nodes: Optional[int]=None) -> torch.Tensor:
    """Greedily match each unmarked node with one of its unmarked neighbors
    (the one maximizing the edge weight, when weights are given).

    Args:
        row (LongTensor): Source nodes.
        col (LongTensor): Target nodes.
        weight (Tensor, optional): Edge weights. (default: :obj:`None`)
        num_nodes (int, optional): The number of nodes. (default: :obj:`None`)

    :rtype: :class:`LongTensor` assigning a cluster id to every node.

    .. code-block:: python

        import torch
        from torch_cluster import graclus_cluster

        row = torch.tensor([0, 1, 1, 2])
        col = torch.tensor([1, 0, 2, 1])
        weight = torch.Tensor([1, 1, 1, 1])
        cluster = graclus_cluster(row, col, weight)
    """
    if num_nodes is None:
        num_nodes = max(int(row.max()), int(col.max())) + 1

    # Drop self-loops; a node can never be matched with itself.
    loop_mask = row != col
    row, col = row[loop_mask], col[loop_mask]
    if weight is not None:
        weight = weight[loop_mask]

    # Without weights, shuffle edges first so matching order is randomized.
    if weight is None:
        shuffle = torch.randperm(row.size(0), dtype=torch.long,
                                 device=row.device)
        row, col = row[shuffle], col[shuffle]

    # Sort edges by source node to obtain CSR ordering.
    order = torch.argsort(row)
    row, col = row[order], col[order]
    if weight is not None:
        weight = weight[order]

    # Build the CSR row pointer from per-node out-degrees.
    deg = row.new_zeros(num_nodes)
    deg.scatter_add_(0, row, torch.ones_like(row))
    rowptr = row.new_zeros(num_nodes + 1)
    torch.cumsum(deg, 0, out=rowptr[1:])

    return torch.ops.torch_cluster.graclus(rowptr, col, weight)
|
def grid_cluster(pos: torch.Tensor, size: torch.Tensor, start: Optional[torch.Tensor]=None, end: Optional[torch.Tensor]=None) -> torch.Tensor:
    """Overlay a regular grid of user-defined :obj:`size` over a point cloud
    and cluster all points falling into the same voxel.

    Args:
        pos (Tensor): D-dimensional position of points.
        size (Tensor): Size of a voxel in each dimension.
        start (Tensor, optional): Start position of the grid in each
            dimension. (default: :obj:`None`)
        end (Tensor, optional): End position of the grid in each dimension.
            (default: :obj:`None`)

    :rtype: :class:`LongTensor` with one voxel id per point.

    .. code-block:: python

        import torch
        from torch_cluster import grid_cluster

        pos = torch.Tensor([[0, 0], [11, 9], [2, 8], [2, 2], [8, 3]])
        size = torch.Tensor([5, 5])
        cluster = grid_cluster(pos, size)
    """
    # Thin wrapper around the registered custom operator.
    cluster = torch.ops.torch_cluster.grid(pos, size, start, end)
    return cluster
|
def knn(x: torch.Tensor, y: torch.Tensor, k: int, batch_x: Optional[torch.Tensor]=None, batch_y: Optional[torch.Tensor]=None, cosine: bool=False, num_workers: int=1, batch_size: Optional[int]=None) -> torch.Tensor:
    """Find for each element in :obj:`y` the :obj:`k` nearest points in
    :obj:`x`.

    Args:
        x (Tensor): Node feature matrix of shape :math:`[N, F]`.
        y (Tensor): Node feature matrix of shape :math:`[M, F]`.
        k (int): The number of neighbors.
        batch_x (LongTensor, optional): Sorted batch vector assigning each
            node in :obj:`x` to a specific example. (default: :obj:`None`)
        batch_y (LongTensor, optional): Sorted batch vector assigning each
            node in :obj:`y` to a specific example. (default: :obj:`None`)
        cosine (bool, optional): If :obj:`True`, use cosine instead of
            Euclidean distance. (default: :obj:`False`)
        num_workers (int): Number of workers for CPU computation; has no
            effect for batched or GPU input. (default: :obj:`1`)
        batch_size (int, optional): The number of examples :math:`B`.
            Automatically calculated if not given. (default: :obj:`None`)

    :rtype: :class:`LongTensor` of `(query, neighbor)` index pairs.
    """
    # Nothing to search over — return an empty edge index.
    if x.numel() == 0 or y.numel() == 0:
        return torch.empty(2, 0, dtype=torch.long, device=x.device)

    # Promote 1-D inputs to matrices with a single feature column.
    if x.dim() == 1:
        x = x.view(-1, 1)
    if y.dim() == 1:
        y = y.view(-1, 1)
    x, y = x.contiguous(), y.contiguous()

    # Derive the number of examples from the batch vectors when the caller
    # did not provide it explicitly.
    if batch_size is None:
        batch_size = 1
        if batch_x is not None:
            assert x.size(0) == batch_x.numel()
            batch_size = int(batch_x.max()) + 1
        if batch_y is not None:
            assert y.size(0) == batch_y.numel()
            batch_size = max(batch_size, int(batch_y.max()) + 1)
    assert batch_size > 0

    ptr_x: Optional[torch.Tensor] = None
    ptr_y: Optional[torch.Tensor] = None
    if batch_size > 1:
        assert batch_x is not None
        assert batch_y is not None
        # Convert the sorted batch vectors into CSR-style segment pointers.
        arange = torch.arange(batch_size + 1, device=x.device)
        ptr_x = torch.bucketize(arange, batch_x)
        ptr_y = torch.bucketize(arange, batch_y)

    return torch.ops.torch_cluster.knn(x, y, ptr_x, ptr_y, k, cosine,
                                       num_workers)
|
def knn_graph(x: torch.Tensor, k: int, batch: Optional[torch.Tensor]=None, loop: bool=False, flow: str='source_to_target', cosine: bool=False, num_workers: int=1, batch_size: Optional[int]=None) -> torch.Tensor:
    """Compute graph edges connecting each point to its :obj:`k` nearest
    points.

    Args:
        x (Tensor): Node feature matrix of shape :math:`[N, F]`.
        k (int): The number of neighbors.
        batch (LongTensor, optional): Sorted batch vector assigning each
            node to a specific example. (default: :obj:`None`)
        loop (bool, optional): If :obj:`True`, the graph will contain
            self-loops. (default: :obj:`False`)
        flow (string, optional): The flow direction when used in combination
            with message passing (:obj:`"source_to_target"` or
            :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`)
        cosine (bool, optional): If :obj:`True`, use cosine instead of
            Euclidean distance. (default: :obj:`False`)
        num_workers (int): Number of workers for CPU computation.
            (default: :obj:`1`)
        batch_size (int, optional): The number of examples :math:`B`.
            Automatically calculated if not given. (default: :obj:`None`)

    :rtype: :class:`LongTensor` of shape :math:`[2, E]`.
    """
    assert flow in ['source_to_target', 'target_to_source']

    # When self-loops are excluded, query one extra neighbor, since every
    # point trivially matches itself.
    num_neighbors = k if loop else k + 1
    edge_index = knn(x, x, num_neighbors, batch, batch, cosine, num_workers,
                     batch_size)

    if flow == 'source_to_target':
        row, col = edge_index[1], edge_index[0]
    else:
        row, col = edge_index[0], edge_index[1]

    if not loop:
        keep = row != col
        row, col = row[keep], col[keep]

    return torch.stack([row, col], dim=0)
|
def nearest(x: torch.Tensor, y: torch.Tensor, batch_x: Optional[torch.Tensor]=None, batch_y: Optional[torch.Tensor]=None) -> torch.Tensor:
    """Clusters points in :obj:`x` together which are nearest to a given
    query point in :obj:`y`.

    Args:
        x (Tensor): Node feature matrix
            :math:`\\mathbf{X} \\in \\mathbb{R}^{N \\times F}`.
        y (Tensor): Node feature matrix
            :math:`\\mathbf{Y} \\in \\mathbb{R}^{M \\times F}`.
        batch_x (LongTensor, optional): Batch vector assigning each node in
            :obj:`x` to a specific example; needs to be sorted.
            (default: :obj:`None`)
        batch_y (LongTensor, optional): Batch vector assigning each node in
            :obj:`y` to a specific example; needs to be sorted.
            (default: :obj:`None`)

    :rtype: :class:`LongTensor` of length :math:`N`, mapping each point in
        :obj:`x` to the index of its nearest point in :obj:`y`.

    Raises:
        ValueError: If a batch vector is unsorted, or if some batch id
            occurs in one batch vector but not in the other.

    .. code-block:: python

        import torch
        from torch_cluster import nearest

        x = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])
        batch_x = torch.tensor([0, 0, 0, 0])
        y = torch.Tensor([[-1, 0], [1, 0]])
        batch_y = torch.tensor([0, 0])
        cluster = nearest(x, y, batch_x, batch_y)
    """
    # Promote 1-D inputs to matrices with a single feature column.
    x = (x.view((- 1), 1) if (x.dim() == 1) else x)
    y = (y.view((- 1), 1) if (y.dim() == 1) else y)
    assert (x.size(1) == y.size(1))
    # Batch vectors must be sorted so each example forms one contiguous
    # segment.
    if ((batch_x is not None) and ((batch_x[1:] - batch_x[:(- 1)]) < 0).any()):
        raise ValueError("'batch_x' is not sorted")
    if ((batch_y is not None) and ((batch_y[1:] - batch_y[:(- 1)]) < 0).any()):
        raise ValueError("'batch_y' is not sorted")
    if x.is_cuda:
        # GPU path: build CSR-style segment pointers from the batch vectors
        # and dispatch to the custom CUDA kernel.
        if (batch_x is not None):
            assert (x.size(0) == batch_x.numel())
            batch_size = (int(batch_x.max()) + 1)
            deg = x.new_zeros(batch_size, dtype=torch.long)
            deg.scatter_add_(0, batch_x, torch.ones_like(batch_x))
            ptr_x = deg.new_zeros((batch_size + 1))
            torch.cumsum(deg, 0, out=ptr_x[1:])
        else:
            # Single segment covering all of `x`.
            ptr_x = torch.tensor([0, x.size(0)], device=x.device)
        if (batch_y is not None):
            assert (y.size(0) == batch_y.numel())
            batch_size = (int(batch_y.max()) + 1)
            deg = y.new_zeros(batch_size, dtype=torch.long)
            deg.scatter_add_(0, batch_y, torch.ones_like(batch_y))
            ptr_y = deg.new_zeros((batch_size + 1))
            torch.cumsum(deg, 0, out=ptr_y[1:])
        else:
            ptr_y = torch.tensor([0, y.size(0)], device=y.device)
        # Every example that holds `x` points must also hold `y` points
        # (and vice versa); otherwise the assignment is undefined.
        nonempty_ptr_x = ((ptr_x[1:] - ptr_x[:(- 1)]) > 0)
        nonempty_ptr_y = ((ptr_y[1:] - ptr_y[:(- 1)]) > 0)
        if (not torch.equal(nonempty_ptr_x, nonempty_ptr_y)):
            raise ValueError("Some batch indices occur in 'batch_x' that do not occur in 'batch_y'")
        return torch.ops.torch_cluster.nearest(x, y, ptr_x, ptr_y)
    else:
        # CPU path: encode the batch id as an extra coordinate so a single
        # scipy vector-quantization call keeps examples apart.
        if ((batch_x is None) and (batch_y is not None)):
            batch_x = x.new_zeros(x.size(0), dtype=torch.long)
        if ((batch_y is None) and (batch_x is not None)):
            batch_y = y.new_zeros(y.size(0), dtype=torch.long)
        if ((batch_x is not None) and (batch_y is not None)):
            unique_batch_x = batch_x.unique_consecutive()
            unique_batch_y = batch_y.unique_consecutive()
            if (not torch.equal(unique_batch_x, unique_batch_y)):
                raise ValueError("Some batch indices occur in 'batch_x' that do not occur in 'batch_y'")
            assert ((x.dim() == 2) and (batch_x.dim() == 1))
            assert ((y.dim() == 2) and (batch_y.dim() == 1))
            assert (x.size(0) == batch_x.size(0))
            assert (y.size(0) == batch_y.size(0))
            # Rescale all coordinates into [0, 1] so that the batch offset
            # appended below dominates any intra-example distance.
            # NOTE(review): if every coordinate is identical, `max_xy` is 0
            # and the divisions below fail — confirm callers never pass
            # fully-degenerate inputs here.
            min_xy = min(x.min().item(), y.min().item())
            (x, y) = ((x - min_xy), (y - min_xy))
            max_xy = max(x.max().item(), y.max().item())
            x.div_(max_xy)
            y.div_(max_xy)
            # Append `2 * D * batch_id` as an extra feature: points from
            # different examples end up farther apart than any two points
            # of the same example can be.
            D = x.size((- 1))
            x = torch.cat([x, ((2 * D) * batch_x.view((- 1), 1).to(x.dtype))], (- 1))
            y = torch.cat([y, ((2 * D) * batch_y.view((- 1), 1).to(y.dtype))], (- 1))
        return torch.from_numpy(scipy.cluster.vq.vq(x.detach().cpu(), y.detach().cpu())[0]).to(torch.long)
|
def radius(x: torch.Tensor, y: torch.Tensor, r: float, batch_x: Optional[torch.Tensor]=None, batch_y: Optional[torch.Tensor]=None, max_num_neighbors: int=32, num_workers: int=1, batch_size: Optional[int]=None) -> torch.Tensor:
    """Find for each element in :obj:`y` all points in :obj:`x` within
    distance :obj:`r`.

    Args:
        x (Tensor): Node feature matrix of shape :math:`[N, F]`.
        y (Tensor): Node feature matrix of shape :math:`[M, F]`.
        r (float): The radius.
        batch_x (LongTensor, optional): Sorted batch vector assigning each
            node in :obj:`x` to a specific example. (default: :obj:`None`)
        batch_y (LongTensor, optional): Sorted batch vector assigning each
            node in :obj:`y` to a specific example. (default: :obj:`None`)
        max_num_neighbors (int, optional): The maximum number of neighbors
            to return for each element in :obj:`y`. (default: :obj:`32`)
        num_workers (int): Number of workers for CPU computation; has no
            effect for batched or GPU input. (default: :obj:`1`)
        batch_size (int, optional): The number of examples :math:`B`.
            Automatically calculated if not given. (default: :obj:`None`)

    :rtype: :class:`LongTensor` of `(query, neighbor)` index pairs.
    """
    # Nothing to search over — return an empty edge index.
    if x.numel() == 0 or y.numel() == 0:
        return torch.empty(2, 0, dtype=torch.long, device=x.device)

    # Promote 1-D inputs to matrices with a single feature column.
    if x.dim() == 1:
        x = x.view(-1, 1)
    if y.dim() == 1:
        y = y.view(-1, 1)
    x, y = x.contiguous(), y.contiguous()

    # Derive the number of examples from the batch vectors when the caller
    # did not provide it explicitly.
    if batch_size is None:
        batch_size = 1
        if batch_x is not None:
            assert x.size(0) == batch_x.numel()
            batch_size = int(batch_x.max()) + 1
        if batch_y is not None:
            assert y.size(0) == batch_y.numel()
            batch_size = max(batch_size, int(batch_y.max()) + 1)
    assert batch_size > 0

    ptr_x: Optional[torch.Tensor] = None
    ptr_y: Optional[torch.Tensor] = None
    if batch_size > 1:
        assert batch_x is not None
        assert batch_y is not None
        # Convert the sorted batch vectors into CSR-style segment pointers.
        arange = torch.arange(batch_size + 1, device=x.device)
        ptr_x = torch.bucketize(arange, batch_x)
        ptr_y = torch.bucketize(arange, batch_y)

    return torch.ops.torch_cluster.radius(x, y, ptr_x, ptr_y, r,
                                          max_num_neighbors, num_workers)
|
def radius_graph(x: torch.Tensor, r: float, batch: Optional[torch.Tensor]=None, loop: bool=False, max_num_neighbors: int=32, flow: str='source_to_target', num_workers: int=1, batch_size: Optional[int]=None) -> torch.Tensor:
    """Compute graph edges to all points within a given distance.

    Args:
        x (Tensor): Node feature matrix of shape :math:`[N, F]`.
        r (float): The radius.
        batch (LongTensor, optional): Sorted batch vector assigning each
            node to a specific example. (default: :obj:`None`)
        loop (bool, optional): If :obj:`True`, the graph will contain
            self-loops. (default: :obj:`False`)
        max_num_neighbors (int, optional): The maximum number of neighbors
            to return for each element. (default: :obj:`32`)
        flow (string, optional): The flow direction when used in combination
            with message passing (:obj:`"source_to_target"` or
            :obj:`"target_to_source"`). (default: :obj:`"source_to_target"`)
        num_workers (int): Number of workers for CPU computation.
            (default: :obj:`1`)
        batch_size (int, optional): The number of examples :math:`B`.
            Automatically calculated if not given. (default: :obj:`None`)

    :rtype: :class:`LongTensor` of shape :math:`[2, E]`.
    """
    assert flow in ['source_to_target', 'target_to_source']

    # When self-loops are excluded, request one extra neighbor, since every
    # point trivially lies within radius of itself.
    limit = max_num_neighbors if loop else max_num_neighbors + 1
    edge_index = radius(x, x, r, batch, batch, limit, num_workers,
                        batch_size)

    if flow == 'source_to_target':
        row, col = edge_index[1], edge_index[0]
    else:
        row, col = edge_index[0], edge_index[1]

    if not loop:
        keep = row != col
        row, col = row[keep], col[keep]

    return torch.stack([row, col], dim=0)
|
def random_walk(row: Tensor, col: Tensor, start: Tensor, walk_length: int, p: float=1, q: float=1, coalesced: bool=True, num_nodes: Optional[int]=None, return_edge_indices: bool=False) -> Union[(Tensor, Tuple[(Tensor, Tensor)])]:
    """Sample random walks of length :obj:`walk_length` from every node in
    :obj:`start` in the graph given by :obj:`(row, col)`, as described in
    the `"node2vec: Scalable Feature Learning for Networks"
    <https://arxiv.org/abs/1607.00653>`_ paper.

    Edge indices :obj:`(row, col)` need to be coalesced/sorted according to
    :obj:`row` (use the :obj:`coalesced` attribute to force this).

    Args:
        row (LongTensor): Source nodes.
        col (LongTensor): Target nodes.
        start (LongTensor): Nodes from where random walks start.
        walk_length (int): The walk length.
        p (float, optional): Likelihood of immediately revisiting a node in
            the walk. (default: :obj:`1`)
        q (float, optional): Control parameter interpolating between
            breadth-first and depth-first strategy. (default: :obj:`1`)
        coalesced (bool, optional): If set to :obj:`True`, will coalesce/sort
            the graph by :obj:`row`. (default: :obj:`True`)
        num_nodes (int, optional): The number of nodes. (default: :obj:`None`)
        return_edge_indices (bool, optional): Whether to additionally return
            the indices of edges traversed during the random walk.
            (default: :obj:`False`)

    :rtype: :class:`LongTensor`
    """
    if num_nodes is None:
        num_nodes = max(int(row.max()), int(col.max()), int(start.max())) + 1

    if coalesced:
        # Sort edges lexicographically by (row, col).
        order = torch.argsort(row * num_nodes + col)
        row, col = row[order], col[order]

    # Build the CSR row pointer from per-node out-degrees.
    deg = row.new_zeros(num_nodes)
    deg.scatter_add_(0, row, torch.ones_like(row))
    rowptr = row.new_zeros(num_nodes + 1)
    torch.cumsum(deg, 0, out=rowptr[1:])

    node_seq, edge_seq = torch.ops.torch_cluster.random_walk(
        rowptr, col, start, walk_length, p, q)

    if return_edge_indices:
        return node_seq, edge_seq
    return node_seq
|
def neighbor_sampler(start: torch.Tensor, rowptr: torch.Tensor, size: float):
    """Sample neighbor edge ids for each node in `start` using the CSR
    pointer `rowptr`.

    `size` is interpreted as a sampling ratio when it is at most 1, and as
    a per-node neighbor count otherwise. The unused quantity is forwarded
    to the kernel as -1.
    """
    assert not start.is_cuda  # CPU-only operator

    factor: float = -1.0
    count: int = -1
    if size > 1:
        count = int(size)
    else:
        factor = size
        assert factor > 0

    return torch.ops.torch_cluster.neighbor_sampler(start, rowptr, count,
                                                    factor)
|
def tensor(x: Any, dtype: torch.dtype, device: torch.device):
    """Build a tensor from `x` on `device`, or pass `None` straight through."""
    if x is None:
        return None
    return torch.tensor(x, dtype=dtype, device=device)
|
def get_extensions():
    """Collect C++/CUDA build extensions for every main file in `csrc/`.

    For each (main `.cpp` file, backend suffix) pair this configures
    compiler/linker flags (OpenMP, macOS arm64, CUDA/ROCm macros) and
    appends the matching `cpu`/`cuda` kernel sources when present on disk.
    """
    extensions = []
    extensions_dir = osp.join('csrc')
    main_files = glob.glob(osp.join(extensions_dir, '*.cpp'))
    # Exclude HIP-generated translation units from the main file list.
    main_files = [path for path in main_files if ('hip' not in path)]
    for (main, suffix) in product(main_files, suffices):
        define_macros = []
        undef_macros = []
        extra_compile_args = {'cxx': ['-O2']}
        if (not (os.name == 'nt')):
            # Non-Windows toolchains: silence noisy sign-compare warnings.
            extra_compile_args['cxx'] += ['-Wno-sign-compare']
        extra_link_args = ['-s']
        # Enable OpenMP only when PyTorch itself reports an OpenMP backend
        # (and not on macOS, where it is skipped).
        info = parallel_info()
        if (('backend: OpenMP' in info) and ('OpenMP not found' not in info) and (sys.platform != 'darwin')):
            extra_compile_args['cxx'] += ['-DAT_PARALLEL_OPENMP']
            if (sys.platform == 'win32'):
                extra_compile_args['cxx'] += ['/openmp']
            else:
                extra_compile_args['cxx'] += ['-fopenmp']
        else:
            print('Compiling without OpenMP...')
        if ((sys.platform == 'darwin') and (platform.machine() == 'arm64')):
            # Apple Silicon: compile and link natively for arm64.
            extra_compile_args['cxx'] += ['-arch', 'arm64']
            extra_link_args += ['-arch', 'arm64']
        if (suffix == 'cuda'):
            define_macros += [('WITH_CUDA', None)]
            # Honor user-supplied NVCC flags from the environment.
            nvcc_flags = os.getenv('NVCC_FLAGS', '')
            nvcc_flags = ([] if (nvcc_flags == '') else nvcc_flags.split(' '))
            nvcc_flags += ['-O2']
            extra_compile_args['nvcc'] = nvcc_flags
            if torch.version.hip:
                # ROCm build: compile CUDA sources through HIP.
                define_macros += [('USE_ROCM', None)]
                undef_macros += ['__HIP_NO_HALF_CONVERSIONS__']
            else:
                nvcc_flags += ['--expt-relaxed-constexpr']
        # Strip the trailing '.cpp' to recover the operator name.
        name = main.split(os.sep)[(- 1)][:(- 4)]
        sources = [main]
        path = osp.join(extensions_dir, 'cpu', f'{name}_cpu.cpp')
        if osp.exists(path):
            sources += [path]
        path = osp.join(extensions_dir, 'cuda', f'{name}_cuda.cu')
        if ((suffix == 'cuda') and osp.exists(path)):
            sources += [path]
        Extension = (CppExtension if (suffix == 'cpu') else CUDAExtension)
        # NOTE(review): extensions are registered under 'torch_spline_conv';
        # confirm this matches the package this setup script belongs to.
        extension = Extension(f'torch_spline_conv._{name}_{suffix}', sources, include_dirs=[extensions_dir], define_macros=define_macros, undef_macros=undef_macros, extra_compile_args=extra_compile_args, extra_link_args=extra_link_args)
        extensions += [extension]
    return extensions
|
@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
def test_spline_basis_forward(test, dtype, device):
    """B-spline basis values/indices match fixtures, eager and scripted."""
    if dtype == torch.bfloat16 and device == torch.device('cuda:0'):
        return  # bfloat16 is skipped on CUDA here

    pseudo = tensor(test['pseudo'], dtype, device)
    kernel_size = tensor(test['kernel_size'], torch.long, device)
    is_open_spline = tensor(test['is_open_spline'], torch.uint8, device)
    expected_basis = tensor(test['basis'], dtype, device)
    expected_index = tensor(test['weight_index'], torch.long, device)
    degree = 1

    basis, weight_index = spline_basis(pseudo, kernel_size, is_open_spline,
                                       degree)
    assert torch.allclose(basis, expected_basis)
    assert torch.allclose(weight_index, expected_index)

    scripted = torch.jit.script(spline_basis)
    basis, weight_index = scripted(pseudo, kernel_size, is_open_spline,
                                   degree)
    assert torch.allclose(basis, expected_basis)
    assert torch.allclose(weight_index, expected_index)
|
@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
def test_spline_conv_forward(test, dtype, device):
    """Spline convolution output matches fixtures, eager and scripted."""
    if dtype == torch.bfloat16 and device == torch.device('cuda:0'):
        return  # bfloat16 is skipped on CUDA here

    x = tensor(test['x'], dtype, device)
    edge_index = tensor(test['edge_index'], torch.long, device)
    pseudo = tensor(test['pseudo'], dtype, device)
    weight = tensor(test['weight'], dtype, device)
    kernel_size = tensor(test['kernel_size'], torch.long, device)
    is_open_spline = tensor(test['is_open_spline'], torch.uint8, device)
    root_weight = tensor(test['root_weight'], dtype, device)
    bias = tensor(test['bias'], dtype, device)
    expected = tensor(test['expected'], dtype, device)

    # bfloat16 has far fewer mantissa bits, so loosen the tolerance for it.
    tol = 0.01 if dtype == torch.bfloat16 else 1e-07

    out = spline_conv(x, edge_index, pseudo, weight, kernel_size,
                      is_open_spline, 1, True, root_weight, bias)
    assert torch.allclose(out, expected, rtol=tol, atol=tol)

    scripted = torch.jit.script(spline_conv)
    out = scripted(x, edge_index, pseudo, weight, kernel_size,
                   is_open_spline, 1, True, root_weight, bias)
    assert torch.allclose(out, expected, rtol=tol, atol=tol)
|
@pytest.mark.parametrize('degree,device', product(degrees, devices))
def test_spline_conv_backward(degree, device):
    """Gradients of `spline_conv` w.r.t. every input pass `gradcheck`."""
    x = torch.rand(3, 2, dtype=torch.double, device=device,
                   requires_grad=True)
    edge_index = tensor([[0, 1, 1, 2], [1, 0, 2, 1]], torch.long, device)
    pseudo = torch.rand(4, 3, dtype=torch.double, device=device,
                        requires_grad=True)
    weight = torch.rand(125, 2, 4, dtype=torch.double, device=device,
                        requires_grad=True)
    kernel_size = tensor([5, 5, 5], torch.long, device)
    is_open_spline = tensor([1, 0, 1], torch.uint8, device)
    root_weight = torch.rand(2, 4, dtype=torch.double, device=device,
                             requires_grad=True)
    bias = torch.rand(4, dtype=torch.double, device=device,
                      requires_grad=True)

    inputs = (x, edge_index, pseudo, weight, kernel_size, is_open_spline,
              degree, True, root_weight, bias)
    assert gradcheck(spline_conv, inputs, eps=1e-06, atol=0.0001) is True
|
@pytest.mark.parametrize('test,dtype,device', product(tests, dtypes, devices))
def test_spline_weighting_forward(test, dtype, device):
    """Spline weighting output matches fixtures, eager and scripted."""
    if dtype == torch.bfloat16 and device == torch.device('cuda:0'):
        return  # bfloat16 is skipped on CUDA here

    x = tensor(test['x'], dtype, device)
    weight = tensor(test['weight'], dtype, device)
    basis = tensor(test['basis'], dtype, device)
    weight_index = tensor(test['weight_index'], torch.long, device)
    expected = tensor(test['expected'], dtype, device)

    assert torch.allclose(spline_weighting(x, weight, basis, weight_index),
                          expected)

    scripted = torch.jit.script(spline_weighting)
    assert torch.allclose(scripted(x, weight, basis, weight_index), expected)
|
@pytest.mark.parametrize('device', devices)
def test_spline_weighting_backward(device):
    """Gradients of `spline_weighting` pass `gradcheck`."""
    pseudo = torch.rand(4, 2, dtype=torch.double, device=device)
    kernel_size = tensor([5, 5], torch.long, device)
    is_open_spline = tensor([1, 1], torch.uint8, device)
    degree = 1

    # Precompute basis/indices; only the basis receives gradients.
    basis, weight_index = spline_basis(pseudo, kernel_size, is_open_spline,
                                       degree)
    basis.requires_grad_()

    x = torch.rand(4, 2, dtype=torch.double, device=device,
                   requires_grad=True)
    weight = torch.rand(25, 2, 4, dtype=torch.double, device=device,
                        requires_grad=True)

    inputs = (x, weight, basis, weight_index)
    assert gradcheck(spline_weighting, inputs, eps=1e-06, atol=0.0001) is True
|
def spline_basis(pseudo: torch.Tensor, kernel_size: torch.Tensor, is_open_spline: torch.Tensor, degree: int) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Evaluate the B-spline basis for the given pseudo coordinates.

    Thin wrapper around the registered custom operator; returns the basis
    values together with the corresponding weight indices.
    """
    out = torch.ops.torch_spline_conv.spline_basis(pseudo, kernel_size,
                                                   is_open_spline, degree)
    return out
|
def spline_conv(x: torch.Tensor, edge_index: torch.Tensor, pseudo: torch.Tensor, weight: torch.Tensor, kernel_size: torch.Tensor, is_open_spline: torch.Tensor, degree: int=1, norm: bool=True, root_weight: Optional[torch.Tensor]=None, bias: Optional[torch.Tensor]=None) -> torch.Tensor:
    r"""Applies the spline-based convolution operator :math:`(f \star g)(i) =
    \frac{1}{|\mathcal{N}(i)|} \sum_{l=1}^{M_{in}} \sum_{j \in \mathcal{N}(i)}
    f_l(j) \cdot g_l(u(i, j))` over several node features of an input graph.
    The kernel function :math:`g_l` is defined over the weighted B-spline
    tensor product basis for a single input feature map :math:`l`.

    Args:
        x (:class:`Tensor`): Input node features of shape
            (number_of_nodes x in_channels).
        edge_index (:class:`LongTensor`): Graph edges, given by source and
            target indices, of shape (2 x number_of_edges).
        pseudo (:class:`Tensor`): Edge attributes, ie. pseudo coordinates,
            of shape (number_of_edges x number_of_edge_attributes) in the
            fixed interval [0, 1].
        weight (:class:`Tensor`): Trainable weight parameters of shape
            (kernel_size x in_channels x out_channels).
        kernel_size (:class:`LongTensor`): Number of trainable weight
            parameters in each edge dimension.
        is_open_spline (:class:`ByteTensor`): Whether to use open or closed
            B-spline bases for each dimension.
        degree (int, optional): B-spline basis degree. (default: :obj:`1`)
        norm (bool, optional): Whether to normalize output by node degree.
            (default: :obj:`True`)
        root_weight (:class:`Tensor`, optional): Additional shared trainable
            parameters for each feature of the root node of shape
            (in_channels x out_channels). (default: :obj:`None`)
        bias (:class:`Tensor`, optional): Optional bias of shape
            (out_channels). (default: :obj:`None`)

    :rtype: :class:`Tensor`
    """
    # Promote 1-D node features / pseudo coordinates to 2-D (N x 1) form.
    x = (x.unsqueeze((- 1)) if (x.dim() == 1) else x)
    pseudo = (pseudo.unsqueeze((- 1)) if (pseudo.dim() == 1) else pseudo)
    # Messages flow from `col` nodes and are aggregated at `row` nodes below.
    (row, col) = (edge_index[0], edge_index[1])
    (N, E, M_out) = (x.size(0), row.size(0), weight.size(2))
    # Evaluate the B-spline basis products at each edge's pseudo coordinates.
    (basis, weight_index) = spline_basis(pseudo, kernel_size, is_open_spline, degree)
    # Per-edge message: source-node features weighted by the basis products.
    out = spline_weighting(x[col], weight, basis, weight_index)
    # Scatter-sum the (E x M_out) edge messages onto their `row` nodes.
    row_expanded = row.unsqueeze((- 1)).expand_as(out)
    out = x.new_zeros((N, M_out)).scatter_add_(0, row_expanded, out)
    if norm:
        # Mean aggregation: divide each node's sum by its edge count,
        # clamped to 1 so nodes without edges do not divide by zero.
        ones = torch.ones(E, dtype=x.dtype, device=x.device)
        deg = out.new_zeros(N).scatter_add_(0, row, ones)
        out = (out / deg.unsqueeze((- 1)).clamp_(min=1))
    if (root_weight is not None):
        # Separately transform each node's own features and add them in.
        out += (x @ root_weight)
    if (bias is not None):
        out += bias
    return out
|
def tensor(x: Any, dtype: torch.dtype, device: torch.device):
    """Build a tensor from ``x`` with the given dtype/device; pass ``None`` through."""
    if x is None:
        return None
    return torch.tensor(x, dtype=dtype, device=device)
|
def spline_weighting(x: torch.Tensor, weight: torch.Tensor, basis: torch.Tensor, weight_index: torch.Tensor) -> torch.Tensor:
    """Weight input features by B-spline basis products.

    Thin wrapper around the compiled ``torch_spline_conv`` extension op;
    kept as a plain single call so it stays TorchScript-scriptable.
    """
    out = torch.ops.torch_spline_conv.spline_weighting(x, weight, basis,
                                                       weight_index)
    return out
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.