repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_dist_feature.py | test/python/test_dist_feature.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import torch
import graphlearn_torch as glt
def run_dist_feature_test(world_size: int, rank: int, feature: glt.data.Feature,
partition_book: glt.data.PartitionBook, master_port: int):
glt.distributed.init_worker_group(world_size, rank, 'dist-feature-test')
glt.distributed.init_rpc(master_addr='localhost', master_port=master_port)
partition2workers = glt.distributed.rpc_sync_data_partitions(world_size, rank)
rpc_router = glt.distributed.RpcDataPartitionRouter(partition2workers)
current_device = torch.device('cuda', rank % 2)
dist_feature = glt.distributed.DistFeature(
world_size, rank, feature, partition_book,
local_only=False, rpc_router=rpc_router,
device=current_device
)
input = torch.tensor(
[10, 20, 260, 360, 200, 210, 420, 430],
dtype=torch.int64,
device=current_device
)
expected_features = torch.cat([
torch.ones(2, 1024, dtype=torch.float32, device=current_device),
torch.zeros(2, 1024, dtype=torch.float32, device=current_device),
torch.ones(2, 1024, dtype=torch.float32, device=current_device)*2,
torch.zeros(2, 1024, dtype=torch.float32, device=current_device)
])
res = dist_feature[input]
tc = unittest.TestCase()
tc.assertTrue(glt.utils.tensor_equal_with_device(res, expected_features))
glt.distributed.shutdown_rpc()
class DistFeatureTestCase(unittest.TestCase):
def test_dist_feature_lookup(self):
cpu_tensor0 = torch.cat([
torch.ones(128, 1024, dtype=torch.float32),
torch.ones(128, 1024, dtype=torch.float32)*2
])
cpu_tensor1 = torch.cat([
torch.zeros(128, 1024, dtype=torch.float32),
torch.zeros(128, 1024, dtype=torch.float32)
])
id2index = torch.arange(128 * 4)
id2index[128*2:] -= 128*2
partition_book = torch.cat([
torch.zeros(128*2, dtype=torch.long),
torch.ones(128*2, dtype=torch.long)
])
partition_book.share_memory_()
device_group_list = [
glt.data.DeviceGroup(0, [0]),
glt.data.DeviceGroup(1, [1])
]
split_ratio = 0.8
feature0 = glt.data.Feature(cpu_tensor0, id2index,
split_ratio, device_group_list)
feature1 = glt.data.Feature(cpu_tensor1, id2index,
split_ratio, device_group_list)
mp_context = torch.multiprocessing.get_context('spawn')
port = glt.utils.get_free_port()
w0 = mp_context.Process(
target=run_dist_feature_test,
args=(2, 0, feature0, partition_book, port)
)
w1 = mp_context.Process(
target=run_dist_feature_test,
args=(2, 1, feature1, partition_book, port)
)
w0.start()
w1.start()
w0.join()
w1.join()
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_vineyard.py | test/python/test_vineyard.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import subprocess
import re
import json
import os
from graphlearn_torch.data import *
from graphlearn_torch.distributed import DistDataset
@unittest.skipIf(os.getenv("WITH_VINEYARD", "OFF") == "OFF", "only test with vineyard")
class VineyardDatasetTest(unittest.TestCase):
sock = "/tmp/vineyard.glt.unittest.sock"
@classmethod
def setUpClass(cls):
cls.vineyardd_process = subprocess.Popen(
["vineyardd", "--socket", cls.sock]
)
data_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'vineyard_data',
'modern_graph'
)
cfg = {
"vertices":
[
{
"data_path": os.path.join(data_dir, "person.csv"),
"label": "person",
"options": "header_row=true&delimiter=|"
}, {
"data_path": os.path.join(data_dir, "software.csv"),
"label": "software",
"options": "header_row=true&delimiter=|"
}
],
"edges":
[
{
"data_path": os.path.join(data_dir, "knows.csv"),
"label": "knows",
"src_label": "person",
"dst_label": "person",
"options": "header_row=true&delimiter=|"
}, {
"data_path": os.path.join(data_dir, "created.csv"),
"label": "created",
"src_label": "person",
"dst_label": "software",
"options": "header_row=true&delimiter=|"
}
],
"directed": 1,
"retain_oid": 1,
"generate_eid": 1,
"string_oid": 0,
"local_vertex_map": 0,
"print_normalized_schema": 1
}
json_path = os.path.join(data_dir, "config.json")
with open(json_path, 'w') as json_file:
json.dump(cfg, json_file)
try:
command = f"vineyard-graph-loader --socket {cls.sock} --config {json_path}"
output = subprocess.run(
command, capture_output=True, text=True, shell=True
)
match = re.search(r"\[fragment group id\]: (\d+)", str(output))
if match:
cls.fid = match.group(1)
else:
raise Exception("Fragment Group ID not found.")
finally:
os.remove(json_path)
@classmethod
def tearDownClass(cls):
cls.vineyardd_process.kill()
def setUp(self):
self.sock = self.__class__.sock
self.fid = self.__class__.fid
self.homo_edges = [
("person", "knows", "person"),
]
self.homo_edge_weights = {
("person", "knows", "person"): "weight",
}
self.homo_node_features = {
"person": ["feat0", "feat1"],
}
self.homo_edge_features = {
("person", "knows", "person"): ["feat0", "feat1"],
},
self.node_labels = {
"person": "label",
}
self.hetero_edges = [
("person", "knows", "person"),
("person", "created", "software"),
]
self.hetero_edge_weights = {
("person", "knows", "person"): "weight",
("person", "created", "software"): "weight",
}
self.hetero_node_features = {
"person": ["feat0", "feat1"],
"software": ["feat0"],
}
self.hetero_edge_features = {
("person", "knows", "person"): ["feat0", "feat1"],
("person", "created", "software"): ["feat0"],
}
def test_homo_dataset(self):
ds = Dataset()
ds.load_vineyard(
self.fid,
self.sock,
edges=self.homo_edges,
edge_weights=self.homo_edge_weights,
node_features=self.homo_node_features,
edge_features=self.homo_edge_features,
node_labels=self.node_labels,
)
self.assertEqual(ds.graph.row_count, 4)
self.assertEqual(ds.graph.col_count, 2)
self.assertEqual(ds.graph.edge_count, 2)
self.assertEqual(ds.graph.topo.edge_weights.shape, (2,))
self.assertEqual(ds.node_features.shape, (4, 2))
self.assertEqual(ds.edge_features.shape, (2, 2))
self.assertEqual(ds.node_labels.shape[0], 4)
def test_in_homo_dataset(self):
ds = Dataset(edge_dir="in")
ds.load_vineyard(
self.fid,
self.sock,
edges=self.homo_edges,
)
self.assertEqual(ds.graph.row_count, 4)
self.assertEqual(ds.graph.col_count, 1)
self.assertEqual(ds.graph.edge_count, 2)
def test_hetero_dataset(self):
ds = Dataset()
ds.load_vineyard(
self.fid,
self.sock,
edges=self.hetero_edges,
edge_weights=self.hetero_edge_weights,
node_features=self.hetero_node_features,
edge_features=self.hetero_edge_features,
node_labels=self.node_labels,
)
graph1 = ds.graph[("person", "knows", "person")]
graph2 = ds.graph[("person", "created", "software")]
self.assertEqual(graph1.row_count, 4)
self.assertEqual(graph1.col_count, 2)
self.assertEqual(graph1.edge_count, 2)
self.assertEqual(graph1.topo.edge_weights.shape, (2,))
self.assertEqual(graph2.row_count, 4)
self.assertEqual(graph2.col_count, 2)
self.assertEqual(graph2.edge_count, 4)
self.assertEqual(graph2.topo.edge_weights.shape, (4,))
self.assertEqual(ds.node_features["person"].shape, (4, 2))
self.assertEqual(ds.node_features["software"].shape, (2, 1))
self.assertEqual(
ds.edge_features[("person", "knows", "person")].shape, (2, 2)
)
self.assertEqual(
ds.edge_features[("person", "created", "software")].shape, (4, 1)
)
self.assertEqual(ds.node_labels["person"].shape[0], 4)
def test_in_hetero_dataset(self):
ds = Dataset(edge_dir="in")
ds.load_vineyard(
self.fid,
self.sock,
edges=self.hetero_edges,
)
graph1 = ds.graph[("person", "knows", "person")]
graph2 = ds.graph[("person", "created", "software")]
self.assertEqual(graph1.row_count, 4)
self.assertEqual(graph1.col_count, 1)
self.assertEqual(graph1.edge_count, 2)
self.assertEqual(graph2.row_count, 2)
self.assertEqual(graph2.col_count, 3)
self.assertEqual(graph2.edge_count, 4)
def test_homo_dist_dataset(self):
ds = DistDataset()
ds.load_vineyard(
self.fid,
self.sock,
edges=self.homo_edges,
node_features=self.homo_node_features,
edge_features=self.homo_edge_features,
node_labels=self.node_labels,
)
print(ds.node_labels)
def test_hetero_dist_dataset(self):
ds = DistDataset()
ds.load_vineyard(
self.fid,
self.sock,
edges=self.hetero_edges,
node_features=self.hetero_node_features,
edge_features=self.hetero_edge_features,
node_labels=self.node_labels,
)
print(ds.node_labels["person"])
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_dist_subgraph_loader.py | test/python/test_dist_subgraph_loader.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import time
import unittest
import torch
import graphlearn_torch as glt
# sampling options
sampling_nprocs = 2
device_num = 2
def _prepare_dataset(rank: int):
"""
input graph:
1 1 0 0 0 0 0 1
0 1 0 1 0 0 1 0
0 0 1 0 1 0 1 0
0 0 0 0 0 1 0 0
0 1 0 1 0 0 0 0
1 0 0 0 0 1 0 0
1 0 0 0 1 0 0 0
0 0 0 0 0 0 0 1
supppose first 4 rows of above matrix are partitioned to partiton#0 and the
rest are belong to partition#1.
"""
# partition
node_pb = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1], dtype=torch.long)
edge_pb = torch.tensor([0] * 10 + [1] * 7, dtype=torch.long)
# graph
nodes, rows, cols, eids = [], [], [], []
if rank == 0:
nodes = [0, 1, 2, 3]
rows = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3]
cols = [0, 1, 7, 1, 3, 6, 2, 4, 6, 5]
eids = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
else:
nodes = [4, 5, 6, 7]
rows = [4, 4, 5, 5, 6, 6, 7]
cols = [1, 3, 0, 5, 0, 4, 7]
eids = [10, 11, 12, 13, 14, 15, 16]
edge_index = torch.tensor([rows, cols], dtype=torch.int64)
edge_ids = torch.tensor(eids, dtype=torch.int64)
csr_topo = glt.data.Topology(edge_index=edge_index, edge_ids=edge_ids)
graph = glt.data.Graph(csr_topo, 'ZERO_COPY', device=0)
# feature
device_group_list = [glt.data.DeviceGroup(0, [0]),
glt.data.DeviceGroup(1, [1])]
split_ratio = 0.2
nfeat = rank + torch.zeros(len(nodes), 512, dtype=torch.float32)
nfeat_id2idx = glt.utils.id2idx(nodes)
node_feature = glt.data.Feature(nfeat, nfeat_id2idx, split_ratio,
device_group_list, device=0)
efeat = rank + torch.ones(len(eids), 10, dtype=torch.float32)
efeat_id2idx = glt.utils.id2idx(eids)
edge_feature = glt.data.Feature(efeat, efeat_id2idx, split_ratio,
device_group_list, device=0)
# dist dataset
return glt.distributed.DistDataset(
2, rank,
graph, node_feature, edge_feature, None,
node_pb, edge_pb,
)
def _check_sample_result(data, rank):
tc = unittest.TestCase()
if rank == 0:
true_node = torch.tensor([0, 1, 3, 5, 6, 7], device='cuda:0')
true_edge_index = torch.tensor([[0, 1, 5, 1, 2, 4, 3, 0, 3, 0, 5],
[0, 0, 0, 1, 1, 1, 2, 3, 3, 4, 5]],
device='cuda:0')
true_edge_id = torch.tensor([0, 1, 2, 3, 4, 5, 9, 12, 13, 14, 16], device='cuda:0')
true_mapping = torch.tensor([0, 2, 5], device='cuda:0')
else:
true_node = torch.tensor([0, 1, 3, 5, 6, 7], device='cuda:1')
true_edge_index = torch.tensor([[0, 3, 0, 5, 0, 1, 5, 1, 2, 4, 3],
[3, 3, 4, 5, 0, 0, 0, 1, 1, 1, 2]],
device='cuda:1')
true_edge_id = torch.tensor([12, 13, 14, 16, 0, 1, 2, 3, 4, 5, 9], device='cuda:1')
true_mapping = torch.tensor([0, 2, 5], device='cuda:1')
tc.assertTrue(glt.utils.tensor_equal_with_device(data.node, true_node))
tc.assertTrue(glt.utils.tensor_equal_with_device(data.edge_index, true_edge_index))
tc.assertTrue(glt.utils.tensor_equal_with_device(data.edge, true_edge_id))
tc.assertTrue(glt.utils.tensor_equal_with_device(data.mapping, true_mapping))
device = data.node.device
for i, v in enumerate(data.node):
expect_feat = torch.zeros(512, device=device, dtype=torch.float32)
if v > 3: # rank1
expect_feat += 1
tc.assertTrue(glt.utils.tensor_equal_with_device(data.x[i], expect_feat))
tc.assertTrue(data.edge_attr is not None)
for i, e in enumerate(data.edge):
expect_feat = torch.ones(10, device=device, dtype=torch.float32)
if e > 9: # rank1
expect_feat += 1
tc.assertTrue(glt.utils.tensor_equal_with_device(data.edge_attr[i], expect_feat))
def run_test_as_worker(world_size: int, rank: int,
master_port: int, sampling_master_port: int,
dataset: glt.distributed.DistDataset,
input_nodes: glt.InputNodes, check_fn,
collocated = False):
# Initialize worker group context
glt.distributed.init_worker_group(
world_size, rank, 'dist-neighbor-loader-test'
)
dist_context = glt.distributed.get_context()
# Init RPC
glt.distributed.init_rpc(
master_addr='localhost',
master_port=master_port,
num_rpc_threads=1,
rpc_timeout=30
)
# dist loader
if collocated:
worker_options = glt.distributed.CollocatedDistSamplingWorkerOptions(
master_addr='localhost',
master_port=sampling_master_port,
rpc_timeout=10
)
else:
worker_options = glt.distributed.MpDistSamplingWorkerOptions(
num_workers=sampling_nprocs,
worker_devices=[torch.device('cuda', i % device_num)
for i in range(sampling_nprocs)],
worker_concurrency=2,
master_addr='localhost',
master_port=sampling_master_port,
rpc_timeout=10,
num_rpc_threads=2,
pin_memory=True
)
dist_loader = glt.distributed.DistSubGraphLoader(
data=dataset,
num_neighbors=[-1, -1],
input_nodes=input_nodes,
batch_size=3,
shuffle=False,
drop_last=False,
with_edge=True,
collect_features=True,
to_device=torch.device('cuda', rank % device_num),
worker_options=worker_options
)
# run testing
for epoch in range(0, 2):
for res in dist_loader:
check_fn(res, rank)
time.sleep(0.1)
glt.distributed.barrier()
print(f'[Trainer {dist_context.rank}] epoch {epoch} finished.')
dist_loader.shutdown()
def run_test_as_server(num_servers: int, num_clients: int, server_rank: int,
master_port: int, dataset: glt.distributed.DistDataset):
print(f'[Server {server_rank}] Initializing server ...')
glt.distributed.init_server(
num_servers=num_servers,
num_clients=num_clients,
server_rank=server_rank,
dataset=dataset,
master_addr='localhost',
master_port=master_port,
request_timeout=30,
num_rpc_threads=2,
server_group_name='dist-remote-sampling-test-server'
)
print(f'[Server {server_rank}] Waiting for exit ...')
glt.distributed.wait_and_shutdown_server()
print(f'[Server {server_rank}] Exited ...')
def run_test_as_client(num_servers: int, num_clients: int, client_rank: int,
master_port: int, sampling_master_port: int,
input_nodes: glt.InputNodes, check_fn):
print(f'[Client {client_rank}] Initializing client ...')
glt.distributed.init_client(
num_servers=num_servers,
num_clients=num_clients,
client_rank=client_rank,
master_addr='localhost',
master_port=master_port,
num_rpc_threads=1,
client_group_name='dist-remote-sampling-test-client'
)
print(f'[Client {client_rank}] Creating DistSubGraphLoader ...')
target_server_rank = client_rank % num_servers
options = glt.distributed.RemoteDistSamplingWorkerOptions(
server_rank=target_server_rank,
num_workers=sampling_nprocs,
worker_devices=[torch.device('cuda', i % device_num)
for i in range(sampling_nprocs)],
worker_concurrency=2,
master_addr='localhost',
master_port=sampling_master_port,
rpc_timeout=10,
num_rpc_threads=2,
prefetch_size=4
)
dist_loader = glt.distributed.DistSubGraphLoader(
data=None,
num_neighbors=[-1, -1],
input_nodes=input_nodes,
batch_size=3,
shuffle=False,
drop_last=False,
with_edge=True,
collect_features=True,
to_device=torch.device('cuda', client_rank % device_num),
worker_options=options
)
print(f'[Client {client_rank}] Running tests ...')
for epoch in range(0, 2):
for res in dist_loader:
check_fn(res, client_rank)
time.sleep(0.1)
glt.distributed.barrier()
print(f'[Client {client_rank}] epoch {epoch} finished.')
print(f'[Client {client_rank}] Shutdowning ...')
glt.distributed.shutdown_client()
print(f'[Client {client_rank}] Exited ...')
class DistSubGraphLoaderTestCase(unittest.TestCase):
def setUp(self):
self.dataset0 = _prepare_dataset(rank=0)
self.dataset1 = _prepare_dataset(rank=1)
self.input_nodes0 = torch.tensor([0, 3, 7], dtype=torch.long)
self.input_nodes1 = self.input_nodes0
self.master_port = glt.utils.get_free_port()
self.sampling_master_port = glt.utils.get_free_port()
def test_collocated(self):
print("\n--- DistSubGraphLoader Test (collocated) ---")
mp_context = torch.multiprocessing.get_context('spawn')
w0 = mp_context.Process(
target=run_test_as_worker,
args=(2, 0, self.master_port, self.sampling_master_port,
self.dataset0, self.input_nodes0, _check_sample_result, True)
)
w1 = mp_context.Process(
target=run_test_as_worker,
args=(2, 1, self.master_port, self.sampling_master_port,
self.dataset1, self.input_nodes1, _check_sample_result, True)
)
w0.start()
w1.start()
w0.join()
w1.join()
def test_mp(self):
print("\n--- DistSubGraphLoader Test (multiprocessing) ---")
mp_context = torch.multiprocessing.get_context('spawn')
w0 = mp_context.Process(
target=run_test_as_worker,
args=(2, 0, self.master_port, self.sampling_master_port,
self.dataset0, self.input_nodes0, _check_sample_result, False)
)
w1 = mp_context.Process(
target=run_test_as_worker,
args=(2, 1, self.master_port, self.sampling_master_port,
self.dataset1, self.input_nodes1, _check_sample_result, False)
)
w0.start()
w1.start()
w0.join()
w1.join()
def test_remote_mode(self):
print("\n--- DistSubGraphLoader Test (server-client mode, remote) ---")
mp_context = torch.multiprocessing.get_context('spawn')
server0 = mp_context.Process(
target=run_test_as_server,
args=(2, 2, 0, self.master_port, self.dataset0)
)
server1 = mp_context.Process(
target=run_test_as_server,
args=(2, 2, 1, self.master_port, self.dataset1)
)
client0 = mp_context.Process(
target=run_test_as_client,
args=(2, 2, 0, self.master_port, self.sampling_master_port,
self.input_nodes0, _check_sample_result)
)
client1 = mp_context.Process(
target=run_test_as_client,
args=(2, 2, 1, self.master_port, self.sampling_master_port,
self.input_nodes1, _check_sample_result)
)
server0.start()
server1.start()
client0.start()
client1.start()
server0.join()
server1.join()
client0.join()
client1.join()
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_pyg_remote_backend.py | test/python/test_pyg_remote_backend.py | import unittest
from collections import defaultdict
from typing import List
import graphlearn_torch as glt
import torch
from dist_test_utils import *
from dist_test_utils import _prepare_hetero_dataset
from graphlearn_torch.distributed.dist_client import request_server
from graphlearn_torch.distributed.dist_server import DistServer
from parameterized import parameterized
from torch_geometric.utils.sparse import index2ptr, ptr2index
def run_test_as_server(
num_servers: int,
num_clients: int,
server_rank: List[int],
master_port: int,
dataset: glt.distributed.DistDataset,
):
print(f"[Server {server_rank}] Initializing server ...")
glt.distributed.init_server(
num_servers=num_servers,
num_clients=num_clients,
server_rank=server_rank,
dataset=dataset,
master_addr="localhost",
master_port=master_port,
request_timeout=30,
num_rpc_threads=2,
server_group_name="pyg_remote_backend_test_server",
)
print(f"[Server {server_rank}] Waiting for exit ...")
glt.distributed.wait_and_shutdown_server()
print(f"[Server {server_rank}] Exited ...")
def run_test_as_client(
num_servers: int,
num_clients: int,
client_rank: int,
master_port: int,
node_type,
node_index,
feature_size,
edge_type,
edge_layout,
check_fn,
):
print(f"[Client {client_rank}] Initializing client ...")
glt.distributed.init_client(
num_servers=num_servers,
num_clients=num_clients,
client_rank=client_rank,
master_addr="localhost",
master_port=master_port,
num_rpc_threads=1,
client_group_name="pyg_remote_backend_test_client",
)
print(f"[Client {client_rank}] Check function {check_fn.__name__} ...")
check_fn(node_type, node_index, feature_size, edge_type, edge_layout)
print(f"[Client {client_rank}] Shutdowning ...")
glt.distributed.shutdown_client()
print(f"[Client {client_rank}] Exited ...")
def _check_feature_store(node_type, node_index, feature_size, edge_type, edge_layout):
tc = unittest.TestCase()
num_partitions, _, _, _ = request_server(0, DistServer.get_dataset_meta)
partition_ids = request_server(
0, DistServer.get_node_partition_id, node_type, node_index
)
tc.assertTrue(torch.equal(partition_ids, node_index % 2))
indexes = []
features = []
labels = []
input_order = torch.arange(node_index.size(0), dtype=torch.long)
for pidx in range(0, num_partitions):
remote_mask = (partition_ids == pidx)
remote_ids = torch.masked_select(node_index, remote_mask)
if remote_ids.shape[0] > 0:
feature = request_server(pidx, DistServer.get_node_feature, node_type, remote_ids)
label = request_server(pidx, DistServer.get_node_label, node_type, remote_ids)
label = torch.unsqueeze(label, 1)
features.append(feature)
labels.append(label)
indexes.append(torch.masked_select(input_order, remote_mask))
num_nodes = request_server(pidx, DistServer.get_tensor_size, node_type)[0]
tc.assertEqual(num_nodes, vnum_per_partition)
node_features = torch.zeros(node_index.shape[0], features[0].shape[1], dtype=features[0].dtype)
node_labels = torch.zeros(node_index.shape[0], 1, dtype=labels[0].dtype)
for i, (feature, label) in enumerate(zip(features, labels)):
node_features[indexes[i]] = feature
node_labels[indexes[i]] = label
for id, index in enumerate(node_index):
if index % 2 == 0:
tc.assertTrue(torch.equal(node_features[id], torch.zeros(feature_size)))
else:
if node_type == user_ntype:
tc.assertTrue(torch.equal(node_features[id], torch.ones(feature_size)))
else:
tc.assertTrue(
torch.equal(node_features[id], torch.full((feature_size,), 2))
)
tc.assertEqual(node_labels[id], index)
def _check_graph_store(node_type, node_index, feature_size, edge_type, edge_layout):
tc = unittest.TestCase()
if edge_type == u2i_etype:
step = 1
else:
step = 2
for server_id in range(2):
true_rows = []
true_cols = []
for v in range(server_id, vnum_total, 2):
true_rows.extend([v for _ in range(degree)])
true_cols.extend(
sorted([((v + i + step) % vnum_total) for i in range(degree)])
)
true_rows = torch.tensor(true_rows)
true_cols = torch.tensor(true_cols)
(row, col) = request_server(
server_id, DistServer.get_edge_index, edge_type, edge_layout
)
tc.assertTrue(torch.equal(row, true_rows))
tc.assertTrue(torch.equal(col, true_cols))
class PygRemoteBackendTestCase(unittest.TestCase):
def setUp(self):
self.loader_batch_size = 4
self.num_neighbors = [4, 3, 2]
self.dataset0 = _prepare_hetero_dataset(rank=0, edge_dir="out")
self.dataset1 = _prepare_hetero_dataset(rank=1, edge_dir="out")
self.master_port = glt.utils.get_free_port()
@parameterized.expand(
[
(1, 2, user_ntype, torch.tensor([0]), 512),
(1, 2, user_ntype, torch.tensor([0, 1, 2, 3]), 512),
(1, 2, item_ntype, torch.tensor([0]), 256),
(1, 2, item_ntype, torch.tensor([4, 5, 6, 7]), 256),
]
)
def test_dist_server_supported_feature_store(
self, num_clients, num_servers, node_type, node_index, feature_size
):
print(
"\n--- Function in DistServer supported PyG Remote Backend Test (server-client mode, remote) ---"
)
print(f"--- num_clients: {num_clients} num_servers: {num_servers} ---")
self.dataset_list = [self.dataset0, self.dataset1]
mp_context = torch.multiprocessing.get_context("spawn")
server_procs = []
for server_rank in range(num_servers):
server_procs.append(
mp_context.Process(
target=run_test_as_server,
args=(
num_servers,
num_clients,
server_rank,
self.master_port,
self.dataset_list[server_rank],
),
)
)
client_procs = []
for client_rank in range(num_clients):
client_procs.append(
mp_context.Process(
target=run_test_as_client,
args=(
num_servers,
num_clients,
client_rank,
self.master_port,
node_type,
node_index,
feature_size,
None,
None,
_check_feature_store,
),
)
)
for sproc in server_procs:
sproc.start()
for cproc in client_procs:
cproc.start()
for sproc in server_procs:
sproc.join()
for cproc in client_procs:
cproc.join()
@parameterized.expand(
[
(1, 2, i2i_etype, "coo"),
(1, 2, u2i_etype, "coo"),
]
)
def test_dist_server_supported_graph_store(
self, num_clients, num_servers, edge_type, edge_layout
):
print(
"\n--- Function in DistServer supported PyG Remote Backend Test (server-client mode, remote) ---"
)
print(f"--- num_clients: {num_clients} num_servers: {num_servers} ---")
self.dataset_list = [self.dataset0, self.dataset1]
mp_context = torch.multiprocessing.get_context("spawn")
server_procs = []
for server_rank in range(num_servers):
server_procs.append(
mp_context.Process(
target=run_test_as_server,
args=(
num_servers,
num_clients,
server_rank,
self.master_port,
self.dataset_list[server_rank],
),
)
)
client_procs = []
for client_rank in range(num_clients):
client_procs.append(
mp_context.Process(
target=run_test_as_client,
args=(
num_servers,
num_clients,
client_rank,
self.master_port,
None,
None,
None,
edge_type,
edge_layout,
_check_graph_store,
),
)
)
for sproc in server_procs:
sproc.start()
for cproc in client_procs:
cproc.start()
for sproc in server_procs:
sproc.join()
for cproc in client_procs:
cproc.join()
if __name__ == "__main__":
unittest.main()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/test/python/test_neighbor_sampler.py | test/python/test_neighbor_sampler.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import torch
from graphlearn_torch.data import Topology, Graph
from graphlearn_torch.sampler import NeighborSampler, NegativeSampling, EdgeSamplerInput
from graphlearn_torch.utils import tensor_equal_with_device
class RandomSamplerTestCase(unittest.TestCase):
def setUp(self):
"""
input graph:
1 1 0 0 0 0
0 1 0 1 0 0
0 0 1 0 1 0
0 0 0 0 0 1
"""
indptr = torch.tensor([0, 2, 4, 6, 7, 7, 7], dtype=torch.int64)
indices = torch.tensor([0, 1, 1, 3, 2, 4, 5], dtype=torch.int64)
self.csr_topo = Topology(edge_index=(indptr, indices), input_layout='CSR')
self.input_seeds = torch.tensor([0, 1, 2, 3, 4], dtype=torch.int64)
self.num_neighbors = [2]
self.nodes = torch.tensor([0, 1, 2, 3, 4, 5], dtype=torch.int64)
self.edge_index = torch.tensor([[0, 1, 1, 3, 2, 4, 5],
[0, 0, 1, 1, 2, 2, 3]],
dtype=torch.int64)
self.edge_ids = torch.tensor([0, 1, 2, 3, 4, 5, 6], dtype=torch.int64)
self.edge_weights = torch.tensor([.1, .9, .1, .9, .1, .9, .1], dtype=torch.float)
self.csr_topo_with_weight = Topology(edge_index=(indptr, indices),
input_layout='CSR',
edge_weights=self.edge_weights)
self.device = torch.device('cuda:0')
self.cuda_nodes = torch.tensor([0, 1, 2, 3, 4, 5],
dtype=torch.int64, device=self.device)
self.cuda_edge_index = torch.tensor([[0, 1, 1, 3, 2, 4, 5],
[0, 0, 1, 1, 2, 2, 3]],
dtype=torch.int64, device=self.device)
self.cuda_edge_ids = torch.tensor([0, 1, 2, 3, 4, 5, 6],
dtype=torch.int64, device=self.device)
self.edge_label_index = torch.tensor([[0, 3],
[1, 5]],
dtype=torch.int64)
self.cuda_edge_label_index = torch.tensor([[0, 3],
[1, 5]],
dtype=torch.int64,
device=self.device)
def test_cpu_sample_from_node(self):
g = Graph(self.csr_topo, mode='CPU')
sampler = NeighborSampler(g, self.num_neighbors, device=torch.device('cpu'))
sample_out = sampler.sample_from_nodes(self.input_seeds)
nodes = sample_out.node
edge_index = torch.stack([sample_out.row, sample_out.col])
self.assertTrue(tensor_equal_with_device(nodes, self.nodes))
self.assertTrue(tensor_equal_with_device(edge_index, self.edge_index))
def test_cuda_sample_from_node(self):
g = Graph(self.csr_topo, 'CUDA', 0)
sampler = NeighborSampler(g, self.num_neighbors, device=self.device)
sample_out = sampler.sample_from_nodes(self.input_seeds)
nodes = sample_out.node
edge_index = torch.stack([sample_out.row, sample_out.col])
self.assertTrue(tensor_equal_with_device(nodes, self.cuda_nodes))
self.assertTrue(tensor_equal_with_device(edge_index, self.cuda_edge_index))
def test_pin_sample_from_node(self):
g = Graph(self.csr_topo, 'ZERO_COPY', 0)
sampler = NeighborSampler(g, self.num_neighbors, device=self.device)
sample_out = sampler.sample_from_nodes(self.input_seeds)
nodes = sample_out.node
edge_index = torch.stack([sample_out.row, sample_out.col])
self.assertTrue(tensor_equal_with_device(nodes, self.cuda_nodes))
self.assertTrue(tensor_equal_with_device(edge_index, self.cuda_edge_index))
def test_cuda_sample_prob(self):
g = Graph(self.csr_topo, 'CUDA', 0)
sampler = NeighborSampler(g, [2, 2], device=self.device)
probs = sampler.sample_prob(
torch.tensor([0, 4], dtype=torch.int64),
self.nodes.size(0)
)
print(probs)
def test_cpu_sample_from_node_with_edge(self):
g = Graph(self.csr_topo, mode='CPU')
sampler = NeighborSampler(
g, self.num_neighbors, device=torch.device('cpu'), with_edge=True
)
sample_out = sampler.sample_from_nodes(self.input_seeds)
self.assertTrue(tensor_equal_with_device(sample_out.node, self.nodes))
self.assertTrue(tensor_equal_with_device(
torch.stack([sample_out.row, sample_out.col]), self.edge_index))
self.assertTrue(tensor_equal_with_device(sample_out.edge, self.edge_ids))
def test_cpu_weighted_sample_from_node_with_edge(self):
  """Weighted CPU sampling must strongly prefer high-weight edges.

  Samples one neighbor per seed 1000 times and counts how often each edge
  id is drawn: low-weight edges (0, 2, 4) should be rare, high-weight
  edges (1, 3, 5) frequent, and edge 6 (no competitor) always chosen.
  """
  g = Graph(self.csr_topo_with_weight, mode='CPU')
  stats = torch.zeros(7)
  sampler = NeighborSampler(
    g, [1], with_edge=True, with_weight=True, device=torch.device('cpu'))
  # Hoisted out of the loop: the scatter source is loop-invariant, so there
  # is no need to allocate a fresh ones-tensor on each of 1000 iterations.
  ones = torch.ones(7)
  for _ in range(1000):
    sample_out = sampler.sample_from_nodes(self.input_seeds)
    edges = sample_out.edge
    stats.scatter_add_(0, edges, ones)
  # with high probability holds
  self.assertTrue(stats[0] < 200 and stats[2] < 200 and stats[4] < 200)
  self.assertTrue(stats[1] > 800 and stats[3] > 800 and stats[5] > 800)
  self.assertEqual(stats[6], 1000)
def test_cuda_sample_from_node_with_edge(self):
  """CUDA sampling with with_edge=True also returns the sampled edge ids."""
  graph = Graph(self.csr_topo, 'CUDA', 0)
  edge_sampler = NeighborSampler(
    graph, self.num_neighbors, device=self.device, with_edge=True
  )
  out = edge_sampler.sample_from_nodes(self.input_seeds)
  sampled_edges = torch.stack([out.row, out.col])
  self.assertTrue(tensor_equal_with_device(out.node, self.cuda_nodes))
  self.assertTrue(tensor_equal_with_device(sampled_edges, self.cuda_edge_index))
  self.assertTrue(tensor_equal_with_device(out.edge, self.cuda_edge_ids))
def test_cpu_sample_from_edges(self):
  """Edge-based CPU sampling with binary and triplet negative sampling.

  Verifies that positive pairs recovered from the sampler's metadata equal
  the seed ``edge_label_index`` and that sampled negatives are not actual
  edges of the graph.
  """
  g = Graph(self.csr_topo, mode='CPU')
  bin_neg_sampling = NegativeSampling('binary')
  tri_neg_sampling = NegativeSampling('triplet')
  sampler = NeighborSampler(
    g, self.num_neighbors, device=torch.device('cpu'), with_neg=True,
  )
  bin_inputs = EdgeSamplerInput(self.edge_label_index[0],
                                self.edge_label_index[1],
                                neg_sampling=bin_neg_sampling)
  tri_inputs = EdgeSamplerInput(self.edge_label_index[0],
                                self.edge_label_index[1],
                                neg_sampling=tri_neg_sampling)
  # Binary mode: metadata['edge_label_index'] indexes into sample_out.node;
  # the leading columns are the positive pairs, the trailing ones negatives.
  sample_out = sampler.sample_from_edges(bin_inputs)
  out_index = sample_out.metadata['edge_label_index']
  pos_index = sample_out.node[out_index[:,:2]]
  neg_index = sample_out.node[out_index[:,2:]]
  # Each row of `baseline` is an existing (dst, src) pair of the graph.
  baseline = torch.stack((self.edge_index[1], self.edge_index[0]), dim=0).T
  self.assertTrue(tensor_equal_with_device(pos_index, self.edge_label_index))
  # A sampled negative must never match an existing edge in both endpoints.
  self.assertFalse(
    torch.any(torch.sum((neg_index.T[0]==baseline).to(torch.int), dim=1) == 2)
  )
  # Triplet mode: src and positive-dst indices are returned separately.
  sample_out = sampler.sample_from_edges(tri_inputs)
  pos_index = torch.stack(
    (sample_out.node[sample_out.metadata['src_index']],
     sample_out.node[sample_out.metadata['dst_pos_index']]
    ), dim=0)
  self.assertTrue(tensor_equal_with_device(pos_index, self.edge_label_index))
def test_cuda_sample_from_edges(self):
  """CUDA counterpart of test_cpu_sample_from_edges.

  NOTE(review): other CUDA tests construct the graph as
  Graph(self.csr_topo, 'CUDA', 0); this one omits the device argument —
  confirm the default device matches.
  """
  g = Graph(self.csr_topo, mode='CUDA')
  bin_neg_sampling = NegativeSampling('binary')
  tri_neg_sampling = NegativeSampling('triplet')
  sampler = NeighborSampler(
    g, self.num_neighbors, device=torch.device('cuda:0'), with_neg=True,
  )
  bin_inputs = EdgeSamplerInput(self.edge_label_index[0],
                                self.edge_label_index[1],
                                neg_sampling=bin_neg_sampling)
  tri_inputs = EdgeSamplerInput(self.edge_label_index[0],
                                self.edge_label_index[1],
                                neg_sampling=tri_neg_sampling)
  # Binary mode: positives must round-trip, negatives must not be edges.
  sample_out = sampler.sample_from_edges(bin_inputs)
  out_index = sample_out.metadata['edge_label_index']
  pos_index = sample_out.node[out_index[:,:2]]
  neg_index = sample_out.node[out_index[:,2:]]
  baseline = torch.stack((self.cuda_edge_index[1], self.cuda_edge_index[0]),
                         dim=0).T
  self.assertTrue(tensor_equal_with_device(pos_index,
                                           self.cuda_edge_label_index))
  self.assertFalse(
    torch.any(torch.sum((neg_index.T[0]==baseline).to(torch.int), dim=1) == 2)
  )
  # Triplet mode: src and positive-dst indices are returned separately.
  sample_out = sampler.sample_from_edges(tri_inputs)
  pos_index = torch.stack(
    (sample_out.node[sample_out.metadata['src_index']],
     sample_out.node[sample_out.metadata['dst_pos_index']]
    ), dim=0)
  self.assertTrue(tensor_equal_with_device(pos_index,
                                           self.cuda_edge_label_index))
if __name__ == "__main__":
unittest.main() | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/docs/conf.py | docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import datetime
# Imported so the package is importable at doc-build time (autodoc resolves
# members from it); the name itself is not referenced below.
import graphlearn_torch

project = 'graphlearn-for-pytorch'
author = 'Alibaba-inc'
copyright = f'{datetime.datetime.now().year}, {author}'

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
# Markdown sources are parsed with recommonmark's CommonMark parser.
from recommonmark.parser import CommonMarkParser
source_parsers = {
  '.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
extensions = [
  'sphinx.ext.autodoc',
  'sphinx.ext.autosummary',
  'sphinx.ext.intersphinx',
  'sphinx.ext.mathjax',
  'sphinx.ext.napoleon',
  'sphinx.ext.viewcode',
  "myst_parser",
]
# Drop module prefixes in generated signatures; keep members in source order.
add_module_names = False
autodoc_member_order = 'bysource'
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/train_sage_prod_with_trim.py | examples/train_sage_prod_with_trim.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import time
import torch
import numpy as np
import os.path as osp
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn.functional as F
from numpy import genfromtxt
from torch_geometric.nn import GraphSAGE
from ogb.nodeproppred import PygNodePropPredDataset
from tqdm import tqdm
import graphlearn_torch as glt
def run(rank, glt_ds, train_idx,
        num_features, num_classes, trimmed):
  """Train a 3-layer GraphSAGE on one device.

  Args:
    rank: device index used for both sampling and training.
    glt_ds: graphlearn_torch Dataset with graph, features and labels.
    train_idx: seed node ids for the training loader.
    num_features: model input dimension.
    num_classes: model output dimension.
    trimmed: if True, pass per-hop sampled node/edge counts to the model
      (PyG uses them to trim redundant computation).
  """
  loader = glt.loader.NeighborLoader(glt_ds,
                                     [10, 10, 10],
                                     train_idx,
                                     batch_size=1024,
                                     shuffle=True,
                                     device=torch.device(rank))
  print(f'Rank {rank} build graphlearn_torch NeighborLoader Done.')
  model = GraphSAGE(
    in_channels=num_features,
    hidden_channels=256,
    num_layers=3,
    out_channels=num_classes,
  ).to(rank)
  optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
  for epoch in range(1, 10):
    model.train()
    tic = time.time()
    seen = 0
    loss_sum = 0.0
    for batch in tqdm(loader):
      optimizer.zero_grad()
      if trimmed:
        logits = model(
          batch.x, batch.edge_index,
          num_sampled_nodes_per_hop=batch.num_sampled_nodes,
          num_sampled_edges_per_hop=batch.num_sampled_edges,
        )
      else:
        logits = model(batch.x, batch.edge_index)
      # Only the seed nodes (first batch_size rows) carry supervision.
      out = logits[:batch.batch_size].log_softmax(dim=-1)
      loss = F.nll_loss(out, batch.y[:batch.batch_size])
      loss.backward()
      optimizer.step()
      seen += batch.batch_size
      loss_sum += float(loss) * batch.batch_size
    print(f'Epoch: {epoch:03d}, Loss: {(loss_sum / seen):.4f},',
          f'Epoch Time: {time.time() - tic}')
if __name__ == '__main__':
  # NOTE(review): world_size is computed but training below runs on a single
  # device (rank 0) — confirm whether multi-GPU spawning was intended.
  world_size = torch.cuda.device_count()
  start = time.time()
  root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products')
  dataset = PygNodePropPredDataset('ogbn-products', root)
  split_idx = dataset.get_idx_split()
  data = dataset[0]
  train_idx = split_idx['train']
  print(f'Load data cost {time.time()-start} s.')
  start = time.time()
  print('Build graphlearn_torch dataset...')
  # Graph lives on GPU; split_ratio=1 presumably places all node features on
  # device group 0 as well — verify against glt.data.Dataset docs.
  glt_dataset = glt.data.Dataset()
  glt_dataset.init_graph(
    edge_index=data.edge_index,
    graph_mode='CUDA',
    directed=False
  )
  glt_dataset.init_node_features(
    node_feature_data=data.x,
    sort_func=glt.data.sort_by_in_degree,
    split_ratio=1,
    device_group_list=[glt.data.DeviceGroup(0, [0])],
  )
  glt_dataset.init_node_labels(node_label_data=data.y)
  print(f'Build graphlearn_torch csr_topo and feature cost {time.time() - start} s.')
run(0, glt_dataset, train_idx, 100, 47, True) | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/train_sage_ogbn_products.py | examples/train_sage_ogbn_products.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Reaches around 0.7870 ± 0.0036 test accuracy.
import time
import torch
import graphlearn_torch as glt
import os.path as osp
import torch.nn.functional as F
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from torch_geometric.loader import NeighborSampler
from torch_geometric.nn import SAGEConv
from tqdm import tqdm
class SAGE(torch.nn.Module):
  """Multi-layer GraphSAGE for node classification.

  ``forward`` consumes per-layer bipartite adjacencies from a neighbor
  sampler; ``inference`` evaluates layer-by-layer over all nodes using the
  module-level ``test_loader`` and ``device`` globals.
  """
  def __init__(self, in_channels, hidden_channels, out_channels, num_layers):
    super(SAGE, self).__init__()
    self.num_layers = num_layers
    # in -> hidden, (num_layers-2) x hidden -> hidden, hidden -> out.
    self.convs = torch.nn.ModuleList()
    self.convs.append(SAGEConv(in_channels, hidden_channels))
    for _ in range(num_layers - 2):
      self.convs.append(SAGEConv(hidden_channels, hidden_channels))
    self.convs.append(SAGEConv(hidden_channels, out_channels))

  def reset_parameters(self):
    for conv in self.convs:
      conv.reset_parameters()

  def forward(self, x, adjs):
    # `train_loader` computes the k-hop neighborhood of a batch of nodes,
    # and returns, for each layer, a bipartite graph object, holding the
    # bipartite edges `edge_index`, the index `e_id` of the original edges,
    # and the size/shape `size` of the bipartite graph.
    # Target nodes are also included in the source nodes so that one can
    # easily apply skip-connections or add self-loops.
    for i, (edge_index, _, size) in enumerate(adjs):
      x_target = x[:size[1]]  # Target nodes are always placed first.
      x = self.convs[i]((x, x_target), edge_index)
      if i != self.num_layers - 1:
        x = F.relu(x)
        x = F.dropout(x, p=0.5, training=self.training)
    return x.log_softmax(dim=-1)

  def inference(self, x_all):
    """Full-neighborhood layer-wise inference; returns final-layer output."""
    pbar = tqdm(total=x_all.size(0) * self.num_layers)
    pbar.set_description('Evaluating')
    # Compute representations of nodes layer by layer, using *all*
    # available edges. This leads to faster computation in contrast to
    # immediately computing the final representations of each batch.
    total_edges = 0
    for i in range(self.num_layers):
      xs = []
      for batch_size, n_id, adj in test_loader:
        edge_index, _, size = adj.to(device)
        total_edges += edge_index.size(1)
        x = x_all[n_id].to(device)
        x_target = x[:size[1]]
        x = self.convs[i]((x, x_target), edge_index)
        if i != self.num_layers - 1:
          x = F.relu(x)
        xs.append(x.cpu())
        pbar.update(batch_size)
      # Outputs of this layer become inputs to the next.
      x_all = torch.cat(xs, dim=0)
    pbar.close()
    return x_all
# Dataset root lives next to the repo checkout: ../data/products.
root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'products')
dataset = PygNodePropPredDataset('ogbn-products', root)
split_idx = dataset.get_idx_split()
evaluator = Evaluator(name='ogbn-products')
data = dataset[0]

# PyG NeighborSampler
# Full-neighborhood (sizes=[-1]) loader used by SAGE.inference for evaluation.
test_loader = NeighborSampler(data.edge_index, node_idx=None, sizes=[-1],
                              batch_size=4096, shuffle=False, num_workers=12)

# graphlearn-torch dataset. ZERO_COPY mode plus split_ratio=0.2 with
# in-degree sorting — presumably the hottest 20% of feature rows go to
# device group 0; verify against glt.data.Dataset docs.
glt_dataset = glt.data.Dataset()
glt_dataset.init_graph(
  edge_index=dataset[0].edge_index,
  graph_mode='ZERO_COPY',
  directed=False
)
glt_dataset.init_node_features(
  node_feature_data=data.x,
  sort_func=glt.data.sort_by_in_degree,
  split_ratio=0.2,
  device_group_list=[glt.data.DeviceGroup(0, [0])],
)
glt_dataset.init_node_labels(node_label_data=data.y)
device = torch.device('cuda:0')

# graphlearn_torch NeighborLoader
# as_pyg_v1=True yields (batch_size, n_id, adjs) tuples like PyG's v1 sampler.
train_loader = glt.loader.NeighborLoader(glt_dataset,
                                         [15, 10, 5],
                                         split_idx['train'],
                                         batch_size=1024,
                                         shuffle=True,
                                         drop_last=True,
                                         device=device,
                                         as_pyg_v1=True)
model = SAGE(dataset.num_features, 256, dataset.num_classes, num_layers=3)
model = model.to(device)
def train(epoch):
  """Run one training epoch; returns (mean loss, approx. train accuracy)."""
  model.train()
  pbar = tqdm(total=split_idx['train'].size(0))
  pbar.set_description(f'Epoch {epoch:02d}')
  total_loss = total_correct = 0
  step = 0
  glt_dataset.node_labels = glt_dataset.node_labels.to(device)
  for batch_size, n_id, adjs in train_loader:
    # `adjs` holds one (edge_index, e_id, size) tuple per layer.
    adjs = [adj.to(device) for adj in adjs]
    optimizer.zero_grad()
    out = model(glt_dataset.node_features[n_id], adjs)
    target = glt_dataset.node_labels[n_id[:batch_size]]
    loss = F.nll_loss(out, target)
    loss.backward()
    optimizer.step()
    total_loss += float(loss)
    total_correct += int(out.argmax(dim=-1).eq(target).sum())
    step += 1
    pbar.update(batch_size)
  pbar.close()
  return total_loss / step, total_correct / split_idx['train'].size(0)
@torch.no_grad()
def test():
  """Full-graph inference; returns OGB accuracy on train/valid/test splits."""
  model.eval()
  out = model.inference(glt_dataset.node_features)
  y_true = glt_dataset.node_labels.cpu().unsqueeze(-1)
  y_pred = out.argmax(dim=-1, keepdim=True)
  accs = []
  for split in ('train', 'valid', 'test'):
    idx = split_idx[split]
    accs.append(evaluator.eval({
      'y_true': y_true[idx],
      'y_pred': y_pred[idx],
    })['acc'])
  return tuple(accs)
# Single run (range(1, 2)); evaluation starts after a 5-epoch warmup and the
# test accuracy reported is the one at the best validation accuracy.
test_accs = []
for run in range(1, 2):
  print('')
  print(f'Run {run:02d}:')
  print('')
  model.reset_parameters()
  optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
  best_val_acc = final_test_acc = 0
  for epoch in range(1, 21):
    epoch_start = time.time()
    loss, acc = train(epoch)
    print(f'Epoch {epoch:02d}, Loss: {loss:.4f}, Approx. Train: {acc:.4f}',
          f'Epoch Time: {time.time() - epoch_start}')
    if epoch > 5:
      train_acc, val_acc, test_acc = test()
      print(f'Train: {train_acc:.4f}, Val: {val_acc:.4f}, Test: {test_acc:.4f}')
      if val_acc > best_val_acc:
        best_val_acc = val_acc
        final_test_acc = test_acc
  test_accs.append(final_test_acc)
test_acc = torch.tensor(test_accs)
print('============================')
print(f'Final Test: {test_acc.mean():.4f} ± {test_acc.std():.4f}')
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/feature_mp.py | examples/feature_mp.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import graphlearn_torch as glt
import torch.multiprocessing as mp
def test_feature(rank, world_size, feature):
  """Worker process: verify Feature lookups return the expected rows.

  The parent builds a feature matrix of three 128-row blocks valued 1, 2
  and 3, so each looked-up id maps to a known constant row.
  """
  torch.cuda.set_device(rank)
  assert list(feature.shape) == [128*3, 128]
  dev = torch.device(rank)
  ids = torch.tensor([10, 20, 200, 210, 300, 310], dtype=torch.int64,
                     device=dev)
  block = torch.ones(2, 128, dtype=torch.float32, device=dev)
  expected = torch.cat((block, block*2, block*3), 0)
  assert glt.utils.tensor_equal_with_device(feature[ids], expected)
if __name__ == "__main__":
print('Use GPU 0 and GPU 1 for multiprocessing Feature test.')
world_size = 2
attr = torch.ones(128, 128, dtype=torch.float32)
tensor = torch.cat([attr, attr*2, attr*3], 0)
rows = torch.cat([torch.arange(128*3),
torch.randint(128, (128*3,)),
torch.randint(128*2, (128*3,))])
cols = torch.cat([torch.randint(128*3, (128*3,)),
torch.randint(128*3, (128*3,)),
torch.randint(128*3, (128*3,))])
csr_topo = glt.data.Topology(edge_index=torch.stack([rows, cols]))
device_group_list = [glt.data.DeviceGroup(0, [0]),
glt.data.DeviceGroup(1, [1])]
#device_group_list = [glt.data.DeviceGroup(0, [0, 1])]
split_ratio = 0.8 # [0, 1]
cpu_tensor, id2index = glt.data.sort_by_in_degree(tensor, split_ratio, csr_topo)
feature = glt.data.Feature(cpu_tensor, id2index, split_ratio, device_group_list, 0)
mp.spawn(test_feature,
args=(world_size, feature),
nprocs=world_size,
join=True) | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/graph_sage_unsup_ppi.py | examples/graph_sage_unsup_ppi.py | import os.path as osp
import torch
import torch.nn.functional as F
import tqdm
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score
from sklearn.multioutput import MultiOutputClassifier
from torch_geometric.data import Batch
from torch_geometric.datasets import PPI
from torch_geometric.loader import DataLoader
from torch_geometric.nn import GraphSAGE
import graphlearn_torch as glt
from graphlearn_torch.loader import LinkNeighborLoader
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'PPI')
train_dataset = PPI(path, split='train')
val_dataset = PPI(path, split='val')
test_dataset = PPI(path, split='test')

# Group all training graphs into a single graph to perform sampling:
train_data = Batch.from_data_list(train_dataset)

# Prepare graph and feature for graphlearn-torch
train_feature = train_data.x.clone(memory_format=torch.contiguous_format)
glt_dataset = glt.data.Dataset()
glt_dataset.init_graph(
  edge_index=train_data['edge_index'],
  graph_mode='ZERO_COPY'
)
glt_dataset.init_node_features(
  node_feature_data=train_feature,
  split_ratio=0.2,
  device_group_list=[glt.data.DeviceGroup(0, [0])]
)

# Link-level loader with binary negative sampling for unsupervised training.
loader = LinkNeighborLoader(
  data=glt_dataset,
  batch_size=2048,
  shuffle=True,
  neg_sampling='binary',
  num_neighbors=[10, 10],
  num_workers=6,
  persistent_workers=True
)

# Evaluation loaders (one datapoint corresponds to a graph)
train_loader = DataLoader(train_dataset, batch_size=2)
val_loader = DataLoader(val_dataset, batch_size=2)
test_loader = DataLoader(test_dataset, batch_size=2)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = GraphSAGE(
  in_channels=train_dataset.num_features,
  hidden_channels=64,
  num_layers=2,
  out_channels=64,
).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
def train():
  """One epoch of unsupervised link prediction; returns mean per-link loss."""
  model.train()
  loss_sum = n_links = 0
  for data in tqdm.tqdm(loader):
    data = data.to(device)
    optimizer.zero_grad()
    h = model(data.x, data.edge_index)
    # Inner-product decoder over the (src, dst) label pairs.
    link_pred = (h[data.edge_label_index[0]] *
                 h[data.edge_label_index[1]]).sum(dim=-1)
    loss = F.binary_cross_entropy_with_logits(link_pred, data.edge_label)
    loss.backward()
    optimizer.step()
    loss_sum += float(loss) * link_pred.numel()
    n_links += link_pred.numel()
  return loss_sum / n_links
@torch.no_grad()
def encode(loader):
  """Embed every graph in *loader*; returns (embeddings, labels) on CPU."""
  model.eval()
  outs, labels = [], []
  for data in loader:
    data = data.to(device)
    outs.append(model(data.x, data.edge_index).cpu())
    labels.append(data.y.cpu())
  return torch.cat(outs, dim=0), torch.cat(labels, dim=0)
@torch.no_grad()
def test():
  """Fit a downstream classifier on train embeddings; micro-F1 per split."""
  # Train classifier on training-set embeddings.
  x, y = encode(train_loader)
  clf = MultiOutputClassifier(SGDClassifier(loss='log_loss', penalty='l2'))
  clf.fit(x, y)
  scores = []
  for split_x, split_y in ((x, y), encode(val_loader), encode(test_loader)):
    scores.append(f1_score(split_y, clf.predict(split_x), average='micro'))
  return tuple(scores)
# Train for 5 epochs, reporting downstream micro-F1 after each.
for epoch in range(1, 6):
  loss = train()
  print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}')
  train_f1, val_f1, test_f1 = test()
  print(f'Train F1: {train_f1:.4f}, Val F1: {val_f1:.4f}, '
        f'Test F1: {test_f1:.4f}')
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/seal_link_pred.py | examples/seal_link_pred.py | import math
import time
import os.path as osp
from itertools import chain
import graphlearn_torch as glt
import numpy as np
import torch
import torch.nn.functional as F
from scipy.sparse.csgraph import shortest_path
from sklearn.metrics import roc_auc_score
from torch.nn import BCEWithLogitsLoss, Conv1d, MaxPool1d, ModuleList
from torch_geometric.data import Data, InMemoryDataset
from torch_geometric.datasets import Planetoid
from torch_geometric.loader import DataLoader
from torch_geometric.nn import MLP, GCNConv, global_sort_pool
from torch_geometric.transforms import RandomLinkSplit
from torch_geometric.utils import k_hop_subgraph, to_scipy_sparse_matrix
class SEALDataset(InMemoryDataset):
  """SEAL-style link-prediction dataset built from enclosing subgraphs.

  For every positive/negative candidate link, the ``num_hops``-hop enclosing
  subgraph around its endpoints is extracted (using graphlearn-torch's
  NeighborSampler instead of PyG's ``k_hop_subgraph``) and nodes are labeled
  with double-radius node labeling (DRNL).
  """
  def __init__(self, dataset, num_hops, split='train'):
    # `process()` (triggered by InMemoryDataset's __init__ when the cache
    # files are missing) writes one file per split; then the requested
    # split is loaded.
    self.data = dataset[0]
    self.num_hops = num_hops
    super().__init__(dataset.root)
    index = ['train', 'val', 'test'].index(split)
    self.data, self.slices = torch.load(self.processed_paths[index])

  @property
  def processed_file_names(self):
    # One cached file per split, in the order indexed by __init__.
    return ['SEAL_train_data.pt', 'SEAL_val_data.pt', 'SEAL_test_data.pt']

  def process(self):
    """Split edges, extract enclosing subgraphs and cache them per split."""
    transform = RandomLinkSplit(num_val=0.05, num_test=0.1,
                                is_undirected=True, split_labels=True)
    train_data, val_data, test_data = transform(self.data)
    # Largest DRNL label seen so far; sizes the one-hot features below.
    self._max_z = 0
    # Collect a list of subgraphs for training, validation and testing:
    start = time.time()
    graph = glt.data.Graph(glt.data.Topology(train_data.edge_index), mode='CPU')
    subgraph_sampler = glt.sampler.NeighborSampler(graph, [-1]*self.num_hops)
    train_pos_data_list = self.extract_enclosing_subgraphs(
      subgraph_sampler, train_data.pos_edge_label_index, 1)
    train_neg_data_list = self.extract_enclosing_subgraphs(
      subgraph_sampler, train_data.neg_edge_label_index, 0)
    graph = glt.data.Graph(glt.data.Topology(val_data.edge_index), mode='CPU')
    subgraph_sampler = glt.sampler.NeighborSampler(graph, [-1]*self.num_hops)
    val_pos_data_list = self.extract_enclosing_subgraphs(
      subgraph_sampler, val_data.pos_edge_label_index, 1)
    val_neg_data_list = self.extract_enclosing_subgraphs(
      subgraph_sampler, val_data.neg_edge_label_index, 0)
    graph = glt.data.Graph(glt.data.Topology(test_data.edge_index), mode='CPU')
    subgraph_sampler = glt.sampler.NeighborSampler(graph, [-1]*self.num_hops)
    test_pos_data_list = self.extract_enclosing_subgraphs(
      subgraph_sampler, test_data.pos_edge_label_index, 1)
    test_neg_data_list = self.extract_enclosing_subgraphs(
      subgraph_sampler, test_data.neg_edge_label_index, 0)
    end = time.time()
    print(f'Data process cost: {end-start:.4f} secs.')
    # Convert node labeling to one-hot features.
    for data in chain(train_pos_data_list, train_neg_data_list,
                      val_pos_data_list, val_neg_data_list,
                      test_pos_data_list, test_neg_data_list):
      # We solely learn links from structure, dropping any node features:
      data.x = F.one_hot(data.z, self._max_z + 1).to(torch.float)
    torch.save(self.collate(train_pos_data_list + train_neg_data_list),
               self.processed_paths[0])
    torch.save(self.collate(val_pos_data_list + val_neg_data_list),
               self.processed_paths[1])
    torch.save(self.collate(test_pos_data_list + test_neg_data_list),
               self.processed_paths[2])

  def extract_enclosing_subgraphs(self, subgraph_sampler, edge_label_index, y):
    """Return one Data per candidate edge; `y` is the link label (1/0)."""
    data_list = []
    for src, dst in edge_label_index.t().tolist():
      # sub_nodes, sub_edge_index, mapping, _ = k_hop_subgraph(
      #     [src, dst], self.num_hops, edge_index, relabel_nodes=True)
      subgraph = subgraph_sampler.subgraph(torch.tensor([src, dst], dtype=torch.long))
      # `metadata` maps the two seed endpoints to their relabeled ids.
      sub_nodes, sub_edge_index, mapping = subgraph.node, \
        torch.stack([subgraph.row, subgraph.col]), subgraph.metadata
      src, dst = mapping.tolist()
      # Remove target link from the subgraph.
      mask1 = (sub_edge_index[0] != src) | (sub_edge_index[1] != dst)
      mask2 = (sub_edge_index[0] != dst) | (sub_edge_index[1] != src)
      sub_edge_index = sub_edge_index[:, mask1 & mask2]
      # Calculate node labeling.
      z = self.drnl_node_labeling(sub_edge_index, src, dst,
                                  num_nodes=sub_nodes.size(0))
      data = Data(x=self.data.x[sub_nodes], z=z,
                  edge_index=sub_edge_index, y=y)
      data_list.append(data)
    return data_list

  def drnl_node_labeling(self, edge_index, src, dst, num_nodes=None):
    # Double-radius node labeling (DRNL).
    # Distances are computed with each endpoint removed in turn, so the
    # candidate link itself cannot shortcut the paths.
    src, dst = (dst, src) if src > dst else (src, dst)
    adj = to_scipy_sparse_matrix(edge_index, num_nodes=num_nodes).tocsr()
    idx = list(range(src)) + list(range(src + 1, adj.shape[0]))
    adj_wo_src = adj[idx, :][:, idx]
    idx = list(range(dst)) + list(range(dst + 1, adj.shape[0]))
    adj_wo_dst = adj[idx, :][:, idx]
    dist2src = shortest_path(adj_wo_dst, directed=False, unweighted=True,
                             indices=src)
    dist2src = np.insert(dist2src, dst, 0, axis=0)
    dist2src = torch.from_numpy(dist2src)
    # dst's index shifts down by one after src's row/col removal.
    dist2dst = shortest_path(adj_wo_src, directed=False, unweighted=True,
                             indices=dst - 1)
    dist2dst = np.insert(dist2dst, src, 0, axis=0)
    dist2dst = torch.from_numpy(dist2dst)
    dist = dist2src + dist2dst
    dist_over_2, dist_mod_2 = dist // 2, dist % 2
    z = 1 + torch.min(dist2src, dist2dst)
    z += dist_over_2 * (dist_over_2 + dist_mod_2 - 1)
    z[src] = 1.
    z[dst] = 1.
    # Unreachable nodes (infinite distance -> NaN arithmetic) get label 0.
    z[torch.isnan(z)] = 0.
    self._max_z = max(int(z.max()), self._max_z)
    return z.to(torch.long)
# Cora graph; constructing each SEALDataset split loads (and, if missing,
# builds) its cached enclosing-subgraph file.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'Planetoid')
dataset = Planetoid(path, name='Cora')
train_dataset = SEALDataset(dataset, num_hops=2, split='train')
val_dataset = SEALDataset(dataset, num_hops=2, split='val')
test_dataset = SEALDataset(dataset, num_hops=2, split='test')
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=32)
test_loader = DataLoader(test_dataset, batch_size=32)
class DGCNN(torch.nn.Module):
  """DGCNN link scorer: stacked GNN layers + global sort-pooling + 1-D convs.

  NOTE(review): when ``k < 1`` it is treated as a percentile over the graph
  sizes of the module-level ``train_dataset`` — this class depends on that
  global being defined before instantiation.
  """
  def __init__(self, hidden_channels, num_layers, GNN=GCNConv, k=0.6):
    super().__init__()
    if k < 1:  # Transform percentile to number.
      num_nodes = sorted([data.num_nodes for data in train_dataset])
      k = num_nodes[int(math.ceil(k * len(num_nodes))) - 1]
      k = max(10, k)
    self.k = int(k)
    self.convs = ModuleList()
    self.convs.append(GNN(train_dataset.num_features, hidden_channels))
    for i in range(0, num_layers - 1):
      self.convs.append(GNN(hidden_channels, hidden_channels))
    # Final 1-channel layer feeds the sort-pooling score.
    self.convs.append(GNN(hidden_channels, 1))
    conv1d_channels = [16, 32]
    total_latent_dim = hidden_channels * num_layers + 1
    conv1d_kws = [total_latent_dim, 5]
    self.conv1 = Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
    self.maxpool1d = MaxPool1d(2, 2)
    self.conv2 = Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)
    # Flattened size after conv1 -> maxpool -> conv2 over k positions.
    dense_dim = int((self.k - 2) / 2 + 1)
    dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
    self.mlp = MLP([dense_dim, 128, 1], dropout=0.5, norm=None)

  def forward(self, x, edge_index, batch):
    # Concatenate the outputs of every GNN layer (the raw input is skipped).
    xs = [x]
    for conv in self.convs:
      xs += [conv(xs[-1], edge_index).tanh()]
    x = torch.cat(xs[1:], dim=-1)
    # Global pooling.
    x = global_sort_pool(x, batch, self.k)
    x = x.unsqueeze(1)  # [num_graphs, 1, k * hidden]
    x = self.conv1(x).relu()
    x = self.maxpool1d(x)
    x = self.conv2(x).relu()
    x = x.view(x.size(0), -1)  # [num_graphs, dense_dim]
    return self.mlp(x)
# Model, optimizer and loss shared by the train/test loops below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DGCNN(hidden_channels=32, num_layers=3).to(device)
optimizer = torch.optim.Adam(params=model.parameters(), lr=0.0001)
criterion = BCEWithLogitsLoss()
def train():
  """One epoch over train_loader; returns graph-weighted mean BCE loss."""
  model.train()
  running = 0.0
  for data in train_loader:
    data = data.to(device)
    optimizer.zero_grad()
    logits = model(data.x, data.edge_index, data.batch)
    loss = criterion(logits.view(-1), data.y.to(torch.float))
    loss.backward()
    optimizer.step()
    running += float(loss) * data.num_graphs
  return running / len(train_dataset)
@torch.no_grad()
def test(loader):
  """Return ROC-AUC of the link predictions over *loader*."""
  model.eval()
  preds, labels = [], []
  for data in loader:
    data = data.to(device)
    out = model(data.x, data.edge_index, data.batch)
    preds.append(out.view(-1).cpu())
    labels.append(data.y.view(-1).cpu().to(torch.float))
  return roc_auc_score(torch.cat(labels), torch.cat(preds))
# Track the best validation AUC; the test AUC is only recomputed (and thus
# reported) for epochs that improve validation.
best_val_auc = test_auc = 0
for epoch in range(1, 51):
  loss = train()
  val_auc = test(val_loader)
  if val_auc > best_val_auc:
    best_val_auc = val_auc
    test_auc = test(test_loader)
  print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_auc:.4f}, '
        f'Test: {test_auc:.4f}')
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/hetero/train_hgt_mag_mp.py | examples/hetero/train_hgt_mag_mp.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import time
import torch
import graphlearn_torch as glt
import os.path as osp
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG
from torch_geometric.nn import Linear, HGTConv
class HGT(torch.nn.Module):
  """Heterogeneous Graph Transformer classifying 'paper' nodes."""

  def __init__(self, hidden_channels, out_channels, num_heads, num_layers,
               node_types, edge_types):
    super().__init__()
    # One input projection per node type (lazy input dimension: -1).
    self.lin_dict = torch.nn.ModuleDict({
      node_type: Linear(-1, hidden_channels) for node_type in node_types
    })
    self.convs = torch.nn.ModuleList([
      HGTConv(hidden_channels, hidden_channels, (node_types, edge_types),
              num_heads, group='sum')
      for _ in range(num_layers)
    ])
    self.lin = Linear(hidden_channels, out_channels)

  def forward(self, x_dict, edge_index_dict):
    h_dict = {
      ntype: self.lin_dict[ntype](h).relu_()
      for ntype, h in x_dict.items()
    }
    for conv in self.convs:
      h_dict = conv(h_dict, edge_index_dict)
    # Only 'paper' nodes are classified.
    return self.lin(h_dict['paper'])
@torch.no_grad()
def init_params(loader, model):
  """Materialize lazily-sized parameters by forwarding one batch."""
  first_batch = next(iter(loader))
  model(first_batch.x_dict, first_batch.edge_index_dict)
def train(model, loader, optimizer):
  """One epoch of 'paper' classification; returns example-weighted mean loss."""
  model.train()
  seen = loss_sum = 0
  for batch in loader:
    optimizer.zero_grad()
    n = batch['paper'].batch_size
    logits = model(batch.x_dict, batch.edge_index_dict)[:n]
    loss = F.cross_entropy(logits, batch['paper'].y[:n])
    loss.backward()
    optimizer.step()
    seen += n
    loss_sum += float(loss) * n
  return loss_sum / seen
@torch.no_grad()
def test(model, loader):
model.eval()
total_examples = total_correct = 0
for batch in loader:
batch_size = batch['paper'].batch_size
out = model(batch.x_dict, batch.edge_index_dict)[:batch_size]
pred = out.argmax(dim=-1)
total_examples += batch_size
total_correct += int((pred == batch['paper'].y[:batch_size]).sum())
return total_correct / total_examples
def run(rank, world_size, glt_ds, train_idx,
        val_idx, num_classes, node_types, edge_types):
  # One spawned process per GPU: `rank` selects both the CUDA device and
  # this process's shard of the training seeds.
  os.environ['MASTER_ADDR'] = 'localhost'
  os.environ['MASTER_PORT'] = '12355'
  dist.init_process_group('nccl', rank=rank, world_size=world_size)
  torch.cuda.set_device(rank)
  # init NeighborLoader: sample 2 hops of up to 10 neighbors each from this
  # rank's slice of the training indices.
  train_idx = train_idx.split(train_idx.size(0) // world_size)[rank]
  train_loader = glt.loader.NeighborLoader(glt_ds,
                                           [10] * 2,
                                           ('paper', train_idx),
                                           batch_size=1024,
                                           shuffle=True,
                                           device=torch.device(rank))
  # NOTE(review): every rank builds a loader over the FULL val_idx, but only
  # rank 0 evaluates below — confirm the duplication is intended.
  val_loader = glt.loader.NeighborLoader(glt_ds,
                                         [10] * 2,
                                         ('paper', val_idx),
                                         batch_size=1024,
                                         shuffle=True,
                                         device=torch.device(rank))
  # model
  model = HGT(hidden_channels=64,
              out_channels=num_classes,
              num_heads=2,
              num_layers=2,
              node_types=node_types,
              edge_types=edge_types).to(rank)
  # Materialize lazy Linear(-1, ...) parameters before wrapping in DDP.
  init_params(train_loader, model)
  model = DistributedDataParallel(model, device_ids=[rank],
                                  find_unused_parameters=True)
  optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
  for epoch in range(1, 21):
    start_time = time.time()
    loss = train(model, train_loader, optimizer)
    epoch_time = time.time() - start_time
    # Barriers keep ranks in lockstep around rank-0-only validation.
    dist.barrier()
    if rank == 0:
      val_acc = test(model, val_loader)
      print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}',
            f'Time: {epoch_time:.4f}, Val: {val_acc:.4f}')
    dist.barrier()
if __name__ == '__main__':
  # One training process per visible GPU.
  world_size = torch.cuda.device_count()
  path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data/')
  transform = T.ToUndirected(merge=True)
  dataset = OGB_MAG(path, preprocess='metapath2vec', transform=transform)
  data = dataset[0]
  # Convert boolean masks to index tensors of seed nodes.
  train_idx = data['paper'].train_mask.nonzero(as_tuple=False).view(-1)
  val_idx = data['paper'].val_mask.nonzero(as_tuple=False).view(-1)
  # init graphlearn_torch Dataset.
  edge_dict, x_dict = {}, {}
  for etype in data.edge_types:
    edge_dict[etype] = data[etype]['edge_index']
  for ntype in data.node_types:
    # Contiguous clones are required for zero-copy sharing with samplers.
    x_dict[ntype] = data[ntype].x.clone(memory_format=torch.contiguous_format)
  glt_dataset = glt.data.Dataset()
  glt_dataset.init_graph(
    edge_index=edge_dict,
    graph_mode='ZERO_COPY'
  )
  # split_ratio=0.2: hottest 20% of features cached on GPU, rest stays in
  # host memory; one device group per GPU.
  glt_dataset.init_node_features(
    node_feature_data=x_dict,
    split_ratio=0.2,
    device_group_list=[glt.data.DeviceGroup(i, [i]) for i in range(world_size)]
  )
  glt_dataset.init_node_labels(node_label_data={'paper': data['paper'].y})
  # Seed indices must live in shared memory so spawned workers can read them.
  train_idx.share_memory_()
  val_idx.share_memory_()
  mp.spawn(run,
           args=(world_size,
                 glt_dataset,
                 train_idx,
                 val_idx,
                 dataset.num_classes,
                 data.node_types,
                 data.edge_types),
           nprocs=world_size,
join=True) | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/hetero/train_hgt_mag.py | examples/hetero/train_hgt_mag.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os.path as osp
import torch
import torch.nn.functional as F
import graphlearn_torch as glt
from tqdm import tqdm
import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG
from torch_geometric.nn import Linear, HGTConv
class HGT(torch.nn.Module):
  """Heterogeneous Graph Transformer for OGB-MAG 'paper' classification.

  Args:
    hidden_channels: size of the per-type hidden embeddings.
    out_channels: number of output classes.
    num_heads: attention heads per HGTConv layer.
    num_layers: number of stacked HGTConv layers.
    node_types: node types needing an input projection; defaults to the
      module-level ``data.node_types`` (previous hard-coded behavior).
    metadata: (node_types, edge_types) metadata for HGTConv; defaults to
      the module-level ``data.metadata()``.
  """

  def __init__(self, hidden_channels, out_channels, num_heads, num_layers,
               node_types=None, metadata=None):
    super().__init__()
    # Backward-compatible fallback to the script-level `data` object so
    # existing callers passing only the first four arguments keep working;
    # new callers can inject metadata explicitly and avoid the global.
    if node_types is None:
      node_types = data.node_types
    if metadata is None:
      metadata = data.metadata()
    self.lin_dict = torch.nn.ModuleDict()
    for node_type in node_types:
      # Lazy input size (-1): materialized by the first forward pass.
      self.lin_dict[node_type] = Linear(-1, hidden_channels)
    self.convs = torch.nn.ModuleList()
    for _ in range(num_layers):
      conv = HGTConv(hidden_channels, hidden_channels, metadata,
                     num_heads, group='sum')
      self.convs.append(conv)
    self.lin = Linear(hidden_channels, out_channels)

  def forward(self, x_dict, edge_index_dict):
    """Return logits for 'paper' nodes given per-type features and edges."""
    x_dict = {
      node_type: self.lin_dict[node_type](x).relu_()
      for node_type, x in x_dict.items()
    }
    for conv in self.convs:
      x_dict = conv(x_dict, edge_index_dict)
    return self.lin(x_dict['paper'])
@torch.no_grad()
def init_params():
  """Materialize lazily-sized parameters with one dry forward pass.

  Lazy modules such as ``Linear(-1, ...)`` infer their input dimension the
  first time they see data, so run the global ``model`` once on the first
  batch of the global ``train_loader``.
  """
  warmup_batch = next(iter(train_loader))
  model(warmup_batch.x_dict, warmup_batch.edge_index_dict)
def train():
  """Train the global `model` for one epoch; return example-weighted loss."""
  model.train()
  running_loss = 0.0
  n_examples = 0
  for batch in tqdm(train_loader):
    optimizer.zero_grad()
    n_seed = batch['paper'].batch_size
    # Only the first n_seed output rows correspond to the seed nodes.
    logits = model(batch.x_dict, batch.edge_index_dict)[:n_seed]
    loss = F.cross_entropy(logits, batch['paper'].y[:n_seed])
    loss.backward()
    optimizer.step()
    running_loss += float(loss) * n_seed
    n_examples += n_seed
  return running_loss / n_examples
@torch.no_grad()
def test(loader):
  """Return accuracy of the global `model` over 'paper' seeds in `loader`."""
  model.eval()
  n_correct = 0
  n_seen = 0
  for batch in tqdm(loader):
    n_seed = batch['paper'].batch_size
    logits = model(batch.x_dict, batch.edge_index_dict)[:n_seed]
    predicted = logits.argmax(dim=-1)
    n_correct += int((predicted == batch['paper'].y[:n_seed]).sum())
    n_seen += n_seed
  return n_correct / n_seen
if __name__ == '__main__':
  path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data/')
  transform = T.ToUndirected(merge=True)
  dataset = OGB_MAG(path, preprocess='metapath2vec', transform=transform)
  data = dataset[0]
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
  # init graphlearn_torch Dataset.
  edge_dict, feature_dict = {}, {}
  for etype in data.edge_types:
    edge_dict[etype] = data[etype]['edge_index']
  for ntype in data.node_types:
    # Contiguous clones are required for zero-copy sharing with samplers.
    feature_dict[ntype] = data[ntype].x.clone(memory_format=torch.contiguous_format)
  glt_dataset = glt.data.Dataset()
  glt_dataset.init_graph(
    edge_index=edge_dict,
    graph_mode='ZERO_COPY'
  )
  # split_ratio=0.2: hottest 20% of features cached on GPU 0.
  glt_dataset.init_node_features(
    node_feature_data=feature_dict,
    split_ratio=0.2,
    device_group_list=[glt.data.DeviceGroup(0, [0])]
  )
  glt_dataset.init_node_labels(node_label_data={'paper': data['paper'].y})
  # 2-hop neighbor sampling, up to 10 neighbors per hop.
  train_idx = data['paper'].train_mask.nonzero(as_tuple=False).view(-1)
  train_loader = glt.loader.NeighborLoader(glt_dataset,
                                           [10] * 2,
                                           ('paper', train_idx),
                                           batch_size=1024,
                                           shuffle=True,
                                           device=device)
  val_idx = data['paper'].val_mask.nonzero(as_tuple=False).view(-1)
  val_loader = glt.loader.NeighborLoader(glt_dataset,
                                         [10] * 2,
                                         ('paper', val_idx),
                                         batch_size=1024,
                                         device=device)
  # model
  model = HGT(hidden_channels=64,
              out_channels=dataset.num_classes,
              num_heads=2,
              num_layers=2).to(device)
  init_params()  # Initialize parameters.
  optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
  for epoch in range(1, 21):
    loss = train()
    val_acc = test(val_loader)
print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_acc:.4f}') | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/hetero/bipartite_sage_unsup.py | examples/hetero/bipartite_sage_unsup.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os.path as osp
import torch
import torch.nn.functional as F
import tqdm
from sklearn.metrics import roc_auc_score
from torch.nn import Embedding, Linear
import torch_geometric.transforms as T
from torch_geometric.datasets import Taobao
from torch_geometric.nn import SAGEConv
from torch_geometric.utils.convert import to_scipy_sparse_matrix
import graphlearn_torch as glt
from graphlearn_torch.loader import LinkNeighborLoader
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data/Taobao')
dataset = Taobao(path)
data = dataset[0]
data['user'].x = torch.arange(0, data['user'].num_nodes)
data['item'].x = torch.arange(0, data['item'].num_nodes)
# Only consider user<>item relationships for simplicity:
del data['category']
del data['item', 'category']
del data['user', 'item'].time
del data['user', 'item'].behavior
# Add a reverse ('item', 'rev_to', 'user') relation for message passing:
data = T.ToUndirected()(data)
# Perform a link-level split into training and test edges:
print('Computing data splits...')
train_data, val_data, test_data = T.RandomLinkSplit(
num_val=0,
num_test=0.2,
neg_sampling_ratio=1.0,
add_negative_train_samples=False,
edge_types=[('user', 'to', 'item')],
rev_edge_types=[('item', 'rev_to', 'user')],
)(data)
print('Done!')
# Compute sparsified item<>item relationships through users:
print('Computing item<>item relationships...')
mat = to_scipy_sparse_matrix(data['user', 'item'].edge_index).tocsr()
mat = mat[:data['user'].num_nodes, :data['item'].num_nodes]
# (item x item) co-occurrence: entry (i, j) counts users connected to both.
comat = mat.T @ mat
# Remove self-loops, then keep only pairs sharing at least 3 users.
comat.setdiag(0)
comat = comat >= 3.
comat = comat.tocoo()
row = torch.from_numpy(comat.row).to(torch.long)
col = torch.from_numpy(comat.col).to(torch.long)
# COO edge index of the sparsified item<>item graph, shape (2, num_edges).
item_to_item_edge_index = torch.stack([row, col], dim=0)
# Prepare graph and feature for graphlearn-torch
train_edge_dict, train_feature_dict = {}, {}
for etype in train_data.edge_types:
  train_edge_dict[etype] = train_data[etype]['edge_index']
for ntype in train_data.node_types:
  # Contiguous clones are required for zero-copy sharing with samplers.
  train_feature_dict[ntype] = train_data[ntype].x.clone(memory_format=torch.contiguous_format)
# Add the generated item<>item relationships for high-order information:
train_edge_dict[('item', 'to', 'item')] = item_to_item_edge_index
edge_dir = 'out' # 'in' for in-bound sampling, 'out' for out-bound sampling
train_dataset = glt.data.Dataset(edge_dir=edge_dir)
train_dataset.init_graph(
  edge_index=train_edge_dict,
  graph_mode='ZERO_COPY'
)
# split_ratio=0.2: hottest 20% of features cached on GPU 0.
train_dataset.init_node_features(
  node_feature_data=train_feature_dict,
  split_ratio=0.2,
  device_group_list=[glt.data.DeviceGroup(0, [0])]
)
test_edge_dict, test_feature_dict = {}, {}
for etype in test_data.edge_types:
test_edge_dict[etype] = test_data[etype]['edge_index']
for ntype in test_data.node_types:
test_feature_dict[ntype] = test_data[ntype].x.clone(memory_format=torch.contiguous_format)
# Add the generated item<>item relationships for high-order information:
test_edge_dict[('item', 'to', 'item')] = item_to_item_edge_index.clone()
test_dataset = glt.data.Dataset(edge_dir=edge_dir)
test_dataset.init_graph(
edge_index=test_edge_dict,
graph_mode='ZERO_COPY'
)
test_dataset.init_node_features(
node_feature_data=test_feature_dict,
split_ratio=0.0,
device_group_list=[glt.data.DeviceGroup(0, [0])]
)
print('Done!')
train_loader = LinkNeighborLoader(
data=train_dataset,
num_neighbors=[8, 4],
edge_label_index=('user', 'to', 'item'),
neg_sampling='binary',
batch_size=2048,
shuffle=True,
num_workers=16,
drop_last=True,
device=torch.device('cuda:0')
)
test_loader = LinkNeighborLoader(
data=test_dataset,
num_neighbors=[8, 4],
edge_label_index=(
('user', 'to', 'item'),
test_data[('user', 'to', 'item')].edge_label_index,
),
edge_label=test_data[('user', 'to', 'item')].edge_label,
batch_size=2048,
shuffle=False,
num_workers=16,
)
class ItemGNNEncoder(torch.nn.Module):
  """Two-layer GraphSAGE encoder over the homogeneous item<>item graph."""

  def __init__(self, hidden_channels, out_channels):
    super().__init__()
    # -1: input size inferred lazily on the first forward pass.
    self.conv1 = SAGEConv(-1, hidden_channels)
    self.conv2 = SAGEConv(hidden_channels, hidden_channels)
    self.lin = Linear(hidden_channels, out_channels)

  def forward(self, x, edge_index):
    """Encode item features into (num_items, out_channels) embeddings."""
    hidden = self.conv1(x, edge_index).relu()
    hidden = self.conv2(hidden, edge_index).relu()
    return self.lin(hidden)
class UserGNNEncoder(torch.nn.Module):
  """Bipartite GraphSAGE encoder producing user embeddings."""

  def __init__(self, hidden_channels, out_channels):
    super().__init__()
    # (-1, -1): bipartite convs with lazily inferred input sizes.
    self.conv1 = SAGEConv((-1, -1), hidden_channels)
    self.conv2 = SAGEConv((-1, -1), hidden_channels)
    self.conv3 = SAGEConv((-1, -1), hidden_channels)
    self.lin = Linear(hidden_channels, out_channels)

  def forward(self, x_dict, edge_index_dict):
    """Return user embeddings of shape (num_users, out_channels)."""
    # Refine item representations over the item<>item graph.
    item_repr = self.conv1(
      x_dict['item'],
      edge_index_dict[('item', 'to', 'item')],
    ).relu()
    # First user hop: aggregate raw item features into users.
    user_repr = self.conv2(
      (x_dict['item'], x_dict['user']),
      edge_index_dict[('item', 'rev_to', 'user')],
    ).relu()
    # Second user hop: aggregate the refined item representations.
    user_repr = self.conv3(
      (item_repr, user_repr),
      edge_index_dict[('item', 'rev_to', 'user')],
    ).relu()
    return self.lin(user_repr)
class EdgeDecoder(torch.nn.Module):
  """Two-layer MLP scoring (src, dst) embedding pairs as link logits."""

  def __init__(self, hidden_channels):
    super().__init__()
    self.lin1 = Linear(2 * hidden_channels, hidden_channels)
    self.lin2 = Linear(hidden_channels, 1)

  def forward(self, z_src, z_dst, edge_label_index):
    """Return one logit per candidate edge in `edge_label_index`."""
    src_idx, dst_idx = edge_label_index
    pair = torch.cat([z_src[src_idx], z_dst[dst_idx]], dim=-1)
    hidden = self.lin1(pair).relu()
    logit = self.lin2(hidden)
    return logit.view(-1)
class Model(torch.nn.Module):
  # End-to-end link predictor: ID embeddings -> GNN encoders -> edge decoder.
  def __init__(self, num_users, num_items, hidden_channels, out_channels):
    super().__init__()
    # Node "features" here are integer IDs; learnable embedding tables turn
    # them into dense vectors (placed directly on the global `device`).
    self.user_emb = Embedding(num_users, hidden_channels, device=device)
    self.item_emb = Embedding(num_items, hidden_channels, device=device)
    self.item_encoder = ItemGNNEncoder(hidden_channels, out_channels)
    self.user_encoder = UserGNNEncoder(hidden_channels, out_channels)
    self.decoder = EdgeDecoder(out_channels)
  def forward(self, x_dict, edge_index_dict, edge_label_index):
    # NOTE(review): the caller's x_dict is mutated in place below (ID tensors
    # replaced by embedding tensors) — confirm callers do not reuse the batch.
    z_dict = {}
    x_dict['user'] = self.user_emb(x_dict['user'])
    x_dict['item'] = self.item_emb(x_dict['item'])
    z_dict['item'] = self.item_encoder(
      x_dict['item'],
      edge_index_dict[('item', 'to', 'item')],
    )
    z_dict['user'] = self.user_encoder(x_dict, edge_index_dict)
    # The (src, dst) argument order for the decoder depends on the
    # module-level sampling direction `edge_dir` set during dataset setup.
    if edge_dir == 'out':
      return self.decoder(z_dict['item'], z_dict['user'], edge_label_index)
    return self.decoder(z_dict['user'], z_dict['item'], edge_label_index)
model = Model(
  num_users=data['user'].num_nodes,
  num_items=data['item'].num_nodes,
  hidden_channels=64,
  out_channels=64,
).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# The edge type carrying edge_label_index in each sampled batch depends on
# the sampling direction chosen at dataset setup (`edge_dir`).
batch_edge_type = ('user', 'to', 'item') if edge_dir == 'in' else ('item', 'rev_to', 'user')
def train():
  """One epoch of link-prediction training; return mean loss per prediction."""
  model.train()
  loss_total = 0.0
  prediction_total = 0
  for batch in tqdm.tqdm(train_loader):
    batch = batch.to(device)
    optimizer.zero_grad()
    scores = model(
      batch.x_dict,
      batch.edge_index_dict,
      batch[batch_edge_type].edge_label_index,
    )
    loss = F.binary_cross_entropy_with_logits(
      scores, batch[batch_edge_type].edge_label)
    loss.backward()
    optimizer.step()
    loss_total += float(loss)
    prediction_total += scores.numel()
  return loss_total / prediction_total
@torch.no_grad()
def test(loader):
  """Return ROC-AUC of the link predictor over all edges in `loader`."""
  model.eval()
  pred_chunks, label_chunks = [], []
  for batch in tqdm.tqdm(loader):
    batch = batch.to(device)
    scores = model(
      batch.x_dict,
      batch.edge_index_dict,
      batch[batch_edge_type].edge_label_index,
    ).sigmoid().view(-1).cpu()
    pred_chunks.append(scores)
    label_chunks.append(batch[batch_edge_type].edge_label.long().cpu())
  all_pred = torch.cat(pred_chunks, dim=0).numpy()
  all_true = torch.cat(label_chunks, dim=0).numpy()
  return roc_auc_score(all_true, all_pred)
# Train for 20 epochs, reporting ROC-AUC on held-out test edges each epoch.
for epoch in range(1, 21):
  loss = train()
  test_auc = test(test_loader)
  # Fixed format spec: ':.4f' prints 4 decimal places; the previous ':4f'
  # was a minimum field width of 4 with default (6-digit) precision.
  print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, '
        f'Test: {test_auc:.4f}')
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/hetero/hierarchical_sage.py | examples/hetero/hierarchical_sage.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import torch.nn.functional as F
from tqdm import tqdm
import torch_geometric.transforms as T
from torch_geometric.datasets import OGB_MAG
from torch_geometric.nn import HeteroConv, Linear, SAGEConv
from torch_geometric.utils import trim_to_layer
import graphlearn_torch as glt
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
transforms = [T.ToUndirected(merge=True)]
dataset = OGB_MAG(root='../../data', preprocess='metapath2vec',
transform=T.Compose(transforms))
data = dataset[0].to(device, 'x', 'y')
class HierarchicalHeteroGraphSage(torch.nn.Module):
  # Heterogeneous GraphSAGE that trims the sampled subgraph hop by hop:
  # at layer i, nodes/edges no longer needed for the remaining hops are
  # dropped before message passing, avoiding redundant computation.
  def __init__(self, edge_types, hidden_channels, out_channels, num_layers):
    super().__init__()
    self.convs = torch.nn.ModuleList()
    for _ in range(num_layers):
      # One SAGEConv per edge type, summed across types per destination node.
      conv = HeteroConv(
        {
          edge_type: SAGEConv((-1, -1), hidden_channels)
          for edge_type in edge_types
        }, aggr='sum')
      self.convs.append(conv)
    self.lin = Linear(hidden_channels, out_channels)
  def forward(self, x_dict, edge_index_dict, num_sampled_edges_dict,
              num_sampled_nodes_dict):
    # num_sampled_*_dict come from the NeighborLoader and record per-hop
    # sample counts; trim_to_layer uses them to shrink inputs each layer.
    for i, conv in enumerate(self.convs):
      x_dict, edge_index_dict, _ = trim_to_layer(
        layer=i,
        num_sampled_nodes_per_hop=num_sampled_nodes_dict,
        num_sampled_edges_per_hop=num_sampled_edges_dict,
        x=x_dict,
        edge_index=edge_index_dict,
      )
      x_dict = conv(x_dict, edge_index_dict)
      x_dict = {key: x.relu() for key, x in x_dict.items()}
    return self.lin(x_dict['paper'])
model = HierarchicalHeteroGraphSage(
edge_types=data.edge_types,
hidden_channels=64,
out_channels=dataset.num_classes,
num_layers=2,
).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
# init graphlearn_torch Dataset.
edge_dict, feature_dict = {}, {}
for etype in data.edge_types:
edge_dict[etype] = data[etype]['edge_index']
for ntype in data.node_types:
feature_dict[ntype] = data[ntype].x.clone(memory_format=torch.contiguous_format)
glt_dataset = glt.data.Dataset()
glt_dataset.init_graph(
edge_index=edge_dict,
graph_mode='ZERO_COPY'
)
glt_dataset.init_node_features(
node_feature_data=feature_dict,
split_ratio=0.2,
device_group_list=[glt.data.DeviceGroup(0, [0])]
)
glt_dataset.init_node_labels(node_label_data={'paper': data['paper'].y})
train_idx = data['paper'].train_mask.nonzero(as_tuple=False).view(-1)
train_loader = glt.loader.NeighborLoader(glt_dataset,
[10] * 2,
('paper', train_idx),
batch_size=1024,
shuffle=True,
device=device)
val_idx = data['paper'].val_mask.nonzero(as_tuple=False).view(-1)
val_loader = glt.loader.NeighborLoader(glt_dataset,
[10] * 2,
('paper', val_idx),
batch_size=1024,
device=device)
def train():
  """One epoch over `train_loader`; return example-weighted mean loss."""
  model.train()
  loss_sum = 0.0
  n_seen = 0
  for batch in tqdm(train_loader):
    batch = batch.to(device)
    optimizer.zero_grad()
    logits = model(
      batch.x_dict,
      batch.edge_index_dict,
      num_sampled_nodes_dict=batch.num_sampled_nodes,
      num_sampled_edges_dict=batch.num_sampled_edges,
    )
    n_seed = batch['paper'].batch_size
    # Loss is computed on the seed-node prefix of the output only.
    loss = F.cross_entropy(logits[:n_seed], batch['paper'].y[:n_seed])
    loss.backward()
    optimizer.step()
    loss_sum += float(loss) * n_seed
    n_seen += n_seed
  return loss_sum / n_seen
@torch.no_grad()
def test(loader):
  """Return accuracy of the global `model` over 'paper' seeds in `loader`."""
  model.eval()
  n_correct = 0
  n_seen = 0
  for batch in tqdm(loader):
    batch = batch.to(device)
    logits = model(
      batch.x_dict,
      batch.edge_index_dict,
      num_sampled_nodes_dict=batch.num_sampled_nodes,
      num_sampled_edges_dict=batch.num_sampled_edges,
    )
    n_seed = batch['paper'].batch_size
    predicted = logits[:n_seed].argmax(dim=-1)
    n_correct += int((predicted == batch['paper'].y[:n_seed]).sum())
    n_seen += n_seed
  return n_correct / n_seen
# Train for 5 epochs, reporting validation accuracy after each.
for epoch in range(1, 6):
  loss = train()
  val_acc = test(val_loader)
  print(f'Epoch: {epoch:02d}, Loss: {loss:.4f}, Val: {val_acc:.4f}')
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/distributed/dist_train_sage_supervised.py | examples/distributed/dist_train_sage_supervised.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os.path as osp
import time
import graphlearn_torch as glt
import torch
import torch.distributed
import torch.nn.functional as F
from ogb.nodeproppred import Evaluator
from torch.nn.parallel import DistributedDataParallel
from torch_geometric.nn import GraphSAGE
@torch.no_grad()
def test(model, test_loader, dataset_name):
  """Evaluate `model` with the official OGB evaluator.

  Inference is staged on CPU batch by batch to bound GPU memory, then the
  accumulated logits/labels are moved back to the batch device for the
  final argmax and evaluation.

  Args:
    model: callable taking (x, edge_index) and returning per-node logits.
    test_loader: yields batches with .x, .edge_index, .y and .batch_size.
    dataset_name: OGB dataset name used to construct the Evaluator.

  Returns:
    Test accuracy as computed by the OGB evaluator.
  """
  evaluator = Evaluator(name=dataset_name)
  model.eval()
  xs = []
  y_true = []
  # Resolve the device explicitly instead of relying on `i == 0` leaving a
  # local bound only when the loader is non-empty.
  device = None
  for batch in test_loader:
    if device is None:
      device = batch.x.device
    x = model(batch.x, batch.edge_index)[:batch.batch_size]
    xs.append(x.cpu())
    y_true.append(batch.y[:batch.batch_size].cpu())
    del batch  # release the sampled subgraph before the next iteration
  if device is None:
    device = torch.device('cpu')  # empty-loader fallback
  xs = [t.to(device) for t in xs]
  y_true = [t.to(device) for t in y_true]
  y_pred = torch.cat(xs, dim=0).argmax(dim=-1, keepdim=True)
  y_true = torch.cat(y_true, dim=0).unsqueeze(-1)
  test_acc = evaluator.eval({
    'y_true': y_true,
    'y_pred': y_pred,
  })['acc']
  return test_acc
def run_training_proc(local_proc_rank: int, num_nodes: int, node_rank: int,
                      num_training_procs_per_node: int, dataset_name: str,
                      in_channels: int, out_channels: int,
                      dataset: glt.distributed.DistDataset,
                      train_idx: torch.Tensor, test_idx: torch.Tensor,
                      epochs: int, batch_size: int, master_addr: str,
                      training_pg_master_port: int, train_loader_master_port: int,
                      test_loader_master_port: int):
  # Entry point of one spawned trainer process. Global rank is derived from
  # (node_rank, local_proc_rank); each process owns one GPU.
  # Initialize graphlearn_torch distributed worker group context.
  glt.distributed.init_worker_group(
    world_size=num_nodes*num_training_procs_per_node,
    rank=node_rank*num_training_procs_per_node+local_proc_rank,
    group_name='distributed-sage-supervised-trainer'
  )
  current_ctx = glt.distributed.get_context()
  current_device = torch.device(local_proc_rank % torch.cuda.device_count())
  # Initialize training process group of PyTorch.
  torch.distributed.init_process_group(
    backend='nccl',
    rank=current_ctx.rank,
    world_size=current_ctx.world_size,
    init_method='tcp://{}:{}'.format(master_addr, training_pg_master_port)
  )
  # Create distributed neighbor loader for training: this process takes its
  # own slice of the node's training seeds; 3-hop fanout of (15, 10, 5).
  train_idx = train_idx.split(train_idx.size(0) // num_training_procs_per_node)[local_proc_rank]
  train_loader = glt.distributed.DistNeighborLoader(
    data=dataset,
    num_neighbors=[15, 10, 5],
    input_nodes=train_idx,
    batch_size=batch_size,
    shuffle=True,
    collect_features=True,
    to_device=current_device,
    worker_options=glt.distributed.MpDistSamplingWorkerOptions(
      num_workers=1,
      worker_devices=[current_device],
      worker_concurrency=4,
      master_addr=master_addr,
      master_port=train_loader_master_port,
      channel_size='1GB',
      pin_memory=True
    )
  )
  # Create distributed neighbor loader for testing.
  test_idx = test_idx.split(test_idx.size(0) // num_training_procs_per_node)[local_proc_rank]
  test_loader = glt.distributed.DistNeighborLoader(
    data=dataset,
    num_neighbors=[15, 10, 5],
    input_nodes=test_idx,
    batch_size=batch_size,
    shuffle=False,
    collect_features=True,
    to_device=current_device,
    worker_options=glt.distributed.MpDistSamplingWorkerOptions(
      num_workers=2,
      worker_devices=[torch.device('cuda', i % torch.cuda.device_count()) for i in range(2)],
      worker_concurrency=4,
      master_addr=master_addr,
      master_port=test_loader_master_port,
      channel_size='2GB',
      pin_memory=True
    )
  )
  # Define model and optimizer.
  torch.cuda.set_device(current_device)
  model = GraphSAGE(
    in_channels=in_channels,
    hidden_channels=256,
    num_layers=3,
    out_channels=out_channels,
  ).to(current_device)
  model = DistributedDataParallel(model, device_ids=[current_device.index])
  optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
  # Train and test.
  # NOTE(review): this handle is never closed or flushed explicitly; output
  # is only guaranteed at interpreter exit — consider a `with` block.
  f = open('dist_sage_sup.txt', 'a+')
  for epoch in range(0, epochs):
    model.train()
    start = time.time()
    for batch in train_loader:
      optimizer.zero_grad()
      out = model(batch.x, batch.edge_index)[:batch.batch_size].log_softmax(dim=-1)
      loss = F.nll_loss(out, batch.y[:batch.batch_size])
      loss.backward()
      optimizer.step()
    end = time.time()
    # NOTE(review): `loss` below is the last batch's loss and would be
    # unbound if the loader yielded no batches — confirm seeds are non-empty.
    f.write(f'-- [Trainer {current_ctx.rank}] Epoch: {epoch:03d}, Loss: {loss:.4f}, Epoch Time: {end - start}\n')
    # torch.cuda.empty_cache() # empty cache when GPU memory is not efficient.
    torch.cuda.synchronize()
    torch.distributed.barrier()
    # Test accuracy.
    # Evaluate on the first epoch and on every epoch after the halfway point.
    if epoch == 0 or epoch > (epochs // 2):
      test_acc = test(model, test_loader, dataset_name)
      f.write(f'-- [Trainer {current_ctx.rank}] Test Accuracy: {test_acc:.4f}\n')
      torch.cuda.synchronize()
      torch.distributed.barrier()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Arguments for distributed training of supervised SAGE."
)
parser.add_argument(
"--dataset",
type=str,
default='ogbn-products',
help="The name of ogbn dataset.",
)
parser.add_argument(
"--in_channel",
type=int,
default=100,
help="in channel of the dataset, default is for ogbn-products"
)
parser.add_argument(
"--out_channel",
type=int,
default=47,
help="out channel of the dataset, default is for ogbn-products"
)
parser.add_argument(
"--dataset_root_dir",
type=str,
default='../../data/products',
help="The root directory (relative path) of partitioned ogbn dataset.",
)
parser.add_argument(
"--num_dataset_partitions",
type=int,
default=2,
help="The number of partitions of ogbn-products dataset.",
)
parser.add_argument(
"--num_nodes",
type=int,
default=2,
help="Number of distributed nodes.",
)
parser.add_argument(
"--node_rank",
type=int,
default=0,
help="The current node rank.",
)
parser.add_argument(
"--num_training_procs",
type=int,
default=2,
help="The number of traning processes per node.",
)
parser.add_argument(
"--epochs",
type=int,
default=10,
help="The number of training epochs.",
)
parser.add_argument(
"--batch_size",
type=int,
default=512,
help="Batch size for the training and testing dataloader.",
)
parser.add_argument(
"--master_addr",
type=str,
default='localhost',
help="The master address for RPC initialization.",
)
parser.add_argument(
"--training_pg_master_port",
type=int,
default=11111,
help="The port used for PyTorch's process group initialization across training processes.",
)
parser.add_argument(
"--train_loader_master_port",
type=int,
default=11112,
help="The port used for RPC initialization across all sampling workers of training loader.",
)
parser.add_argument(
"--test_loader_master_port",
type=int,
default=11113,
help="The port used for RPC initialization across all sampling workers of testing loader.",
)
args = parser.parse_args()
f = open('dist_sage_sup.txt', 'a+')
f.write('--- Distributed training example of supervised SAGE ---\n')
f.write(f'* dataset: {args.dataset}\n')
f.write(f'* dataset root dir: {args.dataset_root_dir}\n')
f.write(f'* number of dataset partitions: {args.num_dataset_partitions}\n')
f.write(f'* total nodes: {args.num_nodes}\n')
f.write(f'* node rank: {args.node_rank}\n')
f.write(f'* number of training processes per node: {args.num_training_procs}\n')
f.write(f'* epochs: {args.epochs}\n')
f.write(f'* batch size: {args.batch_size}\n')
f.write(f'* master addr: {args.master_addr}\n')
f.write(f'* training process group master port: {args.training_pg_master_port}\n')
f.write(f'* training loader master port: {args.train_loader_master_port}\n')
f.write(f'* testing loader master port: {args.test_loader_master_port}\n')
f.write('--- Loading data partition ...\n')
root_dir = osp.join(osp.dirname(osp.realpath(__file__)), args.dataset_root_dir)
data_pidx = args.node_rank % args.num_dataset_partitions
dataset = glt.distributed.DistDataset()
dataset.load(
root_dir=osp.join(root_dir, f'{args.dataset}-partitions'),
partition_idx=data_pidx,
graph_mode='ZERO_COPY',
whole_node_label_file=osp.join(root_dir, f'{args.dataset}-label', 'label.pt')
)
train_idx = torch.load(
osp.join(root_dir, f'{args.dataset}-train-partitions', f'partition{data_pidx}.pt')
)
test_idx = torch.load(
osp.join(root_dir, f'{args.dataset}-test-partitions', f'partition{data_pidx}.pt')
)
train_idx.share_memory_()
test_idx.share_memory_()
f.write('--- Launching training processes ...\n')
torch.multiprocessing.spawn(
run_training_proc,
args=(args.num_nodes, args.node_rank, args.num_training_procs,
args.dataset, args.in_channel, args.out_channel, dataset, train_idx, test_idx, args.epochs,
args.batch_size, args.master_addr, args.training_pg_master_port,
args.train_loader_master_port, args.test_loader_master_port),
nprocs=args.num_training_procs,
join=True
)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/distributed/partition_ogbn_dataset.py | examples/distributed/partition_ogbn_dataset.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import ast
import os.path as osp
import graphlearn_torch as glt
import torch
from ogb.nodeproppred import PygNodePropPredDataset
def partition_dataset(ogbn_dataset: str,
                      root_dir: str,
                      num_partitions: int,
                      num_nbrs: glt.NumNeighbors,
                      chunk_size: int,
                      cache_ratio: float):
  r""" Partition an ogbn dataset for distributed training.

  Produces, under ``root_dir``:
    * ``{ogbn_dataset}-label/label.pt``: the full (squeezed) label tensor.
    * ``{ogbn_dataset}-train-partitions/partition{i}.pt`` and
      ``{ogbn_dataset}-test-partitions/partition{i}.pt``: per-partition seeds.
    * ``{ogbn_dataset}-partitions``: hotness-based graph/feature partitions.

  Args:
    ogbn_dataset: Name of the ogbn dataset (e.g. 'ogbn-products').
    root_dir: Root directory of the input dataset and output partitions.
    num_partitions: Number of partitions to create.
    num_nbrs: Fanout per hop used to sample hotness for feature caching.
    chunk_size: Chunk size for feature partitioning.
    cache_ratio: Proportion of features to cache per partition.
  """
  print(f'-- Loading {ogbn_dataset} ...')
  dataset = PygNodePropPredDataset(ogbn_dataset, root_dir)
  data = dataset[0]
  node_num = len(data.x)
  edge_num = len(data.edge_index[0])
  print('* node count: {}'.format(node_num))
  print('* edge count: {}'.format(edge_num))
  split_idx = dataset.get_idx_split()

  print('-- Saving label ...')
  label_dir = osp.join(root_dir, f'{ogbn_dataset}-label')
  glt.utils.ensure_dir(label_dir)
  torch.save(data.y.squeeze(), osp.join(label_dir, 'label.pt'))

  print('-- Partitioning training idx ...')
  train_idx = split_idx['train']
  # FIX: `split(size // num_partitions)` yields `num_partitions + 1` chunks
  # when the size is not evenly divisible, and the save loop below only
  # consumes the first `num_partitions`, silently dropping trailing seeds.
  # `tensor_split` always yields exactly `num_partitions` near-equal chunks.
  train_idx = torch.tensor_split(train_idx, num_partitions)
  train_idx_partitions_dir = osp.join(root_dir, f'{ogbn_dataset}-train-partitions')
  glt.utils.ensure_dir(train_idx_partitions_dir)
  for pidx in range(num_partitions):
    torch.save(train_idx[pidx], osp.join(train_idx_partitions_dir, f'partition{pidx}.pt'))

  print('-- Partitioning test idx ...')
  test_idx = split_idx['test']
  test_idx = torch.tensor_split(test_idx, num_partitions)
  test_idx_partitions_dir = osp.join(root_dir, f'{ogbn_dataset}-test-partitions')
  glt.utils.ensure_dir(test_idx_partitions_dir)
  for pidx in range(num_partitions):
    torch.save(test_idx[pidx], osp.join(test_idx_partitions_dir, f'partition{pidx}.pt'))

  print('-- Initializing graph ...')
  csr_topo = glt.data.Topology(edge_index=data.edge_index, input_layout='COO')
  graph = glt.data.Graph(csr_topo, mode='ZERO_COPY')

  print('-- Sampling hotness ...')
  # Hotness: how often each node is touched when sampling from a partition's
  # training seeds; used by the frequency partitioner to decide what to cache.
  glt_sampler = glt.sampler.NeighborSampler(graph, num_nbrs)
  node_probs = []
  for pidx in range(num_partitions):
    seeds = train_idx[pidx]
    prob = glt_sampler.sample_prob(seeds, node_num)
    node_probs.append(prob.cpu())

  print('-- Partitioning graph and features ...')
  partitions_dir = osp.join(root_dir, f'{ogbn_dataset}-partitions')
  freq_partitioner = glt.partition.FrequencyPartitioner(
    output_dir=partitions_dir,
    num_parts=num_partitions,
    num_nodes=node_num,
    edge_index=data.edge_index,
    probs=node_probs,
    node_feat=data.x,
    chunk_size=chunk_size,
    cache_ratio=cache_ratio
  )
  freq_partitioner.partition()
if __name__ == '__main__':
  # CLI for the ogbn partitioning routine above.
  parser = argparse.ArgumentParser(description="Arguments for partitioning ogbn datasets.")
  parser.add_argument("--dataset", type=str, default='ogbn-products',
                      help="The name of ogbn dataset.")
  parser.add_argument("--root_dir", type=str, default='../../data/products',
                      help="The root directory (relative path) of input dataset and output partitions.")
  parser.add_argument("--num_partitions", type=int, default=2,
                      help="Number of partitions")
  parser.add_argument("--num_nbrs", type=ast.literal_eval, default='[15,10,5]',
                      help="The number of neighbors to sample hotness for feature caching.")
  parser.add_argument("--chunk_size", type=int, default=10000,
                      help="Chunk size for feature partitioning.")
  parser.add_argument("--cache_ratio", type=float, default=0.2,
                      help="The proportion to cache features per partition.")
  args = parser.parse_args()

  # Resolve the dataset root relative to this script's location.
  partition_dataset(
    ogbn_dataset=args.dataset,
    root_dir=osp.join(osp.dirname(osp.realpath(__file__)), args.root_dir),
    num_partitions=args.num_partitions,
    num_nbrs=args.num_nbrs,
    chunk_size=args.chunk_size,
    cache_ratio=args.cache_ratio
  )
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/distributed/run_dist_train_sage_sup.py | examples/distributed/run_dist_train_sage_sup.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import yaml
import argparse
import paramiko
import click
if __name__ == "__main__":
  # Launcher that SSHes into every node listed in the YAML config and starts
  # one detached tmux session per node running the distributed training script.
  parser = argparse.ArgumentParser('Run DistRandomSampler benchmarks.')
  parser.add_argument('--config', type=str, default='dist_train_sage_sup_config.yml',
                      help='paths to configuration file for benchmarks')
  parser.add_argument('--epochs', type=int, default=1,
                      help='repeat epochs for sampling')
  parser.add_argument('--batch_size', type=int, default=2048,
                      help='batch size for sampling')
  parser.add_argument('--master_addr', type=str, default='0.0.0.0',
                      help='master ip address for synchronization across all training nodes')
  parser.add_argument('--master_port', type=str, default='11345',
                      help='port for synchronization across all training nodes')
  args = parser.parse_args()
  # NOTE(review): the config file handle is never closed explicitly; it is
  # reclaimed when the name is rebound below.
  config = open(args.config, 'r')
  config = yaml.safe_load(config)
  dataset = config['dataset']
  # Per-node parallel lists: one entry per training node, consumed in lockstep.
  ip_list, port_list, username_list = config['nodes'], config['ports'], config['usernames']
  dst_path_list = config['dst_paths']
  node_ranks = config['node_ranks']
  num_nodes = len(node_ranks)
  visible_devices = config['visible_devices']
  python_bins = config['python_bins']
  # One training process per visible GPU; assumes every node exposes the same
  # number of devices as the first entry — TODO confirm.
  num_cores = len(visible_devices[0].split(','))
  in_channel = str(config['in_channel'])
  out_channel = str(config['out_channel'])
  dataset_path = "../../data/"
  # Interactively collect SSH passwords up front, keyed by ip+username.
  passwd_dict = {}
  for username, ip in zip(username_list, ip_list):
    passwd_dict[ip+username] = click.prompt('passwd for '+username+'@'+ip,
                                            hide_input=True)
  for username, ip, port, dst, noderk, device, pythonbin in zip(
    username_list,
    ip_list,
    port_list,
    dst_path_list,
    node_ranks,
    visible_devices,
    python_bins,
  ):
    # Open an SSH session to the node and run the training command inside a
    # detached tmux session so it survives this launcher exiting.
    trans = paramiko.Transport((ip, port))
    trans.connect(username=username, password=passwd_dict[ip+username])
    ssh = paramiko.SSHClient()
    ssh._transport = trans
    to_dist_dir = 'cd '+dst+'/examples/distributed/ '
    # The trailing quote of the tmux command string is appended at call sites.
    exec_example = "tmux new -d 'CUDA_VISIBLE_DEVICES="+device+" "+pythonbin+" dist_train_sage_supervised.py --dataset="+dataset+" --dataset_root_dir=../../data/"+dataset+" --in_channel="+in_channel+" --out_channel="+out_channel+" --node_rank="+str(noderk)+" --num_dataset_partitions="+str(num_nodes)+" --num_nodes="+str(num_nodes)+" --num_training_procs="+str(num_cores)+" --master_addr="+args.master_addr+" --training_pg_master_port="+args.master_port+" --train_loader_master_port="+str(int(args.master_port)+1)+" --test_loader_master_port="+str(int(args.master_port)+2)+" --batch_size="+str(args.batch_size)+" --epochs="+str(args.epochs)
    print(to_dist_dir + ' && '+ exec_example + " '")
    stdin, stdout, stderr = ssh.exec_command(to_dist_dir+' && '+exec_example+" '", bufsize=1)
    print(stdout.read().decode())
    print(stderr.read().decode())
    ssh.close()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/distributed/dist_sage_unsup/preprocess_template.py | examples/distributed/dist_sage_unsup/preprocess_template.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os.path as osp
from typing import Tuple, Optional
import torch
import graphlearn_torch as glt
# Implement this class to preprocess your dataset
class FakeDataset(object):
  r""" Template dataset for the unsupervised SAGE partitioning example.

  All ``get_*`` methods below are stubs (they return ``None`` until
  implemented); replace their bodies with logic that loads your own data.
  """
  def __init__(self, feat_src_path, graph_src_path):
    # Eagerly materialize every tensor the partitioner will consume.
    self.feats = self.get_feat(feat_src_path)
    self.graph = self.get_graph(graph_src_path)
    self.edge_weights = self.get_edge_weight()
    self.train_idx, self.val_idx, self.test_idx = \
      self.get_train_test_split_idx()

  def get_feat(self, feat_src_path) -> torch.Tensor:
    # Stub: load and return the node feature tensor.
    pass

  def get_graph(self, graph_src_path) -> torch.Tensor:
    # Stub: load and return the graph's edge index tensor.
    pass

  def get_edge_weight(self, edge_weight_src_path='') -> Optional[torch.Tensor]:
    # Stub: return per-edge weights, or None for an unweighted graph.
    pass

  def get_train_test_split_idx(self) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    r''' This function should return three tensors, which are the indices or
    edges of training, validation and testing for the link prediction task.
    If returned tensors are of shape [2, num_edges], then the first row is
    the source nodes and the second row is the destination nodes.
    '''
    pass

  @property
  def node_num(self) -> int:
    # Stub: total number of nodes in the graph.
    pass
def _split_and_save_idx(idx: torch.Tensor, out_dir: str, num_partitions: int):
  r""" Split ``idx`` along dim 0 into ``num_partitions`` chunks, transpose
  each chunk, and save one ``partition{i}.pt`` file per chunk in ``out_dir``.
  """
  # NOTE(review): chunks beyond `num_partitions` (possible when the size is
  # not evenly divisible) are dropped, matching the original behavior.
  chunks = [c.T for c in idx.split(idx.size(0) // num_partitions)]
  glt.utils.ensure_dir(out_dir)
  for pidx in range(num_partitions):
    torch.save(chunks[pidx], osp.join(out_dir, f'partition{pidx}.pt'))


def partition_dataset(
  dataset_name: str, feat_src_path: str, graph_src_path: str,
  dst_path: str = '', num_partitions: int = 2
):
  r""" Partition a dataset for distributed unsupervised SAGE training.

  Saves per-partition train/val/test seed edges plus the randomly
  partitioned graph/feature data under ``dst_path``.

  Args:
    dataset_name: Name used to derive the output directory names.
    feat_src_path: Source path of the node features.
    graph_src_path: Source path of the graph data.
    dst_path: Output root directory (default: current directory).
    num_partitions: Number of partitions to create (default: 2).
  """
  # Substitute FakeDataset with your own dataset class
  data = FakeDataset(feat_src_path, graph_src_path)

  # The three splits are handled identically by the shared helper; the
  # original code repeated this loop (and reused the name `train_idx` for
  # the validation and test data, which was misleading).
  print('-- Partitioning training idx / training edges ...')
  _split_and_save_idx(
    data.train_idx,
    osp.join(dst_path, f'{dataset_name}-train-partitions'),
    num_partitions
  )

  print('-- Partitioning validation idx / validation edges...')
  _split_and_save_idx(
    data.val_idx,
    osp.join(dst_path, f'{dataset_name}-val-partitions'),
    num_partitions
  )

  print('-- Partitioning test idx / testing edges ...')
  _split_and_save_idx(
    data.test_idx,
    osp.join(dst_path, f'{dataset_name}-test-partitions'),
    num_partitions
  )

  print('-- Partitioning graph and features ...')
  partitions_dir = osp.join(dst_path, f'{dataset_name}-partitions')
  partitioner = glt.partition.RandomPartitioner(
    output_dir=partitions_dir,
    num_parts=num_partitions,
    num_nodes=data.node_num,
    edge_index=data.graph,
    node_feat=data.feats,
    edge_weights=data.edge_weights,
    edge_assign_strategy='by_src'
  )
  partitioner.partition()
if __name__ == '__main__':
  # CLI entry point: forward the parsed arguments straight to the
  # partitioning routine above.
  parser = argparse.ArgumentParser(
    description="Arguments for partition of unsupervised SAGE."
  )
  parser.add_argument('--dataset_name', type=str, default='')
  parser.add_argument('--feat_src_path', type=str, default='')
  parser.add_argument('--graph_src_path', type=str, default='')
  parser.add_argument('--dst_path', type=str, default='')
  parser.add_argument('--num_partitions', type=int, default=2)
  args = parser.parse_args()

  partition_dataset(args.dataset_name, args.feat_src_path,
                    args.graph_src_path, args.dst_path,
                    args.num_partitions)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/distributed/dist_sage_unsup/dist_sage_unsup.py | examples/distributed/dist_sage_unsup/dist_sage_unsup.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os.path as osp
import time
import tqdm
import graphlearn_torch as glt
import torch
import torch.distributed
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score, recall_score
from torch.nn.parallel import DistributedDataParallel
from torch.distributed.optim.zero_redundancy_optimizer import ZeroRedundancyOptimizer as ZeRO
from torch.distributed.algorithms.join import Join
from torch_geometric.nn import GraphSAGE
@torch.no_grad()
def test(model, test_loader, current_device):
  r""" Evaluate link prediction quality on ``test_loader``.

  Edge scores come from a dot-product decoder over the DDP-wrapped model's
  node embeddings.

  Returns:
    (auc, recall): ROC-AUC computed from the continuous sigmoid scores and
    recall computed from the thresholded (rounded) predictions.
  """
  model.eval()
  probs, preds, targets = [], [], []
  for batch in tqdm.tqdm(test_loader):
    batch = batch.to(current_device)
    out = model.module(batch.x, batch.edge_index)
    out_src = out[batch.edge_label_index[0]]
    out_dst = out[batch.edge_label_index[1]]
    # Dot-product decoder -> edge probability in [0, 1].
    prob = (out_src * out_dst).sum(dim=-1).sigmoid().view(-1).cpu()
    probs.append(prob)
    preds.append(prob.round())
    targets.append(batch.edge_label.long().cpu())
  prob = torch.cat(probs, dim=0).numpy()
  pred = torch.cat(preds, dim=0).numpy()
  target = torch.cat(targets, dim=0).numpy()
  # BUG FIX: ROC-AUC must be computed from continuous scores. The previous
  # version passed the rounded 0/1 predictions, which collapses the ranking
  # information that AUC measures; recall still uses the thresholded preds.
  return roc_auc_score(target, prob), recall_score(target, pred)
def run_training_proc(local_proc_rank: int, num_nodes: int, node_rank: int,
                      num_training_procs_per_node: int, dataset_name: str,
                      in_channels: int, out_channels: int, with_weight: bool,
                      dataset: glt.distributed.DistDataset,
                      train_idx: torch.Tensor, test_idx: torch.Tensor,
                      epochs: int, batch_size: int, master_addr: str,
                      training_pg_master_port: int, train_loader_master_port: int,
                      test_loader_master_port: int):
  r""" Entry point of one training process for distributed unsupervised SAGE.

  Spawned once per local process by ``torch.multiprocessing.spawn``. Sets up
  the glt worker group, the gloo process group, CPU-based link-neighbor
  loaders, and then runs the training/eval loop on a GPU.

  Note on ordering: the loaders are built while ``current_device`` is still
  CPU (with_weight sampling is CPU-only); only afterwards is
  ``current_device`` rebound to this process's GPU for the model.
  """
  # Initialize graphlearn_torch distributed worker group context.
  glt.distributed.init_worker_group(
    world_size=num_nodes*num_training_procs_per_node,
    rank=node_rank*num_training_procs_per_node+local_proc_rank,
    group_name='distributed-sage-unsupervised-trainer'
  )
  current_ctx = glt.distributed.get_context()
  current_device = torch.device('cpu')

  # Initialize training process group of PyTorch.
  torch.distributed.init_process_group(
    backend='gloo',
    rank=current_ctx.rank,
    world_size=current_ctx.world_size,
    init_method='tcp://{}:{}'.format(master_addr, training_pg_master_port)
  )

  # Create distributed neighbor loader for training
  # train_idx is an edge index of shape [2, num_train_edges] in this example.
  # Take this local process's slice of the node's training edges
  # (transpose -> split along edges -> transpose back).
  train_idx = (train_idx.T.split(train_idx.size(1) // num_training_procs_per_node)
               [local_proc_rank]).T
  # with_weight=True only supports CPU sampling currently.
  train_loader = glt.distributed.DistLinkNeighborLoader(
    data=dataset,
    num_neighbors=[15, 10],
    edge_label_index=train_idx,
    batch_size=batch_size,
    with_weight=with_weight,
    neg_sampling='binary',
    shuffle=True,
    collect_features=True,
    to_device=current_device,
    worker_options=glt.distributed.MpDistSamplingWorkerOptions(
      num_workers=1,
      worker_devices=[current_device],
      worker_concurrency=4,
      master_addr=master_addr,
      master_port=train_loader_master_port,
      channel_size='8GB',
      pin_memory=False
    )
  )

  # Create distributed neighbor loader for testing.
  # test_idx is an edge index of shape [2, num_test_edges] in this example.
  test_idx = (test_idx.T.split(test_idx.size(1) // num_training_procs_per_node)
              [local_proc_rank]).T
  # with_weight=True only supports CPU sampling currently.
  test_loader = glt.distributed.DistLinkNeighborLoader(
    data=dataset,
    num_neighbors=[15, 10],
    edge_label_index=test_idx,
    neg_sampling='binary',
    batch_size=batch_size,
    with_weight=with_weight,
    shuffle=False,
    collect_features=True,
    to_device=current_device,
    worker_options=glt.distributed.MpDistSamplingWorkerOptions(
      num_workers=1,
      worker_devices=[current_device],
      worker_concurrency=4,
      master_addr=master_addr,
      master_port=test_loader_master_port,
      channel_size='2GB',
      pin_memory=False
    )
  )

  # Define model and optimizer.
  # From here on `current_device` is this process's GPU, assigned round-robin.
  current_device = torch.device(local_proc_rank % torch.cuda.device_count())
  torch.cuda.set_device(current_device)
  model = GraphSAGE(
    in_channels=in_channels,
    hidden_channels=256,
    num_layers=2,
    out_channels=out_channels,
  ).to(current_device)
  model = DistributedDataParallel(model, device_ids=[current_device.index],
                                  find_unused_parameters=True)
  # ZeRO shards optimizer state across ranks to reduce per-rank memory.
  optimizer = ZeRO(model.parameters(), torch.optim.Adam, lr=1e-5)

  # Train and test.
  for epoch in range(0, epochs):
    # Join handles ranks with uneven numbers of batches.
    with Join([model, optimizer]):
      model.train()
      start = time.time()
      for batch in tqdm.tqdm(train_loader):
        batch = batch.to(current_device)
        optimizer.zero_grad()
        out = model(batch.x, batch.edge_index)
        # Dot-product decoder over the sampled positive/negative edges.
        out_src = out[batch.edge_label_index[0]]
        out_dst = out[batch.edge_label_index[1]]
        pred = (out_src * out_dst).sum(dim=-1)
        loss = F.binary_cross_entropy_with_logits(pred, batch.edge_label)
        loss.backward()
        optimizer.step()
      end = time.time()
      print(f'-- [Trainer {current_ctx.rank}] Epoch: {epoch:03d}, Loss: {loss:.4f}, Epoch Time: {end - start}\n')
    # torch.cuda.empty_cache() # empty cache when GPU memory is not efficient.
    torch.cuda.synchronize()
    torch.distributed.barrier()
    # Test accuracy.
    if epoch % 2 == 0:
      test_auc, test_recall = test(model, test_loader, current_device)
      print(f'-- [Trainer {current_ctx.rank}] Test AUC: {test_auc:.4f} Test Rec: {test_recall:.4f}\n')
      torch.cuda.synchronize()
      torch.distributed.barrier()
if __name__ == '__main__':
  # CLI, shared-dataset loading, and per-process spawn for the unsupervised
  # SAGE example. One process per entry in --num_training_procs is launched.
  parser = argparse.ArgumentParser(
    description="Arguments for distributed training of unsupervised SAGE."
  )
  parser.add_argument(
    "--dataset",
    type=str,
    default='Your dataset name',
    help="The name of the dataset.",
  )
  parser.add_argument(
    "--in_channel",
    type=int,
    default=4,
  )
  parser.add_argument(
    "--out_channel",
    type=int,
    default=32,
  )
  parser.add_argument(
    "--dataset_root_dir",
    type=str,
    default='Your dataset root directory',
    help="The root directory (relative path) of the partitioned dataset.",
  )
  parser.add_argument(
    "--num_dataset_partitions",
    type=int,
    default=2,
    help="The number of partitions of the dataset.",
  )
  parser.add_argument(
    "--num_nodes",
    type=int,
    default=2,
    help="Number of distributed nodes.",
  )
  parser.add_argument(
    "--node_rank",
    type=int,
    default=0,
    help="The current node rank.",
  )
  parser.add_argument(
    "--num_training_procs",
    type=int,
    default=2,
    help="The number of traning processes per node.",
  )
  parser.add_argument(
    "--epochs",
    type=int,
    default=11,
    help="The number of training epochs.",
  )
  parser.add_argument(
    "--batch_size",
    type=int,
    default=512,
    help="Batch size for the training and testing dataloader.",
  )
  parser.add_argument(
    "--with_weight",
    action="store_true",
    help="Whether to use edge weights.",
  )
  parser.add_argument(
    "--master_addr",
    type=str,
    default='localhost',
    help="The master address for RPC initialization.",
  )
  parser.add_argument(
    "--training_pg_master_port",
    type=int,
    default=12211,
    help="The port used for PyTorch's process group initialization across training processes.",
  )
  parser.add_argument(
    "--train_loader_master_port",
    type=int,
    default=12212,
    help="The port used for RPC initialization across all sampling workers of training loader.",
  )
  parser.add_argument(
    "--test_loader_master_port",
    type=int,
    default=12213,
    help="The port used for RPC initialization across all sampling workers of testing loader.",
  )
  args = parser.parse_args()

  print('--- Distributed training example of unsupervised SAGE ---\n')
  print(f'* dataset: {args.dataset}\n')
  print(f'* dataset root dir: {args.dataset_root_dir}\n')
  print(f'* number of dataset partitions: {args.num_dataset_partitions}\n')
  print(f'* total nodes: {args.num_nodes}\n')
  print(f'* node rank: {args.node_rank}\n')
  print(f'* number of training processes per node: {args.num_training_procs}\n')
  print(f'* epochs: {args.epochs}\n')
  print(f'* batch size: {args.batch_size}\n')
  print(f'* master addr: {args.master_addr}\n')
  print(f'* training process group master port: {args.training_pg_master_port}\n')
  print(f'* training loader master port: {args.train_loader_master_port}\n')
  print(f'* testing loader master port: {args.test_loader_master_port}\n')

  print('--- Loading data partition ...\n')
  root_dir = osp.join(osp.dirname(osp.realpath(__file__)), args.dataset_root_dir)
  # Each node loads the dataset partition matching its rank (modulo the
  # number of partitions, so more nodes than partitions is tolerated).
  data_pidx = args.node_rank % args.num_dataset_partitions
  dataset = glt.distributed.DistDataset()
  dataset.load(
    root_dir=osp.join(root_dir, f'{args.dataset}-partitions'),
    partition_idx=data_pidx,
    graph_mode='CPU',
  )
  # Load train and test edges.
  train_idx = torch.load(
    osp.join(root_dir, f'{args.dataset}-train-partitions', f'partition{data_pidx}.pt')
  )
  test_idx = torch.load(
    osp.join(root_dir, f'{args.dataset}-test-partitions', f'partition{data_pidx}.pt')
  )
  # Share the seed tensors so spawned processes do not copy them.
  train_idx.share_memory_()
  test_idx.share_memory_()

  print('--- Launching training processes ...\n')
  # spawn() prepends the local process rank as the first argument of
  # run_training_proc.
  torch.multiprocessing.spawn(
    run_training_proc,
    args=(args.num_nodes, args.node_rank, args.num_training_procs,
          args.dataset, args.in_channel, args.out_channel, args.with_weight,
          dataset, train_idx, test_idx, args.epochs,
          args.batch_size, args.master_addr, args.training_pg_master_port,
          args.train_loader_master_port, args.test_loader_master_port),
    nprocs=args.num_training_procs,
    join=True
  )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/distributed/server_client_mode/sage_supervised_client.py | examples/distributed/server_client_mode/sage_supervised_client.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os.path as osp
import time
import graphlearn_torch as glt
import torch
import torch.distributed
import torch.nn.functional as F
from ogb.nodeproppred import Evaluator
from torch.nn.parallel import DistributedDataParallel
from torch_geometric.nn import GraphSAGE
from typing import List
from torch.distributed.algorithms.join import Join
@torch.no_grad()
def test(model, test_loader, dataset_name):
  r""" Evaluate node classification accuracy with the official ogb evaluator.

  Args:
    model: A DDP-wrapped GraphSAGE model (``model.module`` is the raw model).
    test_loader: Loader yielding sampled subgraph batches; only the first
      ``batch.batch_size`` rows of each output are seed predictions.
    dataset_name: ogbn dataset name used to build the ``Evaluator``.

  Returns:
    The accuracy reported by the ogb evaluator.
  """
  evaluator = Evaluator(name=dataset_name)
  model.eval()
  xs = []
  y_true = []
  for batch in test_loader:
    x = model.module(batch.x, batch.edge_index)[:batch.batch_size]
    xs.append(x.cpu())
    y_true.append(batch.y[:batch.batch_size].cpu())
    del batch
  # FIX: the previous version captured the first batch's device and moved all
  # accumulated logits back onto it just to argmax/cat. The evaluator result
  # is device-independent, so compute directly on CPU — same accuracy, no
  # redundant host->device transfer and no fragile first-batch device capture.
  y_pred = torch.cat(xs, dim=0).argmax(dim=-1, keepdim=True)
  y_true = torch.cat(y_true, dim=0).unsqueeze(-1)
  test_acc = evaluator.eval({
    'y_true': y_true,
    'y_pred': y_pred,
  })['acc']
  return test_acc
def run_client_proc(
  num_servers: int, num_clients: int, client_rank: int, server_rank_list: List[int],
  dataset_name: str, train_path_list: List[str], test_path_list: List[str], epochs: int,
  batch_size: int, master_addr: str, server_client_port: int, pg_master: str,
  training_pg_master_port: int, train_loader_master_port: int,
  test_loader_master_port: int
):
  r""" Entry point of one client (training) process in server-client mode.

  Connects to the glt sampling servers, joins the NCCL training process
  group, builds remote-sampling train/test loaders, and runs the supervised
  SAGE training/eval loop. Sampling happens on the servers; this process
  only consumes batches and trains.
  """
  print(f'-- [Client {client_rank}] Initializing client ...')
  glt.distributed.init_client(
    num_servers=num_servers,
    num_clients=num_clients,
    client_rank=client_rank,
    master_addr=master_addr,
    master_port=server_client_port,
    num_rpc_threads=4,
    client_group_name='dist-train-supervised-sage-client'
  )
  # Initialize training process group of PyTorch.
  current_ctx = glt.distributed.get_context()
  # GPU assigned round-robin by global client rank.
  current_device = torch.device(current_ctx.rank % torch.cuda.device_count())
  print(
    f'-- [Client {client_rank}] Initializing training process group of PyTorch ...'
  )
  torch.distributed.init_process_group(
    backend='nccl',
    rank=current_ctx.rank,
    world_size=current_ctx.world_size,
    init_method='tcp://{}:{}'.format(pg_master, training_pg_master_port)
  )
  # TODO(hongyi): handle the case that different servers have different device count
  # Ask the first server how many GPUs it has; one sampling worker per GPU.
  server_device_count = glt.distributed.request_server(
    server_rank=server_rank_list[0], func=torch.cuda.device_count)

  # Create distributed neighbor loader on remote server for training.
  # data=None: the dataset lives on the servers, not in this process.
  print(f'-- [Client {client_rank}] Creating training dataloader ...')
  train_loader = glt.distributed.DistNeighborLoader(
    data=None,
    num_neighbors=[15, 10, 5],
    input_nodes=train_path_list,
    batch_size=batch_size,
    shuffle=True,
    collect_features=True,
    to_device=current_device,
    worker_options=glt.distributed.RemoteDistSamplingWorkerOptions(
      server_rank=server_rank_list,
      num_workers=server_device_count,
      worker_devices=[
        torch.device(f'cuda:{i}') for i in range(server_device_count)
      ],
      worker_concurrency=4,
      master_addr=master_addr,
      master_port=train_loader_master_port,
      buffer_size='1GB',
      prefetch_size=2,
      worker_key='train'
    )
  )
  # Create distributed neighbor loader on remote server for testing.
  print(f'-- [Client {client_rank}] Creating testing dataloader ...')
  test_loader = glt.distributed.DistNeighborLoader(
    data=None,
    num_neighbors=[15, 10, 5],
    input_nodes=test_path_list,
    batch_size=batch_size,
    shuffle=False,
    collect_features=True,
    to_device=current_device,
    worker_options=glt.distributed.RemoteDistSamplingWorkerOptions(
      server_rank=server_rank_list,
      num_workers=server_device_count,
      worker_devices=[
        torch.device(f'cuda:{i}') for i in range(server_device_count)
      ],
      worker_concurrency=4,
      master_addr=master_addr,
      master_port=test_loader_master_port,
      buffer_size='1GB',
      prefetch_size=2,
      worker_key='test'
    )
  )
  # Define model and optimizer.
  # NOTE(review): in/out channel sizes are hard-coded for ogbn-products
  # (100 features, 47 classes) — confirm if used with another dataset.
  print(f'-- [Client {client_rank}] Initializing model and optimizer ...')
  torch.cuda.set_device(current_device)
  model = GraphSAGE(
    in_channels=100,
    hidden_channels=256,
    num_layers=3,
    out_channels=47,
  ).to(current_device)
  model = DistributedDataParallel(model, device_ids=[current_device.index])
  optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

  # Train and test.
  print(f'-- [Client {client_rank}] Start training and testing ...')
  for epoch in range(0, epochs):
    model.train()
    start = time.time()
    # Join handles clients that exhaust their loader earlier than others.
    with Join([model]):
      for batch in train_loader:
        optimizer.zero_grad()
        # Only the first `batch_size` rows are seed-node predictions.
        out = model(batch.x,
                    batch.edge_index)[:batch.batch_size].log_softmax(dim=-1)
        loss = F.nll_loss(out, batch.y[:batch.batch_size])
        loss.backward()
        optimizer.step()
    end = time.time()
    print(
      f'-- [Client {client_rank}] Epoch: {epoch:03d}, Loss: {loss:.4f}, Epoch Time: {end - start}'
    )
    torch.cuda.synchronize()
    torch.distributed.barrier()
    # Test accuracy.
    if epoch == 0 or epoch > (epochs // 2):
      test_acc = test(model, test_loader, dataset_name)
      print(f'-- [Client {client_rank}] Test Accuracy: {test_acc:.4f}')
      torch.cuda.synchronize()
      torch.distributed.barrier()

  print(f'-- [Client {client_rank}] Shutdowning ...')
  glt.distributed.shutdown_client()
  print(f'-- [Client {client_rank}] Exited ...')
if __name__ == '__main__':
  # CLI and process launcher for the client side of server-client mode.
  # Starts --num_client_procs_per_node client processes on this node.
  parser = argparse.ArgumentParser(
    description="Arguments for distributed training of supervised SAGE with servers."
  )
  parser.add_argument(
    "--dataset",
    type=str,
    default='ogbn-products',
    help="The name of ogbn dataset.",
  )
  parser.add_argument(
    "--dataset_root_dir",
    type=str,
    default='../../../data/products',
    help="The root directory (relative path) of partitioned ogbn dataset.",
  )
  parser.add_argument(
    "--num_dataset_partitions",
    type=int,
    default=2,
    help="The number of partitions of the dataset.",
  )
  parser.add_argument(
    "--num_server_nodes",
    type=int,
    default=2,
    help="Number of server nodes for remote sampling.",
  )
  parser.add_argument(
    "--num_client_nodes",
    type=int,
    default=2,
    help="Number of client nodes for training.",
  )
  parser.add_argument(
    "--node_rank",
    type=int,
    default=0,
    help="The node rank of the current role.",
  )
  parser.add_argument(
    "--num_server_procs_per_node",
    type=int,
    default=1,
    help="The number of server processes for remote sampling per server node.",
  )
  parser.add_argument(
    "--num_client_procs_per_node",
    type=int,
    default=2,
    help="The number of client processes for training per client node.",
  )
  parser.add_argument(
    "--epochs",
    type=int,
    default=10,
    help="The number of training epochs. (client option)",
  )
  parser.add_argument(
    "--batch_size",
    type=int,
    default=512,
    help="Batch size for the training and testing dataloader.",
  )
  parser.add_argument(
    "--master_addr",
    type=str,
    default='localhost',
    help="The master address for RPC initialization.",
  )
  parser.add_argument(
    "--server_client_master_port",
    type=int,
    default=11110,
    help="The port used for RPC initialization across all servers and clients.",
  )
  parser.add_argument(
    "--pg_master",
    type=str,
    default='localhost',
    help="The master address for PyTorch's process group initialization.",
  )
  parser.add_argument(
    "--training_pg_master_port",
    type=int,
    default=11111,
    help="The port used for PyTorch's process group initialization across all training processes.",
  )
  parser.add_argument(
    "--train_loader_master_port",
    type=int,
    default=11112,
    help="The port used for RPC initialization across all sampling workers of training loader.",
  )
  parser.add_argument(
    "--test_loader_master_port",
    type=int,
    default=11113,
    help="The port used for RPC initialization across all sampling workers of testing loader.",
  )
  args = parser.parse_args()

  print(
    f'--- Distributed training example of supervised SAGE with server-client mode. Client {args.node_rank} ---'
  )
  print(f'* dataset: {args.dataset}')
  print(f'* dataset root dir: {args.dataset_root_dir}')
  print(f'* total server nodes: {args.num_server_nodes}')
  print(f'* total client nodes: {args.num_client_nodes}')
  print(f'* node rank: {args.node_rank}')
  print(
    f'* number of server processes per server node: {args.num_server_procs_per_node}'
  )
  print(
    f'* number of client processes per client node: {args.num_client_procs_per_node}'
  )
  print(f'* master addr: {args.master_addr}')
  print(f'* server-client master port: {args.server_client_master_port}')
  print(f'* number of dataset partitions: {args.num_dataset_partitions}')

  # Global process counts across all server/client nodes.
  num_servers = args.num_server_nodes * args.num_server_procs_per_node
  num_clients = args.num_client_nodes * args.num_client_procs_per_node

  root_dir = osp.join(
    osp.dirname(osp.realpath(__file__)), args.dataset_root_dir
  )

  print(f'* epochs: {args.epochs}')
  print(f'* batch size: {args.batch_size}')
  print(f'* training process group master port: {args.training_pg_master_port}')
  print(f'* training loader master port: {args.train_loader_master_port}')
  print(f'* testing loader master port: {args.test_loader_master_port}')

  print('--- Loading training and testing seeds ...')
  # Only the seed-file paths are collected here; the servers load the actual
  # tensors during remote sampling.
  train_path_list = []
  for data_pidx in range(args.num_dataset_partitions):
    train_path_list.append(
      osp.join(
        root_dir, f'{args.dataset}-train-partitions', f'partition{data_pidx}.pt'
      )
    )
  test_path_list = []
  for data_pidx in range(args.num_dataset_partitions):
    test_path_list.append(
      osp.join(
        root_dir, f'{args.dataset}-test-partitions', f'partition{data_pidx}.pt'
      )
    )

  print('--- Launching client processes ...')
  mp_context = torch.multiprocessing.get_context('spawn')
  client_procs = []
  for local_proc_rank in range(args.num_client_procs_per_node):
    # Global client rank = node offset + local process index.
    client_rank = args.node_rank * args.num_client_procs_per_node + local_proc_rank
    cproc = mp_context.Process(
      target=run_client_proc,
      args=(
        num_servers, num_clients, client_rank,
        [server_rank for server_rank in range(num_servers)
        ], args.dataset, train_path_list, test_path_list, args.epochs,
        args.batch_size, args.master_addr, args.server_client_master_port,
        args.pg_master, args.training_pg_master_port,
        args.train_loader_master_port, args.test_loader_master_port
      )
    )
    client_procs.append(cproc)
  # Start all clients, then wait for all of them to finish.
  for cproc in client_procs:
    cproc.start()
  for cproc in client_procs:
    cproc.join()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/distributed/server_client_mode/sage_supervised_server.py | examples/distributed/server_client_mode/sage_supervised_server.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os.path as osp
import graphlearn_torch as glt
import torch
import torch.distributed
def run_server_proc(
  num_servers: int, num_clients: int, server_rank: int,
  dataset: glt.distributed.DistDataset, master_addr: str,
  server_client_port: int
):
  """Entry point of one sampling-server process.

  Initializes the graphlearn_torch server over RPC, then blocks until all
  clients have requested shutdown.

  Args:
    num_servers: Total number of server processes across all server nodes.
    num_clients: Total number of client processes across all client nodes.
    server_rank: Global rank of this server process.
    dataset: The loaded graph partition served by this process.
    master_addr: Address used for server/client RPC initialization.
    server_client_port: Port used for server/client RPC initialization.
  """
  print(f'-- [Server {server_rank}] Initializing server ...')
  glt.distributed.init_server(
    num_servers=num_servers,
    num_clients=num_clients,
    server_rank=server_rank,
    dataset=dataset,
    master_addr=master_addr,
    master_port=server_client_port,
    num_rpc_threads=16,
    server_group_name='dist-train-supervised-sage-server'
  )
  # Blocks until every client has disconnected, then tears down RPC.
  print(f'-- [Server {server_rank}] Waiting for exit ...')
  glt.distributed.wait_and_shutdown_server()
  print(f'-- [Server {server_rank}] Exited ...')
if __name__ == '__main__':
  parser = argparse.ArgumentParser(
    description="Arguments for distributed training of supervised SAGE with servers."
  )
  parser.add_argument(
    "--dataset",
    type=str,
    default='ogbn-products',
    help="The name of ogbn dataset.",
  )
  parser.add_argument(
    "--dataset_root_dir",
    type=str,
    default='../../../data/products',
    help="The root directory (relative path) of partitioned ogbn dataset.",
  )
  parser.add_argument(
    "--num_dataset_partitions",
    type=int,
    default=2,
    help="The number of partitions of the dataset.",
  )
  parser.add_argument(
    "--num_server_nodes",
    type=int,
    default=2,
    help="Number of server nodes for remote sampling.",
  )
  parser.add_argument(
    "--num_client_nodes",
    type=int,
    default=2,
    help="Number of client nodes for training.",
  )
  parser.add_argument(
    "--node_rank",
    type=int,
    default=0,
    help="The node rank of the current role.",
  )
  parser.add_argument(
    "--num_server_procs_per_node",
    type=int,
    default=1,
    help="The number of server processes for remote sampling per server node.",
  )
  parser.add_argument(
    "--num_client_procs_per_node",
    type=int,
    default=2,
    help="The number of client processes for training per client node.",
  )
  parser.add_argument(
    "--master_addr",
    type=str,
    default='localhost',
    help="The master address for RPC initialization.",
  )
  parser.add_argument(
    "--server_client_master_port",
    type=int,
    default=11110,
    help="The port used for RPC initialization across all servers and clients.",
  )
  args = parser.parse_args()
  # Echo the effective configuration for debugging multi-node launches.
  print(
    f'--- Distributed training example of supervised SAGE with server-client mode. Server {args.node_rank} ---'
  )
  print(f'* dataset: {args.dataset}')
  print(f'* dataset root dir: {args.dataset_root_dir}')
  print(f'* total server nodes: {args.num_server_nodes}')
  print(f'* node rank: {args.node_rank}')
  print(
    f'* number of server processes per server node: {args.num_server_procs_per_node}'
  )
  print(
    f'* number of client processes per client node: {args.num_client_procs_per_node}'
  )
  print(f'* master addr: {args.master_addr}')
  print(f'* server-client master port: {args.server_client_master_port}')
  print(f'* number of dataset partitions: {args.num_dataset_partitions}')
  # Global process counts across all server/client nodes.
  num_servers = args.num_server_nodes * args.num_server_procs_per_node
  num_clients = args.num_client_nodes * args.num_client_procs_per_node
  root_dir = osp.join(
    osp.dirname(osp.realpath(__file__)), args.dataset_root_dir
  )
  # NOTE(review): data_pidx is computed but never used below — the dataset is
  # loaded with partition_idx=server_rank instead. Confirm which index is
  # intended when num_dataset_partitions != total number of servers.
  data_pidx = args.node_rank % args.num_dataset_partitions
  # 'spawn' gives each server process a clean interpreter (safe with RPC/CUDA).
  mp_context = torch.multiprocessing.get_context('spawn')
  print('--- Launching server processes ...')
  server_procs = []
  for local_proc_rank in range(args.num_server_procs_per_node):
    # Global server rank = node offset + local process rank.
    server_rank = args.node_rank * args.num_server_procs_per_node + local_proc_rank
    print('--- Loading data partition ...')
    dataset = glt.distributed.DistDataset()
    dataset.load(
      root_dir=osp.join(root_dir, f'{args.dataset}-partitions'),
      partition_idx=server_rank,
      graph_mode='ZERO_COPY',
      whole_node_label_file=osp.join(
        root_dir, f'{args.dataset}-label', 'label.pt'
      )
    )
    sproc = mp_context.Process(
      target=run_server_proc,
      args=(
        num_servers, num_clients, server_rank, dataset, args.master_addr,
        args.server_client_master_port
      )
    )
    server_procs.append(sproc)
  # Start all servers, then wait for them to finish serving.
  for sproc in server_procs:
    sproc.start()
  for sproc in server_procs:
    sproc.join()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/gpt/utils.py | examples/gpt/utils.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from openai import OpenAI
def get_gpt_response(message, model="gpt-4-1106-preview"):
  """Send ``message`` as a single user turn to the OpenAI chat API.

  Args:
    message: The prompt text to send.
    model: OpenAI chat model identifier.

  Returns:
    The assistant's reply text (content of the first choice).
  """
  api = OpenAI()
  user_turn = {"role": "user", "content": message}
  completion = api.chat.completions.create(messages=[user_turn], model=model)
  return completion.choices[0].message.content
def node_classification(batch):
  """Build a GPT prompt asking for the label of node 0 in a sampled subgraph.

  Nodes 1..N-1 are described with their features and known labels; node 0 is
  the query node whose label GPT is asked to predict.

  Args:
    batch: A sampled subgraph with ``x`` (node features), ``y`` (labels) and
      ``edge_index`` (2 x E COO edges) tensors.

  Returns:
    The full prompt string.
  """
  num_nodes = batch.x.shape[0]

  def _fmt(row):
    # Features rendered with 3 decimal places, comma separated.
    return ','.join(f'{v:.3f}' for v in row.tolist())

  parts = [
    f"This is a directed subgraph of arxiv citation network with {num_nodes} nodes numbered from 0 to {num_nodes - 1}.\n",
    f"The subgraph has {batch.edge_index.shape[1]} edges.\n",
  ]
  # Describe every node except the query node 0.
  for node in range(1, num_nodes):
    parts.append(f"The feature of node {node} is [{_fmt(batch.x[node])}] ")
    parts.append(f"and the node label is {batch.y[node].item()}.\n")
  edges = batch.edge_index.T.tolist()
  parts.append(f"The edges of the subgraph are {edges} where the first number indicates source node and the second destination node.\n")
  parts.append(f"Question: predict the label for node 0, whose feature is [{_fmt(batch.x[0])}]. Give the label only and don't show any reasoning process.\n\n")
  return ''.join(parts)
def link_prediction(batch, titles, reason=False):
  """Build a GPT prompt with four yes/no link-prediction questions.

  In ``batch.edge_label_index`` the first two candidate pairs are positive
  samples and the last two are negative; the questions present them in a
  shuffled order (1, 3, 2, 0).

  Args:
    batch: A sampled subgraph with ``x``, ``edge_index`` and
      ``edge_label_index`` tensors.
    titles: Sequence of single-element sequences; ``titles[i][0]`` is the
      paper title of node ``i``.
    reason: If True, ask GPT to show its reasoning; otherwise suppress it.

  Returns:
    The full prompt string.
  """
  num_nodes = batch.x.shape[0]
  pieces = [
    f"This is a directed subgraph of arxiv citation network with {num_nodes} nodes numbered from 0 to {num_nodes - 1}.\n",
    "The titles of each paper:\n",
  ]
  for node in range(num_nodes):
    pieces.append(f"node {node} is '{titles[node][0]}'\n")
  # Deduplicated edge list of the sampled subgraph.
  subgraph = batch.edge_index.T.unique(dim=0).tolist()
  pieces.append(f"The sampled subgraph of the network is {subgraph} where the first number indicates source node and the second destination node.\n")
  pieces.append("Hint: the direction of the edge can indicate some information of temporality.\n")
  pieces.append("\nAccording to principles of citation network construction and the given subgraph structure, answer the following questions:\n")
  candidates = batch.edge_label_index.T.tolist()
  # Positive samples are candidates[0] and [1], negatives [2] and [3];
  # shuffle their presentation order across the four questions.
  ordering = (candidates[1], candidates[3], candidates[2], candidates[0])
  for question, sample in enumerate(ordering, start=1):
    pieces.append(f"Question {question}: predict whether there tends to form an edge {sample}.\n")
  if reason:
    pieces.append("Answer yes or no and show reasoning process.\n\n")
  else:
    pieces.append("Answer yes or no and don't show any reasoning process.\n\n")
  return ''.join(pieces)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/gpt/arxiv.py | examples/gpt/arxiv.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import time
import torch
from tqdm import tqdm
import graphlearn_torch as glt
from utils import get_gpt_response, link_prediction
def run(glt_ds, raw_text, reason):
  """Sample link-level subgraphs from ``glt_ds`` and ask GPT to predict links.

  Args:
    glt_ds: graphlearn_torch Dataset holding graph topology and node features.
    raw_text: Array of paper titles, indexable by node id.
    reason: If True, ask GPT to also show its reasoning in the prompt.
  """
  # Binary negative sampling: draw negative edges alongside each positive.
  neg_sampling = glt.sampler.NegativeSampling('binary')
  train_loader = glt.loader.LinkNeighborLoader(glt_ds,
                                               [12, 6],
                                               neg_sampling=neg_sampling,
                                               batch_size=2,
                                               drop_last=True,
                                               shuffle=True,
                                               device=torch.device('cpu'))
  print(f'Building graphlearn_torch NeighborLoader Done.')
  for batch in tqdm(train_loader):
    # Titles of the nodes sampled into this subgraph.
    batch_titles = raw_text[batch.node]
    # Skip subgraphs too small to give GPT useful context.
    if batch.edge_index.shape[1] < 5:
      continue
    # print(batch)
    # print(batch.edge_label_index)
    message = link_prediction(batch, batch_titles, reason=reason)
    # print(message)
    response = get_gpt_response(
      message=message
    )
    print(f"response: {response}")
if __name__ == '__main__':
  import pandas as pd
  # Raw arxiv-2023 dump: titles, node ids and a COO edge list as gzipped CSVs.
  root = '../data/arxiv_2023/raw/'
  titles = pd.read_csv(root + "titles.csv.gz").to_numpy()
  ids = torch.from_numpy(pd.read_csv(root + "ids.csv.gz").to_numpy())
  edge_index = torch.from_numpy(pd.read_csv(root + "edges.csv.gz").to_numpy())
  print('Build graphlearn_torch dataset...')
  start = time.time()
  glt_dataset = glt.data.Dataset()
  # Edges are stored row-per-edge in the CSV, so transpose to 2 x E.
  glt_dataset.init_graph(
    edge_index=edge_index.T,
    graph_mode='CPU',
    directed=True
  )
  # Node ids double as the "features"; split_ratio=0 keeps everything on CPU.
  glt_dataset.init_node_features(
    node_feature_data=ids,
    sort_func=glt.data.sort_by_in_degree,
    split_ratio=0
  )
  print(f'Build graphlearn_torch csr_topo and feature cost {time.time() - start} s.')
  run(glt_dataset, titles, reason=False)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/dist_train_rgnn.py | examples/igbh/dist_train_rgnn.py | # Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse, datetime
import os.path as osp
import time, tqdm
import graphlearn_torch as glt
import mlperf_logging.mllog.constants as mllog_constants
import numpy as np
import sklearn.metrics
import torch
import torch.distributed
from mlperf_logging_utils import get_mlperf_logger, submission_info
from torch.nn.parallel import DistributedDataParallel
from utilities import create_ckpt_folder
from rgnn import RGNN
mllogger = get_mlperf_logger(path=osp.dirname(osp.abspath(__file__)))
def evaluate(model, dataloader, current_device, with_gpu,
             rank, world_size, epoch_num):
  """Run one validation pass and all-reduce accuracy across ranks.

  Args:
    model: The (DDP-wrapped) model to evaluate.
    dataloader: Validation neighbor loader yielding hetero mini-batches.
    current_device: Device this rank computes on.
    with_gpu: Whether CUDA is in use (adds a synchronize before the barrier).
    rank: This process's global rank (rank 0 emits MLPerf log events).
    world_size: Total number of ranks participating in the all-reduce.
    epoch_num: Fractional epoch number recorded in the MLPerf log.

  Returns:
    Tuple ``(local_acc, global_acc)``: this rank's accuracy and the mean
    accuracy over all ranks.
  """
  if rank == 0:
    mllogger.start(
      key=mllog_constants.EVAL_START,
      metadata={mllog_constants.EPOCH_NUM: epoch_num},
    )
  pred_chunks = []
  label_chunks = []
  with torch.no_grad():
    for batch in tqdm.tqdm(dataloader):
      seed_count = batch['paper'].batch_size
      logits = model(batch.x_dict,
                     batch.edge_index_dict,
                     num_sampled_nodes_dict=batch.num_sampled_nodes,
                     num_sampled_edges_dict=batch.num_sampled_edges)[:seed_count]
      logits = logits.to(torch.float32)
      # The model may return fewer rows than seeds; clamp before slicing labels.
      seed_count = min(logits.shape[0], seed_count)
      label_chunks.append(batch['paper'].y[:seed_count].cpu().clone().numpy())
      pred_chunks.append(logits.argmax(1).cpu().clone().numpy())
  all_preds = np.concatenate(pred_chunks)
  all_labels = np.concatenate(label_chunks)
  acc = sklearn.metrics.accuracy_score(all_labels, all_preds)
  if with_gpu:
    torch.cuda.synchronize()
  torch.distributed.barrier()
  # Sum per-rank accuracies, then average by world size.
  acc_tensor = torch.tensor(acc).to(current_device)
  torch.distributed.all_reduce(acc_tensor, op=torch.distributed.ReduceOp.SUM)
  global_acc = acc_tensor.item() / world_size
  if rank == 0:
    mllogger.event(
      key=mllog_constants.EVAL_ACCURACY,
      value=global_acc,
      metadata={mllog_constants.EPOCH_NUM: epoch_num},
    )
    mllogger.end(
      key=mllog_constants.EVAL_STOP,
      metadata={mllog_constants.EPOCH_NUM: epoch_num},
    )
  return acc, global_acc
def run_training_proc(local_proc_rank, num_nodes, node_rank, num_training_procs,
                      split_training_sampling, hidden_channels, num_classes, num_layers,
                      model_type, num_heads, fan_out, epochs, train_batch_size, val_batch_size,
                      learning_rate,
                      random_seed,
                      dataset, train_idx, val_idx,
                      train_channel_size,
                      val_channel_size,
                      master_addr,
                      training_pg_master_port,
                      train_loader_master_port,
                      val_loader_master_port,
                      with_gpu, trim_to_layer, precision, use_all2all,
                      edge_dir, rpc_timeout,
                      validation_acc, validation_frac_within_epoch, evaluate_on_epoch_end,
                      checkpoint_on_epoch_end, ckpt_steps, ckpt_path):
  """Per-process entry point for distributed RGNN training on IGBH.

  Spawned once per training process on each node. Sets up the
  graphlearn_torch worker group and RPC, builds train/val neighbor loaders,
  constructs a DDP-wrapped RGNN, and trains until ``epochs`` elapse or the
  all-reduced validation accuracy reaches ``validation_acc``.
  """
  world_size=num_nodes*num_training_procs
  rank=node_rank*num_training_procs+local_proc_rank
  # Training precision for model weights; fp32 is the default.
  dtype = torch.float32
  if precision == "bf16":
    dtype = torch.bfloat16
  elif precision == "fp16":
    dtype = torch.float16
  # NOTE(review): ckpt_dir is only defined on rank 0 when ckpt_steps > 0; the
  # later save_ckpt calls are guarded by rank == 0, but checkpoint_on_epoch_end
  # without ckpt_steps > 0 would hit an undefined ckpt_dir — confirm intended.
  if rank == 0:
    if ckpt_steps > 0:
      ckpt_dir = create_ckpt_folder(base_dir=osp.dirname(osp.abspath(__file__)))
  glt.utils.common.seed_everything(random_seed)
  # Initialize graphlearn_torch distributed worker group context.
  glt.distributed.init_worker_group(
    world_size=world_size,
    rank=rank,
    group_name='distributed-igbh-trainer'
  )
  current_ctx = glt.distributed.get_context()
  if with_gpu:
    if split_training_sampling:
      # Use alternating GPUs: even for training, odd for sampling.
      current_device = torch.device((local_proc_rank * 2) % torch.cuda.device_count())
      sampling_device = torch.device((local_proc_rank * 2 + 1) % torch.cuda.device_count())
    else:
      current_device = torch.device(local_proc_rank % torch.cuda.device_count())
      sampling_device = current_device
  else:
    current_device = torch.device('cpu')
    sampling_device = current_device
  # Initialize training process group of PyTorch.
  torch.distributed.init_process_group(
    backend='nccl' if with_gpu else 'gloo',
    timeout=datetime.timedelta(seconds=rpc_timeout),
    rank=current_ctx.rank,
    world_size=current_ctx.world_size,
    init_method='tcp://{}:{}'.format(master_addr, training_pg_master_port)
  )
  # Create distributed neighbor loader for training
  train_idx = train_idx.split(train_idx.size(0) // num_training_procs)[local_proc_rank]
  train_loader = glt.distributed.DistNeighborLoader(
    data=dataset,
    num_neighbors=[int(fanout) for fanout in fan_out.split(',')],
    input_nodes=('paper', train_idx),
    batch_size=train_batch_size,
    shuffle=True,
    drop_last=False,
    edge_dir=edge_dir,
    collect_features=True,
    to_device=current_device,
    random_seed=random_seed,
    worker_options = glt.distributed.MpDistSamplingWorkerOptions(
      num_workers=1,
      worker_devices=sampling_device,
      worker_concurrency=4,
      master_addr=master_addr,
      master_port=train_loader_master_port,
      channel_size=train_channel_size,
      pin_memory=True,
      use_all2all=use_all2all,
      rpc_timeout=rpc_timeout,
      num_rpc_threads=2
    )
  )
  # Create distributed neighbor loader for validation.
  val_idx = val_idx.split(val_idx.size(0) // num_training_procs)[local_proc_rank]
  val_loader = glt.distributed.DistNeighborLoader(
    data=dataset,
    num_neighbors=[int(fanout) for fanout in fan_out.split(',')],
    input_nodes=('paper', val_idx),
    batch_size=val_batch_size,
    shuffle=True,
    drop_last=False,
    edge_dir=edge_dir,
    collect_features=True,
    to_device=current_device,
    random_seed=random_seed,
    worker_options = glt.distributed.MpDistSamplingWorkerOptions(
      num_workers=1,
      worker_devices=sampling_device,
      worker_concurrency=4,
      master_addr=master_addr,
      master_port=val_loader_master_port,
      channel_size=val_channel_size,
      pin_memory=True,
      use_all2all=use_all2all,
      rpc_timeout=rpc_timeout,
      num_rpc_threads=2
    )
  )
  # Load checkpoint
  ckpt = None
  if ckpt_path is not None:
    try:
      ckpt = torch.load(ckpt_path)
    except FileNotFoundError as e:
      print(f"Checkpoint file not found: {e}")
      return -1
  # Define model and optimizer.
  if with_gpu:
    torch.cuda.set_device(current_device)
  model = RGNN(dataset.get_edge_types(),
               dataset.node_features['paper'].shape[1],
               hidden_channels,
               num_classes,
               num_layers=num_layers,
               dropout=0.2,
               model=model_type,
               heads=num_heads,
               node_type='paper',
               with_trim=trim_to_layer).to(current_device)
  if ckpt is not None:
    model.load_state_dict(ckpt['model_state_dict'])
  model = model.to(current_device, dtype=dtype)
  model = DistributedDataParallel(model,
                                  device_ids=[current_device.index] if with_gpu else None,
                                  find_unused_parameters=True)
  # Report total parameter + buffer memory for this rank's replica.
  param_size = 0
  for param in model.parameters():
    param_size += param.nelement() * param.element_size()
  buffer_size = 0
  for buffer in model.buffers():
    buffer_size += buffer.nelement() * buffer.element_size()
  size_all_mb = (param_size + buffer_size) / 1024**2
  print('model size: {:.3f}MB'.format(size_all_mb))
  loss_fcn = torch.nn.CrossEntropyLoss().to(current_device)
  optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
  if ckpt is not None:
    optimizer.load_state_dict(ckpt['optimizer_state_dict'])
  # Mid-epoch validation cadence derived from the requested epoch fraction.
  batch_num = (len(train_idx) + train_batch_size - 1) // train_batch_size
  validation_freq = int(batch_num * validation_frac_within_epoch)
  is_success = False
  epoch_num = 0
  training_start = time.time()
  for epoch in range(epochs):
    model.train()
    total_loss = 0
    train_acc = 0
    idx = 0
    gpu_mem_alloc = 0
    epoch_start = time.time()
    for batch in tqdm.tqdm(train_loader):
      idx += 1
      batch_size = batch['paper'].batch_size
      out = model(batch.x_dict,
                  batch.edge_index_dict,
                  num_sampled_nodes_dict=batch.num_sampled_nodes,
                  num_sampled_edges_dict=batch.num_sampled_edges)[:batch_size]
      out = out.to(torch.float32)
      # The model may return fewer rows than seeds; clamp before slicing labels.
      batch_size = min(batch_size, out.shape[0])
      y = batch['paper'].y[:batch_size]
      loss = loss_fcn(out, y)
      loss = loss.to(dtype)
      optimizer.zero_grad()
      loss.backward()
      optimizer.step()
      total_loss += loss.item()
      train_acc += sklearn.metrics.accuracy_score(y.cpu().numpy(),
          out.argmax(1).detach().cpu().numpy())*100
      gpu_mem_alloc += (
        torch.cuda.max_memory_allocated() / 1000000
        if with_gpu
        else 0
      )
      #checkpoint
      if ckpt_steps > 0 and idx % ckpt_steps == 0:
        if with_gpu:
          torch.cuda.synchronize()
        torch.distributed.barrier()
        if rank == 0:
          epoch_num = epoch + idx / batch_num
          glt.utils.common.save_ckpt(idx + epoch * batch_num,
              ckpt_dir, model.module, optimizer, epoch_num)
        torch.distributed.barrier()
      # evaluate
      if idx % validation_freq == 0:
        if with_gpu:
          torch.cuda.synchronize()
        torch.distributed.barrier()
        epoch_num = epoch + idx / batch_num
        model.eval()
        rank_val_acc, global_acc = evaluate(model, val_loader, current_device,
                                            with_gpu, rank,
                                            world_size, epoch_num)
        # Early stop across all ranks once the target accuracy is hit.
        if validation_acc is not None and global_acc >= validation_acc:
          is_success = True
          break
        model.train()
    train_acc /= idx
    gpu_mem_alloc /= idx
    if with_gpu:
      torch.cuda.synchronize()
    torch.distributed.barrier()
    #checkpoint at the end of epoch
    if checkpoint_on_epoch_end:
      if rank == 0:
        epoch_num = epoch + 1
        glt.utils.common.save_ckpt(idx + epoch * batch_num,
            ckpt_dir, model.module, optimizer, epoch_num)
      torch.distributed.barrier()
    # evaluate at the end of epoch
    if evaluate_on_epoch_end and not is_success:
      epoch_num = epoch + 1
      model.eval()
      rank_val_acc, global_acc = evaluate(model, val_loader, current_device,
                                          with_gpu, rank, world_size, epoch_num)
      if validation_acc is not None and global_acc >= validation_acc:
        is_success = True
    # NOTE(review): rank_val_acc is only bound once evaluate() has run at
    # least once; if neither mid-epoch nor epoch-end validation fired, this
    # write raises NameError — confirm validation_freq is always reachable.
    tqdm.tqdm.write(
      "Rank{:02d} | Epoch {:03d} | Loss {:.4f} | Train Acc {:.2f} | Val Acc {:.2f} | Time {} | GPU {:.1f} MB".format(
        current_ctx.rank,
        epoch,
        total_loss,
        train_acc,
        rank_val_acc*100,
        str(datetime.timedelta(seconds = int(time.time() - epoch_start))),
        gpu_mem_alloc
      )
    )
    # stop training if success
    if is_success:
      break
  if rank == 0:
    status = mllog_constants.SUCCESS if is_success else mllog_constants.ABORTED
    mllogger.end(key=mllog_constants.RUN_STOP,
                 metadata={mllog_constants.STATUS: status,
                           mllog_constants.EPOCH_NUM: epoch_num,
                          }
                )
  print("Total time taken " + str(datetime.timedelta(seconds = int(time.time() - training_start))))
if __name__ == '__main__':
  mllogger.event(key=mllog_constants.CACHE_CLEAR, value=True)
  mllogger.start(key=mllog_constants.INIT_START)
  parser = argparse.ArgumentParser()
  root = osp.join(osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))), 'data', 'igbh')
  glt.utils.ensure_dir(root)
  parser.add_argument('--path', type=str, default=root,
      help='path containing the datasets')
  parser.add_argument('--dataset_size', type=str, default='full',
      choices=['tiny', 'small', 'medium', 'large', 'full'],
      help='size of the datasets')
  parser.add_argument('--num_classes', type=int, default=2983,
      choices=[19, 2983], help='number of classes')
  parser.add_argument('--in_memory', type=int, default=0,
      choices=[0, 1], help='0:read only mmap_mode=r, 1:load into memory')
  # Model
  parser.add_argument('--model', type=str, default='rgat',
      choices=['rgat', 'rsage'])
  # Model parameters
  parser.add_argument('--fan_out', type=str, default='15,10,5')
  parser.add_argument('--train_batch_size', type=int, default=512)
  parser.add_argument('--val_batch_size', type=int, default=512)
  parser.add_argument('--hidden_channels', type=int, default=512)
  parser.add_argument('--learning_rate', type=float, default=0.001)
  parser.add_argument('--epochs', type=int, default=2)
  parser.add_argument('--num_layers', type=int, default=3)
  parser.add_argument('--num_heads', type=int, default=4)
  parser.add_argument('--random_seed', type=int, default=42)
  # Distributed settings.
  parser.add_argument("--num_nodes", type=int, default=2,
      help="Number of distributed nodes.")
  parser.add_argument("--node_rank", type=int, default=0,
      help="The current node rank.")
  parser.add_argument("--num_training_procs", type=int, default=2,
      help="The number of traning processes per node.")
  parser.add_argument("--master_addr", type=str, default='localhost',
      help="The master address for RPC initialization.")
  parser.add_argument("--training_pg_master_port", type=int, default=12111,
      help="The port used for PyTorch's process group initialization across training processes.")
  parser.add_argument("--train_loader_master_port", type=int, default=12112,
      help="The port used for RPC initialization across all sampling workers of train loader.")
  parser.add_argument("--val_loader_master_port", type=int, default=12113,
      help="The port used for RPC initialization across all sampling workers of val loader.")
  parser.add_argument("--cpu_mode", action="store_true",
      help="Only use CPU for sampling and training, default is False.")
  parser.add_argument("--edge_dir", type=str, default='out',
      help="sampling direction, can be 'in' for 'by_dst' or 'out' for 'by_src' for partitions.")
  parser.add_argument('--layout', type=str, default='COO',
      help="Layout of input graph: CSC, CSR, COO. Default is COO.")
  parser.add_argument('--train_channel_size', type=str, default='16GB',
      help="Size of shared memory queue to put sampled results for train dataset")
  parser.add_argument('--val_channel_size', type=str, default='16GB',
      help="Size of shared memory queue to put sampled results for val dataset")
  parser.add_argument("--rpc_timeout", type=int, default=180,
      help="rpc timeout in seconds")
  parser.add_argument("--split_training_sampling", action="store_true",
      help="Use seperate GPUs for training and sampling processes.")
  parser.add_argument("--with_trim", action="store_true",
      help="use trim_to_layer function from PyG")
  parser.add_argument("--precision", type=str, default='fp32',
      choices=['fp32', 'fp16', 'bf16'], help="Precision to train the model")
  # NOTE(review): the trailing commas after the next add_argument calls create
  # throwaway one-element tuples — harmless, but likely unintended.
  parser.add_argument("--graph_caching", action="store_true",
      help="load the full graph topology for each partition"),
  parser.add_argument("--use_all2all", action="store_true",
      help="using all2all for cross node feature collection instead of p2p rpc"),
  parser.add_argument("--validation_frac_within_epoch", type=float, default=0.05,
      help="Fraction of the epoch after which validation should be performed.")
  parser.add_argument("--validation_acc", type=float, default=0.72,
      help="Validation accuracy threshold to stop training once reached.")
  parser.add_argument("--evaluate_on_epoch_end", action="store_true",
      help="Evaluate using validation set on each epoch end."),
  parser.add_argument("--checkpoint_on_epoch_end", action="store_true",
      help="Save checkpoint on each epoch end."),
  parser.add_argument('--ckpt_steps', type=int, default=-1,
      help="Save checkpoint every n steps. Default is -1, which means no checkpoint is saved.")
  parser.add_argument('--ckpt_path', type=str, default=None,
      help="Path to load checkpoint from. Default is None.")
  args = parser.parse_args()
  assert args.layout in ['COO', 'CSC', 'CSR']
  glt.utils.common.seed_everything(args.random_seed)
  # when set --cpu_mode or GPU is not available, use cpu only mode.
  args.with_gpu = (not args.cpu_mode) and torch.cuda.is_available()
  if args.with_gpu:
    assert(not args.num_training_procs > torch.cuda.device_count())
    if args.split_training_sampling:
      # Training and sampling each need their own GPU per process.
      assert(not args.num_training_procs > torch.cuda.device_count() // 2)
  # Only the first node emits MLPerf submission metadata.
  if args.node_rank == 0:
    world_size = args.num_nodes * args.num_training_procs
    submission_info(mllogger, 'GNN', 'reference_implementation')
    mllogger.event(key=mllog_constants.GLOBAL_BATCH_SIZE, value=world_size*args.train_batch_size)
    mllogger.event(key=mllog_constants.OPT_BASE_LR, value=args.learning_rate)
    mllogger.event(key=mllog_constants.SEED,value=args.random_seed)
  mllogger.end(key=mllog_constants.INIT_STOP)
  mllogger.start(key=mllog_constants.RUN_START)
  print('--- Loading data partition ...\n')
  # One dataset partition per node; shared by all training processes spawned
  # below (the tensors are placed in shared memory).
  data_pidx = args.node_rank % args.num_nodes
  dataset = glt.distributed.DistDataset(edge_dir=args.edge_dir, graph_caching=args.graph_caching)
  dataset.load(
    root_dir=osp.join(args.path, f'{args.dataset_size}-partitions'),
    partition_idx=data_pidx,
    graph_mode='ZERO_COPY' if args.with_gpu else 'CPU',
    input_layout = args.layout,
    feature_with_gpu=args.with_gpu,
    graph_caching = args.graph_caching,
    whole_node_label_file={'paper': osp.join(args.path, f'{args.dataset_size}-label', 'label.pt')}
  )
  train_idx = torch.load(
    osp.join(args.path, f'{args.dataset_size}-train-partitions', f'partition{data_pidx}.pt')
  )
  val_idx = torch.load(
    osp.join(args.path, f'{args.dataset_size}-val-partitions', f'partition{data_pidx}.pt')
  )
  train_idx.share_memory_()
  val_idx.share_memory_()
  print('--- Launching training processes ...\n')
  # spawn() prepends the local process rank as the first argument of
  # run_training_proc.
  torch.multiprocessing.spawn(
    run_training_proc,
    args=(args.num_nodes, args.node_rank, args.num_training_procs,
          args.split_training_sampling, args.hidden_channels, args.num_classes,
          args.num_layers, args.model, args.num_heads, args.fan_out,
          args.epochs, args.train_batch_size, args.val_batch_size, args.learning_rate,
          args.random_seed,
          dataset, train_idx, val_idx,
          args.train_channel_size,
          args.val_channel_size,
          args.master_addr,
          args.training_pg_master_port,
          args.train_loader_master_port,
          args.val_loader_master_port,
          args.with_gpu,
          args.with_trim,
          args.precision,
          args.use_all2all,
          args.edge_dir,
          args.rpc_timeout,
          args.validation_acc,
          args.validation_frac_within_epoch,
          args.evaluate_on_epoch_end,
          args.checkpoint_on_epoch_end,
          args.ckpt_steps,
          args.ckpt_path),
    nprocs=args.num_training_procs,
    join=True
  )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/partition.py | examples/igbh/partition.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os.path as osp
import graphlearn_torch as glt
import torch
from dataset import IGBHeteroDataset
from typing import Literal
def convert_graph_layout(base_path: str,
                         edge_dict: dict,
                         layout: str):
  """Re-export each edge type's partitioned topology in ``layout``.

  Loads the COO partition data under ``base_path`` for every edge type in
  ``edge_dict``, rebuilds a CPU graph, and overwrites each edge type's
  ``rows.pt`` / ``cols.pt`` files with the compressed representation.

  Args:
    base_path: Directory containing one sub-directory per edge type.
    edge_dict: Mapping of edge type -> relative sub-directory name.
    layout: Target layout, 'CSR' or 'CSC'.
  """
  device = torch.device('cpu')
  graph_dict = {}
  for etype, e_path in edge_dict.items():
    graph = glt.partition.base.load_graph_partition_data(osp.join(base_path, e_path), device)
    # Fix: identity comparison per PEP 8 (was `graph != None`); also skips
    # edge types with no partition data on disk.
    if graph is not None:
      graph_dict[etype] = graph
  # 'out' edges (by source) correspond to CSR; 'in' edges (by dest) to CSC.
  edge_dir = 'out' if layout == 'CSR' else 'in'
  dataset = glt.distributed.DistDataset(edge_dir=edge_dir)
  edge_index, edge_ids, edge_weights = {}, {}, {}
  for k, v in graph_dict.items():
    edge_index[k] = v.edge_index
    edge_weights[k] = v.weights
    edge_ids[k] = v.eids
  # COO is the original layout of raw igbh graph
  dataset.init_graph(edge_index, edge_ids, edge_weights, layout='COO',
                     graph_mode='CPU', device=device)
  for etype in graph_dict:
    graph = dataset.get_graph(etype)
    indptr, indices, _ = graph.export_topology()
    path = osp.join(base_path, edge_dict[etype])
    if layout == 'CSR':
      # CSR: indptr over source rows, indices hold destination columns.
      torch.save(indptr, osp.join(path, 'rows.pt'))
      torch.save(indices, osp.join(path, 'cols.pt'))
    else:
      # CSC: indptr over destination columns, indices hold source rows.
      torch.save(indptr, osp.join(path, 'cols.pt'))
      torch.save(indices, osp.join(path, 'rows.pt'))
def partition_dataset(src_path: str,
dst_path: str,
num_partitions: int,
chunk_size: int,
dataset_size: str='tiny',
in_memory: bool=True,
edge_assign_strategy: str='by_src',
use_label_2K: bool=False,
with_feature: bool=True,
use_graph_caching: bool=False,
data_type: str="fp32",
layout: Literal['CSC', 'CSR', 'COO'] = 'COO'):
print(f'-- Loading igbh_{dataset_size} ...')
use_fp16 = False
if data_type == "fp16":
use_fp16 = True
data = IGBHeteroDataset(src_path, dataset_size, in_memory, use_label_2K, use_fp16=use_fp16)
node_num = {k : v.shape[0] for k, v in data.feat_dict.items()}
print('-- Saving label ...')
label_dir = osp.join(dst_path, f'{dataset_size}-label')
glt.utils.ensure_dir(label_dir)
torch.save(data.label.squeeze(), osp.join(label_dir, 'label.pt'))
print('-- Partitioning training idx ...')
train_idx = data.train_idx
train_idx = train_idx.split(train_idx.size(0) // num_partitions)
train_idx_partitions_dir = osp.join(dst_path, f'{dataset_size}-train-partitions')
glt.utils.ensure_dir(train_idx_partitions_dir)
for pidx in range(num_partitions):
torch.save(train_idx[pidx], osp.join(train_idx_partitions_dir, f'partition{pidx}.pt'))
print('-- Partitioning validation idx ...')
val_idx = data.val_idx
val_idx = val_idx.split(val_idx.size(0) // num_partitions)
val_idx_partitions_dir = osp.join(dst_path, f'{dataset_size}-val-partitions')
glt.utils.ensure_dir(val_idx_partitions_dir)
for pidx in range(num_partitions):
torch.save(val_idx[pidx], osp.join(val_idx_partitions_dir, f'partition{pidx}.pt'))
print('-- Partitioning graph and features ...')
feat_precision = torch.float32
if data_type == 'fp16':
feat_precision = torch.float16
elif data_type == 'bf16':
feat_precision = torch.bfloat16
partitions_dir = osp.join(dst_path, f'{dataset_size}-partitions')
partitioner = glt.partition.RandomPartitioner(
output_dir=partitions_dir,
num_parts=num_partitions,
num_nodes=node_num,
edge_index=data.edge_dict,
node_feat=data.feat_dict,
node_feat_dtype = feat_precision,
edge_assign_strategy=edge_assign_strategy,
chunk_size=chunk_size,
)
partitioner.partition(with_feature, graph_caching=use_graph_caching)
if layout in ['CSC', 'CSR']:
compress_edge_dict = {}
compress_edge_dict[('paper', 'cites', 'paper')] = 'paper__cites__paper'
compress_edge_dict[('paper', 'written_by', 'author')] = 'paper__written_by__author'
compress_edge_dict[('author', 'affiliated_to', 'institute')] = 'author__affiliated_to__institute'
compress_edge_dict[('paper', 'topic', 'fos')] = 'paper__topic__fos'
compress_edge_dict[('author', 'rev_written_by', 'paper')] = 'author__rev_written_by__paper'
compress_edge_dict[('institute', 'rev_affiliated_to', 'author')] = 'institute__rev_affiliated_to__author'
compress_edge_dict[('fos', 'rev_topic', 'paper')] = 'fos__rev_topic__paper'
compress_edge_dict[('paper', 'published', 'journal')] = 'paper__published__journal'
compress_edge_dict[('paper', 'venue', 'conference')] = 'paper__venue__conference'
compress_edge_dict[('journal', 'rev_published', 'paper')] = 'journal__rev_published__paper'
compress_edge_dict[('conference', 'rev_venue', 'paper')] = 'conference__rev_venue__paper'
if use_graph_caching:
base_path = osp.join(dst_path, f'{dataset_size}-partitions', 'graph')
convert_graph_layout(base_path, compress_edge_dict, layout)
else:
for pidx in range(num_partitions):
base_path = osp.join(dst_path, f'{dataset_size}-partitions', f'part{pidx}', 'graph')
convert_graph_layout(base_path, compress_edge_dict, layout)
if __name__ == '__main__':
root = osp.join(osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))), 'data', 'igbh')
glt.utils.ensure_dir(root)
parser = argparse.ArgumentParser(description="Arguments for partitioning ogbn datasets.")
parser.add_argument('--src_path', type=str, default=root,
help='path containing the datasets')
parser.add_argument('--dst_path', type=str, default=root,
help='path containing the partitioned datasets')
parser.add_argument('--dataset_size', type=str, default='full',
choices=['tiny', 'small', 'medium', 'large', 'full'],
help='size of the datasets')
parser.add_argument('--num_classes', type=int, default=2983,
choices=[19, 2983], help='number of classes')
parser.add_argument('--in_memory', type=int, default=0,
choices=[0, 1], help='0:read only mmap_mode=r, 1:load into memory')
parser.add_argument("--num_partitions", type=int, default=2,
help="Number of partitions")
parser.add_argument("--chunk_size", type=int, default=10000,
help="Chunk size for feature partitioning.")
parser.add_argument("--edge_assign_strategy", type=str, default='by_src',
help="edge assign strategy can be either 'by_src' or 'by_dst'")
parser.add_argument('--with_feature', type=int, default=1,
choices=[0, 1], help='0:do not partition feature, 1:partition feature')
parser.add_argument('--graph_caching', type=int, default=0,
choices=[0, 1], help='0:do not save full graph topology, 1:save full graph topology')
parser.add_argument("--data_precision", type=str, default='fp32',
choices=['fp32', 'fp16', 'bf16'], help="data precision for node feature")
parser.add_argument("--layout", type=str, default='COO',
help="layout of the partitioned graph: CSC, CSR, COO")
args = parser.parse_args()
partition_dataset(
args.src_path,
args.dst_path,
num_partitions=args.num_partitions,
chunk_size=args.chunk_size,
dataset_size=args.dataset_size,
in_memory=args.in_memory,
edge_assign_strategy=args.edge_assign_strategy,
use_label_2K=args.num_classes==2983,
with_feature=args.with_feature==1,
use_graph_caching=args.graph_caching,
data_type=args.data_precision,
layout = args.layout
)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/rgnn.py | examples/igbh/rgnn.py | # Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import torch.nn.functional as F
from torch_geometric.nn import HeteroConv, GATConv, GCNConv, SAGEConv
from torch_geometric.utils import trim_to_layer
class RGNN(torch.nn.Module):
r""" [Relational GNN model](https://arxiv.org/abs/1703.06103).
Args:
etypes: edge types.
in_dim: input size.
h_dim: Dimension of hidden layer.
out_dim: Output dimension.
num_layers: Number of conv layers.
dropout: Dropout probability for hidden layers.
model: "rsage" or "rgat".
heads: Number of multi-head-attentions for GAT.
node_type: The predict node type for node classification.
"""
def __init__(self, etypes, in_dim, h_dim, out_dim, num_layers=2,
dropout=0.2, model='rgat', heads=4, node_type=None, with_trim=False):
super().__init__()
self.node_type = node_type
if node_type is not None:
self.lin = torch.nn.Linear(h_dim, out_dim)
self.convs = torch.nn.ModuleList()
for i in range(num_layers):
in_dim = in_dim if i == 0 else h_dim
h_dim = out_dim if (i == (num_layers - 1) and node_type is None) else h_dim
if model == 'rsage':
self.convs.append(HeteroConv({
etype: SAGEConv(in_dim, h_dim, root_weight=False)
for etype in etypes}))
elif model == 'rgat':
self.convs.append(HeteroConv({
etype: GATConv(in_dim, h_dim // heads, heads=heads, add_self_loops=False)
for etype in etypes}))
self.dropout = torch.nn.Dropout(dropout)
self.with_trim = with_trim
def forward(self, x_dict, edge_index_dict, num_sampled_edges_dict=None,
num_sampled_nodes_dict=None):
for i, conv in enumerate(self.convs):
if self.with_trim:
x_dict, edge_index_dict, _ = trim_to_layer(
layer=i,
num_sampled_nodes_per_hop=num_sampled_nodes_dict,
num_sampled_edges_per_hop=num_sampled_edges_dict,
x=x_dict,
edge_index=edge_index_dict
)
for key in list(edge_index_dict.keys()):
if key[0] not in x_dict or key[-1] not in x_dict:
del edge_index_dict[key]
x_dict = conv(x_dict, edge_index_dict)
if i != len(self.convs) - 1:
x_dict = {key: F.leaky_relu(x) for key, x in x_dict.items()}
x_dict = {key: self.dropout(x) for key, x in x_dict.items()}
if hasattr(self, 'lin'): # for node classification
return self.lin(x_dict[self.node_type])
else:
return x_dict
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/train_rgnn_multi_gpu.py | examples/igbh/train_rgnn_multi_gpu.py | # Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse, datetime, os
import numpy as np
import os.path as osp
import sklearn.metrics
import time, tqdm
import torch
import warnings
import torch.distributed as dist
import graphlearn_torch as glt
import mlperf_logging.mllog.constants as mllog_constants
from torch.nn.parallel import DistributedDataParallel
from dataset import IGBHeteroDataset
from mlperf_logging_utils import get_mlperf_logger, submission_info
from utilities import create_ckpt_folder
from rgnn import RGNN
warnings.filterwarnings("ignore")
mllogger = get_mlperf_logger(path=osp.dirname(osp.abspath(__file__)))
def evaluate(model, dataloader, current_device, rank, world_size, epoch_num):
if rank == 0:
mllogger.start(
key=mllog_constants.EVAL_START,
metadata={mllog_constants.EPOCH_NUM: epoch_num},
)
predictions = []
labels = []
with torch.no_grad():
for batch in dataloader:
batch_size = batch['paper'].batch_size
out = model(
{
node_name: node_feat.to(current_device).to(torch.float32)
for node_name, node_feat in batch.x_dict.items()
},
batch.edge_index_dict
)[:batch_size]
labels.append(batch['paper'].y[:batch_size].cpu().numpy())
predictions.append(out.argmax(1).cpu().numpy())
predictions = np.concatenate(predictions)
labels = np.concatenate(labels)
acc = sklearn.metrics.accuracy_score(labels, predictions)
torch.cuda.synchronize()
dist.barrier()
acc_tensor = torch.tensor(acc).to(current_device)
torch.distributed.all_reduce(acc_tensor, op=torch.distributed.ReduceOp.SUM)
global_acc = acc_tensor.item() / world_size
if rank == 0:
mllogger.event(
key=mllog_constants.EVAL_ACCURACY,
value=global_acc,
metadata={mllog_constants.EPOCH_NUM: epoch_num},
)
mllogger.end(
key=mllog_constants.EVAL_STOP,
metadata={mllog_constants.EPOCH_NUM: epoch_num},
)
return acc, global_acc
def run_training_proc(rank, world_size,
hidden_channels, num_classes, num_layers, model_type, num_heads, fan_out,
epochs, train_batch_size, val_batch_size, learning_rate, random_seed, dataset,
train_idx, val_idx, with_gpu, validation_acc, validation_frac_within_epoch,
evaluate_on_epoch_end, checkpoint_on_epoch_end, ckpt_steps, ckpt_path):
if rank == 0:
if ckpt_steps > 0:
ckpt_dir = create_ckpt_folder(base_dir=osp.dirname(osp.abspath(__file__)))
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
dist.init_process_group('nccl', rank=rank, world_size=world_size)
torch.cuda.set_device(rank)
glt.utils.common.seed_everything(random_seed)
current_device =torch.device(rank)
print(f'Rank {rank} init graphlearn_torch NeighborLoader...')
# Create rank neighbor loader for training
train_idx = train_idx.split(train_idx.size(0) // world_size)[rank]
train_loader = glt.loader.NeighborLoader(
data=dataset,
num_neighbors=[int(fanout) for fanout in fan_out.split(',')],
input_nodes=('paper', train_idx),
batch_size=train_batch_size,
shuffle=True,
drop_last=False,
device=current_device,
seed=random_seed
)
# Create rank neighbor loader for validation.
val_idx = val_idx.split(val_idx.size(0) // world_size)[rank]
val_loader = glt.loader.NeighborLoader(
data=dataset,
num_neighbors=[int(fanout) for fanout in fan_out.split(',')],
input_nodes=('paper', val_idx),
batch_size=val_batch_size,
shuffle=True,
drop_last=False,
device=current_device,
seed=random_seed
)
# Load checkpoint
ckpt = None
if ckpt_path is not None:
try:
ckpt = torch.load(ckpt_path)
except FileNotFoundError as e:
print(f"Checkpoint file not found: {e}")
return -1
# Define model and optimizer.
model = RGNN(dataset.get_edge_types(),
dataset.node_features['paper'].shape[1],
hidden_channels,
num_classes,
num_layers=num_layers,
dropout=0.2,
model=model_type,
heads=num_heads,
node_type='paper').to(current_device)
if ckpt is not None:
model.load_state_dict(ckpt['model_state_dict'])
model = DistributedDataParallel(model,
device_ids=[current_device.index] if with_gpu else None,
find_unused_parameters=True)
param_size = 0
for param in model.parameters():
param_size += param.nelement() * param.element_size()
buffer_size = 0
for buffer in model.buffers():
buffer_size += buffer.nelement() * buffer.element_size()
size_all_mb = (param_size + buffer_size) / 1024**2
print('model size: {:.3f}MB'.format(size_all_mb))
loss_fcn = torch.nn.CrossEntropyLoss().to(current_device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
if ckpt is not None:
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
batch_num = (len(train_idx) + train_batch_size - 1) // train_batch_size
validation_freq = int(batch_num * validation_frac_within_epoch)
is_success = False
epoch_num = 0
training_start = time.time()
for epoch in tqdm.tqdm(range(epochs)):
model.train()
total_loss = 0
train_acc = 0
idx = 0
gpu_mem_alloc = 0
epoch_start = time.time()
for batch in train_loader:
idx += 1
batch_size = batch['paper'].batch_size
out = model(
{
node_name: node_feat.to(current_device).to(torch.float32)
for node_name, node_feat in batch.x_dict.items()
},
batch.edge_index_dict
)[:batch_size]
y = batch['paper'].y[:batch_size]
loss = loss_fcn(out, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
train_acc += sklearn.metrics.accuracy_score(y.cpu().numpy(),
out.argmax(1).detach().cpu().numpy())*100
gpu_mem_alloc += (
torch.cuda.max_memory_allocated() / 1000000
if with_gpu
else 0
)
#checkpoint
if ckpt_steps > 0 and idx % ckpt_steps == 0:
if with_gpu:
torch.cuda.synchronize()
dist.barrier()
if rank == 0:
epoch_num = epoch + idx / batch_num
glt.utils.common.save_ckpt(idx + epoch * batch_num,
ckpt_dir, model.module, optimizer, epoch_num)
dist.barrier()
# evaluate
if idx % validation_freq == 0:
if with_gpu:
torch.cuda.synchronize()
dist.barrier()
epoch_num = epoch + idx / batch_num
model.eval()
rank_val_acc, global_acc = evaluate(model, val_loader, current_device,
rank, world_size, epoch_num)
if validation_acc is not None and global_acc >= validation_acc:
is_success = True
break
model.train()
if with_gpu:
torch.cuda.synchronize()
dist.barrier()
#checkpoint at the end of epoch
if checkpoint_on_epoch_end:
if rank == 0:
epoch_num = epoch + 1
glt.utils.common.save_ckpt(idx + epoch * batch_num,
ckpt_dir, model.module, optimizer, epoch_num)
dist.barrier()
# evaluate at the end of epoch
if evaluate_on_epoch_end and not is_success:
epoch_num = epoch + 1
model.eval()
rank_val_acc, global_acc = evaluate(model, val_loader, current_device,
rank, world_size, epoch_num)
if validation_acc is not None and global_acc >= validation_acc:
is_success = True
#tqdm
train_acc /= idx
gpu_mem_alloc /= idx
tqdm.tqdm.write(
"Rank{:02d} | Epoch {:03d} | Loss {:.4f} | Train Acc {:.2f} | Val Acc {:.2f} | Time {} | GPU {:.1f} MB".format(
rank,
epoch,
total_loss,
train_acc,
rank_val_acc*100,
str(datetime.timedelta(seconds = int(time.time() - epoch_start))),
gpu_mem_alloc
)
)
# stop training if success
if is_success:
break
#log run status
if rank == 0:
status = mllog_constants.SUCCESS if is_success else mllog_constants.ABORTED
mllogger.end(key=mllog_constants.RUN_STOP,
metadata={mllog_constants.STATUS: status,
mllog_constants.EPOCH_NUM: epoch_num,
}
)
print("Total time taken " + str(datetime.timedelta(seconds = int(time.time() - training_start))))
if __name__ == '__main__':
mllogger.event(key=mllog_constants.CACHE_CLEAR, value=True)
mllogger.start(key=mllog_constants.INIT_START)
parser = argparse.ArgumentParser()
root = osp.join(osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))), 'data', 'igbh')
glt.utils.ensure_dir(root)
parser.add_argument('--path', type=str, default=root,
help='path containing the datasets')
parser.add_argument('--dataset_size', type=str, default='full',
choices=['tiny', 'small', 'medium', 'large', 'full'],
help='size of the datasets')
parser.add_argument('--num_classes', type=int, default=2983,
choices=[19, 2983], help='number of classes')
parser.add_argument('--in_memory', type=int, default=1,
choices=[0, 1], help='0:read only mmap_mode=r, 1:load into memory')
# Model
parser.add_argument('--model', type=str, default='rgat',
choices=['rgat', 'rsage'])
# Model parameters
parser.add_argument('--fan_out', type=str, default='15,10,5')
parser.add_argument('--train_batch_size', type=int, default=1024)
parser.add_argument('--val_batch_size', type=int, default=1024)
parser.add_argument('--hidden_channels', type=int, default=512)
parser.add_argument('--learning_rate', type=float, default=0.001)
parser.add_argument('--epochs', type=int, default=2)
parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--num_heads', type=int, default=4)
parser.add_argument('--random_seed', type=int, default=42)
parser.add_argument("--cpu_mode", action="store_true",
help="Only use CPU for sampling and training, default is False.")
parser.add_argument("--edge_dir", type=str, default='in')
parser.add_argument('--layout', type=str, default='COO',
help="Layout of input graph. Default is COO.")
parser.add_argument("--pin_feature", action="store_true",
help="Pin the feature in host memory. Default is False.")
parser.add_argument("--use_fp16", action="store_true",
help="To use FP16 for loading the features. Default is False.")
parser.add_argument("--validation_frac_within_epoch", type=float, default=0.05,
help="Fraction of the epoch after which validation should be performed.")
parser.add_argument("--validation_acc", type=float, default=0.72,
help="Validation accuracy threshold to stop training once reached.")
parser.add_argument("--evaluate_on_epoch_end", action="store_true",
help="Evaluate using validation set on each epoch end.")
parser.add_argument("--checkpoint_on_epoch_end", action="store_true",
help="Save checkpoint on each epoch end.")
parser.add_argument('--ckpt_steps', type=int, default=-1,
help="Save checkpoint every n steps. Default is -1, which means no checkpoint is saved.")
parser.add_argument('--ckpt_path', type=str, default=None,
help="Path to load checkpoint from. Default is None.")
args = parser.parse_args()
args.with_gpu = (not args.cpu_mode) and torch.cuda.is_available()
assert args.layout in ['COO', 'CSC', 'CSR']
glt.utils.common.seed_everything(args.random_seed)
world_size = torch.cuda.device_count()
submission_info(mllogger, 'GNN', 'reference_implementation')
mllogger.event(key=mllog_constants.GLOBAL_BATCH_SIZE, value=world_size*args.train_batch_size)
mllogger.event(key=mllog_constants.OPT_BASE_LR, value=args.learning_rate)
mllogger.event(key=mllog_constants.SEED,value=args.random_seed)
mllogger.end(key=mllog_constants.INIT_STOP)
mllogger.start(key=mllog_constants.RUN_START)
igbh_dataset = IGBHeteroDataset(args.path, args.dataset_size, args.in_memory,
args.num_classes==2983, True, args.layout,
args.use_fp16)
# init graphlearn_torch Dataset.
glt_dataset = glt.data.Dataset(edge_dir=args.edge_dir)
glt_dataset.init_node_features(
node_feature_data=igbh_dataset.feat_dict,
with_gpu=args.with_gpu and args.pin_feature
)
glt_dataset.init_graph(
edge_index=igbh_dataset.edge_dict,
layout = args.layout,
graph_mode='ZERO_COPY' if args.with_gpu else 'CPU',
)
glt_dataset.init_node_labels(node_label_data={'paper': igbh_dataset.label})
train_idx = igbh_dataset.train_idx.clone().share_memory_()
val_idx = igbh_dataset.val_idx.clone().share_memory_()
print('--- Launching training processes ...\n')
torch.multiprocessing.spawn(
run_training_proc,
args=(world_size, args.hidden_channels, args.num_classes, args.num_layers,
args.model, args.num_heads, args.fan_out, args.epochs,
args.train_batch_size, args.val_batch_size,
args.learning_rate, args.random_seed,
glt_dataset, train_idx, val_idx, args.with_gpu,
args.validation_acc, args.validation_frac_within_epoch,
args.evaluate_on_epoch_end, args.checkpoint_on_epoch_end,
args.ckpt_steps, args.ckpt_path),
nprocs=world_size,
join=True
)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/dataset.py | examples/igbh/dataset.py | # Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import torch
import os.path as osp
from torch_geometric.utils import add_self_loops, remove_self_loops
from download import download_dataset
from typing import Literal
def float2half(base_path, dataset_size):
paper_nodes_num = {'tiny':100000, 'small':1000000, 'medium':10000000, 'large':100000000, 'full':269346174}
author_nodes_num = {'tiny':357041, 'small':1926066, 'medium':15544654, 'large':116959896, 'full':277220883}
# paper node
paper_feat_path = osp.join(base_path, 'paper', 'node_feat.npy')
paper_fp16_feat_path = osp.join(base_path, 'paper', 'node_feat_fp16.pt')
if not osp.exists(paper_fp16_feat_path):
if dataset_size in ['large', 'full']:
num_paper_nodes = paper_nodes_num[dataset_size]
paper_node_features = torch.from_numpy(np.memmap(paper_feat_path, dtype='float32', mode='r', shape=(num_paper_nodes,1024)))
else:
paper_node_features = torch.from_numpy(np.load(paper_feat_path, mmap_mode='r'))
paper_node_features = paper_node_features.half()
torch.save(paper_node_features, paper_fp16_feat_path)
# author node
author_feat_path = osp.join(base_path, 'author', 'node_feat.npy')
author_fp16_feat_path = osp.join(base_path, 'author', 'node_feat_fp16.pt')
if not osp.exists(author_fp16_feat_path):
if dataset_size in ['large', 'full']:
num_author_nodes = author_nodes_num[dataset_size]
author_node_features = torch.from_numpy(np.memmap(author_feat_path, dtype='float32', mode='r', shape=(num_author_nodes,1024)))
else:
author_node_features = torch.from_numpy(np.load(author_feat_path, mmap_mode='r'))
author_node_features = author_node_features.half()
torch.save(author_node_features, author_fp16_feat_path)
# institute node
institute_feat_path = osp.join(base_path, 'institute', 'node_feat.npy')
institute_fp16_feat_path = osp.join(base_path, 'institute', 'node_feat_fp16.pt')
if not osp.exists(institute_fp16_feat_path):
institute_node_features = torch.from_numpy(np.load(institute_feat_path, mmap_mode='r'))
institute_node_features = institute_node_features.half()
torch.save(institute_node_features, institute_fp16_feat_path)
# fos node
fos_feat_path = osp.join(base_path, 'fos', 'node_feat.npy')
fos_fp16_feat_path = osp.join(base_path, 'fos', 'node_feat_fp16.pt')
if not osp.exists(fos_fp16_feat_path):
fos_node_features = torch.from_numpy(np.load(fos_feat_path, mmap_mode='r'))
fos_node_features = fos_node_features.half()
torch.save(fos_node_features, fos_fp16_feat_path)
if dataset_size in ['large', 'full']:
# conference node
conference_feat_path = osp.join(base_path, 'conference', 'node_feat.npy')
conference_fp16_feat_path = osp.join(base_path, 'conference', 'node_feat_fp16.pt')
if not osp.exists(conference_fp16_feat_path):
conference_node_features = torch.from_numpy(np.load(conference_feat_path, mmap_mode='r'))
conference_node_features = conference_node_features.half()
torch.save(conference_node_features, conference_fp16_feat_path)
# journal node
journal_feat_path = osp.join(base_path, 'journal', 'node_feat.npy')
journal_fp16_feat_path = osp.join(base_path, 'journal', 'node_feat_fp16.pt')
if not osp.exists(journal_fp16_feat_path):
journal_node_features = torch.from_numpy(np.load(journal_feat_path, mmap_mode='r'))
journal_node_features = journal_node_features.half()
torch.save(journal_node_features, journal_fp16_feat_path)
class IGBHeteroDataset(object):
def __init__(self,
path,
dataset_size='tiny',
in_memory=True,
use_label_2K=False,
with_edges=True,
layout: Literal['CSC', 'CSR', 'COO'] = 'COO',
use_fp16=False):
self.dir = path
self.dataset_size = dataset_size
self.in_memory = in_memory
self.use_label_2K = use_label_2K
self.with_edges = with_edges
self.layout = layout
self.use_fp16 = use_fp16
self.ntypes = ['paper', 'author', 'institute', 'fos']
self.etypes = None
self.edge_dict = {}
self.feat_dict = {}
self.paper_nodes_num = {'tiny':100000, 'small':1000000, 'medium':10000000, 'large':100000000, 'full':269346174}
self.author_nodes_num = {'tiny':357041, 'small':1926066, 'medium':15544654, 'large':116959896, 'full':277220883}
# 'paper' nodes.
self.label = None
self.train_idx = None
self.val_idx = None
self.test_idx = None
self.base_path = osp.join(path, self.dataset_size, 'processed')
if not osp.exists(self.base_path):
download_dataset(path, 'heterogeneous', dataset_size)
if self.use_fp16:
float2half(self.base_path, self.dataset_size)
self.process()
def process(self):
# load edges
if self.with_edges:
if self.layout == 'COO':
if self.in_memory:
paper_paper_edges = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__cites__paper', 'edge_index.npy'))).t()
author_paper_edges = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__written_by__author', 'edge_index.npy'))).t()
affiliation_author_edges = torch.from_numpy(np.load(osp.join(self.base_path,
'author__affiliated_to__institute', 'edge_index.npy'))).t()
paper_fos_edges = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__topic__fos', 'edge_index.npy'))).t()
if self.dataset_size in ['large', 'full']:
paper_published_journal = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__published__journal', 'edge_index.npy'))).t()
paper_venue_conference = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__venue__conference', 'edge_index.npy'))).t()
else:
paper_paper_edges = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__cites__paper', 'edge_index.npy'), mmap_mode='r')).t()
author_paper_edges = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__written_by__author', 'edge_index.npy'), mmap_mode='r')).t()
affiliation_author_edges = torch.from_numpy(np.load(osp.join(self.base_path,
'author__affiliated_to__institute', 'edge_index.npy'), mmap_mode='r')).t()
paper_fos_edges = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__topic__fos', 'edge_index.npy'), mmap_mode='r')).t()
if self.dataset_size in ['large', 'full']:
paper_published_journal = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__published__journal', 'edge_index.npy'), mmap_mode='r')).t()
paper_venue_conference = torch.from_numpy(np.load(osp.join(self.base_path,
'paper__venue__conference', 'edge_index.npy'), mmap_mode='r')).t()
cites_edge = add_self_loops(remove_self_loops(paper_paper_edges)[0])[0]
self.edge_dict = {
('paper', 'cites', 'paper'): (torch.cat([cites_edge[1, :], cites_edge[0, :]]), torch.cat([cites_edge[0, :], cites_edge[1, :]])),
('paper', 'written_by', 'author'): author_paper_edges,
('author', 'affiliated_to', 'institute'): affiliation_author_edges,
('paper', 'topic', 'fos'): paper_fos_edges,
('author', 'rev_written_by', 'paper'): (author_paper_edges[1, :], author_paper_edges[0, :]),
('institute', 'rev_affiliated_to', 'author'): (affiliation_author_edges[1, :], affiliation_author_edges[0, :]),
('fos', 'rev_topic', 'paper'): (paper_fos_edges[1, :], paper_fos_edges[0, :])
}
if self.dataset_size in ['large', 'full']:
self.edge_dict[('paper', 'published', 'journal')] = paper_published_journal
self.edge_dict[('paper', 'venue', 'conference')] = paper_venue_conference
self.edge_dict[('journal', 'rev_published', 'paper')] = (paper_published_journal[1, :], paper_published_journal[0, :])
self.edge_dict[('conference', 'rev_venue', 'paper')] = (paper_venue_conference[1, :], paper_venue_conference[0, :])
# directly load from CSC or CSC files, which can be generated using compress_graph.py
else:
compress_edge_dict = {}
compress_edge_dict[('paper', 'cites', 'paper')] = 'paper__cites__paper'
compress_edge_dict[('paper', 'written_by', 'author')] = 'paper__written_by__author'
compress_edge_dict[('author', 'affiliated_to', 'institute')] = 'author__affiliated_to__institute'
compress_edge_dict[('paper', 'topic', 'fos')] = 'paper__topic__fos'
compress_edge_dict[('author', 'rev_written_by', 'paper')] = 'author__rev_written_by__paper'
compress_edge_dict[('institute', 'rev_affiliated_to', 'author')] = 'institute__rev_affiliated_to__author'
compress_edge_dict[('fos', 'rev_topic', 'paper')] = 'fos__rev_topic__paper'
if self.dataset_size in ['large', 'full']:
compress_edge_dict[('paper', 'published', 'journal')] = 'paper__published__journal'
compress_edge_dict[('paper', 'venue', 'conference')] = 'paper__venue__conference'
compress_edge_dict[('journal', 'rev_published', 'paper')] = 'journal__rev_published__paper'
compress_edge_dict[('conference', 'rev_venue', 'paper')] = 'conference__rev_venue__paper'
for etype in compress_edge_dict.keys():
edge_path = osp.join(self.base_path, self.layout, compress_edge_dict[etype])
try:
edge_path = osp.join(self.base_path, self.layout, compress_edge_dict[etype])
indptr = torch.load(osp.join(edge_path, 'indptr.pt'))
indices = torch.load(osp.join(edge_path, 'indices.pt'))
if self.layout == 'CSC':
self.edge_dict[etype] = (indices, indptr)
else:
self.edge_dict[etype] = (indptr, indices)
except FileNotFoundError as e:
print(f"FileNotFound: {e}")
exit()
except Exception as e:
print(f"Exception: {e}")
exit()
self.etypes = list(self.edge_dict.keys())
# load features and labels
label_file = 'node_label_19.npy' if not self.use_label_2K else 'node_label_2K.npy'
paper_feat_path = osp.join(self.base_path, 'paper', 'node_feat.npy')
paper_lbl_path = osp.join(self.base_path, 'paper', label_file)
num_paper_nodes = self.paper_nodes_num[self.dataset_size]
if self.in_memory:
if self.use_fp16:
paper_node_features = torch.load(osp.join(self.base_path, 'paper', 'node_feat_fp16.pt'))
else:
paper_node_features = torch.from_numpy(np.load(paper_feat_path))
else:
if self.dataset_size in ['large', 'full']:
paper_node_features = torch.from_numpy(np.memmap(paper_feat_path, dtype='float32', mode='r', shape=(num_paper_nodes,1024)))
else:
paper_node_features = torch.from_numpy(np.load(paper_feat_path, mmap_mode='r'))
if self.dataset_size in ['large', 'full']:
paper_node_labels = torch.from_numpy(np.memmap(paper_lbl_path, dtype='float32', mode='r', shape=(num_paper_nodes))).to(torch.long)
else:
paper_node_labels = torch.from_numpy(np.load(paper_lbl_path)).to(torch.long)
self.feat_dict['paper'] = paper_node_features
self.label = paper_node_labels
num_author_nodes = self.author_nodes_num[self.dataset_size]
author_feat_path = osp.join(self.base_path, 'author', 'node_feat.npy')
if self.in_memory:
if self.use_fp16:
author_node_features = torch.load(osp.join(self.base_path, 'author', 'node_feat_fp16.pt'))
else:
author_node_features = torch.from_numpy(np.load(author_feat_path))
else:
if self.dataset_size in ['large', 'full']:
author_node_features = torch.from_numpy(np.memmap(author_feat_path, dtype='float32', mode='r', shape=(num_author_nodes,1024)))
else:
author_node_features = torch.from_numpy(np.load(author_feat_path, mmap_mode='r'))
self.feat_dict['author'] = author_node_features
if self.in_memory:
if self.use_fp16:
institute_node_features = torch.load(osp.join(self.base_path, 'institute', 'node_feat_fp16.pt'))
else:
institute_node_features = torch.from_numpy(np.load(osp.join(self.base_path, 'institute', 'node_feat.npy')))
else:
institute_node_features = torch.from_numpy(np.load(osp.join(self.base_path, 'institute', 'node_feat.npy'), mmap_mode='r'))
self.feat_dict['institute'] = institute_node_features
if self.in_memory:
if self.use_fp16:
fos_node_features = torch.load(osp.join(self.base_path, 'fos', 'node_feat_fp16.pt'))
else:
fos_node_features = torch.from_numpy(np.load(osp.join(self.base_path, 'fos', 'node_feat.npy')))
else:
fos_node_features = torch.from_numpy(np.load(osp.join(self.base_path, 'fos', 'node_feat.npy'), mmap_mode='r'))
self.feat_dict['fos'] = fos_node_features
if self.dataset_size in ['large', 'full']:
if self.in_memory:
if self.use_fp16:
conference_node_features = torch.load(osp.join(self.base_path, 'conference', 'node_feat_fp16.pt'))
else:
conference_node_features = torch.from_numpy(np.load(osp.join(self.base_path, 'conference', 'node_feat.npy')))
else:
conference_node_features = torch.from_numpy(np.load(osp.join(self.base_path, 'conference', 'node_feat.npy'), mmap_mode='r'))
self.feat_dict['conference'] = conference_node_features
if self.in_memory:
if self.use_fp16:
journal_node_features = torch.load(osp.join(self.base_path, 'journal', 'node_feat_fp16.pt'))
else:
journal_node_features = torch.from_numpy(np.load(osp.join(self.base_path, 'journal', 'node_feat.npy')))
else:
journal_node_features = torch.from_numpy(np.load(osp.join(self.base_path, 'journal', 'node_feat.npy'), mmap_mode='r'))
self.feat_dict['journal'] = journal_node_features
# Please ensure that train_idx and val_idx have been generated using split_seeds.py
try:
self.train_idx = torch.load(osp.join(self.base_path, 'train_idx.pt'))
self.val_idx = torch.load(osp.join(self.base_path, 'val_idx.pt'))
except FileNotFoundError as e:
print(f"FileNotFound: {e}, please ensure that train_idx and val_idx have been generated using split_seeds.py")
exit()
except Exception as e:
print(f"Exception: {e}")
exit()
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/build_partition_feature.py | examples/igbh/build_partition_feature.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import os.path as osp
import graphlearn_torch as glt
import torch
from dataset import IGBHeteroDataset
def partition_feature(src_path: str,
                      dst_path: str,
                      partition_idx: int,
                      chunk_size: int,
                      dataset_size: str='tiny',
                      in_memory: bool=True,
                      use_fp16: bool=False):
  """Load the IGBH dataset and persist node features for one partition.

  Args:
    src_path: directory containing the raw igbh dataset.
    dst_path: output root; features go under '<dst_path>/<dataset_size>-partitions'.
    partition_idx: index of the partition whose features are built.
    chunk_size: number of rows written per chunk during feature dumping.
    dataset_size: igbh flavor ('tiny'/'small'/'medium'/'large'/'full').
    in_memory: load raw features fully into memory instead of mmap.
    use_fp16: store features as float16 instead of float32.
  """
  print(f'-- Loading igbh_{dataset_size} ...')
  # Topology is not needed for feature partitioning, hence with_edges=False.
  dataset = IGBHeteroDataset(src_path, dataset_size, in_memory, with_edges=False, use_fp16=use_fp16)
  print(f'-- Build feature for partition {partition_idx} ...')
  feature_root = osp.join(dst_path, f'{dataset_size}-partitions')
  feature_dtype = torch.float16 if use_fp16 else torch.float32
  glt.partition.base.build_partition_feature(
      root_dir=feature_root,
      partition_idx=partition_idx,
      chunk_size=chunk_size,
      node_feat=dataset.feat_dict,
      node_feat_dtype=feature_dtype)
if __name__ == '__main__':
  # Default data root: <repo>/data/igbh, three directory levels above this script.
  root = osp.join(osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))), 'data', 'igbh')
  glt.utils.ensure_dir(root)
  parser = argparse.ArgumentParser(description="Arguments for partitioning ogbn datasets.")
  parser.add_argument('--src_path', type=str, default=root,
      help='path containing the datasets')
  parser.add_argument('--dst_path', type=str, default=root,
      help='path containing the partitioned datasets')
  parser.add_argument('--dataset_size', type=str, default='full',
      choices=['tiny', 'small', 'medium', 'large', 'full'],
      help='size of the datasets')
  parser.add_argument('--in_memory', type=int, default=0,
      choices=[0, 1], help='0:read only mmap_mode=r, 1:load into memory')
  parser.add_argument("--partition_idx", type=int, default=0,
      help="Index of a partition")
  parser.add_argument("--chunk_size", type=int, default=10000,
      help="Chunk size for feature partitioning.")
  parser.add_argument("--use_fp16", action="store_true",
      help="save node/edge feature using fp16 format")
  args = parser.parse_args()
  # Build and persist the feature store of a single partition; graph
  # topology is handled elsewhere (partition_feature loads with_edges=False).
  partition_feature(
    args.src_path,
    args.dst_path,
    partition_idx=args.partition_idx,
    chunk_size=args.chunk_size,
    dataset_size=args.dataset_size,
    in_memory=args.in_memory==1,
    use_fp16=args.use_fp16
  )
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/mlperf_logging_utils.py | examples/igbh/mlperf_logging_utils.py | import os
from mlperf_logging import mllog
from mlperf_logging.mllog import constants
from mlperf_logging.mllog.mllog import MLLogger
def get_mlperf_logger(path, filename='mlperf_gnn.log'):
  """Return an MLPerf logger writing to ``<path>/<filename>``.

  Propagation to ancestor loggers is disabled so records only go to the
  configured MLPerf log file.
  """
  mllog.config(filename=os.path.join(path, filename))
  logger = mllog.get_mllogger()
  logger.logger.propagate = False
  return logger
def submission_info(mllogger: MLLogger, benchmark_name: str, submitter_name: str):
  """Logs required for a valid MLPerf submission."""
  # Emit the mandatory submission metadata events in the required order.
  submission_events = (
      (constants.SUBMISSION_BENCHMARK, benchmark_name),
      (constants.SUBMISSION_ORG, submitter_name),
      (constants.SUBMISSION_DIVISION, constants.CLOSED),
      (constants.SUBMISSION_STATUS, constants.ONPREM),
      (constants.SUBMISSION_PLATFORM, submitter_name),
  )
  for event_key, event_value in submission_events:
    mllogger.event(key=event_key, value=event_value)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/download.py | examples/igbh/download.py | import tarfile, hashlib, os
import os.path as osp
from tqdm import tqdm
import urllib.request as ur
# https://github.com/IllinoisGraphBenchmark/IGB-Datasets/blob/main/igb/download.py
GBFACTOR = float(1 << 30)
def decide_download(url):
    """Return True if the payload at *url* should be downloaded.

    Downloads up to 1 GB proceed silently; anything larger requires
    explicit interactive confirmation from the user.
    """
    response = ur.urlopen(url)
    size_gb = int(response.info()["Content-Length"]) / GBFACTOR
    if size_gb <= 1:
        return True
    prompt = "This will download %.2fGB. Will you proceed? (y/N) " % (size_gb)
    return input(prompt).lower() == "y"
# Direct S3 download URLs for the prepackaged IGB datasets, keyed by graph
# flavor ('homogeneous'/'heterogeneous') and dataset size. Only the sizes
# distributed as single tarballs (tiny/small/medium) are listed here.
dataset_urls = {
  'homogeneous' : {
    'tiny' : 'https://igb-public.s3.us-east-2.amazonaws.com/igb-homogeneous/igb_homogeneous_tiny.tar.gz',
    'small' : 'https://igb-public.s3.us-east-2.amazonaws.com/igb-homogeneous/igb_homogeneous_small.tar.gz',
    'medium' : 'https://igb-public.s3.us-east-2.amazonaws.com/igb-homogeneous/igb_homogeneous_medium.tar.gz'
  },
  'heterogeneous' : {
    'tiny' : 'https://igb-public.s3.us-east-2.amazonaws.com/igb-heterogeneous/igb_heterogeneous_tiny.tar.gz',
    'small' : 'https://igb-public.s3.us-east-2.amazonaws.com/igb-heterogeneous/igb_heterogeneous_small.tar.gz',
    'medium' : 'https://igb-public.s3.us-east-2.amazonaws.com/igb-heterogeneous/igb_heterogeneous_medium.tar.gz'
  }
}

# Expected md5 digests for each tarball above, keyed the same way;
# consumed by check_md5sum() after a download completes.
md5checksums = {
  'homogeneous' : {
    'tiny' : '34856534da55419b316d620e2d5b21be',
    'small' : '6781c699723529902ace0a95cafe6fe4',
    'medium' : '4640df4ceee46851fd18c0a44ddcc622'
  },
  'heterogeneous' : {
    'tiny' : '83fbc1091497ff92cf20afe82fae0ade',
    'small' : '2f42077be60a074aec24f7c60089e1bd',
    'medium' : '7f0df4296eca36553ff3a6a63abbd347'
  }
}
def check_md5sum(dataset_type, dataset_size, filename):
    """Verify *filename* against the known md5 digest for the dataset.

    Args:
      dataset_type: 'homogeneous' or 'heterogeneous' (key into md5checksums).
      dataset_size: 'tiny', 'small' or 'medium' (key into md5checksums).
      filename: path of the downloaded tarball to verify.

    Raises:
      Exception: on checksum mismatch; the corrupt file is deleted first.
    """
    original_md5 = md5checksums[dataset_type][dataset_size]
    # Stream the file in 1 MB chunks: the archives are multiple GB, so the
    # previous whole-file read() could exhaust memory on small machines.
    hasher = hashlib.md5()
    with open(filename, 'rb') as file_to_check:
        for chunk in iter(lambda: file_to_check.read(1024 * 1024), b''):
            hasher.update(chunk)
    md5_returned = hasher.hexdigest()
    if original_md5 == md5_returned:
        print(" md5sum verified.")
        return
    else:
        os.remove(filename)
        raise Exception(" md5sum verification failed!.")
def download_dataset(path, dataset_type, dataset_size):
    """Download one prepackaged IGB tarball, verify it, and extract it.

    Args:
      path: destination directory for the tarball and the extracted data.
      dataset_type: 'homogeneous' or 'heterogeneous' (key into dataset_urls).
      dataset_size: 'tiny', 'small' or 'medium' (key into dataset_urls).

    The tarball is removed after successful extraction; a checksum
    mismatch raises (and deletes the tarball) via check_md5sum().
    """
    output_directory = path
    url = dataset_urls[dataset_type][dataset_size]
    if decide_download(url):
        data = ur.urlopen(url)
        size = int(data.info()["Content-Length"])
        chunk_size = 1024*1024
        # +2 iterations give headroom for the final partial chunk.
        num_iter = int(size/chunk_size) + 2
        downloaded_size = 0
        # osp.join keeps the path portable (the original concatenated '/').
        filename = osp.join(path, "igb_" + dataset_type + "_" + dataset_size + ".tar.gz")
        with open(filename, 'wb') as f:
            pbar = tqdm(range(num_iter))
            for _ in pbar:
                chunk = data.read(chunk_size)
                downloaded_size += len(chunk)
                pbar.set_description("Downloaded {:.2f} GB".format(float(downloaded_size)/GBFACTOR))
                f.write(chunk)
        print("Downloaded" + " igb_" + dataset_type + "_" + dataset_size, end=" ->")
        check_md5sum(dataset_type, dataset_size, filename)
        # NOTE(review): extractall on an untrusted archive can write outside
        # output_directory (path traversal). The URLs here are pinned S3
        # objects, but consider tarfile's filter='data' (Python 3.12+).
        with tarfile.open(filename) as archive:
            archive.extractall(output_directory)
        size = 0
        # Distinct loop variable: the original shadowed the `path` parameter.
        for dirpath, _, files in os.walk(osp.join(output_directory, dataset_size)):
            for f in files:
                fp = osp.join(dirpath, f)
                size += osp.getsize(fp)
        print("Final dataset size {:.2f} GB.".format(size/GBFACTOR))
        os.remove(filename)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/__init__.py | examples/igbh/__init__.py | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false | |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/compress_graph.py | examples/igbh/compress_graph.py | # Copyright 2023 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse, datetime, os
import numpy as np
import torch
import os.path as osp
import graphlearn_torch as glt
from dataset import float2half
from download import download_dataset
from torch_geometric.utils import add_self_loops, remove_self_loops
from typing import Literal
class IGBHeteroDatasetCompress(object):
  """Converts raw COO topology of the IGBH heterogeneous dataset into a
  compressed (CSC or CSR) layout and persists it to disk.

  Downloads the raw dataset if missing, loads every relation's
  ``edge_index.npy``, builds a graphlearn_torch ``Dataset`` from the COO
  edges, then exports and saves ``indptr.pt``/``indices.pt`` per edge type
  under ``<path>/<dataset_size>/processed/<layout>/``.
  """
  def __init__(self,
               path,
               dataset_size,
               layout: Literal['CSC', 'CSR'] = 'CSC',):
    # path: root directory holding (or receiving) the raw dataset.
    # dataset_size: 'tiny'/'small'/'medium'/'large'/'full'.
    # layout: target compressed layout to persist.
    self.dir = path
    self.dataset_size = dataset_size
    self.layout = layout
    self.ntypes = ['paper', 'author', 'institute', 'fos']
    self.etypes = None
    self.edge_dict = {}
    # Known node counts per dataset size.
    self.paper_nodes_num = {'tiny':100000, 'small':1000000, 'medium':10000000, 'large':100000000, 'full':269346174}
    self.author_nodes_num = {'tiny':357041, 'small':1926066, 'medium':15544654, 'large':116959896, 'full':277220883}
    # Fetch the heterogeneous dataset on first use.
    if not osp.exists(osp.join(path, self.dataset_size, 'processed')):
      download_dataset(path, 'heterogeneous', dataset_size)
    self.process()

  def process(self):
    """Loads COO edges, builds the glt graph and saves compressed topology."""
    # .t() transposes the stored arrays into the 2-row COO orientation used
    # below (assumes the on-disk layout is (num_edges, 2) — confirm against
    # the raw files).
    paper_paper_edges = torch.from_numpy(np.load(osp.join(self.dir, self.dataset_size, 'processed',
                                                          'paper__cites__paper', 'edge_index.npy'))).t()
    author_paper_edges = torch.from_numpy(np.load(osp.join(self.dir, self.dataset_size, 'processed',
                                                           'paper__written_by__author', 'edge_index.npy'))).t()
    affiliation_author_edges = torch.from_numpy(np.load(osp.join(self.dir, self.dataset_size, 'processed',
                                                                 'author__affiliated_to__institute', 'edge_index.npy'))).t()
    paper_fos_edges = torch.from_numpy(np.load(osp.join(self.dir, self.dataset_size, 'processed',
                                                        'paper__topic__fos', 'edge_index.npy'))).t()
    # Journal/conference relations only exist in the large/full variants.
    if self.dataset_size in ['large', 'full']:
      paper_published_journal = torch.from_numpy(np.load(osp.join(self.dir, self.dataset_size, 'processed',
                                                                  'paper__published__journal', 'edge_index.npy'))).t()
      paper_venue_conference = torch.from_numpy(np.load(osp.join(self.dir, self.dataset_size, 'processed',
                                                                 'paper__venue__conference', 'edge_index.npy'))).t()
    # Strip any existing self loops from 'cites', then add one per node.
    cites_edge = add_self_loops(remove_self_loops(paper_paper_edges)[0])[0]
    # COO edges per relation. 'cites' is symmetrized by concatenating both
    # directions; the other relations get explicit reverse ('rev_*') types.
    self.edge_dict = {
        ('paper', 'cites', 'paper'): (torch.cat([cites_edge[1, :], cites_edge[0, :]]), torch.cat([cites_edge[0, :], cites_edge[1, :]])),
        ('paper', 'written_by', 'author'): author_paper_edges,
        ('author', 'affiliated_to', 'institute'): affiliation_author_edges,
        ('paper', 'topic', 'fos'): paper_fos_edges,
        ('author', 'rev_written_by', 'paper'): (author_paper_edges[1, :], author_paper_edges[0, :]),
        ('institute', 'rev_affiliated_to', 'author'): (affiliation_author_edges[1, :], affiliation_author_edges[0, :]),
        ('fos', 'rev_topic', 'paper'): (paper_fos_edges[1, :], paper_fos_edges[0, :])
    }
    if self.dataset_size in ['large', 'full']:
      self.edge_dict[('paper', 'published', 'journal')] = paper_published_journal
      self.edge_dict[('paper', 'venue', 'conference')] = paper_venue_conference
      self.edge_dict[('journal', 'rev_published', 'paper')] = (paper_published_journal[1, :], paper_published_journal[0, :])
      self.edge_dict[('conference', 'rev_venue', 'paper')] = (paper_venue_conference[1, :], paper_venue_conference[0, :])
    self.etypes = list(self.edge_dict.keys())
    # init graphlearn_torch Dataset.
    # 'in' edge direction corresponds to CSC, 'out' to CSR.
    edge_dir = 'out' if self.layout == 'CSR' else 'in'
    glt_dataset = glt.data.Dataset(edge_dir=edge_dir)
    glt_dataset.init_graph(
      edge_index=self.edge_dict,
      graph_mode='CPU',
    )
    # save the corresponding csr or csc file
    # Maps each edge type to its on-disk directory name. The journal and
    # conference entries are registered unconditionally but are only used
    # when those types are present in self.etypes (large/full datasets).
    compress_edge_dict = {}
    compress_edge_dict[('paper', 'cites', 'paper')] = 'paper__cites__paper'
    compress_edge_dict[('paper', 'written_by', 'author')] = 'paper__written_by__author'
    compress_edge_dict[('author', 'affiliated_to', 'institute')] = 'author__affiliated_to__institute'
    compress_edge_dict[('paper', 'topic', 'fos')] = 'paper__topic__fos'
    compress_edge_dict[('author', 'rev_written_by', 'paper')] = 'author__rev_written_by__paper'
    compress_edge_dict[('institute', 'rev_affiliated_to', 'author')] = 'institute__rev_affiliated_to__author'
    compress_edge_dict[('fos', 'rev_topic', 'paper')] = 'fos__rev_topic__paper'
    compress_edge_dict[('paper', 'published', 'journal')] = 'paper__published__journal'
    compress_edge_dict[('paper', 'venue', 'conference')] = 'paper__venue__conference'
    compress_edge_dict[('journal', 'rev_published', 'paper')] = 'journal__rev_published__paper'
    compress_edge_dict[('conference', 'rev_venue', 'paper')] = 'conference__rev_venue__paper'
    # Export each relation's compressed topology and persist it.
    for etype in self.etypes:
      graph = glt_dataset.get_graph(etype)
      indptr, indices, _ = graph.export_topology()
      path = os.path.join(self.dir, self.dataset_size, 'processed', self.layout, compress_edge_dict[etype])
      if not os.path.exists(path):
        os.makedirs(path)
      torch.save(indptr, os.path.join(path, 'indptr.pt'))
      torch.save(indices, os.path.join(path, 'indices.pt'))
    path = os.path.join(self.dir, self.dataset_size, 'processed', self.layout)
    print(f"The {self.layout} graph has been persisted in path: {path}")
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # Default data root: <repo>/data/igbh, three directory levels above this script.
  root = osp.join(osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))), 'data', 'igbh')
  glt.utils.ensure_dir(root)
  parser.add_argument('--path', type=str, default=root,
      help='path containing the datasets')
  parser.add_argument('--dataset_size', type=str, default='full',
      choices=['tiny', 'small', 'medium', 'large', 'full'],
      help='size of the datasets')
  parser.add_argument("--layout", type=str, default='CSC')
  parser.add_argument('--use_fp16', action="store_true",
      help="convert the node/edge feature into fp16 format")
  args = parser.parse_args()
  print(f"Start constructing the {args.layout} graph...")
  # Constructing the dataset triggers download (if needed), compression,
  # and persistence of the chosen layout — see IGBHeteroDatasetCompress.
  igbh_dataset = IGBHeteroDatasetCompress(args.path, args.dataset_size, args.layout)
  # Optionally also convert node features to fp16 on disk.
  if args.use_fp16:
    base_path = osp.join(args.path, args.dataset_size, 'processed')
    float2half(base_path, args.dataset_size)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/split_seeds.py | examples/igbh/split_seeds.py | import argparse
import os.path as osp
import torch
class SeedSplitter(object):
  """Splits the labeled paper nodes of IGBH into train/validation seeds.

  On construction, draws a deterministic random permutation of the labeled
  node indices (seeded by ``random_seed``) and saves the first 60% as
  ``train_idx.pt`` and the following ``validation_frac`` share as
  ``val_idx.pt`` under ``<path>/<dataset_size>/processed/``.
  """
  def __init__(self,
               path,
               dataset_size='tiny',
               use_label_2K=True,
               random_seed=42,
               validation_frac=0.01):
    # path: dataset root; output goes to <path>/<dataset_size>/processed/.
    # use_label_2K: for 'full', selects the 2983-class labeled subset.
    self.path = path
    self.dataset_size = dataset_size
    self.use_label_2K = use_label_2K
    self.random_seed = random_seed
    self.validation_frac = validation_frac
    self.paper_nodes_num = {'tiny':100000, 'small':1000000, 'medium':10000000, 'large':100000000, 'full':269346174}
    self.process()

  def process(self):
    """Draws the permutation and persists the train/val index tensors."""
    torch.manual_seed(self.random_seed)
    total = self.paper_nodes_num[self.dataset_size]
    # Only part of the 'full' graph carries labels; the labeled count
    # depends on whether the 2K (2983-class) or 19-class label set is used.
    if self.dataset_size == 'full':
      total = 157675969 if self.use_label_2K else 227130858
    perm = torch.randperm(total)
    train_count = int(total * 0.6)
    val_count = int(total * self.validation_frac)
    out_dir = osp.join(self.path, self.dataset_size, 'processed')
    torch.save(perm[:train_count], osp.join(out_dir, 'train_idx.pt'))
    torch.save(perm[train_count : train_count + val_count], osp.join(out_dir, 'val_idx.pt'))
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # Default data root: <repo>/data/igbh, three directory levels above this script.
  root = osp.join(osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))), 'data', 'igbh')
  parser.add_argument('--path', type=str, default=root,
      help='path containing the datasets')
  parser.add_argument('--dataset_size', type=str, default='full',
      choices=['tiny', 'small', 'medium', 'large', 'full'],
      help='size of the datasets')
  # NOTE: argparse applies type=int to a string default, so '42' becomes 42.
  parser.add_argument("--random_seed", type=int, default='42')
  parser.add_argument('--num_classes', type=int, default=2983,
      choices=[19, 2983], help='number of classes')
  parser.add_argument("--validation_frac", type=float, default=0.005,
      help="Fraction of labeled vertices to be used for validation.")
  args = parser.parse_args()
  # 2983 classes selects the 2K label set inside SeedSplitter.
  splitter = SeedSplitter(path=args.path,
                          dataset_size=args.dataset_size,
                          use_label_2K=(args.num_classes==2983),
                          random_seed=args.random_seed,
                          validation_frac=args.validation_frac)
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/igbh/utilities.py | examples/igbh/utilities.py | import os
import time
import torch
def create_ckpt_folder(base_dir, prefix="ckpt"):
    """Create a timestamped checkpoint directory under *base_dir*.

    Args:
      base_dir: parent directory for the new folder.
      prefix: optional name prefix; the folder is '<prefix>_<timestamp>',
        or just the timestamp when prefix is empty/falsy.

    Returns:
      The path of the (possibly pre-existing) checkpoint directory.
    """
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    folder_name = f"{prefix}_{timestamp}" if prefix else timestamp
    full_path = os.path.join(base_dir, folder_name)
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard when several workers start at the same second.
    os.makedirs(full_path, exist_ok=True)
    return full_path
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/multi_gpu/train_sage_ogbn_papers100m.py | examples/multi_gpu/train_sage_ogbn_papers100m.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import time
import torch
import numpy as np
import os.path as osp
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn.functional as F
from numpy import genfromtxt
from torch.nn.parallel import DistributedDataParallel
from torch_geometric.nn import GraphSAGE
import graphlearn_torch as glt
def run(rank, world_size, glt_ds, train_idx,
        num_features, num_classes):
  """Per-GPU training entry point, spawned once per device via mp.spawn.

  Sets up the NCCL process group, builds a graphlearn_torch NeighborLoader
  over this rank's shard of train_idx, and trains a DDP-wrapped GraphSAGE
  for 9 epochs. The ordering below (process group init before set_device /
  DDP construction, barriers at epoch ends) is load-bearing — do not
  reorder casually.

  Args:
    rank: this process's rank, also used as the CUDA device index.
    world_size: total number of training processes (== GPU count).
    glt_ds: shared graphlearn_torch Dataset built by the parent process.
    train_idx: full training node index tensor (sharded below).
    num_features: input feature dimension.
    num_classes: number of output classes.
  """
  os.environ['MASTER_ADDR'] = 'localhost'
  os.environ['MASTER_PORT'] = '12355'
  dist.init_process_group('nccl', rank=rank, world_size=world_size)
  torch.cuda.set_device(rank)
  torch.manual_seed(42)
  print(f'Rank {rank} init graphlearn_torch NeighborLoader...')
  # Take this rank's contiguous shard. NOTE(review): when the index count
  # is not divisible by world_size, split() yields an extra trailing chunk
  # that no rank consumes — a few seeds may be dropped; confirm intended.
  train_idx = train_idx.split(train_idx.size(0) // world_size)[rank]
  # 3-hop neighbor sampling with fanouts 15/10/5.
  train_loader = glt.loader.NeighborLoader(glt_ds,
                                           [15, 10, 5],
                                           train_idx,
                                           batch_size=1024,
                                           shuffle=True,
                                           device=torch.device(rank))
  print(f'Rank {rank} build graphlearn_torch NeighborLoader Done.')
  model = GraphSAGE(
    in_channels=num_features,
    hidden_channels=256,
    num_layers=3,
    out_channels=num_classes,
  ).to(rank)
  model = DistributedDataParallel(model, device_ids=[rank])
  optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
  for epoch in range(1, 10):
    model.train()
    start = time.time()
    total_examples = total_loss = 0
    for batch in train_loader:
      optimizer.zero_grad()
      # Only the first batch_size rows are seed nodes; the rest are
      # sampled neighbors used for message passing.
      out = model(batch.x, batch.edge_index)[:batch.batch_size].log_softmax(dim=-1)
      loss = F.nll_loss(out, batch.y[:batch.batch_size])
      loss.backward()
      optimizer.step()
      total_examples += batch.batch_size
      total_loss += float(loss) * batch.batch_size
    end = time.time()
    # Synchronize ranks before rank 0 reports the epoch summary.
    dist.barrier()
    if rank == 0:
      print(f'Epoch: {epoch:03d}, Loss: {(total_loss / total_examples):.4f},',
            f'Epoch Time: {end - start}')
  dist.barrier()
if __name__ == '__main__':
  world_size = torch.cuda.device_count()
  start = time.time()
  # Data root: <repo>/data/papers100M-bin, three levels above this script.
  root = osp.join(osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))),
                  'data',
                  'papers100M-bin')
  label = np.load(osp.join(root, "raw/node-label.npz"))
  train_idx = genfromtxt(root+'/split/time/train.csv',
                         delimiter='\n')
  # np.long was a deprecated alias of the builtin int and was removed in
  # NumPy 1.24; np.int64 is the explicit, portable equivalent.
  train_idx = torch.from_numpy(train_idx.astype(np.int64))
  data = np.load(osp.join(root, "raw/data.npz"))
  edge_index = data["edge_index"]
  feature = torch.from_numpy(data["node_feat"]).type(torch.float)
  label = torch.from_numpy(label["node_label"]).type(torch.long).squeeze()
  print(f'Load data cost {time.time()-start} s.')
  start = time.time()
  print('Build graphlearn_torch dataset...')
  glt_dataset = glt.data.Dataset()
  glt_dataset.init_graph(
    edge_index=edge_index,
    graph_mode='ZERO_COPY',
    directed=True
  )
  # Hot features (by in-degree) are split onto GPU device groups; the
  # split ratio scales with the number of GPUs, capped at 4 per group.
  glt_dataset.init_node_features(
    node_feature_data=feature,
    sort_func=glt.data.sort_by_in_degree,
    split_ratio=0.15 * min(world_size, 4),
    device_group_list=[glt.data.DeviceGroup(0, [0, 1, 2, 3]),
                       glt.data.DeviceGroup(1, [4, 5, 6, 7])],
  )
  glt_dataset.init_node_labels(node_label_data=label)
  print(f'Build graphlearn_torch csr_topo and feature cost {time.time() - start} s.')
  # Train indices must live in shared memory to be visible to all spawned
  # worker processes.
  train_idx.share_memory_()
  # 128 input features, 172 classes for ogbn-papers100M.
  mp.spawn(run,
           args=(world_size,
                 glt_dataset,
                 train_idx,
                 128,
                 172),
           nprocs=world_size,
           join=True)
| python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/pai/ogbn_products/train_products_sage.py | examples/pai/ogbn_products/train_products_sage.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import common_io
import os
import time
import torch
import graphlearn_torch as glt
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from torch_geometric.nn import GraphSAGE
def run(rank, world_size, dataset, train_idx, train_label, train_id2idx, batch_size,
        lr, nbrs_num, features_num, hidden_dim, class_num, depth, epochs,):
  """Per-GPU training entry point, spawned once per device via mp.spawn.

  Initializes the NCCL process group, builds a NeighborLoader over this
  rank's shard of train_idx, and trains a DDP-wrapped GraphSAGE. The setup
  ordering (process group before set_device/DDP, final barrier) is
  load-bearing — do not reorder casually.

  Args:
    rank: process rank, also used as the CUDA device index.
    world_size: total number of training processes.
    dataset: shared glt TableDataset built by the parent process.
    train_idx: full training node ids (sharded below).
    train_label: labels aligned with train_id2idx.
    train_id2idx: maps node ids to positions in train_label.
    batch_size / lr / nbrs_num / features_num / hidden_dim / class_num /
    depth / epochs: training hyperparameters forwarded from the CLI.
  """
  os.environ['MASTER_ADDR'] = 'localhost'
  os.environ['MASTER_PORT'] = '12355'
  torch.distributed.init_process_group('nccl', rank=rank, world_size=world_size)
  torch.cuda.set_device(rank)
  torch.manual_seed(42)
  # Take this rank's contiguous shard of the training seeds.
  train_idx = train_idx.split(train_idx.size(0) // world_size)[rank]
  train_label = train_label.to(rank)
  train_id2idx = train_id2idx.to(rank)
  # Create neighbor loader for training
  train_loader = glt.loader.NeighborLoader(dataset,
                                           nbrs_num,
                                           train_idx,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           drop_last=True,
                                           device=torch.device(rank))
  # Define model and optimizer.
  model = GraphSAGE(
    in_channels=features_num,
    hidden_channels=hidden_dim,
    num_layers=depth,
    out_channels=class_num,
  ).to(rank)
  model = DistributedDataParallel(model, device_ids=[rank])
  optimizer = torch.optim.Adam(model.parameters(), lr=lr)
  # Train and test.
  for epoch in range(0, epochs):
    model.train()
    start = time.time()
    for batch in train_loader:
      optimizer.zero_grad()
      # Only the first batch_size rows are seed nodes; labels are looked
      # up via the id->index mapping.
      out = model(batch.x, batch.edge_index)[:batch.batch_size].log_softmax(dim=-1)
      loss = F.nll_loss(out, train_label[train_id2idx[batch.batch]])
      loss.backward()
      optimizer.step()
    end = time.time()
    # NOTE: the reported loss is that of the last batch in the epoch.
    print(f'-- [Trainer {rank}] Epoch: {epoch:03d}, Loss: {loss:.4f}, Epoch Time: {end - start}\n')
  torch.cuda.synchronize()
  torch.distributed.barrier()
def read_train_idx_and_label(table):
  """Read all (id, label) rows from an ODPS table.

  Returns a pair of 1-D long tensors: node ids and their labels.
  """
  records = []
  reader = common_io.table.TableReader(table,
                                       num_threads=10,
                                       capacity=10240)
  # Drain the table in batches; the reader signals exhaustion by raising.
  while True:
    try:
      records.extend(reader.read(1024, allow_smaller_final_batch=True))
    except common_io.exception.OutOfRangeException:
      reader.close()
      break
  ids = torch.tensor([row[0] for row in records], dtype=torch.long)
  labels = torch.tensor([row[1] for row in records], dtype=torch.long)
  return ids.squeeze(), labels.squeeze()
if __name__ == "__main__":
  import ast  # stdlib; safely parses the neighbor-count list below

  argparser = argparse.ArgumentParser("Train GraphSAGE.")
  argparser.add_argument('--tables', type=str, default="" ,
                         help='ODPS input table names')
  argparser.add_argument('--class_num', type=int, default=47)
  argparser.add_argument('--features_num', type=int, default=100)
  argparser.add_argument('--hidden_dim', type=int, default=256)
  argparser.add_argument('--depth', type=int, default=3)
  argparser.add_argument('--nbrs_num', type=str, default='[15, 10, 5]')
  argparser.add_argument('--learning_rate', type=float, default=0.003)
  argparser.add_argument('--epoch', type=int, default=10)
  argparser.add_argument('--batch_size', type=int, default=512)
  argparser.add_argument('--split_ratio', type=float, default=0.2)
  args = argparser.parse_args()
  # ast.literal_eval accepts the same '[15, 10, 5]' literal that the
  # previous eval() call did, but refuses arbitrary expressions coming
  # from the command line.
  args.nbrs_num = ast.literal_eval(args.nbrs_num)
  world_size = torch.cuda.device_count()
  # --tables carries three comma-separated ODPS table names.
  node_table, edge_table, train_table = args.tables.split(',')
  train_idx, train_label = read_train_idx_and_label(train_table)
  train_id2idx = glt.utils.id2idx(train_idx)
  # Shared memory so the tensors are visible to all spawned workers.
  train_idx.share_memory_()
  train_label.share_memory_()
  train_id2idx.share_memory_()
  node_tables = {'i': node_table}
  edge_tables = {('i', 'i-i', 'i') : edge_table}
  # init glt Dataset.
  glt_dataset = glt.data.TableDataset()
  glt_dataset.load(edge_tables=edge_tables,
                   node_tables=node_tables,
                   graph_mode='ZERO_COPY',
                   sort_func=glt.data.sort_by_in_degree,
                   split_ratio=args.split_ratio,
                   directed=False,
                   device=0)
  mp.spawn(run,
           args=(world_size,
                 glt_dataset,
                 train_idx,
                 train_label,
                 train_id2idx,
                 args.batch_size,
                 args.learning_rate,
                 args.nbrs_num,
                 args.features_num,
                 args.hidden_dim,
                 args.class_num,
                 args.depth,
                 args.epoch,
                 ),
           nprocs=world_size,
           join=True)
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/pai/ogbn_products/data_preprocess.py | examples/pai/ogbn_products/data_preprocess.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os.path as osp
from ogb.nodeproppred import PygNodePropPredDataset
import numpy as np
from ogb.nodeproppred import NodePropPredDataset
# load data
# Data root: ../../data/products relative to this script's directory.
root = osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))),
                '../..', 'data', 'products')
dataset = NodePropPredDataset('ogbn-products', root)
split_idx = dataset.get_idx_split()
train_idx = split_idx['train']
graph, label = dataset[0] # label with shape(2449029, 1)
num_nodes = graph['num_nodes'] # 2449029
node_feat = graph['node_feat'] # shape(2449029, 100)
edge_index = graph['edge_index'] # shape(2, 123718280)
# dump to disk
# Write tab-separated tables consumed by the PAI training scripts:
#   train table: <node_id>\t<label>
#   node table:  <node_id>\t<feature values joined by ':'>
#   edge table:  <src_id>\t<dst_id>\t<edge_id>
train_table = osp.join(root, 'ogbn_products_train')
node_table = osp.join(root, 'ogbn_products_node')
edge_table = osp.join(root, 'ogbn_products_edge')
with open(train_table, 'w') as f:
  for i in train_idx:
    f.write(str(i) + '\t' + str(label[i][0]) + '\n')
with open(node_table, 'w') as f:
  for i in range(num_nodes):
    f.write(str(i) + '\t' + str(':'.join(map(str, node_feat[i]))) + '\n')
with open(edge_table, 'w') as f:
  for i in range(edge_index.shape[1]):
    f.write(str(edge_index[0][i]) + '\t' + str(edge_index[1][i]) + '\t' + str(i) + '\n')
alibaba/graphlearn-for-pytorch | https://github.com/alibaba/graphlearn-for-pytorch/blob/88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9/examples/pai/ogbn_products/dist_train_products_sage.py | examples/pai/ogbn_products/dist_train_products_sage.py | # Copyright 2022 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import common_io
import os
import time
import torch
import graphlearn_torch as glt
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn.functional as F
from torch.nn.parallel import DistributedDataParallel
from torch_geometric.nn import GraphSAGE
def run(rank, num_nodes, node_rank, num_training_procs_per_node,
dataset, train_idx, train_label, train_id2idx, batch_size,
lr, nbrs_num, features_num, hidden_dim, class_num, depth, epochs,
master_addr, training_pg_master_port, train_loader_master_port):
# Initialize graphlearn_torch distributed worker group context.
glt.distributed.init_worker_group(
world_size=num_nodes*num_training_procs_per_node,
rank=node_rank*num_training_procs_per_node+rank,
group_name='distributed-sage-supervised-trainer'
)
current_ctx = glt.distributed.get_context()
current_device = torch.device(rank % torch.cuda.device_count())
# Initialize training process group of PyTorch.
torch.distributed.init_process_group(
backend='nccl',
rank=current_ctx.rank,
world_size=current_ctx.world_size,
init_method='tcp://{}:{}'.format(master_addr, training_pg_master_port)
)
train_idx = train_idx.split(train_idx.size(0) // num_training_procs_per_node)[rank]
train_label = train_label.to(current_device)
train_id2idx = train_id2idx.to(current_device)
# Create distributed neighbor loader for training
train_loader = glt.distributed.DistNeighborLoader(
data=dataset,
num_neighbors=nbrs_num,
input_nodes=train_idx,
batch_size=batch_size,
shuffle=True,
drop_last=True,
collect_features=True,
to_device=current_device,
worker_options=glt.distributed.MpDistSamplingWorkerOptions(
num_workers=1,
worker_devices=[current_device],
worker_concurrency=4,
master_addr=master_addr,
master_port=train_loader_master_port,
channel_size='1GB',
pin_memory=True
)
)
# Define model and optimizer.
torch.cuda.set_device(current_device)
model = GraphSAGE(
in_channels=features_num,
hidden_channels=hidden_dim,
num_layers=depth,
out_channels=class_num,
).to(current_device)
model = DistributedDataParallel(model, device_ids=[current_device.index])
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# Train and test.
for epoch in range(0, epochs):
model.train()
start = time.time()
for batch in train_loader:
optimizer.zero_grad()
out = model(batch.x, batch.edge_index)[:batch.batch_size].log_softmax(dim=-1)
loss = F.nll_loss(out, train_label[train_id2idx[batch.batch]])
loss.backward()
optimizer.step()
end = time.time()
print(f'-- [Trainer {current_ctx.rank}] Epoch: {epoch:03d}, Loss: {loss:.4f}, Epoch Time: {end - start}\n')
torch.cuda.synchronize()
torch.distributed.barrier()
def read_train_idx_and_label(table, partition_idx, num_partitions):
id_label = []
reader = common_io.table.TableReader(table,
num_threads=10,
capacity=10240)
while True:
try:
data = reader.read(1024, allow_smaller_final_batch=True)
id_label.extend(data)
except common_io.exception.OutOfRangeException:
reader.close()
break
ids = torch.tensor([e[0] for e in id_label], dtype=torch.long)
label = torch.tensor([e[1] for e in id_label], dtype=torch.long)
return ids.squeeze(), label.squeeze()
if __name__ == "__main__":
argparser = argparse.ArgumentParser("Train GraphSAGE.")
argparser.add_argument('--tables', type=str, default="" ,
help='ODPS input table names')
argparser.add_argument("--num_training_procs", type=int, default=2,
help="The number of traning processes per node.")
argparser.add_argument('--num_vertices', type=int, default=2449029)
argparser.add_argument('--class_num', type=int, default=47)
argparser.add_argument('--features_num', type=int, default=100)
argparser.add_argument('--hidden_dim', type=int, default=256)
argparser.add_argument('--depth', type=int, default=3)
argparser.add_argument('--nbrs_num', type=str, default='[15, 10, 5]')
argparser.add_argument('--learning_rate', type=float, default=0.003)
argparser.add_argument('--epoch', type=int, default=10)
argparser.add_argument('--batch_size', type=int, default=512)
args = argparser.parse_args()
args.nbrs_num = eval(args.nbrs_num)
# get dist context from PAI environment.
num_nodes = int(os.getenv('WORLD_SIZE', 1))
node_rank = int(os.getenv('RANK', 0))
master_addr = os.getenv('MASTER_ADDR', '127.0.0.1')
master_port = int(os.getenv('MASTER_PORT', 29500))
train_loader_master_port = 11234
partitioner_master_port = 11235
node_table, edge_table, train_table = args.tables.split(',')
train_idx, train_label = read_train_idx_and_label(train_table, node_rank, num_nodes)
train_id2idx = glt.utils.id2idx(train_idx)
train_idx.share_memory_()
train_label.share_memory_()
train_id2idx.share_memory_()
node_tables = {'i': node_table}
edge_tables = {('i', 'i-i', 'i') : edge_table}
# init glt Dataset.
print('--- Loading data partition ...')
glt_dataset = glt.distributed.DistTableDataset()
glt_dataset.load(num_partitions=num_nodes,
partition_idx=node_rank,
edge_tables=edge_tables,
node_tables=node_tables,
num_nodes=args.num_vertices,
graph_mode='ZERO_COPY',
device_group_list=None,
reader_threads=10,
reader_capacity=10240,
reader_batch_size=1024,
feature_with_gpu=True,
edge_assign_strategy='by_src',
chunk_size=10000,
master_addr=master_addr,
master_port=partitioner_master_port,
num_rpc_threads=16,
)
print('--- Launching training processes ...')
mp.spawn(run,
args=(num_nodes,
node_rank,
args.num_training_procs,
glt_dataset,
train_idx,
train_label,
train_id2idx,
args.batch_size,
args.learning_rate,
args.nbrs_num,
args.features_num,
args.hidden_dim,
args.class_num,
args.depth,
args.epoch,
master_addr,
master_port,
train_loader_master_port,
),
nprocs=args.num_training_procs,
join=True) | python | Apache-2.0 | 88ff111ac0d9e45c6c9d2d18cfc5883dca07e9f9 | 2026-01-05T07:14:39.718240Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/setup.py | setup.py |
from setuptools import setup
setup(name='stable_nalu',
version='0.1.0',
description='Implementation of NALU with stable training',
url='https://github.com/AndreasMadsen/publication-stable-nalu',
author='Andreas Madsen',
author_email='amwebdk@gmail.com',
license='MIT',
packages=['stable_nalu'],
install_requires=[
'numpy',
'tqdm',
'torch',
'scipy',
'pandas',
'tensorflow',
'torchvision',
'tensorboard',
'tensorboardX',
],
test_suite='nose.collector',
tests_require=['nose'],
include_package_data=True,
zip_safe=False) | python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/experiments/sequential_mnist.py | experiments/sequential_mnist.py |
import os
import ast
import math
import torch
import stable_nalu
import argparse
# Parse arguments
parser = argparse.ArgumentParser(description='Run either the MNIST counting or MNIST Arithmetic task')
parser.add_argument('--layer-type',
action='store',
default='NALU',
choices=list(stable_nalu.network.SequentialMnistNetwork.UNIT_NAMES),
type=str,
help='Specify the layer type, e.g. RNN-tanh, LSTM, NAC, NALU')
parser.add_argument('--operation',
action='store',
default='cumsum',
choices=[
'cumsum', 'sum', 'cumprod', 'prod', 'cumdiv', 'div'
],
type=str,
help='Specify the operation to use, sum or count')
parser.add_argument('--regualizer',
action='store',
default=10,
type=float,
help='Specify the regualization lambda to be used')
parser.add_argument('--regualizer-z',
action='store',
default=0,
type=float,
help='Specify the z-regualization lambda to be used')
parser.add_argument('--regualizer-oob',
action='store',
default=1,
type=float,
help='Specify the oob-regualization lambda to be used')
parser.add_argument('--mnist-digits',
action='store',
default=[0,1,2,3,4,5,6,7,8,9],
type=lambda str: list(map(int,str)),
help='MNIST digits to use')
parser.add_argument('--mnist-outputs',
action='store',
default=1,
type=int,
help='number of MNIST to use, more than 1 adds redundant values')
parser.add_argument('--model-simplification',
action='store',
default='none',
choices=[
'none', 'solved-accumulator', 'pass-through'
],
type=str,
help='Simplifiations applied to the model')
parser.add_argument('--max-epochs',
action='store',
default=1000,
type=int,
help='Specify the max number of epochs to use')
parser.add_argument('--batch-size',
action='store',
default=64,
type=int,
help='Specify the batch-size to be used for training')
parser.add_argument('--seed',
action='store',
default=0,
type=int,
help='Specify the seed to use')
parser.add_argument('--interpolation-length',
action='store',
default=10,
type=int,
help='Specify the sequence length for interpolation')
parser.add_argument('--extrapolation-lengths',
action='store',
default=[100, 1000],
type=ast.literal_eval,
help='Specify the sequence lengths used for the extrapolation dataset')
parser.add_argument('--softmax-transform',
action='store_true',
default=False,
help='Should a softmax transformation be used to control the output of the CNN model')
parser.add_argument('--nac-mul',
action='store',
default='none',
choices=['none', 'normal', 'safe', 'max-safe', 'mnac'],
type=str,
help='Make the second NAC a multiplicative NAC, used in case of a just NAC network.')
parser.add_argument('--nac-oob',
action='store',
default='clip',
choices=['regualized', 'clip'],
type=str,
help='Choose of out-of-bound should be handled by clipping or regualization.')
parser.add_argument('--regualizer-scaling',
action='store',
default='linear',
choices=['exp', 'linear'],
type=str,
help='Use an expoentational scaling from 0 to 1, or a linear scaling.')
parser.add_argument('--regualizer-scaling-start',
action='store',
default=10000,
type=int,
help='Start linear scaling at this global step.')
parser.add_argument('--regualizer-scaling-end',
action='store',
default=100000,
type=int,
help='Stop linear scaling at this global step.')
parser.add_argument('--regualizer-shape',
action='store',
default='linear',
choices=['squared', 'linear'],
type=str,
help='Use either a squared or linear shape for the bias and oob regualizer.')
parser.add_argument('--mnac-epsilon',
action='store',
default=0,
type=float,
help='Set the idendity epsilon for MNAC.')
parser.add_argument('--nalu-bias',
action='store_true',
default=False,
help='Enables bias in the NALU gate')
parser.add_argument('--nalu-two-nac',
action='store_true',
default=False,
help='Uses two independent NACs in the NALU Layer')
parser.add_argument('--nalu-two-gate',
action='store_true',
default=False,
help='Uses two independent gates in the NALU Layer')
parser.add_argument('--nalu-mul',
action='store',
default='normal',
choices=['normal', 'safe', 'trig', 'max-safe', 'mnac'],
help='Multplication unit, can be normal, safe, trig')
parser.add_argument('--nalu-gate',
action='store',
default='normal',
choices=['normal', 'regualized', 'obs-gumbel', 'gumbel'],
type=str,
help='Can be normal, regualized, obs-gumbel, or gumbel')
parser.add_argument('--no-cuda',
action='store_true',
default=False,
help=f'Force no CUDA (cuda usage is detected automatically as {torch.cuda.is_available()})')
parser.add_argument('--name-prefix',
action='store',
default='sequence_mnist',
type=str,
help='Where the data should be stored')
parser.add_argument('--remove-existing-data',
action='store_true',
default=False,
help='Should old results be removed')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Should network measures (e.g. gates) and gradients be shown')
args = parser.parse_args()
setattr(args, 'cuda', torch.cuda.is_available() and not args.no_cuda)
# Print configuration
print(f'running')
print(f' - layer_type: {args.layer_type}')
print(f' - operation: {args.operation}')
print(f' - regualizer: {args.regualizer}')
print(f' - regualizer_z: {args.regualizer_z}')
print(f' - regualizer_oob: {args.regualizer_oob}')
print(f' - mnist_digits: {args.mnist_digits}')
print(f' - mnist_outputs: {args.mnist_outputs}')
print(f' - model_simplification: {args.model_simplification}')
print(f' -')
print(f' - max_epochs: {args.max_epochs}')
print(f' - batch_size: {args.batch_size}')
print(f' - seed: {args.seed}')
print(f' -')
print(f' - interpolation_length: {args.interpolation_length}')
print(f' - extrapolation_lengths: {args.extrapolation_lengths}')
print(f' -')
print(f' - softmax_transform: {args.softmax_transform}')
print(f' - nac_mul: {args.nac_mul}')
print(f' - nac_oob: {args.nac_oob}')
print(f' - regualizer_scaling: {args.regualizer_scaling}')
print(f' - regualizer_scaling_start: {args.regualizer_scaling_start}')
print(f' - regualizer_scaling_end: {args.regualizer_scaling_end}')
print(f' - regualizer_shape: {args.regualizer_shape}')
print(f' - mnac_epsilon: {args.mnac_epsilon}')
print(f' - nalu_bias: {args.nalu_bias}')
print(f' - nalu_two_nac: {args.nalu_two_nac}')
print(f' - nalu_two_gate: {args.nalu_two_gate}')
print(f' - nalu_mul: {args.nalu_mul}')
print(f' - nalu_gate: {args.nalu_gate}')
print(f' -')
print(f' - cuda: {args.cuda}')
print(f' - name_prefix: {args.name_prefix}')
print(f' - remove_existing_data: {args.remove_existing_data}')
print(f' - verbose: {args.verbose}')
# Prepear logging
summary_writer = stable_nalu.writer.SummaryWriter(
f'{args.name_prefix}/{args.layer_type.lower()}'
f'{"-nac-" if args.nac_mul != "none" else ""}'
f'{"n" if args.nac_mul == "normal" else ""}'
f'{"s" if args.nac_mul == "safe" else ""}'
f'{"s" if args.nac_mul == "max-safe" else ""}'
f'{"t" if args.nac_mul == "trig" else ""}'
f'{"m" if args.nac_mul == "mnac" else ""}'
f'{"-nalu-" if (args.nalu_bias or args.nalu_two_nac or args.nalu_two_gate or args.nalu_mul != "normal" or args.nalu_gate != "normal") else ""}'
f'{"b" if args.nalu_bias else ""}'
f'{"2n" if args.nalu_two_nac else ""}'
f'{"2g" if args.nalu_two_gate else ""}'
f'{"s" if args.nalu_mul == "safe" else ""}'
f'{"s" if args.nalu_mul == "max-safe" else ""}'
f'{"t" if args.nalu_mul == "trig" else ""}'
f'{"m" if args.nalu_mul == "mnac" else ""}'
f'{"r" if args.nalu_gate == "regualized" else ""}'
f'{"u" if args.nalu_gate == "gumbel" else ""}'
f'{"uu" if args.nalu_gate == "obs-gumbel" else ""}'
f'_d-{"".join(map(str, args.mnist_digits))}'
f'_h-{args.mnist_outputs}'
f'_op-{args.operation.lower()}'
f'_oob-{"c" if args.nac_oob == "clip" else "r"}'
f'_rs-{args.regualizer_scaling}-{args.regualizer_shape}'
f'_eps-{args.mnac_epsilon}'
f'_rl-{args.regualizer_scaling_start}-{args.regualizer_scaling_end}'
f'_r-{args.regualizer}-{args.regualizer_z}-{args.regualizer_oob}'
f'_m-{"s" if args.softmax_transform else "l"}-{args.model_simplification[0]}'
f'_i-{args.interpolation_length}'
f'_e-{"-".join(map(str, args.extrapolation_lengths))}'
f'_b{args.batch_size}'
f'_s{args.seed}',
remove_existing_data=args.remove_existing_data
)
# Set threads
if 'LSB_DJOB_NUMPROC' in os.environ:
torch.set_num_threads(int(os.environ['LSB_DJOB_NUMPROC']))
# Set seed
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
# Setup datasets
dataset = stable_nalu.dataset.SequentialMnistDataset(
operation=args.operation,
use_cuda=args.cuda,
seed=args.seed,
mnist_digits=args.mnist_digits
)
dataset_train = dataset.fork(seq_length=args.interpolation_length, subset='train').dataloader(shuffle=True)
# Seeds are from random.org
dataset_train_full = dataset.fork(seq_length=args.interpolation_length, subset='train',
seed=62379872).dataloader(shuffle=False)
dataset_valid = dataset.fork(seq_length=args.interpolation_length, subset='valid',
seed=47430696).dataloader(shuffle=False)
dataset_test_extrapolations = [
( seq_length,
dataset.fork(seq_length=seq_length, subset='test',
seed=88253339).dataloader(shuffle=False)
) for seq_length in args.extrapolation_lengths
]
# setup model
model = stable_nalu.network.SequentialMnistNetwork(
args.layer_type,
output_size=dataset.get_item_shape().target[-1],
writer=summary_writer.every(100).verbose(args.verbose),
mnist_digits=args.mnist_digits,
mnist_outputs=args.mnist_outputs,
model_simplification=args.model_simplification,
softmax_transform=args.softmax_transform,
nac_mul=args.nac_mul,
nac_oob=args.nac_oob,
regualizer_shape=args.regualizer_shape,
regualizer_z=args.regualizer_z,
mnac_epsilon=args.mnac_epsilon,
nalu_bias=args.nalu_bias,
nalu_two_nac=args.nalu_two_nac,
nalu_two_gate=args.nalu_two_gate,
nalu_mul=args.nalu_mul,
nalu_gate=args.nalu_gate,
)
model.reset_parameters()
if args.cuda:
model.cuda()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
seq_index = slice(None) if dataset.get_item_shape().target[0] is None else -1
def accuracy(y, t):
return torch.mean((torch.round(y) == t).float())
def test_model(dataloader):
with torch.no_grad(), model.no_internal_logging(), model.no_random():
mse_loss = 0
acc_all = 0
acc_last = 0
for x, t in dataloader:
# forward
_, y = model(x)
mse_loss += criterion(y[:,seq_index,:], t[:,seq_index,:]).item() * len(t)
acc_all += accuracy(y[:,seq_index,:], t[:,seq_index,:]).item() * len(t)
acc_last += accuracy(y[:,-1,:], t[:,-1,:]).item() * len(t)
return (
mse_loss / len(dataloader.dataset),
acc_all / len(dataloader.dataset),
acc_last / len(dataloader.dataset)
)
# Train model
global_step = 0
for epoch_i in range(0, args.max_epochs + 1):
for i_train, (x_train, t_train) in enumerate(dataset_train):
global_step += 1
summary_writer.set_iteration(global_step)
summary_writer.add_scalar('epoch', epoch_i)
# Prepear model
model.set_parameter('tau', max(0.5, math.exp(-1e-5 * global_step)))
optimizer.zero_grad()
# Log validation
if epoch_i % 5 == 0 and i_train == 0:
(train_full_mse,
train_full_acc_all,
train_full_acc_last) = test_model(dataset_train_full)
summary_writer.add_scalar('metric/train/mse', train_full_mse)
summary_writer.add_scalar('metric/train/acc/all', train_full_acc_all)
summary_writer.add_scalar('metric/train/acc/last', train_full_acc_last)
(valid_mse,
valid_acc_all,
valid_acc_last) = test_model(dataset_valid)
summary_writer.add_scalar('metric/valid/mse', valid_mse)
summary_writer.add_scalar('metric/valid/acc/all', valid_acc_all)
summary_writer.add_scalar('metric/valid/acc/last', valid_acc_last)
for seq_length, dataloader in dataset_test_extrapolations:
(test_extrapolation_mse,
test_extrapolation_acc_all,
test_extrapolation_acc_last) = test_model(dataloader)
summary_writer.add_scalar(f'metric/test/extrapolation/{seq_length}/mse', test_extrapolation_mse)
summary_writer.add_scalar(f'metric/test/extrapolation/{seq_length}/acc/all', test_extrapolation_acc_all)
summary_writer.add_scalar(f'metric/test/extrapolation/{seq_length}/acc/last', test_extrapolation_acc_last)
# forward
with summary_writer.force_logging(epoch_i % 5 == 0 and i_train == 0):
mnist_y_train, y_train = model(x_train)
regualizers = model.regualizer()
if (args.regualizer_scaling == 'linear'):
r_w_scale = max(0, min(1, (
(global_step - args.regualizer_scaling_start) /
(args.regualizer_scaling_end - args.regualizer_scaling_start)
)))
elif (args.regualizer_scaling == 'exp'):
r_w_scale = 1 - math.exp(-1e-5 * global_step)
loss_train_criterion = criterion(y_train[:,seq_index,:], t_train[:,seq_index,:])
loss_train_regualizer = args.regualizer * r_w_scale * regualizers['W'] + regualizers['g'] + args.regualizer_z * regualizers['z'] + args.regualizer_oob * regualizers['W-OOB']
loss_train = loss_train_criterion + loss_train_regualizer
# Log loss
summary_writer.add_scalar('loss/train/accuracy/all', accuracy(y_train[:,seq_index,:], t_train[:,seq_index,:]))
summary_writer.add_scalar('loss/train/accuracy/last', accuracy(y_train[:,-1,:], t_train[:,-1,:]))
summary_writer.add_scalar('loss/train/critation', loss_train_criterion)
summary_writer.add_scalar('loss/train/regualizer', loss_train_regualizer)
summary_writer.add_scalar('loss/train/total', loss_train)
if epoch_i % 5 == 0 and i_train == 0:
summary_writer.add_tensor('MNIST/train',
torch.cat([mnist_y_train[:,0,:], t_train[:,0,:]], dim=1))
print('train %d: %.5f, full: %.5f, %.3f (acc[last]), valid: %.5f, %.3f (acc[last])' % (
epoch_i, loss_train_criterion, train_full_mse, train_full_acc_last, valid_mse, valid_acc_last
))
# Optimize model
if loss_train.requires_grad:
loss_train.backward()
optimizer.step()
model.optimize(loss_train_criterion)
# Log gradients if in verbose mode
with summary_writer.force_logging(epoch_i % 5 == 0 and i_train == 0):
model.log_gradients()
# Write results for this training
print(f'finished:')
print(f' - loss_train: {loss_train}')
print(f' - validation: {valid_mse}')
# Use saved weights to visualize the intermediate values.
stable_nalu.writer.save_model(summary_writer.name, model)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/experiments/sequential_svhn.py | experiments/sequential_svhn.py |
import os
import ast
import math
import torch
import stable_nalu
import argparse
# Parse arguments
parser = argparse.ArgumentParser(description='Run either the SVHN counting or SVHN Arithmetic task')
parser.add_argument('--layer-type',
action='store',
default='NALU',
choices=list(stable_nalu.network.SequentialSvhnNetwork.UNIT_NAMES),
type=str,
help='Specify the layer type, e.g. RNN-tanh, LSTM, NAC, NALU')
parser.add_argument('--operation',
action='store',
default='cumsum',
choices=[
'cumsum', 'sum', 'cumprod', 'prod'
],
type=str,
help='Specify the operation to use, sum or count')
parser.add_argument('--resnet',
action='store',
default='resnet18',
choices=[
'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152'
],
type=str,
help='Specify the resnet18 version')
parser.add_argument('--regualizer',
action='store',
default=10,
type=float,
help='Specify the regualization lambda to be used')
parser.add_argument('--regualizer-z',
action='store',
default=0,
type=float,
help='Specify the z-regualization lambda to be used')
parser.add_argument('--regualizer-oob',
action='store',
default=1,
type=float,
help='Specify the oob-regualization lambda to be used')
parser.add_argument('--svhn-digits',
action='store',
default=[0,1,2,3,4,5,6,7,8,9],
type=lambda str: list(map(int,str)),
help='SVHN digits to use')
parser.add_argument('--svhn-outputs',
action='store',
default=1,
type=int,
help='number of SVHN outputs to use, more than 1 adds redundant values')
parser.add_argument('--model-simplification',
action='store',
default='none',
choices=[
'none', 'solved-accumulator', 'pass-through'
],
type=str,
help='Simplifiations applied to the model')
parser.add_argument('--max-epochs',
action='store',
default=1000,
type=int,
help='Specify the max number of epochs to use')
parser.add_argument('--batch-size',
action='store',
default=64,
type=int,
help='Specify the batch-size to be used for training')
parser.add_argument('--seed',
action='store',
default=0,
type=int,
help='Specify the seed to use')
parser.add_argument('--interpolation-length',
action='store',
default=10,
type=int,
help='Specify the sequence length for interpolation')
parser.add_argument('--extrapolation-lengths',
action='store',
default=[100, 1000],
type=ast.literal_eval,
help='Specify the sequence lengths used for the extrapolation dataset')
parser.add_argument('--nac-mul',
action='store',
default='none',
choices=['none', 'normal', 'safe', 'max-safe', 'mnac'],
type=str,
help='Make the second NAC a multiplicative NAC, used in case of a just NAC network.')
parser.add_argument('--nac-oob',
action='store',
default='clip',
choices=['regualized', 'clip'],
type=str,
help='Choose of out-of-bound should be handled by clipping or regualization.')
parser.add_argument('--regualizer-scaling',
action='store',
default='linear',
choices=['exp', 'linear'],
type=str,
help='Use an expoentational scaling from 0 to 1, or a linear scaling.')
parser.add_argument('--regualizer-scaling-start',
action='store',
default=10000,
type=int,
help='Start linear scaling at this global step.')
parser.add_argument('--regualizer-scaling-end',
action='store',
default=100000,
type=int,
help='Stop linear scaling at this global step.')
parser.add_argument('--regualizer-shape',
action='store',
default='linear',
choices=['squared', 'linear'],
type=str,
help='Use either a squared or linear shape for the bias and oob regualizer.')
parser.add_argument('--mnac-epsilon',
action='store',
default=0,
type=float,
help='Set the idendity epsilon for MNAC.')
parser.add_argument('--nalu-bias',
action='store_true',
default=False,
help='Enables bias in the NALU gate')
parser.add_argument('--nalu-two-nac',
action='store_true',
default=False,
help='Uses two independent NACs in the NALU Layer')
parser.add_argument('--nalu-two-gate',
action='store_true',
default=False,
help='Uses two independent gates in the NALU Layer')
parser.add_argument('--nalu-mul',
action='store',
default='normal',
choices=['normal', 'safe', 'trig', 'max-safe', 'mnac'],
help='Multplication unit, can be normal, safe, trig')
parser.add_argument('--nalu-gate',
action='store',
default='normal',
choices=['normal', 'regualized', 'obs-gumbel', 'gumbel'],
type=str,
help='Can be normal, regualized, obs-gumbel, or gumbel')
parser.add_argument('--no-cuda',
action='store_true',
default=False,
help=f'Force no CUDA (cuda usage is detected automatically as {torch.cuda.is_available()})')
parser.add_argument('--name-prefix',
action='store',
default='sequence_svhn',
type=str,
help='Where the data should be stored')
parser.add_argument('--remove-existing-data',
action='store_true',
default=False,
help='Should old results be removed')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Should network measures (e.g. gates) and gradients be shown')
args = parser.parse_args()
setattr(args, 'cuda', torch.cuda.is_available() and not args.no_cuda)
# Print configuration
print(f'running')
print(f' - layer_type: {args.layer_type}')
print(f' - operation: {args.operation}')
print(f' - resnet: {args.resnet}')
print(f' - regualizer: {args.regualizer}')
print(f' - regualizer_z: {args.regualizer_z}')
print(f' - regualizer_oob: {args.regualizer_oob}')
print(f' - svhn_digits: {args.svhn_digits}')
print(f' - svhn_outputs: {args.svhn_outputs}')
print(f' - model_simplification: {args.model_simplification}')
print(f' -')
print(f' - max_epochs: {args.max_epochs}')
print(f' - batch_size: {args.batch_size}')
print(f' - seed: {args.seed}')
print(f' -')
print(f' - interpolation_length: {args.interpolation_length}')
print(f' - extrapolation_lengths: {args.extrapolation_lengths}')
print(f' -')
print(f' - nac_mul: {args.nac_mul}')
print(f' - nac_oob: {args.nac_oob}')
print(f' - regualizer_scaling: {args.regualizer_scaling}')
print(f' - regualizer_scaling_start: {args.regualizer_scaling_start}')
print(f' - regualizer_scaling_end: {args.regualizer_scaling_end}')
print(f' - regualizer_shape: {args.regualizer_shape}')
print(f' - mnac_epsilon: {args.mnac_epsilon}')
print(f' - nalu_bias: {args.nalu_bias}')
print(f' - nalu_two_nac: {args.nalu_two_nac}')
print(f' - nalu_two_gate: {args.nalu_two_gate}')
print(f' - nalu_mul: {args.nalu_mul}')
print(f' - nalu_gate: {args.nalu_gate}')
print(f' -')
print(f' - cuda: {args.cuda}')
print(f' - name_prefix: {args.name_prefix}')
print(f' - remove_existing_data: {args.remove_existing_data}')
print(f' - verbose: {args.verbose}')
# Prepear logging
summary_writer = stable_nalu.writer.SummaryWriter(
f'{args.name_prefix}/{args.layer_type.lower()}'
f'{"-nac-" if args.nac_mul != "none" else ""}'
f'{"n" if args.nac_mul == "normal" else ""}'
f'{"s" if args.nac_mul == "safe" else ""}'
f'{"s" if args.nac_mul == "max-safe" else ""}'
f'{"t" if args.nac_mul == "trig" else ""}'
f'{"m" if args.nac_mul == "mnac" else ""}'
f'{"-nalu-" if (args.nalu_bias or args.nalu_two_nac or args.nalu_two_gate or args.nalu_mul != "normal" or args.nalu_gate != "normal") else ""}'
f'{"b" if args.nalu_bias else ""}'
f'{"2n" if args.nalu_two_nac else ""}'
f'{"2g" if args.nalu_two_gate else ""}'
f'{"s" if args.nalu_mul == "safe" else ""}'
f'{"s" if args.nalu_mul == "max-safe" else ""}'
f'{"t" if args.nalu_mul == "trig" else ""}'
f'{"m" if args.nalu_mul == "mnac" else ""}'
f'{"r" if args.nalu_gate == "regualized" else ""}'
f'{"u" if args.nalu_gate == "gumbel" else ""}'
f'{"uu" if args.nalu_gate == "obs-gumbel" else ""}'
f'_d-{"".join(map(str, args.svhn_digits))}'
f'_h-{args.svhn_outputs}'
f'_op-{args.operation.lower()}'
f'_net-{args.resnet[6:]}'
f'_oob-{"c" if args.nac_oob == "clip" else "r"}'
f'_rs-{args.regualizer_scaling}-{args.regualizer_shape}'
f'_eps-{args.mnac_epsilon}'
f'_rl-{args.regualizer_scaling_start}-{args.regualizer_scaling_end}'
f'_r-{args.regualizer}-{args.regualizer_z}-{args.regualizer_oob}'
f'_m-{args.model_simplification[0]}'
f'_i-{args.interpolation_length}'
f'_e-{"-".join(map(str, args.extrapolation_lengths))}'
f'_b{args.batch_size}'
f'_s{args.seed}',
remove_existing_data=args.remove_existing_data
)
# Set threads
if 'LSB_DJOB_NUMPROC' in os.environ:
torch.set_num_threads(int(os.environ['LSB_DJOB_NUMPROC']))
# Set seed
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
# Setup datasets
dataset = stable_nalu.dataset.SequentialSvhnDataset(
operation=args.operation,
use_cuda=args.cuda,
seed=args.seed,
svhn_digits=args.svhn_digits
)
dataset_train = dataset.fork(seq_length=args.interpolation_length, subset='train').dataloader(shuffle=True)
# Seeds are from random.org
dataset_train_full = dataset.fork(seq_length=args.interpolation_length, subset='train',
seed=62379872).dataloader(shuffle=False)
dataset_valid = dataset.fork(seq_length=args.interpolation_length, subset='valid',
seed=47430696).dataloader(shuffle=False)
dataset_test_extrapolations = [
( seq_length,
dataset.fork(seq_length=seq_length, subset='test',
seed=88253339).dataloader(shuffle=False)
) for seq_length in args.extrapolation_lengths
]
# setup model
model = stable_nalu.network.SequentialSvhnNetwork(
args.layer_type,
output_size=dataset.get_item_shape().target[-1],
writer=summary_writer.every(100).verbose(args.verbose),
svhn_outputs=args.svhn_outputs,
model_simplification=args.model_simplification,
nac_mul=args.nac_mul,
nac_oob=args.nac_oob,
regualizer_shape=args.regualizer_shape,
regualizer_z=args.regualizer_z,
mnac_epsilon=args.mnac_epsilon,
nalu_bias=args.nalu_bias,
nalu_two_nac=args.nalu_two_nac,
nalu_two_gate=args.nalu_two_gate,
nalu_mul=args.nalu_mul,
nalu_gate=args.nalu_gate,
)
model.reset_parameters()
if args.cuda:
model.cuda()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
seq_index = slice(None) if dataset.get_item_shape().target[0] is None else -1
def accuracy(y, t):
return torch.mean((torch.round(y) == t).float())
def test_model(dataloader):
    """Evaluate the model over a full dataloader.

    Returns a 3-tuple of dataset-mean values:
      (MSE over the `seq_index` slice,
       accuracy over the `seq_index` slice,
       accuracy of the final time step only).

    Gradients, internal summary logging, and stochastic units are disabled
    for the duration of the evaluation.
    """
    with torch.no_grad(), model.no_internal_logging(), model.no_random():
        mse_loss = 0
        acc_all = 0
        acc_last = 0
        for x, t in dataloader:
            # forward; the first output (SVHN digit predictions) is ignored here
            _, y = model(x)
            # Weight each batch metric by its batch size so the final division
            # by len(dataloader.dataset) yields a per-example mean.
            mse_loss += criterion(y[:,seq_index,:], t[:,seq_index,:]).item() * len(t)
            acc_all += accuracy(y[:,seq_index,:], t[:,seq_index,:]).item() * len(t)
            acc_last += accuracy(y[:,-1,:], t[:,-1,:]).item() * len(t)
        return (
            mse_loss / len(dataloader.dataset),
            acc_all / len(dataloader.dataset),
            acc_last / len(dataloader.dataset)
        )
# Train model
global_step = 0
for epoch_i in range(0, args.max_epochs + 1):
for i_train, (x_train, t_train) in enumerate(dataset_train):
global_step += 1
summary_writer.set_iteration(global_step)
summary_writer.add_scalar('epoch', epoch_i)
# Prepear model
model.set_parameter('tau', max(0.5, math.exp(-1e-5 * global_step)))
optimizer.zero_grad()
# Log validation
if epoch_i % 5 == 0 and i_train == 0:
(train_full_mse,
train_full_acc_all,
train_full_acc_last) = test_model(dataset_train_full)
summary_writer.add_scalar('metric/train/mse', train_full_mse)
summary_writer.add_scalar('metric/train/acc/all', train_full_acc_all)
summary_writer.add_scalar('metric/train/acc/last', train_full_acc_last)
(valid_mse,
valid_acc_all,
valid_acc_last) = test_model(dataset_valid)
summary_writer.add_scalar('metric/valid/mse', valid_mse)
summary_writer.add_scalar('metric/valid/acc/all', valid_acc_all)
summary_writer.add_scalar('metric/valid/acc/last', valid_acc_last)
for seq_length, dataloader in dataset_test_extrapolations:
(test_extrapolation_mse,
test_extrapolation_acc_all,
test_extrapolation_acc_last) = test_model(dataloader)
summary_writer.add_scalar(f'metric/test/extrapolation/{seq_length}/mse', test_extrapolation_mse)
summary_writer.add_scalar(f'metric/test/extrapolation/{seq_length}/acc/all', test_extrapolation_acc_all)
summary_writer.add_scalar(f'metric/test/extrapolation/{seq_length}/acc/last', test_extrapolation_acc_last)
# forward
with summary_writer.force_logging(epoch_i % 5 == 0 and i_train == 0):
svhn_y_train, y_train = model(x_train)
regualizers = model.regualizer()
if (args.regualizer_scaling == 'linear'):
r_w_scale = max(0, min(1, (
(global_step - args.regualizer_scaling_start) /
(args.regualizer_scaling_end - args.regualizer_scaling_start)
)))
elif (args.regualizer_scaling == 'exp'):
r_w_scale = 1 - math.exp(-1e-5 * global_step)
loss_train_criterion = criterion(y_train[:,seq_index,:], t_train[:,seq_index,:])
loss_train_regualizer = args.regualizer * r_w_scale * regualizers['W'] + regualizers['g'] + args.regualizer_z * regualizers['z'] + args.regualizer_oob * regualizers['W-OOB']
loss_train = loss_train_criterion + loss_train_regualizer
# Log loss
summary_writer.add_scalar('loss/train/accuracy/all', accuracy(y_train[:,seq_index,:], t_train[:,seq_index,:]))
summary_writer.add_scalar('loss/train/accuracy/last', accuracy(y_train[:,-1,:], t_train[:,-1,:]))
summary_writer.add_scalar('loss/train/critation', loss_train_criterion)
summary_writer.add_scalar('loss/train/regualizer', loss_train_regualizer)
summary_writer.add_scalar('loss/train/total', loss_train)
if epoch_i % 5 == 0 and i_train == 0:
summary_writer.add_tensor('SVHN/train',
torch.cat([svhn_y_train[:,0,:], t_train[:,0,:]], dim=1))
print('train %d: %.5f, full: %.5f, %.3f (acc[last]), valid: %.5f, %.3f (acc[last])' % (
epoch_i, loss_train_criterion, train_full_mse, train_full_acc_last, valid_mse, valid_acc_last
))
# Optimize model
if loss_train.requires_grad:
loss_train.backward()
optimizer.step()
model.optimize(loss_train_criterion)
# Log gradients if in verbose mode
with summary_writer.force_logging(epoch_i % 5 == 0 and i_train == 0):
model.log_gradients()
# Write results for this training
print(f'finished:')
print(f' - loss_train: {loss_train}')
print(f' - validation: {valid_mse}')
# Use saved weights to visualize the intermediate values.
stable_nalu.writer.save_model(summary_writer.name, model)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/experiments/simple_function_static.py | experiments/simple_function_static.py |
import os
import ast
import math
import torch
import stable_nalu
import argparse
# Parse arguments
parser = argparse.ArgumentParser(description='Runs the simple function static task')
parser.add_argument('--layer-type',
action='store',
default='NALU',
choices=list(stable_nalu.network.SimpleFunctionStaticNetwork.UNIT_NAMES),
type=str,
help='Specify the layer type, e.g. Tanh, ReLU, NAC, NALU')
parser.add_argument('--operation',
action='store',
default='add',
choices=[
'add', 'sub', 'mul', 'div', 'squared', 'root'
],
type=str,
help='Specify the operation to use, e.g. add, mul, squared')
parser.add_argument('--num-subsets',
action='store',
default=2,
type=int,
help='Specify the number of subsets to use')
parser.add_argument('--regualizer',
action='store',
default=10,
type=float,
help='Specify the regualization lambda to be used')
parser.add_argument('--regualizer-z',
action='store',
default=0,
type=float,
help='Specify the z-regualization lambda to be used')
parser.add_argument('--regualizer-oob',
action='store',
default=1,
type=float,
help='Specify the oob-regualization lambda to be used')
parser.add_argument('--first-layer',
action='store',
default=None,
help='Set the first layer to be a different type')
parser.add_argument('--max-iterations',
action='store',
default=100000,
type=int,
help='Specify the max number of iterations to use')
parser.add_argument('--batch-size',
action='store',
default=128,
type=int,
help='Specify the batch-size to be used for training')
parser.add_argument('--seed',
action='store',
default=0,
type=int,
help='Specify the seed to use')
parser.add_argument('--interpolation-range',
action='store',
default=[1,2],
type=ast.literal_eval,
help='Specify the interpolation range that is sampled uniformly from')
parser.add_argument('--extrapolation-range',
action='store',
default=[2,6],
type=ast.literal_eval,
help='Specify the extrapolation range that is sampled uniformly from')
parser.add_argument('--input-size',
action='store',
default=100,
type=int,
help='Specify the input size')
parser.add_argument('--subset-ratio',
action='store',
default=0.25,
type=float,
help='Specify the subset-size as a fraction of the input-size')
parser.add_argument('--overlap-ratio',
action='store',
default=0.5,
type=float,
help='Specify the overlap-size as a fraction of the input-size')
parser.add_argument('--simple',
action='store_true',
default=False,
help='Use a very simple dataset with t = sum(v[0:2]) + sum(v[4:6])')
parser.add_argument('--hidden-size',
action='store',
default=2,
type=int,
help='Specify the vector size of the hidden layer.')
parser.add_argument('--nac-mul',
action='store',
default='none',
choices=['none', 'normal', 'safe', 'max-safe', 'mnac'],
type=str,
help='Make the second NAC a multiplicative NAC, used in case of a just NAC network.')
parser.add_argument('--oob-mode',
action='store',
default='clip',
choices=['regualized', 'clip'],
type=str,
help='Choose of out-of-bound should be handled by clipping or regualization.')
parser.add_argument('--regualizer-scaling',
action='store',
default='linear',
choices=['exp', 'linear'],
type=str,
help='Use an expoentational scaling from 0 to 1, or a linear scaling.')
parser.add_argument('--regualizer-scaling-start',
action='store',
default=1000000,
type=int,
help='Start linear scaling at this global step.')
parser.add_argument('--regualizer-scaling-end',
action='store',
default=2000000,
type=int,
help='Stop linear scaling at this global step.')
parser.add_argument('--regualizer-shape',
action='store',
default='linear',
choices=['squared', 'linear'],
type=str,
help='Use either a squared or linear shape for the bias and oob regualizer.')
parser.add_argument('--mnac-epsilon',
action='store',
default=0,
type=float,
help='Set the idendity epsilon for MNAC.')
parser.add_argument('--nalu-bias',
action='store_true',
default=False,
help='Enables bias in the NALU gate')
parser.add_argument('--nalu-two-nac',
action='store_true',
default=False,
help='Uses two independent NACs in the NALU Layer')
parser.add_argument('--nalu-two-gate',
action='store_true',
default=False,
help='Uses two independent gates in the NALU Layer')
parser.add_argument('--nalu-mul',
action='store',
default='normal',
choices=['normal', 'safe', 'trig', 'max-safe', 'mnac'],
help='Multplication unit, can be normal, safe, trig')
parser.add_argument('--nalu-gate',
action='store',
default='normal',
choices=['normal', 'regualized', 'obs-gumbel', 'gumbel'],
type=str,
help='Can be normal, regualized, obs-gumbel, or gumbel')
parser.add_argument('--optimizer',
action='store',
default='adam',
choices=['adam', 'sgd'],
type=str,
help='The optimization algorithm to use, Adam or SGD')
parser.add_argument('--learning-rate',
action='store',
default=1e-3,
type=float,
help='Specify the learning-rate')
parser.add_argument('--momentum',
action='store',
default=0.0,
type=float,
help='Specify the nestrov momentum, only used with SGD')
parser.add_argument('--no-cuda',
action='store_true',
default=False,
help=f'Force no CUDA (cuda usage is detected automatically as {torch.cuda.is_available()})')
parser.add_argument('--name-prefix',
action='store',
default='simple_function_static',
type=str,
help='Where the data should be stored')
parser.add_argument('--remove-existing-data',
action='store_true',
default=False,
help='Should old results be removed')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Should network measures (e.g. gates) and gradients be shown')
args = parser.parse_args()
setattr(args, 'cuda', torch.cuda.is_available() and not args.no_cuda)
# Print configuration
print(f'running')
print(f' - layer_type: {args.layer_type}')
print(f' - first_layer: {args.first_layer}')
print(f' - operation: {args.operation}')
print(f' - num_subsets: {args.num_subsets}')
print(f' - regualizer: {args.regualizer}')
print(f' - regualizer_z: {args.regualizer_z}')
print(f' - regualizer_oob: {args.regualizer_oob}')
print(f' -')
print(f' - max_iterations: {args.max_iterations}')
print(f' - batch_size: {args.batch_size}')
print(f' - seed: {args.seed}')
print(f' -')
print(f' - interpolation_range: {args.interpolation_range}')
print(f' - extrapolation_range: {args.extrapolation_range}')
print(f' - input_size: {args.input_size}')
print(f' - subset_ratio: {args.subset_ratio}')
print(f' - overlap_ratio: {args.overlap_ratio}')
print(f' - simple: {args.simple}')
print(f' -')
print(f' - hidden_size: {args.hidden_size}')
print(f' - nac_mul: {args.nac_mul}')
print(f' - oob_mode: {args.oob_mode}')
print(f' - regualizer_scaling: {args.regualizer_scaling}')
print(f' - regualizer_scaling_start: {args.regualizer_scaling_start}')
print(f' - regualizer_scaling_end: {args.regualizer_scaling_end}')
print(f' - regualizer_shape: {args.regualizer_shape}')
print(f' - mnac_epsilon: {args.mnac_epsilon}')
print(f' - nalu_bias: {args.nalu_bias}')
print(f' - nalu_two_nac: {args.nalu_two_nac}')
print(f' - nalu_two_gate: {args.nalu_two_gate}')
print(f' - nalu_mul: {args.nalu_mul}')
print(f' - nalu_gate: {args.nalu_gate}')
print(f' -')
print(f' - optimizer: {args.optimizer}')
print(f' - learning_rate: {args.learning_rate}')
print(f' - momentum: {args.momentum}')
print(f' -')
print(f' - cuda: {args.cuda}')
print(f' - name_prefix: {args.name_prefix}')
print(f' - remove_existing_data: {args.remove_existing_data}')
print(f' - verbose: {args.verbose}')
# Prepear logging
summary_writer = stable_nalu.writer.SummaryWriter(
f'{args.name_prefix}/{args.layer_type.lower()}'
f'{"-nac-" if args.nac_mul != "none" else ""}'
f'{"n" if args.nac_mul == "normal" else ""}'
f'{"s" if args.nac_mul == "safe" else ""}'
f'{"s" if args.nac_mul == "max-safe" else ""}'
f'{"t" if args.nac_mul == "trig" else ""}'
f'{"m" if args.nac_mul == "mnac" else ""}'
f'{"-nalu-" if (args.nalu_bias or args.nalu_two_nac or args.nalu_two_gate or args.nalu_mul != "normal" or args.nalu_gate != "normal") else ""}'
f'{"b" if args.nalu_bias else ""}'
f'{"2n" if args.nalu_two_nac else ""}'
f'{"2g" if args.nalu_two_gate else ""}'
f'{"s" if args.nalu_mul == "safe" else ""}'
f'{"s" if args.nalu_mul == "max-safe" else ""}'
f'{"t" if args.nalu_mul == "trig" else ""}'
f'{"m" if args.nalu_mul == "mnac" else ""}'
f'{"r" if args.nalu_gate == "regualized" else ""}'
f'{"u" if args.nalu_gate == "gumbel" else ""}'
f'{"uu" if args.nalu_gate == "obs-gumbel" else ""}'
f'_op-{args.operation.lower()}'
f'_oob-{"c" if args.oob_mode == "clip" else "r"}'
f'_rs-{args.regualizer_scaling}-{args.regualizer_shape}'
f'_eps-{args.mnac_epsilon}'
f'_rl-{args.regualizer_scaling_start}-{args.regualizer_scaling_end}'
f'_r-{args.regualizer}-{args.regualizer_z}-{args.regualizer_oob}'
f'_i-{args.interpolation_range[0]}-{args.interpolation_range[1]}'
f'_e-{args.extrapolation_range[0]}-{args.extrapolation_range[1]}'
f'_z-{"simple" if args.simple else f"{args.input_size}-{args.subset_ratio}-{args.overlap_ratio}"}'
f'_b{args.batch_size}'
f'_s{args.seed}'
f'_h{args.hidden_size}'
f'_z{args.num_subsets}'
f'_lr-{args.optimizer}-{"%.5f" % args.learning_rate}-{args.momentum}',
remove_existing_data=args.remove_existing_data
)
# Set threads
if 'LSB_DJOB_NUMPROC' in os.environ:
torch.set_num_threads(int(os.environ['LSB_DJOB_NUMPROC']))
# Set seed
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
# Setup datasets
dataset = stable_nalu.dataset.SimpleFunctionStaticDataset(
operation=args.operation,
input_size=args.input_size,
subset_ratio=args.subset_ratio,
overlap_ratio=args.overlap_ratio,
num_subsets=args.num_subsets,
simple=args.simple,
use_cuda=args.cuda,
seed=args.seed,
)
print(f' -')
print(f' - dataset: {dataset.print_operation()}')
# Interpolation and extrapolation seeds are from random.org
dataset_train = iter(dataset.fork(sample_range=args.interpolation_range).dataloader(batch_size=args.batch_size))
dataset_valid_interpolation_data = next(iter(dataset.fork(sample_range=args.interpolation_range, seed=43953907).dataloader(batch_size=10000)))
dataset_test_extrapolation_data = next(iter(dataset.fork(sample_range=args.extrapolation_range, seed=8689336).dataloader(batch_size=10000)))
# setup model
model = stable_nalu.network.SimpleFunctionStaticNetwork(
args.layer_type,
input_size=dataset.get_input_size(),
writer=summary_writer.every(1000).verbose(args.verbose),
first_layer=args.first_layer,
hidden_size=args.hidden_size,
nac_oob=args.oob_mode,
regualizer_shape=args.regualizer_shape,
regualizer_z=args.regualizer_z,
mnac_epsilon=args.mnac_epsilon,
nac_mul=args.nac_mul,
nalu_bias=args.nalu_bias,
nalu_two_nac=args.nalu_two_nac,
nalu_two_gate=args.nalu_two_gate,
nalu_mul=args.nalu_mul,
nalu_gate=args.nalu_gate,
)
model.reset_parameters()
if args.cuda:
model.cuda()
criterion = torch.nn.MSELoss()
if args.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
elif args.optimizer == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum)
else:
raise ValueError(f'{args.optimizer} is not a valid optimizer algorithm')
def test_model(data):
    """Return the MSE of the model on a fixed (x, t) batch.

    Gradients, internal summary logging, and stochastic units are all
    disabled for the duration of the evaluation.
    """
    with torch.no_grad(), model.no_internal_logging(), model.no_random():
        inputs, targets = data
        predictions = model(inputs)
        return criterion(predictions, targets)
# Train model
print('')
for epoch_i, (x_train, t_train) in zip(range(args.max_iterations + 1), dataset_train):
summary_writer.set_iteration(epoch_i)
# Prepear model
model.set_parameter('tau', max(0.5, math.exp(-1e-5 * epoch_i)))
optimizer.zero_grad()
# Log validation
if epoch_i % 1000 == 0:
interpolation_error = test_model(dataset_valid_interpolation_data)
extrapolation_error = test_model(dataset_test_extrapolation_data)
summary_writer.add_scalar('metric/valid/interpolation', interpolation_error)
summary_writer.add_scalar('metric/test/extrapolation', extrapolation_error)
# forward
y_train = model(x_train)
regualizers = model.regualizer()
if (args.regualizer_scaling == 'linear'):
r_w_scale = max(0, min(1, (
(epoch_i - args.regualizer_scaling_start) /
(args.regualizer_scaling_end - args.regualizer_scaling_start)
)))
elif (args.regualizer_scaling == 'exp'):
r_w_scale = 1 - math.exp(-1e-5 * epoch_i)
loss_train_criterion = criterion(y_train, t_train)
loss_train_regualizer = args.regualizer * r_w_scale * regualizers['W'] + regualizers['g'] + args.regualizer_z * regualizers['z'] + args.regualizer_oob * regualizers['W-OOB']
loss_train = loss_train_criterion + loss_train_regualizer
# Log loss
if args.verbose or epoch_i % 1000 == 0:
summary_writer.add_scalar('loss/train/critation', loss_train_criterion)
summary_writer.add_scalar('loss/train/regualizer', loss_train_regualizer)
summary_writer.add_scalar('loss/train/total', loss_train)
if epoch_i % 1000 == 0:
print('train %d: %.5f, inter: %.5f, extra: %.5f' % (epoch_i, loss_train_criterion, interpolation_error, extrapolation_error))
# Optimize model
if loss_train.requires_grad:
loss_train.backward()
optimizer.step()
model.optimize(loss_train_criterion)
# Log gradients if in verbose mode
if args.verbose and epoch_i % 1000 == 0:
model.log_gradients()
# Compute validation loss
loss_valid_inter = test_model(dataset_valid_interpolation_data)
loss_valid_extra = test_model(dataset_test_extrapolation_data)
# Write results for this training
print(f'finished:')
print(f' - loss_train: {loss_train}')
print(f' - loss_valid_inter: {loss_valid_inter}')
print(f' - loss_valid_extra: {loss_valid_extra}')
# Use saved weights to visualize the intermediate values.
stable_nalu.writer.save_model(summary_writer.name, model)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/experiments/simple_function_recurrent.py | experiments/simple_function_recurrent.py |
import math
import torch
import stable_nalu
import argparse
# Parse arguments
parser = argparse.ArgumentParser(description='Runs the simple function static task')
parser.add_argument('--layer-type',
action='store',
default='NALU',
choices=list(stable_nalu.network.SimpleFunctionRecurrentNetwork.UNIT_NAMES),
type=str,
help='Specify the layer type, e.g. RNN-tanh, LSTM, NAC, NALU')
parser.add_argument('--operation',
action='store',
default='add',
choices=[
'add', 'sub', 'mul', 'div', 'squared', 'root'
],
type=str,
help='Specify the operation to use, e.g. add, mul, squared')
parser.add_argument('--seed',
action='store',
default=0,
type=int,
help='Specify the seed to use')
parser.add_argument('--max-iterations',
action='store',
default=100000,
type=int,
help='Specify the max number of iterations to use')
parser.add_argument('--cuda',
action='store',
default=torch.cuda.is_available(),
type=bool,
help=f'Should CUDA be used (detected automatically as {torch.cuda.is_available()})')
parser.add_argument('--simple',
action='store_true',
default=False,
help='Use a very simple dataset with t = sum(v[0:2]) + sum(v[4:6])')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Should network measures (e.g. gates) and gradients be shown')
args = parser.parse_args()
# Print configuration
print(f'running')
print(f' - seed: {args.seed}')
print(f' - operation: {args.operation}')
print(f' - layer_type: {args.layer_type}')
print(f' - simple: {args.simple}')
print(f' - cuda: {args.cuda}')
print(f' - verbose: {args.verbose}')
print(f' - max_iterations: {args.max_iterations}')
# Prepear logging
results_writer = stable_nalu.writer.ResultsWriter('simple_function_recurrent')
summary_writer = stable_nalu.writer.SummaryWriter(
f'simple_function_recurrent/{args.layer_type.lower()}_{args.operation.lower()}_{args.seed}'
)
# Set seed
torch.manual_seed(args.seed)
# Setup datasets
dataset = stable_nalu.dataset.SimpleFunctionRecurrentDataset(
operation=args.operation,
simple=args.simple,
use_cuda=args.cuda,
seed=args.seed
)
dataset_train = iter(dataset.fork(seq_length=10).dataloader(batch_size=128))
dataset_valid_interpolation = iter(dataset.fork(seq_length=10).dataloader(batch_size=2048))
dataset_valid_extrapolation = iter(dataset.fork(seq_length=1000).dataloader(batch_size=2048))
# setup model
model = stable_nalu.network.SimpleFunctionRecurrentNetwork(
args.layer_type,
writer=summary_writer.every(1000) if args.verbose else None
)
if args.cuda:
model.cuda()
model.reset_parameters()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
def test_model(dataloader):
    """Draw the next batch from `dataloader` and return the model's MSE on it.

    Runs without gradients and with internal summary logging disabled.
    """
    with torch.no_grad(), model.no_internal_logging():
        inputs, targets = next(dataloader)
        predictions = model(inputs)
        return criterion(predictions, targets)
# Train model
for epoch_i, (x_train, t_train) in zip(range(args.max_iterations + 1), dataset_train):
summary_writer.set_iteration(epoch_i)
# Prepear model
model.set_parameter('tau', max(0.5, math.exp(-1e-5 * epoch_i)))
optimizer.zero_grad()
# Log validation
if epoch_i % 1000 == 0:
summary_writer.add_scalar('loss/valid/interpolation', test_model(dataset_valid_interpolation))
summary_writer.add_scalar('loss/valid/extrapolation', test_model(dataset_valid_extrapolation))
# forward
y_train = model(x_train)
loss_train_criterion = criterion(y_train, t_train)
loss_train_regualizer = 0.1 * (1 - math.exp(-1e-5 * epoch_i)) * model.regualizer()
loss_train = loss_train_criterion + loss_train_regualizer
# Log loss
summary_writer.add_scalar('loss/train/critation', loss_train_criterion)
summary_writer.add_scalar('loss/train/regualizer', loss_train_regualizer)
summary_writer.add_scalar('loss/train/total', loss_train)
if epoch_i % 1000 == 0:
print(f'train {epoch_i}: {loss_train_criterion}')
# Optimize model
if loss_train.requires_grad:
loss_train.backward()
optimizer.step()
model.optimize(loss_train_criterion)
# Log gradients if in verbose mode
if args.verbose and epoch_i % 1000 == 0:
model.log_gradients()
# Compute validation loss
loss_valid_inter = test_model(dataset_valid_interpolation)
loss_valid_extra = test_model(dataset_valid_extrapolation)
# Write results for this training
print(f'finished:')
print(f' - loss_train: {loss_train}')
print(f' - loss_valid_inter: {loss_valid_inter}')
print(f' - loss_valid_extra: {loss_valid_extra}')
# save results
results_writer.add({
'seed': args.seed,
'operation': args.operation,
'layer_type': args.layer_type,
'simple': args.simple,
'cuda': args.cuda,
'verbose': args.verbose,
'max_iterations': args.max_iterations,
'loss_train': loss_train,
'loss_valid_inter': loss_valid_inter,
'loss_valid_extra': loss_valid_extra
})
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/experiments/number_translation.py | experiments/number_translation.py |
import math
import torch
import stable_nalu
import argparse
# Parse arguments
parser = argparse.ArgumentParser(description='Run the number translation task')
parser.add_argument('--layer-type',
action='store',
default='NALU',
choices=list(stable_nalu.network.NumberTranslationNetwork.UNIT_NAMES),
type=str,
help='Specify the layer type, e.g. RNN-tanh, LSTM, NAC, NALU')
parser.add_argument('--seed',
action='store',
default=0,
type=int,
help='Specify the seed to use')
parser.add_argument('--max-epochs',
action='store',
default=10000,
type=int,
help='Specify the max number of epochs to use')
parser.add_argument('--cuda',
action='store',
default=torch.cuda.is_available(),
type=bool,
help=f'Should CUDA be used (detected automatically as {torch.cuda.is_available()})')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Should network measures (e.g. gates) and gradients be shown')
args = parser.parse_args()
# Print configuration
print(f'running')
print(f' - seed: {args.seed}')
print(f' - layer_type: {args.layer_type}')
print(f' - cuda: {args.cuda}')
print(f' - verbose: {args.verbose}')
print(f' - max_epochs: {args.max_epochs}')
# Prepear logging
results_writer = stable_nalu.writer.ResultsWriter('number_translation')
summary_writer = stable_nalu.writer.SummaryWriter(
f'number_translation/{args.layer_type.lower()}_{args.seed}'
)
# Set seed
torch.manual_seed(args.seed)
# Setup datasets
dataset = stable_nalu.dataset.NumberTranslationDataset(
use_cuda=args.cuda,
seed=args.seed
)
dataset_train = dataset.fork(subset='train').dataloader()
dataset_valid = dataset.fork(subset='valid').dataloader()
# setup model
model = stable_nalu.network.NumberTranslationNetwork(
args.layer_type,
writer=summary_writer.every(150) if args.verbose else None
)
if args.cuda:
model.cuda()
model.reset_parameters()
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters())
def test_model(dataloader):
    """Return the dataset-mean MSE of the model over all batches in `dataloader`.

    Each batch loss is weighted by its batch size, so the result is a mean
    over examples rather than over batches. Gradients and internal summary
    logging are disabled.
    """
    with torch.no_grad(), model.no_internal_logging():
        total_loss = 0
        for x, t in dataloader:
            # forward
            batch_loss = criterion(model(x), t).item()
            total_loss += batch_loss * len(t)
        return total_loss / len(dataloader.dataset)
# Train model
global_step = 0
for epoch_i in range(args.max_epochs):
for i_train, (x_train, t_train) in enumerate(dataset_train):
global_step += 1
summary_writer.set_iteration(global_step)
# Prepear model
model.set_parameter('tau', max(0.5, math.exp(-1e-5 * global_step)))
optimizer.zero_grad()
# Log validation
if epoch_i % 50 == 0 and i_train == 0:
summary_writer.add_scalar('loss/valid', test_model(dataset_valid))
# forward
y_train = model(x_train)
loss_train_criterion = criterion(y_train, t_train)
loss_train_regualizer = 0.1 * (1 - math.exp(-1e-5 * global_step)) * model.regualizer()
loss_train = loss_train_criterion + loss_train_regualizer
# Log loss
summary_writer.add_scalar('loss/train/critation', loss_train_criterion)
summary_writer.add_scalar('loss/train/regualizer', loss_train_regualizer)
summary_writer.add_scalar('loss/train/total', loss_train)
if epoch_i % 50 == 0 and i_train == 0:
print(f'train {epoch_i}: {loss_train_criterion}')
# Optimize model
if loss_train.requires_grad:
loss_train.backward()
optimizer.step()
model.optimize(loss_train_criterion)
# Log gradients if in verbose mode
if args.verbose and epoch_i % 10 == 0 and i_train == 0:
model.log_gradients()
# Compute losses
loss_train = test_model(dataset_train)
loss_valid = test_model(dataset_valid)
# Write results for this training
print(f'finished:')
print(f' - loss_train: {loss_train}')
print(f' - loss_valid: {loss_valid}')
# save results
results_writer.add({
'seed': args.seed,
'layer_type': args.layer_type,
'cuda': args.cuda,
'verbose': args.verbose,
'max_epochs': args.max_epochs,
'loss_train': loss_train,
'loss_valid': loss_valid,
})
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/__init__.py | stable_nalu/__init__.py |
from . import dataset
from . import functional
from . import layer
from . import network
from . import reader
from . import writer | python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/abstract/_extended_torch_module.py | stable_nalu/abstract/_extended_torch_module.py |
import torch
import collections
from ..writer import DummySummaryWriter
class NoRandomScope:
    """Context manager that temporarily disables stochastic behaviour on a module.

    Entering the scope calls ``_disable_random()`` on the wrapped module;
    leaving it calls ``_enable_random()``. Exceptions raised inside the
    block are never suppressed (``__exit__`` returns False).
    """

    def __init__(self, module):
        self._target = module

    def __enter__(self):
        self._target._disable_random()
        return None

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._target._enable_random()
        return False
class ExtendedTorchModule(torch.nn.Module):
    """torch.nn.Module base class with namespaced summary logging and
    recursive helpers (parameter setting, regularizer collection, gradient
    logging, randomness toggling) that propagate through all child modules
    that are also ExtendedTorchModule instances."""

    def __init__(self, default_name, *args, writer=None, name=None, **kwargs):
        """Create the module and attach a (possibly dummy) summary writer.

        Args:
            default_name: Namespace used for logging when `name` is None.
            writer: Summary writer to namespace into; a DummySummaryWriter
                (no-op) is used when omitted.
            name: Optional override for the logging namespace.
        """
        super().__init__()
        if writer is None:
            writer = DummySummaryWriter()
        self.writer = writer.namespace(default_name if name is None else name)
        self.allow_random = True

    def set_parameter(self, name, value):
        """Fill parameter `name` with a constant, here and in all Extended children.

        Silently does nothing on modules that have no such parameter.
        """
        parameter = getattr(self, name, None)
        if isinstance(parameter, torch.nn.Parameter):
            parameter.fill_(value)

        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.set_parameter(name, value)

    def regualizer(self, merge_in=None):
        """Collect regularizer terms from this module and all Extended children.

        Returns a defaultdict(int) mapping regularizer key -> summed value.
        `merge_in` entries are logged as scalars and added to the result.
        """
        regualizers = collections.defaultdict(int)

        if merge_in is not None:
            for key, value in merge_in.items():
                self.writer.add_scalar(f'regualizer/{key}', value)
                regualizers[key] += value

        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                for key, value in module.regualizer().items():
                    regualizers[key] += value

        return regualizers

    def optimize(self, loss):
        """Recursive hook called after each optimizer step; no-op at this level."""
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.optimize(loss)

    def log_gradients(self):
        """Log gradient summaries/histograms for direct parameters, then recurse.

        NOTE(review): `gradient, *_ = parameter.grad.data` unpacks along the
        first dimension, so only the first slice of each gradient is logged —
        confirm this is intended. Also assumes `.grad` is populated (backward
        has run); otherwise `parameter.grad.data` raises AttributeError.
        """
        for name, parameter in self.named_parameters(recurse=False):
            if parameter.requires_grad:
                gradient, *_ = parameter.grad.data
                self.writer.add_summary(f'{name}/grad', gradient)
                self.writer.add_histogram(f'{name}/grad', gradient)

        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module.log_gradients()

    def no_internal_logging(self):
        """Return a context manager that disables this module's summary logging."""
        return self.writer.no_logging()

    def _disable_random(self):
        # Turn off stochastic behaviour here and in all Extended children.
        self.allow_random = False
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module._disable_random()

    def _enable_random(self):
        # Re-enable stochastic behaviour here and in all Extended children.
        self.allow_random = True
        for module in self.children():
            if isinstance(module, ExtendedTorchModule):
                module._enable_random()

    def no_random(self):
        """Return a context manager that disables randomness for this subtree."""
        return NoRandomScope(self)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/abstract/__init__.py | stable_nalu/abstract/__init__.py |
from ._extended_torch_module import ExtendedTorchModule | python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/writer/save_model.py | stable_nalu/writer/save_model.py |
import os
import os.path as path
import torch

# Directory serialized models are written to. Overridable through the
# SAVE_DIR environment variable; defaults to <repo-root>/save, resolved
# relative to this file's real location.
THIS_DIR = path.dirname(path.realpath(__file__))
SAVE_DIR = os.environ.get('SAVE_DIR', path.join(THIS_DIR, '../../save'))

def save_model(name, model):
    """Serialize `model` to '<SAVE_DIR>/<name>.pth', creating parent dirs as needed."""
    destination = path.join(SAVE_DIR, name) + '.pth'
    os.makedirs(path.dirname(destination), exist_ok=True)
    torch.save(model, destination)
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/writer/summary_writer.py | stable_nalu/writer/summary_writer.py |
import os
import shutil
import os.path as path
import torch
import numpy as np
from tensorboardX import SummaryWriter as SummaryWriterRaw
THIS_DIR = path.dirname(path.realpath(__file__))
if 'TENSORBOARD_DIR' in os.environ:
TENSORBOARD_DIR = os.environ['TENSORBOARD_DIR']
else:
TENSORBOARD_DIR = path.join(THIS_DIR, '../../tensorboard')
class SummaryWriterNamespaceNoLoggingScope:
    """Context manager that silences a writer namespace for the duration of a
    ``with`` block by toggling its ``_logging_enabled`` flag. Exceptions are
    never suppressed."""

    def __init__(self, writer):
        self._target = writer

    def __enter__(self):
        self._target._logging_enabled = False
        return None

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._target._logging_enabled = True
        return False
class SummaryWriterNamespaceForceLoggingScope:
    """Context manager that sets a writer's ``_force_logging`` flag to `flag`
    inside the ``with`` block and always resets it to False on exit.
    Exceptions are never suppressed."""

    def __init__(self, writer, flag):
        self._target = writer
        self._flag = flag

    def __enter__(self):
        self._target._force_logging = self._flag
        return None

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._target._force_logging = False
        return False
class SummaryWriterNamespace:
    def __init__(self, namespace='', epoch_interval=1, verbose=True, root=None, parent=None):
        """Create a (possibly nested) namespace over a shared root writer.

        Args:
            namespace: Tag prefix applied to every value logged through here.
            epoch_interval: Only log on iterations divisible by this interval.
            verbose: Whether verbose-only values are logged (see is_verbose).
            root: Namespace that owns the underlying writer and iteration
                counter; None means this instance is itself the root.
            parent: Enclosing namespace, consulted by is_logging_enabled.
        """
        self._namespace = namespace
        self._epoch_interval = epoch_interval
        self._verbose = verbose
        self._parent = parent
        # Per-namespace flags toggled by the scope context managers above.
        self._logging_enabled = True
        self._force_logging = False

        if root is None:
            self._root = self
        else:
            self._root = root
    def get_iteration(self):
        """Return the global iteration counter held by the root namespace.

        NOTE(review): the root is expected to override this method (the root's
        `_root` is itself, so calling this on a bare root would recurse
        forever) — confirm against the SummaryWriter subclass.
        """
        return self._root.get_iteration()
    def is_log_iteration(self):
        """True when the current iteration hits this namespace's logging
        interval, or when the root's force-logging flag is set."""
        return (self._root.get_iteration() % self._epoch_interval == 0) or self._root._force_logging
def is_logging_enabled(self):
writer = self
while writer is not None:
if writer._logging_enabled:
writer = writer._parent
else:
return False
return True
def is_verbose(self, verbose_only):
return (verbose_only is False or self._verbose)
def add_scalar(self, name, value, verbose_only=True):
if self.is_log_iteration() and self.is_logging_enabled() and self.is_verbose(verbose_only):
self._root.writer.add_scalar(f'{self._namespace}/{name}', value, self.get_iteration())
def add_summary(self, name, tensor, verbose_only=True):
if self.is_log_iteration() and self.is_logging_enabled() and self.is_verbose(verbose_only):
self._root.writer.add_scalar(f'{self._namespace}/{name}/mean', torch.mean(tensor), self.get_iteration())
self._root.writer.add_scalar(f'{self._namespace}/{name}/var', torch.var(tensor), self.get_iteration())
def add_tensor(self, name, matrix, verbose_only=True):
if self.is_log_iteration() and self.is_logging_enabled() and self.is_verbose(verbose_only):
data = matrix.detach().cpu().numpy()
data_str = np.array2string(data, max_line_width=60, threshold=np.inf)
self._root.writer.add_text(f'{self._namespace}/{name}', f'<pre>{data_str}</pre>', self.get_iteration())
def add_histogram(self, name, tensor, verbose_only=True):
if torch.isnan(tensor).any():
print(f'nan detected in {self._namespace}/{name}')
tensor = torch.where(torch.isnan(tensor), torch.tensor(0, dtype=tensor.dtype), tensor)
raise ValueError('nan detected')
if self.is_log_iteration() and self.is_logging_enabled() and self.is_verbose(verbose_only):
self._root.writer.add_histogram(f'{self._namespace}/{name}', tensor, self.get_iteration())
def print(self, name, tensor, verbose_only=True):
if self.is_log_iteration() and self.is_logging_enabled() and self.is_verbose(verbose_only):
print(f'{self._namespace}/{name}:')
print(tensor)
def namespace(self, name):
return SummaryWriterNamespace(
namespace=f'{self._namespace}/{name}',
epoch_interval=self._epoch_interval,
verbose=self._verbose,
root=self._root,
parent=self,
)
def every(self, epoch_interval):
return SummaryWriterNamespace(
namespace=self._namespace,
epoch_interval=epoch_interval,
verbose=self._verbose,
root=self._root,
parent=self,
)
def verbose(self, verbose):
return SummaryWriterNamespace(
namespace=self._namespace,
epoch_interval=self._epoch_interval,
verbose=verbose,
root=self._root,
parent=self,
)
def no_logging(self):
return SummaryWriterNamespaceNoLoggingScope(self)
def force_logging(self, flag):
return SummaryWriterNamespaceForceLoggingScope(self, flag)
class SummaryWriter(SummaryWriterNamespace):
    """Root namespace that owns the tensorboardX writer and the iteration counter."""

    def __init__(self, name, remove_existing_data=False, **kwargs):
        super().__init__()
        self.name = name
        self._iteration = 0
        log_dir = path.join(TENSORBOARD_DIR, name)
        # Optionally wipe logs from a previous run with the same name.
        if remove_existing_data and path.exists(log_dir):
            shutil.rmtree(log_dir)
        self.writer = SummaryWriterRaw(log_dir=log_dir, **kwargs)

    def set_iteration(self, iteration):
        self._iteration = iteration

    def get_iteration(self):
        return self._iteration

    def close(self):
        self.writer.close()

    def __del__(self):
        self.close()
class DummySummaryWriter():
    """No-op drop-in replacement for ``SummaryWriter`` that discards all logging."""

    def __init__(self, **kwargs):
        self._logging_enabled = False

    def add_scalar(self, name, value, verbose_only=True):
        pass

    def add_summary(self, name, tensor, verbose_only=True):
        pass

    def add_histogram(self, name, tensor, verbose_only=True):
        pass

    def add_tensor(self, name, tensor, verbose_only=True):
        pass

    def print(self, name, tensor, verbose_only=True):
        pass

    # The namespace-builders return self so chained calls stay no-ops.
    def namespace(self, name):
        return self

    def every(self, epoch_interval):
        return self

    def verbose(self, verbose):
        return self

    def no_logging(self):
        return SummaryWriterNamespaceNoLoggingScope(self)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/writer/__init__.py | stable_nalu/writer/__init__.py |
from .summary_writer import DummySummaryWriter, SummaryWriter
from .save_model import save_model
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/integration_test/simple_function_static_linear_add.py | stable_nalu/integration_test/simple_function_static_linear_add.py |
import numpy as np
import torch
import stable_nalu
def test_linear_add_can_have_zero_loss():
    """A linear network initialized with the known-perfect weights solves 'add' exactly."""
    # Prepare data
    dataset = stable_nalu.dataset.SimpleFunctionStaticDataset(
        operation='add',
        seed=0
    )
    dataset_eval = iter(dataset.fork(input_range=1).dataloader(batch_size=128))
    # Setup pre-solved model: layer 1 selects the a- and b-subsets of the
    # input, layer 2 sums the two resulting values.
    model = stable_nalu.network.SimpleFunctionStaticNetwork('linear')
    w_1 = np.zeros((100, 2), dtype=np.float32)
    w_1[dataset.a_start:dataset.a_end, 0] = 1
    w_1[dataset.b_start:dataset.b_end, 1] = 1
    w_2 = np.ones((2, 1), dtype=np.float32)
    model.layer_1.layer.weight.data = torch.tensor(np.transpose(w_1))
    model.layer_2.layer.weight.data = torch.tensor(np.transpose(w_2))
    # Compute loss; it must be (numerically) zero on every evaluated batch
    criterion = torch.nn.MSELoss()
    for i, (x_train, t_train) in zip(range(5), dataset_eval):
        y_train = model(x_train)
        loss = criterion(y_train, t_train)
        np.testing.assert_almost_equal(
            loss.detach().numpy(),
            0
        )
def test_linear_add_is_trainable():
    """SGD from a seeded random init reaches ~zero loss on 'add' within 200 batches."""
    # Prepare data
    dataset = stable_nalu.dataset.SimpleFunctionStaticDataset(
        operation='add',
        seed=0
    )
    dataset_train = iter(dataset.fork(input_range=1).dataloader(batch_size=128))
    torch.manual_seed(0)
    model = stable_nalu.network.SimpleFunctionStaticNetwork('linear')
    model.reset_parameters()
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    for epoch_i, (x_train, t_train) in zip(range(200), dataset_train):
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        y_train = model(x_train)
        loss = criterion(y_train, t_train)
        loss.backward()
        optimizer.step()
    # Check that last loss is 0 (to 5 decimals)
    np.testing.assert_almost_equal(
        loss.detach().numpy(),
        0,
        decimal=5
    )
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/integration_test/__init__.py | stable_nalu/integration_test/__init__.py | python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false | |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/reader/tensorboard_metric_reader.py | stable_nalu/reader/tensorboard_metric_reader.py |
import re
import ast
import pandas
import collections
import multiprocessing
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from .tensorboard_reader import TensorboardReader
def _parse_numpy_str(array_string):
pattern = r'''# Match (mandatory) whitespace between...
(?<=\]) # ] and
\s+
(?= \[) # [, or
|
(?<=[^\[\]\s])
\s+
(?= [^\[\]\s]) # two non-bracket non-whitespace characters
'''
# Replace such whitespace with a comma
fixed_string = re.sub(pattern, ',', array_string, flags=re.VERBOSE)
return np.array(ast.literal_eval(fixed_string))
def _csv_format_column_name(column_name):
return column_name.replace('/', '.')
def _everything_default_matcher(tag):
return True
class TensorboardMetricReader:
    """Extracts scalar metrics from a directory of tensorboard experiments.

    Iterating yields one ``pandas.DataFrame`` per experiment subdirectory,
    with the selected metrics aligned on the logging step. Parsing is
    distributed over a multiprocessing pool.

    Arguments:
        dirname: directory containing one subdirectory per experiment
        metric_matcher: predicate on the tag name selecting which scalars to keep
        step_start: offset subtracted from every event step
        recursive_weight: if True, also extract the last recurrent weight from
            the ``W/text_summary`` tensor payload
        processes: number of worker processes (None = os.cpu_count())
        progress_bar: show a tqdm progress bar while parsing
    """

    def __init__(self, dirname,
                 metric_matcher=_everything_default_matcher,
                 step_start=0,
                 recursive_weight=False,
                 processes=None,
                 progress_bar=True):
        self.dirname = dirname
        self.metric_matcher = metric_matcher
        self.step_start = step_start
        self.recursive_weight = recursive_weight
        self.processes = processes
        self.progress_bar = progress_bar

    def _parse_tensorboard_data(self, inputs):
        """Parse one event file into a dict of equal-length metric columns.

        Returns ``(dirname, columns)`` where ``columns`` maps column name to a
        list aligned with ``columns['step']``.
        """
        (dirname, filename, reader) = inputs
        columns = collections.defaultdict(list)
        columns['name'] = dirname
        current_epoch = None
        current_logged_step = None
        for e in tf.train.summary_iterator(filename):
            step = e.step - self.step_start
            for v in e.summary.value:
                if v.tag == 'epoch':
                    current_epoch = v.simple_value
                elif self.metric_matcher(v.tag):
                    columns[v.tag].append(v.simple_value)
                    current_logged_step = step
                    # Syncronize the step count with the loss metrics
                    if len(columns['step']) != len(columns[v.tag]):
                        columns['step'].append(step)
                    # Syncronize the wall.time with the loss metrics
                    if len(columns['wall.time']) != len(columns[v.tag]):
                        columns['wall.time'].append(e.wall_time)
                    # Syncronize the epoch with the loss metrics
                    if current_epoch is not None and len(columns['epoch']) != len(columns[v.tag]):
                        columns['epoch'].append(current_epoch)
                elif v.tag.endswith('W/text_summary') and current_logged_step == step:
                    if self.recursive_weight:
                        # Payload is a "<pre>...</pre>"-wrapped numpy repr;
                        # [5:-6] strips the markup before parsing.
                        W = _parse_numpy_str(v.tensor.string_val[0][5:-6].decode('ascii'))
                        if len(columns['step']) != len(columns['recursive.weight']):
                            columns['recursive.weight'].append(W[0, -1])
                elif v.tag.endswith('W/sparsity_error') and current_logged_step == step:
                    # Step changed, update sparse error
                    if len(columns['step']) != len(columns['sparse.error.max']):
                        columns['sparse.error.max'].append(v.simple_value)
                    else:
                        # Several layers log at the same step; keep the maximum.
                        columns['sparse.error.max'][-1] = max(
                            columns['sparse.error.max'][-1],
                            v.simple_value
                        )
        # Pad optional columns with None when they were never logged.
        if len(columns['sparse.error.max']) == 0:
            columns['sparse.error.max'] = [None] * len(columns['step'])
        if self.recursive_weight:
            if len(columns['recursive.weight']) == 0:
                columns['recursive.weight'] = [None] * len(columns['step'])
        return dirname, columns

    def __iter__(self):
        """Yield one ``pandas.DataFrame`` per experiment directory."""
        reader = TensorboardReader(self.dirname, auto_open=False)
        with tqdm(total=len(reader), disable=not self.progress_bar) as pbar, \
                multiprocessing.Pool(self.processes) as pool:
            columns_order = None
            for dirname, data in pool.imap_unordered(self._parse_tensorboard_data, reader):
                pbar.update()
                # Check that some data is present
                # if len(data['step']) == 0:
                #     print(f'missing data in: {dirname}')
                #     continue
                # Fix flushing issue: a metric may be one entry short if the
                # writer was interrupted; pad it so the DataFrame is rectangular.
                for column_name, column_data in data.items():
                    if len(data['step']) - len(column_data) == 1:
                        data[column_name].append(None)
                # Convert to dataframe
                df = pandas.DataFrame(data)
                if len(df) == 0:
                    print(f'Warning: No data for {dirname}')
                    continue
                # Ensure the columns are always order the same
                if columns_order is None:
                    columns_order = df.columns.tolist()
                else:
                    df = df[columns_order]
                df.rename(_csv_format_column_name, axis='columns', inplace=True)
                yield df
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/reader/tensorboard_reader.py | stable_nalu/reader/tensorboard_reader.py |
import os
import os.path as path
import tensorflow as tf
def _listdir_filter_hidden_files(dirpath):
files = os.listdir(dirpath)
files = filter(lambda filename: filename[0] != '.', files)
return list(files)
class TensorboardReader:
    """Iterates the event log files found under a directory of results.

    Each experiment subdirectory must contain exactly one event file.
    """

    def __init__(self, dirpath, auto_open=True):
        self._sourcedir = dirpath
        self._directories = _listdir_filter_hidden_files(dirpath)
        self._auto_open = auto_open

    def __iter__(self):
        """Yield ``(subdir, filename, reader)`` for every experiment.

        ``reader`` is a ``tf.train.summary_iterator`` when ``auto_open`` is
        True, otherwise None so the caller can open the file itself.
        """
        for subdir in self._directories:
            subdir_path = path.join(self._sourcedir, subdir)
            logfiles = _listdir_filter_hidden_files(subdir_path)
            if len(logfiles) != 1:
                raise Exception(f'wrong number of logfile was found in {subdir}')
            filename = path.join(subdir_path, logfiles[0])
            event_reader = tf.train.summary_iterator(filename) if self._auto_open else None
            yield (subdir, filename, event_reader)

    def __len__(self):
        return len(self._directories)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/reader/__init__.py | stable_nalu/reader/__init__.py |
from .tensorboard_metric_reader import TensorboardMetricReader
from .tensorboard_reader import TensorboardReader
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/hard_softmax_nac.py | stable_nalu/layer/hard_softmax_nac.py |
import math
import torch
from ..abstract import ExtendedTorchModule
from ..functional import sparsity_error
from ._abstract_recurrent_cell import AbstractRecurrentCell
class HardSoftmaxNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator) with hard (sampled) softmax weights.

    Each weight is a categorical distribution over {1, -1, 0}; the forward
    pass samples a hard weight but backpropagates through the soft weight
    (straight-through estimator).

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        # Define the target weights. Also, put 0 last such that p1 = p2 = 0
        # corresponds to p3 = 1 => w = 0.
        self.register_buffer('target_weights', torch.tensor([1, -1, 0], dtype=torch.float32))
        # Initialize a tensor, that will be the placeholder for the hard samples
        self.register_buffer('sample', torch.LongTensor(out_features, in_features))
        # We will only two parameters per weight, this is to prevent the redundancy
        # there would otherwise exist. This also makes it much more comparable with
        # NAC. The third logit is a fixed zero buffer (W_hat_k).
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features, 2))
        self.register_buffer('W_hat_k', torch.Tensor(out_features, in_features, 1))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Use a gain of sqrt(0.5). Lets assume that softmax'(0) ~ 1, because this
        # holds for sigmoid. Then:
        # Var[W] = 1 * Var[S_1] - 1 * Var[S_2] + 0 * Var[S_3] = 2 / (fan[in] + fan[out])
        # Var[W] = 2 * Var[S_i] = 2 / (fan[in] + fan[out])
        # Var[S_i] = 1/2 * 2 / (fan[in] + fan[out])
        # sqrt(Var[S_i]) = sqrt(1/2) * sqrt(2 / (fan[in] + fan[out]))
        # This is not exactly true, because S_1, S_2, and S_3 are not enterily uncorrelated.
        torch.nn.init.xavier_uniform_(self.W_hat, gain=math.sqrt(0.5))
        torch.nn.init.constant_(self.W_hat_k, 0)

    def forward(self, input, reuse=False):
        """Linear transform with sampled {1, -1, 0} weights.

        Arguments:
            input: input tensor whose last dimension is ``in_features``
            reuse: if True, reuse the previously drawn hard sample instead of
                resampling (e.g. across recurrent time steps)
        """
        # Concat trainable and non-trainable weights
        W_hat_full = torch.cat((self.W_hat, self.W_hat_k), dim=-1)  # size = [out, in, 3]
        # Compute W_soft
        pi = torch.nn.functional.softmax(W_hat_full, dim=-1)
        W_soft = pi @ self.target_weights
        # Compute W_hard
        if not reuse:
            torch.multinomial(pi.view(-1, 3), 1, True, out=self.sample.view(-1))
        W_hard = self.target_weights[self.sample]
        # Use W_hard in the forward pass, but use W_soft for the gradients.
        # This implementation trick comes from torch.nn.functional.gumble_softmax(hard=True)
        W = W_hard - W_soft.detach() + W_soft
        # Compute the linear multiplication as usual
        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)
        return torch.nn.functional.linear(input, W, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class HardSoftmaxNACCell(AbstractRecurrentCell):
    """Implements the hard softmax NAC (Neural Accumulator) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(HardSoftmaxNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/regualized_linear_mnac.py | stable_nalu/layer/regualized_linear_mnac.py |
import scipy.optimize
import numpy as np
import torch
import math
from ..abstract import ExtendedTorchModule
from ..functional import mnac, sparsity_error
from ._abstract_recurrent_cell import AbstractRecurrentCell
class RegualizedLinearMNACLayer(ExtendedTorchModule):
    """Implements the regualized linear MNAC (multiplicative Neural Accumulator).

    The weights are plain linear parameters pushed towards {0, 1} by a bias
    regualizer; the forward pass aggregates multiplicatively via ``mnac``.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
        regualizer_shape: shape of the bias regualizer (default 'squared')
        mnac_epsilon: zero-epsilon passed to the regualizer
        mnac_normalized: if True, normalize the input by its std before mnac
            and rescale the output accordingly
    """

    def __init__(self, in_features, out_features,
                 regualizer_shape='squared',
                 mnac_epsilon=0, mnac_normalized=False, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.mnac_normalized = mnac_normalized
        # NOTE(review): `Regualizer` does not appear in this module's visible
        # imports (only `mnac` and `sparsity_error` are imported from
        # ..functional) — verify it is actually in scope.
        self._regualizer_bias = Regualizer(
            support='mnac', type='bias',
            shape=regualizer_shape, zero_epsilon=mnac_epsilon
        )
        self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Uniform init centered at 0.5 with variance 0.25 (std = 0.5).
        std = math.sqrt(0.25)
        r = math.sqrt(3.0) * std
        torch.nn.init.uniform_(self.W, 0.5 - r, 0.5 + r)

    def regualizer(self):
        return super().regualizer({
            'W': self._regualizer_bias(self.W)
        })

    def forward(self, x, reuse=False):
        W = self.W
        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)
        if self.mnac_normalized:
            # Normalize by the global std, apply mnac, then undo the scaling;
            # the exponent sum(W, 1) matches mnac's multiplicative weighting.
            c = torch.std(x)
            x_normalized = x / c
            z_normalized = mnac(x_normalized, W, mode='prod')
            return z_normalized * (c ** torch.sum(W, 1))
        else:
            return mnac(x, W, mode='prod')

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class RegualizedLinearMNACCell(AbstractRecurrentCell):
    """Implements the regualized linear MNAC as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        # Bug fix: previously referenced the undefined name
        # `ReRegualizedLinearMNACLayer`; this module defines
        # `RegualizedLinearMNACLayer`, which is the layer this cell wraps.
        super().__init__(RegualizedLinearMNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/gradient_bandit_nac.py | stable_nalu/layer/gradient_bandit_nac.py |
import math
import torch
from ..abstract import ExtendedTorchModule
from ..functional import sparsity_error
from ._abstract_recurrent_cell import AbstractRecurrentCell
class GradientBanditNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator) trained as a gradient bandit.

    Each weight is a categorical distribution over {1, -1, 0}. The
    distribution logits are not trained by backpropagation but by a
    REINFORCE-style bandit update (`optimize`) against a running-mean loss
    baseline.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        # The learning rate (stored as a non-trainable parameter)
        self.lr = torch.nn.Parameter(torch.tensor(1e-3, dtype=torch.float32), requires_grad=False)
        # Holds a running mean of the loss
        self.running_mean_beta = torch.nn.Parameter(torch.tensor(0.9, dtype=torch.float32), requires_grad=False)
        self.register_buffer('running_mean_iter', torch.tensor(0, dtype=torch.float32))
        self.register_buffer('running_mean_loss', torch.tensor(0, dtype=torch.float32))
        # Define the target weights. Also, put 0 last such that p1 = p2 = 0
        # corresponds to p3 = 1 => w = 0.
        self.register_buffer('target_weights', torch.tensor([1, -1, 0], dtype=torch.float32))
        # Initialize a tensor, that will be the placeholder for the hard sample
        self.sample = torch.LongTensor(out_features, in_features)
        # We will only two parameters per weight, this is to prevent the redundancy
        # there would otherwise exist. This also makes it much more comparable with
        # NAC.
        self.register_buffer('W_hat', torch.Tensor(out_features, in_features, 3))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        torch.nn.init.zeros_(self.W_hat)
        self.running_mean_iter.fill_(0)
        self.running_mean_loss.fill_(0)

    def optimize(self, loss):
        """Bandit update of the weight distributions from the observed loss."""
        # Increment the iteration counter
        self.running_mean_iter.add_(1)
        # Update running mean, this is a unbiased exponental average, see Adam()
        self.running_mean_loss.mul_(self.running_mean_beta).add_(1 - self.running_mean_beta, loss)
        running_mean_loss_debias = self.running_mean_loss / (1 - self.running_mean_beta**self.running_mean_iter)
        # Convert W sample to a one-hot encoding
        samples_one_hot = torch.zeros(self.out_features, self.in_features, 3) \
            .scatter_(2, self.sample.view(self.out_features, self.in_features, 1), 1.0)
        # Compute update: move probability towards the sampled action when the
        # loss is below the baseline, away from it otherwise.
        pi = torch.nn.functional.softmax(self.W_hat, dim=-1)
        self.W_hat.addcmul_(self.lr, running_mean_loss_debias - loss, samples_one_hot - pi)

    def forward(self, input, reuse=False):
        pi = torch.nn.functional.softmax(self.W_hat, dim=-1)
        # Compute W: sample categorically, or take the argmax when sampling is
        # disabled.
        # NOTE(review): `self.allow_random` is presumably provided by
        # ExtendedTorchModule — confirm against the abstract base.
        if self.allow_random:
            if not reuse:
                torch.multinomial(pi.view(-1, 3), 1, True, out=self.sample.view(-1))
            W = self.target_weights[self.sample]
        else:
            W = self.target_weights[torch.argmax(pi, dim=-1)]
        # Compute the linear multiplication as usual
        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)
        return torch.nn.functional.linear(input, W, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class GradientBanditNACCell(AbstractRecurrentCell):
    """Implements the gradient bandit NAC (Neural Accumulator) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(GradientBanditNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/generalized_test.py | stable_nalu/layer/generalized_test.py |
from nose.tools import *
import numpy as np
import torch
from stable_nalu.layer import GeneralizedLayer
def test_generalized_layer_linear():
    """GeneralizedLayer('linear') must behave as a plain (bias-free) linear layer."""
    # NOTE(review): unlike basic_test.py, neither numpy nor torch is seeded
    # here, so the data differs between runs (the check is self-consistent
    # regardless).
    x_np = np.random.randn(64, 100).astype(np.float32)
    x_tensor = torch.tensor(x_np)
    layer = GeneralizedLayer(100, 2, 'linear')
    layer.reset_parameters()
    y_tensor = layer(x_tensor)
    # The layer output must equal a manual x @ W^T using the layer's own weights.
    w_np = layer.layer.weight.detach().numpy()
    y_np = np.dot(x_np, np.transpose(w_np))
    np.testing.assert_almost_equal(y_np, y_tensor.detach().numpy())
    assert_equal(y_tensor.shape, (64, 2))
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/gradient_bandit_nalu.py | stable_nalu/layer/gradient_bandit_nalu.py |
from .gradient_bandit_nac import GradientBanditNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class GradientBanditNALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit) with gradient bandit NAC sub-layers

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        # NOTE(review): the multiplicative layer class is passed as None —
        # presumably AbstractNALULayer falls back to the first class for both
        # sub-units; confirm against _abstract_nalu.
        super().__init__(GradientBanditNACLayer, None, in_features, out_features, **kwargs)
class GradientBanditNALUCell(AbstractRecurrentCell):
    """Implements the gradient bandit NALU (Neural Arithmetic Logic Unit) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        # Bug fix: a stray `None` was previously passed between the layer
        # class and input_size, shifting the AbstractRecurrentCell arguments.
        # Every other cell in this package calls
        # AbstractRecurrentCell.__init__(LayerClass, input_size, hidden_size).
        super().__init__(GradientBanditNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/pos_nac.py | stable_nalu/layer/pos_nac.py |
import scipy.optimize
import numpy as np
import torch
from ..functional import nac_weight, sparsity_error
from ..abstract import ExtendedTorchModule
from ._abstract_recurrent_cell import AbstractRecurrentCell
class PosNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator) with strictly positive weights.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        torch.nn.init.xavier_normal_(self.W_hat)

    def forward(self, input, reuse=False):
        # Weights are constrained to (0, 1) via a sigmoid transform of W_hat.
        weights = torch.sigmoid(self.W_hat)
        writer = self.writer
        writer.add_histogram('W', weights)
        writer.add_tensor('W', weights)
        writer.add_scalar('W/sparsity_error', sparsity_error(weights), verbose_only=False)
        return torch.nn.functional.linear(input, weights, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(self.in_features, self.out_features)
class PosNACCell(AbstractRecurrentCell):
    """Implements the positive-weight NAC (Neural Accumulator) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(PosNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/linear_nac.py | stable_nalu/layer/linear_nac.py |
import scipy.optimize
import numpy as np
import torch
from ..abstract import ExtendedTorchModule
from ._abstract_recurrent_cell import AbstractRecurrentCell
class LinearNACLayer(ExtendedTorchModule):
    """Implements the RegualizedLinearNAC: an unconstrained linear NAC layer.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.W)

    def forward(self, input, reuse=False):
        weight = self.W
        self.writer.add_histogram('W', weight)
        self.writer.add_tensor('W', weight, verbose_only=False)
        return torch.nn.functional.linear(input, weight, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(self.in_features, self.out_features)
class LinearNACCell(AbstractRecurrentCell):
    """Implements the RegualizedLinearNAC as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        # Bug fix: previously wrapped `RegualizedLinearNACLayer`, which is
        # neither defined nor imported in this module; the cell must wrap
        # LinearNACLayer, mirroring every other *Cell in this package.
        super().__init__(LinearNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/basic_test.py | stable_nalu/layer/basic_test.py |
from nose.tools import *
import numpy as np
import torch
from stable_nalu.layer import BasicLayer
def test_basic_layer_linear():
    """BasicLayer('linear') must behave as a plain (bias-free) linear layer."""
    # Seeded data and seeded layer init keep this test deterministic.
    x_np = np.random.RandomState(1).randn(64, 100).astype(np.float32)
    x_tensor = torch.tensor(x_np)
    torch.manual_seed(1)
    layer = BasicLayer(100, 2, activation='linear')
    layer.reset_parameters()
    y_tensor = layer(x_tensor)
    # The layer output must equal a manual x @ W^T using the layer's own weights.
    w_np = layer.weight.detach().numpy()
    y_np = np.dot(x_np, np.transpose(w_np))
    np.testing.assert_almost_equal(y_np, y_tensor.detach().numpy())
    assert_equal(y_tensor.shape, (64, 2))
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/nalu.py | stable_nalu/layer/nalu.py |
from .nac import NACLayer
from .mnac import MNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class NALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit)

    Combines an additive NAC and a multiplicative MNAC sub-layer.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__(NACLayer, MNACLayer, in_features, out_features, **kwargs)
class NALUCell(AbstractRecurrentCell):
    """Implements the NALU (Neural Arithmetic Logic Unit) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(NALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/independent_nalu.py | stable_nalu/layer/independent_nalu.py |
from .independent_nac import IndependentNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class IndependentNALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit) with independent NAC sub-layers

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        # NOTE(review): the multiplicative layer class is passed as None —
        # presumably AbstractNALULayer falls back to the first class for both
        # sub-units; confirm against _abstract_nalu.
        super().__init__(IndependentNACLayer, None, in_features, out_features, **kwargs)
class IndependentNALUCell(AbstractRecurrentCell):
    """Implements the NALU (Neural Arithmetic Logic Unit) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        # Bug fix: a stray `None` was previously passed between the layer
        # class and input_size; AbstractRecurrentCell is called everywhere
        # else as (LayerClass, input_size, hidden_size).
        super().__init__(IndependentNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/mnac.py | stable_nalu/layer/mnac.py |
import math
import torch
import numpy as np
import scipy.optimize
from ..functional import mnac, sparsity_error
from ..abstract import ExtendedTorchModule
from ._abstract_recurrent_cell import AbstractRecurrentCell
class MNACLayer(ExtendedTorchModule):
    """Implements the multiplicative NAC (Neural Accumulator).

    The sigmoid-gated weights select which inputs participate in the
    multiplicative ``mnac`` aggregation.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Uniform init with variance 0.25 (std = 0.5), centered at zero.
        bound = math.sqrt(3.0) * math.sqrt(0.25)
        torch.nn.init.uniform_(self.W_hat, -bound, bound)

    def forward(self, x, reuse=False):
        gate = torch.sigmoid(self.W_hat)
        self.writer.add_histogram('W', gate)
        self.writer.add_tensor('W', gate)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(gate), verbose_only=False)
        return mnac(x, gate)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(self.in_features, self.out_features)
class MNACCell(AbstractRecurrentCell):
    """Implements the multiplicative NAC (Neural Accumulator) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(MNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/gumbel_nalu.py | stable_nalu/layer/gumbel_nalu.py |
from .gumbel_nac import GumbelNACLayer
from .gumbel_mnac import GumbelMNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class GumbelNALULayer(AbstractNALULayer):
    """Implements the Gumbel NALU (Neural Arithmetic Logic Unit)

    Combines a Gumbel NAC and a Gumbel MNAC sub-layer.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__(GumbelNACLayer, GumbelMNACLayer, in_features, out_features, **kwargs)
class GumbelNALUCell(AbstractRecurrentCell):
    """Implements the Gumbel NALU (Neural Arithmetic Logic Unit) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        # BUG FIX: AbstractRecurrentCell.__init__ takes (Op, input_size,
        # hidden_size, writer=None). The previous extra positional argument
        # (GumbelMNACLayer) shifted every parameter by one — input_size became
        # a class and hidden_size ended up in `writer` — so construction
        # crashed with a TypeError inside Op(input_size + hidden_size, ...).
        super().__init__(GumbelNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/regualized_linear_nalu.py | stable_nalu/layer/regualized_linear_nalu.py |
from .regualized_linear_nac import RegualizedLinearNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class RegualizedLinearNALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit)

    Uses ``RegualizedLinearNACLayer`` for the additive path; no dedicated
    multiplicative layer is supplied (MNACOp is None), so the multiplicative
    path reuses a NAC in log-space.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """
    def __init__(self, in_features, out_features, **kwargs):
        super().__init__(RegualizedLinearNACLayer, None, in_features, out_features, **kwargs)
class RegualizedLinearNALUCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``RegualizedLinearNALULayer``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(RegualizedLinearNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/re_regualized_linear_mnac.py | stable_nalu/layer/re_regualized_linear_mnac.py |
import scipy.optimize
import numpy as np
import torch
import math
from ..abstract import ExtendedTorchModule
from ..functional import mnac, Regualizer, RegualizerNMUZ, sparsity_error
from ._abstract_recurrent_cell import AbstractRecurrentCell
class ReRegualizedLinearMNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    NMU-style multiplicative unit: weights are kept in [0, 1] (either softly
    clamped in forward or hard-clipped after each step) and combined with the
    input through ``mnac(x, W, mode='prod')``.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
        nac_oob: out-of-bound handling — 'regualized' clamps W in forward,
            'clip' projects the parameter after each optimizer step
        regualizer_shape: shape of the bias/oob regualizers
        mnac_epsilon: zero-epsilon lower bound for the weights
        mnac_normalized: if truthy, normalize x by its std before mnac and
            rescale the output (presumably for numerical stability — the
            exact identity depends on mnac's 'prod' mode; confirm there)
        regualizer_z: 0 disables the NMU z-regualizer
    """
    def __init__(self, in_features, out_features,
                 nac_oob='regualized', regualizer_shape='squared',
                 mnac_epsilon=0, mnac_normalized=False, regualizer_z=0,
                 **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.mnac_normalized = mnac_normalized
        self.mnac_epsilon = mnac_epsilon
        self.nac_oob = nac_oob

        self._regualizer_bias = Regualizer(
            support='mnac', type='bias',
            shape=regualizer_shape, zero_epsilon=mnac_epsilon
        )
        self._regualizer_oob = Regualizer(
            support='mnac', type='oob',
            shape=regualizer_shape, zero_epsilon=mnac_epsilon,
            zero=self.nac_oob == 'clip'
        )
        self._regualizer_nmu_z = RegualizerNMUZ(
            zero=regualizer_z == 0
        )

        self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Uniform init centered at 0.5, radius capped at 0.25 so W starts
        # strictly inside [0.25, 0.75].
        std = math.sqrt(0.25)
        r = min(0.25, math.sqrt(3.0) * std)
        torch.nn.init.uniform_(self.W, 0.5 - r, 0.5 + r)
        self._regualizer_nmu_z.reset()

    def optimize(self, loss):
        # Post-step hook; `loss` is intentionally unused here.
        self._regualizer_nmu_z.reset()
        if self.nac_oob == 'clip':
            # Hard projection back into [mnac_epsilon, 1].
            self.W.data.clamp_(0.0 + self.mnac_epsilon, 1.0)

    def regualizer(self):
        return super().regualizer({
            'W': self._regualizer_bias(self.W),
            'z': self._regualizer_nmu_z(self.W),
            'W-OOB': self._regualizer_oob(self.W)
        })

    def forward(self, x, reuse=False):
        if self.allow_random:
            self._regualizer_nmu_z.append_input(x)

        # Soft clamp only in 'regualized' mode; 'clip' mode relies on optimize().
        W = torch.clamp(self.W, 0.0 + self.mnac_epsilon, 1.0) \
            if self.nac_oob == 'regualized' \
            else self.W
        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)

        if self.mnac_normalized:
            # Normalize to unit std, apply mnac, then undo the scaling with
            # c ** sum(W, dim=1) on the output.
            c = torch.std(x)
            x_normalized = x / c
            z_normalized = mnac(x_normalized, W, mode='prod')
            out = z_normalized * (c ** torch.sum(W, 1))
        else:
            out = mnac(x, W, mode='prod')
        return out

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class ReRegualizedLinearMNACCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``ReRegualizedLinearMNACLayer``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(ReRegualizedLinearMNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/silly_re_regualized_linear_mnac.py | stable_nalu/layer/silly_re_regualized_linear_mnac.py |
import math

import numpy as np
import scipy.optimize
import torch

from ..abstract import ExtendedTorchModule
from ..functional import mnac, Regualizer, sparsity_error
from ._abstract_recurrent_cell import AbstractRecurrentCell
class SillyReRegualizedLinearMNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    Variant of ReRegualizedLinearMNACLayer that calls ``mnac`` with
    mode='no-idendity' and has no NMU z-regualizer.

    NOTE(review): ``Regualizer`` is used below but the module only imports
    ``mnac`` and ``sparsity_error`` from ``..functional`` — constructing this
    class raises NameError unless Regualizer is imported as well.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
        nac_oob: out-of-bound handling — 'regualized' clamps W in forward,
            'clip' projects the parameter after each optimizer step
        regualizer_shape: shape of the bias/oob regualizers
        mnac_epsilon: zero-epsilon lower bound for the weights
        mnac_normalized: if truthy, normalize x by its std before mnac
    """
    def __init__(self, in_features, out_features,
                 nac_oob='regualized', regualizer_shape='squared',
                 mnac_epsilon=0, mnac_normalized=False,
                 **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.mnac_normalized = mnac_normalized
        self.mnac_epsilon = mnac_epsilon
        self.nac_oob = nac_oob

        self._regualizer_bias = Regualizer(
            support='mnac', type='bias',
            shape=regualizer_shape, zero_epsilon=mnac_epsilon
        )
        self._regualizer_oob = Regualizer(
            support='mnac', type='oob',
            shape=regualizer_shape, zero_epsilon=mnac_epsilon,
            zero=self.nac_oob == 'clip'
        )

        self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Uniform init centered at 0.5, radius capped at 0.25.
        std = math.sqrt(0.25)
        r = min(0.25, math.sqrt(3.0) * std)
        torch.nn.init.uniform_(self.W, 0.5 - r, 0.5 + r)

    def optimize(self, loss):
        # Post-step hook; `loss` is intentionally unused here.
        if self.nac_oob == 'clip':
            self.W.data.clamp_(0.0 + self.mnac_epsilon, 1.0)

    def regualizer(self):
        return super().regualizer({
            'W': self._regualizer_bias(self.W),
            'W-OOB': self._regualizer_oob(self.W)
        })

    def forward(self, x, reuse=False):
        W = torch.clamp(self.W, 0.0 + self.mnac_epsilon, 1.0) \
            if self.nac_oob == 'regualized' \
            else self.W
        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)

        if self.mnac_normalized:
            c = torch.std(x)
            x_normalized = x / c
            # 'no-idendity' (sic) must match the mode key expected by mnac.
            z_normalized = mnac(x_normalized, W, mode='no-idendity')
            return z_normalized * (c ** torch.sum(W, 1))
        else:
            return mnac(x, W, mode='no-idendity')

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class SillyReRegualizedLinearMNACCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``SillyReRegualizedLinearMNACLayer``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(SillyReRegualizedLinearMNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/_abstract_recurrent_cell.py | stable_nalu/layer/_abstract_recurrent_cell.py |
import torch
from ..abstract import ExtendedTorchModule
class AbstractRecurrentCell(ExtendedTorchModule):
    """Generic recurrent cell that wraps a feed-forward layer class ``Op``.

    The wrapped layer sees ``[x_t, h_tm1]`` concatenated along the feature
    dimension and produces the next hidden state.

    Arguments:
        Op: layer class to instantiate with (input_size + hidden_size) inputs
        input_size: number of ingoing features per time step
        hidden_size: number of outgoing (hidden-state) features
        writer: optional summary writer forwarded to the wrapped layer
    """

    def __init__(self, Op, input_size, hidden_size, writer=None, **kwargs):
        super().__init__('recurrent', writer=writer, **kwargs)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.op = Op(input_size + hidden_size, hidden_size, writer=self.writer, **kwargs)

    def reset_parameters(self):
        # Delegate initialization to the wrapped layer.
        self.op.reset_parameters()

    def forward(self, x_t, h_tm1):
        combined = torch.cat((x_t, h_tm1), dim=1)
        return self.op(combined)

    def extra_repr(self):
        return 'input_size={}, hidden_size={}'.format(
            self.input_size, self.hidden_size
        )
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/independent_nac.py | stable_nalu/layer/independent_nac.py |
import torch
from ..functional import nac_weight, sparsity_error
from .nac import NACLayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class IndependentNACLayer(NACLayer):
    """NAC variant whose weight is computed in 'independent' mode.

    Reuses NACLayer's parameters (W_hat, M_hat, bias) but builds W via
    ``nac_weight(..., mode='independent')`` instead of the default mode.
    """
    def forward(self, input, reuse=False):
        W = nac_weight(self.W_hat, self.M_hat, mode='independent')
        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)

        return torch.nn.functional.linear(input, W, self.bias)
class IndependentNACCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``IndependentNACLayer``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(IndependentNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/re_regualized_linear_nac.py | stable_nalu/layer/re_regualized_linear_nac.py |
import scipy.optimize
import numpy as np
import torch
import math
from ..abstract import ExtendedTorchModule
from ..functional import Regualizer, RegualizerNAUZ, sparsity_error
from ._abstract_recurrent_cell import AbstractRecurrentCell
class ReRegualizedLinearNACLayer(ExtendedTorchModule):
    """Implements the RegualizedLinearNAC

    NAU-style additive unit: a plain linear map whose weights are regualized
    toward {-1, 0, 1} and kept in [-1, 1].

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
        nac_oob: out-of-bound handling — 'regualized' or 'clip'
        regualizer_shape: shape of the bias/oob regualizers
        regualizer_z: 0 disables the NAU z-regualizer
    """
    def __init__(self, in_features, out_features,
                 nac_oob='regualized', regualizer_shape='squared', regualizer_z=0,
                 **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.nac_oob = nac_oob

        self._regualizer_bias = Regualizer(
            support='nac', type='bias',
            shape=regualizer_shape
        )
        self._regualizer_oob = Regualizer(
            support='nac', type='oob',
            shape=regualizer_shape,
            zero=self.nac_oob == 'clip'
        )
        self._regualizer_nau_z = RegualizerNAUZ(
            zero=regualizer_z == 0
        )

        self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Xavier-style std, radius capped at 0.5.
        std = math.sqrt(2.0 / (self.in_features + self.out_features))
        r = min(0.5, math.sqrt(3.0) * std)
        torch.nn.init.uniform_(self.W, -r, r)

    def optimize(self, loss):
        # Post-step hook; `loss` is intentionally unused here.
        self._regualizer_nau_z.reset()

        if self.nac_oob == 'clip':
            self.W.data.clamp_(-1.0, 1.0)

    def regualizer(self):
        return super().regualizer({
            'W': self._regualizer_bias(self.W),
            'z': self._regualizer_nau_z(self.W),
            'W-OOB': self._regualizer_oob(self.W)
        })

    def forward(self, x, reuse=False):
        if self.allow_random:
            self._regualizer_nau_z.append_input(x)

        # NOTE(review): W is clamped unconditionally here, unlike the MNAC
        # sibling which clamps only when nac_oob == 'regualized' — confirm
        # whether this asymmetry is intentional.
        W = torch.clamp(self.W, -1.0, 1.0)
        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)

        return torch.nn.functional.linear(x, W, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class ReRegualizedLinearNACCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``ReRegualizedLinearNACLayer``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(ReRegualizedLinearNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/re_regualized_linear_nalu.py | stable_nalu/layer/re_regualized_linear_nalu.py |
from .re_regualized_linear_nac import ReRegualizedLinearNACLayer
from .re_regualized_linear_mnac import ReRegualizedLinearMNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class ReRegualizedLinearNALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit)

    Uses ``ReRegualizedLinearNACLayer`` (NAU) for the additive path and
    ``ReRegualizedLinearMNACLayer`` (NMU) for the multiplicative path.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """
    def __init__(self, in_features, out_features, **kwargs):
        super().__init__(ReRegualizedLinearNACLayer, ReRegualizedLinearMNACLayer, in_features, out_features, **kwargs)
class ReRegualizedLinearNALUCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``ReRegualizedLinearNALULayer``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(ReRegualizedLinearNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/pos_nalu.py | stable_nalu/layer/pos_nalu.py |
from .pos_nac import PosNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class PosNALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit)

    Uses ``PosNACLayer`` for the additive path; no dedicated multiplicative
    layer is supplied (MNACOp is None).

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """
    def __init__(self, in_features, out_features, **kwargs):
        super().__init__(PosNACLayer, None, in_features, out_features, **kwargs)
class PosNALUCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``PosNALULayer``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(PosNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/gumbel_nac.py | stable_nalu/layer/gumbel_nac.py |
import torch
from ..abstract import ExtendedTorchModule
from ..functional import sample_gumbel_softmax, batch_linear
from ._abstract_recurrent_cell import AbstractRecurrentCell
class GumbelNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    Gumbel-softmax variant: each weight is a (soft) categorical choice over
    the target values {1, -1, 0}, sampled with Gumbel noise during training.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
        sample_each_observation: stored but not used in this block —
            presumably consumed elsewhere; confirm before relying on it
    """
    def __init__(self, in_features, out_features, sample_each_observation=False, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.sample_each_observation = sample_each_observation

        # Define the temperature tau (fixed, not trained)
        self.tau = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32), requires_grad=False)

        # Define the target weights. Also, put 0 last such that p1 = p2 = 0
        # corresponds to p3 = 1 => w = 0.
        self.register_buffer('target_weights', torch.tensor([1, -1, 0], dtype=torch.float32))

        # Initialize a tensor, that will be the placeholder for the uniform samples
        self.U = torch.Tensor(out_features, in_features, 3)

        # We will only have two trainable logits per weight, this is to prevent
        # the redundancy there would otherwise exist. This also makes it much
        # more comparable with NAC. The third logit (W_hat_k) is a fixed buffer.
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features, 2))
        self.register_buffer('W_hat_k', torch.Tensor(out_features, in_features, 1))

        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Initialize to zero, the source of randomness can come from the Gumbel sampling.
        torch.nn.init.constant_(self.W_hat, 0)
        torch.nn.init.constant_(self.W_hat_k, 0)
        torch.nn.init.constant_(self.tau, 1)

    def forward(self, input, reuse=False):
        # Concat trainable and non-trainable weights
        W_hat_full = torch.cat((self.W_hat, self.W_hat_k), dim=-1)  # size = [out, in, 3]

        # Convert to log-properbilities
        # NOTE: softmax(log(softmax(w)) + g) can be simplified to softmax(w + g), taking
        # pi = softmax(W_hat_full) is just more interpretable.
        # log_pi = W_hat_full
        log_pi = torch.nn.functional.log_softmax(W_hat_full, dim=-1)

        # Sample a quazi-1-hot encoding; fall back to the expected (softmax)
        # weights when randomness is disabled.
        if self.allow_random:
            y = sample_gumbel_softmax(self.U, log_pi, tau=self.tau, reuse=reuse)
        else:
            y = torch.exp(log_pi)

        # The final weight matrix (W), is computed by selecting from the target_weights
        W = y @ self.target_weights

        # Compute the linear multiplication as usual; log the *expected* W
        self.writer.add_histogram('W', torch.exp(log_pi) @ self.target_weights)
        self.writer.add_tensor('W', torch.exp(log_pi) @ self.target_weights, verbose_only=False)

        return torch.nn.functional.linear(input, W, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class GumbelNACCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``GumbelNACLayer``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(GumbelNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/hard_softmax_nalu.py | stable_nalu/layer/hard_softmax_nalu.py |
from .hard_softmax_nac import HardSoftmaxNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class HardSoftmaxNALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit)

    Uses ``HardSoftmaxNACLayer`` for the additive path; no dedicated
    multiplicative layer is supplied (MNACOp is None).

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """
    def __init__(self, in_features, out_features, **kwargs):
        super().__init__(HardSoftmaxNACLayer, None, in_features, out_features, **kwargs)
class HardSoftmaxNALUCell(AbstractRecurrentCell):
    """Implements the NALU (Neural Arithmetic Logic Unit) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        # BUG FIX: AbstractRecurrentCell.__init__ takes (Op, input_size,
        # hidden_size, writer=None). The previous extra `None` positional
        # argument shifted every parameter by one (input_size became None,
        # hidden_size ended up in `writer`), crashing on construction.
        super().__init__(HardSoftmaxNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/basic.py | stable_nalu/layer/basic.py |
import math
import torch
from ..functional import sparsity_error
from ..abstract import ExtendedTorchModule
from ._abstract_recurrent_cell import AbstractRecurrentCell
# Mapping from activation name to the callable applied in BasicLayer.forward.
ACTIVATIONS = {
    'Tanh': torch.tanh,
    'Sigmoid': torch.sigmoid,
    'ReLU6': torch.nn.functional.relu6,
    'Softsign': torch.nn.functional.softsign,
    'SELU': torch.selu,
    'ELU': torch.nn.functional.elu,
    'ReLU': torch.relu,
    'linear': lambda x: x
}

# Matching weight initializer for each activation; keys must mirror ACTIVATIONS.
INITIALIZATIONS = {
    'Tanh': lambda W: torch.nn.init.xavier_uniform_(
        W, gain=torch.nn.init.calculate_gain('tanh')),
    'Sigmoid': lambda W: torch.nn.init.xavier_uniform_(
        W, gain=torch.nn.init.calculate_gain('sigmoid')),
    'ReLU6': lambda W: torch.nn.init.kaiming_uniform_(
        W, nonlinearity='relu'),
    'Softsign': lambda W: torch.nn.init.xavier_uniform_(
        W, gain=1),
    # SELU: uniform with variance 1/fan_in (LeCun-style).
    'SELU': lambda W: torch.nn.init.uniform_(
        W, a=-math.sqrt(3/W.size(1)), b=math.sqrt(3/W.size(1))),
    # ELU: The weights have been initialized according to (He et al., 2015).
    # source: https://arxiv.org/pdf/1511.07289.pdf
    'ELU': lambda W: torch.nn.init.kaiming_uniform_(
        W, nonlinearity='relu'),
    'ReLU': lambda W: torch.nn.init.kaiming_uniform_(
        W, nonlinearity='relu'),
    'linear': lambda W: torch.nn.init.xavier_uniform_(
        W, gain=torch.nn.init.calculate_gain('linear'))
}
class BasicLayer(ExtendedTorchModule):
    """A plain linear layer followed by a configurable activation.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
        activation: key into ACTIVATIONS selecting the non-linearity
        bias: whether to learn an additive bias term
    """
    # Exposed so callers can validate an activation name up front.
    ACTIVATIONS = set(ACTIVATIONS.keys())

    def __init__(self, in_features, out_features, activation='linear', bias=True, **kwargs):
        super().__init__('basic', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.activation = activation

        if activation not in ACTIVATIONS:
            raise NotImplementedError(
                f'the activation {activation} is not implemented')

        self.activation_fn = ACTIVATIONS[activation]
        self.initializer = INITIALIZATIONS[activation]

        self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = torch.nn.Parameter(torch.Tensor(out_features))
        else:
            # Register as None so state_dict / self.bias lookups still work.
            self.register_parameter('bias', None)

    def reset_parameters(self):
        # Use the activation-specific initializer selected in __init__.
        self.initializer(self.W)

        if self.bias is not None:
            torch.nn.init.zeros_(self.bias)

    def forward(self, input, reuse=False):
        self.writer.add_histogram('W', self.W)
        self.writer.add_tensor('W', self.W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(self.W), verbose_only=False)

        return self.activation_fn(
            torch.nn.functional.linear(input, self.W, self.bias)
        )

    def extra_repr(self):
        return 'in_features={}, out_features={}, activation={}'.format(
            self.in_features, self.out_features, self.activation
        )
class BasicCell(AbstractRecurrentCell):
    """Recurrent-cell wrapper around ``BasicLayer``."""

    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(BasicLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/linear_nalu.py | stable_nalu/layer/linear_nalu.py |
from .linear_nac import LinearNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class LinearNALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit)

    Uses ``LinearNACLayer`` for the additive path; no dedicated
    multiplicative layer is supplied (MNACOp is None).

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """
    def __init__(self, in_features, out_features, **kwargs):
        super().__init__(LinearNACLayer, None, in_features, out_features, **kwargs)
class LinearNALUCell(AbstractRecurrentCell):
    """Implements the NALU (Neural Arithmetic Logic Unit) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        # BUG FIX: AbstractRecurrentCell.__init__ takes (Op, input_size,
        # hidden_size, writer=None). The previous extra `None` positional
        # argument shifted every parameter by one (input_size became None,
        # hidden_size ended up in `writer`), crashing on construction.
        super().__init__(LinearNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/gumbel_mnac.py | stable_nalu/layer/gumbel_mnac.py |
import torch
from ..abstract import ExtendedTorchModule
from ..functional import mnac
from ._abstract_recurrent_cell import AbstractRecurrentCell
class GumbelMNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    Multiplicative Gumbel variant: each weight is a Bernoulli-like gate in
    [0, 1], sampled with logistic (difference of Gumbel) noise in training.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """
    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features

        # Define the temperature tau (fixed, not trained)
        self.tau = torch.nn.Parameter(torch.tensor(1, dtype=torch.float32), requires_grad=False)

        # Define the target weights. Also, put 0 last such that p1 = p2 = 0
        # corresponds to p3 = 1 => w = 0.
        self.register_buffer('target_weights', torch.tensor([1, -1, 0], dtype=torch.float32))

        # Initialize a tensor, that will be the placeholder for the uniform samples
        # NOTE(review): U and target_weights are not used in this layer's
        # forward — they appear carried over from GumbelNACLayer.
        self.U = torch.Tensor(out_features, in_features, 3)

        # One logit per weight; sigmoid(W_hat) is the gate probability.
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features))

        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Initialize to zero, the source of randomness can come from the Gumbel sampling.
        torch.nn.init.constant_(self.W_hat, 0)
        torch.nn.init.constant_(self.tau, 1)

    def forward(self, x, reuse=False):
        if self.allow_random:
            # Gumbel noise perturbs the logits; tau controls the sharpness.
            gumbel = -torch.log(1e-8 - torch.log(torch.rand(self.out_features, self.in_features, device=x.device) + 1e-8))
            W = torch.sigmoid((self.W_hat + gumbel) / self.tau)
        else:
            W = torch.sigmoid(self.W_hat)

        # Compute the linear multiplication as usual; log the *expected* W
        expected_W = torch.sigmoid(self.W_hat)
        self.writer.add_histogram('W', expected_W)
        self.writer.add_tensor('W', expected_W, verbose_only=False)

        return mnac(x, W)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class GumbelMNACCell(AbstractRecurrentCell):
    """Implements the Gumbel MNAC (multiplicative Gumbel Neural Accumulator) as a recurrent cell

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        # BUG FIX: this cell previously wrapped GumbelNACLayer (the additive
        # layer) — a copy-paste error. The multiplicative cell must wrap
        # GumbelMNACLayer, which is defined in this module.
        super().__init__(GumbelMNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/__init__.py | stable_nalu/layer/__init__.py |
from .basic import BasicLayer, BasicCell
from .nac import NACLayer, NACCell
from .nalu import NALULayer, NALUCell
from .gumbel_nac import GumbelNACLayer, GumbelNACCell
from .gumbel_nalu import GumbelNALULayer, GumbelNALUCell
from .linear_nac import LinearNACLayer, LinearNACCell
from .linear_nalu import LinearNALULayer, LinearNALUCell
from .softmax_nac import SoftmaxNACLayer, SoftmaxNACCell
from .softmax_nalu import SoftmaxNALULayer, SoftmaxNALUCell
from .independent_nac import IndependentNACLayer, IndependentNACCell
from .independent_nalu import IndependentNALULayer, IndependentNALUCell
from .hard_softmax_nac import HardSoftmaxNACLayer, HardSoftmaxNACCell
from .hard_softmax_nalu import HardSoftmaxNALULayer, HardSoftmaxNALUCell
from .gradient_bandit_nac import GradientBanditNACLayer, GradientBanditNACCell
from .gradient_bandit_nalu import GradientBanditNALULayer, GradientBanditNALUCell
from .regualized_linear_nac import RegualizedLinearNACLayer, RegualizedLinearNACCell
from .regualized_linear_nalu import RegualizedLinearNALULayer, RegualizedLinearNALUCell
from .re_regualized_linear_nac import ReRegualizedLinearNACLayer, ReRegualizedLinearNACCell
from .re_regualized_linear_nalu import ReRegualizedLinearNALULayer, ReRegualizedLinearNALUCell
from .generalized import GeneralizedLayer, GeneralizedCell
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/_abstract_nalu.py | stable_nalu/layer/_abstract_nalu.py |
import math
import torch
from ..abstract import ExtendedTorchModule
torch.nn.functional.gumbel_softmax
class AbstractNALULayer(ExtendedTorchModule):
"""Implements the NALU (Neural Arithmetic Logic Unit)
Arguments:
in_features: number of ingoing features
out_features: number of outgoing features
"""
def __init__(self, NACOp, MNACOp, in_features, out_features, eps=1e-7,
nalu_two_nac=False, nalu_two_gate=False,
nalu_bias=False, nalu_mul='normal', nalu_gate='normal',
writer=None, name=None, **kwargs):
super().__init__('nalu', name=name, writer=writer, **kwargs)
self.in_features = in_features
self.out_features = out_features
self.eps = eps
self.nalu_two_nac = nalu_two_nac
self.nalu_two_gate = nalu_two_gate
self.nalu_bias = nalu_bias
self.nalu_mul = nalu_mul
self.nalu_gate = nalu_gate
if nalu_mul == 'mnac' and not nalu_two_nac:
raise ValueError('nalu_two_nac must be true when mnac is used')
if nalu_gate == 'gumbel' or nalu_gate == 'obs-gumbel':
self.tau = torch.tensor(1, dtype=torch.float32)
if nalu_two_nac and nalu_mul == 'mnac':
self.nac_add = NACOp(in_features, out_features, writer=self.writer, name='nac_add', **kwargs)
self.nac_mul = MNACOp(in_features, out_features, writer=self.writer, name='nac_mul', **kwargs)
elif nalu_two_nac:
self.nac_add = NACOp(in_features, out_features, writer=self.writer, name='nac_add', **kwargs)
self.nac_mul = NACOp(in_features, out_features, writer=self.writer, name='nac_mul', **kwargs)
else:
self.nac_add = NACOp(in_features, out_features, writer=self.writer, **kwargs)
self.nac_mul = self._nac_add_reuse
self.G_add = torch.nn.Parameter(torch.Tensor(out_features, in_features))
if nalu_two_gate:
self.G_mul = torch.nn.Parameter(torch.Tensor(out_features, in_features))
if nalu_bias:
self.bias_add = torch.nn.Parameter(torch.Tensor(out_features))
if nalu_two_gate:
self.bias_mul = torch.nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias_add', None)
self.register_parameter('bias_mul', None)
# Don't make this a buffer, as it is not a state that we want to permanently save
self.stored_gate_add = torch.tensor([0], dtype=torch.float32)
if nalu_two_gate:
self.stored_gate_mul = torch.tensor([0], dtype=torch.float32)
self.stored_input = torch.tensor([0], dtype=torch.float32)
def _nac_add_reuse(self, x):
return self.nac_add(x, reuse=True)
def regualizer(self):
regualizers = {}
if self.nalu_gate == 'regualized':
# NOTE: This is almost identical to sum(g * (1 - g)). Primarily
# sum(g * (1 - g)) is 4 times larger than sum(g^2 * (1 - g)^2), the curve
# is also a bit wider. Besides this there is only a very small error.
regualizers['g'] = torch.sum(self.stored_gate_add**2 * (1 - self.stored_gate_add)**2)
if self.nalu_two_gate:
regualizers['g'] += torch.sum(self.stored_gate_mul**2 * (1 - self.stored_gate_mul)**2)
if self.nalu_gate == 'max-safe':
regualizers['z'] = torch.mean((1 - self.stored_gate) * torch.relu(1 - self.stored_input))
# Continue recursion on the regualizer, such that if the NACOp has a regualizer, this is included too.
return super().regualizer(regualizers)
def reset_parameters(self):
self.nac_add.reset_parameters()
if self.nalu_two_nac:
self.nac_mul.reset_parameters()
torch.nn.init.xavier_uniform_(
self.G_add,
gain=torch.nn.init.calculate_gain('sigmoid'))
if self.nalu_two_gate:
torch.nn.init.xavier_uniform_(
self.G_mul,
gain=torch.nn.init.calculate_gain('sigmoid'))
if self.nalu_bias:
# consider http://proceedings.mlr.press/v37/jozefowicz15.pdf
torch.nn.init.constant_(self.bias_add, 0)
if self.nalu_two_gate:
torch.nn.init.constant_(self.bias_mul, 0)
def _compute_gate(self, x, G, bias):
# g = sigmoid(G x)
if self.nalu_gate == 'gumbel' or self.nalu_gate == 'obs-gumbel':
gumbel = 0
if self.allow_random and self.nalu_gate == 'gumbel':
gumbel = (-torch.log(1e-8 - torch.log(torch.rand(self.out_features, device=x.device) + 1e-8)))
elif self.allow_random and self.nalu_gate == 'obs-gumbel':
gumbel = (-torch.log(1e-8 - torch.log(torch.rand(x.size(0), self.out_features, device=x.device) + 1e-8)))
g = torch.sigmoid((torch.nn.functional.linear(x, G, bias) + gumbel) / self.tau)
else:
g = torch.sigmoid(torch.nn.functional.linear(x, G, bias))
return g
def forward(self, x):
    """Compute the NALU output ``y = g_add * a + g_mul * m``.

    ``a`` is the additive NAC path and ``m`` the multiplicative path,
    whose exact form is selected by ``self.nalu_mul``. The input and the
    gate activations are stored on ``self`` for use by ``regualizer()``.

    Raises:
        ValueError: if ``self.nalu_mul`` is not a supported option.
    """
    self.stored_input = x

    g_add = self._compute_gate(x, self.G_add, self.bias_add)
    self.stored_gate_add = g_add

    if self.nalu_two_gate:
        g_mul = self._compute_gate(x, self.G_mul, self.bias_mul)
        self.stored_gate_mul = g_mul
        self.writer.add_histogram('gate/add', g_add)
        self.writer.add_histogram('gate/mul', g_mul)
    else:
        g_mul = 1 - g_add
        self.writer.add_histogram('gate', g_add)
        self.writer.add_scalar('gate/mean', torch.mean(g_add), verbose_only=False)

    # a = W x = nac(x)
    a = self.nac_add(x)

    # m = exp(W log(|x| + eps)) = exp(nac(log(|x| + eps)))
    if self.nalu_mul == 'normal':
        m = torch.exp(self.nac_mul(
            torch.log(torch.abs(x) + self.eps)
        ))
    elif self.nalu_mul == 'safe':
        m = torch.exp(self.nac_mul(
            torch.log(torch.abs(x - 1) + 1)
        ))
    elif self.nalu_mul == 'max-safe':
        # FIX: this branch previously tested `self.nac_mul == 'max-safe'`;
        # `nac_mul` is the NAC module itself, so the comparison was always
        # False and nalu_mul='max-safe' fell through to the ValueError below.
        m = torch.exp(self.nac_mul(
            torch.log(torch.relu(x - 1) + 1)
        ))
    elif self.nalu_mul == 'trig':
        m = torch.sinh(self.nac_mul(
            torch.log(x+(x**2+1)**0.5 + self.eps)  # torch.asinh(x) does not exist
        ))
    elif self.nalu_mul == 'mnac':
        m = self.nac_mul(x)
    else:
        raise ValueError(f'Unsupported nalu_mul option ({self.nalu_mul})')

    self.writer.add_histogram('add', a)
    self.writer.add_histogram('mul', m)

    # y = g (*) a + (1 - g) (*) m
    y = g_add * a + g_mul * m
    return y
def extra_repr(self):
    """Summarize the layer configuration for repr()."""
    return (
        f'in_features={self.in_features}, out_features={self.out_features}, '
        f'eps={self.eps}, nalu_two_nac={self.nalu_two_nac}, nalu_bias={self.nalu_bias}'
    )
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/nac.py | stable_nalu/layer/nac.py |
import scipy.optimize
import numpy as np
import torch
from ..functional import nac_weight, sparsity_error, RegualizerNAUZ
from ..abstract import ExtendedTorchModule
from ._abstract_recurrent_cell import AbstractRecurrentCell
def nac_w_variance(r):
    r"""Calculates the variance of W.

    Assuming \hat{w} and \hat{m} are sampled from a uniform
    distribution with range [-r, r], this is the variance
    of w = tanh(\hat{w})*sigmoid(\hat{m}).

    Note: the docstring is a raw string — previously ``\hat`` was an
    invalid escape sequence in a plain string literal (SyntaxWarning on
    modern Python). The typo "Asumming" is also fixed.
    """
    if r == 0:
        # The uniform distribution degenerates to a point mass at 0.
        return 0
    return (1 - np.tanh(r) / r) * (r - np.tanh(r / 2)) * (1 / (2 * r))
def nac_w_optimal_r(fan_in, fan_out):
    """Computes the optimal Uniform[-r, r] given the fan

    Solves ``fan * Var[w(r)] == 2`` numerically on the bracket [0, 10].
    TODO: consider if there is an algebraic solution.
    """
    effective_fan = max(fan_in + fan_out, 5)

    def residual(r):
        return effective_fan * nac_w_variance(r) - 2

    return scipy.optimize.bisect(residual, 0, 10)
class NACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    The effective weight is W = tanh(W_hat) * sigmoid(M_hat), computed by
    `nac_weight`, which biases W toward {-1, 0, 1}.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features,
                 regualizer_z=0, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features

        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.M_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

        # Sparsity regualizer on unused inputs; a no-op when regualizer_z == 0.
        self._regualizer_nau_z = RegualizerNAUZ(zero=regualizer_z == 0)

    def reset_parameters(self):
        """Initialize W_hat and M_hat uniformly on the fan-optimal range."""
        bound = nac_w_optimal_r(self.in_features, self.out_features)
        for parameter in (self.W_hat, self.M_hat):
            torch.nn.init.uniform_(parameter, a=-bound, b=bound)

    def optimize(self, loss):
        self._regualizer_nau_z.reset()

    def regualizer(self):
        effective_weight = nac_weight(self.W_hat, self.M_hat, mode='normal')
        return super().regualizer({
            'z': self._regualizer_nau_z(effective_weight)
        })

    def forward(self, x, reuse=False):
        if self.allow_random:
            self._regualizer_nau_z.append_input(x)

        effective_weight = nac_weight(self.W_hat, self.M_hat, mode='normal')
        self.writer.add_histogram('W', effective_weight)
        self.writer.add_tensor('W', effective_weight)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(effective_weight), verbose_only=False)
        return torch.nn.functional.linear(x, effective_weight, self.bias)

    def extra_repr(self):
        return f'in_features={self.in_features}, out_features={self.out_features}'
class NACCell(AbstractRecurrentCell):
    """Implements the NAC (Neural Accumulator) as a recurrent cell

    Thin adapter that wraps ``NACLayer`` in the generic recurrent-cell
    machinery provided by ``AbstractRecurrentCell``.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(NACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/regualized_linear_nac.py | stable_nalu/layer/regualized_linear_nac.py |
import scipy.optimize
import numpy as np
import torch

from ..abstract import ExtendedTorchModule
from ..functional import Regualizer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class RegualizedLinearNACLayer(ExtendedTorchModule):
    """Implements the RegualizedLinearNAC

    A plain linear layer (no NAC parametrization); the weights are pushed
    toward the NAC value set by a bias regualizer instead.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features,
                 regualizer_shape='squared',
                 **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features

        # NOTE(review): `Regualizer` requires `from ..functional import
        # Regualizer` (as in the sibling NAC modules); without that import
        # constructing this layer raises NameError.
        self._regualizer_bias = Regualizer(
            support='nac', type='bias',
            shape=regualizer_shape
        )

        # Plain linear weight; bias is registered as None so state_dict and
        # F.linear treat the layer as bias-free.
        self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.W)

    def regualizer(self):
        return super().regualizer({
            'W': self._regualizer_bias(self.W)
        })

    def forward(self, input, reuse=False):
        self.writer.add_histogram('W', self.W)
        self.writer.add_tensor('W', self.W, verbose_only=False)
        return torch.nn.functional.linear(input, self.W, self.bias)

    def extra_repr(self):
        return 'in_features={}, out_features={}'.format(
            self.in_features, self.out_features
        )
class RegualizedLinearNACCell(AbstractRecurrentCell):
    """Implements the RegualizedLinearNAC as a recurrent cell

    Thin adapter that wraps ``RegualizedLinearNACLayer`` in the generic
    recurrent-cell machinery.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(RegualizedLinearNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/generalized.py | stable_nalu/layer/generalized.py |
import torch
from .basic import BasicLayer, BasicCell
from .nac import NACLayer, NACCell
from .mnac import MNACLayer, MNACCell
from .nalu import NALULayer, NALUCell
from .pos_nac import PosNACLayer, PosNACCell
from .pos_nalu import PosNALULayer, PosNALUCell
from .gumbel_nac import GumbelNACLayer, GumbelNACCell
from .gumbel_mnac import GumbelMNACLayer, GumbelMNACCell
from .gumbel_nalu import GumbelNALULayer, GumbelNALUCell
from .linear_nac import LinearNACLayer, LinearNACCell
from .linear_nalu import LinearNALULayer, LinearNALUCell
from .softmax_nac import SoftmaxNACLayer, SoftmaxNACCell
from .softmax_nalu import SoftmaxNALULayer, SoftmaxNALUCell
from .independent_nac import IndependentNACLayer, IndependentNACCell
from .independent_nalu import IndependentNALULayer, IndependentNALUCell
from .hard_softmax_nac import HardSoftmaxNACLayer, HardSoftmaxNACCell
from .hard_softmax_nalu import HardSoftmaxNALULayer, HardSoftmaxNALUCell
from .gradient_bandit_nac import GradientBanditNACLayer, GradientBanditNACCell
from .gradient_bandit_nalu import GradientBanditNALULayer, GradientBanditNALUCell
from .regualized_linear_nac import RegualizedLinearNACLayer, RegualizedLinearNACCell
from .regualized_linear_mnac import RegualizedLinearMNACLayer, RegualizedLinearMNACCell
from .regualized_linear_nalu import RegualizedLinearNALULayer, RegualizedLinearNALUCell
from .re_regualized_linear_nac import ReRegualizedLinearNACLayer, ReRegualizedLinearNACCell
from .re_regualized_linear_mnac import ReRegualizedLinearMNACLayer, ReRegualizedLinearMNACCell
from .re_regualized_linear_nalu import ReRegualizedLinearNALULayer, ReRegualizedLinearNALUCell
from .re_regualized_linear_pos_nac import ReRegualizedLinearPosNACLayer, ReRegualizedLinearPosNACCell
from .silly_re_regualized_linear_mnac import SillyReRegualizedLinearMNACLayer, SillyReRegualizedLinearMNACCell
from ..abstract import ExtendedTorchModule
# Registry mapping CLI/unit names to layer classes. Entries mapped to None
# are names that are recognized but have no layer implementation; selecting
# one would fail at construction time.
unit_name_to_layer_class = {
    'NAC': NACLayer,
    'MNAC': MNACLayer,
    'NALU': NALULayer,
    'PosNAC': PosNACLayer,
    'PosNALU': PosNALULayer,
    'GumbelNAC': GumbelNACLayer,
    'GumbelMNAC': GumbelMNACLayer,
    'GumbelNALU': GumbelNALULayer,
    'LinearNAC': LinearNACLayer,
    'LinearNALU': LinearNALULayer,
    'SoftmaxNAC': SoftmaxNACLayer,
    'SoftmaxNALU': SoftmaxNALULayer,
    'IndependentNAC': IndependentNACLayer,
    'IndependentNALU': IndependentNALULayer,
    'HardSoftmaxNAC': HardSoftmaxNACLayer,
    'HardSoftmaxNALU': HardSoftmaxNALULayer,
    'GradientBanditNAC': GradientBanditNACLayer,
    'GradientBanditNALU': GradientBanditNALULayer,
    'RegualizedLinearNAC': RegualizedLinearNACLayer,
    'RegualizedLinearMNAC': RegualizedLinearMNACLayer,
    'RegualizedLinearNALU': RegualizedLinearNALULayer,
    'ReRegualizedLinearNAC': ReRegualizedLinearNACLayer,
    'ReRegualizedLinearMNAC': ReRegualizedLinearMNACLayer,
    'ReRegualizedLinearNALU': ReRegualizedLinearNALULayer,
    'ReRegualizedLinearPosNAC': ReRegualizedLinearPosNACLayer,
    'SillyReRegualizedLinearNAC': None,
    'SillyReRegualizedLinearMNAC': SillyReRegualizedLinearMNACLayer,
    'SillyReRegualizedLinearNALU': None,
}
# Registry mapping unit names to recurrent-cell classes.
# NOTE(review): several imported cells (e.g. RegualizedLinearMNACCell,
# LinearNACCell, LinearNALUCell) are absent from this map even though their
# layer counterparts are registered above — confirm whether that is
# intentional before relying on them in recurrent tasks.
unit_name_to_cell_class = {
    'NAC': NACCell,
    'MNAC': MNACCell,
    'NALU': NALUCell,
    'PosNAC': PosNACCell,
    'PosNALU': PosNALUCell,
    'GumbelNAC': GumbelNACCell,
    'GumbelMNAC': GumbelMNACCell,
    'GumbelNALU': GumbelNALUCell,
    'SoftmaxNAC': SoftmaxNACCell,
    'SoftmaxNALU': SoftmaxNALUCell,
    'IndependentNAC': IndependentNACCell,
    'IndependentNALU': IndependentNALUCell,
    'HardSoftmaxNAC': HardSoftmaxNACCell,
    'HardSoftmaxNALU': HardSoftmaxNALUCell,
    'GradientBanditNAC': GradientBanditNACCell,
    'GradientBanditNALU': GradientBanditNALUCell,
    'RegualizedLinearNAC': RegualizedLinearNACCell,
    'RegualizedLinearNALU': RegualizedLinearNALUCell,
    'ReRegualizedLinearNAC': ReRegualizedLinearNACCell,
    'ReRegualizedLinearMNAC': ReRegualizedLinearMNACCell,
    'ReRegualizedLinearNALU': ReRegualizedLinearNALUCell,
    'ReRegualizedLinearPosNAC': ReRegualizedLinearPosNACCell,
}
class GeneralizedLayer(ExtendedTorchModule):
    """Abstracts all layers, both basic, NAC and NALU

    Dispatches on `unit_name`: registered arithmetic units come from
    `unit_name_to_layer_class`; any other name is treated as a BasicLayer
    activation.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
        unit_name: name of the unit (e.g. NAC, Sigmoid, Tanh)
    """
    UNIT_NAMES = set(unit_name_to_layer_class.keys()) | BasicLayer.ACTIVATIONS

    def __init__(self, in_features, out_features, unit_name, writer=None, name=None, **kwags):
        super().__init__('layer', name=name, writer=writer, **kwags)
        self.in_features = in_features
        self.out_features = out_features
        self.unit_name = unit_name

        if unit_name in unit_name_to_layer_class:
            layer_cls = unit_name_to_layer_class[unit_name]
            self.layer = layer_cls(
                in_features, out_features,
                writer=self.writer,
                **kwags)
        else:
            # Fall back to a plain layer with `unit_name` as its activation.
            self.layer = BasicLayer(
                in_features, out_features,
                activation=unit_name,
                writer=self.writer,
                **kwags)

    def reset_parameters(self):
        self.layer.reset_parameters()

    def forward(self, input):
        return self.layer(input)

    def extra_repr(self):
        return (f'in_features={self.in_features}, '
                f'out_features={self.out_features}, '
                f'unit_name={self.unit_name}')
class GeneralizedCell(ExtendedTorchModule):
    """Abstracts all cell, RNN-tanh, RNN-ReLU, GRU, LSTM, NAC and NALU

    Dispatches on `unit_name`: registered arithmetic cells come from
    `unit_name_to_cell_class`; the standard recurrent cells are built from
    torch.nn directly.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
        unit_name: name of the unit (e.g. RNN-tanh, LSTM, NAC)
    """
    UNIT_NAMES = set(unit_name_to_cell_class.keys()) | {'GRU', 'LSTM', 'RNN-tanh', 'RNN-ReLU', 'RNN-linear'}

    def __init__(self, input_size, hidden_size, unit_name, writer=None, **kwags):
        super().__init__('cell', writer=writer, **kwags)
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.unit_name = unit_name

        if unit_name in unit_name_to_cell_class:
            Cell = unit_name_to_cell_class[unit_name]
            self.cell = Cell(input_size, hidden_size,
                             writer=self.writer,
                             **kwags)
        elif unit_name == 'none':
            # NOTE(review): `PassThoughCell` is not imported in this module,
            # so unit_name='none' raises NameError — confirm where this class
            # lives and import it before using the 'none' option.
            self.cell = PassThoughCell(input_size, hidden_size,
                                       **kwags)
        elif unit_name == 'GRU':
            self.cell = torch.nn.GRUCell(input_size, hidden_size)
        elif unit_name == 'LSTM':
            self.cell = torch.nn.LSTMCell(input_size, hidden_size)
        elif unit_name == 'RNN-tanh':
            self.cell = torch.nn.RNNCell(input_size, hidden_size,
                                         nonlinearity='tanh')
        elif unit_name == 'RNN-ReLU':
            self.cell = torch.nn.RNNCell(input_size, hidden_size,
                                         nonlinearity='relu')
        elif unit_name == 'RNN-linear':
            self.cell = BasicCell(input_size, hidden_size,
                                  activation='linear',
                                  writer=self.writer,
                                  **kwags)
        else:
            raise NotImplementedError(
                f'{unit_name} is not an implemented cell type')

    def reset_parameters(self):
        self.cell.reset_parameters()

    def forward(self, x_t, h_tm1):
        # Step the wrapped cell: input at time t plus previous hidden state.
        return self.cell(x_t, h_tm1)

    def extra_repr(self):
        return 'input_size={}, hidden_size={}, unit_name={}'.format(
            self.input_size, self.hidden_size, self.unit_name
        )
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/softmax_nalu.py | stable_nalu/layer/softmax_nalu.py |
from .softmax_nac import SoftmaxNACLayer
from ._abstract_nalu import AbstractNALULayer
from ._abstract_recurrent_cell import AbstractRecurrentCell
class SoftmaxNALULayer(AbstractNALULayer):
    """Implements the NALU (Neural Arithmetic Logic Unit)

    Uses ``SoftmaxNACLayer`` for both the additive and multiplicative NAC
    (the second class argument is None, so the same NAC class is reused).

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """
    def __init__(self, in_features, out_features, **kwargs):
        super().__init__(SoftmaxNACLayer, None, in_features, out_features, **kwargs)
class SoftmaxNALUCell(AbstractRecurrentCell):
    """Implements the NALU (Neural Arithmetic Logic Unit) as a recurrent cell

    Thin adapter that wraps ``SoftmaxNALULayer`` in the generic
    recurrent-cell machinery.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(SoftmaxNALULayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/re_regualized_linear_pos_nac.py | stable_nalu/layer/re_regualized_linear_pos_nac.py |
import scipy.optimize
import numpy as np
import torch
import math
from ..abstract import ExtendedTorchModule
from ..functional import mnac, Regualizer, sparsity_error, RegualizerNMUZ
from ._abstract_recurrent_cell import AbstractRecurrentCell
class ReRegualizedLinearPosNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    A linear layer whose weights are kept inside [mnac_epsilon, 1], either
    by clipping (nac_oob='clip') or by clamping in forward plus an
    out-of-bounds regualizer (nac_oob='regualized').

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features,
                 nac_oob='regualized', regualizer_shape='squared',
                 mnac_epsilon=0, mnac_normalized=False, regualizer_z=0,
                 **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features
        self.mnac_normalized = mnac_normalized
        self.mnac_epsilon = mnac_epsilon
        self.nac_oob = nac_oob

        # Pulls the weights toward the sparse value set.
        self._regualizer_bias = Regualizer(
            support='mnac', type='bias',
            shape=regualizer_shape, zero_epsilon=mnac_epsilon
        )
        # Out-of-bounds penalty; disabled when clipping already enforces it.
        self._regualizer_oob = Regualizer(
            support='mnac', type='oob',
            shape=regualizer_shape, zero_epsilon=mnac_epsilon,
            zero=self.nac_oob == 'clip'
        )
        self._regualizer_nmu_z = RegualizerNMUZ(zero=regualizer_z == 0)

        self.W = torch.nn.Parameter(torch.Tensor(out_features, in_features))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        """Initialize W uniformly around 0.5 and clear the z-regualizer."""
        spread = min(0.25, math.sqrt(3.0) * math.sqrt(0.25))
        torch.nn.init.uniform_(self.W, 0.5 - spread, 0.5 + spread)
        self._regualizer_nmu_z.reset()

    def optimize(self, loss):
        self._regualizer_nmu_z.reset()
        if self.nac_oob == 'clip':
            # Hard projection back into the valid range after each step.
            self.W.data.clamp_(0.0 + self.mnac_epsilon, 1.0)

    def regualizer(self):
        return super().regualizer({
            'W': self._regualizer_bias(self.W),
            'z': self._regualizer_nmu_z(self.W),
            'W-OOB': self._regualizer_oob(self.W)
        })

    def forward(self, x, reuse=False):
        if self.allow_random:
            self._regualizer_nmu_z.append_input(x)

        if self.nac_oob == 'regualized':
            effective_weight = torch.clamp(self.W, 0.0 + self.mnac_epsilon, 1.0)
        else:
            effective_weight = self.W

        self.writer.add_histogram('W', effective_weight)
        self.writer.add_tensor('W', effective_weight)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(effective_weight), verbose_only=False)
        return torch.nn.functional.linear(x, effective_weight, self.bias)

    def extra_repr(self):
        return f'in_features={self.in_features}, out_features={self.out_features}'
class ReRegualizedLinearPosNACCell(AbstractRecurrentCell):
    """Implements the NAC (Neural Accumulator) as a recurrent cell

    Thin adapter that wraps ``ReRegualizedLinearPosNACLayer`` in the generic
    recurrent-cell machinery.

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(ReRegualizedLinearPosNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/layer/softmax_nac.py | stable_nalu/layer/softmax_nac.py |
import math
import torch
from ..abstract import ExtendedTorchModule
from ..functional import sparsity_error
from ._abstract_recurrent_cell import AbstractRecurrentCell
class SoftmaxNACLayer(ExtendedTorchModule):
    """Implements the NAC (Neural Accumulator)

    Each weight is a softmax-weighted combination of the fixed target
    values {1, -1, 0}.

    Arguments:
        in_features: number of ingoing features
        out_features: number of outgoing features
    """

    def __init__(self, in_features, out_features, **kwargs):
        super().__init__('nac', **kwargs)
        self.in_features = in_features
        self.out_features = out_features

        # Define the target weights. Also, put 0 last such that p1 = p2 = 0
        # corresponds to p3 = 1 => w = 0.
        self.register_buffer('target_weights', torch.tensor([1, -1, 0], dtype=torch.float32))

        # Only two trainable logits per weight; the third one is a fixed
        # zero buffer. This removes the softmax redundancy and keeps the
        # parameter count comparable with NAC.
        self.W_hat = torch.nn.Parameter(torch.Tensor(out_features, in_features, 2))
        self.register_buffer('W_hat_k', torch.Tensor(out_features, in_features, 1))
        self.register_parameter('bias', None)

    def reset_parameters(self):
        # Use a gain of sqrt(0.5). Assuming softmax'(0) ~ 1 (as for sigmoid):
        #   Var[W] = 2 * Var[S_i] = 2 / (fan_in + fan_out)
        # so each logit needs Var[S_i] = 1/2 * 2 / (fan_in + fan_out).
        # Not exact, since S_1, S_2 and S_3 are not entirely uncorrelated.
        torch.nn.init.xavier_uniform_(self.W_hat, gain=math.sqrt(0.5))
        torch.nn.init.constant_(self.W_hat_k, 0)

    def forward(self, input, reuse=False):
        # Stack trainable and fixed logits: size = [out, in, 3].
        logits = torch.cat((self.W_hat, self.W_hat_k), dim=-1)

        # W is the softmax-expectation of the target weights.
        pi = torch.nn.functional.softmax(logits, dim=-1)
        W = pi @ self.target_weights

        self.writer.add_histogram('W', W)
        self.writer.add_tensor('W', W)
        self.writer.add_scalar('W/sparsity_error', sparsity_error(W), verbose_only=False)
        return torch.nn.functional.linear(input, W, self.bias)

    def extra_repr(self):
        return f'in_features={self.in_features}, out_features={self.out_features}'
class SoftmaxNACCell(AbstractRecurrentCell):
    """Implements the softmax NAC (Neural Accumulator) as a recurrent cell

    (Docstring previously said "Gumbel NAC" — a copy-paste leftover; this
    cell wraps ``SoftmaxNACLayer``.)

    Arguments:
        input_size: number of ingoing features
        hidden_size: number of outgoing features
    """
    def __init__(self, input_size, hidden_size, **kwargs):
        super().__init__(SoftmaxNACLayer, input_size, hidden_size, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/number_translation_test.py | stable_nalu/dataset/number_translation_test.py |
from nose.tools import *
import torch
import numpy as np
from stable_nalu.dataset import NumberTranslationDataset
def encode_as_string(number):
    """Return *number* encoded as a single space-joined string of word tokens."""
    return ' '.join(NumberTranslationDataset.encode(number, as_strings=True))
def test_number_encoding():
    """Spot-checks the English word encoding for representative numbers in 1..999."""
    assert_equal(encode_as_string(1), 'one')
    assert_equal(encode_as_string(2), 'two')
    assert_equal(encode_as_string(3), 'three')
    assert_equal(encode_as_string(4), 'four')
    assert_equal(encode_as_string(5), 'five')
    assert_equal(encode_as_string(6), 'six')
    assert_equal(encode_as_string(7), 'seven')
    assert_equal(encode_as_string(8), 'eight')
    assert_equal(encode_as_string(9), 'nine')
    assert_equal(encode_as_string(10), 'ten')
    assert_equal(encode_as_string(11), 'eleven')
    assert_equal(encode_as_string(12), 'twelve')
    assert_equal(encode_as_string(13), 'thirteen')
    assert_equal(encode_as_string(14), 'fourteen')
    assert_equal(encode_as_string(15), 'fifteen')
    assert_equal(encode_as_string(16), 'sixteen')
    assert_equal(encode_as_string(17), 'seventeen')
    assert_equal(encode_as_string(18), 'eighteen')
    assert_equal(encode_as_string(19), 'nineteen')
    assert_equal(encode_as_string(20), 'twenty')
    assert_equal(encode_as_string(30), 'thirty')
    assert_equal(encode_as_string(40), 'forty')
    assert_equal(encode_as_string(50), 'fifty')
    assert_equal(encode_as_string(60), 'sixty')
    assert_equal(encode_as_string(70), 'seventy')
    assert_equal(encode_as_string(80), 'eighty')
    assert_equal(encode_as_string(90), 'ninety')
    assert_equal(encode_as_string(100), 'one hundred')
    assert_equal(encode_as_string(230), 'two hundred and thirty')
    assert_equal(encode_as_string(235), 'two hundred and thirty five')
    assert_equal(encode_as_string(35), 'thirty five')
    assert_equal(encode_as_string(119), 'one hundred and nineteen')
def test_train_contains_all_tokens():
    """For 100 seeds, the train split must cover the full 29-token vocabulary."""
    for seed in range(100):
        dataset = NumberTranslationDataset(seed=seed).fork(subset='train')
        tokens_seen = set()
        for x, t in dataset:
            tokens_seen |= set(x.tolist())
        assert_equal(tokens_seen, set(range(29)))
def test_correct_length():
    """Checks the fixed sizes of the train/valid/test splits (169/200/630)."""
    dataset = NumberTranslationDataset(seed=0)
    dataset_train = dataset.fork(subset='train')
    dataset_valid = dataset.fork(subset='valid')
    dataset_test = dataset.fork(subset='test')

    assert_equal(len(dataset_train), 169)
    assert_equal(len(dataset_valid), 200)
    assert_equal(len(dataset_test), 630)
def test_all_subsets_contains_all_numbers():
    """The union of the three splits must cover every target number 1..999."""
    dataset = NumberTranslationDataset(seed=0)

    numbers = set()
    for subset_name in ['train', 'valid', 'test']:
        subset = dataset.fork(subset=subset_name)
        for i in range(len(subset)):
            x, t = subset[i]
            numbers.add(int(t.numpy().item(0)))
    assert_equal(numbers, set(range(1, 1000)))
def test_subsets_are_distinct():
    """The target numbers in train/valid/test must be pairwise disjoint."""
    dataset = NumberTranslationDataset(seed=0)

    dataset_train = dataset.fork(subset='train')
    numbers_train = { dataset_train[i][1].numpy().item(0) for i in range(len(dataset_train)) }
    dataset_valid = dataset.fork(subset='valid')
    numbers_valid = { dataset_valid[i][1].numpy().item(0) for i in range(len(dataset_valid)) }
    dataset_test = dataset.fork(subset='test')
    numbers_test = { dataset_test[i][1].numpy().item(0) for i in range(len(dataset_test)) }

    assert_equal(numbers_train & numbers_valid, set())
    assert_equal(numbers_valid & numbers_test, set())
    assert_equal(numbers_train & numbers_test, set())
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/simple_function_recurrent_test.py | stable_nalu/dataset/simple_function_recurrent_test.py |
from nose.tools import *
import scipy.stats
import torch
import numpy as np
from stable_nalu.dataset import SimpleFunctionRecurrentDataset
def test_batch_shape():
    """A batch of 128 sequences of length 16 has shape (128, 16, 10) / (128, 1)."""
    dataset = SimpleFunctionRecurrentDataset(
        operation='add', seed=0
    )
    dataset_test = iter(dataset.fork(seq_length=16).dataloader(batch_size=128))
    x_batch, t_batch = next(dataset_test)
    assert_equal(x_batch.size(), (128, 16, 10))
    assert_equal(t_batch.size(), (128, 1))
def test_observation_shape():
    """A single observation of seq_length 16 has shape (16, 10) with a (1,) target."""
    dataset = SimpleFunctionRecurrentDataset(
        operation='add', seed=0
    )
    x_batch, t_batch = dataset.fork(seq_length=16)[0]
    assert_equal(x_batch.size(), (16, 10))
    assert_equal(t_batch.size(), (1, ))
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/sequential_mnist.py | stable_nalu/dataset/sequential_mnist.py |
import os.path as path
import numpy as np
import torch
import torch.utils.data
import torchvision
from typing import Tuple, NamedTuple, Union
from ._dataloader import DataLoaderCudaWrapper
from ._partial_dataset import PartialDataset
class ItemShape(NamedTuple):
    # Shapes of one (input, target) pair; `None` marks a variable dimension
    # (the sequence length).
    input: Tuple[Union[None, int], ...]
    target: Tuple[Union[None, int], ...]
class OPERATIONS:
    """Maps operation names to target transforms.

    Each transform takes a 1-D array of digit labels and returns a column
    vector (shape [seq_length, 1]) of targets.
    """

    @staticmethod
    def sum(seq):
        # FIX: previously returned OPERATIONS.sum(seq), recursing forever.
        # Delegates to the cumulative variant, mirroring prod/div below.
        return OPERATIONS.cumsum(seq)

    @staticmethod
    def cumsum(seq):
        return np.cumsum(seq).reshape(-1, 1)

    @staticmethod
    def prod(seq):
        return OPERATIONS.cumprod(seq)

    @staticmethod
    def cumprod(seq):
        return np.cumprod(seq).reshape(-1, 1)

    @staticmethod
    def div(seq):
        # NOTE(review): delegates to cumprod, not cumdiv — possibly a typo;
        # confirm the intended semantics before changing.
        return OPERATIONS.cumprod(seq)

    @staticmethod
    def cumdiv(seq):
        # Cumulative product of reciprocals: 1/(s_1 * s_2 * ... * s_i).
        return np.cumprod(np.reciprocal(seq)).reshape(-1, 1)
# Cache directory for downloaded datasets, resolved relative to this module.
THIS_DIR = path.dirname(path.realpath(__file__))
DATA_DIR = path.join(THIS_DIR, 'data')
class SequentialMnistDataset:
    """Factory for sequential-MNIST regression tasks.

    Arguments:
        operation: name of a transform defined on OPERATIONS
            (e.g. 'sum', 'cumsum', 'prod', 'cumprod', 'div', 'cumdiv')
        num_workers: DataLoader worker count
        mnist_digits: digit classes to include in the sequences
        seed: seed for the internal RandomState used when forking
        use_cuda: move batches to CUDA when iterating
    """

    def __init__(self, operation,
                 num_workers=1,
                 mnist_digits=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
                 seed=None,
                 use_cuda=False):
        super().__init__()
        self._operation = getattr(OPERATIONS, operation)
        self._num_workers = num_workers
        self._use_cuda = use_cuda
        self._rng = np.random.RandomState(seed)
        self._mnist_digits = set(mnist_digits)

    def is_cum_task(self):
        # FIX: `self` was missing from the signature, so calling this on an
        # instance raised TypeError (and the `self` lookups in the body
        # would have failed anyway).
        if self._operation == OPERATIONS.sum:
            return False
        elif self._operation == OPERATIONS.cumsum:
            return True
        elif self._operation == OPERATIONS.prod:
            return False
        elif self._operation == OPERATIONS.cumprod:
            return True
        elif self._operation == OPERATIONS.div:
            return False
        elif self._operation == OPERATIONS.cumdiv:
            return True
        else:
            raise ValueError('bad operation')

    def get_item_shape(self):
        # All operations share the same item shape: a variable-length
        # sequence of 28x28 images, and a matching column of targets.
        if self._operation == OPERATIONS.sum:
            return ItemShape((None, 28, 28), (None, 1))
        elif self._operation == OPERATIONS.cumsum:
            return ItemShape((None, 28, 28), (None, 1))
        elif self._operation == OPERATIONS.prod:
            return ItemShape((None, 28, 28), (None, 1))
        elif self._operation == OPERATIONS.cumprod:
            return ItemShape((None, 28, 28), (None, 1))
        elif self._operation == OPERATIONS.div:
            return ItemShape((None, 28, 28), (None, 1))
        elif self._operation == OPERATIONS.cumdiv:
            return ItemShape((None, 28, 28), (None, 1))
        else:
            raise ValueError('bad operation')

    def fork(self, seq_length=10, subset='train', seed=None):
        """Create a concrete dataset split with its own RNG."""
        if subset not in {'train', 'valid', 'test'}:
            raise ValueError(f'subset must be either train or test, it is {subset}')
        rng = np.random.RandomState(self._rng.randint(0, 2**32 - 1) if seed is None else seed)
        return SequentialMnistDatasetFork(self, seq_length, subset, rng)
class SequentialMnistDatasetFork(torch.utils.data.Dataset):
    """A concrete train/valid/test split of SequentialMnistDataset.

    Groups MNIST images into non-overlapping sequences of ``seq_length``
    and computes the regression target by applying the parent's operation
    to the digit labels.
    """

    def __init__(self, parent, seq_length, subset, rng):
        super().__init__()
        self._operation = parent._operation
        self._num_workers = parent._num_workers
        self._use_cuda = parent._use_cuda
        self._mnist_digits = parent._mnist_digits

        self._subset = subset
        self._seq_length = seq_length
        self._rng = rng

        # 'train' and 'valid' both come from the MNIST training split; the
        # last 5000 training images are held out as the validation set.
        full_dataset = torchvision.datasets.MNIST(
            root=DATA_DIR,
            train=subset in ['train', 'valid'],
            download=True,
            # Transform is from https://github.com/pytorch/examples/blob/master/mnist/main.py
            # which is what is also used in the NALU paper
            transform=torchvision.transforms.Compose([
                torchvision.transforms.ToTensor(),
                torchvision.transforms.Normalize((0.1307,), (0.3081,))
            ])
        )

        if subset == 'train':
            self._dataset = PartialDataset(full_dataset, 0, 55000)
        elif subset == 'valid':
            self._dataset = PartialDataset(full_dataset, 55000, 5000)
        elif subset == 'test':
            self._dataset = full_dataset

        # Random permutation of the indices whose label is an allowed digit;
        # consecutive runs of `seq_length` of these form one observation.
        self._index_mapping = self._rng.permutation([
            i for (i, (x, t)) in enumerate(self._dataset) if t in self._mnist_digits
        ])

    def __getitem__(self, index):
        # Collect the `seq_length` images/labels for observation `index`.
        mnist_images = []
        mnist_targets = []
        for mnist_index in range(index * self._seq_length, (index + 1) * self._seq_length):
            image, target = self._dataset[self._index_mapping[mnist_index]]
            mnist_images.append(image)  # image.size() = [1, 28, 28]
            mnist_targets.append(target)

        data = torch.stack(mnist_images)  # data.size() = [seq_length, 1, 28, 28]
        target = self._operation(np.stack(mnist_targets))

        return (
            data,
            torch.tensor(target, dtype=torch.float32)
        )

    def __len__(self):
        # Trailing images that do not fill a whole sequence are dropped.
        return len(self._index_mapping) // self._seq_length

    def dataloader(self, batch_size=64, shuffle=True):
        """Wrap this split in a DataLoader, optionally moving batches to CUDA."""
        batcher = torch.utils.data.DataLoader(
            self,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=self._num_workers)

        if self._use_cuda:
            return DataLoaderCudaWrapper(batcher)
        else:
            return batcher
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/sequential_svhn.py | stable_nalu/dataset/sequential_svhn.py |
import os.path as path
import numpy as np
import torch
import torch.utils.data
import torchvision
from typing import Tuple, NamedTuple, Union
from ._dataloader import DataLoaderCudaWrapper
from ._partial_dataset import PartialDataset
class ItemShape(NamedTuple):
    # Shapes of one (input, target) pair; `None` marks a variable dimension
    # (the sequence length).
    input: Tuple[Union[None, int], ...]
    target: Tuple[Union[None, int], ...]
class OPERATIONS:
    """Maps operation names to target transforms.

    Each transform takes a 1-D array of digit labels and returns a column
    vector (shape [seq_length, 1]) of targets.
    """

    @staticmethod
    def sum(seq):
        # FIX: previously returned OPERATIONS.sum(seq), recursing forever.
        # Delegates to the cumulative variant, mirroring prod below.
        return OPERATIONS.cumsum(seq)

    @staticmethod
    def cumsum(seq):
        return np.cumsum(seq).reshape(-1, 1)

    @staticmethod
    def prod(seq):
        return OPERATIONS.cumprod(seq)

    @staticmethod
    def cumprod(seq):
        return np.cumprod(seq).reshape(-1, 1)
# Cache directory for downloaded datasets, resolved relative to this module.
THIS_DIR = path.dirname(path.realpath(__file__))
DATA_DIR = path.join(THIS_DIR, 'data')
class SequentialSvhnDataset:
    """Factory for sequential-SVHN regression tasks.

    Arguments:
        operation: name of a transform defined on OPERATIONS
            (e.g. 'sum', 'cumsum', 'prod', 'cumprod')
        num_workers: DataLoader worker count
        svhn_digits: digit classes to include in the sequences
        seed: seed for the internal RandomState used when forking
        use_cuda: move batches to CUDA when iterating
    """

    def __init__(self, operation,
                 num_workers=1,
                 svhn_digits=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
                 seed=None,
                 use_cuda=False):
        super().__init__()
        self._operation = getattr(OPERATIONS, operation)
        self._num_workers = num_workers
        self._use_cuda = use_cuda
        self._rng = np.random.RandomState(seed)
        self._svhn_digits = set(svhn_digits)

    def is_cum_task(self):
        # FIX: `self` was missing from the signature, so calling this on an
        # instance raised TypeError (and the `self` lookups in the body
        # would have failed anyway).
        if self._operation == OPERATIONS.sum:
            return False
        elif self._operation == OPERATIONS.cumsum:
            return True
        elif self._operation == OPERATIONS.prod:
            return False
        elif self._operation == OPERATIONS.cumprod:
            return True
        else:
            raise ValueError('bad operation')

    def get_item_shape(self):
        # All operations share the same item shape.
        if self._operation == OPERATIONS.sum:
            return ItemShape((None, 28, 28), (None, 1))
        elif self._operation == OPERATIONS.cumsum:
            return ItemShape((None, 28, 28), (None, 1))
        elif self._operation == OPERATIONS.prod:
            return ItemShape((None, 28, 28), (None, 1))
        elif self._operation == OPERATIONS.cumprod:
            return ItemShape((None, 28, 28), (None, 1))
        else:
            raise ValueError('bad operation')

    def fork(self, seq_length=10, subset='train', seed=None):
        """Create a concrete dataset split with its own RNG."""
        if subset not in {'train', 'valid', 'test'}:
            raise ValueError(f'subset must be either train or test, it is {subset}')
        rng = np.random.RandomState(self._rng.randint(0, 2**32 - 1) if seed is None else seed)
        return SequentialSvhnDatasetFork(self, seq_length, subset, rng)
class SequentialSvhnDatasetFork(torch.utils.data.Dataset):
    """A concrete train/valid/test split of SequentialSvhnDataset.

    Groups SVHN images into non-overlapping sequences of ``seq_length``
    and computes the regression target by applying the parent's operation
    to the digit labels.
    """
    # Class-level cache so the raw SVHN split is only loaded once per
    # process, even when several forks are created.
    raw_datasets = dict()

    def __init__(self, parent, seq_length, subset, rng):
        super().__init__()
        self._operation = parent._operation
        self._num_workers = parent._num_workers
        self._use_cuda = parent._use_cuda
        self._svhn_digits = parent._svhn_digits

        self._subset = subset
        self._seq_length = seq_length
        self._rng = rng

        # 'train' and 'valid' both come from the SVHN training split; the
        # last 5000 training images are held out as the validation set.
        split_name = 'train' if subset in ['train', 'valid'] else 'test'
        if split_name in SequentialSvhnDatasetFork.raw_datasets:
            full_dataset = SequentialSvhnDatasetFork.raw_datasets[split_name]
        else:
            full_dataset = torchvision.datasets.SVHN(
                root=DATA_DIR,
                split='train' if subset in ['train', 'valid'] else 'test',
                download=True,
                transform=torchvision.transforms.Compose([
                    torchvision.transforms.ToTensor()
                ])
            )
            SequentialSvhnDatasetFork.raw_datasets[split_name] = full_dataset

        if subset == 'train':
            self._dataset = PartialDataset(full_dataset, 0, 73257 - 5000)
        elif subset == 'valid':
            self._dataset = PartialDataset(full_dataset, 73257 - 5000, 5000)
        elif subset == 'test':
            self._dataset = full_dataset

        # Random permutation of the indices whose label is an allowed digit;
        # consecutive runs of `seq_length` of these form one observation.
        self._index_mapping = self._rng.permutation([
            i for (i, (x, t)) in enumerate(self._dataset) if t in self._svhn_digits
        ])

    def __getitem__(self, index):
        # Collect the `seq_length` images/labels for observation `index`.
        svhn_images = []
        svhn_targets = []
        for svhn_index in range(index * self._seq_length, (index + 1) * self._seq_length):
            image, target = self._dataset[self._index_mapping[svhn_index]]
            svhn_images.append(image)  # image.size() = [3, 32, 32]
            svhn_targets.append(target)

        data = torch.stack(svhn_images)  # data.size() = [seq_length, 3, 32, 32]
        target = self._operation(np.stack(svhn_targets))

        return (
            data,
            torch.tensor(target, dtype=torch.float32)
        )

    def __len__(self):
        # Trailing images that do not fill a whole sequence are dropped.
        return len(self._index_mapping) // self._seq_length

    def dataloader(self, batch_size=64, shuffle=True):
        """Wrap this split in a DataLoader, optionally moving batches to CUDA."""
        batcher = torch.utils.data.DataLoader(
            self,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=self._num_workers)

        if self._use_cuda:
            return DataLoaderCudaWrapper(batcher)
        else:
            return batcher
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/_dataloader.py | stable_nalu/dataset/_dataloader.py |
class DataLoaderCudaWrapper:
    """Wrap a DataLoader(-like) iterable and move every value of every
    batch to CUDA as batches are yielded.

    Attribute access is delegated to the wrapped batcher, so this acts as
    a drop-in replacement for the underlying DataLoader.
    """

    def __init__(self, batcher):
        self._batcher = batcher

    def __getattr__(self, name):
        # Delegate anything not defined here to the wrapped batcher.
        return getattr(self._batcher, name)

    def __iter__(self):
        batcher = iter(self._batcher)
        # Bug fix: materialize each transferred batch as a tuple. The
        # previous code yielded a lazy generator per batch, which could
        # only be consumed once and deferred .cuda() errors to whenever
        # the consumer happened to iterate it.
        return map(lambda values: tuple(value.cuda() for value in values),
                   batcher)

    def __len__(self):
        return len(self._batcher)
class FastDataLoader:
    """Minimal batching iterator over a sliceable dataset.

    Slices the dataset directly instead of going through
    torch.utils.data.DataLoader. A trailing partial batch is dropped,
    matching __len__'s floor division.
    """

    def __init__(self, dataset, batch_size, use_cuda):
        self.dataset = dataset
        self.batch_size = batch_size
        self.use_cuda = use_cuda

    def __iter__(self):
        total = len(self.dataset)
        for start in range(0, len(self) * self.batch_size, self.batch_size):
            stop = min(total, start + self.batch_size)
            values = self.dataset[start:stop]
            if self.use_cuda:
                yield tuple(value.cuda() for value in values)
            else:
                yield values

    def __len__(self):
        return len(self.dataset) // self.batch_size
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/sequential_mnist_test.py | stable_nalu/dataset/sequential_mnist_test.py |
from nose.tools import *
import torch
import numpy as np
from stable_nalu.dataset import SequentialMnistDataset
def test_seed_gives_consistent_output():
    """Two forks built from identically-seeded datasets must agree."""
    def make_fork():
        return SequentialMnistDataset(
            operation='count',
            seed=0
        ).fork(seq_length=1, subset='train')

    dataset_a = make_fork()
    dataset_b = make_fork()

    assert_equal(len(dataset_a), len(dataset_b))
    for index in range(100):
        x_a, t_a = dataset_a[index]
        x_b, t_b = dataset_b[index]

        # Same seed must give byte-equal observations and targets.
        np.testing.assert_almost_equal(x_a.numpy(), x_b.numpy())
        np.testing.assert_almost_equal(t_a.numpy(), t_b.numpy())
        assert_equal(x_a.size(), (1, 1, 28, 28))
        assert_equal(x_b.size(), (1, 1, 28, 28))
def test_train_is_distinct_from_test():
    """The train and test forks must differ in size and in content."""
    dataset_train = SequentialMnistDataset(
        operation='count',
        seed=0
    ).fork(seq_length=1, subset='train')
    dataset_test = SequentialMnistDataset(
        operation='count',
        seed=0
    ).fork(seq_length=1, subset='test')

    assert_not_equal(len(dataset_train), len(dataset_test))
    for index in range(100):
        x_train, _ = dataset_train[index]
        x_test, _ = dataset_test[index]

        # Train images must not coincide with test images, even though
        # both datasets share the same seed.
        assert not np.allclose(x_train.numpy(), x_test.numpy())
        assert_equal(x_train.size(), (1, 1, 28, 28))
        assert_equal(x_test.size(), (1, 1, 28, 28))
def test_count_dataset():
    """The 'count' target must be a 10-long histogram of digit labels."""
    for seq_length in [1, 10, 100]:
        # A 'sum' dataset with seq_length=1 reduces each sequence to its
        # single digit label, so it serves as a per-image label oracle.
        # This presumably relies on same seed => same image ordering
        # across forks (checked by test_seed_gives_consistent_output).
        dataset_labels = SequentialMnistDataset(
            operation='sum',
            seed=0
        ).fork(seq_length=1, subset='train')
        dataset_count = SequentialMnistDataset(
            operation='count',
            seed=0
        ).fork(seq_length=seq_length, subset='train')
        assert_equal(len(dataset_count), len(dataset_labels) // seq_length)

        # Only spot-check the first 5 sequences to keep the test fast.
        for count_i in range(5):
            x_count, t_count = dataset_count[count_i]
            assert_equal(x_count.size(), (seq_length, 1, 28, 28))
            assert_equal(t_count.size(), (10, ))

            # Rebuild the expected digit histogram from the oracle and
            # verify the images line up observation-by-observation.
            t_count_expected = np.zeros((10, ))
            for time_i in range(seq_length):
                x_label, t_label = dataset_labels[count_i * seq_length + time_i]
                t_count_expected[t_label.numpy().astype('int8')] += 1
                np.testing.assert_almost_equal(
                    x_count[time_i].numpy(),
                    x_label.numpy().reshape(1, 28, 28)
                )
            np.testing.assert_almost_equal(t_count.numpy(), t_count_expected)
def test_sum_dataset():
    """The 'sum' target must equal the sum of the sequence's labels."""
    for seq_length in [1, 10, 100]:
        # Length-1 'sum' sequences act as a per-image label oracle; see
        # test_count_dataset for the same construction.
        dataset_labels = SequentialMnistDataset(
            operation='sum',
            seed=0
        ).fork(seq_length=1, subset='train')
        dataset_count = SequentialMnistDataset(
            operation='sum',
            seed=0
        ).fork(seq_length=seq_length, subset='train')
        assert_equal(len(dataset_count), len(dataset_labels) // seq_length)

        # Spot-check the first 5 sequences.
        for count_i in range(5):
            x_count, t_sum = dataset_count[count_i]
            assert_equal(x_count.size(), (seq_length, 1, 28, 28))
            assert_equal(t_sum.size(), (1, ))

            # Accumulate the expected sum from the oracle and verify the
            # images line up observation-by-observation.
            t_sum_expected = np.zeros((1, ))
            for time_i in range(seq_length):
                x_label, t_label = dataset_labels[count_i * seq_length + time_i]
                t_sum_expected += t_label.numpy()
                np.testing.assert_almost_equal(
                    x_count[time_i].numpy(),
                    x_label.numpy().reshape(1, 28, 28)
                )
            np.testing.assert_almost_equal(t_sum.numpy(), t_sum_expected)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/simple_function_static_test.py | stable_nalu/dataset/simple_function_static_test.py |
from nose.tools import *
import scipy.stats
import torch
import numpy as np
from stable_nalu.dataset import SimpleFunctionStaticDataset
def test_solveable_by_linear_algebra():
    """Recover the dataset's hidden a/b subset ranges by solving a linear
    system over one batch, and compare against the known parameters."""
    dataset = SimpleFunctionStaticDataset(
        operation='add', seed=0
    )
    # One batch of 100 observations of (presumably) 100-long inputs, so
    # the system x @ w = t below is square and exactly determined.
    dataset_test = iter(dataset.fork(input_range=1).dataloader(batch_size=100))
    x_batch, t_batch = next(dataset_test)
    x_batch_np = np.stack(x_batch)
    t_batch_np = np.stack(t_batch)

    # For the 'add' operation, w[i] should count how many of the two
    # (possibly overlapping) index subsets contain position i.
    w_merged_np = np.linalg.solve(x_batch_np, t_batch_np.ravel())
    w_merged_np_int = np.round(w_merged_np, 0).astype('int8')

    # W is whole numbers
    np.testing.assert_almost_equal(
        w_merged_np - w_merged_np_int,
        np.zeros(100),
        decimal=4
    )

    # W is either 0, 1, 2
    # NOTE: a different seed might not result in an overlap, thus {2} might
    # not be present.
    assert_equal(
        set(w_merged_np_int.tolist()),
        {0, 1, 2}
    )

    # Compute a, b range parameters
    # For seed=0, the b subset, is a subset of the a subset, which is assumed
    # by the following algorithm.
    a_start = None
    a_end = None
    b_start = None
    b_end = None
    # Scan for weight transitions: 0->1 opens a, 1->0 closes a, 1->2
    # opens b (the overlap region), 2->1 closes b.
    # NOTE(review): "previuse" is a typo for "previous"; kept as-is since
    # it is a purely local name.
    previuse_w_value = 0
    for w_index, w_value in enumerate(w_merged_np_int.tolist()):
        if w_value == 1 and previuse_w_value == 0:
            a_start = w_index
        elif w_value == 0 and previuse_w_value == 1:
            a_end = w_index
        elif w_value == 2 and previuse_w_value == 1:
            b_start = w_index
        elif w_value == 1 and previuse_w_value == 2:
            b_end = w_index
        previuse_w_value = w_value

    # Compare a and b range parameters
    assert_equal(a_start, dataset.a_start)
    assert_equal(a_end, dataset.a_end)
    assert_equal(b_start, dataset.b_start)
    assert_equal(b_end, dataset.b_end)
def test_input_range():
    """A fork with input_range=5 should sample inputs ~ Uniform(0, 5)."""
    dataset = SimpleFunctionStaticDataset(operation='add',
                                          vector_size=10000,
                                          seed=0)
    observation, _ = dataset.fork(input_range=5)[0]

    # Kolmogorov-Smirnov test against Uniform(0, 5); a high p-value means
    # we cannot reject the uniform hypothesis.
    reference_cdf = scipy.stats.uniform(loc=0, scale=5).cdf
    _, p_value = scipy.stats.kstest(observation, reference_cdf)
    assert p_value > 0.5
def test_output_shape():
    """x should be a 100-long vector and t a 1-long vector."""
    dataset = SimpleFunctionStaticDataset(operation='add', seed=0)
    x, t = dataset.fork(input_range=5)[0]

    assert_equal(x.shape, (100, ))
    # t must be a 1-long vector, not a scalar; otherwise the loss
    # function gets confused about which axis is the observation
    # dimension.
    assert_equal(t.shape, (1, ))
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/simple_function_static.py | stable_nalu/dataset/simple_function_static.py |
import numpy as np
from ._simple_function_abstact import SimpleFunctionDataset
class SimpleFunctionStaticDataset(SimpleFunctionDataset):
    """Static (non-sequential) simple-function dataset.

    Observations are single vectors of `input_size` elements; forking
    produces a dataset whose items have shape (input_size, ).
    """

    def __init__(self, operation,
                 input_size=100,
                 **kwargs):
        super().__init__(operation, input_size,
                         **kwargs)

    def fork(self, sample_range=None, *args, **kwargs):
        """Fork this dataset with item shape (input_size, ).

        sample_range defaults to [1, 2]. Bug fix: the default used to be
        a literal mutable list shared across all calls (the classic
        mutable-default-argument pitfall); a fresh list is now created
        per call via a None sentinel.
        """
        if sample_range is None:
            sample_range = [1, 2]
        return super().fork((self._input_size, ), sample_range, *args, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/__init__.py | stable_nalu/dataset/__init__.py |
from .simple_function_static import SimpleFunctionStaticDataset
from .simple_function_recurrent import SimpleFunctionRecurrentDataset
from .sequential_mnist import SequentialMnistDataset
from .sequential_svhn import SequentialSvhnDataset
from .number_translation import NumberTranslationDataset
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
AndreasMadsen/stable-nalu | https://github.com/AndreasMadsen/stable-nalu/blob/b3296ace137ffa4854edeef3759f1578b7650210/stable_nalu/dataset/simple_function_recurrent.py | stable_nalu/dataset/simple_function_recurrent.py |
from ._simple_function_abstact import SimpleFunctionDataset
class SimpleFunctionRecurrentDataset(SimpleFunctionDataset):
    """Recurrent (sequential) simple-function dataset.

    Each observation is a sequence of `seq_length` vectors with
    `vector_size` elements each.
    """

    def __init__(self, operation,
                 vector_size=10,
                 min_subset_length=1,
                 max_subset_length=5,
                 min_subset_overlap=1,
                 max_subset_overlap=2, **kwargs):
        # Forwards everything to SimpleFunctionDataset, pinning dataset
        # defaults (vector and subset sizes) for the recurrent variant.
        super().__init__(operation,
                         vector_size=vector_size,
                         min_subset_length=min_subset_length,
                         max_subset_length=max_subset_length,
                         min_subset_overlap=min_subset_overlap,
                         max_subset_overlap=max_subset_overlap, **kwargs)

    def fork(self, seq_length=10, input_range=1, *args, **kwargs):
        """Fork this dataset with item shape (seq_length, vector_size)."""
        return super().fork((seq_length, self._vector_size), input_range, *args, **kwargs)
| python | MIT | b3296ace137ffa4854edeef3759f1578b7650210 | 2026-01-05T07:14:37.360899Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.