dttutty commited on
Commit
d74f49e
·
1 Parent(s): 88132a6

Add new scripts for CSV validation and dataset processing, including checks for column values and generating node roles

Browse files
README.md CHANGED
@@ -91,35 +91,31 @@ If you need a `datasets` library dataset or a Hub Viewer-backed table, export th
91
  - Selected integer-valued edge-feature arrays are materialized with lossless downcasts for storage efficiency.
92
  - `MOOC`, `REDDIT`, and `WIKIPEDIA` expose their non-trivial state labels as top-level `ban_labels.csv` sidecars.
93
 
94
- ## Recommended `max_batch_size`
95
 
96
  These conservative recommendations are derived from the `num_edges` values below,
97
  assuming the standard `80/10/10` train/val/test split and:
98
 
99
- `max_batch_size = floor(num_edges / (10 * N_GPU))`
100
 
101
- This keeps `batch_size * N_GPU` within the approximate smallest evaluation split
102
  budget for the current FROST runtime.
103
 
104
- For a rough dataset-by-dataset runtime ordering for the custom DyGLib
105
- DyGFormer sweep scripts in this repository, see
106
- [`DYGFORMER_EXPECTED_RUNTIME.md`](DYGFORMER_EXPECTED_RUNTIME.md).
107
-
108
- | Dataset | N_GPU=1 | N_GPU=2 | N_GPU=4 | N_GPU=8 |
109
- | --- | ---: | ---: | ---: | ---: |
110
- | CanParl | 7447 | 3723 | 1861 | 930 |
111
- | Contacts | 242627 | 121313 | 60656 | 30328 |
112
- | Flights | 192714 | 96357 | 48178 | 24089 |
113
- | SocialEvo | 209951 | 104975 | 52487 | 26243 |
114
- | UNtrade | 50749 | 25374 | 12687 | 6343 |
115
- | UNvote | 103574 | 51787 | 25893 | 12946 |
116
- | USLegis | 6039 | 3019 | 1509 | 754 |
117
- | Enron | 12523 | 6261 | 3130 | 1565 |
118
- | LastFM | 129310 | 64655 | 32327 | 16163 |
119
- | MOOC | 41174 | 20587 | 10293 | 5146 |
120
- | Reddit | 67244 | 33622 | 16811 | 8405 |
121
- | UCI | 5983 | 2991 | 1495 | 747 |
122
- | Wikipedia | 15747 | 7873 | 3936 | 1968 |
123
 
124
  ## Dataset Details
125
 
 
91
  - Selected integer-valued edge-feature arrays are materialized with lossless downcasts for storage efficiency.
92
  - `MOOC`, `REDDIT`, and `WIKIPEDIA` expose their non-trivial state labels as top-level `ban_labels.csv` sidecars.
93
 
94
+ ## Recommended `max_macro_batch_size`
95
 
96
  These conservative recommendations are derived from the `num_edges` values below,
97
  assuming the standard `80/10/10` train/val/test split and:
98
 
99
+ `max_macro_batch_size = floor(num_edges / 10)`
100
 
101
+ This keeps `macro_batch_size` within the approximate smallest evaluation split
102
  budget for the current FROST runtime.
103
 
104
+ | Dataset | max_macro_batch_size |
105
+ | --- | ---: |
106
+ | CanParl | 7447 |
107
+ | Contacts | 242627 |
108
+ | Flights | 192714 |
109
+ | SocialEvo | 209951 |
110
+ | UNtrade | 50749 |
111
+ | UNvote | 103574 |
112
+ | USLegis | 6039 |
113
+ | Enron | 12523 |
114
+ | LastFM | 129310 |
115
+ | MOOC | 41174 |
116
+ | Reddit | 67244 |
117
+ | UCI | 5983 |
118
+ | Wikipedia | 15747 |
 
 
 
 
119
 
120
  ## Dataset Details
121
 
check_csv_last_col_plus_one.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import csv
6
+ import math
7
+ import sys
8
+ from pathlib import Path
9
+
10
+
11
def parse_number(value: str) -> float:
    """Convert a CSV field to float, rejecting blank fields explicitly."""
    stripped = value.strip()
    if not stripped:
        raise ValueError("empty numeric field")
    return float(stripped)


def main() -> int:
    """Verify that the last CSV column equals the first column plus an offset.

    Returns 0 when every data row satisfies the relation, 1 when mismatches
    or parse errors were found, and 2 on usage errors (missing file,
    missing/short header).
    """
    parser = argparse.ArgumentParser(
        description="Check whether the last CSV column equals the first column plus an offset."
    )
    parser.add_argument("csv_path", type=Path, help="Path to the CSV file to inspect.")
    parser.add_argument(
        "--offset",
        type=float,
        default=1.0,
        help="Expected offset between the first and last column. Default: 1.",
    )
    parser.add_argument(
        "--tolerance",
        type=float,
        default=1e-9,
        help="Absolute tolerance for float comparison. Default: 1e-9.",
    )
    args = parser.parse_args()

    if not args.csv_path.is_file():
        print(f"file not found: {args.csv_path}", file=sys.stderr)
        return 2

    checked = 0
    bad_rows = 0
    unparsable = 0
    first_bad = None  # (line_no, raw first field, raw last field)
    first_error = None  # (line_no, message)

    with args.csv_path.open("r", newline="") as handle:
        rows = csv.reader(handle)
        header = next(rows, None)
        if header is None:
            print("empty CSV: no header row", file=sys.stderr)
            return 2
        if len(header) < 2:
            print("CSV must have at least two columns", file=sys.stderr)
            return 2

        # Data rows start at physical line 2 (line 1 is the header).
        for line_no, record in enumerate(rows, start=2):
            checked += 1
            if len(record) < 2:
                unparsable += 1
                if first_error is None:
                    first_error = (line_no, "row has fewer than two columns")
                continue

            try:
                lead = parse_number(record[0])
                tail = parse_number(record[-1])
            except ValueError as exc:
                unparsable += 1
                if first_error is None:
                    first_error = (line_no, str(exc))
                continue

            # rel_tol=0 so --tolerance is a strict absolute bound.
            if not math.isclose(tail, lead + args.offset, rel_tol=0.0, abs_tol=args.tolerance):
                bad_rows += 1
                if first_bad is None:
                    first_bad = (line_no, record[0], record[-1])

    print(f"file: {args.csv_path}")
    print(f"first column: {header[0]}")
    print(f"last column: {header[-1]}")
    print(f"rows checked: {checked}")
    print(f"offset: {args.offset}")
    print(f"mismatches: {bad_rows}")
    print(f"parse_errors: {unparsable}")

    if first_bad is not None:
        line_no, lead_text, tail_text = first_bad
        print(
            f"first mismatch at line {line_no}: first={lead_text}, last={tail_text}, "
            f"expected={float(lead_text) + args.offset}"
        )

    if first_error is not None:
        line_no, message = first_error
        print(f"first parse error at line {line_no}: {message}")

    if bad_rows == 0 and unparsable == 0:
        print("result: PASS")
        return 0

    print("result: FAIL")
    return 1
105
+
106
+
107
if __name__ == "__main__":
    # Exit with the checker's status: 0 pass, 1 fail, 2 usage error.
    raise SystemExit(main())
check_csv_penultimate_col_zero.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import csv
6
+ import math
7
+ import sys
8
+ from pathlib import Path
9
+
10
+
11
def parse_number(value: str) -> float:
    """Convert a CSV field to float, rejecting blank fields explicitly."""
    stripped = value.strip()
    if not stripped:
        raise ValueError("empty numeric field")
    return float(stripped)


def main() -> int:
    """Verify that the penultimate CSV column equals an expected value (default 0).

    Returns 0 when every data row matches, 1 when mismatches or parse errors
    were found, and 2 on usage errors (missing file, missing/short header).
    """
    parser = argparse.ArgumentParser(
        description="Check whether the penultimate CSV column is zero for every data row."
    )
    parser.add_argument("csv_path", type=Path, help="Path to the CSV file to inspect.")
    parser.add_argument(
        "--zero",
        type=float,
        default=0.0,
        help="Expected numeric value in the penultimate column. Default: 0.",
    )
    parser.add_argument(
        "--tolerance",
        type=float,
        default=1e-9,
        help="Absolute tolerance for float comparison. Default: 1e-9.",
    )
    args = parser.parse_args()

    if not args.csv_path.is_file():
        print(f"file not found: {args.csv_path}", file=sys.stderr)
        return 2

    checked = 0
    bad_rows = 0
    unparsable = 0
    first_bad = None  # (line_no, raw penultimate field)
    first_error = None  # (line_no, message)

    with args.csv_path.open("r", newline="") as handle:
        rows = csv.reader(handle)
        header = next(rows, None)
        if header is None:
            print("empty CSV: no header row", file=sys.stderr)
            return 2
        if len(header) < 2:
            print("CSV must have at least two columns", file=sys.stderr)
            return 2

        # Data rows start at physical line 2 (line 1 is the header).
        for line_no, record in enumerate(rows, start=2):
            checked += 1
            if len(record) < 2:
                unparsable += 1
                if first_error is None:
                    first_error = (line_no, "row has fewer than two columns")
                continue

            try:
                candidate = parse_number(record[-2])
            except ValueError as exc:
                unparsable += 1
                if first_error is None:
                    first_error = (line_no, str(exc))
                continue

            # rel_tol=0 so --tolerance is a strict absolute bound.
            if not math.isclose(candidate, args.zero, rel_tol=0.0, abs_tol=args.tolerance):
                bad_rows += 1
                if first_bad is None:
                    first_bad = (line_no, record[-2])

    print(f"file: {args.csv_path}")
    print(f"penultimate column: {header[-2]}")
    print(f"rows checked: {checked}")
    print(f"expected value: {args.zero}")
    print(f"mismatches: {bad_rows}")
    print(f"parse_errors: {unparsable}")

    if first_bad is not None:
        line_no, raw_value = first_bad
        print(f"first mismatch at line {line_no}: value={raw_value}")

    if first_error is not None:
        line_no, message = first_error
        print(f"first parse error at line {line_no}: {message}")

    if bad_rows == 0 and unparsable == 0:
        print("result: PASS")
        return 0

    print("result: FAIL")
    return 1
99
+
100
+
101
if __name__ == "__main__":
    # Exit with the checker's status: 0 pass, 1 fail, 2 usage error.
    raise SystemExit(main())
check_reverse.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import logging
4
+ from collections import Counter
5
+
6
+ import numpy as np
7
+
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
def load_edges(npz_path: str) -> Counter:
    """Read CSR arrays ('indptr' and 'indices') from an .npz file and
    return a Counter mapping each directed edge (u, v) to its multiplicity.
    """
    arrays = np.load(npz_path)
    indptr = arrays["indptr"]
    indices = arrays["indices"]
    edge_counts: Counter = Counter()

    # Node u's neighbors live in indices[indptr[u]:indptr[u + 1]].
    for node in range(len(indptr) - 1):
        for neighbor in indices[indptr[node] : indptr[node + 1]]:
            edge_counts[(node, int(neighbor))] += 1
    return edge_counts
28
+
29
+
30
def check_symmetry(edges: Counter) -> bool:
    """Return True iff every directed edge count equals its reverse's count."""
    for (u, v), count in edges.items():
        if edges.get((v, u), 0) != count:
            return False
    return True
35
+
36
+
37
def main() -> None:
    """CLI entry point: report whether a CSR .npz file contains a reverse
    twin for every directed edge (i.e. was built with add_reverse=True).

    Side effect: configures the root logger. Without a handler, the
    logger.debug calls below are silently dropped and the script prints
    nothing at all, which defeats its purpose as a checker.
    """
    logging.basicConfig(level=logging.DEBUG, format="%(message)s")

    p = argparse.ArgumentParser(
        description="Check if a CSR .npz was built with add_reverse=True"
    )
    p.add_argument(
        "npz_file", help="Path to the .npz file (must contain 'indptr' and 'indices')"
    )
    args = p.parse_args()

    # Lazy %-style args so formatting only happens if the record is emitted.
    logger.debug("Loading edges from %s...", args.npz_file)
    edges = load_edges(args.npz_file)
    logger.debug("Total directed edges loaded: %s", sum(edges.values()))

    logger.debug("Checking symmetry of the edge set...")
    if check_symmetry(edges):
        logger.debug(
            "✔ Every edge has a matching reverse: likely generated with add_reverse=True"
        )
    else:
        logger.debug(
            "✘ Not every edge is paired: likely generated with add_reverse=False"
        )
59
+
60
+
61
if __name__ == "__main__":
    # Script entry point; main() handles argument parsing and reporting.
    main()
convert_pt_to_npy.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Convert a saved PyTorch tensor (.pt) to a NumPy array (.npy).

Edit ``src_path`` below to point at the tensor to convert; the output path
is derived by swapping the ``.pt`` extension for ``.npy``.
"""
import logging
import os
import sys

import numpy as np
import torch

# Without a configured handler, every logger.info() below would be
# silently dropped and the script would run with no visible output.
logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger(__name__)

# Path to the source .pt file.
# expanduser() is required: open()/torch.load() do not understand a
# literal "~" — only the shell expands it.
src_path = os.path.expanduser("~/Projects/lei_pipe/DATA/WIKI/edge_features.pt")
# Auto-generate the output path: .../edge_features.npy
# (splitext only touches the extension, unlike str.replace which could
# also match ".pt" elsewhere in the path).
dst_path = os.path.splitext(src_path)[0] + ".npy"

logger.info("Loading: %s ...", src_path)

# 1. Load PyTorch Tensor
# map_location='cpu' is critical:
# If the file was saved on GPU, this prevents it from consuming GPU memory on load,
# and also prevents errors in environments without a GPU.
try:
    tensor_data = torch.load(src_path, map_location="cpu")
except Exception as e:
    logger.error("Load failed: %s", e)
    sys.exit(1)

# 2. Check data type
if not isinstance(tensor_data, torch.Tensor):
    logger.warning("Warning: file contains %s, not a plain Tensor.", type(tensor_data))
    # If it is a dict, manually extract the Tensor, e.g.:
    # tensor_data = tensor_data['feat']

logger.info("Data shape: %s", tensor_data.shape)
logger.info("Data dtype: %s", tensor_data.dtype)

# 3. Convert to NumPy and save
logger.info("Converting to NumPy and saving to: %s ...", dst_path)
try:
    # Detach first if the tensor requires grad
    if tensor_data.requires_grad:
        tensor_data = tensor_data.detach()

    np_arr = tensor_data.numpy()
    np.save(dst_path, np_arr)
    logger.info("Conversion successful.")

    # 4. Verification (mmap_mode="r" only reads the array header, does not load data)
    logger.info("%s", "-" * 30)
    logger.info("Verifying generated .npy file:")
    check_arr = np.load(dst_path, mmap_mode="r")
    logger.info("Npy Shape: %s", check_arr.shape)
    logger.info("Npy Dtype: %s", check_arr.dtype)

except Exception as e:
    # Exit nonzero so callers (shell scripts) can detect the failure.
    logger.error("Save failed: %s", e)
    sys.exit(1)
dataset_defaults.sh ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Dataset-specific training defaults.
# Source this file from run scripts:
#   source "$(dirname "${BASH_SOURCE[0]}")/DATA/dataset_defaults.sh"

# Return the default macro_batch_size (total across all GPUs) for a given dataset.
# Unknown datasets fall back to 400 (the `*` case below).
default_macro_batch_size() {
    local dataset="${1^^}" # upper-case (requires bash 4+ parameter expansion)
    case "$dataset" in
        WIKI|WIKIPEDIA) echo 100 ;;
        MOOC) echo 240 ;;
        LASTFM) echo 800 ;;
        REDDIT) echo 400 ;;
        *) echo 400 ;;
    esac
}
gen_full_graph.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Build a temporally sorted CSR graph (optionally with reverse edges) from edges.csv.

Reads an edge list with either {u, i, idx} or {src, dst, eid} columns plus a
`ts` timestamp column, and writes `full_graph_with_reverse_edge.npz` (or
`full_graph_no_reverse_edge.npz`) next to the input, containing CSR arrays
indptr/indices/ts/eid with each adjacency list sorted by (ts, eid, dst).
"""
import argparse
import itertools
import os

import numpy as np
import pandas as pd
from tqdm import tqdm

parser = argparse.ArgumentParser()
parser.add_argument("--data", type=str, help="path to edges.csv")
# BUG FIX: previously `--add_reverse` used action="store_true" with
# default=True, so passing the flag was a no-op and reverse edges could
# never be disabled. Keep the original flag (backward compatible) and add
# an explicit off switch.
parser.add_argument("--add_reverse", dest="add_reverse", default=True, action="store_true")
parser.add_argument(
    "--no_add_reverse", dest="add_reverse", action="store_false",
    help="do not add reverse edges",
)
# Same store_true/default=True no-op bug for --tqdm; paired off switch added.
parser.add_argument(
    "--tqdm", dest="tqdm", action="store_true", default=True,
    help="enable tqdm progress bars",
)
parser.add_argument(
    "--no_tqdm", dest="tqdm", action="store_false",
    help="disable tqdm progress bars",
)
args = parser.parse_args()

df = pd.read_csv(args.data, header=0)

# Accept either DyGLib-style (u, i, idx) or generic (src, dst, eid) headers.
if {"u", "i", "idx"}.issubset(df.columns):
    src_col, dst_col, eid_col = "u", "i", "idx"
elif {"src", "dst", "eid"}.issubset(df.columns):
    src_col, dst_col, eid_col = "src", "dst", "eid"
else:
    raise ValueError(
        "edges.csv must contain either {u, i, idx} or {src, dst, eid} columns. "
        f"Got: {list(df.columns)}"
    )

if "ts" not in df.columns:
    raise ValueError(f"edges.csv must contain a ts column. Got: {list(df.columns)}")

# Node ids are assumed to be 0-based and contiguous up to the max id seen.
num_nodes = max(int(df[src_col].max()), int(df[dst_col].max())) + 1
print("num_nodes: ", num_nodes)

# Per-node adjacency accumulators; flattened into CSR arrays below.
full_graph_with_reverse_edges_indptr = np.zeros(num_nodes + 1, dtype=np.int64)
full_graph_with_reverse_edges_indices = [[] for _ in range(num_nodes)]
full_graph_with_reverse_edges_ts = [[] for _ in range(num_nodes)]
full_graph_with_reverse_edges_eid = [[] for _ in range(num_nodes)]

edge_iter = tqdm(df.iterrows(), total=len(df)) if args.tqdm else df.iterrows()
for idx, row in edge_iter:
    src = int(row[src_col])
    dst = int(row[dst_col])
    ts = int(row["ts"])
    eid_val = int(row[eid_col])
    full_graph_with_reverse_edges_indices[src].append(dst)
    full_graph_with_reverse_edges_ts[src].append(ts)
    full_graph_with_reverse_edges_eid[src].append(eid_val)
    if args.add_reverse:
        # The reverse edge shares the forward edge's timestamp and edge id.
        full_graph_with_reverse_edges_indices[dst].append(src)
        full_graph_with_reverse_edges_ts[dst].append(ts)
        full_graph_with_reverse_edges_eid[dst].append(eid_val)

# Prefix-sum of per-node degrees -> CSR indptr.
node_iter = tqdm(range(num_nodes)) if args.tqdm else range(num_nodes)
for i in node_iter:
    full_graph_with_reverse_edges_indptr[i + 1] = full_graph_with_reverse_edges_indptr[
        i
    ] + len(full_graph_with_reverse_edges_indices[i])

full_graph_with_reverse_edges_indices = np.array(
    list(itertools.chain(*full_graph_with_reverse_edges_indices)), dtype=np.int64
)
full_graph_with_reverse_edges_ts = np.array(
    list(itertools.chain(*full_graph_with_reverse_edges_ts)), dtype=np.int64
)
full_graph_with_reverse_edges_eid = np.array(
    list(itertools.chain(*full_graph_with_reverse_edges_eid)), dtype=np.int64
)

print("Sorting...")


def tsort(i, indptr, indices, t, eid):
    """Sort node i's adjacency slice in place by (ts, eid, dst)."""
    beg = indptr[i]
    end = indptr[i + 1]
    local_indices = indices[beg:end]
    local_t = t[beg:end]
    local_eid = eid[beg:end]
    # Impose a total order so ties on timestamp are deterministic across runs.
    sidx = np.lexsort((local_indices, local_eid, local_t))
    indices[beg:end] = local_indices[sidx]
    t[beg:end] = local_t[sidx]
    eid[beg:end] = local_eid[sidx]


sort_iter = (
    tqdm(range(full_graph_with_reverse_edges_indptr.shape[0] - 1))
    if args.tqdm
    else range(full_graph_with_reverse_edges_indptr.shape[0] - 1)
)
for i in sort_iter:
    tsort(
        i,
        full_graph_with_reverse_edges_indptr,
        full_graph_with_reverse_edges_indices,
        full_graph_with_reverse_edges_ts,
        full_graph_with_reverse_edges_eid,
    )

print("saving...")
output_dir = os.path.dirname(args.data)
np.savez(
    os.path.join(
        output_dir,
        "full_graph_with_reverse_edge.npz"
        if args.add_reverse
        else "full_graph_no_reverse_edge.npz",
    ),
    indptr=full_graph_with_reverse_edges_indptr,
    indices=full_graph_with_reverse_edges_indices,
    ts=full_graph_with_reverse_edges_ts,
    eid=full_graph_with_reverse_edges_eid,
)
generate_node_role_npy.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import argparse
4
+ import csv
5
+ from pathlib import Path
6
+
7
+ import numpy as np
8
+
9
+
10
# Repository root: parent of the directory containing this script.
ROOT_DIR = Path(__file__).resolve().parent.parent
DATA_DIR = ROOT_DIR / "DATA"
# Known bipartite datasets. All ranges are half-open [lo, hi): user ids
# come first, item ids follow, and num_nodes == item_range[1]. Id 0 is
# never used by either range (ids start at 1), so index 0 of node_role
# stays False.
ROLE_CONFIG = {
    "wikipedia": {
        "num_nodes": 9228,
        "user_range": (1, 8228),
        "item_range": (8228, 9228),
    },
    "reddit": {
        "num_nodes": 10985,
        "user_range": (1, 10001),
        "item_range": (10001, 10985),
    },
    "mooc": {
        "num_nodes": 7145,
        "user_range": (1, 7048),
        "item_range": (7048, 7145),
    },
    "lastfm": {
        "num_nodes": 1981,
        "user_range": (1, 981),
        "item_range": (981, 1981),
    },
}
34
+
35
+
36
def resolve_dataset_dir(dataset_name: str) -> Path:
    """Locate DATA/<dataset_name>, tolerating case differences.

    Tries the name as given, then upper- and lower-cased, and finally
    scans DATA/ for any directory matching case-insensitively.

    Raises:
        FileNotFoundError: no matching directory exists under DATA/.
    """
    for candidate in (dataset_name, dataset_name.upper(), dataset_name.lower()):
        path = DATA_DIR / candidate
        if path.is_dir():
            return path

    wanted = dataset_name.lower()
    for child in DATA_DIR.iterdir():
        if child.is_dir() and child.name.lower() == wanted:
            return child

    raise FileNotFoundError(f"missing dataset directory for {dataset_name} under {DATA_DIR}")
54
+
55
+
56
def log(message: str) -> None:
    """Print *message*, flushing immediately so progress is visible even
    when stdout is buffered (e.g. piped to a file)."""
    print(message, flush=True)
58
+
59
+
60
def dataset_csv_path(dataset_name: str) -> Path:
    """Return the edges.csv path for *dataset_name*, verifying it exists.

    Raises:
        FileNotFoundError: the dataset directory or its edges.csv is missing.
    """
    path = resolve_dataset_dir(dataset_name) / "edges.csv"
    if not path.is_file():
        raise FileNotFoundError(f"missing edges.csv for {dataset_name}: {path}")
    return path
65
+
66
+
67
def validate_config(dataset_name: str, config: dict[str, tuple[int, int] | int]) -> None:
    """Validate a ROLE_CONFIG entry against the dataset's edges.csv.

    Streams the CSV once, tracking min/max and unique counts of the
    `src` (user) and `dst` (item) id columns, and checks them against the
    half-open ranges and node count declared in *config*.

    Raises:
        FileNotFoundError: dataset directory or edges.csv is missing.
        ValueError: edges.csv has no data rows, or any range/count mismatch.
        KeyError: the CSV lacks 'src'/'dst' columns.
    """
    csv_path = dataset_csv_path(dataset_name)
    with csv_path.open("r", encoding="utf-8", newline="") as handle:
        reader = csv.DictReader(handle)
        umin = umax = imin = imax = None
        uset: set[int] = set()
        iset: set[int] = set()
        n_edges = 0
        for row in reader:
            # int(float(...)) tolerates ids serialized as e.g. "123.0".
            u = int(float(row["src"]))
            i = int(float(row["dst"]))
            umin = u if umin is None else min(umin, u)
            umax = u if umax is None else max(umax, u)
            imin = i if imin is None else min(imin, i)
            imax = i if imax is None else max(imax, i)
            uset.add(u)
            iset.add(i)
            n_edges += 1

    # Guard before doing arithmetic on umax/imax: a header-only CSV would
    # otherwise fail below with a confusing TypeError on None + 1.
    if n_edges == 0:
        raise ValueError(f"{dataset_name}: edges.csv has no data rows: {csv_path}")

    expected_user_lo, expected_user_hi = config["user_range"]
    expected_item_lo, expected_item_hi = config["item_range"]
    expected_num_nodes = int(config["num_nodes"])
    inferred_num_nodes = max(int(umax), int(imax)) + 1

    if (umin, umax + 1) != (expected_user_lo, expected_user_hi):
        raise ValueError(
            f"{dataset_name}: user id range mismatch, got [{umin}->{umax + 1}), "
            f"expected [{expected_user_lo}->{expected_user_hi})"
        )
    if (imin, imax + 1) != (expected_item_lo, expected_item_hi):
        raise ValueError(
            f"{dataset_name}: item id range mismatch, got [{imin}->{imax + 1}), "
            f"expected [{expected_item_lo}->{expected_item_hi})"
        )
    if inferred_num_nodes != expected_num_nodes:
        raise ValueError(
            f"{dataset_name}: num_nodes mismatch, got {inferred_num_nodes}, expected {expected_num_nodes}"
        )
    if len(uset) != expected_user_hi - expected_user_lo:
        raise ValueError(f"{dataset_name}: unexpected user unique count {len(uset)}")
    if len(iset) != expected_item_hi - expected_item_lo:
        raise ValueError(f"{dataset_name}: unexpected item unique count {len(iset)}")

    log(
        f"validated {dataset_name}: edges={n_edges}, "
        f"user_range=[{expected_user_lo}->{expected_user_hi}), "
        f"item_range=[{expected_item_lo}->{expected_item_hi}), "
        f"num_nodes={expected_num_nodes}"
    )
116
+
117
+
118
def generate_node_role(dataset_name: str, config: dict[str, tuple[int, int] | int]) -> Path:
    """Write DATA/<dataset>/node_role.npy: a (num_nodes, 1) bool array
    where True marks item nodes and False marks user nodes.

    Validates *config* against the dataset's edges.csv first, so a stale
    config never produces a silently wrong role file. Returns the path of
    the written .npy file.
    """
    validate_config(dataset_name, config)

    total_nodes = int(config["num_nodes"])
    item_lo, item_hi = config["item_range"]
    roles = np.zeros((total_nodes, 1), dtype=np.bool_)
    roles[item_lo:item_hi, 0] = True

    out_path = resolve_dataset_dir(dataset_name) / "node_role.npy"
    np.save(out_path, roles)
    log(
        f"wrote {out_path} shape={roles.shape} dtype={roles.dtype} "
        f"user=False for [0->{item_lo}), item=True for [{item_lo}->{item_hi})"
    )
    return out_path
134
+
135
+
136
def parse_args() -> argparse.Namespace:
    """Parse CLI options.

    --dataset is repeatable (argparse `append` action) and restricted to
    the keys of ROLE_CONFIG; when omitted, args.dataset is None and the
    caller falls back to all supported datasets.
    """
    parser = argparse.ArgumentParser(
        description="Generate bool node_role.npy for bipartite datasets in DATA/<dataset>/."
    )
    parser.add_argument(
        "--dataset",
        action="append",
        choices=sorted(ROLE_CONFIG),
        help="dataset name to generate; repeatable. Defaults to all supported datasets.",
    )
    return parser.parse_args()
147
+
148
+
149
def main() -> int:
    """Generate node_role.npy for each requested dataset (default: all).

    Returns 0 on success; validation failures propagate as exceptions.
    """
    args = parse_args()
    selected = args.dataset or list(ROLE_CONFIG)
    for name in selected:
        generate_node_role(name, ROLE_CONFIG[name])
    return 0
155
+
156
+
157
if __name__ == "__main__":
    # Propagate main()'s status code to the shell.
    raise SystemExit(main())