# isonetpp_loader.py
from __future__ import annotations
import os
from typing import Optional, Dict
from huggingface_hub import hf_hub_download
from subiso_dataset import (
    SubgraphIsomorphismDataset,
    # TRAIN_MODE, VAL_MODE, TEST_MODE, BROAD_TEST_MODE
    TRAIN_MODE, VAL_MODE, TEST_MODE,
)

# ----------------------------
# Helpers
# ----------------------------
def _pairs_for_size(dataset_size: str) -> str:
    return "80k" if dataset_size == "small" else "240k"


def _folder_for_size(dataset_size: str) -> str:
    return "small_dataset" if dataset_size == "small" else "large_dataset"


def _normalize_name(base_name: str, dataset_size: str) -> str:
    """
    Accepts 'aids' or 'aids240k' (and similarly for the other datasets).
    A bare name gets the pair count appended; a name that already ends in
    80k/240k is kept as-is.
    """
    pairs = _pairs_for_size(dataset_size)
    if base_name.endswith(("80k", "240k")):
        return base_name
    return f"{base_name}{pairs}"

def _mode_prefix_and_dir(mode: str) -> tuple[str, str]:
    """
    The file prefix uses 'test' whenever the mode contains 'test' (repo
    convention). The split directory is one of train/val/test, so
    'Extra_test_300' maps to 'test'.
    """
    prefix = "test" if "test" in mode.lower() else mode
    mode_dir = "test" if "test" in mode.lower() else mode
    return prefix, mode_dir
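
# Note: both return values currently resolve the same way, e.g. (illustrative)
#   _mode_prefix_and_dir("Extra_test_300") -> ("test", "test")
#   _mode_prefix_and_dir("val")            -> ("val", "val")
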
# ----------------------------
# Path resolution + downloads
# ----------------------------
def _ensure_paths(
    repo_id: str,
    mode: str,
    dataset_name: str,                # 'aids' or 'aids240k'
    dataset_size: str,                # 'small' | 'large'
    local_root: Optional[str] = None,
) -> Dict[str, str]:
    """
    Download the three needed files into the cache (or into local_root if set):
      - <folder>/splits/<mode>/<mode>_<base>_query_subgraphs.pkl
      - <folder>/splits/<mode>/<mode>_<base>_rel_nx_is_subgraph_iso.pkl
      - <folder>/corpus/<base>_corpus_subgraphs.pkl
    where <folder> is "small_dataset"/"large_dataset" and <base> is the
    normalized dataset name (contains 80k/240k exactly once).
    """
    folder = _folder_for_size(dataset_size)              # "large_dataset" or "small_dataset"
    base = _normalize_name(dataset_name, dataset_size)   # e.g., "aids240k"
    # prefix, mode_dir = _mode_prefix_and_dir(mode)
    query_fname = f"{mode}_{base}_query_subgraphs.pkl"
    rel_fname = f"{mode}_{base}_rel_nx_is_subgraph_iso.pkl"
    corpus_fname = f"{base}_corpus_subgraphs.pkl"

    repo_query_path = f"{folder}/splits/{mode}/{query_fname}"
    repo_rel_path = f"{folder}/splits/{mode}/{rel_fname}"
    repo_corpus_path = f"{folder}/corpus/{corpus_fname}"
    kwargs = dict(
        repo_id=repo_id,
        repo_type="dataset",
        local_dir=local_root,
        local_dir_use_symlinks=False,
    )
    query_path = hf_hub_download(filename=repo_query_path, **kwargs)
    rel_path = hf_hub_download(filename=repo_rel_path, **kwargs)
    corpus_path = hf_hub_download(filename=repo_corpus_path, **kwargs)
    return {"query": query_path, "rel": rel_path, "corpus": corpus_path}
# ----------------------------
# Public entrypoint
# ----------------------------
def load_isonetpp_benchmark(
    repo_id: str = "structlearning/isonetpp-benchmark",
    mode: str = "train",           # "train" | "val" | "test" | "Extra_test_300"
    dataset_name: str = "aids",    # "aids" or "aids240k" (same for mutag/ptc_*)
    dataset_size: str = "large",   # "small" | "large"
    batch_size: int = 128,
    data_type: str = "gmn",        # "pyg" or "gmn"
    device: Optional[str] = None,
    download_root: Optional[str] = None,
):
    # Map the user-facing mode string to the class constants.
    mode_map = {
        "train": TRAIN_MODE,
        "val": VAL_MODE,
        "test": TEST_MODE,
        # "extra_test_300": BROAD_TEST_MODE,
        # "Extra_test_300": BROAD_TEST_MODE,
    }
    mode_norm = mode_map.get(mode, mode)
    paths = _ensure_paths(
        repo_id=repo_id,
        mode=mode_norm,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        local_root=download_root,
    )

    # paths["query"] = .../<folder>/splits/<mode_dir>/<file>
    # We want dataset_base_path to be the **parent of <folder>** so that
    #   dataset_base_path / dataset_path_override / splits/<mode>/... exists.
    # Walk up the directory levels carefully:
    #   file_dir   = .../<folder>/splits/<mode_dir>
    #   splits_dir = .../<folder>/splits
    #   folder_dir = .../<folder>
    #   parent_dir = parent of <folder>
    file_dir = os.path.dirname(paths["query"])
    splits_dir = os.path.dirname(file_dir)
    folder_dir = os.path.dirname(splits_dir)
    parent_dir = os.path.dirname(folder_dir)  # <-- this is the correct dataset_base_path
    dataset_config = dict(
        mode=mode_norm,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        batch_size=batch_size,
        data_type=data_type,
        dataset_base_path=parent_dir,  # parent of <folder>
        dataset_path_override=None,    # e.g. _folder_for_size(dataset_size): "large_dataset"/"small_dataset"
        experiment=None,
        device=device,
    )
    return SubgraphIsomorphismDataset(**dataset_config)
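

# Minimal usage sketch (illustrative). It assumes the Hugging Face repo above is
# reachable and that the local `subiso_dataset` module is importable; only the
# class name is printed, since the dataset's API is not specified in this file.
if __name__ == "__main__":
    ds = load_isonetpp_benchmark(
        mode="test",
        dataset_name="aids",
        dataset_size="large",
        batch_size=64,
        data_type="gmn",
        device="cpu",
    )
    print(f"Loaded {type(ds).__name__} for mode='test' (aids, large).")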