# isonetpp-benchmark / isonetpp_loader.py
# (Hugging Face file-viewer metadata: author indraroy,
#  commit f927803 "Fix loader path for HF dataset", 3.36 kB)
# isonetpp_loader.py
from __future__ import annotations
import os
from typing import Optional, Dict
from huggingface_hub import hf_hub_download
from subiso_dataset import (
SubgraphIsomorphismDataset,
TRAIN_MODE, VAL_MODE, TEST_MODE, BROAD_TEST_MODE,
GMN_DATA_TYPE, PYG_DATA_TYPE
)
# Normalize names users pass ("aids" or "aids240k" → stored names are aids240k)
def _normalize_name(name: str) -> str:
if name.endswith("240k") or name.endswith("80k"):
return name
# assume large dataset default = 240k
return name + "240k"
def _folder(dataset_size: str) -> str:
return "small_dataset" if dataset_size == "small" else "large_dataset"
def _ensure_paths(
    repo_id: str,
    mode: str,
    dataset_name: str,
    dataset_size: str,
    local_root: Optional[str] = None,
) -> Dict[str, str]:
    """Download one split's query/relation/corpus pickles from the HF Hub.

    Args:
        repo_id: Hugging Face dataset repository id.
        mode: split name used in the repo path (e.g. "train", "val", "test").
        dataset_name: dataset name, with or without a pair-count suffix.
        dataset_size: "small" (80k pairs) or "large" (240k pairs).
        local_root: optional local directory to materialize the files into.

    Returns:
        Dict with keys "query", "rel", "corpus" mapping to local file paths.
    """
    dataset_name = _normalize_name(dataset_name)
    folder = _folder(dataset_size)  # "large_dataset" or "small_dataset"
    prefix = "test" if "test" in mode.lower() else mode
    pairs = "80k" if dataset_size == "small" else "240k"
    # BUG FIX: _normalize_name already appended a pair-count suffix, and the
    # original code appended `pairs` on top of it, yielding filenames like
    # "train_aids240k240k_query_subgraphs.pkl".  Strip any existing suffix
    # first, then append the one matching the requested dataset_size.
    base_name = dataset_name
    for suffix in ("240k", "80k"):
        if base_name.endswith(suffix):
            base_name = base_name[: -len(suffix)]
            break
    query_fname = f"{prefix}_{base_name}{pairs}_query_subgraphs.pkl"
    rel_fname = f"{prefix}_{base_name}{pairs}_rel_nx_is_subgraph_iso.pkl"
    corpus_fname = f"{base_name}{pairs}_corpus_subgraphs.pkl"
    repo_query_path = f"{folder}/splits/{mode}/{query_fname}"
    repo_rel_path = f"{folder}/splits/{mode}/{rel_fname}"
    repo_corpus_path = f"{folder}/corpus/{corpus_fname}"
    # local_dir_use_symlinks is deprecated (ignored) in recent
    # huggingface_hub releases but kept for older versions that need it
    # to materialize real files instead of cache symlinks.
    kwargs = dict(
        repo_id=repo_id,
        repo_type="dataset",
        local_dir=local_root,
        local_dir_use_symlinks=False,
    )
    query_path = hf_hub_download(filename=repo_query_path, **kwargs)
    rel_path = hf_hub_download(filename=repo_rel_path, **kwargs)
    corpus_path = hf_hub_download(filename=repo_corpus_path, **kwargs)
    return {"query": query_path, "rel": rel_path, "corpus": corpus_path}
def load_isonetpp_benchmark(
    repo_id: str = "structlearning/isonetpp-benchmark",
    mode: str = "train",
    dataset_name: str = "aids",
    dataset_size: str = "large",
    batch_size: int = 128,
    data_type: str = "pyg",
    device: Optional[str] = None,
    download_root: Optional[str] = None,
):
    """Download one ISONET++ benchmark split and build the dataset object.

    Args:
        repo_id: HF Hub dataset repository id.
        mode: split key — "train", "val", "test", or "extra_test_300"
            (mapped onto the subiso_dataset mode constants below; unknown
            values pass through unchanged).
        dataset_name: dataset family, e.g. "aids" (normalized to "aids240k"
            by _normalize_name).
        dataset_size: "large" or "small"; selects the repo folder and is
            forwarded to the dataset.
        batch_size: forwarded to SubgraphIsomorphismDataset.
        data_type: "pyg" or "gmn" graph representation (forwarded).
        device: optional device string (forwarded; may be None).
        download_root: optional local directory for the downloaded files.

    Returns:
        A SubgraphIsomorphismDataset configured for the requested split.
    """
    # Translate user-facing split names to the project's mode constants.
    mode_map = {
        "train": TRAIN_MODE, "val": VAL_MODE, "test": TEST_MODE,
        "extra_test_300": BROAD_TEST_MODE, "Extra_test_300": BROAD_TEST_MODE
    }
    mode_norm = mode_map.get(mode, mode)
    paths = _ensure_paths(
        repo_id=repo_id,
        mode=mode_norm,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        local_root=download_root
    )
    # The downloaded structure is:
    #   <cache>/.../<folder>/splits/<mode>/<files>
    #   <cache>/.../<folder>/corpus/<files>
    #
    # NOTE(review): the original comment claimed dataset_base_path should be
    # the *parent* of <folder>, but the two dirname() calls below yield
    # .../<folder>/splits and the third yields <folder> itself — so the value
    # actually passed on is <folder>, not its parent.  Since
    # dataset_path_override is also set to the <folder> name, confirm against
    # SubgraphIsomorphismDataset's path-joining logic whether this double
    # reference to <folder> is intended or one dirname() call is missing.
    base_path = os.path.dirname(os.path.dirname(paths["query"]))  # .../<folder>/splits
    dataset_base_path = os.path.dirname(base_path)  # .../<folder>
    dataset_config = dict(
        mode=mode_norm,
        dataset_name=_normalize_name(dataset_name),
        dataset_size=dataset_size,
        batch_size=batch_size,
        data_type=data_type,
        dataset_base_path=dataset_base_path,
        # Repo subfolder name ("large_dataset"/"small_dataset"); flagged as a
        # "critical fix" in the original source.
        dataset_path_override=_folder(dataset_size),
        experiment=None,
        device=device,
    )
    return SubgraphIsomorphismDataset(**dataset_config)