# Source: Hugging Face dataset repo "isonetpp-benchmark", file isonetpp_loader.py
# (uploaded by indraroy; commit 96a607b, "path fixes"; 4.79 kB)
# isonetpp_loader.py
from __future__ import annotations
import os
import pickle
from typing import Literal, Optional, Dict
from huggingface_hub import hf_hub_download
try:
from subiso_dataset import SubgraphIsomorphismDataset, TRAIN_MODE, VAL_MODE, TEST_MODE, BROAD_TEST_MODE, GMN_DATA_TYPE, PYG_DATA_TYPE
except Exception as e:
raise ImportError(
"Make sure `subiso_dataset.py` (with SubgraphIsomorphismDataset) is in the same repo.\n"
f"Import error: {e}"
)
Mode = Literal["train", "val", "test", "Extra_test_300"]
Size = Literal["small", "large"]
Name = Literal["aids240k", "mutag240k", "ptc_fm240k", "ptc_fr240k", "ptc_mm240k", "ptc_mr240k"]
def _mode_prefix(mode: str) -> str:
# Your file naming uses "test" prefix for Extra_test_300 as well
return "test" if "test" in mode.lower() else mode
def _pair_count(dataset_size: Size) -> str:
return "80k" if dataset_size == "small" else "240k"
def _ensure_paths(
    repo_id: str,
    mode: "Mode",
    dataset_name: "Name",
    dataset_size: "Size",
    local_root: Optional[str] = None,
) -> Dict[str, str]:
    """
    Download the three files needed for a given split into the HF cache
    (or `local_root` if set) and return their local paths.

    Expected layout in the dataset repo:
        corpus/<base><pairs>_corpus_subgraphs.pkl
        splits/<mode>/<prefix>_<base><pairs>_query_subgraphs.pkl
        splits/<mode>/<prefix>_<base><pairs>_rel_nx_is_subgraph_iso.pkl

    where <prefix> is "test" for any test-like mode (see _mode_prefix),
    <base> is the dataset name without its "240k" suffix, and <pairs> is
    "80k" for the small variant, "240k" for large.

    Returns:
        dict with keys "query", "rel", "corpus" mapping to local file paths.
    """
    prefix = _mode_prefix(mode)
    pairs = _pair_count(dataset_size)
    # The `Name` literals carry a fixed "240k" suffix; strip it so the
    # size-dependent pair count can be appended without duplication
    # (actual saved names look like: train_aids240k_query_subgraphs.pkl).
    # NOTE(review): assumes small-variant files use the same base name with
    # an "80k" suffix — confirm against the repo contents.
    base = dataset_name[:-len("240k")] if dataset_name.endswith("240k") else dataset_name
    query_fname = f"{prefix}_{base}{pairs}_query_subgraphs.pkl"
    rel_fname = f"{prefix}_{base}{pairs}_rel_nx_is_subgraph_iso.pkl"
    corpus_fname = f"{base}{pairs}_corpus_subgraphs.pkl"
    # Paths inside the dataset repo.
    repo_query_path = f"splits/{mode}/{query_fname}"
    repo_rel_path = f"splits/{mode}/{rel_fname}"
    repo_corpus_path = f"corpus/{corpus_fname}"
    # Download to the HF cache, or materialize under local_root if provided.
    kwargs = dict(repo_id=repo_id, repo_type="dataset")
    query_path = hf_hub_download(filename=repo_query_path, **kwargs, local_dir=local_root, local_dir_use_symlinks=False)
    rel_path = hf_hub_download(filename=repo_rel_path, **kwargs, local_dir=local_root, local_dir_use_symlinks=False)
    corpus_path = hf_hub_download(filename=repo_corpus_path, **kwargs, local_dir=local_root, local_dir_use_symlinks=False)
    return {"query": query_path, "rel": rel_path, "corpus": corpus_path}
def load_isonetpp_benchmark(
    repo_id: str = "structlearning/isonetpp-benchmark",
    mode: "Mode" = "train",
    dataset_name: "Name" = "aids240k",
    dataset_size: "Size" = "large",
    batch_size: int = 128,
    data_type: str = "pyg",
    device: Optional[str] = None,
    download_root: Optional[str] = None,
):
    """Fetch one benchmark split from the HF Hub and return an initialized
    SubgraphIsomorphismDataset.

    The split/corpus pickles are downloaded via `_ensure_paths`; the dataset
    class is then pointed at the parent directory that holds the downloaded
    `splits/` and `corpus/` folders.
    """
    # Translate user-facing mode strings to the class constants; unknown
    # values pass through unchanged.
    aliases = {
        "train": TRAIN_MODE,
        "val": VAL_MODE,
        "test": TEST_MODE,
        "extra_test_300": BROAD_TEST_MODE,
        "Extra_test_300": BROAD_TEST_MODE,
    }
    resolved_mode = aliases.get(mode, mode)
    downloaded = _ensure_paths(
        repo_id=repo_id,
        mode=resolved_mode,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        local_root=download_root,
    )
    # The query file sits at .../<root>/splits/<mode>/<file>; two dirname
    # hops reach .../<root>/splits, one more reaches the root containing
    # both `splits/` and `corpus/`.
    splits_dir = os.path.dirname(os.path.dirname(downloaded["query"]))
    root_dir = os.path.dirname(splits_dir)
    return SubgraphIsomorphismDataset(
        mode=resolved_mode,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        batch_size=batch_size,
        data_type=data_type,
        dataset_base_path=root_dir,
        experiment=None,
        dataset_path_override=None,
        device=device,
    )