File size: 4,787 Bytes
73d6f53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4c92666
73d6f53
 
 
96a607b
 
 
4c92666
 
73d6f53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
# isonetpp_loader.py
from __future__ import annotations
import os
import pickle
from typing import Literal, Optional, Dict
from huggingface_hub import hf_hub_download

try:
    from subiso_dataset import SubgraphIsomorphismDataset, TRAIN_MODE, VAL_MODE, TEST_MODE, BROAD_TEST_MODE, GMN_DATA_TYPE, PYG_DATA_TYPE
except Exception as e:
    raise ImportError(
        "Make sure `subiso_dataset.py` (with SubgraphIsomorphismDataset) is in the same repo.\n"
        f"Import error: {e}"
    )

Mode = Literal["train", "val", "test", "Extra_test_300"]
Size = Literal["small", "large"]
Name = Literal["aids240k", "mutag240k", "ptc_fm240k", "ptc_fr240k", "ptc_mm240k", "ptc_mr240k"]

def _mode_prefix(mode: str) -> str:
    # Your file naming uses "test" prefix for Extra_test_300 as well
    return "test" if "test" in mode.lower() else mode

def _pair_count(dataset_size: Size) -> str:
    return "80k" if dataset_size == "small" else "240k"

def _ensure_paths(
    repo_id: str,
    mode: Mode,
    dataset_name: Name,
    dataset_size: Size,
    local_root: Optional[str] = None,
) -> Dict[str, str]:
    """
    Download the three files needed for a given split into the HF cache
    (or into ``local_root`` if provided) and return their local paths.

    Expected layout inside the dataset repo:
      splits/<mode>/<prefix>_<name><pairs>_query_subgraphs.pkl
      splits/<mode>/<prefix>_<name><pairs>_rel_nx_is_subgraph_iso.pkl
      corpus/<name><pairs>_corpus_subgraphs.pkl

    Args:
        repo_id: HF Hub dataset repo id, e.g. "structlearning/isonetpp-benchmark".
        mode: split name; also used as the directory name under ``splits/``.
        dataset_name: dataset identifier (see the ``Name`` literal).
        dataset_size: "small" (80k pairs) or "large" (240k pairs).
        local_root: optional directory to materialize files into instead of
            the default HF cache.

    Returns:
        Dict with keys "query", "rel", "corpus" mapping to local file paths.
    """
    prefix = _mode_prefix(mode)
    pairs = _pair_count(dataset_size)

    # Saved names look like: train_aids240k_query_subgraphs.pkl
    # (the original code prefixed these with an undefined `size_folder`
    # variable, which raised NameError on every call; the documented repo
    # layout has no such folder, so the prefix is dropped.)
    # NOTE(review): the Name literals already end in "240k", so appending
    # `pairs` may double the suffix ("aids240k240k") — confirm against the
    # actual repo file listing.
    query_fname = f"{prefix}_{dataset_name}{pairs}_query_subgraphs.pkl"
    rel_fname = f"{prefix}_{dataset_name}{pairs}_rel_nx_is_subgraph_iso.pkl"
    corpus_fname = f"{dataset_name}{pairs}_corpus_subgraphs.pkl"

    # Paths inside the dataset repo.
    repo_query_path = f"splits/{mode}/{query_fname}"
    repo_rel_path = f"splits/{mode}/{rel_fname}"
    repo_corpus_path = f"corpus/{corpus_fname}"

    # Download to the cache, or to local_root if provided.
    kwargs = dict(
        repo_id=repo_id,
        repo_type="dataset",
        local_dir=local_root,
        local_dir_use_symlinks=False,
    )
    query_path = hf_hub_download(filename=repo_query_path, **kwargs)
    rel_path = hf_hub_download(filename=repo_rel_path, **kwargs)
    corpus_path = hf_hub_download(filename=repo_corpus_path, **kwargs)

    return {"query": query_path, "rel": rel_path, "corpus": corpus_path}

def load_isonetpp_benchmark(
    repo_id: str = "structlearning/isonetpp-benchmark",
    mode: Mode = "train",
    dataset_name: Name = "aids240k",
    dataset_size: Size = "large",
    batch_size: int = 128,
    data_type: str = "pyg",
    device: Optional[str] = None,
    download_root: Optional[str] = None,
):
    """Download one benchmark split from the HF Hub and build a dataset.

    Files are fetched via :func:`_ensure_paths`, then a
    ``SubgraphIsomorphismDataset`` is constructed pointing at the local
    directory that contains the downloaded ``splits/`` and ``corpus/``
    folders.

    Returns:
        An initialized ``SubgraphIsomorphismDataset``.
    """
    # Translate the public split names into the dataset-class constants;
    # both spellings of the broad-test split are accepted, and unknown
    # values pass through unchanged.
    mode_map = {
        "train": TRAIN_MODE,
        "val": VAL_MODE,
        "test": TEST_MODE,
        "extra_test_300": BROAD_TEST_MODE,
        "Extra_test_300": BROAD_TEST_MODE,
    }
    mode_norm = mode_map.get(mode, mode)

    paths = _ensure_paths(
        repo_id=repo_id,
        mode=mode_norm,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        local_root=download_root,
    )

    # The dataset class resolves its files as
    # dataset_base_path + "splits/<mode>/..." and "corpus/...", so point
    # it at the directory that contains both of those folders.
    splits_dir = os.path.dirname(os.path.dirname(paths["query"]))
    dataset_base_path = os.path.dirname(splits_dir)

    return SubgraphIsomorphismDataset(
        mode=mode_norm,
        dataset_name=dataset_name,
        dataset_size=dataset_size,
        batch_size=batch_size,
        data_type=data_type,
        dataset_base_path=dataset_base_path,
        experiment=None,
        dataset_path_override=None,
        device=device,
    )