File size: 6,903 Bytes
6d1bbc7
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
#!/usr/bin/env python3
"""Prepare additional M1 datasets for Exp 1 and Exp 4.

Exp 1 (NegBioDB vs. random negatives):
  - negbiodb_m1_uniform_random.parquet   — untested pairs, uniform sampling
  - negbiodb_m1_degree_matched.parquet   — untested pairs, degree-distribution matched

Exp 4 (Node degree bias — DDB split):
  - negbiodb_m1_balanced_ddb.parquet     — M1 balanced + split_degree_balanced column

Usage:
    uv run python scripts/prepare_exp_data.py
    uv run python scripts/prepare_exp_data.py --data-dir exports/ --db data/negbiodb.db

Prerequisite:
    - exports/negbiodb_m1_balanced.parquet (from export_ml_dataset.py)
    - exports/chembl_positives_pchembl6.parquet
    - data/negbiodb.db (Exp 1 only)
"""

from __future__ import annotations

import argparse
import logging
import sys
from pathlib import Path

import pandas as pd

# Configure logging once at import time: concise timestamped lines for a CLI
# script (this module is only expected to run as an entry point, see main()).
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    datefmt="%H:%M:%S",
)
logger = logging.getLogger(__name__)

# Root of the project — this script lives in <root>/scripts/, so two parents up.
ROOT = Path(__file__).parent.parent


def prepare_exp1_uniform(
    db_path: Path, positives: pd.DataFrame, output_dir: Path, seed: int = 42
) -> Path:
    """Generate M1 dataset with uniform random negatives (Exp 1 control A).

    Args:
        db_path: Path to the negbiodb SQLite database.
        positives: ChEMBL positive pairs frame.
        output_dir: Directory the parquet is written into.
        seed: Random seed forwarded to the sampler.

    Returns:
        Path to the parquet file the exporter wrote.
    """
    # Deferred import: keeps the script importable without negbiodb installed.
    from negbiodb.export import generate_uniform_random_negatives

    logger.info("Generating uniform random negatives (Exp 1 control A)...")
    # Draw exactly len(positives) negatives so the dataset is 1:1 balanced.
    stats = generate_uniform_random_negatives(
        negbiodb_path=db_path,
        positives=positives,
        n_samples=len(positives),
        output_dir=output_dir,
        seed=seed,
    )
    written = Path(stats["path"])
    logger.info(
        "Saved %d rows to %s (pos=%d, neg=%d)",
        stats["total"],
        written.name,
        stats["n_pos"],
        stats["n_neg"],
    )
    return written


def prepare_exp1_degree_matched(
    db_path: Path, positives: pd.DataFrame, output_dir: Path, seed: int = 42
) -> Path:
    """Generate M1 dataset with degree-matched random negatives (Exp 1 control B).

    Args:
        db_path: Path to the negbiodb SQLite database.
        positives: ChEMBL positive pairs frame.
        output_dir: Directory the parquet is written into.
        seed: Random seed forwarded to the sampler.

    Returns:
        Path to the parquet file the exporter wrote.
    """
    # Deferred import: keeps the script importable without negbiodb installed.
    from negbiodb.export import generate_degree_matched_negatives

    logger.info("Generating degree-matched random negatives (Exp 1 control B)...")
    # Draw exactly len(positives) negatives so the dataset is 1:1 balanced.
    stats = generate_degree_matched_negatives(
        negbiodb_path=db_path,
        positives=positives,
        n_samples=len(positives),
        output_dir=output_dir,
        seed=seed,
    )
    written = Path(stats["path"])
    logger.info(
        "Saved %d rows to %s (pos=%d, neg=%d)",
        stats["total"],
        written.name,
        stats["n_pos"],
        stats["n_neg"],
    )
    return written


def prepare_exp4_ddb(
    m1_path: Path, output_dir: Path, seed: int = 42
) -> Path:
    """Add full-task split_degree_balanced column to M1 balanced for Exp 4.

    The DDB split is computed on the merged M1 benchmark graph itself so both
    positives and negatives are assigned under the same degree-balanced policy.

    Args:
        m1_path: Path to the M1 balanced parquet.
        output_dir: Directory the augmented parquet is written into.
        seed: Random seed forwarded to the split builder.

    Returns:
        Path to the written negbiodb_m1_balanced_ddb.parquet file.
    """
    # Deferred import: keeps the script importable without negbiodb installed.
    from negbiodb.export import add_degree_balanced_split

    logger.info("Building M1 balanced + DDB split (Exp 4)...")

    frame = pd.read_parquet(m1_path)
    logger.info("M1 balanced loaded: %d rows", len(frame))
    with_split = add_degree_balanced_split(frame, seed=seed)

    logger.info(
        "DDB split distribution (all rows): %s",
        with_split["split_degree_balanced"].value_counts().to_dict(),
    )
    # Diagnostic: per-label fraction of rows whose DDB assignment equals the
    # pre-existing random split, i.e. how much the new policy actually moved.
    agreement = with_split["split_degree_balanced"] == with_split["split_random"]
    by_label_same = (
        with_split.assign(same_as_random=agreement)
        .groupby("Y", dropna=False)["same_as_random"]
        .mean()
        .to_dict()
    )
    logger.info("Fraction matching split_random by label: %s", by_label_same)

    dest = output_dir / "negbiodb_m1_balanced_ddb.parquet"
    with_split.to_parquet(dest, index=False)
    logger.info("Saved %d rows to %s", len(with_split), dest.name)
    return dest


def verify_schema(path: Path, reference_path: Path) -> None:
    """Check that output schema matches the reference M1 file.

    Only column-name sets are compared; column order and dtypes are ignored.
    Logs a warning on mismatch, an info line on success — never raises.
    """
    import pyarrow.parquet as pq

    reference_cols = set(pq.read_schema(reference_path).names)
    candidate_cols = set(pq.read_schema(path).names)
    extra = candidate_cols - reference_cols
    missing = reference_cols - candidate_cols
    if not extra and not missing:
        logger.info("%s schema matches reference M1 ✓", path.name)
        return
    logger.warning(
        "%s schema differs from reference — extra: %s, missing: %s",
        path.name,
        extra,
        missing,
    )


def main(argv: list[str] | None = None) -> int:
    """CLI entry point: prepare Exp 1 and Exp 4 datasets.

    Args:
        argv: Argument list for argparse; None means sys.argv[1:].

    Returns:
        Process exit code — 0 on success, 1 when a required input is missing.
    """
    parser = argparse.ArgumentParser(description="Prepare Exp 1 and Exp 4 datasets")
    parser.add_argument(
        "--data-dir", type=Path, default=ROOT / "exports",
        help="Directory containing M1 parquets (default: exports/)"
    )
    parser.add_argument(
        "--db", type=Path, default=ROOT / "data" / "negbiodb.db",
        help="Path to negbiodb.db SQLite file"
    )
    parser.add_argument(
        "--seed", type=int, default=42,
        help="Random seed for negative sampling (default: 42)"
    )
    parser.add_argument(
        "--skip-exp1", action="store_true",
        help="Skip Exp 1 random negative generation (slow, ~30 min)"
    )
    parser.add_argument(
        "--skip-exp4", action="store_true",
        help="Skip Exp 4 DDB split preparation"
    )
    args = parser.parse_args(argv)

    data_dir: Path = args.data_dir
    db_path: Path = args.db

    # Verify inputs exist before doing any work.
    m1_balanced = data_dir / "negbiodb_m1_balanced.parquet"
    positives_path = data_dir / "chembl_positives_pchembl6.parquet"

    # The positives parquet is only needed when Exp 1 actually runs.
    required_inputs = [m1_balanced]
    if not args.skip_exp1:
        required_inputs.append(positives_path)

    for p in required_inputs:
        if not p.exists():
            logger.error("Required file missing: %s", p)
            return 1

    if not args.skip_exp1 and not db_path.exists():
        logger.error("Database not found: %s", db_path)
        return 1

    logger.info("Using data_dir=%s, db=%s, seed=%d", data_dir, db_path, args.seed)

    # --- Exp 1: random negatives ----------------------------------------
    if not args.skip_exp1:
        positives = pd.read_parquet(positives_path)
        logger.info("Loaded %d ChEMBL positives", len(positives))

        out_uniform = prepare_exp1_uniform(db_path, positives, data_dir, args.seed)
        verify_schema(out_uniform, m1_balanced)

        out_deg = prepare_exp1_degree_matched(db_path, positives, data_dir, args.seed)
        verify_schema(out_deg, m1_balanced)
    else:
        logger.info("Skipping Exp 1 (--skip-exp1)")

    # --- Exp 4: DDB split -----------------------------------------------
    if not args.skip_exp4:
        # Output path is logged by prepare_exp4_ddb itself; the return value
        # was previously bound to an unused local, so it is discarded here.
        prepare_exp4_ddb(m1_balanced, data_dir, args.seed)
    else:
        logger.info("Skipping Exp 4 (--skip-exp4)")

    logger.info("Done.")
    return 0


# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    sys.exit(main())