| """Build VenusX Fragment MCQ datasets. |
| |
| For each sub-task (Act, BindI, Dom, Evo, Motif) and each split |
| (train/validation/test), converts VenusX_Frag_{ann}_MF50 samples into |
| 4-choice MCQ with: |
| - 1 golden IPR ID (the original interpro_id) |
| - 3 distractors via 2-tier fallback: |
| Tier 1: InterPro hierarchy siblings (same parent, not ancestor/descendant) |
| Tier 2: Random from same sub-task label pool (exclude golden + picked) |
| |
| Each option carries a human-readable name from InterPro entry.list. |
| |
| Letter assignment (A/B/C/D) is randomized via a deterministic per-sample seed. |
| |
Output: per sub-task, one HF DatasetDict (train/validation/test splits) saved
via save_to_disk() to output/VenusX_Frag_{ann}_MF50_MCQ4/ next to this
script, plus a build_stats.json summary of per-split distractor sources.
| """ |
|
|
| from __future__ import annotations |
|
|
| import json |
| import random |
| from pathlib import Path |
| from typing import Any |
|
|
| from datasets import Dataset, DatasetDict, load_dataset |
|
|
HERE = Path(__file__).parent  # directory containing this script
CACHE_FILE = HERE / "interpro_cache.json"  # InterPro hierarchy + entry names
POOL_FILE = HERE / "label_pool.json"  # per-sub-task IPR label pools
OUT_DIR = HERE / "output"  # destination for built datasets and stats


# VenusX fragment sub-tasks to build MCQs for.
ANNOTATIONS = ["Act", "BindI", "Dom", "Evo", "Motif"]
SPLIT_STRATEGY = "MF50"  # split-strategy suffix used in the HF repo id
N_OPTIONS = 4  # choices per MCQ: 1 golden + 3 distractors
LETTERS = "ABCD"  # option letters, indexed in parallel with the options list
NO_DESC = "(no description available)"  # fallback when no InterPro name exists
|
|
|
|
| def _ancestors(ipr: str, parent: dict[str, str]) -> set[str]: |
| """Return all ancestors (parent, grandparent, ...) of an IPR.""" |
| out = set() |
| cur = ipr |
| while cur in parent: |
| cur = parent[cur] |
| if cur in out: |
| break |
| out.add(cur) |
| return out |
|
|
|
|
| def _descendants(ipr: str, children: dict[str, list[str]]) -> set[str]: |
| """Return all descendants (child, grandchild, ...) of an IPR.""" |
| out: set[str] = set() |
| stack = list(children.get(ipr, [])) |
| while stack: |
| cur = stack.pop() |
| if cur in out: |
| continue |
| out.add(cur) |
| stack.extend(children.get(cur, [])) |
| return out |
|
|
|
|
| def _siblings( |
| ipr: str, parent: dict[str, str], children: dict[str, list[str]] |
| ) -> list[str]: |
| """Return true siblings of an IPR (same parent, not self). |
| |
| Returns empty list if IPR has no parent (is root or not in hierarchy). |
| """ |
| par = parent.get(ipr) |
| if par is None: |
| return [] |
| return [c for c in children.get(par, []) if c != ipr] |
|
|
|
|
def _description(ipr: str, entries: dict[str, dict[str, str]]) -> str:
    """Look up the human-readable name for *ipr*, falling back to NO_DESC.

    NO_DESC is also returned when the entry exists but its name is
    missing or empty/falsy.
    """
    record = entries.get(ipr)
    if record is None:
        return NO_DESC
    name = record.get("name", NO_DESC)
    return name if name else NO_DESC
|
|
|
|
def pick_distractors(
    golden: str,
    pool: list[str],
    cache: dict[str, Any],
    rng: random.Random,
) -> tuple[list[str], str]:
    """Choose N_OPTIONS - 1 distractor IPR IDs for *golden*.

    Tier 1 draws from true hierarchy siblings (never the golden ID or
    any of its ancestors/descendants); tier 2 tops up from the sub-task
    label pool, excluding anything already chosen.

    Returns:
        (distractors, source) with source one of "hierarchy", "pool",
        or "mixed" depending on which tiers contributed.

    Raises:
        RuntimeError: if even the pool cannot supply enough distinct IDs.
    """
    parent = cache["parent"]
    children = cache["children"]

    # Everything hierarchy-related to the golden ID is off-limits.
    forbidden = {golden} | _ancestors(golden, parent) | _descendants(golden, children)

    # Tier 1: shuffled true siblings.
    sib_candidates = [s for s in _siblings(golden, parent, children) if s not in forbidden]
    rng.shuffle(sib_candidates)
    tier1 = sib_candidates[: N_OPTIONS - 1]

    # Tier 2: random fill from the label pool, avoiding anything taken.
    taken = forbidden | set(tier1)
    fillers = [x for x in pool if x not in taken]
    shortfall = (N_OPTIONS - 1) - len(tier1)
    rng.shuffle(fillers)
    tier2 = fillers[:shortfall]

    distractors = tier1 + tier2
    if len(distractors) < N_OPTIONS - 1:
        raise RuntimeError(
            f"Cannot pick {N_OPTIONS - 1} distractors for {golden}: "
            f"tier1={len(tier1)}, tier2={len(tier2)}, pool_size={len(pool)}"
        )

    if tier1 and tier2:
        source = "mixed"
    elif tier1:
        source = "hierarchy"
    else:
        source = "pool"
    return distractors, source
|
|
|
|
def build_mcq_sample(
    raw: dict[str, Any],
    annotation: str,
    split: str,
    pool: list[str],
    cache: dict[str, Any],
) -> dict[str, Any]:
    """Convert one raw VenusX fragment row into a 4-choice MCQ record.

    A per-sample RNG seeded from (annotation, split, uid, golden IPR)
    makes both distractor selection and A/B/C/D assignment deterministic
    across rebuilds.
    """
    uid = str(raw.get("uid", ""))
    golden = str(raw["interpro_id"])
    fragment = str(raw["seq_fragment"])
    label = int(raw["interpro_label"])

    rng = random.Random(f"venusx-mcq-v1:{annotation}:{split}:{uid}:{golden}")

    distractors, source = pick_distractors(golden, pool, cache, rng)

    # Randomize letter assignment with the same deterministic RNG.
    options = [golden, *distractors]
    rng.shuffle(options)

    answer_idx = options.index(golden)
    entries = cache["entries"]

    sample: dict[str, Any] = {
        "uid": uid,
        "seq_fragment": fragment,
        "annotation": annotation,
        "interpro_label": label,
        "correct_ipr": golden,
        "correct_letter": LETTERS[answer_idx],
        "distractor_source": source,
    }
    for idx, letter in enumerate(LETTERS):
        key = letter.lower()
        sample[f"option_{key}_ipr"] = options[idx]
        sample[f"option_{key}_desc"] = _description(options[idx], entries)
    return sample
|
|
|
|
def build_sub_task(
    annotation: str,
    pool: list[str],
    cache: dict[str, Any],
) -> tuple[DatasetDict, dict[str, dict[str, int]]]:
    """Build MCQ datasets for all splits of one sub-task.

    Args:
        annotation: Sub-task name (e.g. "Act", "Motif").
        pool: Label pool of candidate IPR IDs for tier-2 distractors.
        cache: InterPro cache with "parent", "children", "entries" keys.

    Returns:
        (datasets, stats): datasets maps split name -> HF Dataset of MCQ
        records; stats maps split name -> counts dict with keys
        "n", "hierarchy", "mixed", "pool".
    """
    # NOTE: return annotation fixed — this function has always returned a
    # (DatasetDict, stats) pair, as unpacked by the caller in main().
    hf_id = f"AI4Protein/VenusX_Frag_{annotation}_{SPLIT_STRATEGY}"
    print(f"\n=== Building MCQ for {annotation} ({hf_id}) ===", flush=True)
    raw_ds = load_dataset(hf_id)

    split_stats: dict[str, dict[str, int]] = {}
    out_dict = {}
    for split_name in raw_ds:
        split_ds = raw_ds[split_name]
        records = []
        # Track which fallback tier supplied distractors, per split.
        source_counts = {"hierarchy": 0, "mixed": 0, "pool": 0}
        for row in split_ds:
            sample = build_mcq_sample(dict(row), annotation, split_name, pool, cache)
            records.append(sample)
            source_counts[sample["distractor_source"]] += 1
        out_dict[split_name] = Dataset.from_list(records)
        split_stats[split_name] = {"n": len(records), **source_counts}
        print(
            f" {split_name}: n={len(records)}, "
            f"hierarchy={source_counts['hierarchy']}, "
            f"mixed={source_counts['mixed']}, "
            f"pool={source_counts['pool']}",
            flush=True,
        )

    return DatasetDict(out_dict), split_stats
|
|
|
|
def main() -> int:
    """Entry point: build and save MCQ datasets for every sub-task.

    Reads the InterPro cache and label pools from disk, builds each
    annotation's dataset, saves it under OUT_DIR, and writes a
    build_stats.json summary. Returns 0 on success.
    """
    cache = json.loads(CACHE_FILE.read_text())
    pool_data = json.loads(POOL_FILE.read_text())
    OUT_DIR.mkdir(parents=True, exist_ok=True)

    global_stats = {}
    for annotation in ANNOTATIONS:
        subtask_pool = pool_data["per_subtask"][annotation]
        ds_dict, stats = build_sub_task(annotation, subtask_pool, cache)
        target = OUT_DIR / f"VenusX_Frag_{annotation}_{SPLIT_STRATEGY}_MCQ4"
        ds_dict.save_to_disk(str(target))
        global_stats[annotation] = stats
        print(f" Saved to {target}", flush=True)

    stats_path = OUT_DIR / "build_stats.json"
    stats_path.write_text(json.dumps(global_stats, indent=2))
    print(f"\nStats: {stats_path}")
    return 0
|
|
|
|
if __name__ == "__main__":
    # Propagate main()'s integer exit code to the shell.
    raise SystemExit(main())
|
|