#!/usr/bin/env python3
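"""Build per-measurement subsets of FireProtDB with cluster-aware splits.

Reads a parquet that already carries MMseqs2 cluster assignments and a
train/validation/test 'split' column, filters to rows with a parsed mutation,
and writes one HF-style subset per measurement (dg, ddg, tm, dtm, fitness,
stabilizing), each as train/validation/test parquet files.

Example invocation (defaults shown; the script filename is illustrative):
    python build_fireprotdb_subsets.py \
        --input ../data/fireprotdb_with_cluster_splits.parquet \
        --out_dir ../data/subsets \
        --strict_split_check
"""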
from __future__ import annotations

import os
import argparse
import pandas as pd


OUT_SPLITS = ["train", "validation", "test"]

def ensure_dir(p: str):
    os.makedirs(p, exist_ok=True)

def write_subset(out_dir: str, name: str, df: pd.DataFrame):
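    """Write one parquet per split (train/validation/test) for a subset under out_dir/name/."""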
    outp = os.path.join(out_dir, name)
    ensure_dir(outp)
    for split in OUT_SPLITS:
        fp = os.path.join(outp, f"{split}.parquet")
        d_split = df[df["split"] == split].copy()
        d_split.to_parquet(fp, index=False)
        print(f"{name}/{split}: {len(d_split):,} -> {fp}")

def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", default="../data/fireprotdb_with_cluster_splits.parquet",
                    help="Parquet with MMseqs2 cluster_id and split columns already assigned")
    ap.add_argument("--out_dir", default="../data/subsets",
                    help="Output dir for HF subset parquets")
    ap.add_argument("--drop_columns", default="",
                    help="Comma-separated columns to drop in outputs (optional)")
    ap.add_argument("--require_sequence", action="store_true",
                    help="If set, drop rows without a UniProt sequence before creating subsets")
    ap.add_argument("--strict_split_check", action="store_true",
                    help="If set, error if any protein/cluster appears in multiple splits")
    args = ap.parse_args()

    df = pd.read_parquet(args.input)

    # Required columns: split/mutation plus the measurement columns used to build subsets
    required = ["split", "mutation", "dg", "ddg", "tm", "dtm", "fitness", "stabilizing"]
    missing = [c for c in required if c not in df.columns]
    if missing:
        raise ValueError(f"Input parquet is missing required columns: {missing}")

    # Normalize split values
    df["split"] = df["split"].astype("string").str.strip().str.lower()
    bad = sorted(set(df["split"].dropna().unique()) - set(OUT_SPLITS))
    if bad:
        raise ValueError(f"Unexpected split values: {bad}. Expected {OUT_SPLITS}.")

    if args.require_sequence and "sequence" in df.columns:
        before = len(df)
        df = df[df["sequence"].notna()].copy()
        print(f"Dropped rows without sequence: {before - len(df):,}")

    # Optional leakage check: ensure each cluster/protein lives in exactly one split
    # Prefer cluster_id if available, else fall back to uniprotkb/sequence_id
    if args.strict_split_check:
        if "cluster_id" in df.columns:
            key = "cluster_id"
        elif "uniprotkb" in df.columns:
            key = "uniprotkb"
        else:
            key = "sequence_id"

        counts = df.groupby(key, dropna=False)["split"].nunique()
        leaked = counts[counts > 1]
        if len(leaked):
            example = leaked.head(10)
            raise ValueError(
                f"Leakage detected: {len(leaked):,} {key}s appear in multiple splits.\n"
                f"Examples:\n{example}"
            )
        print(f"Split leakage check passed using key={key}.")

    # Base filter: keep only rows with a parsed mutation (mirrors the filtering in fireprotdb.py)
    has_mut = df["mutation"].notna()
    df_mut = df[has_mut].copy()

    # Build one subset per measurement column, keeping rows where that value is present
    subsets = {
        "mutation_dg": df_mut[df_mut["dg"].notna()].copy(),
        "mutation_ddg": df_mut[df_mut["ddg"].notna()].copy(),
        "mutation_tm": df_mut[df_mut["tm"].notna()].copy(),
        "mutation_dtm": df_mut[df_mut["dtm"].notna()].copy(),
        "mutation_fitness": df_mut[df_mut["fitness"].notna()].copy(),
        "mutation_binary": df_mut[df_mut["stabilizing"].notna()].copy(),
    }

    # Optional: drop columns to slim outputs
    if args.drop_columns.strip():
        drop_cols = [c.strip() for c in args.drop_columns.split(",") if c.strip()]
        subsets = {name: d.drop(columns=drop_cols, errors="ignore") for name, d in subsets.items()}

    for name, d in subsets.items():
        write_subset(args.out_dir, name, d)

if __name__ == "__main__":
    main()
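
# Example (illustrative): each generated subset can be loaded directly, e.g.
#   import pandas as pd
#   train_ddg = pd.read_parquet("../data/subsets/mutation_ddg/train.parquet")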