File size: 4,228 Bytes
159f435
ae74a72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159f435
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ae74a72
159f435
 
 
 
 
 
 
 
 
 
 
 
 
 
ae74a72
 
 
159f435
ae74a72
 
159f435
 
ae74a72
 
159f435
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
#!/usr/bin/env python3
"""
Re-assigns cluster-based splits to account for mutation data counts per protein (via greedy partition).

Reads in the previous split parquet file, and then generates a new split column

Outputs:
- A parquet/CSV with new assigned splits.

Usage:
    python 06_make_weighted_splits.py \
            --input ../data/fireprotdb_cluster_splits.parquet \
            --output ../data/fireprotdb_splits_balanced_ddg.parquet \
            --ratios 0.8,0.1,0.1 \
            --task ddg
Notes:
- Can balance splits based on the number of rows available for different data types. Useful if splits based on ddG result in large imbalances for other data types. Specify with --task [type].
"""
from __future__ import annotations

import argparse
import pandas as pd

SPLITS = ["train", "validation", "test"]

def assign_weighted_splits(cluster_weights: pd.Series, ratios=(0.8, 0.1, 0.1)) -> pd.DataFrame:
    """Greedily assign clusters to train/validation/test splits by weight.

    Clusters are processed largest-first; each one is placed in the split
    whose relative fill ((current + weight) / target) would be smallest
    after adding it, which keeps the achieved totals close to the
    requested ratios.

    Args:
        cluster_weights: Series indexed by cluster_id with a numeric
            weight per cluster (e.g. number of task rows).
        ratios: Target fractions, one per split in SPLITS
            (train, validation, test).

    Returns:
        DataFrame with columns ``cluster_id`` and ``split``.

    Raises:
        ValueError: if ``ratios`` does not have exactly len(SPLITS) entries.
    """
    # Fail loudly on a malformed --ratios instead of an IndexError later.
    if len(ratios) != len(SPLITS):
        raise ValueError(f"ratios must have {len(SPLITS)} entries, got {len(ratios)}")

    total = float(cluster_weights.sum())
    targets = {s: total * r for s, r in zip(SPLITS, ratios)}
    current = {s: 0.0 for s in SPLITS}
    assignment: dict = {}

    # Largest-first greedy: placing big clusters first lets the small ones
    # fine-tune the balance afterwards.
    for cid, w in cluster_weights.sort_values(ascending=False).items():
        w = float(w)

        # Relative fill after adding this cluster; lower is better.
        # A zero target (ratio of 0) is guarded so the division stays defined.
        chosen = min(
            SPLITS,
            key=lambda s: (current[s] + w) / (targets[s] if targets[s] > 0 else 1.0),
        )
        assignment[cid] = chosen
        current[chosen] += w

    out = pd.DataFrame({"cluster_id": list(assignment.keys()), "split": list(assignment.values())})

    # Print expected vs achieved (cluster-weighted, i.e. row-weighted) totals.
    print("Target totals:", {k: round(v) for k, v in targets.items()})
    print("Achieved totals:", {k: round(v) for k, v in current.items()})
    return out

def main():
    """CLI entry point: rebalance cluster splits by per-task row counts.

    Reads the input parquet, drops any pre-existing split column, computes
    per-cluster weights as the number of rows with data for the chosen task,
    greedily assigns clusters to splits, and writes the result.
    """
    ap = argparse.ArgumentParser(description=__doc__)
    ap.add_argument("--input", required=True)
    ap.add_argument("--output", required=True)
    ap.add_argument("--ratios", default="0.8,0.1,0.1")
    ap.add_argument("--task", choices=["dg", "ddg", "tm", "dtm", "fitness", "binary"], default="ddg")
    args = ap.parse_args()

    ratios = tuple(float(x) for x in args.ratios.split(","))

    df = pd.read_parquet(args.input)
    if "cluster_id" not in df.columns:
        raise ValueError("Input must contain cluster_id")

    # IMPORTANT: remove any existing split so we don't accidentally reuse it
    if "split" in df.columns:
        df = df.drop(columns=["split"])

    has_mut = df["mutation"].notna()

    # Column whose non-null rows define each task; "binary" is backed by
    # the stabilizing column, every other task by its same-named column.
    task_col = {
        "dg": "dg",
        "ddg": "ddg",
        "tm": "tm",
        "dtm": "dtm",
        "fitness": "fitness",
        "binary": "stabilizing",
    }[args.task]
    df_task = df[has_mut & df[task_col].notna()].copy()

    # Ensure cluster_id is a plain string key
    df_task["cluster_id"] = df_task["cluster_id"].astype("string").fillna("NA_CLUSTER")

    # Cluster weights = number of task rows
    w = df_task.groupby("cluster_id").size()

    print(f"Task={args.task} rows: {len(df_task):,}")
    print(f"Task clusters: {len(w):,}")
    print("Top 10 clusters by rows:")
    print(w.sort_values(ascending=False).head(10))

    assign = assign_weighted_splits(w, ratios=ratios)

    # Join back to all rows (clusters without task rows -> train by default)
    df["cluster_id"] = df["cluster_id"].astype("string").fillna("NA_CLUSTER")
    df = df.merge(assign, on="cluster_id", how="left")
    df["split"] = df["split"].fillna("train")

    df.to_parquet(args.output, index=False)
    print("Wrote:", args.output)

    # Quick verify on task rows
    df_task_out = df_task.merge(assign, on="cluster_id", how="left")
    df_task_out["split"] = df_task_out["split"].fillna("train")
    print("Task rows by split:")
    print(df_task_out["split"].value_counts())

if __name__ == "__main__":
    main()