drake463 committed on
Commit
159f435
·
1 Parent(s): 68c498e

updated pipeline to fix sequence_id bug, added additional step to greedily assign clusters to splits

Browse files
data/fireprotdb_splits_balanced_ddg.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69d2b51712ea537f5229d1194ee5e4eae34e3db610ec0cefd0d2282415b05a82
3
+ size 116931745
data/fireprotdb_with_cluster_splits.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:7a7685586c8a203b6548f7379f99e3dfec24fd1c8b161df8f4d879da455a86b4
3
- size 109389319
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0231c790251fa9e54aa22beba469753840329fca0b10fc511ed9bce1ae4db99e
3
+ size 117032379
data/subsets/mutation_binary/test.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:46e35999b00ac379c7508c0def79f52b800f1bdaed2bfe77ef36461688ccb185
3
- size 62766
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fcbae17160d47abbebe881b6b9cd4e38fea3325344209b6e0f736c06206fa64
3
+ size 2399800
data/subsets/mutation_binary/train.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2f2459ac8a7be1a454829874c5802ebbc4a7088f5c3597cfce791cfa8ef98240
3
- size 454356
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:82b22ca971643cd76402ed24684565211c82ff1093fe2142ae1f29ccbaf4f963
3
+ size 14543037
data/subsets/mutation_binary/validation.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:72bb3dc7ba474e9ae11a1c7020d0b1af09b1f44b82cc5697c7b559af661d3dc6
3
- size 13100753
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cb0fc05783975c2f54ba1b320ecb50e553da5576a08d1bc7161700875fccbbe
3
+ size 2390616
data/subsets/mutation_ddg/test.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:2e4bdd7661b8d9b708fa5a7b125bd5803a0e0910421d70bbe2373cd4aa63a637
3
- size 63049
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cdc2c09cc26d4b86a5258b89fb09f08b619b40497ee61806b78d76b576c3c572
3
+ size 2400677
data/subsets/mutation_ddg/train.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:1314c1bf86e41315bd817b3a73ddb23b2e4bba20a7b581473e3fa7dd1d70cb56
3
- size 458469
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6407dde0c4b5f36080ebca6eed5091c82484d17c619786f20250d127d5d71fd
3
+ size 14548884
data/subsets/mutation_ddg/validation.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d99c5f983d01ce91294122f84c1e4622c7bad28b046abc4e803d344f2cf15742
3
- size 13099916
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e77dc84cf0087f69d310bfb44a23565ca794f582da5cd4727d2dc681d756469
3
+ size 2391573
data/subsets/mutation_dtm/test.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:143d5453656c6633c0e20a2034190d4b27da3dd137455794704570ec45709139
3
- size 72093
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d2cec3dbb85679bf7c8dd1a31a9a3a37207d05c1ca1660139102774a858af4d2
3
+ size 55867
data/subsets/mutation_dtm/train.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:675549665c00715760446ed8c08549ffad7a33af06ba9c55a770e6890d5038e1
3
- size 275341
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa11942b7d45cedabd64371c7b0a47dd2fff0683d8d10438fce528a48cb74538
3
+ size 282696
data/subsets/mutation_dtm/validation.parquet CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3ddd23bb0a3b00f7369b16e91cf3ad8e6f3d75bdfb664cb42239a8e2e1e99428
3
- size 62706
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b381764db88f1d8f3e73628ef2dbbf3f3dc95feedb85a620ea6480d0f6342a3c
3
+ size 70635
src/05_assign_cluster_splits.py CHANGED
@@ -45,13 +45,25 @@ def main():
45
  cl["cluster_id"] = cl["rep"]
46
  member_to_cluster = cl.set_index("member")["cluster_id"].to_dict()
47
 
48
- # Build protein_id same way as FASTA export
49
- df["uniprotkb"] = df["uniprotkb"].astype("string").fillna("").str.strip()
50
- df["sequence_id"] = df["sequence_id"].astype("string").fillna("").str.strip()
51
-
52
- df["protein_id"] = df["uniprotkb"]
53
- df.loc[df["protein_id"] == "", "protein_id"] = "seqid:" + df.loc[df["protein_id"] == "", "sequence_id"]
54
- df.loc[df["protein_id"] == "seqid:", "protein_id"] = "unknown"
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  df["cluster_id"] = df["protein_id"].map(lambda pid: member_to_cluster.get(pid, None))
57
 
 
45
  cl["cluster_id"] = cl["rep"]
46
  member_to_cluster = cl.set_index("member")["cluster_id"].to_dict()
47
 
48
+ # Build protein_id robustly
49
+ for c in ["uniprotkb", "sequence_id", "source_sequence_id", "target_sequence_id", "experiment_id"]:
50
+ if c not in df.columns:
51
+ df[c] = pd.NA
52
+
53
+ u = df["uniprotkb"].astype("string").fillna("").str.strip()
54
+ sid = df["sequence_id"].astype("string").fillna("").str.strip()
55
+ src = df["source_sequence_id"].astype("string").fillna("").str.strip()
56
+ tgt = df["target_sequence_id"].astype("string").fillna("").str.strip()
57
+ eid = df["experiment_id"].astype("string").fillna("").str.strip()
58
+
59
+ # priority: uniprot > sequence_id > source_sequence_id > target_sequence_id > experiment_id
60
+ protein_id = u
61
+ protein_id = protein_id.where(protein_id != "", "seqid:" + sid)
62
+ protein_id = protein_id.where(protein_id != "seqid:", "srcseq:" + src)
63
+ protein_id = protein_id.where(protein_id != "srcseq:", "tgtseq:" + tgt)
64
+ protein_id = protein_id.where(protein_id != "tgtseq:", "exp:" + eid)
65
+
66
+ df["protein_id"] = protein_id
67
 
68
  df["cluster_id"] = df["protein_id"].map(lambda pid: member_to_cluster.get(pid, None))
69
 
src/06_make_weighted_splits.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import pandas as pd
6
+
7
+ SPLITS = ["train", "validation", "test"]
8
+
9
def assign_weighted_splits(cluster_weights: pd.Series, ratios=(0.8, 0.1, 0.1)) -> pd.DataFrame:
    """Greedily assign whole clusters to train/validation/test splits.

    Clusters are processed largest-first; each one is placed in the split
    whose relative fill ``(current + weight) / target`` would be lowest after
    adding it.  This keeps the achieved totals close to the requested ratios
    while never splitting a cluster across splits.

    Args:
        cluster_weights: Series indexed by cluster_id; values are each
            cluster's weight (e.g. task-row count).
        ratios: (train, validation, test) target fractions of the total weight.

    Returns:
        DataFrame with columns ``cluster_id`` and ``split``.
    """
    total = float(cluster_weights.sum())
    targets = {
        "train": total * ratios[0],
        "validation": total * ratios[1],
        "test": total * ratios[2],
    }
    current = {s: 0.0 for s in targets}
    assignment = {}

    def score(split: str, w: float) -> float:
        # Relative fill after adding this cluster; lower is better.
        # BUGFIX: a zero-ratio split must never receive clusters.  The old
        # fallback divided by 1.0, which could make the empty split look
        # cheapest (e.g. a fractional weight scores < 1 against it while
        # real splits score >= 1).  An infinite score excludes it outright.
        if targets[split] <= 0:
            return float("inf")
        return (current[split] + w) / targets[split]

    # Largest-first greedy: placing big clusters first makes balancing easier.
    for cid, weight in cluster_weights.sort_values(ascending=False).items():
        weight = float(weight)
        chosen = min(targets, key=lambda s: score(s, weight))
        assignment[cid] = chosen
        current[chosen] += weight

    out = pd.DataFrame({"cluster_id": list(assignment.keys()), "split": list(assignment.values())})

    # Print expected vs achieved (cluster-weighted, i.e. row-weighted)
    print("Target totals:", {k: round(v) for k, v in targets.items()})
    print("Achieved totals:", {k: round(v) for k, v in current.items()})
    return out
39
+
40
+ def main():
41
+ ap = argparse.ArgumentParser()
42
+ ap.add_argument("--input", required=True)
43
+ ap.add_argument("--output", required=True)
44
+ ap.add_argument("--ratios", default="0.8,0.1,0.1")
45
+ ap.add_argument("--task", choices=["ddg", "dtm", "binary"], default="ddg")
46
+ args = ap.parse_args()
47
+
48
+ ratios = tuple(float(x) for x in args.ratios.split(","))
49
+
50
+ df = pd.read_parquet(args.input)
51
+ if "cluster_id" not in df.columns:
52
+ raise ValueError("Input must contain cluster_id")
53
+
54
+ # IMPORTANT: remove any existing split so we don't accidentally reuse it
55
+ if "split" in df.columns:
56
+ df = df.drop(columns=["split"])
57
+
58
+ has_mut = df["mutation"].notna()
59
+
60
+ if args.task == "ddg":
61
+ df_task = df[has_mut & df["ddg"].notna()].copy()
62
+ elif args.task == "dtm":
63
+ df_task = df[has_mut & df["dtm"].notna()].copy()
64
+ else:
65
+ df_task = df[has_mut & df["stabilizing"].notna()].copy()
66
+
67
+ # Ensure cluster_id is a plain string key
68
+ df_task["cluster_id"] = df_task["cluster_id"].astype("string").fillna("NA_CLUSTER")
69
+
70
+ # Cluster weights = number of task rows
71
+ w = df_task.groupby("cluster_id").size()
72
+
73
+ print(f"Task={args.task} rows: {len(df_task):,}")
74
+ print(f"Task clusters: {len(w):,}")
75
+ print("Top 10 clusters by rows:")
76
+ print(w.sort_values(ascending=False).head(10))
77
+
78
+ assign = assign_weighted_splits(w, ratios=ratios)
79
+
80
+ # Join back to all rows (clusters without task rows -> train by default)
81
+ df["cluster_id"] = df["cluster_id"].astype("string").fillna("NA_CLUSTER")
82
+ df = df.merge(assign, on="cluster_id", how="left")
83
+ df["split"] = df["split"].fillna("train")
84
+
85
+ df.to_parquet(args.output, index=False)
86
+ print("Wrote:", args.output)
87
+
88
+ # Quick verify on task rows
89
+ df_task_out = df_task.merge(assign, on="cluster_id", how="left")
90
+ df_task_out["split"] = df_task_out["split"].fillna("train")
91
+ print("Task rows by split:")
92
+ print(df_task_out["split"].value_counts())
93
+
94
+ if __name__ == "__main__":
95
+ main()
src/{06_gen_subsets.py → 07_gen_subsets.py} RENAMED
File without changes