File size: 3,568 Bytes
8e574de
 
 
 
 
ae74a72
 
 
 
 
 
 
 
 
 
 
 
8e574de
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
159f435
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8e574de
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
#!/usr/bin/env python3
"""
Assign cluster-based splits to FireProtDB rows.

Reads MMseqs2 cluster TSV and assigns a split per cluster, then joins back to all rows.

Outputs:
- A parquet/CSV clone of input with additional cluster_id/split_id columns

Usage:
    python 05_assign_cluster_splits.py \
        --input ../data/fireprotdb_with_sequences.parquet \
        --clusters_tsv ../data/mmseqs_clusters_cluster.tsv \
        --output ../data/fireprotdb_with_cluster_splits.parquet \
        --ratios 0.8,0.1,0.1
Notes:
- 80% train, 10% validation, and 10% test are the default splits.
"""

from __future__ import annotations

import argparse
import hashlib
import pandas as pd

def stable_hash(s: str) -> int:
    """Map *s* to a deterministic 32-bit integer.

    Uses the first four bytes of the SHA-256 digest so the value is stable
    across processes and Python versions (unlike the builtin ``hash``).
    """
    digest = hashlib.sha256(s.encode("utf-8")).digest()
    return int.from_bytes(digest[:4], "big")

def split_from_cluster(cluster_id: str, ratios=(0.8, 0.1, 0.1)) -> str:
    """Deterministically assign *cluster_id* to "train", "validation" or "test".

    The id is hashed to a point in [0, 1] (stable 32-bit SHA-256 prefix),
    then bucketed against the cumulative *ratios*, so every member of a
    cluster always lands in the same split.
    """
    # Inline stable hash: first 8 hex chars of the SHA-256 digest, scaled
    # into [0, 1] by the maximum 32-bit value.
    digest_hex = hashlib.sha256(cluster_id.encode("utf-8")).hexdigest()
    point = int(digest_hex[:8], 16) / 0xFFFFFFFF

    train_cut = ratios[0]
    validation_cut = ratios[0] + ratios[1]
    if point < train_cut:
        return "train"
    if point < validation_cut:
        return "validation"
    return "test"

def main() -> None:
    """Join MMseqs2 cluster assignments onto the input table and add hash-based splits."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", default="../data/fireprotdb_with_sequences.parquet")
    ap.add_argument("--clusters_tsv", default="../data/mmseqs_clusters_cluster.tsv",
                    help="MMseqs2 cluster output TSV (representative\\tmember)")
    ap.add_argument("--output", default="../data/fireprotdb_with_cluster_splits.parquet")
    ap.add_argument("--ratios", default="0.8,0.1,0.1")
    args = ap.parse_args()

    # Comma-separated train/validation/test fractions, e.g. "0.8,0.1,0.1".
    ratios = tuple(float(x) for x in args.ratios.split(","))

    df = pd.read_parquet(args.input)

    # Load MMseqs2 TSV: representative \t member
    cl = pd.read_csv(args.clusters_tsv, sep="\t", header=None, names=["rep", "member"], dtype="string")
    # Normalize both columns to stripped, non-null strings so the dict lookup
    # below matches the protein_id values built from df exactly.
    cl["rep"] = cl["rep"].astype("string").fillna("").str.strip()
    cl["member"] = cl["member"].astype("string").fillna("").str.strip()

    # Use rep as cluster id
    cl["cluster_id"] = cl["rep"]
    # member -> cluster-representative mapping used for the join.
    member_to_cluster = cl.set_index("member")["cluster_id"].to_dict()

    # Build protein_id robustly
    # Ensure every candidate id column exists so the .astype chains below
    # never KeyError on inputs that lack some of them.
    for c in ["uniprotkb", "sequence_id", "source_sequence_id", "target_sequence_id", "experiment_id"]:
        if c not in df.columns:
            df[c] = pd.NA

    u = df["uniprotkb"].astype("string").fillna("").str.strip()
    sid = df["sequence_id"].astype("string").fillna("").str.strip()
    src = df["source_sequence_id"].astype("string").fillna("").str.strip()
    tgt = df["target_sequence_id"].astype("string").fillna("").str.strip()
    eid = df["experiment_id"].astype("string").fillna("").str.strip()

    # priority: uniprot > sequence_id > source_sequence_id > target_sequence_id > experiment_id
    # Fall-through chain: each .where keeps the current value unless it equals
    # the bare sentinel prefix (meaning the previous candidate was empty), in
    # which case it advances to the next-priority candidate. A row where every
    # candidate is empty ends up as the bare "exp:" sentinel.
    protein_id = u
    protein_id = protein_id.where(protein_id != "", "seqid:" + sid)
    protein_id = protein_id.where(protein_id != "seqid:", "srcseq:" + src)
    protein_id = protein_id.where(protein_id != "srcseq:", "tgtseq:" + tgt)
    protein_id = protein_id.where(protein_id != "tgtseq:", "exp:" + eid)

    df["protein_id"] = protein_id

    # NOTE(review): ids must match the FASTA headers given to MMseqs2 for this
    # lookup to hit — confirm the upstream clustering step used the same scheme.
    df["cluster_id"] = df["protein_id"].map(lambda pid: member_to_cluster.get(pid, None))

    # If a protein didn't get clustered (missing sequence etc.), treat it as its own cluster
    df["cluster_id"] = df["cluster_id"].fillna(df["protein_id"].map(lambda x: f"singleton:{x}"))

    # Hash each cluster id to a split so all rows of a cluster share one split.
    df["split"] = df["cluster_id"].map(lambda cid: split_from_cluster(str(cid), ratios=ratios))

    # NOTE(review): always writes parquet even if --output ends in .csv; the
    # module docstring mentions a CSV option — confirm which is intended.
    df.to_parquet(args.output, index=False)
    print(f"Wrote: {args.output}")
    print(df["split"].value_counts(dropna=False))

if __name__ == "__main__":
    main()