#!/usr/bin/env python3
"""
Assign cluster-based splits to FireProtDB rows.
Reads MMseqs2 cluster TSV and assigns a split per cluster, then joins back to all rows.
Outputs:
- A parquet copy of the input with additional protein_id/cluster_id/split columns
Usage:
python 05_assign_cluster_splits.py \
--input ../data/fireprotdb_with_sequences.parquet \
--clusters_tsv ../data/mmseqs_clusters_cluster.tsv \
--output ../data/fireprotdb_with_cluster_splits.parquet \
--ratios 0.8,0.1,0.1
Notes:
- 80% train, 10% validation, and 10% test are the default splits.
"""
from __future__ import annotations
import argparse
import hashlib
import pandas as pd
def stable_hash(s: str) -> int:
    """Map *s* to a deterministic 32-bit integer via SHA-256.

    Unlike the builtin ``hash()``, this is stable across processes and runs
    (no PYTHONHASHSEED dependence), so split assignment is reproducible.
    """
    digest = hashlib.sha256(s.encode("utf-8")).digest()
    # First 4 bytes, big-endian == int(hexdigest[:8], 16).
    return int.from_bytes(digest[:4], "big")
def split_from_cluster(cluster_id: str, ratios=(0.8, 0.1, 0.1)) -> str:
    """Deterministically map a cluster id to "train", "validation", or "test".

    The id is hashed to a fraction in [0, 1] via ``stable_hash`` and compared
    against the cumulative ratio boundaries, so every member of a cluster
    lands in the same split on every run.
    """
    fraction = stable_hash(cluster_id) / 0xFFFFFFFF
    train_cut = ratios[0]
    validation_cut = ratios[0] + ratios[1]
    if fraction < train_cut:
        return "train"
    elif fraction < validation_cut:
        return "validation"
    return "test"
def main():
    """Join MMseqs2 cluster assignments onto the dataset and assign splits.

    Reads the input table and the MMseqs2 cluster TSV, derives a stable
    protein_id per row, maps it to its cluster representative, assigns each
    cluster a deterministic train/validation/test split, and writes the
    result to parquet.

    Raises:
        ValueError: if ``--ratios`` does not contain exactly three values
            summing to 1.0.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", default="../data/fireprotdb_with_sequences.parquet")
    ap.add_argument("--clusters_tsv", default="../data/mmseqs_clusters_cluster.tsv",
                    help="MMseqs2 cluster output TSV (representative\\tmember)")
    ap.add_argument("--output", default="../data/fireprotdb_with_cluster_splits.parquet")
    ap.add_argument("--ratios", default="0.8,0.1,0.1")
    args = ap.parse_args()

    ratios = tuple(float(x) for x in args.ratios.split(","))
    # Fail fast on malformed ratios instead of silently misassigning splits
    # (or raising an obscure IndexError inside split_from_cluster).
    if len(ratios) != 3:
        raise ValueError(f"--ratios must have exactly 3 values, got {len(ratios)}")
    if abs(sum(ratios) - 1.0) > 1e-6:
        raise ValueError(f"--ratios must sum to 1.0, got {sum(ratios)}")

    df = pd.read_parquet(args.input)

    # Load MMseqs2 TSV: representative \t member.  The representative id
    # doubles as the cluster id.
    cl = pd.read_csv(args.clusters_tsv, sep="\t", header=None,
                     names=["rep", "member"], dtype="string")
    cl["rep"] = cl["rep"].astype("string").fillna("").str.strip()
    cl["member"] = cl["member"].astype("string").fillna("").str.strip()
    member_to_cluster = cl.set_index("member")["rep"].to_dict()

    # Build protein_id robustly; columns missing from the input become all-NA
    # so the fallback chain below still works.
    id_cols = ["uniprotkb", "sequence_id", "source_sequence_id",
               "target_sequence_id", "experiment_id"]
    for c in id_cols:
        if c not in df.columns:
            df[c] = pd.NA
    cleaned = {c: df[c].astype("string").fillna("").str.strip() for c in id_cols}

    # priority: uniprot > sequence_id > source_sequence_id > target_sequence_id > experiment_id
    # Test the *source* column for emptiness rather than matching sentinel
    # prefix strings, so an id that happens to equal a bare prefix (e.g.
    # "seqid:") can never break the chain.
    protein_id = cleaned["uniprotkb"]
    for prefix, col in [("seqid:", "sequence_id"),
                        ("srcseq:", "source_sequence_id"),
                        ("tgtseq:", "target_sequence_id")]:
        candidate = (prefix + cleaned[col]).where(cleaned[col] != "", "")
        protein_id = protein_id.where(protein_id != "", candidate)
    # Last resort: experiment_id, applied unconditionally (matches the
    # original behavior even when experiment_id is itself empty).
    protein_id = protein_id.where(protein_id != "", "exp:" + cleaned["experiment_id"])
    df["protein_id"] = protein_id

    # Series.map with a dict yields NaN for ids absent from the clustering.
    df["cluster_id"] = df["protein_id"].map(member_to_cluster)
    # If a protein didn't get clustered (missing sequence etc.), treat it as
    # its own singleton cluster.
    df["cluster_id"] = df["cluster_id"].fillna("singleton:" + df["protein_id"])
    df["split"] = df["cluster_id"].map(lambda cid: split_from_cluster(str(cid), ratios=ratios))

    df.to_parquet(args.output, index=False)
    print(f"Wrote: {args.output}")
    print(df["split"].value_counts(dropna=False))
# Script entry point: importing this module has no side effects.
if __name__ == "__main__":
    main()