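"""Split a cluster-annotated FireProtDB parquet into per-label HF subsets.

Reads a parquet that already carries MMseqs2 ``cluster_id`` and ``split``
columns, optionally verifies that no cluster straddles two splits, and writes
one train/validation/test parquet trio per label column (dg, ddg, tm, dtm,
fitness, stabilizing).

Usage sketch (the script filename is illustrative; the flags are the ones
defined in main() below):

    python make_subsets.py \
        --input ../data/fireprotdb_with_cluster_splits.parquet \
        --out_dir ../data/subsets \
        --strict_split_check
"""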
from __future__ import annotations

import argparse
import os

import pandas as pd

OUT_SPLITS = ["train", "validation", "test"]


def ensure_dir(p: str) -> None:
    os.makedirs(p, exist_ok=True)


def write_subset(out_dir: str, name: str, df: pd.DataFrame) -> None:
    outp = os.path.join(out_dir, name)
    ensure_dir(outp)
    for split in OUT_SPLITS:
        fp = os.path.join(outp, f"{split}.parquet")
        d_split = df[df["split"] == split].copy()
        d_split.to_parquet(fp, index=False)
        print(f"{name}/{split}: {len(d_split):,} -> {fp}")


def main() -> None:
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", default="../data/fireprotdb_with_cluster_splits.parquet",
                    help="Parquet with MMseqs2 cluster_id and split columns already assigned")
    ap.add_argument("--out_dir", default="../data/subsets",
                    help="Output dir for HF subset parquets")
    ap.add_argument("--drop_columns", default="",
                    help="Comma-separated columns to drop in outputs (optional)")
    ap.add_argument("--require_sequence", action="store_true",
                    help="If set, drop rows without a UniProt sequence before creating subsets")
    ap.add_argument("--strict_split_check", action="store_true",
                    help="If set, error if any protein/cluster appears in multiple splits")
    args = ap.parse_args()

    df = pd.read_parquet(args.input)

    if "split" not in df.columns:
        raise ValueError("Input parquet must contain a 'split' column (train/validation/test).")
    if "mutation" not in df.columns:
        raise ValueError("Input parquet must contain a 'mutation' column.")

    df["split"] = df["split"].astype("string").str.strip().str.lower()
    bad = sorted(set(df["split"].dropna().unique()) - set(OUT_SPLITS))
    if bad:
        raise ValueError(f"Unexpected split values: {bad}. Expected {OUT_SPLITS}.")

    if args.require_sequence and "sequence" in df.columns:
        before = len(df)
        df = df[df["sequence"].notna()].copy()
        print(f"Dropped rows without sequence: {before - len(df):,}")
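
    # Leakage check: MMseqs2 clusters group homologous sequences, so each
    # cluster (or protein, as a fallback key) should sit in exactly one split;
    # otherwise near-duplicates leak between train and evaluation data.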
    if args.strict_split_check:
        if "cluster_id" in df.columns:
            key = "cluster_id"
        elif "uniprotkb" in df.columns:
            key = "uniprotkb"
        else:
            key = "sequence_id"
        if key not in df.columns:
            raise ValueError(
                "strict_split_check requires a cluster_id, uniprotkb, or sequence_id column."
            )

        counts = df.groupby(key, dropna=False)["split"].nunique()
        leaked = counts[counts > 1]
        if len(leaked):
            example = leaked.head(10)
            raise ValueError(
                f"Leakage detected: {len(leaked):,} {key}s appear in multiple splits.\n"
                f"Examples:\n{example}"
            )
        print(f"Split leakage check passed using key={key}.")

    # Subsets are built only from rows that describe a mutation.
    has_mut = df["mutation"].notna()
    df_mut = df[has_mut].copy()

    # One subset per label column; rows lacking that label are dropped.
    label_cols = {
        "mutation_dg": "dg",
        "mutation_ddg": "ddg",
        "mutation_tm": "tm",
        "mutation_dtm": "dtm",
        "mutation_fitness": "fitness",
        "mutation_binary": "stabilizing",
    }
    subsets = {name: df_mut[df_mut[col].notna()].copy() for name, col in label_cols.items()}
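    # Note that the subsets overlap: a row carrying both ddg and tm
    # measurements lands in mutation_ddg and in mutation_tm alike.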

    # Optionally drop unwanted columns from every subset; errors="ignore" skips
    # names that are absent, so no separate existence check is needed.
    if args.drop_columns.strip():
        drop_cols = [c.strip() for c in args.drop_columns.split(",") if c.strip()]
        subsets = {name: sub.drop(columns=drop_cols, errors="ignore")
                   for name, sub in subsets.items()}

    for name, sub in subsets.items():
        write_subset(args.out_dir, name, sub)


if __name__ == "__main__":
    main()
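
# Downstream loading sketch (assumes the Hugging Face `datasets` library; not
# exercised by this script):
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={
#         "train": "../data/subsets/mutation_ddg/train.parquet",
#         "validation": "../data/subsets/mutation_ddg/validation.parquet",
#         "test": "../data/subsets/mutation_ddg/test.parquet",
#     })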