# FireProtDB2 / src/01_process_csv.py
# (repo snapshot ae74a72 -- "final pipeline and updated subsets")
#!/usr/bin/env python3
"""
Clean FireProtDB 2.0 CSV into ML-ready table with some reformatting.
Outputs:
- A canonical row-per-experiment table with parsed mutation fields and normalized columns.
- Optionally writes Parquet for speed.
Usage:
python 01_process_csv.py \
--input ../data/fireprotdb_20251015_16.csv \
--output ../data/fireprotdb_clean.parquet
Notes:
- This script is conservative: it does NOT impute missing ddg/dtm.
- It standardizes a few categorical fields; extend mappings as needed.
"""
from __future__ import annotations
import argparse
import math
import re
from typing import Optional, Tuple, Dict
import pandas as pd
# --- PDB parsing ---
_PDB_SPLIT = re.compile(r"[;,| ]+")
_PDB_ID = re.compile(r"^[0-9][A-Za-z0-9]{3}$")  # 4-char PDB id, first char numeric
_PDB_CHAIN = re.compile(r"[:_]")  # separators for chain suffixes like "1ABC:A" / "1ABC_A"
def parse_pdb_ids(x: object) -> Tuple[Optional[str], list]:
    """
    Parse a free-text wwPDB field into structured ids.

    Returns (pdb_id, pdb_ids) where:
    - pdb_id: first valid 4-char PDB id (lowercase) in sorted order, or None
    - pdb_ids: sorted unique list of valid ids (lowercase)

    Non-string or blank input yields (None, []).
    """
    if not isinstance(x, str):
        return None, []
    s = x.strip()
    if not s:
        return None, []
    ids = []
    for part in _PDB_SPLIT.split(s):
        # sometimes entries include chain like "1ABC:A" or "1ABC_A";
        # keep only the id portion (regex hoisted to module scope)
        candidate = _PDB_CHAIN.split(part)[0].strip()
        if _PDB_ID.match(candidate):
            ids.append(candidate.lower())
    ids = sorted(set(ids))
    return (ids[0] if ids else None), ids
# --- Mutation parsing ---
# Accepted single-substitution notations:
#   A123V   (wt residue, position, mutant residue)
#   123A>V  (position, wt residue, '>', mutant residue) -- rare
# NOTE: three-letter notation like "p.Ala123Val" is NOT parsed here; such
# strings fall through and are kept only in "mutation_raw".
_MUT_A123V = re.compile(r"^(?P<wt>[ACDEFGHIKLMNPQRSTVWY])(?P<pos>\d+)(?P<mut>[ACDEFGHIKLMNPQRSTVWY])$")
_MUT_123A_GT_V = re.compile(r"^(?P<pos>\d+)(?P<wt>[ACDEFGHIKLMNPQRSTVWY])>(?P<mut>[ACDEFGHIKLMNPQRSTVWY])$")
def parse_substitution(s: str) -> Tuple[Optional[str], Optional[int], Optional[str], Optional[str]]:
    """
    Parse a single-point substitution string.

    Returns (wt_residue, position, mut_residue, normalized_mutation_string).
    All four are None for empty/non-string input or unrecognized notation
    (multi-mutations, insertions/deletions, three-letter codes, ...).
    """
    if not isinstance(s, str) or not s.strip():
        return None, None, None, None
    text = s.strip()
    for pattern in (_MUT_A123V, _MUT_123A_GT_V):
        match = pattern.match(text)
        if match is None:
            continue
        wt = match.group("wt")
        mut = match.group("mut")
        pos = int(match.group("pos"))
        return wt, pos, mut, f"{wt}{pos}{mut}"
    return None, None, None, None
# --- Categorical normalization ---
def norm_str(x: object) -> Optional[str]:
    """Strip surrounding whitespace; map non-strings and blanks to None."""
    if not isinstance(x, str):
        return None
    stripped = x.strip()
    return stripped or None
BUFFER_MAP: Dict[str, str] = {
    "sodium tetraborate": "Sodium tetraborate",
    "tetra-borate": "Sodium tetraborate",
    "tetraborate": "Sodium tetraborate",
    "sodium phosphate": "Sodium phosphate",
}
METHOD_MAP: Dict[str, str] = {
    "dsc": "DSC",
    "cd": "CD",
}
MEASURE_MAP: Dict[str, str] = {
    "thermal": "Thermal",
}
def normalize_categoricals(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add normalized categorical columns: buffer_norm, method_norm, measure_norm.

    Values are matched case-insensitively (after stripping) against the
    *_MAP tables above; unmapped values fall back to the stripped original
    text. A missing source column yields an all-NA output column. The frame
    is modified in place and also returned.
    """
    def map_lower(series: pd.Series, mapping: Dict[str, str]) -> pd.Series:
        s = series.astype("string")
        s_lower = s.str.lower().str.strip()
        # mapped canonical value where known, otherwise the stripped original
        return s_lower.map(mapping).fillna(s.str.strip())
    # one data-driven loop instead of three copy-pasted if/else branches
    specs = [
        ("BUFFER", "buffer_norm", BUFFER_MAP),
        ("METHOD", "method_norm", METHOD_MAP),
        ("MEASURE", "measure_norm", MEASURE_MAP),
    ]
    for src, dst, mapping in specs:
        df[dst] = map_lower(df[src], mapping) if src in df.columns else pd.NA
    return df
# --- Numeric cleanup ---
def to_float(x: object) -> Optional[float]:
if x is None or (isinstance(x, float) and math.isnan(x)):
return None
if isinstance(x, (int, float)):
return float(x)
if isinstance(x, str):
s = x.strip()
if not s:
return None
# Handle "1mM" vs "1 mM" etc. for numeric fields by stripping units if present.
# For now: attempt raw float parse.
try:
return float(s)
except ValueError:
# try to extract first float substring
m = re.search(r"[-+]?\d*\.?\d+(?:[eE][-+]?\d+)?", s)
if m:
try:
return float(m.group(0))
except ValueError:
return None
return None
def clean_numeric_columns(df: pd.DataFrame) -> pd.DataFrame:
# ddg-like
for col in ["DDG", "DOMAINOME_DDG", "DG", "DH", "DHVH"]:
if col in df.columns:
df[col.lower()] = df[col].map(to_float)
else:
df[col.lower()] = pd.NA
# temperature-like
for col in ["TM", "DTM", "EXP_TEMPERATURE"]:
if col in df.columns:
df[col.lower()] = df[col].map(to_float)
else:
df[col.lower()] = pd.NA
# fitness
if "DOMAINOME_FITNESS" in df.columns:
df['fitness'] = df["DOMAINOME_FITNESS"].map(to_float)
else:
df['fitness'] = pd.NA
# pH
if "PH" in df.columns:
df["ph"] = df["PH"].map(to_float)
else:
df["ph"] = pd.NA
return df
def derive_labels(df: pd.DataFrame) -> pd.DataFrame:
    """
    Add stabilizing/destabilizing labels.

    - stabilizing_explicit: parsed from a yes/no STABILIZING column if present.
    - stabilizing_ddg: sign-based label from "ddg" (ddg < 0 -> True,
      ddg > 0 -> False, anything else -> NA).
    - stabilizing: explicit label where available, ddg-based elsewhere.

    Requires the "ddg" column (added by clean_numeric_columns). The frame is
    modified in place and also returned.
    """
    if "STABILIZING" in df.columns:
        flags = df["STABILIZING"].astype("string").str.lower().str.strip()
        df["stabilizing_explicit"] = flags.map({"yes": True, "no": False})
    else:
        df["stabilizing_explicit"] = pd.NA

    def _ddg_label(v):
        # only trust parsed floats; common convention: negative ddg = stabilizing
        if isinstance(v, float):
            if v < 0:
                return True
            if v > 0:
                return False
        return pd.NA

    df["stabilizing_ddg"] = df["ddg"].apply(_ddg_label)
    # unified label: explicit wins, ddg-based fills the gaps
    df["stabilizing"] = df["stabilizing_explicit"].copy()
    missing = df["stabilizing"].isna()
    df.loc[missing, "stabilizing"] = df.loc[missing, "stabilizing_ddg"]
    return df
def select_and_rename(df: pd.DataFrame) -> pd.DataFrame:
    """
    Project the processed frame onto the canonical output schema.

    Renames raw uppercase columns to snake_case, carries over the numeric
    and normalized columns added by earlier steps, and keeps selected raw
    condition fields. Any source column absent from df becomes an all-NA
    output column, so the output schema is stable.
    """
    # canonical columns (keep more if you want)
    keep = {
        "EXPERIMENT_ID": "experiment_id",
        "SEQUENCE_ID": "sequence_id",
        "MUTANT_ID": "mutant_id",
        "SOURCE_SEQUENCE_ID": "source_sequence_id",
        "TARGET_SEQUENCE_ID": "target_sequence_id",
        "SEQUENCE_LENGTH": "sequence_length",
        "SUBSTITUTION": "substitution_raw",
        "INSERTION": "insertion_raw",
        "DELETION": "deletion_raw",
        "PROTEIN": "protein_name",
        "ORGANISM": "organism",
        "UNIPROTKB": "uniprotkb",
        "EC_NUMBER": "ec_number",
        "INTERPRO": "interpro",
        "PUBLICATION_PMID": "pmid",
        "PUBLICATION_DOI": "doi",
        "PUBLICATION_YEAR": "publication_year",
        "SOURCE_DATASET": "source_dataset",
        "REFERENCING_DATASET": "referencing_dataset",
        "WWPDB": "wwpdb_raw",
    }
    # BUG FIX: build on df's index. A bare pd.DataFrame() starts with an
    # empty index, so if the first assigned source column were missing the
    # frame would be locked to zero rows and every Series assigned later
    # would align against that empty index, silently dropping all data.
    out = pd.DataFrame(index=df.index)
    for src, dst in keep.items():
        out[dst] = df[src] if src in df.columns else pd.NA
    # numeric & normalized categorical fields added earlier
    extra_cols = [
        "ddg", "domainome_ddg", "dg", "dh", "dhvh",
        "tm", "dtm", "exp_temperature", "fitness",
        "ph",
        "buffer_norm", "method_norm", "measure_norm",
        "stabilizing",
    ]
    for c in extra_cols:
        out[c] = df[c] if c in df.columns else pd.NA
    # keep raw text fields that matter for conditions (optional)
    for src, dst in [("BUFFER", "buffer_raw"), ("BUFFER_CONC", "buffer_conc_raw"), ("ION", "ion_raw"), ("ION_CONC", "ion_conc_raw"), ("STATE", "state")]:
        out[dst] = df[src] if src in df.columns else pd.NA
    out["pdb_id"] = df["pdb_id"] if "pdb_id" in df.columns else pd.NA
    out["pdb_ids"] = df["pdb_ids"] if "pdb_ids" in df.columns else [[] for _ in range(len(df))]
    return out
def main() -> None:
    """CLI entry point: load the raw CSV, clean/parse it, write the table."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input", help="Path to raw FireProtDB 2.0 CSV", default='../data/fireprotdb_20251015-164116.csv')
    ap.add_argument("--output", help="Path to output .parquet or .csv", default='../data/fireprotdb_cleaned.parquet')
    ap.add_argument("--min_seq_len", type=int, default=1, help="Drop sequences shorter than this")
    ap.add_argument("--drop_no_label", action="store_true", help="Drop rows with neither ddg nor dtm")
    args = ap.parse_args()
    # Load as strings to avoid pandas guessing mixed types
    df = pd.read_csv(args.input, dtype="string", keep_default_na=False, na_values=["", "NA", "NaN", "nan"])
    df = df.replace({"": pd.NA})
    # Basic trimming
    for c in df.columns:
        if pd.api.types.is_string_dtype(df[c]):
            df[c] = df[c].astype("string").str.strip()
    # Normalize & parse
    df = normalize_categoricals(df)
    df = clean_numeric_columns(df)
    # Parse substitution into structured columns.
    # BUG FIX: the column-existence check must happen BEFORE indexing df.
    # The old code put the check inside a lambda applied to
    # df["SUBSTITUTION"], which raised KeyError when the column was absent.
    if "SUBSTITUTION" in df.columns:
        parsed = df["SUBSTITUTION"].apply(parse_substitution)
    else:
        parsed = pd.Series([(None, None, None, None)] * len(df), index=df.index, dtype="object")
    df["wt_residue"] = parsed.map(lambda t: t[0])
    df["position"] = parsed.map(lambda t: t[1]).astype("Int64")
    df["mut_residue"] = parsed.map(lambda t: t[2])
    df["mutation"] = parsed.map(lambda t: t[3])
    df = derive_labels(df)
    if "WWPDB" in df.columns:
        parsed_pdb = df["WWPDB"].astype("string").fillna("").apply(lambda v: parse_pdb_ids(str(v)))
        df["pdb_id"] = parsed_pdb.map(lambda t: t[0])
        df["pdb_ids"] = parsed_pdb.map(lambda t: t[1])
    else:
        df["pdb_id"] = pd.NA
        df["pdb_ids"] = [[] for _ in range(len(df))]
    # Filter
    if "SEQUENCE_LENGTH" in df.columns:
        seq_len = df["SEQUENCE_LENGTH"].map(to_float)
        df["sequence_length_num"] = seq_len
        # NOTE(review): rows whose SEQUENCE_LENGTH is missing/unparseable
        # count as length 0 and are dropped whenever --min_seq_len >= 1
        # (the default). Confirm that this is intended.
        df = df[df["sequence_length_num"].fillna(0) >= args.min_seq_len]
    if args.drop_no_label:
        df = df[~(df["ddg"].isna() & df["dtm"].isna())]
    # Select final schema
    out = select_and_rename(df)
    # Add parsed mutation columns
    out["wt_residue"] = df["wt_residue"]
    out["position"] = df["position"]
    out["mut_residue"] = df["mut_residue"]
    out["mutation"] = df["mutation"]
    # De-dupe obvious duplicates (same experiment id). Skip when the id is
    # entirely missing: drop_duplicates treats NA values as equal, so an
    # all-NA id column would collapse the whole table into a single row.
    if "experiment_id" in out.columns and out["experiment_id"].notna().any():
        out = out.drop_duplicates(subset=["experiment_id"])
    # Write
    if args.output.lower().endswith(".parquet"):
        out.to_parquet(args.output, index=False)
    elif args.output.lower().endswith(".csv"):
        out.to_csv(args.output, index=False)
    else:
        raise ValueError("Output must end with .parquet or .csv")
    print(f"Wrote {len(out):,} rows to {args.output}")
if __name__ == "__main__":
    main()