# PISCES-CulledPDB / src/02_build_curated_master_csv.py
# Uploaded by akshayansamy via huggingface_hub (commit fc905aa, verified).
#!/usr/bin/env python3
"""
Build chain CSVs from index. Curated by subset: only list/FASTA pairs in
curated_csv/cullpdb_list_fasta_index.csv are processed.
Outputs:
- curated_csv/cullpdb_combined_chains.csv — single master CSV for all analysis
- curated_csv/subset_chains/<list_basename>.csv — one CSV per subset
"""
import csv
import re
import sys
from pathlib import Path
from typing import List, Optional, Tuple
# Project layout is resolved relative to this script's own location,
# so the script works regardless of the current working directory.
SCRIPT_DIR = Path(__file__).resolve().parent
BASE = SCRIPT_DIR.parent
# NOTE(review): CULLPDB_DIR is not referenced anywhere in this script —
# presumably the raw download directory; confirm before removing.
CULLPDB_DIR = BASE / "pieces" / "2026_01_26"
CURATED_DIR = BASE / "curated_csv"
SUBSET_CSV_DIR = CURATED_DIR / "subset_chains" # one CSV per subset, named by list_basename
COMBINED_CSV = CURATED_DIR / "cullpdb_combined_chains.csv" # single CSV for all analysis
# Matches cullpdb list basenames of the form:
#   cullpdb_pc<pc>_res<min>-<max>_[noBrks_]len40-10000_R<R>_<methods>_d<YYYY_MM_DD>_chains<N>
# Capture groups: pc, res min, res max, optional "noBrks" marker,
# R-factor cutoff, methods string, chain count.
PAT = re.compile(
    r"^cullpdb_pc([\d.]+)_res([\d.]+)-([\d.]+)_"
    r"(?:(noBrks)_)?"
    r"len40-10000_R([\d.]+)_"
    r"(.+?)_d\d{4}_\d{2}_\d{2}_chains(\d+)$"
)
def split_pdb_chain(pdb_chain: str) -> tuple:
    """Split a PDBchain identifier into (pdb_id, chain_id).

    The PDB ID is the first four characters; everything after them is the
    chain ID. For identifiers of four characters or fewer, the chain ID is
    the empty string — slicing yields exactly that, so no branch is needed.
    """
    return (pdb_chain[:4], pdb_chain[4:])
def parse_params_from_basename(base: str) -> Optional[dict]:
    """Extract curation parameters from a cullpdb list basename.

    Expected form (see PAT):
        cullpdb_pc<pc>_res<min>-<max>_[noBrks_]len40-10000_R<R>_<methods>_d<YYYY_MM_DD>_chains<N>

    Returns a dict with keys:
        pc (float), resolution_range ("min-max"), no_breaks ("yes"/"no"),
        R (float), source_list (the basename itself),
    or None when the basename does not match the pattern.
    The methods string and chain count are matched but intentionally
    not included in the result.
    """
    m = PAT.match(base)
    if not m:
        return None
    # Underscore-prefixed names mark groups we match but do not emit
    # (the original bound them to an unused local `n`).
    pc, res_min, res_max, no_brks, r_cutoff, _methods, _n_chains = m.groups()
    return {
        "pc": float(pc),
        "resolution_range": f"{res_min}-{res_max}",
        "no_breaks": "yes" if no_brks is not None else "no",
        "R": float(r_cutoff),
        "source_list": base,
    }
def read_list_file(path: Path) -> List[dict]:
    """Parse a whitespace-delimited cullpdb list file into row dicts.

    The file has a one-line header:
        PDBchain len method resol rfac freerfac
    Blank lines, rows with fewer than 5 fields, and rows whose length
    field is not an integer are silently skipped. freerfac is optional
    and defaults to "".

    Returns a list of dicts with keys:
        pdb_chain (str), len (int), method, resol, rfac, freerfac (str).
    """
    rows: List[dict] = []
    with open(path) as f:
        # Skip the header; next(f, None) is a no-op on an empty file,
        # so no separate emptiness guard is needed.
        next(f, None)
        for line in f:
            parts = line.split()
            if len(parts) < 5:
                continue  # blank or malformed line
            try:
                length = int(parts[1])
            except ValueError:
                continue  # corrupt length field
            rows.append({
                "pdb_chain": parts[0],
                "len": length,
                "method": parts[2],
                "resol": parts[3],
                "rfac": parts[4],
                "freerfac": parts[5] if len(parts) > 5 else "",
            })
    return rows
def read_fasta_entries(path: Path) -> List[Tuple[str, str]]:
    """Parse a FASTA file into (id, sequence) pairs, in file order.

    The id is the first whitespace-separated token after ">" in each
    header line ("" for an empty header); the sequence is the
    concatenation of the following lines (blank lines contribute
    nothing). Sequence lines appearing before the first header are
    discarded, matching the original behavior.
    """
    entries: List[Tuple[str, str]] = []
    current_id = None
    current_seq: List[str] = []
    with open(path) as f:
        for line in f:
            line = line.rstrip("\n")
            if line.startswith(">"):
                # Flush the previous entry, if one was started.
                # (The original also checked `current_seq is not None`,
                # which was always true — dead code, removed.)
                if current_id is not None:
                    entries.append((current_id, "".join(current_seq)))
                rest = line[1:].strip()
                current_id = rest.split(None, 1)[0] if rest else ""
                current_seq = []
            else:
                current_seq.append(line)
    # Flush the final entry.
    if current_id is not None:
        entries.append((current_id, "".join(current_seq)))
    return entries
def load_pairs_from_index(index_csv: Path) -> List[tuple]:
    """Load (base, list_path, fasta_path, params) from the curated index CSV.

    Only subsets listed in the index are processed downstream. Rows with
    blank fields are dropped silently; rows whose files are missing on
    disk, or whose basename cannot be parsed, are skipped with a note
    on stderr.
    """
    pairs = []
    with open(index_csv, newline="") as handle:
        for record in csv.DictReader(handle):
            base = record.get("list_basename", "").strip()
            list_field = record.get("list_path", "").strip()
            fasta_field = record.get("fasta_path", "").strip()
            if not (base and list_field and fasta_field):
                continue
            list_path, fasta_path = Path(list_field), Path(fasta_field)
            if not (list_path.exists() and fasta_path.exists()):
                print(f"Skip (missing): {base}", file=sys.stderr)
                continue
            params = parse_params_from_basename(base)
            if not params:
                print(f"Skip (parse): {base}", file=sys.stderr)
                continue
            pairs.append((base, list_path, fasta_path, params))
    return pairs
def main():
    """Build per-subset chain CSVs plus one combined master CSV.

    Reads curated_csv/cullpdb_list_fasta_index.csv, joins each subset's
    list file with its FASTA file positionally (with ID and length sanity
    checks per row), writes one CSV per subset under SUBSET_CSV_DIR and a
    single combined CSV at COMBINED_CSV, then prints a summary. Exits
    non-zero when the index is missing/empty or when any sanity check
    failed, so pipelines can fail fast.
    """
    index_csv = CURATED_DIR / "cullpdb_list_fasta_index.csv"
    if not index_csv.exists():
        print(f"Index not found: {index_csv}. Run build_list_fasta_index.py first.", file=sys.stderr)
        sys.exit(1)
    pairs = load_pairs_from_index(index_csv)
    if not pairs:
        print("No subsets in index (or paths missing). Nothing to do.", file=sys.stderr)
        sys.exit(1)
    print(f"Curating from index: {len(pairs)} subsets.")
    # Output schema shared by the subset CSVs and the combined CSV.
    fieldnames = [
        "pdb_chain", "pdb", "chain", "sequence", "len", "method", "resolution", "rfac", "freerfac",
        "pc", "no_breaks", "R", "source_list",
    ]
    SUBSET_CSV_DIR.mkdir(parents=True, exist_ok=True)
    # Sanity-check counters (global across all subsets)
    id_mismatches = 0
    len_mismatches = 0
    count_mismatches = 0
    files_skipped = 0
    rows_written_total = 0
    files_written = 0
    all_rows = [] # for combined CSV (all analysis uses this file)
    for base, list_path, fasta_path, params in pairs:
        list_rows = read_list_file(list_path)
        fasta_entries = read_fasta_entries(fasta_path)
        n_list, n_fasta = len(list_rows), len(fasta_entries)
        # A subset is only usable when the list and FASTA pair up 1:1
        # by position — otherwise zip() would silently misalign rows.
        if n_list != n_fasta:
            print(f"SKIP {base}: list has {n_list} rows, fasta has {n_fasta} entries", file=sys.stderr)
            files_skipped += 1
            continue
        subset_rows = []
        # Per-file counters, used only to cap stderr noise at 3 messages each.
        file_id_mismatches = 0
        file_len_mismatches = 0
        for i, (row, (fasta_id, seq)) in enumerate(zip(list_rows, fasta_entries)):
            list_id = row["pdb_chain"]
            list_len = row["len"]
            seq_len = len(seq)
            # Sanity 1: PDBchain in list must match first token in FASTA header
            if list_id != fasta_id:
                id_mismatches += 1
                file_id_mismatches += 1
                if file_id_mismatches <= 3:
                    # i+2: 1-based line number in the list file, accounting for its header.
                    print(f"ID mismatch in {base} row {i+2}: list={list_id!r} fasta_header={fasta_id!r}", file=sys.stderr)
                continue
            # Sanity 2: list length must match actual sequence length
            if list_len != seq_len:
                len_mismatches += 1
                file_len_mismatches += 1
                if file_len_mismatches <= 3:
                    print(f"LEN mismatch in {base} {list_id}: list len={list_len} seq len={seq_len}", file=sys.stderr)
                continue
            pdb_id, chain_id = split_pdb_chain(list_id)
            out_row = {
                "pdb_chain": list_id,
                "pdb": pdb_id,
                "chain": chain_id,
                "sequence": seq,
                "len": list_len,
                "method": row["method"],
                "resolution": row["resol"],
                "rfac": row["rfac"],
                "freerfac": row["freerfac"],
                "pc": params["pc"],
                "no_breaks": params["no_breaks"],
                "R": params["R"],
                "source_list": params["source_list"],
            }
            subset_rows.append(out_row)
        # Count files (not rows) that had at least one mismatch of either kind.
        if file_id_mismatches > 0 or file_len_mismatches > 0:
            count_mismatches += 1
        # Write one CSV per subset (mismatching rows were dropped above).
        out_path = SUBSET_CSV_DIR / f"{base}.csv"
        with open(out_path, "w", newline="") as f:
            w = csv.DictWriter(f, fieldnames=fieldnames)
            w.writeheader()
            w.writerows(subset_rows)
        all_rows.extend(subset_rows)
        rows_written_total += len(subset_rows)
        files_written += 1
    # Write single combined CSV for all downstream analysis
    with open(COMBINED_CSV, "w", newline="") as f:
        w = csv.DictWriter(f, fieldnames=fieldnames)
        w.writeheader()
        w.writerows(all_rows)
    print(f"Wrote {COMBINED_CSV} with {len(all_rows)} chain rows (master for analysis).")
    # Summary
    print(f"Wrote {files_written} subset CSVs to {SUBSET_CSV_DIR} ({rows_written_total} chain rows total).")
    print(f"Sanity check: ID mismatches (list PDBchain vs FASTA header) = {id_mismatches}")
    print(f"Sanity check: length mismatches (list len vs len(sequence)) = {len_mismatches}")
    print(f"Files with ID or len mismatch = {count_mismatches}; files skipped (list vs FASTA count) = {files_skipped}")
    # Non-zero exit when anything was dropped, so callers notice data loss.
    if id_mismatches > 0 or len_mismatches > 0 or files_skipped > 0:
        sys.exit(1)
# Script entry point: run the build when executed directly.
if __name__ == "__main__":
    main()