# SAAINTDB/src/04_dataset_split.py
# Separate PDB files based on train/validation/test splits
# - Build an index of all *.pdb files (typically named <pdb_id>_model_*.pdb) once, then copy by split.
# - Report counts in terms of number of .pdb files (not just unique PDB IDs).
# Use '04_run_dataset_split.sh' to run this script with appropriate arguments.
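#
# Example direct invocation (the paths below are illustrative placeholders):
#   python 04_dataset_split.py \
#       --splits_dir data/splits \
#       --src_dirs data/processed_pdb_models data/processed_NoLongerMissing \
#       --out_root data/dataset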
import os
import re
import csv
import glob
import shutil
import argparse
import pandas as pd
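
# Filenames are expected to begin with a 4-character PDB ID (e.g., "1abc_model_1.pdb" -> "1abc").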
PDBID_PREFIX_RE = re.compile(r"^([0-9a-zA-Z]{4}).*\.pdb$", re.IGNORECASE)


def read_split_pdb_ids(csv_path: str, pdb_col: str = "PDB_ID") -> list[str]:
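    """Read unique, lower-cased PDB IDs from the `pdb_col` column of a split CSV."""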
df = pd.read_csv(csv_path, dtype={pdb_col: "string"})
if pdb_col not in df.columns:
raise ValueError(f"Column '{pdb_col}' not found in {csv_path}. Columns: {list(df.columns)}")
pdb_ids = (
df[pdb_col]
.dropna()
.astype("string")
.str.strip()
.str.lower()
.unique()
.tolist()
)
return pdb_ids


def build_pdb_index(src_dirs: list[str]) -> dict[str, list[str]]:
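    """Map each 4-character PDB ID to the list of matching *.pdb file paths found in src_dirs."""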
pdb_to_files: dict[str, list[str]] = {}
all_files: list[str] = []
for sd in src_dirs:
all_files.extend(glob.glob(os.path.join(sd, "*.pdb")))
print("Found *.pdb files in source folders:", len(all_files))
kept_files = 0
skipped_badname = 0
for fpath in all_files:
fname = os.path.basename(fpath)
m = PDBID_PREFIX_RE.match(fname)
if not m:
skipped_badname += 1
continue
pid = m.group(1).lower()
pdb_to_files.setdefault(pid, []).append(fpath)
kept_files += 1
print("Indexed .pdb files (matched prefix rule):", kept_files)
print("Skipped files (could not parse PDB_ID from filename):", skipped_badname)
print("Unique PDB_IDs with at least one .pdb file:", len(pdb_to_files))
return pdb_to_files


def copy_by_split(
split_to_pdbids: dict[str, list[str]],
pdb_to_files: dict[str, list[str]],
out_dirs: dict[str, str],
) -> tuple[dict[str, int], dict[str, int], dict[str, list[str]], list[str]]:
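    """Copy the indexed .pdb files for each split's PDB IDs into that split's output directory.

    Returns per-split copied and expected file counts, per-split missing PDB IDs,
    and destination paths skipped because a file with the same name already existed.
    """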
report_files_copied: dict[str, int] = {}
report_expected_files: dict[str, int] = {}
missing_ids: dict[str, list[str]] = {}
duplicate_name_collisions: list[str] = []
for split_name, pdb_ids in split_to_pdbids.items():
out_dir = out_dirs[split_name]
copied_files = 0
expected_files = 0
missing: list[str] = []
for pid in pdb_ids:
files = pdb_to_files.get(pid, [])
if not files:
missing.append(pid)
continue
expected_files += len(files)
for fpath in files:
dst = os.path.join(out_dir, os.path.basename(fpath))
# Avoid overwrite if same filename already exists (e.g., duplicates across src dirs)
if os.path.exists(dst):
duplicate_name_collisions.append(dst)
continue
shutil.copy2(fpath, dst)
copied_files += 1
report_files_copied[split_name] = copied_files
report_expected_files[split_name] = expected_files
missing_ids[split_name] = missing
return report_files_copied, report_expected_files, missing_ids, duplicate_name_collisions


def save_missing_reports(missing_ids: dict[str, list[str]], out_dir: str):
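    """Write one CSV of missing PDB IDs per split into out_dir."""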
os.makedirs(out_dir, exist_ok=True)
for split_name, ids in missing_ids.items():
out_csv = os.path.join(out_dir, f"missing_{split_name}_pdb_ids.csv")
        # csv.QUOTE_ALL: quote every value so IDs like 12e8 are less likely to be
        # auto-parsed as numbers when the CSV is opened in Excel/Sheets.
        pd.DataFrame({"missing_pdb_id": pd.Series(ids, dtype="string")}).to_csv(
            out_csv, index=False, quoting=csv.QUOTE_ALL
        )
print("saved:", out_csv)


def main():
ap = argparse.ArgumentParser(
description="Separate PDB files into train/validation/test using split CSVs (PDB-level split)."
)
ap.add_argument(
"--splits_dir",
required=True,
help="Directory containing train.csv, validation.csv, test.csv",
)
ap.add_argument(
"--src_dirs",
required=True,
nargs="+",
help="One or more directories containing .pdb files (e.g., processed_pdb_models_..., processed_NoLongerMissing)",
)
ap.add_argument(
"--out_root",
required=True,
help="Output root directory. Creates PDB/{train,validation,test} inside.",
)
ap.add_argument(
"--pdb_col",
default="PDB_ID",
help="Column name in split CSVs that contains PDB IDs (default: PDB_ID).",
)
args = ap.parse_args()
# Output dirs
out_root = os.path.join(args.out_root, "PDB")
out_dirs = {
"train": os.path.join(out_root, "train"),
"validation": os.path.join(out_root, "validation"),
"test": os.path.join(out_root, "test"),
}
os.makedirs(out_root, exist_ok=True)
for d in out_dirs.values():
os.makedirs(d, exist_ok=True)
# Read split PDB IDs
split_to_pdbids = {
"train": read_split_pdb_ids(os.path.join(args.splits_dir, "train.csv"), pdb_col=args.pdb_col),
"validation": read_split_pdb_ids(os.path.join(args.splits_dir, "validation.csv"), pdb_col=args.pdb_col),
"test": read_split_pdb_ids(os.path.join(args.splits_dir, "test.csv"), pdb_col=args.pdb_col),
}
# Build index once
pdb_to_files = build_pdb_index(args.src_dirs)
# Copy by split
report_files_copied, report_expected_files, missing_ids, duplicate_name_collisions = copy_by_split(
split_to_pdbids, pdb_to_files, out_dirs
)
print("\n=== Copy summary (number of .pdb files) ===")
for split_name in ["train", "validation", "test"]:
print(
f"{split_name}: copied={report_files_copied.get(split_name, 0)} "
f"(expected={report_expected_files.get(split_name, 0)}), "
f"missing_PDB_IDs={len(missing_ids.get(split_name, []))}"
)
print("\n=== Output folder counts (actual *.pdb files on disk) ===")
for split_name, out_dir in out_dirs.items():
n_disk = len(glob.glob(os.path.join(out_dir, "*.pdb")))
print(f"{split_name}: {n_disk}")
print("\n=== Missing PDB_IDs (no .pdb found in either source folder) ===")
for split_name, ids in missing_ids.items():
print(f"{split_name}: {len(ids)}")
if ids:
print(" examples:", ids[:20])
if duplicate_name_collisions:
print("\n[Note] Some destination filenames already existed (possible duplicates across source dirs).")
print("Examples:", duplicate_name_collisions[:20])
print("Count:", len(duplicate_name_collisions))
# Save missing lists
missing_dir = os.path.join(args.out_root, "PDB_missing_reports")
save_missing_reports(missing_ids, missing_dir)


if __name__ == "__main__":
main()