# ro4_vs_d2/src/03_split_scaffold.py
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import argparse
import os
import pyarrow as pa
import pyarrow.parquet as pq
def get_murcko(smiles):
    """Return the canonical SMILES of the Murcko scaffold of *smiles*.

    Returns None when the SMILES cannot be parsed or scaffold extraction
    fails, so invalid molecules can be filtered out downstream.
    """
    try:
        mol = Chem.MolFromSmiles(smiles)
        if mol is None:
            # MolFromSmiles signals a bad SMILES by returning None (no
            # exception); the original bare `except:` hid this case.
            return None
        scaffold = MurckoScaffold.GetScaffoldForMol(mol)
        return Chem.MolToSmiles(scaffold)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate out of worker processes instead of being swallowed.
        return None
def parallel_get_murcko(smiles_list, n_jobs=10, chunksize=1):
    """Compute Murcko scaffolds for *smiles_list* across worker processes.

    Args:
        smiles_list: sequence of SMILES strings (a pandas Series works too).
        n_jobs: number of worker processes.
        chunksize: items sent to a worker per IPC round-trip. The default 1
            matches the previous behavior; for large inputs a bigger value
            (e.g. 256) greatly reduces inter-process overhead.

    Returns:
        List of scaffold SMILES (or None per failed molecule), in input order.
    """
    with ProcessPoolExecutor(max_workers=n_jobs) as executor:
        # tqdm consumes the lazy map iterator, showing progress as results arrive.
        scaffolds = list(
            tqdm(
                executor.map(get_murcko, smiles_list, chunksize=chunksize),
                total=len(smiles_list),
            )
        )
    return scaffolds
def scaffold_split(df, frac_train=0.8, frac_val=0.1, seed=42):
    """Greedily split *df* into train/val/test so no scaffold spans two sets.

    Rows are grouped by the 'scaffold' column; scaffold groups are shuffled
    deterministically (seeded pandas sample, kept identical to the original
    implementation so existing seeds reproduce the same split) and assigned
    whole-group to train while it fits, then val, then test.

    Args:
        df: DataFrame with a 'scaffold' column.
        frac_train: target fraction of rows for the train set.
        frac_val: target fraction of rows for the validation set.
        seed: random seed for the scaffold shuffle.

    Returns:
        (train_df, val_df, test_df) positional slices of *df*.

    Raises:
        ValueError: if the fractions are not in [0, 1] or sum to more than 1.
    """
    if not (0.0 <= frac_train <= 1.0 and 0.0 <= frac_val <= 1.0) or frac_train + frac_val > 1.0:
        raise ValueError(
            f"frac_train ({frac_train}) and frac_val ({frac_val}) must lie in "
            "[0, 1] and sum to at most 1"
        )
    # Group positional row indices by scaffold so each scaffold moves as a unit.
    scaffold_to_indices = defaultdict(list)
    for idx, scaffold in enumerate(df['scaffold']):
        scaffold_to_indices[scaffold].append(idx)
    scaffolds = list(scaffold_to_indices.keys())
    # Deterministic shuffle of scaffold order (sample(frac=1) is a permutation).
    rng = pd.Series(scaffolds).sample(frac=1, random_state=seed).tolist()
    train_idx, val_idx, test_idx = [], [], []
    n_total = len(df)
    n_train, n_val = int(frac_train * n_total), int(frac_val * n_total)
    for scaffold in rng:
        indices = scaffold_to_indices[scaffold]
        # Greedy whole-group assignment: train while it fits, then val, then test.
        if len(train_idx) + len(indices) <= n_train:
            train_idx.extend(indices)
        elif len(val_idx) + len(indices) <= n_val:
            val_idx.extend(indices)
        else:
            test_idx.extend(indices)
    # iloc is positional, matching the enumerate() indices above.
    return df.iloc[train_idx], df.iloc[val_idx], df.iloc[test_idx]
def main(args):
    """Pipeline driver: load a parquet of molecules, annotate each row with
    its Murcko scaffold, scaffold-split 80/10/10, and write the three splits
    as parquet files into *args.output_dir*.
    """
    os.makedirs(args.output_dir, exist_ok=True)

    print(f"Loading data from {args.input_parquet} ...")
    df = pd.read_parquet(args.input_parquet)
    print(f"Loaded {len(df)} molecules")

    print(f"Extracting Murcko scaffolds using {args.num_cores} cores ...")
    df['scaffold'] = parallel_get_murcko(df[args.smiles_column], n_jobs=args.num_cores)

    print("Performing scaffold split ...")
    train_df, val_df, test_df = scaffold_split(df, frac_train=0.8, frac_val=0.1, seed=args.seed)
    print(f"Train: {len(train_df)} | Val: {len(val_df)} | Test: {len(test_df)}")

    print(f"Saving splits to {args.output_dir} ...")
    # Write each split next to the others in the output directory.
    for split_name, split_df in (("train", train_df), ("val", val_df), ("test", test_df)):
        split_df.to_parquet(os.path.join(args.output_dir, f"{split_name}.parquet"), index=False)
    print("Done!")
if __name__ == "__main__":
    # CLI entry point: collect paths and tuning knobs, then run the pipeline.
    arg_parser = argparse.ArgumentParser(description="Scaffold split virtual screening data and save as parquet.")
    arg_parser.add_argument("--input_parquet", type=str, required=True, help="Path to input .parquet file")
    arg_parser.add_argument("--output_dir", type=str, required=True, help="Output directory to save train/val/test splits")
    arg_parser.add_argument("--smiles_column", type=str, default="smiles", help="Column name for SMILES")
    arg_parser.add_argument("--num_cores", type=int, default=10, help="Number of CPU cores to use")
    arg_parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
    main(arg_parser.parse_args())