|
|
|
|
|
""" |
|
|
scaffold_split.py |
|
|
|
|
|
Author: natelgrw |
|
|
Last Edited: 11/01/2025 |
|
|
|
|
|
Computes Bemis-Murcko scaffolds for the AMAX dataset using RDKit |
|
|
and splits scaffolds into 5 distinct folds with approximately balanced |
|
|
compound counts across folds. Computes UMAP, scaffold assignments, and |
|
|
lambda max distributions for visualizing scaffold splits. |
|
|
""" |
|
|
|
|
|
import pandas as pd |
|
|
import numpy as np |
|
|
from rdkit import Chem |
|
|
from rdkit.Chem.Scaffolds import MurckoScaffold |
|
|
from rdkit.Chem import AllChem |
|
|
import random |
|
|
import os |
|
|
from collections import defaultdict |
|
|
import matplotlib.pyplot as plt |
|
|
import seaborn as sns |
|
|
import umap |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# --- Configuration -------------------------------------------------------
INPUT_CSV = "../amax_dataset.csv"   # dataset path, relative to this script's directory
OUTPUT_DIR = "../scaffold_split"    # where fold CSVs, assignments, and figures are written
N_FOLDS = 5                         # number of scaffold folds to produce
RANDOM_SEED = 42                    # seed shared by all stochastic steps (split, UMAP)

# Seed both the stdlib and NumPy RNGs so repeated runs produce the same
# fold assignment and the same UMAP embedding.
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_murcko_scaffold(smiles):
    """
    Compute the Bemis-Murcko scaffold of a molecule given as a SMILES string.

    Returns:
        str: Scaffold SMILES string, or "INVALID" if the SMILES cannot be
            parsed (or scaffold extraction raises), or "NO_SCAFFOLD" if the
            computed scaffold is empty (e.g. acyclic molecules).
    """
    try:
        molecule = Chem.MolFromSmiles(smiles)
        if molecule is None:
            # Unparseable SMILES -> sentinel value instead of an exception.
            return "INVALID"
        scaffold_smiles = MurckoScaffold.MurckoScaffoldSmiles(mol=molecule)
        if not scaffold_smiles:
            # An empty scaffold string means no ring system was found.
            return "NO_SCAFFOLD"
        return scaffold_smiles
    except Exception as err:
        print(f"Warning: Error processing SMILES '{smiles}': {err}")
        return "INVALID"
|
|
|
|
|
|
|
|
def analyze_dataset(df):
    """
    Print summary statistics for the dataset (row/column counts, unique
    compounds and solvents, and lambda_max distribution if present).
    """
    divider = "=" * 60
    print(divider)
    print("Dataset Analysis")
    print(divider)
    print(f"Total rows: {len(df):,}")
    print(f"Columns: {df.columns.tolist()}")
    print(f"\nUnique compounds: {df['compound'].nunique():,}")

    # Optional columns: report them only when present.
    if 'solvent' in df.columns:
        print(f"Unique solvents: {df['solvent'].nunique():,}")
    if 'lambda_max' in df.columns:
        lmax = df['lambda_max']
        print("\nLambda_max statistics:")
        print(f"  Min: {lmax.min():.2f}")
        print(f"  Max: {lmax.max():.2f}")
        print(f"  Mean: {lmax.mean():.2f}")
        print(f"  Median: {lmax.median():.2f}")
    print()
|
|
|
|
|
|
|
|
def assign_scaffolds_to_folds(scaffold_sizes, n_folds, total_rows):
    """
    Assign scaffolds to folds using a greedy algorithm to balance compound counts.

    Scaffolds are processed from largest to smallest; each one is placed in
    the fold that currently holds the fewest compounds (first such fold on
    ties). Placing large scaffolds first lets the many small scaffolds even
    out the residual imbalance.

    Args:
        scaffold_sizes: dict mapping scaffold SMILES to number of compounds
        n_folds: number of folds
        total_rows: total number of rows in dataset (currently unused; kept
            for backward compatibility with existing callers)

    Returns:
        tuple of:
            - defaultdict mapping fold_id (0 to n_folds-1) to the list of
              scaffold SMILES assigned to that fold
            - list of per-fold compound counts, indexed by fold_id
    """
    fold_assignments = defaultdict(list)
    fold_counts = [0] * n_folds

    # Largest-first ordering is what makes the greedy heuristic effective.
    sorted_scaffolds = sorted(scaffold_sizes.items(), key=lambda x: x[1], reverse=True)

    for scaffold, size in sorted_scaffolds:
        # Greedy choice: the fold with the fewest compounds so far.
        min_fold = min(range(n_folds), key=lambda i: fold_counts[i])
        fold_assignments[min_fold].append(scaffold)
        fold_counts[min_fold] += size

    return fold_assignments, fold_counts
|
|
|
|
|
|
|
|
def create_visualizations(df, scaffold_sizes, fold_assignments, fold_counts, fold_dataframes, output_dir_path):
    """
    Create visualizations for scaffold split analysis.

    Generates:
        1. Lambda_max distribution across folds (KDE plot)
        2. UMAP 2D visualization of scaffold assignments

    Args:
        df: full dataset; must contain 'compound' and 'scaffold' columns
            (and optionally 'lambda_max' for the KDE plot)
        scaffold_sizes: dict mapping scaffold SMILES to compound count
            (accepted for interface symmetry; not read in this function)
        fold_assignments: mapping of fold_id -> list of scaffold SMILES
        fold_counts: per-fold compound counts (used only for palette size)
        fold_dataframes: mapping of fold_id -> per-fold DataFrame
        output_dir_path: directory under which a 'figures' subdir is created
    """
    print("\nGenerating visualizations...")

    # Global plotting defaults: screen dpi for interactive, high dpi on save.
    sns.set_style("whitegrid")
    plt.rcParams['figure.dpi'] = 100
    plt.rcParams['savefig.dpi'] = 300

    fig_dir = os.path.join(output_dir_path, "figures")
    os.makedirs(fig_dir, exist_ok=True)

    # One distinct color per fold, reused by the scatter plot below.
    colors = sns.color_palette("husl", len(fold_counts))

    # --- Plot 1: lambda_max density per fold vs. overall distribution ----
    if 'lambda_max' in df.columns:
        print("Creating lambda_max distribution plot...")
        fig, ax = plt.subplots(figsize=(12, 6))

        for fold_id in range(len(fold_dataframes)):
            fold_df = fold_dataframes[fold_id]
            fold_label = f"Fold {fold_id + 1} (n={len(fold_df):,})"
            sns.kdeplot(data=fold_df, x='lambda_max', label=fold_label,
                        ax=ax, linewidth=2.5)

        # Dashed black overlay: the full-dataset distribution for reference.
        sns.kdeplot(data=df, x='lambda_max', label=f'Overall (n={len(df):,})',
                    ax=ax, linewidth=2, linestyle='--', color='black', alpha=0.7)

        ax.set_xlabel('Lambda Max (nm)', fontsize=12, fontweight='bold')
        ax.set_ylabel('Density', fontsize=12, fontweight='bold')
        ax.set_title('Lambda Max Distribution Across Scaffold Splits', fontsize=14, fontweight='bold')
        ax.legend(loc='best', frameon=True, fancybox=True, shadow=True)
        ax.grid(alpha=0.3)

        plt.tight_layout()
        plt.savefig(os.path.join(fig_dir, 'scaffold_lmax.png'), bbox_inches='tight')
        print(f"Saved: figures/scaffold_lmax.png")
        plt.close()

    # --- Plot 2: UMAP of Morgan fingerprints, colored by fold -----------
    print("\nComputing UMAP embedding (this may take a few minutes)...")

    # Invert fold_assignments into a scaffold -> fold_id lookup.
    scaffold_to_fold = {}
    for fold_id in range(len(fold_assignments)):
        for scaffold in fold_assignments[fold_id]:
            scaffold_to_fold[scaffold] = fold_id

    df_with_fold = df.copy()
    df_with_fold['fold'] = df_with_fold['scaffold'].map(scaffold_to_fold)

    # Keep only rows with a real scaffold that was assigned to some fold.
    valid_mask = (~df_with_fold['scaffold'].isin(['INVALID', 'NO_SCAFFOLD'])) & (df_with_fold['fold'].notna())
    compounds_for_umap = df_with_fold[valid_mask].copy()

    print(f"Computing fingerprints for {len(compounds_for_umap):,} data points...")

    unique_compounds = compounds_for_umap['compound'].unique()
    print(f"  ({len(unique_compounds):,} unique compounds)")

    # Fingerprint each unique SMILES once; stored as a bit string so the
    # same compound appearing in many rows shares one computation.
    compound_to_fp = {}
    for smiles in unique_compounds:
        try:
            mol = Chem.MolFromSmiles(smiles)
            if mol is not None:
                fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius=2, nBits=2048)
                compound_to_fp[smiles] = fp.ToBitString()
        except Exception:
            # Best-effort: skip compounds whose fingerprint cannot be built.
            continue

    # Expand per-row fingerprints, remembering which df rows survived.
    fps = []
    valid_indices = []
    for idx, row in compounds_for_umap.iterrows():
        smiles = row['compound']
        if smiles in compound_to_fp:
            fps.append(compound_to_fp[smiles])
            valid_indices.append(idx)

    if len(fps) < 100:
        # UMAP on very few points is not meaningful; skip gracefully.
        print("Warning: Too few valid compounds for UMAP. Skipping UMAP visualization.")
    else:
        # Bit strings -> 0/1 integer matrix of shape (n_points, 2048).
        fps_array = np.array([[int(bit) for bit in fp] for fp in fps])

        print(f"Fitting UMAP (n={len(fps_array):,} data points, dim={fps_array.shape[1]})...")

        # Jaccard metric is appropriate for binary fingerprints; the fixed
        # random_state keeps the embedding reproducible across runs.
        reducer = umap.UMAP(n_components=2, random_state=RANDOM_SEED,
                            n_neighbors=15, min_dist=0.1, metric='jaccard', verbose=False)
        embedding = reducer.fit_transform(fps_array)

        valid_compounds_df = compounds_for_umap.loc[valid_indices].copy()
        valid_compounds_df['umap_x'] = embedding[:, 0]
        valid_compounds_df['umap_y'] = embedding[:, 1]

        fig, ax = plt.subplots(figsize=(14, 10))

        # One scatter layer per fold so the legend shows per-fold counts.
        for fold_id in range(len(fold_assignments)):
            fold_data = valid_compounds_df[valid_compounds_df['fold'] == fold_id]
            if len(fold_data) > 0:
                ax.scatter(fold_data['umap_x'], fold_data['umap_y'],
                           label=f'Fold {fold_id + 1} (n={len(fold_data):,})',
                           alpha=0.6, s=20, c=[colors[fold_id]])

        ax.set_title('UMAP Projection of All Data Points (Colored by Scaffold Split)',
                     fontsize=14, fontweight='bold')
        ax.legend(loc='best', frameon=True, fancybox=True, shadow=True, fontsize=10)
        ax.grid(alpha=0.3)

        plt.tight_layout()
        plt.savefig(os.path.join(fig_dir, 'scaffold_umap.png'), bbox_inches='tight')
        print(f"Saved: figures/scaffold_umap.png")
        plt.close()

    print(f"\nAll visualizations saved to: {os.path.join(output_dir_path, 'figures')}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """
    Run the full scaffold-splitting pipeline.

    Loads the dataset, computes Bemis-Murcko scaffolds, greedily assigns
    scaffolds to N_FOLDS folds with balanced compound counts, writes one CSV
    per fold plus a scaffold-assignment table, generates diagnostic figures,
    and verifies that no scaffold appears in more than one fold.

    Raises:
        FileNotFoundError: if INPUT_CSV does not exist relative to this script.
        ValueError: if the dataset lacks a 'compound' column.
    """
    print("Loading dataset...")
    # Resolve paths relative to this script so the pipeline works no matter
    # what the current working directory is.
    input_path = os.path.join(os.path.dirname(__file__), INPUT_CSV)
    if not os.path.exists(input_path):
        raise FileNotFoundError(f"Input file not found: {input_path}")

    df = pd.read_csv(input_path)

    if 'compound' not in df.columns:
        raise ValueError("Dataset must contain 'compound' column")

    analyze_dataset(df)

    print("Computing Bemis-Murcko scaffolds...")
    df['scaffold'] = df['compound'].apply(get_murcko_scaffold)

    # Sentinel scaffolds flag problematic inputs; report but keep them.
    invalid_count = (df['scaffold'] == "INVALID").sum()
    no_scaffold_count = (df['scaffold'] == "NO_SCAFFOLD").sum()

    if invalid_count > 0:
        print(f"Warning: {invalid_count:,} compounds have invalid SMILES")
    if no_scaffold_count > 0:
        print(f"Info: {no_scaffold_count:,} compounds have no scaffold (single atoms)")

    # Map scaffold SMILES -> number of rows sharing that scaffold.
    scaffold_sizes = df.groupby('scaffold').size().to_dict()

    size_array = np.array(list(scaffold_sizes.values()))
    print(f"\nScaffold Statistics:")
    print(f"Unique scaffolds: {len(scaffold_sizes):,}")
    print(f"Scaffolds with 1 compound: {(size_array == 1).sum():,}")
    print(f"Scaffolds with >10 compounds: {(size_array > 10).sum():,}")
    print(f"Scaffolds with >100 compounds: {(size_array > 100).sum():,}")

    print(f"\nAssigning scaffolds to {N_FOLDS} folds...")
    fold_assignments, fold_counts = assign_scaffolds_to_folds(
        scaffold_sizes, N_FOLDS, len(df)
    )

    print("\nFold Statistics:")
    print("-" * 60)
    for fold_id in range(N_FOLDS):
        scaffolds = fold_assignments[fold_id]
        count = fold_counts[fold_id]
        percentage = 100 * count / len(df)
        print(f"Fold {fold_id + 1}: {count:,} compounds ({percentage:.2f}%) | "
              f"{len(scaffolds):,} scaffolds")
    print("-" * 60)
    print(f"Total: {sum(fold_counts):,} compounds")

    output_dir_path = os.path.join(os.path.dirname(__file__), OUTPUT_DIR)
    os.makedirs(output_dir_path, exist_ok=True)

    print(f"\nSaving folds to '{OUTPUT_DIR}' directory...")
    fold_dataframes = {}

    for fold_id in range(N_FOLDS):
        scaffolds_in_fold = set(fold_assignments[fold_id])
        fold_mask = df['scaffold'].isin(scaffolds_in_fold)
        fold_df = df[fold_mask].copy()

        # The 'scaffold' helper column is internal; drop it from the output
        # CSV but keep it in fold_dataframes for visualization.
        fold_df_output = fold_df.drop(columns=['scaffold'])

        output_file = os.path.join(output_dir_path, f"fold_{fold_id + 1}.csv")
        fold_df_output.to_csv(output_file, index=False)
        fold_dataframes[fold_id] = fold_df

        print(f"Saved fold_{fold_id + 1}.csv: {len(fold_df):,} rows")

    # Build the scaffold -> fold lookup table, largest scaffolds first
    # within each fold.
    scaffold_assignments_data = []
    for fold_id in range(N_FOLDS):
        for scaffold in fold_assignments[fold_id]:
            scaffold_assignments_data.append({
                'scaffold': scaffold,
                'fold': fold_id + 1,
                'compound_count': scaffold_sizes[scaffold]
            })

    scaffold_assignments_df = pd.DataFrame(scaffold_assignments_data)
    scaffold_assignments_df = scaffold_assignments_df.sort_values(['fold', 'compound_count'],
                                                                  ascending=[True, False])

    # BUGFIX: write the assignments CSV before announcing it. Previously the
    # "Saved" message was printed before the file was written (the write
    # happened after the visualization step), so a failure while plotting
    # left the file missing despite the message claiming otherwise.
    scaffold_assignments_file = os.path.join(output_dir_path, "scaffold_assignments.csv")
    scaffold_assignments_df.to_csv(scaffold_assignments_file, index=False)

    print(f"\nSaved scaffold assignments to: scaffold_assignments.csv")
    print(f"Total scaffolds: {len(scaffold_assignments_df):,}")
    print(f"Columns: scaffold, fold, compound_count")

    create_visualizations(df, scaffold_sizes, fold_assignments, fold_counts,
                          fold_dataframes, output_dir_path)

    # Sanity check: scaffold splits are only valid if folds are disjoint.
    print("\nVerifying scaffold separation...")
    all_fold_scaffolds = [set(fold_assignments[i]) for i in range(N_FOLDS)]
    for i in range(N_FOLDS):
        for j in range(i + 1, N_FOLDS):
            overlap = all_fold_scaffolds[i] & all_fold_scaffolds[j]
            if overlap:
                print(f"ERROR: Overlap between fold {i+1} and fold {j+1}: {len(overlap)} scaffolds")
            else:
                print(f"No overlap between fold {i+1} and fold {j+1}")

    # Sanity check: every scaffold must land in exactly one fold.
    all_assigned = set()
    for fold_id in range(N_FOLDS):
        all_assigned.update(fold_assignments[fold_id])

    if len(all_assigned) == len(scaffold_sizes):
        print(f"All {len(scaffold_sizes):,} scaffolds assigned to folds")
    else:
        missing = set(scaffold_sizes.keys()) - all_assigned
        print(f"WARNING: {len(missing)} scaffolds not assigned to any fold")

    print("\n" + "=" * 60)
    # Use N_FOLDS instead of the previously hard-coded "5-fold" so the
    # banner stays correct if the fold count is reconfigured.
    print(f"{N_FOLDS}-fold scaffold split completed successfully!")
    print("=" * 60)
|
|
|
|
|
|
|
|
# Run the full scaffold-split pipeline only when executed as a script,
# not when imported as a module.
if __name__ == "__main__":
    main()
|
|
|
|
|
|