# BULMA / scripts / snq2_glutathione_test.py
# (repository header pasted from the web UI — author "Ton Nom",
#  commit "Add full BULMA pipeline, data, code and results", 9f9fb84)
"""
SNQ2 Glutathione Prediction Script
===================================
Tests whether BULMA predicts SNQ2 binds glutathione and other endogenous molecules
This script extracts the trained BULMA model and makes predictions for:
1. Glutathione
2. NAD+/NADH
3. Known positive controls (4-NQO, caffeine)
4. Known negative controls (random compounds)
"""
import sys
import warnings

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from rdkit import Chem
from rdkit.Chem import AllChem

# Silence RDKit / pandas deprecation chatter so the console report stays readable.
warnings.filterwarnings('ignore')
# Console banner for the report.
RULE = "=" * 80
print(RULE)
print("SNQ2 ANTIOXIDANT DEPLETION HYPOTHESIS TEST")
print(RULE)
# ============================================================================
# STEP 1: Define the model architecture (from your BULMA notebook)
# ============================================================================
class MLPAtlas(nn.Module):
    """BULMA model: a two-branch MLP that scores protein–ligand pairs.

    Each embedding is projected into a shared hidden space by its own
    branch; the concatenated branch outputs are mapped to a single logit.
    """

    def __init__(self, p_dim=1280, l_dim=384, hid=256, drop=0.30):
        super().__init__()
        # NOTE: attribute names (p, l, out) and layer order are part of the
        # saved-checkpoint contract — keep them fixed so state_dicts load.
        self.p = self._branch(p_dim, hid, drop)
        self.l = self._branch(l_dim, hid, drop)
        self.out = nn.Sequential(
            nn.Linear(2 * hid, hid),
            nn.ReLU(),
            nn.Dropout(drop),
            nn.Linear(hid, 1),
        )

    @staticmethod
    def _branch(in_dim, hid, drop):
        """One encoder branch: Linear -> ReLU -> Dropout."""
        return nn.Sequential(
            nn.Linear(in_dim, hid),
            nn.ReLU(),
            nn.Dropout(drop),
        )

    def forward(self, P, L):
        """Return one logit per row of the batched (P, L) embedding pair."""
        fused = torch.cat([self.p(P), self.l(L)], dim=1)
        return self.out(fused).squeeze(-1)
# ============================================================================
# STEP 2: Load embeddings and model
# ============================================================================
print("\n[1/5] Loading protein and ligand data...")
# You need to provide these paths from your BULMA notebook
# They should be in data/processed/ directory
try:
    # Load protein embeddings (ESM-2)
    P = pd.read_csv("data/processed/protein.csv")
    # Load ligand embeddings (ChemBERTa)
    L = pd.read_csv("data/processed/ligand.csv")
    print(f" ✓ Loaded {len(P)} proteins")
    print(f" ✓ Loaded {len(L)} ligands")
    # Check if SNQ2 is in the data
    if 'SNQ2' not in P['transporter'].values:
        print(" ⚠ WARNING: SNQ2 not found in protein data!")
        print(f" Available transporters: {P['transporter'].values[:10]}...")
    else:
        print(" ✓ SNQ2 found in protein data")
except FileNotFoundError as e:
    # BUG FIX: the caught exception was discarded; report which path is missing.
    print(f" ✗ ERROR: Could not load data files ({e})")
    print(" Make sure you have:")
    print(" - data/processed/protein.csv")
    print(" - data/processed/ligand.csv")
    print("\n These should be generated from your BULMA notebook.")
    # sys.exit instead of the site-module builtin exit().
    sys.exit(1)
# ============================================================================
# STEP 3: Define test molecules
# ============================================================================
print("\n[2/5] Defining test molecules...")

# Shared category / expectation labels, deduplicated so the substring
# matching done later ('Endogenous', 'Known Substrate', 'Control') stays
# consistent across entries.
_ANTIOXIDANT = 'Endogenous Antioxidant'
_REDOX = 'Endogenous Redox Cofactor'
_SUBSTRATE = 'Known Substrate (Xenobiotic)'
_CONTROL = 'Non-substrate Control'
_EXPECT_HYPOTHESIS = 'HIGH affinity if hypothesis correct'
_EXPECT_POSITIVE = 'HIGH affinity (positive control)'
_EXPECT_NEGATIVE = 'LOW affinity (negative control)'


def _mol(smiles, category, expected):
    """Bundle one test compound's SMILES with its category and expectation."""
    return {'smiles': smiles, 'category': category, 'expected': expected}


test_molecules = {
    # HYPOTHESIS MOLECULES (endogenous antioxidants)
    'Glutathione': _mol('C(CC(=O)NC(CS)C(=O)NCC(=O)O)C(C(=O)O)N',
                        _ANTIOXIDANT, _EXPECT_HYPOTHESIS),
    'NAD+': _mol('C1=CC(=C[N+](=C1)C2C(C(C(O2)COP(=O)([O-])OP(=O)([O-])OCC3C(C(C(O3)N4C=NC5=C(N=CN=C54)N)O)O)O)O)C(=O)N',
                 _REDOX, _EXPECT_HYPOTHESIS),
    'NADH': _mol('C1=CN(C=CC1C(=O)N)C2C(C(C(O2)COP(=O)(O)OP(=O)(O)OCC3C(C(C(O3)N4C=NC5=C4N=CN=C5N)O)O)O)O',
                 _REDOX, _EXPECT_HYPOTHESIS),
    'Ascorbate': _mol('C(C(C1C(=C(C(=O)O1)O)O)O)O',
                      _ANTIOXIDANT, _EXPECT_HYPOTHESIS),
    # POSITIVE CONTROLS (known SNQ2 substrates)
    '4-NQO': _mol('C1=CC2=NC=CC(=C2C=C1[N+](=O)[O-])[O-]',
                  _SUBSTRATE, _EXPECT_POSITIVE),
    'Caffeine': _mol('CN1C=NC2=C1C(=O)N(C(=O)N2C)C',
                     _SUBSTRATE, _EXPECT_POSITIVE),
    # NEGATIVE CONTROLS (random small molecules)
    'Glucose': _mol('C(C1C(C(C(C(O1)O)O)O)O)O',
                    _CONTROL, _EXPECT_NEGATIVE),
    'Acetate': _mol('CC(=O)[O-]',
                    _CONTROL, _EXPECT_NEGATIVE),
}

print(f" ✓ Defined {len(test_molecules)} test molecules")
for name, info in test_molecules.items():
    print(f" - {name}: {info['category']}")
# ============================================================================
# STEP 4: Generate molecular embeddings
# ============================================================================
print("\n[3/5] Generating molecular embeddings...")
print(" NOTE: This requires ChemBERTa model. Using Morgan fingerprints as fallback.")


def get_morgan_fingerprint(smiles, radius=2, nBits=2048):
    """Return a Morgan fingerprint for *smiles* as a numpy bit array.

    Fallback embedding when ChemBERTa is not available.

    Parameters:
        smiles: SMILES string of the molecule.
        radius: Morgan fingerprint radius (default 2, i.e. ECFP4-like).
        nBits: length of the bit vector (default 2048).

    Returns:
        np.ndarray of 0/1 bits, or None when the SMILES cannot be parsed
        or fingerprint generation fails.
    """
    try:
        mol = Chem.MolFromSmiles(smiles)
        if mol is None:
            # RDKit returns None (no exception) for unparseable SMILES.
            return None
        fp = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits)
        return np.array(fp)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; narrowed to Exception.
        return None
# Build fingerprint embeddings for every test molecule, recording failures.
test_embeddings = {}
failed = []
for name, info in test_molecules.items():
    fingerprint = get_morgan_fingerprint(info['smiles'])
    if fingerprint is None:
        failed.append(name)
        print(f" ✗ {name} - failed to generate embedding")
    else:
        test_embeddings[name] = fingerprint
        print(f" ✓ {name}")
if failed:
    print(f"\n ⚠ WARNING: {len(failed)} molecules failed embedding generation")
# ============================================================================
# STEP 5: Load trained BULMA model
# ============================================================================
print("\n[4/5] Loading trained BULMA model...")
try:
    # You need to provide the path to your trained model weights
    # This should be from your BULMA notebook (usually in results/ or models/)
    model_path = "results/atlas_mlp_best.pth"  # Adjust this path
    # Infer dimensions from the loaded data:
    # protein width = CSV columns minus the 'transporter' name column;
    # ligand width = length of the first generated fingerprint.
    p_dim = P.shape[1] - 1
    l_dim = next(iter(test_embeddings.values())).shape[0]
    print(f" Model dimensions: protein={p_dim}, ligand={l_dim}")
    model = MLPAtlas(p_dim=p_dim, l_dim=l_dim, hid=256, drop=0.30)
    # Try to load weights
    try:
        # SECURITY NOTE: torch.load unpickles the file — only load
        # checkpoints you trust (consider weights_only=True on torch>=1.13).
        state_dict = torch.load(model_path, map_location='cpu')
        model.load_state_dict(state_dict)
        print(f" ✓ Model loaded from {model_path}")
    except FileNotFoundError:
        print(f" ⚠ WARNING: Model file not found at {model_path}")
        print(" Will demonstrate prediction workflow without trained weights")
        print(" Results will be random - you need to provide trained model!")
    # BUG FIX: eval() was only called when loading succeeded, leaving
    # dropout ACTIVE during inference in the random-weights fallback path
    # (torch.no_grad does not disable dropout). Always switch to eval mode.
    model.eval()
except Exception as e:
    print(f" ✗ ERROR loading model: {e}")
    print("\n You need to provide:")
    print(" 1. Path to trained BULMA model (.pth file)")
    print(" 2. Ensure protein/ligand dimensions match")
    # sys.exit instead of the site-module builtin exit().
    sys.exit(1)
# ============================================================================
# STEP 6: Make predictions
# ============================================================================
print("\n[5/5] Making predictions...")
# Get SNQ2 embedding
if 'SNQ2' in P['transporter'].values:
    # BUG FIX: the original fed a *label* index (.index[0]) into .iloc,
    # which is *positional* — correct only when the DataFrame happens to
    # carry a default RangeIndex. Select the row directly instead.
    snq2_row = P.loc[P['transporter'] == 'SNQ2'].iloc[0]
    snq2_emb = snq2_row.drop('transporter').values.astype('float32')
    snq2_tensor = torch.from_numpy(snq2_emb).unsqueeze(0)
else:
    print(" ✗ ERROR: SNQ2 not found in protein data")
    # sys.exit instead of the site-module builtin exit().
    sys.exit(1)
# Make predictions for each test molecule
results = []
with torch.no_grad():
    for name, emb in test_embeddings.items():
        # Prepare ligand tensor
        lig_tensor = torch.from_numpy(emb.astype('float32')).unsqueeze(0)
        # Pad or truncate to match the model's expected ligand width.
        # NOTE(review): forcing a 2048-bit Morgan fingerprint into a
        # ChemBERTa-sized input by zero-padding/truncation is a crude
        # stopgap — predictions are only meaningful with the real
        # ChemBERTa embeddings the model was trained on.
        if lig_tensor.shape[1] != l_dim:
            if lig_tensor.shape[1] < l_dim:
                # Pad with zeros
                padding = torch.zeros(1, l_dim - lig_tensor.shape[1])
                lig_tensor = torch.cat([lig_tensor, padding], dim=1)
            else:
                # Truncate
                lig_tensor = lig_tensor[:, :l_dim]
        # Predict: sigmoid of the model logit -> binding probability.
        logit = model(snq2_tensor, lig_tensor)
        prob = torch.sigmoid(logit).item()
        results.append({
            'Molecule': name,
            'Category': test_molecules[name]['category'],
            'Predicted_Affinity': prob,
            'Expected': test_molecules[name]['expected']
        })
# ============================================================================
# STEP 7: Analyze results
# ============================================================================
print("\n" + "=" * 80)
print("RESULTS: SNQ2 BINDING PREDICTIONS")
print("=" * 80)
# Rank predictions from highest to lowest affinity for the report.
results_df = pd.DataFrame(results).sort_values('Predicted_Affinity', ascending=False)
# Fixed-width columns keep the console table aligned.
print(f"\n{'Molecule':<20} {'Category':<30} {'Affinity':<10} Expected")
print("-" * 80)
for _, row in results_df.iterrows():
    print(f"{row['Molecule']:<20} {row['Category']:<30} "
          f"{row['Predicted_Affinity']:<10.3f} {row['Expected']}")
# ============================================================================
# STEP 8: Hypothesis testing
# ============================================================================
print("\n" + "="*80)
print("HYPOTHESIS TEST: Does SNQ2 pump endogenous antioxidants?")
print("="*80)
# Partition predictions by substring match against the category labels
# assigned in test_molecules.
endogenous = results_df[results_df['Category'].str.contains('Endogenous')]
known_substrates = results_df[results_df['Category'].str.contains('Known Substrate')]
controls = results_df[results_df['Category'].str.contains('Control')]
# CONSISTENCY FIX: guard this summary like sections 2 and 3 already are,
# so an empty group is skipped instead of printing NaN statistics.
if len(endogenous) > 0:
    print(f"\n1. Endogenous Antioxidants (n={len(endogenous)}):")
    print(f" Mean affinity: {endogenous['Predicted_Affinity'].mean():.3f}")
    print(f" Range: {endogenous['Predicted_Affinity'].min():.3f} - {endogenous['Predicted_Affinity'].max():.3f}")
if len(known_substrates) > 0:
    print(f"\n2. Known Substrates (positive control, n={len(known_substrates)}):")
    print(f" Mean affinity: {known_substrates['Predicted_Affinity'].mean():.3f}")
    print(f" Range: {known_substrates['Predicted_Affinity'].min():.3f} - {known_substrates['Predicted_Affinity'].max():.3f}")
if len(controls) > 0:
    print(f"\n3. Non-substrate Controls (n={len(controls)}):")
    print(f" Mean affinity: {controls['Predicted_Affinity'].mean():.3f}")
    print(f" Range: {controls['Predicted_Affinity'].min():.3f} - {controls['Predicted_Affinity'].max():.3f}")
# Decision logic: classify the evidence by comparing mean predicted
# affinities against a fixed threshold and the positive-control mean.
print("\n" + "=" * 80)
print("INTERPRETATION:")
print("=" * 80)
mean_endogenous = endogenous['Predicted_Affinity'].mean()
# Fall back to a neutral 0.5 when no positive controls were scored.
if len(known_substrates) > 0:
    mean_known = known_substrates['Predicted_Affinity'].mean()
else:
    mean_known = 0.5
if mean_endogenous > 0.7:
    # Strong evidence: endogenous affinity clears the absolute threshold.
    print("\n✓ HYPOTHESIS SUPPORTED (Strong Evidence)")
    print(" SNQ2 shows HIGH predicted affinity for endogenous antioxidants")
    print(f" Mean affinity: {mean_endogenous:.3f} > 0.7 threshold")
    print("\n CONCLUSION: Antioxidant depletion is plausible mechanism")
    print(" SNQ2's harmful effect under oxidative stress likely due to:")
    print(" 1. Pumping out glutathione/NAD+ (depletes antioxidant capacity)")
    print(" 2. ATP consumption (energetic cost)")
elif mean_endogenous > mean_known * 0.7:
    # Moderate evidence: within 70% of the positive-control mean.
    print("\n≈ HYPOTHESIS PARTIALLY SUPPORTED (Moderate Evidence)")
    print(" SNQ2 shows MODERATE predicted affinity for endogenous antioxidants")
    print(f" Mean affinity: {mean_endogenous:.3f}")
    print(f" Comparable to known substrates: {mean_known:.3f}")
    print("\n CONCLUSION: Mixed mechanism likely")
    print(" SNQ2's harmful effect probably involves both:")
    print(" 1. Some antioxidant depletion (partial effect)")
    print(" 2. ATP cost as primary driver")
else:
    # Weak evidence: affinity well below the positive controls.
    print("\n✗ HYPOTHESIS NOT SUPPORTED")
    print(" SNQ2 shows LOW predicted affinity for endogenous antioxidants")
    print(f" Mean affinity: {mean_endogenous:.3f}")
    print(f" Much lower than known substrates: {mean_known:.3f}")
    print("\n CONCLUSION: Antioxidant depletion unlikely")
    print(" SNQ2's harmful effect under oxidative stress likely due to:")
    print(" 1. Pure energetic cost (ATP depletion)")
    print(" 2. Promiscuous pumping of non-specific metabolites")
    print(" 3. No specific antioxidant targeting")
# Persist the ranked predictions for downstream analysis.
out_path = 'snq2_glutathione_predictions.csv'
results_df.to_csv(out_path, index=False)
print(f"\n✓ Results saved to: {out_path}")
print("\n" + "=" * 80)
print("NEXT STEPS:")
print("=" * 80)
print("1. If hypothesis supported → Focus paper on substrate specificity")
print("2. If hypothesis rejected → Focus paper on energetic cost + promiscuity")
print("3. Either way → You have testable computational predictions")
print("=" * 80)