# BULMA / scripts/compute_embeddings_compound.py
# (Hugging Face Hub page artifact preserved as comment:
#  "HarriziSaad — Update scripts/compute_embeddings_compound.py — f7fae64 verified")
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from transformers import AutoModel, AutoTokenizer
from tqdm.auto import tqdm
DATA_PROC = Path("data/processed"); DATA_PROC.mkdir(parents=True, exist_ok=True)
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
CHEMBERTA = "seyonec/ChemBERTa-77M-MTR" # 768-dim
CONTROLS = [("ETHANOL", "CCO"), ("H2O2", "OO")]
def _gen_alcohols(n=150):
lib = []
for c in range(1, 21):
lib.append((f"ALK_{c:02d}", "C" * c + "O"))
for c in range(3, 13):
lib.append((f"IALK_{c}", "C(C)" + "C" * (c - 2) + "O"))
return lib[:n]
def _gen_aromatics(n=200):
subs = ["Cl", "Br", "F", "N(=O)=O", "C(=O)O", "C#N", "OCC", "CCN", "CC(=O)O"]
cores = ["c1ccccc1", "c1ccc(cc1)"]
lib, k = [], 0
for s in subs:
for c in cores:
lib.append((f"ARO_{k:03d}", c + s)); k += 1
if k >= n: return lib
return lib
def _gen_heterocycles(n=200):
rings = ["c1ncccc1", "c1occcn1", "n1ccccc1", "c1ccncc1", "c1ccsc1", "c1ncncn1"]
lib, k = [], 0
for r in rings:
lib.append((f"HET_{k:03d}", r)); k += 1
lib.append((f"HETOH_{k:03d}", r + "O")); k += 1
if k >= n: break
while len(lib) < n:
lib.append((f"HETPAD_{len(lib):03d}", "c1ncncn1"))
return lib[:n]
def _classify(smiles: str) -> str:
if smiles == "CCO": return "solvent"
if smiles == "OO": return "oxidant"
if "c1" in smiles: return "aromatic/heterocycle"
if smiles.endswith("O"): return "alcohol"
return "other"
def build_library() -> pd.DataFrame:
lib = CONTROLS + _gen_alcohols(180) + _gen_aromatics(220) + _gen_heterocycles(210)
df = pd.DataFrame(lib, columns=["compound", "smiles"]).drop_duplicates("compound")
df["class"] = df["smiles"].map(_classify)
df["is_control"] = df["compound"].isin(["ETHANOL", "H2O2"])
return df.reset_index(drop=True)
def load_chemberta(model_name: str = CHEMBERTA):
tok = AutoTokenizer.from_pretrained(model_name)
mdl = AutoModel.from_pretrained(model_name).eval().to(DEVICE)
return tok, mdl
@torch.no_grad()
def embed_smiles(smiles: str, tok, mdl) -> np.ndarray:
"""Return CLS-token embedding as float32 array."""
inputs = tok(smiles, return_tensors="pt", truncation=True, max_length=512,
padding=True)
inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
out = mdl(**inputs)
return out.last_hidden_state[:, 0, :].squeeze().cpu().numpy().astype(np.float32)
def canonicalize(smiles: str) -> str:
try:
from rdkit import Chem, RDLogger
RDLogger.DisableLog("rdApp.*")
mol = Chem.MolFromSmiles(smiles)
return Chem.MolToSmiles(mol) if mol else smiles
except Exception:
return smiles
def main(mock: bool = False):
print(f"Device: {DEVICE} | mock={mock}")
df_lib = build_library()
df_lib["smiles"] = df_lib["smiles"].map(canonicalize)
print(f"Library: {len(df_lib)} compounds")
if not mock:
tok, mdl = load_chemberta()
d_lig = 768
else:
d_lig = 768
rng = np.random.default_rng(42)
rows = []
for _, row in tqdm(df_lib.iterrows(), total=len(df_lib)):
if mock:
emb = rng.normal(0, 1, d_lig).astype(np.float32)
else:
try:
emb = embed_smiles(row["smiles"], tok, mdl)
except Exception as e:
print(f" ⚠ {row['compound']}: {e}; using zeros")
emb = np.zeros(d_lig, dtype=np.float32)
rows.append(emb)
emb_df = pd.DataFrame(rows, columns=[f"d{j}" for j in range(d_lig)])
ligand_df = pd.concat([df_lib, emb_df], axis=1)
ligand_df.to_csv(DATA_PROC / "ligand.csv", index=False)
# Manifest (no embeddings)
df_lib.to_csv(DATA_PROC / "ligand_manifest.csv", index=False)
print(f"\n Saved ligand.csv ({len(ligand_df)} compounds, d={d_lig})")
print(f" Saved ligand_manifest.csv")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--mock", action="store_true",
help="Use random embeddings (offline mode)")
args = parser.parse_args()
main(mock=args.mock)