# ChAFF / CHAFF_processing_scripts / st5_detergent_smiles_curation.py
# (Original Hugging Face file-page metadata, kept for provenance:
#  uploaded by haneulpark, "Upload 5 files", commit e623e49 verified,
#  raw / history / blame, 3.59 kB.)
import os
import pandas as pd
import rdkit
import molvs
import tqdm
import glob
from rdkit import Chem
# MolVS helpers shared by the curation loop below: `standardizer` applies
# MolVS normalization, `fragment_remover` strips salts/solvent fragments.
standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()
# AID mapping {without : with detergent}
filter_map = {
    "585": "584",
    "1476": "1478",
    "485341": "485294"
}
# AID -> CID set mapping (populated from the "with detergent" assay files)
cid_sets = {}
# Load the active-compound CID sets for every "with detergent"
# counterscreen assay; the filtering step below subtracts these from the
# detergent-free results.
for counter_aid in filter_map.values():
    counter_path = os.path.join("./active", f"pubchem_aid_{counter_aid}_active.csv")
    if not os.path.exists(counter_path):
        print(f"Warning: file for AID {counter_aid} not found!")
        continue
    counter_df = pd.read_csv(counter_path)
    cid_sets[counter_aid] = set(counter_df["CID"])
# Filter: drop from each detergent-free assay any CID that was also
# active in its paired "with detergent" counterscreen (these are likely
# aggregation artifacts rather than true actives).
for target_aid, filter_aid in filter_map.items():
    file_name = f"pubchem_aid_{target_aid}_active.csv"
    file_full_path = os.path.join("./active", file_name)
    if not os.path.exists(file_full_path):
        print(f"Skipping {target_aid}, file not found.")
        continue
    df = pd.read_csv(file_full_path)
    before = len(df)
    # Use .get() with an empty-set default: if the counterscreen file was
    # missing (only warned about above), degrade to "no filtering" instead
    # of crashing with KeyError.
    df = df[~df["CID"].isin(cid_sets.get(filter_aid, set()))]
    after = len(df)
    output_path = f"./active/pubchem_aid_{target_aid}_active_filtered.csv"
    df.to_csv(output_path, index=False)
    print(f"{target_aid}: removed {before - after} compounds from {before}, saved to {output_path}")
# Outputs of the filtering loop above, one per detergent-free AID;
# inputs to the SMILES curation loop below.
filtered_files = [
    "pubchem_aid_585_active_filtered.csv",
    "pubchem_aid_1476_active_filtered.csv",
    "pubchem_aid_485341_active_filtered.csv"
]
# Curate SMILES for each filtered actives file: parse with RDKit, validate
# with MolVS, then standardize and strip salts/fragments. Three outputs per
# file: curated rows, unparseable rows, and rows with validation warnings.
os.makedirs("./curated", exist_ok=True)  # output dir may not exist yet
for file in filtered_files:
    file_path = os.path.join("./active", file)
    file_name = os.path.splitext(file)[0]
    # Read file
    active_df = pd.read_csv(file_path)
    smiles_series = active_df["CanonicalSMILES"]
    active_df["curated_SMILES"] = None
    cid = active_df["CID"]
    valid_indices = []    # row labels whose SMILES passed curation
    invalid_smiles = []   # rows RDKit could not parse
    warning_smiles = []   # rows flagged by MolVS validation
    for idx, smiles in smiles_series.items():
        # Series.items() yields index *labels*, so use label-based .loc;
        # the original .iloc was positional and only correct by accident
        # for the default RangeIndex of a freshly read CSV.
        compound_cid = cid.loc[idx]
        mol = Chem.MolFromSmiles(smiles)
        if mol is None:
            invalid_smiles.append({
                'CID': compound_cid,
                'SMILES': smiles,
                'Reason': "MolFromSmiles returned None"
            })
            continue
        results = molvs.validate_smiles(smiles)
        if len(results) > 0:
            # Any MolVS validation message excludes the row from curation.
            warning_smiles.append({
                'CID': compound_cid,
                'SMILES': smiles,
                'Reason': results
            })
            continue
        # Normalize the molecule, then drop salt/solvent fragments.
        mol = standardizer.standardize(mol)
        mol = fragment_remover.remove(mol)
        standardized = Chem.MolToSmiles(mol)
        active_df.at[idx, "curated_SMILES"] = standardized
        valid_indices.append(idx)
    # Save outputs
    valid_df = active_df.loc[valid_indices].reset_index(drop=True)
    valid_df = valid_df.drop(columns=["CanonicalSMILES"])
    valid_df = valid_df.rename(columns={"curated_SMILES": "SMILES"})
    # Recover the AID from the filename, e.g. "pubchem_aid_585_active_filtered" -> "585".
    aid = file_name.replace("pubchem_aid_", "").replace("_active_filtered", "")
    valid_df.insert(0, "AID", aid)
    invalid_df = pd.DataFrame(invalid_smiles)
    warning_df = pd.DataFrame(warning_smiles)
    invalid_df.insert(0, "AID", aid)
    warning_df.insert(0, "AID", aid)
    valid_df.to_csv(f'./curated/{file_name}_curated.csv', index=False)
    invalid_df.to_csv(f'./curated/{file_name}_invalid_smiles.csv', index=False)
    warning_df.to_csv(f'./curated/{file_name}_molvs_validation.csv', index=False)
    print(f"Finished curation for {file_name}")
    print(f" Valid: {len(valid_df)}, Invalid: {len(invalid_df)}, Warnings: {len(warning_df)}\n")