Datasets:
Upload 4 files

- src/02_sanitize_molecules.py +61 -0
- src/03_split_scaffold.py +82 -0
- src/04_fingerprints.py +47 -0
- src/05_xgboost_lowmem.py +85 -0
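Together these scripts form a sequential pipeline: 02 sanitizes raw SMILES, 03 performs a Murcko scaffold split into train/val/test, 04 converts each split into ECFP fingerprint matrices with molfeat, and 05 trains an XGBoost regressor on those matrices.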
src/02_sanitize_molecules.py
ADDED
@@ -0,0 +1,61 @@
import pandas as pd
import molvs
from rdkit import Chem
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import pyarrow as pa
import pyarrow.parquet as pq
import yaml

with open("parameters.yaml") as parameters_file:
    parameters = yaml.safe_load(parameters_file)

data = pd.read_csv(
    filepath_or_buffer="data/ro4/a2a.ro4.tsv.gz", sep="\t",
    compression="gzip", header=None, names=["smiles", "id", "value"])

# Convert the 'value' column to float, coercing errors to NaN
data["value"] = pd.to_numeric(data["value"], errors="coerce")

standardizer = molvs.Standardizer()
fragment_remover = molvs.fragment.FragmentRemover()

def sanitize_smiles(smiles_raw):
    # Standardize the molecule and strip salt/solvent fragments;
    # return None for SMILES that RDKit cannot parse or sanitize.
    try:
        mol = Chem.MolFromSmiles(smiles_raw)
        mol = standardizer.standardize(mol)
        mol = fragment_remover.remove(mol)
        return Chem.MolToSmiles(mol)
    except Exception:
        return None

# Debug helpers kept from development:
# print(data.dtypes)
# print(data.info())
# types_in_value = set(type(x) for x in data['value'])
# print(types_in_value)
# num_str = data['value'].apply(lambda x: isinstance(x, str)).sum()
# print(f"Number of string entries in 'value': {num_str}")

def parallel_sanitize(smiles_list, n_jobs=10):
    with ProcessPoolExecutor(max_workers=n_jobs) as executor:
        sanitized = list(tqdm(executor.map(sanitize_smiles, smiles_list), total=len(smiles_list)))
    return sanitized

data["clean_smiles"] = parallel_sanitize(data["smiles"].tolist(), n_jobs=10)

# Drop rows where sanitization failed
data = data[data["clean_smiles"].notnull()].copy()

output_path = f"product/a2a_ro4_sanitized_{parameters['date_code']}.parquet"
table = pa.Table.from_pandas(data[["clean_smiles", "id", "value"]])
pq.write_table(table, output_path, compression="snappy")

# Earlier serial/TSV approach, kept for reference:
# data['smiles'] = data['smiles'].apply(sanitize_smiles)
# data.to_csv(
#     path_or_buf=f"product/d2_ro4_sanitized_{parameters['date_code']}.tsv",
#     sep="\t",
#     index=False)
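The sanitizer reads parameters.yaml for a date_code that is interpolated into the output filename. A minimal sketch of that file, with an assumed placeholder value:

date_code: "20240101"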
src/03_split_scaffold.py
ADDED
@@ -0,0 +1,82 @@
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Scaffolds import MurckoScaffold
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor
from tqdm import tqdm
import argparse
import os


def get_murcko(smiles):
    # Return the Murcko scaffold as canonical SMILES, or None on failure.
    try:
        mol = Chem.MolFromSmiles(smiles)
        scaffold = MurckoScaffold.GetScaffoldForMol(mol)
        return Chem.MolToSmiles(scaffold)
    except Exception:
        return None

def parallel_get_murcko(smiles_list, n_jobs=10):
    with ProcessPoolExecutor(max_workers=n_jobs) as executor:
        scaffolds = list(tqdm(executor.map(get_murcko, smiles_list), total=len(smiles_list)))
    return scaffolds

def scaffold_split(df, frac_train=0.8, frac_val=0.1, seed=42):
    # Group row indices by scaffold so every scaffold lands in exactly one split.
    scaffold_to_indices = defaultdict(list)
    for idx, scaffold in enumerate(df['scaffold']):
        scaffold_to_indices[scaffold].append(idx)

    # Shuffle the scaffolds deterministically.
    scaffolds = list(scaffold_to_indices.keys())
    shuffled_scaffolds = pd.Series(scaffolds).sample(frac=1, random_state=seed).tolist()

    train_idx, val_idx, test_idx = [], [], []
    n_total = len(df)
    n_train, n_val = int(frac_train * n_total), int(frac_val * n_total)

    # Greedily fill train, then val; everything else goes to test.
    for scaffold in shuffled_scaffolds:
        indices = scaffold_to_indices[scaffold]
        if len(train_idx) + len(indices) <= n_train:
            train_idx.extend(indices)
        elif len(val_idx) + len(indices) <= n_val:
            val_idx.extend(indices)
        else:
            test_idx.extend(indices)

    return df.iloc[train_idx], df.iloc[val_idx], df.iloc[test_idx]


def main(args):
    os.makedirs(args.output_dir, exist_ok=True)

    print(f"Loading data from {args.input_parquet} ...")
    df = pd.read_parquet(args.input_parquet)
    print(f"Loaded {len(df)} molecules")

    print(f"Extracting Murcko scaffolds using {args.num_cores} cores ...")
    df['scaffold'] = parallel_get_murcko(df[args.smiles_column], n_jobs=args.num_cores)

    print("Performing scaffold split ...")
    train_df, val_df, test_df = scaffold_split(df, frac_train=0.8, frac_val=0.1, seed=args.seed)

    print(f"Train: {len(train_df)} | Val: {len(val_df)} | Test: {len(test_df)}")

    print(f"Saving splits to {args.output_dir} ...")
    train_df.to_parquet(os.path.join(args.output_dir, "train.parquet"), index=False)
    val_df.to_parquet(os.path.join(args.output_dir, "val.parquet"), index=False)
    test_df.to_parquet(os.path.join(args.output_dir, "test.parquet"), index=False)

    print("Done!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Scaffold split virtual screening data and save as parquet.")
    parser.add_argument("--input_parquet", type=str, required=True, help="Path to input .parquet file")
    parser.add_argument("--output_dir", type=str, required=True, help="Output directory to save train/val/test splits")
    parser.add_argument("--smiles_column", type=str, default="smiles", help="Column name for SMILES")
    parser.add_argument("--num_cores", type=int, default=10, help="Number of CPU cores to use")
    parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")

    args = parser.parse_args()
    main(args)
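A hypothetical invocation wiring this step into the pipeline (the paths are assumptions; the flags are the argparse options defined above, and --smiles_column clean_smiles matches the column written by 02 and read by 04):

python src/03_split_scaffold.py \
    --input_parquet product/a2a_ro4_sanitized_20240101.parquet \
    --output_dir product/d2_split \
    --smiles_column clean_smiles \
    --num_cores 10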
src/04_fingerprints.py
ADDED
@@ -0,0 +1,47 @@
import time
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from tqdm import tqdm

from molfeat.calc import FPCalculator
from molfeat.trans import MoleculeTransformer

# Initialize the ECFP transformer; 05_xgboost_lowmem.py expects 2048 feature
# columns, which matches molfeat's default "ecfp" bit size.
calc = FPCalculator("ecfp")
mol_transf = MoleculeTransformer(calc, n_jobs=10)

def transform_and_save(df, output_path, split_name="", batch_size=100000):
    # Featurize SMILES in batches to bound peak memory, then write the
    # fingerprint matrix plus the label column to Parquet.
    start = time.time()
    print(f"\nStarting transformation for {split_name}...")

    smiles = df['clean_smiles'].values
    values = df['value'].values
    all_features = []

    for i in tqdm(range(0, len(smiles), batch_size), desc=f"{split_name} batches"):
        batch_smiles = smiles[i:i + batch_size]
        batch_fps = mol_transf(batch_smiles)
        batch_fps = np.stack(batch_fps)
        all_features.append(batch_fps)

    features = np.vstack(all_features)
    df_fps = pd.DataFrame(features, columns=[f"feature_{i}" for i in range(features.shape[1])])
    df_fps["value"] = values  # Append the label

    pq.write_table(pa.Table.from_pandas(df_fps), output_path)

    end = time.time()
    print(f"Finished {split_name} in {end - start:.2f} seconds.")

# Process each split
data_train = pq.read_table("product/d2_split/train.parquet").to_pandas()
transform_and_save(data_train, "intermediate_data/d2/data_train_features.parquet", "train")

data_val = pq.read_table("product/d2_split/val.parquet").to_pandas()
transform_and_save(data_val, "intermediate_data/d2/data_val_features.parquet", "validation")

data_test = pq.read_table("product/d2_split/test.parquet").to_pandas()
transform_and_save(data_test, "intermediate_data/d2/data_test_features.parquet", "test")
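Since 05_xgboost_lowmem.py hardcodes 2048 feature columns, it is worth verifying the calculator's output width before a long featurization run. A quick sanity-check sketch, assuming molfeat calculators accept a single SMILES directly:

from molfeat.calc import FPCalculator

calc = FPCalculator("ecfp")
fp = calc("CCO")   # featurize one molecule
print(fp.shape)    # expected: (2048,), matching feature_0..feature_2047 in 05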
src/05_xgboost_lowmem.py
ADDED
@@ -0,0 +1,85 @@
import os
import pyarrow.parquet as pq
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, r2_score

def load_parquet_as_numpy(path):
    # Simpler reference loader (unused): goes through a full pandas DataFrame.
    table = pq.read_table(path)
    df = table.to_pandas()

    # Drop only the label column to get features
    X = df.drop(columns=["value"]).values.astype(np.float32)

    # Extract target column
    y = df["value"].values.astype(np.float32)

    return xgb.DMatrix(X, label=y), y

def load_parquet_as_dmatrix(path):
    # Low-memory loader: read only the needed columns and skip the pandas detour.
    cols = [f"feature_{i}" for i in range(2048)] + ["value"]
    table = pq.read_table(path, columns=cols)

    # Convert columns to NumPy arrays directly from Arrow
    X = np.column_stack([table[col].to_numpy(zero_copy_only=False)
                         for col in table.column_names if col != "value"]).astype(np.float32)
    y = table["value"].to_numpy(zero_copy_only=False).astype(np.float32)

    return xgb.DMatrix(X, label=y), y

def main():
    print("Loading training data...")
    dtrain, y_train = load_parquet_as_dmatrix("intermediate_data/d2/data_train_features.parquet")

    print("Loading validation data...")
    dval, y_val = load_parquet_as_dmatrix("intermediate_data/d2/data_val_features.parquet")

    print("Loading test data...")
    dtest, y_test = load_parquet_as_dmatrix("intermediate_data/d2/data_test_features.parquet")

    print("Training model with histogram-based tree method...")
    params = {
        "objective": "reg:squarederror",
        "tree_method": "hist",
        "max_depth": 8,
        "eta": 0.1,
        "nthread": 10,
        "verbosity": 1
    }

    evals_result = {}
    model = xgb.train(
        params,
        dtrain,
        num_boost_round=300,
        evals=[(dtrain, "train"), (dval, "eval")],
        early_stopping_rounds=20,
        evals_result=evals_result,
        verbose_eval=10
    )

    # Evaluate on the test set
    y_pred = model.predict(dtest)
    rmse = np.sqrt(mean_squared_error(y_test, y_pred))
    r2 = r2_score(y_test, y_pred)

    print(f"Test RMSE: {rmse:.4f}")
    print(f"Test R^2: {r2:.4f}")

    # Plot learning curve
    os.makedirs("results/d2", exist_ok=True)
    plt.figure()
    plt.plot(evals_result["train"]["rmse"], label="Train RMSE")
    plt.plot(evals_result["eval"]["rmse"], label="Validation RMSE")
    plt.xlabel("Boosting Round")
    plt.ylabel("RMSE")
    plt.title("XGBoost RMSE over Boosting Rounds")
    plt.legend()
    plt.savefig("results/d2/xgboost_d2_learning_curve.png", dpi=300)
    print("Saved learning curve to results/d2/xgboost_d2_learning_curve.png")

if __name__ == "__main__":
    main()
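One caveat: with early stopping enabled, how many trees are used at prediction time depends on the XGBoost version (recent releases default to the best iteration). A sketch, not part of the committed script, that pins the prediction to the best round explicitly (iteration_range is available in XGBoost >= 1.4):

y_pred = model.predict(dtest, iteration_range=(0, model.best_iteration + 1))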