Upload 8 files
- src/.ipynb_checkpoints/01_install_packages-checkpoint.sh +32 -0
- src/01_install_packages.sh +32 -0
- src/02_load_data.py +26 -0
- src/03_datamol.py +27 -0
- src/04_join_data.R +42 -0
- src/05_predict_outcomes_CXCR4.py +66 -0
- src/05_predict_outcomes_MK14.py +75 -0
- src/05_predict_outcomes_WEE1.py +66 -0
src/.ipynb_checkpoints/01_install_packages-checkpoint.sh
ADDED
@@ -0,0 +1,32 @@
(Jupyter autosave checkpoint; contents identical to src/01_install_packages.sh below.)
src/01_install_packages.sh
ADDED
@@ -0,0 +1,32 @@

# Install LiabilityPredictor

# clone the LiabilityPredictor project
# the project README says you can use assay_liability_calculator.py to make
# predictions using the model, but as of 2/2025, this script is broken,
# so use src/predict_liability.py instead

cd src
# git clone https://github.com/jimmyjbling/LiabilityPredictor.git
# cd LiabilityPredictor

# pip install -r requirements.txt
# cd ..


## h2o is an AutoML platform
pip install -f http://h2o-release.s3.amazonaws.com/h2o/latest_stable_Py.html h2o

# on macOS I also had to install the Java JDK
# brew install java
# I then got an error that I was able to resolve using this answer:
# https://stackoverflow.com/a/65601197
# sudo ln -sfn /opt/homebrew/opt/openjdk/libexec/openjdk.jdk \
#   /Library/Java/JavaVirtualMachines/openjdk.jdk


## datamol / molfeat
pip install molfeat
pip install datamol
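A quick way to confirm that h2o can actually find a working JVM (the usual failure mode after the macOS JDK step above) is to start and stop a local cluster from Python. This is a minimal sketch, not part of the pipeline:

# Sketch: verify the h2o install by starting a local cluster (requires a working Java install).
import h2o

h2o.init()                        # starts (or attaches to) a local H2O cluster; fails if no JVM is found
print(h2o.cluster().version)      # print the running cluster version
h2o.shutdown(prompt=False)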
src/02_load_data.py
ADDED
@@ -0,0 +1,26 @@

import datasets
import yaml

import pyarrow as pa
import pyarrow.parquet as pq
from sklearn.model_selection import train_test_split
import pandas as pd

with open("parameters.yaml") as parameters_file:
    parameters = yaml.safe_load(parameters_file)


df = pd.read_csv("hf://datasets/aanyam/ESSENCEDock_595Project/ESSENCEDock_dataset_final.csv")

train_df, test_df = train_test_split(df, test_size=0.2, stratify=df['Target Name'], random_state=42)

data_train = train_df[train_df["Target Name"].isin(parameters['targets'])]
pq.write_table(
    pa.Table.from_pandas(data_train),
    "intermediate_data/data_train.parquet")


data_test = test_df[test_df["Target Name"].isin(parameters['targets'])]
pq.write_table(
    pa.Table.from_pandas(data_test),
    "intermediate_data/data_test.parquet")
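Since the split above stratifies on "Target Name", a quick sanity check (a sketch, assuming the two parquet files above have been written) is to compare per-target proportions in the train and test sets:

# Sketch: confirm the stratified split preserved per-target proportions.
import pyarrow.parquet as pq

train = pq.read_table("intermediate_data/data_train.parquet").to_pandas()
test = pq.read_table("intermediate_data/data_test.parquet").to_pandas()
print(train["Target Name"].value_counts(normalize=True))
print(test["Target Name"].value_counts(normalize=True))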
src/03_datamol.py
ADDED
@@ -0,0 +1,27 @@

import pandas as pd
import numpy as np

import pyarrow as pa
import pyarrow.parquet as pq

from molfeat.calc import FPCalculator
from molfeat.trans import MoleculeTransformer

calc = FPCalculator("ecfp")
mol_transf = MoleculeTransformer(calc, n_jobs=5)

data_train_moldata = pq.read_table("intermediate_data/data_train.parquet").to_pandas()
data_train_features = mol_transf(data_train_moldata["SMILES"].values)
data_train_features = np.stack(data_train_features)
data_train_features = pd.DataFrame(data_train_features)
pq.write_table(
    pa.Table.from_pandas(data_train_features),
    "intermediate_data/data_train_features.parquet")

data_test_moldata = pq.read_table("intermediate_data/data_test.parquet").to_pandas()
data_test_features = mol_transf(data_test_moldata['SMILES'].values)
data_test_features = np.stack(data_test_features)
data_test_features = pd.DataFrame(data_test_features)
pq.write_table(
    pa.Table.from_pandas(data_test_features),
    "intermediate_data/data_test_features.parquet")
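To see what the featurizer emits, here is a small sketch that runs the same MoleculeTransformer on a single toy molecule and prints the fingerprint shape ("CCO", ethanol, is just an arbitrary example SMILES; the ECFP length depends on molfeat's defaults):

# Sketch: inspect the ECFP fingerprint molfeat produces for one example molecule.
import numpy as np
from molfeat.calc import FPCalculator
from molfeat.trans import MoleculeTransformer

calc = FPCalculator("ecfp")
mol_transf = MoleculeTransformer(calc, n_jobs=1)
features = np.stack(mol_transf(["CCO"]))   # "CCO" is an example SMILES, not from the dataset
print(features.shape)                      # (1, fingerprint_length)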
src/04_join_data.R
ADDED
@@ -0,0 +1,42 @@

### Train
data_train_moldata <- arrow::read_parquet(
  "intermediate_data/data_train.parquet")
data_train_features <- arrow::read_parquet(
  "intermediate_data/data_train_features.parquet")

names(data_train_features) <- paste0(
  "feature_", names(data_train_features))

data_train <- data_train_moldata |>
  dplyr::mutate(
    dplyr::across(
      dplyr::everything(),
      ~tidyr::replace_na(.x, 0))) |>
  dplyr::bind_cols(
    data_train_features)

data_train |>
  arrow::write_parquet(
    "intermediate_data/data_train_joined.parquet")


### Test
data_test_moldata <- arrow::read_parquet(
  "intermediate_data/data_test.parquet")
data_test_features <- arrow::read_parquet(
  "intermediate_data/data_test_features.parquet")

names(data_test_features) <- paste0(
  "feature_", names(data_test_features))

data_test <- data_test_moldata |>
  dplyr::mutate(
    dplyr::across(
      dplyr::everything(),
      ~tidyr::replace_na(.x, 0))) |>
  dplyr::bind_cols(
    data_test_features)

data_test |>
  arrow::write_parquet(
    "intermediate_data/data_test_joined.parquet")
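Before the modeling scripts below read the joined tables, a quick Python-side check (a sketch, assuming the R script above has been run) is that row counts still match and the feature_* columns made it into the join:

# Sketch: confirm the joined parquet keeps all rows and gained the feature_* columns.
import pyarrow.parquet as pq

data_train = pq.read_table("intermediate_data/data_train.parquet").to_pandas()
joined = pq.read_table("intermediate_data/data_train_joined.parquet").to_pandas()
assert len(joined) == len(data_train)
print(sum(c.startswith("feature_") for c in joined.columns), "feature columns")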
src/05_predict_outcomes_CXCR4.py
ADDED
@@ -0,0 +1,66 @@

import pandas as pd
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import h2o
from h2o.automl import H2OAutoML
from h2o.frame import H2OFrame
import pickle
import os

# Define the target
target = "CXCR4"  # Replace with WEE1 or MK14 as needed

# Start the H2O cluster
h2o.init()

# Load datasets
data_train = pq.read_table("intermediate_data/data_train_joined.parquet").to_pandas()
data_test = pq.read_table("intermediate_data/data_test_joined.parquet").to_pandas()

# Filter data for the selected target
data_train = data_train[data_train["Target Name"] == target]
data_test = data_test[data_test["Target Name"] == target]

# Define target column (the leading space matches the column name in the joined data)
target_column = " RMSD_Energy"

# Identify feature columns: all columns from "LF_score" onward, excluding the target
start_idx = list(data_train.columns).index("LF_score")
feature_columns = data_train.columns[start_idx:].tolist()
feature_columns = [col for col in feature_columns if col != " RMSD_Energy"]

# Convert to H2OFrame
train_h2o = H2OFrame(data_train)
test_h2o = H2OFrame(data_test)

# train_h2o[target_column] = train_h2o[target_column].asnumeric()
# test_h2o[target_column] = test_h2o[target_column].asnumeric()

# train_h2o[target_column] = train_h2o[target_column].asfactor()
# test_h2o[target_column] = test_h2o[target_column].asfactor()

# Train AutoML model
aml = H2OAutoML(max_models=1, seed=42)
aml.train(x=feature_columns, y=target_column, training_frame=train_h2o)

# Save the trained model
top_model = aml.leader
model_path = h2o.save_model(top_model, path=f"intermediate_data/top_model{target}", force=True)

# Generate predictions
for dataset_name, dataset_h2o, dataset_df in [
    ("train", train_h2o, data_train),
    ("test", test_h2o, data_test),
]:
    predictions = aml.leader.predict(dataset_h2o).as_data_frame()
    # Assign by position: the filtered frame keeps its original index, so label
    # alignment against the 0..n-1 prediction index would produce NaNs.
    dataset_df["predictions"] = predictions["predict"].to_numpy()

    # Save predictions
    output_path = f"intermediate_data/data_{dataset_name}_{target}_pred.parquet"
    pq.write_table(pa.Table.from_pandas(dataset_df), output_path)

# Shutdown H2O cluster
h2o.shutdown(prompt=False)

print(f"Completed training and predictions for {target}")
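The saved leader model can be reloaded later without retraining; a minimal sketch, assuming model_path is the path returned by h2o.save_model in the script above:

# Sketch: reload the saved H2O leader model and score a joined parquet file with it.
import h2o
import pyarrow.parquet as pq
from h2o.frame import H2OFrame

h2o.init()
model = h2o.load_model(model_path)  # model_path as returned by h2o.save_model above
new_data = H2OFrame(pq.read_table("intermediate_data/data_test_joined.parquet").to_pandas())
print(model.predict(new_data).as_data_frame().head())
h2o.shutdown(prompt=False)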
src/05_predict_outcomes_MK14.py
ADDED
@@ -0,0 +1,75 @@

import pandas as pd
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import h2o
from h2o.automl import H2OAutoML
from h2o.frame import H2OFrame
import pickle
import os
import yaml
import matplotlib.pyplot as plt

# Load run parameters; parameters['date_code'] is used when naming the explain plot below
with open("parameters.yaml") as parameters_file:
    parameters = yaml.safe_load(parameters_file)

# Define the target
target = "MK14"  # Replace with WEE1 or CXCR4 as needed

# Start the H2O cluster
h2o.init()

# Load datasets
data_train = pq.read_table("intermediate_data/data_train_joined.parquet").to_pandas()
data_test = pq.read_table("intermediate_data/data_test_joined.parquet").to_pandas()

# Filter data for the selected target
data_train = data_train[data_train["Target Name"] == target]
data_test = data_test[data_test["Target Name"] == target]

# Define target column (the leading space matches the column name in the joined data)
target_column = " RMSD_Energy"

# Identify feature columns: all columns from "LF_score" onward, excluding the target
start_idx = list(data_train.columns).index("LF_score")
feature_columns = data_train.columns[start_idx:].tolist()
feature_columns = [col for col in feature_columns if col != " RMSD_Energy"]

# Convert to H2OFrame
train_h2o = H2OFrame(data_train)
test_h2o = H2OFrame(data_test)

# train_h2o[target_column] = train_h2o[target_column].asnumeric()
# test_h2o[target_column] = test_h2o[target_column].asnumeric()

# train_h2o[target_column] = train_h2o[target_column].asfactor()
# test_h2o[target_column] = test_h2o[target_column].asfactor()

# Train AutoML model
aml = H2OAutoML(max_models=1, seed=42)
aml.train(x=feature_columns, y=target_column, training_frame=train_h2o)

# Save the trained model
top_model = aml.leader
model_path = h2o.save_model(top_model, path=f"intermediate_data/top_model{target}", force=True)

# Generate predictions
for dataset_name, dataset_h2o, dataset_df in [
    ("train", train_h2o, data_train),
    ("test", test_h2o, data_test),
]:
    predictions = aml.leader.predict(dataset_h2o).as_data_frame()
    # Assign by position: the filtered frame keeps its original index, so label
    # alignment against the 0..n-1 prediction index would produce NaNs.
    dataset_df["predictions"] = predictions["predict"].to_numpy()

    # Save predictions
    output_path = f"intermediate_data/data_{dataset_name}_{target}_pred.parquet"
    pq.write_table(pa.Table.from_pandas(dataset_df), output_path)

test_h2o_no_smiles = test_h2o.drop("SMILES")

# Analysis
top_model.explain(test_h2o_no_smiles)
# Note: plt.savefig only captures the most recent figure rendered by explain()
plt.savefig(
    f"product/model_summary_{target}_{parameters['date_code']}.pdf",
    format="pdf", bbox_inches="tight")

# Shutdown H2O cluster
h2o.shutdown(prompt=False)

print(f"Completed training and predictions for {target}")
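Beyond the explain() plots, H2O can report held-out metrics directly. A short sketch of how one might pull them for the leader model trained above (run before h2o.shutdown(), and assuming RMSD_Energy is treated as numeric so the task is regression):

# Sketch: held-out metrics for the leader model on the test frame built above.
perf = top_model.model_performance(test_data=test_h2o)
print(perf.rmse(), perf.mae(), perf.r2())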
src/05_predict_outcomes_WEE1.py
ADDED
@@ -0,0 +1,66 @@

import pandas as pd
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import h2o
from h2o.automl import H2OAutoML
from h2o.frame import H2OFrame
import pickle
import os

# Define the target
target = "WEE1"  # Replace with MK14 or CXCR4 as needed

# Start the H2O cluster
h2o.init()

# Load datasets
data_train = pq.read_table("intermediate_data/data_train_joined.parquet").to_pandas()
data_test = pq.read_table("intermediate_data/data_test_joined.parquet").to_pandas()

# Filter data for the selected target
data_train = data_train[data_train["Target Name"] == target]
data_test = data_test[data_test["Target Name"] == target]

# Define target column (the leading space matches the column name in the joined data)
target_column = " RMSD_Energy"

# Identify feature columns: all columns from "LF_score" onward, excluding the target
start_idx = list(data_train.columns).index("LF_score")
feature_columns = data_train.columns[start_idx:].tolist()
feature_columns = [col for col in feature_columns if col != " RMSD_Energy"]

# Convert to H2OFrame
train_h2o = H2OFrame(data_train)
test_h2o = H2OFrame(data_test)

# train_h2o[target_column] = train_h2o[target_column].asnumeric()
# test_h2o[target_column] = test_h2o[target_column].asnumeric()

# train_h2o[target_column] = train_h2o[target_column].asfactor()
# test_h2o[target_column] = test_h2o[target_column].asfactor()

# Train AutoML model
aml = H2OAutoML(max_models=1, seed=42)
aml.train(x=feature_columns, y=target_column, training_frame=train_h2o)

# Save the trained model
top_model = aml.leader
model_path = h2o.save_model(top_model, path=f"intermediate_data/top_model{target}", force=True)

# Generate predictions
for dataset_name, dataset_h2o, dataset_df in [
    ("train", train_h2o, data_train),
    ("test", test_h2o, data_test),
]:
    predictions = aml.leader.predict(dataset_h2o).as_data_frame()
    # Assign by position: the filtered frame keeps its original index, so label
    # alignment against the 0..n-1 prediction index would produce NaNs.
    dataset_df["predictions"] = predictions["predict"].to_numpy()

    # Save predictions
    output_path = f"intermediate_data/data_{dataset_name}_{target}_pred.parquet"
    pq.write_table(pa.Table.from_pandas(dataset_df), output_path)

# Shutdown H2O cluster
h2o.shutdown(prompt=False)

print(f"Completed training and predictions for {target}")
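Once all three target scripts have run, the saved prediction parquets can be compared against the observed RMSD_Energy values. A sketch of a simple per-target test error summary, assuming the output paths above and a numeric target column:

# Sketch: summarize test-set error per target from the saved prediction files.
import numpy as np
import pyarrow.parquet as pq

for target in ["CXCR4", "MK14", "WEE1"]:
    df = pq.read_table(f"intermediate_data/data_test_{target}_pred.parquet").to_pandas()
    rmse = np.sqrt(np.mean((df[" RMSD_Energy"] - df["predictions"]) ** 2))
    print(target, "test RMSE:", round(float(rmse), 3))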