"""Evaluate three saved H2O AutoML models on a held-out test set.

Loads RDKit-descriptor CSVs, rebuilds H2O frames, restores the top three
models from disk, prints threshold-based classification metrics for each,
collects overall metrics, draws ROC/PR curves, and saves a SHAP summary
plot for the single-model GBM.
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import h2o
from h2o import load_model  # NOTE(review): redundant — h2o.load_model is used below
from h2o.automl import H2OAutoML  # noqa: F401  kept for parity with the training script
import pyarrow as pa  # noqa: F401
import pyarrow.parquet as pq  # noqa: F401

h2o.init()

# --- Data preparation -------------------------------------------------------
df_train = pd.read_csv("./intermediate/train_rdkit_descriptors.csv")
df_test = pd.read_csv("./intermediate/test_rdkit_descriptors.csv")

x_train = df_train.drop(columns=["label", "Standardized_SMILES"])
y_train = df_train["label"]
x_test = df_test.drop(columns=["label", "Standardized_SMILES"])
y_test = df_test["label"]

# concat puts "label" as the LAST column of each frame; the SHAP slice at the
# bottom of this script relies on that ordering.
train_h2o = h2o.H2OFrame(pd.concat([x_train, y_train], axis=1))
test_h2o = h2o.H2OFrame(pd.concat([x_test, y_test], axis=1))
train_h2o["label"] = train_h2o["label"].asfactor()  # treat target as categorical
test_h2o["label"] = test_h2o["label"].asfactor()

feature_cols = x_train.columns.tolist()

# --- Reload the saved models ------------------------------------------------
MODEL_PATHS = [
    "./product/top_model_1/StackedEnsemble_AllModels_1_AutoML_1_20250401_220205",
    "./product/top_model_2/StackedEnsemble_BestOfFamily_1_AutoML_1_20250401_220205",
    "./product/top_model_3/GBM_4_AutoML_1_20250401_220205",
]
restored_model1, restored_model2, restored_model3 = (
    h2o.load_model(path) for path in MODEL_PATHS
)

# --- Score each model on the test frame -------------------------------------
perf1 = restored_model1.model_performance(test_h2o)
perf2 = restored_model2.model_performance(test_h2o)
perf3 = restored_model3.model_performance(test_h2o)


def report_threshold_metrics(perf, threshold=0.5):
    """Print classification metrics of *perf* evaluated at *threshold*.

    Each H2O metric accessor returns ``[[threshold, value]]`` when given an
    explicit threshold list, hence the ``[0][1]`` indexing.
    """
    acc = perf.accuracy(thresholds=[threshold])[0][1]
    f1 = perf.F1(thresholds=[threshold])[0][1]
    prec = perf.precision(thresholds=[threshold])[0][1]
    rec = perf.recall(thresholds=[threshold])[0][1]
    spec = perf.specificity(thresholds=[threshold])[0][1]
    print(f"Threshold = {threshold}")
    print(f"Accuracy = {acc:.4f}")
    print(f"F1 Score = {f1:.4f}")
    print(f"Precision = {prec:.4f}")
    print(f"Recall = {rec:.4f}")
    print(f"Specificity = {spec:.4f}")


def collect_metrics(perf):
    """Return a dict of overall (threshold-free) metrics for *perf*."""
    return {
        "AUC": perf.auc(),
        "LogLoss": perf.logloss(),
        "Accuracy": perf.accuracy(),
        "F1": perf.F1(),
        "Precision": perf.precision(),
        "Recall": perf.recall(),
        "Specificity": perf.specificity(),
    }


# --- Per-threshold report (threshold fixed at 0.5 for all three models) ------
report_threshold_metrics(perf1)
report_threshold_metrics(perf2)
report_threshold_metrics(perf3)

metrics1 = collect_metrics(perf1)
metrics2 = collect_metrics(perf2)
metrics3 = collect_metrics(perf3)

# --- ROC and PR curves -------------------------------------------------------
for perf in (perf1, perf2, perf3):
    perf.plot(type="roc")
    perf.plot(type="pr")

# --- SHAP analysis -----------------------------------------------------------
# Run on the GBM only (presumably because the stacked ensembles do not expose
# SHAP contributions — confirm). test_h2o[:, :-1] drops the trailing "label"
# column so the model only sees the feature columns.
restored_model3.shap_summary_plot(test_h2o[:, :-1])
fig = plt.gcf()
fig.savefig("./product/3shap_summary_plot.png", dpi=300, bbox_inches="tight")