Upload 6 files

- src/01.install_packages.py  +51 -0
- src/02.download_dataset.py  +36 -0
- src/03.sanitize_data.py  +46 -0
- src/04.prepare_data_for_ML.py  +55 -0
- src/05.run_autoML_updated.py  +199 -0
- src/06.upload_to_huggingface.py  +36 -0
src/01.install_packages.py
ADDED
@@ -0,0 +1,51 @@
#-----Install packages to pull data from ORD-----
#User should check which of these packages are already installed and run pip install for the missing ones.

#pip install protobuf
#git clone https://github.com/Open-Reaction-Database/ord-schema.git
#cd ord-schema
# Install the ord_schema package (ensure setuptools is upgraded)
#pip install --upgrade setuptools
#pip install .

# Install the protocol buffers compiler (for compiling .proto files if needed)
#apt-get install -y protobuf-compiler

#Install other packages if needed
#pip install wget
#pip install rdkit
#pip install scikit-learn
#pip install molvs
#pip install tensorflow
#pip install shap

#-----Install packages for H2O AutoML-----
#pip install xgboost
#pip install -f http://h2o-release.s3.amazonaws.com/h2o/latest_stable_Py.html h2o

import os
import sys

# Determine the path to the ord_schema package
package_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'ord-schema'))
# Add the package directory to sys.path
if package_path not in sys.path:
    sys.path.insert(0, package_path)

import ord_schema
from ord_schema import message_helpers, validations
from ord_schema.proto import dataset_pb2

import math
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import wget

from rdkit.Chem import AllChem
from sklearn import model_selection, metrics
from glob import glob
from rdkit import Chem
from molvs import Standardizer
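Since every install command above is commented out, a quick self-contained way to see which ones still need to be run is to probe the imports directly. A minimal sketch (the module list mirrors the comments above; note that the import names differ from the pip names for protobuf and scikit-learn):

import importlib

for module in ["google.protobuf", "ord_schema", "wget", "rdkit", "sklearn",
               "molvs", "tensorflow", "shap", "xgboost", "h2o"]:
    try:
        importlib.import_module(module)
        print(f"OK       {module}")
    except ImportError:
        print(f"MISSING  {module} -- see the pip commands above")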
src/02.download_dataset.py
ADDED
@@ -0,0 +1,36 @@
#Download the dataset from the ORD GitHub. You need to specify the ORD dataset ID and file path.

import os
import wget

import ord_schema
from ord_schema import message_helpers, validations
from ord_schema.proto import dataset_pb2

# Download the dataset from ord-data
url = "https://github.com/open-reaction-database/ord-data/blob/main/data/46/ord_dataset-46ff9a32d9e04016b9380b1b1ef949c3.pb.gz?raw=true"
pb = wget.download(url)

# Load the Dataset message
data = message_helpers.load_message(pb, dataset_pb2.Dataset)

# Ensure the dataset validates
valid_output = validations.validate_message(data)

# Convert the dataset to a pandas dataframe
df = message_helpers.messages_to_dataframe(data.reactions, drop_constant_columns=True)

# View the dataframe
#df

os.makedirs('data', exist_ok=True) # Make sure the output directory exists
df.to_csv('data/Ahneman_ORD_Data.csv', index=False)
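The script computes valid_output but never inspects it. A small hedged addition, assuming validate_message returns a ValidationOutput exposing errors and warnings lists as in the ORD examples, would surface validation problems before the CSV is written:

# Assumption: valid_output has .errors/.warnings lists, per the ORD examples.
print(f"{len(valid_output.errors)} validation errors, {len(valid_output.warnings)} warnings")
for err in valid_output.errors:
    print("ERROR:", err)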
src/03.sanitize_data.py
ADDED
@@ -0,0 +1,46 @@
#To sanitize the SMILES in the ORD dataset

import pandas as pd
from rdkit import Chem
from molvs import Standardizer

# Read the CSV file
file_path = 'data/Ahneman_ORD_Data.csv' # replace with your file path
df = pd.read_csv(file_path)

# Initialize the Standardizer from MolVS
standardizer = Standardizer()

# List the columns that contain SMILES strings
smiles_columns = ['inputs["catalyst"].components[0].identifiers[1].value',
                  'inputs["aryl halide"].components[0].identifiers[0].value',
                  'inputs["base"].components[0].identifiers[1].value',
                  'inputs["additive"].components[0].identifiers[0].value',
                  'inputs["additive"].components[0].identifiers[1].value',
                  'outcomes[0].products[0].identifiers[0].value'
                  ]

def sanitize_smiles(smiles):
    try:
        if pd.isna(smiles):
            return smiles # Return NA/None if the original value is missing

        mol = Chem.MolFromSmiles(smiles)
        if mol:
            standardized_mol = standardizer.standardize(mol)
            sanitized_smiles = Chem.MolToSmiles(standardized_mol)
            print(f"SMILES successfully sanitized: {sanitized_smiles}")
            return sanitized_smiles
        else:
            return None
    except Exception as e:
        print(f"Error standardizing SMILES: {smiles} -> {e}")
        return None

# Apply sanitization to each SMILES column
for col in smiles_columns:
    df[col] = df[col].apply(sanitize_smiles)

sanitized_df = df
# Save the sanitized SMILES back to a new CSV file
sanitized_df.to_csv('data/Sanitized_Ahneman_ORD_Data.csv', index=False)
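As a quick sanity check of what the standardizer does, here is a self-contained example (an illustrative input, not drawn from the dataset): a Kekulé benzene SMILES is canonicalized to its aromatic form.

from rdkit import Chem
from molvs import Standardizer

s = Standardizer()
mol = Chem.MolFromSmiles("C1=CC=CC=C1")      # Kekulé benzene
print(Chem.MolToSmiles(s.standardize(mol)))  # prints: c1ccccc1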
src/04.prepare_data_for_ML.py
ADDED
@@ -0,0 +1,55 @@
#Prepare ORD data for training a machine learning model

import pandas as pd

# Create a new dataframe containing only the columns to be used in modeling
model_cols = ['inputs["catalyst"].components[0].identifiers[1].value', #Pd catalyst
              'inputs["aryl halide"].components[0].identifiers[0].value', #Aryl halide
              'inputs["base"].components[0].identifiers[1].value', #Base
              'inputs["additive"].components[0].identifiers[0].value', #Amine or solvent (in controls)
              'outcomes[0].products[0].measurements[0].percentage.value' #% yield
              ]

#Read data from the sanitized .csv file
file_path = 'data/Sanitized_Ahneman_ORD_Data.csv' #Sanitized data
sanitized_df = pd.read_csv(file_path)
df = sanitized_df[model_cols] #Keep only the columns used in the ML model

# Check for NaN values
print(f"number of NaN values: {df.isnull().sum().sum()}")

# Show column counts
print("Column Info")
df.info()

# Show dataset statistics for numerical fields
print("Dataset Statistics for Numerical Fields:")
print(df.describe())

#One-Hot Encoding (OHE)
# Convert reaction input labels to one-hot encoding
input_cols = model_cols[:-1]

# Assign names for each input
prefix = ["Catalyst", "Aryl Halide", "Base", "Additives"]

# Create the one-hot encoded input dataset
ohe_df = pd.get_dummies(df[input_cols], prefix=prefix)

# Add the yield column to the OHE dataset
ohe_df["yield"] = df[model_cols[-1]] / 100 #Yield is the target variable the ML model will learn to predict

# View the dataset shape
print(ohe_df.shape)

ohe_df.to_csv('data/Prepared_Data.csv', index=False)
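For readers unfamiliar with pd.get_dummies, here is a toy illustration of the encoding step (hypothetical base names, not the full dataset; with pandas >= 2.0 the indicator columns are boolean, older versions use 0/1):

import pandas as pd

toy = pd.DataFrame({"base": ["P2Et", "BTMG", "P2Et"]})
print(pd.get_dummies(toy, prefix=["Base"]))
#    Base_BTMG  Base_P2Et
# 0      False       True
# 1       True      False
# 2      False       True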
src/05.run_autoML_updated.py
ADDED
@@ -0,0 +1,199 @@
#Split prepared data into training, validation, and test sets. Then train, run, and analyze the AutoML model.

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import model_selection, metrics
import pyarrow as pa # pyarrow must be installed for the multi-threaded H2OFrame-to-pandas conversion below
import shap

import h2o
from h2o.automl import H2OAutoML

#Read data from the one-hot encoded .csv file
file_path = 'data/Prepared_Data.csv' #Prepared (one-hot encoded) data
ohe_df = pd.read_csv(file_path)
print("OHE DF Shape: ", ohe_df.shape)

# Create numpy arrays for inputs and outputs.
X = ohe_df.drop(columns=["yield"]).values
y = ohe_df["yield"].values

# Verify array shapes
print("Shapes of input and output arrays:")
print("X size: ", X.shape, ", y size: ", y.shape)

#Split the data into training, validation, and test sets: 30% is held out for testing,
#then a validation set is carved out of the remaining 70% (test_size = 0.1/0.7, i.e. 10%
#of the full data), leaving 60% for training. random_state is set for reproducibility.

_X_train, X_test, _y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3, random_state=0)
X_train, X_valid, y_train, y_valid = model_selection.train_test_split(
    _X_train, _y_train, test_size=(0.1 / 0.7), shuffle=False
)

# Check lengths
print("X_train size: ", X_train.shape, ", y_train size: ", y_train.shape)
print("X_valid size: ", X_valid.shape, ", y_valid size: ", y_valid.shape)
print("X_test size: ", X_test.shape, ", y_test size: ", y_test.shape)
print("Is length of data frame equal to sum of split data set lengths?",
      len(ohe_df) == X_train.shape[0] + X_valid.shape[0] + X_test.shape[0])

print("X_train_Dataset", X_train[0:20])
print("y_train_Dataset", y_train[0:20])
# Start the H2O cluster (locally)
h2o.init(nthreads=-1)

# Assemble the training split into a pandas DataFrame, keeping the original feature names
feature_names = list(ohe_df.drop(columns=["yield"]).columns)
data_train_df = pd.DataFrame(X_train, columns=feature_names)
data_train_df['yield'] = y_train

print("Length Data Train", data_train_df.shape)

#Shorten the data frame for faster training/validation
data_train_df = data_train_df[0:250]

#Read the pandas dataframe into an H2OFrame
h2o_data_train = h2o.H2OFrame(data_train_df)

#Specify the target column & features
target = "yield" #The main objective is to predict yields from reaction conditions, so yield is the target variable.
features = [col for col in h2o_data_train.columns if col != target]


#Initialize AutoML and train models
aml = H2OAutoML(max_models=8, exclude_algos=['StackedEnsemble']) #Exclude stacked ensemble models
aml.train(x=features, y=target, training_frame=h2o_data_train)

#Display the leaderboard for all models
lb = aml.leaderboard
print(lb.head(rows=lb.nrows))

#Store model leaderboard/performance data for SHAP analysis
best_model = aml.leader # Retrieve the best-performing model

# Create a background frame from the training data
background_frame = h2o_data_train[0:100] # Use the first 100 rows of training data as the background frame

# Use a smaller sample for SHAP analysis to reduce computation time
#sample_data_train = h2o_data_train[:500]
sample_data_train = h2o_data_train

# Compute SHAP values using the best model found by H2O AutoML
shap_values = best_model.predict_contributions(sample_data_train, background_frame=background_frame)

# Convert SHAP values to a pandas DataFrame for plotting, using multi-threading
shap_df = shap_values.as_data_frame(use_pandas=True, use_multi_thread=True)
print("shap_df", shap_df[0:3])

# Drop BiasTerm if present
if 'BiasTerm' in shap_df.columns:
    shap_df = shap_df.drop('BiasTerm', axis=1)

print("Original SHAP DataFrame columns:", shap_df.columns)

# Function to clean and consolidate column names in shap_df
def consolidate_shap_columns(shap_df):
    # Clean column names by removing the per-level state suffixes ('.True', '.False', '.Missing(NA)')
    shap_df.columns = shap_df.columns.str.replace(r'\.(True|False|Missing\(NA\))$', '', regex=True)
    # Remove duplicated columns after consolidation
    shap_df = shap_df.loc[:, ~shap_df.columns.duplicated()]
    return shap_df

# Clean the SHAP values dataframe columns to get rid of the suffixes
shap_df = consolidate_shap_columns(shap_df)
print("Cleaned SHAP Columns", shap_df.columns)

# Convert the H2OFrame with the original data to a pandas DataFrame
df_train_pandas = h2o_data_train.as_data_frame(use_pandas=True, use_multi_thread=True)

# List of feature names from the training data
feature_columns = [col for col in df_train_pandas.columns if col != 'yield']
print("Feature columns", feature_columns)

# Ensure alignment between the SHAP DataFrame and the original training features
shap_df = shap_df[feature_columns]

print("Original data columns:")
print(df_train_pandas.columns)
print("SHAP data columns:")
print(shap_df.columns)

#Remove the "yield" column from df_train_pandas to ensure consistency with the shap_df columns.
df_train_pandas = df_train_pandas.drop(columns=["yield"])

# Verify column alignment between the training data columns and the SHAP columns
assert list(shap_df.columns) == list(df_train_pandas.columns), "Feature columns do not match between SHAP values and data"

# -----Visualize using SHAP summary plots-----

# SHAP summary plot (mean absolute contribution per feature)
shap.summary_plot(shap_df.values, df_train_pandas, plot_type="bar", show=False)

# Save the plot, then display it (calling savefig after plt.show() would write an empty figure)
plt.tight_layout()
plt.savefig("SHAP_Analysis_Summary.png")
plt.show()

# Detailed SHAP summary plot:
shap.summary_plot(shap_df.values, df_train_pandas)

#------------Analyze Model Performance------------
# Convert the test set to an H2OFrame with the same column names as the training frame
h2o_test = h2o.H2OFrame(pd.DataFrame(X_test, columns=feature_names))

#Set up loss curves for the best model identified by AutoML
model_with_history = None
for model_id in aml.leaderboard.as_data_frame()['model_id']:
    model = h2o.get_model(model_id)
    if hasattr(model, 'scoring_history'):
        model_with_history = model
        break

# Check whether the model has a scoring history available
if model_with_history and hasattr(model_with_history, 'scoring_history'):
    scoring_history = model_with_history.scoring_history()
else:
    print("No suitable model with scoring history found.")
    scoring_history = pd.DataFrame() # Avoid further errors

# Predict on the held-out test set
preds_h2o = aml.leader.predict(h2o_test).as_data_frame().values.flatten()

# Calculate RMSE and R^2
r2 = metrics.r2_score(y_test, preds_h2o)
rmse = np.sqrt(metrics.mean_squared_error(y_test, preds_h2o))
print(f"Test RMSE: {rmse}")
print(f"Test R^2: {r2}")

# Plot model performance
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 12))
fig.suptitle("Buchwald-Hartwig AutoML Model Performance")

if not scoring_history.empty:
    if 'training_rmse' in scoring_history.columns:
        ax1.plot(scoring_history['training_rmse'], 'b', label='Training RMSE')
    if 'validation_rmse' in scoring_history.columns:
        ax1.plot(scoring_history['validation_rmse'], 'g', label='Validation RMSE')
else:
    ax1.text(0.5, 0.5, 'Scoring history unavailable', horizontalalignment='center', verticalalignment='center')

ax1.legend()
ax1.set_ylabel("RMSE")
ax1.set_xlabel("Epoch/Tree Index")
ax1.set_title(f"Loss Curves for {best_model.model_id}")

# Plot predictions vs. ground truth
ax2.scatter(y_test, preds_h2o, c='b', marker='o', label='Predictions')
ax2.plot([min(y_test), max(y_test)], [min(y_test), max(y_test)], "r-", lw=2) # Line y=x
ax2.set_ylabel("Predicted Yield")
ax2.set_xlabel("Ground Truth Yield")
ax2.set_title("Predictions vs Ground Truth")
ax2.text(0.15, 0.9 * max(y_test), fr"Test RMSE: {round(rmse, 3)}", fontsize=12)
ax2.text(0.15, 0.8 * max(y_test), fr"Test $R^2$: {round(r2, 3)}", fontsize=12)

# Save and view the performance plots (save before show so the figure isn't blank)
plt.tight_layout()
plt.savefig("B-H AutoML Model Performance.png")
plt.show()
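One optional follow-up, not part of the script above: H2O models can be persisted so the AutoML leader does not have to be retrained on every run. A short sketch using H2O's standard save/load calls, continuing from the aml object above ("models" is an assumed output directory):

import h2o

# Save the AutoML leader to disk, then reload it later without retraining
model_path = h2o.save_model(model=aml.leader, path="models", force=True)
print("Saved to:", model_path)
loaded_model = h2o.load_model(model_path)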
src/06.upload_to_huggingface.py
ADDED
@@ -0,0 +1,36 @@
import datasets
#Each dataset/.csv file is pushed separately to Hugging Face, under a different directory of the same repo.

original_dataset = datasets.load_dataset(
    "csv",
    data_files='data/Ahneman_ORD_Data.csv',
    keep_in_memory=True,
    sep=",")

sanitized_dataset = datasets.load_dataset(
    "csv",
    data_files='data/Sanitized_Ahneman_ORD_Data.csv',
    keep_in_memory=True,
    sep=",")

prepared_dataset = datasets.load_dataset(
    "csv",
    data_files='data/Prepared_Data.csv',
    keep_in_memory=True,
    sep=",")


print("Pushing Sanitized Dataset to cmmauro/ORD_Ahneman_2018")
sanitized_dataset.push_to_hub(
    repo_id="cmmauro/ORD_Ahneman_2018",
    data_dir="Sanitized Dataset")

print("Pushing Original Dataset to cmmauro/ORD_Ahneman_2018")
original_dataset.push_to_hub(
    repo_id="cmmauro/ORD_Ahneman_2018",
    data_dir="Original Dataset")

print("Pushing Prepared Dataset for ML to cmmauro/ORD_Ahneman_2018")
prepared_dataset.push_to_hub(
    repo_id="cmmauro/ORD_Ahneman_2018",
    data_dir="Prepared Dataset for ML")