import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem
import h2o
from h2o.estimators import H2OGradientBoostingEstimator
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
# Start (or connect to) a local H2O cluster; must run before any other H2O call.
h2o.init()
def featurize_smiles(smiles, radius=2, n_bits=1024):
    """Convert a SMILES string into a binary Morgan fingerprint vector.

    Parameters
    ----------
    smiles : str
        SMILES representation of a molecule.
    radius : int, optional
        Morgan fingerprint radius (default 2, ECFP4-like).
    n_bits : int, optional
        Length of the fingerprint bit vector (default 1024).

    Returns
    -------
    numpy.ndarray
        1-D array of 0/1 values of length ``n_bits``. An all-zero vector is
        returned when the SMILES cannot be parsed or featurized, so callers
        can always stack the results into a rectangular matrix.
    """
    try:
        mol = Chem.MolFromSmiles(smiles)
    except Exception as e:  # e.g. non-string input (NaN from a pandas column)
        print(f"Error featurizing SMILES: {smiles} -> {e}")
        return np.zeros(n_bits)
    if mol is None:
        # MolFromSmiles returns None (it does not raise) for invalid SMILES,
        # so this must be checked explicitly.
        print(f"Error featurizing SMILES: {smiles} -> could not parse")
        return np.zeros(n_bits)
    try:
        fingerprint = AllChem.GetMorganFingerprintAsBitVect(mol, radius=radius, nBits=n_bits)
        return np.array(fingerprint)
    except Exception as e:
        print(f"Error featurizing SMILES: {smiles} -> {e}")
        return np.zeros(n_bits)
# Column positions (0-based) within each Excel sheet.
input_column_index = 5  # SMILES strings column — adjust for your dataset
output_column_index = 3  # target/label column — adjust for your dataset

# Workbook containing pre-made train/test splits, one sheet per split.
# NOTE(review): hard-coded absolute path — only valid on this user's machine.
file_path = '/Users/colestephens/Desktop/CodeFolders/ML_Dataset_Curation_Project/Split_data/homo_hydrogenation.xlsx'
splits = pd.ExcelFile(file_path)

# Use the first train/test split as an example.
train_key = 'train_split_0'
test_key = 'test_split_0'

# Read the train and test sets from their respective sheets.
train_df = pd.read_excel(splits, sheet_name=train_key)
test_df = pd.read_excel(splits, sheet_name=test_key)

# Resolve actual column names from their positional indices.
input_column_name = train_df.columns[input_column_index]
output_column_name = train_df.columns[output_column_index]

# Featurize every SMILES string into a fixed-length fingerprint row
# (featurize_smiles returns an all-zero row for unparseable SMILES, so
# these arrays are always rectangular: n_samples x 1024).
train_features = np.array([featurize_smiles(smiles) for smiles in train_df[input_column_name]])
test_features = np.array([featurize_smiles(smiles) for smiles in test_df[input_column_name]])

print("Successfully converted SMILES to Morgan fingerprints for train and test sets.")

# Wrap the fingerprint matrices as DataFrames so they can be concatenated
# with the target column, then convert the combined frames for H2O.
train_X = pd.DataFrame(train_features)
test_X = pd.DataFrame(test_features)

# The target ends up as the LAST column of each H2OFrame.
train_h2o = h2o.H2OFrame(pd.concat([train_X, train_df[[output_column_name]]], axis=1))
test_h2o = h2o.H2OFrame(pd.concat([test_X, test_df[[output_column_name]]], axis=1))

# Set the target and feature column names for model training.
y = output_column_name
X = train_h2o.columns[:-1]  # All columns except the last one (the target)
# --- Model training --------------------------------------------------------
# The downstream confusion-matrix evaluation assumes a classification task,
# but H2O decides classification vs. regression from the target's type: a
# numeric label column would silently train a REGRESSION model. Convert the
# target to a categorical (factor) column so GBM trains a classifier.
train_h2o[y] = train_h2o[y].asfactor()
test_h2o[y] = test_h2o[y].asfactor()

gbm_model = H2OGradientBoostingEstimator(
    ntrees=50,       # number of boosting rounds (trees)
    max_depth=6,     # maximum depth of each tree
    learn_rate=0.1,  # shrinkage / learning rate
    seed=1,          # for reproducibility
)
gbm_model.train(x=X, y=y, training_frame=train_h2o)

# Evaluate model performance on the held-out test split.
performance = gbm_model.model_performance(test_data=test_h2o)
print(performance)

# Visualization: variable (feature) importance.
gbm_model.varimp_plot()
plt.show()

# Visualization: confusion matrix of predicted vs. true labels.
predictions = gbm_model.predict(test_h2o).as_data_frame()
# H2O returns enum predictions as strings; the ground-truth column may hold
# ints. sklearn's confusion_matrix rejects mixed label types, so normalize
# both sides to str before comparing.
conf_matrix = confusion_matrix(
    test_df[output_column_name].astype(str),
    predictions['predict'].astype(str),
)

disp = ConfusionMatrixDisplay(confusion_matrix=conf_matrix)
disp.plot(cmap=plt.cm.Blues)
plt.title('Confusion Matrix')
plt.show()

# Shut down the H2O cluster (optional). h2o.shutdown() is deprecated; the
# supported call is on the cluster object.
h2o.cluster().shutdown(prompt=False)