# bioinf595_formed/scripts/st4_model_training.py
# Uni-Mol training
import os

import joblib
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
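
# Expected input: formed_xyz_train.csv / formed_xyz_test.csv with at least
# 'name', 'xyz' (a multi-line atom block), and 'gap' columns, as used below.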

# Define a Uni-Mol-like simple neural network
class UniMolModel(nn.Module):
    def __init__(self, input_size):
        super(UniMolModel, self).__init__()
        self.fc1 = nn.Linear(input_size, 128)
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 1)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        x = self.fc3(x)
        return x
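
# Note: this is a plain MLP over flattened raw coordinates rather than the
# actual Uni-Mol transformer, so it is not invariant to atom ordering,
# translation, or rotation of the molecule.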

# Padding function to standardize the number of atoms (default max 20 atoms)
def pad_coords(coords, max_atoms=20):
    padded = np.zeros((max_atoms, 3))
    n_atoms = min(len(coords), max_atoms)
    padded[:n_atoms] = coords[:n_atoms]
    return padded.flatten()

# Safely parse an xyz string into padded, flattened coordinates
def xyz_to_coords(xyz_str, max_atoms=20):
    try:
        # Each line is parsed as "element x y z"; the element symbol is dropped
        coords = [list(map(float, line.split()[1:])) for line in xyz_str.strip().splitlines()]
        return pad_coords(coords, max_atoms)
    except Exception:
        # Fall back to all-zero features for malformed xyz blocks; padding sits
        # inside the try so ragged rows are caught here as well
        return np.zeros(max_atoms * 3)
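
# Example of the xyz block this parser assumes (element symbol followed by
# x, y, z; no atom-count/comment header lines; this is an assumption inferred
# from how the column is parsed, not verified against the CSVs):
#   C   0.0000   0.0000   0.0000
#   H   0.6290   0.6290   0.6290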

# Custom Dataset
class MoleculeDataset(Dataset):
    def __init__(self, dataframe, max_atoms=20):
        self.data = dataframe
        self.max_atoms = max_atoms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        coords = xyz_to_coords(self.data.iloc[idx]['xyz'], self.max_atoms)
        gap = self.data.iloc[idx]['gap']
        return torch.tensor(coords, dtype=torch.float32), torch.tensor(gap, dtype=torch.float32)
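
# Each sample is a flattened (max_atoms * 3,) float32 coordinate vector paired
# with a scalar 'gap' target (presumably the HOMO-LUMO gap).
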
# Load data
train_df = pd.read_csv('formed_xyz_train.csv')
test_df = pd.read_csv('formed_xyz_test.csv')
# Dataset & DataLoader
train_dataset = MoleculeDataset(train_df)
test_dataset = MoleculeDataset(test_df)
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
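# shuffle=False on the test loader keeps batch order aligned with test_df rows,
# which the 'name' column in the prediction CSVs below relies on.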
# Model initialization
input_size = len(train_dataset[0][0]) # 3D coordinates flattened
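# With the default max_atoms=20 this is 20 * 3 = 60 features.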

# List of models
models = {
    'UniMol': UniMolModel(input_size=input_size),
    'RandomForest': RandomForestRegressor(),
    'GradientBoosting': GradientBoostingRegressor()
}
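# The scikit-learn baselines use default hyperparameters and are fit on the
# same padded, flattened coordinate features as the neural network.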

# Loss function (the Adam optimizer is created per model inside the loop)
criterion = nn.MSELoss()
# Results list
results = []

# Training & evaluation loop
for model_id, model in models.items():
    if model_id == 'UniMol':
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
        num_epochs = 20
        for epoch in range(num_epochs):
            model.train()
            running_loss = 0.0
            for inputs, targets in train_loader:
                optimizer.zero_grad()
                outputs = model(inputs)
                # squeeze(-1) keeps a 1-D batch even when the final batch holds
                # a single sample (plain squeeze() would give a 0-dim scalar)
                loss = criterion(outputs.squeeze(-1), targets)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
            print(f'{model_id} - Epoch {epoch+1}/{num_epochs}, Loss: {running_loss / len(train_loader):.4f}')
        model.eval()
        predictions = []
        true_values = []
        names = test_df['name'].tolist()
        with torch.no_grad():
            for inputs, targets in test_loader:
                outputs = model(inputs)
                predictions.extend(outputs.squeeze(-1).tolist())
                true_values.extend(targets.tolist())
    else:
        X_train = np.array([xyz_to_coords(xyz, max_atoms=20) for xyz in train_df['xyz']])
        y_train = train_df['gap'].values
        X_test = np.array([xyz_to_coords(xyz, max_atoms=20) for xyz in test_df['xyz']])
        y_test = test_df['gap'].values
        model.fit(X_train, y_train)
        predictions = model.predict(X_test)
        true_values = y_test
        names = test_df['name'].tolist()
    # Evaluation metrics
    mse = mean_squared_error(true_values, predictions)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(true_values, predictions)
    r2 = r2_score(true_values, predictions)
    print(f'{model_id} - MSE: {mse:.4f}, RMSE: {rmse:.4f}, MAE: {mae:.4f}, R2: {r2:.4f}')

    # Save predictions
    prediction_df = pd.DataFrame({
        'name': names,
        'true_gap': true_values,
        'predicted_gap': predictions
    })
    prediction_df.to_csv(f'unimol_predictions_{model_id}.csv', index=False)

    # Save model
    if model_id == 'UniMol':
        torch.save(model.state_dict(), f'unimol_model_{model_id}.pth')
    else:
        joblib.dump(model, f'{model_id}_model.pkl')
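    # To reload later (sketch): the PyTorch weights go back into a fresh
    # UniMolModel via model.load_state_dict(torch.load(path)); the
    # scikit-learn models come back with joblib.load(path).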

    # Record performance
    results.append({
        'model_id': model_id,
        'MSE': mse,
        'RMSE': rmse,
        'MAE': mae,
        'R2': r2
    })
# Save all scores
metrics_df = pd.DataFrame(results)
metrics_df.to_csv('unimol_model_scores.csv', index=False)

# Rank models by R2 (top 3; with three models this lists all of them)
top_3_models = metrics_df.sort_values(by='R2', ascending=False).head(3)
print("Top 3 Models and their Performance Metrics:")
print(top_3_models)