"""Untitled17.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""
|
|
| import pandas as pd |
| import numpy as np |
| from pathlib import Path |
| from sklearn.preprocessing import StandardScaler |
| from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve |
| from sklearn.linear_model import LogisticRegression |
| import matplotlib.pyplot as plt |
| from tqdm import tqdm |
| import pickle |
| import torch |
| import torch.nn as nn |
| import warnings |
| warnings.filterwarnings('ignore') |
|
|
|
|
# Directory layout rooted at BASE_PATH.
# NOTE(review): BASE_PATH is not defined in this section — presumably set in an
# earlier notebook cell as a pathlib.Path; confirm before running standalone.
_layout = {
    'features': ('features',),
    'embeddings': ('embeddings',),
    'models': ('models',),
    'results': ('results',),
    'figures': ('results', 'figures'),
}
PATHS = {name: BASE_PATH.joinpath(*parts) for name, parts in _layout.items()}

# Classical (hand-crafted) per-mutation features.
df_features = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')

# Identifier/label columns; everything else is a model feature.
id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']
feature_cols = [c for c in df_features.columns if c not in id_cols]

# Feature matrix as float32 with non-finite values zeroed out.
X_features = np.nan_to_num(
    df_features[feature_cols].to_numpy(dtype=np.float32),
    nan=0.0, posinf=0.0, neginf=0.0,
)
y = df_features['label'].to_numpy()
proteins = df_features['uniprot_acc'].to_numpy()

# Precomputed ESM-2 embeddings (combined and local variants).
X_emb_combined = np.load(PATHS['embeddings'] / 'embeddings_combined_full.npy').astype(np.float32)
X_emb_local = np.load(PATHS['embeddings'] / 'embeddings_local_full.npy').astype(np.float32)

print(f" {X_features.shape}")
print(f" {X_emb_combined.shape}")
|
|
|
|
|
from sklearn.decomposition import PCA

# Reduce embedding dimensionality before modelling.
# NOTE(review): the PCA is fit on the FULL dataset prior to cross-validation,
# so test-protein information leaks into the projection — consider fitting
# per fold if strict evaluation is required.
pca_combined = PCA(n_components=128, random_state=42)
pca_local = PCA(n_components=64, random_state=42)

X_emb_pca = pca_combined.fit_transform(X_emb_combined).astype(np.float32)
X_emb_local_pca = pca_local.fit_transform(X_emb_local).astype(np.float32)
|
|
|
|
|
|
# Feature-set configurations to benchmark; 'name' strings are user-facing
# labels and are kept verbatim.
configs = []
for label, matrix in (
    ('Features classiques', X_features),
    ('Embeddings ESM-2', X_emb_pca),
    ('Features + Embeddings', np.concatenate([X_features, X_emb_pca], axis=1)),
    ('Features + Emb. Local', np.concatenate([X_features, X_emb_local_pca], axis=1)),
):
    configs.append({'name': label, 'X': matrix})
|
|
class SimpleMLP(nn.Module):
    """Small feed-forward binary classifier emitting sigmoid probabilities.

    Architecture: input -> hidden (ReLU, dropout 0.3) ->
    hidden//2 (ReLU, dropout 0.2) -> 1 sigmoid unit.

    Args:
        input_dim: number of input features.
        hidden_dim: width of the first hidden layer (default 256).
    """

    def __init__(self, input_dim, hidden_dim=256):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim // 2, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Bug fix: squeeze(-1) instead of bare squeeze(). A bare squeeze()
        # collapses a (1, 1) output to a 0-d scalar when the batch has a
        # single sample, which breaks BCELoss against a (1,)-shaped target
        # and makes the prediction array non-iterable.
        return self.net(x).squeeze(-1)
|
|
def train_mlp(X_train, y_train, X_test, input_dim, device, epochs=50, lr=0.001):
    """Train a fresh SimpleMLP and return sigmoid scores for X_test.

    Args:
        X_train, y_train: training matrix and binary labels (array-like).
        X_test: test matrix to score after training.
        input_dim: number of input features (must equal X_train.shape[1]).
        device: torch device the model and tensors are moved to.
        epochs: number of full passes over the training set.
        lr: Adam learning rate.

    Returns:
        1-D numpy array of predicted probabilities for X_test.
    """
    model = SimpleMLP(input_dim).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-4)
    criterion = nn.BCELoss()

    X_train_t = torch.FloatTensor(X_train).to(device)
    # BCELoss requires float targets; flatten so shapes always match outputs.
    y_train_t = torch.FloatTensor(y_train).view(-1).to(device)
    X_test_t = torch.FloatTensor(X_test).to(device)

    model.train()
    batch_size = 512

    for epoch in range(epochs):
        # Fresh shuffle each epoch.
        perm = torch.randperm(len(X_train_t))

        for start in range(0, len(X_train_t), batch_size):
            idx = perm[start:start + batch_size]

            optimizer.zero_grad()
            # view(-1) guards the degenerate final mini-batch of size 1,
            # where the model's squeeze would otherwise yield a 0-d tensor
            # and BCELoss would see mismatched shapes.
            outputs = model(X_train_t[idx]).view(-1)
            loss = criterion(outputs, y_train_t[idx])
            loss.backward()
            optimizer.step()

    model.eval()
    with torch.no_grad():
        # view(-1) keeps a single-sample test set as a 1-D, iterable array.
        y_pred = model(X_test_t).view(-1).cpu().numpy()

    return y_pred
|
|
def evaluate_lpocv_gpu(X, y, proteins, device, epochs=30):
    """Leave-one-protein-out cross-validation with the MLP.

    Each unique protein in turn forms the test fold (all its mutations),
    and the MLP is trained on the remaining proteins.

    Args:
        X: feature matrix (n_samples, n_features).
        y: binary labels aligned with X.
        proteins: per-sample protein identifiers used to define folds.
        device: torch device passed through to train_mlp.
        epochs: MLP training epochs per fold.

    Returns:
        (auc_roc, auc_pr, df_res) where df_res holds pooled per-sample
        predictions; both AUCs are 0 when metrics cannot be computed.
    """
    unique_proteins = np.unique(proteins)
    results = []

    for protein in tqdm(unique_proteins, desc="LPOCV GPU"):
        test_mask = proteins == protein
        train_mask = ~test_mask

        # Skip proteins with fewer than 2 mutations.
        if test_mask.sum() < 2:
            continue

        # Bug fix: fit the scaler on the training folds ONLY. The original
        # fit a single StandardScaler on the full matrix before splitting,
        # leaking test-protein statistics into the training data.
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X[train_mask])
        X_test = scaler.transform(X[test_mask])
        y_train, y_test = y[train_mask], y[test_mask]

        y_pred = train_mlp(X_train, y_train, X_test, X.shape[1], device, epochs=epochs)

        for pred, true in zip(y_pred, y_test):
            results.append({'y_true': true, 'y_pred': float(pred)})

    df_res = pd.DataFrame(results)

    # Both metrics need at least one sample of each class.
    if len(df_res) > 0 and df_res['y_true'].nunique() > 1:
        auc_roc = roc_auc_score(df_res['y_true'], df_res['y_pred'])
        auc_pr = average_precision_score(df_res['y_true'], df_res['y_pred'])
    else:
        auc_roc, auc_pr = 0, 0

    return auc_roc, auc_pr, df_res
|
|
# Bug fix: `device` was used below but never defined in this section,
# which would raise NameError at runtime. Define it here (idempotent if an
# earlier notebook cell already set it to the same value).
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Per-configuration LPOCV results for the MLP.
results_all = {}

for cfg in configs:
    print(f"\n 📊 {cfg['name']}...")

    auc_roc, auc_pr, df_res = evaluate_lpocv_gpu(
        cfg['X'], y, proteins, device, epochs=30
    )

    results_all[cfg['name']] = {
        'auc_roc': auc_roc,
        'auc_pr': auc_pr,
        'predictions': df_res,
        'n_features': cfg['X'].shape[1],
    }

    print(f" AUC-ROC: {auc_roc:.4f}")
    print(f" AUC-PR: {auc_pr:.4f}")
|
|
|
|
def evaluate_lpocv_logreg(X, y, proteins):
    """Leave-one-protein-out CV with a logistic-regression baseline.

    Args:
        X: feature matrix (n_samples, n_features).
        y: binary labels aligned with X.
        proteins: per-sample protein identifiers used to define folds.

    Returns:
        (auc_roc, auc_pr) over the pooled out-of-fold predictions;
        both 0 when metrics cannot be computed.
    """
    unique_proteins = np.unique(proteins)
    results = []

    for protein in tqdm(unique_proteins, desc="LPOCV LogReg", leave=False):
        test_mask = proteins == protein
        train_mask = ~test_mask

        # Skip proteins with fewer than 2 mutations.
        if test_mask.sum() < 2:
            continue

        # Bug fix: fit the scaler on the training folds ONLY (the original
        # fit on the full matrix, leaking test-protein statistics).
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X[train_mask])
        X_test = scaler.transform(X[test_mask])
        y_train, y_test = y[train_mask], y[test_mask]

        model = LogisticRegression(max_iter=500, C=0.1, random_state=42)
        model.fit(X_train, y_train)
        y_pred = model.predict_proba(X_test)[:, 1]

        for pred, true in zip(y_pred, y_test):
            results.append({'y_true': true, 'y_pred': pred})

    df_res = pd.DataFrame(results)

    # Bug fix: also require both classes to be present, matching the guard
    # in evaluate_lpocv_gpu — roc_auc_score raises on single-class input.
    if len(df_res) > 0 and df_res['y_true'].nunique() > 1:
        auc_roc = roc_auc_score(df_res['y_true'], df_res['y_pred'])
        auc_pr = average_precision_score(df_res['y_true'], df_res['y_pred'])
    else:
        auc_roc, auc_pr = 0, 0

    return auc_roc, auc_pr
|
|
# Attach the logistic-regression baseline scores to each configuration.
for cfg in configs:
    roc, pr = evaluate_lpocv_logreg(cfg['X'], y, proteins)
    entry = results_all[cfg['name']]
    entry['auc_roc_logreg'] = roc
    entry['auc_pr_logreg'] = pr
    print(f" {cfg['name']}: LogReg AUC-ROC = {roc:.4f}")
|
|
|
|
# Summarize all configurations into one table, best MLP AUC-ROC first.
comparison_data = [
    {
        'Configuration': name,
        'Features': res['n_features'],
        'AUC-ROC (MLP)': res['auc_roc'],
        'AUC-PR (MLP)': res['auc_pr'],
        'AUC-ROC (LogReg)': res.get('auc_roc_logreg', 0),
    }
    for name, res in results_all.items()
]

df_comparison = pd.DataFrame(comparison_data).sort_values(
    'AUC-ROC (MLP)', ascending=False
)

print("\n" + df_comparison.to_string(index=False))

best_row = df_comparison.iloc[0]
best_name = best_row['Configuration']
best_auc = best_row['AUC-ROC (MLP)']
print(f"\n 🏆 Meilleur: {best_name} (AUC-ROC = {best_auc:.4f})")

# Persist the comparison table for downstream reporting.
df_comparison.to_csv(PATHS['results'] / 'comparison_final.csv', index=False)