# -*- coding: utf-8 -*-
"""Untitled17.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV

Pipeline: compare classical variant features against ESM-2 embeddings
(PCA-reduced) for pathogenicity classification using leave-one-protein-out
cross-validation (LPOCV), then retrain the best configuration on the full
dataset and persist the model bundle plus a comparison table.
"""
import pickle
import warnings
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import (
    average_precision_score,
    precision_recall_curve,
    roc_auc_score,
    roc_curve,
)
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm

warnings.filterwarnings('ignore')

# FIX: BASE_PATH was referenced below but never defined in the export
# (NameError at startup — it lived in an earlier notebook cell). Point it at
# the project root; adjust for your environment (e.g. a mounted Drive path).
BASE_PATH = Path('.')

PATHS = {
    'features': BASE_PATH / 'features',
    'embeddings': BASE_PATH / 'embeddings',
    'models': BASE_PATH / 'models',
    'results': BASE_PATH / 'results',
    'figures': BASE_PATH / 'results' / 'figures',
}
PATHS['figures'].mkdir(parents=True, exist_ok=True)

# ---- Load classical features -------------------------------------------------
df_features = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')
id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position',
           'wt_aa', 'mut_aa', 'label']
feature_cols = [c for c in df_features.columns if c not in id_cols]
X_features = df_features[feature_cols].values
# Neutralize NaN/inf produced upstream so the classifier can train.
X_features = np.nan_to_num(X_features, nan=0.0, posinf=0.0, neginf=0.0)
y = df_features['label'].values
proteins = df_features['uniprot_acc'].values
print(f" Features classiques: {X_features.shape}")

# ---- Load ESM-2 embeddings ---------------------------------------------------
X_emb_combined = np.load(PATHS['embeddings'] / 'embeddings_combined_full.npy')
X_emb_local = np.load(PATHS['embeddings'] / 'embeddings_local_full.npy')
print(f"{X_emb_combined.shape}")
print(f"{X_emb_local.shape}")
print(f"\n {np.sum(y==1)} pathogènes, {np.sum(y==0)} bénins")
print(f" {len(np.unique(proteins))}")

# ---- PCA reduction of the embeddings ----------------------------------------
n_components_combined = 128
pca_combined = PCA(n_components=n_components_combined, random_state=42)
X_emb_pca = pca_combined.fit_transform(X_emb_combined)
print(f" {X_emb_combined.shape[1]} → {n_components_combined}")
print(f" {pca_combined.explained_variance_ratio_.sum():.2%}")

n_components_local = 64
pca_local = PCA(n_components=n_components_local, random_state=42)
X_emb_local_pca = pca_local.fit_transform(X_emb_local)
print(f" {X_emb_local.shape[1]} → {n_components_local}")
print(f" {pca_local.explained_variance_ratio_.sum():.2%}")

# ---- Feature-set configurations to compare ----------------------------------
configs = [
    {
        'name': 'Features classiques',
        'X': X_features,
    },
    {
        'name': 'Embeddings ESM-2',
        'X': X_emb_pca,
    },
    {
        'name': 'Features + Embeddings',
        'X': np.concatenate([X_features, X_emb_pca], axis=1),
    },
    {
        'name': 'Features + Emb. Local',
        'X': np.concatenate([X_features, X_emb_local_pca], axis=1),
    },
]
for cfg in configs:
    print(f" {cfg['name']}: {cfg['X'].shape[1]} features")


def evaluate_lpocv_fast(X, y, proteins, n_estimators=100, max_depth=4):
    """Leave-one-protein-out CV with a gradient-boosted tree classifier.

    For each unique protein, trains on every other protein's variants and
    predicts on the held-out protein, pooling all out-of-fold predictions
    before computing the metrics.

    Args:
        X: 2-D feature matrix, one row per variant.
        y: binary labels aligned with ``X`` (1 = pathogenic, 0 = benign).
        proteins: protein accession per row; defines the CV folds.
        n_estimators: trees per GradientBoostingClassifier.
        max_depth: maximum depth of each tree.

    Returns:
        (auc_roc, auc_pr, df_res) where ``df_res`` has one row per pooled
        prediction with columns ``y_true`` and ``y_pred``. Metrics are
        (0, 0) when the pooled predictions contain fewer than two classes.
    """
    unique_proteins = np.unique(proteins)
    results = []
    for protein in tqdm(unique_proteins, desc="LPOCV", leave=False):
        test_mask = proteins == protein
        train_mask = ~test_mask
        n_test = test_mask.sum()
        if n_test < 2:
            # Skip proteins with a single variant: too small a test fold.
            continue
        X_train, y_train = X[train_mask], y[train_mask]
        X_test, y_test = X[test_mask], y[test_mask]
        # Scale with training-fold statistics only (no leakage into test).
        scaler = StandardScaler()
        X_train_s = scaler.fit_transform(X_train)
        X_test_s = scaler.transform(X_test)
        model = GradientBoostingClassifier(
            n_estimators=n_estimators,
            max_depth=max_depth,
            learning_rate=0.1,
            min_samples_leaf=10,
            subsample=0.8,
            random_state=42,
        )
        model.fit(X_train_s, y_train)
        y_pred = model.predict_proba(X_test_s)[:, 1]
        for pred, true in zip(y_pred, y_test):
            results.append({'y_true': true, 'y_pred': pred})

    df_res = pd.DataFrame(results)
    # ROC/PR AUC are only defined when both classes appear in the pool.
    if len(df_res) > 0 and len(df_res['y_true'].unique()) > 1:
        auc_roc = roc_auc_score(df_res['y_true'], df_res['y_pred'])
        auc_pr = average_precision_score(df_res['y_true'], df_res['y_pred'])
    else:
        auc_roc, auc_pr = 0, 0
    return auc_roc, auc_pr, df_res


# ---- Evaluate every configuration -------------------------------------------
results_all = {}
for cfg in configs:
    print(f"\n 📊 {cfg['name']}...")
    auc_roc, auc_pr, df_res = evaluate_lpocv_fast(
        cfg['X'], y, proteins, n_estimators=100, max_depth=4
    )
    results_all[cfg['name']] = {
        'auc_roc': auc_roc,
        'auc_pr': auc_pr,
        'predictions': df_res,
        'n_features': cfg['X'].shape[1],
    }
    print(f" AUC-ROC: {auc_roc:.4f}")
    print(f" AUC-PR: {auc_pr:.4f}")

# FIX: best_name was used below but never assigned (NameError in the export).
# Reconstructed as the configuration with the highest LPOCV AUC-ROC.
best_name = max(results_all, key=lambda name: results_all[name]['auc_roc'])

best_X = None
for cfg in configs:
    if cfg['name'] == best_name:
        best_X = cfg['X']
        break

# ---- Retrain the best configuration on the full dataset ---------------------
print(f" Entraînement: {best_name}...")
scaler_final = StandardScaler()
X_scaled = scaler_final.fit_transform(best_X)
model_final = GradientBoostingClassifier(
    n_estimators=300,
    max_depth=5,
    learning_rate=0.05,
    min_samples_leaf=10,
    subsample=0.8,
    random_state=42,
)
model_final.fit(X_scaled, y)

# ---- Feature importances (only when classical features are in the mix) ------
if 'Features' in best_name:
    importances = model_final.feature_importances_
    # Column names must mirror the concatenation order used to build the
    # winning configuration's design matrix.
    if best_name == 'Features classiques':
        imp_names = feature_cols
    elif best_name == 'Features + Embeddings':
        imp_names = feature_cols + [f'emb_pca_{i}' for i in range(X_emb_pca.shape[1])]
    else:
        imp_names = feature_cols + [f'emb_local_{i}' for i in range(X_emb_local_pca.shape[1])]
    importance_df = pd.DataFrame({
        'feature': imp_names,
        'importance': importances,
    }).sort_values('importance', ascending=False)
    print("\n Top 15 features:")
    for _, row in importance_df.head(15).iterrows():
        print(f" {row['importance']:.4f} {row['feature']}")
    importance_df.to_csv(PATHS['results'] / 'feature_importances_best_model.csv',
                         index=False)

# ---- Persist the final model bundle -----------------------------------------
model_data = {
    'model': model_final,
    'scaler': scaler_final,
    # Keep only the PCA actually used by the winning configuration.
    'pca_combined': pca_combined if 'Embeddings' in best_name and 'Local' not in best_name else None,
    'pca_local': pca_local if 'Local' in best_name else None,
    'feature_cols': feature_cols,
    'config_name': best_name,
    'metrics': {
        'auc_roc_lpocv': results_all[best_name]['auc_roc'],
        'auc_pr_lpocv': results_all[best_name]['auc_pr'],
    },
}
with open(PATHS['models'] / 'model_best.pkl', 'wb') as f:
    pickle.dump(model_data, f)

# FIX: df_comparison was saved below but never constructed (NameError in the
# export). Reconstructed as a per-configuration summary of the LPOCV metrics.
df_comparison = pd.DataFrame([
    {
        'config': name,
        'n_features': res['n_features'],
        'auc_roc': res['auc_roc'],
        'auc_pr': res['auc_pr'],
    }
    for name, res in results_all.items()
])
df_comparison.to_csv(PATHS['results'] / 'comparison_features_embeddings.csv',
                     index=False)