# IDP-Pathogenicity-Model / scripts / analyze_bias_and_train_strict_mito.py
# Uploaded by HarriziSaad ("Upload 14 files", commit f07511a) — these
# provenance lines are kept as comments so the file parses as valid Python.
# -*- coding: utf-8 -*-
"""Untitled17.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve
import matplotlib.pyplot as plt
from tqdm import tqdm
import pickle
# NOTE(review): BASE_PATH was referenced below but never defined anywhere in
# this script — running it raised NameError. In the original Colab notebook
# it presumably pointed at the project folder on Google Drive; default to the
# current working directory so the script is runnable. TODO: confirm/adjust.
BASE_PATH = Path('.')

# Canonical project sub-directories used throughout the pipeline.
PATHS = {
    'features': BASE_PATH / 'features',            # input feature parquet files
    'models': BASE_PATH / 'models',                # serialized trained models
    'results': BASE_PATH / 'results',              # predictions and metrics
    'figures': BASE_PATH / 'results' / 'figures',  # output plots
}
# Create the deepest output directory (and parents) if missing.
PATHS['figures'].mkdir(parents=True, exist_ok=True)
# Load the full classical-feature matrix and separate identifier columns
# from model features.
df_full = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')

id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']
_id_set = set(id_cols)
feature_cols_all = [col for col in df_full.columns if col not in _id_set]

# Length-derived features — excluded in the "no length" variant so the
# bias analysis can probe whether the model leans on protein length.
length_related = ['protein_length', 'position_absolute', 'distance_to_n_term', 'distance_to_c_term']
_length_set = set(length_related)
feature_cols_no_length = [col for col in feature_cols_all if col not in _length_set]

print(f" Features totales: {len(feature_cols_all)}")
print(f" Features sans longueur: {len(feature_cols_no_length)}")

# Design matrices with NaN/±inf sanitized to 0 so sklearn can consume them.
X_all = np.nan_to_num(df_full[feature_cols_all].values, nan=0.0, posinf=0.0, neginf=0.0)
X_no_length = np.nan_to_num(df_full[feature_cols_no_length].values, nan=0.0, posinf=0.0, neginf=0.0)
y = df_full['label'].values
proteins = df_full['uniprot_acc'].unique()
def quick_lpocv(X, y, proteins_list, df, max_proteins=100):
    """Fast leave-one-protein-out CV on a random subsample of proteins.

    For each sampled protein, trains a small GradientBoosting model on all
    other proteins and predicts on the held-out one; returns the pooled
    AUC-ROC over all held-out predictions, or 0 when no usable predictions
    were produced (empty results or a single class).
    """
    np.random.seed(42)  # reproducible protein subsample
    n_sample = min(max_proteins, len(proteins_list))
    sampled = np.random.choice(proteins_list, size=n_sample, replace=False)

    records = []
    for prot in tqdm(sampled, desc="LPOCV rapide"):
        held_out = df['uniprot_acc'] == prot
        kept = ~held_out
        # Too few mutations on this protein to evaluate meaningfully.
        if held_out.sum() < 2:
            continue

        # Fit the scaler on the training fold only — no test-set leakage.
        scaler = StandardScaler()
        X_tr = scaler.fit_transform(X[kept])
        X_te = scaler.transform(X[held_out])

        clf = GradientBoostingClassifier(n_estimators=50, max_depth=3, random_state=42)
        clf.fit(X_tr, y[kept])
        probs = clf.predict_proba(X_te)[:, 1]
        records.extend({'y_true': t, 'y_pred': p} for p, t in zip(probs, y[held_out]))

    df_res = pd.DataFrame(records)
    if len(df_res) > 0 and df_res['y_true'].nunique() > 1:
        return roc_auc_score(df_res['y_true'], df_res['y_pred'])
    return 0
# --- Strict mitochondrial subset ---------------------------------------
df_strict = pd.read_parquet(PATHS['features'] / 'features_classical_mito_strict.parquet')

labels = df_strict['label']
print(f" Mutations: {len(df_strict):,}")
print(f" Pathogènes: {(labels == 1).sum():,}")
print(f" Bénins: {(labels == 0).sum():,}")

# Same feature set as the full model; sanitize NaN/±inf to 0.
X_strict = np.nan_to_num(df_strict[feature_cols_all].values, nan=0.0, posinf=0.0, neginf=0.0)
y_strict = labels.values

proteins_strict = df_strict['uniprot_acc'].unique()
print(f" Protéines: {len(proteins_strict)}")
# Full LPOCV on the strict mitochondrial set: hold out one protein at a
# time, train on all others, and collect per-mutation predictions.
lpocv_strict_results = []
for protein in tqdm(proteins_strict, desc="LPOCV strict"):
    test_mask = df_strict['uniprot_acc'] == protein
    # Skip proteins with fewer than 2 mutations — too little to evaluate.
    if test_mask.sum() < 2:
        continue
    train_mask = ~test_mask
    X_train, y_train = X_strict[train_mask], y_strict[train_mask]
    X_test, y_test = X_strict[test_mask], y_strict[test_mask]

    # Fit scaling on the training fold only to avoid test-set leakage.
    scaler = StandardScaler()
    X_train_s = scaler.fit_transform(X_train)
    X_test_s = scaler.transform(X_test)

    model = GradientBoostingClassifier(n_estimators=100, max_depth=4, learning_rate=0.1, random_state=42)
    model.fit(X_train_s, y_train)
    y_pred = model.predict_proba(X_test_s)[:, 1]

    # FIX: the original used enumerate() but never used the index `i`;
    # iterate the pairs directly.
    for pred, true in zip(y_pred, y_test):
        lpocv_strict_results.append({
            'protein': protein,
            'y_true': true,
            'y_pred': pred,
        })
# Pool all held-out predictions and score them; metrics default to 0 when
# no predictions were collected or only one class is present.
df_lpocv_strict = pd.DataFrame(lpocv_strict_results)
scorable = len(df_lpocv_strict) > 0 and df_lpocv_strict['y_true'].nunique() > 1
if scorable:
    auc_roc_strict = roc_auc_score(df_lpocv_strict['y_true'], df_lpocv_strict['y_pred'])
    auc_pr_strict = average_precision_score(df_lpocv_strict['y_true'], df_lpocv_strict['y_pred'])
    print(f" AUC-ROC: {auc_roc_strict:.4f}")
    print(f" AUC-PR: {auc_pr_strict:.4f}")
else:
    auc_roc_strict = 0
    auc_pr_strict = 0
# Refit on ALL strict-mito data to produce the final deployable model.
scaler_strict = StandardScaler()
X_strict_scaled = scaler_strict.fit_transform(X_strict)
model_strict = GradientBoostingClassifier(
    n_estimators=300,
    max_depth=5,
    learning_rate=0.05,
    min_samples_leaf=5,  # regularize: no tiny leaves
    subsample=0.8,       # stochastic boosting for variance reduction
    random_state=42
)
model_strict.fit(X_strict_scaled, y_strict)

# Rank features by the fitted model's impurity-based importances.
importance_strict = pd.DataFrame({
    'feature': feature_cols_all,
    'importance': model_strict.feature_importances_
}).sort_values('importance', ascending=False)

print("\n Top 10 features (strict mito):")
# FIX: iterrows() returned an index `i` that was never used; itertuples()
# avoids the unused variable and is faster than iterrows().
for row in importance_strict.head(10).itertuples(index=False):
    print(f" {row.importance:.4f} {row.feature}")
# Bundle the fitted model with everything needed for later inference
# (scaler, feature list) plus the LPOCV metrics, then persist via pickle.
model_strict_data = {
    'model': model_strict,
    'scaler': scaler_strict,
    'feature_cols': feature_cols_all,
    'metrics': {
        'auc_roc_lpocv': auc_roc_strict,
        'auc_pr_lpocv': auc_pr_strict,
    },
    'n_samples': len(X_strict),
}
out_path = PATHS['models'] / 'model_classical_mito_strict.pkl'
with open(out_path, 'wb') as fh:
    pickle.dump(model_strict_data, fh)
# Baseline for comparison: LPOCV predictions previously computed on the
# FULL (non-strict) dataset, scored with the same two metrics.
df_lpocv_full = pd.read_parquet(PATHS['results'] / 'lpocv_predictions.parquet')
_y_true_full = df_lpocv_full['y_true']
_y_proba_full = df_lpocv_full['y_pred_proba']
auc_roc_full = roc_auc_score(_y_true_full, _y_proba_full)
auc_pr_full = average_precision_score(_y_true_full, _y_proba_full)