# -*- coding: utf-8 -*-
"""Untitled17.ipynb
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1GwdSjrwh3f6QCzOa8KHr_XkWy0KmZvdV
"""
import pickle
import warnings
from math import log2
from pathlib import Path

import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import roc_auc_score, average_precision_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm

warnings.filterwarnings('ignore')
# BASE_PATH was not defined in the uploaded script; a project root is assumed
# here (adjust to your environment).
BASE_PATH = Path('.')
PATHS = {
    'data_frozen': BASE_PATH / 'data' / 'frozen',
    'features': BASE_PATH / 'features',
    'models': BASE_PATH / 'models',
    'results': BASE_PATH / 'results',
}
for path in PATHS.values():
    path.mkdir(parents=True, exist_ok=True)
def shannon_entropy(seq):
    """Compute the Shannon entropy (in bits) of a sequence."""
    if not seq:
        return 0.0
    probs = [seq.count(aa) / len(seq) for aa in set(seq)]
    return -sum(p * log2(p) for p in probs if p > 0)
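# Quick sanity check on shannon_entropy (illustrative values, not from the
# original script): a homopolymer has 0 bits of entropy, and a window of four
# distinct residues has exactly 2 bits.
assert shannon_entropy('AAAA') == 0.0
assert abs(shannon_entropy('ACDE') - 2.0) < 1e-9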
df_features_full = pd.read_parquet(PATHS['features'] / 'features_classical_full.parquet')
df_features_strict = pd.read_parquet(PATHS['features'] / 'features_classical_mito_strict.parquet')
print(f" Features chargées (full): {len(df_features_full):,}")
print(f" Features chargées (strict): {len(df_features_strict):,}")
uniprot_file = BASE_PATH / 'data' / 'raw' / 'uniprot_human_reviewed.parquet'
df_uniprot = pd.read_parquet(uniprot_file)
seq_dict = dict(zip(df_uniprot['accession'], df_uniprot['sequence']))
print(f" Séquences: {len(seq_dict):,}")
def add_entropy_feature(df, seq_dict, window=15):
    """Add a local sequence entropy feature (±`window` residues around each position).

    Note: as in the original script, `position` is used as a 0-based index into
    the sequence; biological variant positions are usually 1-based, so verify
    the convention upstream.
    """
    entropies = []
    for _, row in tqdm(df.iterrows(), total=len(df), desc="Entropy"):
        seq = seq_dict.get(row['uniprot_acc'], '')
        pos = row['position']
        if seq and 0 <= pos < len(seq):
            start = max(0, pos - window)
            end = min(len(seq), pos + window + 1)
            local_seq = seq[start:end]
            entropies.append(shannon_entropy(local_seq))
        else:
            entropies.append(0.0)
    df['local_sequence_entropy'] = entropies
    return df
print("\n Ajout de l'entropie locale...")
df_features_full = add_entropy_feature(df_features_full, seq_dict)
df_features_strict = add_entropy_feature(df_features_strict, seq_dict)
df_features_full.to_parquet(PATHS['features'] / 'features_classical_full.parquet')
df_features_strict.to_parquet(PATHS['features'] / 'features_classical_mito_strict.parquet')
id_cols = ['mutation_idx', 'uniprot_acc', 'gene_symbol', 'position', 'wt_aa', 'mut_aa', 'label']
feature_cols = [c for c in df_features_full.columns if c not in id_cols]
print(f" Features: {len(feature_cols)}")
X_full = df_features_full[feature_cols].values
y_full = df_features_full['label'].values
X_strict = df_features_strict[feature_cols].values
y_strict = df_features_strict['label'].values
print(f" X shape: {X_full.shape}")
print(f" y: {np.sum(y_full==1)} pathogènes, {np.sum(y_full==0)} bénins")
print(f" X shape: {X_strict.shape}")
print(f" y: {np.sum(y_strict==1)} pathogènes, {np.sum(y_strict==0)} bénins")
# Replace NaN/inf so scaling and fitting see finite inputs. Note that the
# strict set is prepared here but only the full set is modeled below.
X_full = np.nan_to_num(X_full, nan=0.0, posinf=0.0, neginf=0.0)
X_strict = np.nan_to_num(X_strict, nan=0.0, posinf=0.0, neginf=0.0)
X_train, X_test, y_train, y_test = train_test_split(
    X_full, y_full, test_size=0.2, random_state=42, stratify=y_full
)
print(f" Train: {len(X_train)} ({np.sum(y_train==1)} patho)")
print(f" Test: {len(X_test)} ({np.sum(y_test==1)} patho)")
# Note: tree-based models such as GradientBoostingClassifier are insensitive
# to feature scaling; the scaler is kept to match the saved pipeline.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
model = GradientBoostingClassifier(
    n_estimators=300,
    max_depth=5,
    learning_rate=0.05,
    min_samples_leaf=10,
    subsample=0.8,
    random_state=42,
    verbose=0,
)
model.fit(X_train_scaled, y_train)
y_pred_proba = model.predict_proba(X_test_scaled)[:, 1]
y_pred = model.predict(X_test_scaled)
auc_roc = roc_auc_score(y_test, y_pred_proba)
auc_pr = average_precision_score(y_test, y_pred_proba)
print(f" AUC-ROC: {auc_roc:.4f}")
print(f" AUC-PR: {auc_pr:.4f}")
print(classification_report(y_test, y_pred, target_names=['Benign', 'Pathogenic']))
proteins = df_features_full['uniprot_acc'].unique()
print(f" Protéines uniques: {len(proteins)}")
lpocv_results = []
proteins_evaluated = 0
for protein in tqdm(proteins, desc="LPOCV"):
    test_mask = df_features_full['uniprot_acc'] == protein
    train_mask = ~test_mask
    if test_mask.sum() < 2:
        continue
    X_train_lpo = X_full[train_mask]
    y_train_lpo = y_full[train_mask]
    X_test_lpo = X_full[test_mask]
    y_test_lpo = y_full[test_mask]
    # Single-class test proteins are kept (the original had a no-op check
    # here): metrics are computed on the pooled predictions below, so each
    # held-out protein need not contain both classes.
    scaler_lpo = StandardScaler()
    X_train_lpo_scaled = scaler_lpo.fit_transform(X_train_lpo)
    X_test_lpo_scaled = scaler_lpo.transform(X_test_lpo)
    model_lpo = GradientBoostingClassifier(
        n_estimators=100,
        max_depth=4,
        learning_rate=0.1,
        min_samples_leaf=10,
        random_state=42,
        verbose=0,
    )
    model_lpo.fit(X_train_lpo_scaled, y_train_lpo)
    y_pred_lpo = model_lpo.predict_proba(X_test_lpo_scaled)[:, 1]
    for pred, true in zip(y_pred_lpo, y_test_lpo):
        lpocv_results.append({
            'protein': protein,
            'y_true': true,
            'y_pred_proba': pred,
        })
    proteins_evaluated += 1
df_lpocv = pd.DataFrame(lpocv_results)
print(f"\n Proteins evaluated: {proteins_evaluated}")
if len(df_lpocv) > 0 and df_lpocv['y_true'].nunique() > 1:
    auc_roc_lpocv = roc_auc_score(df_lpocv['y_true'], df_lpocv['y_pred_proba'])
    auc_pr_lpocv = average_precision_score(df_lpocv['y_true'], df_lpocv['y_pred_proba'])
    print("\n 📊 LPOCV RESULTS:")
    print(f" AUC-ROC: {auc_roc_lpocv:.4f}")
    print(f" AUC-PR: {auc_pr_lpocv:.4f}")
else:
    auc_roc_lpocv = 0
    auc_pr_lpocv = 0
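# Optional per-protein breakdown (a sketch not in the original script; AUC is
# only defined for proteins whose held-out variants include both classes):
# per_protein_auc = (
#     df_lpocv.groupby('protein')
#     .filter(lambda g: g['y_true'].nunique() > 1)
#     .groupby('protein')
#     .apply(lambda g: roc_auc_score(g['y_true'], g['y_pred_proba']))
# )
# print(per_protein_auc.describe())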
scaler_final = StandardScaler()
X_full_scaled = scaler_final.fit_transform(X_full)
model_final = GradientBoostingClassifier(
    n_estimators=300,
    max_depth=5,
    learning_rate=0.05,
    min_samples_leaf=10,
    subsample=0.8,
    random_state=42,
    verbose=0,
)
model_final.fit(X_full_scaled, y_full)
importances = model_final.feature_importances_
importance_df = pd.DataFrame({
    'feature': feature_cols,
    'importance': importances,
}).sort_values('importance', ascending=False)
for _, row in importance_df.head(20).iterrows():
    print(f" {row['importance']:.4f} {row['feature']}")
importance_df.to_csv(PATHS['results'] / 'feature_importances_classical.csv', index=False)
model_data = {
    'model': model_final,
    'scaler': scaler_final,
    'feature_cols': feature_cols,
    'metrics': {
        'auc_roc_split': auc_roc,
        'auc_pr_split': auc_pr,
        'auc_roc_lpocv': auc_roc_lpocv,
        'auc_pr_lpocv': auc_pr_lpocv,
    },
    'n_samples': len(X_full),
    'n_features': len(feature_cols),
}
with open(PATHS['models'] / 'model_classical_baseline.pkl', 'wb') as f:
    pickle.dump(model_data, f)
df_lpocv.to_parquet(PATHS['results'] / 'lpocv_predictions.parquet')
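# Example of reloading the saved bundle for later inference (a sketch, not in
# the original script; `X_new` is a hypothetical feature matrix with the same
# columns, in the same order, as `feature_cols`):
# with open(PATHS['models'] / 'model_classical_baseline.pkl', 'rb') as f:
#     bundle = pickle.load(f)
# X_new_scaled = bundle['scaler'].transform(np.nan_to_num(X_new))
# probs = bundle['model'].predict_proba(X_new_scaled)[:, 1]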